diff --git a/.clang-format b/.clang-format index 30863c27a8..8aead30c8c 100644 --- a/.clang-format +++ b/.clang-format @@ -1,29 +1,29 @@ -# This file is used by clang-format to autoformat paddle source code -# -# The clang-format is part of llvm toolchain. -# It need to install llvm and clang to format source code style. -# -# The basic usage is, -# clang-format -i -style=file PATH/TO/SOURCE/CODE -# -# The -style=file implicit use ".clang-format" file located in one of -# parent directory. -# The -i means inplace change. -# -# The document of clang-format is -# http://clang.llvm.org/docs/ClangFormat.html -# http://clang.llvm.org/docs/ClangFormatStyleOptions.html ---- -Language: Cpp -BasedOnStyle: Google -IndentWidth: 2 -TabWidth: 2 -ContinuationIndentWidth: 4 -MaxEmptyLinesToKeep: 2 -AccessModifierOffset: -2 # The private/protected/public has no indent in class -Standard: Cpp11 -AllowAllParametersOfDeclarationOnNextLine: true -BinPackParameters: false -BinPackArguments: false -... - +# This file is used by clang-format to autoformat paddle source code +# +# The clang-format is part of llvm toolchain. +# It need to install llvm and clang to format source code style. +# +# The basic usage is, +# clang-format -i -style=file PATH/TO/SOURCE/CODE +# +# The -style=file implicit use ".clang-format" file located in one of +# parent directory. +# The -i means inplace change. +# +# The document of clang-format is +# http://clang.llvm.org/docs/ClangFormat.html +# http://clang.llvm.org/docs/ClangFormatStyleOptions.html +--- +Language: Cpp +BasedOnStyle: Google +IndentWidth: 2 +TabWidth: 2 +ContinuationIndentWidth: 4 +MaxEmptyLinesToKeep: 2 +AccessModifierOffset: -2 # The private/protected/public has no indent in class +Standard: Cpp11 +AllowAllParametersOfDeclarationOnNextLine: true +BinPackParameters: false +BinPackArguments: false +... + diff --git a/.clang_format.hook b/.clang_format.hook index 1d92821686..586119911e 100644 --- a/.clang_format.hook +++ b/.clang_format.hook @@ -1,15 +1,15 @@ -#!/bin/bash -set -e - -readonly VERSION="3.8" - -version=$(clang-format -version) - -if ! [[ $version == *"$VERSION"* ]]; then - echo "clang-format version check failed." - echo "a version contains '$VERSION' is needed, but get '$version'" - echo "you can install the right version, and make an soft-link to '\$PATH' env" - exit -1 -fi - -clang-format $@ +#!/bin/bash +set -e + +readonly VERSION="3.8" + +version=$(clang-format -version) + +if ! [[ $version == *"$VERSION"* ]]; then + echo "clang-format version check failed." + echo "a version contains '$VERSION' is needed, but get '$version'" + echo "you can install the right version, and make an soft-link to '\$PATH' env" + exit -1 +fi + +clang-format $@ diff --git a/.gitignore b/.gitignore index 491f881f32..8404f736fc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,134 +1,134 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -__MACOSX -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pycharm -.DS_Store -.idea/ -FETCH_HEAD - -# vscode -.vscode - -# numpy -.npy - -# vtk -*.vtk -*.vtu - -# auto generated version file by setuptools_scm -ppsci/_version.py +# Byte-compiled / optimized / DLL files +__pycache__/ +__MACOSX +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pycharm +.DS_Store +.idea/ +FETCH_HEAD + +# vscode +.vscode + +# numpy +.npy + +# vtk +*.vtk +*.vtu + +# auto generated version file by setuptools_scm +ppsci/_version.py diff --git a/.gitmodules b/.gitmodules index 906815b803..6c9a0444b6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream [submodule "competition/IJCAI_2024_CAR"] path = competition/IJCAI_2024_CAR url = https://atomgit.com/paddlenumberone/ijcai_car.git @@ -22,3 +23,8 @@ [submodule "ppsci/externals/paddle_scatter"] path = ppsci/externals/paddle_scatter url = https://github.com/PFCCLab/paddle_scatter +======= +[submodule "competition/IJCAI_2024_CAR"] + path = competition/IJCAI_2024_CAR + url = https://atomgit.com/paddlenumberone/ijcai_car.git +>>>>>>> Stashed changes diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 33fb9870fa..e3d8ab5d1a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,56 +1,56 @@ -repos: - - 
repo: https://github.com/PyCQA/isort - rev: 5.11.5 - hooks: - - id: isort - args: ["--multi-line=7", "--sl", "--profile", "black", "--filter-files"] - - - repo: https://github.com/psf/black - rev: 22.3.0 - hooks: - - id: black - - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: "v0.0.272" - hooks: - - id: ruff - - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: a11d9314b22d8f8c7556443875b731ef05965464 - hooks: - - id: check-merge-conflict - - id: check-symlinks - - id: detect-private-key - files: (?!.*paddle)^.*$ - - id: end-of-file-fixer - - id: trailing-whitespace - - id: check-case-conflict - - id: check-yaml - exclude: "mkdocs.yml|recipe/meta.yaml" - - id: pretty-format-json - args: [--autofix] - - id: requirements-txt-fixer - - - repo: https://github.com/Lucas-C/pre-commit-hooks - rev: v1.0.1 - hooks: - - id: forbid-crlf - files: \.md$ - - id: remove-crlf - files: \.md$ - - id: forbid-tabs - files: \.md$ - - id: remove-tabs - files: \.md$ - - - repo: local - hooks: - - id: clang-format - name: clang-format - description: Format files with ClangFormat - entry: bash .clang_format.hook -i - language: system - files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$ - -exclude: | - ^jointContribution/ +repos: + - repo: https://github.com/PyCQA/isort + rev: 5.11.5 + hooks: + - id: isort + args: ["--multi-line=7", "--sl", "--profile", "black", "--filter-files"] + + - repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black + + - repo: https://github.com/charliermarsh/ruff-pre-commit + rev: "v0.0.272" + hooks: + - id: ruff + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: a11d9314b22d8f8c7556443875b731ef05965464 + hooks: + - id: check-merge-conflict + - id: check-symlinks + - id: detect-private-key + files: (?!.*paddle)^.*$ + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-case-conflict + - id: check-yaml + exclude: "mkdocs.yml|recipe/meta.yaml" + - id: pretty-format-json + args: [--autofix] + - id: requirements-txt-fixer + + - repo: https://github.com/Lucas-C/pre-commit-hooks + rev: v1.0.1 + hooks: + - id: forbid-crlf + files: \.md$ + - id: remove-crlf + files: \.md$ + - id: forbid-tabs + files: \.md$ + - id: remove-tabs + files: \.md$ + + - repo: local + hooks: + - id: clang-format + name: clang-format + description: Format files with ClangFormat + entry: bash .clang_format.hook -i + language: system + files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|cuh|proto)$ + +exclude: | + ^jointContribution/ diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 2fa58a8f93..83a5f1e382 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -1,23 +1,23 @@ -# .readthedocs.yaml -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -# Required -version: 2 - -# Set the version of Python and other tools you might need -build: - os: ubuntu-22.04 - tools: - python: "3.9" - -# Build documentation in the docs/ directory with Sphinx -mkdocs: - configuration: mkdocs.yml - -# We recommend specifying your dependencies to enable reproducible builds: -# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html -python: - install: - - requirements: docs/requirements.txt - - requirements: requirements.txt +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.9" + +# Build 
documentation in the docs/ directory with Sphinx +mkdocs: + configuration: mkdocs.yml + +# We recommend specifying your dependencies to enable reproducible builds: +# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements.txt + - requirements: requirements.txt diff --git a/LICENSE b/LICENSE index 8db3174f5a..3d3145f7c8 100644 --- a/LICENSE +++ b/LICENSE @@ -1,203 +1,203 @@ -Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/README.md b/README.md index 24c257639c..4c29aede2b 100644 --- a/README.md +++ b/README.md @@ -1,342 +1,332 @@ -# PaddleScience - - -> *Developed with [PaddlePaddle](https://www.paddlepaddle.org.cn/)* - -[![Version](https://img.shields.io/pypi/v/paddlesci)](https://pypi.org/project/paddlesci/) -[![Conda](https://anaconda.org/paddlescience/paddlescience/badges/version.svg)](https://anaconda.org/PaddleScience/paddlescience) -[![Python Version](https://img.shields.io/pypi/pyversions/paddlesci)](https://pypi.org/project/paddlesci/) -[![Doc](https://img.shields.io/readthedocs/paddlescience-docs/latest)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/) -[![Code Style](https://img.shields.io/badge/code_style-black-black)](https://github.com/psf/black) -[![Hydra](https://img.shields.io/badge/config-hydra-89b8cd)](https://hydra.cc/) -[![License](https://img.shields.io/github/license/PaddlePaddle/PaddleScience)](https://github.com/PaddlePaddle/PaddleScience/blob/develop/LICENSE) -[![Update](https://anaconda.org/paddlescience/paddlescience/badges/latest_release_date.svg)](https://anaconda.org/PaddleScience/paddlescience) - - -[📘 使用文档](https://paddlescience-docs.readthedocs.io/zh-cn/latest/) | -[🛠️ 安装使用](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/install_setup/) | -[📘 快速开始](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/quickstart/) | -[👀 案例列表](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/allen_cahn/) | -[🆕 最近更新](https://paddlescience-docs.readthedocs.io/zh-cn/latest/#_4) | -[🤔 问题反馈](https://github.com/PaddlePaddle/PaddleScience/issues/new/choose) - - -🔥 [飞桨AI for Science共创计划2期](https://aistudio.baidu.com/activitydetail/1502019365),免费提供海量算力等资源,欢迎报名。 - -🔥 [飞桨AI for Science前沿讲座系列课程 & 代码入门与实操课程进行中](https://mp.weixin.qq.com/s/n-vGnGM9di_3IByTC56hUw),清华、北大、中科院等高校机构知名学者分享前沿研究成果,火热报名中。 - - - -## 👀简介 - -PaddleScience 是一个基于深度学习框架 PaddlePaddle 开发的科学计算套件,利用深度神经网络的学习能力和 PaddlePaddle 框架的自动(高阶)微分机制,解决物理、化学、气象等领域的问题。支持物理机理驱动、数据驱动、数理融合三种求解方式,并提供了基础 API 和详尽文档供用户使用与二次开发。 - - -## 📝案例列表 - -

Mathematics (AI for Math)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 亥姆霍兹方程 | [SPINN(Helmholtz3D)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/spinn) | 机理驱动 | SPINN | 无监督学习 | - | [Paper](https://arxiv.org/pdf/2306.15969) | -| 相场方程 | [Allen-Cahn](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/allen_cahn) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat) | [Paper](https://arxiv.org/pdf/2402.00326) | -| 微分方程 | [拉普拉斯方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/laplace2d) | 机理驱动 | MLP | 无监督学习 | - | - | -| 微分方程 | [伯格斯方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/deephpms) | 机理驱动 | MLP | 无监督学习 | [Data](https://github.com/maziarraissi/DeepHPMs/tree/master/Data) | [Paper](https://arxiv.org/pdf/1801.06637.pdf) | -| 微分方程 | [非线性偏微分方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/pirbn) | 机理驱动 | PIRBN | 无监督学习 | - | [Paper](https://arxiv.org/abs/2304.06234) | -| 微分方程 | [洛伦兹方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/lorenz) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957) | -| 微分方程 | [若斯叻方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/rossler) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957) | -| 算子学习 | [DeepONet](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/deeponet) | 数据驱动 | MLP | 监督学习 | [Data](https://deepxde.readthedocs.io/en/latest/demos/operator/antiderivative_unaligned.html) | [Paper](https://export.arxiv.org/pdf/1910.03193.pdf) | -| 微分方程 | [梯度增强的物理知识融合 PDE 求解](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/gpinn/poisson_1d.py) | 机理驱动 | gPINN | 无监督学习 | - | [Paper](https://doi.org/10.1016/j.cma.2022.114823) | -| 积分方程 | [沃尔泰拉积分方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/volterra_ide) | 机理驱动 | MLP | 无监督学习 | - | [Project](https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/Volterra_IDE.py) | -| 微分方程 | [分数阶微分方程](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fpde/fractional_poisson_2d.py) | 机理驱动 | MLP | 无监督学习 | - | - | -| 光孤子 | [Optical soliton](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/nlsmb) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1007/s11071-023-08824-w)| -| 光纤怪波 | [Optical rogue wave](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/nlsmb) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1007/s11071-023-08824-w)| -| 域分解 | [XPINN](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/xpinns) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.4208/cicp.OA-2020-0164)| -| 布鲁塞尔扩散系统 | [3D-Brusselator](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/brusselator3d) | 数据驱动 | LNO | 监督学习 | - | [Paper](https://arxiv.org/abs/2303.10528)| -| 符号回归 | [Transformer4SR](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/transformer4sr.md) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/abs/2312.04070)| -| 算子学习 | [隐空间神经算子LNO](https://github.com/L-I-M-I-T/LatentNeuralOperator) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/abs/2406.03923)| - -
-

Technical Sciences (AI for Technology)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 汽车表面阻力预测 | [DrivAerNet](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/drivaernet/) | 数据驱动 | RegDGCNN | 监督学习 | [Data](https://dataset.bj.bcebos.com/PaddleScience/DNNFluid-Car/DrivAer%2B%2B/data.tar) | [Paper](https://www.researchgate.net/publication/378937154_DrivAerNet_A_Parametric_Car_Dataset_for_Data-Driven_Aerodynamic_Design_and_Graph-Based_Drag_Prediction) | -| 一维线性对流问题 | [1D 线性对流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/adv_cvit/) | 数据驱动 | ViT | 监督学习 | [Data](https://github.com/Zhengyu-Huang/Operator-Learning/tree/main/data) | [Paper](https://arxiv.org/abs/2405.13998) | -| 非定常不可压流体 | [2D 方腔浮力驱动流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ns_cvit/) | 数据驱动 | ViT | 监督学习 | [Data](https://huggingface.co/datasets/pdearena/NavierStokes-2D) | [Paper](https://arxiv.org/abs/2405.13998) | -| 定常不可压流体 | [Re3200 2D 定常方腔流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ldc2d_steady) | 机理驱动 | MLP | 无监督学习 | - | | -| 定常不可压流体 | [2D 达西流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/darcy2d) | 机理驱动 | MLP | 无监督学习 | - | | -| 定常不可压流体 | [2D 管道流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/labelfree_DNN_surrogate) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://arxiv.org/abs/1906.02382) | -| 定常不可压流体 | [3D 颅内动脉瘤](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/aneurysm) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/aneurysm/aneurysm_dataset.tar) | [Project](https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/intermediate/adding_stl_files.html)| -| 定常不可压流体 | [任意 2D 几何体绕流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/deepcfd) | 数据驱动 | DeepCFD | 监督学习 | - | [Paper](https://arxiv.org/abs/2004.08826)| -| 非定常不可压流体 | [2D 非定常方腔流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ldc2d_unsteady) | 机理驱动 | MLP | 无监督学习 | - | - | -| 非定常不可压流体 | [Re100 2D 圆柱绕流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/cylinder2d_unsteady) | 机理驱动 | MLP | 半监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/cylinder2d_unsteady_Re100/cylinder2d_unsteady_Re100_dataset.tar) | [Paper](https://arxiv.org/abs/2004.08826)| -| 非定常不可压流体 | [Re100~750 2D 圆柱绕流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/cylinder2d_unsteady_transformer_physx) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957)| -| 可压缩流体 | [2D 空气激波](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/shock_wave) | 机理驱动 | PINN-WE | 无监督学习 | - | [Paper](https://arxiv.org/abs/2206.03864)| -| 飞行器设计 | [MeshGraphNets](https://aistudio.baidu.com/projectdetail/5322713) | 数据驱动 | GNN | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/184320) | [Paper](https://arxiv.org/abs/2010.03409)| -| 飞行器设计 | [火箭发动机真空羽流](https://aistudio.baidu.com/projectdetail/4486133) | 数据驱动 | CNN | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/167250) | - | -| 飞行器设计 | [Deep-Flow-Prediction](https://aistudio.baidu.com/projectdetail/5671596) | 数据驱动 | TurbNetG | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/197778) | [Paper](https://arxiv.org/abs/1810.08217) | -| 通用流场模拟 | [气动外形设计](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/amgnet) | 数据驱动 | AMGNet | 监督学习 | 
[Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip) | [Paper](https://arxiv.org/abs/1810.08217) | -| 流固耦合 | [涡激振动](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/viv) | 机理驱动 | MLP | 半监督学习 | [Data](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fsi/VIV_Training_Neta100.mat) | [Paper](https://arxiv.org/abs/2206.03864)| -| 多相流 | [气液两相流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/bubble) | 机理驱动 | BubbleNet | 半监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/BubbleNet/bubble.mat) | [Paper](https://pubs.aip.org/aip/adv/article/12/3/035153/2819394/Predicting-micro-bubble-dynamics-with-semi-physics)| -| 多相流 | [twophasePINN](https://aistudio.baidu.com/projectdetail/5379212) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1016/j.mlwa.2021.100029)| -| 流场高分辨率重构 | [2D 湍流流场重构](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/tempoGAN) | 数据驱动 | tempoGAN | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_valid.mat) | [Paper](https://dl.acm.org/doi/10.1145/3197517.3201304)| -| 流场高分辨率重构 | [2D 湍流流场重构](https://aistudio.baidu.com/projectdetail/4493261?contributionType=1) | 数据驱动 | cycleGAN | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_valid.mat) | [Paper](https://arxiv.org/abs/2007.15324)| -| 流场高分辨率重构 | [基于Voronoi嵌入辅助深度学习的稀疏传感器全局场重建](https://aistudio.baidu.com/projectdetail/5807904) | 数据驱动 | CNN | 监督学习 | [Data1](https://drive.google.com/drive/folders/1K7upSyHAIVtsyNAqe6P8TY1nS5WpxJ2c)
[Data2](https://drive.google.com/drive/folders/1pVW4epkeHkT2WHZB7Dym5IURcfOP4cXu)
[Data3](https://drive.google.com/drive/folders/1xIY_jIu-hNcRY-TTf4oYX1Xg4_fx8ZvD) | [Paper](https://arxiv.org/pdf/2202.11214.pdf) | -| 流场预测 | [Catheter](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/catheter/) | 数据驱动 | FNO | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/291940) | [Paper](https://www.science.org/doi/pdf/10.1126/sciadv.adj1741) | -| 求解器耦合 | [CFD-GCN](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/cfdgcn) | 数据驱动 | GCN | 监督学习 | [Data](https://aistudio.baidu.com/aistudio/datasetdetail/184778)
[Mesh](https://paddle-org.bj.bcebos.com/paddlescience/datasets/CFDGCN/meshes.tar) | [Paper](https://arxiv.org/abs/2007.04439)| -| 受力分析 | [1D 欧拉梁变形](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/euler_beam) | 机理驱动 | MLP | 无监督学习 | - | - | -| 受力分析 | [2D 平板变形](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/biharmonic2d) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://arxiv.org/abs/2108.07243) | -| 受力分析 | [3D 连接件变形](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/bracket) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/bracket/bracket_dataset.tar) | [Tutorial](https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/foundational/linear_elasticity.html) | -| 受力分析 | [结构震动模拟](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/phylstm) | 机理驱动 | PhyLSTM | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/PhyLSTM/data_boucwen.mat) | [Paper](https://arxiv.org/abs/2002.10253) | -| 受力分析 | [2D 弹塑性结构](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/epnn) | 机理驱动 | EPNN | 无监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/epnn/dstate-16-plas.dat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/epnn/dstress-16-plas.dat) | [Paper](https://arxiv.org/abs/2204.12088) | -| 受力分析和逆问题 | [3D 汽车控制臂变形](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/control_arm) | 机理驱动 | MLP | 无监督学习 | - | - | -| 受力分析和逆问题 | [3D 心脏仿真](https://paddlescience-docs.readthedocs.io/zh/examples/heart.md) | 数理融合 | PINN | 监督学习 | - | - | -| 拓扑优化 | [2D 拓扑优化](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/topopt) | 数据驱动 | TopOptNN | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/topopt/top_dataset.h5) | [Paper](https://arxiv.org/pdf/1709.09578) | -| 热仿真 | [1D 换热器热仿真](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/heat_exchanger) | 机理驱动 | PI-DeepONet | 无监督学习 | - | - | -| 热仿真 | [2D 热仿真](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/heat_pinn) | 机理驱动 | PINN | 无监督学习 | - | [Paper](https://arxiv.org/abs/1711.10561)| -| 热仿真 | [2D 芯片热仿真](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/chip_heat) | 机理驱动 | PI-DeepONet | 无监督学习 | - | [Paper](https://doi.org/10.1063/5.0194245)| - -
-

Materials Science (AI for Material)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 材料设计 | [散射板设计(反问题)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/hpinns) | 数理融合 | 数据驱动 | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/hPINNs/hpinns_holo_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/hPINNs/hpinns_holo_valid.mat) | [Paper](https://arxiv.org/pdf/2102.04626.pdf) | -| 晶体材料属性预测 | [CGCNN](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/cgcnn/) | 数据驱动 | GNN | 监督学习 | [MP](https://next-gen.materialsproject.org/) / [Perovskite](https://cmr.fysik.dtu.dk/cubic_perovskites/cubic_perovskites.html) / [C2DB](https://cmr.fysik.dtu.dk/c2db/c2db.html) / [test](https://paddle-org.bj.bcebos.com/paddlescience%2Fdatasets%2Fcgcnn%2Fcgcnn-test.zip) | [Paper](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301) | -| 分子生成 | [MoFlow](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/moflow/) | 数据驱动 | Flow Model | 监督学习 | [qm9/ zink250k](https://aistudio.baidu.com/datasetdetail/282687) | [Paper](https://arxiv.org/abs/2006.10137v1) | -| 分子属性预测 | [IFM](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ifm/) | 数据驱动 | MLP | 监督学习 | [tox21/sider/hiv/bace/bbbp](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ifm/#:~:text=molecules%20%E6%95%B0%E6%8D%AE%E9%9B%86-,dataset.zip,-%EF%BC%8C%E6%88%96Google%20Drive) | [Paper](https://openreview.net/pdf?id=NLFqlDeuzt) | - - -
-

Earth Science (AI for Earth Science)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 天气预报 | [Extformer-MoE 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/extformer_moe.md) | 数据驱动 | FourCastNet | 监督学习 | [enso](https://tianchi.aliyun.com/dataset/98942) | - | -| 天气预报 | [FourCastNet 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/fourcastnet) | 数据驱动 | FourCastNet | 监督学习 | [ERA5](https://app.globus.org/file-manager?origin_id=945b3c9e-0f8c-11ed-8daf-9f359c660fbd&origin_path=%2F~%2Fdata%2F) | [Paper](https://arxiv.org/pdf/2202.11214.pdf) | -| 天气预报 | [NowCastNet 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/nowcastnet) | 数据驱动 | NowCastNet | 监督学习 | [MRMS](https://app.globus.org/file-manager?origin_id=945b3c9e-0f8c-11ed-8daf-9f359c660fbd&origin_path=%2F~%2Fdata%2F) | [Paper](https://www.nature.com/articles/s41586-023-06184-4) | -| 天气预报 | [GraphCast 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/graphcast) | 数据驱动 | GraphCastNet | 监督学习 | - | [Paper](https://arxiv.org/abs/2212.12794) | -| 天气预报 | [GenCast 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/gencast) | 数据驱动 | Diffusion | 监督学习 | [Gencast](https://console.cloud.google.com/storage/browser/dm_graphcast) | [Paper](https://arxiv.org/abs/2312.15796) | -| 天气预报 | [FengWu 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/fengwu) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/pdf/2304.02948) | -| 天气预报 | [Pangu-Weather 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/pangu_weather) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/pdf/2211.02556) | -| 大气污染物 | [UNet 污染物扩散](https://aistudio.baidu.com/projectdetail/5663515?channel=0&channelType=0&sUid=438690&shared=1&ts=1698221963752) | 数据驱动 | UNet | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/198102) | - | -| 天气预报 | [DGMR 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/dgmr.md) | 数据驱动 | DGMR | 监督学习 | [UK dataset](https://huggingface.co/datasets/openclimatefix/nimrod-uk-1km) | [Paper](https://arxiv.org/pdf/2104.00954.pdf) | -| 地震波形反演 | [VelocityGAN 地震波形反演](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/velocity_gan.md) | 数据驱动 | VelocityGAN | 监督学习 | [OpenFWI](https://openfwi-lanl.github.io/docs/data.html#vel) | [Paper](https://arxiv.org/abs/1809.10262v6) | - - -## 🕘最近更新 - -- 基于 PaddleScience 的 ADR 方程求解方法 [Physics-informed neural networks for advection–diffusion–Langmuir adsorption processes](https://doi.org/10.1063/5.0221924) 被 Physics of Fluids 2024 接受。 -- 添加 [IJCAI 2024: 任意三维几何外形车辆的风阻快速预测竞赛](https://competition.atomgit.com/competitionInfo?id=7f3f276465e9e845fd3a811d2d6925b5),track A, B, C 的 paddle/pytorch 代码链接。 -- 添加 SPINN(基于 Helmholtz3D 方程求解) [helmholtz3d](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/spinn/)。 -- 添加 CVit(基于 Advection 方程和 N-S 方程求解) [CVit(Navier-Stokes)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ns_cvit/)、[CVit(Advection)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/adv_cvit/)。 -- 添加 PirateNet(基于 Allen-cahn 方程和 N-S 方程求解) [Allen-Cahn](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/allen_cahn/)、[LDC2D(Re3200)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ldc2d_steady/)。 -- 基于 PaddleScience 的快速热仿真方法 [A fast general thermal simulation model based on MultiBranch Physics-Informed deep operator neural 
network](https://doi.org/10.1063/5.0194245) 被 Physics of Fluids 2024 接受。 -- 添加多目标优化算法 [Relobralo](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/api/loss/mtl/#ppsci.loss.mtl.Relobralo) 。 -- 添加气泡流求解案例([Bubble](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/bubble))、机翼优化案例([DeepCFD](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/deepcfd/))、热传导仿真案例([HeatPINN](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/heat_pinn))、非线性短临预报模型([Nowcasting(仅推理)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/nowcastnet))、拓扑优化案例([TopOpt](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/topopt))、矩形平板线弹性方程求解案例([Biharmonic2D](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/biharmonic2d))。 -- 添加二维血管案例([LabelFree-DNN-Surrogate](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/labelfree_DNN_surrogate/#4))、空气激波案例([ShockWave](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/shock_wave/))、去噪网络模型([DUCNN](https://github.com/PaddlePaddle/PaddleScience/tree/develop/jointContribution/DU_CNN))、风电预测模型([Deep Spatial Temporal](https://github.com/PaddlePaddle/PaddleScience/tree/develop/jointContribution/Deep-Spatio-Temporal))、域分解模型([XPINNs](https://github.com/PaddlePaddle/PaddleScience/tree/develop/jointContribution/XPINNs))、积分方程求解案例([Volterra Equation](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/volterra_ide))、分数阶方程求解案例([Fractional Poisson 2D](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fpde/fractional_poisson_2d.py))。 -- 针对串联方程和复杂方程场景,`Equation` 模块支持基于 [sympy](https://docs.sympy.org/dev/tutorials/intro-tutorial/intro.html) 的符号计算,并支持和 python 函数混合使用([#507](https://github.com/PaddlePaddle/PaddleScience/pull/507)、[#505](https://github.com/PaddlePaddle/PaddleScience/pull/505))。 -- `Geometry` 模块和 `InteriorConstraint`、`InitialConstraint` 支持计算 SDF 微分功能([#539](https://github.com/PaddlePaddle/PaddleScience/pull/539))。 -- 添加 **M**ulti**T**ask**L**earning(`ppsci.loss.mtl`) 多任务学习模块,针对多任务优化(如 PINN 方法)进一步提升性能,使用方式:[多任务学习指南](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/user_guide/#24)([#493](https://github.com/PaddlePaddle/PaddleScience/pull/505)、[#492](https://github.com/PaddlePaddle/PaddleScience/pull/505))。 - - - -## ✨特性 - -- 支持简单几何和复杂 STL 几何的采样与布尔运算。 -- 支持包括 Dirichlet、Neumann、Robin 以及自定义边界条件。 -- 支持物理机理驱动、数据驱动、数理融合三种问题求解方式。涵盖流体、结构、气象等领域 20+ 案例。 -- 支持结果可视化输出与日志结构化保存。 -- 完善的 type hints,用户使用和代码贡献全流程文档,经典案例 AI studio 快速体验,降低使用门槛,提高开发效率。 -- 支持基于 sympy 符号计算库的方程表示与联立方程组计算。 -- 更多特性正在开发中... - - -## 🚀安装使用 - -### 安装 PaddlePaddle - - -请根据您的运行环境,访问 [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/linux-pip.html) 官网,安装 3.0 或 develop 版的 PaddlePaddle。 - -安装完毕之后,运行以下命令,验证 Paddle 是否安装成功。 - -``` shell -python -c "import paddle; paddle.utils.run_check()" -``` - -如果出现 `PaddlePaddle is installed successfully! Let's start deep learning with PaddlePaddle now.` 信息,说明您已成功安装,可以继续安装 PaddleScience。 - - -### 安装 PaddleScience - -1. 基础功能安装 - - **从以下四种安装方式中,任选一种均可安装。** - - - git 源码安装[**推荐**] - - 执行以下命令,从 github 上 clone PaddleScience 源代码,并以 editable 的方式安装 PaddleScience。 - - ``` shell - git clone -b develop https://github.com/PaddlePaddle/PaddleScience.git - # 若 github clone 速度比较慢,可以使用 gitee clone - # git clone -b develop https://gitee.com/paddlepaddle/PaddleScience.git - - cd PaddleScience - - # install paddlesci with editable mode - python -m pip install -e . 
-i https://pypi.tuna.tsinghua.edu.cn/simple - ``` - - - - pip 安装 - - 执行以下命令以 pip 的方式安装 release / nightly build 版本的 PaddleScience。 - - ``` shell - # release - python -m pip install -U paddlesci -i https://pypi.tuna.tsinghua.edu.cn/simple - # nightly build - # python -m pip install https://paddle-qa.bj.bcebos.com/PaddleScience/whl/latest/dist/paddlesci-0.0.0-py3-none-any.whl -i https://pypi.tuna.tsinghua.edu.cn/simple - ``` - - - - conda 安装 - - 执行以下命令以 conda 的方式安装 release / nightly build 版本的 PaddleScience。 - - ``` shell - # nightly build - conda install paddlescience::paddlesci -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle -c conda-forge - # release - # conda install paddlescience::paddlescience=1.3.0 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle -c conda-forge - ``` - - - - 设置 PYTHONPATH 并手动安装 requirements - - 如果在您的环境中,上述两种方式都无法正常安装,则可以选择本方式,在终端内临时将环境变量 `PYTHONPATH` 设置为 PaddleScience 的**绝对路径**,如下所示。 - - ``` shell - cd PaddleScience - export PYTHONPATH=$PYTHONPATH:$PWD # for linux - set PYTHONPATH=%cd% # for windows - python -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple # manually install requirements - ``` - - 注:上述方式的优点是步骤简单无需安装,缺点是当环境变量生效的终端被关闭后,需要重新执行上述命令设置 `PYTHONPATH` 才能再次使用 PaddleScience,较为繁琐。 - -2. 验证安装 - - ``` py - python -c "import ppsci; ppsci.utils.run_check()" - ``` - -3. 开始使用 - - ``` py - import ppsci - - # write your code here... - ``` - -如需基于复杂几何文件(`*.stl`, `*.mesh`, `*.obj`)文件进行训练、测试等流程,请参考完整安装流程:[**安装与使用**](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/install_setup/) - -## ⚡️快速开始 - -请参考 [**快速开始**](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/quickstart/) - -## 🎈生态工具 - - -除 PaddleScience 外,Paddle 框架同时支持了科学计算领域相关的研发套件和基础工具: - -| 工具 | 简介 | 支持情况 | -| -- | -- | -- | -| [DeepXDE](https://github.com/lululxvi/deepxde/tree/master?tab=readme-ov-file#deepxde) | 方程求解套件 | 全量支持 | -| [DeepMD-kit](https://docs.deepmodeling.com/projects/deepmd/en/latest/index.html) | 分子动力学套件 | 部分支持 | -| [Modulus-sym](https://github.com/PaddlePaddle/modulus-sym/tree/paddle?tab=readme-ov-file#modulus-symbolic-betapaddle-backend) | AI仿真套件 | 全量支持 | -| [NVIDIA/warp](https://github.com/NVIDIA/warp) | 基于 Python 的 GPU 高性能仿真和图形库 | 全量支持 | -| [tensorly](https://github.com/tensorly/tensorly) | 张量运算库 | 全量支持 | -| [Open3D](https://github.com/PFCCLab/Open3D.git) | 三维图形库 | 全量支持 | -| [neuraloperator](https://github.com/PFCCLab/neuraloperator) | 神经算子库 | 全量支持 | -| [paddle_scatter](https://github.com/PFCCLab/paddle_scatter) | 张量稀疏计算库 | 全量支持 | -| [paddle_harmonics](https://github.com/PFCCLab/paddle_harmonics.git) | 球面谐波变换库 | 全量支持 | -| [deepali](https://github.com/PFCCLab/deepali) | 图像、点云配准库 | 全量支持 | -| [DLPACK(v0.8)](https://dmlc.github.io/dlpack/latest/index.html) | 跨框架张量内存共享协议 | 全量支持 | - - - -## 💬支持与建议 - -如在使用过程中遇到问题或想提出开发建议,欢迎在 [**Discussion**](https://github.com/PaddlePaddle/PaddleScience/discussions/new?category=general) 中提出,或者在 [**Issue**](https://github.com/PaddlePaddle/PaddleScience/issues/new/choose) 页面新建 issue,会有专业的研发人员进行解答。 - - - -## 👫开源共建 - -PaddleScience 项目欢迎并依赖开发人员和开源社区中的用户,会不定期推出开源活动。 - -> 在开源活动中如需使用 PaddleScience 进行开发,可参考 [**PaddleScience 开发与贡献指南**](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/development/) 以提升开发效率和质量。 - -- 🔥第七期黑客松 - - 面向全球开发者的深度学习领域编程活动,鼓励开发者了解与参与飞桨深度学习开源项目。活动进行中:[PaddlePaddle Hackathon 7th 开源贡献个人挑战赛](https://github.com/PaddlePaddle/Paddle/issues/67603) - -- 🎁快乐开源 - - 旨在鼓励更多的开发者参与到飞桨科学计算社区的开源建设中,帮助社区修复 bug 或贡献 feature,加入开源、共建飞桨。了解编程基本知识的入门用户即可参与,活动进行中: - [PaddleScience 
快乐开源活动表单](https://github.com/PaddlePaddle/PaddleScience/issues/379) - - - -## 🎯共创计划 - -PaddleScience 作为一个开源项目,欢迎来各行各业的伙伴携手共建基于飞桨的 AI for Science 领域顶尖开源项目, 打造活跃的前瞻性的 AI for Science 开源社区,建立产学研闭环,推动科研创新与产业赋能。点击了解 [飞桨AI for Science共创计划](https://aistudio.baidu.com/activitydetail/1502019365)。 - - - -## ❤️致谢 - -- PaddleScience 的部分模块和案例设计受 [NVIDIA-Modulus](https://github.com/NVIDIA/modulus/tree/main)、[DeepXDE](https://github.com/lululxvi/deepxde/tree/master)、[PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP/tree/develop)、[PaddleClas](https://github.com/PaddlePaddle/PaddleClas/tree/develop) 等优秀开源套件的启发。 - -- PaddleScience 的部分案例和代码由以下优秀社区开发者贡献,(完整的贡献者请参考: [Contributors](https://github.com/PaddlePaddle/PaddleScience/graphs/contributors)): - [Asthestarsfalll](https://github.com/Asthestarsfalll), - [co63oc](https://github.com/co63oc), - [MayYouBeProsperous](https://github.com/MayYouBeProsperous), - [AndPuQing](https://github.com/AndPuQing), - [lknt](https://github.com/lknt), - [mrcangye](https://github.com/mrcangye), - [yangguohao](https://github.com/yangguohao), - [ooooo-create](https://github.com/ooooo-create), - [megemini](https://github.com/megemini), - [DUCH714](https://github.com/DUCH714), - [zlynna](https://github.com/zlynna), - [jjyaoao](https://github.com/jjyaoao), - [jiamingkong](https://github.com/jiamingkong), - [Liyulingyue](https://github.com/Liyulingyue), - [DrRyanHuang](https://github.com/DrRyanHuang), - [zbt78](https://github.com/zbt78), - [Gxinhu](https://github.com/Gxinhu), - [XYM](https://github.com/XYM), - [xusuyong](https://github.com/xusuyong), - [DrownFish19](https://github.com/DrownFish19), - [NKNaN](https://github.com/NKNaN), - [ruoyunbai](https://github.com/ruoyunbai), - [sanbuphy](https://github.com/sanbuphy), - [ccsuzzh](https://github.com/ccsuzzh), - [enkilee](https://github.com/enkilee), - [GreatV](https://github.com/GreatV) - ... 
- -## 🤝合作单位 - -![cooperation](./docs/images/overview/cooperation.png) - - -## 📜开源协议 - -[Apache License 2.0](https://github.com/PaddlePaddle/PaddleScience/blob/develop/LICENSE) - +# PaddleScience + + +> *Developed with [PaddlePaddle](https://www.paddlepaddle.org.cn/)* + +[![Version](https://img.shields.io/pypi/v/paddlesci)](https://pypi.org/project/paddlesci/) +[![Conda](https://anaconda.org/paddlescience/paddlescience/badges/version.svg)](https://anaconda.org/PaddleScience/paddlescience) +[![Python Version](https://img.shields.io/pypi/pyversions/paddlesci)](https://pypi.org/project/paddlesci/) +[![Doc](https://img.shields.io/readthedocs/paddlescience-docs/latest)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/) +[![Code Style](https://img.shields.io/badge/code_style-black-black)](https://github.com/psf/black) +[![Hydra](https://img.shields.io/badge/config-hydra-89b8cd)](https://hydra.cc/) +[![License](https://img.shields.io/github/license/PaddlePaddle/PaddleScience)](https://github.com/PaddlePaddle/PaddleScience/blob/develop/LICENSE) +[![Update](https://anaconda.org/paddlescience/paddlescience/badges/latest_release_date.svg)](https://anaconda.org/PaddleScience/paddlescience) + + +[📘 使用文档](https://paddlescience-docs.readthedocs.io/zh-cn/latest/) | +[🛠️ 安装使用](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/install_setup/) | +[📘 快速开始](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/quickstart/) | +[👀 案例列表](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/allen_cahn/) | +[🆕 最近更新](https://paddlescience-docs.readthedocs.io/zh-cn/latest/#_4) | +[🤔 问题反馈](https://github.com/PaddlePaddle/PaddleScience/issues/new/choose) + +🔥 [飞桨AI for Science前沿讲座系列课程 & 代码入门与实操课程进行中 ](https://mp.weixin.qq.com/s/n-vGnGM9di_3IByTC56hUw),清华、北大、中科院等高校机构知名学者分享前沿研究成果,火热报名中。 + +🔥 [开放原子第二届开源大赛:飞桨科学计算工具组件开发大赛](https://competition.atomgit.com/competitionInfo?id=805ad94637707d062f24e54265d85731),总奖金25万人民币,火热报名中 + +🔥 [PaddlePaddle Hackathon 7th 开源贡献个人挑战赛](https://github.com/PaddlePaddle/Paddle/issues/67603) + +🔥 [CIKM 2024: AI辅助的先进空气动力学-优化汽车设计以实现最佳性能](https://competition.atomgit.com/competitionInfo?id=cda4e961b0c25858ca0fd2a4bdf87520),已进入评奖阶段。 + +🔥 [IJCAI 2024: 任意三维几何外形车辆的风阻快速预测竞赛](https://competition.atomgit.com/competitionInfo?id=7f3f276465e9e845fd3a811d2d6925b5),track A, B, C 代码: [paddle实现](./jointContribution/IJCAI_2024/README.md) | [pytorch实现](https://competition.atomgit.com/competitionInfo?id=7f3f276465e9e845fd3a811d2d6925b5)(点击**排行榜**可查看各个赛道前10名的代码) + + +## 👀简介 + +PaddleScience 是一个基于深度学习框架 PaddlePaddle 开发的科学计算套件,利用深度神经网络的学习能力和 PaddlePaddle 框架的自动(高阶)微分机制,解决物理、化学、气象等领域的问题。支持物理机理驱动、数据驱动、数理融合三种求解方式,并提供了基础 API 和详尽文档供用户使用与二次开发。 + + +## 📝案例列表 + +

+数学(AI for Math)
+
+
+| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 |
+|-----|---------|-----|---------|----|---------|---------|
+| 亥姆霍兹方程 | [SPINN(Helmholtz3D)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/spinn) | 机理驱动 | SPINN | 无监督学习 | - | [Paper](https://arxiv.org/pdf/2306.15969) |
+| 相场方程 | [Allen-Cahn](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/allen_cahn) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat) | [Paper](https://arxiv.org/pdf/2402.00326) |
+| 微分方程 | [拉普拉斯方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/laplace2d) | 机理驱动 | MLP | 无监督学习 | - | - |
+| 微分方程 | [伯格斯方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/deephpms) | 机理驱动 | MLP | 无监督学习 | [Data](https://github.com/maziarraissi/DeepHPMs/tree/master/Data) | [Paper](https://arxiv.org/pdf/1801.06637.pdf) |
+| 微分方程 | [非线性偏微分方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/pirbn) | 机理驱动 | PIRBN | 无监督学习 | - | [Paper](https://arxiv.org/abs/2304.06234) |
+| 微分方程 | [洛伦兹方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/lorenz) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957) |
+| 微分方程 | [若斯叻方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/rossler) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957) |
+| 算子学习 | [DeepONet](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/deeponet) | 数据驱动 | MLP | 监督学习 | [Data](https://deepxde.readthedocs.io/en/latest/demos/operator/antiderivative_unaligned.html) | [Paper](https://export.arxiv.org/pdf/1910.03193.pdf) |
+| 微分方程 | [梯度增强的物理知识融合 PDE 求解](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/gpinn/poisson_1d.py) | 机理驱动 | gPINN | 无监督学习 | - | [Paper](https://doi.org/10.1016/j.cma.2022.114823) |
+| 积分方程 | [沃尔泰拉积分方程](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/volterra_ide) | 机理驱动 | MLP | 无监督学习 | - | [Project](https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/Volterra_IDE.py) |
+| 微分方程 | [分数阶微分方程](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fpde/fractional_poisson_2d.py) | 机理驱动 | MLP | 无监督学习 | - | - |
+| 光孤子 | [Optical soliton](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/nlsmb) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1007/s11071-023-08824-w)|
+| 光纤怪波 | [Optical rogue wave](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/nlsmb) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1007/s11071-023-08824-w)|
+| 域分解 | [XPINN](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/xpinns) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.4208/cicp.OA-2020-0164)|
+| 布鲁塞尔扩散系统 | [3D-Brusselator](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/brusselator3d) | 数据驱动 | LNO | 监督学习 | - | [Paper](https://arxiv.org/abs/2303.10528)|
+| 符号回归 | [Transformer4SR](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/transformer4sr.md) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/abs/2312.04070)|
+
+
+技术科学(AI for Technology)
+
+ +| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | +|-----|---------|-----|---------|----|---------|---------| +| 一维线性对流问题 | [1D 线性对流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/adv_cvit/) | 数据驱动 | ViT | 监督学习 | [Data](https://github.com/Zhengyu-Huang/Operator-Learning/tree/main/data) | [Paper](https://arxiv.org/abs/2405.13998) | +| 非定常不可压流体 | [2D 方腔浮力驱动流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ns_cvit/) | 数据驱动 | ViT | 监督学习 | [Data](https://huggingface.co/datasets/pdearena/NavierStokes-2D) | [Paper](https://arxiv.org/abs/2405.13998) | +| 定常不可压流体 | [Re3200 2D 定常方腔流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ldc2d_steady) | 机理驱动 | MLP | 无监督学习 | - | | +| 定常不可压流体 | [2D 达西流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/darcy2d) | 机理驱动 | MLP | 无监督学习 | - | | +| 定常不可压流体 | [2D 管道流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/labelfree_DNN_surrogate) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://arxiv.org/abs/1906.02382) | +| 定常不可压流体 | [3D 颅内动脉瘤](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/aneurysm) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/aneurysm/aneurysm_dataset.tar) | [Project](https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/intermediate/adding_stl_files.html)| +| 定常不可压流体 | [任意 2D 几何体绕流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/deepcfd) | 数据驱动 | DeepCFD | 监督学习 | - | [Paper](https://arxiv.org/abs/2004.08826)| +| 非定常不可压流体 | [2D 非定常方腔流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ldc2d_unsteady) | 机理驱动 | MLP | 无监督学习 | - | - | +| 非定常不可压流体 | [Re100 2D 圆柱绕流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/cylinder2d_unsteady) | 机理驱动 | MLP | 半监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/cylinder2d_unsteady_Re100/cylinder2d_unsteady_Re100_dataset.tar) | [Paper](https://arxiv.org/abs/2004.08826)| +| 非定常不可压流体 | [Re100~750 2D 圆柱绕流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/cylinder2d_unsteady_transformer_physx) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957)| +| 可压缩流体 | [2D 空气激波](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/shock_wave) | 机理驱动 | PINN-WE | 无监督学习 | - | [Paper](https://arxiv.org/abs/2206.03864)| +| 飞行器设计 | [MeshGraphNets](https://aistudio.baidu.com/projectdetail/5322713) | 数据驱动 | GNN | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/184320) | [Paper](https://arxiv.org/abs/2010.03409)| +| 飞行器设计 | [火箭发动机真空羽流](https://aistudio.baidu.com/projectdetail/4486133) | 数据驱动 | CNN | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/167250) | - | +| 飞行器设计 | [Deep-Flow-Prediction](https://aistudio.baidu.com/projectdetail/5671596) | 数据驱动 | TurbNetG | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/197778) | [Paper](https://arxiv.org/abs/1810.08217) | +| 通用流场模拟 | [气动外形设计](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/amgnet) | 数据驱动 | AMGNet | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip) | [Paper](https://arxiv.org/abs/1810.08217) | +| 流固耦合 | [涡激振动](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/viv) | 机理驱动 | MLP | 半监督学习 | [Data](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fsi/VIV_Training_Neta100.mat) | [Paper](https://arxiv.org/abs/2206.03864)| +| 多相流 | 
[气液两相流](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/bubble) | 机理驱动 | BubbleNet | 半监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/BubbleNet/bubble.mat) | [Paper](https://pubs.aip.org/aip/adv/article/12/3/035153/2819394/Predicting-micro-bubble-dynamics-with-semi-physics)| +| 多相流 | [twophasePINN](https://aistudio.baidu.com/projectdetail/5379212) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1016/j.mlwa.2021.100029)| +| 流场高分辨率重构 | [2D 湍流流场重构](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/tempoGAN) | 数据驱动 | tempoGAN | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_valid.mat) | [Paper](https://dl.acm.org/doi/10.1145/3197517.3201304)| +| 流场高分辨率重构 | [2D 湍流流场重构](https://aistudio.baidu.com/projectdetail/4493261?contributionType=1) | 数据驱动 | cycleGAN | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_valid.mat) | [Paper](https://arxiv.org/abs/2007.15324)| +| 流场高分辨率重构 | [基于Voronoi嵌入辅助深度学习的稀疏传感器全局场重建](https://aistudio.baidu.com/projectdetail/5807904) | 数据驱动 | CNN | 监督学习 | [Data1](https://drive.google.com/drive/folders/1K7upSyHAIVtsyNAqe6P8TY1nS5WpxJ2c)
[Data2](https://drive.google.com/drive/folders/1pVW4epkeHkT2WHZB7Dym5IURcfOP4cXu)
[Data3](https://drive.google.com/drive/folders/1xIY_jIu-hNcRY-TTf4oYX1Xg4_fx8ZvD) | [Paper](https://arxiv.org/pdf/2202.11214.pdf) | +| 流场预测 | [Catheter](https://aistudio.baidu.com/projectdetail/5379212) | 数据驱动 | FNO | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/291940) | [Paper](https://www.science.org/doi/pdf/10.1126/sciadv.adj1741) | +| 求解器耦合 | [CFD-GCN](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/cfdgcn) | 数据驱动 | GCN | 监督学习 | [Data](https://aistudio.baidu.com/aistudio/datasetdetail/184778)
[Mesh](https://paddle-org.bj.bcebos.com/paddlescience/datasets/CFDGCN/meshes.tar) | [Paper](https://arxiv.org/abs/2007.04439)| +| 受力分析 | [1D 欧拉梁变形](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/euler_beam) | 机理驱动 | MLP | 无监督学习 | - | - | +| 受力分析 | [2D 平板变形](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/biharmonic2d) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://arxiv.org/abs/2108.07243) | +| 受力分析 | [3D 连接件变形](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/bracket) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/bracket/bracket_dataset.tar) | [Tutorial](https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/foundational/linear_elasticity.html) | +| 受力分析 | [结构震动模拟](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/phylstm) | 机理驱动 | PhyLSTM | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/PhyLSTM/data_boucwen.mat) | [Paper](https://arxiv.org/abs/2002.10253) | +| 受力分析 | [2D 弹塑性结构](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/epnn) | 机理驱动 | EPNN | 无监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/epnn/dstate-16-plas.dat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/epnn/dstress-16-plas.dat) | [Paper](https://arxiv.org/abs/2204.12088) | +| 受力分析和逆问题 | [3D 汽车控制臂变形](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/control_arm) | 机理驱动 | MLP | 无监督学习 | - | - | +| 受力分析和逆问题 | [3D 心脏仿真](https://paddlescience-docs.readthedocs.io/zh/examples/heart.md) | 数理融合 | PINN | 监督学习 | - | - | +| 拓扑优化 | [2D 拓扑优化](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/topopt) | 数据驱动 | TopOptNN | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/topopt/top_dataset.h5) | [Paper](https://arxiv.org/pdf/1709.09578) | +| 热仿真 | [1D 换热器热仿真](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/heat_exchanger) | 机理驱动 | PI-DeepONet | 无监督学习 | - | - | +| 热仿真 | [2D 热仿真](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/heat_pinn) | 机理驱动 | PINN | 无监督学习 | - | [Paper](https://arxiv.org/abs/1711.10561)| +| 热仿真 | [2D 芯片热仿真](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/chip_heat) | 机理驱动 | PI-DeepONet | 无监督学习 | - | [Paper](https://doi.org/10.1063/5.0194245)| + +
+
+材料科学(AI for Material)
+
+
+| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 |
+|-----|---------|-----|---------|----|---------|---------|
+| 材料设计 | [散射板设计(反问题)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/hpinns) | 数理融合 | 数据驱动 | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/hPINNs/hpinns_holo_train.mat)<br>[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/hPINNs/hpinns_holo_valid.mat) | [Paper](https://arxiv.org/pdf/2102.04626.pdf) |
+
+
+地球科学(AI for Earth Science)
+
+ +| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | +|-----|---------|-----|---------|----|---------|---------| +| 天气预报 | [Extformer-MoE 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/extformer_moe.md) | 数据驱动 | FourCastNet | 监督学习 | [enso](https://tianchi.aliyun.com/dataset/98942) | - | +| 天气预报 | [FourCastNet 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/fourcastnet) | 数据驱动 | FourCastNet | 监督学习 | [ERA5](https://app.globus.org/file-manager?origin_id=945b3c9e-0f8c-11ed-8daf-9f359c660fbd&origin_path=%2F~%2Fdata%2F) | [Paper](https://arxiv.org/pdf/2202.11214.pdf) | +| 天气预报 | [NowCastNet 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/nowcastnet) | 数据驱动 | NowCastNet | 监督学习 | [MRMS](https://app.globus.org/file-manager?origin_id=945b3c9e-0f8c-11ed-8daf-9f359c660fbd&origin_path=%2F~%2Fdata%2F) | [Paper](https://www.nature.com/articles/s41586-023-06184-4) | +| 天气预报 | [GraphCast 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/graphcast) | 数据驱动 | GraphCastNet | 监督学习 | - | [Paper](https://arxiv.org/abs/2212.12794) | +| 大气污染物 | [UNet 污染物扩散](https://aistudio.baidu.com/projectdetail/5663515?channel=0&channelType=0&sUid=438690&shared=1&ts=1698221963752) | 数据驱动 | UNet | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/198102) | - | +| 天气预报 | [DGMR 气象预报](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/dgmr.md) | 数据驱动 | DGMR | 监督学习 | [UK dataset](https://huggingface.co/datasets/openclimatefix/nimrod-uk-1km) | [Paper](https://arxiv.org/pdf/2104.00954.pdf) | +| 地震波形反演 | [VelocityGAN 地震波形反演](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/velocity_gan.md) | 数据驱动 | VelocityGAN | 监督学习 | [OpenFWI](https://openfwi-lanl.github.io/docs/data.html#vel) | [Paper](https://arxiv.org/abs/1809.10262v6) | + + +## 🕘最近更新 + +- 基于 PaddleScience 的 ADR 方程求解方法 [Physics-informed neural networks for advection–diffusion–Langmuir adsorption processes](https://doi.org/10.1063/5.0221924) 被 Physics of Fluids 2024 接受。 +- 添加 [IJCAI 2024: 任意三维几何外形车辆的风阻快速预测竞赛](https://competition.atomgit.com/competitionInfo?id=7f3f276465e9e845fd3a811d2d6925b5),track A, B, C 的 paddle/pytorch 代码链接。 +- 添加 SPINN(基于 Helmholtz3D 方程求解) [helmholtz3d](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/spinn/)。 +- 添加 CVit(基于 Advection 方程和 N-S 方程求解) [CVit(Navier-Stokes)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ns_cvit/)、[CVit(Advection)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/adv_cvit/)。 +- 添加 PirateNet(基于 Allen-cahn 方程和 N-S 方程求解) [Allen-Cahn](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/allen_cahn/)、[LDC2D(Re3200)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/ldc2d_steady/)。 +- 基于 PaddleScience 的快速热仿真方法 [A fast general thermal simulation model based on MultiBranch Physics-Informed deep operator neural network](https://doi.org/10.1063/5.0194245) 被 Physics of Fluids 2024 接受。 +- 添加多目标优化算法 [Relobralo](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/api/loss/mtl/#ppsci.loss.mtl.Relobralo) 。 +- 
添加气泡流求解案例([Bubble](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/bubble))、机翼优化案例([DeepCFD](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/deepcfd/))、热传导仿真案例([HeatPINN](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/heat_pinn))、非线性短临预报模型([Nowcasting(仅推理)](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/nowcastnet))、拓扑优化案例([TopOpt](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/topopt))、矩形平板线弹性方程求解案例([Biharmonic2D](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/biharmonic2d))。 +- 添加二维血管案例([LabelFree-DNN-Surrogate](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/labelfree_DNN_surrogate/#4))、空气激波案例([ShockWave](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/shock_wave/))、去噪网络模型([DUCNN](https://github.com/PaddlePaddle/PaddleScience/tree/develop/jointContribution/DU_CNN))、风电预测模型([Deep Spatial Temporal](https://github.com/PaddlePaddle/PaddleScience/tree/develop/jointContribution/Deep-Spatio-Temporal))、域分解模型([XPINNs](https://github.com/PaddlePaddle/PaddleScience/tree/develop/jointContribution/XPINNs))、积分方程求解案例([Volterra Equation](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/examples/volterra_ide))、分数阶方程求解案例([Fractional Poisson 2D](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fpde/fractional_poisson_2d.py))。 +- 针对串联方程和复杂方程场景,`Equation` 模块支持基于 [sympy](https://docs.sympy.org/dev/tutorials/intro-tutorial/intro.html) 的符号计算,并支持和 python 函数混合使用([#507](https://github.com/PaddlePaddle/PaddleScience/pull/507)、[#505](https://github.com/PaddlePaddle/PaddleScience/pull/505))。 +- `Geometry` 模块和 `InteriorConstraint`、`InitialConstraint` 支持计算 SDF 微分功能([#539](https://github.com/PaddlePaddle/PaddleScience/pull/539))。 +- 添加 **M**ulti**T**ask**L**earning(`ppsci.loss.mtl`) 多任务学习模块,针对多任务优化(如 PINN 方法)进一步提升性能,使用方式:[多任务学习指南](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/user_guide/#24)([#493](https://github.com/PaddlePaddle/PaddleScience/pull/505)、[#492](https://github.com/PaddlePaddle/PaddleScience/pull/505))。 + + + +## ✨特性 + +- 支持简单几何和复杂 STL 几何的采样与布尔运算。 +- 支持包括 Dirichlet、Neumann、Robin 以及自定义边界条件。 +- 支持物理机理驱动、数据驱动、数理融合三种问题求解方式。涵盖流体、结构、气象等领域 20+ 案例。 +- 支持结果可视化输出与日志结构化保存。 +- 完善的 type hints,用户使用和代码贡献全流程文档,经典案例 AI studio 快速体验,降低使用门槛,提高开发效率。 +- 支持基于 sympy 符号计算库的方程表示与联立方程组计算。 +- 更多特性正在开发中... + + +## 🚀安装使用 + +### 安装 PaddlePaddle + + +请在 [PaddlePaddle](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/develop/install/pip/linux-pip.html) 官网按照您的运行环境,安装 3.0-beta 或 develop 版的 PaddlePaddle。 + +安装完毕之后,运行以下命令,验证 Paddle 是否安装成功。 + +``` shell +python -c "import paddle; paddle.utils.run_check()" +``` + +如果出现 `PaddlePaddle is installed successfully! Let's start deep learning with PaddlePaddle now.` 信息,说明您已成功安装,可以继续安装 PaddleScience。 + + +### 安装 PaddleScience + +1. 基础功能安装 + + **从以下四种安装方式中,任选一种均可安装。** + + - git 源码安装[**推荐**] + + 执行以下命令,从 github 上 clone PaddleScience 源代码,并以 editable 的方式安装 PaddleScience。 + + ``` shell + git clone -b develop https://github.com/PaddlePaddle/PaddleScience.git + # 若 github clone 速度比较慢,可以使用 gitee clone + # git clone -b develop https://gitee.com/paddlepaddle/PaddleScience.git + + cd PaddleScience + + # install paddlesci with editable mode + python -m pip install -e . 
-i https://pypi.tuna.tsinghua.edu.cn/simple + ``` + + + - pip 安装 + + 执行以下命令以 pip 的方式安装 release / nightly build 版本的 PaddleScience。 + + ``` shell + # release + python -m pip install -U paddlesci -i https://pypi.tuna.tsinghua.edu.cn/simple + # nightly build + # python -m pip install https://paddle-qa.bj.bcebos.com/PaddleScience/whl/latest/dist/paddlesci-0.0.0-py3-none-any.whl -i https://pypi.tuna.tsinghua.edu.cn/simple + ``` + + + - conda 安装 + + 执行以下命令以 conda 的方式安装 release / nightly build 版本的 PaddleScience。 + + ``` shell + # nightly build + conda install paddlescience::paddlesci -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle -c conda-forge + # release + # conda install paddlescience::paddlescience=1.3.0 -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/Paddle -c conda-forge + ``` + + + - 设置 PYTHONPATH 并手动安装 requirements + + 如果在您的环境中,上述两种方式都无法正常安装,则可以选择本方式,在终端内将环境变量 `PYTHONPATH` 临时设置为 `PaddleScience` 的**绝对路径**,如下所示。 + + ``` shell + cd PaddleScience + export PYTHONPATH=$PYTHONPATH:$PWD # for linux + set PYTHONPATH=%cd% # for windows + python -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple # manually install requirements + ``` + + 注:上述方式的优点是步骤简单无需安装,缺点是当环境变量生效的终端被关闭后,需要重新执行上述命令设置 `PYTHONPATH` 才能再次使用 PaddleScience,较为繁琐。 + +2. 验证安装 + + ``` py + python -c "import ppsci; ppsci.utils.run_check()" + ``` + +3. 开始使用 + + ``` py + import ppsci + + # write your code here... + ``` + +如需基于复杂几何文件(`*.stl`, `*.mesh`, `*.obj`)文件进行训练、测试等流程,请参考完整安装流程:[**安装与使用**](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/install_setup/) + +## ⚡️快速开始 + +请参考 [**快速开始**](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/quickstart/) + +## 🎈生态工具 + + +除 PaddleScience 外,Paddle 框架同时支持了科学计算领域相关的研发套件和基础工具: + +| 工具 | 简介 | 支持情况 | +| -- | -- | -- | +| [Modulus-sym](https://github.com/PaddlePaddle/modulus-sym/tree/paddle?tab=readme-ov-file#modulus-symbolic-betapaddle-backend) | AI仿真套件 | 全量支持 | +| [DeepXDE](https://github.com/lululxvi/deepxde/tree/master?tab=readme-ov-file#deepxde) | 方程求解套件 | 全量支持 | +| [DeepMD-kit](https://github.com/deepmodeling/deepmd-kit/tree/paddle#deepmd-kitpaddlepaddle-backend) | 分子动力学套件 | 部分支持 | +| [TensorLy](https://tensorly.org/dev/index.html) | 张量计算库 | 全量支持 | +| [NVIDIA/warp](https://github.com/NVIDIA/warp) | 高性能仿真/图形库 | 全量支持 | +| [DLPACK(v0.8)](https://dmlc.github.io/dlpack/latest/index.html) | 跨框架张量内存共享协议 | 全量支持 | + + + +## 💬支持与建议 + +如使用过程中遇到问题或想提出开发建议,欢迎在 [**Discussion**](https://github.com/PaddlePaddle/PaddleScience/discussions/new?category=general) 提出建议,或者在 [**Issue**](https://github.com/PaddlePaddle/PaddleScience/issues/new/choose) 页面新建 issue,会有专业的研发人员进行解答。 + + + +## 👫开源共建 + +PaddleScience 项目欢迎并依赖开发人员和开源社区中的用户,会不定期推出开源活动。 + +> 在开源活动中如需使用 PaddleScience 进行开发,可参考 [**PaddleScience 开发与贡献指南**](https://paddlescience-docs.readthedocs.io/zh-cn/latest/zh/development/) 以提升开发效率和质量。 + +- 🔥第七期黑客松 + + 面向全球开发者的深度学习领域编程活动,鼓励开发者了解与参与飞桨深度学习开源项目。活动进行中:[PaddlePaddle Hackathon 7th 开源贡献个人挑战赛](https://github.com/PaddlePaddle/Paddle/issues/67603) + +- 🎁快乐开源 + + 旨在鼓励更多的开发者参与到飞桨科学计算社区的开源建设中,帮助社区修复 bug 或贡献 feature,加入开源、共建飞桨。了解编程基本知识的入门用户即可参与,活动进行中: + [PaddleScience 快乐开源活动表单](https://github.com/PaddlePaddle/PaddleScience/issues/379) + + + +## 🎯共创计划 + +PaddleScience 作为一个开源项目,欢迎来各行各业的伙伴携手共建基于飞桨的 AI for Science 领域顶尖开源项目, 打造活跃的前瞻性的 AI for Science 开源社区,建立产学研闭环,推动科研创新与产业赋能。点击了解 [飞桨AI for Science共创计划](https://www.paddlepaddle.org.cn/science)。 + + + +## ❤️致谢 + +- PaddleScience 的部分模块和案例设计受 
[NVIDIA-Modulus](https://github.com/NVIDIA/modulus/tree/main)、[DeepXDE](https://github.com/lululxvi/deepxde/tree/master)、[PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP/tree/develop)、[PaddleClas](https://github.com/PaddlePaddle/PaddleClas/tree/develop) 等优秀开源套件的启发。 + +- PaddleScience 的部分案例和代码由以下优秀社区开发者贡献,(完整的贡献者请参考: [Contributors](https://github.com/PaddlePaddle/PaddleScience/graphs/contributors)): + [Asthestarsfalll](https://github.com/Asthestarsfalll), + [co63oc](https://github.com/co63oc), + [MayYouBeProsperous](https://github.com/MayYouBeProsperous), + [AndPuQing](https://github.com/AndPuQing), + [lknt](https://github.com/lknt), + [mrcangye](https://github.com/mrcangye), + [yangguohao](https://github.com/yangguohao), + [ooooo-create](https://github.com/ooooo-create), + [megemini](https://github.com/megemini), + [DUCH714](https://github.com/DUCH714), + [zlynna](https://github.com/zlynna), + [jjyaoao](https://github.com/jjyaoao), + [jiamingkong](https://github.com/jiamingkong), + [Liyulingyue](https://github.com/Liyulingyue), + [DrRyanHuang](https://github.com/DrRyanHuang), + [zbt78](https://github.com/zbt78), + [Gxinhu](https://github.com/Gxinhu), + [XYM](https://github.com/XYM), + [xusuyong](https://github.com/xusuyong), + [DrownFish19](https://github.com/DrownFish19), + [NKNaN](https://github.com/NKNaN), + [ruoyunbai](https://github.com/ruoyunbai), + [sanbuphy](https://github.com/sanbuphy), + [ccsuzzh](https://github.com/ccsuzzh), + [enkilee](https://github.com/enkilee), + [GreatV](https://github.com/GreatV) + ... + +## 🤝合作单位 + +![cooperation](./docs/images/overview/cooperation.png) + + +## 📜开源协议 + +[Apache License 2.0](https://github.com/PaddlePaddle/PaddleScience/blob/develop/LICENSE) + diff --git a/deploy/__init__.py b/deploy/__init__.py index 2fb6e61820..0e142ad1be 100644 --- a/deploy/__init__.py +++ b/deploy/__init__.py @@ -1,17 +1,17 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -deploy module is designed for inference and deployment. -""" +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +deploy module is designed for inference and deployment. +""" diff --git a/deploy/python_infer/__init__.py b/deploy/python_infer/__init__.py index 47ea6b79ce..c2764e01f0 100644 --- a/deploy/python_infer/__init__.py +++ b/deploy/python_infer/__init__.py @@ -1,30 +1,30 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from deploy.python_infer.base import Predictor -from deploy.python_infer.pinn_predictor import PINNPredictor - - -# alias as PINNPredictor can be used in most cases -class GeneralPredictor(PINNPredictor): - """Use PINNPredictor as GeneralPredictor.""" - - pass - - -__all__ = [ - "Predictor", - "PINNPredictor", - "GeneralPredictor", -] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from deploy.python_infer.base import Predictor +from deploy.python_infer.pinn_predictor import PINNPredictor + + +# alias as PINNPredictor can be used in most cases +class GeneralPredictor(PINNPredictor): + """Use PINNPredictor as GeneralPredictor.""" + + pass + + +__all__ = [ + "Predictor", + "PINNPredictor", + "GeneralPredictor", +] diff --git a/deploy/python_infer/base.py b/deploy/python_infer/base.py index cac0ac72ca..08e0a35ebd 100644 --- a/deploy/python_infer/base.py +++ b/deploy/python_infer/base.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -252,3 +253,259 @@ def _compatibility_check(self): "Please install onnxruntime-gpu with `pip install onnxruntime-gpu`" " when device is set to 'gpu'\n" ) +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import importlib +import platform +from os import path as osp +from typing import TYPE_CHECKING +from typing import Optional +from typing import Tuple + +import paddle +from paddle import inference as paddle_inference +from typing_extensions import Literal + +from ppsci.utils import logger + +if TYPE_CHECKING: + import onnxruntime + + +class Predictor: + """ + Initializes the inference engine with the given parameters. + + Args: + pdmodel_path (Optional[str]): Path to the PaddlePaddle model file. Defaults to None. 
+ pdiparams_path (Optional[str]): Path to the PaddlePaddle model parameters file. Defaults to None. + device (Literal["gpu", "cpu", "npu", "xpu"], optional): Device to use for inference. Defaults to "cpu". + engine (Literal["native", "tensorrt", "onnx", "mkldnn"], optional): Inference engine to use. Defaults to "native". + precision (Literal["fp32", "fp16", "int8"], optional): Precision to use for inference. Defaults to "fp32". + onnx_path (Optional[str], optional): Path to the ONNX model file. Defaults to None. + ir_optim (bool, optional): Whether to use IR optimization. Defaults to True. + min_subgraph_size (int, optional): Minimum subgraph size for IR optimization. Defaults to 15. + gpu_mem (int, optional): Initial size of GPU memory pool(MB). Defaults to 500(MB). + gpu_id (int, optional): GPU ID to use. Defaults to 0. + num_cpu_threads (int, optional): Number of CPU threads to use. Defaults to 1. + """ + + def __init__( + self, + pdmodel_path: Optional[str] = None, + pdiparams_path: Optional[str] = None, + *, + device: Literal["gpu", "cpu", "npu", "xpu"] = "cpu", + engine: Literal["native", "tensorrt", "onnx", "mkldnn"] = "native", + precision: Literal["fp32", "fp16", "int8"] = "fp32", + onnx_path: Optional[str] = None, + ir_optim: bool = True, + min_subgraph_size: int = 15, + gpu_mem: int = 500, + gpu_id: int = 0, + max_batch_size: int = 10, + num_cpu_threads: int = 10, + ): + self.pdmodel_path = pdmodel_path + self.pdiparams_path = pdiparams_path + + self._check_device(device) + self.device = device + self._check_engine(engine) + self.engine = engine + self._check_precision(precision) + self.precision = precision + self._compatibility_check() + + self.onnx_path = onnx_path + self.ir_optim = ir_optim + self.min_subgraph_size = min_subgraph_size + self.gpu_mem = gpu_mem + self.gpu_id = gpu_id + self.max_batch_size = max_batch_size + self.num_cpu_threads = num_cpu_threads + + if self.engine == "onnx": + self.predictor, self.config = self._create_onnx_predictor() + else: + self.predictor, self.config = self._create_paddle_predictor() + + logger.message( + f"Inference with engine: {self.engine}, precision: {self.precision}, " + f"device: {self.device}." + ) + + def predict(self, input_dict): + raise NotImplementedError( + f"Method 'predict' is should be implemented in {self.__class__.__name__} class." + ) + + def _create_paddle_predictor( + self, + ) -> Tuple[paddle_inference.Predictor, paddle_inference.Config]: + if paddle.framework.use_pir_api(): + # NOTE: Using 'json' as suffix instead of 'pdmodel' in PIR mode + self.pdmodel_path = self.pdmodel_path.replace(".pdmodel", ".json", 1) + + if not osp.exists(self.pdmodel_path): + raise FileNotFoundError( + f"Given 'pdmodel_path': {self.pdmodel_path} does not exist. " + "Please check if cfg.INFER.pdmodel_path is correct." + ) + if not osp.exists(self.pdiparams_path): + raise FileNotFoundError( + f"Given 'pdiparams_path': {self.pdiparams_path} does not exist. " + "Please check if cfg.INFER.pdiparams_path is correct." 
+ ) + + config = paddle_inference.Config(self.pdmodel_path, self.pdiparams_path) + if self.device == "gpu": + config.enable_use_gpu(self.gpu_mem, self.gpu_id) + if self.engine == "tensorrt": + if self.precision == "fp16": + precision = paddle_inference.Config.Precision.Half + elif self.precision == "int8": + precision = paddle_inference.Config.Precision.Int8 + else: + precision = paddle_inference.Config.Precision.Float32 + config.enable_tensorrt_engine( + workspace_size=1 << 30, + precision_mode=precision, + max_batch_size=self.max_batch_size, + min_subgraph_size=self.min_subgraph_size, + use_calib_mode=False, + ) + # collect shape + pdmodel_dir = osp.dirname(self.pdmodel_path) + trt_shape_path = osp.join(pdmodel_dir, "trt_dynamic_shape.txt") + + if not osp.exists(trt_shape_path): + config.collect_shape_range_info(trt_shape_path) + logger.message( + f"Save collected dynamic shape info to: {trt_shape_path}" + ) + try: + config.enable_tuned_tensorrt_dynamic_shape(trt_shape_path, True) + except Exception as e: + logger.warning(e) + logger.warning( + "TRT dynamic shape is disabled for your paddlepaddle < 2.3.0" + ) + + elif self.device == "npu": + config.enable_custom_device("npu") + elif self.device == "xpu": + config.enable_xpu(10 * 1024 * 1024) + else: + config.disable_gpu() + if self.engine == "mkldnn": + # 'set_mkldnn_cache_capatity' is not available on macOS + if platform.system() != "Darwin": + ... + # cache 10 different shapes for mkldnn to avoid memory leak + # config.set_mkldnn_cache_capacity(10) + config.enable_mkldnn() + + if self.precision == "fp16": + config.enable_mkldnn_bfloat16() + + config.set_cpu_math_library_num_threads(self.num_cpu_threads) + + # enable memory optim + config.enable_memory_optim() + # config.disable_glog_info() + # enable zero copy + config.switch_use_feed_fetch_ops(False) + config.switch_ir_optim(self.ir_optim) + + predictor = paddle_inference.create_predictor(config) + return predictor, config + + def _create_onnx_predictor( + self, + ) -> Tuple["onnxruntime.InferenceSession", "onnxruntime.SessionOptions"]: + if not osp.exists(self.onnx_path): + raise FileNotFoundError( + f"Given 'onnx_path' {self.onnx_path} does not exist. " + "Please check if it is correct." + ) + + try: + import onnxruntime as ort + except ModuleNotFoundError: + raise ModuleNotFoundError( + "Please install onnxruntime with `pip install onnxruntime`." + ) + + # set config for onnx predictor + config = ort.SessionOptions() + config.intra_op_num_threads = self.num_cpu_threads + if self.ir_optim: + config.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL + + # instantiate onnx predictor + providers = ( + ["CUDAExecutionProvider", "CPUExecutionProvider"] + if self.device != "cpu" + else ["CPUExecutionProvider"] + ) + predictor = ort.InferenceSession( + self.onnx_path, sess_options=config, providers=providers + ) + return predictor, config + + def _check_device(self, device: str): + if device not in ["gpu", "cpu", "npu", "xpu"]: + raise ValueError( + "Inference only supports 'gpu', 'cpu', 'npu' and 'xpu' devices, " + f"but got {device}." + ) + + def _check_engine(self, engine: str): + if engine not in ["native", "tensorrt", "onnx", "mkldnn"]: + raise ValueError( + "Inference only supports 'native', 'tensorrt', 'onnx' and 'mkldnn' " + f"engines, but got {engine}." + ) + + def _check_precision(self, precision: str): + if precision not in ["fp32", "fp16", "int8"]: + raise ValueError( + "Inference only supports 'fp32', 'fp16' and 'int8' " + f"precision, but got {precision}." 
+ ) + + def _compatibility_check(self): + if self.engine == "onnx": + if not ( + importlib.util.find_spec("onnxruntime") + or importlib.util.find_spec("onnxruntime-gpu") + ): + raise ModuleNotFoundError( + "\nPlease install onnxruntime first when engine is 'onnx'\n" + "* For CPU inference, use `pip install onnxruntime -i https://pypi.tuna.tsinghua.edu.cn/simple`\n" + "* For GPU inference, use `pip install onnxruntime-gpu -i https://pypi.tuna.tsinghua.edu.cn/simple`" + ) + import onnxruntime as ort + + if self.device == "gpu" and ort.get_device() != "GPU": + raise RuntimeError( + "Please install onnxruntime-gpu with `pip install onnxruntime-gpu`" + " when device is set to 'gpu'\n" + ) +>>>>>>> Stashed changes diff --git a/deploy/python_infer/pinn_predictor.py b/deploy/python_infer/pinn_predictor.py index c37481d11c..a81718d1b1 100644 --- a/deploy/python_infer/pinn_predictor.py +++ b/deploy/python_infer/pinn_predictor.py @@ -1,192 +1,192 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Dict -from typing import List -from typing import Optional -from typing import Union - -import numpy as np -import paddle -from omegaconf import DictConfig - -from deploy.python_infer import base -from ppsci.utils import logger -from ppsci.utils import misc - - -class PINNPredictor(base.Predictor): - """General predictor for PINN-based models. - - Args: - cfg (DictConfig): Running configuration. - - Examples: - >>> import numpy as np - >>> import paddle - >>> from omegaconf import DictConfig - >>> from paddle.static import InputSpec - >>> import ppsci - >>> from deploy.python_infer import pinn_predictor - >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16) - >>> static_model = paddle.jit.to_static( - ... model, - ... input_spec=[ - ... { - ... key: InputSpec([None, 1], "float32", name=key) - ... for key in model.input_keys - ... }, - ... ], - ... ) - >>> paddle.jit.save(static_model, "./inference") - >>> cfg = DictConfig( - ... { - ... "log_freq": 10, - ... "INFER": { - ... "pdmodel_path": "./inference.pdmodel", - ... "pdiparams_path": "./inference.pdiparams", - ... "device": "cpu", - ... "engine": "native", - ... "precision": "fp32", - ... "onnx_path": None, - ... "ir_optim": True, - ... "min_subgraph_size": 15, - ... "gpu_mem": 500, - ... "gpu_id": 0, - ... "max_batch_size": 10, - ... "num_cpu_threads": 10, - ... } - ... } - ... ) - >>> predictor = pinn_predictor.PINNPredictor(cfg) # doctest: +SKIP - >>> pred = predictor.predict( - ... { - ... "x": np.random.randn(4, 1).astype("float32"), - ... "y": np.random.randn(4, 1).astype("float32"), - ... }, - ... batch_size=2, - ... ) # doctest: +SKIP - >>> for k, v in pred.items(): # doctest: +SKIP - ... 
print(k, v.shape) # doctest: +SKIP - save_infer_model/scale_0.tmp_0 (4, 1) - save_infer_model/scale_1.tmp_0 (4, 1) - save_infer_model/scale_2.tmp_0 (4, 1) - """ - - def __init__( - self, - cfg: DictConfig, - ): - super().__init__( - cfg.INFER.pdmodel_path, - cfg.INFER.pdiparams_path, - device=cfg.INFER.device, - engine=cfg.INFER.engine, - precision=cfg.INFER.precision, - onnx_path=cfg.INFER.onnx_path, - ir_optim=cfg.INFER.ir_optim, - min_subgraph_size=cfg.INFER.min_subgraph_size, - gpu_mem=cfg.INFER.gpu_mem, - gpu_id=cfg.INFER.gpu_id, - max_batch_size=cfg.INFER.max_batch_size, - num_cpu_threads=cfg.INFER.num_cpu_threads, - ) - self.log_freq = cfg.log_freq - - def predict( - self, - input_dict: Dict[str, Union[np.ndarray, paddle.Tensor]], - batch_size: Optional[int] = 64, - ) -> Dict[str, np.ndarray]: - """ - Predicts the output of the model for the given input. - - Args: - input_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): - A dictionary containing the input data. - batch_size (Optional[int]): The batch size to use for prediction. - If None, input will be directly sent to the model - without batch slicing. Defaults to 64. - - Returns: - Dict[str, np.ndarray]: A dictionary containing the predicted output. - """ - if batch_size and batch_size > self.max_batch_size: - logger.warning( - f"batch_size({batch_size}) is larger than " - f"max_batch_size({self.max_batch_size}), which may occur error." - ) - - if self.engine != "onnx": - # prepare input handle(s) - input_handles = { - name: self.predictor.get_input_handle(name) for name in input_dict - } - # prepare output handle(s) - output_handles = { - name: self.predictor.get_output_handle(name) - for name in self.predictor.get_output_names() - } - else: - # input_names = [node_arg.name for node_arg in self.predictor.get_inputs()] - output_names: List[str] = [ - node_arg.name for node_arg in self.predictor.get_outputs() - ] - - num_samples = len(next(iter(input_dict.values()))) - batch_num = (num_samples + (batch_size - 1)) // batch_size if batch_size else 1 - pred_dict = misc.Prettydefaultdict(list) - - # inference by batch - for batch_id in range(1, batch_num + 1): - if batch_id == 1 or batch_id % self.log_freq == 0 or batch_id == batch_num: - logger.info(f"Predicting batch {batch_id}/{batch_num}") - - # prepare batch input dict - if batch_size: - st = (batch_id - 1) * batch_size - ed = min(num_samples, batch_id * batch_size) - batch_input_dict = {key: input_dict[key][st:ed] for key in input_dict} - else: - batch_input_dict = {**input_dict} - - # send batch input data to input handle(s) - if self.engine != "onnx": - for name, handle in input_handles.items(): - handle.copy_from_cpu(batch_input_dict[name]) - - # run predictor - if self.engine != "onnx": - self.predictor.run() - # receive batch output data from output handle(s) - batch_output_dict = { - name: output_handles[name].copy_to_cpu() for name in output_handles - } - else: - batch_outputs = self.predictor.run( - output_names=output_names, - input_feed=batch_input_dict, - ) - batch_output_dict = { - name: output for (name, output) in zip(output_names, batch_outputs) - } - - # collect batch output data - for key, batch_output in batch_output_dict.items(): - pred_dict[key].append(batch_output) - - # concatenate local predictions - pred_dict = {key: np.concatenate(value) for key, value in pred_dict.items()} - - return pred_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict +from typing import List +from typing import Optional +from typing import Union + +import numpy as np +import paddle +from omegaconf import DictConfig + +from deploy.python_infer import base +from ppsci.utils import logger +from ppsci.utils import misc + + +class PINNPredictor(base.Predictor): + """General predictor for PINN-based models. + + Args: + cfg (DictConfig): Running configuration. + + Examples: + >>> import numpy as np + >>> import paddle + >>> from omegaconf import DictConfig + >>> from paddle.static import InputSpec + >>> import ppsci + >>> from deploy.python_infer import pinn_predictor + >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16) + >>> static_model = paddle.jit.to_static( + ... model, + ... input_spec=[ + ... { + ... key: InputSpec([None, 1], "float32", name=key) + ... for key in model.input_keys + ... }, + ... ], + ... ) + >>> paddle.jit.save(static_model, "./inference") + >>> cfg = DictConfig( + ... { + ... "log_freq": 10, + ... "INFER": { + ... "pdmodel_path": "./inference.pdmodel", + ... "pdiparams_path": "./inference.pdiparams", + ... "device": "cpu", + ... "engine": "native", + ... "precision": "fp32", + ... "onnx_path": None, + ... "ir_optim": True, + ... "min_subgraph_size": 15, + ... "gpu_mem": 500, + ... "gpu_id": 0, + ... "max_batch_size": 10, + ... "num_cpu_threads": 10, + ... } + ... } + ... ) + >>> predictor = pinn_predictor.PINNPredictor(cfg) # doctest: +SKIP + >>> pred = predictor.predict( + ... { + ... "x": np.random.randn(4, 1).astype("float32"), + ... "y": np.random.randn(4, 1).astype("float32"), + ... }, + ... batch_size=2, + ... ) # doctest: +SKIP + >>> for k, v in pred.items(): # doctest: +SKIP + ... print(k, v.shape) # doctest: +SKIP + save_infer_model/scale_0.tmp_0 (4, 1) + save_infer_model/scale_1.tmp_0 (4, 1) + save_infer_model/scale_2.tmp_0 (4, 1) + """ + + def __init__( + self, + cfg: DictConfig, + ): + super().__init__( + cfg.INFER.pdmodel_path, + cfg.INFER.pdiparams_path, + device=cfg.INFER.device, + engine=cfg.INFER.engine, + precision=cfg.INFER.precision, + onnx_path=cfg.INFER.onnx_path, + ir_optim=cfg.INFER.ir_optim, + min_subgraph_size=cfg.INFER.min_subgraph_size, + gpu_mem=cfg.INFER.gpu_mem, + gpu_id=cfg.INFER.gpu_id, + max_batch_size=cfg.INFER.max_batch_size, + num_cpu_threads=cfg.INFER.num_cpu_threads, + ) + self.log_freq = cfg.log_freq + + def predict( + self, + input_dict: Dict[str, Union[np.ndarray, paddle.Tensor]], + batch_size: Optional[int] = 64, + ) -> Dict[str, np.ndarray]: + """ + Predicts the output of the model for the given input. + + Args: + input_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): + A dictionary containing the input data. + batch_size (Optional[int]): The batch size to use for prediction. + If None, input will be directly sent to the model + without batch slicing. Defaults to 64. + + Returns: + Dict[str, np.ndarray]: A dictionary containing the predicted output. 
+ """ + if batch_size and batch_size > self.max_batch_size: + logger.warning( + f"batch_size({batch_size}) is larger than " + f"max_batch_size({self.max_batch_size}), which may occur error." + ) + + if self.engine != "onnx": + # prepare input handle(s) + input_handles = { + name: self.predictor.get_input_handle(name) for name in input_dict + } + # prepare output handle(s) + output_handles = { + name: self.predictor.get_output_handle(name) + for name in self.predictor.get_output_names() + } + else: + # input_names = [node_arg.name for node_arg in self.predictor.get_inputs()] + output_names: List[str] = [ + node_arg.name for node_arg in self.predictor.get_outputs() + ] + + num_samples = len(next(iter(input_dict.values()))) + batch_num = (num_samples + (batch_size - 1)) // batch_size if batch_size else 1 + pred_dict = misc.Prettydefaultdict(list) + + # inference by batch + for batch_id in range(1, batch_num + 1): + if batch_id == 1 or batch_id % self.log_freq == 0 or batch_id == batch_num: + logger.info(f"Predicting batch {batch_id}/{batch_num}") + + # prepare batch input dict + if batch_size: + st = (batch_id - 1) * batch_size + ed = min(num_samples, batch_id * batch_size) + batch_input_dict = {key: input_dict[key][st:ed] for key in input_dict} + else: + batch_input_dict = {**input_dict} + + # send batch input data to input handle(s) + if self.engine != "onnx": + for name, handle in input_handles.items(): + handle.copy_from_cpu(batch_input_dict[name]) + + # run predictor + if self.engine != "onnx": + self.predictor.run() + # receive batch output data from output handle(s) + batch_output_dict = { + name: output_handles[name].copy_to_cpu() for name in output_handles + } + else: + batch_outputs = self.predictor.run( + output_names=output_names, + input_feed=batch_input_dict, + ) + batch_output_dict = { + name: output for (name, output) in zip(output_names, batch_outputs) + } + + # collect batch output data + for key, batch_output in batch_output_dict.items(): + pred_dict[key].append(batch_output) + + # concatenate local predictions + pred_dict = {key: np.concatenate(value) for key, value in pred_dict.items()} + + return pred_dict diff --git a/docker/Dockerfile b/docker/Dockerfile index 852a7e20cc..10c808cbfb 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,60 +1,60 @@ -FROM paddlepaddle/paddle:latest-dev-cuda11.6-cudnn8.4-trt8.4-gcc82 - -RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb-src 
http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-security main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-updates main restricted universe multiverse" >> /etc/apt/sources.list -RUN echo "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-backports main restricted universe multiverse" >> /etc/apt/sources.list - -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y -o Acquire::Retries=10 --no-install-recommends \ - g++ gcc cmake wget xz-utils git make zlib1g-dev openssl libssl-dev libffi-dev libeigen3-dev sqlite3 libsqlite3-dev libx11-dev libgl1-mesa-dev bzip2 libbz2-dev liblzma-dev \ - # pymesh dependency - libeigen3-dev libgmp-dev libgmpxx4ldbl libmpfr-dev libboost-dev libboost-thread-dev libtbb-dev && \ - apt-get clean autoclean && \ - rm -rf /var/lib/apt/lists/* - -RUN wget https://mirrors.huaweicloud.com/python/3.9.7/Python-3.9.7.tgz && \ - tar -xf Python-3.9.7.tgz && \ - cd Python-3.9.7 && \ - ./configure --with-ssl && \ - make -j -notest && \ - make install && \ - cd .. && rm -rf Python-3.9.7 && rm Python-3.9.7.tgz && \ - ln -s /usr/local/bin/python3.9 /usr/local/bin/python - -RUN python3 -m pip install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple && pip install pysdf open3d db-sqlite3 -i https://pypi.tuna.tsinghua.edu.cn/simple - -# install pymesh and paddle -RUN python3 -m pip install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple && pip install "numpy>=1.20.0,<=1.23.1" -i https://pypi.tuna.tsinghua.edu.cn/simple -COPY ./pymesh.tar.xz /src/pymesh.tar.xz - -RUN cd /src && \ - ls /src && \ - tar -xvf pymesh.tar.xz && \ - cd pymesh && \ - python3 -m pip install -r python/requirements.txt && \ - python3 setup.py install --user && \ - rm -rf /src - -ENV PATH="$PATH:/usr/lib/x86_64-linux-gnu/" -ENV LD_LIBRARY_PATH="/usr/local/cuda/lib64/:$LD_LIBRARY_PATH" - -RUN python3 -m ensurepip --upgrade && \ - pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \ - # ensure paddle install normally - python3 -m pip install paddlepaddle-gpu==0.0.0.post116 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html - -RUN apt-get update -RUN apt-get install -y -o Acquire::Retries=10 --no-install-recommends openssh-server - -RUN echo 'ldconfig' >> /root/.bashrc +FROM paddlepaddle/paddle:latest-dev-cuda11.6-cudnn8.4-trt8.4-gcc82 + +RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic-security main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic-updates main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse" >> /etc/apt/sources.list 
+RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic-proposed main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb-src http://mirrors.aliyun.com/ubuntu/ bionic-backports main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-security main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-updates main restricted universe multiverse" >> /etc/apt/sources.list +RUN echo "deb http://mirrors.tuna.tsinghua.edu.cn/ubuntu/ xenial-backports main restricted universe multiverse" >> /etc/apt/sources.list + +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y -o Acquire::Retries=10 --no-install-recommends \ + g++ gcc cmake wget xz-utils git make zlib1g-dev openssl libssl-dev libffi-dev libeigen3-dev sqlite3 libsqlite3-dev libx11-dev libgl1-mesa-dev bzip2 libbz2-dev liblzma-dev \ + # pymesh dependency + libeigen3-dev libgmp-dev libgmpxx4ldbl libmpfr-dev libboost-dev libboost-thread-dev libtbb-dev && \ + apt-get clean autoclean && \ + rm -rf /var/lib/apt/lists/* + +RUN wget https://mirrors.huaweicloud.com/python/3.9.7/Python-3.9.7.tgz && \ + tar -xf Python-3.9.7.tgz && \ + cd Python-3.9.7 && \ + ./configure --with-ssl && \ + make -j -notest && \ + make install && \ + cd .. && rm -rf Python-3.9.7 && rm Python-3.9.7.tgz && \ + ln -s /usr/local/bin/python3.9 /usr/local/bin/python + +RUN python3 -m pip install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple && pip install pysdf open3d db-sqlite3 -i https://pypi.tuna.tsinghua.edu.cn/simple + +# install pymesh and paddle +RUN python3 -m pip install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple && pip install "numpy>=1.20.0,<=1.23.1" -i https://pypi.tuna.tsinghua.edu.cn/simple +COPY ./pymesh.tar.xz /src/pymesh.tar.xz + +RUN cd /src && \ + ls /src && \ + tar -xvf pymesh.tar.xz && \ + cd pymesh && \ + python3 -m pip install -r python/requirements.txt && \ + python3 setup.py install --user && \ + rm -rf /src + +ENV PATH="$PATH:/usr/lib/x86_64-linux-gnu/" +ENV LD_LIBRARY_PATH="/usr/local/cuda/lib64/:$LD_LIBRARY_PATH" + +RUN python3 -m ensurepip --upgrade && \ + pip3 config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple && \ + # ensure paddle install normally + python3 -m pip install paddlepaddle-gpu==0.0.0.post116 -f https://www.paddlepaddle.org.cn/whl/linux/gpu/develop.html + +RUN apt-get update +RUN apt-get install -y -o Acquire::Retries=10 --no-install-recommends openssh-server + +RUN echo 'ldconfig' >> /root/.bashrc diff --git a/docker/run.sh b/docker/run.sh index 0af121fd72..6f83c687b0 100644 --- a/docker/run.sh +++ b/docker/run.sh @@ -1,9 +1,9 @@ -docker build . -t paddlescience:latest - -if [ -x "$(command -v nvidia-docker)" ]; then - nvidia-docker run --name paddlescience_container --network=host -it paddlescience -elif [ -x "$(command -v docker)" ]; then - docker run --name paddlescience_container --gpus all --network=host -it paddlescience -else - echo "Docker start failed, please install nvidia-docker or docker(>=19.03) first" -fi +docker build . 
-t paddlescience:latest + +if [ -x "$(command -v nvidia-docker)" ]; then + nvidia-docker run --name paddlescience_container --network=host -it paddlescience +elif [ -x "$(command -v docker)" ]; then + docker run --name paddlescience_container --gpus all --network=host -it paddlescience +else + echo "Docker start failed, please install nvidia-docker or docker(>=19.03) first" +fi diff --git a/docs/index.md b/docs/index.md index 0eedf31d2c..69f345c2f4 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,239 +1,231 @@ -# PaddleScience - ---8<-- -./README.md:status ---8<-- - ---8<-- -./README.md:announcement ---8<-- - - - ---8<-- -./README.md:description ---8<-- - ---8<-- -./docs/zh/overview.md:panorama ---8<-- - -## 📝案例列表 - - - -

数学(AI for Math)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 亥姆霍兹方程 | [SPINN(Helmholtz3D)](./zh/examples/spinn.md) | 机理驱动 | SPINN | 无监督学习 | - | [Paper](https://arxiv.org/pdf/2306.15969) | -| 相场方程 | [Allen-Cahn](./zh/examples/allen_cahn.md) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat) | [Paper](https://arxiv.org/pdf/2402.00326) | -| 微分方程 | [拉普拉斯方程](./zh/examples/laplace2d.md) | 机理驱动 | MLP | 无监督学习 | - | - | -| 微分方程 | [伯格斯方程](./zh/examples/deephpms.md) | 机理驱动 | MLP | 无监督学习 | [Data](https://github.com/maziarraissi/DeepHPMs/tree/master/Data) | [Paper](https://arxiv.org/pdf/1801.06637.pdf) | -| 微分方程 | [非线性偏微分方程](./zh/examples/pirbn.md) | 机理驱动 | PIRBN | 无监督学习 | - | [Paper](https://arxiv.org/abs/2304.06234) | -| 微分方程 | [洛伦兹方程](./zh/examples/lorenz.md) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957) | -| 微分方程 | [若斯叻方程](./zh/examples/rossler.md) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957) | -| 算子学习 | [DeepONet](./zh/examples/deeponet.md) | 数据驱动 | MLP | 监督学习 | [Data](https://deepxde.readthedocs.io/en/latest/demos/operator/antiderivative_unaligned.html) | [Paper](https://export.arxiv.org/pdf/1910.03193.pdf) | -| 微分方程 | [梯度增强的物理知识融合 PDE 求解](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/gpinn/poisson_1d.py) | 机理驱动 | gPINN | 无监督学习 | - | [Paper](https://doi.org/10.1016/j.cma.2022.114823) | -| 积分方程 | [沃尔泰拉积分方程](./zh/examples/volterra_ide.md) | 机理驱动 | MLP | 无监督学习 | - | [Project](https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/Volterra_IDE.py) | -| 微分方程 | [分数阶微分方程](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fpde/fractional_poisson_2d.py) | 机理驱动 | MLP | 无监督学习 | - | - | -| 光孤子 | [Optical soliton](./zh/examples/nlsmb.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1007/s11071-023-08824-w)| -| 光纤怪波 | [Optical rogue wave](./zh/examples/nlsmb.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1007/s11071-023-08824-w)| -| 域分解 | [XPINN](./zh/examples/xpinns.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.4208/cicp.OA-2020-0164)| -| 布鲁塞尔扩散系统 | [3D-Brusselator](./zh/examples/brusselator3d.md) | 数据驱动 | LNO | 监督学习 | - | [Paper](https://arxiv.org/abs/2303.10528)| -| 符号回归 | [Transformer4SR](./zh/examples/transformer4sr.md) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/abs/2312.04070)| -| 算子学习 | [隐空间神经算子LNO](https://github.com/L-I-M-I-T/LatentNeuralOperator) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/abs/2406.03923)| - -
-

技术科学(AI for Technology)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 汽车表面阻力预测 | [DrivAerNet](./zh/examples/drivaernet.md) | 数据驱动 | RegDGCNN | 监督学习 | [Data](https://dataset.bj.bcebos.com/PaddleScience/DNNFluid-Car/DrivAer%2B%2B/data.tar) | [Paper](https://www.researchgate.net/publication/378937154_DrivAerNet_A_Parametric_Car_Dataset_for_Data-Driven_Aerodynamic_Design_and_Graph-Based_Drag_Prediction) | -| 一维线性对流问题 | [1D 线性对流](./zh/examples/adv_cvit.md) | 数据驱动 | ViT | 监督学习 | [Data](https://github.com/Zhengyu-Huang/Operator-Learning/tree/main/data) | [Paper](https://arxiv.org/abs/2405.13998) | -| 非定常不可压流体 | [2D 方腔浮力驱动流](./zh/examples/ns_cvit.md) | 数据驱动 | ViT | 监督学习 | [Data](https://huggingface.co/datasets/pdearena/NavierStokes-2D) | [Paper](https://arxiv.org/abs/2405.13998) | -| 定常不可压流体 | [Re3200 2D 定常方腔流](./zh/examples/ldc2d_steady.md) | 机理驱动 | MLP | 无监督学习 | - | | -| 定常不可压流体 | [2D 达西流](./zh/examples/darcy2d.md) | 机理驱动 | MLP | 无监督学习 | - | | -| 定常不可压流体 | [2D 管道流](./zh/examples/labelfree_DNN_surrogate.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://arxiv.org/abs/1906.02382) | -| 定常不可压流体 | [3D 颅内动脉瘤](./zh/examples/aneurysm.md) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/aneurysm/aneurysm_dataset.tar) | [Project](https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/intermediate/adding_stl_files.html)| -| 定常不可压流体 | [任意 2D 几何体绕流](./zh/examples/deepcfd.md) | 数据驱动 | DeepCFD | 监督学习 | - | [Paper](https://arxiv.org/abs/2004.08826)| -| 非定常不可压流体 | [2D 非定常方腔流](./zh/examples/ldc2d_unsteady.md) | 机理驱动 | MLP | 无监督学习 | - | -| -| 非定常不可压流体 | [Re100 2D 圆柱绕流](./zh/examples/cylinder2d_unsteady.md) | 机理驱动 | MLP | 半监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/cylinder2d_unsteady_Re100/cylinder2d_unsteady_Re100_dataset.tar) | [Paper](https://arxiv.org/abs/2004.08826)| -| 非定常不可压流体 | [Re100~750 2D 圆柱绕流](./zh/examples/cylinder2d_unsteady_transformer_physx.md) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957)| -| 可压缩流体 | [2D 空气激波](./zh/examples/shock_wave.md) | 机理驱动 | PINN-WE | 无监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/167250) | -| -| 飞行器设计 | [MeshGraphNets](https://aistudio.baidu.com/projectdetail/5322713) | 数据驱动 | GNN | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/184320) | [Paper](https://arxiv.org/abs/2010.03409)| -| 飞行器设计 | [火箭发动机真空羽流](https://aistudio.baidu.com/projectdetail/4486133) | 数据驱动 | CNN | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/167250) | - | -| 飞行器设计 | [Deep-Flow-Prediction](https://aistudio.baidu.com/projectdetail/5671596) | 数据驱动 | TurbNetG | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/197778) | [Paper](https://arxiv.org/abs/1810.08217) | -| 通用流场模拟 | [气动外形设计](./zh/examples/amgnet.md) | 数据驱动 | AMGNet | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip) | [Paper](https://arxiv.org/abs/1810.08217) | -| 流固耦合 | [涡激振动](./zh/examples/viv.md) | 机理驱动 | MLP | 半监督学习 | [Data](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fsi/VIV_Training_Neta100.mat) | [Paper](https://arxiv.org/abs/2206.03864)| -| 多相流 | [气液两相流](./zh/examples/bubble.md) | 机理驱动 | BubbleNet | 半监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/BubbleNet/bubble.mat) | [Paper](https://pubs.aip.org/aip/adv/article/12/3/035153/2819394/Predicting-micro-bubble-dynamics-with-semi-physics)| -| 多相流 | 
[twophasePINN](https://aistudio.baidu.com/projectdetail/5379212) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1016/j.mlwa.2021.100029)| -| 流场高分辨率重构 | [2D 湍流流场重构](./zh/examples/tempoGAN.md) | 数据驱动 | tempoGAN | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_valid.mat) | [Paper](https://dl.acm.org/doi/10.1145/3197517.3201304)| -| 流场高分辨率重构 | [2D 湍流流场重构](https://aistudio.baidu.com/projectdetail/4493261?contributionType=1) | 数据驱动 | cycleGAN | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_valid.mat) | [Paper](https://arxiv.org/abs/2007.15324)| -| 流场高分辨率重构 | [基于Voronoi嵌入辅助深度学习的稀疏传感器全局场重建](https://aistudio.baidu.com/projectdetail/5807904) | 数据驱动 | CNN | 监督学习 | [Data1](https://drive.google.com/drive/folders/1K7upSyHAIVtsyNAqe6P8TY1nS5WpxJ2c)
[Data2](https://drive.google.com/drive/folders/1pVW4epkeHkT2WHZB7Dym5IURcfOP4cXu)
[Data3](https://drive.google.com/drive/folders/1xIY_jIu-hNcRY-TTf4oYX1Xg4_fx8ZvD) | [Paper](https://arxiv.org/pdf/2202.11214.pdf) | -| 流场预测 | [Catheter](https://aistudio.baidu.com/projectdetail/5379212) | 数据驱动 | FNO | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/291940) | [Paper](https://www.science.org/doi/pdf/10.1126/sciadv.adj1741) | -| 求解器耦合 | [CFD-GCN](./zh/examples/cfdgcn.md) | 数据驱动 | GCN | 监督学习 | [Data](https://aistudio.baidu.com/aistudio/datasetdetail/184778)
[Mesh](https://paddle-org.bj.bcebos.com/paddlescience/datasets/CFDGCN/meshes.tar) | [Paper](https://arxiv.org/abs/2007.04439)| -| 受力分析 | [1D 欧拉梁变形](./zh/examples/euler_beam.md) | 机理驱动 | MLP | 无监督学习 | - | - | -| 受力分析 | [2D 平板变形](./zh/examples/biharmonic2d.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://arxiv.org/abs/2108.07243) | -| 受力分析 | [3D 连接件变形](./zh/examples/bracket.md) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/bracket/bracket_dataset.tar) | [Tutorial](https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/foundational/linear_elasticity.html) | -| 受力分析 | [结构震动模拟](./zh/examples/phylstm.md) | 机理驱动 | PhyLSTM | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/PhyLSTM/data_boucwen.mat) | [Paper](https://arxiv.org/abs/2002.10253) | -| 受力分析 | [2D 弹塑性结构](./zh/examples/epnn.md) | 机理驱动 | EPNN | 无监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/epnn/dstate-16-plas.dat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/epnn/dstress-16-plas.dat) | [Paper](https://arxiv.org/abs/2204.12088) | -| 受力分析和逆问题 | [3D 汽车控制臂变形](./zh/examples/control_arm.md) | 机理驱动 | MLP | 无监督学习 | - | - | -| 受力分析和逆问题 | [3D 心脏仿真](./zh/examples/heart.md) | 数理融合 | PINN | 监督学习 | - | - | -| 拓扑优化 | [2D 拓扑优化](./zh/examples/topopt.md) | 数据驱动 | TopOptNN | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/topopt/top_dataset.h5) | [Paper](https://arxiv.org/pdf/1709.09578) | -| 热仿真 | [1D 换热器热仿真](./zh/examples/heat_exchanger.md) | 机理驱动 | PI-DeepONet | 无监督学习 | - | - | -| 热仿真 | [2D 热仿真](./zh/examples/heat_pinn.md) | 机理驱动 | PINN | 无监督学习 | - | [Paper](https://arxiv.org/abs/1711.10561)| -| 热仿真 | [2D 芯片热仿真](./zh/examples/chip_heat.md) | 机理驱动 | PI-DeepONet | 无监督学习 | - | [Paper](https://doi.org/10.1063/5.0194245)| - -
-

材料科学(AI for Material)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 材料设计 | [散射板设计(反问题)](./zh/examples/hpinns.md) | 机理驱动 | Transformer | 无监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/hPINNs/hpinns_holo_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/hPINNs/hpinns_holo_valid.mat) | [Paper](https://arxiv.org/pdf/2102.04626.pdf) | -| 晶体材料属性预测 | [CGCNN](./zh/examples/cgcnn.md) | 数据驱动 | GNN | 监督学习 | [MP](https://next-gen.materialsproject.org/) / [Perovskite](https://cmr.fysik.dtu.dk/cubic_perovskites/cubic_perovskites.html) / [C2DB](https://cmr.fysik.dtu.dk/c2db/c2db.html) / [test](https://paddle-org.bj.bcebos.com/paddlescience%2Fdatasets%2Fcgcnn%2Fcgcnn-test.zip) | [Paper](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.145301) | - -
-

地球科学(AI for Earth Science)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 天气预报 | [Extformer-MoE 气象预报](./zh/examples/extformer_moe.md) | 数据驱动 | FourCastNet | 监督学习 | [enso](https://tianchi.aliyun.com/dataset/98942) | - | -| 天气预报 | [FourCastNet 气象预报](./zh/examples/fourcastnet.md) | 数据驱动 | FourCastNet | 监督学习 | [ERA5](https://app.globus.org/file-manager?origin_id=945b3c9e-0f8c-11ed-8daf-9f359c660fbd&origin_path=%2F~%2Fdata%2F) | [Paper](https://arxiv.org/pdf/2202.11214.pdf) | -| 天气预报 | [NowCastNet 气象预报](./zh/examples/nowcastnet.md) | 数据驱动 | NowCastNet | 监督学习 | [MRMS](https://app.globus.org/file-manager?origin_id=945b3c9e-0f8c-11ed-8daf-9f359c660fbd&origin_path=%2F~%2Fdata%2F) | [Paper](https://www.nature.com/articles/s41586-023-06184-4) | -| 天气预报 | [GraphCast 气象预报](./zh/examples/graphcast.md) | 数据驱动 | GraphCastNet | 监督学习 | - | [Paper](https://arxiv.org/abs/2212.12794) | -| 天气预报 | [GenCast 气象预报](./zh/examples/gencast.md) | 数据驱动 | Diffusion | 监督学习 | [Gencast](https://console.cloud.google.com/storage/browser/dm_graphcast) | [Paper](https://arxiv.org/abs/2312.15796) | -| 天气预报 | [FengWu 气象预报](./zh/examples/fengwu.md) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/pdf/2304.02948) | -| 天气预报 | [Pangu-Weather 气象预报](./zh/examples/pangu_weather.md) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/pdf/2211.02556) | -| 大气污染物 | [UNet 污染物扩散](https://aistudio.baidu.com/projectdetail/5663515?channel=0&channelType=0&sUid=438690&shared=1&ts=1698221963752) | 数据驱动 | UNet | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/198102) | - | -| 天气预报 | [DGMR 气象预报](./zh/examples/dgmr.md) | 数据驱动 | DGMR | 监督学习 | [UK dataset](https://huggingface.co/datasets/openclimatefix/nimrod-uk-1km) | [Paper](https://arxiv.org/pdf/2104.00954.pdf) | -| 地震波形反演 | [VelocityGAN 地震波形反演](./zh/examples/velocity_gan.md) | 数据驱动 | VelocityGAN | 监督学习 | [OpenFWI](https://openfwi-lanl.github.io/docs/data.html#vel) | [Paper](https://arxiv.org/abs/1809.10262v6) | -| 交通预测 | [TGCN 交通流量预测](./zh/examples/tgcn.md) | 数据驱动 | GCN & CNN | 监督学习 | [PEMSD4 & PEMSD8](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tgcn/tgcn_data.zip) | - | - -
-

化学科学 (AI for Chemistry)

- -| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | -|-----|---------|-----|---------|----|---------|---------| -| 化学分子生成 | [Moflow](./zh/examples/moflow.md) | 数据驱动 | moflow | 监督学习 | qm9/ zink250k | [MoFlow: An Invertible Flow Model for Generating Molecular Graphs](https://arxiv.org/abs/2006.10137v1) | -| 化学反应预测 | [IFM](./zh/examples/ifm.md) | 数据驱动 | FourCastNet | 监督学习 | tox21/sider/hiv/bace/bbbp | [Understanding the Limitations of Deep Models for Molecular property prediction: Insights and Solutions](https://openreview.net/pdf?id=NLFqlDeuzt) | - -## 🚀快速安装 - -=== "方式1: 源码安装[推荐]" - - --8<-- - ./README.md:git_install - --8<-- - -=== "方式2: pip安装" - - ``` sh - python -m pip install -U paddlesci - ``` - -**完整安装流程**:[安装与使用](./zh/install_setup.md) - ---8<-- -./README.md:update ---8<-- - ---8<-- -./README.md:feature ---8<-- - -## 🎈生态工具 - ---8<-- -./README.md:adaptation ---8<-- - ---8<-- -./README.md:support ---8<-- - ---8<-- -./README.md:contribution ---8<-- - ---8<-- -./README.md:collaboration ---8<-- - ---8<-- -./README.md:thanks ---8<-- - -- PaddleScience 的部分代码由以下优秀开发者贡献(按 [Contributors](https://github.com/PaddlePaddle/PaddleScience/graphs/contributors) 排序): - - -
- -## 🤝合作单位 - -![cooperation](./images/overview/cooperation.png) - ---8<-- -./README.md:license ---8<-- +# PaddleScience + +--8<-- +./README.md:status +--8<-- + +🔥 [飞桨AI for Science前沿讲座系列课程 & 代码入门与实操课程进行中 ](https://mp.weixin.qq.com/s/n-vGnGM9di_3IByTC56hUw),清华、北大、中科院等高校机构知名学者分享前沿研究成果,火热报名中。 + +🔥 [开放原子第二届开源大赛:飞桨科学计算工具组件开发大赛](https://competition.atomgit.com/competitionInfo?id=805ad94637707d062f24e54265d85731),总奖金25万人民币,火热报名中。 + +🔥 [PaddlePaddle Hackathon 7th 开源贡献个人挑战赛](https://github.com/PaddlePaddle/Paddle/issues/67603) + +🔥 [CIKM 2024: AI辅助的先进空气动力学-优化汽车设计以实现最佳性能](https://competition.atomgit.com/competitionInfo?id=cda4e961b0c25858ca0fd2a4bdf87520),已进入评奖阶段。 + +🔥 [IJCAI 2024: 任意三维几何外形车辆的风阻快速预测竞赛](https://competition.atomgit.com/competitionInfo?id=7f3f276465e9e845fd3a811d2d6925b5),track A, B, C 代码:[paddle实现](https://github.com/PaddlePaddle/PaddleScience/tree/develop/jointContribution/IJCAI_2024) | [pytorch实现](https://competition.atomgit.com/competitionInfo?id=7f3f276465e9e845fd3a811d2d6925b5)(点击**排行榜**可查看各个赛道前10名的代码) + + + +--8<-- +./README.md:description +--8<-- + +--8<-- +./docs/zh/overview.md:panorama +--8<-- + +## 📝案例列表 + + + +

数学(AI for Math)

+ +| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | +|-----|---------|-----|---------|----|---------|---------| +| 亥姆霍兹方程 | [SPINN(Helmholtz3D)](./zh/examples/spinn.md) | 机理驱动 | SPINN | 无监督学习 | - | [Paper](https://arxiv.org/pdf/2306.15969) | +| 相场方程 | [Allen-Cahn](./zh/examples/allen_cahn.md) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat) | [Paper](https://arxiv.org/pdf/2402.00326) | +| 微分方程 | [拉普拉斯方程](./zh/examples/laplace2d.md) | 机理驱动 | MLP | 无监督学习 | - | - | +| 微分方程 | [伯格斯方程](./zh/examples/deephpms.md) | 机理驱动 | MLP | 无监督学习 | [Data](https://github.com/maziarraissi/DeepHPMs/tree/master/Data) | [Paper](https://arxiv.org/pdf/1801.06637.pdf) | +| 微分方程 | [非线性偏微分方程](./zh/examples/pirbn.md) | 机理驱动 | PIRBN | 无监督学习 | - | [Paper](https://arxiv.org/abs/2304.06234) | +| 微分方程 | [洛伦兹方程](./zh/examples/lorenz.md) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957) | +| 微分方程 | [若斯叻方程](./zh/examples/rossler.md) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957) | +| 算子学习 | [DeepONet](./zh/examples/deeponet.md) | 数据驱动 | MLP | 监督学习 | [Data](https://deepxde.readthedocs.io/en/latest/demos/operator/antiderivative_unaligned.html) | [Paper](https://export.arxiv.org/pdf/1910.03193.pdf) | +| 微分方程 | [梯度增强的物理知识融合 PDE 求解](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/gpinn/poisson_1d.py) | 机理驱动 | gPINN | 无监督学习 | - | [Paper](https://doi.org/10.1016/j.cma.2022.114823) | +| 积分方程 | [沃尔泰拉积分方程](./zh/examples/volterra_ide.md) | 机理驱动 | MLP | 无监督学习 | - | [Project](https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/Volterra_IDE.py) | +| 微分方程 | [分数阶微分方程](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fpde/fractional_poisson_2d.py) | 机理驱动 | MLP | 无监督学习 | - | - | +| 光孤子 | [Optical soliton](./zh/examples/nlsmb.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1007/s11071-023-08824-w)| +| 光纤怪波 | [Optical rogue wave](./zh/examples/nlsmb.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1007/s11071-023-08824-w)| +| 域分解 | [XPINN](./zh/examples/xpinns.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.4208/cicp.OA-2020-0164)| +| 布鲁塞尔扩散系统 | [3D-Brusselator](./zh/examples/brusselator3d.md) | 数据驱动 | LNO | 监督学习 | - | [Paper](https://arxiv.org/abs/2303.10528)| +| 符号回归 | [Transformer4SR](./zh/examples/transformer4sr.md) | 数据驱动 | Transformer | 监督学习 | - | [Paper](https://arxiv.org/abs/2312.04070)| + +
+

技术科学(AI for Technology)

+ +| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | +|-----|---------|-----|---------|----|---------|---------| +| 一维线性对流问题 | [1D 线性对流](./zh/examples/adv_cvit.md) | 数据驱动 | ViT | 监督学习 | [Data](https://github.com/Zhengyu-Huang/Operator-Learning/tree/main/data) | [Paper](https://arxiv.org/abs/2405.13998) | +| 非定常不可压流体 | [2D 方腔浮力驱动流](./zh/examples/ns_cvit.md) | 数据驱动 | ViT | 监督学习 | [Data](https://huggingface.co/datasets/pdearena/NavierStokes-2D) | [Paper](https://arxiv.org/abs/2405.13998) | +| 定常不可压流体 | [Re3200 2D 定常方腔流](./zh/examples/ldc2d_steady.md) | 机理驱动 | MLP | 无监督学习 | - | | +| 定常不可压流体 | [2D 达西流](./zh/examples/darcy2d.md) | 机理驱动 | MLP | 无监督学习 | - | | +| 定常不可压流体 | [2D 管道流](./zh/examples/labelfree_DNN_surrogate.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://arxiv.org/abs/1906.02382) | +| 定常不可压流体 | [3D 颅内动脉瘤](./zh/examples/aneurysm.md) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/aneurysm/aneurysm_dataset.tar) | [Project](https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/intermediate/adding_stl_files.html)| +| 定常不可压流体 | [任意 2D 几何体绕流](./zh/examples/deepcfd.md) | 数据驱动 | DeepCFD | 监督学习 | - | [Paper](https://arxiv.org/abs/2004.08826)| +| 非定常不可压流体 | [2D 非定常方腔流](./zh/examples/ldc2d_unsteady.md) | 机理驱动 | MLP | 无监督学习 | - | -| +| 非定常不可压流体 | [Re100 2D 圆柱绕流](./zh/examples/cylinder2d_unsteady.md) | 机理驱动 | MLP | 半监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/cylinder2d_unsteady_Re100/cylinder2d_unsteady_Re100_dataset.tar) | [Paper](https://arxiv.org/abs/2004.08826)| +| 非定常不可压流体 | [Re100~750 2D 圆柱绕流](./zh/examples/cylinder2d_unsteady_transformer_physx.md) | 数据驱动 | Transformer-Physx | 监督学习 | [Data](https://github.com/zabaras/transformer-physx) | [Paper](https://arxiv.org/abs/2010.03957)| +| 可压缩流体 | [2D 空气激波](./zh/examples/shock_wave.md) | 机理驱动 | PINN-WE | 无监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/167250) | -| +| 飞行器设计 | [MeshGraphNets](https://aistudio.baidu.com/projectdetail/5322713) | 数据驱动 | GNN | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/184320) | [Paper](https://arxiv.org/abs/2010.03409)| +| 飞行器设计 | [火箭发动机真空羽流](https://aistudio.baidu.com/projectdetail/4486133) | 数据驱动 | CNN | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/167250) | - | +| 飞行器设计 | [Deep-Flow-Prediction](https://aistudio.baidu.com/projectdetail/5671596) | 数据驱动 | TurbNetG | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/197778) | [Paper](https://arxiv.org/abs/1810.08217) | +| 通用流场模拟 | [气动外形设计](./zh/examples/amgnet.md) | 数据驱动 | AMGNet | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip) | [Paper](https://arxiv.org/abs/1810.08217) | +| 流固耦合 | [涡激振动](./zh/examples/viv.md) | 机理驱动 | MLP | 半监督学习 | [Data](https://github.com/PaddlePaddle/PaddleScience/blob/develop/examples/fsi/VIV_Training_Neta100.mat) | [Paper](https://arxiv.org/abs/2206.03864)| +| 多相流 | [气液两相流](./zh/examples/bubble.md) | 机理驱动 | BubbleNet | 半监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/BubbleNet/bubble.mat) | [Paper](https://pubs.aip.org/aip/adv/article/12/3/035153/2819394/Predicting-micro-bubble-dynamics-with-semi-physics)| +| 多相流 | [twophasePINN](https://aistudio.baidu.com/projectdetail/5379212) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://doi.org/10.1016/j.mlwa.2021.100029)| +| 流场高分辨率重构 | [2D 湍流流场重构](./zh/examples/tempoGAN.md) | 数据驱动 | tempoGAN | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_valid.mat) | [Paper](https://dl.acm.org/doi/10.1145/3197517.3201304)| +| 流场高分辨率重构 | [2D 湍流流场重构](https://aistudio.baidu.com/projectdetail/4493261?contributionType=1) | 数据驱动 | cycleGAN | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tempoGAN/2d_valid.mat) | [Paper](https://arxiv.org/abs/2007.15324)| +| 流场高分辨率重构 | [基于Voronoi嵌入辅助深度学习的稀疏传感器全局场重建](https://aistudio.baidu.com/projectdetail/5807904) | 数据驱动 | CNN | 监督学习 | [Data1](https://drive.google.com/drive/folders/1K7upSyHAIVtsyNAqe6P8TY1nS5WpxJ2c)
[Data2](https://drive.google.com/drive/folders/1pVW4epkeHkT2WHZB7Dym5IURcfOP4cXu)
[Data3](https://drive.google.com/drive/folders/1xIY_jIu-hNcRY-TTf4oYX1Xg4_fx8ZvD) | [Paper](https://arxiv.org/pdf/2202.11214.pdf) | +| 流场预测 | [Catheter](https://aistudio.baidu.com/projectdetail/5379212) | 数据驱动 | FNO | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/291940) | [Paper](https://www.science.org/doi/pdf/10.1126/sciadv.adj1741) | +| 求解器耦合 | [CFD-GCN](./zh/examples/cfdgcn.md) | 数据驱动 | GCN | 监督学习 | [Data](https://aistudio.baidu.com/aistudio/datasetdetail/184778)
[Mesh](https://paddle-org.bj.bcebos.com/paddlescience/datasets/CFDGCN/meshes.tar) | [Paper](https://arxiv.org/abs/2007.04439)| +| 受力分析 | [1D 欧拉梁变形](./zh/examples/euler_beam.md) | 机理驱动 | MLP | 无监督学习 | - | - | +| 受力分析 | [2D 平板变形](./zh/examples/biharmonic2d.md) | 机理驱动 | MLP | 无监督学习 | - | [Paper](https://arxiv.org/abs/2108.07243) | +| 受力分析 | [3D 连接件变形](./zh/examples/bracket.md) | 机理驱动 | MLP | 无监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/bracket/bracket_dataset.tar) | [Tutorial](https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/foundational/linear_elasticity.html) | +| 受力分析 | [结构震动模拟](./zh/examples/phylstm.md) | 机理驱动 | PhyLSTM | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/PhyLSTM/data_boucwen.mat) | [Paper](https://arxiv.org/abs/2002.10253) | +| 受力分析 | [2D 弹塑性结构](./zh/examples/epnn.md) | 机理驱动 | EPNN | 无监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/epnn/dstate-16-plas.dat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/epnn/dstress-16-plas.dat) | [Paper](https://arxiv.org/abs/2204.12088) | +| 受力分析和逆问题 | [3D 汽车控制臂变形](./zh/examples/control_arm.md) | 机理驱动 | MLP | 无监督学习 | - | - | +| 受力分析和逆问题 | [3D 心脏仿真](./zh/examples/heart.md) | 数理融合 | PINN | 监督学习 | - | - | +| 拓扑优化 | [2D 拓扑优化](./zh/examples/topopt.md) | 数据驱动 | TopOptNN | 监督学习 | [Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/topopt/top_dataset.h5) | [Paper](https://arxiv.org/pdf/1709.09578) | +| 热仿真 | [1D 换热器热仿真](./zh/examples/heat_exchanger.md) | 机理驱动 | PI-DeepONet | 无监督学习 | - | - | +| 热仿真 | [2D 热仿真](./zh/examples/heat_pinn.md) | 机理驱动 | PINN | 无监督学习 | - | [Paper](https://arxiv.org/abs/1711.10561)| +| 热仿真 | [2D 芯片热仿真](./zh/examples/chip_heat.md) | 机理驱动 | PI-DeepONet | 无监督学习 | - | [Paper](https://doi.org/10.1063/5.0194245)| + +
+

材料科学(AI for Material)

+ +| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | +|-----|---------|-----|---------|----|---------|---------| +| 材料设计 | [散射板设计(反问题)](./zh/examples/hpinns.md) | 数理融合 | 数据驱动 | 监督学习 | [Train Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/hPINNs/hpinns_holo_train.mat)
[Eval Data](https://paddle-org.bj.bcebos.com/paddlescience/datasets/hPINNs/hpinns_holo_valid.mat) | [Paper](https://arxiv.org/pdf/2102.04626.pdf) | + +
+

地球科学(AI for Earth Science)

+ +| 问题类型 | 案例名称 | 优化算法 | 模型类型 | 训练方式 | 数据集 | 参考资料 | +|-----|---------|-----|---------|----|---------|---------| +| 天气预报 | [Extformer-MoE 气象预报](./zh/examples/extformer_moe.md) | 数据驱动 | FourCastNet | 监督学习 | [enso](https://tianchi.aliyun.com/dataset/98942) | - | +| 天气预报 | [FourCastNet 气象预报](./zh/examples/fourcastnet.md) | 数据驱动 | FourCastNet | 监督学习 | [ERA5](https://app.globus.org/file-manager?origin_id=945b3c9e-0f8c-11ed-8daf-9f359c660fbd&origin_path=%2F~%2Fdata%2F) | [Paper](https://arxiv.org/pdf/2202.11214.pdf) | +| 天气预报 | [NowCastNet 气象预报](./zh/examples/nowcastnet.md) | 数据驱动 | NowCastNet | 监督学习 | [MRMS](https://app.globus.org/file-manager?origin_id=945b3c9e-0f8c-11ed-8daf-9f359c660fbd&origin_path=%2F~%2Fdata%2F) | [Paper](https://www.nature.com/articles/s41586-023-06184-4) | +| 天气预报 | [GraphCast 气象预报](./zh/examples/graphcast.md) | 数据驱动 | GraphCastNet | 监督学习 | - | [Paper](https://arxiv.org/abs/2212.12794) | +| 大气污染物 | [UNet 污染物扩散](https://aistudio.baidu.com/projectdetail/5663515?channel=0&channelType=0&sUid=438690&shared=1&ts=1698221963752) | 数据驱动 | UNet | 监督学习 | [Data](https://aistudio.baidu.com/datasetdetail/198102) | - | +| 天气预报 | [DGMR 气象预报](./zh/examples/dgmr.md) | 数据驱动 | DGMR | 监督学习 | [UK dataset](https://huggingface.co/datasets/openclimatefix/nimrod-uk-1km) | [Paper](https://arxiv.org/pdf/2104.00954.pdf) | +| 地震波形反演 | [VelocityGAN 地震波形反演](./zh/examples/velocity_gan.md) | 数据驱动 | VelocityGAN | 监督学习 | [OpenFWI](https://openfwi-lanl.github.io/docs/data.html#vel) | [Paper](https://arxiv.org/abs/1809.10262v6) | +| 交通预测 | [TGCN 交通流量预测](./zh/examples/tgcn.md) | 数据驱动 | GCN & CNN | 监督学习 | [PEMSD4 & PEMSD8](https://paddle-org.bj.bcebos.com/paddlescience/datasets/tgcn/tgcn_data.zip) | - | + +## 🚀快速安装 + +=== "方式1: 源码安装[推荐]" + + --8<-- + ./README.md:git_install + --8<-- + +=== "方式2: pip安装" + + ``` sh + python -m pip install -U paddlesci + ``` + +**完整安装流程**:[安装与使用](./zh/install_setup.md) + +--8<-- +./README.md:update +--8<-- + +--8<-- +./README.md:feature +--8<-- + +## 🎈生态工具 + +--8<-- +./README.md:adaptation +--8<-- + +--8<-- +./README.md:support +--8<-- + +--8<-- +./README.md:contribution +--8<-- + +--8<-- +./README.md:collaboration +--8<-- + +--8<-- +./README.md:thanks +--8<-- + +- PaddleScience 的部分代码由以下优秀开发者贡献(按 [Contributors](https://github.com/PaddlePaddle/PaddleScience/graphs/contributors) 排序): + + +
+ +## 🤝合作单位 + +![cooperation](./images/overview/cooperation.png) + +--8<-- +./README.md:license +--8<-- diff --git a/docs/javascripts/bd_statistics.js b/docs/javascripts/bd_statistics.js index c15209c91f..d268921a50 100644 --- a/docs/javascripts/bd_statistics.js +++ b/docs/javascripts/bd_statistics.js @@ -1,7 +1,7 @@ -var _hmt = _hmt || []; -(function() { - var hm = document.createElement("script"); - hm.src = "https://hm.baidu.com/hm.js?0b2ac2d9147125c1ea1c5ec75bd3a253"; - var s = document.getElementsByTagName("script")[0]; - s.parentNode.insertBefore(hm, s); -})(); +var _hmt = _hmt || []; +(function() { + var hm = document.createElement("script"); + hm.src = "https://hm.baidu.com/hm.js?0b2ac2d9147125c1ea1c5ec75bd3a253"; + var s = document.getElementsByTagName("script")[0]; + s.parentNode.insertBefore(hm, s); +})(); diff --git a/docs/javascripts/contributors.js b/docs/javascripts/contributors.js index 8799c78e31..c16d00f1b9 100644 --- a/docs/javascripts/contributors.js +++ b/docs/javascripts/contributors.js @@ -1,26 +1,26 @@ -const owner = 'PaddlePaddle'; -const repo = 'PaddleScience'; - -fetch(`https://api.github.com/repos/${owner}/${repo}/contributors?per_page=65&page=1`, { - headers: { - "Accept": "application/vnd.github.v3+json" - } -}) -.then(response => { - if (!response.ok) { - throw new Error(`HTTP error! status: ${response.status}`); - } - return response.json(); -}) -.then(data => { - const contributorsDiv = document.getElementById('contributors'); - data.forEach(contributor => { - const a = document.createElement('a'); - a.href = `https://github.com/${contributor.login}`; - a.innerHTML = `avatar`; - contributorsDiv.appendChild(a); - }); -}) -.catch((error) => { - console.error('Fetching contributors failed:', error); -}); +const owner = 'PaddlePaddle'; +const repo = 'PaddleScience'; + +fetch(`https://api.github.com/repos/${owner}/${repo}/contributors?per_page=65&page=1`, { + headers: { + "Accept": "application/vnd.github.v3+json" + } +}) +.then(response => { + if (!response.ok) { + throw new Error(`HTTP error! 
status: ${response.status}`); + } + return response.json(); +}) +.then(data => { + const contributorsDiv = document.getElementById('contributors'); + data.forEach(contributor => { + const a = document.createElement('a'); + a.href = `https://github.com/${contributor.login}`; + a.innerHTML = `avatar`; + contributorsDiv.appendChild(a); + }); +}) +.catch((error) => { + console.error('Fetching contributors failed:', error); +}); diff --git a/docs/javascripts/mathjax.js b/docs/javascripts/mathjax.js index 8552839016..d9ae99239d 100644 --- a/docs/javascripts/mathjax.js +++ b/docs/javascripts/mathjax.js @@ -1,16 +1,16 @@ -window.MathJax = { - tex: { - inlineMath: [["\\(", "\\)"]], - displayMath: [["\\[", "\\]"]], - processEscapes: true, - processEnvironments: true - }, - options: { - ignoreHtmlClass: ".*|", - processHtmlClass: "arithmatex" - } - }; - - document$.subscribe(() => { - MathJax.typesetPromise() - }) +window.MathJax = { + tex: { + inlineMath: [["\\(", "\\)"]], + displayMath: [["\\[", "\\]"]], + processEscapes: true, + processEnvironments: true + }, + options: { + ignoreHtmlClass: ".*|", + processHtmlClass: "arithmatex" + } + }; + + document$.subscribe(() => { + MathJax.typesetPromise() + }) diff --git a/docs/requirements.txt b/docs/requirements.txt index 18e95ecb5d..ca90f9ca7d 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream Jinja2~=3.1 matplotlib mike @@ -15,3 +16,22 @@ numpy>=1.20.0,<2.0.0 pyyaml scipy sympy +======= +Jinja2~=3.1 +matplotlib +mike +mkdocs +mkdocs-autorefs +mkdocs-git-revision-date-localized-plugin +mkdocs-glightbox +mkdocs-include-markdown-plugin +mkdocs-material +mkdocs-material-extensions +mkdocs-video +mkdocstrings +mkdocstrings-python +numpy>=1.20.0,<2.0.0 +pyyaml +scipy +sympy +>>>>>>> Stashed changes diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index dcbba2d206..0d136815b2 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -1,30 +1,30 @@ -:root { - /* primary */ - --md-primary-fg-color: #6067e7; - --md-primary-fg-color--light: #5d63db; - --md-primary-fg-color--dark: #000; -} - -.md-grid { - /* readable page width */ - max-width: 1550px; -} - -.md-header__topic > .md-ellipsis { - overflow: visible; - text-overflow: ellipsis; - white-space: nowrap; -} - -.avatar { - height: 64px; - width: 64px; - border: 2px solid rgba(128, 128, 128, 0.308); - border-radius: 50%; -} - -.avatar:hover { - box-shadow: 0 8px 16px 0 rgba(0, 0, 0, 0.4); - transition: 0.4s; - transform:translateY(-10px); -} +:root { + /* primary */ + --md-primary-fg-color: #6067e7; + --md-primary-fg-color--light: #5d63db; + --md-primary-fg-color--dark: #000; +} + +.md-grid { + /* readable page width */ + max-width: 1550px; +} + +.md-header__topic > .md-ellipsis { + overflow: visible; + text-overflow: ellipsis; + white-space: nowrap; +} + +.avatar { + height: 64px; + width: 64px; + border: 2px solid rgba(128, 128, 128, 0.308); + border-radius: 50%; +} + +.avatar:hover { + box-shadow: 0 8px 16px 0 rgba(0, 0, 0, 0.4); + transition: 0.4s; + transform:translateY(-10px); +} diff --git a/docs/zh/api/arch.md b/docs/zh/api/arch.md index e69bf9e752..c474ab9bcd 100644 --- a/docs/zh/api/arch.md +++ b/docs/zh/api/arch.md @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Arch(网络模型) 模块 ::: ppsci.arch @@ -43,3 +44,46 @@ - IFMMLP show_root_heading: true heading_level: 3 +======= +# Arch(网络模型) 模块 + +::: ppsci.arch + handler: python + options: + members: + - AFNONet + - AMGNet + - Arch + - AutoEncoder + - ChipDeepONets + - 
CrystalGraphConvNet + - CuboidTransformer + - CVit1D + - CylinderEmbedding + - DeepONet + - DeepPhyLSTM + - DGMR + - Discriminator + - ExtFormerMoECuboid + - FNO1d + - Generator + - HEDeepONets + - LorenzEmbedding + - MLP + - ModelList + - ModifiedMLP + - NowcastNet + - SFNONet + - SPINN + - TFNO1dNet + - TFNO2dNet + - TFNO3dNet + - Transformer + - UNetEx + - UNONet + - USCNN + - LNO + - TGCN + show_root_heading: true + heading_level: 3 +>>>>>>> Stashed changes diff --git a/docs/zh/examples/allen_cahn.md b/docs/zh/examples/allen_cahn.md index daeb803d13..b9ac1093f6 100644 --- a/docs/zh/examples/allen_cahn.md +++ b/docs/zh/examples/allen_cahn.md @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Allen-Cahn AI Studio快速体验 @@ -248,3 +249,247 @@ examples/allen_cahn/allen_cahn_piratenet.py - [PIRATENETS: PHYSICS-INFORMED DEEP LEARNING WITHRESIDUAL ADAPTIVE NETWORKS](https://arxiv.org/pdf/2402.00326.pdf) - [Allen-Cahn equation](https://github.com/PredictiveIntelligenceLab/jaxpi/blob/main/examples/allen_cahn/README.md) - [Gradient Alignment in Physics-informed Neural Networks: A Second-Order Optimization Perspective](https://arxiv.org/abs/2502.00604) +======= +# Allen-Cahn + +AI Studio快速体验 + +=== "模型训练命令" + + ``` sh + # linux + wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat -P ./dataset/ + # windows + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat --create-dirs -o ./dataset/allen_cahn.mat + python allen_cahn_piratenet.py + ``` + +=== "模型评估命令" + + ``` sh + # linux + wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat -P ./dataset/ + # windows + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat --create-dirs -o ./dataset/allen_cahn.mat + python allen_cahn_piratenet.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_piratenet_pretrained.pdparams + ``` + +=== "模型导出命令" + + ``` sh + python allen_cahn_piratenet.py mode=export + ``` + +=== "模型推理命令" + + ``` sh + # linux + wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat -P ./dataset/ + # windows + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AllenCahn/allen_cahn.mat --create-dirs -o ./dataset/allen_cahn.mat + python allen_cahn_piratenet.py mode=infer + ``` + +| 预训练模型 | 指标 | +|:--| :--| +| [allen_cahn_piratenet_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_piratenet_pretrained.pdparams) | L2Rel.u: 1.2e-05 | + +## 1. 
背景简介 + +Allen-Cahn 方程(有时也叫作模型方程或相场方程)是一种数学模型,通常用于描述两种不同相之间的界面演化。这个方程最早由Samuel Allen和John Cahn在1970年代提出,用以描述合金中相分离的过程。Allen-Cahn 方程是一种非线性偏微分方程,其一般形式可以写为: + +$$ \frac{\partial u}{\partial t} = \varepsilon^2 \Delta u - F'(u) $$ + +这里: + +- $u(\mathbf{x},t)$ 是一个场变量,代表某个物理量,例如合金的组分浓度或者晶体中的有序参数。 +- $t$ 表示时间。 +- $\mathbf{x}$ 表示空间位置。 +- $\Delta$ 是Laplace算子,对应于空间变量的二阶偏导数(即 $\Delta u = \nabla^2 u$ ),用来描述空间扩散过程。 +- $\varepsilon$ 是一个正的小参数,它与相界面的宽度相关。 +- $F(u)$ 是一个双稳态势能函数,通常取为$F(u) = \frac{1}{4}(u^2-1)^2$,这使得 $F'(u) = u^3 - u$ 是其导数,这代表了非线性的反应项,负责驱动系统向稳定状态演化。 + +这个方程中的 $F'(u)$ 项使得在 $u=1$ 和 $u=-1$ 附近有两个稳定的平衡态,这对应于不同的物理相。而 $\varepsilon^2 \Delta u$ 项则描述了相界面的曲率引起的扩散效应,这导致界面趋向于减小曲率。因此,Allen-Cahn 方程描述了由于相界面曲率和势能影响而发生的相变。 + +在实际应用中,该方程还可能包含边界条件和初始条件,以便对特定问题进行数值模拟和分析。例如,在特定的物理问题中,可能会有 Neumann 边界条件(导数为零,表示无通量穿过边界)或 Dirichlet 边界条件(固定的边界值)。 + +本案例解决以下 Allen-Cahn 方程: + +$$ +\begin{aligned} + & u_t - 0.0001 u_{xx} + 5 u^3 - 5 u = 0,\quad t \in [0, 1],\ x\in[-1, 1],\\ + &u(x,0) = x^2 \cos(\pi x),\\ + &u(t, -1) = u(t, 1),\\ + &u_x(t, -1) = u_x(t, 1). +\end{aligned} +$$ + +## 2. 问题定义 + +根据上述方程,可知计算域为$[0, 1]\times [-1, 1]$,含有一个初始条件: $u(x,0) = x^2 \cos(\pi x)$,两个周期边界条件:$u(t, -1) = u(t, 1)$、$u_x(t, -1) = u_x(t, 1)$。 + +## 3. 问题求解 + +接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。 +为了快速理解 PaddleScience,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 + +### 3.1 模型构建 + +在 Allen-Cahn 问题中,每一个已知的坐标点 $(t, x)$ 都有对应的待求解的未知量 $(u)$, +,在这里使用 PirateNet 来表示 $(t, x)$ 到 $(u)$ 的映射函数 $f: \mathbb{R}^2 \to \mathbb{R}^1$ ,即: + +$$ +u = f(t, x) +$$ + +上式中 $f$ 即为 PirateNet 模型本身,用 PaddleScience 代码表示如下 + +``` py linenums="63" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:63:64 +--8<-- +``` + +为了在计算时,准确快速地访问具体变量的值,在这里指定网络模型的输入变量名是 `("t", "x")`,输出变量名是 `("u")`,这些命名与后续代码保持一致。 + +接着通过指定 PirateNet 的层数、神经元个数,就实例化出了一个拥有 3 个 PiraBlock,每个 PiraBlock 的隐层神经元个数为 256 的神经网络模型 `model`, 并且使用 `tanh` 作为激活函数。 + +``` yaml linenums="34" +--8<-- +examples/allen_cahn/conf/allen_cahn_piratenet.yaml:34:40 +--8<-- +``` + +### 3.2 方程构建 + +Allen-Cahn 微分方程可以用如下代码表示: + +``` py linenums="66" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:66:67 +--8<-- +``` + +### 3.3 计算域构建 + +本问题的计算域为 $[0, 1]\times [-1, 1]$,其中用于训练的数据已提前生成,保存在 `./dataset/allen_cahn.mat` 中,读取并生成计算域内的离散点。 + +``` py linenums="69" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:69:81 +--8<-- +``` + +### 3.4 约束构建 + +#### 3.4.1 内部点约束 + +以作用在内部点上的 `SupervisedConstraint` 为例,代码如下: + +``` py linenums="94" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:94:110 +--8<-- +``` + +`SupervisedConstraint` 的第一个参数是用于训练的数据配置,由于我们使用实时随机生成的数据,而不是固定数据点,因此填入自定义的输入数据/标签生成函数; + +第二个参数是方程表达式,因此传入 Allen-Cahn 的方程对象; + +第三个参数是损失函数,此处选用 `CausalMSELoss` 函数,其会根据 `causal` 和 `tol` 参数,对不同的时间窗口进行重新加权, 能更好地优化瞬态问题; + +第四个参数是约束条件的名字,需要给每一个约束条件命名,方便后续对其索引。此处命名为 "PDE" 即可。 + +#### 3.4.2 周期边界约束 + +此处我们采用 hard-constraint 的方式,在神经网络模型中,对输入数据使用cos、sin等周期函数进行周期化,从而让$u_{\theta}$在数学上直接满足方程的周期性质。 +根据方程可得函数$u(t, x)$在$x$轴上的周期为 2,因此将该周期设置到模型配置里即可。 + +``` yaml linenums="41" +--8<-- +examples/allen_cahn/conf/allen_cahn_piratenet.yaml:41:42 +--8<-- +``` + +#### 3.4.3 初值约束 + +第三个约束条件是初值约束,代码如下: + +``` py linenums="112" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:112:125 +--8<-- +``` + +在微分方程约束、初值约束构建完毕之后,以刚才的命名为关键字,封装到一个字典中,方便后续访问。 + +``` py linenums="126" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:126:130 +--8<-- +``` + +### 3.5 超参数设定 + +接下来需要指定训练轮数和学习率,此处按实验经验,使用 300 轮训练轮数,0.001 的初始学习率。 + +``` yaml linenums="50" +--8<-- +examples/allen_cahn/conf/allen_cahn_piratenet.yaml:50:63 +--8<-- +``` + +### 3.6 优化器构建 + 
+训练过程会调用优化器来更新模型参数,此处选择较为常用的 `Adam` 优化器,并配合使用机器学习中常用的 ExponentialDecay 学习率调整策略。 + +``` py linenums="132" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:132:136 +--8<-- +``` + +### 3.7 评估器构建 + +在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此使用 `ppsci.validate.SupervisedValidator` 构建评估器。 + +``` py linenums="138" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:138:156 +--8<-- +``` + +### 3.8 模型训练、评估与可视化 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估、可视化。 + +``` py linenums="158" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py:158:184 +--8<-- +``` + +## 4. 完整代码 + +``` py linenums="1" title="allen_cahn_piratenet.py" +--8<-- +examples/allen_cahn/allen_cahn_piratenet.py +--8<-- +``` + +## 5. 结果展示 + +在计算域上均匀采样出 $201\times501$ 个点,其预测结果和解析解如下图所示。 + +
+ ![allen_cahn_piratenet.jpg](https://paddle-org.bj.bcebos.com/paddlescience/docs/AllenCahn/allen_cahn_piratenet_ac.png){ loading=lazy } +
左侧为 PaddleScience 预测结果,中间为解析解结果,右侧为两者的差值
+
+ +可以看到对于函数$u(t, x)$,模型的预测结果和解析解的结果基本一致。 + +## 6. 参考资料 + +- [PIRATENETS: PHYSICS-INFORMED DEEP LEARNING WITHRESIDUAL ADAPTIVE NETWORKS](https://arxiv.org/pdf/2402.00326.pdf) +- [Allen-Cahn equation](https://github.com/PredictiveIntelligenceLab/jaxpi/blob/main/examples/allen_cahn/README.md) +>>>>>>> Stashed changes diff --git a/docs/zh/examples/amgnet.md b/docs/zh/examples/amgnet.md index 75d6fa0125..cafc3327b3 100644 --- a/docs/zh/examples/amgnet.md +++ b/docs/zh/examples/amgnet.md @@ -1,343 +1,343 @@ -# AMGNet - - - -!!! info "注意事项" - - 本案例运行前需通过 `pip install -r requirements.txt` 命令,安装 [**P**addle **G**raph **L**earning](https://github.com/PaddlePaddle/PGL) 图学习工具和 [PyAMG](https://github.com/pyamg/pyamg) 代数多重网格工具。 - -=== "模型训练命令" - - === "amgnet_airfoil" - - ``` sh - # linux - wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip - # windows - # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -o data.zip - # unzip it - unzip data.zip - python amgnet_airfoil.py - ``` - === "amgnet_cylinder" - - ``` sh - # linux - wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip - # windows - # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -o data.zip - # unzip it - unzip data.zip - python amgnet_cylinder.py - ``` - -=== "模型评估命令" - - === "amgnet_airfoil" - - ``` sh - # linux - wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip - # windows - # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -o data.zip - # unzip it - unzip data.zip - python amgnet_airfoil.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/amgnet/amgnet_airfoil_pretrained.pdparams - ``` - === "amgnet_cylinder" - - ``` sh - # linux - wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip - # windows - # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -o data.zip - # unzip it - unzip data.zip - python amgnet_cylinder.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/amgnet/amgnet_cylinder_pretrained.pdparams - ``` - -| 预训练模型 | 指标 | -|:--| :--| -| [amgnet_airfoil_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/amgnet/amgnet_airfoil_pretrained.pdparams) | loss(RMSE_validator): 0.0001
RMSE.RMSE(RMSE_validator): 0.01315 | -| [amgnet_cylinder_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/amgnet/amgnet_cylinder_pretrained.pdparams) | loss(RMSE_validator): 0.00048
RMSE.RMSE(RMSE_validator): 0.02197 | - -## 1. 背景简介 - -近年来,深度学习在计算机视觉和自然语言处理方面的成功应用,促使人们探索人工智能在科学计算领域的应用,尤其是在计算流体力学(CFD)领域的应用。 - -流体是非常复杂的物理系统,流体的行为由 Navier-Stokes 方程控制。基于网格的有限体积或有限元模拟方法是 CFD 中广泛使用的数值方法。计算流体动力学研究的物理问题往往非常复杂,通常需要大量的计算资源才能求出问题的解,因此需要在求解精度和计算成本之间进行权衡。为了进行数值模拟,计算域通常被网格离散化,由于网格具有良好的几何和物理问题表示能力,同时和图结构相契合,所以这篇文章的作者使用图神经网络,通过训练 CFD 仿真数据,构建了一种数据驱动模型来进行流场预测。 - -## 2. 问题定义 - -作者提出了一种基于图神经网络的 CFD 计算模型,称为 AMGNET(A Multi-scale Graph neural Network),该模型可以预测在不同物理参数下的流场。该方法有以下几个特点: - -- AMGNET 把 CFD 中的网格转化为图结构,通过图神经网络进行信息的处理和聚合,相比于传统的 GCN 方法,该方法的预测误差明显更低。 - -- AMGNET 可以同时计算流体在 x 和 y 方向的速度,同时还能计算流体压强。 - -- AMGNET 通过 RS 算法(Olson and Schroder, 2018)进行了图的粗化,仅使用少量节点即可进行预测,进一步提高了预测速度。 - -下图为该方法的网络结构图。该模型的基本原理就是将网格结构转化为图结构,然后通过网格中节点的物理信息、位置信息以及节点类型对图中的节点和边进行编码。接着对得到的图神经网络使用基于代数多重网格算法(RS)的粗化层进行粗化,将所有节点分类为粗节点集和细节点集,其中粗节点集是细节点集的子集。粗图的节点集合就是粗节点集,于是完成了图的粗化,缩小了图的规模。粗化完成后通过设计的图神经网络信息传递块(GN)来总结和提取图的特征。之后图恢复层采用反向操作,使用空间插值法(Qi et al.,2017)对图进行上采样。例如要对节点 $i$ 插值,则在粗图中找到距离节点 $i$ 最近的 $k$ 个节点,然后通过公式计算得到节点 $i$ 的特征。最后,通过解码器得到每个节点的速度与压力信息。 - -![AMGNet_overview](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/amgnet.png) - -## 3. 问题求解 - -接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。 -为了快速理解 PaddleScience,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 - -### 3.1 数据集下载 - -该案例使用的机翼数据集 Airfoil 来自 de Avila Belbute-Peres 等人,其中翼型数据集采用 NACA0012 翼型,包括 train, test 以及对应的网格数据 mesh_fine;圆柱数据集是原作者利用软件计算的 CFD 算例。 - -执行以下命令,下载并解压数据集。 - -``` sh -wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -unzip data.zip -``` - -### 3.2 模型构建 - -在本问题中,我们使用图神经网络 `AMGNet` 作为模型,其接收图结构数据,输出预测结果。 - -=== "airfoil" - - ``` py linenums="61" - --8<-- - examples/amgnet/amgnet_airfoil.py:61:62 - --8<-- - ``` - -=== "cylinder" - - ``` py linenums="61" - --8<-- - examples/amgnet/amgnet_cylinder.py:61:62 - --8<-- - ``` - -为了在计算时,准确快速地访问具体变量的值,我们在这里指定网络模型的输入变量名是 `("input", )`,输出变量名是 `("pred", )`,这些命名与后续代码保持一致。 - -### 3.3 约束构建 - -在本案例中,我们使用监督数据集对模型进行训练,因此需要构建监督约束。 - -在定义约束之前,我们需要指定数据集的路径等相关配置,将这些信息存放到对应的 YAML 文件中,如下所示。 - -=== "airfoil" - - ``` yaml linenums="21" - --8<-- - examples/amgnet/conf/amgnet_airfoil.yaml:21:27 - --8<-- - ``` - -=== "cylinder" - - ``` yaml linenums="21" - --8<-- - examples/amgnet/conf/amgnet_cylinder.yaml:21:27 - --8<-- - ``` - -接着定义训练损失函数的计算过程,如下所示。 - -=== "airfoil" - - ``` py linenums="35" - --8<-- - examples/amgnet/amgnet_airfoil.py:35:40 - --8<-- - ``` - -=== "cylinder" - - ``` py linenums="35" - --8<-- - examples/amgnet/amgnet_cylinder.py:35:40 - --8<-- - ``` - -最后构建监督约束,如下所示。 - -=== "airfoil" - - ``` py linenums="82" - --8<-- - examples/amgnet/amgnet_airfoil.py:82:90 - --8<-- - ``` - -=== "cylinder" - - ``` py linenums="82" - --8<-- - examples/amgnet/amgnet_cylinder.py:82:90 - --8<-- - ``` - -### 3.4 超参数设定 - -设置训练轮数等参数,如下所示。 - -=== "airfoil" - - ``` yaml linenums="50" - --8<-- - examples/amgnet/conf/amgnet_airfoil.yaml:50:52 - --8<-- - ``` - -=== "cylinder" - - ``` yaml linenums="50" - --8<-- - examples/amgnet/conf/amgnet_cylinder.yaml:50:52 - --8<-- - ``` - -### 3.5 优化器构建 - -训练过程会调用优化器来更新模型参数,此处选择较为常用的 `Adam` 优化器,并使用固定的 `5e-4` 作为学习率。 - -=== "airfoil" - - ``` py linenums="92" - --8<-- - examples/amgnet/amgnet_airfoil.py:92:93 - --8<-- - ``` - -=== "cylinder" - - ``` py linenums="92" - --8<-- - examples/amgnet/amgnet_cylinder.py:92:93 - --8<-- - ``` - -### 3.6 评估器构建 - -在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此使用 `ppsci.validate.SupervisedValidator` 构建评估器,构建过程与 [约束构建](#33) 类似,只需把数据目录改为测试集的目录,并在配置文件中设置 `EVAL.batch_size=1` 即可。 - -=== "airfoil" - - ``` py linenums="95" - 
--8<-- - examples/amgnet/amgnet_airfoil.py:95:118 - --8<-- - ``` -=== "cylinder" - - ``` py linenums="95" - --8<-- - examples/amgnet/amgnet_cylinder.py:95:118 - --8<-- - ``` - -评估指标为预测结果和真实结果的 RMSE 值,因此需自定义指标计算函数,如下所示。 - -=== "airfoil" - - ``` py linenums="43" - --8<-- - examples/amgnet/amgnet_airfoil.py:43:52 - --8<-- - ``` -=== "cylinder" - - ``` py linenums="43" - --8<-- - examples/amgnet/amgnet_cylinder.py:43:52 - --8<-- - ``` - -### 3.7 模型训练 - -完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练。 - -=== "airfoil" - - ``` py linenums="120" - --8<-- - examples/amgnet/amgnet_airfoil.py:120:136 - --8<-- - ``` -=== "cylinder" - - ``` py linenums="120" - --8<-- - examples/amgnet/amgnet_cylinder.py:120:136 - --8<-- - ``` - -### 3.8 结果可视化 - -训练完毕之后程序会对测试集中的数据进行预测,并以图片的形式对结果进行可视化,如下所示。 - -=== "airfoil" - - ``` py linenums="138" - --8<-- - examples/amgnet/amgnet_airfoil.py:138:151 - --8<-- - ``` -=== "cylinder" - - ``` py linenums="138" - --8<-- - examples/amgnet/amgnet_cylinder.py:138:151 - --8<-- - ``` - -## 4. 完整代码 - -=== "airfoil" - - ``` py linenums="1" title="amgnet_airfoil.py" - --8<-- - examples/amgnet/amgnet_airfoil.py - --8<-- - ``` -=== "cylinder" - - ``` py linenums="1" title="amgnet_airfoil.py" - --8<-- - examples/amgnet/amgnet_cylinder.py - --8<-- - ``` - -## 5. 结果展示 - -下方展示了模型对计算域中每个点的压力$p(x,y)$、x(水平)方向流速$u(x,y)$、y(垂直)方向流速$v(x,y)$的预测结果与参考结果。 - -=== "airfoil" - -
- ![Airfoil_0_vec_x](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/airfoil_0field.png0_field.png){ loading=lazy } -
左:预测 x 方向流速 u,右:实际 x 方向流速
- ![Airfoil_0_p](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/airfoil_0field.png1_field.png){ loading=lazy } -
左:预测压力 p,右:实际压力 p
- ![Airfoil_0_vec_y](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/airfoil_0field.png2_field.png){ loading=lazy } -
左:预测 y 方向流速 v,右:实际 y 方向流速
-
- -=== "cylinder" - -
- ![Cylinder_0_vec_x](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/cylinder_0field.png0_field.png){ loading=lazy } -
左:预测 x 方向流速 u,右:实际 x 方向流速
- ![Cylinder_0_p](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/cylinder_0field.png1_field.png){ loading=lazy } -
左:预测压力 p,右:实际压力 p
- ![Cylinder_0_vec_y](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/cylinder_0field.png2_field.png){ loading=lazy } -
左:预测 y 方向流速 v,右:实际 y 方向流速
-
- -可以看到模型预测结果与真实结果基本一致。 - -## 6. 参考文献 - -- [AMGNET: multi-scale graph neural networks for flow field prediction](https://doi.org/10.1080/09540091.2022.2131737) -- [AMGNet - Github](https://github.com/baoshiaijhin/amgnet) -- [AMGNet - AIStudio](https://aistudio.baidu.com/projectdetail/5592458) +# AMGNet + + + +!!! info "注意事项" + + 本案例运行前需通过 `pip install -r requirements.txt` 命令,安装 [**P**addle **G**raph **L**earning](https://github.com/PaddlePaddle/PGL) 图学习工具和 [PyAMG](https://github.com/pyamg/pyamg) 代数多重网格工具。 + +=== "模型训练命令" + + === "amgnet_airfoil" + + ``` sh + # linux + wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip + # windows + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -o data.zip + # unzip it + unzip data.zip + python amgnet_airfoil.py + ``` + === "amgnet_cylinder" + + ``` sh + # linux + wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip + # windows + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -o data.zip + # unzip it + unzip data.zip + python amgnet_cylinder.py + ``` + +=== "模型评估命令" + + === "amgnet_airfoil" + + ``` sh + # linux + wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip + # windows + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -o data.zip + # unzip it + unzip data.zip + python amgnet_airfoil.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/amgnet/amgnet_airfoil_pretrained.pdparams + ``` + === "amgnet_cylinder" + + ``` sh + # linux + wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip + # windows + # curl https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip -o data.zip + # unzip it + unzip data.zip + python amgnet_cylinder.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/amgnet/amgnet_cylinder_pretrained.pdparams + ``` + +| 预训练模型 | 指标 | +|:--| :--| +| [amgnet_airfoil_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/amgnet/amgnet_airfoil_pretrained.pdparams) | loss(RMSE_validator): 0.0001
RMSE.RMSE(RMSE_validator): 0.01315 | +| [amgnet_cylinder_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/amgnet/amgnet_cylinder_pretrained.pdparams) | loss(RMSE_validator): 0.00048
RMSE.RMSE(RMSE_validator): 0.02197 | + +## 1. 背景简介 + +近年来,深度学习在计算机视觉和自然语言处理方面的成功应用,促使人们探索人工智能在科学计算领域的应用,尤其是在计算流体力学(CFD)领域的应用。 + +流体是非常复杂的物理系统,流体的行为由 Navier-Stokes 方程控制。基于网格的有限体积或有限元模拟方法是 CFD 中广泛使用的数值方法。计算流体动力学研究的物理问题往往非常复杂,通常需要大量的计算资源才能求出问题的解,因此需要在求解精度和计算成本之间进行权衡。为了进行数值模拟,计算域通常被网格离散化,由于网格具有良好的几何和物理问题表示能力,同时和图结构相契合,所以这篇文章的作者使用图神经网络,通过训练 CFD 仿真数据,构建了一种数据驱动模型来进行流场预测。 + +## 2. 问题定义 + +作者提出了一种基于图神经网络的 CFD 计算模型,称为 AMGNET(A Multi-scale Graph neural Network),该模型可以预测在不同物理参数下的流场。该方法有以下几个特点: + +- AMGNET 把 CFD 中的网格转化为图结构,通过图神经网络进行信息的处理和聚合,相比于传统的 GCN 方法,该方法的预测误差明显更低。 + +- AMGNET 可以同时计算流体在 x 和 y 方向的速度,同时还能计算流体压强。 + +- AMGNET 通过 RS 算法(Olson and Schroder, 2018)进行了图的粗化,仅使用少量节点即可进行预测,进一步提高了预测速度。 + +下图为该方法的网络结构图。该模型的基本原理就是将网格结构转化为图结构,然后通过网格中节点的物理信息、位置信息以及节点类型对图中的节点和边进行编码。接着对得到的图神经网络使用基于代数多重网格算法(RS)的粗化层进行粗化,将所有节点分类为粗节点集和细节点集,其中粗节点集是细节点集的子集。粗图的节点集合就是粗节点集,于是完成了图的粗化,缩小了图的规模。粗化完成后通过设计的图神经网络信息传递块(GN)来总结和提取图的特征。之后图恢复层采用反向操作,使用空间插值法(Qi et al.,2017)对图进行上采样。例如要对节点 $i$ 插值,则在粗图中找到距离节点 $i$ 最近的 $k$ 个节点,然后通过公式计算得到节点 $i$ 的特征。最后,通过解码器得到每个节点的速度与压力信息。 + +![AMGNet_overview](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/amgnet.png) + +## 3. 问题求解 + +接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。 +为了快速理解 PaddleScience,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 + +### 3.1 数据集下载 + +该案例使用的机翼数据集 Airfoil 来自 de Avila Belbute-Peres 等人,其中翼型数据集采用 NACA0012 翼型,包括 train, test 以及对应的网格数据 mesh_fine;圆柱数据集是原作者利用软件计算的 CFD 算例。 + +执行以下命令,下载并解压数据集。 + +``` sh +wget -nc https://paddle-org.bj.bcebos.com/paddlescience/datasets/AMGNet/data.zip +unzip data.zip +``` + +### 3.2 模型构建 + +在本问题中,我们使用图神经网络 `AMGNet` 作为模型,其接收图结构数据,输出预测结果。 + +=== "airfoil" + + ``` py linenums="61" + --8<-- + examples/amgnet/amgnet_airfoil.py:61:62 + --8<-- + ``` + +=== "cylinder" + + ``` py linenums="61" + --8<-- + examples/amgnet/amgnet_cylinder.py:61:62 + --8<-- + ``` + +为了在计算时,准确快速地访问具体变量的值,我们在这里指定网络模型的输入变量名是 `("input", )`,输出变量名是 `("pred", )`,这些命名与后续代码保持一致。 + +### 3.3 约束构建 + +在本案例中,我们使用监督数据集对模型进行训练,因此需要构建监督约束。 + +在定义约束之前,我们需要指定数据集的路径等相关配置,将这些信息存放到对应的 YAML 文件中,如下所示。 + +=== "airfoil" + + ``` yaml linenums="21" + --8<-- + examples/amgnet/conf/amgnet_airfoil.yaml:21:27 + --8<-- + ``` + +=== "cylinder" + + ``` yaml linenums="21" + --8<-- + examples/amgnet/conf/amgnet_cylinder.yaml:21:27 + --8<-- + ``` + +接着定义训练损失函数的计算过程,如下所示。 + +=== "airfoil" + + ``` py linenums="35" + --8<-- + examples/amgnet/amgnet_airfoil.py:35:40 + --8<-- + ``` + +=== "cylinder" + + ``` py linenums="35" + --8<-- + examples/amgnet/amgnet_cylinder.py:35:40 + --8<-- + ``` + +最后构建监督约束,如下所示。 + +=== "airfoil" + + ``` py linenums="82" + --8<-- + examples/amgnet/amgnet_airfoil.py:82:90 + --8<-- + ``` + +=== "cylinder" + + ``` py linenums="82" + --8<-- + examples/amgnet/amgnet_cylinder.py:82:90 + --8<-- + ``` + +### 3.4 超参数设定 + +设置训练轮数等参数,如下所示。 + +=== "airfoil" + + ``` yaml linenums="50" + --8<-- + examples/amgnet/conf/amgnet_airfoil.yaml:50:52 + --8<-- + ``` + +=== "cylinder" + + ``` yaml linenums="50" + --8<-- + examples/amgnet/conf/amgnet_cylinder.yaml:50:52 + --8<-- + ``` + +### 3.5 优化器构建 + +训练过程会调用优化器来更新模型参数,此处选择较为常用的 `Adam` 优化器,并使用固定的 `5e-4` 作为学习率。 + +=== "airfoil" + + ``` py linenums="92" + --8<-- + examples/amgnet/amgnet_airfoil.py:92:93 + --8<-- + ``` + +=== "cylinder" + + ``` py linenums="92" + --8<-- + examples/amgnet/amgnet_cylinder.py:92:93 + --8<-- + ``` + +### 3.6 评估器构建 + +在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此使用 `ppsci.validate.SupervisedValidator` 构建评估器,构建过程与 [约束构建](#33) 类似,只需把数据目录改为测试集的目录,并在配置文件中设置 `EVAL.batch_size=1` 即可。 + +=== "airfoil" + + ``` py linenums="95" + 
--8<-- + examples/amgnet/amgnet_airfoil.py:95:118 + --8<-- + ``` +=== "cylinder" + + ``` py linenums="95" + --8<-- + examples/amgnet/amgnet_cylinder.py:95:118 + --8<-- + ``` + +评估指标为预测结果和真实结果的 RMSE 值,因此需自定义指标计算函数,如下所示。 + +=== "airfoil" + + ``` py linenums="43" + --8<-- + examples/amgnet/amgnet_airfoil.py:43:52 + --8<-- + ``` +=== "cylinder" + + ``` py linenums="43" + --8<-- + examples/amgnet/amgnet_cylinder.py:43:52 + --8<-- + ``` + +### 3.7 模型训练 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练。 + +=== "airfoil" + + ``` py linenums="120" + --8<-- + examples/amgnet/amgnet_airfoil.py:120:136 + --8<-- + ``` +=== "cylinder" + + ``` py linenums="120" + --8<-- + examples/amgnet/amgnet_cylinder.py:120:136 + --8<-- + ``` + +### 3.8 结果可视化 + +训练完毕之后程序会对测试集中的数据进行预测,并以图片的形式对结果进行可视化,如下所示。 + +=== "airfoil" + + ``` py linenums="138" + --8<-- + examples/amgnet/amgnet_airfoil.py:138:151 + --8<-- + ``` +=== "cylinder" + + ``` py linenums="138" + --8<-- + examples/amgnet/amgnet_cylinder.py:138:151 + --8<-- + ``` + +## 4. 完整代码 + +=== "airfoil" + + ``` py linenums="1" title="amgnet_airfoil.py" + --8<-- + examples/amgnet/amgnet_airfoil.py + --8<-- + ``` +=== "cylinder" + + ``` py linenums="1" title="amgnet_airfoil.py" + --8<-- + examples/amgnet/amgnet_cylinder.py + --8<-- + ``` + +## 5. 结果展示 + +下方展示了模型对计算域中每个点的压力$p(x,y)$、x(水平)方向流速$u(x,y)$、y(垂直)方向流速$v(x,y)$的预测结果与参考结果。 + +=== "airfoil" + +
+ ![Airfoil_0_vec_x](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/airfoil_0field.png0_field.png){ loading=lazy } +
左:预测 x 方向流速 u,右:实际 x 方向流速 u
+ ![Airfoil_0_p](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/airfoil_0field.png1_field.png){ loading=lazy } +
左:预测压力 p,右:实际压力 p
+ ![Airfoil_0_vec_y](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/airfoil_0field.png2_field.png){ loading=lazy } +
左:预测 y 方向流速 v,右:实际 y 方向流速 v
+
+ +=== "cylinder" + +
+ ![Cylinder_0_vec_x](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/cylinder_0field.png0_field.png){ loading=lazy } +
左:预测 x 方向流速 u,右:实际 x 方向流速 u
+ ![Cylinder_0_p](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/cylinder_0field.png1_field.png){ loading=lazy } +
左:预测压力 p,右:实际压力 p
+ ![Cylinder_0_vec_y](https://paddle-org.bj.bcebos.com/paddlescience/docs/AMGNet/cylinder_0field.png2_field.png){ loading=lazy } +
左:预测 y 方向流速 v,右:实际 y 方向流速 v
+
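上文 3.6 节提到,评估指标为预测结果与真实结果之间的 RMSE。为便于理解文档开头 `RMSE.RMSE(RMSE_validator)` 指标的含义,下面给出一个示意性的指标计算函数。注意:函数名 `rmse_metric` 以及输入字典的键 `"pred"` 均为本说明引入的假设命名,实际实现请以 `examples/amgnet/amgnet_airfoil.py` 第 43–52 行的自定义指标函数为准。

``` py
import numpy as np


def rmse_metric(output_dict, label_dict):
    """按变量名计算预测值与真实值之间的 RMSE(示意实现,非官方代码)。"""
    metric = {}
    for key in label_dict:
        pred = np.asarray(output_dict[key], dtype="float64")
        label = np.asarray(label_dict[key], dtype="float64")
        metric[key] = float(np.sqrt(np.mean((pred - label) ** 2)))
    return metric


# 用法示意:rmse_metric({"pred": pred_array}, {"pred": label_array}) 返回 {"pred": rmse_value}
```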
+ +可以看到模型预测结果与真实结果基本一致。 + +## 6. 参考文献 + +- [AMGNET: multi-scale graph neural networks for flow field prediction](https://doi.org/10.1080/09540091.2022.2131737) +- [AMGNet - Github](https://github.com/baoshiaijhin/amgnet) +- [AMGNet - AIStudio](https://aistudio.baidu.com/projectdetail/5592458) diff --git a/docs/zh/examples/catheter.md b/docs/zh/examples/catheter.md index bc6e2ee293..488e8bc6af 100644 --- a/docs/zh/examples/catheter.md +++ b/docs/zh/examples/catheter.md @@ -1,563 +1,563 @@ -# AI-aided geometric design of anti-infection catheters(人工智能辅助的抗感染导管几何设计) - -Distributed under a creative commons Attribution license 4.0 (CC BY). - -## 1. 背景简介 -### 1.1 论文信息 -| 年份 | 期刊 | 作者 | 引用数 | 论文PDF | -| -------------- | --------------- | ------------------------------------------------------------------------------------------------ | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| 3 January 2024 | Science Advance | Tingtao Zhou, X Wan, DZ Huang, Zongyi Li, Z Peng, A Anandkumar, JF Brady, PW Sternberg, C Daraio | 15 | [Paper](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters.pdf), [Supplementary PDF 1](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/sciadv.adj1741_sm.pdf) | - -### 1.2 作者介绍 - -- 第一作者:加州理工学院 Tingtao Zhou
研究方向:统计物理学、流体力学、活性物质、无序材料
![alt text](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter5.png) - -- 通讯作者:加州理工学院 工程与应用科学部 Chiara Daraio (Cited 21038)
教师主页:https://www.eas.caltech.edu/people/daraio
研究方向:力学、材料、非线性动力学、软物质、生物材料
![alt text](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter6.png) - -- 通讯作者:加州理工学院 生物学和生物工程学部 Paul W. Sternberg (Cited 56555)
教师主页:https://www.bbe.caltech.edu/people/paul-w-sternberg
研究方向:秀丽隐杆线虫发育的系统生物学;性别与睡眠背后的神经回路;线虫功能基因组学与化学生态学;文本挖掘。
![alt text](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter7.png) - -- 其他作者所属机构
加州理工学院,工程与应用科学部、化学与化学工程系、生物与生物工程系
北京大学,北京国际数学研究中心
Meta Platforms公司(前Facebook),Reality Labs部门 - -### 1.3 模型&复现代码 - -| 问题类型 | 在线运行 | 神经网络 | 预训练模型 | 指标 | -| -------------------- | -------------------------------------------------------------------------------------------------------------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | -| 算子神经网络预测流场 | [人工智能辅助的抗感染导管几何设计](https://aistudio.baidu.com/projectdetail/8252779?sUid=1952564&shared=1&ts=172724369783) | 傅立叶几何神经算子 | [GeoFNO_pretrained.pdparams](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/result_GeoFNO.pdparams) | loss(MAE): 0.0664 | - - -=== "模型训练命令" - - ``` sh - # linux - wget https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/data.zip - # windows - # curl https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/data.zip -o data.zip - unzip data.zip - python catheter.py - ``` - -=== "预训练模型快速评估" - - ``` sh - python catheter.py mode=eval EVAL.pretrained_model=https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/result_GeoFNO.pdparams - ``` - -在狭窄管道内的流体环境中,细菌能借助流体动力学作用逆流迁移,对使用植入性导管的患者构成泌尿道感染的严重威胁。尽管已有提议采用涂层与结构化表面来抑制导管内的细菌滋生,但遗憾的是,至今尚无一种表面结构或涂层技术能从根本上解决污染难题。鉴于此,我们依据逆流游动的物理原理,创新性地提出了一种几何设计方案,并通过AI模型对细菌流入动力学进行预测与优化。相较于传统模拟方法,所采用的傅立叶神经算子人工智能技术实现了显著的速度提升。 - -在准二维微流体实验中,我们以大肠杆菌为对象,验证了该设计的抗感染机制,并在临床相关流速下,通过 3D 打印的导管原型对其有效性进行了评估。实验结果显示,我们的导管设计在抑制导管上游端细菌污染方面,实现了 1-2 个数量级的提升,有望大幅延长导管的安全留置时间,并整体降低导管相关性尿路感染的风险。 - -## 2. 问题定义 - -导管相关尿路感染(CAUTIs)(1-5)是住院患者中最常见的感染之一,每年约造成3000万美元的损失(6)。从材料/设备的角度来看,以往预防此类感染的方法包括用抗菌银nm粒子浸渍导管(7)或使用抗生素锁溶液、抗粘附或抗菌材料(8, 9)。然而,这些方法的效果均未能超越严格的护理程序,当前临床实践中预防CAUTI的重点是减少导管的留置时间来预防感染。设计一种在流体存在下能减少细菌活动性的导管,将对当前CAUTI的管理带来显著改善。 - -这样的设计需要我们了解微生物在受限条件下流体流动中的运动模式。典型的微生物轨迹在奔跑(直线推进)和翻滚(随机改变方向)之间交替,以探索环境(10-13)。流体动力学相互作用和群体感应导致更复杂的动态行为,如增强对表面的吸引力(14, 15)和集体群游运动(16-19)。在剪切流中,微观的奔跑-翻滚(RTP)运动可以导致宏观的逆流游动(20-27)。通常,被动粒子除了扩散扩散外,还会被对流至下游(28)。然而,微生物的自驱动导致其宏观传输在性质上有所不同:细菌体在穿越管道时会被流体涡度旋转,从而使其逆着流动方向游动。生物微游动体和合成主动粒子都表现出逆流运动性。对于生物微游动体,如大肠杆菌和哺乳动物精子,其前后体不对称性以及由此产生的与壁面的流体动力学相互作用,通常被用来解释其逆流游动行为(20, 21, 25, 29-31)。 - -另一方面,对于可忽略大小的点状主动粒子,逆流游动现象仍然存在(24, 27)。考虑一个点状主动粒子接近壁面的情况:其前端必须指向壁面。在壁面附近,泊肃叶流(在其最大值处)的涡度总是使粒子重新定向到上游方向(也见材料与方法部分)(27),然后它们沿着壁面逆流游动(图1,A和B)。许多其他因素,如体形不对称、鞭毛的手性以及细菌与边界之间的流体动力学相互作用,也会影响逆流游动行为。最近的实验(32)已经证明了在微流控通道中大肠杆菌的超污染现象,这凸显了其幂律运行时间分布的重要性,该分布显著增强了细菌逆流游动的倾向,使细菌能够持续逆着流动方向游动。 - -预防细菌污染的主流策略包括以下几种: - -- (i)物理屏障,如过滤器或膜(33-38); - -- (ii)抗菌剂,如抗生素(36, 37); - -- (iii)对医疗设备进行表面改性以减少细菌粘附和生物膜形成(38-44); - -- (iv)控制物理/化学环境,如高温/低温、低氧水平或使用消毒剂来抑制细菌的生长和存活(45-48); - -- (v)严格的消毒程序,如戴手套和穿隔离衣(49-51); - -- (vi)定期监测患者状况,以便及早发现并治疗细菌污染(52-54)。 - -虽然已经提出了各种表面改性或涂层来减少细菌粘附,但尚未有研究表明它们能有效防止逆流游动或导管污染(38-40)。其他被动的抗菌方法,如膜或过滤,可能难以直接应用于留置导管的患者。 - -与抗生素或其他化学方法相比,通过几何形状控制微生物分布在抗生素耐药性方面更为安全(55-58)。在其他情况下,已经使用了特定形状来限制和捕获不需要的细菌(59)。由于“干燥”的几何整流效应,不对称形状也可以影响运动细菌的分区(60, 61),并且挤出的边界形状可以局部增强泊肃叶流的涡度,增强程度与挤出曲率成正比。 - -我们致力于设计能够防止细菌逆流游动并最大程度减少污染的导管。为了优化导管的几何形状,我们将设计空间限制为在导管内壁放置三角形障碍物。我们捕捉了自驱动球体出现的最简单的逆流游动物理机制(27),并进行了流体和粒子动力学模拟,以找出几何设计原则(图1C)。我们将流体动力学和几何整流效应结合为一个随机偏微分方程(SPDE),以此模拟细菌的分布。然后,我们使用模拟数据训练了一个基于几何聚焦傅里叶神经算子(Geo-FNO)的人工智能(AI)模型(62, 63),以学习SPDE的解,并使用训练好的模型来优化导管的几何形状(图1D)。基于优化后的设计,我们制造了准二维(2D)微流控装置(图1E)和3D打印的原型导管(图1F),以评估我们的概念的有效性。实验结果表明,与我们的标准导管相比,细菌超污染抑制效果提高了多达两个数量级,这为导管相关尿路感染(CAUTI)的管理提供了一条新途径。 - -![图1. 
提出的导管相关尿路感染(CAUTI)机制与抗感染设计流程示意图](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter.png) - -**图1. 提出的导管相关尿路感染(CAUTI)机制与抗感染设计流程示意图** - -- **(A)提出的CAUTI机制**:尿液从患者膀胱内通过导管向外流出时,细菌能够逆着尿流方向(即上游)游动,进而可能侵入患者体内并引发感染。 -- **(B)细菌的跑动-翻滚运动与上游游动机制**:细菌通过一种特有的跑动-翻滚运动模式,在液体环境中实现上游游动。 -- **(C)模拟探索导管形状**:利用模拟技术,探索不同导管形状对细菌上游游动的影响,以期找到能够抑制细菌上游游动的导管设计。 -- **(D)人工智能辅助优化**:采用Geo-FNO框架进行人工智能辅助优化,进一步细化导管的设计参数,提升其对细菌上游游动的抑制效果。 -- **(E)二维通道微流控实验**:在二维微流控通道中,对优化后的导管设计进行实验验证,评估其在实际流体环境中的抗感染性能。 -- **(F)三维实验验证**:使用设计的实际尺寸导管进行三维实验,进一步验证其在临床使用条件下的抗感染效果。 - -我们致力于设计能够防止细菌向上游移动并最大程度减少污染的导管。为了优化导管的几何形状,我们将设计空间限定为在导管内壁布置三角形障碍物。我们捕捉了自驱动球体所展现的最简单的上游游动物理机制(27),并进行了流体和粒子动力学模拟,以找出几何设计原则(图1C)。通过将流体力学和几何整流效应耦合为随机偏微分方程(SPDE),我们对细菌分布进行了建模。随后,我们使用模拟数据训练了一个基于几何聚焦傅里叶神经算子(Geo-FNO)(62, 63)的人工智能(AI)模型,以学习SPDE的解,并利用训练好的模型来优化导管的几何形状(图1D)。基于优化后的设计,我们制作了准二维(2D)微流控装置(图1E)和3D打印的原型导管(图1F),以评估我们的设计理念的有效性。实验结果表明,与标准导管相比,我们的设计在抑制细菌超标污染方面提高了多达两个数量级,为导管相关尿路感染(CAUTI)的管理提供了一条新途径。 - -### 2.1 探究微观机制 - -我们采用了一个简单的模型(27)来描述剪切流中细菌的动力学行为。在这个模型中,细菌被近似为可忽略大小的球体,其方向$q$由以下方程得出: -基于细菌上游游泳的物理机制,建立相应的数学模型,通常使用 ABP 模型进行表示: - -$$ -\frac{d\vec{q}}{dt} = \frac{1}{2} \vec{\omega} + \frac{2}{\tau_R} \eta(t) \times \vec{q} -$$ - -该模型考虑了细菌与导管壁之间的流体动力学相互作用,以及细菌的形状、大小和表面性质等因素。 -其中 - -- $dt(q)$ 代表细菌方向变化率 -- $ω$ 代表局部流体涡量 -- $η(t)$ 代表高斯噪声, 满足$<η(t)>=0$ 和 $<η(0)η(t)>=\delta(t)I$ -- $\vec{q}$ 代表细菌方向向量 -- $\tau_R$ 代表平均运行时间(更多细节详见补充材料) - -我们首先通过数值模拟研究了传统表面改性方法,如抗菌nm粒子涂层(36, 42)、工程化粗糙度或疏水性处理(65, 66),在抑制细菌上游游动中的作用。这些改性表面能够防止细菌过于接近壁面。为了模拟这些表面的存在,我们假设它们会导致细菌从表面脱离,并至少保持在距离表面3μm的位置,这个距离超过了典型的E.coli大肠杆菌体长(1至2μm)。虽然表面改性也可能影响细菌与壁面之间的流体力学相互作用,但在我们基于点状球体的简单通用模型中忽略了这一点。 - -我们发现,在所测试的流速范围内,表面排斥对细菌的上游游动行为几乎没有影响。通过比较光滑通道内(图2D)和表面改性通道内(图2E)持续游动细菌的模拟轨迹,我们发现它们的上游游动行为相似。 - -我们采用两个群体统计指标来量化抑制细菌上游游动的有效性: - -- (i)平均上游游动距离$x_{up}=-\int_{0}^{-\infty}\rho(x)xdx$,通过计算细菌分布函数$ρ(x)$的加权平均值得出,其中$x$为细菌位置; - -- (ii)前$1\%$上游游动最远的细菌所能到达的距离$x_{1\%}$。模拟结果显示,表面改性仅在中等流速下略微减少了$x_{up}$,但对$x_{1\%}$几乎没有影响(图2F中的蓝线和粉线)。这种表面改性效果不佳的结果与近几年一些论文的实验观察结果一致(39, 40)。 - -随后,我们通过添加物理障碍物来探索导管表面几何形状的作用。我们发现,对称和不对称的障碍物都能显著抑制细菌的上游游动(如图2F中的黑色和绿色线条所示)。我们确定了两种协同效应:首先,障碍物的斜率会在细菌从障碍物顶部出发时改变其游动方向,从而打断了它们沿着管壁表面的连续攀爬。不对称的形状会使细菌的运动偏向下游(如图2A所示),这在模拟的0流速下的轨迹(补充材料和图S1)以及低流速下上游游动统计数据的差异(图2F中的黑色和绿色线条)中均有所体现。其次,在有限的流速下,流场与光滑通道中的泊肃叶流不同(如图2B所示)。在泊肃叶流中,涡量会使细菌转向下游。而在障碍物附近,涡量会增强,导致细菌转向上游(如图2C和补充材料图S2所示), 从而加强了细菌的转向机制。结合这两种效应,我们预计在具有优化障碍物几何形状的通道中,细菌的上游游动将显著减少。 - -设计优化的参数空间由四个参数表征:障碍物基底长度$L$、高度$h$、尖端位置$s$以及障碍物间距$d$;我们用W表示通道宽度(图2G)。为了优化这个空间,我们设定了两个约束条件。首先,如果相邻障碍物过于接近,它们尖端的涡旋就会开始重叠。由于这种重叠,最大有效涡旋强度(正好在障碍物尖端;有效涡旋的数学定义见补充材料)和涡旋的有效尺寸都会减小。此外,还会形成更大的边界层和滞流区(图S2,A和B)。因此,我们将障碍物间距约束为$d > 0.5W$(图S2G)。其次,在其他参数固定的情况下,随着h的增加,障碍物尖端的有效涡旋强度也会增加(图S2,C至H),这有利于促进涡旋重定向效应。然而,当$h = W/2$时,管道显然会发生堵塞。这种随着$h$增加而堵塞加剧的趋势反映在为了保持相同的有效流速而所需压力降的持续增加上(图S2I)。为了避免堵塞,我们将高度约束为$h < 0.3W$。 - -![图2. 障碍物抑制上游游动和几何优化的物理机制](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter2.png) - -**图2. 
障碍物抑制上游游动和几何优化的物理机制** - -- **(A)无流动时的几何整流效应**:描述了在没有流体流动的情况下,几何形状对细菌游动方向的影响。 - -- **(B)光滑通道中的泊肃叶流**:彩色背景显示流涡量的相对大小,颜色越深表示涡量越大。在光滑通道中,泊肃叶流产生的涡量使细菌头部向下游旋转。 - -- **(C)带有对称障碍物的通道中的流动**:在带有对称障碍物的通道中,障碍物顶部附近的流速和涡量增强,这导致更强的扭矩作用在细菌上,使其重定向至下游。 - -- **(D)和(E)不同条件下的细菌模拟轨迹**: - - - **(D)光滑通道**:在宽度为50μm的二维光滑通道中,细菌的模拟轨迹显示其持续游动状态。 - - - **(E)排斥细菌的表面改性通道**:在表面经过改性以排斥细菌的通道中,细菌的游动轨迹受到显著影响。 - -- **(F)上游游动的群体统计**: - - - 实线(左侧y轴)表示平均上游距离,反映了细菌群体在上游方向上的平均游动距离。 - - - 虚线(右侧y轴)表示群体中前1%游动者的上游距离,揭示了少数高效游动细菌的表现。 - - - 不同颜色的线条代表不同的通道条件:蓝色为光滑通道,橙色为表面改性通道,黑色为对称障碍物通道,绿色为不对称障碍物通道。 - -- **(G)AI算子神经网络模型和结果**: - - - Geo-FnO模型旨在学习导管几何形状与细菌分布之间的关系,通过一系列神经算子层实现。 - - - 模型首先将不规则的通道几何形状映射到单位段[0,1],然后在潜在空间中应用基于傅里叶的内核进行预测。 - - - 最后,将预测的细菌分布从潜在空间转换回物理空间。 - - - 右图展示了随机初始条件(黑色)和优化后的设计(粉色)的对比,以及通过流体和粒子动力学模拟验证的Geo-FnO预测结果(绿色虚线)。 - -![图3. 微流控实验](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter3.png) - -**图3. 微流控实验** - -- **(A)微流控实验示意图**:微流控通道的一端连接着装有成像溶液的注射器,另一端则连接着装有大肠杆菌的储液池。长箭头表示流动方向。 - -- **(B)细菌在锐角处的积聚**:由于流动停滞,细菌在通道的锐角处积聚。 - -- **(C)微流控通道的明场图像**:展示了通道的实际结构。 - -- **(D)细菌从通道壁上脱落的典型事件**: - - - 细菌(白色点)的轨迹在过去5s内以黄色线条显示。 - - - 上图展示了一种类型1的轨迹,其中细菌从障碍物尖端脱落。 - - - 下图展示了一种典型的类型2轨迹,其中细菌从通道的平滑部分脱落。 - - - 左列为实验图像,右列为模拟图像。 - -- **(E)脱落事件的统计**:提供了关于细菌脱落事件的统计数据。 - -![图4. 3D打印导管原型的实验](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter4.png) - -**Fig. 4. 3D打印导管原型的实验** - -- **(A)实验设置**:导管的下游端连接到大肠杆菌的储液池,上游端连接到由注射泵控制的装满培养液的注射器。1小时后,将导管切成等长段,并提取内部液体进行24小时培养。在显微镜下计数大肠杆菌菌落的数量,以反映每段导管中的细菌数量。 - -- **(B)光滑导管中的大肠杆菌超污染**:展示了在光滑导管中大肠杆菌的污染情况。 - -- **(C)设计导管与光滑导管的比较**:对比了设计导管与光滑导管在细菌污染方面的差异。插图显示了相同数据在对数尺度上的绘制。 - -### 2.2 AI辅助优化的几何设计 - -近年来,基于人工智能(AI)的模型,如神经算子,已被用于学习流体动力学和其他领域的正向模拟或观测模型的替代品。由于这些模型是可微分的,因此它们可以直接用于逆向设计,即我们可以使用梯度直接在设计空间中进行优化。这使得生成以前未研究过的设计变得更加简洁高效。我们使用了一个AI模型来优化通道形状,该形状由上述描述的四个参数和两个约束条件表征(图2G)。这种方法首先将不规则的通道几何形状映射到潜在空间(一个单位的导管片段长度$[0,1]$)中的一个函数,然后在潜在空间中应用傅里叶神经算子(FNO)模型,最后将细菌分布转换回物理空间(图2G)。然后,我们使用这个训练好的替代模型进行逆向设计优化,以确定最佳的通道形状。为了评估每种设计的有效性,我们测量了在$T=500$s时,三种流速($5、10$和$15μm/s$)下的平均⟨$x_{up}$⟩值。我们基于几何感知傅里叶神经算子的AI辅助形状设计,在加权细菌分布方面比训练数据中的给定形状提高了约$20\%$。整个设计优化过程非常快速:并行生成1000个训练实例(在50个GPU上运行10小时),每个实例需要30分钟;在1个GPU上训练模型需要20分钟;而我们训练好的AI模型在1个GPU上生成最优设计仅需15s。优化过程得出了以下最优结构参数:$d=62.26μm,h=30.0μm,s=-19.56μm,L=12.27μm$,对于通道宽度$W=100μm$。根据上文所述的机制,这种结构提供了强大的几何整流和涡旋重定向效应,以抑制细菌的逆流游动。 - -### 2.3 微流控实验 - -为了评估优化结构的有效性,我们制作了宽度$W=100μm$(壁到壁的距离)且垂直深度为20μm的准二维微流控通道,以便在显微镜下观察细菌的运动情况(图3A)。我们选取了逆流游动的细菌子集,并根据它们从壁上脱落的位置进行了分类。如果细菌从障碍物的顶部脱落,则将其轨迹标记为“类型1”(图3D,上方);如果细菌从壁的平滑部分脱落,则将其标记为“类型2”(图3D,下方)。类型1的轨迹会同时受到几何整流和增强的流体动力学旋转破坏效应的影响。而类型2的轨迹则不会受到几何整流效应的影响,仅会受到轻微的涡旋重定向效应,因为涡量的增强在障碍物尖端最为强烈。对于流速$U_0<100μm/s$的情况,70%到80%的逆流游动轨迹属于类型1(图3E)。我们还注意到,在这些实验中观察到的所有逆流游动轨迹都被重定向到了下游(图3E,红线)。在尖锐的拐角附近观察到了细菌积聚现象(图3B),这可能是由于停滞区的存在(图2C和附图S1,拐角附近的白色区域)。为了防止细菌在拐角处积聚,我们将几何形状用半径$r=h/2$的圆弧进行了圆滑处理(图3C)。 - -### 2.4 宏观尺度导管实验 - -上述展示的机制和设计原则很容易扩展到导管上。在三维管道中,细菌可以通过横截面的任何切割线穿过管道(附图S2J)。由于与上述相同的机制(图2,A、B、F至I,以及附图S1),仅在壁附近的无量纲剪切率起作用(27),因此靠近边界移动的细菌(附图S2J中的轨迹1)仍然可以逆流游动。超污染细菌的游动距离可以超过1mm(32),这与重新缩放的障碍物尺寸相当,预计在这些尺度上整流效应会持续存在(61)。数量级估计表明,也可以采用伴随方法的雷诺数下降(71)。我们注意到,几何设计不能完全消除细菌的逆流游动,特别是在接近零流速的情况下。然而,它极大地减少了超污染的数量,并可能显著延长导管的留置时间。使用我们设计的导管预计不需要改变常规临床方案或重新培训医务人员。此外,我们的解决方案不会向导管中引入化学物质,因此是安全的,并且不需要额外的维护。我们的几何设计方法预计与其他程序措施、抗菌表面改性和环境控制方法兼容。 - - -![S1. 微流控实验](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheterS1.png) - -**图S1. 
主动布朗粒子的模拟轨迹示例** - -在(A)(C)具有对称障碍物的通道中和(B)(D)具有不对称障碍物的通道中的轨迹。(A)(B)无流体流动。(C)(D)有流体流动。颜色表示局部归一化涡量。 - -![S2. 主动布朗粒子的模拟轨迹示例](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheterS2-1.png) -![S2. 主动布朗粒子的模拟轨迹示例](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheterS2-2.png) - -**图S2. 几何优化约束和缩放的考虑因素** - -- (A-H)归一化涡量作为障碍物高度ℎ和障碍物间距𝑑的函数。 -- (I)沿通道一个周期内的归一化压降作为归一化障碍物高度ℎ/𝑑的函数。 -- (J)宏观圆柱形管内细菌运动的横截面视图。与轨迹2相比,轨迹2中的细菌在管中心附近经历强烈的下游流动,而采取轨迹1的另一个细菌则靠近管壁。因此,细菌1所经历的流动条件与我们考虑的微流控通道中的条件更为相似。其上游游动行为和几何抑制机制将与微流控条件相似,只是存在定量上的差异。 - -### 2.5 流体和粒子动力学模拟 - -我们使用COMSOL软件(72)模拟了具有无滑移边界条件的通道内的斯托克斯流。随后,将得到的速度和涡量场耦合到粒子动力学模拟中,而在稀释悬浮液和小粒子尺寸的极限情况下,忽略了粒子运动对流体动力学的反馈。粒子动力学由具有高斯统计特性的主动布朗粒子(ABP)模型和具有幂律(Levy)统计特性的运行-休止(RTP)模型描述。模拟是使用我们内部开发的GPU Julia代码进行的,模拟时间步长为10^-4s。在ABP模型中,单个粒子的动力学是根据过阻尼的朗之万方程进行积分的。 - -$$0=-\zeta(U-u)+\zeta U_0q(t)+\sqrt{2D_T\xi(t)}$$ - -$$d{q}/dt=\left[1/2{\omega}+B{q}\times(\mathbf{E}\cdot{q})+\sqrt{2/\tau_R{\eta}({t})}\right]\times{q}$$ - -其中,$ζ$是粘性阻力系数,$U$是粒子的速度,${q}$是粒子的方向向量,$u$是局部流速,$ω$是局部流场的涡量向量,$E$是流场的局部应变率张量。$B$是一个几何系数(3、74),对于无限细的杆状物体,它等于1,对于球体,它等于0。由于B的值对上游游动统计的影响不显著(27),因此我们在这里展示的结果中设$B=0$。$ξ(t)$是满足$⟨ξ(t)⟩=0和⟨ξ(0)ξ(t)⟩=δ(t)I$的高斯随机噪声。由于细菌是μm级的粒子,它们的布朗运动相对较弱,因此在模拟中我们将平移扩散系数DT设置为$0.1 μm²/s$。只要这个值保持较小,其变化对结果的影响就不大。 - -η是满足$⟨η(t)⟩=0和⟨η(0)η(t)⟩=δ(t)I$的高斯噪声,τR是平均运行时间。在RTP(Run-and-Tumble,奔跑-翻滚)模型中,单个粒子在$0 - - - - - -**视频S1-S2. 不同流动条件下细菌从壁面脱落的记录** - - - -**视频S3. 实时的优化设计** - -### 2.10 3D导管长期实验 - -使用Connex-Triplex 3D打印机打印了原型导管管(包括几何设计款和光滑款)。设计有障碍物的管内部结构与准二维结构相似,但进行了放大,并围绕通道的中心线旋转,使得障碍物成为内壁上的挤出环。考虑到可用的3D打印精度和典型导管的尺寸,这些原型的内径为1.6cm。对于设计有障碍物的管,挤出环之间的间距为1mm。为了便于清除3D打印产生的支撑材料,每根管被打印成两半,长边呈榫头形状,在去除支撑材料后组装成完整的管。 - -如图4A所示,管的上端连接到一个由机械泵控制的注射器,以保持恒定的流速。管的下端连接到一个直径为80mm的培养皿,作为E. coli(大肠杆菌)的储液池。1小时后,将管切成$2cm$长的段,并将每段内的液体转移到培养板上,同时丢弃最上游和最下游的段。在室温下培养培养板24小时后,计数每个培养板上的细菌菌落数量,以反映管相应部分的污染量。 - -为了计数菌落数量,在培养板上选择了四个圆形、等距、直径为8mm的区域(见图S5)。通过计算这四个区域内菌落的总数,并乘以整个培养板面积与这四个区域面积的比例(即25倍),来估算整个培养板上的菌落总数。当培养板上的菌落过多,变得过于拥挤或重叠以至于无法精确计数时,我们将整个培养板上的菌落总数记为30,000。 - -### 2.11 讨论 - -在本研究中,我们介绍了一种医用导管内表面的有效几何设计,旨在抑制细菌的逆流游动和过度污染。我们的设计思路是基于阻碍细菌逆流游动的物理机制,同时考虑了具有幂律动力学的球形粒子流变导向的一般模型。由于传染性微生物在形状、鞭毛特征和流体动力学相互作用方面存在差异,为简化设计和提高设计的通用性,本研究采用的简化模型忽略了细菌运动的细节,如鞭毛的螺旋性(29)和与边界的流体动力学相互作用(20)。模拟结果用于指导实验设计,而非特定预测大肠杆菌的实验结果。未来研究可采用更复杂的模型,考虑特定微生物种类的细节。 - -我们发现,由于涡旋重叠的相互作用,在障碍物尖端附近会产生有效的涡量,为此我们确定了障碍物之间间距的下限(图S2和补充材料)。障碍物高度的限制是在增强有效涡量和避免管道堵塞之间做出的权衡(图S2)。虽然我们选择使用这种人工智能框架来优化导管的几何形状,但也可以采用其他方法,如结合数值求解器的遗传算法(70)或结合伴随方法的梯度下降法(71)。 - -我们注意到,几何设计无法完全消除细菌的逆流游动,尤其是在流速接近零的情况下。然而,它能显著减少过度污染的量,并可能大幅延长导管的留置时间。使用我们设计的导管预计不需要改变常规临床方案或重新培训医务人员。此外,我们的解决方案不会在导管中引入化学物质,因此是安全的,也不需要额外的维护。我们预计,这种几何设计方法将与其他程序措施、抗菌表面改性和环境控制方法相兼容。 - -## 3. 
问题求解 - -论文采用几何聚焦傅里叶神经算子(Geo-FNO)构建AI模型。该模型能够学习并解决与几何形状相关的随机偏微分方程(SPDE),从而实现对导管几何形状的优化,并通过微流体实验和3D打印技术,制作具有不同几何形状的导管原型,并测试其抑制细菌上游游泳的效果。 -接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。为了快速理解 PaddleScience,接下来仅对模型构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API 文档](../api/arch.md)。 - -### 3.1 数据集介绍 - -数据文件说明如下: - -| `./data.zip/training/` | | `./data.zip/test/` | | -| :-------------------------------: | :----------------: | :---------------------------: | :---------------: | -| 文件名 | 说明 | 文件名 | 说明 | -| training/x_1d_structured_mesh.npy | 形状为(2001, 3003) | test/x_1d_structured_mesh.npy | 形状为(2001, 300) | -| training/y_1d_structured_mesh.npy | 形状为(2001, 3003) | test/y_1d_structured_mesh.npy | 形状为(2001, 300) | -| training/data_info.npy | 形状为(7, 3003) | test/data_info.npy | 形状为(7, 300) | -| training/density_1d_data.npy | 形状为(2001, 3003) | test/density_1d_data.npy | 形状为(2001, 300) | - -在加载数据之后,需要将 x、y 进行合并,同时对于合并后的训练数据重新 `reshape` 为 `(1000, 2001, 2)` 的格式,具体代码如下 - -```py ---8<-- -examples/catheter/catheter.py:31:75 ---8<-- -``` - -### 3.2 GeoFNO 模型 - -GeoFNO 是一种基于 **几何聚焦傅里叶神经算子 (Geo-FNO** ) 的机器学习模型,它将几何形状转换到傅里叶空间,从而更好地捕捉形状的特征,并利用傅里叶变换的可逆性,可以将结果转换回物理空间。 - -在论文中,该模型能够学习并解决与几何形状相关的偏微分方程(SPDE),从而实现对导管几何形状的优化, 代码表示如下 - -```py ---8<-- -ppsci/arch/geofno.py:95:205 ---8<-- -``` - -为了在计算时,准确快速地访问具体变量的值,我们在这里指定网络模型的输入变量名是 `("input",)`,输出变量名是 `("output",)`,这些命名与后续代码保持一致。 - -接着通过指定 FNO1d 的层数、特征通道数,神经元个数,并通过加载上文所提及的初始化权重模型,我们就实例化出了一个神经网络模型 `model`。 - -### 3.3 模型训练、评估 - -完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估。 - -```python ---8<-- -examples/catheter/catheter.py:162:177 ---8<-- -``` - -## 4. 结果展示 - -=== "训练、推理loss" - -下方展示了训练后模型对测试数据的第一次预测结果以及最后一次预测结果。 - -=== "第一次预测结果" - - ![1725427977357](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter10.png) - -=== "最后一次预测结果" - - ![1725428017615](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter9.png) - -=== "训练测试损失" - - ![1725894134717](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter8.png) - -可以看到模型预测结果与真实结果基本一致,优化后的导管具有特定的几何形状,如障碍物分布和间距等,这些形状特征能够显著影响流体动力学相互作用,从而抑制细菌的上游游泳行为。 - -## 6. 参考 - -参考代码: /zongyi-li/Geo-FNO-catheter - -参考文献列表 - -1. J. W. Warren, the catheter and urinary tract infection. Med. Clin. North Am. 75, 481–493 -(1991). -2. l. e. nicolle, catheter- related urinary tract infection. Drugs Aging 22, 627–639 (2005). -3. e. K. Shuman, c. e. chenoweth, Urinary catheter- associated infections. Infect. Dis. Clin. -North Am. 32, 885–897 (2018). -4. n. Buetti, A. tabah, J. F. timsit, W. Zingg, What is new in catheter use and catheter -infection prevention in the icU. Curr. Opin. Crit. Care 26, 459–465 (2020). -5. l. chuang, P. A. tambyah, catheter-associated urinary tract infection. J. Infect. Chemother. -27, 1400–1406 (2021). -6. e. Zimlichman, d. henderson, O. tamir, c. Franz, P. Song, c. K. Yamin, c. Keohane, -c. R. denham, d. W. Bates, health care–associated infections. JAMA Intern. Med. 173, -2039–2046 (2013). -7. U. Samuel, J. Guggenbichler, Prevention of catheter-related infections: the potential of a -new nano-silver impregnated catheter. Int. J. Antimicrob. Agents 23, 75–78 (2004). -8. W. Kohnen, B. Jansen, Polymer materials for the prevention of catheter-related infections. -Zentralbl. Bakteriol. 283, 175–186 (1995). -9. A. hameed, F. chinegwundoh, A. thwaini, Prevention of catheter-related urinary tract -infections. Med. 
Hypotheses 71, 148–152 (2010). -10. h. c. Berg, d. A. Brown, chemotaxis in Escherichia coli analysed by three-dimensional -tracking. Nature 239, 500–504 (1972). -11. h. c. Berg, the rotary motor of bacterial flagella. Annu. Rev. Biochem. 72, 19–54 (2003). -12. h. c. Berg, E. coli in Motion (Springer, 2004). -13. M. Polin, i. tuval, K. drescher, J. P. Gollub, R. e. Goldstein, chlamydomonas swims with two -“gears” in a eukaryotic version of run-and- tumble locomotion. Science 325, 487–490 -(2009). -14. A. P. Berke, l. turner, h. c. Berg, e. lauga, hydrodynamic attraction of swimming -microorganisms by surfaces. Phys. Rev. Lett. 101, 038102 (2008). -15. e. lauga, t. R. Powers, the hydrodynamics of swimming microorganisms. Rep. Prog. Phys. -72, 096601 (2009). -16. d. Kaiser, Bacterial swarming: A re-examination of cell-movement patterns. Curr. Biol. 17, -R561–R570 (2007). -17. n. verstraeten, K. Braeken, B. debkumari, M. Fauvart, J. Fransaer, J. vermant, J. Michiels, -living on a surface: Swarming and biofilm formation. Trends Microbiol. 16, 496–506 -(2008). -18. d. B. Kearns, A field guide to bacterial swarming motility. Nat. Rev. Microbiol. 8, 634–644 -(2010). -19. d. Ghosh, X. cheng, to cross or not to cross: collective swimming of Escherichia coli under -two-dimensional confinement. Phys. Rev. Res. 4, 023105 (2022). -20. J. hill, O. Kalkanci, J. l. McMurry, h. Koser, hydrodynamic surface interactions enable -Escherichia coli to seek efficient routes to swim upstream. Phys. Rev. Lett. 98, 068101 (2007). -21. t. Kaya, h. Koser, direct upstream motility in Escherichia coli. Biophys. J. 102, 1514–1523 -(2012). -22. Marcos, h. c. Fu, t. R. Powers, R. Stocker, Bacterial rheotaxis. Proc. Natl. Acad. Sci. U.S.A. -109, 4780–4785 (2012). -23. Y. Shen, A. Siryaporn, S. lecuyer, Z. Gitai, h. A. Stone, Flow directs surface-attached -bacteria to twitch upstream. Biophys. J. 103, 146–151 (2012). -24. A. Zöttl, h. Stark, nonlinear dynamics of a microswimmer in Poiseuille flow. Phys. Rev. Lett. -108, 218104 (2012). -25. c.-K. tung, F. Ardon, A. Roy, d. l. Koch, S. S. Suarez, M. Wu, emergence of upstream -swimming via a hydrodynamic transition. Phys. Rev. Lett. 114, 108102 (2015). -26. A. J. Mathijssen, t. n. Shendruk, J. M. Yeomans, A. doostmohammadi, Upstream -swimming in microbiological flows. Phys. Rev. Lett. 116, 028104 (2016). -27. Z. Peng, J. F. Brady, Upstream swimming and taylor dispersion of active Brownian -particles. Phys. Rev. Fluids 5, 073102 (2020). -28. G. i. taylor, dispersion of soluble matter in solvent flowing slowly through a tube. -Proc. R. Soc. A-Math. Phys. Eng. Sci. 219, 186–203 (1953). -29. t. Kaya, h. Koser, characterization of hydrodynamic surface interactions of Escherichia coli -cell bodies in shear flow. Phys. Rev. Lett. 103, 138103 (2009). -30. v. Kantsler, J. dunkel, M. Blayney, R. e. Goldstein, Rheotaxis facilitates upstream -navigation of mammalian sperm cells. eLife 3, e02403 (2014). -31. t. Omori, t. ishikawa, Upward swimming of a sperm cell in shear flow. Phys. Rev. E. 93, -032402 (2016). -32. n. Figueroa-Morales, A. Rivera, R. Soto, A. lindner, e. Altshuler, É. clément, E. coli -“super-contaminates” narrow ducts fostered by broad run-time distribution. Sci. Adv. 6, -eaay0155 (2020). -33. B. e. logan, t. A. hilbert, R. G. Arnold, Removal of bacteria in laboratory filters: Models and -experiments. Water Res. 27, 955–962 (1993). -34. W. dzik, Use of leukodepletion filters for the removal of bacteria. Immunol. Invest. 24, -95–115 (1995). -35. l. Fernandez Garcia, S. 
Alvarez Blanco, F. A. Riera Rodriguez, Microfiltration applied to -dairy streams: Removal of bacteria. J. Sci. Food Agric. 93, 187–196 (2013). -36. G. Franci, A. Falanga, S. Galdiero, l. Palomba, M. Rai, G. Morelli, M. Galdiero, Silver -nanoparticles as potential antibacterial agents. Molecules 20, 8856–8874 (2015). -37. M. i. hutchings, A. W. truman, B. Wilkinson, Antibiotics: Past, present and future. Curr. -Opin. Microbiol. 51, 72–80 (2019). -38. J. W. costerton, h. M. lappin-Scott, introduction to microbial biofilms, in Microbial Biofilms, -h. M. lappin-Scott, J. W. costerton, eds. (cambridge Univ. Press, 1995), pp. 1–11. -39. W.- h. Sheng, W.-J. Ko, J.-t. Wang, S.-c. chang, P.-R. hsueh, K.-t. luh, evaluation of -antiseptic-impregnated central venous catheters for prevention of catheter-related -infection in intensive care unit patients. Diagn. Microbiol. Infect. Dis. 38, 1–5 (2000). -40. W. M. dunne Jr., Bacterial adhesion: Seen any good biofilms lately? Clin. Microbiol. Rev. 15, -155–166 (2002). -41. R. P. Allaker, the use of nanoparticles to control oral biofilm formation. J. Dent. Res. 89, -1175–1186 (2010). -42. M. l. Knetsch, l. h. Koole, new strategies in the development of antimicrobial coatings: -the example of increasing usage of silver and silver nanoparticles. Polymers 3, 340–366 -(2011). -43. M. Birkett, l. dover, c. cherian lukose, A. Wasy Zia, M. M. tambuwala, Á. Serrano-Aroca, -Recent advances in metal-based antimicrobial coatings for high-touch surfaces. Int. J. -Mol. Sci. 23, 1162 (2022). -44. J. R. lex, R. Koucheki, n. A. Stavropoulos, J. di Michele, J. S. toor, K. tsoi, P. c. Ferguson, -R. e. turcotte, P. J. Papagelopoulos, Megaprosthesis anti-bacterial coatings: A -comprehensive translational review. Acta Biomater. 140, 136–148 (2022). -45. J. Monod, the growth of bacterial cultures. Annu. Rev. Microbiol. 3, 371–394 (1949). -46. M. hecker, W. Schumann, U. völker, heat-shock and general stress response in Bacillus -subtilis. Mol. Microbiol. 19, 417–428 (1996). -47. P. Setlow, Spores of Bacillus subtilis: their resistance to and killing by radiation, heat and -chemicals. J. Appl. Microbiol. 101, 514–525 (2006). -48. M. Falagas, P. thomaidis, i. Kotsantis, K. Sgouros, G. Samonis, d. Karageorgopoulos, -Airborne hydrogen peroxide for disinfection of the hospital environment and infection -control: A systematic review. J. Hosp. Infect. 78, 171–177 (2011). -49. W. A. Rutala, d. J. Weber, disinfection and sterilization in health care facilities: What -clinicians need to know. Clin. Infect. Dis. 39, 702–709 (2004). -50. W. A. Rutala, d. J. Weber, disinfection and sterilization: An overview. Am. J. Infect. Control -41, S2–S5 (2013). -51. n. P. tipnis, d. J. Burgess, Sterilization of implantable polymer-based medical devices: A -review. Int. J. Pharm. 544, 455–460 (2018). -52. M. Berger, R. Shiau, J. M. Weintraub, Review of syndromic surveillance: implications for -waterborne disease detection. J. Epidemiol. Community Health 60, 543–550 (2006). -53. M. v. Storey, B. van der Gaag, B. P. Burns, Advances in on-line drinking water quality -monitoring and early warning systems. Water Res. 45, 741–747 (2011). -54. S. hyllestad, e. Amato, K. nygård, l. vold, P. Aavitsland, the effectiveness of syndromic -surveillance for the early detection of waterborne outbreaks: A systematic review. -BMC Infect. Dis. 21, 696 (2021). -55. F. Baquero, J.- l. Martínez, R. cantón, Antibiotics and antibiotic resistance in water -environments. Curr. Opin. Biotechnol. 19, 260–265 (2008). -56. R. i. 
Aminov, the role of antibiotics and antibiotic resistance in nature. Environ. Microbiol. -11, 2970–2988 (2009). -57. J. M. Munita, c. A. Arias, Mechanisms of antibiotic resistance. Microbiol Spectr, (2016). -58. U. theuretzbacher, K. Bush, S. harbarth, M. Paul, J. h. Rex, e. tacconelli, G. e. thwaites, -critical analysis of antibacterial agents in clinical development. Nat. Rev. Microbiol. 18, -286–298 (2020). -59. R. di Giacomo, S. Krödel, B. Maresca, P. Benzoni, R. Rusconi, R. Stocker, c. daraio, -deployable micro- traps to sequester motile bacteria. Sci. Rep. 7, 45897 (2017). -60. P. Galajda, J. Keymer, P. chaikin, R. Austin, A wall of funnels concentrates swimming -bacteria. J. Bacteriol. 189, 8704–8707 (2007). -61. c. M. Kjeldbjerg, J. F. Brady, theory for the casimir effect and the partitioning of active -matter. Soft Matter 17, 523–530 (2021). -62. Z. li, n. Kovachki, K. Azizzadenesheli, B. liu, K. Bhattacharya, A. Stuart, A. Anandkumar. -Fourier neural operator for parametric partial differential equations. arXiv:2010.08895 -[cs.lG] (2020). -63. Z. li, d. Z. huang, B. liu, A. Anandkumar. Fourier neural operator with learned -deformations for pdes on general geometries. arXiv:2207.05209 [cs.lG] (2022). -64. A. J. Mathijssen, n. Figueroa-Morales, G. Junot, É. clément, A. lindner, A. Zöttl, Oscillatory -surface rheotaxis of swimming E. coli bacteria. Nat. Commun. 10, 3434 (2019). -65. S. B. Goodman, Z. Yao, M. Keeney, F. Yang, the future of biologic coatings for orthopaedic -implants. Biomaterials 34, 3174–3183 (2013). -66. A. Jaggessar, h. Shahali, A. Mathew, P. K. Yarlagadda, Bio-mimicking nano and -micro-structured surface fabrication for antibacterial properties in medical implants. -J. Nanobiotechnol. 15, 64 (2017). -67. e. Macedo, R. Malhotra, R. claure- del Granado, P. Fedullo, R. l. Mehta, defining urine -output criterion for acute kidney injury in critically ill patients. Nephrol Dial Transplant 26, -509–515 (2011). -68. K. B. chenitz, M. B. lane-Fall, decreased urine output and acute kidney injury in the -postanesthesia care unit. Anesthesiol. Clin. 30, 513–526 (2012). -69. J. A. Kellum, F. e. Sileanu, R. Murugan, n. lucko, A. d. Shaw, G. clermont, classifying AKi by -urine output versus serum creatinine level. J. Am. Soc. Nephrol. 26, 2231–2238 (2015). -70. S. Mirjalili, Genetic algorithm, in Evolutionary Algorithms and Neural Networks: Theory and -Applications (Springer, 2019), pp. 43–55. -71. S. Ruder, An overview of gradient descent optimization algorithms. arXiv:1609.04747 -[cs.lG] (2016). -72. c. Multiphysics, introduction to cOMSOl multiphysics. cOMSOl Multiphysics, Burlington, -MA, accessed 2018 Feb 9: 32 (1998). -73. G. B. Jeffery, the motion of ellipsoidal particles immersed in a viscous fluid. Proc. R. soc. Lond. -Ser. A-Contain. Pap. Math. Phys. Character 102, 161–179 (1922). -74. F. P. Bretherton, the motion of rigid particles in a shear flow at low Reynolds number. -J. Fluid Mech. 14, 284–304 (1962). -75. t. Zhou, Z. Peng, M. Gulian, J. F. Brady, distribution and pressure of active lévy swimmers -under confinement. J. Phys. A Math. Theor. 54, 275002 (2021). -76. P. i. Frazier, J. Wang, Bayesian optimization for materials design, in Information Science for -Materials Discovery and Design (Springer, 2016), pp. 45–75. -77. Y. Zhang, d. W. Apley, W. chen, Bayesian optimization for materials design with mixed -quantitative and qualitative variables. Sci. Rep. 10, 4924 (2020). -78. J. Schindelin, i. Arganda-carreras, e. Frise, v. Kaynig, M. longair, t. Pietzsch, S. 
Preibisch, -c. Rueden, S. Saalfeld, B. Schmid, Fiji: An open-source platform for biological-image -analysis. Nat. Methods 9, 676–682 (2012). -79. d. ershov, M.-S. Phan, J. W. Pylvänäinen, S. U. Rigaud, l. le Blanc, A. charles- Orszag, -J. R. conway, R. F. laine, n. h. Roy, d. Bonazzi, Bringing trackMate into the era of -machine-learning and deep-learning. bioRxiv 458852 [Preprint] (2021). https://doi. -org/10.1101/2021.09.03.458852. +# AI-aided geometric design of anti-infection catheters(人工智能辅助的抗感染导管几何设计) + +Distributed under a creative commons Attribution license 4.0 (CC BY). + +## 1. 背景简介 +### 1.1 论文信息 +| 年份 | 期刊 | 作者 | 引用数 | 论文PDF | +| -------------- | --------------- | ------------------------------------------------------------------------------------------------ | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 3 January 2024 | Science Advance | Tingtao Zhou, X Wan, DZ Huang, Zongyi Li, Z Peng, A Anandkumar, JF Brady, PW Sternberg, C Daraio | 15 | [Paper](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters.pdf), [Supplementary PDF 1](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/sciadv.adj1741_sm.pdf) | + +### 1.2 作者介绍 + +- 第一作者:加州理工学院 Tingtao Zhou
研究方向:统计物理学、流体力学、活性物质、无序材料
![alt text](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter5.png) + +- 通讯作者:加州理工学院 工程与应用科学部 Chiara Daraio (Cited 21038)
教师主页:https://www.eas.caltech.edu/people/daraio
研究方向:力学、材料、非线性动力学、软物质、生物材料
![alt text](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter6.png) + +- 通讯作者:加州理工学院 生物学和生物工程学部 Paul W. Sternberg (Cited 56555)
教师主页:https://www.bbe.caltech.edu/people/paul-w-sternberg
研究方向:秀丽隐杆线虫发育的系统生物学;性别与睡眠背后的神经回路;线虫功能基因组学与化学生态学;文本挖掘。
![alt text](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter7.png) + +- 其他作者所属机构
加州理工学院,工程与应用科学部、化学与化学工程系、生物与生物工程系
北京大学,北京国际数学研究中心
Meta Platforms公司(前Facebook),Reality Labs部门 + +### 1.3 模型&复现代码 + +| 问题类型 | 在线运行 | 神经网络 | 预训练模型 | 指标 | +| -------------------- | -------------------------------------------------------------------------------------------------------------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | +| 算子神经网络预测流场 | [人工智能辅助的抗感染导管几何设计](https://aistudio.baidu.com/projectdetail/8252779?sUid=1952564&shared=1&ts=172724369783) | 傅立叶几何神经算子 | [GeoFNO_pretrained.pdparams](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/result_GeoFNO.pdparams) | loss(MAE): 0.0664 | + + +=== "模型训练命令" + + ``` sh + # linux + wget https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/data.zip + # windows + # curl https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/data.zip -o data.zip + unzip data.zip + python catheter.py + ``` + +=== "预训练模型快速评估" + + ``` sh + python catheter.py mode=eval EVAL.pretrained_model=https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/result_GeoFNO.pdparams + ``` + +在狭窄管道内的流体环境中,细菌能借助流体动力学作用逆流迁移,对使用植入性导管的患者构成泌尿道感染的严重威胁。尽管已有提议采用涂层与结构化表面来抑制导管内的细菌滋生,但遗憾的是,至今尚无一种表面结构或涂层技术能从根本上解决污染难题。鉴于此,我们依据逆流游动的物理原理,创新性地提出了一种几何设计方案,并通过AI模型对细菌流入动力学进行预测与优化。相较于传统模拟方法,所采用的傅立叶神经算子人工智能技术实现了显著的速度提升。 + +在准二维微流体实验中,我们以大肠杆菌为对象,验证了该设计的抗感染机制,并在临床相关流速下,通过 3D 打印的导管原型对其有效性进行了评估。实验结果显示,我们的导管设计在抑制导管上游端细菌污染方面,实现了 1-2 个数量级的提升,有望大幅延长导管的安全留置时间,并整体降低导管相关性尿路感染的风险。 + +## 2. 问题定义 + +导管相关尿路感染(CAUTIs)(1-5)是住院患者中最常见的感染之一,每年约造成3000万美元的损失(6)。从材料/设备的角度来看,以往预防此类感染的方法包括用抗菌银nm粒子浸渍导管(7)或使用抗生素锁溶液、抗粘附或抗菌材料(8, 9)。然而,这些方法的效果均未能超越严格的护理程序,当前临床实践中预防CAUTI的重点是减少导管的留置时间来预防感染。设计一种在流体存在下能减少细菌活动性的导管,将对当前CAUTI的管理带来显著改善。 + +这样的设计需要我们了解微生物在受限条件下流体流动中的运动模式。典型的微生物轨迹在奔跑(直线推进)和翻滚(随机改变方向)之间交替,以探索环境(10-13)。流体动力学相互作用和群体感应导致更复杂的动态行为,如增强对表面的吸引力(14, 15)和集体群游运动(16-19)。在剪切流中,微观的奔跑-翻滚(RTP)运动可以导致宏观的逆流游动(20-27)。通常,被动粒子除了扩散扩散外,还会被对流至下游(28)。然而,微生物的自驱动导致其宏观传输在性质上有所不同:细菌体在穿越管道时会被流体涡度旋转,从而使其逆着流动方向游动。生物微游动体和合成主动粒子都表现出逆流运动性。对于生物微游动体,如大肠杆菌和哺乳动物精子,其前后体不对称性以及由此产生的与壁面的流体动力学相互作用,通常被用来解释其逆流游动行为(20, 21, 25, 29-31)。 + +另一方面,对于可忽略大小的点状主动粒子,逆流游动现象仍然存在(24, 27)。考虑一个点状主动粒子接近壁面的情况:其前端必须指向壁面。在壁面附近,泊肃叶流(在其最大值处)的涡度总是使粒子重新定向到上游方向(也见材料与方法部分)(27),然后它们沿着壁面逆流游动(图1,A和B)。许多其他因素,如体形不对称、鞭毛的手性以及细菌与边界之间的流体动力学相互作用,也会影响逆流游动行为。最近的实验(32)已经证明了在微流控通道中大肠杆菌的超污染现象,这凸显了其幂律运行时间分布的重要性,该分布显著增强了细菌逆流游动的倾向,使细菌能够持续逆着流动方向游动。 + +预防细菌污染的主流策略包括以下几种: + +- (i)物理屏障,如过滤器或膜(33-38); + +- (ii)抗菌剂,如抗生素(36, 37); + +- (iii)对医疗设备进行表面改性以减少细菌粘附和生物膜形成(38-44); + +- (iv)控制物理/化学环境,如高温/低温、低氧水平或使用消毒剂来抑制细菌的生长和存活(45-48); + +- (v)严格的消毒程序,如戴手套和穿隔离衣(49-51); + +- (vi)定期监测患者状况,以便及早发现并治疗细菌污染(52-54)。 + +虽然已经提出了各种表面改性或涂层来减少细菌粘附,但尚未有研究表明它们能有效防止逆流游动或导管污染(38-40)。其他被动的抗菌方法,如膜或过滤,可能难以直接应用于留置导管的患者。 + +与抗生素或其他化学方法相比,通过几何形状控制微生物分布在抗生素耐药性方面更为安全(55-58)。在其他情况下,已经使用了特定形状来限制和捕获不需要的细菌(59)。由于“干燥”的几何整流效应,不对称形状也可以影响运动细菌的分区(60, 61),并且挤出的边界形状可以局部增强泊肃叶流的涡度,增强程度与挤出曲率成正比。 + +我们致力于设计能够防止细菌逆流游动并最大程度减少污染的导管。为了优化导管的几何形状,我们将设计空间限制为在导管内壁放置三角形障碍物。我们捕捉了自驱动球体出现的最简单的逆流游动物理机制(27),并进行了流体和粒子动力学模拟,以找出几何设计原则(图1C)。我们将流体动力学和几何整流效应结合为一个随机偏微分方程(SPDE),以此模拟细菌的分布。然后,我们使用模拟数据训练了一个基于几何聚焦傅里叶神经算子(Geo-FNO)的人工智能(AI)模型(62, 63),以学习SPDE的解,并使用训练好的模型来优化导管的几何形状(图1D)。基于优化后的设计,我们制造了准二维(2D)微流控装置(图1E)和3D打印的原型导管(图1F),以评估我们的概念的有效性。实验结果表明,与我们的标准导管相比,细菌超污染抑制效果提高了多达两个数量级,这为导管相关尿路感染(CAUTI)的管理提供了一条新途径。 + +![图1. 
提出的导管相关尿路感染(CAUTI)机制与抗感染设计流程示意图](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter.png) + +**图1. 提出的导管相关尿路感染(CAUTI)机制与抗感染设计流程示意图** + +- **(A)提出的CAUTI机制**:尿液从患者膀胱内通过导管向外流出时,细菌能够逆着尿流方向(即上游)游动,进而可能侵入患者体内并引发感染。 +- **(B)细菌的跑动-翻滚运动与上游游动机制**:细菌通过一种特有的跑动-翻滚运动模式,在液体环境中实现上游游动。 +- **(C)模拟探索导管形状**:利用模拟技术,探索不同导管形状对细菌上游游动的影响,以期找到能够抑制细菌上游游动的导管设计。 +- **(D)人工智能辅助优化**:采用Geo-FNO框架进行人工智能辅助优化,进一步细化导管的设计参数,提升其对细菌上游游动的抑制效果。 +- **(E)二维通道微流控实验**:在二维微流控通道中,对优化后的导管设计进行实验验证,评估其在实际流体环境中的抗感染性能。 +- **(F)三维实验验证**:使用设计的实际尺寸导管进行三维实验,进一步验证其在临床使用条件下的抗感染效果。 + +我们致力于设计能够防止细菌向上游移动并最大程度减少污染的导管。为了优化导管的几何形状,我们将设计空间限定为在导管内壁布置三角形障碍物。我们捕捉了自驱动球体所展现的最简单的上游游动物理机制(27),并进行了流体和粒子动力学模拟,以找出几何设计原则(图1C)。通过将流体力学和几何整流效应耦合为随机偏微分方程(SPDE),我们对细菌分布进行了建模。随后,我们使用模拟数据训练了一个基于几何聚焦傅里叶神经算子(Geo-FNO)(62, 63)的人工智能(AI)模型,以学习SPDE的解,并利用训练好的模型来优化导管的几何形状(图1D)。基于优化后的设计,我们制作了准二维(2D)微流控装置(图1E)和3D打印的原型导管(图1F),以评估我们的设计理念的有效性。实验结果表明,与标准导管相比,我们的设计在抑制细菌超标污染方面提高了多达两个数量级,为导管相关尿路感染(CAUTI)的管理提供了一条新途径。 + +### 2.1 探究微观机制 + +我们采用了一个简单的模型(27)来描述剪切流中细菌的动力学行为。在这个模型中,细菌被近似为可忽略大小的球体,其方向$q$由以下方程得出: +基于细菌上游游泳的物理机制,建立相应的数学模型,通常使用 ABP 模型进行表示: + +$$ +\frac{d\vec{q}}{dt} = \frac{1}{2} \vec{\omega} + \frac{2}{\tau_R} \eta(t) \times \vec{q} +$$ + +该模型考虑了细菌与导管壁之间的流体动力学相互作用,以及细菌的形状、大小和表面性质等因素。 +其中 + +- $dt(q)$ 代表细菌方向变化率 +- $ω$ 代表局部流体涡量 +- $η(t)$ 代表高斯噪声, 满足$<η(t)>=0$ 和 $<η(0)η(t)>=\delta(t)I$ +- $\vec{q}$ 代表细菌方向向量 +- $\tau_R$ 代表平均运行时间(更多细节详见补充材料) + +我们首先通过数值模拟研究了传统表面改性方法,如抗菌nm粒子涂层(36, 42)、工程化粗糙度或疏水性处理(65, 66),在抑制细菌上游游动中的作用。这些改性表面能够防止细菌过于接近壁面。为了模拟这些表面的存在,我们假设它们会导致细菌从表面脱离,并至少保持在距离表面3μm的位置,这个距离超过了典型的E.coli大肠杆菌体长(1至2μm)。虽然表面改性也可能影响细菌与壁面之间的流体力学相互作用,但在我们基于点状球体的简单通用模型中忽略了这一点。 + +我们发现,在所测试的流速范围内,表面排斥对细菌的上游游动行为几乎没有影响。通过比较光滑通道内(图2D)和表面改性通道内(图2E)持续游动细菌的模拟轨迹,我们发现它们的上游游动行为相似。 + +我们采用两个群体统计指标来量化抑制细菌上游游动的有效性: + +- (i)平均上游游动距离$x_{up}=-\int_{0}^{-\infty}\rho(x)xdx$,通过计算细菌分布函数$ρ(x)$的加权平均值得出,其中$x$为细菌位置; + +- (ii)前$1\%$上游游动最远的细菌所能到达的距离$x_{1\%}$。模拟结果显示,表面改性仅在中等流速下略微减少了$x_{up}$,但对$x_{1\%}$几乎没有影响(图2F中的蓝线和粉线)。这种表面改性效果不佳的结果与近几年一些论文的实验观察结果一致(39, 40)。 + +随后,我们通过添加物理障碍物来探索导管表面几何形状的作用。我们发现,对称和不对称的障碍物都能显著抑制细菌的上游游动(如图2F中的黑色和绿色线条所示)。我们确定了两种协同效应:首先,障碍物的斜率会在细菌从障碍物顶部出发时改变其游动方向,从而打断了它们沿着管壁表面的连续攀爬。不对称的形状会使细菌的运动偏向下游(如图2A所示),这在模拟的0流速下的轨迹(补充材料和图S1)以及低流速下上游游动统计数据的差异(图2F中的黑色和绿色线条)中均有所体现。其次,在有限的流速下,流场与光滑通道中的泊肃叶流不同(如图2B所示)。在泊肃叶流中,涡量会使细菌转向下游。而在障碍物附近,涡量会增强,导致细菌转向上游(如图2C和补充材料图S2所示), 从而加强了细菌的转向机制。结合这两种效应,我们预计在具有优化障碍物几何形状的通道中,细菌的上游游动将显著减少。 + +设计优化的参数空间由四个参数表征:障碍物基底长度$L$、高度$h$、尖端位置$s$以及障碍物间距$d$;我们用W表示通道宽度(图2G)。为了优化这个空间,我们设定了两个约束条件。首先,如果相邻障碍物过于接近,它们尖端的涡旋就会开始重叠。由于这种重叠,最大有效涡旋强度(正好在障碍物尖端;有效涡旋的数学定义见补充材料)和涡旋的有效尺寸都会减小。此外,还会形成更大的边界层和滞流区(图S2,A和B)。因此,我们将障碍物间距约束为$d > 0.5W$(图S2G)。其次,在其他参数固定的情况下,随着h的增加,障碍物尖端的有效涡旋强度也会增加(图S2,C至H),这有利于促进涡旋重定向效应。然而,当$h = W/2$时,管道显然会发生堵塞。这种随着$h$增加而堵塞加剧的趋势反映在为了保持相同的有效流速而所需压力降的持续增加上(图S2I)。为了避免堵塞,我们将高度约束为$h < 0.3W$。 + +![图2. 障碍物抑制上游游动和几何优化的物理机制](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter2.png) + +**图2. 
障碍物抑制上游游动和几何优化的物理机制** + +- **(A)无流动时的几何整流效应**:描述了在没有流体流动的情况下,几何形状对细菌游动方向的影响。 + +- **(B)光滑通道中的泊肃叶流**:彩色背景显示流涡量的相对大小,颜色越深表示涡量越大。在光滑通道中,泊肃叶流产生的涡量使细菌头部向下游旋转。 + +- **(C)带有对称障碍物的通道中的流动**:在带有对称障碍物的通道中,障碍物顶部附近的流速和涡量增强,这导致更强的扭矩作用在细菌上,使其重定向至下游。 + +- **(D)和(E)不同条件下的细菌模拟轨迹**: + - - **(D)光滑通道**:在宽度为50μm的二维光滑通道中,细菌的模拟轨迹显示其持续游动状态。 + - - **(E)排斥细菌的表面改性通道**:在表面经过改性以排斥细菌的通道中,细菌的游动轨迹受到显著影响。 + +- **(F)上游游动的群体统计**: + - - 实线(左侧y轴)表示平均上游距离,反映了细菌群体在上游方向上的平均游动距离。 + - - 虚线(右侧y轴)表示群体中前1%游动者的上游距离,揭示了少数高效游动细菌的表现。 + - - 不同颜色的线条代表不同的通道条件:蓝色为光滑通道,橙色为表面改性通道,黑色为对称障碍物通道,绿色为不对称障碍物通道。 + +- **(G)AI算子神经网络模型和结果**: + - - Geo-FnO模型旨在学习导管几何形状与细菌分布之间的关系,通过一系列神经算子层实现。 + - - 模型首先将不规则的通道几何形状映射到单位段[0,1],然后在潜在空间中应用基于傅里叶的内核进行预测。 + - - 最后,将预测的细菌分布从潜在空间转换回物理空间。 + - - 右图展示了随机初始条件(黑色)和优化后的设计(粉色)的对比,以及通过流体和粒子动力学模拟验证的Geo-FnO预测结果(绿色虚线)。 + +![图3. 微流控实验](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter3.png) + +**图3. 微流控实验** + +- **(A)微流控实验示意图**:微流控通道的一端连接着装有成像溶液的注射器,另一端则连接着装有大肠杆菌的储液池。长箭头表示流动方向。 + +- **(B)细菌在锐角处的积聚**:由于流动停滞,细菌在通道的锐角处积聚。 + +- **(C)微流控通道的明场图像**:展示了通道的实际结构。 + +- **(D)细菌从通道壁上脱落的典型事件**: + - - 细菌(白色点)的轨迹在过去5s内以黄色线条显示。 + - - 上图展示了一种类型1的轨迹,其中细菌从障碍物尖端脱落。 + - - 下图展示了一种典型的类型2轨迹,其中细菌从通道的平滑部分脱落。 + - - 左列为实验图像,右列为模拟图像。 + +- **(E)脱落事件的统计**:提供了关于细菌脱落事件的统计数据。 + +![图4. 3D打印导管原型的实验](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter4.png) + +**Fig. 4. 3D打印导管原型的实验** + +- **(A)实验设置**:导管的下游端连接到大肠杆菌的储液池,上游端连接到由注射泵控制的装满培养液的注射器。1小时后,将导管切成等长段,并提取内部液体进行24小时培养。在显微镜下计数大肠杆菌菌落的数量,以反映每段导管中的细菌数量。 + +- **(B)光滑导管中的大肠杆菌超污染**:展示了在光滑导管中大肠杆菌的污染情况。 + +- **(C)设计导管与光滑导管的比较**:对比了设计导管与光滑导管在细菌污染方面的差异。插图显示了相同数据在对数尺度上的绘制。 + +### 2.2 AI辅助优化的几何设计 + +近年来,基于人工智能(AI)的模型,如神经算子,已被用于学习流体动力学和其他领域的正向模拟或观测模型的替代品。由于这些模型是可微分的,因此它们可以直接用于逆向设计,即我们可以使用梯度直接在设计空间中进行优化。这使得生成以前未研究过的设计变得更加简洁高效。我们使用了一个AI模型来优化通道形状,该形状由上述描述的四个参数和两个约束条件表征(图2G)。这种方法首先将不规则的通道几何形状映射到潜在空间(一个单位的导管片段长度$[0,1]$)中的一个函数,然后在潜在空间中应用傅里叶神经算子(FNO)模型,最后将细菌分布转换回物理空间(图2G)。然后,我们使用这个训练好的替代模型进行逆向设计优化,以确定最佳的通道形状。为了评估每种设计的有效性,我们测量了在$T=500$s时,三种流速($5、10$和$15μm/s$)下的平均⟨$x_{up}$⟩值。我们基于几何感知傅里叶神经算子的AI辅助形状设计,在加权细菌分布方面比训练数据中的给定形状提高了约$20\%$。整个设计优化过程非常快速:并行生成1000个训练实例(在50个GPU上运行10小时),每个实例需要30分钟;在1个GPU上训练模型需要20分钟;而我们训练好的AI模型在1个GPU上生成最优设计仅需15s。优化过程得出了以下最优结构参数:$d=62.26μm,h=30.0μm,s=-19.56μm,L=12.27μm$,对于通道宽度$W=100μm$。根据上文所述的机制,这种结构提供了强大的几何整流和涡旋重定向效应,以抑制细菌的逆流游动。 + +### 2.3 微流控实验 + +为了评估优化结构的有效性,我们制作了宽度$W=100μm$(壁到壁的距离)且垂直深度为20μm的准二维微流控通道,以便在显微镜下观察细菌的运动情况(图3A)。我们选取了逆流游动的细菌子集,并根据它们从壁上脱落的位置进行了分类。如果细菌从障碍物的顶部脱落,则将其轨迹标记为“类型1”(图3D,上方);如果细菌从壁的平滑部分脱落,则将其标记为“类型2”(图3D,下方)。类型1的轨迹会同时受到几何整流和增强的流体动力学旋转破坏效应的影响。而类型2的轨迹则不会受到几何整流效应的影响,仅会受到轻微的涡旋重定向效应,因为涡量的增强在障碍物尖端最为强烈。对于流速$U_0<100μm/s$的情况,70%到80%的逆流游动轨迹属于类型1(图3E)。我们还注意到,在这些实验中观察到的所有逆流游动轨迹都被重定向到了下游(图3E,红线)。在尖锐的拐角附近观察到了细菌积聚现象(图3B),这可能是由于停滞区的存在(图2C和附图S1,拐角附近的白色区域)。为了防止细菌在拐角处积聚,我们将几何形状用半径$r=h/2$的圆弧进行了圆滑处理(图3C)。 + +### 2.4 宏观尺度导管实验 + +上述展示的机制和设计原则很容易扩展到导管上。在三维管道中,细菌可以通过横截面的任何切割线穿过管道(附图S2J)。由于与上述相同的机制(图2,A、B、F至I,以及附图S1),仅在壁附近的无量纲剪切率起作用(27),因此靠近边界移动的细菌(附图S2J中的轨迹1)仍然可以逆流游动。超污染细菌的游动距离可以超过1mm(32),这与重新缩放的障碍物尺寸相当,预计在这些尺度上整流效应会持续存在(61)。数量级估计表明,也可以采用伴随方法的雷诺数下降(71)。我们注意到,几何设计不能完全消除细菌的逆流游动,特别是在接近零流速的情况下。然而,它极大地减少了超污染的数量,并可能显著延长导管的留置时间。使用我们设计的导管预计不需要改变常规临床方案或重新培训医务人员。此外,我们的解决方案不会向导管中引入化学物质,因此是安全的,并且不需要额外的维护。我们的几何设计方法预计与其他程序措施、抗菌表面改性和环境控制方法兼容。 + + +![S1. 微流控实验](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheterS1.png) + +**图S1. 
主动布朗粒子的模拟轨迹示例** + +在(A)(C)具有对称障碍物的通道中和(B)(D)具有不对称障碍物的通道中的轨迹。(A)(B)无流体流动。(C)(D)有流体流动。颜色表示局部归一化涡量。 + +![S2. 主动布朗粒子的模拟轨迹示例](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheterS2-1.png) +![S2. 主动布朗粒子的模拟轨迹示例](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheterS2-2.png) + +**图S2. 几何优化约束和缩放的考虑因素** + +- (A-H)归一化涡量作为障碍物高度ℎ和障碍物间距𝑑的函数。 +- (I)沿通道一个周期内的归一化压降作为归一化障碍物高度ℎ/𝑑的函数。 +- (J)宏观圆柱形管内细菌运动的横截面视图。与轨迹2相比,轨迹2中的细菌在管中心附近经历强烈的下游流动,而采取轨迹1的另一个细菌则靠近管壁。因此,细菌1所经历的流动条件与我们考虑的微流控通道中的条件更为相似。其上游游动行为和几何抑制机制将与微流控条件相似,只是存在定量上的差异。 + +### 2.5 流体和粒子动力学模拟 + +我们使用COMSOL软件(72)模拟了具有无滑移边界条件的通道内的斯托克斯流。随后,将得到的速度和涡量场耦合到粒子动力学模拟中,而在稀释悬浮液和小粒子尺寸的极限情况下,忽略了粒子运动对流体动力学的反馈。粒子动力学由具有高斯统计特性的主动布朗粒子(ABP)模型和具有幂律(Levy)统计特性的运行-休止(RTP)模型描述。模拟是使用我们内部开发的GPU Julia代码进行的,模拟时间步长为10^-4s。在ABP模型中,单个粒子的动力学是根据过阻尼的朗之万方程进行积分的。 + +$$0=-\zeta(U-u)+\zeta U_0q(t)+\sqrt{2D_T\xi(t)}$$ + +$$d{q}/dt=\left[1/2{\omega}+B{q}\times(\mathbf{E}\cdot{q})+\sqrt{2/\tau_R{\eta}({t})}\right]\times{q}$$ + +其中,$ζ$是粘性阻力系数,$U$是粒子的速度,${q}$是粒子的方向向量,$u$是局部流速,$ω$是局部流场的涡量向量,$E$是流场的局部应变率张量。$B$是一个几何系数(3、74),对于无限细的杆状物体,它等于1,对于球体,它等于0。由于B的值对上游游动统计的影响不显著(27),因此我们在这里展示的结果中设$B=0$。$ξ(t)$是满足$⟨ξ(t)⟩=0和⟨ξ(0)ξ(t)⟩=δ(t)I$的高斯随机噪声。由于细菌是μm级的粒子,它们的布朗运动相对较弱,因此在模拟中我们将平移扩散系数DT设置为$0.1 μm²/s$。只要这个值保持较小,其变化对结果的影响就不大。 + +η是满足$⟨η(t)⟩=0和⟨η(0)η(t)⟩=δ(t)I$的高斯噪声,τR是平均运行时间。在RTP(Run-and-Tumble,奔跑-翻滚)模型中,单个粒子在$0 + + + + + +**视频S1-S2. 不同流动条件下细菌从壁面脱落的记录** + + + +**视频S3. 实时的优化设计** + +### 2.10 3D导管长期实验 + +使用Connex-Triplex 3D打印机打印了原型导管管(包括几何设计款和光滑款)。设计有障碍物的管内部结构与准二维结构相似,但进行了放大,并围绕通道的中心线旋转,使得障碍物成为内壁上的挤出环。考虑到可用的3D打印精度和典型导管的尺寸,这些原型的内径为1.6cm。对于设计有障碍物的管,挤出环之间的间距为1mm。为了便于清除3D打印产生的支撑材料,每根管被打印成两半,长边呈榫头形状,在去除支撑材料后组装成完整的管。 + +如图4A所示,管的上端连接到一个由机械泵控制的注射器,以保持恒定的流速。管的下端连接到一个直径为80mm的培养皿,作为E. coli(大肠杆菌)的储液池。1小时后,将管切成$2cm$长的段,并将每段内的液体转移到培养板上,同时丢弃最上游和最下游的段。在室温下培养培养板24小时后,计数每个培养板上的细菌菌落数量,以反映管相应部分的污染量。 + +为了计数菌落数量,在培养板上选择了四个圆形、等距、直径为8mm的区域(见图S5)。通过计算这四个区域内菌落的总数,并乘以整个培养板面积与这四个区域面积的比例(即25倍),来估算整个培养板上的菌落总数。当培养板上的菌落过多,变得过于拥挤或重叠以至于无法精确计数时,我们将整个培养板上的菌落总数记为30,000。 + +### 2.11 讨论 + +在本研究中,我们介绍了一种医用导管内表面的有效几何设计,旨在抑制细菌的逆流游动和过度污染。我们的设计思路是基于阻碍细菌逆流游动的物理机制,同时考虑了具有幂律动力学的球形粒子流变导向的一般模型。由于传染性微生物在形状、鞭毛特征和流体动力学相互作用方面存在差异,为简化设计和提高设计的通用性,本研究采用的简化模型忽略了细菌运动的细节,如鞭毛的螺旋性(29)和与边界的流体动力学相互作用(20)。模拟结果用于指导实验设计,而非特定预测大肠杆菌的实验结果。未来研究可采用更复杂的模型,考虑特定微生物种类的细节。 + +我们发现,由于涡旋重叠的相互作用,在障碍物尖端附近会产生有效的涡量,为此我们确定了障碍物之间间距的下限(图S2和补充材料)。障碍物高度的限制是在增强有效涡量和避免管道堵塞之间做出的权衡(图S2)。虽然我们选择使用这种人工智能框架来优化导管的几何形状,但也可以采用其他方法,如结合数值求解器的遗传算法(70)或结合伴随方法的梯度下降法(71)。 + +我们注意到,几何设计无法完全消除细菌的逆流游动,尤其是在流速接近零的情况下。然而,它能显著减少过度污染的量,并可能大幅延长导管的留置时间。使用我们设计的导管预计不需要改变常规临床方案或重新培训医务人员。此外,我们的解决方案不会在导管中引入化学物质,因此是安全的,也不需要额外的维护。我们预计,这种几何设计方法将与其他程序措施、抗菌表面改性和环境控制方法相兼容。 + +## 3. 
问题求解 + +论文采用几何聚焦傅里叶神经算子(Geo-FNO)构建AI模型。该模型能够学习并解决与几何形状相关的随机偏微分方程(SPDE),从而实现对导管几何形状的优化,并通过微流体实验和3D打印技术,制作具有不同几何形状的导管原型,并测试其抑制细菌上游游泳的效果。 +接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。为了快速理解 PaddleScience,接下来仅对模型构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API 文档](../api/arch.md)。 + +### 3.1 数据集介绍 + +数据文件说明如下: + +| `./data.zip/training/` | | `./data.zip/test/` | | +| :-------------------------------: | :----------------: | :---------------------------: | :---------------: | +| 文件名 | 说明 | 文件名 | 说明 | +| training/x_1d_structured_mesh.npy | 形状为(2001, 3003) | test/x_1d_structured_mesh.npy | 形状为(2001, 300) | +| training/y_1d_structured_mesh.npy | 形状为(2001, 3003) | test/y_1d_structured_mesh.npy | 形状为(2001, 300) | +| training/data_info.npy | 形状为(7, 3003) | test/data_info.npy | 形状为(7, 300) | +| training/density_1d_data.npy | 形状为(2001, 3003) | test/density_1d_data.npy | 形状为(2001, 300) | + +在加载数据之后,需要将 x、y 进行合并,同时对于合并后的训练数据重新 `reshape` 为 `(1000, 2001, 2)` 的格式,具体代码如下 + +```py +--8<-- +examples/catheter/catheter.py:31:75 +--8<-- +``` + +### 3.2 GeoFNO 模型 + +GeoFNO 是一种基于 **几何聚焦傅里叶神经算子 (Geo-FNO** ) 的机器学习模型,它将几何形状转换到傅里叶空间,从而更好地捕捉形状的特征,并利用傅里叶变换的可逆性,可以将结果转换回物理空间。 + +在论文中,该模型能够学习并解决与几何形状相关的偏微分方程(SPDE),从而实现对导管几何形状的优化, 代码表示如下 + +```py +--8<-- +ppsci/arch/geofno.py:95:205 +--8<-- +``` + +为了在计算时,准确快速地访问具体变量的值,我们在这里指定网络模型的输入变量名是 `("input",)`,输出变量名是 `("output",)`,这些命名与后续代码保持一致。 + +接着通过指定 FNO1d 的层数、特征通道数,神经元个数,并通过加载上文所提及的初始化权重模型,我们就实例化出了一个神经网络模型 `model`。 + +### 3.3 模型训练、评估 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估。 + +```python +--8<-- +examples/catheter/catheter.py:162:177 +--8<-- +``` + +## 4. 结果展示 + +=== "训练、推理loss" + +下方展示了训练后模型对测试数据的第一次预测结果以及最后一次预测结果。 + +=== "第一次预测结果" + + ![1725427977357](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter10.png) + +=== "最后一次预测结果" + + ![1725428017615](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter9.png) + +=== "训练测试损失" + + ![1725894134717](https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/catheter8.png) + +可以看到模型预测结果与真实结果基本一致,优化后的导管具有特定的几何形状,如障碍物分布和间距等,这些形状特征能够显著影响流体动力学相互作用,从而抑制细菌的上游游泳行为。 + +## 6. 参考 + +参考代码: /zongyi-li/Geo-FNO-catheter + +参考文献列表 + +1. J. W. Warren, the catheter and urinary tract infection. Med. Clin. North Am. 75, 481–493 +(1991). +2. l. e. nicolle, catheter- related urinary tract infection. Drugs Aging 22, 627–639 (2005). +3. e. K. Shuman, c. e. chenoweth, Urinary catheter- associated infections. Infect. Dis. Clin. +North Am. 32, 885–897 (2018). +4. n. Buetti, A. tabah, J. F. timsit, W. Zingg, What is new in catheter use and catheter +infection prevention in the icU. Curr. Opin. Crit. Care 26, 459–465 (2020). +5. l. chuang, P. A. tambyah, catheter-associated urinary tract infection. J. Infect. Chemother. +27, 1400–1406 (2021). +6. e. Zimlichman, d. henderson, O. tamir, c. Franz, P. Song, c. K. Yamin, c. Keohane, +c. R. denham, d. W. Bates, health care–associated infections. JAMA Intern. Med. 173, +2039–2046 (2013). +7. U. Samuel, J. Guggenbichler, Prevention of catheter-related infections: the potential of a +new nano-silver impregnated catheter. Int. J. Antimicrob. Agents 23, 75–78 (2004). +8. W. Kohnen, B. Jansen, Polymer materials for the prevention of catheter-related infections. +Zentralbl. Bakteriol. 283, 175–186 (1995). +9. A. hameed, F. chinegwundoh, A. thwaini, Prevention of catheter-related urinary tract +infections. Med. 
Hypotheses 71, 148–152 (2010).
+10. H. C. Berg, D. A. Brown, Chemotaxis in Escherichia coli analysed by three-dimensional tracking. Nature 239, 500–504 (1972).
+11. H. C. Berg, The rotary motor of bacterial flagella. Annu. Rev. Biochem. 72, 19–54 (2003).
+12. H. C. Berg, E. coli in Motion (Springer, 2004).
+13. M. Polin, I. Tuval, K. Drescher, J. P. Gollub, R. E. Goldstein, Chlamydomonas swims with two “gears” in a eukaryotic version of run-and-tumble locomotion. Science 325, 487–490 (2009).
+14. A. P. Berke, L. Turner, H. C. Berg, E. Lauga, Hydrodynamic attraction of swimming microorganisms by surfaces. Phys. Rev. Lett. 101, 038102 (2008).
+15. E. Lauga, T. R. Powers, The hydrodynamics of swimming microorganisms. Rep. Prog. Phys. 72, 096601 (2009).
+16. D. Kaiser, Bacterial swarming: A re-examination of cell-movement patterns. Curr. Biol. 17, R561–R570 (2007).
+17. N. Verstraeten, K. Braeken, B. Debkumari, M. Fauvart, J. Fransaer, J. Vermant, J. Michiels, Living on a surface: Swarming and biofilm formation. Trends Microbiol. 16, 496–506 (2008).
+18. D. B. Kearns, A field guide to bacterial swarming motility. Nat. Rev. Microbiol. 8, 634–644 (2010).
+19. D. Ghosh, X. Cheng, To cross or not to cross: Collective swimming of Escherichia coli under two-dimensional confinement. Phys. Rev. Res. 4, 023105 (2022).
+20. J. Hill, O. Kalkanci, J. L. McMurry, H. Koser, Hydrodynamic surface interactions enable Escherichia coli to seek efficient routes to swim upstream. Phys. Rev. Lett. 98, 068101 (2007).
+21. T. Kaya, H. Koser, Direct upstream motility in Escherichia coli. Biophys. J. 102, 1514–1523 (2012).
+22. Marcos, H. C. Fu, T. R. Powers, R. Stocker, Bacterial rheotaxis. Proc. Natl. Acad. Sci. U.S.A. 109, 4780–4785 (2012).
+23. Y. Shen, A. Siryaporn, S. Lecuyer, Z. Gitai, H. A. Stone, Flow directs surface-attached bacteria to twitch upstream. Biophys. J. 103, 146–151 (2012).
+24. A. Zöttl, H. Stark, Nonlinear dynamics of a microswimmer in Poiseuille flow. Phys. Rev. Lett. 108, 218104 (2012).
+25. C.-K. Tung, F. Ardon, A. Roy, D. L. Koch, S. S. Suarez, M. Wu, Emergence of upstream swimming via a hydrodynamic transition. Phys. Rev. Lett. 114, 108102 (2015).
+26. A. J. Mathijssen, T. N. Shendruk, J. M. Yeomans, A. Doostmohammadi, Upstream swimming in microbiological flows. Phys. Rev. Lett. 116, 028104 (2016).
+27. Z. Peng, J. F. Brady, Upstream swimming and Taylor dispersion of active Brownian particles. Phys. Rev. Fluids 5, 073102 (2020).
+28. G. I. Taylor, Dispersion of soluble matter in solvent flowing slowly through a tube. Proc. R. Soc. A-Math. Phys. Eng. Sci. 219, 186–203 (1953).
+29. T. Kaya, H. Koser, Characterization of hydrodynamic surface interactions of Escherichia coli cell bodies in shear flow. Phys. Rev. Lett. 103, 138103 (2009).
+30. V. Kantsler, J. Dunkel, M. Blayney, R. E. Goldstein, Rheotaxis facilitates upstream navigation of mammalian sperm cells. eLife 3, e02403 (2014).
+31. T. Omori, T. Ishikawa, Upward swimming of a sperm cell in shear flow. Phys. Rev. E 93, 032402 (2016).
+32. N. Figueroa-Morales, A. Rivera, R. Soto, A. Lindner, E. Altshuler, É. Clément, E. coli “super-contaminates” narrow ducts fostered by broad run-time distribution. Sci. Adv. 6, eaay0155 (2020).
+33. B. E. Logan, T. A. Hilbert, R. G. Arnold, Removal of bacteria in laboratory filters: Models and experiments. Water Res. 27, 955–962 (1993).
+34. W. Dzik, Use of leukodepletion filters for the removal of bacteria. Immunol. Invest. 24, 95–115 (1995).
+35. L. Fernandez Garcia, S. Alvarez Blanco, F. A. Riera Rodriguez, Microfiltration applied to dairy streams: Removal of bacteria. J. Sci. Food Agric. 93, 187–196 (2013).
+36. G. Franci, A. Falanga, S. Galdiero, L. Palomba, M. Rai, G. Morelli, M. Galdiero, Silver nanoparticles as potential antibacterial agents. Molecules 20, 8856–8874 (2015).
+37. M. I. Hutchings, A. W. Truman, B. Wilkinson, Antibiotics: Past, present and future. Curr. Opin. Microbiol. 51, 72–80 (2019).
+38. J. W. Costerton, H. M. Lappin-Scott, Introduction to microbial biofilms, in Microbial Biofilms, H. M. Lappin-Scott, J. W. Costerton, Eds. (Cambridge Univ. Press, 1995), pp. 1–11.
+39. W.-H. Sheng, W.-J. Ko, J.-T. Wang, S.-C. Chang, P.-R. Hsueh, K.-T. Luh, Evaluation of antiseptic-impregnated central venous catheters for prevention of catheter-related infection in intensive care unit patients. Diagn. Microbiol. Infect. Dis. 38, 1–5 (2000).
+40. W. M. Dunne Jr., Bacterial adhesion: Seen any good biofilms lately? Clin. Microbiol. Rev. 15, 155–166 (2002).
+41. R. P. Allaker, The use of nanoparticles to control oral biofilm formation. J. Dent. Res. 89, 1175–1186 (2010).
+42. M. L. Knetsch, L. H. Koole, New strategies in the development of antimicrobial coatings: The example of increasing usage of silver and silver nanoparticles. Polymers 3, 340–366 (2011).
+43. M. Birkett, L. Dover, C. Cherian Lukose, A. Wasy Zia, M. M. Tambuwala, Á. Serrano-Aroca, Recent advances in metal-based antimicrobial coatings for high-touch surfaces. Int. J. Mol. Sci. 23, 1162 (2022).
+44. J. R. Lex, R. Koucheki, N. A. Stavropoulos, J. Di Michele, J. S. Toor, K. Tsoi, P. C. Ferguson, R. E. Turcotte, P. J. Papagelopoulos, Megaprosthesis anti-bacterial coatings: A comprehensive translational review. Acta Biomater. 140, 136–148 (2022).
+45. J. Monod, The growth of bacterial cultures. Annu. Rev. Microbiol. 3, 371–394 (1949).
+46. M. Hecker, W. Schumann, U. Völker, Heat-shock and general stress response in Bacillus subtilis. Mol. Microbiol. 19, 417–428 (1996).
+47. P. Setlow, Spores of Bacillus subtilis: Their resistance to and killing by radiation, heat and chemicals. J. Appl. Microbiol. 101, 514–525 (2006).
+48. M. Falagas, P. Thomaidis, I. Kotsantis, K. Sgouros, G. Samonis, D. Karageorgopoulos, Airborne hydrogen peroxide for disinfection of the hospital environment and infection control: A systematic review. J. Hosp. Infect. 78, 171–177 (2011).
+49. W. A. Rutala, D. J. Weber, Disinfection and sterilization in health care facilities: What clinicians need to know. Clin. Infect. Dis. 39, 702–709 (2004).
+50. W. A. Rutala, D. J. Weber, Disinfection and sterilization: An overview. Am. J. Infect. Control 41, S2–S5 (2013).
+51. N. P. Tipnis, D. J. Burgess, Sterilization of implantable polymer-based medical devices: A review. Int. J. Pharm. 544, 455–460 (2018).
+52. M. Berger, R. Shiau, J. M. Weintraub, Review of syndromic surveillance: Implications for waterborne disease detection. J. Epidemiol. Community Health 60, 543–550 (2006).
+53. M. V. Storey, B. van der Gaag, B. P. Burns, Advances in on-line drinking water quality monitoring and early warning systems. Water Res. 45, 741–747 (2011).
+54. S. Hyllestad, E. Amato, K. Nygård, L. Vold, P. Aavitsland, The effectiveness of syndromic surveillance for the early detection of waterborne outbreaks: A systematic review. BMC Infect. Dis. 21, 696 (2021).
+55. F. Baquero, J.-L. Martínez, R. Cantón, Antibiotics and antibiotic resistance in water environments. Curr. Opin. Biotechnol. 19, 260–265 (2008).
+56. R. I. Aminov, The role of antibiotics and antibiotic resistance in nature. Environ. Microbiol. 11, 2970–2988 (2009).
+57. J. M. Munita, C. A. Arias, Mechanisms of antibiotic resistance. Microbiol. Spectr. (2016).
+58. U. Theuretzbacher, K. Bush, S. Harbarth, M. Paul, J. H. Rex, E. Tacconelli, G. E. Thwaites, Critical analysis of antibacterial agents in clinical development. Nat. Rev. Microbiol. 18, 286–298 (2020).
+59. R. Di Giacomo, S. Krödel, B. Maresca, P. Benzoni, R. Rusconi, R. Stocker, C. Daraio, Deployable micro-traps to sequester motile bacteria. Sci. Rep. 7, 45897 (2017).
+60. P. Galajda, J. Keymer, P. Chaikin, R. Austin, A wall of funnels concentrates swimming bacteria. J. Bacteriol. 189, 8704–8707 (2007).
+61. C. M. Kjeldbjerg, J. F. Brady, Theory for the Casimir effect and the partitioning of active matter. Soft Matter 17, 523–530 (2021).
+62. Z. Li, N. Kovachki, K. Azizzadenesheli, B. Liu, K. Bhattacharya, A. Stuart, A. Anandkumar, Fourier neural operator for parametric partial differential equations. arXiv:2010.08895 [cs.LG] (2020).
+63. Z. Li, D. Z. Huang, B. Liu, A. Anandkumar, Fourier neural operator with learned deformations for PDEs on general geometries. arXiv:2207.05209 [cs.LG] (2022).
+64. A. J. Mathijssen, N. Figueroa-Morales, G. Junot, É. Clément, A. Lindner, A. Zöttl, Oscillatory surface rheotaxis of swimming E. coli bacteria. Nat. Commun. 10, 3434 (2019).
+65. S. B. Goodman, Z. Yao, M. Keeney, F. Yang, The future of biologic coatings for orthopaedic implants. Biomaterials 34, 3174–3183 (2013).
+66. A. Jaggessar, H. Shahali, A. Mathew, P. K. Yarlagadda, Bio-mimicking nano and micro-structured surface fabrication for antibacterial properties in medical implants. J. Nanobiotechnol. 15, 64 (2017).
+67. E. Macedo, R. Malhotra, R. Claure-Del Granado, P. Fedullo, R. L. Mehta, Defining urine output criterion for acute kidney injury in critically ill patients. Nephrol. Dial. Transplant. 26, 509–515 (2011).
+68. K. B. Chenitz, M. B. Lane-Fall, Decreased urine output and acute kidney injury in the postanesthesia care unit. Anesthesiol. Clin. 30, 513–526 (2012).
+69. J. A. Kellum, F. E. Sileanu, R. Murugan, N. Lucko, A. D. Shaw, G. Clermont, Classifying AKI by urine output versus serum creatinine level. J. Am. Soc. Nephrol. 26, 2231–2238 (2015).
+70. S. Mirjalili, Genetic algorithm, in Evolutionary Algorithms and Neural Networks: Theory and Applications (Springer, 2019), pp. 43–55.
+71. S. Ruder, An overview of gradient descent optimization algorithms. arXiv:1609.04747 [cs.LG] (2016).
+72. C. Multiphysics, Introduction to COMSOL Multiphysics. COMSOL Multiphysics, Burlington, MA, accessed 2018 Feb 9: 32 (1998).
+73. G. B. Jeffery, The motion of ellipsoidal particles immersed in a viscous fluid. Proc. R. Soc. Lond. Ser. A-Contain. Pap. Math. Phys. Character 102, 161–179 (1922).
+74. F. P. Bretherton, The motion of rigid particles in a shear flow at low Reynolds number. J. Fluid Mech. 14, 284–304 (1962).
+75. T. Zhou, Z. Peng, M. Gulian, J. F. Brady, Distribution and pressure of active Lévy swimmers under confinement. J. Phys. A Math. Theor. 54, 275002 (2021).
+76. P. I. Frazier, J. Wang, Bayesian optimization for materials design, in Information Science for Materials Discovery and Design (Springer, 2016), pp. 45–75.
+77. Y. Zhang, D. W. Apley, W. Chen, Bayesian optimization for materials design with mixed quantitative and qualitative variables. Sci. Rep. 10, 4924 (2020).
+78. J. Schindelin, I. Arganda-Carreras, E. Frise, V. Kaynig, M. Longair, T. Pietzsch, S.
Preibisch, +c. Rueden, S. Saalfeld, B. Schmid, Fiji: An open-source platform for biological-image +analysis. Nat. Methods 9, 676–682 (2012). +79. d. ershov, M.-S. Phan, J. W. Pylvänäinen, S. U. Rigaud, l. le Blanc, A. charles- Orszag, +J. R. conway, R. F. laine, n. h. Roy, d. Bonazzi, Bringing trackMate into the era of +machine-learning and deep-learning. bioRxiv 458852 [Preprint] (2021). https://doi. +org/10.1101/2021.09.03.458852. diff --git a/docs/zh/examples/earthformer.md b/docs/zh/examples/earthformer.md index 6a17ee38a4..721d23fa12 100644 --- a/docs/zh/examples/earthformer.md +++ b/docs/zh/examples/earthformer.md @@ -1,505 +1,505 @@ -# EarthFormer - -开始训练、评估前,请先下载 - -[ICAR-ENSO数据集](https://tianchi.aliyun.com/dataset/98942) - -[SEVIR数据集](https://nbviewer.org/github/MIT-AI-Accelerator/eie-sevir/blob/master/examples/SEVIR_Tutorial.ipynb#download -) - -=== "模型训练命令" - - ``` sh - # ICAR-ENSO 数据模型训练 - python examples/earthformer/earthformer_enso_train.py - # SEVIR 数据模型训练 - python examples/earthformer/earthformer_sevir_train.py - - ``` - -=== "模型评估命令" - - ``` sh - # ICAR-ENSO 模型评估 - python examples/earthformer/earthformer_enso_train.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/earthformer/earthformer_enso.pdparams - # SEVIR 模型评估 - python examples/earthformer/earthformer_sevir_train.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/earthformer/earthformer_sevir.pdparams - ``` - -=== "模型导出命令" - - ``` sh - # ICAR-ENSO 模型推理 - python examples/earthformer/earthformer_enso_train.py mode=export - # SEVIR 模型推理 - python examples/earthformer/earthformer_sevir_train.py mode=export - ``` - -=== "模型推理命令" - - ``` sh - # ICAR-ENSO 模型推理 - python examples/earthformer/earthformer_enso_train.py mode=infer - # SEVIR 模型推理 - python examples/earthformer/earthformer_sevir_train.py mode=infer - ``` -| 模型 | 变量名称 | C-Nino3.4-M | C-Nino3.4-WM | MSE(1E-4) | -| :-- | :-- | :-- | :-- | :-- | -| [ENSO 模型](https://paddle-org.bj.bcebos.com/paddlescience/models/earthformer/earthformer_enso.pdparams) | sst | 0.74130 | 2.28990 | 2.5000 | - -| 模型 | 变量名称 | CSI-M | CSI-219 | CSI-181 | CSI-160 | CSI-133 | CSI-74 | CSI-16 | MSE(1E-4) | -| :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | -| [SEVIR 模型](https://paddle-org.bj.bcebos.com/paddlescience/models/earthformer/earthformer_sevir.pdparams) | vil | 0.4419 | 0.1791 | 0.2848 | 0.3232 | 0.4271 | 0.6860 | 0.7513 | 3.6957 | - -## 1. 背景简介 - -地球是一个复杂的系统。地球系统的变化,从温度波动等常规事件到干旱、冰雹和厄尔尼诺/南方涛动 (ENSO) 等极端事件,影响着我们的日常生活。在所有后果中,地球系统的变化会影响农作物产量、航班延误、引发洪水和森林火灾。对这些变化进行准确及时的预测可以帮助人们采取必要的预防措施以避免危机,或者更好地利用风能和太阳能等自然资源。因此,改进地球变化(例如天气和气候)的预测模型具有巨大的社会经济影响。 - -Earthformer,一种用于地球系统预测的时空转换器。为了更好地探索时空注意力的设计,论文提出了 Cuboid Attention ,它是高效时空注意力的通用构建块。这个想法是将输入张量分解为不重叠的长方体,并行应用长方体级自注意力。由于我们将 O(N2) 自注意力限制在局部长方体内,因此整体复杂度大大降低。不同类型的相关性可以通过不同的长方体分解来捕获。同时论文引入了一组关注所有局部长方体的全局向量,从而收集系统的整体状态。通过关注全局向量,局部长方体可以掌握系统的总体动态并相互共享信息。 - -## 2. 模型原理 - -本章节仅对 EarthFormer 的模型原理进行简单地介绍,详细的理论推导请阅读 [Earthformer: Exploring Space-Time Transformers for Earth System Forecasting](https://arxiv.org/abs/2207.05833)。 - -Earthformer 的网络模型使用了基于 Cuboid Attention 的分层 Transformer incoder-decoder 。这个想法是将数据分解为长方体并并行应用长方体级自注意力。这些长方体进一步与全局向量的集合连接。 - -模型的总体结构如图所示: - -
- ![Earthformer-arch](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/earthformer_arch.png){ loading=lazy style="margin:0 auto;height:150%;width:150%"} -
EarthFormer 网络模型
-
- -EarthFormer 原代码中训练了 ICAR-ENSO 数据集中海面温度 (sst) 和 SEVIR 数据集中对云总降水量 (vil) 的估计模型,接下来将介绍这两个模型的训练、推理过程。 - -### 2.1 ICAR-ENSO 和 SEVIR 模型的训练、推理过程 - -模型预训练阶段是基于随机初始化的网络权重对模型进行训练,如下图所示,其中 $[x_{i}]_{i=1}^{T}$ 表示长度为 $T$ 时空序列的输入气象数据,$[y_{T+i}]_{i=1}^{K}$ 表示预测未来 $K$ 步的气象数据,$[y_{T+i_true}]_{i=1}^{K}$ 表示未来 $K$ 步的真实数据,如海面温度数据和云总降水量数据。最后网络模型预测的输出和真值计算 mse 损失函数。 - -
- ![earthformer-pretraining](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/earthformer-pretrain.png){ loading=lazy style="margin:0 auto;height:70%;width:70%"} -
earthformer 模型预训练
-
- -在推理阶段,给定长度序列为 $T$ 的数据,得到长度序列为 $K$ 的预测结果。 - -
- ![earthformer-pretraining](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/earthformer-infer.png){ loading=lazy style="margin:0 auto;height:60%;width:60%"} -
earthformer 模型推理
-
- -## 3. 海面温度模型实现 - -接下来开始讲解如何基于 PaddleScience 代码,实现 EarthFormer 模型的训练与推理。关于该案例中的其余细节请参考 [API文档](../api/arch.md)。 - -### 3.1 数据集介绍 - -数据集采用了 [EarthFormer](https://github.com/amazon-science/earth-forecasting-transformer/tree/main) 处理好的 ICAR-ENSO 数据集。 - -本数据集由气候与应用前沿研究院 ICAR 提供。数据包括 CMIP5/6 模式的历史模拟数据和美国 SODA 模式重建的近100多年历史观测同化数据。每个样本包含以下气象及时空变量:海表温度异常 (SST) ,热含量异常 (T300),纬向风异常 (Ua),经向风异常 (Va),数据维度为 (year,month,lat,lon)。训练数据提供对应月份的 Nino3.4 index 标签数据。测试用的初始场数据为国际多个海洋资料同化结果提供的随机抽取的 n 段 12 个时间序列,数据格式采用 NPY 格式保存。 - -**训练数据:** - -每个数据样本第一维度 (year) 表征数据所对应起始年份,对于 CMIP 数据共 291 年,其中 1-2265 为 CMIP6 中 15 个模式提供的 151 年的历史模拟数据 (总共:151年 *15 个模式=2265) ;2266-4645 为 CMIP5 中 17 个模式提供的 140 年的历史模拟数据 (总共:140 年*17 个模式=2380)。对于历史观测同化数据为美国提供的 SODA 数据。 - -**训练数据标签** - -标签数据为 Nino3.4 SST 异常指数,数据维度为 (year,month)。 - -CMIP(SODA)_train.nc 对应的标签数据当前时刻 Nino3.4 SST 异常指数的三个月滑动平均值,因此数据维度与维度介绍同训练数据一致。 - -注:三个月滑动平均值为当前月与未来两个月的平均值。 - -**测试数据** - -测试用的初始场 (输入) 数据为国际多个海洋资料同化结果提供的随机抽取的 n 段 12 个时间序列,数据格式采用NPY格式保存,维度为 (12,lat,lon, 4), 12 为 t 时刻及过去 11 个时刻,4 为预测因子,并按照 SST,T300,Ua,Va 的顺序存放。 - -EarthFFormer 模型对于 ICAR-ENSO 数据集的训练中,只对其中海面温度 (SST) 进行训练和预测。训练海温异常观测的 12 步 (一年) ,预测海温异常最多 14 步。 - -### 3.2 模型预训练 - -#### 3.2.1 约束构建 - -本案例基于数据驱动的方法求解问题,因此需要使用 PaddleScience 内置的 `SupervisedConstraint` 构建监督约束。在定义约束之前,需要首先指定监督约束中用于数据加载的各个参数。 - -数据加载的代码如下: - -``` py linenums="35" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:35:56 ---8<-- -``` - -其中,"dataset" 字段定义了使用的 `Dataset` 类名为 `ENSODataset`,"sampler" 字段定义了使用的 `Sampler` 类名为 `BatchSampler`,设置的 `batch_size` 为 16,`num_works` 为 8。 - -定义监督约束的代码如下: - -``` py linenums="58" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:58:64 ---8<-- -``` - -`SupervisedConstraint` 的第一个参数是数据的加载方式,这里使用上文中定义的 `train_dataloader_cfg`; - -第二个参数是损失函数的定义,这里使用自定义的损失函数 `mse_loss`; - -第三个参数是约束条件的名字,方便后续对其索引。此处命名为 `Sup`。 - -#### 3.2.2 模型构建 - -在该案例中,海面温度模型基于 CuboidTransformer 网络模型实现,用 PaddleScience 代码表示如下: - -``` py linenums="97" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:97:99 ---8<-- -``` - -网络模型的参数通过配置文件进行设置如下: - -``` yaml linenums="46" title="examples/earthformer/conf/earthformer_enso_pretrain.yaml" ---8<-- -examples/earthformer/conf/earthformer_enso_pretrain.yaml:46:105 ---8<-- -``` - -其中,`input_keys` 和 `output_keys` 分别代表网络模型输入、输出变量的名称。 - -#### 3.2.3 学习率与优化器构建 - -本案例中使用的学习率方法为 `Cosine`,学习率大小设置为 `2e-4`。优化器使用 `AdamW`,并将参数进行分组,使用不同的 -`weight_decay`,用 PaddleScience 代码表示如下: - -``` py linenums="101" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:101:126 ---8<-- -``` - -#### 3.2.4 评估器构建 - -本案例训练过程中会按照一定的训练轮数间隔,使用验证集评估当前模型的训练情况,需要使用 `SupervisedValidator` 构建评估器。代码如下: - -``` py linenums="68" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:68:95 ---8<-- -``` - -`SupervisedValidator` 评估器与 `SupervisedConstraint` 比较相似,不同的是评估器需要设置评价指标 `metric`,在这里使用了自定义的评价指标分别是 `MAE`、`MSE`、`RMSE`、`corr_nino3.4_epoch` 和 `corr_nino3.4_weighted_epoch`。 - -#### 3.2.5 模型训练与评估 - -完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估。 - -``` py linenums="128" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:128:146 ---8<-- -``` - -### 3.3 模型评估可视化 - -#### 3.3.1 测试集上评估模型 - -构建模型的代码为: - -``` py linenums="179" title="examples/earthformer/earthformer_enso_train.py" ---8<-- 
-examples/earthformer/earthformer_enso_train.py:179:181 ---8<-- -``` - -构建评估器的代码为: - -``` py linenums="150" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:150:177 ---8<-- -``` - -#### 3.3.2 模型导出 - -构建模型的代码为: - -``` py linenums="199" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:199:202 ---8<-- -``` - -实例化 `ppsci.solver.Solver`: - -``` py linenums="204" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:204:208 ---8<-- -``` - -构建模型输入格式并导出静态模型: - -``` py linenums="212" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:212:218 ---8<-- -``` - -`InputSpec` 函数中第一个设置模型输入尺寸,第二个参数设置输入数据类型,第三个设置输入数据的 `Key`. - -#### 3.3.3 模型推理 - -创建预测器: - -``` py linenums="222" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:222:224 ---8<-- -``` - -准备预测数据: - -``` py linenums="226" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:226:249 ---8<-- -``` - -进行模型预测与预测值保存: - -``` py linenums="253" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py:253:258 ---8<-- -``` - -## 4. 云总降水量 vil 模型实现 - -### 4.1 数据集介绍 - -数据集采用了 [EarthFormer](https://github.com/amazon-science/earth-forecasting-transformer/tree/main) 处理好的 SEVIR 数据集。 - -The Storm Event ImagRy(SEVIR) 数据集是由麻省理工林肯实验室和亚马逊收集并提供的。SEVIR 是一个经过注释、整理和时空对齐的数据集,包含 10,000 多个天气事件,每个事件由 384 千米 x 384 千米的图像序列组成,时间跨度为 4 小时。SEVIR 中的图像通过五种不同的数据类型进行采样和对齐:GOES-16 高级基线成像仪的三个通道 (C02、C09、C13)、NEXRAD 垂直液态水含量 (vil) 和 GOES-16 地球静止闪电成像 (GLM) 闪烁图。 - -SEVIR数据集的结构包括两部分:目录 (Catalog) 和数据文件 (Data File)。目录是一个 CSV 文件,其中包含描述事件元数据的行。数据文件是一组 HDF5 文件,包含特定传感器类型的事件。这些文件中的数据以 4D 张量形式存储,形状为 N x L x W x T,其中 N 是文件中的事件数,LxW 是图像大小,T 是图像序列中的时间步数。 -
- ![SEVIR](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/sevir.png){ loading=lazy style="margin:0 auto;height:100%;width:100%"} -
SEVIR 传感器类型说明
-
- -EarthFormer 采用 SEVIR 中的 NEXRAD 垂直液态水含量 (VIL) 作为降水预报的基准,即在 65 分钟的垂直综合液体背景下,预测未来 60 分钟的垂直综合液体。因此,分辨率为 13x384x384→12x384x384。 - -### 4.2 模型预训练 - -#### 4.2.1 约束构建 - -本案例基于数据驱动的方法求解问题,因此需要使用 PaddleScience 内置的 `SupervisedConstraint` 构建监督约束。在定义约束之前,需要首先指定监督约束中用于数据加载的各个参数。 - -数据加载的代码如下: - -``` py linenums="27" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:27:59 ---8<-- -``` - -其中,"dataset" 字段定义了使用的 `Dataset` 类名为 `ENSODataset`,"sampler" 字段定义了使用的 `Sampler` 类名为 `BatchSampler`,设置的 `batch_size` 为 1,`num_works` 为 8。 - -定义监督约束的代码如下: - -``` py linenums="61" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:61:67 ---8<-- -``` - -`SupervisedConstraint` 的第一个参数是数据的加载方式,这里使用上文中定义的 `train_dataloader_cfg`; - -第二个参数是损失函数的定义,这里使用自定义的损失函数 `mse_loss`; - -第三个参数是约束条件的名字,方便后续对其索引。此处命名为 `Sup`。 - -### 4.2.2 模型构建 - -在该案例中,云总降水量模型基于 CuboidTransformer 网络模型实现,用 PaddleScience 代码表示如下: - -``` py linenums="117" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:117:119 ---8<-- -``` - -定义模型的参数通过配置进行设置,如下: - -``` yaml linenums="58" title="examples/earthformer/conf/earthformer_sevir_pretrain.yaml" ---8<-- -examples/earthformer/conf/earthformer_sevir_pretrain.yaml:58:117 ---8<-- -``` - -其中,`input_keys` 和 `output_keys` 分别代表网络模型输入、输出变量的名称。 - -#### 4.2.3 学习率与优化器构建 - -本案例中使用的学习率方法为 `Cosine`,学习率大小设置为 `1e-3`。优化器使用 `AdamW`,并将参数进行分组,使用不同的 `weight_decay`,用 PaddleScience 代码表示如下: - -``` py linenums="121" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:121:146 ---8<-- -``` - -#### 4.2.4 评估器构建 - -本案例训练过程中会按照一定的训练轮数间隔,使用验证集评估当前模型的训练情况,需要使用 `SupervisedValidator` 构建评估器。代码如下: - -``` py linenums="71" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:71:115 ---8<-- -``` - -`SupervisedValidator` 评估器与 `SupervisedConstraint` 比较相似,不同的是评估器需要设置评价指标 `metric`,在这里使用了自定义的评价指标分别是 `MAE`、`MSE`、`csi`、`pod`、`sucr`和 `bias`,且后四个评价指标分别使用不同的阈值 `[16,74,133,160,181,219]`。 - -#### 4.2.5 模型训练 - -完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练。 - -``` py linenums="148" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:148:164 ---8<-- -``` - -#### 4.2.6 模型评估 - -由于目前 `paddlescience` 中的验证策略分为两类,一类是直接对验证数据集进行模型输出拼接,然后计算评价指标。另一类是按照每个 batch_size 计算评价指标,然后拼接,最后对所有结果求平均,该方法默认数据之间没有关联性。但是 `SEVIR` 数据集数据之间有关联性,所以不适用第二种方法;又由于 `SEVIR` 数据集量大,使用第一种方法验证显存需求大,因此验证 `SEVIR` 数据集使用的方法如下: - -- 1.对一个 batch size 计算 `hits`、`misses` 和 `fas` 三个数据 -- 2.对数据集所有数据保存所有 `batch` 的三个值的累加和. 
-- 3.对三个值的累加和计算 `csi`、`pod`、`sucr`和 `bias` 四个指标。 - -``` py linenums="165" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:165:181 ---8<-- -``` - -### 4.3 模型评估可视化 - -#### 4.3.1 测试集上评估模型 - -构建模型的代码为: - -``` py linenums="231" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:231:233 ---8<-- -``` - -构建评估器的代码为: - -``` py linenums="185" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:185:229 ---8<-- -``` - -模型评估: - -``` py linenums="246" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:246:262 ---8<-- -``` - -#### 4.3.2 模型导出 - -构建模型的代码为: - -``` py linenums="266" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:266:269 ---8<-- -``` - -实例化 `ppsci.solver.Solver`: - -``` py linenums="271" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:271:275 ---8<-- -``` - -构建模型输入格式并导出静态模型: - -``` py linenums="279" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:279:285 ---8<-- -``` - -`InputSpec` 函数中第一个设置模型输入尺寸,第二个参数设置输入数据类型,第三个设置输入数据的 `Key`. - -#### 4.3.3 模型推理 - -创建预测器: - -``` py linenums="293" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:293:294 ---8<-- -``` - -准备预测数据并进行对应模式的数据预处理: - -``` py linenums="295" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:295:314 ---8<-- -``` - -进行模型预测并可视化: - -``` py linenums="318" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py:318:330 ---8<-- -``` - -## 5. 完整代码 - -``` py linenums="1" title="examples/earthformer/earthformer_enso_train.py" ---8<-- -examples/earthformer/earthformer_enso_train.py ---8<-- -``` - -``` py linenums="1" title="examples/earthformer/earthformer_sevir_train.py" ---8<-- -examples/earthformer/earthformer_sevir_train.py ---8<-- -``` - -## 6. 结果展示 - -下图展示了云总降水量模型按照65分钟的输入数据,得到60分钟间隔的预测结果和真值结果。 - -
- ![SEVIR-predict](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/sevir-predict.png){ loading=lazy style="margin:0 auto;height:100%;width:100%"} -
SEVIR 中 vil 的预测结果("prediction")与真值结果("target")
-
- -说明: - -Hit:TP, Miss:FN, False Alarm:FP - -第一行: 输入数据; - -第二行: 真值结果; - -第三行: 预测结果; - -第四行: 设定阈值为 `74` 情况下,TP、FN、FP 三种情况标记 - -第五行: 在所有阈值情况下,TP、FN、FP 三种情况标记 +# EarthFormer + +开始训练、评估前,请先下载 + +[ICAR-ENSO数据集](https://tianchi.aliyun.com/dataset/98942) + +[SEVIR数据集](https://nbviewer.org/github/MIT-AI-Accelerator/eie-sevir/blob/master/examples/SEVIR_Tutorial.ipynb#download +) + +=== "模型训练命令" + + ``` sh + # ICAR-ENSO 数据模型训练 + python examples/earthformer/earthformer_enso_train.py + # SEVIR 数据模型训练 + python examples/earthformer/earthformer_sevir_train.py + + ``` + +=== "模型评估命令" + + ``` sh + # ICAR-ENSO 模型评估 + python examples/earthformer/earthformer_enso_train.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/earthformer/earthformer_enso.pdparams + # SEVIR 模型评估 + python examples/earthformer/earthformer_sevir_train.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/earthformer/earthformer_sevir.pdparams + ``` + +=== "模型导出命令" + + ``` sh + # ICAR-ENSO 模型推理 + python examples/earthformer/earthformer_enso_train.py mode=export + # SEVIR 模型推理 + python examples/earthformer/earthformer_sevir_train.py mode=export + ``` + +=== "模型推理命令" + + ``` sh + # ICAR-ENSO 模型推理 + python examples/earthformer/earthformer_enso_train.py mode=infer + # SEVIR 模型推理 + python examples/earthformer/earthformer_sevir_train.py mode=infer + ``` +| 模型 | 变量名称 | C-Nino3.4-M | C-Nino3.4-WM | MSE(1E-4) | +| :-- | :-- | :-- | :-- | :-- | +| [ENSO 模型](https://paddle-org.bj.bcebos.com/paddlescience/models/earthformer/earthformer_enso.pdparams) | sst | 0.74130 | 2.28990 | 2.5000 | + +| 模型 | 变量名称 | CSI-M | CSI-219 | CSI-181 | CSI-160 | CSI-133 | CSI-74 | CSI-16 | MSE(1E-4) | +| :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | :-- | +| [SEVIR 模型](https://paddle-org.bj.bcebos.com/paddlescience/models/earthformer/earthformer_sevir.pdparams) | vil | 0.4419 | 0.1791 | 0.2848 | 0.3232 | 0.4271 | 0.6860 | 0.7513 | 3.6957 | + +## 1. 背景简介 + +地球是一个复杂的系统。地球系统的变化,从温度波动等常规事件到干旱、冰雹和厄尔尼诺/南方涛动 (ENSO) 等极端事件,影响着我们的日常生活。在所有后果中,地球系统的变化会影响农作物产量、航班延误、引发洪水和森林火灾。对这些变化进行准确及时的预测可以帮助人们采取必要的预防措施以避免危机,或者更好地利用风能和太阳能等自然资源。因此,改进地球变化(例如天气和气候)的预测模型具有巨大的社会经济影响。 + +Earthformer,一种用于地球系统预测的时空转换器。为了更好地探索时空注意力的设计,论文提出了 Cuboid Attention ,它是高效时空注意力的通用构建块。这个想法是将输入张量分解为不重叠的长方体,并行应用长方体级自注意力。由于我们将 O(N2) 自注意力限制在局部长方体内,因此整体复杂度大大降低。不同类型的相关性可以通过不同的长方体分解来捕获。同时论文引入了一组关注所有局部长方体的全局向量,从而收集系统的整体状态。通过关注全局向量,局部长方体可以掌握系统的总体动态并相互共享信息。 + +## 2. 模型原理 + +本章节仅对 EarthFormer 的模型原理进行简单地介绍,详细的理论推导请阅读 [Earthformer: Exploring Space-Time Transformers for Earth System Forecasting](https://arxiv.org/abs/2207.05833)。 + +Earthformer 的网络模型使用了基于 Cuboid Attention 的分层 Transformer incoder-decoder 。这个想法是将数据分解为长方体并并行应用长方体级自注意力。这些长方体进一步与全局向量的集合连接。 + +模型的总体结构如图所示: + +
+ ![Earthformer-arch](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/earthformer_arch.png){ loading=lazy style="margin:0 auto;height:150%;width:150%"} +
EarthFormer 网络模型
+
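上文提到,Cuboid Attention 的核心思想是把输入的时空张量分解为互不重叠的长方体,并在每个长方体内部并行计算自注意力。下面给出一个极简的示意实现(使用 numpy,Q、K、V 直接取输入本身,未包含线性投影、全局向量与多头机制,仅用于说明分解与还原的计算流程,并非 PaddleScience 中 CuboidTransformer 的实现):

``` py
# 示意:把 (T, H, W, C) 的时空张量切分为不重叠长方体,并在长方体内部做朴素自注意力
import numpy as np

def cuboid_self_attention(x, cuboid=(2, 4, 4)):
    T, H, W, C = x.shape
    t, h, w = cuboid  # 每个长方体的 (t, h, w) 大小,需整除对应维度
    # 分解为 (num_cuboids, t*h*w, C)
    blocks = x.reshape(T // t, t, H // h, h, W // w, w, C)
    blocks = blocks.transpose(0, 2, 4, 1, 3, 5, 6).reshape(-1, t * h * w, C)
    # 长方体内部的自注意力(Q = K = V = blocks,省略线性投影)
    scores = blocks @ blocks.transpose(0, 2, 1) / np.sqrt(C)
    attn = np.exp(scores - scores.max(axis=-1, keepdims=True))
    attn /= attn.sum(axis=-1, keepdims=True)
    out = attn @ blocks
    # 还原回原始时空形状
    out = out.reshape(T // t, H // h, W // w, t, h, w, C)
    return out.transpose(0, 3, 1, 4, 2, 5, 6).reshape(T, H, W, C)

x = np.random.rand(12, 16, 16, 8).astype("float32")
print(cuboid_self_attention(x).shape)  # (12, 16, 16, 8)
```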
+ +EarthFormer 原代码中训练了 ICAR-ENSO 数据集中海面温度 (sst) 和 SEVIR 数据集中对云总降水量 (vil) 的估计模型,接下来将介绍这两个模型的训练、推理过程。 + +### 2.1 ICAR-ENSO 和 SEVIR 模型的训练、推理过程 + +模型预训练阶段是基于随机初始化的网络权重对模型进行训练,如下图所示,其中 $[x_{i}]_{i=1}^{T}$ 表示长度为 $T$ 时空序列的输入气象数据,$[y_{T+i}]_{i=1}^{K}$ 表示预测未来 $K$ 步的气象数据,$[y_{T+i_true}]_{i=1}^{K}$ 表示未来 $K$ 步的真实数据,如海面温度数据和云总降水量数据。最后网络模型预测的输出和真值计算 mse 损失函数。 + +
+ ![earthformer-pretraining](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/earthformer-pretrain.png){ loading=lazy style="margin:0 auto;height:70%;width:70%"} +
earthformer 模型预训练
+
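上图中预训练的优化目标,是对网络预测的未来 $K$ 步结果与真值逐点计算 MSE。下面是一个示意性的损失计算片段(输出与标签均以 `{"sst": Tensor}` 字典表示,张量形状与函数签名只是演示用的假设,并非案例中 `mse_loss` 的源码):

``` py
# 示意:对未来 K 步预测与真值计算逐点 MSE
import paddle
import paddle.nn.functional as F

def demo_mse_loss(output_dict, label_dict):
    # 形状示意:(batch, K, lat, lon, 1)
    return F.mse_loss(output_dict["sst"], label_dict["sst"], reduction="mean")

pred = {"sst": paddle.randn([2, 14, 24, 48, 1])}
true = {"sst": paddle.randn([2, 14, 24, 48, 1])}
print(float(demo_mse_loss(pred, true)))
```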
+ +在推理阶段,给定长度序列为 $T$ 的数据,得到长度序列为 $K$ 的预测结果。 + +
+ ![earthformer-pretraining](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/earthformer-infer.png){ loading=lazy style="margin:0 auto;height:60%;width:60%"} +
earthformer 模型推理
+
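按照“输入 $T$ 步、预测 $K$ 步”的设定,训练样本可以用滑动窗口从逐月序列中切出。以下为一个示意($T=12$、$K=14$ 与本案例的 ENSO 设置一致,数组为随机数据,切片方式仅供参考,并非 `ENSODataset` 的实现):

``` py
# 示意:从逐月海温异常序列构造 "12 步输入、14 步预测" 的样本
import numpy as np

T_in, K_out = 12, 14
sst = np.random.rand(36, 24, 48).astype("float32")  # 3 年逐月数据(随机示例)
samples = [
    (sst[i : i + T_in], sst[i + T_in : i + T_in + K_out])
    for i in range(sst.shape[0] - T_in - K_out + 1)
]
x0, y0 = samples[0]
print(len(samples), x0.shape, y0.shape)  # 11 (12, 24, 48) (14, 24, 48)
```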
+ +## 3. 海面温度模型实现 + +接下来开始讲解如何基于 PaddleScience 代码,实现 EarthFormer 模型的训练与推理。关于该案例中的其余细节请参考 [API文档](../api/arch.md)。 + +### 3.1 数据集介绍 + +数据集采用了 [EarthFormer](https://github.com/amazon-science/earth-forecasting-transformer/tree/main) 处理好的 ICAR-ENSO 数据集。 + +本数据集由气候与应用前沿研究院 ICAR 提供。数据包括 CMIP5/6 模式的历史模拟数据和美国 SODA 模式重建的近100多年历史观测同化数据。每个样本包含以下气象及时空变量:海表温度异常 (SST) ,热含量异常 (T300),纬向风异常 (Ua),经向风异常 (Va),数据维度为 (year,month,lat,lon)。训练数据提供对应月份的 Nino3.4 index 标签数据。测试用的初始场数据为国际多个海洋资料同化结果提供的随机抽取的 n 段 12 个时间序列,数据格式采用 NPY 格式保存。 + +**训练数据:** + +每个数据样本第一维度 (year) 表征数据所对应起始年份,对于 CMIP 数据共 291 年,其中 1-2265 为 CMIP6 中 15 个模式提供的 151 年的历史模拟数据 (总共:151年 *15 个模式=2265) ;2266-4645 为 CMIP5 中 17 个模式提供的 140 年的历史模拟数据 (总共:140 年*17 个模式=2380)。对于历史观测同化数据为美国提供的 SODA 数据。 + +**训练数据标签** + +标签数据为 Nino3.4 SST 异常指数,数据维度为 (year,month)。 + +CMIP(SODA)_train.nc 对应的标签数据当前时刻 Nino3.4 SST 异常指数的三个月滑动平均值,因此数据维度与维度介绍同训练数据一致。 + +注:三个月滑动平均值为当前月与未来两个月的平均值。 + +**测试数据** + +测试用的初始场 (输入) 数据为国际多个海洋资料同化结果提供的随机抽取的 n 段 12 个时间序列,数据格式采用NPY格式保存,维度为 (12,lat,lon, 4), 12 为 t 时刻及过去 11 个时刻,4 为预测因子,并按照 SST,T300,Ua,Va 的顺序存放。 + +EarthFFormer 模型对于 ICAR-ENSO 数据集的训练中,只对其中海面温度 (SST) 进行训练和预测。训练海温异常观测的 12 步 (一年) ,预测海温异常最多 14 步。 + +### 3.2 模型预训练 + +#### 3.2.1 约束构建 + +本案例基于数据驱动的方法求解问题,因此需要使用 PaddleScience 内置的 `SupervisedConstraint` 构建监督约束。在定义约束之前,需要首先指定监督约束中用于数据加载的各个参数。 + +数据加载的代码如下: + +``` py linenums="35" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:35:56 +--8<-- +``` + +其中,"dataset" 字段定义了使用的 `Dataset` 类名为 `ENSODataset`,"sampler" 字段定义了使用的 `Sampler` 类名为 `BatchSampler`,设置的 `batch_size` 为 16,`num_works` 为 8。 + +定义监督约束的代码如下: + +``` py linenums="58" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:58:64 +--8<-- +``` + +`SupervisedConstraint` 的第一个参数是数据的加载方式,这里使用上文中定义的 `train_dataloader_cfg`; + +第二个参数是损失函数的定义,这里使用自定义的损失函数 `mse_loss`; + +第三个参数是约束条件的名字,方便后续对其索引。此处命名为 `Sup`。 + +#### 3.2.2 模型构建 + +在该案例中,海面温度模型基于 CuboidTransformer 网络模型实现,用 PaddleScience 代码表示如下: + +``` py linenums="97" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:97:99 +--8<-- +``` + +网络模型的参数通过配置文件进行设置如下: + +``` yaml linenums="46" title="examples/earthformer/conf/earthformer_enso_pretrain.yaml" +--8<-- +examples/earthformer/conf/earthformer_enso_pretrain.yaml:46:105 +--8<-- +``` + +其中,`input_keys` 和 `output_keys` 分别代表网络模型输入、输出变量的名称。 + +#### 3.2.3 学习率与优化器构建 + +本案例中使用的学习率方法为 `Cosine`,学习率大小设置为 `2e-4`。优化器使用 `AdamW`,并将参数进行分组,使用不同的 +`weight_decay`,用 PaddleScience 代码表示如下: + +``` py linenums="101" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:101:126 +--8<-- +``` + +#### 3.2.4 评估器构建 + +本案例训练过程中会按照一定的训练轮数间隔,使用验证集评估当前模型的训练情况,需要使用 `SupervisedValidator` 构建评估器。代码如下: + +``` py linenums="68" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:68:95 +--8<-- +``` + +`SupervisedValidator` 评估器与 `SupervisedConstraint` 比较相似,不同的是评估器需要设置评价指标 `metric`,在这里使用了自定义的评价指标分别是 `MAE`、`MSE`、`RMSE`、`corr_nino3.4_epoch` 和 `corr_nino3.4_weighted_epoch`。 + +#### 3.2.5 模型训练与评估 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估。 + +``` py linenums="128" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:128:146 +--8<-- +``` + +### 3.3 模型评估可视化 + +#### 3.3.1 测试集上评估模型 + +构建模型的代码为: + +``` py linenums="179" title="examples/earthformer/earthformer_enso_train.py" +--8<-- 
+examples/earthformer/earthformer_enso_train.py:179:181 +--8<-- +``` + +构建评估器的代码为: + +``` py linenums="150" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:150:177 +--8<-- +``` + +#### 3.3.2 模型导出 + +构建模型的代码为: + +``` py linenums="199" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:199:202 +--8<-- +``` + +实例化 `ppsci.solver.Solver`: + +``` py linenums="204" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:204:208 +--8<-- +``` + +构建模型输入格式并导出静态模型: + +``` py linenums="212" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:212:218 +--8<-- +``` + +`InputSpec` 函数中第一个设置模型输入尺寸,第二个参数设置输入数据类型,第三个设置输入数据的 `Key`. + +#### 3.3.3 模型推理 + +创建预测器: + +``` py linenums="222" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:222:224 +--8<-- +``` + +准备预测数据: + +``` py linenums="226" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:226:249 +--8<-- +``` + +进行模型预测与预测值保存: + +``` py linenums="253" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py:253:258 +--8<-- +``` + +## 4. 云总降水量 vil 模型实现 + +### 4.1 数据集介绍 + +数据集采用了 [EarthFormer](https://github.com/amazon-science/earth-forecasting-transformer/tree/main) 处理好的 SEVIR 数据集。 + +The Storm Event ImagRy(SEVIR) 数据集是由麻省理工林肯实验室和亚马逊收集并提供的。SEVIR 是一个经过注释、整理和时空对齐的数据集,包含 10,000 多个天气事件,每个事件由 384 千米 x 384 千米的图像序列组成,时间跨度为 4 小时。SEVIR 中的图像通过五种不同的数据类型进行采样和对齐:GOES-16 高级基线成像仪的三个通道 (C02、C09、C13)、NEXRAD 垂直液态水含量 (vil) 和 GOES-16 地球静止闪电成像 (GLM) 闪烁图。 + +SEVIR数据集的结构包括两部分:目录 (Catalog) 和数据文件 (Data File)。目录是一个 CSV 文件,其中包含描述事件元数据的行。数据文件是一组 HDF5 文件,包含特定传感器类型的事件。这些文件中的数据以 4D 张量形式存储,形状为 N x L x W x T,其中 N 是文件中的事件数,LxW 是图像大小,T 是图像序列中的时间步数。 +
+ ![SEVIR](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/sevir.png){ loading=lazy style="margin:0 auto;height:100%;width:100%"} +
SEVIR 传感器类型说明
+
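上文提到 SEVIR 的数据文件为 HDF5 格式,事件以 N x L x W x T 的 4D 张量存储。下面给出一个读取单个事件并按“13 帧输入、12 帧目标”切分的示意(文件名与数据键名 `"vil"` 均为假设,请以实际数据集说明为准,此处仅演示数据组织方式):

``` py
# 示意:读取 SEVIR HDF5 文件并切分输入 / 目标帧
import h5py
import numpy as np

with h5py.File("SEVIR_VIL_example.h5", "r") as f:  # 文件名为假设
    vil = np.asarray(f["vil"])        # 形状约定为 (N, L, W, T)
event = vil[0]                         # 单个事件,约 (384, 384, 49),5 分钟一帧
x = event[..., :13]                    # 前 65 分钟(13 帧)作为输入
y = event[..., 13:25]                  # 随后 60 分钟(12 帧)作为预测目标
print(x.shape, y.shape)
```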
+ +EarthFormer 采用 SEVIR 中的 NEXRAD 垂直液态水含量 (VIL) 作为降水预报的基准,即在 65 分钟的垂直综合液体背景下,预测未来 60 分钟的垂直综合液体。因此,分辨率为 13x384x384→12x384x384。 + +### 4.2 模型预训练 + +#### 4.2.1 约束构建 + +本案例基于数据驱动的方法求解问题,因此需要使用 PaddleScience 内置的 `SupervisedConstraint` 构建监督约束。在定义约束之前,需要首先指定监督约束中用于数据加载的各个参数。 + +数据加载的代码如下: + +``` py linenums="27" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:27:59 +--8<-- +``` + +其中,"dataset" 字段定义了使用的 `Dataset` 类名为 `ENSODataset`,"sampler" 字段定义了使用的 `Sampler` 类名为 `BatchSampler`,设置的 `batch_size` 为 1,`num_works` 为 8。 + +定义监督约束的代码如下: + +``` py linenums="61" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:61:67 +--8<-- +``` + +`SupervisedConstraint` 的第一个参数是数据的加载方式,这里使用上文中定义的 `train_dataloader_cfg`; + +第二个参数是损失函数的定义,这里使用自定义的损失函数 `mse_loss`; + +第三个参数是约束条件的名字,方便后续对其索引。此处命名为 `Sup`。 + +### 4.2.2 模型构建 + +在该案例中,云总降水量模型基于 CuboidTransformer 网络模型实现,用 PaddleScience 代码表示如下: + +``` py linenums="117" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:117:119 +--8<-- +``` + +定义模型的参数通过配置进行设置,如下: + +``` yaml linenums="58" title="examples/earthformer/conf/earthformer_sevir_pretrain.yaml" +--8<-- +examples/earthformer/conf/earthformer_sevir_pretrain.yaml:58:117 +--8<-- +``` + +其中,`input_keys` 和 `output_keys` 分别代表网络模型输入、输出变量的名称。 + +#### 4.2.3 学习率与优化器构建 + +本案例中使用的学习率方法为 `Cosine`,学习率大小设置为 `1e-3`。优化器使用 `AdamW`,并将参数进行分组,使用不同的 `weight_decay`,用 PaddleScience 代码表示如下: + +``` py linenums="121" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:121:146 +--8<-- +``` + +#### 4.2.4 评估器构建 + +本案例训练过程中会按照一定的训练轮数间隔,使用验证集评估当前模型的训练情况,需要使用 `SupervisedValidator` 构建评估器。代码如下: + +``` py linenums="71" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:71:115 +--8<-- +``` + +`SupervisedValidator` 评估器与 `SupervisedConstraint` 比较相似,不同的是评估器需要设置评价指标 `metric`,在这里使用了自定义的评价指标分别是 `MAE`、`MSE`、`csi`、`pod`、`sucr`和 `bias`,且后四个评价指标分别使用不同的阈值 `[16,74,133,160,181,219]`。 + +#### 4.2.5 模型训练 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练。 + +``` py linenums="148" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:148:164 +--8<-- +``` + +#### 4.2.6 模型评估 + +由于目前 `paddlescience` 中的验证策略分为两类,一类是直接对验证数据集进行模型输出拼接,然后计算评价指标。另一类是按照每个 batch_size 计算评价指标,然后拼接,最后对所有结果求平均,该方法默认数据之间没有关联性。但是 `SEVIR` 数据集数据之间有关联性,所以不适用第二种方法;又由于 `SEVIR` 数据集量大,使用第一种方法验证显存需求大,因此验证 `SEVIR` 数据集使用的方法如下: + +- 1.对一个 batch size 计算 `hits`、`misses` 和 `fas` 三个数据 +- 2.对数据集所有数据保存所有 `batch` 的三个值的累加和. 
+- 3.对三个值的累加和计算 `csi`、`pod`、`sucr`和 `bias` 四个指标。 + +``` py linenums="165" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:165:181 +--8<-- +``` + +### 4.3 模型评估可视化 + +#### 4.3.1 测试集上评估模型 + +构建模型的代码为: + +``` py linenums="231" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:231:233 +--8<-- +``` + +构建评估器的代码为: + +``` py linenums="185" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:185:229 +--8<-- +``` + +模型评估: + +``` py linenums="246" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:246:262 +--8<-- +``` + +#### 4.3.2 模型导出 + +构建模型的代码为: + +``` py linenums="266" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:266:269 +--8<-- +``` + +实例化 `ppsci.solver.Solver`: + +``` py linenums="271" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:271:275 +--8<-- +``` + +构建模型输入格式并导出静态模型: + +``` py linenums="279" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:279:285 +--8<-- +``` + +`InputSpec` 函数中第一个设置模型输入尺寸,第二个参数设置输入数据类型,第三个设置输入数据的 `Key`. + +#### 4.3.3 模型推理 + +创建预测器: + +``` py linenums="293" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:293:294 +--8<-- +``` + +准备预测数据并进行对应模式的数据预处理: + +``` py linenums="295" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:295:314 +--8<-- +``` + +进行模型预测并可视化: + +``` py linenums="318" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py:318:330 +--8<-- +``` + +## 5. 完整代码 + +``` py linenums="1" title="examples/earthformer/earthformer_enso_train.py" +--8<-- +examples/earthformer/earthformer_enso_train.py +--8<-- +``` + +``` py linenums="1" title="examples/earthformer/earthformer_sevir_train.py" +--8<-- +examples/earthformer/earthformer_sevir_train.py +--8<-- +``` + +## 6. 结果展示 + +下图展示了云总降水量模型按照65分钟的输入数据,得到60分钟间隔的预测结果和真值结果。 + +
+ ![SEVIR-predict](https://paddle-org.bj.bcebos.com/paddlescience/docs/earthformer/sevir-predict.png){ loading=lazy style="margin:0 auto;height:100%;width:100%"} +
SEVIR 中 vil 的预测结果("prediction")与真值结果("target")
+
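上图中标注的 Hit(TP)、Miss(FN)、False Alarm(FP)即对应 4.2.6 节中逐 batch 累加的 `hits`、`misses`、`fas`。以下给出按该流程由累加值计算 `csi`、`pod`、`sucr`、`bias` 的示意代码(阈值与数据均为随机演示,指标采用常用定义,并非案例源码):

``` py
# 示意:逐 batch 累加 hits / misses / fas,最后一次性计算四个指标
import numpy as np

def update_counts(pred, target, threshold, counts):
    p, t = pred >= threshold, target >= threshold
    counts["hits"] += int(np.sum(p & t))
    counts["misses"] += int(np.sum(~p & t))
    counts["fas"] += int(np.sum(p & ~t))

def summarize(counts, eps=1e-6):
    h, m, f = counts["hits"], counts["misses"], counts["fas"]
    return {
        "csi": h / (h + m + f + eps),
        "pod": h / (h + m + eps),
        "sucr": h / (h + f + eps),
        "bias": (h + f) / (h + m + eps),
    }

counts = {"hits": 0, "misses": 0, "fas": 0}
for _ in range(4):  # 模拟逐 batch 累加
    pred = np.random.rand(1, 12, 64, 64) * 255
    target = np.random.rand(1, 12, 64, 64) * 255
    update_counts(pred, target, threshold=74, counts=counts)
print(summarize(counts))
```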
+ +说明: + +Hit:TP, Miss:FN, False Alarm:FP + +第一行: 输入数据; + +第二行: 真值结果; + +第三行: 预测结果; + +第四行: 设定阈值为 `74` 情况下,TP、FN、FP 三种情况标记 + +第五行: 在所有阈值情况下,TP、FN、FP 三种情况标记 diff --git a/docs/zh/examples/extformer_moe.md b/docs/zh/examples/extformer_moe.md index a23a7f83d0..397cf1052e 100644 --- a/docs/zh/examples/extformer_moe.md +++ b/docs/zh/examples/extformer_moe.md @@ -1,227 +1,227 @@ -# Extformer-MoE - -!!! note - - 1. 开始训练、评估前,请先下载 [ICAR-ENSO数据集](https://tianchi.aliyun.com/dataset/98942),并对应修改 yaml 配置文件中的 `FILE_PATH` 为解压后的数据集路径。 - 2. 开始训练、评估前,请安装 `xarray` 和 `h5netcdf`:`pip install requirements.txt` - 3. 若训练时显存不足,可指定 `MODEL.checkpoint_level` 为 `1` 或 `2`,此时使用 recompute 模式运行,以训练时间换取显存。 - -=== "模型训练命令" - - ``` sh - # ICAR-ENSO 数据预训练模型: Extformer-MoE - python extformer_moe_enso_train.py - # python extformer_moe_enso_train.py MODEL.checkpoint_level=1 # using recompute to run in device with small GPU memory - # python extformer_moe_enso_train.py MODEL.checkpoint_level=2 # using recompute to run in device with small GPU memory - ``` - -=== "模型评估命令" - - ``` sh - # ICAR-ENSO 模型评估: Extformer-MoE - python extformer_moe_enso_train.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/extformer-moe/extformer_moe_pretrained.pdparams - ``` - -| 模型 | 变量名称 | C-Nino3.4-M | C-Nino3.4-WM | MSE(1E-4) | MAE(1E-1) | RMSE | -| :-- | :-- | :-- | :-- | :-- | :-- | :-- | -| [Extformer-MoE](https://paddle-org.bj.bcebos.com/paddlescience/models/extformer-moe/extformer_moe_pretrained.pdparams) | sst | 0.7651 | 2.39771 | 3.0000 | 0.1291 | 0.50243 | - -## 1. 背景简介 - -地球是一个复杂的系统。地球系统的变化,从温度波动等常规事件到干旱、冰雹和厄尔尼诺/南方涛动 (ENSO) 等极端事件,影响着我们的日常生活。在所有后果中,地球系统的变化会影响农作物产量、航班延误、引发洪水和森林火灾。对这些变化进行准确及时的预测可以帮助人们采取必要的预防措施以避免危机,或者更好地利用风能和太阳能等自然资源。因此,改进地球变化(例如天气和气候)的预测模型具有巨大的社会经济影响。 - -近年来,深度学习模型在天气和气候预报任务中显示出了巨大的潜力。相较于传统的数值模拟方法,深度学习方法通过利用视觉神经网络 (ViT) 或图神经网络 (GNN) 等新兴技术直接从海量再分析数据中学习当前和未来天气或气候状态之间的复杂映射关系,在预测效率和精度方面均取得了显著的提升。然而,地球变化中发生的极端事件往往呈现出长距离时空同步关联、时空分布规律多样以及极值观测信号稀疏等特点,给基于深度学习的地球系统极端事件预测模型的构建带来了诸多新的技术挑战。 - -### 1.1 长距离时空同步关联 - -面对复杂耦合的地球变化系统,现有基于视觉和图深度学习的技术在建模极端天气呈现出的长距离时空关联性时存在诸多不足。具体而言,基于视觉深度学习的智能预报模型(例如华为的盘古气象大模型)仅限于计算局部区域内的信息交互,无法高效利用来自遥远区域的全局信息。相比之下,基于图神经网络的天气预报方法(例如谷歌的GraphCast)可以通过预定义的图结构进行远程信息传播,然而先验图结构难以有效识别影响极端天气的关键长距离信息且容易受到噪声影响,导致模型产生有偏甚至错误的预测结果。此外,地球系统的气象数据一般具有海量的网格点,在挖掘全局的长距离时空关联信息的同时,可能会导致模型复杂度的激增,如何高效建模时空数据中的长距离关联成为地球系统极端事件预测的重大挑战。 - -Earthformer,一种用于地球系统预测的时空转换器。为了更好地探索时空注意力的设计,其中设计了 Cuboid Attention ,它是高效时空注意力的通用构建块。这个想法是将输入张量分解为不重叠的长方体,并行应用长方体级自注意力。由于我们将 O(N2) 自注意力限制在局部长方体内,因此模型整体复杂度大大降低。不同类型的相关性可以通过不同的长方体分解来捕获。同时 Earthformer 引入了一组关注所有局部长方体的全局向量,从而收集系统的整体状态。通过关注全局向量,局部长方体可以掌握系统的总体动态并相互共享信息,从而捕获到地球系统的长距离关联信息。 - -### 1.2 时空分布规律多样 - -精准建模时空分布规律的多样性是提升地球系统极端事件预测的关键。现有方法在时域和空域均使用共享的参数,无法有效捕捉特定于时段和地理位置独特的的极端天气特征模式。 - -混合专家(MoE, Mixture-of-Experts)网络,它包含一组专家网络和门控网络。每个专家网络都是独立的神经网络,拥有独立的参数,门控网络自适应地为每个输入单元选择一个独特的专家网络子集。在训练和推理过程中,每个输入单元只需要利用一个很小的专家网络子集,因此可以扩大专家网络的总数,在增强模型表达能力的同时维持相对较小的计算复杂度。在地球系统中,MoE 可以通过学习与时间、地理位置、模型输入相关的独有参数集合,从而增强模型捕捉时空分布差异性的能力。 - -### 1.3 极值观测信号稀疏 - -气象数据的不均衡分布会导致模型偏向于预测频繁出现的正常气象状况,而低估了观测值稀少的极端状况,因为模型训练中常用的回归损失函数比如均方误差(MSE)损失会导致预测结果的过平滑现象。与具有离散标签空间的不平衡分类问题不同,不平衡回归问题具有连续的标签空间,为极端预测问题带来了更大的挑战。 - -Rank-N-Contrast(RNC)是一种表征学习方法,旨在学习一种回归感知的样本表征,该表征以连续标签空间中的距离为依据,对嵌入空间中的样本间距离进行排序,然后利用它来预测最终连续的标签。在地球系统极端预测问题中,RNC 可以对气象数据的表征进行规范,使其满足嵌入空间的连续性,和标签空间对齐,最终缓解极端事件的预测结果的过平滑问题。 - -## 2. 
模型原理 - -### 2.1 Earthformer - -本章节仅对 EarthFormer 的模型原理进行简单地介绍,详细的理论推导请阅读 [Earthformer: Exploring Space-Time Transformers for Earth System Forecasting](https://arxiv.org/abs/2207.05833)。 - -Earthformer 的网络模型使用了基于 Cuboid Attention 的分层 Encoder-Decoder 架构Transformer,它将数据分解为长方体并并行应用长方体级自注意力,这些长方体进一步与全局向量的集合交互以捕获全局信息。 - -Earthformer 的总体结构如图所示: - -
- -
- -### 2.2 Mixture-of-Experts - -本章节仅对 Mixture-of-Experts 的原理进行简单地介绍,详细的理论推导请阅读 [Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer -](https://arxiv.org/abs/1701.06538)。 - -混合专家(MoE, Mixture-of-Experts)网络,它包含一组参数独立的专家网络 $E_1,E_2,...,E_n$ 和门控网络 $G$。给定输入 $x$,MoE 网络的输出为 $y=\sum_{i=1}^n G(x)_iE_i(x)$。 - -MoE 的总体结构如图所示: - -
- -
- -### 2.3 Rank-N-Contrast - -Rank-N-Contrast(RNC)是一种根据样本在标签空间中的相互间的排序,通过对比来学习以学习连续性表征的的回归方法。RNC 的一个简单示例如图所示: - -
- -
- -### 2.4 Extformer-MoE 模型的训练、推理过程 - -模型预训练阶段是基于随机初始化的网络权重对模型进行训练,如下图所示,其中 $[x_{i}]_{i=1}^{T}$ 表示长度为 $T$ 时空序列的输入气象数据,$[y_{i}]_{i=1}^{K}$ 表示预测未来 $K$ 步的气象数据,$[y_{i_True}]_{i=1}^{K}$ 表示未来 $K$ 步的真实数据,如海面温度数据和云总降水量数据。最后网络模型预测的输出和真值计算 mse 损失函数。在推理阶段,给定长度序列为 $T$ 的数据,得到长度序列为 $K$ 的预测结果。 - -## 3. 海面温度模型实现 - -接下来开始讲解如何基于 PaddleScience 代码,实现 Extformer-MoE 模型的训练与推理。关于该案例中的其余细节请参考 [API文档](../api/arch.md)。 - -### 3.1 数据集介绍 - -数据集采用了 [EarthFormer](https://github.com/amazon-science/earth-forecasting-transformer/tree/main) 处理好的 ICAR-ENSO 数据集。 - -本数据集由气候与应用前沿研究院 ICAR 提供。数据包括 CMIP5/6 模式的历史模拟数据和美国 SODA 模式重建的近100多年历史观测同化数据。每个样本包含以下气象及时空变量:海表温度异常 (SST) ,热含量异常 (T300),纬向风异常 (Ua),经向风异常 (Va),数据维度为 (year,month,lat,lon)。训练数据提供对应月份的 Nino3.4 index 标签数据。测试用的初始场数据为国际多个海洋资料同化结果提供的随机抽取的 n 段 12 个时间序列,数据格式采用 NPY 格式保存。 - -**训练数据:** - -每个数据样本第一维度 (year) 表征数据所对应起始年份,对于 CMIP 数据共 291 年,其中 1-2265 为 CMIP6 中 15 个模式提供的 151 年的历史模拟数据 (总共:151年 *15 个模式=2265) ;2266-4645 为 CMIP5 中 17 个模式提供的 140 年的历史模拟数据 (总共:140 年*17 个模式=2380)。对于历史观测同化数据为美国提供的 SODA 数据。 - -**训练数据标签** - -标签数据为 Nino3.4 SST 异常指数,数据维度为 (year,month)。 - -CMIP(SODA)_train.nc 对应的标签数据当前时刻 Nino3.4 SST 异常指数的三个月滑动平均值,因此数据维度与维度介绍同训练数据一致。 - -注:三个月滑动平均值为当前月与未来两个月的平均值。 - -**测试数据** - -测试用的初始场 (输入) 数据为国际多个海洋资料同化结果提供的随机抽取的 n 段 12 个时间序列,数据格式采用NPY格式保存,维度为 (12,lat,lon, 4), 12 为 t 时刻及过去 11 个时刻,4 为预测因子,并按照 SST,T300,Ua,Va 的顺序存放。 - -EarthFFormer 模型对于 ICAR-ENSO 数据集的训练中,只对其中海面温度 (SST) 进行训练和预测。训练海温异常观测的 12 步 (一年) ,预测海温异常最多 14 步。 - -### 3.2 模型预训练 - -#### 3.2.1 约束构建 - -本案例基于数据驱动的方法求解问题,因此需要使用 PaddleScience 内置的 `SupervisedConstraint` 构建监督约束。在定义约束之前,需要首先指定监督约束中用于数据加载的各个参数。 - -数据加载的代码如下: - -``` py linenums="25" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py:25:47 ---8<-- -``` - -其中,"dataset" 字段定义了使用的 `Dataset` 类名为 `ExtMoEENSODataset`,"sampler" 字段定义了使用的 `Sampler` 类名为 `BatchSampler`,设置的 `batch_size` 为 16,`num_works` 为 8。 - -定义监督约束的代码如下: - -``` py linenums="49" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py:49:55 ---8<-- -``` - -`SupervisedConstraint` 的第一个参数是数据的加载方式,这里使用上文中定义的 `train_dataloader_cfg`; - -第二个参数是损失函数的定义,这里使用自定义的损失函数; - -第三个参数是约束条件的名字,方便后续对其索引。此处命名为 `Sup`。 - -#### 3.2.2 模型构建 - -在该案例中,海面温度模型基于 ExtFormerMoECuboid 网络模型实现,用 PaddleScience 代码表示如下: - -``` py linenums="88" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py:88:92 ---8<-- -``` - -网络模型的参数通过配置文件进行设置如下: - -``` yaml linenums="47" title="examples/earthformer/conf/earthformer_enso_pretrain.yaml" ---8<-- -examples/extformer_moe/conf/extformer_moe_enso_pretrain.yaml:47:129 ---8<-- -``` - -其中,`input_keys` 和 `output_keys` 分别代表网络模型输入、输出变量的名称。 - -#### 3.2.3 学习率与优化器构建 - -本案例中使用的学习率方法为 `Cosine`,学习率大小设置为 `2e-4`。优化器使用 `AdamW`,并将参数进行分组,使用不同的 -`weight_decay`,用 PaddleScience 代码表示如下: - -``` py linenums="94" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py:94:119 ---8<-- -``` - -#### 3.2.4 评估器构建 - -本案例训练过程中会按照一定的训练轮数间隔,使用验证集评估当前模型的训练情况,需要使用 `SupervisedValidator` 构建评估器。代码如下: - -``` py linenums="59" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py:59:86 ---8<-- -``` - -`SupervisedValidator` 评估器与 `SupervisedConstraint` 比较相似,不同的是评估器需要设置评价指标 `metric`,在这里使用了自定义的评价指标分别是 `MAE`、`MSE`、`RMSE`、`corr_nino3.4_epoch` 和 `corr_nino3.4_weighted_epoch`。 - -#### 3.2.5 模型训练与评估 - -完成上述设置之后,只需要将上述实例化的对象按顺序传递给 
`ppsci.solver.Solver`,然后启动训练、评估。 - -``` py linenums="121" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py:121:137 ---8<-- -``` - -### 3.3 模型评估 - -构建模型的代码为: - -``` py linenums="138" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py:138:139 ---8<-- -``` - -构建评估器的代码为: - -``` py linenums="142" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py:142:182 ---8<-- -``` - -## 4. 完整代码 - -``` py linenums="1" title="examples/extformer_moe/extformer_moe_enso_train.py" ---8<-- -examples/extformer_moe/extformer_moe_enso_train.py ---8<-- -``` +# Extformer-MoE + +!!! note + + 1. 开始训练、评估前,请先下载 [ICAR-ENSO数据集](https://tianchi.aliyun.com/dataset/98942),并对应修改 yaml 配置文件中的 `FILE_PATH` 为解压后的数据集路径。 + 2. 开始训练、评估前,请安装 `xarray` 和 `h5netcdf`:`pip install requirements.txt` + 3. 若训练时显存不足,可指定 `MODEL.checkpoint_level` 为 `1` 或 `2`,此时使用 recompute 模式运行,以训练时间换取显存。 + +=== "模型训练命令" + + ``` sh + # ICAR-ENSO 数据预训练模型: Extformer-MoE + python extformer_moe_enso_train.py + # python extformer_moe_enso_train.py MODEL.checkpoint_level=1 # using recompute to run in device with small GPU memory + # python extformer_moe_enso_train.py MODEL.checkpoint_level=2 # using recompute to run in device with small GPU memory + ``` + +=== "模型评估命令" + + ``` sh + # ICAR-ENSO 模型评估: Extformer-MoE + python extformer_moe_enso_train.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/extformer-moe/extformer_moe_pretrained.pdparams + ``` + +| 模型 | 变量名称 | C-Nino3.4-M | C-Nino3.4-WM | MSE(1E-4) | MAE(1E-1) | RMSE | +| :-- | :-- | :-- | :-- | :-- | :-- | :-- | +| [Extformer-MoE](https://paddle-org.bj.bcebos.com/paddlescience/models/extformer-moe/extformer_moe_pretrained.pdparams) | sst | 0.7651 | 2.39771 | 3.0000 | 0.1291 | 0.50243 | + +## 1. 
背景简介 + +地球是一个复杂的系统。地球系统的变化,从温度波动等常规事件到干旱、冰雹和厄尔尼诺/南方涛动 (ENSO) 等极端事件,影响着我们的日常生活。在所有后果中,地球系统的变化会影响农作物产量、航班延误、引发洪水和森林火灾。对这些变化进行准确及时的预测可以帮助人们采取必要的预防措施以避免危机,或者更好地利用风能和太阳能等自然资源。因此,改进地球变化(例如天气和气候)的预测模型具有巨大的社会经济影响。 + +近年来,深度学习模型在天气和气候预报任务中显示出了巨大的潜力。相较于传统的数值模拟方法,深度学习方法通过利用视觉神经网络 (ViT) 或图神经网络 (GNN) 等新兴技术直接从海量再分析数据中学习当前和未来天气或气候状态之间的复杂映射关系,在预测效率和精度方面均取得了显著的提升。然而,地球变化中发生的极端事件往往呈现出长距离时空同步关联、时空分布规律多样以及极值观测信号稀疏等特点,给基于深度学习的地球系统极端事件预测模型的构建带来了诸多新的技术挑战。 + +### 1.1 长距离时空同步关联 + +面对复杂耦合的地球变化系统,现有基于视觉和图深度学习的技术在建模极端天气呈现出的长距离时空关联性时存在诸多不足。具体而言,基于视觉深度学习的智能预报模型(例如华为的盘古气象大模型)仅限于计算局部区域内的信息交互,无法高效利用来自遥远区域的全局信息。相比之下,基于图神经网络的天气预报方法(例如谷歌的GraphCast)可以通过预定义的图结构进行远程信息传播,然而先验图结构难以有效识别影响极端天气的关键长距离信息且容易受到噪声影响,导致模型产生有偏甚至错误的预测结果。此外,地球系统的气象数据一般具有海量的网格点,在挖掘全局的长距离时空关联信息的同时,可能会导致模型复杂度的激增,如何高效建模时空数据中的长距离关联成为地球系统极端事件预测的重大挑战。 + +Earthformer,一种用于地球系统预测的时空转换器。为了更好地探索时空注意力的设计,其中设计了 Cuboid Attention ,它是高效时空注意力的通用构建块。这个想法是将输入张量分解为不重叠的长方体,并行应用长方体级自注意力。由于我们将 O(N2) 自注意力限制在局部长方体内,因此模型整体复杂度大大降低。不同类型的相关性可以通过不同的长方体分解来捕获。同时 Earthformer 引入了一组关注所有局部长方体的全局向量,从而收集系统的整体状态。通过关注全局向量,局部长方体可以掌握系统的总体动态并相互共享信息,从而捕获到地球系统的长距离关联信息。 + +### 1.2 时空分布规律多样 + +精准建模时空分布规律的多样性是提升地球系统极端事件预测的关键。现有方法在时域和空域均使用共享的参数,无法有效捕捉特定于时段和地理位置独特的的极端天气特征模式。 + +混合专家(MoE, Mixture-of-Experts)网络,它包含一组专家网络和门控网络。每个专家网络都是独立的神经网络,拥有独立的参数,门控网络自适应地为每个输入单元选择一个独特的专家网络子集。在训练和推理过程中,每个输入单元只需要利用一个很小的专家网络子集,因此可以扩大专家网络的总数,在增强模型表达能力的同时维持相对较小的计算复杂度。在地球系统中,MoE 可以通过学习与时间、地理位置、模型输入相关的独有参数集合,从而增强模型捕捉时空分布差异性的能力。 + +### 1.3 极值观测信号稀疏 + +气象数据的不均衡分布会导致模型偏向于预测频繁出现的正常气象状况,而低估了观测值稀少的极端状况,因为模型训练中常用的回归损失函数比如均方误差(MSE)损失会导致预测结果的过平滑现象。与具有离散标签空间的不平衡分类问题不同,不平衡回归问题具有连续的标签空间,为极端预测问题带来了更大的挑战。 + +Rank-N-Contrast(RNC)是一种表征学习方法,旨在学习一种回归感知的样本表征,该表征以连续标签空间中的距离为依据,对嵌入空间中的样本间距离进行排序,然后利用它来预测最终连续的标签。在地球系统极端预测问题中,RNC 可以对气象数据的表征进行规范,使其满足嵌入空间的连续性,和标签空间对齐,最终缓解极端事件的预测结果的过平滑问题。 + +## 2. 模型原理 + +### 2.1 Earthformer + +本章节仅对 EarthFormer 的模型原理进行简单地介绍,详细的理论推导请阅读 [Earthformer: Exploring Space-Time Transformers for Earth System Forecasting](https://arxiv.org/abs/2207.05833)。 + +Earthformer 的网络模型使用了基于 Cuboid Attention 的分层 Encoder-Decoder 架构Transformer,它将数据分解为长方体并并行应用长方体级自注意力,这些长方体进一步与全局向量的集合交互以捕获全局信息。 + +Earthformer 的总体结构如图所示: + +
+ +
+ +### 2.2 Mixture-of-Experts + +本章节仅对 Mixture-of-Experts 的原理进行简单地介绍,详细的理论推导请阅读 [Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer +](https://arxiv.org/abs/1701.06538)。 + +混合专家(MoE, Mixture-of-Experts)网络,它包含一组参数独立的专家网络 $E_1,E_2,...,E_n$ 和门控网络 $G$。给定输入 $x$,MoE 网络的输出为 $y=\sum_{i=1}^n G(x)_iE_i(x)$。 + +MoE 的总体结构如图所示: + +
+ +
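结合 2.2 节中 $y=\sum_{i=1}^n G(x)_i E_i(x)$ 的公式,下面给出一个稀疏门控 MoE 前向计算的示意(专家与门控均以随机初始化的线性变换代替,top-k 选择与归一化方式仅为演示,并非 Extformer-MoE 的实现):

``` py
# 示意:稀疏门控 MoE,每个输入只激活 top-k 个专家
import numpy as np

rng = np.random.default_rng(0)
d, n_experts, k = 16, 4, 2
experts = [rng.standard_normal((d, d)) * 0.1 for _ in range(n_experts)]  # E_i
w_gate = rng.standard_normal((d, n_experts)) * 0.1                       # 门控参数

def moe_forward(x):
    logits = x @ w_gate                    # 每个专家的打分
    top = np.argsort(logits)[-k:]          # 仅保留 top-k 个专家
    gates = np.exp(logits[top] - logits[top].max())
    gates /= gates.sum()                   # 归一化得到 G(x)_i
    return sum(g * (x @ experts[i]) for g, i in zip(gates, top))

x = rng.standard_normal(d)
print(moe_forward(x).shape)  # (16,)
```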
+ +### 2.3 Rank-N-Contrast + +Rank-N-Contrast(RNC)是一种根据样本在标签空间中的相互间的排序,通过对比来学习以学习连续性表征的的回归方法。RNC 的一个简单示例如图所示: + +
+ +
+ +### 2.4 Extformer-MoE 模型的训练、推理过程 + +模型预训练阶段是基于随机初始化的网络权重对模型进行训练,如下图所示,其中 $[x_{i}]_{i=1}^{T}$ 表示长度为 $T$ 时空序列的输入气象数据,$[y_{i}]_{i=1}^{K}$ 表示预测未来 $K$ 步的气象数据,$[y_{i_True}]_{i=1}^{K}$ 表示未来 $K$ 步的真实数据,如海面温度数据和云总降水量数据。最后网络模型预测的输出和真值计算 mse 损失函数。在推理阶段,给定长度序列为 $T$ 的数据,得到长度序列为 $K$ 的预测结果。 + +## 3. 海面温度模型实现 + +接下来开始讲解如何基于 PaddleScience 代码,实现 Extformer-MoE 模型的训练与推理。关于该案例中的其余细节请参考 [API文档](../api/arch.md)。 + +### 3.1 数据集介绍 + +数据集采用了 [EarthFormer](https://github.com/amazon-science/earth-forecasting-transformer/tree/main) 处理好的 ICAR-ENSO 数据集。 + +本数据集由气候与应用前沿研究院 ICAR 提供。数据包括 CMIP5/6 模式的历史模拟数据和美国 SODA 模式重建的近100多年历史观测同化数据。每个样本包含以下气象及时空变量:海表温度异常 (SST) ,热含量异常 (T300),纬向风异常 (Ua),经向风异常 (Va),数据维度为 (year,month,lat,lon)。训练数据提供对应月份的 Nino3.4 index 标签数据。测试用的初始场数据为国际多个海洋资料同化结果提供的随机抽取的 n 段 12 个时间序列,数据格式采用 NPY 格式保存。 + +**训练数据:** + +每个数据样本第一维度 (year) 表征数据所对应起始年份,对于 CMIP 数据共 291 年,其中 1-2265 为 CMIP6 中 15 个模式提供的 151 年的历史模拟数据 (总共:151年 *15 个模式=2265) ;2266-4645 为 CMIP5 中 17 个模式提供的 140 年的历史模拟数据 (总共:140 年*17 个模式=2380)。对于历史观测同化数据为美国提供的 SODA 数据。 + +**训练数据标签** + +标签数据为 Nino3.4 SST 异常指数,数据维度为 (year,month)。 + +CMIP(SODA)_train.nc 对应的标签数据当前时刻 Nino3.4 SST 异常指数的三个月滑动平均值,因此数据维度与维度介绍同训练数据一致。 + +注:三个月滑动平均值为当前月与未来两个月的平均值。 + +**测试数据** + +测试用的初始场 (输入) 数据为国际多个海洋资料同化结果提供的随机抽取的 n 段 12 个时间序列,数据格式采用NPY格式保存,维度为 (12,lat,lon, 4), 12 为 t 时刻及过去 11 个时刻,4 为预测因子,并按照 SST,T300,Ua,Va 的顺序存放。 + +EarthFFormer 模型对于 ICAR-ENSO 数据集的训练中,只对其中海面温度 (SST) 进行训练和预测。训练海温异常观测的 12 步 (一年) ,预测海温异常最多 14 步。 + +### 3.2 模型预训练 + +#### 3.2.1 约束构建 + +本案例基于数据驱动的方法求解问题,因此需要使用 PaddleScience 内置的 `SupervisedConstraint` 构建监督约束。在定义约束之前,需要首先指定监督约束中用于数据加载的各个参数。 + +数据加载的代码如下: + +``` py linenums="25" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py:25:47 +--8<-- +``` + +其中,"dataset" 字段定义了使用的 `Dataset` 类名为 `ExtMoEENSODataset`,"sampler" 字段定义了使用的 `Sampler` 类名为 `BatchSampler`,设置的 `batch_size` 为 16,`num_works` 为 8。 + +定义监督约束的代码如下: + +``` py linenums="49" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py:49:55 +--8<-- +``` + +`SupervisedConstraint` 的第一个参数是数据的加载方式,这里使用上文中定义的 `train_dataloader_cfg`; + +第二个参数是损失函数的定义,这里使用自定义的损失函数; + +第三个参数是约束条件的名字,方便后续对其索引。此处命名为 `Sup`。 + +#### 3.2.2 模型构建 + +在该案例中,海面温度模型基于 ExtFormerMoECuboid 网络模型实现,用 PaddleScience 代码表示如下: + +``` py linenums="88" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py:88:92 +--8<-- +``` + +网络模型的参数通过配置文件进行设置如下: + +``` yaml linenums="47" title="examples/earthformer/conf/earthformer_enso_pretrain.yaml" +--8<-- +examples/extformer_moe/conf/extformer_moe_enso_pretrain.yaml:47:129 +--8<-- +``` + +其中,`input_keys` 和 `output_keys` 分别代表网络模型输入、输出变量的名称。 + +#### 3.2.3 学习率与优化器构建 + +本案例中使用的学习率方法为 `Cosine`,学习率大小设置为 `2e-4`。优化器使用 `AdamW`,并将参数进行分组,使用不同的 +`weight_decay`,用 PaddleScience 代码表示如下: + +``` py linenums="94" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py:94:119 +--8<-- +``` + +#### 3.2.4 评估器构建 + +本案例训练过程中会按照一定的训练轮数间隔,使用验证集评估当前模型的训练情况,需要使用 `SupervisedValidator` 构建评估器。代码如下: + +``` py linenums="59" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py:59:86 +--8<-- +``` + +`SupervisedValidator` 评估器与 `SupervisedConstraint` 比较相似,不同的是评估器需要设置评价指标 `metric`,在这里使用了自定义的评价指标分别是 `MAE`、`MSE`、`RMSE`、`corr_nino3.4_epoch` 和 `corr_nino3.4_weighted_epoch`。 + +#### 3.2.5 模型训练与评估 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 
`ppsci.solver.Solver`,然后启动训练、评估。 + +``` py linenums="121" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py:121:137 +--8<-- +``` + +### 3.3 模型评估 + +构建模型的代码为: + +``` py linenums="138" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py:138:139 +--8<-- +``` + +构建评估器的代码为: + +``` py linenums="142" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py:142:182 +--8<-- +``` + +## 4. 完整代码 + +``` py linenums="1" title="examples/extformer_moe/extformer_moe_enso_train.py" +--8<-- +examples/extformer_moe/extformer_moe_enso_train.py +--8<-- +``` diff --git a/docs/zh/examples/heat_exchanger.md b/docs/zh/examples/heat_exchanger.md index 77530ee77c..1546d7982a 100644 --- a/docs/zh/examples/heat_exchanger.md +++ b/docs/zh/examples/heat_exchanger.md @@ -1,354 +1,354 @@ -# Heat_Exchanger - -=== "模型训练命令" - - ``` sh - python heat_exchanger.py - ``` - -=== "模型评估命令" - - ``` sh - python heat_exchanger.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/HEDeepONet/HEDeepONet_pretrained.pdparams - ``` - -=== "模型导出命令" - - ``` sh - python heat_exchanger.py mode=export - ``` - -=== "模型推理命令" - - ``` sh - python heat_exchanger.py mode=infer - ``` - -| 预训练模型 | 指标 | -|:--| :--| -| [heat_exchanger_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/HEDeepONet/HEDeepONet_pretrained.pdparams) | The L2 norm error between the actual heat exchanger efficiency and the predicted heat exchanger efficiency: 0.02087
MSE.heat_boundary(interior_mse): 0.52005
MSE.cold_boundary(interior_mse): 0.16590
MSE.wall(interior_mse): 0.01203 | - -## 1. 背景简介 - -### 1.1 换热器 - -换热器(亦称为热交换器或热交换设备)是用来使热量从热流体传递到冷流体,以满足规定的工艺要求的装置,是对流传热及热传导的一种工业应用。 - -在一般空调设备中都有换热器,即空调室内机和室外机的冷热排;换热器作放热用时称为“冷凝器”,作吸热用时称为“蒸发器”,冷媒在此二者的物理反应相反。所以家用空调机作为冷气机时,室内机的换热器称作蒸发器,室外机的则称为冷凝器;换做暖气机的角色时,则相反称之,如图所示为蒸发循环制冷系统。研究换热器热仿真可以为优化设计、提高性能和可靠性、节能减排以及新技术研发提供重要的参考和指导。 - -
- ![heat_exchanger.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/heat_exchanger.png){ loading=lazy style="height:80%;width:80%" align="center" } -
蒸发循环制冷系统
-
- -换热器在工程和科学领域具有多方面的重要性,其作用和价值主要体现在以下几个方面: - -- 能源转换效率:换热器在能源转换中扮演着重要角色。通过优化热能的传递和利用,能够提高发电厂、工业生产和其他能源转换过程的效率。它们有助于将燃料中的热能转化为电能或机械能,最大限度地利用能源资源。 -- 工业生产优化:在化工、石油、制药等行业中,换热器用于加热、冷却、蒸馏和蒸发等工艺。通过有效的换热器设计和运用,可以改善生产效率、控制温度和压力,提高产品质量,并且减少能源消耗。 -- 温度控制与调节:换热器可以用于控制温度。在工业生产中,保持适当的温度对于反应速率、产品质量和设备寿命至关重要。换热器能够帮助调节和维持系统的温度在理想的操作范围内。 -- 环境保护与可持续发展:通过提高能源转换效率和工业生产过程中的能源利用率,换热器有助于减少对自然资源的依赖,并降低对环境的负面影响。能源效率的提高也可以减少温室气体排放,有利于环境保护和可持续发展。 -- 工程设计与创新:在工程设计领域,换热器的优化设计和创新推动了工程技术的发展。不断改进的换热器设计能够提高性能、减少空间占用并适应多种复杂工艺需求。 - -综上所述,换热器在工程和科学领域中的重要性体现在其对能源利用效率、工业生产过程优化、温度控制、环境保护和工程技术创新等方面的重要贡献。这些方面的不断改进和创新推动着工程技术的发展,有助于解决能源和环境方面的重要挑战。 - -## 2. 问题定义 - -### 2.1 问题描述 - -假设换热器内部流体流动是一维的,如图所示。 - -
- ![1DHE.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/1DHE.png){ loading=lazy style="height:80%;width:80%" align="center" } -
- -忽略壁面的传热热阻和轴向热传导;与外界无热量交换,如图所示。则冷热流体和传热壁面三个节点的能量守恒方程分别为: - -$$ -\begin{aligned} -& L\left(\frac{q_m c_p}{v}\right)_{\mathrm{c}} \frac{\partial T_{\mathrm{c}}}{\partial \tau}-L\left(q_m c_p\right)_{\mathrm{c}} \frac{\partial T_{\mathrm{c}}}{\partial x}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{c}}\left(T_{\mathrm{w}}-T_{\mathrm{c}}\right), \\ -& L\left(\frac{q_m c_p}{v}\right)_{\mathrm{h}} \frac{\partial T_{\mathrm{h}}}{\partial \tau}+L\left(q_m c_p\right)_{\mathrm{h}} \frac{\partial T_{\mathrm{h}}}{\partial x}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{h}}\left(T_{\mathrm{w}}-T_{\mathrm{h}}\right), \\ -& \left(M c_p\right)_{\mathrm{w}} \frac{\partial T_{\mathrm{w}}}{\partial \tau}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{h}}\left(T_{\mathrm{h}}-T_{\mathrm{w}}\right)+\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{c}}\left(T_{\mathrm{c}}-T_{\mathrm{w}}\right). -\end{aligned} -$$ - -其中: - -- $T$ 代表温度, -- $q_m$ 代表质量流量, -- $c_p$ 代表比热容, -- $v$ 代表流速, -- $L$ 代表流动长度, -- $\eta_{\mathrm{o}}$ 代表翅片表面效率, -- $\alpha$ 代表传热系数, -- $A$ 代表传热面积, -- $M$ 代表传热结构的质量, -- $\tau$ 代表对应时间, -- $x$ 代表流动方向, -- 下标 $\mathrm{h}$、$\mathrm{c}$ 和 $\mathrm{w}$ 分别表示热边流体、冷边流体和换热壁面。 - -换热器冷、热流体进出口参数满足能量守恒, 即: - -$$ -\left(q_m c_p\right)_{\mathrm{h}}\left(T_{\mathrm{h}, \text { in }}-T_{\mathrm{h}, \text { out }}\right)=\left(q_m c_p\right)_c\left(T_{\mathrm{c}, \text {out }}-T_{\mathrm{c}, \text {in }}\right). -$$ - -换热器效率 $\eta$ 为实际传热量与理论最大的传热量之比,即: - -$$ -\eta=\frac{\left(q_m c_p\right)_{\mathrm{h}}\left(T_{\mathrm{h}, \text { in }}-T_{\mathrm{h}, \text { out }}\right)}{\left(q_m c_p\right)_{\text {min }}\left(T_{\mathrm{h}, \text { in }}-T_{\mathrm{c}, \text { in }}\right)}, -$$ - -式中,下标 $min$ 表示冷热流体热容较小值。 - -### 2.2 PI-DeepONet模型 - -PI-DeepONet模型,将 DeepONet 和 PINN 方法相结合,是一种结合了物理信息和算子学习的深度神经网络模型。这种模型可以通过控制方程的物理信息来增强 DeepONet 模型,同时可以将不同的 PDE 配置分别作为不同的分支网络的输入数据,从而可以有效地用于在各种(参数和非参数)PDE 配置下进行超快速的模型预测。 - -对于换热器问题,PI-DeepONet 模型可以表示为如图所示的模型结构: - -
- ![PI-DeepONet.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/PI-DeepONet.png){ loading=lazy style="height:80%;width:80%" align="center" } -
- -如图所示,我们一共使用了 2 个分支网络和一个主干网络,分支网络分别输入热边的质量流量和冷边的质量流量,主干网络输入一维坐标点坐标和时间信息。每个分支网和主干网均输出 $q$ 维特征向量,通过Hadamard(逐元素)乘积组合所有这些输出特征,然后将所得向量相加为预测温度场的标量输出。 - -## 3. 问题求解 - -接下来开始讲解如何将该问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该换热器热仿真问题。为了快速理解 PaddleScience,接下来仅对模型构建、约束构建等关键步骤进行阐述,而其余细节请参考[API文档](../api/arch.md)。 - -### 3.1 模型构建 - -在换热器热仿真问题中,每一个已知的坐标点 $(t, x)$ 和每一组热边的质量流量和冷边的质量流量 $(q_{mh}, q_{mc})$ 都对应一组热边流体的温度 $T_h$ 、冷边流体的温度 $T_c$ 和换热壁面的温度 $T_h$ 三个待求解的未知量。我们在这里使用 2 个分支网络和一个主干网络,3 个网络均为 MLP(Multilayer Perceptron, 多层感知机) 。 2 个分支网络分别表示 $(q_{mh}, q_{mc})$ 到输出函数 $(b_1,b_2)$ 的映射函数 $f_1,f_2: \mathbb{R}^2 \to \mathbb{R}^{3q}$,即: - -$$ -\begin{aligned} -b_1 &= f_1(q_{mh}),\\ -b_2 &= f_2(q_{mc}). -\end{aligned} -$$ - -上式中 $f_1,f_2$ 均为 MLP 模型,$(b_1,b_2)$ 分别为两个分支网络的输出函数,$3q$ 为输出函数的维数。主干网络表示 $(t, x)$ 到输出函数 $t_0$ 的映射函数 $f_3: \mathbb{R}^2 \to \mathbb{R}^{3q}$,即: - -$$ -\begin{aligned} -t_0 &= f_3(t,x). -\end{aligned} -$$ - -上式中 $f_3$ 为 MLP 模型,$(t_0)$ 为主支网络的输出函数,$3q$ 为输出函数的维数。我们可以将两个分支网络和主干网络的输出函数 $(b_1,b_2, t_0)$ 分成3组,然后对每一组的输出函数分别进行Hadamard(逐元素)乘积再相加得到标量温度场,即: - -$$ -\begin{aligned} -T_h &= \sum_{i=1}^q b_1^ib_2^i t_0^i,\\ -T_c &= \sum_{i=q+1}^{2q} b_1^ib_2^i t_0^i,\\ -T_w &= \sum_{i=2q+1}^{3q} b_1^ib_2^i t_0^i. -\end{aligned} -$$ - -我们定义 PaddleScience 内置的 HEDeepONets 模型类,并调用,PaddleScience 代码表示如下 - -``` py linenums="33" ---8<-- -examples/heat_exchanger/heat_exchanger.py:33:34 ---8<-- -``` - -这样我们就实例化出了一个拥有 3 个 MLP 模型的 HEDeepONets 模型,每个分支网络包含 9 层隐藏神经元,每层神经元数为 256,主干网络包含 6 层隐藏神经元,每层神经元数为 128,使用 "swish" 作为激活函数,并包含三个输出函数 $T_h,T_c,T_w$ 的神经网络模型 `model`。 - -### 3.2 计算域构建 - -对本文中换热器问题构造训练区域,即以 [0, 1] 的一维区域,且时间域为 21 个时刻 [0,1,2,...,21],该区域可以直接使用 PaddleScience 内置的空间几何 `Interval` 和时间域 `TimeDomain`,组合成时间-空间的 `TimeXGeometry` 计算域。代码如下 - -``` py linenums="36" ---8<-- -examples/heat_exchanger/heat_exchanger.py:36:43 ---8<-- -``` - -???+ tip "提示" - - `Rectangle` 和 `TimeDomain` 是两种可以单独使用的 `Geometry` 派生类。 - - 如输入数据只来自于二维矩形几何域,则可以直接使用 `ppsci.geometry.Rectangle(...)` 创建空间几何域对象; - - 如输入数据只来自一维时间域,则可以直接使用 `ppsci.geometry.TimeDomain(...)` 构建时间域对象。 - -### 3.3 输入数据构建 - -- 通过 `TimeXGeometry` 计算域来构建输入的时间和空间均匀数据, -- 通过 `np.random.rand` 来生成 (0,2) 之间的随机数,这些随机数用于构建热边和冷边的质量流量的训练和测试数据。 - -对时间、空间均匀数据和热边、冷边的质量流量数据进行组合,得到最终的训练和测试输入数据。代码如下 - -``` py linenums="45" ---8<-- -examples/heat_exchanger/heat_exchanger.py:45:63 ---8<-- -``` - -然后对训练数据按照空间坐标和时间进行分类,将训练数据和测试数据分类成左边界数据、内部数据、右边界数据以及初值数据。代码如下 - -``` py linenums="65" ---8<-- -examples/heat_exchanger/heat_exchanger.py:65:124 ---8<-- -``` - -### 3.4 方程构建 - -换热器热仿真问题由 [2.1 问题描述](#21) 中描述的方程组成,这里我们定义 PaddleScience 内置的 `HeatEquation` 方程类来构建该方程。指定该类的参数均为1,代码如下 - -``` py linenums="126" ---8<-- -examples/heat_exchanger/heat_exchanger.py:126:136 ---8<-- -``` - -### 3.5 约束构建 - -换热器热仿真问题由 [2.1 问题描述](#21) 中描述的方程组成,我们设置以下边界条件: - -$$ -\begin{aligned} -T_h(t,0) &= 10,\\ -T_c(t,1) &= 1. -\end{aligned} -$$ - -同时,我们设置初值条件: - -$$ -\begin{aligned} -T_h(0,x) &= 10,\\ -T_c(0,x) &= 1,\\ -T_w(0,x) &= 5.5. -\end{aligned} -$$ - -此时我们对左边界数据、内部数据、右边界数据以及初值数据设置四个约束条件,接下来使用 PaddleScience 内置的 `SupervisedConstraint` 构建上述四种约束条件,代码如下 - -``` py linenums="138" ---8<-- -examples/heat_exchanger/heat_exchanger.py:138:263 ---8<-- -``` - -`SupervisedConstraint` 的第一个参数是监督约束的读取配置,其中 `“dataset”` 字段表示使用的训练数据集信息,各个字段分别表示: - -1. `name`: 数据集类型,此处 `"NamedArrayDataset"` 表示分 batch 顺序读取数据; -2. `input`: 输入变量名; -3. `label`: 标签变量名; -4. 
`weight`: 权重大小。 - -"sampler" 字段定义了使用的 `Sampler` 类名为 `BatchSampler`,另外还指定了该类初始化时参数 `drop_last` 为 `False`、`shuffle` 为 `True`。 - -第二个参数是损失函数,此处我们选用常用的 MSE 函数,且 `reduction` 为 `"mean"`,即我们会将参与计算的所有数据点产生的损失项求和取平均; - -第三个参数是约束条件的名字,我们需要给每一个约束条件命名,方便后续对其索引。 - -在微分方程约束和监督约束构建完毕之后,以我们刚才的命名为关键字,封装到一个字典中,方便后续访问。 - -``` py linenums="264" ---8<-- -examples/heat_exchanger/heat_exchanger.py:264:270 ---8<-- -``` - -### 3.6 优化器构建 - -接下来我们需要指定学习率,学习率设为 0.001,训练过程会调用优化器来更新模型参数,此处选择较为常用的 `Adam` 优化器。 - -``` py linenums="272" ---8<-- -examples/heat_exchanger/heat_exchanger.py:272:273 ---8<-- -``` - -### 3.7 评估器构建 - -在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,我们使用 `ppsci.validate.SupervisedValidator` 构建评估器。 - -``` py linenums="275" ---8<-- -examples/heat_exchanger/heat_exchanger.py:275:349 ---8<-- -``` - -配置与 [3.5 约束构建](#35) 的设置类似。 - -### 3.8 模型训练 - -完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估。 - -``` py linenums="351" ---8<-- -examples/heat_exchanger/heat_exchanger.py:351:371 ---8<-- -``` - -### 3.9 结果可视化 - -最后在给定的可视化区域上进行预测并可视化,设冷边和热边的质量流量均为1,可视化数据是区域内的一维点集,每个时刻 $t$ 对应的坐标是 $x^i$,对应值是 $(T_h^{i}, T_c^i, T_w^i)$,在此我们画出 $T_h,T_c,T_w$ 随时间的变化图像。同时根据换热器效率的公式计算出换热器效率 $\eta$ ,画出换热器效率 $\eta$ 随时间的变化图像,代码如下: - -``` py linenums="373" ---8<-- -examples/heat_exchanger/heat_exchanger.py:373:430 ---8<-- -``` - -## 4. 完整代码 - -``` py linenums="1" title="heat_exchanger.py" ---8<-- -examples/heat_exchanger/heat_exchanger.py ---8<-- -``` - -## 5. 结果展示 - -如图所示为不同时刻热边温度、冷边温度、壁面温度 $T_h, T_c, T_w$ 随传热面积 $A$ 的变化图像以及换热器效率 $\eta$ 随时间的变化图像。 - -???+ info "说明" - - 本案例只作为demo展示,尚未进行充分调优,下方部分展示结果可能与 OpenFOAM 存在一定差别。 - -
- ![T_h.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/T_h.png){ loading=lazy style="height:80%;width:80%" align="center" } -
不同时刻热边温度 T_h 随传热面积 A 的变化图像
-
- -
- ![T_c.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/T_c.png){ loading=lazy style="height:80%;width:80%" align="center" } -
不同时刻冷边温度 T_c 随传热面积 A 的变化图像
-
- -
- ![T_w.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/T_w.png){ loading=lazy style="height:80%;width:80%" align="center" } -
不同时刻壁面温度 T_w 随传热面积 A 的变化图像
-
- -
- ![eta.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/eta.png){ loading=lazy style="height:80%;width:80%" align="center" } -
换热器效率随时间的变化图像
-
- -从图中可以看出: - -- 热边温度在 $A=1$ 处随时间的变化逐渐递减,冷边温度在 $A=0$ 处随时间的变化逐渐递增; -- 壁面温度在 $A=1$ 处随时间的变化逐渐递减,在 $A=0$ 处随时间的变化逐渐递增; -- 换热器效率随时间的变化逐渐递增,在 $t=21$ 时达到最大值。 - -同时我们可以假设热边质量流量和冷边质量流量相等,即 $q_h=q_c$,定义传热单元数: - -$$ -NTU = \dfrac{Ak}{(q_mc)_{min}}. -$$ - -对不同的传热单元数,我们可以分别计算对应的换热器效率,并画出换热器效率随传热单元数的变化图像,如图所示。 - -
- ![eta-1.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/eta-1.png){ loading=lazy style="height:80%;width:80%" align="center" } -
换热器效率随传热单元数的变化图像
-
- -从图中可以看出:换热器效率随传热单元数的变化逐渐递增,这也符合实际的换热器效率随传热单元数的变化规律。 +# Heat_Exchanger + +=== "模型训练命令" + + ``` sh + python heat_exchanger.py + ``` + +=== "模型评估命令" + + ``` sh + python heat_exchanger.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/HEDeepONet/HEDeepONet_pretrained.pdparams + ``` + +=== "模型导出命令" + + ``` sh + python heat_exchanger.py mode=export + ``` + +=== "模型推理命令" + + ``` sh + python heat_exchanger.py mode=infer + ``` + +| 预训练模型 | 指标 | +|:--| :--| +| [heat_exchanger_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/HEDeepONet/HEDeepONet_pretrained.pdparams) | The L2 norm error between the actual heat exchanger efficiency and the predicted heat exchanger efficiency: 0.02087
MSE.heat_boundary(interior_mse): 0.52005
MSE.cold_boundary(interior_mse): 0.16590
MSE.wall(interior_mse): 0.01203 | + +## 1. 背景简介 + +### 1.1 换热器 + +换热器(亦称为热交换器或热交换设备)是用来使热量从热流体传递到冷流体,以满足规定的工艺要求的装置,是对流传热及热传导的一种工业应用。 + +在一般空调设备中都有换热器,即空调室内机和室外机的冷热排;换热器作放热用时称为“冷凝器”,作吸热用时称为“蒸发器”,冷媒在此二者的物理反应相反。所以家用空调机作为冷气机时,室内机的换热器称作蒸发器,室外机的则称为冷凝器;换做暖气机的角色时,则相反称之,如图所示为蒸发循环制冷系统。研究换热器热仿真可以为优化设计、提高性能和可靠性、节能减排以及新技术研发提供重要的参考和指导。 + +
+ ![heat_exchanger.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/heat_exchanger.png){ loading=lazy style="height:80%;width:80%" align="center" } +
蒸发循环制冷系统
+
+ +换热器在工程和科学领域具有多方面的重要性,其作用和价值主要体现在以下几个方面: + +- 能源转换效率:换热器在能源转换中扮演着重要角色。通过优化热能的传递和利用,能够提高发电厂、工业生产和其他能源转换过程的效率。它们有助于将燃料中的热能转化为电能或机械能,最大限度地利用能源资源。 +- 工业生产优化:在化工、石油、制药等行业中,换热器用于加热、冷却、蒸馏和蒸发等工艺。通过有效的换热器设计和运用,可以改善生产效率、控制温度和压力,提高产品质量,并且减少能源消耗。 +- 温度控制与调节:换热器可以用于控制温度。在工业生产中,保持适当的温度对于反应速率、产品质量和设备寿命至关重要。换热器能够帮助调节和维持系统的温度在理想的操作范围内。 +- 环境保护与可持续发展:通过提高能源转换效率和工业生产过程中的能源利用率,换热器有助于减少对自然资源的依赖,并降低对环境的负面影响。能源效率的提高也可以减少温室气体排放,有利于环境保护和可持续发展。 +- 工程设计与创新:在工程设计领域,换热器的优化设计和创新推动了工程技术的发展。不断改进的换热器设计能够提高性能、减少空间占用并适应多种复杂工艺需求。 + +综上所述,换热器在工程和科学领域中的重要性体现在其对能源利用效率、工业生产过程优化、温度控制、环境保护和工程技术创新等方面的重要贡献。这些方面的不断改进和创新推动着工程技术的发展,有助于解决能源和环境方面的重要挑战。 + +## 2. 问题定义 + +### 2.1 问题描述 + +假设换热器内部流体流动是一维的,如图所示。 + +
+ ![1DHE.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/1DHE.png){ loading=lazy style="height:80%;width:80%" align="center" } +
+ +忽略壁面的传热热阻和轴向热传导;与外界无热量交换,如图所示。则冷热流体和传热壁面三个节点的能量守恒方程分别为: + +$$ +\begin{aligned} +& L\left(\frac{q_m c_p}{v}\right)_{\mathrm{c}} \frac{\partial T_{\mathrm{c}}}{\partial \tau}-L\left(q_m c_p\right)_{\mathrm{c}} \frac{\partial T_{\mathrm{c}}}{\partial x}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{c}}\left(T_{\mathrm{w}}-T_{\mathrm{c}}\right), \\ +& L\left(\frac{q_m c_p}{v}\right)_{\mathrm{h}} \frac{\partial T_{\mathrm{h}}}{\partial \tau}+L\left(q_m c_p\right)_{\mathrm{h}} \frac{\partial T_{\mathrm{h}}}{\partial x}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{h}}\left(T_{\mathrm{w}}-T_{\mathrm{h}}\right), \\ +& \left(M c_p\right)_{\mathrm{w}} \frac{\partial T_{\mathrm{w}}}{\partial \tau}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{h}}\left(T_{\mathrm{h}}-T_{\mathrm{w}}\right)+\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{c}}\left(T_{\mathrm{c}}-T_{\mathrm{w}}\right). +\end{aligned} +$$ + +其中: + +- $T$ 代表温度, +- $q_m$ 代表质量流量, +- $c_p$ 代表比热容, +- $v$ 代表流速, +- $L$ 代表流动长度, +- $\eta_{\mathrm{o}}$ 代表翅片表面效率, +- $\alpha$ 代表传热系数, +- $A$ 代表传热面积, +- $M$ 代表传热结构的质量, +- $\tau$ 代表对应时间, +- $x$ 代表流动方向, +- 下标 $\mathrm{h}$、$\mathrm{c}$ 和 $\mathrm{w}$ 分别表示热边流体、冷边流体和换热壁面。 + +换热器冷、热流体进出口参数满足能量守恒, 即: + +$$ +\left(q_m c_p\right)_{\mathrm{h}}\left(T_{\mathrm{h}, \text { in }}-T_{\mathrm{h}, \text { out }}\right)=\left(q_m c_p\right)_c\left(T_{\mathrm{c}, \text {out }}-T_{\mathrm{c}, \text {in }}\right). +$$ + +换热器效率 $\eta$ 为实际传热量与理论最大的传热量之比,即: + +$$ +\eta=\frac{\left(q_m c_p\right)_{\mathrm{h}}\left(T_{\mathrm{h}, \text { in }}-T_{\mathrm{h}, \text { out }}\right)}{\left(q_m c_p\right)_{\text {min }}\left(T_{\mathrm{h}, \text { in }}-T_{\mathrm{c}, \text { in }}\right)}, +$$ + +式中,下标 $min$ 表示冷热流体热容较小值。 + +### 2.2 PI-DeepONet模型 + +PI-DeepONet模型,将 DeepONet 和 PINN 方法相结合,是一种结合了物理信息和算子学习的深度神经网络模型。这种模型可以通过控制方程的物理信息来增强 DeepONet 模型,同时可以将不同的 PDE 配置分别作为不同的分支网络的输入数据,从而可以有效地用于在各种(参数和非参数)PDE 配置下进行超快速的模型预测。 + +对于换热器问题,PI-DeepONet 模型可以表示为如图所示的模型结构: + +
+ ![PI-DeepONet.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/PI-DeepONet.png){ loading=lazy style="height:80%;width:80%" align="center" } +
+ +如图所示,我们一共使用了 2 个分支网络和一个主干网络,分支网络分别输入热边的质量流量和冷边的质量流量,主干网络输入一维坐标点坐标和时间信息。每个分支网和主干网均输出 $q$ 维特征向量,通过Hadamard(逐元素)乘积组合所有这些输出特征,然后将所得向量相加为预测温度场的标量输出。 + +## 3. 问题求解 + +接下来开始讲解如何将该问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该换热器热仿真问题。为了快速理解 PaddleScience,接下来仅对模型构建、约束构建等关键步骤进行阐述,而其余细节请参考[API文档](../api/arch.md)。 + +### 3.1 模型构建 + +在换热器热仿真问题中,每一个已知的坐标点 $(t, x)$ 和每一组热边的质量流量和冷边的质量流量 $(q_{mh}, q_{mc})$ 都对应一组热边流体的温度 $T_h$ 、冷边流体的温度 $T_c$ 和换热壁面的温度 $T_h$ 三个待求解的未知量。我们在这里使用 2 个分支网络和一个主干网络,3 个网络均为 MLP(Multilayer Perceptron, 多层感知机) 。 2 个分支网络分别表示 $(q_{mh}, q_{mc})$ 到输出函数 $(b_1,b_2)$ 的映射函数 $f_1,f_2: \mathbb{R}^2 \to \mathbb{R}^{3q}$,即: + +$$ +\begin{aligned} +b_1 &= f_1(q_{mh}),\\ +b_2 &= f_2(q_{mc}). +\end{aligned} +$$ + +上式中 $f_1,f_2$ 均为 MLP 模型,$(b_1,b_2)$ 分别为两个分支网络的输出函数,$3q$ 为输出函数的维数。主干网络表示 $(t, x)$ 到输出函数 $t_0$ 的映射函数 $f_3: \mathbb{R}^2 \to \mathbb{R}^{3q}$,即: + +$$ +\begin{aligned} +t_0 &= f_3(t,x). +\end{aligned} +$$ + +上式中 $f_3$ 为 MLP 模型,$(t_0)$ 为主支网络的输出函数,$3q$ 为输出函数的维数。我们可以将两个分支网络和主干网络的输出函数 $(b_1,b_2, t_0)$ 分成3组,然后对每一组的输出函数分别进行Hadamard(逐元素)乘积再相加得到标量温度场,即: + +$$ +\begin{aligned} +T_h &= \sum_{i=1}^q b_1^ib_2^i t_0^i,\\ +T_c &= \sum_{i=q+1}^{2q} b_1^ib_2^i t_0^i,\\ +T_w &= \sum_{i=2q+1}^{3q} b_1^ib_2^i t_0^i. +\end{aligned} +$$ + +我们定义 PaddleScience 内置的 HEDeepONets 模型类,并调用,PaddleScience 代码表示如下 + +``` py linenums="33" +--8<-- +examples/heat_exchanger/heat_exchanger.py:33:34 +--8<-- +``` + +这样我们就实例化出了一个拥有 3 个 MLP 模型的 HEDeepONets 模型,每个分支网络包含 9 层隐藏神经元,每层神经元数为 256,主干网络包含 6 层隐藏神经元,每层神经元数为 128,使用 "swish" 作为激活函数,并包含三个输出函数 $T_h,T_c,T_w$ 的神经网络模型 `model`。 + +### 3.2 计算域构建 + +对本文中换热器问题构造训练区域,即以 [0, 1] 的一维区域,且时间域为 21 个时刻 [0,1,2,...,21],该区域可以直接使用 PaddleScience 内置的空间几何 `Interval` 和时间域 `TimeDomain`,组合成时间-空间的 `TimeXGeometry` 计算域。代码如下 + +``` py linenums="36" +--8<-- +examples/heat_exchanger/heat_exchanger.py:36:43 +--8<-- +``` + +???+ tip "提示" + + `Rectangle` 和 `TimeDomain` 是两种可以单独使用的 `Geometry` 派生类。 + + 如输入数据只来自于二维矩形几何域,则可以直接使用 `ppsci.geometry.Rectangle(...)` 创建空间几何域对象; + + 如输入数据只来自一维时间域,则可以直接使用 `ppsci.geometry.TimeDomain(...)` 构建时间域对象。 + +### 3.3 输入数据构建 + +- 通过 `TimeXGeometry` 计算域来构建输入的时间和空间均匀数据, +- 通过 `np.random.rand` 来生成 (0,2) 之间的随机数,这些随机数用于构建热边和冷边的质量流量的训练和测试数据。 + +对时间、空间均匀数据和热边、冷边的质量流量数据进行组合,得到最终的训练和测试输入数据。代码如下 + +``` py linenums="45" +--8<-- +examples/heat_exchanger/heat_exchanger.py:45:63 +--8<-- +``` + +然后对训练数据按照空间坐标和时间进行分类,将训练数据和测试数据分类成左边界数据、内部数据、右边界数据以及初值数据。代码如下 + +``` py linenums="65" +--8<-- +examples/heat_exchanger/heat_exchanger.py:65:124 +--8<-- +``` + +### 3.4 方程构建 + +换热器热仿真问题由 [2.1 问题描述](#21) 中描述的方程组成,这里我们定义 PaddleScience 内置的 `HeatEquation` 方程类来构建该方程。指定该类的参数均为1,代码如下 + +``` py linenums="126" +--8<-- +examples/heat_exchanger/heat_exchanger.py:126:136 +--8<-- +``` + +### 3.5 约束构建 + +换热器热仿真问题由 [2.1 问题描述](#21) 中描述的方程组成,我们设置以下边界条件: + +$$ +\begin{aligned} +T_h(t,0) &= 10,\\ +T_c(t,1) &= 1. +\end{aligned} +$$ + +同时,我们设置初值条件: + +$$ +\begin{aligned} +T_h(0,x) &= 10,\\ +T_c(0,x) &= 1,\\ +T_w(0,x) &= 5.5. +\end{aligned} +$$ + +此时我们对左边界数据、内部数据、右边界数据以及初值数据设置四个约束条件,接下来使用 PaddleScience 内置的 `SupervisedConstraint` 构建上述四种约束条件,代码如下 + +``` py linenums="138" +--8<-- +examples/heat_exchanger/heat_exchanger.py:138:263 +--8<-- +``` + +`SupervisedConstraint` 的第一个参数是监督约束的读取配置,其中 `“dataset”` 字段表示使用的训练数据集信息,各个字段分别表示: + +1. `name`: 数据集类型,此处 `"NamedArrayDataset"` 表示分 batch 顺序读取数据; +2. `input`: 输入变量名; +3. `label`: 标签变量名; +4. 
`weight`: 权重大小。 + +"sampler" 字段定义了使用的 `Sampler` 类名为 `BatchSampler`,另外还指定了该类初始化时参数 `drop_last` 为 `False`、`shuffle` 为 `True`。 + +第二个参数是损失函数,此处我们选用常用的 MSE 函数,且 `reduction` 为 `"mean"`,即我们会将参与计算的所有数据点产生的损失项求和取平均; + +第三个参数是约束条件的名字,我们需要给每一个约束条件命名,方便后续对其索引。 + +在微分方程约束和监督约束构建完毕之后,以我们刚才的命名为关键字,封装到一个字典中,方便后续访问。 + +``` py linenums="264" +--8<-- +examples/heat_exchanger/heat_exchanger.py:264:270 +--8<-- +``` + +### 3.6 优化器构建 + +接下来我们需要指定学习率,学习率设为 0.001,训练过程会调用优化器来更新模型参数,此处选择较为常用的 `Adam` 优化器。 + +``` py linenums="272" +--8<-- +examples/heat_exchanger/heat_exchanger.py:272:273 +--8<-- +``` + +### 3.7 评估器构建 + +在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,我们使用 `ppsci.validate.SupervisedValidator` 构建评估器。 + +``` py linenums="275" +--8<-- +examples/heat_exchanger/heat_exchanger.py:275:349 +--8<-- +``` + +配置与 [3.5 约束构建](#35) 的设置类似。 + +### 3.8 模型训练 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估。 + +``` py linenums="351" +--8<-- +examples/heat_exchanger/heat_exchanger.py:351:371 +--8<-- +``` + +### 3.9 结果可视化 + +最后在给定的可视化区域上进行预测并可视化,设冷边和热边的质量流量均为1,可视化数据是区域内的一维点集,每个时刻 $t$ 对应的坐标是 $x^i$,对应值是 $(T_h^{i}, T_c^i, T_w^i)$,在此我们画出 $T_h,T_c,T_w$ 随时间的变化图像。同时根据换热器效率的公式计算出换热器效率 $\eta$ ,画出换热器效率 $\eta$ 随时间的变化图像,代码如下: + +``` py linenums="373" +--8<-- +examples/heat_exchanger/heat_exchanger.py:373:430 +--8<-- +``` + +## 4. 完整代码 + +``` py linenums="1" title="heat_exchanger.py" +--8<-- +examples/heat_exchanger/heat_exchanger.py +--8<-- +``` + +## 5. 结果展示 + +如图所示为不同时刻热边温度、冷边温度、壁面温度 $T_h, T_c, T_w$ 随传热面积 $A$ 的变化图像以及换热器效率 $\eta$ 随时间的变化图像。 + +???+ info "说明" + + 本案例只作为demo展示,尚未进行充分调优,下方部分展示结果可能与 OpenFOAM 存在一定差别。 + +
+ ![T_h.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/T_h.png){ loading=lazy style="height:80%;width:80%" align="center" } +
不同时刻热边温度 T_h 随传热面积 A 的变化图像
+
+ +
+ ![T_c.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/T_c.png){ loading=lazy style="height:80%;width:80%" align="center" } +
不同时刻冷边温度 T_c 随传热面积 A 的变化图像
+
+ +
+ ![T_w.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/T_w.png){ loading=lazy style="height:80%;width:80%" align="center" } +
不同时刻壁面温度 T_w 随传热面积 A 的变化图像
+
+ +
+ ![eta.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/eta.png){ loading=lazy style="height:80%;width:80%" align="center" } +
换热器效率随时间的变化图像
+
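在结合上图分析之前,这里补充一段示意代码(并非案例源码,温度与 $q_m c_p$ 均为假设的占位数值),说明如何按照 [2.1 问题描述](#21) 中的效率公式,由冷、热边进出口温度的预测值估算换热器效率 $\eta$:

``` py
# 占位数值:实际使用时应替换为模型在某一时刻预测的进出口温度与案例设定的 q_m * c_p
T_h_in, T_h_out = 10.0, 6.2   # 热边进、出口温度(假设)
T_c_in = 1.0                  # 冷边进口温度(假设)
qmcp_h, qmcp_c = 1.0, 1.0     # 冷、热边的 q_m * c_p(假设相等)

qmcp_min = min(qmcp_h, qmcp_c)
# 按 2.1 节定义:eta = (q_m c_p)_h (T_h,in - T_h,out) / [(q_m c_p)_min (T_h,in - T_c,in)]
eta = qmcp_h * (T_h_in - T_h_out) / (qmcp_min * (T_h_in - T_c_in))
print(f"eta = {eta:.4f}")
```

实际评估时,只需将上述占位温度替换为 3.9 节可视化流程中模型在各时刻的预测结果,即可得到 $\eta$ 随时间的变化曲线。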
+ +从图中可以看出: + +- 热边温度在 $A=1$ 处随时间的变化逐渐递减,冷边温度在 $A=0$ 处随时间的变化逐渐递增; +- 壁面温度在 $A=1$ 处随时间的变化逐渐递减,在 $A=0$ 处随时间的变化逐渐递增; +- 换热器效率随时间的变化逐渐递增,在 $t=21$ 时达到最大值。 + +同时我们可以假设热边质量流量和冷边质量流量相等,即 $q_h=q_c$,定义传热单元数: + +$$ +NTU = \dfrac{Ak}{(q_mc)_{min}}. +$$ + +对不同的传热单元数,我们可以分别计算对应的换热器效率,并画出换热器效率随传热单元数的变化图像,如图所示。 + +
+ ![eta-1.png](https://paddle-org.bj.bcebos.com/paddlescience/docs/HEDeepONet/eta-1.png){ loading=lazy style="height:80%;width:80%" align="center" } +
换热器效率随传热单元数的变化图像
+
+ +从图中可以看出:换热器效率随传热单元数的变化逐渐递增,这也符合实际的换热器效率随传热单元数的变化规律。 diff --git a/docs/zh/examples/images/ML/psc_nn_overview.png b/docs/zh/examples/images/ML/psc_nn_overview.png new file mode 100644 index 0000000000..f8afd96416 Binary files /dev/null and b/docs/zh/examples/images/ML/psc_nn_overview.png differ diff --git a/docs/zh/examples/params/solar_cell_pretrained.pdparams b/docs/zh/examples/params/solar_cell_pretrained.pdparams new file mode 100644 index 0000000000..b22937a19b Binary files /dev/null and b/docs/zh/examples/params/solar_cell_pretrained.pdparams differ diff --git a/docs/zh/examples/perovskite_solar_cells_nn.md b/docs/zh/examples/perovskite_solar_cells_nn.md new file mode 100644 index 0000000000..09603d54ae --- /dev/null +++ b/docs/zh/examples/perovskite_solar_cells_nn.md @@ -0,0 +1,173 @@ +# psc_NN(Machine Learning for Perovskite Solar Cells: An Open-Source Pipeline) + +!!! note "注意事项" + + 1. 开始训练前,请确保数据集已正确放置在 `data/cleaned/` 目录下。 + 2. 训练和评估需要安装额外的依赖包,请使用 `pip install -r requirements.txt` 安装。 + 3. 为获得最佳性能,建议使用 GPU 进行训练。 + +=== "模型训练命令" + + ``` sh + python psc_nn.py mode=train + ``` + +=== "模型评估命令" + + ``` sh + # 使用本地预训练模型 + python psc_nn.py mode=eval eval.pretrained_model_path="Your pdparams path" + ``` + + ``` sh + # 或使用远程预训练模型 + python psc_nn.py mode=eval eval.pretrained_model_path="https://paddle-org.bj.bcebos.com/paddlescience/models/PerovskiteSolarCells/solar_cell_pretrained.pdparams" + ``` + +| 预训练模型 | 指标 | +|:--| :--| +| [solar_cell_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/PerovskiteSolarCells/solar_cell_pretrained.pdparams) | RMSE: 3.91798 | + +## 1. 背景简介 + +太阳能电池是一种通过光电效应将光能直接转换为电能的关键能源器件,其性能预测是优化和设计太阳能电池的重要环节。然而,传统的性能预测方法往往依赖于复杂的物理模拟和大量的实验测试,不仅成本高昂,且耗时较长,制约了研究与开发的效率。 + +近年来,深度学习和机器学习技术的快速发展,为太阳能电池性能预测提供了创新的方法。通过机器学习技术,可以显著加快开发速度,同时实现与实验结果相当的预测精度。特别是在钙钛矿太阳能电池研究中,材料的化学组成和结构多样性为模型训练带来了新的挑战。为了解决这一问题,研究者们通常将材料的特性转换为固定长度的特征向量,以适配机器学习模型。尽管如此,不同性能指标的特征表示设计仍需不断优化,同时对模型预测结果的可解释性要求也更为严格。 + +本研究中,通过利用包含钙钛矿太阳能电池特性信息的全面数据库(PDP),我们构建并评估了包括 XGBoost、psc_nn 在内的多种机器学习模型,专注于预测短路电流密度(Jsc)。研究结果表明,结合深度学习与超参数优化工具(如 Optuna)能够显著提升太阳能电池设计的效率,为新型太阳能电池研发提供了更精确且高效的解决方案。 + +## 2. 模型原理 + +本章节仅对太阳能电池性能预测模型的原理进行简单地介绍,详细的理论推导请阅读 [Machine Learning for Perovskite Solar Cells: An Open-Source Pipeline](https://onlinelibrary.wiley.com/doi/10.1002/apxr.202400060)。 + +该方法的主要思想是通过人工神经网络建立光谱响应数据与短路电流密度(Jsc)之间的非线性映射关系。人工神经网络模型的总体结构如下图所示: + +![psc_nn_overview](images/ML/psc_nn_overview.png) + +本案例采用多层感知机(MLP)作为基础模型架构,主要包括以下几个部分: + +1. 输入层:接收 2808 维的光谱响应数据 +2. 隐藏层:4-6 层全连接层,每层的神经元数量通过 Optuna 优化 +3. 激活函数:使用 ReLU 激活函数引入非线性特性 +4. 输出层:输出预测的 Jsc 值 + +通过这种方式,我们可以自动找到最适合当前任务的模型配置,提高模型的预测性能。 + +## 3. 模型实现 + +本章节我们讲解如何基于 PaddleScience 代码实现钙钛矿太阳能电池性能预测模型。本案例结合 Optuna 框架进行超参数优化,并使用 PaddleScience 内置的各种功能模块。为了快速理解 PaddleScience,接下来仅对模型构建、约束构建、评估器构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 + +### 3.1 数据集介绍 + +本案例使用的数据集包含 Perovskite Database Project(PDP) 数据。数据集分为以下几个部分: + +1. 训练集: + - 特征数据:`data/cleaned/training.csv` + - 标签数据:`data/cleaned/training_labels.csv` +2. 
验证集: + - 特征数据:`data/cleaned/validation.csv` + - 标签数据:`data/cleaned/validation_labels.csv` + +为了方便数据处理,我们实现了一个辅助函数 `create_tensor_dict` 来创建输入和标签的 tensor 字典: + +``` py linenums="36" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:36:42 +--8<-- +``` + +数据集的读取和预处理代码如下: + +``` py linenums="123" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:123:143 +--8<-- +``` + +为了进行超参数优化,我们将训练集进一步划分为训练集和验证集: + +``` py linenums="135" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:135:140 +--8<-- +``` + +### 3.2 模型构建 + +本案例使用 PaddleScience 内置的 `ppsci.arch.MLP` 构建多层感知机模型。模型的超参数通过 Optuna 框架进行优化,主要包括: + +1. 网络层数:4-6层 +2. 每层神经元数量:10-input_dim/2 +3. 激活函数:ReLU +4. 输入维度:2808(光谱响应数据维度) +5. 输出维度:1(Jsc 预测值) + +模型定义代码如下: + +``` py linenums="104" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:104:121 +--8<-- +``` + +### 3.3 损失函数设计 + +考虑到数据集中不同样本的重要性可能不同,我们设计了一个加权均方误差损失函数。该函数对较大的 Jsc 值赋予更高的权重,以提高模型在高性能太阳能电池上的预测准确性: + +``` py linenums="24" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:24:34 +--8<-- +``` + +### 3.4 约束构建 + +本案例基于数据驱动的方法求解问题,因此使用 PaddleScience 内置的 `SupervisedConstraint` 构建监督约束。为了减少代码重复,我们实现了 `create_constraint` 函数来创建监督约束: + +``` py linenums="44" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:44:64 +--8<-- +``` + +### 3.5 评估器构建 + +为了实时监测模型的训练情况,我们实现了 `create_validator` 函数来创建评估器: + +``` py linenums="66" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:66:82 +--8<-- +``` + +### 3.6 优化器构建 + +为了统一管理优化器和学习率调度器的创建,我们实现了 `create_optimizer` 函数: + +``` py linenums="84" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:84:102 +--8<-- +``` + +### 3.7 模型训练与评估 + +在训练过程中,我们使用上述封装的函数来创建数据字典、约束、评估器和优化器: + +``` py linenums="202" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py:202:215 +--8<-- +``` + +## 4. 完整代码 + +``` py linenums="1" title="examples/perovskite_solar_cells/psc_nn.py" +--8<-- +examples/perovskite_solar_cells/psc_nn.py +--8<-- +``` + +## 5. 参考文献 + +- [Machine Learning for Perovskite Solar Cells: An Open-Source Pipeline](https://onlinelibrary.wiley.com/doi/10.1002/apxr.202400060) diff --git a/docs/zh/examples/pirbn.md b/docs/zh/examples/pirbn.md index 39c78884ba..9d46c099d3 100644 --- a/docs/zh/examples/pirbn.md +++ b/docs/zh/examples/pirbn.md @@ -1,155 +1,155 @@ -# PIRBN - -=== "模型训练和评估命令" - - ``` sh - cd PaddleScience/jointContribution/PIRBN - python main.py - ``` - -## 1. 背景简介 - -我们最近发现经过训练,物理信息神经网络(PINN)往往会成为局部近似函数。这一观察结果促使我们开发了一种新型的物理-信息径向基网络(PIRBN),该网络在整个训练过程中都能够维持局部近似性质。与深度神经网络不同,PIRBN 仅包含一个隐藏层和一个径向基“激活”函数。在适当的条件下,我们证明了使用梯度下降方法训练 PIRBN 可以收敛到高斯过程。此外,我们还通过神经邻近核(NTK)理论研究了 PIRBN 的训练动态。此外,我们还对 PIRBN 的初始化策略进行了全面调查。基于数值示例,我们发现 PIRBN 在解决具有高频特征和病态计算域的非线性偏微分方程方面比PINN更有效。此外,现有的 PINN 数值技术,如自适应学习、分解和不同类型的损失函数,也适用于 PIRBN。 - -
- ![介绍](https://paddle-org.bj.bcebos.com/paddlescience/docs/PIRBN/PIRBN_1.png){ loading=lazy } -
网络的结构
-
-图片左侧为常见神经网络结构的输入层,隐藏层,输出层,隐藏层包含激活层,a 中为单层隐藏层,b 中为多层隐藏层,图片右侧为 PIRBN 网络的激活函数,计算网络的损失 Loss 并反向传递。图片说明当使用 PIRBN 时,每个 RBF 神经元仅在输入接近神经元中心时被激活。直观地说,PIRBN 具有局部逼近特性。通过梯度下降算法训练一个 PIRBN 也可以通过 NTK 理论进行分析。 - -
- ![gaussian](https://paddle-org.bj.bcebos.com/paddlescience/docs/PIRBN/PIRBN_2.png){ loading=lazy } -
不同阶数的高斯激活函数
-
-(a) 0, 1, 2 阶高斯激活函数 -(b) 设置不同 b 值 -(c) 设置不同 c 值 - -当使用高斯函数作为激活函数时,输入与输出之间的映射关系可以数学上表示为高斯函数的某种形式。RBF 网络是一种常用于模式识别、数据插值和函数逼近的神经网络,其关键特征是使用径向基函数作为激活函数,使得网络具有更好的全局逼近能力和灵活性。 - -## 2. 问题定义 - -在 NTK 和基于 NTK 的适应性训练方法的帮助下,PINN 在处理具有高频特征的问题时的性能可以得到显著提升。例如,考虑一个偏微分方程及其边界条件: - -$$ -\begin{aligned} -& \frac{\mathrm{d}^2}{\mathrm{~d} x^2} u(x)-4 \mu^2 \pi^2 \sin (2 \mu \pi x)=0, \text { for } x \in[0,1] \\ -& u(0)=u(1)=0 -\end{aligned} -$$ - -其中$\mu$是一个控制PDE解的频率特征的常数。 - -## 3. 问题求解 - -接下来开始讲解如何将问题一步一步地转化为 PaddlePaddle 代码,用深度学习的方法求解该问题。 -为了快速理解 PaddlePaddle,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 - -### 3.1 模型构建 - -在 PIRBN 问题中,建立网络,用 PaddlePaddle 代码表示如下 - -``` py linenums="40" ---8<-- -jointContribution/PIRBN/main.py:40:42 ---8<-- -``` - -### 3.2 数据构建 - -本案例涉及读取数据构建,如下所示 - -``` py linenums="18" ---8<-- -jointContribution/PIRBN/main.py:18:38 ---8<-- -``` - -### 3.3 训练和评估构建 - -训练和评估构建,设置损失计算函数,返回字段,代码如下所示: - -``` py linenums="52" ---8<-- -jointContribution/PIRBN/train.py:52:90 ---8<-- -``` - -### 3.4 超参数设定 - -接下来我们需要指定训练轮数,此处我们按实验经验,使用 20001 轮训练轮数。 - -``` py linenums="43" ---8<-- -jointContribution/PIRBN/main.py:43:43 ---8<-- -``` - -### 3.5 优化器构建 - -训练过程会调用优化器来更新模型参数,此处选择 `Adam` 优化器并设定 `learning_rate` 为 1e-3。 - -``` py linenums="33" ---8<-- -jointContribution/PIRBN/train.py:33:35 ---8<-- -``` - -### 3.6 模型训练与评估 - -模型训练与评估 - -``` py linenums="92" ---8<-- -jointContribution/PIRBN/train.py:92:99 ---8<-- -``` - -## 4. 完整代码 - -``` py linenums="1" title="main.py" ---8<-- -jointContribution/PIRBN/main.py ---8<-- -``` - -## 5. 结果展示 - -PINN 案例针对 epoch=20001 和 learning\_rate=1e-3 的参数配置进行了实验,结果返回Loss为 0.13567。 - -PIRBN 案例针对 epoch=20001 和 learning\_rate=1e-3 的参数配置进行了实验,结果返回Loss为 0.59471。 - -
- ![PINN](https://paddle-org.bj.bcebos.com/paddlescience/docs/PIRBN/PIRBN_3.png){ loading=lazy } -
PINN 结果图
-
-图为使用双曲正切函数(tanh)作为激活函数(activation function),并且使用 LuCun 初始化方法来初始化神经网络中的所有参数。 - -- 图中子图 1 为预测值和真实值的曲线比较 -- 图中子图 2 为误差值 -- 图中子图 3 为损失值 -- 图中子图 4 为训练 1 次的 Kg 图 -- 图中子图 5 为训练 2000 次的 Kg 图 -- 图中子图 6 为训练 20000 次的 Kg 图 - -可以看到预测值和真实值可以匹配,误差值逐渐升高然后逐渐减少,Loss 历史降低后波动,Kg 图随训练次数增加而逐渐收敛。 - -
- ![PIRBN](https://paddle-org.bj.bcebos.com/paddlescience/docs/PIRBN/PIRBN_4.png){ loading=lazy } -
PIRBN 结果图
-
-图为使用高斯函数(gaussian function)作为激活函数(activation function)生成的数据,并且使用 LuCun 初始化方法来初始化神经网络中的所有参数。 - -- 图中子图 1 为预测值和真实值的曲线比较 -- 图中子图 2 为误差值 -- 图中子图 3 为损失值 -- 图中子图 4 为训练 1 次的 Kg 图 -- 图中子图 5 为训练 2000 次的 Kg 图 -- 图中子图 6 为训练 20000 次的 Kg 图 - -可以看到预测值和真实值可以匹配,误差值逐渐升高然后逐渐减少再升高,Loss 历史降低后波动,Kg 图随训练次数增加而逐渐收敛。 - -## 6. 参考资料 - -- [Physics-informed radial basis network (PIRBN): A local approximating neural network for solving nonlinear PDEs](https://arxiv.org/abs/2304.06234) -- +# PIRBN + +=== "模型训练和评估命令" + + ``` sh + cd PaddleScience/jointContribution/PIRBN + python main.py + ``` + +## 1. 背景简介 + +我们最近发现经过训练,物理信息神经网络(PINN)往往会成为局部近似函数。这一观察结果促使我们开发了一种新型的物理-信息径向基网络(PIRBN),该网络在整个训练过程中都能够维持局部近似性质。与深度神经网络不同,PIRBN 仅包含一个隐藏层和一个径向基“激活”函数。在适当的条件下,我们证明了使用梯度下降方法训练 PIRBN 可以收敛到高斯过程。此外,我们还通过神经邻近核(NTK)理论研究了 PIRBN 的训练动态。此外,我们还对 PIRBN 的初始化策略进行了全面调查。基于数值示例,我们发现 PIRBN 在解决具有高频特征和病态计算域的非线性偏微分方程方面比PINN更有效。此外,现有的 PINN 数值技术,如自适应学习、分解和不同类型的损失函数,也适用于 PIRBN。 + +
+ ![介绍](https://paddle-org.bj.bcebos.com/paddlescience/docs/PIRBN/PIRBN_1.png){ loading=lazy } +
网络的结构
+
+图片左侧为常见神经网络结构的输入层,隐藏层,输出层,隐藏层包含激活层,a 中为单层隐藏层,b 中为多层隐藏层,图片右侧为 PIRBN 网络的激活函数,计算网络的损失 Loss 并反向传递。图片说明当使用 PIRBN 时,每个 RBF 神经元仅在输入接近神经元中心时被激活。直观地说,PIRBN 具有局部逼近特性。通过梯度下降算法训练一个 PIRBN 也可以通过 NTK 理论进行分析。 + +
+ ![gaussian](https://paddle-org.bj.bcebos.com/paddlescience/docs/PIRBN/PIRBN_2.png){ loading=lazy } +
不同阶数的高斯激活函数
+
+(a) 0, 1, 2 阶高斯激活函数 +(b) 设置不同 b 值 +(c) 设置不同 c 值 + +当使用高斯函数作为激活函数时,输入与输出之间的映射关系可以数学上表示为高斯函数的某种形式。RBF 网络是一种常用于模式识别、数据插值和函数逼近的神经网络,其关键特征是使用径向基函数作为激活函数,使得网络具有更好的全局逼近能力和灵活性。 + +## 2. 问题定义 + +在 NTK 和基于 NTK 的适应性训练方法的帮助下,PINN 在处理具有高频特征的问题时的性能可以得到显著提升。例如,考虑一个偏微分方程及其边界条件: + +$$ +\begin{aligned} +& \frac{\mathrm{d}^2}{\mathrm{~d} x^2} u(x)-4 \mu^2 \pi^2 \sin (2 \mu \pi x)=0, \text { for } x \in[0,1] \\ +& u(0)=u(1)=0 +\end{aligned} +$$ + +其中$\mu$是一个控制PDE解的频率特征的常数。 + +## 3. 问题求解 + +接下来开始讲解如何将问题一步一步地转化为 PaddlePaddle 代码,用深度学习的方法求解该问题。 +为了快速理解 PaddlePaddle,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 + +### 3.1 模型构建 + +在 PIRBN 问题中,建立网络,用 PaddlePaddle 代码表示如下 + +``` py linenums="40" +--8<-- +jointContribution/PIRBN/main.py:40:42 +--8<-- +``` + +### 3.2 数据构建 + +本案例涉及读取数据构建,如下所示 + +``` py linenums="18" +--8<-- +jointContribution/PIRBN/main.py:18:38 +--8<-- +``` + +### 3.3 训练和评估构建 + +训练和评估构建,设置损失计算函数,返回字段,代码如下所示: + +``` py linenums="52" +--8<-- +jointContribution/PIRBN/train.py:52:90 +--8<-- +``` + +### 3.4 超参数设定 + +接下来我们需要指定训练轮数,此处我们按实验经验,使用 20001 轮训练轮数。 + +``` py linenums="43" +--8<-- +jointContribution/PIRBN/main.py:43:43 +--8<-- +``` + +### 3.5 优化器构建 + +训练过程会调用优化器来更新模型参数,此处选择 `Adam` 优化器并设定 `learning_rate` 为 1e-3。 + +``` py linenums="33" +--8<-- +jointContribution/PIRBN/train.py:33:35 +--8<-- +``` + +### 3.6 模型训练与评估 + +模型训练与评估 + +``` py linenums="92" +--8<-- +jointContribution/PIRBN/train.py:92:99 +--8<-- +``` + +## 4. 完整代码 + +``` py linenums="1" title="main.py" +--8<-- +jointContribution/PIRBN/main.py +--8<-- +``` + +## 5. 结果展示 + +PINN 案例针对 epoch=20001 和 learning\_rate=1e-3 的参数配置进行了实验,结果返回Loss为 0.13567。 + +PIRBN 案例针对 epoch=20001 和 learning\_rate=1e-3 的参数配置进行了实验,结果返回Loss为 0.59471。 + +
+ ![PINN](https://paddle-org.bj.bcebos.com/paddlescience/docs/PIRBN/PIRBN_3.png){ loading=lazy } +
PINN 结果图
+
+图为使用双曲正切函数(tanh)作为激活函数(activation function),并且使用 LuCun 初始化方法来初始化神经网络中的所有参数。 + +- 图中子图 1 为预测值和真实值的曲线比较 +- 图中子图 2 为误差值 +- 图中子图 3 为损失值 +- 图中子图 4 为训练 1 次的 Kg 图 +- 图中子图 5 为训练 2000 次的 Kg 图 +- 图中子图 6 为训练 20000 次的 Kg 图 + +可以看到预测值和真实值可以匹配,误差值逐渐升高然后逐渐减少,Loss 历史降低后波动,Kg 图随训练次数增加而逐渐收敛。 + +
+ ![PIRBN](https://paddle-org.bj.bcebos.com/paddlescience/docs/PIRBN/PIRBN_4.png){ loading=lazy } +
PIRBN 结果图
+
+图为使用高斯函数(gaussian function)作为激活函数(activation function)生成的数据,并且使用 LuCun 初始化方法来初始化神经网络中的所有参数。 + +- 图中子图 1 为预测值和真实值的曲线比较 +- 图中子图 2 为误差值 +- 图中子图 3 为损失值 +- 图中子图 4 为训练 1 次的 Kg 图 +- 图中子图 5 为训练 2000 次的 Kg 图 +- 图中子图 6 为训练 20000 次的 Kg 图 + +可以看到预测值和真实值可以匹配,误差值逐渐升高然后逐渐减少再升高,Loss 历史降低后波动,Kg 图随训练次数增加而逐渐收敛。 + +## 6. 参考资料 + +- [Physics-informed radial basis network (PIRBN): A local approximating neural network for solving nonlinear PDEs](https://arxiv.org/abs/2304.06234) +- diff --git a/docs/zh/examples/viv.md b/docs/zh/examples/viv.md index 6d3ceb27e1..db992013b7 100644 --- a/docs/zh/examples/viv.md +++ b/docs/zh/examples/viv.md @@ -1,208 +1,208 @@ -# VIV(vortex induced vibration) - -AI Studio快速体验 - -=== "模型训练命令" - - ``` sh - python viv.py - ``` - -=== "模型评估命令" - - ``` sh - python viv.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/viv/viv_pretrained.pdparams - ``` - -=== "模型导出命令" - - ``` sh - python viv.py mode=export - ``` - -=== "模型推理命令" - - ``` sh - python viv.py mode=infer - ``` - -| 预训练模型 | 指标 | -|:--| :--| -| [viv_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/aneurysm/viv_pretrained.pdparams)
[viv_pretrained.pdeqn](https://paddle-org.bj.bcebos.com/paddlescience/models/aneurysm/viv_pretrained.pdeqn) | eta_l2/MSE.eta: 0.00875
eta_l2/MSE.f: 0.00921 | - -## 1. 背景简介 - -涡激振动(Vortex-Induced Vibration,VIV)是一种流固耦合振动现象,主要发生在流体绕过柱体或管体时。在海洋工程和风工程中,这种振动现象具有重要应用。 - -在海洋工程中,涡激振动问题主要涉及海洋平台(如桩基、立管等)的涡激振动响应分析。这些平台在海流中运行,会受到涡激振动的影响。这种振动可能会导致平台结构的疲劳损伤,因此在进行海洋平台设计时,需要考虑这一问题。 - -在风工程中,涡激振动问题主要涉及风力发电机的涡激振动响应分析。风力发电机叶片在运行过程中受到气流的涡激振动,这种振动可能会导致叶片的疲劳损伤。为了确保风力发电机的安全运行,需要对这一问题进行深入的研究。 - -总之,涡激振动问题的应用主要涉及海洋工程和风工程领域,对于这些领域的发展具有重要意义。 - -当涡流脱落频率接近结构的固有频率时,圆柱会发生涡激振动,VIV系统相当于一个弹簧-阻尼系统: - -![VIV_1D_SpringDamper](https://paddle-org.bj.bcebos.com/paddlescience/docs/ViV/VIV_1D_SpringDamper.png) - -## 2. 问题定义 - -本问题涉及的控制方程涉及三个物理量:$λ_1$、$λ_2$ 和 $ρ$,分别表示自然阻尼、结构特性刚度和质量,控制方程定义如下所示: - -$$ -\rho \dfrac{\partial^2 \eta}{\partial t^2} + \lambda_1 \dfrac{\partial \eta}{\partial t} + \lambda_2 \eta = f -$$ - -该模型基于无量纲速度 $U_r=\dfrac{u}{f_n*d}=8.5$ 对应 $Re=500$ 的假设。我们使用通过圆柱的流体引起的圆柱振动的横向振幅 $\eta$ 和相应的升力 $f$ 作为监督数据。 - -## 3. 问题求解 - -接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。 -为了快速理解 PaddleScience,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 - -### 3.1 模型构建 - -在 VIV 问题中,给定时间 $t$,上述系统都有横向振幅 $\eta$ 和升力 $f$ 作为待求解的未知量,并且该系统本身还包含两个参数 $\lambda_1, \lambda_2$。因此我们在这里使用比较简单的 MLP(Multilayer Perceptron, 多层感知机) 来表示 $t$ 到 $(\eta, f)$ 的映射函数 $g: \mathbb{R}^1 \to \mathbb{R}^2$ ,即: - -$$ -\eta, f = g(t) -$$ - -上式中 $g$ 即为 MLP 模型本身,用 PaddleScience 代码表示如下 - -``` py linenums="22" ---8<-- -examples/fsi/viv.py:22:23 ---8<-- -``` - -为了在计算时,准确快速地访问具体变量的值,我们在这里指定网络模型的输入变量名是 `("t_f",)`,输出变量名是 `("eta",)`, - `t_f` 代表输入时间 $t$,`eta` 代表输出振幅 $\eta$ 这些命名与后续代码保持一致。 - -接着通过指定 MLP 的层数、神经元个数以及激活函数,我们就实例化出了一个拥有 5 层隐藏神经元,每层神经元数为 50,使用 "tanh" 作为激活函数的神经网络模型 `model`。 - -### 3.2 方程构建 - -由于 VIV 使用的是 VIV 方程,因此可以直接使用 PaddleScience 内置的 `VIV`。 - -``` py linenums="25" ---8<-- -examples/fsi/viv.py:25:26 ---8<-- -``` - -我们在该方程中添加了两个可学习的参数 `k1` 和 `k2` 来估计 $\lambda_1$ 和 $\lambda_2$,且它们的关系是 $\lambda_1 = e^{k1}, \lambda_2 = e^{k2}$ - -因此我们在实例化 `VIV` 类时需指定必要的参数:质量 `rho=2`,初始化值`k1=-4`,`k2=0`。 - -### 3.3 计算域构建 - -本文中 VIV 问题作用在 $t \in [0.0625, 9.9375]$ 中的 100 个离散时间点上,这 100 个时间点已经保存在文件 `examples/fsi/VIV_Training_Neta100.mat` 作为输入数据,因此不需要显式构建计算域。 - -### 3.4 约束构建 - -本文采用监督学习的方式,对模型输出 $\eta$ 和基于 $\eta$ 计算出的升力 $f$,这两个物理量进行约束。 - -#### 3.4.1 监督约束 - -由于我们以监督学习方式进行训练,此处采用监督约束 `SupervisedConstraint`: - -``` py linenums="28" ---8<-- -examples/fsi/viv.py:28:48 ---8<-- -``` - -`SupervisedConstraint` 的第一个参数是监督约束的读取配置,此处填入在 [3.2 方程构建](#32) 章节中实例化好的 `train_dataloader_cfg`; - -第二个参数是损失函数,此处我们选用常用的MSE函数,且 `reduction` 设置为 `"mean"`,即我们会将参与计算的所有数据点产生的损失项求和取平均; - -第三个参数是方程表达式,用于描述如何计算约束目标,此处填入 `eta` 的计算函数和在 [3.2 方程构建](#32) 章节中实例化好的 `equation["VIV"].equations`; - -第四个参数是约束条件的名字,我们需要给每一个约束条件命名,方便后续对其索引。此处我们命名为 "Sup" 即可。 - -在监督约束构建完毕之后,以我们刚才的命名为关键字,封装到一个字典中,方便后续访问。 - -``` py linenums="49" ---8<-- -examples/fsi/viv.py:49:50 ---8<-- -``` - -### 3.5 超参数设定 - -接下来我们需要指定训练轮数和学习率,此处我们按实验经验,使用 10000 轮训练轮数,并每隔 10000 个epochs评估一次模型精度。 - -``` yaml linenums="42" ---8<-- -examples/fsi/conf/viv.yaml:42:49 ---8<-- -``` - -### 3.6 优化器构建 - -训练过程会调用优化器来更新模型参数,此处选择较为常用的 `Adam` 优化器和 `Step` 间隔衰减学习率。 - -``` py linenums="52" ---8<-- -examples/fsi/viv.py:52:54 ---8<-- -``` - -???+ note "说明" - - VIV 方程含有两个 **可学习参数** k1和k2,因此需要将方程与 `model` 一起传入优化器。 - -### 3.7 评估器构建 - -在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此使用 `ppsci.validate.SupervisedValidator` 构建评估器。 - -``` py linenums="56" ---8<-- -examples/fsi/viv.py:56:72 ---8<-- -``` - -评价指标 `metric` 选择 `ppsci.metric.MSE` 即可; - -其余配置与 [3.4.1 监督约束构建](#341) 的设置类似。 - -### 3.8 可视化器构建 - -在模型评估时,如果评估结果是可以可视化的数据,我们可以选择合适的可视化器来对输出结果进行可视化。 - -本文需要可视化的数据是 $t-\eta$ 和 $t-f$ 两组关系图,假设每个时刻 $t$ 的坐标是 $t_i$,则对应网络输出为 
$\eta_i$,升力为 $f_i$,因此我们只需要将评估过程中产生的所有 $(t_i, \eta_i, f_i)$ 保存成图片即可。代码如下: - -``` py linenums="74" ---8<-- -examples/fsi/viv.py:74:93 ---8<-- -``` - -### 3.9 模型训练、评估与可视化 - -完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估、可视化。 - -``` py linenums="95" ---8<-- -examples/fsi/viv.py:95:111 ---8<-- -``` - -## 4. 完整代码 - -``` py linenums="1" title="viv.py" ---8<-- -examples/fsi/viv.py ---8<-- -``` - -## 5. 结果展示 - -模型预测结果如下所示,横轴为时间自变量$t$,$\eta_{gt}$为参考振幅,$\eta$为模型预测振幅,$f_{gt}$为参考升力,$f$为模型预测升力。 - -
- ![Viv_result](https://paddle-org.bj.bcebos.com/paddlescience/docs/ViV/eta_f_pred.png){ loading=lazy } -
振幅 eta 与升力 f 随时间t变化的预测结果和参考结果
-
- -可以看到模型对在$[0,10]$时间范围内,对振幅和升力的预测结果与参考结果基本一致。 +# VIV(vortex induced vibration) + +AI Studio快速体验 + +=== "模型训练命令" + + ``` sh + python viv.py + ``` + +=== "模型评估命令" + + ``` sh + python viv.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/viv/viv_pretrained.pdparams + ``` + +=== "模型导出命令" + + ``` sh + python viv.py mode=export + ``` + +=== "模型推理命令" + + ``` sh + python viv.py mode=infer + ``` + +| 预训练模型 | 指标 | +|:--| :--| +| [viv_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/aneurysm/viv_pretrained.pdparams)
[viv_pretrained.pdeqn](https://paddle-org.bj.bcebos.com/paddlescience/models/aneurysm/viv_pretrained.pdeqn) | eta_l2/MSE.eta: 0.00875
eta_l2/MSE.f: 0.00921 | + +## 1. 背景简介 + +涡激振动(Vortex-Induced Vibration,VIV)是一种流固耦合振动现象,主要发生在流体绕过柱体或管体时。在海洋工程和风工程中,这种振动现象具有重要应用。 + +在海洋工程中,涡激振动问题主要涉及海洋平台(如桩基、立管等)的涡激振动响应分析。这些平台在海流中运行,会受到涡激振动的影响。这种振动可能会导致平台结构的疲劳损伤,因此在进行海洋平台设计时,需要考虑这一问题。 + +在风工程中,涡激振动问题主要涉及风力发电机的涡激振动响应分析。风力发电机叶片在运行过程中受到气流的涡激振动,这种振动可能会导致叶片的疲劳损伤。为了确保风力发电机的安全运行,需要对这一问题进行深入的研究。 + +总之,涡激振动问题的应用主要涉及海洋工程和风工程领域,对于这些领域的发展具有重要意义。 + +当涡流脱落频率接近结构的固有频率时,圆柱会发生涡激振动,VIV系统相当于一个弹簧-阻尼系统: + +![VIV_1D_SpringDamper](https://paddle-org.bj.bcebos.com/paddlescience/docs/ViV/VIV_1D_SpringDamper.png) + +## 2. 问题定义 + +本问题涉及的控制方程涉及三个物理量:$λ_1$、$λ_2$ 和 $ρ$,分别表示自然阻尼、结构特性刚度和质量,控制方程定义如下所示: + +$$ +\rho \dfrac{\partial^2 \eta}{\partial t^2} + \lambda_1 \dfrac{\partial \eta}{\partial t} + \lambda_2 \eta = f +$$ + +该模型基于无量纲速度 $U_r=\dfrac{u}{f_n*d}=8.5$ 对应 $Re=500$ 的假设。我们使用通过圆柱的流体引起的圆柱振动的横向振幅 $\eta$ 和相应的升力 $f$ 作为监督数据。 + +## 3. 问题求解 + +接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。 +为了快速理解 PaddleScience,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 + +### 3.1 模型构建 + +在 VIV 问题中,给定时间 $t$,上述系统都有横向振幅 $\eta$ 和升力 $f$ 作为待求解的未知量,并且该系统本身还包含两个参数 $\lambda_1, \lambda_2$。因此我们在这里使用比较简单的 MLP(Multilayer Perceptron, 多层感知机) 来表示 $t$ 到 $(\eta, f)$ 的映射函数 $g: \mathbb{R}^1 \to \mathbb{R}^2$ ,即: + +$$ +\eta, f = g(t) +$$ + +上式中 $g$ 即为 MLP 模型本身,用 PaddleScience 代码表示如下 + +``` py linenums="22" +--8<-- +examples/fsi/viv.py:22:23 +--8<-- +``` + +为了在计算时,准确快速地访问具体变量的值,我们在这里指定网络模型的输入变量名是 `("t_f",)`,输出变量名是 `("eta",)`, + `t_f` 代表输入时间 $t$,`eta` 代表输出振幅 $\eta$ 这些命名与后续代码保持一致。 + +接着通过指定 MLP 的层数、神经元个数以及激活函数,我们就实例化出了一个拥有 5 层隐藏神经元,每层神经元数为 50,使用 "tanh" 作为激活函数的神经网络模型 `model`。 + +### 3.2 方程构建 + +由于 VIV 使用的是 VIV 方程,因此可以直接使用 PaddleScience 内置的 `VIV`。 + +``` py linenums="25" +--8<-- +examples/fsi/viv.py:25:26 +--8<-- +``` + +我们在该方程中添加了两个可学习的参数 `k1` 和 `k2` 来估计 $\lambda_1$ 和 $\lambda_2$,且它们的关系是 $\lambda_1 = e^{k1}, \lambda_2 = e^{k2}$ + +因此我们在实例化 `VIV` 类时需指定必要的参数:质量 `rho=2`,初始化值`k1=-4`,`k2=0`。 + +### 3.3 计算域构建 + +本文中 VIV 问题作用在 $t \in [0.0625, 9.9375]$ 中的 100 个离散时间点上,这 100 个时间点已经保存在文件 `examples/fsi/VIV_Training_Neta100.mat` 作为输入数据,因此不需要显式构建计算域。 + +### 3.4 约束构建 + +本文采用监督学习的方式,对模型输出 $\eta$ 和基于 $\eta$ 计算出的升力 $f$,这两个物理量进行约束。 + +#### 3.4.1 监督约束 + +由于我们以监督学习方式进行训练,此处采用监督约束 `SupervisedConstraint`: + +``` py linenums="28" +--8<-- +examples/fsi/viv.py:28:48 +--8<-- +``` + +`SupervisedConstraint` 的第一个参数是监督约束的读取配置,此处填入在 [3.2 方程构建](#32) 章节中实例化好的 `train_dataloader_cfg`; + +第二个参数是损失函数,此处我们选用常用的MSE函数,且 `reduction` 设置为 `"mean"`,即我们会将参与计算的所有数据点产生的损失项求和取平均; + +第三个参数是方程表达式,用于描述如何计算约束目标,此处填入 `eta` 的计算函数和在 [3.2 方程构建](#32) 章节中实例化好的 `equation["VIV"].equations`; + +第四个参数是约束条件的名字,我们需要给每一个约束条件命名,方便后续对其索引。此处我们命名为 "Sup" 即可。 + +在监督约束构建完毕之后,以我们刚才的命名为关键字,封装到一个字典中,方便后续访问。 + +``` py linenums="49" +--8<-- +examples/fsi/viv.py:49:50 +--8<-- +``` + +### 3.5 超参数设定 + +接下来我们需要指定训练轮数和学习率,此处我们按实验经验,使用 10000 轮训练轮数,并每隔 10000 个epochs评估一次模型精度。 + +``` yaml linenums="42" +--8<-- +examples/fsi/conf/viv.yaml:42:49 +--8<-- +``` + +### 3.6 优化器构建 + +训练过程会调用优化器来更新模型参数,此处选择较为常用的 `Adam` 优化器和 `Step` 间隔衰减学习率。 + +``` py linenums="52" +--8<-- +examples/fsi/viv.py:52:54 +--8<-- +``` + +???+ note "说明" + + VIV 方程含有两个 **可学习参数** k1和k2,因此需要将方程与 `model` 一起传入优化器。 + +### 3.7 评估器构建 + +在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此使用 `ppsci.validate.SupervisedValidator` 构建评估器。 + +``` py linenums="56" +--8<-- +examples/fsi/viv.py:56:72 +--8<-- +``` + +评价指标 `metric` 选择 `ppsci.metric.MSE` 即可; + +其余配置与 [3.4.1 监督约束构建](#341) 的设置类似。 + +### 3.8 可视化器构建 + +在模型评估时,如果评估结果是可以可视化的数据,我们可以选择合适的可视化器来对输出结果进行可视化。 + +本文需要可视化的数据是 $t-\eta$ 和 $t-f$ 两组关系图,假设每个时刻 $t$ 的坐标是 $t_i$,则对应网络输出为 
$\eta_i$,升力为 $f_i$,因此我们只需要将评估过程中产生的所有 $(t_i, \eta_i, f_i)$ 保存成图片即可。代码如下: + +``` py linenums="74" +--8<-- +examples/fsi/viv.py:74:93 +--8<-- +``` + +### 3.9 模型训练、评估与可视化 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练、评估、可视化。 + +``` py linenums="95" +--8<-- +examples/fsi/viv.py:95:111 +--8<-- +``` + +## 4. 完整代码 + +``` py linenums="1" title="viv.py" +--8<-- +examples/fsi/viv.py +--8<-- +``` + +## 5. 结果展示 + +模型预测结果如下所示,横轴为时间自变量$t$,$\eta_{gt}$为参考振幅,$\eta$为模型预测振幅,$f_{gt}$为参考升力,$f$为模型预测升力。 + +
+ ![Viv_result](https://paddle-org.bj.bcebos.com/paddlescience/docs/ViV/eta_f_pred.png){ loading=lazy } +
振幅 eta 与升力 f 随时间t变化的预测结果和参考结果
+
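在查看上图结论之前,这里补充一段示意代码(并非案例源码,数组内容为占位数据),说明如何利用第 2 节的控制方程对预测结果做残差自检:其中 $\lambda_1=e^{k_1}$、$\lambda_2=e^{k_2}$,`k1`、`k2` 此处取 3.2 节中的初始化值仅作演示,实际应使用训练后的参数值。

``` py
import numpy as np

# 占位数据:实际应替换为模型在均匀时间网格上预测的振幅 eta 与升力 f
t = np.linspace(0.0625, 9.9375, 100)
eta = np.sin(t)  # 占位
f = np.cos(t)    # 占位

rho = 2.0
k1, k2 = -4.0, 0.0  # 仅为 3.2 节的初始化值,演示用
lambda1, lambda2 = np.exp(k1), np.exp(k2)

# 用数值差分近似一阶、二阶导数,检查 rho * eta'' + lambda1 * eta' + lambda2 * eta 与 f 的偏差
deta_dt = np.gradient(eta, t)
d2eta_dt2 = np.gradient(deta_dt, t)
residual = rho * d2eta_dt2 + lambda1 * deta_dt + lambda2 * eta - f
print("平均绝对残差:", np.abs(residual).mean())
```

该检查只是对控制方程的粗略数值验证,真实评估仍以上文评估器输出的 MSE 指标为准。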
+ +可以看到模型对在$[0,10]$时间范围内,对振幅和升力的预测结果与参考结果基本一致。 diff --git a/docs/zh/examples/volterra_ide.md b/docs/zh/examples/volterra_ide.md index 21fcd5659c..d8df03674f 100644 --- a/docs/zh/examples/volterra_ide.md +++ b/docs/zh/examples/volterra_ide.md @@ -1,219 +1,219 @@ -# Volterra integral equation - -AI Studio快速体验 - -=== "模型训练命令" - - ``` sh - python volterra_ide.py - ``` - -=== "模型评估命令" - - ``` sh - python volterra_ide.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/volterra_ide/volterra_ide_pretrained.pdparams - ``` - -=== "模型导出命令" - - ``` sh - python volterra_ide.py mode=export - ``` - -=== "模型推理命令" - - ``` sh - python volterra_ide.py mode=infer - ``` - -| 预训练模型 | 指标 | -|:--| :--| -| [volterra_ide_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/volterra_ide/volterra_ide_pretrained.pdparams) | loss(L2Rel_Validator): 0.00023
L2Rel.u(L2Rel_Validator): 0.00023 | - -## 1. 背景简介 - -Volterra integral equation(沃尔泰拉积分方程)是一种积分方程,即方程中含有对待求解函数的积分运算,其有两种形式,如下所示 - -$$ -\begin{aligned} - f(t) &= \int_a^t K(t, s) x(s) d s \\ - x(t) &= f(t)+\int_a^t K(t, s) x(s) d s -\end{aligned} -$$ - -在数学领域,沃尔泰拉方程可以用于表达各种多变量概率分布,是进行多变量统计分析的有力工具。这使得它在处理复杂数据结构时非常有用,例如在机器学习领域。沃尔泰拉方程还可以用于计算不同维度属性的相关性,以及模拟复杂的数据集结构,以便为机器学习任务提供有效的数据支持。 - -在生物学领域,沃尔泰拉方程被用作渔业生产的指导,对生态平衡和环境保护有重要意义。此外,该方程还在疾病防治,人口统计等方面有应用。值得一提的是,沃尔泰拉方程的建立是数学在生物学领域应用的首次成功尝试,推动了生物数学这门科学的产生和发展。 - -本案例以第二种方程为例,使用深度学习的方式进行求解。 - -## 2. 问题定义 - -假设存在如下 IDE 方程: - -$$ -u(t) = -\dfrac{du}{dt} + \int_{t_0}^t e^{t-s} u(s) d s -$$ - -其中 $u(t)$ 就是待求解的函数,而 $-\dfrac{du}{dt}$ 对应了 $f(t)$,$e^{t-s}$ 对应了 $K(t,s)$。 -因此可以利用神经网络模型,以 $t$ 为输入,$u(t)$ 为输出,根据上述方程构建微分约束,进行无监督学习最终拟合出待求解的函数 $u(t)$。 - -为了方便在计算机中进行求解,我们将上式进行移项,让积分项作为左侧,非积分项放至右侧,如下所示: - -$$ -\int_{t_0}^t e^{t-s} u(s) d s = u(t) + \dfrac{du}{dt} -$$ - -## 3. 问题求解 - -接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。 -为了快速理解 PaddleScience,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 - -### 3.1 模型构建 - -在上述问题中,我们确定了输入为 $x$,输出为 $u(x)$,因此我们使用,用 PaddleScience 代码表示如下: - -``` py linenums="39" ---8<-- -examples/ide/volterra_ide.py:39:40 ---8<-- -``` - -为了在计算时,准确快速地访问具体变量的值,我们在这里指定网络模型的输入变量名是 `"x"`(即公式中的 $t$),输出变量名是 `"u"`,接着通过指定 `MLP` 的隐藏层层数、神经元个数,我们就实例化出了神经网络模型 `model`。 - -### 3.2 计算域构建 - -Volterra_IDE 问题的积分域是 $a$ ~ $t$,其中 `a` 为固定常数 0,`t` 的范围为 0 ~ 5,因此可以使用PaddleScience 内置的一维几何 `TimeDomain` 作为计算域。 - -``` py linenums="42" ---8<-- -examples/ide/volterra_ide.py:42:43 ---8<-- -``` - -### 3.3 方程构建 - -由于 Volterra_IDE 使用的是积分方程,因此可以直接使用 PaddleScience 内置的 `ppsci.equation.Volterra`,并指定所需的参数:积分下限 `a`、`t` 的离散取值点数 `num_points`、一维高斯积分点的个数 `quad_deg`、$K(t,s)$ 核函数 `kernel_func`、$u(t) - f(t)$ 等式右侧表达式 `func`。 - -``` py linenums="45" ---8<-- -examples/ide/volterra_ide.py:45:61 ---8<-- -``` - -### 3.4 约束构建 - -#### 3.4.1 内部点约束 - -本文采用无监督学习的方式,对移项后方程的左、右两侧进行约束,让其尽量相等。 - -由于等式左侧涉及到积分计算(实际采用高斯积分近似计算),因此在 0 ~ 5 区间内采样出多个 `t_i` 点后,还需要计算其用于高斯积分的点集,即对每一个 `(0,t_i)` 区间,都计算出一一对应的高斯积分点集 `quad_i` 和点权 `weight_i`。PaddleScience 将这一步作为输入数据的预处理,加入到代码中,如下所示 - -``` py linenums="63" ---8<-- -examples/ide/volterra_ide.py:63:117 ---8<-- -``` - -#### 3.4.2 初值约束 - -在 $t=0$ 时,有以下初值条件: - -$$ -u(0) = e^{-t} \cosh(t)|_{t=0} = e^{0} \cosh(0) = 1 -$$ - -因此可以加入 `t=0` 时的初值条件,代码如下所示 - -``` py linenums="119" ---8<-- -examples/ide/volterra_ide.py:119:137 ---8<-- -``` - -在微分方程约束、初值约束构建完毕之后,以我们刚才的命名为关键字,封装到一个字典中,方便后续访问。 - -``` py linenums="138" ---8<-- -examples/ide/volterra_ide.py:138:142 ---8<-- -``` - -### 3.5 超参数设定 - -接下来我们需要指定训练轮数和学习率,此处我们按实验经验,让 `L-BFGS` 优化器进行一轮优化即可,但一轮优化内的 `max_iters` 数可以设置为一个较大的一个数 `15000`。 - -``` yaml linenums="39" ---8<-- -examples/ide/conf/volterra_ide.yaml:39:57 ---8<-- -``` - -### 3.6 优化器构建 - -训练过程会调用优化器来更新模型参数,此处选择较为常用的 `LBFGS` 优化器。 - -``` py linenums="144" ---8<-- -examples/ide/volterra_ide.py:144:145 ---8<-- -``` - -### 3.7 评估器构建 - -在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此使用 `ppsci.validate.GeometryValidator` 构建评估器。 - -``` py linenums="147" ---8<-- -examples/ide/volterra_ide.py:147:161 ---8<-- -``` - -评价指标 `metric` 选择 `ppsci.metric.L2Rel` 即可。 - -其余配置与 [3.4 约束构建](#34) 的设置类似。 - -### 3.8 模型训练 - -完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练。 - -``` py linenums="163" ---8<-- -examples/ide/volterra_ide.py:163:181 ---8<-- -``` - -### 3.9 结果可视化 - -在模型训练完毕之后,我们可以手动构造 0 ~ 5 区间内均匀 100 个点,作为评估的积分上限 `t` 进行预测,并可视化结果。 - -``` py linenums="183" ---8<-- -examples/ide/volterra_ide.py:183:194 ---8<-- -``` - -## 4. 
完整代码 - -``` py linenums="1" title="volterra_ide.py" ---8<-- -examples/ide/volterra_ide.py ---8<-- -``` - -## 5. 结果展示 - -模型预测结果如下所示,$t$为自变量,$u(t)$为积分方程标准解函数,$\hat{u}(t)$为模型预测的积分方程解函数 - -
- ![result](https://paddle-org.bj.bcebos.com/paddlescience/docs/Volterra_IDE/Volterra_IDE.png){ loading=lazy } -
模型求解结果(橙色散点)和参考结果(蓝色曲线)
-
- -可以看到模型对积分方程在$[0,5]$区间内的预测结果$\hat{u}(t)$和标准解结果$u(t)$基本一致。 - -## 6. 参考文献 - -- [DeepXDE - Antiderivative operator from an unaligned dataset](https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/Volterra_IDE.py) -- [Gaussian quadrature](https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval) -- [Volterra integral equation](https://en.wikipedia.org/wiki/Volterra_integral_equation) +# Volterra integral equation + +AI Studio快速体验 + +=== "模型训练命令" + + ``` sh + python volterra_ide.py + ``` + +=== "模型评估命令" + + ``` sh + python volterra_ide.py mode=eval EVAL.pretrained_model_path=https://paddle-org.bj.bcebos.com/paddlescience/models/volterra_ide/volterra_ide_pretrained.pdparams + ``` + +=== "模型导出命令" + + ``` sh + python volterra_ide.py mode=export + ``` + +=== "模型推理命令" + + ``` sh + python volterra_ide.py mode=infer + ``` + +| 预训练模型 | 指标 | +|:--| :--| +| [volterra_ide_pretrained.pdparams](https://paddle-org.bj.bcebos.com/paddlescience/models/volterra_ide/volterra_ide_pretrained.pdparams) | loss(L2Rel_Validator): 0.00023
L2Rel.u(L2Rel_Validator): 0.00023 | + +## 1. 背景简介 + +Volterra integral equation(沃尔泰拉积分方程)是一种积分方程,即方程中含有对待求解函数的积分运算,其有两种形式,如下所示 + +$$ +\begin{aligned} + f(t) &= \int_a^t K(t, s) x(s) d s \\ + x(t) &= f(t)+\int_a^t K(t, s) x(s) d s +\end{aligned} +$$ + +在数学领域,沃尔泰拉方程可以用于表达各种多变量概率分布,是进行多变量统计分析的有力工具。这使得它在处理复杂数据结构时非常有用,例如在机器学习领域。沃尔泰拉方程还可以用于计算不同维度属性的相关性,以及模拟复杂的数据集结构,以便为机器学习任务提供有效的数据支持。 + +在生物学领域,沃尔泰拉方程被用作渔业生产的指导,对生态平衡和环境保护有重要意义。此外,该方程还在疾病防治,人口统计等方面有应用。值得一提的是,沃尔泰拉方程的建立是数学在生物学领域应用的首次成功尝试,推动了生物数学这门科学的产生和发展。 + +本案例以第二种方程为例,使用深度学习的方式进行求解。 + +## 2. 问题定义 + +假设存在如下 IDE 方程: + +$$ +u(t) = -\dfrac{du}{dt} + \int_{t_0}^t e^{t-s} u(s) d s +$$ + +其中 $u(t)$ 就是待求解的函数,而 $-\dfrac{du}{dt}$ 对应了 $f(t)$,$e^{t-s}$ 对应了 $K(t,s)$。 +因此可以利用神经网络模型,以 $t$ 为输入,$u(t)$ 为输出,根据上述方程构建微分约束,进行无监督学习最终拟合出待求解的函数 $u(t)$。 + +为了方便在计算机中进行求解,我们将上式进行移项,让积分项作为左侧,非积分项放至右侧,如下所示: + +$$ +\int_{t_0}^t e^{t-s} u(s) d s = u(t) + \dfrac{du}{dt} +$$ + +## 3. 问题求解 + +接下来开始讲解如何将问题一步一步地转化为 PaddleScience 代码,用深度学习的方法求解该问题。 +为了快速理解 PaddleScience,接下来仅对模型构建、方程构建、计算域构建等关键步骤进行阐述,而其余细节请参考 [API文档](../api/arch.md)。 + +### 3.1 模型构建 + +在上述问题中,我们确定了输入为 $x$,输出为 $u(x)$,因此我们使用,用 PaddleScience 代码表示如下: + +``` py linenums="39" +--8<-- +examples/ide/volterra_ide.py:39:40 +--8<-- +``` + +为了在计算时,准确快速地访问具体变量的值,我们在这里指定网络模型的输入变量名是 `"x"`(即公式中的 $t$),输出变量名是 `"u"`,接着通过指定 `MLP` 的隐藏层层数、神经元个数,我们就实例化出了神经网络模型 `model`。 + +### 3.2 计算域构建 + +Volterra_IDE 问题的积分域是 $a$ ~ $t$,其中 `a` 为固定常数 0,`t` 的范围为 0 ~ 5,因此可以使用PaddleScience 内置的一维几何 `TimeDomain` 作为计算域。 + +``` py linenums="42" +--8<-- +examples/ide/volterra_ide.py:42:43 +--8<-- +``` + +### 3.3 方程构建 + +由于 Volterra_IDE 使用的是积分方程,因此可以直接使用 PaddleScience 内置的 `ppsci.equation.Volterra`,并指定所需的参数:积分下限 `a`、`t` 的离散取值点数 `num_points`、一维高斯积分点的个数 `quad_deg`、$K(t,s)$ 核函数 `kernel_func`、$u(t) - f(t)$ 等式右侧表达式 `func`。 + +``` py linenums="45" +--8<-- +examples/ide/volterra_ide.py:45:61 +--8<-- +``` + +### 3.4 约束构建 + +#### 3.4.1 内部点约束 + +本文采用无监督学习的方式,对移项后方程的左、右两侧进行约束,让其尽量相等。 + +由于等式左侧涉及到积分计算(实际采用高斯积分近似计算),因此在 0 ~ 5 区间内采样出多个 `t_i` 点后,还需要计算其用于高斯积分的点集,即对每一个 `(0,t_i)` 区间,都计算出一一对应的高斯积分点集 `quad_i` 和点权 `weight_i`。PaddleScience 将这一步作为输入数据的预处理,加入到代码中,如下所示 + +``` py linenums="63" +--8<-- +examples/ide/volterra_ide.py:63:117 +--8<-- +``` + +#### 3.4.2 初值约束 + +在 $t=0$ 时,有以下初值条件: + +$$ +u(0) = e^{-t} \cosh(t)|_{t=0} = e^{0} \cosh(0) = 1 +$$ + +因此可以加入 `t=0` 时的初值条件,代码如下所示 + +``` py linenums="119" +--8<-- +examples/ide/volterra_ide.py:119:137 +--8<-- +``` + +在微分方程约束、初值约束构建完毕之后,以我们刚才的命名为关键字,封装到一个字典中,方便后续访问。 + +``` py linenums="138" +--8<-- +examples/ide/volterra_ide.py:138:142 +--8<-- +``` + +### 3.5 超参数设定 + +接下来我们需要指定训练轮数和学习率,此处我们按实验经验,让 `L-BFGS` 优化器进行一轮优化即可,但一轮优化内的 `max_iters` 数可以设置为一个较大的一个数 `15000`。 + +``` yaml linenums="39" +--8<-- +examples/ide/conf/volterra_ide.yaml:39:57 +--8<-- +``` + +### 3.6 优化器构建 + +训练过程会调用优化器来更新模型参数,此处选择较为常用的 `LBFGS` 优化器。 + +``` py linenums="144" +--8<-- +examples/ide/volterra_ide.py:144:145 +--8<-- +``` + +### 3.7 评估器构建 + +在训练过程中通常会按一定轮数间隔,用验证集(测试集)评估当前模型的训练情况,因此使用 `ppsci.validate.GeometryValidator` 构建评估器。 + +``` py linenums="147" +--8<-- +examples/ide/volterra_ide.py:147:161 +--8<-- +``` + +评价指标 `metric` 选择 `ppsci.metric.L2Rel` 即可。 + +其余配置与 [3.4 约束构建](#34) 的设置类似。 + +### 3.8 模型训练 + +完成上述设置之后,只需要将上述实例化的对象按顺序传递给 `ppsci.solver.Solver`,然后启动训练。 + +``` py linenums="163" +--8<-- +examples/ide/volterra_ide.py:163:181 +--8<-- +``` + +### 3.9 结果可视化 + +在模型训练完毕之后,我们可以手动构造 0 ~ 5 区间内均匀 100 个点,作为评估的积分上限 `t` 进行预测,并可视化结果。 + +``` py linenums="183" +--8<-- +examples/ide/volterra_ide.py:183:194 +--8<-- +``` + +## 4. 
完整代码 + +``` py linenums="1" title="volterra_ide.py" +--8<-- +examples/ide/volterra_ide.py +--8<-- +``` + +## 5. 结果展示 + +模型预测结果如下所示,$t$为自变量,$u(t)$为积分方程标准解函数,$\hat{u}(t)$为模型预测的积分方程解函数 + +
+ ![result](https://paddle-org.bj.bcebos.com/paddlescience/docs/Volterra_IDE/Volterra_IDE.png){ loading=lazy } +
模型求解结果(橙色散点)和参考结果(蓝色曲线)
+
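作为对上图结果的一个补充说明,下面给出一段示意代码(并非案例源码):利用 3.4.2 节初值条件中出现的解析解形式 $u(t)=e^{-t}\cosh(t)$,在 $[0,5]$ 区间上与模型预测值计算 L2-相对误差,其中 `u_pred` 为占位数组,实际应替换为模型的预测输出。

``` py
import numpy as np

# 解析解形式 u(t) = exp(-t) * cosh(t) 取自 3.4.2 节的初值条件推导
t_eval = np.linspace(0, 5, 100)
u_exact = np.exp(-t_eval) * np.cosh(t_eval)

u_pred = np.zeros_like(t_eval)  # 占位:实际应替换为模型在 t_eval 上的预测值

l2_rel = np.linalg.norm(u_pred - u_exact) / np.linalg.norm(u_exact)
print(f"L2 相对误差: {l2_rel:.5f}")
```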
+ +可以看到模型对积分方程在$[0,5]$区间内的预测结果$\hat{u}(t)$和标准解结果$u(t)$基本一致。 + +## 6. 参考文献 + +- [DeepXDE - Antiderivative operator from an unaligned dataset](https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/Volterra_IDE.py) +- [Gaussian quadrature](https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval) +- [Volterra integral equation](https://en.wikipedia.org/wiki/Volterra_integral_equation) diff --git a/docs/zh/quickstart.md b/docs/zh/quickstart.md index 06da4d258b..bcea3a5708 100644 --- a/docs/zh/quickstart.md +++ b/docs/zh/quickstart.md @@ -1,266 +1,266 @@ -# 快速开始 - -AI Studio快速体验 - -本文通过一个简单的 demo 及其扩展问题,介绍如何使用 PaddleScience 训练模型,解决一类方程学习与预测问题,并可视化预测结果。 - -## 1. 问题简介 - -假设我们希望用神经网络模型去拟合 $x \in [-\pi, \pi]$ 区间内,$u=\sin(x)$ 这一函数。在拟合函数已知和未知两种情形下,如何去尽可能地准确拟合 $u=\sin(x)$。 - -第一种场景下,假设已知目标函数 $u$ 的解析解就是 $u=\sin(x)$,我们采用监督训练的思路,直接用该公式生成标签因变量 $u$,与自变量 $x$ 共同作为监督数据对模型进行训练。 - -第二种场景下,假设不知道目标函数 $u$ 的解析解,但我们知道其满足某种微分关系,我们这里以其中一个满足条件的微分方程 $\dfrac{\partial u} {\partial x}=\cos(x)$ 为例,介绍如何生成数据进行训练。 - -## 2. 场景一 - -目标拟合函数: - -$$ -u=\sin(x), x \in [-\pi, \pi]. -$$ - -我们生成 $N$ 组数据对 $(x_i, u_i), i=1,...,N$ 作为监督数据进行训练即可。 - -在撰写代码之前,我们首先导入必要的包。 - -``` py linenums="1" ---8<-- -examples/quick_start/case1.py:1:4 ---8<-- -``` - -然后创建日志和模型保存目录供训练过程记录和保存使用,这一步是绝大部分案例在正式开始前都需要进行的操作。 - -``` py linenums="6" ---8<-- -examples/quick_start/case1.py:6:13 ---8<-- -``` - -接下来正式开始撰写代码。 - -首先定义问题区间,我们使用 `ppsci.geometry.Interval` 定义一个线段几何形状,方便后续在该线段上对 $x$ 进行采样。 - -``` py linenums="15" ---8<-- -examples/quick_start/case1.py:15:17 ---8<-- -``` - -然后定义一个简单的 3 层 MLP 模型。 - -``` py linenums="19" ---8<-- -examples/quick_start/case1.py:19:20 ---8<-- -``` - -上述代码表示模型接受自变量 $x$ 作为输入,输出预测结果 $\hat{u}$ - -然后我们定义已知的 $u=\sin(x)$ 计算函数,作为 `ppsci.constraint.InteriorConstraint` 的参数,用于计算标签数据,`InteriorConstraint` 表示以给定的几何形状或数据集中的数据作为输入,联合给定的标签数据,指导模型进行优化。 - -``` py linenums="22" ---8<-- -examples/quick_start/case1.py:22:47 ---8<-- -``` - -此处的 `interior_constraint` 表示一个训练目标,即我们希望在 $[-\pi, \pi]$ 这段区间内,优化模型让模型的预测结果 $\hat{u}$ 尽可能地接近它的标签值 $u$。 - -接下来就可以开始定义模型训练相关的内容,比如训练轮数、优化器、可视化器。 - -``` py linenums="48" ---8<-- -examples/quick_start/case1.py:48:66 ---8<-- -``` - -最后将上述定义的对象传递给训练调度类 `Solver`,即可开始模型训练 - -``` py linenums="67" ---8<-- -examples/quick_start/case1.py:67:79 ---8<-- -``` - -训练完毕后再用刚才取的 1000 个点与标准解计算 L2-相对误差 - -``` py linenums="81" ---8<-- -examples/quick_start/case1.py:81:86 ---8<-- -``` - -再对这 1000 个点的预测结果进行可视化 - -``` py linenums="88" ---8<-- -examples/quick_start/case1.py:88:89 ---8<-- -``` - -训练记录下所示 - -``` log -... -... 
-ppsci INFO: [Train][Epoch 9/10][Iter 80/100] lr: 0.00200, loss: 0.00663, EQ: 0.00663, batch_cost: 0.00180s, reader_cost: 0.00011s, ips: 17756.64, eta: 0:00:00 -ppsci INFO: [Train][Epoch 9/10][Iter 90/100] lr: 0.00200, loss: 0.00598, EQ: 0.00598, batch_cost: 0.00180s, reader_cost: 0.00011s, ips: 17793.97, eta: 0:00:00 -ppsci INFO: [Train][Epoch 9/10][Iter 100/100] lr: 0.00200, loss: 0.00547, EQ: 0.00547, batch_cost: 0.00179s, reader_cost: 0.00011s, ips: 17864.08, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 10/100] lr: 0.00200, loss: 0.00079, EQ: 0.00079, batch_cost: 0.00182s, reader_cost: 0.00012s, ips: 17547.05, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 20/100] lr: 0.00200, loss: 0.00075, EQ: 0.00075, batch_cost: 0.00183s, reader_cost: 0.00011s, ips: 17482.92, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 30/100] lr: 0.00200, loss: 0.00077, EQ: 0.00077, batch_cost: 0.00182s, reader_cost: 0.00011s, ips: 17539.51, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 40/100] lr: 0.00200, loss: 0.00074, EQ: 0.00074, batch_cost: 0.00182s, reader_cost: 0.00011s, ips: 17587.51, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 50/100] lr: 0.00200, loss: 0.00071, EQ: 0.00071, batch_cost: 0.00182s, reader_cost: 0.00011s, ips: 17563.59, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 60/100] lr: 0.00200, loss: 0.00070, EQ: 0.00070, batch_cost: 0.00182s, reader_cost: 0.00011s, ips: 17604.60, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 70/100] lr: 0.00200, loss: 0.00074, EQ: 0.00074, batch_cost: 0.00181s, reader_cost: 0.00011s, ips: 17699.28, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 80/100] lr: 0.00200, loss: 0.00077, EQ: 0.00077, batch_cost: 0.00180s, reader_cost: 0.00011s, ips: 17764.92, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 90/100] lr: 0.00200, loss: 0.00075, EQ: 0.00075, batch_cost: 0.00180s, reader_cost: 0.00011s, ips: 17795.87, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 100/100] lr: 0.00200, loss: 0.00071, EQ: 0.00071, batch_cost: 0.00179s, reader_cost: 0.00011s, ips: 17872.00, eta: 0:00:00 -``` - -训练完毕后再用刚才取的 1000 个点与标准解计算 L2-相对误差 - -``` py linenums="81" ---8<-- -examples/quick_start/case1.py:81:86 ---8<-- -``` - -可以看到利用标准解监督训练模型,在标准解附近仍有很好的预测能力,L2-相对误差为 0.02677。 - -预测结果可视化如下所示 - -![u=sin(x) prediction](../images/quickstart/u_pred_case1.png) - -场景一的完整代码如下所示 - -``` py linenums="1" title="examples/quick_start/case1.py" ---8<-- -examples/quick_start/case1.py ---8<-- -``` - -## 3. 
场景二 - -可以看到场景一的监督训练方式能较好地解决函数拟合问题,但一般情况下我们是无法得知拟合函数本身的解析式的,因此也无法直接构造因变量的监督数据。 - -虽然无法求出解析式直接构造监督数据,但往往可以利用相关数学知识,推导出目标拟合函数符合的某种数学关系,以训练模型以满足这种数学关系的方式,达到以“间接监督”的方式优化模型的目的。 - -假设我们不再使用 $u=\sin(x)$ 这一先验公式,因而无法计算标签数据 $u$。因此使用如下方程组,其含有一个偏微分方程和边界条件 - -$$ -\begin{cases} -\begin{aligned} - \dfrac{\partial u} {\partial x} &= \cos(x) \\ - u(-\pi) &= 2 -\end{aligned} -\end{cases} -$$ - -构造数据对 $(x_i, \cos(x_i)), i=1,...,N$。 -这意味着我们仍然能保持模型的输入、输出不变,但优化目标变成了:让 $\dfrac{\partial \hat{u}} {\partial x}$ 尽可能地接近 $\cos(x)$,且 $\hat{u}(-\pi)$ 也要尽可能地接近 $2$。 - -基于以上理论,我们对场景一的代码进行少量的改写即可得到本场景二的代码。 - -首先由于我们需要使用一阶微分这一操作,因此在代码开头处需导入一阶微分 API - -``` py linenums="1" hl_lines="2" ---8<-- -examples/quick_start/case2.py:1:5 ---8<-- -``` - -然后在原来的标签计算函数下方,新增一个微分标签值计算函数 - -``` py linenums="28" hl_lines="4" ---8<-- -examples/quick_start/case2.py:28:30 ---8<-- -``` - -接着将 `interior_constraint` 这一约束条件从约束“模型输出”,改为约束“模型输出对输入的一阶微分” - -``` py linenums="33" hl_lines="4" ---8<-- -examples/quick_start/case2.py:33:49 ---8<-- -``` - -考虑到一般情况下偏微分方程的解会存在待定系数,需通过定解条件(初(边)值条件)来确定,因此需要在 `interior_constraint` 构建代码的后面,额外添加一个边界条件约束 `bc_constraint`,如下所示 - -``` py linenums="50" ---8<-- -examples/quick_start/case2.py:50:65 ---8<-- -``` - -1. 对应边界条件 $u(x_0)=sin(x_0)+2$ - -然后将该边界约束 `bc_constraint` 添加到 `constraint` 中 - -``` py linenums="66" hl_lines="4" ---8<-- -examples/quick_start/case2.py:66:70 ---8<-- -``` - -同样地,修改 Visualizer 绘制的标准解为 $sin(x)+2$ - -``` py linenums="77" hl_lines="5" ---8<-- -examples/quick_start/case2.py:77:89 ---8<-- -``` - -修改完毕后执行训练 - -``` py linenums="91" ---8<-- -examples/quick_start/case2.py:91:102 ---8<-- -``` - -训练日志如下所示 - -``` log -... -... -ppsci INFO: [Train][Epoch 9/10][Iter 90/100] lr: 0.00200, loss: 0.00176, EQ: 0.00087, BC: 0.00088, batch_cost: 0.00346s, reader_cost: 0.00024s, ips: 9527.80, eta: 0:00:00 -ppsci INFO: [Train][Epoch 9/10][Iter 100/100] lr: 0.00200, loss: 0.00170, EQ: 0.00087, BC: 0.00083, batch_cost: 0.00349s, reader_cost: 0.00024s, ips: 9452.07, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 10/100] lr: 0.00200, loss: 0.00107, EQ: 0.00072, BC: 0.00035, batch_cost: 0.00350s, reader_cost: 0.00025s, ips: 9424.75, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 20/100] lr: 0.00200, loss: 0.00116, EQ: 0.00083, BC: 0.00033, batch_cost: 0.00350s, reader_cost: 0.00025s, ips: 9441.33, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 30/100] lr: 0.00200, loss: 0.00103, EQ: 0.00079, BC: 0.00024, batch_cost: 0.00355s, reader_cost: 0.00025s, ips: 9291.90, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 40/100] lr: 0.00200, loss: 0.00108, EQ: 0.00078, BC: 0.00030, batch_cost: 0.00353s, reader_cost: 0.00025s, ips: 9348.09, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 50/100] lr: 0.00200, loss: 0.00163, EQ: 0.00082, BC: 0.00082, batch_cost: 0.00350s, reader_cost: 0.00024s, ips: 9416.24, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 60/100] lr: 0.00200, loss: 0.00160, EQ: 0.00083, BC: 0.00077, batch_cost: 0.00353s, reader_cost: 0.00024s, ips: 9345.73, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 70/100] lr: 0.00200, loss: 0.00150, EQ: 0.00082, BC: 0.00068, batch_cost: 0.00351s, reader_cost: 0.00024s, ips: 9393.89, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 80/100] lr: 0.00200, loss: 0.00146, EQ: 0.00081, BC: 0.00064, batch_cost: 0.00350s, reader_cost: 0.00024s, ips: 9424.81, eta: 0:00:00 -ppsci INFO: [Train][Epoch 10/10][Iter 90/100] lr: 0.00200, loss: 0.00138, EQ: 0.00081, BC: 0.00058, batch_cost: 0.00349s, reader_cost: 0.00024s, ips: 9444.12, eta: 0:00:00 -ppsci INFO: 
[Train][Epoch 10/10][Iter 100/100] lr: 0.00200, loss: 0.00133, EQ: 0.00079, BC: 0.00054, batch_cost: 0.00349s, reader_cost: 0.00024s, ips: 9461.54, eta: 0:00:00 -``` - -训练完毕后再用刚才取的 1000 个点与标准解计算 L2-相对误差 - -``` py linenums="104" ---8<-- -examples/quick_start/case2.py:104:109 ---8<-- -``` - -可以看到利用微分方程训练的模型,在标准解附近仍有很好的预测能力,L2-相对误差为 0.00564。 - -预测结果可视化如下所示 - -![u=sin(x)+2 prediction](../images/quickstart/u_pred_case2.png) - -可以发现利用微分关系训练的模型仍然具备良好的预测能力,并且结合定解条件,能学习出同时符合微分方程和定解条件的正确解模型。 - -场景二的完整代码如下所示 - -``` py linenums="1" ---8<-- -examples/quick_start/case2.py ---8<-- -``` +# 快速开始 + +AI Studio快速体验 + +本文通过一个简单的 demo 及其扩展问题,介绍如何使用 PaddleScience 训练模型,解决一类方程学习与预测问题,并可视化预测结果。 + +## 1. 问题简介 + +假设我们希望用神经网络模型去拟合 $x \in [-\pi, \pi]$ 区间内,$u=\sin(x)$ 这一函数。在拟合函数已知和未知两种情形下,如何去尽可能地准确拟合 $u=\sin(x)$。 + +第一种场景下,假设已知目标函数 $u$ 的解析解就是 $u=\sin(x)$,我们采用监督训练的思路,直接用该公式生成标签因变量 $u$,与自变量 $x$ 共同作为监督数据对模型进行训练。 + +第二种场景下,假设不知道目标函数 $u$ 的解析解,但我们知道其满足某种微分关系,我们这里以其中一个满足条件的微分方程 $\dfrac{\partial u} {\partial x}=\cos(x)$ 为例,介绍如何生成数据进行训练。 + +## 2. 场景一 + +目标拟合函数: + +$$ +u=\sin(x), x \in [-\pi, \pi]. +$$ + +我们生成 $N$ 组数据对 $(x_i, u_i), i=1,...,N$ 作为监督数据进行训练即可。 + +在撰写代码之前,我们首先导入必要的包。 + +``` py linenums="1" +--8<-- +examples/quick_start/case1.py:1:4 +--8<-- +``` + +然后创建日志和模型保存目录供训练过程记录和保存使用,这一步是绝大部分案例在正式开始前都需要进行的操作。 + +``` py linenums="6" +--8<-- +examples/quick_start/case1.py:6:13 +--8<-- +``` + +接下来正式开始撰写代码。 + +首先定义问题区间,我们使用 `ppsci.geometry.Interval` 定义一个线段几何形状,方便后续在该线段上对 $x$ 进行采样。 + +``` py linenums="15" +--8<-- +examples/quick_start/case1.py:15:17 +--8<-- +``` + +然后定义一个简单的 3 层 MLP 模型。 + +``` py linenums="19" +--8<-- +examples/quick_start/case1.py:19:20 +--8<-- +``` + +上述代码表示模型接受自变量 $x$ 作为输入,输出预测结果 $\hat{u}$ + +然后我们定义已知的 $u=\sin(x)$ 计算函数,作为 `ppsci.constraint.InteriorConstraint` 的参数,用于计算标签数据,`InteriorConstraint` 表示以给定的几何形状或数据集中的数据作为输入,联合给定的标签数据,指导模型进行优化。 + +``` py linenums="22" +--8<-- +examples/quick_start/case1.py:22:47 +--8<-- +``` + +此处的 `interior_constraint` 表示一个训练目标,即我们希望在 $[-\pi, \pi]$ 这段区间内,优化模型让模型的预测结果 $\hat{u}$ 尽可能地接近它的标签值 $u$。 + +接下来就可以开始定义模型训练相关的内容,比如训练轮数、优化器、可视化器。 + +``` py linenums="48" +--8<-- +examples/quick_start/case1.py:48:66 +--8<-- +``` + +最后将上述定义的对象传递给训练调度类 `Solver`,即可开始模型训练 + +``` py linenums="67" +--8<-- +examples/quick_start/case1.py:67:79 +--8<-- +``` + +训练完毕后再用刚才取的 1000 个点与标准解计算 L2-相对误差 + +``` py linenums="81" +--8<-- +examples/quick_start/case1.py:81:86 +--8<-- +``` + +再对这 1000 个点的预测结果进行可视化 + +``` py linenums="88" +--8<-- +examples/quick_start/case1.py:88:89 +--8<-- +``` + +训练记录下所示 + +``` log +... +... 
+ppsci INFO: [Train][Epoch 9/10][Iter 80/100] lr: 0.00200, loss: 0.00663, EQ: 0.00663, batch_cost: 0.00180s, reader_cost: 0.00011s, ips: 17756.64, eta: 0:00:00 +ppsci INFO: [Train][Epoch 9/10][Iter 90/100] lr: 0.00200, loss: 0.00598, EQ: 0.00598, batch_cost: 0.00180s, reader_cost: 0.00011s, ips: 17793.97, eta: 0:00:00 +ppsci INFO: [Train][Epoch 9/10][Iter 100/100] lr: 0.00200, loss: 0.00547, EQ: 0.00547, batch_cost: 0.00179s, reader_cost: 0.00011s, ips: 17864.08, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 10/100] lr: 0.00200, loss: 0.00079, EQ: 0.00079, batch_cost: 0.00182s, reader_cost: 0.00012s, ips: 17547.05, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 20/100] lr: 0.00200, loss: 0.00075, EQ: 0.00075, batch_cost: 0.00183s, reader_cost: 0.00011s, ips: 17482.92, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 30/100] lr: 0.00200, loss: 0.00077, EQ: 0.00077, batch_cost: 0.00182s, reader_cost: 0.00011s, ips: 17539.51, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 40/100] lr: 0.00200, loss: 0.00074, EQ: 0.00074, batch_cost: 0.00182s, reader_cost: 0.00011s, ips: 17587.51, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 50/100] lr: 0.00200, loss: 0.00071, EQ: 0.00071, batch_cost: 0.00182s, reader_cost: 0.00011s, ips: 17563.59, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 60/100] lr: 0.00200, loss: 0.00070, EQ: 0.00070, batch_cost: 0.00182s, reader_cost: 0.00011s, ips: 17604.60, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 70/100] lr: 0.00200, loss: 0.00074, EQ: 0.00074, batch_cost: 0.00181s, reader_cost: 0.00011s, ips: 17699.28, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 80/100] lr: 0.00200, loss: 0.00077, EQ: 0.00077, batch_cost: 0.00180s, reader_cost: 0.00011s, ips: 17764.92, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 90/100] lr: 0.00200, loss: 0.00075, EQ: 0.00075, batch_cost: 0.00180s, reader_cost: 0.00011s, ips: 17795.87, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 100/100] lr: 0.00200, loss: 0.00071, EQ: 0.00071, batch_cost: 0.00179s, reader_cost: 0.00011s, ips: 17872.00, eta: 0:00:00 +``` + +训练完毕后再用刚才取的 1000 个点与标准解计算 L2-相对误差 + +``` py linenums="81" +--8<-- +examples/quick_start/case1.py:81:86 +--8<-- +``` + +可以看到利用标准解监督训练模型,在标准解附近仍有很好的预测能力,L2-相对误差为 0.02677。 + +预测结果可视化如下所示 + +![u=sin(x) prediction](../images/quickstart/u_pred_case1.png) + +场景一的完整代码如下所示 + +``` py linenums="1" title="examples/quick_start/case1.py" +--8<-- +examples/quick_start/case1.py +--8<-- +``` + +## 3. 
场景二 + +可以看到场景一的监督训练方式能较好地解决函数拟合问题,但一般情况下我们是无法得知拟合函数本身的解析式的,因此也无法直接构造因变量的监督数据。 + +虽然无法求出解析式直接构造监督数据,但往往可以利用相关数学知识,推导出目标拟合函数符合的某种数学关系,以训练模型以满足这种数学关系的方式,达到以“间接监督”的方式优化模型的目的。 + +假设我们不再使用 $u=\sin(x)$ 这一先验公式,因而无法计算标签数据 $u$。因此使用如下方程组,其含有一个偏微分方程和边界条件 + +$$ +\begin{cases} +\begin{aligned} + \dfrac{\partial u} {\partial x} &= \cos(x) \\ + u(-\pi) &= 2 +\end{aligned} +\end{cases} +$$ + +构造数据对 $(x_i, \cos(x_i)), i=1,...,N$。 +这意味着我们仍然能保持模型的输入、输出不变,但优化目标变成了:让 $\dfrac{\partial \hat{u}} {\partial x}$ 尽可能地接近 $\cos(x)$,且 $\hat{u}(-\pi)$ 也要尽可能地接近 $2$。 + +基于以上理论,我们对场景一的代码进行少量的改写即可得到本场景二的代码。 + +首先由于我们需要使用一阶微分这一操作,因此在代码开头处需导入一阶微分 API + +``` py linenums="1" hl_lines="2" +--8<-- +examples/quick_start/case2.py:1:5 +--8<-- +``` + +然后在原来的标签计算函数下方,新增一个微分标签值计算函数 + +``` py linenums="28" hl_lines="4" +--8<-- +examples/quick_start/case2.py:28:30 +--8<-- +``` + +接着将 `interior_constraint` 这一约束条件从约束“模型输出”,改为约束“模型输出对输入的一阶微分” + +``` py linenums="33" hl_lines="4" +--8<-- +examples/quick_start/case2.py:33:49 +--8<-- +``` + +考虑到一般情况下偏微分方程的解会存在待定系数,需通过定解条件(初(边)值条件)来确定,因此需要在 `interior_constraint` 构建代码的后面,额外添加一个边界条件约束 `bc_constraint`,如下所示 + +``` py linenums="50" +--8<-- +examples/quick_start/case2.py:50:65 +--8<-- +``` + +1. 对应边界条件 $u(x_0)=sin(x_0)+2$ + +然后将该边界约束 `bc_constraint` 添加到 `constraint` 中 + +``` py linenums="66" hl_lines="4" +--8<-- +examples/quick_start/case2.py:66:70 +--8<-- +``` + +同样地,修改 Visualizer 绘制的标准解为 $sin(x)+2$ + +``` py linenums="77" hl_lines="5" +--8<-- +examples/quick_start/case2.py:77:89 +--8<-- +``` + +修改完毕后执行训练 + +``` py linenums="91" +--8<-- +examples/quick_start/case2.py:91:102 +--8<-- +``` + +训练日志如下所示 + +``` log +... +... +ppsci INFO: [Train][Epoch 9/10][Iter 90/100] lr: 0.00200, loss: 0.00176, EQ: 0.00087, BC: 0.00088, batch_cost: 0.00346s, reader_cost: 0.00024s, ips: 9527.80, eta: 0:00:00 +ppsci INFO: [Train][Epoch 9/10][Iter 100/100] lr: 0.00200, loss: 0.00170, EQ: 0.00087, BC: 0.00083, batch_cost: 0.00349s, reader_cost: 0.00024s, ips: 9452.07, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 10/100] lr: 0.00200, loss: 0.00107, EQ: 0.00072, BC: 0.00035, batch_cost: 0.00350s, reader_cost: 0.00025s, ips: 9424.75, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 20/100] lr: 0.00200, loss: 0.00116, EQ: 0.00083, BC: 0.00033, batch_cost: 0.00350s, reader_cost: 0.00025s, ips: 9441.33, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 30/100] lr: 0.00200, loss: 0.00103, EQ: 0.00079, BC: 0.00024, batch_cost: 0.00355s, reader_cost: 0.00025s, ips: 9291.90, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 40/100] lr: 0.00200, loss: 0.00108, EQ: 0.00078, BC: 0.00030, batch_cost: 0.00353s, reader_cost: 0.00025s, ips: 9348.09, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 50/100] lr: 0.00200, loss: 0.00163, EQ: 0.00082, BC: 0.00082, batch_cost: 0.00350s, reader_cost: 0.00024s, ips: 9416.24, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 60/100] lr: 0.00200, loss: 0.00160, EQ: 0.00083, BC: 0.00077, batch_cost: 0.00353s, reader_cost: 0.00024s, ips: 9345.73, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 70/100] lr: 0.00200, loss: 0.00150, EQ: 0.00082, BC: 0.00068, batch_cost: 0.00351s, reader_cost: 0.00024s, ips: 9393.89, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 80/100] lr: 0.00200, loss: 0.00146, EQ: 0.00081, BC: 0.00064, batch_cost: 0.00350s, reader_cost: 0.00024s, ips: 9424.81, eta: 0:00:00 +ppsci INFO: [Train][Epoch 10/10][Iter 90/100] lr: 0.00200, loss: 0.00138, EQ: 0.00081, BC: 0.00058, batch_cost: 0.00349s, reader_cost: 0.00024s, ips: 9444.12, eta: 0:00:00 +ppsci INFO: 
[Train][Epoch 10/10][Iter 100/100] lr: 0.00200, loss: 0.00133, EQ: 0.00079, BC: 0.00054, batch_cost: 0.00349s, reader_cost: 0.00024s, ips: 9461.54, eta: 0:00:00 +``` + +训练完毕后再用刚才取的 1000 个点与标准解计算 L2-相对误差 + +``` py linenums="104" +--8<-- +examples/quick_start/case2.py:104:109 +--8<-- +``` + +可以看到利用微分方程训练的模型,在标准解附近仍有很好的预测能力,L2-相对误差为 0.00564。 + +预测结果可视化如下所示 + +![u=sin(x)+2 prediction](../images/quickstart/u_pred_case2.png) + +可以发现利用微分关系训练的模型仍然具备良好的预测能力,并且结合定解条件,能学习出同时符合微分方程和定解条件的正确解模型。 + +场景二的完整代码如下所示 + +``` py linenums="1" +--8<-- +examples/quick_start/case2.py +--8<-- +``` diff --git a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py index 939e886674..fbe6d6bf98 100644 --- a/examples/NLS-MB/NLS-MB_optical_rogue_wave.py +++ b/examples/NLS-MB/NLS-MB_optical_rogue_wave.py @@ -1,455 +1,455 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import hydra -import numpy as np -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def analytic_solution(out): - I = 1j - t = out["t"] - x = out["x"] - EExact = ( - (-1565 * x**2 + (648 * I + 76 * t) * x - 68 * t**2 + 51) - * np.exp(-I / 8 * (-12 * t + 65 * x)) - / (1565 * x**2 - 76 * x * t + 68 * t**2 + 17) - ) - pExact = ( - ( - 9796900 * I * x**4 - + (4056480 - 951520 * I * t) * x**3 - + (-579432 * I + 874464 * I * t**2 - 196992 * t) * x**2 - + (-36448 - 41344 * I * t**3 + 176256 * t**2 - 50592 * I * t) * x - + 884 * I - + 18496 * I * t**4 - + 8160 * I * t**2 - - 4352 * t - ) - * np.exp(-I / 8 * (-12 * t + 65 * x)) - / (1565 * x**2 - 76 * x * t + 68 * t**2 + 17) ** 2 - ) - etaExact = ( - 4624 * t**4 - - 10336 * t**3 * x - + (218616 * x**2 + 6664) * t**2 - + (-237880 * x**3 + 158440 * x) * t - + 2449225 * x**4 - - 136934 * x**2 - - 799 - ) / (1565 * x**2 - 76 * x * t + 68 * t**2 + 17) ** 2 - - return ( - np.real(EExact), - np.imag(EExact), - np.real(pExact), - np.imag(pExact), - etaExact, - ) - - -def plot( - t: np.ndarray, - x: np.ndarray, - E_ref: np.ndarray, - E_pred: np.ndarray, - p_ref: np.ndarray, - p_pred: np.ndarray, - eta_ref: np.ndarray, - eta_pred: np.ndarray, - output_dir: str, -): - fig = plt.figure(figsize=(10, 10)) - plt.subplot(3, 3, 1) - plt.title("E_ref") - plt.tricontourf(x, t, E_ref, levels=256, cmap="jet") - plt.subplot(3, 3, 2) - plt.title("E_pred") - plt.tricontourf(x, t, E_pred, levels=256, cmap="jet") - plt.subplot(3, 3, 3) - plt.title("E_diff") - plt.tricontourf(x, t, np.abs(E_ref - E_pred), levels=256, cmap="jet") - plt.subplot(3, 3, 4) - plt.title("p_ref") - plt.tricontourf(x, t, p_ref, levels=256, cmap="jet") - plt.subplot(3, 3, 5) - plt.title("p_pred") - plt.tricontourf(x, t, p_pred, levels=256, cmap="jet") - plt.subplot(3, 3, 6) - plt.title("p_diff") - plt.tricontourf(x, t, np.abs(p_ref - p_pred), levels=256, cmap="jet") - plt.subplot(3, 3, 7) - plt.title("eta_ref") - plt.tricontourf(x, t, eta_ref, levels=256, 
cmap="jet") - plt.subplot(3, 3, 8) - plt.title("eta_pred") - plt.tricontourf(x, t, eta_pred, levels=256, cmap="jet") - plt.subplot(3, 3, 9) - plt.title("eta_diff") - plt.tricontourf(x, t, np.abs(eta_ref - eta_pred), levels=256, cmap="jet") - - fig_path = osp.join(output_dir, "pred_optical_rogue_wave.png") - print(f"Saving figure to {fig_path}") - fig.savefig(fig_path, bbox_inches="tight", dpi=400) - plt.close() - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = { - "NLS-MB": ppsci.equation.NLSMB(alpha_1=0.5, alpha_2=-1, omega_0=0.5, time=True) - } - - # set geometry - x_lower = -0.5 - x_upper = 0.5 - t_lower = -2.5 - t_upper = 2.5 - # set timestamps(including initial t0) - timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_interval": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), - ppsci.geometry.Interval(x_lower, x_upper), - ) - } - - X, T = np.meshgrid( - np.linspace(x_lower, x_upper, 256), np.linspace(t_lower, t_upper, 256) - ) - X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) - - # Boundary and Initial conditions - ic = X_star[:, 1] == t_lower - idx_ic = np.random.choice(np.where(ic)[0], 200, replace=False) - lb = X_star[:, 0] == x_lower - idx_lb = np.random.choice(np.where(lb)[0], 200, replace=False) - ub = X_star[:, 0] == x_upper - idx_ub = np.random.choice(np.where(ub)[0], 200, replace=False) - icbc_idx = np.hstack((idx_lb, idx_ic, idx_ub)) - X_u_train = X_star[icbc_idx].astype("float32") - X_u_train = {"t": X_u_train[:, 1:2], "x": X_u_train[:, 0:1]} - - Eu_train, Ev_train, pu_train, pv_train, eta_train = analytic_solution(X_u_train) - - train_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": {"t": X_u_train["t"], "x": X_u_train["x"]}, - "label": { - "Eu": Eu_train, - "Ev": Ev_train, - "pu": pu_train, - "pv": pv_train, - "eta": eta_train, - }, - }, - "batch_size": 600, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - - # set constraint - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["NLS-MB"].equations, - { - "Schrodinger_1": 0, - "Schrodinger_2": 0, - "Maxwell_1": 0, - "Maxwell_2": 0, - "Bloch": 0, - }, - geom["time_interval"], - { - "dataset": {"name": "IterableNamedArrayDataset"}, - "batch_size": 20000, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss(), - evenly=True, - name="EQ", - ) - - # supervised constraint s.t ||u-u_0|| - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELoss("mean"), - name="Sup", - ) - - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - sup_constraint.name: sup_constraint, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) - - # set validator - residual_validator = ppsci.validate.GeometryValidator( - equation["NLS-MB"].equations, - { - "Schrodinger_1": 0, - "Schrodinger_2": 0, - "Maxwell_1": 0, - "Maxwell_2": 0, - "Bloch": 0, - }, - geom["time_interval"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": 20600, - }, - ppsci.loss.MSELoss(), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - with_initial=True, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - 
iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # fine-tuning pretrained model with L-BFGS - OUTPUT_DIR = cfg.TRAIN.lbfgs.output_dir - logger.init_logger("ppsci", osp.join(OUTPUT_DIR, f"{cfg.mode}.log"), "info") - EPOCHS = cfg.TRAIN.epochs // 10 - optimizer_lbfgs = ppsci.optimizer.LBFGS( - cfg.TRAIN.lbfgs.learning_rate, cfg.TRAIN.lbfgs.max_iter - )(model) - solver = ppsci.solver.Solver( - model, - constraint, - OUTPUT_DIR, - optimizer_lbfgs, - None, - EPOCHS, - cfg.TRAIN.lbfgs.iters_per_epoch, - eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, - eval_freq=cfg.TRAIN.lbfgs.eval_freq, - equation=equation, - geom=geom, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # visualize prediction - vis_points = geom["time_interval"].sample_interior(20000, evenly=True) - Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(vis_points) - pred = solver.predict(vis_points, return_numpy=True) - t = vis_points["t"][:, 0] - x = vis_points["x"][:, 0] - E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] - E_pred = np.sqrt(pred["Eu"] ** 2 + pred["Ev"] ** 2)[:, 0] - p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] - p_pred = np.sqrt(pred["pu"] ** 2 + pred["pv"] ** 2)[:, 0] - eta_ref = eta_true[:, 0] - eta_pred = pred["eta"][:, 0] - - # plot - plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = { - "NLS-MB": ppsci.equation.NLSMB(alpha_1=0.5, alpha_2=-1, omega_0=0.5, time=True) - } - - # set geometry - x_lower = -0.5 - x_upper = 0.5 - t_lower = -2.5 - t_upper = 2.5 - # set timestamps(including initial t0) - timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_interval": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), - ppsci.geometry.Interval(x_lower, x_upper), - ) - } - - # set validator - residual_validator = ppsci.validate.GeometryValidator( - equation["NLS-MB"].equations, - { - "Schrodinger_1": 0, - "Schrodinger_2": 0, - "Maxwell_1": 0, - "Maxwell_2": 0, - "Bloch": 0, - }, - geom["time_interval"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": 20600, - }, - ppsci.loss.MSELoss(), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - with_initial=True, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - - # visualize prediction - vis_points = geom["time_interval"].sample_interior(20000, evenly=True) - Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(vis_points) - pred = solver.predict(vis_points, return_numpy=True) - t = vis_points["t"][:, 0] - x = vis_points["x"][:, 0] - E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] - E_pred = np.sqrt(pred["Eu"] ** 2 + pred["Ev"] ** 2)[:, 0] - p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] - p_pred = np.sqrt(pred["pu"] ** 2 + pred["pv"] ** 2)[:, 0] - eta_ref = eta_true[:, 0] - eta_pred = pred["eta"][:, 0] 
- - # plot - plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - x_lower = -0.5 - x_upper = 0.5 - t_lower = -2.5 - t_upper = 2.5 - # set timestamps(including initial t0) - timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_interval": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), - ppsci.geometry.Interval(x_lower, x_upper), - ) - } - - NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC - input_dict = geom["time_interval"].sample_interior(NPOINT_TOTAL, evenly=True) - - output_dict = predictor.predict( - {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - # visualize prediction - Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(input_dict) - t = input_dict["t"][:, 0] - x = input_dict["x"][:, 0] - E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] - E_pred = np.sqrt(output_dict["Eu"] ** 2 + output_dict["Ev"] ** 2)[:, 0] - p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] - p_pred = np.sqrt(output_dict["pu"] ** 2 + output_dict["pv"] ** 2)[:, 0] - eta_ref = eta_true[:, 0] - eta_pred = output_dict["eta"][:, 0] - - # plot - plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="NLS-MB_rogue_wave.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
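+
+# Descriptive note (added for clarity): this script trains a physics-informed
+# network for the coupled nonlinear Schrodinger-Maxwell-Bloch (NLS-MB) system.
+# The analytic optical rogue-wave solution in `analytic_solution` below supplies
+# supervised labels on the initial/boundary points ("Sup" constraint), while the
+# PDE residuals (Schrodinger_1/2, Maxwell_1/2, Bloch) are driven to zero on
+# interior points via `InteriorConstraint` ("EQ" constraint).
+# Usage sketch, assuming the accompanying Hydra config conf/NLS-MB_rogue_wave.yaml:
+#   python NLS-MB_optical_rogue_wave.py mode=train   (or mode=eval/export/infer)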
+ +from os import path as osp + +import hydra +import numpy as np +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def analytic_solution(out): + I = 1j + t = out["t"] + x = out["x"] + EExact = ( + (-1565 * x**2 + (648 * I + 76 * t) * x - 68 * t**2 + 51) + * np.exp(-I / 8 * (-12 * t + 65 * x)) + / (1565 * x**2 - 76 * x * t + 68 * t**2 + 17) + ) + pExact = ( + ( + 9796900 * I * x**4 + + (4056480 - 951520 * I * t) * x**3 + + (-579432 * I + 874464 * I * t**2 - 196992 * t) * x**2 + + (-36448 - 41344 * I * t**3 + 176256 * t**2 - 50592 * I * t) * x + + 884 * I + + 18496 * I * t**4 + + 8160 * I * t**2 + - 4352 * t + ) + * np.exp(-I / 8 * (-12 * t + 65 * x)) + / (1565 * x**2 - 76 * x * t + 68 * t**2 + 17) ** 2 + ) + etaExact = ( + 4624 * t**4 + - 10336 * t**3 * x + + (218616 * x**2 + 6664) * t**2 + + (-237880 * x**3 + 158440 * x) * t + + 2449225 * x**4 + - 136934 * x**2 + - 799 + ) / (1565 * x**2 - 76 * x * t + 68 * t**2 + 17) ** 2 + + return ( + np.real(EExact), + np.imag(EExact), + np.real(pExact), + np.imag(pExact), + etaExact, + ) + + +def plot( + t: np.ndarray, + x: np.ndarray, + E_ref: np.ndarray, + E_pred: np.ndarray, + p_ref: np.ndarray, + p_pred: np.ndarray, + eta_ref: np.ndarray, + eta_pred: np.ndarray, + output_dir: str, +): + fig = plt.figure(figsize=(10, 10)) + plt.subplot(3, 3, 1) + plt.title("E_ref") + plt.tricontourf(x, t, E_ref, levels=256, cmap="jet") + plt.subplot(3, 3, 2) + plt.title("E_pred") + plt.tricontourf(x, t, E_pred, levels=256, cmap="jet") + plt.subplot(3, 3, 3) + plt.title("E_diff") + plt.tricontourf(x, t, np.abs(E_ref - E_pred), levels=256, cmap="jet") + plt.subplot(3, 3, 4) + plt.title("p_ref") + plt.tricontourf(x, t, p_ref, levels=256, cmap="jet") + plt.subplot(3, 3, 5) + plt.title("p_pred") + plt.tricontourf(x, t, p_pred, levels=256, cmap="jet") + plt.subplot(3, 3, 6) + plt.title("p_diff") + plt.tricontourf(x, t, np.abs(p_ref - p_pred), levels=256, cmap="jet") + plt.subplot(3, 3, 7) + plt.title("eta_ref") + plt.tricontourf(x, t, eta_ref, levels=256, cmap="jet") + plt.subplot(3, 3, 8) + plt.title("eta_pred") + plt.tricontourf(x, t, eta_pred, levels=256, cmap="jet") + plt.subplot(3, 3, 9) + plt.title("eta_diff") + plt.tricontourf(x, t, np.abs(eta_ref - eta_pred), levels=256, cmap="jet") + + fig_path = osp.join(output_dir, "pred_optical_rogue_wave.png") + print(f"Saving figure to {fig_path}") + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = { + "NLS-MB": ppsci.equation.NLSMB(alpha_1=0.5, alpha_2=-1, omega_0=0.5, time=True) + } + + # set geometry + x_lower = -0.5 + x_upper = 0.5 + t_lower = -2.5 + t_upper = 2.5 + # set timestamps(including initial t0) + timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_interval": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), + ppsci.geometry.Interval(x_lower, x_upper), + ) + } + + X, T = np.meshgrid( + np.linspace(x_lower, x_upper, 256), np.linspace(t_lower, t_upper, 256) + ) + X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) + + # Boundary and Initial conditions + ic = X_star[:, 1] == t_lower + idx_ic = np.random.choice(np.where(ic)[0], 200, replace=False) + lb = X_star[:, 0] == x_lower + idx_lb = np.random.choice(np.where(lb)[0], 200, replace=False) + ub = X_star[:, 0] == x_upper + idx_ub = 
np.random.choice(np.where(ub)[0], 200, replace=False) + icbc_idx = np.hstack((idx_lb, idx_ic, idx_ub)) + X_u_train = X_star[icbc_idx].astype("float32") + X_u_train = {"t": X_u_train[:, 1:2], "x": X_u_train[:, 0:1]} + + Eu_train, Ev_train, pu_train, pv_train, eta_train = analytic_solution(X_u_train) + + train_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"t": X_u_train["t"], "x": X_u_train["x"]}, + "label": { + "Eu": Eu_train, + "Ev": Ev_train, + "pu": pu_train, + "pv": pv_train, + "eta": eta_train, + }, + }, + "batch_size": 600, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + + # set constraint + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NLS-MB"].equations, + { + "Schrodinger_1": 0, + "Schrodinger_2": 0, + "Maxwell_1": 0, + "Maxwell_2": 0, + "Bloch": 0, + }, + geom["time_interval"], + { + "dataset": {"name": "IterableNamedArrayDataset"}, + "batch_size": 20000, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss(), + evenly=True, + name="EQ", + ) + + # supervised constraint s.t ||u-u_0|| + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELoss("mean"), + name="Sup", + ) + + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + sup_constraint.name: sup_constraint, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) + + # set validator + residual_validator = ppsci.validate.GeometryValidator( + equation["NLS-MB"].equations, + { + "Schrodinger_1": 0, + "Schrodinger_2": 0, + "Maxwell_1": 0, + "Maxwell_2": 0, + "Bloch": 0, + }, + geom["time_interval"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": 20600, + }, + ppsci.loss.MSELoss(), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + with_initial=True, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # fine-tuning pretrained model with L-BFGS + OUTPUT_DIR = cfg.TRAIN.lbfgs.output_dir + logger.init_logger("ppsci", osp.join(OUTPUT_DIR, f"{cfg.mode}.log"), "info") + EPOCHS = cfg.TRAIN.epochs // 10 + optimizer_lbfgs = ppsci.optimizer.LBFGS( + cfg.TRAIN.lbfgs.learning_rate, cfg.TRAIN.lbfgs.max_iter + )(model) + solver = ppsci.solver.Solver( + model, + constraint, + OUTPUT_DIR, + optimizer_lbfgs, + None, + EPOCHS, + cfg.TRAIN.lbfgs.iters_per_epoch, + eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, + eval_freq=cfg.TRAIN.lbfgs.eval_freq, + equation=equation, + geom=geom, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # visualize prediction + vis_points = geom["time_interval"].sample_interior(20000, evenly=True) + Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(vis_points) + pred = solver.predict(vis_points, return_numpy=True) + t = vis_points["t"][:, 0] + x = vis_points["x"][:, 0] + E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] + E_pred = np.sqrt(pred["Eu"] ** 2 + pred["Ev"] ** 2)[:, 0] + p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] + p_pred = np.sqrt(pred["pu"] ** 2 + pred["pv"] ** 2)[:, 0] 
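+    # E and p are complex fields, so their moduli |E| and |p| are compared above;
+    # eta is real-valued and is compared directly below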
+ eta_ref = eta_true[:, 0] + eta_pred = pred["eta"][:, 0] + + # plot + plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = { + "NLS-MB": ppsci.equation.NLSMB(alpha_1=0.5, alpha_2=-1, omega_0=0.5, time=True) + } + + # set geometry + x_lower = -0.5 + x_upper = 0.5 + t_lower = -2.5 + t_upper = 2.5 + # set timestamps(including initial t0) + timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_interval": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), + ppsci.geometry.Interval(x_lower, x_upper), + ) + } + + # set validator + residual_validator = ppsci.validate.GeometryValidator( + equation["NLS-MB"].equations, + { + "Schrodinger_1": 0, + "Schrodinger_2": 0, + "Maxwell_1": 0, + "Maxwell_2": 0, + "Bloch": 0, + }, + geom["time_interval"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": 20600, + }, + ppsci.loss.MSELoss(), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + with_initial=True, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + + # visualize prediction + vis_points = geom["time_interval"].sample_interior(20000, evenly=True) + Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(vis_points) + pred = solver.predict(vis_points, return_numpy=True) + t = vis_points["t"][:, 0] + x = vis_points["x"][:, 0] + E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] + E_pred = np.sqrt(pred["Eu"] ** 2 + pred["Ev"] ** 2)[:, 0] + p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] + p_pred = np.sqrt(pred["pu"] ** 2 + pred["pv"] ** 2)[:, 0] + eta_ref = eta_true[:, 0] + eta_pred = pred["eta"][:, 0] + + # plot + plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + x_lower = -0.5 + x_upper = 0.5 + t_lower = -2.5 + t_upper = 2.5 + # set timestamps(including initial t0) + timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_interval": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), + ppsci.geometry.Interval(x_lower, x_upper), + ) + } + + NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC + input_dict = geom["time_interval"].sample_interior(NPOINT_TOTAL, evenly=True) + + output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, 
output_dict.keys()) + } + + # visualize prediction + Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(input_dict) + t = input_dict["t"][:, 0] + x = input_dict["x"][:, 0] + E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] + E_pred = np.sqrt(output_dict["Eu"] ** 2 + output_dict["Ev"] ** 2)[:, 0] + p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] + p_pred = np.sqrt(output_dict["pu"] ** 2 + output_dict["pv"] ** 2)[:, 0] + eta_ref = eta_true[:, 0] + eta_pred = output_dict["eta"][:, 0] + + # plot + plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="NLS-MB_rogue_wave.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/NLS-MB/NLS-MB_optical_soliton.py b/examples/NLS-MB/NLS-MB_optical_soliton.py index 14a5a5d720..901b8044ef 100644 --- a/examples/NLS-MB/NLS-MB_optical_soliton.py +++ b/examples/NLS-MB/NLS-MB_optical_soliton.py @@ -1,430 +1,430 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from os import path as osp - -import hydra -import numpy as np -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def analytic_solution(out): - t, x = out["t"], out["x"] - Eu_true = 2 * np.cos(2 * t) / np.cosh(2 * t + 6 * x) - - Ev_true = -2 * np.sin(2 * t) / np.cosh(2 * t + 6 * x) - - pu_true = ( - (np.exp(-2 * t - 6 * x) - np.exp(2 * t + 6 * x)) - * np.cos(2 * t) - / np.cosh(2 * t + 6 * x) ** 2 - ) - pv_true = ( - -(np.exp(-2 * t - 6 * x) - np.exp(2 * t + 6 * x)) - * np.sin(2 * t) - / np.cosh(2 * t + 6 * x) ** 2 - ) - eta_true = (np.cosh(2 * t + 6 * x) ** 2 - 2) / np.cosh(2 * t + 6 * x) ** 2 - - return Eu_true, Ev_true, pu_true, pv_true, eta_true - - -def plot( - t: np.ndarray, - x: np.ndarray, - E_ref: np.ndarray, - E_pred: np.ndarray, - p_ref: np.ndarray, - p_pred: np.ndarray, - eta_ref: np.ndarray, - eta_pred: np.ndarray, - output_dir: str, -): - fig = plt.figure(figsize=(10, 10)) - plt.subplot(3, 3, 1) - plt.title("E_ref") - plt.tricontourf(x, t, E_ref, levels=256, cmap="jet") - plt.subplot(3, 3, 2) - plt.title("E_pred") - plt.tricontourf(x, t, E_pred, levels=256, cmap="jet") - plt.subplot(3, 3, 3) - plt.title("E_diff") - plt.tricontourf(x, t, np.abs(E_ref - E_pred), levels=256, cmap="jet") - plt.subplot(3, 3, 4) - plt.title("p_ref") - plt.tricontourf(x, t, p_ref, levels=256, cmap="jet") - plt.subplot(3, 3, 5) - plt.title("p_pred") - plt.tricontourf(x, t, p_pred, levels=256, cmap="jet") - plt.subplot(3, 3, 6) - plt.title("p_diff") - plt.tricontourf(x, t, np.abs(p_ref - p_pred), levels=256, cmap="jet") - plt.subplot(3, 3, 7) - plt.title("eta_ref") - plt.tricontourf(x, t, eta_ref, levels=256, cmap="jet") - plt.subplot(3, 3, 8) - plt.title("eta_pred") - plt.tricontourf(x, t, eta_pred, levels=256, cmap="jet") - plt.subplot(3, 3, 9) - plt.title("eta_diff") - plt.tricontourf(x, t, np.abs(eta_ref - eta_pred), levels=256, cmap="jet") - fig_path = osp.join(output_dir, "pred_optical_soliton.png") - print(f"Saving figure to {fig_path}") - fig.savefig(fig_path, bbox_inches="tight", dpi=400) - plt.close() - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = { - "NLS-MB": ppsci.equation.NLSMB(alpha_1=0.5, alpha_2=-1, omega_0=-1, time=True) - } - - x_lower = -1 - x_upper = 1 - t_lower = -1 - t_upper = 1 - # set timestamps(including initial t0) - timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_interval": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), - ppsci.geometry.Interval(x_lower, x_upper), - ) - } - - X, T = np.meshgrid( - np.linspace(x_lower, x_upper, 256), np.linspace(t_lower, t_upper, 256) - ) - X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) - - # Boundary and Initial conditions - ic = X_star[:, 1] == t_lower - idx_ic = np.random.choice(np.where(ic)[0], 200, replace=False) - lb = X_star[:, 0] == x_lower - idx_lb = np.random.choice(np.where(lb)[0], 200, replace=False) - ub = X_star[:, 0] == x_upper - idx_ub = np.random.choice(np.where(ub)[0], 200, replace=False) - icbc_idx = np.hstack((idx_lb, idx_ic, idx_ub)) - X_u_train = X_star[icbc_idx].astype("float32") - X_u_train = {"t": X_u_train[:, 1:2], "x": X_u_train[:, 0:1]} - - Eu_train, Ev_train, pu_train, pv_train, eta_train = analytic_solution(X_u_train) - - train_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": {"t": X_u_train["t"], "x": 
X_u_train["x"]}, - "label": { - "Eu": Eu_train, - "Ev": Ev_train, - "pu": pu_train, - "pv": pv_train, - "eta": eta_train, - }, - }, - "batch_size": 600, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - - # set constraint - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["NLS-MB"].equations, - { - "Schrodinger_1": 0, - "Schrodinger_2": 0, - "Maxwell_1": 0, - "Maxwell_2": 0, - "Bloch": 0, - }, - geom["time_interval"], - { - "dataset": {"name": "IterableNamedArrayDataset"}, - "batch_size": 20000, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss(), - evenly=True, - name="EQ", - ) - - # supervised constraint s.t ||u-u_0|| - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELoss("mean"), - name="Sup", - ) - - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - sup_constraint.name: sup_constraint, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) - - # set validator - residual_validator = ppsci.validate.GeometryValidator( - equation["NLS-MB"].equations, - { - "Schrodinger_1": 0, - "Schrodinger_2": 0, - "Maxwell_1": 0, - "Maxwell_2": 0, - "Bloch": 0, - }, - geom["time_interval"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": 20600, - }, - ppsci.loss.MSELoss(), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - with_initial=True, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # fine-tuning pretrained model with L-BFGS - OUTPUT_DIR = cfg.TRAIN.lbfgs.output_dir - logger.init_logger("ppsci", osp.join(OUTPUT_DIR, f"{cfg.mode}.log"), "info") - EPOCHS = cfg.TRAIN.epochs // 10 - optimizer_lbfgs = ppsci.optimizer.LBFGS( - cfg.TRAIN.lbfgs.learning_rate, cfg.TRAIN.lbfgs.max_iter - )(model) - solver = ppsci.solver.Solver( - model, - constraint, - OUTPUT_DIR, - optimizer_lbfgs, - None, - EPOCHS, - cfg.TRAIN.lbfgs.iters_per_epoch, - eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, - eval_freq=cfg.TRAIN.lbfgs.eval_freq, - equation=equation, - geom=geom, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # visualize prediction - vis_points = geom["time_interval"].sample_interior(20000, evenly=True) - Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(vis_points) - pred = solver.predict(vis_points, return_numpy=True) - t = vis_points["t"][:, 0] - x = vis_points["x"][:, 0] - E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] - E_pred = np.sqrt(pred["Eu"] ** 2 + pred["Ev"] ** 2)[:, 0] - p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] - p_pred = np.sqrt(pred["pu"] ** 2 + pred["pv"] ** 2)[:, 0] - eta_ref = eta_true[:, 0] - eta_pred = pred["eta"][:, 0] - - # plot - plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = { - "NLS-MB": ppsci.equation.NLSMB(alpha_1=0.5, alpha_2=-1, omega_0=-1, time=True) - } - - # set geometry - x_lower = -1 - x_upper = 1 - t_lower = 
-1 - t_upper = 1 - # set timestamps(including initial t0) - timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_interval": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), - ppsci.geometry.Interval(x_lower, x_upper), - ) - } - - # set validator - residual_validator = ppsci.validate.GeometryValidator( - equation["NLS-MB"].equations, - { - "Schrodinger_1": 0, - "Schrodinger_2": 0, - "Maxwell_1": 0, - "Maxwell_2": 0, - "Bloch": 0, - }, - geom["time_interval"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": 20600, - }, - ppsci.loss.MSELoss(), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - with_initial=True, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - - # visualize prediction - vis_points = geom["time_interval"].sample_interior(20000, evenly=True) - Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(vis_points) - pred = solver.predict(vis_points, return_numpy=True) - t = vis_points["t"][:, 0] - x = vis_points["x"][:, 0] - E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] - E_pred = np.sqrt(pred["Eu"] ** 2 + pred["Ev"] ** 2)[:, 0] - p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] - p_pred = np.sqrt(pred["pu"] ** 2 + pred["pv"] ** 2)[:, 0] - eta_ref = eta_true[:, 0] - eta_pred = pred["eta"][:, 0] - - # plot - plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - x_lower = -1 - x_upper = 1 - t_lower = -1 - t_upper = 1 - # set timestamps(including initial t0) - timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_interval": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), - ppsci.geometry.Interval(x_lower, x_upper), - ) - } - - NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC - input_dict = geom["time_interval"].sample_interior(NPOINT_TOTAL, evenly=True) - - output_dict = predictor.predict( - {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - # visualize prediction - Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(input_dict) - t = input_dict["t"][:, 0] - x = input_dict["x"][:, 0] - E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] - E_pred = np.sqrt(output_dict["Eu"] ** 2 + output_dict["Ev"] ** 2)[:, 0] - p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] - p_pred = np.sqrt(output_dict["pu"] ** 2 + output_dict["pv"] ** 2)[:, 0] - eta_ref = 
eta_true[:, 0] - eta_pred = output_dict["eta"][:, 0] - - # plot - plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) - - -@hydra.main(version_base=None, config_path="./conf", config_name="NLS-MB_soliton.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp + +import hydra +import numpy as np +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def analytic_solution(out): + t, x = out["t"], out["x"] + Eu_true = 2 * np.cos(2 * t) / np.cosh(2 * t + 6 * x) + + Ev_true = -2 * np.sin(2 * t) / np.cosh(2 * t + 6 * x) + + pu_true = ( + (np.exp(-2 * t - 6 * x) - np.exp(2 * t + 6 * x)) + * np.cos(2 * t) + / np.cosh(2 * t + 6 * x) ** 2 + ) + pv_true = ( + -(np.exp(-2 * t - 6 * x) - np.exp(2 * t + 6 * x)) + * np.sin(2 * t) + / np.cosh(2 * t + 6 * x) ** 2 + ) + eta_true = (np.cosh(2 * t + 6 * x) ** 2 - 2) / np.cosh(2 * t + 6 * x) ** 2 + + return Eu_true, Ev_true, pu_true, pv_true, eta_true + + +def plot( + t: np.ndarray, + x: np.ndarray, + E_ref: np.ndarray, + E_pred: np.ndarray, + p_ref: np.ndarray, + p_pred: np.ndarray, + eta_ref: np.ndarray, + eta_pred: np.ndarray, + output_dir: str, +): + fig = plt.figure(figsize=(10, 10)) + plt.subplot(3, 3, 1) + plt.title("E_ref") + plt.tricontourf(x, t, E_ref, levels=256, cmap="jet") + plt.subplot(3, 3, 2) + plt.title("E_pred") + plt.tricontourf(x, t, E_pred, levels=256, cmap="jet") + plt.subplot(3, 3, 3) + plt.title("E_diff") + plt.tricontourf(x, t, np.abs(E_ref - E_pred), levels=256, cmap="jet") + plt.subplot(3, 3, 4) + plt.title("p_ref") + plt.tricontourf(x, t, p_ref, levels=256, cmap="jet") + plt.subplot(3, 3, 5) + plt.title("p_pred") + plt.tricontourf(x, t, p_pred, levels=256, cmap="jet") + plt.subplot(3, 3, 6) + plt.title("p_diff") + plt.tricontourf(x, t, np.abs(p_ref - p_pred), levels=256, cmap="jet") + plt.subplot(3, 3, 7) + plt.title("eta_ref") + plt.tricontourf(x, t, eta_ref, levels=256, cmap="jet") + plt.subplot(3, 3, 8) + plt.title("eta_pred") + plt.tricontourf(x, t, eta_pred, levels=256, cmap="jet") + plt.subplot(3, 3, 9) + plt.title("eta_diff") + plt.tricontourf(x, t, np.abs(eta_ref - eta_pred), levels=256, cmap="jet") + fig_path = osp.join(output_dir, "pred_optical_soliton.png") + print(f"Saving figure to {fig_path}") + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = { + "NLS-MB": ppsci.equation.NLSMB(alpha_1=0.5, alpha_2=-1, omega_0=-1, time=True) + } + + x_lower = -1 + x_upper = 1 
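+    # time bounds of the space-time domain; together with x in [-1, 1] above,
+    # this matches the analytic soliton solution used for the supervised IC/BC labels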
+ t_lower = -1 + t_upper = 1 + # set timestamps(including initial t0) + timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_interval": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), + ppsci.geometry.Interval(x_lower, x_upper), + ) + } + + X, T = np.meshgrid( + np.linspace(x_lower, x_upper, 256), np.linspace(t_lower, t_upper, 256) + ) + X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) + + # Boundary and Initial conditions + ic = X_star[:, 1] == t_lower + idx_ic = np.random.choice(np.where(ic)[0], 200, replace=False) + lb = X_star[:, 0] == x_lower + idx_lb = np.random.choice(np.where(lb)[0], 200, replace=False) + ub = X_star[:, 0] == x_upper + idx_ub = np.random.choice(np.where(ub)[0], 200, replace=False) + icbc_idx = np.hstack((idx_lb, idx_ic, idx_ub)) + X_u_train = X_star[icbc_idx].astype("float32") + X_u_train = {"t": X_u_train[:, 1:2], "x": X_u_train[:, 0:1]} + + Eu_train, Ev_train, pu_train, pv_train, eta_train = analytic_solution(X_u_train) + + train_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"t": X_u_train["t"], "x": X_u_train["x"]}, + "label": { + "Eu": Eu_train, + "Ev": Ev_train, + "pu": pu_train, + "pv": pv_train, + "eta": eta_train, + }, + }, + "batch_size": 600, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + + # set constraint + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NLS-MB"].equations, + { + "Schrodinger_1": 0, + "Schrodinger_2": 0, + "Maxwell_1": 0, + "Maxwell_2": 0, + "Bloch": 0, + }, + geom["time_interval"], + { + "dataset": {"name": "IterableNamedArrayDataset"}, + "batch_size": 20000, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss(), + evenly=True, + name="EQ", + ) + + # supervised constraint s.t ||u-u_0|| + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELoss("mean"), + name="Sup", + ) + + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + sup_constraint.name: sup_constraint, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) + + # set validator + residual_validator = ppsci.validate.GeometryValidator( + equation["NLS-MB"].equations, + { + "Schrodinger_1": 0, + "Schrodinger_2": 0, + "Maxwell_1": 0, + "Maxwell_2": 0, + "Bloch": 0, + }, + geom["time_interval"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": 20600, + }, + ppsci.loss.MSELoss(), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + with_initial=True, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # fine-tuning pretrained model with L-BFGS + OUTPUT_DIR = cfg.TRAIN.lbfgs.output_dir + logger.init_logger("ppsci", osp.join(OUTPUT_DIR, f"{cfg.mode}.log"), "info") + EPOCHS = cfg.TRAIN.epochs // 10 + optimizer_lbfgs = ppsci.optimizer.LBFGS( + cfg.TRAIN.lbfgs.learning_rate, cfg.TRAIN.lbfgs.max_iter + )(model) + solver = ppsci.solver.Solver( + model, + constraint, + OUTPUT_DIR, + optimizer_lbfgs, + None, + 
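+        # EPOCHS (= cfg.TRAIN.epochs // 10) and the iters_per_epoch below
+        # configure the shorter L-BFGS fine-tuning stage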
EPOCHS, + cfg.TRAIN.lbfgs.iters_per_epoch, + eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, + eval_freq=cfg.TRAIN.lbfgs.eval_freq, + equation=equation, + geom=geom, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # visualize prediction + vis_points = geom["time_interval"].sample_interior(20000, evenly=True) + Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(vis_points) + pred = solver.predict(vis_points, return_numpy=True) + t = vis_points["t"][:, 0] + x = vis_points["x"][:, 0] + E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] + E_pred = np.sqrt(pred["Eu"] ** 2 + pred["Ev"] ** 2)[:, 0] + p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] + p_pred = np.sqrt(pred["pu"] ** 2 + pred["pv"] ** 2)[:, 0] + eta_ref = eta_true[:, 0] + eta_pred = pred["eta"][:, 0] + + # plot + plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = { + "NLS-MB": ppsci.equation.NLSMB(alpha_1=0.5, alpha_2=-1, omega_0=-1, time=True) + } + + # set geometry + x_lower = -1 + x_upper = 1 + t_lower = -1 + t_upper = 1 + # set timestamps(including initial t0) + timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_interval": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), + ppsci.geometry.Interval(x_lower, x_upper), + ) + } + + # set validator + residual_validator = ppsci.validate.GeometryValidator( + equation["NLS-MB"].equations, + { + "Schrodinger_1": 0, + "Schrodinger_2": 0, + "Maxwell_1": 0, + "Maxwell_2": 0, + "Bloch": 0, + }, + geom["time_interval"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": 20600, + }, + ppsci.loss.MSELoss(), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + with_initial=True, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + + # visualize prediction + vis_points = geom["time_interval"].sample_interior(20000, evenly=True) + Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(vis_points) + pred = solver.predict(vis_points, return_numpy=True) + t = vis_points["t"][:, 0] + x = vis_points["x"][:, 0] + E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] + E_pred = np.sqrt(pred["Eu"] ** 2 + pred["Ev"] ** 2)[:, 0] + p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] + p_pred = np.sqrt(pred["pu"] ** 2 + pred["pv"] ** 2)[:, 0] + eta_ref = eta_true[:, 0] + eta_pred = pred["eta"][:, 0] + + # plot + plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + x_lower = -1 + x_upper = 
1 + t_lower = -1 + t_upper = 1 + # set timestamps(including initial t0) + timestamps = np.linspace(t_lower, t_upper, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_interval": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(t_lower, t_upper, timestamps=timestamps), + ppsci.geometry.Interval(x_lower, x_upper), + ) + } + + NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC + input_dict = geom["time_interval"].sample_interior(NPOINT_TOTAL, evenly=True) + + output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + # visualize prediction + Eu_true, Ev_true, pu_true, pv_true, eta_true = analytic_solution(input_dict) + t = input_dict["t"][:, 0] + x = input_dict["x"][:, 0] + E_ref = np.sqrt(Eu_true**2 + Ev_true**2)[:, 0] + E_pred = np.sqrt(output_dict["Eu"] ** 2 + output_dict["Ev"] ** 2)[:, 0] + p_ref = np.sqrt(pu_true**2 + pv_true**2)[:, 0] + p_pred = np.sqrt(output_dict["pu"] ** 2 + output_dict["pv"] ** 2)[:, 0] + eta_ref = eta_true[:, 0] + eta_pred = output_dict["eta"][:, 0] + + # plot + plot(t, x, E_ref, E_pred, p_ref, p_pred, eta_ref, eta_pred, cfg.output_dir) + + +@hydra.main(version_base=None, config_path="./conf", config_name="NLS-MB_soliton.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml b/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml index 98764d8908..10f9d4d5b9 100644 --- a/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml +++ b/examples/NLS-MB/conf/NLS-MB_rogue_wave.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -75,3 +76,81 @@ INFER: max_batch_size: 64 num_cpu_threads: 4 batch_size: 64 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: output_NLS-MB_rogue_wave/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +log_freq: 100 +output_dir: ${hydra:run.dir} +NPOINT_INTERIOR: 20000 +NPOINT_BC: 600 +NTIME_ALL: 200 + +# model settings +MODEL: + input_keys: ["t", "x"] + output_keys: ["Eu", "Ev", "pu", "pv", "eta"] + num_layers: 5 + hidden_size: 64 + +# training settings +TRAIN: + epochs: 50000 + iters_per_epoch: 1 + lbfgs: + iters_per_epoch: ${TRAIN.iters_per_epoch} + output_dir: ${output_dir}LBFGS + learning_rate: 1.0 + max_iter: 1 + eval_freq: ${TRAIN.eval_freq} + eval_during_train: ${TRAIN.eval_during_train} + eval_during_train: true + eval_freq: 1000 + learning_rate: 0.001 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: false 
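+
+# inference settings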
+ +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/NLS-MB/NLS-MB_rogue_wave_pretrained.pdparams + export_path: ./inference/NLS-MB_rogue_wave + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 64 + num_cpu_threads: 4 + batch_size: 64 +>>>>>>> Stashed changes diff --git a/examples/NLS-MB/conf/NLS-MB_soliton.yaml b/examples/NLS-MB/conf/NLS-MB_soliton.yaml index 94eabfdffe..92f0bc60a5 100644 --- a/examples/NLS-MB/conf/NLS-MB_soliton.yaml +++ b/examples/NLS-MB/conf/NLS-MB_soliton.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -75,3 +76,81 @@ INFER: max_batch_size: 64 num_cpu_threads: 4 batch_size: 64 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: output_NLS-MB_soliton/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +log_freq: 100 +output_dir: ${hydra:run.dir} +NPOINT_INTERIOR: 20000 +NPOINT_BC: 600 +NTIME_ALL: 200 + +# model settings +MODEL: + input_keys: ["t", "x"] + output_keys: ["Eu", "Ev", "pu", "pv", "eta"] + num_layers: 5 + hidden_size: 64 + +# training settings +TRAIN: + epochs: 50000 + iters_per_epoch: 1 + lbfgs: + iters_per_epoch: ${TRAIN.iters_per_epoch} + output_dir: ${output_dir}LBFGS + learning_rate: 1.0 + max_iter: 1 + eval_freq: ${TRAIN.eval_freq} + eval_during_train: ${TRAIN.eval_during_train} + eval_during_train: true + eval_freq: 1000 + learning_rate: 0.001 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: false + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/NLS-MB/NLS-MB_soliton_pretrained.pdparams + export_path: ./inference/NLS-MB_soliton + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 64 + num_cpu_threads: 4 + batch_size: 64 +>>>>>>> Stashed changes diff --git a/examples/RegAE/RegAE.py b/examples/RegAE/RegAE.py index 455290f935..0ebfd3b6c5 100644 --- a/examples/RegAE/RegAE.py +++ b/examples/RegAE/RegAE.py @@ -1,175 +1,175 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from os import path as osp - -import hydra -import paddle -from omegaconf import DictConfig -from paddle.nn import functional as F - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set model - model = ppsci.arch.AutoEncoder(**cfg.MODEL) - - # set dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "NPZDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": ("p_train",), - "label_keys": ("p_train",), - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": False, - }, - } - - def loss_expr(output_dict, label_dict, weight_dict=None): - mu, log_sigma = output_dict["mu"], output_dict["log_sigma"] - - base = paddle.exp(2.0 * log_sigma) + paddle.pow(mu, 2) - 1.0 - 2.0 * log_sigma - KLLoss = 0.5 * paddle.sum(base) / mu.shape[0] - - return { - "decode_loss": F.mse_loss(output_dict["decoder_z"], label_dict["p_train"]) - + KLLoss - } - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(loss_expr), - name="Sup", - ) - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "NPZDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": ("p_train",), - "label_keys": ("p_train",), - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": False, - }, - } - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(loss_expr), - metric={"L2Rel": ppsci.metric.L2Rel()}, - ) - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - validator=validator, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set model - model = ppsci.arch.AutoEncoder(**cfg.MODEL) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "NPZDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": ("p_train",), - "label_keys": ("p_train",), - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": False, - }, - } - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.MSELoss(), - output_expr={"p_hat": lambda out: out["p_hat"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - ) - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - None, - output_dir=cfg.output_dir, - 
validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate after finished training - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="RegAE.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from os import path as osp + +import hydra +import paddle +from omegaconf import DictConfig +from paddle.nn import functional as F + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set model + model = ppsci.arch.AutoEncoder(**cfg.MODEL) + + # set dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "NPZDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": ("p_train",), + "label_keys": ("p_train",), + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": False, + }, + } + + def loss_expr(output_dict, label_dict, weight_dict=None): + mu, log_sigma = output_dict["mu"], output_dict["log_sigma"] + + base = paddle.exp(2.0 * log_sigma) + paddle.pow(mu, 2) - 1.0 - 2.0 * log_sigma + KLLoss = 0.5 * paddle.sum(base) / mu.shape[0] + + return { + "decode_loss": F.mse_loss(output_dict["decoder_z"], label_dict["p_train"]) + + KLLoss + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(loss_expr), + name="Sup", + ) + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "NPZDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": ("p_train",), + "label_keys": ("p_train",), + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": False, + }, + } + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(loss_expr), + metric={"L2Rel": ppsci.metric.L2Rel()}, + ) + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + validator=validator, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + 
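+    # Note: solver.train() below minimizes the "decode_loss" returned by
+    # loss_expr above, i.e. an MSE reconstruction term plus the closed-form
+    # KL divergence between the encoder distribution N(mu, sigma^2), with
+    # sigma = exp(log_sigma), and the standard normal prior N(0, 1):
+    #     KL = 0.5 * sum(exp(2 * log_sigma) + mu^2 - 1 - 2 * log_sigma)
+    # summed over latent dimensions and divided by the batch size
+    # mu.shape[0], which is exactly the "base"/"KLLoss" expression in the code.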
solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set model + model = ppsci.arch.AutoEncoder(**cfg.MODEL) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "NPZDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": ("p_train",), + "label_keys": ("p_train",), + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": False, + }, + } + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.MSELoss(), + output_expr={"p_hat": lambda out: out["p_hat"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + ) + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + None, + output_dir=cfg.output_dir, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate after finished training + solver.eval() + + +@hydra.main(version_base=None, config_path="./conf", config_name="RegAE.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/RegAE/conf/RegAE.yaml b/examples/RegAE/conf/RegAE.yaml index 53698296de..67a09621ab 100644 --- a/examples/RegAE/conf/RegAE.yaml +++ b/examples/RegAE/conf/RegAE.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -52,3 +53,56 @@ EVAL: pretrained_model_path: null eval_with_no_grad: true batch_size: 128 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: output_RegAE/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 1 +output_dir: ${hydra:run.dir} +TRAIN_FILE_PATH: data.npz +VALID_FILE_PATH: data.npz + +# model settings +MODEL: + input_keys: ["p_train",] + output_keys: ["mu", "log_sigma", "decoder_z"] + input_dim: 10000 + latent_dim: 100 + hidden_dim: 100 + +# training settings +TRAIN: + epochs: 2 + iters_per_epoch: 625 + eval_during_train: false + save_freq: 200 + eval_freq: 200 + learning_rate: 0.0001 + batch_size: 128 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 128 +>>>>>>> Stashed changes diff --git a/examples/RegAE/dataloader.py b/examples/RegAE/dataloader.py index fbe1acb46a..ed87c1ebd9 100644 --- a/examples/RegAE/dataloader.py +++ b/examples/RegAE/dataloader.py @@ -1,157 +1,157 @@ -""" -输入数据形状 10^5 * 100 * 100 - -1.按照8:2划分训练数据集和测试数据集 -2.通过训练数据进行标准正则化 -""" -import numpy as np -import paddle -from paddle import io - - -class ZScoreNormalize: - """ - Desc: Normalization utilities with std mean - """ - - def __init__(self): - self.mean = 0.0 - self.std = 1.0 - - def fit(self, data): - 
self.mean = np.mean(data) - self.std = np.std(data) - - def transform(self, data): - mean = ( - paddle.full([], self.mean, dtype=data.dtype) - if paddle.is_tensor(data) - else self.mean - ) - std = ( - paddle.full([], self.std, dtype=data.dtype) - if paddle.is_tensor(data) - else self.std - ) - return (data - mean) / std - - def inverse_transform(self, data): - mean = ( - paddle.full([], self.mean, dtype=data.dtype) - if paddle.is_tensor(data) - else self.mean - ) - std = ( - paddle.full([], self.std, dtype=data.dtype) - if paddle.is_tensor(data) - else self.std - ) - return (data * std) + mean - - -class MinMaxNormalize: - """ - Desc: Normalization utilities with min max - """ - - def __init__(self): - self.min = 0.0 - self.max = 1.0 - - def fit(self, data): - self.min = np.min(data, axis=0) - self.max = np.max(data, axis=0) - - def transform(self, data): - _min = ( - paddle.full([], self.min, dtype=data.dtype) - if paddle.is_tensor(data) - else self.min - ) - _max = ( - paddle.full([], self.max, dtype=data.dtype) - if paddle.is_tensor(data) - else self.max - ) - data = 1.0 * (data - _min) / (_max - _min) - return 2.0 * data - 1.0 - - def inverse_transform(self, data, axis=None): - _min = ( - paddle.full([], self.min, dtype=data.dtype) - if paddle.is_tensor(data) - else self.min - ) - _max = ( - paddle.full([], self.max, dtype=data.dtype) - if paddle.is_tensor(data) - else self.max - ) - data = (data + 1.0) / 2.0 - return 1.0 * data * (_max - _min) + _min - - -class CustomDataset(io.Dataset): - def __init__(self, file_path, data_type="train"): - """ - - :param file_path: - :param data_type: train or test - """ - super().__init__() - all_data = np.load(file_path) - data = all_data["data"] - num, _, _ = data.shape - data = data.reshape(num, -1) - - self.neighbors = all_data["neighbors"] - self.areasoverlengths = all_data["areasoverlengths"] - self.dirichletnodes = all_data["dirichletnodes"] - self.dirichleths = all_data["dirichletheads"] - self.Qs = np.zeros([all_data["coords"].shape[-1]]) - self.val_data = all_data["test_data"] - - self.data_type = data_type - - self.train_len = int(num * 0.8) - self.test_len = num - self.train_len - - self.train_data = data[: self.train_len] - self.test_data = data[self.train_len :] - - self.normalizer = ZScoreNormalize() - self.normalizer.fit(self.train_data) - - self.train_data = self.normalizer.transform(self.train_data) - self.test_data = self.normalizer.transform(self.test_data) - - def __getitem__(self, idx): - if self.data_type == "train": - return self.train_data[idx] - else: - return self.test_data[idx] - - def __len__(self): - if self.data_type == "train": - return self.train_len - else: - return self.test_len - - -if __name__ == "__main__": - train_data = CustomDataset(file_path="data/gaussian_train.npz", data_type="train") - test_data = CustomDataset(file_path="data/gaussian_train.npz", data_type="test") - train_loader = io.DataLoader( - train_data, batch_size=128, shuffle=True, drop_last=True, num_workers=0 - ) - test_loader = io.DataLoader( - test_data, batch_size=128, shuffle=True, drop_last=True, num_workers=0 - ) - - for i, data_item in enumerate(train_loader()): - print(data_item) - - if i == 2: - break - - # np.savez("data.npz", p_train=train_data.train_data, p_test=train_data.test_data) +""" +输入数据形状 10^5 * 100 * 100 + +1.按照8:2划分训练数据集和测试数据集 +2.通过训练数据进行标准正则化 +""" +import numpy as np +import paddle +from paddle import io + + +class ZScoreNormalize: + """ + Desc: Normalization utilities with std mean + """ + + def __init__(self): + 
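+        # Note: the module docstring above says, roughly: the input data has
+        # shape 1e5 x 100 x 100, it is split 8:2 into training and test sets,
+        # and z-score normalization is fitted on the training split.
+        # The defaults below amount to the identity transform until fit()
+        # stores the training set's global mean and std; transform() then maps
+        # data to zero mean / unit variance and inverse_transform() undoes it.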
self.mean = 0.0 + self.std = 1.0 + + def fit(self, data): + self.mean = np.mean(data) + self.std = np.std(data) + + def transform(self, data): + mean = ( + paddle.full([], self.mean, dtype=data.dtype) + if paddle.is_tensor(data) + else self.mean + ) + std = ( + paddle.full([], self.std, dtype=data.dtype) + if paddle.is_tensor(data) + else self.std + ) + return (data - mean) / std + + def inverse_transform(self, data): + mean = ( + paddle.full([], self.mean, dtype=data.dtype) + if paddle.is_tensor(data) + else self.mean + ) + std = ( + paddle.full([], self.std, dtype=data.dtype) + if paddle.is_tensor(data) + else self.std + ) + return (data * std) + mean + + +class MinMaxNormalize: + """ + Desc: Normalization utilities with min max + """ + + def __init__(self): + self.min = 0.0 + self.max = 1.0 + + def fit(self, data): + self.min = np.min(data, axis=0) + self.max = np.max(data, axis=0) + + def transform(self, data): + _min = ( + paddle.full([], self.min, dtype=data.dtype) + if paddle.is_tensor(data) + else self.min + ) + _max = ( + paddle.full([], self.max, dtype=data.dtype) + if paddle.is_tensor(data) + else self.max + ) + data = 1.0 * (data - _min) / (_max - _min) + return 2.0 * data - 1.0 + + def inverse_transform(self, data, axis=None): + _min = ( + paddle.full([], self.min, dtype=data.dtype) + if paddle.is_tensor(data) + else self.min + ) + _max = ( + paddle.full([], self.max, dtype=data.dtype) + if paddle.is_tensor(data) + else self.max + ) + data = (data + 1.0) / 2.0 + return 1.0 * data * (_max - _min) + _min + + +class CustomDataset(io.Dataset): + def __init__(self, file_path, data_type="train"): + """ + + :param file_path: + :param data_type: train or test + """ + super().__init__() + all_data = np.load(file_path) + data = all_data["data"] + num, _, _ = data.shape + data = data.reshape(num, -1) + + self.neighbors = all_data["neighbors"] + self.areasoverlengths = all_data["areasoverlengths"] + self.dirichletnodes = all_data["dirichletnodes"] + self.dirichleths = all_data["dirichletheads"] + self.Qs = np.zeros([all_data["coords"].shape[-1]]) + self.val_data = all_data["test_data"] + + self.data_type = data_type + + self.train_len = int(num * 0.8) + self.test_len = num - self.train_len + + self.train_data = data[: self.train_len] + self.test_data = data[self.train_len :] + + self.normalizer = ZScoreNormalize() + self.normalizer.fit(self.train_data) + + self.train_data = self.normalizer.transform(self.train_data) + self.test_data = self.normalizer.transform(self.test_data) + + def __getitem__(self, idx): + if self.data_type == "train": + return self.train_data[idx] + else: + return self.test_data[idx] + + def __len__(self): + if self.data_type == "train": + return self.train_len + else: + return self.test_len + + +if __name__ == "__main__": + train_data = CustomDataset(file_path="data/gaussian_train.npz", data_type="train") + test_data = CustomDataset(file_path="data/gaussian_train.npz", data_type="test") + train_loader = io.DataLoader( + train_data, batch_size=128, shuffle=True, drop_last=True, num_workers=0 + ) + test_loader = io.DataLoader( + test_data, batch_size=128, shuffle=True, drop_last=True, num_workers=0 + ) + + for i, data_item in enumerate(train_loader()): + print(data_item) + + if i == 2: + break + + # np.savez("data.npz", p_train=train_data.train_data, p_test=train_data.test_data) diff --git a/examples/adv/adv_cvit.py b/examples/adv/adv_cvit.py index 93ca099550..89fdac5e8a 100644 --- a/examples/adv/adv_cvit.py +++ b/examples/adv/adv_cvit.py @@ -1,265 +1,265 @@ -""" 
-Reference: https://github.com/PredictiveIntelligenceLab/cvit/tree/main/adv/ -""" - -from os import path as osp - -import einops -import hydra -import matplotlib.pyplot as plt -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - -dtype = paddle.get_default_dtype() - - -def plot_result(pred: np.ndarray, label: np.ndarray, output_dir: str): - def compute_tvd(f, g, dx): - assert f.shape == g.shape - df = np.abs(np.diff(f, axis=1)) - dg = np.abs(np.diff(g, axis=1)) - - tvd = np.sum(np.abs(df - dg), axis=1) * dx - return tvd - - tvd = compute_tvd(np.squeeze(pred, axis=-1), label, 1 / 199) - logger.message( - f"mean: {np.mean(tvd)}, " - f"median: {np.median(tvd)}, " - f"max: {np.amax(tvd)}, " - f"min: {np.amin(tvd)}" - ) - - best_idx = np.argmin(tvd) - worst_idx = np.argmax(tvd) - logger.message(f"best: {best_idx}, worst: {worst_idx}") - - idx = worst_idx - x = np.linspace(0, 1, 200) - plt.plot(x, pred[idx], "r--") - plt.plot(x, label[idx], "b-") - plt.title(f"CViT (TV: {tvd[idx]:.2f})") - plt.xlabel("$y$") - plt.ylim([-1.4, 1.4]) - - plt.tight_layout() - plt.savefig(osp.join(output_dir, "adv_cvit.png")) - logger.message(f"Result saved to: {osp.join(output_dir, 'adv_cvit.png')}") - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.CVit1D(**cfg.MODEL) - - # prepare dataset - inputs = np.load(osp.join(cfg.DATA_DIR, "adv_a0.npy")).astype(dtype) - outputs = np.load(osp.join(cfg.DATA_DIR, "adv_aT.npy")).astype(dtype) - grid = np.linspace(0, 1, inputs.shape[0], dtype=dtype) - grid = einops.repeat(grid, "i -> i b", b=inputs.shape[1]) - - ## swapping the first two axes: - inputs = einops.rearrange(inputs, "i j -> j i 1") # (40000, 200, 1) - outputs = einops.rearrange(outputs, "i j -> j i") # (40000, 200) - grid = einops.rearrange(grid, "i j -> j i 1") # (40000, 200, 1) - - idx = np.random.permutation(inputs.shape[0]) - n_train = 20000 - n_test = 10000 - inputs_train, outputs_train, grid_train = ( - inputs[idx[:n_train]], - outputs[idx[:n_train]], - grid[idx[:n_train]], - ) - inputs_test, outputs_test, grid_test = ( - inputs[idx[-n_test:]], - outputs[idx[-n_test:]], - grid[idx[-n_test:]], - ) - - # set constraint - def gen_input_batch_train(): - batch_idx = np.random.randint(0, inputs_train.shape[0], [cfg.TRAIN.batch_size]) - grid_idx = np.sort( - np.random.randint(0, inputs_train.shape[1], [cfg.TRAIN.grid_size]) - ) - return { - "u": inputs_train[batch_idx], - "y": grid_train[batch_idx][:, grid_idx], - "batch_idx": batch_idx, - "grid_idx": grid_idx, - } - - def gen_label_batch_train(input_batch): - batch_idx, grid_idx = input_batch.pop("batch_idx"), input_batch.pop("grid_idx") - return { - "s": outputs_train[batch_idx][:, grid_idx, None], - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "ContinuousNamedArrayDataset", - "input": gen_input_batch_train, - "label": gen_label_batch_train, - }, - }, - output_expr={"s": lambda out: out["s"]}, - loss=ppsci.loss.MSELoss("mean"), - name="Sup", - ) - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.AdamW( - lr_scheduler, - weight_decay=cfg.TRAIN.weight_decay, - grad_clip=paddle.nn.ClipGradByGlobalNorm(cfg.TRAIN.grad_clip), - )(model) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - optimizer=optimizer, - cfg=cfg, - ) - # train model 
- solver.train() - # visualzie result on ema model - solver.ema_model.apply_shadow() - pred_s = solver.predict( - {"u": inputs_test, "y": grid_test}, - batch_size=cfg.EVAL.batch_size, - return_numpy=True, - )["s"] - - plot_result(pred_s, outputs_test, cfg.output_dir) - solver.ema_model.restore() - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.CVit1D(**cfg.MODEL) - - # prepare dataset - inputs = np.load(osp.join(cfg.DATA_DIR, "adv_a0.npy")).astype(dtype) - outputs = np.load(osp.join(cfg.DATA_DIR, "adv_aT.npy")).astype(dtype) - grid = np.linspace(0, 1, inputs.shape[0], dtype=dtype) - grid = einops.repeat(grid, "i -> i b", b=inputs.shape[1]) - - ## swapping the first two axes: - inputs = einops.rearrange(inputs, "i j -> j i 1") # (40000, 200, 1) - outputs = einops.rearrange(outputs, "i j -> j i") # (40000, 200) - grid = einops.rearrange(grid, "i j -> j i 1") # (40000, 200, 1) - - idx = np.random.permutation(inputs.shape[0]) - n_test = 10000 - inputs_test, outputs_test, grid_test = ( - inputs[idx[-n_test:]], - outputs[idx[-n_test:]], - grid[idx[-n_test:]], - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - cfg=cfg, - ) - pred_s = solver.predict( - {"u": inputs_test, "y": grid_test}, - batch_size=cfg.EVAL.batch_size, - return_numpy=True, - )["s"] - - plot_result(pred_s, outputs_test, cfg.output_dir) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.CVit1D(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver(model, cfg=cfg) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - model.input_keys[0]: InputSpec( - [None, cfg.INFER.spatial_dims, 1], - name=model.input_keys[0], - ), - model.input_keys[1]: InputSpec( - [None, cfg.INFER.grid_size[0], 1], - name=model.input_keys[1], - ), - }, - ] - # NOTE: Put einops into ignore module when exporting, or error will occur - solver.export( - input_spec, cfg.INFER.export_path, with_onnx=False, ignore_modules=[einops] - ) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # prepare dataset - inputs = np.load(osp.join(cfg.DATA_DIR, "adv_a0.npy")).astype(dtype) - outputs = np.load(osp.join(cfg.DATA_DIR, "adv_aT.npy")).astype(dtype) - grid = np.linspace(0, 1, inputs.shape[0], dtype=dtype) - grid = einops.repeat(grid, "i -> i b", b=inputs.shape[1]) - - ## swapping the first two axes: - inputs = einops.rearrange(inputs, "i j -> j i 1") # (40000, 200, 1) - outputs = einops.rearrange(outputs, "i j -> j i") # (40000, 200) - grid = einops.rearrange(grid, "i j -> j i 1") # (40000, 200, 1) - - idx = np.random.permutation(inputs.shape[0]) - n_test = 10000 - inputs_test, outputs_test, grid_test = ( - inputs[idx[-n_test:]], - outputs[idx[-n_test:]], - grid[idx[-n_test:]], - ) - - output_dict = predictor.predict( - {"u": inputs_test, "y": grid_test}, - batch_size=cfg.INFER.batch_size, - ) - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - plot_result(output_dict[cfg.MODEL.output_keys[0]], outputs_test, "./") - - -@hydra.main(version_base=None, config_path="./conf", config_name="adv_cvit.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - 
) - - -if __name__ == "__main__": - main() +""" +Reference: https://github.com/PredictiveIntelligenceLab/cvit/tree/main/adv/ +""" + +from os import path as osp + +import einops +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + +dtype = paddle.get_default_dtype() + + +def plot_result(pred: np.ndarray, label: np.ndarray, output_dir: str): + def compute_tvd(f, g, dx): + assert f.shape == g.shape + df = np.abs(np.diff(f, axis=1)) + dg = np.abs(np.diff(g, axis=1)) + + tvd = np.sum(np.abs(df - dg), axis=1) * dx + return tvd + + tvd = compute_tvd(np.squeeze(pred, axis=-1), label, 1 / 199) + logger.message( + f"mean: {np.mean(tvd)}, " + f"median: {np.median(tvd)}, " + f"max: {np.amax(tvd)}, " + f"min: {np.amin(tvd)}" + ) + + best_idx = np.argmin(tvd) + worst_idx = np.argmax(tvd) + logger.message(f"best: {best_idx}, worst: {worst_idx}") + + idx = worst_idx + x = np.linspace(0, 1, 200) + plt.plot(x, pred[idx], "r--") + plt.plot(x, label[idx], "b-") + plt.title(f"CViT (TV: {tvd[idx]:.2f})") + plt.xlabel("$y$") + plt.ylim([-1.4, 1.4]) + + plt.tight_layout() + plt.savefig(osp.join(output_dir, "adv_cvit.png")) + logger.message(f"Result saved to: {osp.join(output_dir, 'adv_cvit.png')}") + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.CVit1D(**cfg.MODEL) + + # prepare dataset + inputs = np.load(osp.join(cfg.DATA_DIR, "adv_a0.npy")).astype(dtype) + outputs = np.load(osp.join(cfg.DATA_DIR, "adv_aT.npy")).astype(dtype) + grid = np.linspace(0, 1, inputs.shape[0], dtype=dtype) + grid = einops.repeat(grid, "i -> i b", b=inputs.shape[1]) + + ## swapping the first two axes: + inputs = einops.rearrange(inputs, "i j -> j i 1") # (40000, 200, 1) + outputs = einops.rearrange(outputs, "i j -> j i") # (40000, 200) + grid = einops.rearrange(grid, "i j -> j i 1") # (40000, 200, 1) + + idx = np.random.permutation(inputs.shape[0]) + n_train = 20000 + n_test = 10000 + inputs_train, outputs_train, grid_train = ( + inputs[idx[:n_train]], + outputs[idx[:n_train]], + grid[idx[:n_train]], + ) + inputs_test, outputs_test, grid_test = ( + inputs[idx[-n_test:]], + outputs[idx[-n_test:]], + grid[idx[-n_test:]], + ) + + # set constraint + def gen_input_batch_train(): + batch_idx = np.random.randint(0, inputs_train.shape[0], [cfg.TRAIN.batch_size]) + grid_idx = np.sort( + np.random.randint(0, inputs_train.shape[1], [cfg.TRAIN.grid_size]) + ) + return { + "u": inputs_train[batch_idx], + "y": grid_train[batch_idx][:, grid_idx], + "batch_idx": batch_idx, + "grid_idx": grid_idx, + } + + def gen_label_batch_train(input_batch): + batch_idx, grid_idx = input_batch.pop("batch_idx"), input_batch.pop("grid_idx") + return { + "s": outputs_train[batch_idx][:, grid_idx, None], + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch_train, + "label": gen_label_batch_train, + }, + }, + output_expr={"s": lambda out: out["s"]}, + loss=ppsci.loss.MSELoss("mean"), + name="Sup", + ) + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.AdamW( + lr_scheduler, + weight_decay=cfg.TRAIN.weight_decay, + grad_clip=paddle.nn.ClipGradByGlobalNorm(cfg.TRAIN.grad_clip), + )(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + 
optimizer=optimizer, + cfg=cfg, + ) + # train model + solver.train() + # visualzie result on ema model + solver.ema_model.apply_shadow() + pred_s = solver.predict( + {"u": inputs_test, "y": grid_test}, + batch_size=cfg.EVAL.batch_size, + return_numpy=True, + )["s"] + + plot_result(pred_s, outputs_test, cfg.output_dir) + solver.ema_model.restore() + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.CVit1D(**cfg.MODEL) + + # prepare dataset + inputs = np.load(osp.join(cfg.DATA_DIR, "adv_a0.npy")).astype(dtype) + outputs = np.load(osp.join(cfg.DATA_DIR, "adv_aT.npy")).astype(dtype) + grid = np.linspace(0, 1, inputs.shape[0], dtype=dtype) + grid = einops.repeat(grid, "i -> i b", b=inputs.shape[1]) + + ## swapping the first two axes: + inputs = einops.rearrange(inputs, "i j -> j i 1") # (40000, 200, 1) + outputs = einops.rearrange(outputs, "i j -> j i") # (40000, 200) + grid = einops.rearrange(grid, "i j -> j i 1") # (40000, 200, 1) + + idx = np.random.permutation(inputs.shape[0]) + n_test = 10000 + inputs_test, outputs_test, grid_test = ( + inputs[idx[-n_test:]], + outputs[idx[-n_test:]], + grid[idx[-n_test:]], + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + cfg=cfg, + ) + pred_s = solver.predict( + {"u": inputs_test, "y": grid_test}, + batch_size=cfg.EVAL.batch_size, + return_numpy=True, + )["s"] + + plot_result(pred_s, outputs_test, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.CVit1D(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver(model, cfg=cfg) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + model.input_keys[0]: InputSpec( + [None, cfg.INFER.spatial_dims, 1], + name=model.input_keys[0], + ), + model.input_keys[1]: InputSpec( + [None, cfg.INFER.grid_size[0], 1], + name=model.input_keys[1], + ), + }, + ] + # NOTE: Put einops into ignore module when exporting, or error will occur + solver.export( + input_spec, cfg.INFER.export_path, with_onnx=False, ignore_modules=[einops] + ) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # prepare dataset + inputs = np.load(osp.join(cfg.DATA_DIR, "adv_a0.npy")).astype(dtype) + outputs = np.load(osp.join(cfg.DATA_DIR, "adv_aT.npy")).astype(dtype) + grid = np.linspace(0, 1, inputs.shape[0], dtype=dtype) + grid = einops.repeat(grid, "i -> i b", b=inputs.shape[1]) + + ## swapping the first two axes: + inputs = einops.rearrange(inputs, "i j -> j i 1") # (40000, 200, 1) + outputs = einops.rearrange(outputs, "i j -> j i") # (40000, 200) + grid = einops.rearrange(grid, "i j -> j i 1") # (40000, 200, 1) + + idx = np.random.permutation(inputs.shape[0]) + n_test = 10000 + inputs_test, outputs_test, grid_test = ( + inputs[idx[-n_test:]], + outputs[idx[-n_test:]], + grid[idx[-n_test:]], + ) + + output_dict = predictor.predict( + {"u": inputs_test, "y": grid_test}, + batch_size=cfg.INFER.batch_size, + ) + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + plot_result(output_dict[cfg.MODEL.output_keys[0]], outputs_test, "./") + + +@hydra.main(version_base=None, config_path="./conf", config_name="adv_cvit.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 
'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/adv/conf/adv_cvit.yaml b/examples/adv/conf/adv_cvit.yaml index c107a322be..080f619780 100644 --- a/examples/adv/conf/adv_cvit.yaml +++ b/examples/adv/conf/adv_cvit.yaml @@ -1,106 +1,106 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_adv_cvit/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA_DIR: ./data/ - -# model settings -MODEL: - input_keys: [u, y] - output_keys: [s] - in_dim: 1 - coords_dim: 1 - spatial_dims: 200 - patch_size: [4] - grid_size: [200] - latent_dim: 256 - emb_dim: 256 - depth: 6 - num_heads: 16 - dec_emb_dim: 256 - dec_num_heads: 16 - dec_depth: 1 - num_mlp_layers: 1 - mlp_ratio: 1 - out_dim: 1 - layer_norm_eps: 1.0e-5 - embedding_type: grid - -# training settings -TRAIN: - epochs: 200000 - iters_per_epoch: 1 - save_freq: 10000 - eval_during_train: false - eval_freq: 5 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-4 - gamma: 0.95 - decay_steps: 1000 - by_epoch: false - weight_decay: 1.0e-5 - grad_clip: 1.0 - batch_size: 256 - grid_size: 128 - pretrained_model_path: null - checkpoint_path: null - ema: - use_ema: true - decay: 0.999 - avg_freq: 1 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 1000 - grid_size: 200 - -# inference settings -INFER: - pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/cvit/adv_cvit_pretrained.pdparams - export_path: ./inference/adv_cvit - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - spatial_dims: ${MODEL.spatial_dims} - grid_size: ${MODEL.grid_size} - batch_size: 1000 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_adv_cvit/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA_DIR: ./data/ + +# model settings +MODEL: + input_keys: [u, y] + output_keys: [s] + in_dim: 1 + coords_dim: 1 + 
spatial_dims: 200 + patch_size: [4] + grid_size: [200] + latent_dim: 256 + emb_dim: 256 + depth: 6 + num_heads: 16 + dec_emb_dim: 256 + dec_num_heads: 16 + dec_depth: 1 + num_mlp_layers: 1 + mlp_ratio: 1 + out_dim: 1 + layer_norm_eps: 1.0e-5 + embedding_type: grid + +# training settings +TRAIN: + epochs: 200000 + iters_per_epoch: 1 + save_freq: 10000 + eval_during_train: false + eval_freq: 5 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-4 + gamma: 0.95 + decay_steps: 1000 + by_epoch: false + weight_decay: 1.0e-5 + grad_clip: 1.0 + batch_size: 256 + grid_size: 128 + pretrained_model_path: null + checkpoint_path: null + ema: + use_ema: true + decay: 0.999 + avg_freq: 1 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 1000 + grid_size: 200 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/cvit/adv_cvit_pretrained.pdparams + export_path: ./inference/adv_cvit + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + spatial_dims: ${MODEL.spatial_dims} + grid_size: ${MODEL.grid_size} + batch_size: 1000 diff --git a/examples/allen_cahn/allen_cahn_causal.py b/examples/allen_cahn/allen_cahn_causal.py index 58e6c798ea..61e95cbe73 100644 --- a/examples/allen_cahn/allen_cahn_causal.py +++ b/examples/allen_cahn/allen_cahn_causal.py @@ -1,303 +1,303 @@ -""" -Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn -""" - -from os import path as osp - -import hydra -import numpy as np -import paddle -import scipy.io as sio -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import misc - -dtype = paddle.get_default_dtype() - - -def plot( - t_star: np.ndarray, - x_star: np.ndarray, - u_ref: np.ndarray, - u_pred: np.ndarray, - output_dir: str, -): - fig = plt.figure(figsize=(18, 5)) - TT, XX = np.meshgrid(t_star, x_star, indexing="ij") - u_ref = u_ref.reshape([len(t_star), len(x_star)]) - - plt.subplot(1, 3, 1) - plt.pcolor(TT, XX, u_ref, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Exact") - plt.tight_layout() - - plt.subplot(1, 3, 2) - plt.pcolor(TT, XX, u_pred, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Predicted") - plt.tight_layout() - - plt.subplot(1, 3, 3) - plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Absolute error") - plt.tight_layout() - - fig_path = osp.join(output_dir, "ac.png") - print(f"Saving figure to {fig_path}") - fig.savefig(fig_path, bbox_inches="tight", dpi=400) - plt.close() - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"AllenCahn": ppsci.equation.AllenCahn(eps=0.01)} - - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - u0 = u_ref[0, :] # [nx, ] - - t0 = t_star[0] # float - t1 = t_star[-1] # float - - x0 = x_star[0] # float - x1 = x_star[-1] # float - - # set constraint - def gen_input_batch(): - tx = np.random.uniform( - [t0, x0], - 
[t1, x1], - (cfg.TRAIN.batch_size, 2), - ).astype(dtype) - return { - "t": np.sort(tx[:, 0:1], axis=0), - "x": tx[:, 1:2], - } - - def gen_label_batch(input_batch): - return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} - - pde_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "ContinuousNamedArrayDataset", - "input": gen_input_batch, - "label": gen_label_batch, - }, - }, - output_expr=equation["AllenCahn"].equations, - loss=ppsci.loss.CausalMSELoss( - cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol - ), - name="PDE", - ) - - ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} - ic_label = {"u": u0.reshape([-1, 1])} - ic = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": ic_input, - "label": ic_label, - }, - }, - output_expr={"u": lambda out: out["u"]}, - loss=ppsci.loss.MSELoss("mean"), - name="IC", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - ic.name: ic, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=True, - eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, - equation=equation, - validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - use_tbd=True, - cfg=cfg, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = 
ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - - input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) - - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="allen_cahn_causal.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn +""" + +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot( + t_star: np.ndarray, + x_star: np.ndarray, + u_ref: np.ndarray, + u_pred: np.ndarray, + output_dir: str, +): + fig = plt.figure(figsize=(18, 5)) + TT, XX = np.meshgrid(t_star, x_star, indexing="ij") + u_ref = u_ref.reshape([len(t_star), len(x_star)]) + + plt.subplot(1, 3, 1) + plt.pcolor(TT, XX, u_ref, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Exact") + plt.tight_layout() + + plt.subplot(1, 3, 2) + plt.pcolor(TT, XX, u_pred, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Predicted") + plt.tight_layout() + + plt.subplot(1, 3, 3) + plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Absolute error") + plt.tight_layout() + + fig_path = osp.join(output_dir, "ac.png") + print(f"Saving figure to {fig_path}") + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + + +def 
train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"AllenCahn": ppsci.equation.AllenCahn(eps=0.01)} + + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + u0 = u_ref[0, :] # [nx, ] + + t0 = t_star[0] # float + t1 = t_star[-1] # float + + x0 = x_star[0] # float + x1 = x_star[-1] # float + + # set constraint + def gen_input_batch(): + tx = np.random.uniform( + [t0, x0], + [t1, x1], + (cfg.TRAIN.batch_size, 2), + ).astype(dtype) + return { + "t": np.sort(tx[:, 0:1], axis=0), + "x": tx[:, 1:2], + } + + def gen_label_batch(input_batch): + return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["AllenCahn"].equations, + loss=ppsci.loss.CausalMSELoss( + cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol + ), + name="PDE", + ) + + ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} + ic_label = {"u": u0.reshape([-1, 1])} + ic = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": ic_input, + "label": ic_label, + }, + }, + output_expr={"u": lambda out: out["u"]}, + loss=ppsci.loss.MSELoss("mean"), + name="IC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + ic.name: ic, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + eval_during_train=True, + eval_freq=cfg.TRAIN.eval_freq, + seed=cfg.seed, + equation=equation, + validator=validator, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + use_tbd=True, + cfg=cfg, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + # set 
validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + + input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) + + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="allen_cahn_causal.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/allen_cahn/allen_cahn_defalut_ntk.py b/examples/allen_cahn/allen_cahn_defalut_ntk.py index 28e4f7da65..92913ad9a3 100644 --- a/examples/allen_cahn/allen_cahn_defalut_ntk.py +++ b/examples/allen_cahn/allen_cahn_defalut_ntk.py @@ -1,306 +1,306 @@ -""" -Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn -""" - -from os import path as osp - -import hydra -import numpy as np -import paddle -import scipy.io as sio -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.loss import mtl -from ppsci.utils import misc - -dtype = paddle.get_default_dtype() - - -def plot( - t_star: np.ndarray, - x_star: np.ndarray, - 
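Note on the causal training used in allen_cahn_causal.py above: gen_input_batch
sorts the sampled collocation points by t before they reach
ppsci.loss.CausalMSELoss(n_chunks, "mean", tol=...), so the loss can weight PDE
residuals in time-ascending order. A hedged NumPy sketch of that idea (not the
library implementation; tol is assumed to play the role of the causality
parameter that gates later time chunks by the accumulated loss of earlier ones):

import numpy as np

def causal_mse(residual_sq: np.ndarray, n_chunks: int, tol: float) -> float:
    """Causal-weighted mean of squared residuals sorted by time."""
    # residual_sq: squared PDE residuals for a batch already sorted by t
    chunks = np.array_split(residual_sq, n_chunks)
    chunk_losses = np.array([c.mean() for c in chunks])
    # weight of chunk i decays with the accumulated loss of earlier chunks,
    # so later times only start to matter once earlier times are well fitted
    weights = np.exp(-tol * np.concatenate(([0.0], np.cumsum(chunk_losses)[:-1])))
    return float(np.mean(weights * chunk_losses))

# usage sketch: residuals come from the time-sorted batch made by gen_input_batch
# loss = causal_mse(residuals**2, n_chunks=cfg.TRAIN.causal.n_chunks, tol=cfg.TRAIN.causal.tol)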
u_ref: np.ndarray, - u_pred: np.ndarray, - output_dir: str, -): - fig = plt.figure(figsize=(18, 5)) - TT, XX = np.meshgrid(t_star, x_star, indexing="ij") - u_ref = u_ref.reshape([len(t_star), len(x_star)]) - - plt.subplot(1, 3, 1) - plt.pcolor(TT, XX, u_ref, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Exact") - plt.tight_layout() - - plt.subplot(1, 3, 2) - plt.pcolor(TT, XX, u_pred, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Predicted") - plt.tight_layout() - - plt.subplot(1, 3, 3) - plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Absolute error") - plt.tight_layout() - - fig_path = osp.join(output_dir, "ac.png") - print(f"Saving figure to {fig_path}") - fig.savefig(fig_path, bbox_inches="tight", dpi=400) - plt.close() - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"AllenCahn": ppsci.equation.AllenCahn(0.01**2)} - - # set constraint - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - u0 = u_ref[0, :] # [nx, ] - - t0 = t_star[0] # float - t1 = t_star[-1] # float - - x0 = x_star[0] # float - x1 = x_star[-1] # float - - def gen_input_batch(): - tx = np.random.uniform( - [t0, x0], - [t1, x1], - (cfg.TRAIN.batch_size, 2), - ).astype(dtype) - return { - "t": np.sort(tx[:, 0:1], axis=0), - "x": tx[:, 1:2], - } - - def gen_label_batch(input_batch): - return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} - - pde_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "ContinuousNamedArrayDataset", - "input": gen_input_batch, - "label": gen_label_batch, - }, - }, - output_expr=equation["AllenCahn"].equations, - loss=ppsci.loss.CausalMSELoss( - cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol - ), - name="PDE", - ) - - ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} - ic_label = {"u": u0.reshape([-1, 1])} - ic = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": ic_input, - "label": ic_label, - }, - }, - output_expr={"u": lambda out: out["u"]}, - loss=ppsci.loss.MSELoss("mean"), - name="IC", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - ic.name: ic, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=True, - eval_freq=cfg.TRAIN.eval_freq, - 
equation=equation, - validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - loss_aggregator=mtl.NTK( - model, - len(constraint), - cfg.TRAIN.ntk.update_freq, - ), - cfg=cfg, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - - input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) - # mapping data to cfg.INFER.output_keys - - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="allen_cahn_defalut_ntk.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == 
"export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn +""" + +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.loss import mtl +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot( + t_star: np.ndarray, + x_star: np.ndarray, + u_ref: np.ndarray, + u_pred: np.ndarray, + output_dir: str, +): + fig = plt.figure(figsize=(18, 5)) + TT, XX = np.meshgrid(t_star, x_star, indexing="ij") + u_ref = u_ref.reshape([len(t_star), len(x_star)]) + + plt.subplot(1, 3, 1) + plt.pcolor(TT, XX, u_ref, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Exact") + plt.tight_layout() + + plt.subplot(1, 3, 2) + plt.pcolor(TT, XX, u_pred, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Predicted") + plt.tight_layout() + + plt.subplot(1, 3, 3) + plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Absolute error") + plt.tight_layout() + + fig_path = osp.join(output_dir, "ac.png") + print(f"Saving figure to {fig_path}") + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"AllenCahn": ppsci.equation.AllenCahn(0.01**2)} + + # set constraint + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + u0 = u_ref[0, :] # [nx, ] + + t0 = t_star[0] # float + t1 = t_star[-1] # float + + x0 = x_star[0] # float + x1 = x_star[-1] # float + + def gen_input_batch(): + tx = np.random.uniform( + [t0, x0], + [t1, x1], + (cfg.TRAIN.batch_size, 2), + ).astype(dtype) + return { + "t": np.sort(tx[:, 0:1], axis=0), + "x": tx[:, 1:2], + } + + def gen_label_batch(input_batch): + return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["AllenCahn"].equations, + loss=ppsci.loss.CausalMSELoss( + cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol + ), + name="PDE", + ) + + ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} + ic_label = {"u": u0.reshape([-1, 1])} + ic = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": ic_input, + "label": ic_label, + }, + }, + output_expr={"u": lambda out: out["u"]}, + loss=ppsci.loss.MSELoss("mean"), + name="IC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + ic.name: ic, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} 
+ u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + eval_during_train=True, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + validator=validator, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + loss_aggregator=mtl.NTK( + model, + len(constraint), + cfg.TRAIN.ntk.update_freq, + ), + cfg=cfg, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + tx_star = misc.cartesian_product(t_star, 
x_star).astype(dtype) + + input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) + # mapping data to cfg.INFER.output_keys + + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="allen_cahn_defalut_ntk.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/allen_cahn/allen_cahn_default.py b/examples/allen_cahn/allen_cahn_default.py index 3870b6f6e7..6de6d18cdb 100644 --- a/examples/allen_cahn/allen_cahn_default.py +++ b/examples/allen_cahn/allen_cahn_default.py @@ -1,292 +1,292 @@ -""" -Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn -""" - -from os import path as osp - -import hydra -import numpy as np -import paddle -import scipy.io as sio -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.loss import mtl -from ppsci.utils import misc - -dtype = paddle.get_default_dtype() - - -def plot( - t_star: np.ndarray, - x_star: np.ndarray, - u_ref: np.ndarray, - u_pred: np.ndarray, - output_dir: str, -): - fig = plt.figure(figsize=(18, 5)) - TT, XX = np.meshgrid(t_star, x_star, indexing="ij") - u_ref = u_ref.reshape([len(t_star), len(x_star)]) - - plt.subplot(1, 3, 1) - plt.pcolor(TT, XX, u_ref, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Exact") - plt.tight_layout() - - plt.subplot(1, 3, 2) - plt.pcolor(TT, XX, u_pred, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Predicted") - plt.tight_layout() - - plt.subplot(1, 3, 3) - plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Absolute error") - plt.tight_layout() - - fig_path = osp.join(output_dir, "ac.png") - print(f"Saving figure to {fig_path}") - fig.savefig(fig_path, bbox_inches="tight", dpi=400) - plt.close() - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"AllenCahn": ppsci.equation.AllenCahn(eps=0.01)} - - # set constraint - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - u0 = u_ref[0, :] # [nx, ] - - t0 = t_star[0] # float - t1 = t_star[-1] # float - - x0 = x_star[0] # float - x1 = x_star[-1] # float - - def gen_input_batch(): - tx = np.random.uniform( - [t0, x0], - [t1, x1], - (cfg.TRAIN.batch_size, 2), - ).astype(dtype) - return { - "t": np.sort(tx[:, 0:1], axis=0), - "x": tx[:, 1:2], - } - - def gen_label_batch(input_batch): - return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} - - pde_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "ContinuousNamedArrayDataset", - "input": gen_input_batch, - "label": gen_label_batch, - }, - }, - output_expr=equation["AllenCahn"].equations, - 
loss=ppsci.loss.CausalMSELoss( - cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol - ), - name="PDE", - ) - - ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} - ic_label = {"u": u0.reshape([-1, 1])} - ic = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": ic_input, - "label": ic_label, - }, - }, - output_expr={"u": lambda out: out["u"]}, - loss=ppsci.loss.MSELoss("mean"), - name="IC", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - ic.name: ic, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - optimizer=optimizer, - equation=equation, - validator=validator, - loss_aggregator=mtl.GradNorm( - model, - len(constraint), - cfg.TRAIN.grad_norm.update_freq, - cfg.TRAIN.grad_norm.momentum, - ), - cfg=cfg, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - log_freq=cfg.log_freq, - validator=validator, - cfg=cfg, - ) - - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver(model, cfg=cfg) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in 
model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - - input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) - - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="allen_cahn_default.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn +""" + +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.loss import mtl +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot( + t_star: np.ndarray, + x_star: np.ndarray, + u_ref: np.ndarray, + u_pred: np.ndarray, + output_dir: str, +): + fig = plt.figure(figsize=(18, 5)) + TT, XX = np.meshgrid(t_star, x_star, indexing="ij") + u_ref = u_ref.reshape([len(t_star), len(x_star)]) + + plt.subplot(1, 3, 1) + plt.pcolor(TT, XX, u_ref, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Exact") + plt.tight_layout() + + plt.subplot(1, 3, 2) + plt.pcolor(TT, XX, u_pred, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Predicted") + plt.tight_layout() + + plt.subplot(1, 3, 3) + plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Absolute error") + plt.tight_layout() + + fig_path = osp.join(output_dir, "ac.png") + print(f"Saving figure to {fig_path}") + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"AllenCahn": ppsci.equation.AllenCahn(eps=0.01)} + + # set constraint + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + u0 = u_ref[0, :] # [nx, ] + + t0 = t_star[0] # float + t1 = t_star[-1] # float + + x0 = x_star[0] # float + x1 = x_star[-1] # float + + def gen_input_batch(): + tx = np.random.uniform( + [t0, x0], + [t1, x1], + (cfg.TRAIN.batch_size, 2), + ).astype(dtype) + return { + "t": np.sort(tx[:, 0:1], axis=0), + "x": tx[:, 1:2], + } + + def gen_label_batch(input_batch): + return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} + + pde_constraint = 
ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["AllenCahn"].equations, + loss=ppsci.loss.CausalMSELoss( + cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol + ), + name="PDE", + ) + + ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} + ic_label = {"u": u0.reshape([-1, 1])} + ic = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": ic_input, + "label": ic_label, + }, + }, + output_expr={"u": lambda out: out["u"]}, + loss=ppsci.loss.MSELoss("mean"), + name="IC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + ic.name: ic, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + loss_aggregator=mtl.GradNorm( + model, + len(constraint), + cfg.TRAIN.grad_norm.update_freq, + cfg.TRAIN.grad_norm.momentum, + ), + cfg=cfg, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + log_freq=cfg.log_freq, + validator=validator, + cfg=cfg, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = 
ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver(model, cfg=cfg) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + + input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) + + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="allen_cahn_default.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/allen_cahn/allen_cahn_piratenet.py b/examples/allen_cahn/allen_cahn_piratenet.py index 2bcb8f8c98..5c6546db8d 100644 --- a/examples/allen_cahn/allen_cahn_piratenet.py +++ b/examples/allen_cahn/allen_cahn_piratenet.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream """ Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn """ @@ -297,3 +298,296 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +""" +Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn +""" + +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.loss import mtl +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot( + t_star: np.ndarray, + x_star: np.ndarray, + u_ref: np.ndarray, + u_pred: np.ndarray, + output_dir: str, +): + fig = plt.figure(figsize=(18, 5)) + TT, XX = np.meshgrid(t_star, x_star, indexing="ij") + u_ref = u_ref.reshape([len(t_star), len(x_star)]) + + plt.subplot(1, 3, 1) + plt.pcolor(TT, XX, u_ref, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Exact") + plt.tight_layout() + + plt.subplot(1, 3, 2) + plt.pcolor(TT, XX, u_pred, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Predicted") + plt.tight_layout() + + plt.subplot(1, 3, 3) + plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Absolute error") + plt.tight_layout() + + fig_path = osp.join(output_dir, "ac.png") + print(f"Saving figure to {fig_path}") + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.PirateNet(**cfg.MODEL) + + # set equation + equation = {"AllenCahn": 
ppsci.equation.AllenCahn(eps=0.01)} + + # set constraint + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + u0 = u_ref[0, :] # [nx, ] + + t0 = t_star[0] # float + t1 = t_star[-1] # float + + x0 = x_star[0] # float + x1 = x_star[-1] # float + + def gen_input_batch(): + tx = np.random.uniform( + [t0, x0], + [t1, x1], + (cfg.TRAIN.batch_size, 2), + ).astype(dtype) + return { + "t": np.sort(tx[:, 0:1], axis=0), + "x": tx[:, 1:2], + } + + def gen_label_batch(input_batch): + return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["AllenCahn"].equations, + loss=ppsci.loss.CausalMSELoss( + cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol + ), + name="PDE", + ) + + ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} + ic_label = {"u": u0.reshape([-1, 1])} + ic = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": ic_input, + "label": ic_label, + }, + }, + output_expr={"u": lambda out: out["u"]}, + loss=ppsci.loss.MSELoss("mean"), + name="IC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + ic.name: ic, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + loss_aggregator=mtl.GradNorm( + model, + len(constraint), + cfg.TRAIN.grad_norm.update_freq, + cfg.TRAIN.grad_norm.momentum, + ), + cfg=cfg, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.PirateNet(**cfg.MODEL) + + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": 
cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.PirateNet(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver(model, cfg=cfg) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + + input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) + + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="allen_cahn_piratenet.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/allen_cahn/allen_cahn_plain.py b/examples/allen_cahn/allen_cahn_plain.py index 39a07b1227..4c1e48a01e 100644 --- a/examples/allen_cahn/allen_cahn_plain.py +++ b/examples/allen_cahn/allen_cahn_plain.py @@ -1,299 +1,299 @@ -""" -Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn -""" - -from os import path as osp - -import hydra -import numpy as np -import paddle -import scipy.io as sio -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import misc - -dtype = paddle.get_default_dtype() - - -def plot( - t_star: np.ndarray, - x_star: np.ndarray, - u_ref: np.ndarray, - u_pred: np.ndarray, - output_dir: str, -): - fig = plt.figure(figsize=(18, 5)) - TT, XX = np.meshgrid(t_star, x_star, indexing="ij") - u_ref = u_ref.reshape([len(t_star), len(x_star)]) - - plt.subplot(1, 3, 1) - plt.pcolor(TT, XX, u_ref, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Exact") - plt.tight_layout() - - plt.subplot(1, 3, 2) - plt.pcolor(TT, XX, u_pred, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Predicted") - plt.tight_layout() - - plt.subplot(1, 3, 3) - 
plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Absolute error") - plt.tight_layout() - - fig_path = osp.join(output_dir, "ac.png") - print(f"Saving figure to {fig_path}") - fig.savefig(fig_path, bbox_inches="tight", dpi=400) - plt.close() - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"AllenCahn": ppsci.equation.AllenCahn(eps=0.01)} - - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - u0 = u_ref[0, :] # [nx, ] - - t0 = t_star[0] # float - t1 = t_star[-1] # float - - x0 = x_star[0] # float - x1 = x_star[-1] # float - - # set constraint - def gen_input_batch(): - tx = np.random.uniform( - [t0, x0], - [t1, x1], - (cfg.TRAIN.batch_size, 2), - ).astype(dtype) - return { - "t": tx[:, 0:1], - "x": tx[:, 1:2], - } - - def gen_label_batch(input_batch): - return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} - - pde_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "ContinuousNamedArrayDataset", - "input": gen_input_batch, - "label": gen_label_batch, - }, - }, - output_expr=equation["AllenCahn"].equations, - loss=ppsci.loss.MSELoss("mean"), - name="PDE", - ) - - ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} - ic_label = {"u": u0.reshape([-1, 1])} - ic = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": ic_input, - "label": ic_label, - }, - }, - output_expr={"u": lambda out: out["u"]}, - loss=ppsci.loss.MSELoss("mean"), - name="IC", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - ic.name: ic, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=True, - eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, - equation=equation, - validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - use_tbd=True, - cfg=cfg, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def evaluate(cfg: DictConfig): - # set model - model = 
ppsci.arch.MLP(**cfg.MODEL) - - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - - input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) - - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -@hydra.main(version_base=None, config_path="./conf", config_name="allen_cahn.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn +""" + +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot( + t_star: np.ndarray, + x_star: np.ndarray, + u_ref: np.ndarray, + u_pred: np.ndarray, + output_dir: str, +): + fig = 
plt.figure(figsize=(18, 5)) + TT, XX = np.meshgrid(t_star, x_star, indexing="ij") + u_ref = u_ref.reshape([len(t_star), len(x_star)]) + + plt.subplot(1, 3, 1) + plt.pcolor(TT, XX, u_ref, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Exact") + plt.tight_layout() + + plt.subplot(1, 3, 2) + plt.pcolor(TT, XX, u_pred, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Predicted") + plt.tight_layout() + + plt.subplot(1, 3, 3) + plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Absolute error") + plt.tight_layout() + + fig_path = osp.join(output_dir, "ac.png") + print(f"Saving figure to {fig_path}") + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"AllenCahn": ppsci.equation.AllenCahn(eps=0.01)} + + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + u0 = u_ref[0, :] # [nx, ] + + t0 = t_star[0] # float + t1 = t_star[-1] # float + + x0 = x_star[0] # float + x1 = x_star[-1] # float + + # set constraint + def gen_input_batch(): + tx = np.random.uniform( + [t0, x0], + [t1, x1], + (cfg.TRAIN.batch_size, 2), + ).astype(dtype) + return { + "t": tx[:, 0:1], + "x": tx[:, 1:2], + } + + def gen_label_batch(input_batch): + return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["AllenCahn"].equations, + loss=ppsci.loss.MSELoss("mean"), + name="PDE", + ) + + ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} + ic_label = {"u": u0.reshape([-1, 1])} + ic = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": ic_input, + "label": ic_label, + }, + }, + output_expr={"u": lambda out: out["u"]}, + loss=ppsci.loss.MSELoss("mean"), + name="IC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + ic.name: ic, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + eval_during_train=True, + eval_freq=cfg.TRAIN.eval_freq, + seed=cfg.seed, + equation=equation, + validator=validator, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + 
eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + use_tbd=True, + cfg=cfg, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + + input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) + + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +@hydra.main(version_base=None, config_path="./conf", config_name="allen_cahn.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git 
a/examples/allen_cahn/allen_cahn_sota.py b/examples/allen_cahn/allen_cahn_sota.py index 1fb4b7055b..1b36a96fe1 100644 --- a/examples/allen_cahn/allen_cahn_sota.py +++ b/examples/allen_cahn/allen_cahn_sota.py @@ -1,304 +1,304 @@ -""" -Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn -""" - -from os import path as osp - -import hydra -import numpy as np -import paddle -import scipy.io as sio -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.loss import mtl -from ppsci.utils import misc - -dtype = paddle.get_default_dtype() - - -def plot( - t_star: np.ndarray, - x_star: np.ndarray, - u_ref: np.ndarray, - u_pred: np.ndarray, - output_dir: str, -): - fig = plt.figure(figsize=(18, 5)) - TT, XX = np.meshgrid(t_star, x_star, indexing="ij") - u_ref = u_ref.reshape([len(t_star), len(x_star)]) - - plt.subplot(1, 3, 1) - plt.pcolor(TT, XX, u_ref, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Exact") - plt.tight_layout() - - plt.subplot(1, 3, 2) - plt.pcolor(TT, XX, u_pred, cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Predicted") - plt.tight_layout() - - plt.subplot(1, 3, 3) - plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") - plt.colorbar() - plt.xlabel("t") - plt.ylabel("x") - plt.title("Absolute error") - plt.tight_layout() - - fig_path = osp.join(output_dir, "ac.png") - print(f"Saving figure to {fig_path}") - fig.savefig(fig_path, bbox_inches="tight", dpi=400) - plt.close() - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.ModifiedMLP(**cfg.MODEL) - - # set equation - equation = {"AllenCahn": ppsci.equation.AllenCahn(0.01**2)} - - # set constraint - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - u0 = u_ref[0, :] # [nx, ] - - t0 = t_star[0] # float - t1 = t_star[-1] # float - - x0 = x_star[0] # float - x1 = x_star[-1] # float - - def gen_input_batch(): - tx = np.random.uniform( - [t0, x0], - [t1, x1], - (cfg.TRAIN.batch_size, 2), - ).astype(dtype) - return { - "t": np.sort(tx[:, 0:1], axis=0), - "x": tx[:, 1:2], - } - - def gen_label_batch(input_batch): - return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} - - pde_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "ContinuousNamedArrayDataset", - "input": gen_input_batch, - "label": gen_label_batch, - }, - }, - output_expr=equation["AllenCahn"].equations, - loss=ppsci.loss.CausalMSELoss( - cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol - ), - name="PDE", - ) - - ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} - ic_label = {"u": u0.reshape([-1, 1])} - ic = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": ic_input, - "label": ic_label, - }, - }, - output_expr={"u": lambda out: out["u"]}, - loss=ppsci.loss.MSELoss("mean"), - name="IC", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - ic.name: ic, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": 
u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=True, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - loss_aggregator=mtl.NTK( - model, - len(constraint), - cfg.TRAIN.ntk.update_freq, - ), - cfg=cfg, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - - # set validator - tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) - eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - eval_label = {"u": u_ref.reshape([-1, 1])} - u_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": eval_data, - "label": eval_label, - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="u_validator", - ) - validator = {u_validator.name: u_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - u_pred = solver.predict( - eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True - )["u"] - u_pred = u_pred.reshape([len(t_star), len(x_star)]) - - # plot - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - data = sio.loadmat(cfg.DATA_PATH) - u_ref = data["usol"].astype(dtype) # (nt, nx) - t_star = data["t"].flatten().astype(dtype) # [nt, ] - x_star = data["x"].flatten().astype(dtype) # [nx, ] - tx_star = 
misc.cartesian_product(t_star, x_star).astype(dtype) - - input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) - # mapping data to cfg.INFER.output_keys - - plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) - - -@hydra.main(version_base=None, config_path="./conf", config_name="allen_cahn_sota.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/allen_cahn +""" + +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.loss import mtl +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot( + t_star: np.ndarray, + x_star: np.ndarray, + u_ref: np.ndarray, + u_pred: np.ndarray, + output_dir: str, +): + fig = plt.figure(figsize=(18, 5)) + TT, XX = np.meshgrid(t_star, x_star, indexing="ij") + u_ref = u_ref.reshape([len(t_star), len(x_star)]) + + plt.subplot(1, 3, 1) + plt.pcolor(TT, XX, u_ref, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Exact") + plt.tight_layout() + + plt.subplot(1, 3, 2) + plt.pcolor(TT, XX, u_pred, cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Predicted") + plt.tight_layout() + + plt.subplot(1, 3, 3) + plt.pcolor(TT, XX, np.abs(u_ref - u_pred), cmap="jet") + plt.colorbar() + plt.xlabel("t") + plt.ylabel("x") + plt.title("Absolute error") + plt.tight_layout() + + fig_path = osp.join(output_dir, "ac.png") + print(f"Saving figure to {fig_path}") + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.ModifiedMLP(**cfg.MODEL) + + # set equation + equation = {"AllenCahn": ppsci.equation.AllenCahn(0.01**2)} + + # set constraint + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + u0 = u_ref[0, :] # [nx, ] + + t0 = t_star[0] # float + t1 = t_star[-1] # float + + x0 = x_star[0] # float + x1 = x_star[-1] # float + + def gen_input_batch(): + tx = np.random.uniform( + [t0, x0], + [t1, x1], + (cfg.TRAIN.batch_size, 2), + ).astype(dtype) + return { + "t": np.sort(tx[:, 0:1], axis=0), + "x": tx[:, 1:2], + } + + def gen_label_batch(input_batch): + return {"allen_cahn": np.zeros([cfg.TRAIN.batch_size, 1], dtype)} + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["AllenCahn"].equations, + loss=ppsci.loss.CausalMSELoss( + cfg.TRAIN.causal.n_chunks, "mean", tol=cfg.TRAIN.causal.tol + ), + name="PDE", + ) + + ic_input = {"t": np.full([len(x_star), 1], t0), "x": x_star.reshape([-1, 1])} + ic_label = {"u": u0.reshape([-1, 1])} + ic = 
ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": ic_input, + "label": ic_label, + }, + }, + output_expr={"u": lambda out: out["u"]}, + loss=ppsci.loss.MSELoss("mean"), + name="IC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + ic.name: ic, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + eval_during_train=True, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + validator=validator, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + loss_aggregator=mtl.NTK( + model, + len(constraint), + cfg.TRAIN.ntk.update_freq, + ), + cfg=cfg, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + + # set validator + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + eval_data = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + eval_label = {"u": u_ref.reshape([-1, 1])} + u_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="u_validator", + ) + validator = {u_validator.name: u_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + u_pred = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + )["u"] + u_pred = u_pred.reshape([len(t_star), len(x_star)]) + + # plot + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + 
solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.DATA_PATH) + u_ref = data["usol"].astype(dtype) # (nt, nx) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + + input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + u_pred = output_dict["u"].reshape([len(t_star), len(x_star)]) + # mapping data to cfg.INFER.output_keys + + plot(t_star, x_star, u_ref, u_pred, cfg.output_dir) + + +@hydra.main(version_base=None, config_path="./conf", config_name="allen_cahn_sota.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/allen_cahn/conf/allen_cahn.yaml b/examples/allen_cahn/conf/allen_cahn.yaml index 4fbc18617d..80235e7d73 100644 --- a/examples/allen_cahn/conf/allen_cahn.yaml +++ b/examples/allen_cahn/conf/allen_cahn.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -82,3 +83,89 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 batch_size: 1024 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_allen_cahn_plain/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, false] + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 2000 + by_epoch: false + batch_size: 4096 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: 
https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 +>>>>>>> Stashed changes diff --git a/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml b/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml index 4e577f1500..4304b7430d 100644 --- a/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml +++ b/examples/allen_cahn/conf/allen_cahn_causal_fourier_rwf.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -91,3 +92,98 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 batch_size: 1024 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_allen_cahn_causal_fourier_rwf/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, false] + fourier: + dim: 256 + scale: 1.0 + random_weight: + mean: 0.5 + std: 0.1 + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 2000 + by_epoch: false + batch_size: 4096 + pretrained_model_path: null + checkpoint_path: null + causal: + n_chunks: 32 + tol: 1.0 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 +>>>>>>> Stashed changes diff --git a/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml b/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml index 7fe499eed3..83a14cf018 100644 --- a/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml +++ b/examples/allen_cahn/conf/allen_cahn_defalut_ntk.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -94,3 +95,101 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 batch_size: 1024 +======= +hydra: + run: + # dynamic output directory according 
to running time and override name + dir: outputs_allen_cahn_defalut_ntk/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 +use_tbd: false + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, False] + fourier: + dim: 256 + scale: 2.0 + random_weight: + mean: 1.0 + std: 0.1 + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 5000 + by_epoch: false + batch_size: 4096 + pretrained_model_path: null + checkpoint_path: null + causal: + n_chunks: 32 + tol: 1.0 + ntk: + update_freq: 1000 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdpiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 +>>>>>>> Stashed changes diff --git a/examples/allen_cahn/conf/allen_cahn_default.yaml b/examples/allen_cahn/conf/allen_cahn_default.yaml index b5c451de0b..211cf6cfe4 100644 --- a/examples/allen_cahn/conf/allen_cahn_default.yaml +++ b/examples/allen_cahn/conf/allen_cahn_default.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -95,3 +96,102 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 batch_size: 1024 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_allen_cahn_default/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 +use_tbd: true + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, false] + fourier: + dim: 256 + scale: 1.0 + random_weight: + mean: 0.5 + std: 0.1 + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: 
true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 2000 + by_epoch: false + batch_size: 4096 + pretrained_model_path: null + checkpoint_path: null + causal: + n_chunks: 32 + tol: 1.0 + grad_norm: + update_freq: 1000 + momentum: 0.9 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_plain_pretrained.pdparams + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 +>>>>>>> Stashed changes diff --git a/examples/allen_cahn/conf/allen_cahn_piratenet.yaml b/examples/allen_cahn/conf/allen_cahn_piratenet.yaml index 7d5a324e50..87861e783d 100644 --- a/examples/allen_cahn/conf/allen_cahn_piratenet.yaml +++ b/examples/allen_cahn/conf/allen_cahn_piratenet.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -96,3 +97,101 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 batch_size: 1024 +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_allen_cahn_piratenet/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_blocks: 3 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, false] + fourier: + dim: 256 + scale: 2.0 + random_weight: + mean: 1.0 + std: 0.1 + +# training settings +TRAIN: + epochs: 300 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 10 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 5000 + by_epoch: false + batch_size: 8192 + pretrained_model_path: null + checkpoint_path: null + causal: + n_chunks: 32 + tol: 1.0 + grad_norm: + update_freq: 1000 + momentum: 0.9 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/AllenCahn/allen_cahn_piratenet_pretrained.pdparams + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + 
batch_size: 1024 +>>>>>>> Stashed changes diff --git a/examples/allen_cahn/conf/allen_cahn_sota.yaml b/examples/allen_cahn/conf/allen_cahn_sota.yaml index 207c87d972..6d59f754d4 100644 --- a/examples/allen_cahn/conf/allen_cahn_sota.yaml +++ b/examples/allen_cahn/conf/allen_cahn_sota.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -94,3 +95,101 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 batch_size: 1024 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_allen_cahn_sota/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 +use_tbd: false + +DATA_PATH: ./dataset/allen_cahn.mat + +# model settings +MODEL: + input_keys: [t, x] + output_keys: [u] + num_layers: 4 + hidden_size: 256 + activation: tanh + periods: + x: [2.0, False] + fourier: + dim: 256 + scale: 2.0 + random_weight: + mean: 1.0 + std: 0.1 + +# training settings +TRAIN: + epochs: 300 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 5000 + by_epoch: false + batch_size: 8192 + pretrained_model_path: null + checkpoint_path: null + causal: + n_chunks: 32 + tol: 1.0 + ntk: + update_freq: 1000 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/allen_cahn + pdmodel_path: ${INFER.export_path}.pdmodel + pdpiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 +>>>>>>> Stashed changes diff --git a/examples/amgnet/amgnet_airfoil.py b/examples/amgnet/amgnet_airfoil.py index 7bcf9f8548..4a01acea8d 100644 --- a/examples/amgnet/amgnet_airfoil.py +++ b/examples/amgnet/amgnet_airfoil.py @@ -1,226 +1,226 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
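The Allen-Cahn scripts and configs above all report accuracy through ppsci.metric.L2Rel, evaluated on the full (t, x) grid built with misc.cartesian_product. As a rough, framework-free sketch of that relative L2 error (illustrative only; u_pred and u_ref are assumed to be NumPy arrays of identical shape, and PaddleScience's own reduction may differ in detail):

    import numpy as np

    def l2_relative_error(u_pred: np.ndarray, u_ref: np.ndarray) -> float:
        # ||u_pred - u_ref||_2 / ||u_ref||_2 over the flattened (nt, nx) grid
        return float(np.linalg.norm(u_pred - u_ref) / np.linalg.norm(u_ref))

This is the quantity the "L2Rel" entry of the validators above tracks for the predicted field against the reference solution loaded from allen_cahn.mat.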
- -from __future__ import annotations - -from os import path as osp -from typing import TYPE_CHECKING -from typing import Dict -from typing import List - -import hydra -import utils -from omegaconf import DictConfig -from paddle.nn import functional as F - -import ppsci -from ppsci.utils import logger - -if TYPE_CHECKING: - import paddle - import pgl - - -def train_mse_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "pgl.Graph"], - *args, -) -> paddle.Tensor: - return {"pred": F.mse_loss(output_dict["pred"], label_dict["label"].y)} - - -def eval_rmse_func( - output_dict: Dict[str, List["paddle.Tensor"]], - label_dict: Dict[str, List["pgl.Graph"]], - *args, -) -> Dict[str, paddle.Tensor]: - mse_losses = [ - F.mse_loss(pred, label.y) - for (pred, label) in zip(output_dict["pred"], label_dict["label"]) - ] - return {"RMSE": (sum(mse_losses) / len(mse_losses)) ** 0.5} - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set airfoil model - model = ppsci.arch.AMGNet(**cfg.MODEL) - - # set dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "MeshAirfoilDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.TRAIN_DATA_DIR, - "mesh_graph_path": cfg.TRAIN_MESH_GRAPH_PATH, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - output_expr={"pred": lambda out: out["pred"]}, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - name="Sup", - ) - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "MeshAirfoilDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.EVAL_DATA_DIR, - "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - rmse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, - metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, - name="RMSE_validator", - ) - validator = {rmse_validator.name: rmse_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - validator=validator, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - - # visualize prediction - logger.message("Now visualizing prediction, please wait...") - with solver.no_grad_context_manager(True): - for index, (input_, label, _) in enumerate(rmse_validator.data_loader): - truefield = label["label"].y - prefield = model(input_) - utils.log_images( - input_["input"].pos, - prefield["pred"], - truefield, - rmse_validator.data_loader.dataset.elems_list, - index, - "airfoil", - ) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - 
ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set airfoil model - model = ppsci.arch.AMGNet(**cfg.MODEL) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "MeshAirfoilDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.EVAL_DATA_DIR, - "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - rmse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, - metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, - name="RMSE_validator", - ) - validator = {rmse_validator.name: rmse_validator} - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate model - solver.eval() - - # visualize prediction - with solver.no_grad_context_manager(True): - for index, (input_, label, _) in enumerate(rmse_validator.data_loader): - truefield = label["label"].y - prefield = model(input_) - utils.log_images( - input_["input"].pos, - prefield["pred"], - truefield, - rmse_validator.data_loader.dataset.elems_list, - index, - "airfoil", - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="amgnet_airfoil.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from os import path as osp +from typing import TYPE_CHECKING +from typing import Dict +from typing import List + +import hydra +import utils +from omegaconf import DictConfig +from paddle.nn import functional as F + +import ppsci +from ppsci.utils import logger + +if TYPE_CHECKING: + import paddle + import pgl + + +def train_mse_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "pgl.Graph"], + *args, +) -> paddle.Tensor: + return {"pred": F.mse_loss(output_dict["pred"], label_dict["label"].y)} + + +def eval_rmse_func( + output_dict: Dict[str, List["paddle.Tensor"]], + label_dict: Dict[str, List["pgl.Graph"]], + *args, +) -> Dict[str, paddle.Tensor]: + mse_losses = [ + F.mse_loss(pred, label.y) + for (pred, label) in zip(output_dict["pred"], label_dict["label"]) + ] + return {"RMSE": (sum(mse_losses) / len(mse_losses)) ** 0.5} + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set airfoil model + model = ppsci.arch.AMGNet(**cfg.MODEL) + + # set dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "MeshAirfoilDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.TRAIN_DATA_DIR, + "mesh_graph_path": cfg.TRAIN_MESH_GRAPH_PATH, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + output_expr={"pred": lambda out: out["pred"]}, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + name="Sup", + ) + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "MeshAirfoilDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.EVAL_DATA_DIR, + "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + rmse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, + metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, + name="RMSE_validator", + ) + validator = {rmse_validator.name: rmse_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + validator=validator, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + + # visualize prediction + logger.message("Now visualizing prediction, please wait...") + with solver.no_grad_context_manager(True): + for index, (input_, label, _) in enumerate(rmse_validator.data_loader): + truefield = label["label"].y + prefield = model(input_) + utils.log_images( + input_["input"].pos, + prefield["pred"], + truefield, + rmse_validator.data_loader.dataset.elems_list, + index, + "airfoil", + ) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + 
ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set airfoil model + model = ppsci.arch.AMGNet(**cfg.MODEL) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "MeshAirfoilDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.EVAL_DATA_DIR, + "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + rmse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, + metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, + name="RMSE_validator", + ) + validator = {rmse_validator.name: rmse_validator} + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate model + solver.eval() + + # visualize prediction + with solver.no_grad_context_manager(True): + for index, (input_, label, _) in enumerate(rmse_validator.data_loader): + truefield = label["label"].y + prefield = model(input_) + utils.log_images( + input_["input"].pos, + prefield["pred"], + truefield, + rmse_validator.data_loader.dataset.elems_list, + index, + "airfoil", + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="amgnet_airfoil.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/amgnet/amgnet_cylinder.py b/examples/amgnet/amgnet_cylinder.py index 4203f6052e..339e433632 100644 --- a/examples/amgnet/amgnet_cylinder.py +++ b/examples/amgnet/amgnet_cylinder.py @@ -1,226 +1,226 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
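eval_rmse_func in the AMGNet scripts averages the per-graph MSE values and then takes a square root, so the reported "RMSE" coincides with a node-level RMSE only when every evaluation graph has the same number of nodes. A minimal NumPy restatement of that reduction (a sketch with illustrative names, not code from the repository):

    import numpy as np

    def rmse_over_graphs(preds, labels):
        # preds/labels: lists of per-graph arrays with matching shapes
        mse_per_graph = [float(np.mean((p - y) ** 2)) for p, y in zip(preds, labels)]
        return float((sum(mse_per_graph) / len(mse_per_graph)) ** 0.5)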
- -from __future__ import annotations - -from os import path as osp -from typing import TYPE_CHECKING -from typing import Dict -from typing import List - -import hydra -import utils -from omegaconf import DictConfig -from paddle.nn import functional as F - -import ppsci -from ppsci.utils import logger - -if TYPE_CHECKING: - import paddle - import pgl - - -def train_mse_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "pgl.Graph"], - *args, -) -> paddle.Tensor: - return {"pred": F.mse_loss(output_dict["pred"], label_dict["label"].y)} - - -def eval_rmse_func( - output_dict: Dict[str, List["paddle.Tensor"]], - label_dict: Dict[str, List["pgl.Graph"]], - *args, -) -> Dict[str, paddle.Tensor]: - mse_losses = [ - F.mse_loss(pred, label.y) - for (pred, label) in zip(output_dict["pred"], label_dict["label"]) - ] - return {"RMSE": (sum(mse_losses) / len(mse_losses)) ** 0.5} - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set cylinder model - model = ppsci.arch.AMGNet(**cfg.MODEL) - - # set dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "MeshCylinderDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.TRAIN_DATA_DIR, - "mesh_graph_path": cfg.TRAIN_MESH_GRAPH_PATH, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - output_expr={"pred": lambda out: out["pred"]}, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - name="Sup", - ) - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "MeshCylinderDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.EVAL_DATA_DIR, - "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - rmse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, - metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, - name="RMSE_validator", - ) - validator = {rmse_validator.name: rmse_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - validator=validator, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - - # visualize prediction - logger.message("Now visualizing prediction, please wait...") - with solver.no_grad_context_manager(True): - for index, (input_, label, _) in enumerate(rmse_validator.data_loader): - truefield = label["label"].y - prefield = model(input_) - utils.log_images( - input_["input"].pos, - prefield["pred"], - truefield, - rmse_validator.data_loader.dataset.elems_list, - index, - "cylinder", - ) - - -def evaluate(cfg: DictConfig): - # set random seed for 
reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set airfoil model - model = ppsci.arch.AMGNet(**cfg.MODEL) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "MeshCylinderDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.EVAL_DATA_DIR, - "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - rmse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, - metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, - name="RMSE_validator", - ) - validator = {rmse_validator.name: rmse_validator} - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate model - solver.eval() - - # visualize prediction - with solver.no_grad_context_manager(True): - for index, (input_, label, _) in enumerate(rmse_validator.data_loader): - truefield = label["label"].y - prefield = model(input_) - utils.log_images( - input_["input"].pos, - prefield["pred"], - truefield, - rmse_validator.data_loader.dataset.elems_list, - index, - "cylinder", - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="amgnet_cylinder.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from os import path as osp +from typing import TYPE_CHECKING +from typing import Dict +from typing import List + +import hydra +import utils +from omegaconf import DictConfig +from paddle.nn import functional as F + +import ppsci +from ppsci.utils import logger + +if TYPE_CHECKING: + import paddle + import pgl + + +def train_mse_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "pgl.Graph"], + *args, +) -> paddle.Tensor: + return {"pred": F.mse_loss(output_dict["pred"], label_dict["label"].y)} + + +def eval_rmse_func( + output_dict: Dict[str, List["paddle.Tensor"]], + label_dict: Dict[str, List["pgl.Graph"]], + *args, +) -> Dict[str, paddle.Tensor]: + mse_losses = [ + F.mse_loss(pred, label.y) + for (pred, label) in zip(output_dict["pred"], label_dict["label"]) + ] + return {"RMSE": (sum(mse_losses) / len(mse_losses)) ** 0.5} + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set cylinder model + model = ppsci.arch.AMGNet(**cfg.MODEL) + + # set dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "MeshCylinderDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.TRAIN_DATA_DIR, + "mesh_graph_path": cfg.TRAIN_MESH_GRAPH_PATH, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + output_expr={"pred": lambda out: out["pred"]}, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + name="Sup", + ) + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "MeshCylinderDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.EVAL_DATA_DIR, + "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + rmse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, + metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, + name="RMSE_validator", + ) + validator = {rmse_validator.name: rmse_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + validator=validator, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + + # visualize prediction + logger.message("Now visualizing prediction, please wait...") + with solver.no_grad_context_manager(True): + for index, (input_, label, _) in enumerate(rmse_validator.data_loader): + truefield = label["label"].y + prefield = model(input_) + utils.log_images( + input_["input"].pos, + prefield["pred"], + truefield, + rmse_validator.data_loader.dataset.elems_list, + index, + "cylinder", + ) + + +def evaluate(cfg: DictConfig): + # set random seed for 
reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set airfoil model + model = ppsci.arch.AMGNet(**cfg.MODEL) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "MeshCylinderDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.EVAL_DATA_DIR, + "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + rmse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, + metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, + name="RMSE_validator", + ) + validator = {rmse_validator.name: rmse_validator} + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate model + solver.eval() + + # visualize prediction + with solver.no_grad_context_manager(True): + for index, (input_, label, _) in enumerate(rmse_validator.data_loader): + truefield = label["label"].y + prefield = model(input_) + utils.log_images( + input_["input"].pos, + prefield["pred"], + truefield, + rmse_validator.data_loader.dataset.elems_list, + index, + "cylinder", + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="amgnet_cylinder.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/amgnet/conf/amgnet_airfoil.yaml b/examples/amgnet/conf/amgnet_airfoil.yaml index 9213230ef4..b8e58bd99b 100644 --- a/examples/amgnet/conf/amgnet_airfoil.yaml +++ b/examples/amgnet/conf/amgnet_airfoil.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -65,3 +66,71 @@ EVAL: batch_size: 1 pretrained_model_path: null eval_with_no_grad: true +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_amgnet_airfoil/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition + +# set training data path +TRAIN_DATA_DIR: "./data/NACA0012_interpolate/outputs_train" +TRAIN_MESH_GRAPH_PATH: "./data/NACA0012_interpolate/mesh_fine.su2" + +# set evaluate data path +EVAL_DATA_DIR: "./data/NACA0012_interpolate/outputs_test" +EVAL_MESH_GRAPH_PATH: "./data/NACA0012_interpolate/mesh_fine.su2" + +# model settings +MODEL: + input_keys: ["input"] + output_keys: ["pred"] + input_dim: 5 + output_dim: 3 + latent_dim: 128 + num_layers: 2 + message_passing_aggregator: "sum" + message_passing_steps: 6 + speed: "norm" + +# training settings +TRAIN: + 
epochs: 500 + iters_per_epoch: 42 + save_freq: 50 + eval_during_train: true + eval_freq: 50 + learning_rate: 5.0e-4 + batch_size: 4 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 1 + pretrained_model_path: null + eval_with_no_grad: true +>>>>>>> Stashed changes diff --git a/examples/amgnet/conf/amgnet_cylinder.yaml b/examples/amgnet/conf/amgnet_cylinder.yaml index 39cc559372..6a67e2e59d 100644 --- a/examples/amgnet/conf/amgnet_cylinder.yaml +++ b/examples/amgnet/conf/amgnet_cylinder.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -65,3 +66,71 @@ EVAL: batch_size: 1 pretrained_model_path: null eval_with_no_grad: true +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_amgnet_cylinder/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition + +# set training data path +TRAIN_DATA_DIR: "./data/cylinderdata/train" +TRAIN_MESH_GRAPH_PATH: "./data/cylinderdata/cylinder.su2" + +# set evaluate data path +EVAL_DATA_DIR: "./data/cylinderdata/test" +EVAL_MESH_GRAPH_PATH: "./data/cylinderdata/cylinder.su2" + +# model settings +MODEL: + input_keys: ["input"] + output_keys: ["pred"] + input_dim: 4 + output_dim: 3 + latent_dim: 128 + num_layers: 2 + message_passing_aggregator: "sum" + message_passing_steps: 6 + speed: "norm" + +# training settings +TRAIN: + epochs: 500 + iters_per_epoch: 42 + save_freq: 50 + eval_during_train: true + eval_freq: 50 + learning_rate: 5.0e-4 + batch_size: 4 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 1 + pretrained_model_path: null + eval_with_no_grad: true +>>>>>>> Stashed changes diff --git a/examples/amgnet/requirements.txt b/examples/amgnet/requirements.txt index 2bc47469db..6b790ce0e1 100644 --- a/examples/amgnet/requirements.txt +++ b/examples/amgnet/requirements.txt @@ -1,4 +1,4 @@ -git+https://github.com/PaddlePaddle/PGL.git -matplotlib -pyamg -scipy +git+https://github.com/PaddlePaddle/PGL.git +matplotlib +pyamg +scipy diff --git a/examples/amgnet/utils.py b/examples/amgnet/utils.py index 5a8cd3e7f5..c467a01ee7 100644 --- a/examples/amgnet/utils.py +++ b/examples/amgnet/utils.py @@ -1,287 +1,287 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
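make_grid and save_image in examples/amgnet/utils.py (removed and re-added below) appear to be Paddle ports of the torchvision utilities of the same name: a list of image tensors is stacked into a 4-D mini-batch, optionally normalized into a value range, and tiled into a single C x H x W grid that is then written to disk as a PNG. A minimal usage sketch under that assumption, mirroring what log_images does with its "pred" and "true" renderings (the file path here is only an example):

    import paddle
    from utils import make_grid, save_image  # helpers defined in examples/amgnet/utils.py below

    # two placeholder 3 x 64 x 64 images standing in for the rendered fields
    imgs = [paddle.rand([3, 64, 64]), paddle.rand([3, 64, 64])]
    grid = make_grid(paddle.stack(imgs), padding=0)  # tiled side by side into one image tensor
    save_image(grid, "./result/demo_field.png")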
- -from __future__ import annotations - -import math -import os -import pathlib -import warnings -from os import path as osp -from typing import BinaryIO -from typing import List -from typing import Optional -from typing import Text -from typing import Tuple -from typing import Union - -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import paddle -from paddle.vision import transforms as T -from PIL import Image - -matplotlib.use("Agg") - - -@paddle.no_grad() -def make_grid( - tensor: Union[paddle.Tensor, List[paddle.Tensor]], - nrow: int = 8, - padding: int = 2, - normalize: bool = False, - value_range: Optional[Tuple[int, int]] = None, - scale_each: bool = False, - pad_value: int = 0, - **kwargs, -) -> paddle.Tensor: - if not ( - isinstance(tensor, paddle.Tensor) - or ( - isinstance(tensor, list) - and all(isinstance(t, paddle.Tensor) for t in tensor) - ) - ): - raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}") - - if "range" in kwargs.keys(): - warning = "range will be deprecated, please use value_range instead." - warnings.warn(warning) - value_range = kwargs["range"] - - # if list of tensors, convert to a 4D mini-batch Tensor - if isinstance(tensor, list): - tensor = paddle.stack(tensor, axis=0) - - if tensor.ndim == 2: # single image H x W - tensor = tensor.unsqueeze(0) - if tensor.ndim == 3: # single image - if tensor.shape[0] == 1: # if single-channel, convert to 3-channel - tensor = paddle.concat((tensor, tensor, tensor), 0) - tensor = tensor.unsqueeze(0) - if tensor.ndim == 4 and tensor.shape[1] == 1: # single-channel images - tensor = paddle.concat((tensor, tensor, tensor), 1) - - if normalize is True: - if value_range is not None: - if not isinstance(value_range, tuple): - raise TypeError( - "value_range has to be a tuple (min, max) if specified. 
min and max are numbers" - ) - - def norm_ip(img, low, high): - img.clip(min=low, max=high) - img = img - low - img = img / max(high - low, 1e-5) - - def norm_range(t, value_range): - if value_range is not None: - norm_ip(t, value_range[0], value_range[1]) - else: - norm_ip(t, float(t.min()), float(t.max())) - - if scale_each is True: - for t in tensor: # loop over mini-batch dimension - norm_range(t, value_range) - else: - norm_range(tensor, value_range) - - if tensor.shape[0] == 1: - return tensor.squeeze(0) - - # make the mini-batch of images into a grid - nmaps = tensor.shape[0] - xmaps = min(nrow, nmaps) - ymaps = int(math.ceil(float(nmaps) / xmaps)) - height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding) - num_channels = tensor.shape[1] - grid = paddle.full( - (num_channels, height * ymaps + padding, width * xmaps + padding), pad_value - ) - k = 0 - for y in range(ymaps): - for x in range(xmaps): - if k >= nmaps: - break - grid[ - :, - y * height + padding : (y + 1) * height, - x * width + padding : (x + 1) * width, - ] = tensor[k] - k = k + 1 - return grid - - -@paddle.no_grad() -def save_image( - tensor: Union[paddle.Tensor, List[paddle.Tensor]], - fp: Union[Text, pathlib.Path, BinaryIO], - format: Optional[str] = None, - **kwargs, -) -> None: - grid = make_grid(tensor, **kwargs) - ndarr = ( - paddle.clip(grid * 255 + 0.5, 0, 255).transpose([1, 2, 0]).cast("uint8").numpy() - ) - im = Image.fromarray(ndarr) - os.makedirs(osp.dirname(fp), exist_ok=True) - im.save(fp, format=format) - - -def log_images( - nodes, - pred, - true, - elems_list, - index, - mode, - aoa=0, - mach=0, - file="field.png", -): - for field in range(pred.shape[1]): - true_img = plot_field( - nodes, - elems_list, - true[:, field], - mode=mode, - col=field, - clim=(-0.8, 0.8), - title="true", - ) - true_img = T.ToTensor()(true_img) - - pred_img = plot_field( - nodes, - elems_list, - pred[:, field], - mode=mode, - col=field, - clim=(-0.8, 0.8), - title="pred", - ) - pred_img = T.ToTensor()(pred_img) - imgs = [pred_img, true_img] - grid = make_grid(paddle.stack(imgs), padding=0) - out_file = file + f"{field}" - if mode == "airfoil": - if aoa == 8.0 and mach == 0.65: - save_image( - grid, "./result/image/" + str(index) + out_file + "_field.png" - ) - save_image( - grid, "./result/image/airfoil/" + str(index) + out_file + "_field.png" - ) - elif mode == "cylinder": - if aoa == 39.0: - save_image( - grid, "./result/image/" + str(index) + out_file + "_field.png" - ) - save_image( - grid, "./result/image/cylinder/" + str(index) + out_file + "_field.png" - ) - else: - raise ValueError( - f"Argument 'mode' should be 'airfoil' or 'cylinder', but got {mode}." 
- ) - - -def plot_field( - nodes: paddle.Tensor, - elems_list, - field: paddle.Tensor, - mode, - col, - contour=False, - clim=None, - zoom=True, - get_array=True, - out_file=None, - show=False, - title="", -): - elems_list = sum(elems_list, []) - tris, _ = quad2tri(elems_list) - tris = np.array(tris) - x, y = nodes[:, :2].t().detach().numpy() - field = field.detach().numpy() - fig = plt.figure(dpi=800) - if contour: - plt.tricontourf(x, y, tris, field) - else: - plt.tripcolor(x, y, tris, field) - if clim: - plt.clim(*clim) - colorbar = plt.colorbar() - if mode == "airfoil": - if col == 0: - colorbar.set_label("x-velocity", fontsize=16) - elif col == 1: - colorbar.set_label("pressure", fontsize=16) - elif col == 2: - colorbar.set_label("y-velocity", fontsize=16) - if mode == "cylinder": - if col == 0: - colorbar.set_label("pressure", fontsize=16) - elif col == 1: - colorbar.set_label("x-velocity", fontsize=16) - elif col == 2: - colorbar.set_label("y-velocity", fontsize=16) - if zoom: - if mode == "airfoil": - plt.xlim(left=-0.5, right=1.5) - plt.ylim(bottom=-0.5, top=0.5) - else: - plt.xlim(left=-5, right=5.0) - plt.ylim(bottom=-5, top=5.0) - - if title: - plt.title(title) - - if out_file is not None: - plt.savefig(out_file) - plt.close() - - if show: - plt.show() - - if get_array: - if mode == "airfoil": - plt.gca().invert_yaxis() - fig.canvas.draw() - array = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) - array = array.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - fig.clf() - fig.clear() - plt.close() - return array - - -def quad2tri(elems): - new_elems = [] - new_edges = [] - for e in elems: - if len(e) <= 3: - new_elems.append(e) - else: - new_elems.append([e[0], e[1], e[2]]) - new_elems.append([e[0], e[2], e[3]]) - new_edges.append(paddle.to_tensor(([[e[0]], [e[2]]]), dtype=paddle.int64)) - new_edges = ( - paddle.concat(new_edges, axis=1) - if new_edges - else paddle.to_tensor([], dtype=paddle.int64) - ) - return new_elems, new_edges +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
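quad2tri at the end of utils.py (re-added below) splits every quadrilateral element along its 0-2 diagonal into two triangles so that plot_field can hand the mesh to Matplotlib's tripcolor/tricontourf, which operate on triangulations. A standalone illustration of just that splitting rule (pure Python; the edge bookkeeping the original also returns is omitted here):

    def split_elem(e):
        # triangles pass through unchanged; a quad [a, b, c, d] becomes [a, b, c] and [a, c, d]
        return [e] if len(e) <= 3 else [[e[0], e[1], e[2]], [e[0], e[2], e[3]]]

    assert split_elem([0, 1, 2, 3]) == [[0, 1, 2], [0, 2, 3]]
    assert split_elem([4, 5, 6]) == [[4, 5, 6]]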
+ +from __future__ import annotations + +import math +import os +import pathlib +import warnings +from os import path as osp +from typing import BinaryIO +from typing import List +from typing import Optional +from typing import Text +from typing import Tuple +from typing import Union + +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import paddle +from paddle.vision import transforms as T +from PIL import Image + +matplotlib.use("Agg") + + +@paddle.no_grad() +def make_grid( + tensor: Union[paddle.Tensor, List[paddle.Tensor]], + nrow: int = 8, + padding: int = 2, + normalize: bool = False, + value_range: Optional[Tuple[int, int]] = None, + scale_each: bool = False, + pad_value: int = 0, + **kwargs, +) -> paddle.Tensor: + if not ( + isinstance(tensor, paddle.Tensor) + or ( + isinstance(tensor, list) + and all(isinstance(t, paddle.Tensor) for t in tensor) + ) + ): + raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}") + + if "range" in kwargs.keys(): + warning = "range will be deprecated, please use value_range instead." + warnings.warn(warning) + value_range = kwargs["range"] + + # if list of tensors, convert to a 4D mini-batch Tensor + if isinstance(tensor, list): + tensor = paddle.stack(tensor, axis=0) + + if tensor.ndim == 2: # single image H x W + tensor = tensor.unsqueeze(0) + if tensor.ndim == 3: # single image + if tensor.shape[0] == 1: # if single-channel, convert to 3-channel + tensor = paddle.concat((tensor, tensor, tensor), 0) + tensor = tensor.unsqueeze(0) + if tensor.ndim == 4 and tensor.shape[1] == 1: # single-channel images + tensor = paddle.concat((tensor, tensor, tensor), 1) + + if normalize is True: + if value_range is not None: + if not isinstance(value_range, tuple): + raise TypeError( + "value_range has to be a tuple (min, max) if specified. 
min and max are numbers" + ) + + def norm_ip(img, low, high): + img.clip(min=low, max=high) + img = img - low + img = img / max(high - low, 1e-5) + + def norm_range(t, value_range): + if value_range is not None: + norm_ip(t, value_range[0], value_range[1]) + else: + norm_ip(t, float(t.min()), float(t.max())) + + if scale_each is True: + for t in tensor: # loop over mini-batch dimension + norm_range(t, value_range) + else: + norm_range(tensor, value_range) + + if tensor.shape[0] == 1: + return tensor.squeeze(0) + + # make the mini-batch of images into a grid + nmaps = tensor.shape[0] + xmaps = min(nrow, nmaps) + ymaps = int(math.ceil(float(nmaps) / xmaps)) + height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding) + num_channels = tensor.shape[1] + grid = paddle.full( + (num_channels, height * ymaps + padding, width * xmaps + padding), pad_value + ) + k = 0 + for y in range(ymaps): + for x in range(xmaps): + if k >= nmaps: + break + grid[ + :, + y * height + padding : (y + 1) * height, + x * width + padding : (x + 1) * width, + ] = tensor[k] + k = k + 1 + return grid + + +@paddle.no_grad() +def save_image( + tensor: Union[paddle.Tensor, List[paddle.Tensor]], + fp: Union[Text, pathlib.Path, BinaryIO], + format: Optional[str] = None, + **kwargs, +) -> None: + grid = make_grid(tensor, **kwargs) + ndarr = ( + paddle.clip(grid * 255 + 0.5, 0, 255).transpose([1, 2, 0]).cast("uint8").numpy() + ) + im = Image.fromarray(ndarr) + os.makedirs(osp.dirname(fp), exist_ok=True) + im.save(fp, format=format) + + +def log_images( + nodes, + pred, + true, + elems_list, + index, + mode, + aoa=0, + mach=0, + file="field.png", +): + for field in range(pred.shape[1]): + true_img = plot_field( + nodes, + elems_list, + true[:, field], + mode=mode, + col=field, + clim=(-0.8, 0.8), + title="true", + ) + true_img = T.ToTensor()(true_img) + + pred_img = plot_field( + nodes, + elems_list, + pred[:, field], + mode=mode, + col=field, + clim=(-0.8, 0.8), + title="pred", + ) + pred_img = T.ToTensor()(pred_img) + imgs = [pred_img, true_img] + grid = make_grid(paddle.stack(imgs), padding=0) + out_file = file + f"{field}" + if mode == "airfoil": + if aoa == 8.0 and mach == 0.65: + save_image( + grid, "./result/image/" + str(index) + out_file + "_field.png" + ) + save_image( + grid, "./result/image/airfoil/" + str(index) + out_file + "_field.png" + ) + elif mode == "cylinder": + if aoa == 39.0: + save_image( + grid, "./result/image/" + str(index) + out_file + "_field.png" + ) + save_image( + grid, "./result/image/cylinder/" + str(index) + out_file + "_field.png" + ) + else: + raise ValueError( + f"Argument 'mode' should be 'airfoil' or 'cylinder', but got {mode}." 
+ ) + + +def plot_field( + nodes: paddle.Tensor, + elems_list, + field: paddle.Tensor, + mode, + col, + contour=False, + clim=None, + zoom=True, + get_array=True, + out_file=None, + show=False, + title="", +): + elems_list = sum(elems_list, []) + tris, _ = quad2tri(elems_list) + tris = np.array(tris) + x, y = nodes[:, :2].t().detach().numpy() + field = field.detach().numpy() + fig = plt.figure(dpi=800) + if contour: + plt.tricontourf(x, y, tris, field) + else: + plt.tripcolor(x, y, tris, field) + if clim: + plt.clim(*clim) + colorbar = plt.colorbar() + if mode == "airfoil": + if col == 0: + colorbar.set_label("x-velocity", fontsize=16) + elif col == 1: + colorbar.set_label("pressure", fontsize=16) + elif col == 2: + colorbar.set_label("y-velocity", fontsize=16) + if mode == "cylinder": + if col == 0: + colorbar.set_label("pressure", fontsize=16) + elif col == 1: + colorbar.set_label("x-velocity", fontsize=16) + elif col == 2: + colorbar.set_label("y-velocity", fontsize=16) + if zoom: + if mode == "airfoil": + plt.xlim(left=-0.5, right=1.5) + plt.ylim(bottom=-0.5, top=0.5) + else: + plt.xlim(left=-5, right=5.0) + plt.ylim(bottom=-5, top=5.0) + + if title: + plt.title(title) + + if out_file is not None: + plt.savefig(out_file) + plt.close() + + if show: + plt.show() + + if get_array: + if mode == "airfoil": + plt.gca().invert_yaxis() + fig.canvas.draw() + array = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) + array = array.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + fig.clf() + fig.clear() + plt.close() + return array + + +def quad2tri(elems): + new_elems = [] + new_edges = [] + for e in elems: + if len(e) <= 3: + new_elems.append(e) + else: + new_elems.append([e[0], e[1], e[2]]) + new_elems.append([e[0], e[2], e[3]]) + new_edges.append(paddle.to_tensor(([[e[0]], [e[2]]]), dtype=paddle.int64)) + new_edges = ( + paddle.concat(new_edges, axis=1) + if new_edges + else paddle.to_tensor([], dtype=paddle.int64) + ) + return new_elems, new_edges diff --git a/examples/aneurysm/aneurysm.py b/examples/aneurysm/aneurysm.py index 7f871ba007..7b019d7ea3 100644 --- a/examples/aneurysm/aneurysm.py +++ b/examples/aneurysm/aneurysm.py @@ -1,409 +1,409 @@ -""" -Reference: https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/intermediate/adding_stl_files.html -""" - -import hydra -import numpy as np -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import reader - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = { - "NavierStokes": ppsci.equation.NavierStokes( - cfg.NU * cfg.SCALE, cfg.RHO, cfg.DIM, False - ), - "NormalDotVec": ppsci.equation.NormalDotVec(("u", "v", "w")), - } - - # set geometry - inlet_geo = ppsci.geometry.Mesh(cfg.INLET_STL_PATH) - outlet_geo = ppsci.geometry.Mesh(cfg.OUTLET_STL_PATH) - noslip_geo = ppsci.geometry.Mesh(cfg.NOSLIP_STL_PATH) - integral_geo = ppsci.geometry.Mesh(cfg.INTEGRAL_STL_PATH) - interior_geo = ppsci.geometry.Mesh(cfg.INTERIOR_STL_PATH) - - # normalize meshes - inlet_geo = inlet_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) - outlet_geo = outlet_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) - noslip_geo = noslip_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) - integral_geo = integral_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) - interior_geo = interior_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) - geom = { - "inlet_geo": inlet_geo, - "outlet_geo": outlet_geo, - "noslip_geo": noslip_geo, - "integral_geo": 
integral_geo, - "interior_geo": interior_geo, - } - - # set dataloader config - train_dataloader_cfg = { - "dataset": "NamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - INLET_AREA = 21.1284 * (cfg.SCALE**2) - INLET_RADIUS = np.sqrt(INLET_AREA / np.pi) - - def _compute_parabola(_in): - centered_x = _in["x"] - cfg.INLET_CENTER[0] - centered_y = _in["y"] - cfg.INLET_CENTER[1] - centered_z = _in["z"] - cfg.INLET_CENTER[2] - distance = np.sqrt(centered_x**2 + centered_y**2 + centered_z**2) - parabola = cfg.INLET_VEL * np.maximum((1 - (distance / INLET_RADIUS) ** 2), 0) - return parabola - - def inlet_u_ref_func(_in): - return cfg.INLET_NORMAL[0] * _compute_parabola(_in) - - def inlet_v_ref_func(_in): - return cfg.INLET_NORMAL[1] * _compute_parabola(_in) - - def inlet_w_ref_func(_in): - return cfg.INLET_NORMAL[2] * _compute_parabola(_in) - - bc_inlet = ppsci.constraint.BoundaryConstraint( - {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, - {"u": inlet_u_ref_func, "v": inlet_v_ref_func, "w": inlet_w_ref_func}, - geom["inlet_geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_inlet}, - ppsci.loss.MSELoss("sum"), - name="inlet", - ) - bc_outlet = ppsci.constraint.BoundaryConstraint( - {"p": lambda d: d["p"]}, - {"p": 0}, - geom["outlet_geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_outlet}, - ppsci.loss.MSELoss("sum"), - name="outlet", - ) - bc_noslip = ppsci.constraint.BoundaryConstraint( - {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, - {"u": 0, "v": 0, "w": 0}, - geom["noslip_geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_noslip}, - ppsci.loss.MSELoss("sum"), - name="no_slip", - ) - pde = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0}, - geom["interior_geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.pde}, - ppsci.loss.MSELoss("sum"), - name="interior", - ) - igc_outlet = ppsci.constraint.IntegralConstraint( - equation["NormalDotVec"].equations, - {"normal_dot_vec": 2.54}, - geom["outlet_geo"], - { - **train_dataloader_cfg, - "iters_per_epoch": cfg.TRAIN.iters_integral.igc_outlet, - "batch_size": cfg.TRAIN.batch_size.igc_outlet, - "integral_batch_size": cfg.TRAIN.integral_batch_size.igc_outlet, - }, - ppsci.loss.IntegralLoss("sum"), - weight_dict=cfg.TRAIN.weight.igc_outlet, - name="igc_outlet", - ) - igc_integral = ppsci.constraint.IntegralConstraint( - equation["NormalDotVec"].equations, - {"normal_dot_vec": -2.54}, - geom["integral_geo"], - { - **train_dataloader_cfg, - "iters_per_epoch": cfg.TRAIN.iters_integral.igc_integral, - "batch_size": cfg.TRAIN.batch_size.igc_integral, - "integral_batch_size": cfg.TRAIN.integral_batch_size.igc_integral, - }, - ppsci.loss.IntegralLoss("sum"), - weight_dict=cfg.TRAIN.weight.igc_integral, - name="igc_integral", - ) - # wrap constraints together - constraint = { - bc_inlet.name: bc_inlet, - bc_outlet.name: bc_outlet, - bc_noslip.name: bc_noslip, - pde.name: pde, - igc_outlet.name: igc_outlet, - igc_integral.name: igc_integral, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - eval_data_dict = reader.load_csv_file( - 
cfg.EVAL_CSV_PATH, - ("x", "y", "z", "u", "v", "w", "p"), - { - "x": "Points:0", - "y": "Points:1", - "z": "Points:2", - "u": "U:0", - "v": "U:1", - "w": "U:2", - "p": "p", - }, - ) - input_dict = { - "x": (eval_data_dict["x"] - cfg.CENTER[0]) * cfg.SCALE, - "y": (eval_data_dict["y"] - cfg.CENTER[1]) * cfg.SCALE, - "z": (eval_data_dict["z"] - cfg.CENTER[2]) * cfg.SCALE, - } - if "area" in input_dict.keys(): - input_dict["area"] *= cfg.SCALE ** (equation["NavierStokes"].dim) - - label_dict = { - "p": eval_data_dict["p"], - "u": eval_data_dict["u"], - "v": eval_data_dict["v"], - "w": eval_data_dict["w"], - } - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - }, - "sampler": {"name": "BatchSampler"}, - "num_workers": 1, - } - sup_validator = ppsci.validate.SupervisedValidator( - {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, - ppsci.loss.MSELoss("mean"), - { - "p": lambda out: out["p"], - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - metric={"MSE": ppsci.metric.MSE()}, - name="ref_u_v_w_p", - ) - validator = {sup_validator.name: sup_validator} - - # set visualizer(optional) - visualizer = { - "visualize_u_v_w_p": ppsci.visualize.VisualizerVtu( - input_dict, - { - "p": lambda out: out["p"], - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - batch_size=cfg.EVAL.batch_size, - prefix="result_u_v_w_p", - ), - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=True, - eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set validator - eval_data_dict = reader.load_csv_file( - cfg.EVAL_CSV_PATH, - ("x", "y", "z", "u", "v", "w", "p"), - { - "x": "Points:0", - "y": "Points:1", - "z": "Points:2", - "u": "U:0", - "v": "U:1", - "w": "U:2", - "p": "p", - }, - ) - input_dict = { - "x": (eval_data_dict["x"] - cfg.CENTER[0]) * cfg.SCALE, - "y": (eval_data_dict["y"] - cfg.CENTER[1]) * cfg.SCALE, - "z": (eval_data_dict["z"] - cfg.CENTER[2]) * cfg.SCALE, - } - - label_dict = { - "p": eval_data_dict["p"], - "u": eval_data_dict["u"], - "v": eval_data_dict["v"], - "w": eval_data_dict["w"], - } - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - }, - "sampler": {"name": "BatchSampler"}, - "num_workers": 1, - } - sup_validator = ppsci.validate.SupervisedValidator( - {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, - ppsci.loss.MSELoss("mean"), - { - "p": lambda out: out["p"], - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - metric={"MSE": ppsci.metric.MSE()}, - name="ref_u_v_w_p", - ) - validator = {sup_validator.name: sup_validator} - - # set visualizer - visualizer = { - "visualize_u_v_w_p": ppsci.visualize.VisualizerVtu( - input_dict, - { - "p": lambda 
out: out["p"], - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - batch_size=cfg.EVAL.batch_size, - prefix="result_u_v_w_p", - ), - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate - solver.eval() - # visualize prediction - solver.visualize() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - eval_data_dict = reader.load_csv_file( - cfg.EVAL_CSV_PATH, - ("x", "y", "z", "u", "v", "w", "p"), - { - "x": "Points:0", - "y": "Points:1", - "z": "Points:2", - "u": "U:0", - "v": "U:1", - "w": "U:2", - "p": "p", - }, - ) - input_dict = { - "x": (eval_data_dict["x"] - cfg.CENTER[0]) * cfg.SCALE, - "y": (eval_data_dict["y"] - cfg.CENTER[1]) * cfg.SCALE, - "z": (eval_data_dict["z"] - cfg.CENTER[2]) * cfg.SCALE, - } - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - ppsci.visualize.save_vtu_from_dict( - "./aneurysm_pred.vtu", - {**input_dict, **output_dict}, - input_dict.keys(), - cfg.MODEL.output_keys, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="aneurysm.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/intermediate/adding_stl_files.html +""" + +import hydra +import numpy as np +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import reader + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = { + "NavierStokes": ppsci.equation.NavierStokes( + cfg.NU * cfg.SCALE, cfg.RHO, cfg.DIM, False + ), + "NormalDotVec": ppsci.equation.NormalDotVec(("u", "v", "w")), + } + + # set geometry + inlet_geo = ppsci.geometry.Mesh(cfg.INLET_STL_PATH) + outlet_geo = ppsci.geometry.Mesh(cfg.OUTLET_STL_PATH) + noslip_geo = ppsci.geometry.Mesh(cfg.NOSLIP_STL_PATH) + integral_geo = ppsci.geometry.Mesh(cfg.INTEGRAL_STL_PATH) + interior_geo = ppsci.geometry.Mesh(cfg.INTERIOR_STL_PATH) + + # normalize meshes + inlet_geo = inlet_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) + outlet_geo = outlet_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) + noslip_geo = noslip_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) + integral_geo = integral_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) + interior_geo = 
interior_geo.translate(-np.array(cfg.CENTER)).scale(cfg.SCALE) + geom = { + "inlet_geo": inlet_geo, + "outlet_geo": outlet_geo, + "noslip_geo": noslip_geo, + "integral_geo": integral_geo, + "interior_geo": interior_geo, + } + + # set dataloader config + train_dataloader_cfg = { + "dataset": "NamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + INLET_AREA = 21.1284 * (cfg.SCALE**2) + INLET_RADIUS = np.sqrt(INLET_AREA / np.pi) + + def _compute_parabola(_in): + centered_x = _in["x"] - cfg.INLET_CENTER[0] + centered_y = _in["y"] - cfg.INLET_CENTER[1] + centered_z = _in["z"] - cfg.INLET_CENTER[2] + distance = np.sqrt(centered_x**2 + centered_y**2 + centered_z**2) + parabola = cfg.INLET_VEL * np.maximum((1 - (distance / INLET_RADIUS) ** 2), 0) + return parabola + + def inlet_u_ref_func(_in): + return cfg.INLET_NORMAL[0] * _compute_parabola(_in) + + def inlet_v_ref_func(_in): + return cfg.INLET_NORMAL[1] * _compute_parabola(_in) + + def inlet_w_ref_func(_in): + return cfg.INLET_NORMAL[2] * _compute_parabola(_in) + + bc_inlet = ppsci.constraint.BoundaryConstraint( + {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, + {"u": inlet_u_ref_func, "v": inlet_v_ref_func, "w": inlet_w_ref_func}, + geom["inlet_geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_inlet}, + ppsci.loss.MSELoss("sum"), + name="inlet", + ) + bc_outlet = ppsci.constraint.BoundaryConstraint( + {"p": lambda d: d["p"]}, + {"p": 0}, + geom["outlet_geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_outlet}, + ppsci.loss.MSELoss("sum"), + name="outlet", + ) + bc_noslip = ppsci.constraint.BoundaryConstraint( + {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, + {"u": 0, "v": 0, "w": 0}, + geom["noslip_geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_noslip}, + ppsci.loss.MSELoss("sum"), + name="no_slip", + ) + pde = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0}, + geom["interior_geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.pde}, + ppsci.loss.MSELoss("sum"), + name="interior", + ) + igc_outlet = ppsci.constraint.IntegralConstraint( + equation["NormalDotVec"].equations, + {"normal_dot_vec": 2.54}, + geom["outlet_geo"], + { + **train_dataloader_cfg, + "iters_per_epoch": cfg.TRAIN.iters_integral.igc_outlet, + "batch_size": cfg.TRAIN.batch_size.igc_outlet, + "integral_batch_size": cfg.TRAIN.integral_batch_size.igc_outlet, + }, + ppsci.loss.IntegralLoss("sum"), + weight_dict=cfg.TRAIN.weight.igc_outlet, + name="igc_outlet", + ) + igc_integral = ppsci.constraint.IntegralConstraint( + equation["NormalDotVec"].equations, + {"normal_dot_vec": -2.54}, + geom["integral_geo"], + { + **train_dataloader_cfg, + "iters_per_epoch": cfg.TRAIN.iters_integral.igc_integral, + "batch_size": cfg.TRAIN.batch_size.igc_integral, + "integral_batch_size": cfg.TRAIN.integral_batch_size.igc_integral, + }, + ppsci.loss.IntegralLoss("sum"), + weight_dict=cfg.TRAIN.weight.igc_integral, + name="igc_integral", + ) + # wrap constraints together + constraint = { + bc_inlet.name: bc_inlet, + bc_outlet.name: bc_outlet, + bc_noslip.name: bc_noslip, + pde.name: pde, + igc_outlet.name: igc_outlet, + igc_integral.name: igc_integral, + } + + # set optimizer + lr_scheduler = 
ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + eval_data_dict = reader.load_csv_file( + cfg.EVAL_CSV_PATH, + ("x", "y", "z", "u", "v", "w", "p"), + { + "x": "Points:0", + "y": "Points:1", + "z": "Points:2", + "u": "U:0", + "v": "U:1", + "w": "U:2", + "p": "p", + }, + ) + input_dict = { + "x": (eval_data_dict["x"] - cfg.CENTER[0]) * cfg.SCALE, + "y": (eval_data_dict["y"] - cfg.CENTER[1]) * cfg.SCALE, + "z": (eval_data_dict["z"] - cfg.CENTER[2]) * cfg.SCALE, + } + if "area" in input_dict.keys(): + input_dict["area"] *= cfg.SCALE ** (equation["NavierStokes"].dim) + + label_dict = { + "p": eval_data_dict["p"], + "u": eval_data_dict["u"], + "v": eval_data_dict["v"], + "w": eval_data_dict["w"], + } + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict, + "label": label_dict, + }, + "sampler": {"name": "BatchSampler"}, + "num_workers": 1, + } + sup_validator = ppsci.validate.SupervisedValidator( + {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, + ppsci.loss.MSELoss("mean"), + { + "p": lambda out: out["p"], + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + metric={"MSE": ppsci.metric.MSE()}, + name="ref_u_v_w_p", + ) + validator = {sup_validator.name: sup_validator} + + # set visualizer(optional) + visualizer = { + "visualize_u_v_w_p": ppsci.visualize.VisualizerVtu( + input_dict, + { + "p": lambda out: out["p"], + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + batch_size=cfg.EVAL.batch_size, + prefix="result_u_v_w_p", + ), + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + eval_during_train=True, + eval_freq=cfg.TRAIN.eval_freq, + seed=cfg.seed, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set validator + eval_data_dict = reader.load_csv_file( + cfg.EVAL_CSV_PATH, + ("x", "y", "z", "u", "v", "w", "p"), + { + "x": "Points:0", + "y": "Points:1", + "z": "Points:2", + "u": "U:0", + "v": "U:1", + "w": "U:2", + "p": "p", + }, + ) + input_dict = { + "x": (eval_data_dict["x"] - cfg.CENTER[0]) * cfg.SCALE, + "y": (eval_data_dict["y"] - cfg.CENTER[1]) * cfg.SCALE, + "z": (eval_data_dict["z"] - cfg.CENTER[2]) * cfg.SCALE, + } + + label_dict = { + "p": eval_data_dict["p"], + "u": eval_data_dict["u"], + "v": eval_data_dict["v"], + "w": eval_data_dict["w"], + } + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict, + "label": label_dict, + }, + "sampler": {"name": "BatchSampler"}, + "num_workers": 1, + } + sup_validator = ppsci.validate.SupervisedValidator( + {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, + ppsci.loss.MSELoss("mean"), + { + "p": lambda out: out["p"], + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + metric={"MSE": ppsci.metric.MSE()}, 
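The inlet boundary constraint above prescribes a parabolic (Poiseuille-like) velocity profile through _compute_parabola and the three inlet_*_ref_func closures. A self-contained NumPy sketch of the same profile, using illustrative argument names rather than the config keys of this example:

import numpy as np

def parabolic_inlet_velocity(points, inlet_center, inlet_normal, v_max, inlet_radius):
    # points: (N, 3) coordinates sampled on the inlet surface.
    # The speed falls off quadratically with the distance from the inlet center,
    # is clipped to zero outside inlet_radius, and is then projected onto the
    # inlet normal to obtain the (u, v, w) components.
    offset = points - np.asarray(inlet_center)
    distance = np.linalg.norm(offset, axis=-1, keepdims=True)
    magnitude = v_max * np.maximum(1.0 - (distance / inlet_radius) ** 2, 0.0)
    return magnitude * np.asarray(inlet_normal)

Here inlet_radius corresponds to INLET_RADIUS derived from the inlet area, and v_max to cfg.INLET_VEL.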
+ name="ref_u_v_w_p", + ) + validator = {sup_validator.name: sup_validator} + + # set visualizer + visualizer = { + "visualize_u_v_w_p": ppsci.visualize.VisualizerVtu( + input_dict, + { + "p": lambda out: out["p"], + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + batch_size=cfg.EVAL.batch_size, + prefix="result_u_v_w_p", + ), + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + solver.eval() + # visualize prediction + solver.visualize() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + eval_data_dict = reader.load_csv_file( + cfg.EVAL_CSV_PATH, + ("x", "y", "z", "u", "v", "w", "p"), + { + "x": "Points:0", + "y": "Points:1", + "z": "Points:2", + "u": "U:0", + "v": "U:1", + "w": "U:2", + "p": "p", + }, + ) + input_dict = { + "x": (eval_data_dict["x"] - cfg.CENTER[0]) * cfg.SCALE, + "y": (eval_data_dict["y"] - cfg.CENTER[1]) * cfg.SCALE, + "z": (eval_data_dict["z"] - cfg.CENTER[2]) * cfg.SCALE, + } + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + ppsci.visualize.save_vtu_from_dict( + "./aneurysm_pred.vtu", + {**input_dict, **output_dict}, + input_dict.keys(), + cfg.MODEL.output_keys, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="aneurysm.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/aneurysm/aneurysm_flow.py b/examples/aneurysm/aneurysm_flow.py index 8c788c8072..06bcfd8a5c 100644 --- a/examples/aneurysm/aneurysm_flow.py +++ b/examples/aneurysm/aneurysm_flow.py @@ -1,448 +1,448 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Reference: https://github.com/Jianxun-Wang/LabelFree-DNN-Surrogate -""" - -import math -import os -import os.path as osp - -import hydra -import matplotlib.pyplot as plt -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger -from ppsci.utils import misc - -paddle.framework.core.set_prim_eager_enabled(True) - - -def train(cfg: DictConfig): - # Physic properties - P_OUT = 0 # pressure at the outlet of pipe - P_IN = 0.1 # pressure at the inlet of pipe - NU = 1e-3 - RHO = 1 - - # Geometry - L = 1 - X_IN = 0 - X_OUT = X_IN + L - R_INLET = 0.05 - mu = 0.5 * (X_OUT - X_IN) - x_initial = np.linspace(X_IN, X_OUT, 100, dtype=paddle.get_default_dtype()).reshape( - 100, 1 - ) - x_20_copy = np.tile(x_initial, (20, 1)) # duplicate 20 times of x for dataloader - SIGMA = 0.1 - SCALE_START = -0.02 - SCALE_END = 0 - scale_initial = np.linspace( - SCALE_START, SCALE_END, 50, endpoint=True, dtype=paddle.get_default_dtype() - ).reshape(50, 1) - scale = np.tile(scale_initial, (len(x_20_copy), 1)) - x = np.array([np.tile(val, len(scale_initial)) for val in x_20_copy]).reshape( - len(scale), 1 - ) - - # Axisymmetric boundary - r_func = ( - scale - / math.sqrt(2 * np.pi * SIGMA**2) - * np.exp(-((x - mu) ** 2) / (2 * SIGMA**2)) - ) - - # Visualize stenosis(scale == 0.2) - PLOT_DIR = osp.join(cfg.output_dir, "visu") - os.makedirs(PLOT_DIR, exist_ok=True) - y_up = (R_INLET - r_func) * np.ones_like(x) - y_down = (-R_INLET + r_func) * np.ones_like(x) - idx = np.where(scale == 0) # plot vessel which scale is 0.2 by finding its indices - plt.figure() - plt.scatter(x[idx], y_up[idx]) - plt.scatter(x[idx], y_down[idx]) - plt.axis("equal") - plt.savefig(osp.join(PLOT_DIR, "idealized_stenotic_vessel"), bbox_inches="tight") - - # Points and shuffle(for alignment) - y = np.zeros([len(x), 1], dtype=paddle.get_default_dtype()) - for x0 in x_initial: - index = np.where(x[:, 0] == x0)[0] - # y is linear to scale, so we place linspace to get 1000 x, it corresponds to vessels - y[index] = np.linspace( - -max(y_up[index]), - max(y_up[index]), - len(index), - dtype=paddle.get_default_dtype(), - ).reshape(len(index), -1) - - idx = np.where(scale == 0) # plot vessel which scale is 0.2 by finding its indices - plt.figure() - plt.scatter(x[idx], y[idx]) - plt.axis("equal") - plt.savefig(osp.join(PLOT_DIR, "one_scale_sample"), bbox_inches="tight") - interior_geom = ppsci.geometry.PointCloud( - interior={"x": x, "y": y, "scale": scale}, - coord_keys=("x", "y", "scale"), - ) - geom = {"interior": interior_geom} - - def init_func(m): - if misc.typename(m) == "Linear": - ppsci.utils.initializer.kaiming_normal_(m.weight, reverse=True) - - model_1 = ppsci.arch.MLP(("x", "y", "scale"), ("u",), 3, 20, "silu") - model_2 = ppsci.arch.MLP(("x", "y", "scale"), ("v",), 3, 20, "silu") - model_3 = ppsci.arch.MLP(("x", "y", "scale"), ("p",), 3, 20, "silu") - model_1.apply(init_func) - model_2.apply(init_func) - model_3.apply(init_func) - - class Transform: - def __init__(self) -> None: - pass - - def output_transform_u(self, in_, out): - x, y, scale = in_["x"], in_["y"], in_["scale"] - r_func = ( - scale - / np.sqrt(2 * np.pi * SIGMA**2) - * paddle.exp(-((x - mu) ** 2) / (2 * SIGMA**2)) - ) - self.h = R_INLET - r_func - u = out["u"] - # The no-slip condition of velocity on the wall - return {"u": u * (self.h**2 - y**2)} - - def output_transform_v(self, in_, out): - y = in_["y"] - v = out["v"] - # The no-slip condition of velocity on the wall - return {"v": (self.h**2 - y**2) * v} - - 
def output_transform_p(self, in_, out): - x = in_["x"] - p = out["p"] - # The pressure inlet [p_in = 0.1] and outlet [p_out = 0] - return { - "p": ((P_IN - P_OUT) * (X_OUT - x) / L + (X_IN - x) * (X_OUT - x) * p) - } - - transform = Transform() - model_1.register_output_transform(transform.output_transform_u) - model_2.register_output_transform(transform.output_transform_v) - model_3.register_output_transform(transform.output_transform_p) - model = ppsci.arch.ModelList((model_1, model_2, model_3)) - optimizer_1 = ppsci.optimizer.Adam( - cfg.TRAIN.learning_rate, - beta1=cfg.TRAIN.beta1, - beta2=cfg.TRAIN.beta2, - epsilon=cfg.TRAIN.epsilon, - )(model_1) - optimizer_2 = ppsci.optimizer.Adam( - cfg.TRAIN.learning_rate, - beta1=cfg.TRAIN.beta1, - beta2=cfg.TRAIN.beta2, - epsilon=cfg.TRAIN.epsilon, - )(model_2) - optimizer_3 = ppsci.optimizer.Adam( - cfg.TRAIN.learning_rate, - beta1=cfg.TRAIN.beta1, - beta2=cfg.TRAIN.beta2, - epsilon=cfg.TRAIN.epsilon, - )(model_3) - optimizer = ppsci.optimizer.OptimizerList((optimizer_1, optimizer_2, optimizer_3)) - - equation = {"NavierStokes": ppsci.equation.NavierStokes(NU, RHO, 2, False)} - - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom=geom["interior"], - dataloader_cfg={ - "dataset": "NamedArrayDataset", - "num_workers": 1, - "batch_size": cfg.TRAIN.batch_size, - "iters_per_epoch": int(x.shape[0] / cfg.TRAIN.batch_size), - "sampler": { - "name": "BatchSampler", - "shuffle": True, - "drop_last": False, - }, - }, - loss=ppsci.loss.MSELoss("mean"), - evenly=True, - name="EQ", - ) - constraint = {pde_constraint.name: pde_constraint} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - log_freq=cfg.log_freq, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=int(x.shape[0] / cfg.TRAIN.batch_size), - save_freq=cfg.save_freq, - equation=equation, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - solver.train() - - -def evaluate(cfg: DictConfig): - PLOT_DIR = osp.join(cfg.output_dir, "visu") - os.makedirs(PLOT_DIR, exist_ok=True) - - # Physic properties - P_OUT = 0 # pressure at the outlet of pipe - P_IN = 0.1 # pressure at the inlet of pipe - NU = 1e-3 - - # Geometry - L = 1 - X_IN = 0 - X_OUT = X_IN + L - R_INLET = 0.05 - mu = 0.5 * (X_OUT - X_IN) - SIGMA = 0.1 - - def init_func(m): - if misc.typename(m) == "Linear": - ppsci.utils.initializer.kaiming_normal_(m.weight, reverse=True) - - model_1 = ppsci.arch.MLP(("x", "y", "scale"), ("u",), 3, 20, "silu") - model_2 = ppsci.arch.MLP(("x", "y", "scale"), ("v",), 3, 20, "silu") - model_3 = ppsci.arch.MLP(("x", "y", "scale"), ("p",), 3, 20, "silu") - model_1.apply(init_func) - model_2.apply(init_func) - model_3.apply(init_func) - - class Transform: - def __init__(self) -> None: - pass - - def output_transform_u(self, in_, out): - x, y, scale = in_["x"], in_["y"], in_["scale"] - r_func = ( - scale - / np.sqrt(2 * np.pi * SIGMA**2) - * paddle.exp(-((x - mu) ** 2) / (2 * SIGMA**2)) - ) - self.h = R_INLET - r_func - u = out["u"] - # The no-slip condition of velocity on the wall - return {"u": u * (self.h**2 - y**2)} - - def output_transform_v(self, in_, out): - y = in_["y"] - v = out["v"] - # The no-slip condition of velocity on the wall - return {"v": (self.h**2 - y**2) * v} - - def output_transform_p(self, in_, out): - x = in_["x"] - p = out["p"] - # The pressure inlet [p_in = 0.1] and outlet [p_out = 
0] - return { - "p": ((P_IN - P_OUT) * (X_OUT - x) / L + (X_IN - x) * (X_OUT - x) * p) - } - - transform = Transform() - model_1.register_output_transform(transform.output_transform_u) - model_2.register_output_transform(transform.output_transform_v) - model_3.register_output_transform(transform.output_transform_p) - model = ppsci.arch.ModelList((model_1, model_2, model_3)) - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - def model_predict( - x: np.ndarray, y: np.ndarray, scale: np.ndarray, solver: ppsci.solver.Solver - ): - xt = paddle.to_tensor(x) - yt = paddle.to_tensor(y) - scalet = paddle.full_like(xt, scale) - input_dict = {"x": xt, "y": yt, "scale": scalet} - output_dict = solver.predict(input_dict, batch_size=100, return_numpy=True) - return output_dict - - scale_test = np.load("./data/aneurysm_scale0005to002_eval0to002mean001_3sigma.npz")[ - "scale" - ] - CASE_SELECTED = [1, 151, 486] - PLOT_X = 0.8 - PLOT_Y = 0.06 - FONTSIZE = 14 - axis_limit = [0, 1, -0.15, 0.15] - path = "./data/cases/" - D_P = 0.1 - error_u = [] - error_v = [] - N_CL = 200 # number of sampling points in centerline (confused about centerline, but the paper did not explain) - x_centerline = np.linspace( - X_IN, X_OUT, N_CL, dtype=paddle.get_default_dtype() - ).reshape(N_CL, 1) - for case_id in CASE_SELECTED: - scale = scale_test[case_id - 1] - data_CFD = np.load(osp.join(path, f"{case_id}CFD_contour.npz")) - x = data_CFD["x"].astype(paddle.get_default_dtype()) - y = data_CFD["y"].astype(paddle.get_default_dtype()) - u_cfd = data_CFD["U"].astype(paddle.get_default_dtype()) - # p_cfd = data_CFD["P"].astype(paddle.get_default_dtype()) # missing data - - n = len(x) - output_dict = model_predict( - x.reshape(n, 1), - y.reshape(n, 1), - np.full((n, 1), scale, dtype=paddle.get_default_dtype()), - solver, - ) - u, v, _ = ( - output_dict["u"], - output_dict["v"], - output_dict["p"], - ) - w = np.zeros_like(u) - u_vec = np.concatenate([u, v, w], axis=1) - error_u.append( - np.linalg.norm(u_vec[:, 0] - u_cfd[:, 0]) / (D_P * len(u_vec[:, 0])) - ) - error_v.append( - np.linalg.norm(u_vec[:, 1] - u_cfd[:, 1]) / (D_P * len(u_vec[:, 0])) - ) - - # Stream-wise velocity component u - plt.figure() - plt.subplot(212) - plt.scatter(x, y, c=u_vec[:, 0], vmin=min(u_cfd[:, 0]), vmax=max(u_cfd[:, 0])) - plt.text(PLOT_X, PLOT_Y, r"DNN", {"color": "b", "fontsize": FONTSIZE}) - plt.axis(axis_limit) - plt.colorbar() - plt.subplot(211) - plt.scatter(x, y, c=u_cfd[:, 0], vmin=min(u_cfd[:, 0]), vmax=max(u_cfd[:, 0])) - plt.colorbar() - plt.text(PLOT_X, PLOT_Y, r"CFD", {"color": "b", "fontsize": FONTSIZE}) - plt.axis(axis_limit) - plt.savefig( - osp.join(PLOT_DIR, f"{case_id}_scale_{scale}_uContour_test.png"), - bbox_inches="tight", - ) - - # Span-wise velocity component v - plt.figure() - plt.subplot(212) - plt.scatter(x, y, c=u_vec[:, 1], vmin=min(u_cfd[:, 1]), vmax=max(u_cfd[:, 1])) - plt.text(PLOT_X, PLOT_Y, r"DNN", {"color": "b", "fontsize": FONTSIZE}) - plt.axis(axis_limit) - plt.colorbar() - plt.subplot(211) - plt.scatter(x, y, c=u_cfd[:, 1], vmin=min(u_cfd[:, 1]), vmax=max(u_cfd[:, 1])) - plt.colorbar() - plt.text(PLOT_X, PLOT_Y, r"CFD", {"color": "b", "fontsize": FONTSIZE}) - plt.axis(axis_limit) - plt.savefig( - osp.join(PLOT_DIR, f"{case_id}_scale_{scale}_vContour_test.png"), - bbox_inches="tight", - ) - plt.close("all") - - # Centerline 
wall shear profile tau_c (downside) - data_CFD_wss = np.load(osp.join(path, f"{case_id}CFD_wss.npz")) - x_initial = data_CFD_wss["x"] - wall_shear_mag_up = data_CFD_wss["wss"] - - D_H = 0.001 # The span-wise distance is approximately the height of the wall - r_cl = ( - scale - / np.sqrt(2 * np.pi * SIGMA**2) - * np.exp(-((x_centerline - mu) ** 2) / (2 * SIGMA**2)) - ) - y_wall = (-R_INLET + D_H) * np.ones_like(x_centerline) + r_cl - output_dict_wss = model_predict( - x_centerline, - y_wall, - np.full((N_CL, 1), scale, dtype=paddle.get_default_dtype()), - solver, - ) - v_cl_total = np.zeros_like( - x_centerline - ) # assuming normal velocity along the wall is zero - u_cl = output_dict_wss["u"] - v_cl = output_dict_wss["v"] - v_cl_total = np.sqrt(u_cl**2 + v_cl**2) - tau_c = NU * v_cl_total / D_H - plt.figure() - plt.plot( - x_initial, - wall_shear_mag_up, - label="CFD", - color="darkblue", - linestyle="-", - lw=3.0, - alpha=1.0, - ) - plt.plot( - x_initial, - tau_c, - label="DNN", - color="red", - linestyle="--", - dashes=(5, 5), - lw=2.0, - alpha=1.0, - ) - plt.xlabel(r"x", fontsize=16) - plt.ylabel(r"$\tau_{c}$", fontsize=16) - plt.legend(prop={"size": 16}) - plt.savefig( - osp.join(PLOT_DIR, f"{case_id}_nu__{scale}_wallshear_test.png"), - bbox_inches="tight", - ) - plt.close("all") - logger.message( - f"Table 1 : Aneurysm - Geometry error u : {sum(error_u) / len(error_u): .3e}" - ) - logger.message( - f"Table 1 : Aneurysm - Geometry error v : {sum(error_v) / len(error_v): .3e}" - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="aneurysm_flow.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
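The Transform class in this file (visible in the removed hunk above and re-added below) imposes the boundary conditions "hard", i.e. by construction rather than through extra loss terms: the raw network outputs are multiplied by factors that vanish on the vessel walls and that pin the pressure at the inlet and outlet. A short sketch of the idea with the same symbols, where h = R_INLET - r(x) is the local half-width and P_OUT = 0 in this example:

import paddle

def hard_no_slip(raw_uv: paddle.Tensor, y: paddle.Tensor, h: paddle.Tensor) -> paddle.Tensor:
    # (h**2 - y**2) vanishes at y = +-h, so u and v are exactly zero on the walls
    # regardless of what the network predicts.
    return raw_uv * (h**2 - y**2)

def hard_pressure(raw_p, x, p_in, p_out, x_in, x_out, length):
    # The linear-in-x term carries the prescribed pressure drop, and the
    # (x_in - x) * (x_out - x) factor vanishes at both ends, so with p_out = 0 the
    # output equals p_in at x = x_in and p_out at x = x_out for any raw_p.
    return (p_in - p_out) * (x_out - x) / length + (x_in - x) * (x_out - x) * raw_p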
+ +""" +Reference: https://github.com/Jianxun-Wang/LabelFree-DNN-Surrogate +""" + +import math +import os +import os.path as osp + +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger +from ppsci.utils import misc + +paddle.framework.core.set_prim_eager_enabled(True) + + +def train(cfg: DictConfig): + # Physic properties + P_OUT = 0 # pressure at the outlet of pipe + P_IN = 0.1 # pressure at the inlet of pipe + NU = 1e-3 + RHO = 1 + + # Geometry + L = 1 + X_IN = 0 + X_OUT = X_IN + L + R_INLET = 0.05 + mu = 0.5 * (X_OUT - X_IN) + x_initial = np.linspace(X_IN, X_OUT, 100, dtype=paddle.get_default_dtype()).reshape( + 100, 1 + ) + x_20_copy = np.tile(x_initial, (20, 1)) # duplicate 20 times of x for dataloader + SIGMA = 0.1 + SCALE_START = -0.02 + SCALE_END = 0 + scale_initial = np.linspace( + SCALE_START, SCALE_END, 50, endpoint=True, dtype=paddle.get_default_dtype() + ).reshape(50, 1) + scale = np.tile(scale_initial, (len(x_20_copy), 1)) + x = np.array([np.tile(val, len(scale_initial)) for val in x_20_copy]).reshape( + len(scale), 1 + ) + + # Axisymmetric boundary + r_func = ( + scale + / math.sqrt(2 * np.pi * SIGMA**2) + * np.exp(-((x - mu) ** 2) / (2 * SIGMA**2)) + ) + + # Visualize stenosis(scale == 0.2) + PLOT_DIR = osp.join(cfg.output_dir, "visu") + os.makedirs(PLOT_DIR, exist_ok=True) + y_up = (R_INLET - r_func) * np.ones_like(x) + y_down = (-R_INLET + r_func) * np.ones_like(x) + idx = np.where(scale == 0) # plot vessel which scale is 0.2 by finding its indices + plt.figure() + plt.scatter(x[idx], y_up[idx]) + plt.scatter(x[idx], y_down[idx]) + plt.axis("equal") + plt.savefig(osp.join(PLOT_DIR, "idealized_stenotic_vessel"), bbox_inches="tight") + + # Points and shuffle(for alignment) + y = np.zeros([len(x), 1], dtype=paddle.get_default_dtype()) + for x0 in x_initial: + index = np.where(x[:, 0] == x0)[0] + # y is linear to scale, so we place linspace to get 1000 x, it corresponds to vessels + y[index] = np.linspace( + -max(y_up[index]), + max(y_up[index]), + len(index), + dtype=paddle.get_default_dtype(), + ).reshape(len(index), -1) + + idx = np.where(scale == 0) # plot vessel which scale is 0.2 by finding its indices + plt.figure() + plt.scatter(x[idx], y[idx]) + plt.axis("equal") + plt.savefig(osp.join(PLOT_DIR, "one_scale_sample"), bbox_inches="tight") + interior_geom = ppsci.geometry.PointCloud( + interior={"x": x, "y": y, "scale": scale}, + coord_keys=("x", "y", "scale"), + ) + geom = {"interior": interior_geom} + + def init_func(m): + if misc.typename(m) == "Linear": + ppsci.utils.initializer.kaiming_normal_(m.weight, reverse=True) + + model_1 = ppsci.arch.MLP(("x", "y", "scale"), ("u",), 3, 20, "silu") + model_2 = ppsci.arch.MLP(("x", "y", "scale"), ("v",), 3, 20, "silu") + model_3 = ppsci.arch.MLP(("x", "y", "scale"), ("p",), 3, 20, "silu") + model_1.apply(init_func) + model_2.apply(init_func) + model_3.apply(init_func) + + class Transform: + def __init__(self) -> None: + pass + + def output_transform_u(self, in_, out): + x, y, scale = in_["x"], in_["y"], in_["scale"] + r_func = ( + scale + / np.sqrt(2 * np.pi * SIGMA**2) + * paddle.exp(-((x - mu) ** 2) / (2 * SIGMA**2)) + ) + self.h = R_INLET - r_func + u = out["u"] + # The no-slip condition of velocity on the wall + return {"u": u * (self.h**2 - y**2)} + + def output_transform_v(self, in_, out): + y = in_["y"] + v = out["v"] + # The no-slip condition of velocity on the wall + return {"v": (self.h**2 - y**2) * v} + + 
def output_transform_p(self, in_, out): + x = in_["x"] + p = out["p"] + # The pressure inlet [p_in = 0.1] and outlet [p_out = 0] + return { + "p": ((P_IN - P_OUT) * (X_OUT - x) / L + (X_IN - x) * (X_OUT - x) * p) + } + + transform = Transform() + model_1.register_output_transform(transform.output_transform_u) + model_2.register_output_transform(transform.output_transform_v) + model_3.register_output_transform(transform.output_transform_p) + model = ppsci.arch.ModelList((model_1, model_2, model_3)) + optimizer_1 = ppsci.optimizer.Adam( + cfg.TRAIN.learning_rate, + beta1=cfg.TRAIN.beta1, + beta2=cfg.TRAIN.beta2, + epsilon=cfg.TRAIN.epsilon, + )(model_1) + optimizer_2 = ppsci.optimizer.Adam( + cfg.TRAIN.learning_rate, + beta1=cfg.TRAIN.beta1, + beta2=cfg.TRAIN.beta2, + epsilon=cfg.TRAIN.epsilon, + )(model_2) + optimizer_3 = ppsci.optimizer.Adam( + cfg.TRAIN.learning_rate, + beta1=cfg.TRAIN.beta1, + beta2=cfg.TRAIN.beta2, + epsilon=cfg.TRAIN.epsilon, + )(model_3) + optimizer = ppsci.optimizer.OptimizerList((optimizer_1, optimizer_2, optimizer_3)) + + equation = {"NavierStokes": ppsci.equation.NavierStokes(NU, RHO, 2, False)} + + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom=geom["interior"], + dataloader_cfg={ + "dataset": "NamedArrayDataset", + "num_workers": 1, + "batch_size": cfg.TRAIN.batch_size, + "iters_per_epoch": int(x.shape[0] / cfg.TRAIN.batch_size), + "sampler": { + "name": "BatchSampler", + "shuffle": True, + "drop_last": False, + }, + }, + loss=ppsci.loss.MSELoss("mean"), + evenly=True, + name="EQ", + ) + constraint = {pde_constraint.name: pde_constraint} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + log_freq=cfg.log_freq, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=int(x.shape[0] / cfg.TRAIN.batch_size), + save_freq=cfg.save_freq, + equation=equation, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + solver.train() + + +def evaluate(cfg: DictConfig): + PLOT_DIR = osp.join(cfg.output_dir, "visu") + os.makedirs(PLOT_DIR, exist_ok=True) + + # Physic properties + P_OUT = 0 # pressure at the outlet of pipe + P_IN = 0.1 # pressure at the inlet of pipe + NU = 1e-3 + + # Geometry + L = 1 + X_IN = 0 + X_OUT = X_IN + L + R_INLET = 0.05 + mu = 0.5 * (X_OUT - X_IN) + SIGMA = 0.1 + + def init_func(m): + if misc.typename(m) == "Linear": + ppsci.utils.initializer.kaiming_normal_(m.weight, reverse=True) + + model_1 = ppsci.arch.MLP(("x", "y", "scale"), ("u",), 3, 20, "silu") + model_2 = ppsci.arch.MLP(("x", "y", "scale"), ("v",), 3, 20, "silu") + model_3 = ppsci.arch.MLP(("x", "y", "scale"), ("p",), 3, 20, "silu") + model_1.apply(init_func) + model_2.apply(init_func) + model_3.apply(init_func) + + class Transform: + def __init__(self) -> None: + pass + + def output_transform_u(self, in_, out): + x, y, scale = in_["x"], in_["y"], in_["scale"] + r_func = ( + scale + / np.sqrt(2 * np.pi * SIGMA**2) + * paddle.exp(-((x - mu) ** 2) / (2 * SIGMA**2)) + ) + self.h = R_INLET - r_func + u = out["u"] + # The no-slip condition of velocity on the wall + return {"u": u * (self.h**2 - y**2)} + + def output_transform_v(self, in_, out): + y = in_["y"] + v = out["v"] + # The no-slip condition of velocity on the wall + return {"v": (self.h**2 - y**2) * v} + + def output_transform_p(self, in_, out): + x = in_["x"] + p = out["p"] + # The pressure inlet [p_in = 0.1] and outlet [p_out = 
0] + return { + "p": ((P_IN - P_OUT) * (X_OUT - x) / L + (X_IN - x) * (X_OUT - x) * p) + } + + transform = Transform() + model_1.register_output_transform(transform.output_transform_u) + model_2.register_output_transform(transform.output_transform_v) + model_3.register_output_transform(transform.output_transform_p) + model = ppsci.arch.ModelList((model_1, model_2, model_3)) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + def model_predict( + x: np.ndarray, y: np.ndarray, scale: np.ndarray, solver: ppsci.solver.Solver + ): + xt = paddle.to_tensor(x) + yt = paddle.to_tensor(y) + scalet = paddle.full_like(xt, scale) + input_dict = {"x": xt, "y": yt, "scale": scalet} + output_dict = solver.predict(input_dict, batch_size=100, return_numpy=True) + return output_dict + + scale_test = np.load("./data/aneurysm_scale0005to002_eval0to002mean001_3sigma.npz")[ + "scale" + ] + CASE_SELECTED = [1, 151, 486] + PLOT_X = 0.8 + PLOT_Y = 0.06 + FONTSIZE = 14 + axis_limit = [0, 1, -0.15, 0.15] + path = "./data/cases/" + D_P = 0.1 + error_u = [] + error_v = [] + N_CL = 200 # number of sampling points in centerline (confused about centerline, but the paper did not explain) + x_centerline = np.linspace( + X_IN, X_OUT, N_CL, dtype=paddle.get_default_dtype() + ).reshape(N_CL, 1) + for case_id in CASE_SELECTED: + scale = scale_test[case_id - 1] + data_CFD = np.load(osp.join(path, f"{case_id}CFD_contour.npz")) + x = data_CFD["x"].astype(paddle.get_default_dtype()) + y = data_CFD["y"].astype(paddle.get_default_dtype()) + u_cfd = data_CFD["U"].astype(paddle.get_default_dtype()) + # p_cfd = data_CFD["P"].astype(paddle.get_default_dtype()) # missing data + + n = len(x) + output_dict = model_predict( + x.reshape(n, 1), + y.reshape(n, 1), + np.full((n, 1), scale, dtype=paddle.get_default_dtype()), + solver, + ) + u, v, _ = ( + output_dict["u"], + output_dict["v"], + output_dict["p"], + ) + w = np.zeros_like(u) + u_vec = np.concatenate([u, v, w], axis=1) + error_u.append( + np.linalg.norm(u_vec[:, 0] - u_cfd[:, 0]) / (D_P * len(u_vec[:, 0])) + ) + error_v.append( + np.linalg.norm(u_vec[:, 1] - u_cfd[:, 1]) / (D_P * len(u_vec[:, 0])) + ) + + # Stream-wise velocity component u + plt.figure() + plt.subplot(212) + plt.scatter(x, y, c=u_vec[:, 0], vmin=min(u_cfd[:, 0]), vmax=max(u_cfd[:, 0])) + plt.text(PLOT_X, PLOT_Y, r"DNN", {"color": "b", "fontsize": FONTSIZE}) + plt.axis(axis_limit) + plt.colorbar() + plt.subplot(211) + plt.scatter(x, y, c=u_cfd[:, 0], vmin=min(u_cfd[:, 0]), vmax=max(u_cfd[:, 0])) + plt.colorbar() + plt.text(PLOT_X, PLOT_Y, r"CFD", {"color": "b", "fontsize": FONTSIZE}) + plt.axis(axis_limit) + plt.savefig( + osp.join(PLOT_DIR, f"{case_id}_scale_{scale}_uContour_test.png"), + bbox_inches="tight", + ) + + # Span-wise velocity component v + plt.figure() + plt.subplot(212) + plt.scatter(x, y, c=u_vec[:, 1], vmin=min(u_cfd[:, 1]), vmax=max(u_cfd[:, 1])) + plt.text(PLOT_X, PLOT_Y, r"DNN", {"color": "b", "fontsize": FONTSIZE}) + plt.axis(axis_limit) + plt.colorbar() + plt.subplot(211) + plt.scatter(x, y, c=u_cfd[:, 1], vmin=min(u_cfd[:, 1]), vmax=max(u_cfd[:, 1])) + plt.colorbar() + plt.text(PLOT_X, PLOT_Y, r"CFD", {"color": "b", "fontsize": FONTSIZE}) + plt.axis(axis_limit) + plt.savefig( + osp.join(PLOT_DIR, f"{case_id}_scale_{scale}_vContour_test.png"), + bbox_inches="tight", + ) + plt.close("all") + + # Centerline 
wall shear profile tau_c (downside) + data_CFD_wss = np.load(osp.join(path, f"{case_id}CFD_wss.npz")) + x_initial = data_CFD_wss["x"] + wall_shear_mag_up = data_CFD_wss["wss"] + + D_H = 0.001 # The span-wise distance is approximately the height of the wall + r_cl = ( + scale + / np.sqrt(2 * np.pi * SIGMA**2) + * np.exp(-((x_centerline - mu) ** 2) / (2 * SIGMA**2)) + ) + y_wall = (-R_INLET + D_H) * np.ones_like(x_centerline) + r_cl + output_dict_wss = model_predict( + x_centerline, + y_wall, + np.full((N_CL, 1), scale, dtype=paddle.get_default_dtype()), + solver, + ) + v_cl_total = np.zeros_like( + x_centerline + ) # assuming normal velocity along the wall is zero + u_cl = output_dict_wss["u"] + v_cl = output_dict_wss["v"] + v_cl_total = np.sqrt(u_cl**2 + v_cl**2) + tau_c = NU * v_cl_total / D_H + plt.figure() + plt.plot( + x_initial, + wall_shear_mag_up, + label="CFD", + color="darkblue", + linestyle="-", + lw=3.0, + alpha=1.0, + ) + plt.plot( + x_initial, + tau_c, + label="DNN", + color="red", + linestyle="--", + dashes=(5, 5), + lw=2.0, + alpha=1.0, + ) + plt.xlabel(r"x", fontsize=16) + plt.ylabel(r"$\tau_{c}$", fontsize=16) + plt.legend(prop={"size": 16}) + plt.savefig( + osp.join(PLOT_DIR, f"{case_id}_nu__{scale}_wallshear_test.png"), + bbox_inches="tight", + ) + plt.close("all") + logger.message( + f"Table 1 : Aneurysm - Geometry error u : {sum(error_u) / len(error_u): .3e}" + ) + logger.message( + f"Table 1 : Aneurysm - Geometry error v : {sum(error_v) / len(error_v): .3e}" + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="aneurysm_flow.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/aneurysm/conf/aneurysm.yaml b/examples/aneurysm/conf/aneurysm.yaml index adb82e97e2..83408c07e1 100644 --- a/examples/aneurysm/conf/aneurysm.yaml +++ b/examples/aneurysm/conf/aneurysm.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -116,3 +117,123 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 batch_size: 1024 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_aneurysm/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +NU: 0.025 +SCALE: 0.4 +RHO: 1.0 +DIM: 3 + +# set geometry file path +INLET_STL_PATH: "./stl/aneurysm_inlet.stl" +OUTLET_STL_PATH: "./stl/aneurysm_outlet.stl" +NOSLIP_STL_PATH: "./stl/aneurysm_noslip.stl" +INTEGRAL_STL_PATH: "./stl/aneurysm_integral.stl" +INTERIOR_STL_PATH: "./stl/aneurysm_closed.stl" + +# inlet velocity profile +CENTER: [-18.40381048596882, -50.285383353981196, 12.848136936899031] +INLET_NORMAL: [0.8526, -0.428, 0.299] +INLET_CENTER: [-4.24298030045776, 4.082857101816247, -4.637790193399717] +INLET_VEL: 1.5 + +# 
set evaluate data path +EVAL_CSV_PATH: "./data/aneurysm_parabolicInlet_sol0.csv" + +# model settings +MODEL: + input_keys: ["x", "y", "z"] + output_keys: ["u", "v", "w", "p"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + +# training settings +TRAIN: + epochs: 1500 + iters_per_epoch: 1000 + iters_integral: + igc_outlet: 100 + igc_integral: 100 + save_freq: 20 + eval_during_train: true + eval_freq: 20 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + gamma: 0.95 + decay_steps: 15000 + by_epoch: false + batch_size: + bc_inlet: 1100 + bc_outlet: 650 + bc_noslip: 5200 + pde: 6000 + igc_outlet: 1 + igc_integral: 1 + integral_batch_size: + igc_outlet: 310 + igc_integral: 310 + weight: + igc_outlet: {"normal_dot_vec": 0.1} + igc_integral: {"normal_dot_vec": 0.1} + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/aneurysm/aneurysm_pretrained.pdparams + export_path: ./inference/aneurysm + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 +>>>>>>> Stashed changes diff --git a/examples/aneurysm/conf/aneurysm_flow.yaml b/examples/aneurysm/conf/aneurysm_flow.yaml index 785bd02851..9237c4ecfb 100644 --- a/examples/aneurysm/conf/aneurysm_flow.yaml +++ b/examples/aneurysm/conf/aneurysm_flow.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -46,3 +47,52 @@ TRAIN: EVAL: pretrained_model_path: null eval_with_no_grad: true +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_aneurysm_flow/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 +save_freq: 50 + + +# training settings +TRAIN: + epochs: 400 + learning_rate: 1e-3 + beta1: 0.9 + beta2: 0.99 + epsilon: 1e-15 + batch_size: 50 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true +>>>>>>> Stashed changes diff --git a/examples/biharmonic2d/biharmonic2d.py b/examples/biharmonic2d/biharmonic2d.py index ec599f6cee..d118b9aade 100644 --- a/examples/biharmonic2d/biharmonic2d.py +++ b/examples/biharmonic2d/biharmonic2d.py @@ -1,457 +1,457 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
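The TRAIN.lr_scheduler block in aneurysm.yaml above configures an exponential decay with learning_rate 0.001, gamma 0.95 and decay_steps 15000, applied per iteration (by_epoch: false). Assuming the scheduler decays continuously as gamma ** (step / decay_steps) (a staircase variant that floor-divides the step count would only change the curve between decay boundaries), the schedule can be sketched as:

def exponential_lr(step, base_lr=1e-3, gamma=0.95, decay_steps=15000):
    # Learning rate shrinks by a factor of gamma every decay_steps optimizer steps.
    return base_lr * gamma ** (step / decay_steps)

# Over the configured 1500 epochs x 1000 iters_per_epoch = 1.5e6 steps, the final
# learning rate is roughly 1e-3 * 0.95 ** 100, i.e. about 5.9e-6.
print(exponential_lr(1_500_000))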
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import hydra -import matplotlib.gridspec as gridspec -import matplotlib.pyplot as plt -import numpy as np -import paddle -import sympy as sp -from mpl_toolkits.axes_grid1 import make_axes_locatable -from omegaconf import DictConfig -from scipy.interpolate import griddata - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger - - -def plotting(figname, output_dir, data, griddata_points, griddata_xi, boundary): - plt.clf() - fig = plt.figure(figname, figsize=(15, 12)) - gs = gridspec.GridSpec(2, 3) - gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5) - - for i, key in enumerate(data): - plot_data = griddata( - griddata_points, - data[key].flatten(), - griddata_xi, - method="cubic", - ) - - ax = plt.subplot(gs[i // 3, i % 3]) - h = ax.imshow( - plot_data, - interpolation="nearest", - cmap="jet", - extent=boundary, - origin="lower", - aspect="auto", - ) - divider = make_axes_locatable(ax) - cax = divider.append_axes("right", size="5%", pad=0.05) - fig.colorbar(h, cax=cax) - ax.axis("equal") - ax.set_xlim(0, boundary[1]) - ax.set_ylim(0, boundary[3]) - ax.set_xlabel("$x$") - ax.set_ylabel("$y$") - plt.tick_params(labelsize=12) - ax.set_title(key, fontsize=10) - - plt.savefig(osp.join(output_dir, figname)) - plt.close() - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set models - disp_net = ppsci.arch.MLP(**cfg.MODEL) - - # set optimizer - optimizer_adam = ppsci.optimizer.Adam(**cfg.TRAIN.optimizer.adam)(disp_net) - optimizer_lbfgs = ppsci.optimizer.LBFGS(**cfg.TRAIN.optimizer.lbfgs)(disp_net) - - # set equation - x, y = sp.symbols("x y") - Q = cfg.Q_0 * sp.sin(np.pi * x / cfg.LENGTH) * sp.sin(np.pi * y / cfg.WIDTH) - equation = { - "Biharmonic": ppsci.equation.Biharmonic( - dim=2, q=Q, D=cfg.E * (cfg.HEIGHT**3) / (12.0 * (1.0 - cfg.NU**2)) - ), - } - - # set geometry - plate = ppsci.geometry.Rectangle((0, 0), (cfg.LENGTH, cfg.WIDTH)) - geom = {"geo": plate} - - # set dataloader config - train_dataloader_cfg = { - "dataset": "NamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - } - - # set constraint - bc_left = ppsci.constraint.BoundaryConstraint( - {"w": lambda d: d["u"]}, - {"w": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: x == 0, - weight_dict={"w": cfg.TRAIN.weight.bc}, - name="BC_LEFT", - ) - bc_right = ppsci.constraint.BoundaryConstraint( - {"w": lambda d: d["u"]}, - {"w": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: x == cfg.LENGTH, - weight_dict={"w": cfg.TRAIN.weight.bc}, - name="BC_RIGHT", - ) - bc_up = ppsci.constraint.BoundaryConstraint( - {"w": lambda d: d["u"]}, - {"w": 0}, - geom["geo"], - 
{**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: y == 0, - weight_dict={"w": cfg.TRAIN.weight.bc}, - name="BC_UP", - ) - bc_bottom = ppsci.constraint.BoundaryConstraint( - {"w": lambda d: d["u"]}, - {"w": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: y == cfg.WIDTH, - weight_dict={"w": cfg.TRAIN.weight.bc}, - name="BC_BOTTOM", - ) - bc_left_My = ppsci.constraint.BoundaryConstraint( - { - "M_y": lambda d: -( - cfg.NU * hessian(d["u"], d["x"]) + hessian(d["u"], d["y"]) - ) - }, - {"M_y": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: x == 0, - weight_dict={"M_y": cfg.TRAIN.weight.bc}, - name="BC_LEFT_My", - ) - bc_right_My = ppsci.constraint.BoundaryConstraint( - { - "M_y": lambda d: -( - cfg.NU * hessian(d["u"], d["x"]) + hessian(d["u"], d["y"]) - ) - }, - {"M_y": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: x == cfg.LENGTH, - weight_dict={"M_y": cfg.TRAIN.weight.bc}, - name="BC_RIGHT_My", - ) - bc_up_Mx = ppsci.constraint.BoundaryConstraint( - { - "M_x": lambda d: -( - hessian(d["u"], d["x"]) + cfg.NU * hessian(d["u"], d["y"]) - ) - }, - {"M_x": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: y == 0, - weight_dict={"M_x": cfg.TRAIN.weight.bc}, - name="BC_UP_Mx", - ) - bc_bottom_Mx = ppsci.constraint.BoundaryConstraint( - { - "M_x": lambda d: -( - hessian(d["u"], d["x"]) + cfg.NU * hessian(d["u"], d["y"]) - ) - }, - {"M_x": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: y == cfg.WIDTH, - weight_dict={"M_x": cfg.TRAIN.weight.bc}, - name="BC_BOTTOM_Mx", - ) - interior = ppsci.constraint.InteriorConstraint( - equation["Biharmonic"].equations, - {"biharmonic": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.interior}, - ppsci.loss.MSELoss(), - criteria=lambda x, y: ((0 < x) & (x < cfg.LENGTH) & (0 < y) & (y < cfg.WIDTH)), - weight_dict={"biharmonic": cfg.TRAIN.weight.interior}, - name="INTERIOR", - ) - # wrap constraints together - constraint = { - bc_left.name: bc_left, - bc_right.name: bc_right, - bc_up.name: bc_up, - bc_bottom.name: bc_bottom, - bc_left_My.name: bc_left_My, - bc_right_My.name: bc_right_My, - bc_up_Mx.name: bc_up_Mx, - bc_bottom_Mx.name: bc_bottom_Mx, - interior.name: interior, - } - - # initialize adam solver - solver_adam = ppsci.solver.Solver( - disp_net, - constraint, - cfg.output_dir, - optimizer_adam, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, - equation=equation, - geom=geom, - checkpoint_path=cfg.TRAIN.checkpoint_path, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - ) - # train model - solver_adam.train() - # plot loss - solver_adam.plot_loss_history(by_epoch=True) - # initialize lbfgs solver - solver_lbfgs = ppsci.solver.Solver( - disp_net, - constraint, - cfg.output_dir, - optimizer_lbfgs, - None, - 1, - 1, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, - equation=equation, - geom=geom, - checkpoint_path=cfg.TRAIN.checkpoint_path, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - ) - # evaluate after 
finished training - solver_lbfgs.train() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set models - disp_net = ppsci.arch.MLP(**cfg.MODEL) - - # load pretrained model - solver = ppsci.solver.Solver( - model=disp_net, pretrained_model_path=cfg.EVAL.pretrained_model_path - ) - - # generate samples - num_x = 201 - num_y = 301 - num_cords = num_x * num_y - logger.info(f"num_cords: {num_cords}") - x_grad, y_grad = np.meshgrid( - np.linspace(start=0, stop=cfg.LENGTH, num=num_x, endpoint=True), - np.linspace(start=0, stop=cfg.WIDTH, num=num_y, endpoint=True), - ) - x_faltten = paddle.to_tensor( - x_grad.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - y_faltten = paddle.to_tensor( - y_grad.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - outs_pred = solver.predict( - {"x": x_faltten, "y": y_faltten}, batch_size=num_cords, no_grad=False - ) - - # generate label - D = cfg.E * (cfg.HEIGHT**3) / (12.0 * (1.0 - cfg.NU**2)) - Q = cfg.Q_0 / ( - (np.pi**4) * D * ((1 / (cfg.LENGTH**2) + 1 / (cfg.WIDTH**2)) ** 2) - ) - outs_label = ( - paddle.to_tensor(Q, dtype=paddle.get_default_dtype()) - * paddle.sin( - paddle.to_tensor(np.pi / cfg.LENGTH, dtype=paddle.get_default_dtype()) - * x_faltten, - ) - * paddle.sin( - paddle.to_tensor(np.pi / cfg.WIDTH, dtype=paddle.get_default_dtype()) - * y_faltten, - ) - ) - - # eval - l2_error = ppsci.metric.L2Rel()(outs_pred, {"u": outs_label})["u"] - logger.info(f"l2_error: {float(l2_error)}") - - # compute other pred outs - def compute_outs(w, x, y): - D = cfg.E * (cfg.HEIGHT**3) / (12.0 * (1.0 - cfg.NU**2)) - w_x2 = hessian(w, x) - w_y2 = hessian(w, y) - w_x_y = jacobian(jacobian(w, x), y) - M_x = -(w_x2 + cfg.NU * w_y2) * D - M_y = -(cfg.NU * w_x2 + w_y2) * D - M_xy = (1 - cfg.NU) * w_x_y * D - Q_x = -jacobian((w_x2 + w_y2), x) * D - Q_y = -jacobian((w_x2 + w_y2), y) * D - return {"Mx": M_x, "Mxy": M_xy, "My": M_y, "Qx": Q_x, "Qy": Q_y, "w": w} - - outs = compute_outs(outs_pred["u"], x_faltten, y_faltten) - - # plotting - griddata_points = paddle.concat([x_faltten, y_faltten], axis=-1).numpy() - griddata_xi = (x_grad, y_grad) - boundary = [0, cfg.LENGTH, 0, cfg.WIDTH] - plotting( - "eval_Mx_Mxy_My_Qx_Qy_w", - cfg.output_dir, - {k: v.numpy() for k, v in outs.items()}, - griddata_points, - griddata_xi, - boundary, - ) - - -def export(cfg: DictConfig): - from paddle import nn - from paddle.static import InputSpec - - # set models - disp_net = ppsci.arch.MLP(**cfg.MODEL) - - # load pretrained model - solver = ppsci.solver.Solver( - model=disp_net, pretrained_model_path=cfg.INFER.pretrained_model_path - ) - - class Wrapped_Model(nn.Layer): - def __init__(self, model): - super().__init__() - self.model = model - - def forward(self, x): - model_out = self.model(x) - outs = self.compute_outs(model_out["u"], x["x"], x["y"]) - return outs - - def compute_outs(self, w, x, y): - D = cfg.E * (cfg.HEIGHT**3) / (12.0 * (1.0 - cfg.NU**2)) - w_x2 = hessian(w, x) - w_y2 = hessian(w, y) - w_x_y = jacobian(jacobian(w, x), y) - M_x = -(w_x2 + cfg.NU * w_y2) * D - M_y = -(cfg.NU * w_x2 + w_y2) * D - M_xy = (1 - cfg.NU) * w_x_y * D - Q_x = -jacobian((w_x2 + w_y2), x) * D - Q_y = -jacobian((w_x2 + w_y2), y) * D - return {"Mx": M_x, "Mxy": M_xy, "My": M_y, "Qx": Q_x, "Qy": Q_y, "w": w} - - solver.model = Wrapped_Model(solver.model) - - # export models 
- input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in disp_net.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - # set model predictor - predictor = pinn_predictor.PINNPredictor(cfg) - - # generate samples - num_x = 201 - num_y = 301 - x_grad, y_grad = np.meshgrid( - np.linspace( - start=0, stop=cfg.LENGTH, num=num_x, endpoint=True, dtype=np.float32 - ), - np.linspace( - start=0, stop=cfg.WIDTH, num=num_y, endpoint=True, dtype=np.float32 - ), - ) - x_faltten = x_grad.reshape(-1, 1) - y_faltten = y_grad.reshape(-1, 1) - - output_dict = predictor.predict( - {"x": x_faltten, "y": y_faltten}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.INFER.output_keys, output_dict.keys()) - } - - # plotting - griddata_points = np.concatenate([x_faltten, y_faltten], axis=-1) - griddata_xi = (x_grad, y_grad) - boundary = [0, cfg.LENGTH, 0, cfg.WIDTH] - plotting( - "eval_Mx_Mxy_My_Qx_Qy_w", - cfg.output_dir, - output_dict, - griddata_points, - griddata_xi, - boundary, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="biharmonic2d.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
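# A minimal standalone sketch of the closed-form reference that the evaluate() function
# further below compares the network against: a simply supported rectangular plate under
# the sinusoidal load q(x, y) = Q_0 sin(pi x / a) sin(pi y / b) has the exact deflection
# w(x, y) = Q_0 sin(pi x / a) sin(pi y / b) / (pi^4 D (1/a^2 + 1/b^2)^2), with flexural
# rigidity D = E h^3 / (12 (1 - nu^2)). The default arguments assume the values set in
# biharmonic2d.yaml (LENGTH=2 m, WIDTH=3 m, HEIGHT=0.01 m, E=201880.0e6 Pa, NU=0.25,
# Q_0=980 Pa); treat them as placeholders if the config changes.
import numpy as np


def plate_deflection(x, y, a=2.0, b=3.0, h=0.01, E=201880.0e6, nu=0.25, q0=980.0):
    """Analytical deflection w(x, y) used as the evaluation label."""
    D = E * h**3 / (12.0 * (1.0 - nu**2))  # flexural rigidity of the plate
    amp = q0 / (np.pi**4 * D * (1.0 / a**2 + 1.0 / b**2) ** 2)
    return amp * np.sin(np.pi * x / a) * np.sin(np.pi * y / b)


# example: deflection at the plate centre
# w_center = plate_deflection(1.0, 1.5)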
+ +from os import path as osp + +import hydra +import matplotlib.gridspec as gridspec +import matplotlib.pyplot as plt +import numpy as np +import paddle +import sympy as sp +from mpl_toolkits.axes_grid1 import make_axes_locatable +from omegaconf import DictConfig +from scipy.interpolate import griddata + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger + + +def plotting(figname, output_dir, data, griddata_points, griddata_xi, boundary): + plt.clf() + fig = plt.figure(figname, figsize=(15, 12)) + gs = gridspec.GridSpec(2, 3) + gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5) + + for i, key in enumerate(data): + plot_data = griddata( + griddata_points, + data[key].flatten(), + griddata_xi, + method="cubic", + ) + + ax = plt.subplot(gs[i // 3, i % 3]) + h = ax.imshow( + plot_data, + interpolation="nearest", + cmap="jet", + extent=boundary, + origin="lower", + aspect="auto", + ) + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + fig.colorbar(h, cax=cax) + ax.axis("equal") + ax.set_xlim(0, boundary[1]) + ax.set_ylim(0, boundary[3]) + ax.set_xlabel("$x$") + ax.set_ylabel("$y$") + plt.tick_params(labelsize=12) + ax.set_title(key, fontsize=10) + + plt.savefig(osp.join(output_dir, figname)) + plt.close() + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set models + disp_net = ppsci.arch.MLP(**cfg.MODEL) + + # set optimizer + optimizer_adam = ppsci.optimizer.Adam(**cfg.TRAIN.optimizer.adam)(disp_net) + optimizer_lbfgs = ppsci.optimizer.LBFGS(**cfg.TRAIN.optimizer.lbfgs)(disp_net) + + # set equation + x, y = sp.symbols("x y") + Q = cfg.Q_0 * sp.sin(np.pi * x / cfg.LENGTH) * sp.sin(np.pi * y / cfg.WIDTH) + equation = { + "Biharmonic": ppsci.equation.Biharmonic( + dim=2, q=Q, D=cfg.E * (cfg.HEIGHT**3) / (12.0 * (1.0 - cfg.NU**2)) + ), + } + + # set geometry + plate = ppsci.geometry.Rectangle((0, 0), (cfg.LENGTH, cfg.WIDTH)) + geom = {"geo": plate} + + # set dataloader config + train_dataloader_cfg = { + "dataset": "NamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + } + + # set constraint + bc_left = ppsci.constraint.BoundaryConstraint( + {"w": lambda d: d["u"]}, + {"w": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: x == 0, + weight_dict={"w": cfg.TRAIN.weight.bc}, + name="BC_LEFT", + ) + bc_right = ppsci.constraint.BoundaryConstraint( + {"w": lambda d: d["u"]}, + {"w": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: x == cfg.LENGTH, + weight_dict={"w": cfg.TRAIN.weight.bc}, + name="BC_RIGHT", + ) + bc_up = ppsci.constraint.BoundaryConstraint( + {"w": lambda d: d["u"]}, + {"w": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: y == 0, + weight_dict={"w": cfg.TRAIN.weight.bc}, + name="BC_UP", + ) + bc_bottom = ppsci.constraint.BoundaryConstraint( + {"w": lambda d: d["u"]}, + {"w": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: y == cfg.WIDTH, + 
weight_dict={"w": cfg.TRAIN.weight.bc}, + name="BC_BOTTOM", + ) + bc_left_My = ppsci.constraint.BoundaryConstraint( + { + "M_y": lambda d: -( + cfg.NU * hessian(d["u"], d["x"]) + hessian(d["u"], d["y"]) + ) + }, + {"M_y": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: x == 0, + weight_dict={"M_y": cfg.TRAIN.weight.bc}, + name="BC_LEFT_My", + ) + bc_right_My = ppsci.constraint.BoundaryConstraint( + { + "M_y": lambda d: -( + cfg.NU * hessian(d["u"], d["x"]) + hessian(d["u"], d["y"]) + ) + }, + {"M_y": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: x == cfg.LENGTH, + weight_dict={"M_y": cfg.TRAIN.weight.bc}, + name="BC_RIGHT_My", + ) + bc_up_Mx = ppsci.constraint.BoundaryConstraint( + { + "M_x": lambda d: -( + hessian(d["u"], d["x"]) + cfg.NU * hessian(d["u"], d["y"]) + ) + }, + {"M_x": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: y == 0, + weight_dict={"M_x": cfg.TRAIN.weight.bc}, + name="BC_UP_Mx", + ) + bc_bottom_Mx = ppsci.constraint.BoundaryConstraint( + { + "M_x": lambda d: -( + hessian(d["u"], d["x"]) + cfg.NU * hessian(d["u"], d["y"]) + ) + }, + {"M_x": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: y == cfg.WIDTH, + weight_dict={"M_x": cfg.TRAIN.weight.bc}, + name="BC_BOTTOM_Mx", + ) + interior = ppsci.constraint.InteriorConstraint( + equation["Biharmonic"].equations, + {"biharmonic": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.interior}, + ppsci.loss.MSELoss(), + criteria=lambda x, y: ((0 < x) & (x < cfg.LENGTH) & (0 < y) & (y < cfg.WIDTH)), + weight_dict={"biharmonic": cfg.TRAIN.weight.interior}, + name="INTERIOR", + ) + # wrap constraints together + constraint = { + bc_left.name: bc_left, + bc_right.name: bc_right, + bc_up.name: bc_up, + bc_bottom.name: bc_bottom, + bc_left_My.name: bc_left_My, + bc_right_My.name: bc_right_My, + bc_up_Mx.name: bc_up_Mx, + bc_bottom_Mx.name: bc_bottom_Mx, + interior.name: interior, + } + + # initialize adam solver + solver_adam = ppsci.solver.Solver( + disp_net, + constraint, + cfg.output_dir, + optimizer_adam, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + seed=cfg.seed, + equation=equation, + geom=geom, + checkpoint_path=cfg.TRAIN.checkpoint_path, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + ) + # train model + solver_adam.train() + # plot loss + solver_adam.plot_loss_history(by_epoch=True) + # initialize lbfgs solver + solver_lbfgs = ppsci.solver.Solver( + disp_net, + constraint, + cfg.output_dir, + optimizer_lbfgs, + None, + 1, + 1, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + seed=cfg.seed, + equation=equation, + geom=geom, + checkpoint_path=cfg.TRAIN.checkpoint_path, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + ) + # evaluate after finished training + solver_lbfgs.train() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set models + disp_net = ppsci.arch.MLP(**cfg.MODEL) + + # load pretrained model + solver = ppsci.solver.Solver( + model=disp_net, 
pretrained_model_path=cfg.EVAL.pretrained_model_path + ) + + # generate samples + num_x = 201 + num_y = 301 + num_cords = num_x * num_y + logger.info(f"num_cords: {num_cords}") + x_grad, y_grad = np.meshgrid( + np.linspace(start=0, stop=cfg.LENGTH, num=num_x, endpoint=True), + np.linspace(start=0, stop=cfg.WIDTH, num=num_y, endpoint=True), + ) + x_faltten = paddle.to_tensor( + x_grad.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + y_faltten = paddle.to_tensor( + y_grad.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + outs_pred = solver.predict( + {"x": x_faltten, "y": y_faltten}, batch_size=num_cords, no_grad=False + ) + + # generate label + D = cfg.E * (cfg.HEIGHT**3) / (12.0 * (1.0 - cfg.NU**2)) + Q = cfg.Q_0 / ( + (np.pi**4) * D * ((1 / (cfg.LENGTH**2) + 1 / (cfg.WIDTH**2)) ** 2) + ) + outs_label = ( + paddle.to_tensor(Q, dtype=paddle.get_default_dtype()) + * paddle.sin( + paddle.to_tensor(np.pi / cfg.LENGTH, dtype=paddle.get_default_dtype()) + * x_faltten, + ) + * paddle.sin( + paddle.to_tensor(np.pi / cfg.WIDTH, dtype=paddle.get_default_dtype()) + * y_faltten, + ) + ) + + # eval + l2_error = ppsci.metric.L2Rel()(outs_pred, {"u": outs_label})["u"] + logger.info(f"l2_error: {float(l2_error)}") + + # compute other pred outs + def compute_outs(w, x, y): + D = cfg.E * (cfg.HEIGHT**3) / (12.0 * (1.0 - cfg.NU**2)) + w_x2 = hessian(w, x) + w_y2 = hessian(w, y) + w_x_y = jacobian(jacobian(w, x), y) + M_x = -(w_x2 + cfg.NU * w_y2) * D + M_y = -(cfg.NU * w_x2 + w_y2) * D + M_xy = (1 - cfg.NU) * w_x_y * D + Q_x = -jacobian((w_x2 + w_y2), x) * D + Q_y = -jacobian((w_x2 + w_y2), y) * D + return {"Mx": M_x, "Mxy": M_xy, "My": M_y, "Qx": Q_x, "Qy": Q_y, "w": w} + + outs = compute_outs(outs_pred["u"], x_faltten, y_faltten) + + # plotting + griddata_points = paddle.concat([x_faltten, y_faltten], axis=-1).numpy() + griddata_xi = (x_grad, y_grad) + boundary = [0, cfg.LENGTH, 0, cfg.WIDTH] + plotting( + "eval_Mx_Mxy_My_Qx_Qy_w", + cfg.output_dir, + {k: v.numpy() for k, v in outs.items()}, + griddata_points, + griddata_xi, + boundary, + ) + + +def export(cfg: DictConfig): + from paddle import nn + from paddle.static import InputSpec + + # set models + disp_net = ppsci.arch.MLP(**cfg.MODEL) + + # load pretrained model + solver = ppsci.solver.Solver( + model=disp_net, pretrained_model_path=cfg.INFER.pretrained_model_path + ) + + class Wrapped_Model(nn.Layer): + def __init__(self, model): + super().__init__() + self.model = model + + def forward(self, x): + model_out = self.model(x) + outs = self.compute_outs(model_out["u"], x["x"], x["y"]) + return outs + + def compute_outs(self, w, x, y): + D = cfg.E * (cfg.HEIGHT**3) / (12.0 * (1.0 - cfg.NU**2)) + w_x2 = hessian(w, x) + w_y2 = hessian(w, y) + w_x_y = jacobian(jacobian(w, x), y) + M_x = -(w_x2 + cfg.NU * w_y2) * D + M_y = -(cfg.NU * w_x2 + w_y2) * D + M_xy = (1 - cfg.NU) * w_x_y * D + Q_x = -jacobian((w_x2 + w_y2), x) * D + Q_y = -jacobian((w_x2 + w_y2), y) * D + return {"Mx": M_x, "Mxy": M_xy, "My": M_y, "Qx": Q_x, "Qy": Q_y, "w": w} + + solver.model = Wrapped_Model(solver.model) + + # export models + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in disp_net.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + # set model predictor + predictor = pinn_predictor.PINNPredictor(cfg) + + # generate samples + num_x = 201 + num_y = 301 + x_grad, y_grad = np.meshgrid( + 
np.linspace( + start=0, stop=cfg.LENGTH, num=num_x, endpoint=True, dtype=np.float32 + ), + np.linspace( + start=0, stop=cfg.WIDTH, num=num_y, endpoint=True, dtype=np.float32 + ), + ) + x_faltten = x_grad.reshape(-1, 1) + y_faltten = y_grad.reshape(-1, 1) + + output_dict = predictor.predict( + {"x": x_faltten, "y": y_faltten}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.INFER.output_keys, output_dict.keys()) + } + + # plotting + griddata_points = np.concatenate([x_faltten, y_faltten], axis=-1) + griddata_xi = (x_grad, y_grad) + boundary = [0, cfg.LENGTH, 0, cfg.WIDTH] + plotting( + "eval_Mx_Mxy_My_Qx_Qy_w", + cfg.output_dir, + output_dict, + griddata_points, + griddata_xi, + boundary, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="biharmonic2d.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/biharmonic2d/conf/biharmonic2d.yaml b/examples/biharmonic2d/conf/biharmonic2d.yaml index e508cf2bd3..fa7411761d 100644 --- a/examples/biharmonic2d/conf/biharmonic2d.yaml +++ b/examples/biharmonic2d/conf/biharmonic2d.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -90,3 +91,98 @@ INFER: max_batch_size: 128 num_cpu_threads: 4 batch_size: 128 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_biharmonic2d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - INFER.export_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +E: 201880.0e+6 # Pa = N/m2 +NU: 0.25 +Q_0: 980 # Pa = N/m2 +LENGTH: 2 # m +WIDTH: 3 # m +HEIGHT: 0.01 # m + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u",] + num_layers: 5 + hidden_size: 20 + activation: "tanh" + weight_norm: true + +# training settings +TRAIN: + epochs: 1000 + iters_per_epoch: 1 + optimizer: + adam: + learning_rate: 1.0e-3 + lbfgs: + learning_rate: 1.0 + max_iter: 50000 + tolerance_grad: 1.0e-8 + tolerance_change: 0 + batch_size: + bc: 125 + interior: 8000 + weight: + bc: 100 + interior: 1 + save_freq: 100 + eval_during_train: false + eval_freq: 100 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 128 + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/biharmonic2d/biharmonic2d_pretrained.pdparams + export_path: ./inference/biharmonic2d + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + output_keys: ["Mx", "Mxy", "My", "Qx", "Qy", "w"] + device: gpu + engine: native + precision: fp32 + onnx_path: 
${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 128 + num_cpu_threads: 4 + batch_size: 128 +>>>>>>> Stashed changes diff --git a/examples/bracket/bracket.py b/examples/bracket/bracket.py index 888373c591..b9224a747f 100644 --- a/examples/bracket/bracket.py +++ b/examples/bracket/bracket.py @@ -1,591 +1,591 @@ -""" -Reference: https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/foundational/linear_elasticity.html -STL data files download link: https://paddle-org.bj.bcebos.com/paddlescience/datasets/bracket/bracket_dataset.tar -pretrained model download link: https://paddle-org.bj.bcebos.com/paddlescience/models/bracket/bracket_pretrained.pdparams -""" - -import hydra -import numpy as np -from omegaconf import DictConfig - -import ppsci - - -def train(cfg: DictConfig): - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - # wrap to a model_list - model = ppsci.arch.ModelList((disp_net, stress_net)) - - # specify parameters - LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) - MU = cfg.E / (2 * (1 + cfg.NU)) - MU_C = 0.01 * MU - LAMBDA_ = LAMBDA_ / MU_C - MU = MU / MU_C - SIGMA_NORMALIZATION = cfg.CHARACTERISTIC_LENGTH / ( - cfg.CHARACTERISTIC_DISPLACEMENT * MU_C - ) - T = -4.0e4 * SIGMA_NORMALIZATION - - # set equation - equation = { - "LinearElasticity": ppsci.equation.LinearElasticity( - lambda_=LAMBDA_, mu=MU, dim=3 - ) - } - - # set geometry - support = ppsci.geometry.Mesh(cfg.SUPPORT_PATH) - bracket = ppsci.geometry.Mesh(cfg.BRACKET_PATH) - aux_lower = ppsci.geometry.Mesh(cfg.AUX_LOWER_PATH) - aux_upper = ppsci.geometry.Mesh(cfg.AUX_UPPER_PATH) - cylinder_hole = ppsci.geometry.Mesh(cfg.CYLINDER_HOLE_PATH) - cylinder_lower = ppsci.geometry.Mesh(cfg.CYLINDER_LOWER_PATH) - cylinder_upper = ppsci.geometry.Mesh(cfg.CYLINDER_UPPER_PATH) - # geometry bool operation - curve_lower = aux_lower - cylinder_lower - curve_upper = aux_upper - cylinder_upper - geo = support + bracket + curve_lower + curve_upper - cylinder_hole - geom = {"geo": geo} - - # set dataloader config - train_dataloader_cfg = { - "dataset": "NamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - SUPPORT_ORIGIN = (-1, -1, -1) - BRACKET_ORIGIN = (-0.75, -1, -0.1) - BRACKET_DIM = (1.75, 2, 0.2) - BOUNDS_SUPPORT_X = (-1, -0.65) - BOUNDS_SUPPORT_Y = (-1, 1) - BOUNDS_SUPPORT_Z = (-1, 1) - BOUNDS_BRACKET_X = (-0.65, 1) - BOUNDS_BRACKET_Y = (-1, 1) - BOUNDS_BRACKET_Z = (-0.1, 0.1) - - bc_back = ppsci.constraint.BoundaryConstraint( - {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, - {"u": 0, "v": 0, "w": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_back}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: x == SUPPORT_ORIGIN[0], - weight_dict=cfg.TRAIN.weight.bc_back, - name="BC_BACK", - ) - bc_front = ppsci.constraint.BoundaryConstraint( - equation["LinearElasticity"].equations, - {"traction_x": 0, "traction_y": 0, "traction_z": T}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_front}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: x == BRACKET_ORIGIN[0] + BRACKET_DIM[0], - name="BC_FRONT", - ) - bc_surface = ppsci.constraint.BoundaryConstraint( - equation["LinearElasticity"].equations, - {"traction_x": 0, "traction_y": 0, 
"traction_z": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_surface}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: np.logical_and( - x > SUPPORT_ORIGIN[0] + 1e-7, x < BRACKET_ORIGIN[0] + BRACKET_DIM[0] - 1e-7 - ), - name="BC_SURFACE", - ) - support_interior = ppsci.constraint.InteriorConstraint( - equation["LinearElasticity"].equations, - { - "stress_disp_xx": 0, - "stress_disp_yy": 0, - "stress_disp_zz": 0, - "stress_disp_xy": 0, - "stress_disp_xz": 0, - "stress_disp_yz": 0, - "equilibrium_x": 0, - "equilibrium_y": 0, - "equilibrium_z": 0, - }, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.support_interior}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: ( - (BOUNDS_SUPPORT_X[0] < x) - & (x < BOUNDS_SUPPORT_X[1]) - & (BOUNDS_SUPPORT_Y[0] < y) - & (y < BOUNDS_SUPPORT_Y[1]) - & (BOUNDS_SUPPORT_Z[0] < z) - & (z < BOUNDS_SUPPORT_Z[1]) - ), - weight_dict={ - "stress_disp_xx": "sdf", - "stress_disp_yy": "sdf", - "stress_disp_zz": "sdf", - "stress_disp_xy": "sdf", - "stress_disp_xz": "sdf", - "stress_disp_yz": "sdf", - "equilibrium_x": "sdf", - "equilibrium_y": "sdf", - "equilibrium_z": "sdf", - }, - name="SUPPORT_INTERIOR", - ) - bracket_interior = ppsci.constraint.InteriorConstraint( - equation["LinearElasticity"].equations, - { - "stress_disp_xx": 0, - "stress_disp_yy": 0, - "stress_disp_zz": 0, - "stress_disp_xy": 0, - "stress_disp_xz": 0, - "stress_disp_yz": 0, - "equilibrium_x": 0, - "equilibrium_y": 0, - "equilibrium_z": 0, - }, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bracket_interior}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: ( - (BOUNDS_BRACKET_X[0] < x) - & (x < BOUNDS_BRACKET_X[1]) - & (BOUNDS_BRACKET_Y[0] < y) - & (y < BOUNDS_BRACKET_Y[1]) - & (BOUNDS_BRACKET_Z[0] < z) - & (z < BOUNDS_BRACKET_Z[1]) - ), - weight_dict={ - "stress_disp_xx": "sdf", - "stress_disp_yy": "sdf", - "stress_disp_zz": "sdf", - "stress_disp_xy": "sdf", - "stress_disp_xz": "sdf", - "stress_disp_yz": "sdf", - "equilibrium_x": "sdf", - "equilibrium_y": "sdf", - "equilibrium_z": "sdf", - }, - name="BRACKET_INTERIOR", - ) - # wrap constraints together - constraint = { - bc_back.name: bc_back, - bc_front.name: bc_front, - bc_surface.name: bc_surface, - support_interior.name: support_interior, - bracket_interior.name: bracket_interior, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - ref_xyzu = ppsci.utils.reader.load_csv_file( - cfg.DEFORMATION_X_PATH, - ("x", "y", "z", "u"), - { - "x": "X Location (m)", - "y": "Y Location (m)", - "z": "Z Location (m)", - "u": "Directional Deformation (m)", - }, - "\t", - ) - ref_v = ppsci.utils.reader.load_csv_file( - cfg.DEFORMATION_Y_PATH, - ("v",), - {"v": "Directional Deformation (m)"}, - "\t", - ) - ref_w = ppsci.utils.reader.load_csv_file( - cfg.DEFORMATION_Z_PATH, - ("w",), - {"w": "Directional Deformation (m)"}, - "\t", - ) - - ref_sxx = ppsci.utils.reader.load_csv_file( - cfg.NORMAL_X_PATH, - ("sigma_xx",), - {"sigma_xx": "Normal Stress (Pa)"}, - "\t", - ) - ref_syy = ppsci.utils.reader.load_csv_file( - cfg.NORMAL_Y_PATH, - ("sigma_yy",), - {"sigma_yy": "Normal Stress (Pa)"}, - "\t", - ) - ref_szz = ppsci.utils.reader.load_csv_file( - cfg.NORMAL_Z_PATH, - ("sigma_zz",), - {"sigma_zz": "Normal Stress (Pa)"}, - "\t", - ) - - ref_sxy = ppsci.utils.reader.load_csv_file( - cfg.SHEAR_XY_PATH, - 
("sigma_xy",), - {"sigma_xy": "Shear Stress (Pa)"}, - "\t", - ) - ref_sxz = ppsci.utils.reader.load_csv_file( - cfg.SHEAR_XZ_PATH, - ("sigma_xz",), - {"sigma_xz": "Shear Stress (Pa)"}, - "\t", - ) - ref_syz = ppsci.utils.reader.load_csv_file( - cfg.SHEAR_YZ_PATH, - ("sigma_yz",), - {"sigma_yz": "Shear Stress (Pa)"}, - "\t", - ) - - input_dict = { - "x": ref_xyzu["x"], - "y": ref_xyzu["y"], - "z": ref_xyzu["z"], - } - label_dict = { - "u": ref_xyzu["u"] / cfg.CHARACTERISTIC_DISPLACEMENT, - "v": ref_v["v"] / cfg.CHARACTERISTIC_DISPLACEMENT, - "w": ref_w["w"] / cfg.CHARACTERISTIC_DISPLACEMENT, - "sigma_xx": ref_sxx["sigma_xx"] * SIGMA_NORMALIZATION, - "sigma_yy": ref_syy["sigma_yy"] * SIGMA_NORMALIZATION, - "sigma_zz": ref_szz["sigma_zz"] * SIGMA_NORMALIZATION, - "sigma_xy": ref_sxy["sigma_xy"] * SIGMA_NORMALIZATION, - "sigma_xz": ref_sxz["sigma_xz"] * SIGMA_NORMALIZATION, - "sigma_yz": ref_syz["sigma_yz"] * SIGMA_NORMALIZATION, - } - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - sup_validator = ppsci.validate.SupervisedValidator( - {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, - ppsci.loss.MSELoss("mean"), - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - "sigma_xx": lambda out: out["sigma_xx"], - "sigma_yy": lambda out: out["sigma_yy"], - "sigma_zz": lambda out: out["sigma_zz"], - "sigma_xy": lambda out: out["sigma_xy"], - "sigma_xz": lambda out: out["sigma_xz"], - "sigma_yz": lambda out: out["sigma_yz"], - }, - metric={"MSE": ppsci.metric.MSE()}, - name="commercial_ref_u_v_w_sigmas", - ) - validator = {sup_validator.name: sup_validator} - - # set visualizer(optional) - visualizer = { - "visualize_u_v_w_sigmas": ppsci.visualize.VisualizerVtu( - input_dict, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - "sigma_xx": lambda out: out["sigma_xx"], - "sigma_yy": lambda out: out["sigma_yy"], - "sigma_zz": lambda out: out["sigma_zz"], - "sigma_xy": lambda out: out["sigma_xy"], - "sigma_xz": lambda out: out["sigma_xz"], - "sigma_yz": lambda out: out["sigma_yz"], - }, - prefix="result_u_v_w_sigmas", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - # wrap to a model_list - model = ppsci.arch.ModelList((disp_net, stress_net)) - - # Specify parameters - LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) - MU = cfg.E / (2 * (1 + cfg.NU)) - MU_C = 0.01 * MU - LAMBDA_ = LAMBDA_ / MU_C - MU = MU / MU_C - SIGMA_NORMALIZATION = cfg.CHARACTERISTIC_LENGTH / ( - cfg.CHARACTERISTIC_DISPLACEMENT * MU_C - ) - - # set validator - ref_xyzu = 
ppsci.utils.reader.load_csv_file( - cfg.DEFORMATION_X_PATH, - ("x", "y", "z", "u"), - { - "x": "X Location (m)", - "y": "Y Location (m)", - "z": "Z Location (m)", - "u": "Directional Deformation (m)", - }, - "\t", - ) - ref_v = ppsci.utils.reader.load_csv_file( - cfg.DEFORMATION_Y_PATH, - ("v",), - {"v": "Directional Deformation (m)"}, - "\t", - ) - ref_w = ppsci.utils.reader.load_csv_file( - cfg.DEFORMATION_Z_PATH, - ("w",), - {"w": "Directional Deformation (m)"}, - "\t", - ) - - ref_sxx = ppsci.utils.reader.load_csv_file( - cfg.NORMAL_X_PATH, - ("sigma_xx",), - {"sigma_xx": "Normal Stress (Pa)"}, - "\t", - ) - ref_syy = ppsci.utils.reader.load_csv_file( - cfg.NORMAL_Y_PATH, - ("sigma_yy",), - {"sigma_yy": "Normal Stress (Pa)"}, - "\t", - ) - ref_szz = ppsci.utils.reader.load_csv_file( - cfg.NORMAL_Z_PATH, - ("sigma_zz",), - {"sigma_zz": "Normal Stress (Pa)"}, - "\t", - ) - - ref_sxy = ppsci.utils.reader.load_csv_file( - cfg.SHEAR_XY_PATH, - ("sigma_xy",), - {"sigma_xy": "Shear Stress (Pa)"}, - "\t", - ) - ref_sxz = ppsci.utils.reader.load_csv_file( - cfg.SHEAR_XZ_PATH, - ("sigma_xz",), - {"sigma_xz": "Shear Stress (Pa)"}, - "\t", - ) - ref_syz = ppsci.utils.reader.load_csv_file( - cfg.SHEAR_YZ_PATH, - ("sigma_yz",), - {"sigma_yz": "Shear Stress (Pa)"}, - "\t", - ) - - input_dict = { - "x": ref_xyzu["x"], - "y": ref_xyzu["y"], - "z": ref_xyzu["z"], - } - label_dict = { - "u": ref_xyzu["u"] / cfg.CHARACTERISTIC_DISPLACEMENT, - "v": ref_v["v"] / cfg.CHARACTERISTIC_DISPLACEMENT, - "w": ref_w["w"] / cfg.CHARACTERISTIC_DISPLACEMENT, - "sigma_xx": ref_sxx["sigma_xx"] * SIGMA_NORMALIZATION, - "sigma_yy": ref_syy["sigma_yy"] * SIGMA_NORMALIZATION, - "sigma_zz": ref_szz["sigma_zz"] * SIGMA_NORMALIZATION, - "sigma_xy": ref_sxy["sigma_xy"] * SIGMA_NORMALIZATION, - "sigma_xz": ref_sxz["sigma_xz"] * SIGMA_NORMALIZATION, - "sigma_yz": ref_syz["sigma_yz"] * SIGMA_NORMALIZATION, - } - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - sup_validator = ppsci.validate.SupervisedValidator( - {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, - ppsci.loss.MSELoss("mean"), - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - "sigma_xx": lambda out: out["sigma_xx"], - "sigma_yy": lambda out: out["sigma_yy"], - "sigma_zz": lambda out: out["sigma_zz"], - "sigma_xy": lambda out: out["sigma_xy"], - "sigma_xz": lambda out: out["sigma_xz"], - "sigma_yz": lambda out: out["sigma_yz"], - }, - metric={"MSE": ppsci.metric.MSE()}, - name="commercial_ref_u_v_w_sigmas", - ) - validator = {sup_validator.name: sup_validator} - - # set visualizer(optional) - visualizer = { - "visualize_u_v_w_sigmas": ppsci.visualize.VisualizerVtu( - input_dict, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - "sigma_xx": lambda out: out["sigma_xx"], - "sigma_yy": lambda out: out["sigma_yy"], - "sigma_zz": lambda out: out["sigma_zz"], - "sigma_xy": lambda out: out["sigma_xy"], - "sigma_xz": lambda out: out["sigma_xz"], - "sigma_yz": lambda out: out["sigma_yz"], - }, - prefix="result_u_v_w_sigmas", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - 
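# A small standalone sketch of the non-dimensionalisation used in this example: the Lamé
# parameters follow standard linear elasticity, lambda = nu E / ((1 + nu)(1 - 2 nu)) and
# mu = E / (2 (1 + nu)); both are then divided by mu_c = 0.01 mu so the network works with
# O(1) coefficients, and the reference stresses above are multiplied by
# SIGMA_NORMALIZATION = L_char / (u_char * mu_c). The numbers assume the values from
# bracket.yaml (E = 100 GPa, NU = 0.3, characteristic length 1 m, characteristic
# displacement 1e-4 m); treat them as placeholders if the config changes.
E, NU = 100.0e9, 0.3
CHAR_LENGTH, CHAR_DISP = 1.0, 1.0e-4

lambda_ = NU * E / ((1 + NU) * (1 - 2 * NU))    # first Lamé parameter, ~57.7 GPa
mu = E / (2 * (1 + NU))                         # shear modulus, ~38.5 GPa
mu_c = 0.01 * mu                                # normalisation constant
sigma_scale = CHAR_LENGTH / (CHAR_DISP * mu_c)  # multiplies reference stresses given in Pa

print(lambda_ / mu_c, mu / mu_c, sigma_scale)   # -> 150.0, 100.0, 2.6e-05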
# evaluate - solver.eval() - # visualize prediction - solver.visualize() - - -def export(cfg: DictConfig): - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - # wrap to a model_list - model = ppsci.arch.ModelList((disp_net, stress_net)) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - ref_xyzu = ppsci.utils.reader.load_csv_file( - cfg.DEFORMATION_X_PATH, - ("x", "y", "z", "u"), - { - "x": "X Location (m)", - "y": "Y Location (m)", - "z": "Z Location (m)", - "u": "Directional Deformation (m)", - }, - "\t", - ) - input_dict = { - "x": ref_xyzu["x"], - "y": ref_xyzu["y"], - "z": ref_xyzu["z"], - } - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_keys = cfg.MODEL.disp_net.output_keys + cfg.MODEL.stress_net.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(output_keys, output_dict.keys()) - } - - ppsci.visualize.save_vtu_from_dict( - "./bracket_pred", - {**input_dict, **output_dict}, - input_dict.keys(), - output_keys, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="bracket.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +""" +Reference: https://docs.nvidia.com/deeplearning/modulus/modulus-v2209/user_guide/foundational/linear_elasticity.html +STL data files download link: https://paddle-org.bj.bcebos.com/paddlescience/datasets/bracket/bracket_dataset.tar +pretrained model download link: https://paddle-org.bj.bcebos.com/paddlescience/models/bracket/bracket_pretrained.pdparams +""" + +import hydra +import numpy as np +from omegaconf import DictConfig + +import ppsci + + +def train(cfg: DictConfig): + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + # wrap to a model_list + model = ppsci.arch.ModelList((disp_net, stress_net)) + + # specify parameters + LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) + MU = cfg.E / (2 * (1 + cfg.NU)) + MU_C = 0.01 * MU + LAMBDA_ = LAMBDA_ / MU_C + MU = MU / MU_C + SIGMA_NORMALIZATION = cfg.CHARACTERISTIC_LENGTH / ( + cfg.CHARACTERISTIC_DISPLACEMENT * MU_C + ) + T = -4.0e4 * SIGMA_NORMALIZATION + + # set equation + equation = { + "LinearElasticity": ppsci.equation.LinearElasticity( + lambda_=LAMBDA_, mu=MU, dim=3 + ) + } + + # set geometry + support = ppsci.geometry.Mesh(cfg.SUPPORT_PATH) + bracket = ppsci.geometry.Mesh(cfg.BRACKET_PATH) + aux_lower = ppsci.geometry.Mesh(cfg.AUX_LOWER_PATH) + aux_upper = ppsci.geometry.Mesh(cfg.AUX_UPPER_PATH) + cylinder_hole = ppsci.geometry.Mesh(cfg.CYLINDER_HOLE_PATH) + cylinder_lower = ppsci.geometry.Mesh(cfg.CYLINDER_LOWER_PATH) + cylinder_upper = ppsci.geometry.Mesh(cfg.CYLINDER_UPPER_PATH) + # geometry bool operation + curve_lower = 
aux_lower - cylinder_lower + curve_upper = aux_upper - cylinder_upper + geo = support + bracket + curve_lower + curve_upper - cylinder_hole + geom = {"geo": geo} + + # set dataloader config + train_dataloader_cfg = { + "dataset": "NamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + SUPPORT_ORIGIN = (-1, -1, -1) + BRACKET_ORIGIN = (-0.75, -1, -0.1) + BRACKET_DIM = (1.75, 2, 0.2) + BOUNDS_SUPPORT_X = (-1, -0.65) + BOUNDS_SUPPORT_Y = (-1, 1) + BOUNDS_SUPPORT_Z = (-1, 1) + BOUNDS_BRACKET_X = (-0.65, 1) + BOUNDS_BRACKET_Y = (-1, 1) + BOUNDS_BRACKET_Z = (-0.1, 0.1) + + bc_back = ppsci.constraint.BoundaryConstraint( + {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, + {"u": 0, "v": 0, "w": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_back}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: x == SUPPORT_ORIGIN[0], + weight_dict=cfg.TRAIN.weight.bc_back, + name="BC_BACK", + ) + bc_front = ppsci.constraint.BoundaryConstraint( + equation["LinearElasticity"].equations, + {"traction_x": 0, "traction_y": 0, "traction_z": T}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_front}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: x == BRACKET_ORIGIN[0] + BRACKET_DIM[0], + name="BC_FRONT", + ) + bc_surface = ppsci.constraint.BoundaryConstraint( + equation["LinearElasticity"].equations, + {"traction_x": 0, "traction_y": 0, "traction_z": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_surface}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: np.logical_and( + x > SUPPORT_ORIGIN[0] + 1e-7, x < BRACKET_ORIGIN[0] + BRACKET_DIM[0] - 1e-7 + ), + name="BC_SURFACE", + ) + support_interior = ppsci.constraint.InteriorConstraint( + equation["LinearElasticity"].equations, + { + "stress_disp_xx": 0, + "stress_disp_yy": 0, + "stress_disp_zz": 0, + "stress_disp_xy": 0, + "stress_disp_xz": 0, + "stress_disp_yz": 0, + "equilibrium_x": 0, + "equilibrium_y": 0, + "equilibrium_z": 0, + }, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.support_interior}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: ( + (BOUNDS_SUPPORT_X[0] < x) + & (x < BOUNDS_SUPPORT_X[1]) + & (BOUNDS_SUPPORT_Y[0] < y) + & (y < BOUNDS_SUPPORT_Y[1]) + & (BOUNDS_SUPPORT_Z[0] < z) + & (z < BOUNDS_SUPPORT_Z[1]) + ), + weight_dict={ + "stress_disp_xx": "sdf", + "stress_disp_yy": "sdf", + "stress_disp_zz": "sdf", + "stress_disp_xy": "sdf", + "stress_disp_xz": "sdf", + "stress_disp_yz": "sdf", + "equilibrium_x": "sdf", + "equilibrium_y": "sdf", + "equilibrium_z": "sdf", + }, + name="SUPPORT_INTERIOR", + ) + bracket_interior = ppsci.constraint.InteriorConstraint( + equation["LinearElasticity"].equations, + { + "stress_disp_xx": 0, + "stress_disp_yy": 0, + "stress_disp_zz": 0, + "stress_disp_xy": 0, + "stress_disp_xz": 0, + "stress_disp_yz": 0, + "equilibrium_x": 0, + "equilibrium_y": 0, + "equilibrium_z": 0, + }, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bracket_interior}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: ( + (BOUNDS_BRACKET_X[0] < x) + & (x < BOUNDS_BRACKET_X[1]) + & (BOUNDS_BRACKET_Y[0] < y) + & (y < BOUNDS_BRACKET_Y[1]) + & (BOUNDS_BRACKET_Z[0] < z) + & (z < BOUNDS_BRACKET_Z[1]) + ), + weight_dict={ + "stress_disp_xx": "sdf", + "stress_disp_yy": "sdf", + "stress_disp_zz": 
"sdf", + "stress_disp_xy": "sdf", + "stress_disp_xz": "sdf", + "stress_disp_yz": "sdf", + "equilibrium_x": "sdf", + "equilibrium_y": "sdf", + "equilibrium_z": "sdf", + }, + name="BRACKET_INTERIOR", + ) + # wrap constraints together + constraint = { + bc_back.name: bc_back, + bc_front.name: bc_front, + bc_surface.name: bc_surface, + support_interior.name: support_interior, + bracket_interior.name: bracket_interior, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + ref_xyzu = ppsci.utils.reader.load_csv_file( + cfg.DEFORMATION_X_PATH, + ("x", "y", "z", "u"), + { + "x": "X Location (m)", + "y": "Y Location (m)", + "z": "Z Location (m)", + "u": "Directional Deformation (m)", + }, + "\t", + ) + ref_v = ppsci.utils.reader.load_csv_file( + cfg.DEFORMATION_Y_PATH, + ("v",), + {"v": "Directional Deformation (m)"}, + "\t", + ) + ref_w = ppsci.utils.reader.load_csv_file( + cfg.DEFORMATION_Z_PATH, + ("w",), + {"w": "Directional Deformation (m)"}, + "\t", + ) + + ref_sxx = ppsci.utils.reader.load_csv_file( + cfg.NORMAL_X_PATH, + ("sigma_xx",), + {"sigma_xx": "Normal Stress (Pa)"}, + "\t", + ) + ref_syy = ppsci.utils.reader.load_csv_file( + cfg.NORMAL_Y_PATH, + ("sigma_yy",), + {"sigma_yy": "Normal Stress (Pa)"}, + "\t", + ) + ref_szz = ppsci.utils.reader.load_csv_file( + cfg.NORMAL_Z_PATH, + ("sigma_zz",), + {"sigma_zz": "Normal Stress (Pa)"}, + "\t", + ) + + ref_sxy = ppsci.utils.reader.load_csv_file( + cfg.SHEAR_XY_PATH, + ("sigma_xy",), + {"sigma_xy": "Shear Stress (Pa)"}, + "\t", + ) + ref_sxz = ppsci.utils.reader.load_csv_file( + cfg.SHEAR_XZ_PATH, + ("sigma_xz",), + {"sigma_xz": "Shear Stress (Pa)"}, + "\t", + ) + ref_syz = ppsci.utils.reader.load_csv_file( + cfg.SHEAR_YZ_PATH, + ("sigma_yz",), + {"sigma_yz": "Shear Stress (Pa)"}, + "\t", + ) + + input_dict = { + "x": ref_xyzu["x"], + "y": ref_xyzu["y"], + "z": ref_xyzu["z"], + } + label_dict = { + "u": ref_xyzu["u"] / cfg.CHARACTERISTIC_DISPLACEMENT, + "v": ref_v["v"] / cfg.CHARACTERISTIC_DISPLACEMENT, + "w": ref_w["w"] / cfg.CHARACTERISTIC_DISPLACEMENT, + "sigma_xx": ref_sxx["sigma_xx"] * SIGMA_NORMALIZATION, + "sigma_yy": ref_syy["sigma_yy"] * SIGMA_NORMALIZATION, + "sigma_zz": ref_szz["sigma_zz"] * SIGMA_NORMALIZATION, + "sigma_xy": ref_sxy["sigma_xy"] * SIGMA_NORMALIZATION, + "sigma_xz": ref_sxz["sigma_xz"] * SIGMA_NORMALIZATION, + "sigma_yz": ref_syz["sigma_yz"] * SIGMA_NORMALIZATION, + } + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict, + "label": label_dict, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + sup_validator = ppsci.validate.SupervisedValidator( + {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, + ppsci.loss.MSELoss("mean"), + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + "sigma_xx": lambda out: out["sigma_xx"], + "sigma_yy": lambda out: out["sigma_yy"], + "sigma_zz": lambda out: out["sigma_zz"], + "sigma_xy": lambda out: out["sigma_xy"], + "sigma_xz": lambda out: out["sigma_xz"], + "sigma_yz": lambda out: out["sigma_yz"], + }, + metric={"MSE": ppsci.metric.MSE()}, + name="commercial_ref_u_v_w_sigmas", + ) + validator = {sup_validator.name: sup_validator} + + # set visualizer(optional) + visualizer = { + "visualize_u_v_w_sigmas": ppsci.visualize.VisualizerVtu( + input_dict, + { + "u": lambda out: out["u"], + "v": lambda out: 
out["v"], + "w": lambda out: out["w"], + "sigma_xx": lambda out: out["sigma_xx"], + "sigma_yy": lambda out: out["sigma_yy"], + "sigma_zz": lambda out: out["sigma_zz"], + "sigma_xy": lambda out: out["sigma_xy"], + "sigma_xz": lambda out: out["sigma_xz"], + "sigma_yz": lambda out: out["sigma_yz"], + }, + prefix="result_u_v_w_sigmas", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + seed=cfg.seed, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + # wrap to a model_list + model = ppsci.arch.ModelList((disp_net, stress_net)) + + # Specify parameters + LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) + MU = cfg.E / (2 * (1 + cfg.NU)) + MU_C = 0.01 * MU + LAMBDA_ = LAMBDA_ / MU_C + MU = MU / MU_C + SIGMA_NORMALIZATION = cfg.CHARACTERISTIC_LENGTH / ( + cfg.CHARACTERISTIC_DISPLACEMENT * MU_C + ) + + # set validator + ref_xyzu = ppsci.utils.reader.load_csv_file( + cfg.DEFORMATION_X_PATH, + ("x", "y", "z", "u"), + { + "x": "X Location (m)", + "y": "Y Location (m)", + "z": "Z Location (m)", + "u": "Directional Deformation (m)", + }, + "\t", + ) + ref_v = ppsci.utils.reader.load_csv_file( + cfg.DEFORMATION_Y_PATH, + ("v",), + {"v": "Directional Deformation (m)"}, + "\t", + ) + ref_w = ppsci.utils.reader.load_csv_file( + cfg.DEFORMATION_Z_PATH, + ("w",), + {"w": "Directional Deformation (m)"}, + "\t", + ) + + ref_sxx = ppsci.utils.reader.load_csv_file( + cfg.NORMAL_X_PATH, + ("sigma_xx",), + {"sigma_xx": "Normal Stress (Pa)"}, + "\t", + ) + ref_syy = ppsci.utils.reader.load_csv_file( + cfg.NORMAL_Y_PATH, + ("sigma_yy",), + {"sigma_yy": "Normal Stress (Pa)"}, + "\t", + ) + ref_szz = ppsci.utils.reader.load_csv_file( + cfg.NORMAL_Z_PATH, + ("sigma_zz",), + {"sigma_zz": "Normal Stress (Pa)"}, + "\t", + ) + + ref_sxy = ppsci.utils.reader.load_csv_file( + cfg.SHEAR_XY_PATH, + ("sigma_xy",), + {"sigma_xy": "Shear Stress (Pa)"}, + "\t", + ) + ref_sxz = ppsci.utils.reader.load_csv_file( + cfg.SHEAR_XZ_PATH, + ("sigma_xz",), + {"sigma_xz": "Shear Stress (Pa)"}, + "\t", + ) + ref_syz = ppsci.utils.reader.load_csv_file( + cfg.SHEAR_YZ_PATH, + ("sigma_yz",), + {"sigma_yz": "Shear Stress (Pa)"}, + "\t", + ) + + input_dict = { + "x": ref_xyzu["x"], + "y": ref_xyzu["y"], + "z": ref_xyzu["z"], + } + label_dict = { + "u": ref_xyzu["u"] / cfg.CHARACTERISTIC_DISPLACEMENT, + "v": ref_v["v"] / cfg.CHARACTERISTIC_DISPLACEMENT, + "w": ref_w["w"] / cfg.CHARACTERISTIC_DISPLACEMENT, + "sigma_xx": ref_sxx["sigma_xx"] * SIGMA_NORMALIZATION, + "sigma_yy": ref_syy["sigma_yy"] * SIGMA_NORMALIZATION, + "sigma_zz": ref_szz["sigma_zz"] * SIGMA_NORMALIZATION, + "sigma_xy": ref_sxy["sigma_xy"] * SIGMA_NORMALIZATION, + "sigma_xz": ref_sxz["sigma_xz"] * SIGMA_NORMALIZATION, + "sigma_yz": ref_syz["sigma_yz"] * SIGMA_NORMALIZATION, + } + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": 
input_dict, + "label": label_dict, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + sup_validator = ppsci.validate.SupervisedValidator( + {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, + ppsci.loss.MSELoss("mean"), + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + "sigma_xx": lambda out: out["sigma_xx"], + "sigma_yy": lambda out: out["sigma_yy"], + "sigma_zz": lambda out: out["sigma_zz"], + "sigma_xy": lambda out: out["sigma_xy"], + "sigma_xz": lambda out: out["sigma_xz"], + "sigma_yz": lambda out: out["sigma_yz"], + }, + metric={"MSE": ppsci.metric.MSE()}, + name="commercial_ref_u_v_w_sigmas", + ) + validator = {sup_validator.name: sup_validator} + + # set visualizer(optional) + visualizer = { + "visualize_u_v_w_sigmas": ppsci.visualize.VisualizerVtu( + input_dict, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + "sigma_xx": lambda out: out["sigma_xx"], + "sigma_yy": lambda out: out["sigma_yy"], + "sigma_zz": lambda out: out["sigma_zz"], + "sigma_xy": lambda out: out["sigma_xy"], + "sigma_xz": lambda out: out["sigma_xz"], + "sigma_yz": lambda out: out["sigma_yz"], + }, + prefix="result_u_v_w_sigmas", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + solver.eval() + # visualize prediction + solver.visualize() + + +def export(cfg: DictConfig): + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + # wrap to a model_list + model = ppsci.arch.ModelList((disp_net, stress_net)) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + ref_xyzu = ppsci.utils.reader.load_csv_file( + cfg.DEFORMATION_X_PATH, + ("x", "y", "z", "u"), + { + "x": "X Location (m)", + "y": "Y Location (m)", + "z": "Z Location (m)", + "u": "Directional Deformation (m)", + }, + "\t", + ) + input_dict = { + "x": ref_xyzu["x"], + "y": ref_xyzu["y"], + "z": ref_xyzu["z"], + } + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to cfg.INFER.output_keys + output_keys = cfg.MODEL.disp_net.output_keys + cfg.MODEL.stress_net.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(output_keys, output_dict.keys()) + } + + ppsci.visualize.save_vtu_from_dict( + "./bracket_pred", + {**input_dict, **output_dict}, + input_dict.keys(), + output_keys, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="bracket.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git 
a/examples/bracket/conf/bracket.yaml b/examples/bracket/conf/bracket.yaml index c3b591db3d..683fc42ffb 100644 --- a/examples/bracket/conf/bracket.yaml +++ b/examples/bracket/conf/bracket.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -120,3 +121,126 @@ INFER: max_batch_size: 128 num_cpu_threads: 4 batch_size: 128 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_bracket/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +NU: 0.3 +E: 100.0e9 +CHARACTERISTIC_LENGTH: 1.0 +CHARACTERISTIC_DISPLACEMENT: 1.0e-4 + +# set geometry file path +SUPPORT_PATH: ./stl/support.stl +BRACKET_PATH: ./stl/bracket.stl +AUX_LOWER_PATH: ./stl/aux_lower.stl +AUX_UPPER_PATH: ./stl/aux_upper.stl +CYLINDER_HOLE_PATH: ./stl/cylinder_hole.stl +CYLINDER_LOWER_PATH: ./stl/cylinder_lower.stl +CYLINDER_UPPER_PATH: ./stl/cylinder_upper.stl + +# set evaluate data path +DEFORMATION_X_PATH: ./data/deformation_x.txt +DEFORMATION_Y_PATH: ./data/deformation_y.txt +DEFORMATION_Z_PATH: ./data/deformation_z.txt +NORMAL_X_PATH: ./data/normal_x.txt +NORMAL_Y_PATH: ./data/normal_y.txt +NORMAL_Z_PATH: ./data/normal_z.txt +SHEAR_XY_PATH: ./data/shear_xy.txt +SHEAR_XZ_PATH: ./data/shear_xz.txt +SHEAR_YZ_PATH: ./data/shear_yz.txt + +# model settings +MODEL: + disp_net: + input_keys: ["x", "y", "z"] + output_keys: ["u", "v", "w"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + stress_net: + input_keys: ["x", "y", "z"] + output_keys: ["sigma_xx", "sigma_yy", "sigma_zz", "sigma_xy", "sigma_xz", "sigma_yz"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + +# training settings +TRAIN: + epochs: 2000 + iters_per_epoch: 1000 + save_freq: 20 + eval_during_train: true + eval_freq: 20 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + gamma: 0.95 + decay_steps: 15000 + by_epoch: false + batch_size: + bc_back: 1024 + bc_front: 128 + bc_surface: 4096 + support_interior: 2048 + bracket_interior: 1024 + weight: + bc_back: {"u": 10, "v": 10, "w": 10} + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 128 + +# inference settings +INFER: + pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/bracket/bracket_pretrained.pdparams" + export_path: ./inference/bracket + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 128 + num_cpu_threads: 4 + batch_size: 128 +>>>>>>> Stashed changes diff --git a/examples/brusselator3d/brusselator3d.py b/examples/brusselator3d/brusselator3d.py index 0ee63e55b7..29694ea86b 100644 --- 
a/examples/brusselator3d/brusselator3d.py +++ b/examples/brusselator3d/brusselator3d.py @@ -1,375 +1,375 @@ -""" -Paper: https://arxiv.org/abs/2303.10528 -Reference: https://github.com/qianyingcao/Laplace-Neural-Operator/tree/main/3D_Brusselator -""" -from os import path as osp -from typing import List -from typing import Literal -from typing import Tuple - -import hydra -import matplotlib.pyplot as plt -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import reader - - -class DataFuncs: - def __init__(self, orig_r: int, r: int, nt: int, nx: int, ny: int) -> None: - """Functions of data. - - Args: - orig_r (int): Oringinal resolution of data. - r (int): Multiples of downsampling at resolution. - nt (int): The number of values to take on t. - nx (int): The number of values to take on x. - ny (int): The number of values to take on y. - """ - self.orig_r = orig_r - self.r = r - self.nt = nt - self.nx = nx - self.ny = ny - - self.s = int((orig_r - 1) / r + 1) - - x = np.linspace(0, 1, orig_r) - y = np.linspace(0, 1, orig_r) - t = np.linspace(0, 1, nt) - self.tt, self.xx, self.yy = np.meshgrid(t, x, y, indexing="ij") - - def load_data(self, data_path, keys) -> List[np.ndarray]: - raw_data = reader.load_npz_file(data_path, keys) - return [raw_data[key] for key in keys] - - def get_mean_std(self, data: np.ndarray) -> Tuple[float, ...]: - min_ = np.min(data) - max_ = np.max(data) - return (min_ + max_) / 2, (max_ - min_) / 2 - - def encode(self, data, mean, std) -> np.ndarray: - return (data - mean) / std - - def decode(self, data, mean, std) -> np.ndarray: - return data * std + mean - - def gen_grid(self, grid, num) -> np.ndarray: - grid_tile = np.tile(grid, (num, 1, 1, 1)) - grid_subsampling = grid_tile[:, :, :: self.r, :: self.r] - grid_crop = grid_subsampling[:, :, : self.s, : self.s] - grid_reshape = np.reshape(grid_crop, (num, self.nt, self.s, self.s, 1)) - return grid_reshape - - def cat_grid(self, data) -> np.ndarray: - grid_t = self.gen_grid(self.tt, data.shape[0]) - grid_x = self.gen_grid(self.xx, data.shape[0]) - grid_y = self.gen_grid(self.yy, data.shape[0]) - return np.concatenate([data, grid_t, grid_x, grid_y], axis=-1).astype( - data.dtype - ) - - def transform( - self, data: np.ndarray, key: Literal["input", "label"] = "input" - ) -> np.ndarray: - if key == "input": - data_expand = np.expand_dims(data, axis=0) - data_tile = np.tile(data_expand, (self.orig_r, self.orig_r, 1, 1)) - data = np.transpose(data_tile, axes=(2, 3, 0, 1)) - data_subsampling = data[:, :, :: self.r, :: self.r] - data_crop = data_subsampling[:, :, : self.s, : self.s] - data_reshape = np.reshape( - data_crop, (data.shape[0], self.nt, self.s, self.s, 1) - ) - return data_reshape - - def draw_plot(self, save_path, pred, label): - pred = np.mean(pred, axis=(1, 2)) - label = np.mean(label, axis=(1, 2)) - t = np.linspace(0, self.nt, self.nt) - plt.figure(figsize=(8, 6)) - plt.plot(t, pred, label="pred(t)") - plt.plot(t, label, label="label(t)") - plt.xlabel("time steps") - plt.legend() - plt.savefig(save_path) - - -def train(cfg: DictConfig): - # set data functions - data_funcs = DataFuncs(cfg.ORIG_R, cfg.RESOLUTION, cfg.NUM_T, cfg.NUM_X, cfg.NUM_Y) - inputs_train, labels_train, inputs_val, labels_val = data_funcs.load_data( - cfg.DATA_PATH, - ("inputs_train", "outputs_train", "inputs_test", "outputs_test"), - ) - in_train = data_funcs.transform(inputs_train, "input") - label_train = data_funcs.transform(labels_train, "label") - in_val = 
data_funcs.transform(inputs_val, "input") - label_val = data_funcs.transform(labels_val, "label") - in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) - label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) - - input_constraint = data_funcs.encode(in_train, in_train_mean, in_train_std) - input_validator = data_funcs.encode(in_val, in_train_mean, in_train_std) - if not cfg.MODEL.use_grid: - input_constraint = data_funcs.cat_grid(input_constraint) - input_validator = data_funcs.cat_grid(input_validator) - - # set model - T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T]) - X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ - :, : data_funcs.s - ] - Y = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ - :, : data_funcs.s - ] - model = ppsci.arch.LNO(**cfg.MODEL, T=T, data=(X, Y)) - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.Step(**cfg.TRAIN.lr_scheduler)() - optimizer = ppsci.optimizer.AdamW( - lr_scheduler, weight_decay=cfg.TRAIN.weight_decay - )(model) - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"input": input_constraint}, - "label": { - "output": data_funcs.encode( - label_train, label_train_mean, label_train_std - ) - }, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "num_workers": 1, - }, - ppsci.loss.L2RelLoss("sum"), - name="sup_constraint", - ) - - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set validator - sup_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"input": input_validator}, - "label": {"output": label_val}, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 1, - }, - ppsci.loss.L2RelLoss("sum"), - { - "output": lambda out: data_funcs.decode( - out["output"], - label_train_mean, - label_train_std, - ) - }, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="sup_validator", - ) - - # wrap validator together - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - optimizer=optimizer, - validator=validator, - cfg=cfg, - ) - - # train model - solver.train() - - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set data functions - data_funcs = DataFuncs(cfg.ORIG_R, cfg.RESOLUTION, cfg.NUM_T, cfg.NUM_X, cfg.NUM_Y) - inputs_train, labels_train, inputs_val, labels_val = data_funcs.load_data( - cfg.DATA_PATH, - ("inputs_train", "outputs_train", "inputs_test", "outputs_test"), - ) - in_train = data_funcs.transform(inputs_train, "input") - label_train = data_funcs.transform(labels_train, "label") - in_val = data_funcs.transform(inputs_val, "input") - label_val = data_funcs.transform(labels_val, "label") - in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) - label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) - - input_validator = data_funcs.encode(in_val, in_train_mean, in_train_std) - if not cfg.MODEL.use_grid: - input_validator = data_funcs.cat_grid(input_validator) - - # set model - T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T]) - X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ - :, : data_funcs.s - ] - Y = paddle.linspace(start=0, stop=1, 
num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ - :, : data_funcs.s - ] - model = ppsci.arch.LNO(**cfg.MODEL, T=T, data=(X, Y)) - - # set validator - sup_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"input": input_validator}, - "label": {"output": label_val}, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 1, - }, - ppsci.loss.L2RelLoss("sum"), - { - "output": lambda out: data_funcs.decode( - out["output"], - label_train_mean, - label_train_std, - ) - }, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="sup_validator", - ) - - # wrap validator together - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - validator=validator, - cfg=cfg, - ) - # evaluate - solver.eval() - - # visualize prediction - input_visualize = data_funcs.encode(in_val[0:1], in_train_mean, in_train_std) - if not cfg.MODEL.use_grid: - input_visualize = data_funcs.cat_grid(input_visualize) - output_dict = model({"input": paddle.to_tensor(input_visualize)}) - pred = paddle.squeeze( - data_funcs.decode(output_dict["output"], label_train_mean, label_train_std) - ).numpy() - label = np.squeeze(label_val[0]) - - data_funcs.draw_plot(osp.join(cfg.output_dir, "result"), pred, label) - - -def export(cfg: DictConfig): - # set model - T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T]) - X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ - :, : int((cfg.ORIG_R - 1) / cfg.RESOLUTION + 1) - ] - Y = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ - :, : int((cfg.ORIG_R - 1) / cfg.RESOLUTION + 1) - ] - model = ppsci.arch.LNO(**cfg.MODEL, T=T, data=(X, Y)) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec( - [ - None, - cfg.NUM_T, - cfg.NUM_X // cfg.RESOLUTION, - cfg.NUM_Y // cfg.RESOLUTION, - 1, - ], - "float32", - name=key, - ) - for key in model.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set data functions - data_funcs = DataFuncs(cfg.ORIG_R, cfg.RESOLUTION, cfg.NUM_T, cfg.NUM_X, cfg.NUM_Y) - inputs_train, labels_train, inputs_val, labels_val = data_funcs.load_data( - cfg.DATA_PATH, - ("inputs_train", "outputs_train", "inputs_test", "outputs_test"), - ) - in_train = data_funcs.transform(inputs_train, "input") - label_train = data_funcs.transform(labels_train, "label") - in_val = data_funcs.transform(inputs_val, "input") - label_val = data_funcs.transform(labels_val, "label") - in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) - label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) - input_infer = data_funcs.encode(in_val[0:1], in_train_mean, in_train_std) - if not cfg.MODEL.use_grid: - input_infer = data_funcs.cat_grid(input_infer) - - output_dict = predictor.predict( - {"input": input_infer}, - cfg.INFER.batch_size, - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - pred = np.squeeze( - data_funcs.decode(output_dict["output"], label_train_mean, label_train_std) - ) - label = np.squeeze(label_val[0]) - - 
data_funcs.draw_plot(osp.join(cfg.output_dir, "result"), pred, label) - - -@hydra.main(version_base=None, config_path="./conf", config_name="brusselator3d.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +""" +Paper: https://arxiv.org/abs/2303.10528 +Reference: https://github.com/qianyingcao/Laplace-Neural-Operator/tree/main/3D_Brusselator +""" +from os import path as osp +from typing import List +from typing import Literal +from typing import Tuple + +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import reader + + +class DataFuncs: + def __init__(self, orig_r: int, r: int, nt: int, nx: int, ny: int) -> None: + """Functions of data. + + Args: + orig_r (int): Oringinal resolution of data. + r (int): Multiples of downsampling at resolution. + nt (int): The number of values to take on t. + nx (int): The number of values to take on x. + ny (int): The number of values to take on y. + """ + self.orig_r = orig_r + self.r = r + self.nt = nt + self.nx = nx + self.ny = ny + + self.s = int((orig_r - 1) / r + 1) + + x = np.linspace(0, 1, orig_r) + y = np.linspace(0, 1, orig_r) + t = np.linspace(0, 1, nt) + self.tt, self.xx, self.yy = np.meshgrid(t, x, y, indexing="ij") + + def load_data(self, data_path, keys) -> List[np.ndarray]: + raw_data = reader.load_npz_file(data_path, keys) + return [raw_data[key] for key in keys] + + def get_mean_std(self, data: np.ndarray) -> Tuple[float, ...]: + min_ = np.min(data) + max_ = np.max(data) + return (min_ + max_) / 2, (max_ - min_) / 2 + + def encode(self, data, mean, std) -> np.ndarray: + return (data - mean) / std + + def decode(self, data, mean, std) -> np.ndarray: + return data * std + mean + + def gen_grid(self, grid, num) -> np.ndarray: + grid_tile = np.tile(grid, (num, 1, 1, 1)) + grid_subsampling = grid_tile[:, :, :: self.r, :: self.r] + grid_crop = grid_subsampling[:, :, : self.s, : self.s] + grid_reshape = np.reshape(grid_crop, (num, self.nt, self.s, self.s, 1)) + return grid_reshape + + def cat_grid(self, data) -> np.ndarray: + grid_t = self.gen_grid(self.tt, data.shape[0]) + grid_x = self.gen_grid(self.xx, data.shape[0]) + grid_y = self.gen_grid(self.yy, data.shape[0]) + return np.concatenate([data, grid_t, grid_x, grid_y], axis=-1).astype( + data.dtype + ) + + def transform( + self, data: np.ndarray, key: Literal["input", "label"] = "input" + ) -> np.ndarray: + if key == "input": + data_expand = np.expand_dims(data, axis=0) + data_tile = np.tile(data_expand, (self.orig_r, self.orig_r, 1, 1)) + data = np.transpose(data_tile, axes=(2, 3, 0, 1)) + data_subsampling = data[:, :, :: self.r, :: self.r] + data_crop = data_subsampling[:, :, : self.s, : self.s] + data_reshape = np.reshape( + data_crop, (data.shape[0], self.nt, self.s, self.s, 1) + ) + return data_reshape + + def draw_plot(self, save_path, pred, label): + pred = np.mean(pred, axis=(1, 2)) + label = np.mean(label, axis=(1, 2)) + t = np.linspace(0, self.nt, self.nt) + plt.figure(figsize=(8, 6)) + plt.plot(t, pred, label="pred(t)") + plt.plot(t, label, label="label(t)") + plt.xlabel("time steps") + plt.legend() + plt.savefig(save_path) + + +def train(cfg: DictConfig): + # set data functions + data_funcs = 
DataFuncs(cfg.ORIG_R, cfg.RESOLUTION, cfg.NUM_T, cfg.NUM_X, cfg.NUM_Y) + inputs_train, labels_train, inputs_val, labels_val = data_funcs.load_data( + cfg.DATA_PATH, + ("inputs_train", "outputs_train", "inputs_test", "outputs_test"), + ) + in_train = data_funcs.transform(inputs_train, "input") + label_train = data_funcs.transform(labels_train, "label") + in_val = data_funcs.transform(inputs_val, "input") + label_val = data_funcs.transform(labels_val, "label") + in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) + label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) + + input_constraint = data_funcs.encode(in_train, in_train_mean, in_train_std) + input_validator = data_funcs.encode(in_val, in_train_mean, in_train_std) + if not cfg.MODEL.use_grid: + input_constraint = data_funcs.cat_grid(input_constraint) + input_validator = data_funcs.cat_grid(input_validator) + + # set model + T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T]) + X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ + :, : data_funcs.s + ] + Y = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ + :, : data_funcs.s + ] + model = ppsci.arch.LNO(**cfg.MODEL, T=T, data=(X, Y)) + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.Step(**cfg.TRAIN.lr_scheduler)() + optimizer = ppsci.optimizer.AdamW( + lr_scheduler, weight_decay=cfg.TRAIN.weight_decay + )(model) + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": input_constraint}, + "label": { + "output": data_funcs.encode( + label_train, label_train_mean, label_train_std + ) + }, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "num_workers": 1, + }, + ppsci.loss.L2RelLoss("sum"), + name="sup_constraint", + ) + + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": input_validator}, + "label": {"output": label_val}, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 1, + }, + ppsci.loss.L2RelLoss("sum"), + { + "output": lambda out: data_funcs.decode( + out["output"], + label_train_mean, + label_train_std, + ) + }, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="sup_validator", + ) + + # wrap validator together + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + validator=validator, + cfg=cfg, + ) + + # train model + solver.train() + + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set data functions + data_funcs = DataFuncs(cfg.ORIG_R, cfg.RESOLUTION, cfg.NUM_T, cfg.NUM_X, cfg.NUM_Y) + inputs_train, labels_train, inputs_val, labels_val = data_funcs.load_data( + cfg.DATA_PATH, + ("inputs_train", "outputs_train", "inputs_test", "outputs_test"), + ) + in_train = data_funcs.transform(inputs_train, "input") + label_train = data_funcs.transform(labels_train, "label") + in_val = data_funcs.transform(inputs_val, "input") + label_val = data_funcs.transform(labels_val, "label") + in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) + label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) + + input_validator = data_funcs.encode(in_val, 
in_train_mean, in_train_std) + if not cfg.MODEL.use_grid: + input_validator = data_funcs.cat_grid(input_validator) + + # set model + T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T]) + X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ + :, : data_funcs.s + ] + Y = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ + :, : data_funcs.s + ] + model = ppsci.arch.LNO(**cfg.MODEL, T=T, data=(X, Y)) + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": input_validator}, + "label": {"output": label_val}, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 1, + }, + ppsci.loss.L2RelLoss("sum"), + { + "output": lambda out: data_funcs.decode( + out["output"], + label_train_mean, + label_train_std, + ) + }, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="sup_validator", + ) + + # wrap validator together + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + # evaluate + solver.eval() + + # visualize prediction + input_visualize = data_funcs.encode(in_val[0:1], in_train_mean, in_train_std) + if not cfg.MODEL.use_grid: + input_visualize = data_funcs.cat_grid(input_visualize) + output_dict = model({"input": paddle.to_tensor(input_visualize)}) + pred = paddle.squeeze( + data_funcs.decode(output_dict["output"], label_train_mean, label_train_std) + ).numpy() + label = np.squeeze(label_val[0]) + + data_funcs.draw_plot(osp.join(cfg.output_dir, "result"), pred, label) + + +def export(cfg: DictConfig): + # set model + T = paddle.linspace(start=0, stop=19, num=cfg.NUM_T).reshape([1, cfg.NUM_T]) + X = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ + :, : int((cfg.ORIG_R - 1) / cfg.RESOLUTION + 1) + ] + Y = paddle.linspace(start=0, stop=1, num=cfg.ORIG_R).reshape([1, cfg.ORIG_R])[ + :, : int((cfg.ORIG_R - 1) / cfg.RESOLUTION + 1) + ] + model = ppsci.arch.LNO(**cfg.MODEL, T=T, data=(X, Y)) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec( + [ + None, + cfg.NUM_T, + cfg.NUM_X // cfg.RESOLUTION, + cfg.NUM_Y // cfg.RESOLUTION, + 1, + ], + "float32", + name=key, + ) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set data functions + data_funcs = DataFuncs(cfg.ORIG_R, cfg.RESOLUTION, cfg.NUM_T, cfg.NUM_X, cfg.NUM_Y) + inputs_train, labels_train, inputs_val, labels_val = data_funcs.load_data( + cfg.DATA_PATH, + ("inputs_train", "outputs_train", "inputs_test", "outputs_test"), + ) + in_train = data_funcs.transform(inputs_train, "input") + label_train = data_funcs.transform(labels_train, "label") + in_val = data_funcs.transform(inputs_val, "input") + label_val = data_funcs.transform(labels_val, "label") + in_train_mean, in_train_std = data_funcs.get_mean_std(in_train) + label_train_mean, label_train_std = data_funcs.get_mean_std(label_train) + input_infer = data_funcs.encode(in_val[0:1], in_train_mean, in_train_std) + if not cfg.MODEL.use_grid: + input_infer = data_funcs.cat_grid(input_infer) + + output_dict = predictor.predict( + {"input": input_infer}, + 
cfg.INFER.batch_size, + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + pred = np.squeeze( + data_funcs.decode(output_dict["output"], label_train_mean, label_train_std) + ) + label = np.squeeze(label_val[0]) + + data_funcs.draw_plot(osp.join(cfg.output_dir, "result"), pred, label) + + +@hydra.main(version_base=None, config_path="./conf", config_name="brusselator3d.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/brusselator3d/conf/brusselator3d.yaml b/examples/brusselator3d/conf/brusselator3d.yaml index ee625c6cba..2e0173f9f1 100644 --- a/examples/brusselator3d/conf/brusselator3d.yaml +++ b/examples/brusselator3d/conf/brusselator3d.yaml @@ -1,95 +1,95 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_brusselator3d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 2024 -output_dir: ${hydra:run.dir} -log_freq: 20 - -# set constant -NUM_T: 39 -NUM_X: 28 -NUM_Y: 28 -ORIG_R: 28 -RESOLUTION: 2 - -# set data path -DATA_PATH: ./data/brusselator3d_dataset.npz - -# model settings -MODEL: - input_keys: ["input"] - output_keys: ["output"] - width: 8 - modes: [4, 4, 4] - in_features: 4 - hidden_features: 64 - activation: "relu" - use_norm: true - use_grid: false - -# training settings -TRAIN: - epochs: 300 - batch_size: 50 - iters_per_epoch: 16 # NUM_TRAIN // TRAIN.batch_size - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 0.005 - gamma: 0.5 - step_size: 100 - by_epoch: true - weight_decay: 1e-4 - save_freq: 20 - eval_freq: 20 - eval_during_train: true - pretrained_model_path: null - checkpoint_path: null - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 200 - -# inference settings -INFER: - pretrained_model_path: null - export_path: ./inference/brusselator3d - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - device: gpu - engine: native - precision: fp32 - onnx_path: ${INFER.export_path}.onnx - ir_optim: true - min_subgraph_size: 10 - gpu_mem: 4000 - gpu_id: 0 - max_batch_size: 128 - num_cpu_threads: 4 - batch_size: 128 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: 
outputs_brusselator3d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2024 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set constant +NUM_T: 39 +NUM_X: 28 +NUM_Y: 28 +ORIG_R: 28 +RESOLUTION: 2 + +# set data path +DATA_PATH: ./data/brusselator3d_dataset.npz + +# model settings +MODEL: + input_keys: ["input"] + output_keys: ["output"] + width: 8 + modes: [4, 4, 4] + in_features: 4 + hidden_features: 64 + activation: "relu" + use_norm: true + use_grid: false + +# training settings +TRAIN: + epochs: 300 + batch_size: 50 + iters_per_epoch: 16 # NUM_TRAIN // TRAIN.batch_size + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.005 + gamma: 0.5 + step_size: 100 + by_epoch: true + weight_decay: 1e-4 + save_freq: 20 + eval_freq: 20 + eval_during_train: true + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 200 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/brusselator3d + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 128 + num_cpu_threads: 4 + batch_size: 128 diff --git a/examples/bubble/bubble.py b/examples/bubble/bubble.py index a6adbd0d97..b9f239af59 100644 --- a/examples/bubble/bubble.py +++ b/examples/bubble/bubble.py @@ -1,521 +1,521 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Reference: https://github.com/hanfengzhai/BubbleNet -Bubble data files download link: https://paddle-org.bj.bcebos.com/paddlescience/datasets/BubbleNet/bubble.mat -""" - -from os import path as osp - -import hydra -import numpy as np -import paddle -import scipy -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # load Data - data = scipy.io.loadmat(cfg.DATA_PATH) - # normalize data - p_max = data["p"].max(axis=0) - p_min = data["p"].min(axis=0) - p_norm = (data["p"] - p_min) / (p_max - p_min) - u_max = data["u"].max(axis=0) - u_min = data["u"].min(axis=0) - u_norm = (data["u"] - u_min) / (u_max - u_min) - v_max = data["v"].max(axis=0) - v_min = data["v"].min(axis=0) - v_norm = (data["v"] - v_min) / (v_max - v_min) - - u_star = u_norm # N x T - v_star = v_norm # N x T - p_star = p_norm # N x T - phil_star = data["phil"] # N x T - t_star = data["t"] # T x 1 - x_star = data["X"] # N x 2 - - N = x_star.shape[0] - T = t_star.shape[0] - - # rearrange data - xx = np.tile(x_star[:, 0:1], (1, T)) # N x T - yy = np.tile(x_star[:, 1:2], (1, T)) # N x T - tt = np.tile(t_star, (1, N)).T # N x T - - x = xx.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - y = yy.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - t = tt.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - - u = u_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - v = v_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - p = p_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - phil = phil_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - - idx = np.random.choice(N * T, int(N * T * 0.75), replace=False) - # train data - train_input = {"x": x[idx, :], "y": y[idx, :], "t": t[idx, :]} - train_label = {"u": u[idx, :], "v": v[idx, :], "p": p[idx, :], "phil": phil[idx, :]} - - # eval data - test_input = {"x": x, "y": y, "t": t} - test_label = {"u": u, "v": v, "p": p, "phil": phil} - - # set model - model_psi = ppsci.arch.MLP(**cfg.MODEL.psi_net) - model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) - model_phil = ppsci.arch.MLP(**cfg.MODEL.phil_net) - - # transform - def transform_out(in_, out): - psi_y = out["psi"] - y = in_["y"] - x = in_["x"] - u = jacobian(psi_y, y) - v = -jacobian(psi_y, x) - return {"u": u, "v": v} - - # register transform - model_psi.register_output_transform(transform_out) - model_list = ppsci.arch.ModelList((model_psi, model_p, model_phil)) - - # set time-geometry - # set timestamps(including initial t0) - timestamps = np.linspace(0, 126, 127, endpoint=True) - geom = { - "time_rect": ppsci.geometry.PointCloud( - train_input, - ("t", "x", "y"), - ), - "time_rect_visu": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(1, 126, timestamps=timestamps), - ppsci.geometry.Rectangle((0, 0), (15, 5)), - ), - } - - NTIME_ALL = len(timestamps) - NPOINT_PDE, NTIME_PDE = 300 * 100, NTIME_ALL - 1 - - # set constraint - pde_constraint = ppsci.constraint.InteriorConstraint( - { - "pressure_Poisson": lambda out: hessian(out["p"], out["x"]) - + hessian(out["p"], out["y"]) - }, - {"pressure_Poisson": 0}, - geom["time_rect"], - { - "dataset": "IterableNamedArrayDataset", - "batch_size": 
cfg.TRAIN.batch_size.pde_constraint, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean"), - name="EQ", - ) - - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": train_input, - "label": train_label, - }, - "batch_size": cfg.TRAIN.batch_size.sup_constraint, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - }, - ppsci.loss.MSELoss("mean"), - name="Sup", - ) - - # wrap constraints together - constraint = { - sup_constraint.name: sup_constraint, - pde_constraint.name: pde_constraint, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_list) - - # set validator - mse_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": test_input, - "label": test_label, - }, - "batch_size": cfg.TRAIN.batch_size.mse_validator, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.MSELoss("mean"), - metric={"MSE": ppsci.metric.MSE()}, - name="bubble_mse", - ) - validator = { - mse_validator.name: mse_validator, - } - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - geom=geom, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # visualize prediction after finished training - visu_mat = geom["time_rect_visu"].sample_interior( - NPOINT_PDE * NTIME_PDE, evenly=True - ) - # transform - def transform_out(in_, out): - psi_y = out["psi"] - y = in_["y"] - x = in_["x"] - u = jacobian(psi_y, y, create_graph=False) - v = -jacobian(psi_y, x, create_graph=False) - return {"u": u, "v": v} - - model_psi.register_output_transform(transform_out) - - pred_norm = solver.predict(visu_mat, None, 4096, no_grad=False, return_numpy=True) - # inverse normalization - p_pred = pred_norm["p"].reshape([NTIME_PDE, NPOINT_PDE]).T - u_pred = pred_norm["u"].reshape([NTIME_PDE, NPOINT_PDE]).T - v_pred = pred_norm["v"].reshape([NTIME_PDE, NPOINT_PDE]).T - pred = { - "p": (p_pred * (p_max - p_min) + p_min).T.reshape([-1, 1]), - "u": (u_pred * (u_max - u_min) + u_min).T.reshape([-1, 1]), - "v": (v_pred * (v_max - v_min) + v_min).T.reshape([-1, 1]), - "phil": pred_norm["phil"], - } - logger.message("Now saving visual result to: visual/result.vtu, please wait...") - ppsci.visualize.save_vtu_from_dict( - osp.join(cfg.output_dir, "visual/result.vtu"), - { - "t": visu_mat["t"], - "x": visu_mat["x"], - "y": visu_mat["y"], - "u": pred["u"], - "v": pred["v"], - "p": pred["p"], - "phil": pred["phil"], - }, - ("t", "x", "y"), - ("u", "v", "p", "phil"), - NTIME_PDE, - ) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # load Data - data = scipy.io.loadmat(cfg.DATA_PATH) - # normalize data - p_max = data["p"].max(axis=0) - p_min = data["p"].min(axis=0) - p_norm = (data["p"] - p_min) / (p_max - p_min) - u_max = data["u"].max(axis=0) - u_min = data["u"].min(axis=0) - u_norm = (data["u"] - u_min) / (u_max - u_min) - v_max = data["v"].max(axis=0) - v_min = data["v"].min(axis=0) - v_norm = (data["v"] - v_min) / (v_max - v_min) - - u_star = u_norm # 
N x T - v_star = v_norm # N x T - p_star = p_norm # N x T - phil_star = data["phil"] # N x T - t_star = data["t"] # T x 1 - x_star = data["X"] # N x 2 - - N = x_star.shape[0] - T = t_star.shape[0] - - # rearrange data - xx = np.tile(x_star[:, 0:1], (1, T)) # N x T - yy = np.tile(x_star[:, 1:2], (1, T)) # N x T - tt = np.tile(t_star, (1, N)).T # N x T - - x = xx.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - y = yy.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - t = tt.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - - u = u_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - v = v_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - p = p_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - phil = phil_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 - - idx = np.random.choice(N * T, int(N * T * 0.75), replace=False) - # train data - train_input = {"x": x[idx, :], "y": y[idx, :], "t": t[idx, :]} - - # eval data - test_input = {"x": x, "y": y, "t": t} - test_label = {"u": u, "v": v, "p": p, "phil": phil} - - # set model - model_psi = ppsci.arch.MLP(**cfg.MODEL.psi_net) - model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) - model_phil = ppsci.arch.MLP(**cfg.MODEL.phil_net) - - # transform - def transform_out(in_, out): - psi_y = out["psi"] - y = in_["y"] - x = in_["x"] - u = jacobian(psi_y, y, create_graph=False) - v = -jacobian(psi_y, x, create_graph=False) - return {"u": u, "v": v} - - # register transform - model_psi.register_output_transform(transform_out) - model_list = ppsci.arch.ModelList((model_psi, model_p, model_phil)) - - # set time-geometry - # set timestamps(including initial t0) - timestamps = np.linspace(0, 126, 127, endpoint=True) - geom = { - "time_rect": ppsci.geometry.PointCloud( - train_input, - ("t", "x", "y"), - ), - "time_rect_visu": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(1, 126, timestamps=timestamps), - ppsci.geometry.Rectangle((0, 0), (15, 5)), - ), - } - - NTIME_ALL = len(timestamps) - NPOINT_PDE, NTIME_PDE = 300 * 100, NTIME_ALL - 1 - - # set validator - mse_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": test_input, - "label": test_label, - }, - "batch_size": cfg.TRAIN.batch_size.mse_validator, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.MSELoss("mean"), - metric={"MSE": ppsci.metric.MSE()}, - name="bubble_mse", - ) - validator = { - mse_validator.name: mse_validator, - } - - # directly evaluate pretrained model(optional) - solver = ppsci.solver.Solver( - model_list, - output_dir=cfg.output_dir, - geom=geom, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - - # visualize prediction - visu_mat = geom["time_rect_visu"].sample_interior( - NPOINT_PDE * NTIME_PDE, evenly=True - ) - - pred_norm = solver.predict( - visu_mat, None, 4096 * 2, no_grad=False, return_numpy=True - ) - # inverse normalization - p_pred = pred_norm["p"].reshape([NTIME_PDE, NPOINT_PDE]).T - u_pred = pred_norm["u"].reshape([NTIME_PDE, NPOINT_PDE]).T - v_pred = pred_norm["v"].reshape([NTIME_PDE, NPOINT_PDE]).T - pred = { - "p": (p_pred * (p_max - p_min) + p_min).T.reshape([-1, 1]), - "u": (u_pred * (u_max - u_min) + u_min).T.reshape([-1, 1]), - "v": (v_pred * (v_max - v_min) + v_min).T.reshape([-1, 1]), - "phil": pred_norm["phil"], - } - logger.message("Now saving visual result to: 
visual/result.vtu, please wait...") - ppsci.visualize.save_vtu_from_dict( - osp.join(cfg.output_dir, "visual/result.vtu"), - { - "t": visu_mat["t"], - "x": visu_mat["x"], - "y": visu_mat["y"], - "u": pred["u"], - "v": pred["v"], - "p": pred["p"], - "phil": pred["phil"], - }, - ("t", "x", "y"), - ("u", "v", "p", "phil"), - NTIME_PDE, - ) - - -def export(cfg: DictConfig): - # set model - model_psi = ppsci.arch.MLP(**cfg.MODEL.psi_net) - model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) - model_phil = ppsci.arch.MLP(**cfg.MODEL.phil_net) - - # transform - def transform_out(in_, out): - psi_y = out["psi"] - y = in_["y"] - x = in_["x"] - u = jacobian(psi_y, y, create_graph=False) - v = -jacobian(psi_y, x, create_graph=False) - return {"u": u, "v": v} - - # register transform - model_psi.register_output_transform(transform_out) - model_list = ppsci.arch.ModelList((model_psi, model_p, model_phil)) - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([None, 1], "float32", name=key) - for key in model_list.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - # load Data - data = scipy.io.loadmat(cfg.DATA_PATH) - # normalize data - p_max = data["p"].max(axis=0) - p_min = data["p"].min(axis=0) - u_max = data["u"].max(axis=0) - u_min = data["u"].min(axis=0) - v_max = data["v"].max(axis=0) - v_min = data["v"].min(axis=0) - - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - # set time-geometry - timestamps = np.linspace(0, 126, 127, endpoint=True) - geom = { - "time_rect_visu": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(1, 126, timestamps=timestamps), - ppsci.geometry.Rectangle((0, 0), (15, 5)), - ), - } - NTIME_ALL = len(timestamps) - NPOINT_PDE, NTIME_PDE = 300 * 100, NTIME_ALL - 1 - input_dict = geom["time_rect_visu"].sample_interior( - NPOINT_PDE * NTIME_PDE, evenly=True - ) - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - # inverse normalization - p_pred = output_dict["p"].reshape([NTIME_PDE, NPOINT_PDE]).T - u_pred = output_dict["u"].reshape([NTIME_PDE, NPOINT_PDE]).T - v_pred = output_dict["v"].reshape([NTIME_PDE, NPOINT_PDE]).T - pred = { - "p": (p_pred * (p_max - p_min) + p_min).T.reshape([-1, 1]), - "u": (u_pred * (u_max - u_min) + u_min).T.reshape([-1, 1]), - "v": (v_pred * (v_max - v_min) + v_min).T.reshape([-1, 1]), - "phil": output_dict["phil"], - } - ppsci.visualize.save_vtu_from_dict( - "./visual/bubble_pred.vtu", - { - "t": input_dict["t"], - "x": input_dict["x"], - "y": input_dict["y"], - "u": pred["u"], - "v": pred["v"], - "p": pred["p"], - "phil": pred["phil"], - }, - ("t", "x", "y"), - ("u", "v", "p", "phil"), - NTIME_PDE, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="bubble.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/hanfengzhai/BubbleNet +Bubble data files download link: https://paddle-org.bj.bcebos.com/paddlescience/datasets/BubbleNet/bubble.mat +""" + +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # load Data + data = scipy.io.loadmat(cfg.DATA_PATH) + # normalize data + p_max = data["p"].max(axis=0) + p_min = data["p"].min(axis=0) + p_norm = (data["p"] - p_min) / (p_max - p_min) + u_max = data["u"].max(axis=0) + u_min = data["u"].min(axis=0) + u_norm = (data["u"] - u_min) / (u_max - u_min) + v_max = data["v"].max(axis=0) + v_min = data["v"].min(axis=0) + v_norm = (data["v"] - v_min) / (v_max - v_min) + + u_star = u_norm # N x T + v_star = v_norm # N x T + p_star = p_norm # N x T + phil_star = data["phil"] # N x T + t_star = data["t"] # T x 1 + x_star = data["X"] # N x 2 + + N = x_star.shape[0] + T = t_star.shape[0] + + # rearrange data + xx = np.tile(x_star[:, 0:1], (1, T)) # N x T + yy = np.tile(x_star[:, 1:2], (1, T)) # N x T + tt = np.tile(t_star, (1, N)).T # N x T + + x = xx.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + y = yy.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + t = tt.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + + u = u_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + v = v_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + p = p_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + phil = phil_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + + idx = np.random.choice(N * T, int(N * T * 0.75), replace=False) + # train data + train_input = {"x": x[idx, :], "y": y[idx, :], "t": t[idx, :]} + train_label = {"u": u[idx, :], "v": v[idx, :], "p": p[idx, :], "phil": phil[idx, :]} + + # eval data + test_input = {"x": x, "y": y, "t": t} + test_label = {"u": u, "v": v, "p": p, "phil": phil} + + # set model + model_psi = ppsci.arch.MLP(**cfg.MODEL.psi_net) + model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) + model_phil = ppsci.arch.MLP(**cfg.MODEL.phil_net) + + # transform + def transform_out(in_, out): + psi_y = out["psi"] + y = in_["y"] + x = in_["x"] + u = jacobian(psi_y, y) + v = -jacobian(psi_y, x) + return {"u": u, "v": v} + + # register transform + model_psi.register_output_transform(transform_out) + model_list = ppsci.arch.ModelList((model_psi, model_p, model_phil)) + + # set time-geometry + # set timestamps(including initial t0) + timestamps = np.linspace(0, 126, 127, endpoint=True) + geom = { + "time_rect": ppsci.geometry.PointCloud( + 
train_input, + ("t", "x", "y"), + ), + "time_rect_visu": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(1, 126, timestamps=timestamps), + ppsci.geometry.Rectangle((0, 0), (15, 5)), + ), + } + + NTIME_ALL = len(timestamps) + NPOINT_PDE, NTIME_PDE = 300 * 100, NTIME_ALL - 1 + + # set constraint + pde_constraint = ppsci.constraint.InteriorConstraint( + { + "pressure_Poisson": lambda out: hessian(out["p"], out["x"]) + + hessian(out["p"], out["y"]) + }, + {"pressure_Poisson": 0}, + geom["time_rect"], + { + "dataset": "IterableNamedArrayDataset", + "batch_size": cfg.TRAIN.batch_size.pde_constraint, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean"), + name="EQ", + ) + + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": train_input, + "label": train_label, + }, + "batch_size": cfg.TRAIN.batch_size.sup_constraint, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + name="Sup", + ) + + # wrap constraints together + constraint = { + sup_constraint.name: sup_constraint, + pde_constraint.name: pde_constraint, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_list) + + # set validator + mse_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_input, + "label": test_label, + }, + "batch_size": cfg.TRAIN.batch_size.mse_validator, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.MSELoss("mean"), + metric={"MSE": ppsci.metric.MSE()}, + name="bubble_mse", + ) + validator = { + mse_validator.name: mse_validator, + } + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + geom=geom, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # visualize prediction after finished training + visu_mat = geom["time_rect_visu"].sample_interior( + NPOINT_PDE * NTIME_PDE, evenly=True + ) + # transform + def transform_out(in_, out): + psi_y = out["psi"] + y = in_["y"] + x = in_["x"] + u = jacobian(psi_y, y, create_graph=False) + v = -jacobian(psi_y, x, create_graph=False) + return {"u": u, "v": v} + + model_psi.register_output_transform(transform_out) + + pred_norm = solver.predict(visu_mat, None, 4096, no_grad=False, return_numpy=True) + # inverse normalization + p_pred = pred_norm["p"].reshape([NTIME_PDE, NPOINT_PDE]).T + u_pred = pred_norm["u"].reshape([NTIME_PDE, NPOINT_PDE]).T + v_pred = pred_norm["v"].reshape([NTIME_PDE, NPOINT_PDE]).T + pred = { + "p": (p_pred * (p_max - p_min) + p_min).T.reshape([-1, 1]), + "u": (u_pred * (u_max - u_min) + u_min).T.reshape([-1, 1]), + "v": (v_pred * (v_max - v_min) + v_min).T.reshape([-1, 1]), + "phil": pred_norm["phil"], + } + logger.message("Now saving visual result to: visual/result.vtu, please wait...") + ppsci.visualize.save_vtu_from_dict( + osp.join(cfg.output_dir, "visual/result.vtu"), + { + "t": visu_mat["t"], + "x": visu_mat["x"], + "y": visu_mat["y"], + "u": pred["u"], + "v": pred["v"], + "p": pred["p"], + "phil": pred["phil"], + }, + ("t", "x", "y"), + ("u", "v", "p", "phil"), + NTIME_PDE, + ) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + 
ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # load Data + data = scipy.io.loadmat(cfg.DATA_PATH) + # normalize data + p_max = data["p"].max(axis=0) + p_min = data["p"].min(axis=0) + p_norm = (data["p"] - p_min) / (p_max - p_min) + u_max = data["u"].max(axis=0) + u_min = data["u"].min(axis=0) + u_norm = (data["u"] - u_min) / (u_max - u_min) + v_max = data["v"].max(axis=0) + v_min = data["v"].min(axis=0) + v_norm = (data["v"] - v_min) / (v_max - v_min) + + u_star = u_norm # N x T + v_star = v_norm # N x T + p_star = p_norm # N x T + phil_star = data["phil"] # N x T + t_star = data["t"] # T x 1 + x_star = data["X"] # N x 2 + + N = x_star.shape[0] + T = t_star.shape[0] + + # rearrange data + xx = np.tile(x_star[:, 0:1], (1, T)) # N x T + yy = np.tile(x_star[:, 1:2], (1, T)) # N x T + tt = np.tile(t_star, (1, N)).T # N x T + + x = xx.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + y = yy.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + t = tt.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + + u = u_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + v = v_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + p = p_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + phil = phil_star.flatten()[:, None].astype(paddle.get_default_dtype()) # NT x 1 + + idx = np.random.choice(N * T, int(N * T * 0.75), replace=False) + # train data + train_input = {"x": x[idx, :], "y": y[idx, :], "t": t[idx, :]} + + # eval data + test_input = {"x": x, "y": y, "t": t} + test_label = {"u": u, "v": v, "p": p, "phil": phil} + + # set model + model_psi = ppsci.arch.MLP(**cfg.MODEL.psi_net) + model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) + model_phil = ppsci.arch.MLP(**cfg.MODEL.phil_net) + + # transform + def transform_out(in_, out): + psi_y = out["psi"] + y = in_["y"] + x = in_["x"] + u = jacobian(psi_y, y, create_graph=False) + v = -jacobian(psi_y, x, create_graph=False) + return {"u": u, "v": v} + + # register transform + model_psi.register_output_transform(transform_out) + model_list = ppsci.arch.ModelList((model_psi, model_p, model_phil)) + + # set time-geometry + # set timestamps(including initial t0) + timestamps = np.linspace(0, 126, 127, endpoint=True) + geom = { + "time_rect": ppsci.geometry.PointCloud( + train_input, + ("t", "x", "y"), + ), + "time_rect_visu": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(1, 126, timestamps=timestamps), + ppsci.geometry.Rectangle((0, 0), (15, 5)), + ), + } + + NTIME_ALL = len(timestamps) + NPOINT_PDE, NTIME_PDE = 300 * 100, NTIME_ALL - 1 + + # set validator + mse_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_input, + "label": test_label, + }, + "batch_size": cfg.TRAIN.batch_size.mse_validator, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.MSELoss("mean"), + metric={"MSE": ppsci.metric.MSE()}, + name="bubble_mse", + ) + validator = { + mse_validator.name: mse_validator, + } + + # directly evaluate pretrained model(optional) + solver = ppsci.solver.Solver( + model_list, + output_dir=cfg.output_dir, + geom=geom, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + + # visualize prediction + visu_mat = geom["time_rect_visu"].sample_interior( + NPOINT_PDE * NTIME_PDE, evenly=True + ) + + 
pred_norm = solver.predict( + visu_mat, None, 4096 * 2, no_grad=False, return_numpy=True + ) + # inverse normalization + p_pred = pred_norm["p"].reshape([NTIME_PDE, NPOINT_PDE]).T + u_pred = pred_norm["u"].reshape([NTIME_PDE, NPOINT_PDE]).T + v_pred = pred_norm["v"].reshape([NTIME_PDE, NPOINT_PDE]).T + pred = { + "p": (p_pred * (p_max - p_min) + p_min).T.reshape([-1, 1]), + "u": (u_pred * (u_max - u_min) + u_min).T.reshape([-1, 1]), + "v": (v_pred * (v_max - v_min) + v_min).T.reshape([-1, 1]), + "phil": pred_norm["phil"], + } + logger.message("Now saving visual result to: visual/result.vtu, please wait...") + ppsci.visualize.save_vtu_from_dict( + osp.join(cfg.output_dir, "visual/result.vtu"), + { + "t": visu_mat["t"], + "x": visu_mat["x"], + "y": visu_mat["y"], + "u": pred["u"], + "v": pred["v"], + "p": pred["p"], + "phil": pred["phil"], + }, + ("t", "x", "y"), + ("u", "v", "p", "phil"), + NTIME_PDE, + ) + + +def export(cfg: DictConfig): + # set model + model_psi = ppsci.arch.MLP(**cfg.MODEL.psi_net) + model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) + model_phil = ppsci.arch.MLP(**cfg.MODEL.phil_net) + + # transform + def transform_out(in_, out): + psi_y = out["psi"] + y = in_["y"] + x = in_["x"] + u = jacobian(psi_y, y, create_graph=False) + v = -jacobian(psi_y, x, create_graph=False) + return {"u": u, "v": v} + + # register transform + model_psi.register_output_transform(transform_out) + model_list = ppsci.arch.ModelList((model_psi, model_p, model_phil)) + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 1], "float32", name=key) + for key in model_list.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + # load Data + data = scipy.io.loadmat(cfg.DATA_PATH) + # normalize data + p_max = data["p"].max(axis=0) + p_min = data["p"].min(axis=0) + u_max = data["u"].max(axis=0) + u_min = data["u"].min(axis=0) + v_max = data["v"].max(axis=0) + v_min = data["v"].min(axis=0) + + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + # set time-geometry + timestamps = np.linspace(0, 126, 127, endpoint=True) + geom = { + "time_rect_visu": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(1, 126, timestamps=timestamps), + ppsci.geometry.Rectangle((0, 0), (15, 5)), + ), + } + NTIME_ALL = len(timestamps) + NPOINT_PDE, NTIME_PDE = 300 * 100, NTIME_ALL - 1 + input_dict = geom["time_rect_visu"].sample_interior( + NPOINT_PDE * NTIME_PDE, evenly=True + ) + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + # inverse normalization + p_pred = output_dict["p"].reshape([NTIME_PDE, NPOINT_PDE]).T + u_pred = output_dict["u"].reshape([NTIME_PDE, NPOINT_PDE]).T + v_pred = output_dict["v"].reshape([NTIME_PDE, NPOINT_PDE]).T + pred = { + "p": (p_pred * (p_max - p_min) + p_min).T.reshape([-1, 1]), + "u": (u_pred * (u_max - u_min) + u_min).T.reshape([-1, 1]), + "v": (v_pred * (v_max - v_min) + v_min).T.reshape([-1, 1]), + "phil": output_dict["phil"], + } + ppsci.visualize.save_vtu_from_dict( + "./visual/bubble_pred.vtu", + { + "t": input_dict["t"], + "x": input_dict["x"], + "y": input_dict["y"], + "u": pred["u"], + "v": pred["v"], + "p": 
pred["p"], + "phil": pred["phil"], + }, + ("t", "x", "y"), + ("u", "v", "p", "phil"), + NTIME_PDE, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="bubble.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/bubble/conf/bubble.yaml b/examples/bubble/conf/bubble.yaml index 74fe9caf96..6b9a04ef81 100644 --- a/examples/bubble/conf/bubble.yaml +++ b/examples/bubble/conf/bubble.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -85,3 +86,91 @@ INFER: max_batch_size: 8192 num_cpu_threads: 10 batch_size: 8192 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_bubble/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 +DATA_PATH: bubble.mat + +# model settings +MODEL: + psi_net: + input_keys: ["t", "x", "y"] + output_keys: ["psi"] + num_layers: 9 + hidden_size: 30 + activation: "tanh" + p_net: + input_keys: ["t", "x", "y"] + output_keys: ["p"] + num_layers: 9 + hidden_size: 30 + activation: "tanh" + phil_net: + input_keys: ["t", "x", "y"] + output_keys: ["phil"] + num_layers: 9 + hidden_size: 30 + activation: "tanh" + output_keys: ["u", "v", "p", "phil"] + +# training settings +TRAIN: + epochs: 10000 + iters_per_epoch: 1 + eval_during_train: true + eval_freq: 1000 + learning_rate: 0.001 + batch_size: + pde_constraint: 228595 + sup_constraint: 2419 + mse_validator: 2419 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/bubble/bubble_pretrained.pdparams + export_path: ./inference/bubble + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 8192 + num_cpu_threads: 10 + batch_size: 8192 +>>>>>>> Stashed changes diff --git a/examples/catheter/catheter.py b/examples/catheter/catheter.py index 2282a8623a..fa4da80062 100644 --- a/examples/catheter/catheter.py +++ b/examples/catheter/catheter.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -300,3 +301,308 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from os import path as osp + +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.loss import L2RelLoss +from ppsci.optimizer import Adam +from ppsci.optimizer import lr_scheduler +from ppsci.utils import logger + + +# build data +def getdata( + x_path, + y_path, + para_path, + output_path, + n_data, + n, + s, + is_train=True, + is_inference=False, +): + # load data + inputX_raw = np.load(x_path)[:, 0:n_data] + inputY_raw = np.load(y_path)[:, 0:n_data] + inputPara_raw = np.load(para_path)[:, 0:n_data] + output_raw = np.load(output_path)[:, 0:n_data] + + # preprocess data + inputX = inputX_raw[:, 0::3] + inputY = inputY_raw[:, 0::3] + inputPara = inputPara_raw[:, 0::3] + label = (output_raw[:, 0::3] + output_raw[:, 1::3] + output_raw[:, 2::3]) / 3.0 + + if is_inference: + inputX = np.transpose(inputX, (1, 0)) + inputY = np.transpose(inputY, (1, 0)) + input = np.stack(arrays=[inputX, inputY], axis=-1).astype(np.float32) + input = input.reshape(n, s, 2) + return input + + inputX = paddle.to_tensor(data=inputX, dtype="float32").transpose(perm=[1, 0]) + inputY = paddle.to_tensor(data=inputY, dtype="float32").transpose(perm=[1, 0]) + input = paddle.stack(x=[inputX, inputY], axis=-1) + label = paddle.to_tensor(data=label, dtype="float32").transpose(perm=[1, 0]) + if is_train: + index = paddle.randperm(n=n) + index = index[:n] + input = paddle.index_select(input, index) + label = paddle.index_select(label, index) + input = input.reshape([n, s, 2]) + else: + input = input.reshape([n, s, 2]) + label = label.unsqueeze(axis=-1) + return input, label, inputPara + + +def plot(input: np.ndarray, out_pred: np.ndarray, output_dir: str): + os.makedirs(output_dir, exist_ok=True) + fig_path = osp.join(output_dir, "inference.png") + + xx = np.linspace(-500, 0, 2001) + fig = plt.figure(figsize=(5, 4)) + plt.plot(input[:, 0], input[:, 1], color="C1", label="Channel geometry") + plt.plot(input[:, 0], 100 - input[:, 1], color="C1") + plt.plot( + xx, + out_pred, + "--*", + color="C2", + fillstyle="none", + markevery=len(xx) // 10, + label="Predicted bacteria distribution", + ) + plt.xlabel(r"x") + plt.legend() + plt.tight_layout() + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + ppsci.utils.logger.info(f"Saving figure to {fig_path}") + + +def train(cfg: DictConfig): + # generate training dataset + inputs_train, labels_train, _ = getdata(**cfg.TRAIN_DATA, is_train=True) + + # set constraints + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": inputs_train}, + "label": {"output": labels_train}, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + L2RelLoss(reduction="sum"), + name="sup_constraint", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set model + model = ppsci.arch.FNO1d(**cfg.MODEL) + if cfg.TRAIN.use_pretrained_model is True: + logger.info( + "Loading pretrained model from 
{}".format(cfg.TRAIN.pretrained_model_path) + ) + model.set_state_dict(paddle.load(cfg.TRAIN.pretrained_model_path)) + + # set optimizer + ITERS_PER_EPOCH = int(cfg.TRAIN_DATA.n / cfg.TRAIN.batch_size) + scheduler = lr_scheduler.Step( + **cfg.TRAIN.lr_scheduler, iters_per_epoch=ITERS_PER_EPOCH + ) + optimizer = Adam(scheduler(), weight_decay=cfg.TRAIN.weight_decay)(model) + + # generate test dataset + inputs_test, labels_test, _ = getdata(**cfg.TEST_DATA, is_train=False) + + # set validator + l2rel_validator = { + "validator1": ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": inputs_test}, + "label": {"output": labels_test}, + }, + "batch_size": cfg.TRAIN.batch_size, + }, + L2RelLoss(reduction="sum"), + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="L2Rel_Validator", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + eval_with_no_grad=True, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=l2rel_validator, + save_freq=cfg.TRAIN.save_freq, + ) + + # train model + solver.train() + # plot losses + solver.plot_loss_history(by_epoch=True, smooth_step=1) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.FNO1d(**cfg.MODEL) + ppsci.utils.save_load.load_pretrain( + model, + cfg.EVAL.pretrained_model_path, + ) + + # set data + x_test, y_test, para = getdata(**cfg.TEST_DATA, is_train=False) + y_test = y_test.numpy() + + for sample_id in [0, 8]: + sample, uf, L_p, x1, x2, x3, h = para[:, sample_id] + mesh = x_test[sample_id, :, :] + mesh = mesh.numpy() + + y_test_pred = ( + paddle.exp( + model({"input": x_test[sample_id : sample_id + 1, :, :]})["output"] + ) + .numpy() + .flatten() + ) + logger.info( + "rel. 
error is ", + np.linalg.norm(y_test_pred - y_test[sample_id, :].flatten()) + / np.linalg.norm(y_test[sample_id, :].flatten()), + ) + xx = np.linspace(-500, 0, 2001) + plt.figure(figsize=(5, 4)) + + plt.plot(mesh[:, 0], mesh[:, 1], color="C1", label="Channel geometry") + plt.plot(mesh[:, 0], 100 - mesh[:, 1], color="C1") + + plt.plot( + xx, + y_test[sample_id, :], + "--o", + color="red", + markevery=len(xx) // 10, + label="Reference", + ) + plt.plot( + xx, + y_test_pred, + "--*", + color="C2", + fillstyle="none", + markevery=len(xx) // 10, + label="Predicted bacteria distribution", + ) + + plt.xlabel(r"x") + + plt.legend() + plt.tight_layout() + plt.savefig(f"Validation.{sample_id}.pdf") + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.FNO1d(**cfg.MODEL) + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 2001, 2], "float32", name=key) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy import python_infer + + predictor = python_infer.GeneralPredictor(cfg) + + # evaluate + input = getdata(**cfg.TEST_DATA, is_train=False, is_inference=True) + input_dict = {"input": input} + + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_keys = ["output"] + output_dict = { + store_key: paddle.exp(paddle.to_tensor(output_dict[infer_key])) + .numpy() + .flatten() + for store_key, infer_key in zip(output_keys, output_dict.keys()) + } + + mesh = input_dict["input"][5, :, :] + yy = output_dict["output"][5] + plot(mesh, yy, cfg.output_dir) + + +@hydra.main(version_base=None, config_path="./conf", config_name="catheter.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/catheter/conf/catheter.yaml b/examples/catheter/conf/catheter.yaml index 40b732b81c..2cbfa7b61c 100644 --- a/examples/catheter/conf/catheter.yaml +++ b/examples/catheter/conf/catheter.yaml @@ -1,93 +1,93 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_geofno/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 -data_path: ./data - -TRAIN_DATA: - x_path: "${data_path}/training/x_1d_structured_mesh.npy" - y_path: "${data_path}/training/y_1d_structured_mesh.npy" - para_path: "${data_path}/training/data_info.npy" - output_path: "${data_path}/training/density_1d_data.npy" - n_data: 3000 - n: 1000 - s: 2001 - 
-TEST_DATA: - x_path: "${data_path}/test/x_1d_structured_mesh.npy" - y_path: "${data_path}/test/y_1d_structured_mesh.npy" - para_path: "${data_path}/test/data_info.npy" - output_path: "${data_path}/test/density_1d_data.npy" - n_data: 300 - n: 100 - s: 2001 - -# model settings -MODEL: - modes: 64 - width: 64 - padding: 100 - input_channel: 2 - output_np: 2001 - -# training settings -TRAIN: - use_pretrained_model: False - pretrained_model_path: null - lr_scheduler: - epochs: 1001 - learning_rate: 0.001 - step_size: 100 - gamma: 0.5 - epochs: 1001 - weight_decay: 0.0001 - eval_during_train: true - batch_size: 20 - save_freq: 100 - -# evaluation settings -EVAL: - pretrained_model_path: null - -# inference settings -INFER: - pretrained_model_path: https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/result_GeoFNO.pdparams - export_path: ./inference/catheter - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 20 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_geofno/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 +data_path: ./data + +TRAIN_DATA: + x_path: "${data_path}/training/x_1d_structured_mesh.npy" + y_path: "${data_path}/training/y_1d_structured_mesh.npy" + para_path: "${data_path}/training/data_info.npy" + output_path: "${data_path}/training/density_1d_data.npy" + n_data: 3000 + n: 1000 + s: 2001 + +TEST_DATA: + x_path: "${data_path}/test/x_1d_structured_mesh.npy" + y_path: "${data_path}/test/y_1d_structured_mesh.npy" + para_path: "${data_path}/test/data_info.npy" + output_path: "${data_path}/test/density_1d_data.npy" + n_data: 300 + n: 100 + s: 2001 + +# model settings +MODEL: + modes: 64 + width: 64 + padding: 100 + input_channel: 2 + output_np: 2001 + +# training settings +TRAIN: + use_pretrained_model: False + pretrained_model_path: null + lr_scheduler: + epochs: 1001 + learning_rate: 0.001 + step_size: 100 + gamma: 0.5 + epochs: 1001 + weight_decay: 0.0001 + eval_during_train: true + batch_size: 20 + save_freq: 100 + +# evaluation settings +EVAL: + pretrained_model_path: null + +# inference settings +INFER: + pretrained_model_path: https://dataset.bj.bcebos.com/PaddleScience/2024%20AI-aided%20geometric%20design%20of%20anti-infection%20catheters/result_GeoFNO.pdparams + export_path: ./inference/catheter + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 20 diff --git 
a/examples/cfdgcn/cfdgcn.py b/examples/cfdgcn/cfdgcn.py index d06d9c0c92..9025035b84 100644 --- a/examples/cfdgcn/cfdgcn.py +++ b/examples/cfdgcn/cfdgcn.py @@ -1,265 +1,265 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -from typing import Dict -from typing import List - -import hydra -import paddle -import pgl -import su2paddle -import utils -from omegaconf import DictConfig -from paddle.nn import functional as F - -import ppsci -from ppsci.utils import logger - - -def train_mse_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "pgl.Graph"], - *args, -) -> paddle.Tensor: - return {"pred": F.mse_loss(output_dict["pred"], label_dict["label"].y)} - - -def eval_rmse_func( - output_dict: Dict[str, List["paddle.Tensor"]], - label_dict: Dict[str, List["pgl.Graph"]], - *args, -) -> Dict[str, paddle.Tensor]: - mse_losses = [ - F.mse_loss(pred, label.y) - for (pred, label) in zip(output_dict["pred"], label_dict["label"]) - ] - return {"RMSE": (sum(mse_losses) / len(mse_losses)) ** 0.5} - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info") - - # set dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "MeshAirfoilDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.TRAIN_DATA_DIR, - "mesh_graph_path": cfg.TRAIN_MESH_GRAPH_PATH, - "transpose_edges": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - output_expr={"pred": lambda out: out["pred"]}, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - name="Sup", - ) - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - process_sim = sup_constraint.data_loader.dataset._preprocess - fine_marker_dict = sup_constraint.data_loader.dataset.marker_dict - - # set model - model = ppsci.arch.CFDGCN( - **cfg.MODEL, - process_sim=process_sim, - fine_marker_dict=fine_marker_dict, - su2_module=su2paddle.SU2Module, - ) - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "MeshAirfoilDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.EVAL_DATA_DIR, - "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, - "transpose_edges": True, - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - rmse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, - metric={"RMSE": 
ppsci.metric.FunctionalMetric(eval_rmse_func)}, - name="RMSE_validator", - ) - validator = {rmse_validator.name: rmse_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - validator=validator, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - - # train model - solver.train() - - # visualize prediction - with solver.no_grad_context_manager(True): - for index, (input_, label, _) in enumerate(rmse_validator.data_loader): - truefield = label["label"].y - prefield = model(input_) - utils.log_images( - input_["input"].pos, - prefield["pred"], - truefield, - rmse_validator.data_loader.dataset.elems_list, - index, - "cylinder", - ) - - -def evaluate(cfg: DictConfig): - # set dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "MeshAirfoilDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.TRAIN_DATA_DIR, - "mesh_graph_path": cfg.TRAIN_MESH_GRAPH_PATH, - "transpose_edges": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - output_expr={"pred": lambda out: out["pred"]}, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - name="Sup", - ) - - process_sim = sup_constraint.data_loader.dataset._preprocess - fine_marker_dict = sup_constraint.data_loader.dataset.marker_dict - - # set airfoil model - model = ppsci.arch.CFDGCN( - **cfg.MODEL, - process_sim=process_sim, - fine_marker_dict=fine_marker_dict, - su2_module=su2paddle.SU2Module, - ) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "MeshAirfoilDataset", - "input_keys": ("input",), - "label_keys": ("label",), - "data_dir": cfg.EVAL_DATA_DIR, - "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, - "transpose_edges": True, - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - rmse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_mse_func), - output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, - metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, - name="RMSE_validator", - ) - validator = {rmse_validator.name: rmse_validator} - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # evaluate model - solver.eval() - - # visualize prediction - with solver.no_grad_context_manager(True): - for index, (input_, label, _) in enumerate(rmse_validator.data_loader): - truefield = label["label"].y - prefield = model(input_) - utils.log_images( - input_["input"].pos, - prefield["pred"], - truefield, - rmse_validator.data_loader.dataset.elems_list, - index, - "cylinder", - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="cfdgcn.yaml") -def main(cfg: DictConfig): - su2paddle.activate_su2_mpi(remove_temp_files=True) - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode 
should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import Dict +from typing import List + +import hydra +import paddle +import pgl +import su2paddle +import utils +from omegaconf import DictConfig +from paddle.nn import functional as F + +import ppsci +from ppsci.utils import logger + + +def train_mse_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "pgl.Graph"], + *args, +) -> paddle.Tensor: + return {"pred": F.mse_loss(output_dict["pred"], label_dict["label"].y)} + + +def eval_rmse_func( + output_dict: Dict[str, List["paddle.Tensor"]], + label_dict: Dict[str, List["pgl.Graph"]], + *args, +) -> Dict[str, paddle.Tensor]: + mse_losses = [ + F.mse_loss(pred, label.y) + for (pred, label) in zip(output_dict["pred"], label_dict["label"]) + ] + return {"RMSE": (sum(mse_losses) / len(mse_losses)) ** 0.5} + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info") + + # set dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "MeshAirfoilDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.TRAIN_DATA_DIR, + "mesh_graph_path": cfg.TRAIN_MESH_GRAPH_PATH, + "transpose_edges": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + output_expr={"pred": lambda out: out["pred"]}, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + name="Sup", + ) + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + process_sim = sup_constraint.data_loader.dataset._preprocess + fine_marker_dict = sup_constraint.data_loader.dataset.marker_dict + + # set model + model = ppsci.arch.CFDGCN( + **cfg.MODEL, + process_sim=process_sim, + fine_marker_dict=fine_marker_dict, + su2_module=su2paddle.SU2Module, + ) + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "MeshAirfoilDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.EVAL_DATA_DIR, + "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, + "transpose_edges": True, + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + rmse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, + metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, + name="RMSE_validator", + ) + validator = {rmse_validator.name: 
rmse_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + validator=validator, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + + # train model + solver.train() + + # visualize prediction + with solver.no_grad_context_manager(True): + for index, (input_, label, _) in enumerate(rmse_validator.data_loader): + truefield = label["label"].y + prefield = model(input_) + utils.log_images( + input_["input"].pos, + prefield["pred"], + truefield, + rmse_validator.data_loader.dataset.elems_list, + index, + "cylinder", + ) + + +def evaluate(cfg: DictConfig): + # set dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "MeshAirfoilDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.TRAIN_DATA_DIR, + "mesh_graph_path": cfg.TRAIN_MESH_GRAPH_PATH, + "transpose_edges": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + output_expr={"pred": lambda out: out["pred"]}, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + name="Sup", + ) + + process_sim = sup_constraint.data_loader.dataset._preprocess + fine_marker_dict = sup_constraint.data_loader.dataset.marker_dict + + # set airfoil model + model = ppsci.arch.CFDGCN( + **cfg.MODEL, + process_sim=process_sim, + fine_marker_dict=fine_marker_dict, + su2_module=su2paddle.SU2Module, + ) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "MeshAirfoilDataset", + "input_keys": ("input",), + "label_keys": ("label",), + "data_dir": cfg.EVAL_DATA_DIR, + "mesh_graph_path": cfg.EVAL_MESH_GRAPH_PATH, + "transpose_edges": True, + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + rmse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_mse_func), + output_expr={"pred": lambda out: out["pred"].unsqueeze(0)}, + metric={"RMSE": ppsci.metric.FunctionalMetric(eval_rmse_func)}, + name="RMSE_validator", + ) + validator = {rmse_validator.name: rmse_validator} + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # evaluate model + solver.eval() + + # visualize prediction + with solver.no_grad_context_manager(True): + for index, (input_, label, _) in enumerate(rmse_validator.data_loader): + truefield = label["label"].y + prefield = model(input_) + utils.log_images( + input_["input"].pos, + prefield["pred"], + truefield, + rmse_validator.data_loader.dataset.elems_list, + index, + "cylinder", + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="cfdgcn.yaml") +def main(cfg: DictConfig): + su2paddle.activate_su2_mpi(remove_temp_files=True) + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git 
a/examples/cfdgcn/cleanup.sh b/examples/cfdgcn/cleanup.sh index d33d90fa27..c8532aed3b 100755 --- a/examples/cfdgcn/cleanup.sh +++ b/examples/cfdgcn/cleanup.sh @@ -1,13 +1,13 @@ -rm -rf temp_meshes -rm -rf temp_data - -rm adjoint.dat -rm flow.dat -rm history.dat -rm forces_breakdown.dat -rm surface_adjoint.dat -rm surface_flow.dat - -rm surface_adjoint.csv -rm surface_flow.csv -rm *_write_config_temp.cfg +rm -rf temp_meshes +rm -rf temp_data + +rm adjoint.dat +rm flow.dat +rm history.dat +rm forces_breakdown.dat +rm surface_adjoint.dat +rm surface_flow.dat + +rm surface_adjoint.csv +rm surface_flow.csv +rm *_write_config_temp.cfg diff --git a/examples/cfdgcn/coarse.cfg b/examples/cfdgcn/coarse.cfg index d73d67ff97..16d51fcb02 100644 --- a/examples/cfdgcn/coarse.cfg +++ b/examples/cfdgcn/coarse.cfg @@ -1,357 +1,357 @@ -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% % -% SU2 configuration file % -% Case description: Transonic inviscid flow around a NACA0012 airfoil % -% Author: Thomas D. Economon % -% Institution: Stanford University % -% Date: 2014.06.11 % -% File Version 6.2.0 "Falcon" % -% % -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -% ------------- DIRECT, ADJOINT, AND LINEARIZED PROBLEM DEFINITION ------------% -% -% Physical governing equations (EULER, NAVIER_STOKES, -% WAVE_EQUATION, HEAT_EQUATION, FEM_ELASTICITY, -% POISSON_EQUATION) -PHYSICAL_PROBLEM= EULER -% -% Mathematical problem (DIRECT, CONTINUOUS_ADJOINT) -MATH_PROBLEM= DIRECT -% -% Restart solution (NO, YES) -RESTART_SOL= NO - -% ----------- COMPRESSIBLE AND INCOMPRESSIBLE FREE-STREAM DEFINITION ----------% -% -% Mach number (non-dimensional, based on the free-stream values) -MACH_NUMBER= 0.8 -% -% Angle of attack (degrees) -AOA= 1.25 -% -% Free-stream pressure (101325.0 N/m^2 by default, only Euler flows) -FREESTREAM_PRESSURE= 101325.0 -% -% Free-stream temperature (273.15 K by default) -FREESTREAM_TEMPERATURE= 273.15 - -% -------------- COMPRESSIBLE AND INCOMPRESSIBLE FLUID CONSTANTS --------------% -% -% Ratio of specific heats (1.4 (air), only for compressible flows) -GAMMA_VALUE= 1.4 -% -% Specific gas constant (287.87 J/kg*K (air), only for compressible flows) -GAS_CONSTANT= 287.87 - -% ---------------------- REFERENCE VALUE DEFINITION ---------------------------% -% -% Reference origin for moment computation -REF_ORIGIN_MOMENT_X = 0.25 -REF_ORIGIN_MOMENT_Y = 0.00 -REF_ORIGIN_MOMENT_Z = 0.00 -% -% Reference length for pitching, rolling, and yawing non-dimensional moment -REF_LENGTH= 1.0 -% -% Reference area for force coefficients (0 implies automatic calculation) -REF_AREA= 1.0 -% -% Flow non-dimensionalization (DIMENSIONAL, FREESTREAM_PRESS_EQ_ONE, -% FREESTREAM_VEL_EQ_MACH, FREESTREAM_VEL_EQ_ONE) -REF_DIMENSIONALIZATION= DIMENSIONAL - -% ----------------------- BOUNDARY CONDITION DEFINITION -----------------------% -% -% Marker of the Euler boundary (NONE = no marker) -MARKER_EULER= ( airfoil ) -% -% Marker of the far field (NONE = no marker) -MARKER_FAR= ( farfield ) - -% ------------------------ SURFACES IDENTIFICATION ----------------------------% -% -% Marker(s) of the surface in the surface flow solution file -MARKER_PLOTTING = ( airfoil ) -% -% Marker(s) of the surface where the non-dimensional coefficients are evaluated. -MARKER_MONITORING = ( airfoil ) -% -% Marker(s) of the surface where obj. func. 
(design problem) will be evaluated -MARKER_DESIGNING = ( airfoil ) - -% ------------- COMMON PARAMETERS TO DEFINE THE NUMERICAL METHOD --------------% -% -% Numerical method for spatial gradients (GREEN_GAUSS, WEIGHTED_LEAST_SQUARES) -NUM_METHOD_GRAD= WEIGHTED_LEAST_SQUARES -% -% Objective function in optimization problem (DRAG, LIFT, SIDEFORCE, MOMENT_X, -% MOMENT_Y, MOMENT_Z, EFFICIENCY, -% EQUIVALENT_AREA, NEARFIELD_PRESSURE, -% FORCE_X, FORCE_Y, FORCE_Z, THRUST, -% TORQUE, FREE_SURFACE, TOTAL_HEATFLUX, -% MAXIMUM_HEATFLUX, INVERSE_DESIGN_PRESSURE, -% INVERSE_DESIGN_HEATFLUX) -% OBJECTIVE_FUNCTION= DRAG -% -% Courant-Friedrichs-Lewy condition of the finest grid -%CFL_NUMBER= 4.0 -CFL_NUMBER= 1.0 -%CFL_NUMBER=0.1 -% -% Number of total iterations -EXT_ITER=200 -ITER= 200 - -% ------------------------ LINEAR SOLVER DEFINITION ---------------------------% -% -% Linear solver for implicit formulations (BCGSTAB, FGMRES) -LINEAR_SOLVER= FGMRES -% -% Preconditioner of the Krylov linear solver (JACOBI, LINELET, LU_SGS) -LINEAR_SOLVER_PREC= LU_SGS -% -% Minimum error of the linear solver for implicit formulations -LINEAR_SOLVER_ERROR= 1E-6 -% -% Max number of iterations of the linear solver for the implicit formulation -LINEAR_SOLVER_ITER= 5 - -% -------------------------- MULTIGRID PARAMETERS -----------------------------% -% -% Multi-Grid Levels (0 = no multi-grid) -MGLEVEL= 2 -% -% Multi-grid cycle (V_CYCLE, W_CYCLE, FULLMG_CYCLE) -MGCYCLE= W_CYCLE -% -% Multi-Grid PreSmoothing Level -MG_PRE_SMOOTH= ( 1, 2, 3, 3 ) -% -% Multi-Grid PostSmoothing Level -MG_POST_SMOOTH= ( 0, 0, 0, 0 ) -% -% Jacobi implicit smoothing of the correction -MG_CORRECTION_SMOOTH= ( 0, 0, 0, 0 ) -% -% Damping factor for the residual restriction -MG_DAMP_RESTRICTION= 1.0 -% -% Damping factor for the correction prolongation -MG_DAMP_PROLONGATION= 1.0 - -% -------------------- FLOW NUMERICAL METHOD DEFINITION -----------------------% -% -% Convective numerical method (JST, LAX-FRIEDRICH, CUSP, ROE, AUSM, HLLC, -% TURKEL_PREC, MSW) -CONV_NUM_METHOD_FLOW= JST -% -% Monotonic Upwind Scheme for Conservation Laws (TVD) in the flow equations. -% Required for 2nd order upwind schemes (NO, YES) -MUSCL_FLOW= YES -% -% Slope limiter (NONE, VENKATAKRISHNAN, VENKATAKRISHNAN_WANG, -% BARTH_JESPERSEN, VAN_ALBADA_EDGE) -SLOPE_LIMITER_FLOW= VENKATAKRISHNAN -% -% 2nd and 4th order artificial dissipation coefficients -JST_SENSOR_COEFF= ( 0.5, 0.02 ) -% -% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT, EULER_EXPLICIT) -TIME_DISCRE_FLOW= EULER_IMPLICIT - -% ---------------- ADJOINT-FLOW NUMERICAL METHOD DEFINITION -------------------% -% -% Convective numerical method (JST, LAX-FRIEDRICH, ROE) -CONV_NUM_METHOD_ADJFLOW= JST -% -% Monotonic Upwind Scheme for Conservation Laws (TVD) in the adjoint flow equations. 
-% Required for 2nd order upwind schemes (NO, YES) -MUSCL_ADJFLOW= YES -% -% Slope limiter (NONE, VENKATAKRISHNAN, BARTH_JESPERSEN, VAN_ALBADA_EDGE, -% SHARP_EDGES, WALL_DISTANCE) -SLOPE_LIMITER_ADJFLOW= NONE -% -% Reduction factor of the CFL coefficient in the adjoint problem -CFL_REDUCTION_ADJFLOW= 0.5 -% -% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT) -TIME_DISCRE_ADJFLOW= EULER_IMPLICIT - -% ----------------------- DESIGN VARIABLE PARAMETERS --------------------------% -% -% Kind of deformation (NO_DEFORMATION, TRANSLATION, ROTATION, SCALE, -% FFD_SETTING, FFD_NACELLE -% FFD_CONTROL_POINT, FFD_CAMBER, FFD_THICKNESS, FFD_TWIST -% FFD_CONTROL_POINT_2D, FFD_CAMBER_2D, FFD_THICKNESS_2D, FFD_TWIST_2D, -% HICKS_HENNE, SURFACE_BUMP) -DV_KIND= HICKS_HENNE -% -% Marker of the surface in which we are going apply the shape deformation -DV_MARKER= ( airfoil ) -% -% Parameters of the shape deformation -% - NO_DEFORMATION ( 1.0 ) -% - TRANSLATION ( x_Disp, y_Disp, z_Disp ), as a unit vector -% - ROTATION ( x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) -% - SCALE ( 1.0 ) -% - ANGLE_OF_ATTACK ( 1.0 ) -% - FFD_SETTING ( 1.0 ) -% - FFD_CONTROL_POINT ( FFD_BoxTag, i_Ind, j_Ind, k_Ind, x_Disp, y_Disp, z_Disp ) -% - FFD_NACELLE ( FFD_BoxTag, rho_Ind, theta_Ind, phi_Ind, rho_Disp, phi_Disp ) -% - FFD_GULL ( FFD_BoxTag, j_Ind ) -% - FFD_ANGLE_OF_ATTACK ( FFD_BoxTag, 1.0 ) -% - FFD_CAMBER ( FFD_BoxTag, i_Ind, j_Ind ) -% - FFD_THICKNESS ( FFD_BoxTag, i_Ind, j_Ind ) -% - FFD_TWIST ( FFD_BoxTag, j_Ind, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) -% - FFD_CONTROL_POINT_2D ( FFD_BoxTag, i_Ind, j_Ind, x_Disp, y_Disp ) -% - FFD_CAMBER_2D ( FFD_BoxTag, i_Ind ) -% - FFD_THICKNESS_2D ( FFD_BoxTag, i_Ind ) -% - FFD_TWIST_2D ( FFD_BoxTag, x_Orig, y_Orig ) -% - HICKS_HENNE ( Lower Surface (0)/Upper Surface (1)/Only one Surface (2), x_Loc ) -% - SURFACE_BUMP ( x_Start, x_End, x_Loc ) -DV_PARAM= ( 1, 0.5 ) -% -% Value of the shape deformation -DV_VALUE= 0.01 - -% ------------------------ GRID DEFORMATION PARAMETERS ------------------------% -% -% Number of smoothing iterations for FEA mesh deformation -DEFORM_LINEAR_ITER= 500 -% -% Number of nonlinear deformation iterations (surface deformation increments) -DEFORM_NONLINEAR_ITER= 1 -% -% Minimum residual criteria for the linear solver convergence of grid deformation -DEFORM_LINEAR_SOLVER_ERROR= 1E-14 -% -% Print the residuals during mesh deformation to the console (YES, NO) -DEFORM_CONSOLE_OUTPUT= YES -% -% Type of element stiffness imposed for FEA mesh deformation (INVERSE_VOLUME, -% WALL_DISTANCE, CONSTANT_STIFFNESS) -DEFORM_STIFFNESS_TYPE= INVERSE_VOLUME -% -% Visualize the surface deformation (NO, YES) -VISUALIZE_SURFACE_DEF= NO -% -% Visualize the volume deformation (NO, YES) -VISUALIZE_VOLUME_DEF= NO - -% --------------------------- CONVERGENCE PARAMETERS --------------------------% -% Convergence criteria (CAUCHY, RESIDUAL) -% -CONV_CRITERIA= RESIDUAL -% -% Residual reduction (order of magnitude with respect to the initial value) -RESIDUAL_REDUCTION= 6 -% -% Min value of the residual (log10 of the residual) -RESIDUAL_MINVAL= -8 -% -% Start Cauchy criteria at iteration number -STARTCONV_ITER= 10 -% -% Number of elements to apply the criteria -CAUCHY_ELEMS= 100 -% -% Epsilon to control the series convergence -CAUCHY_EPS= 1E-6 -% -% Function to apply the criteria (LIFT, DRAG, SENS_GEOMETRY, SENS_MACH, -% DELTA_LIFT, DELTA_DRAG) -CAUCHY_FUNC_FLOW= DRAG - -% ------------------------- INPUT/OUTPUT INFORMATION --------------------------% -% Mesh input file 
-%MESH_FILENAME=mesh_NACA0012_xcoarse.su2 -MESH_FILENAME=passed_as_flag_to_train.py.su2 -% -% Mesh input file format (SU2, CGNS, NETCDF_ASCII) -MESH_FORMAT= SU2 -% -% Mesh output file -MESH_OUT_FILENAME= mesh_out.su2 -% -% Restart flow input file -SOLUTION_FLOW_FILENAME= solution_flow.dat -% -% Restart adjoint input file -SOLUTION_ADJ_FILENAME= solution_adj.dat -% -% Output file format (TECPLOT, PARAVIEW, TECPLOT_BINARY) -%OUTPUT_FORMAT= TECPLOT_BINARY -% -% Output file convergence history (w/o extension) -CONV_FILENAME= history -% -% Output file restart flow -RESTART_FLOW_FILENAME= restart_flow.dat -% -% Output file restart adjoint -RESTART_ADJ_FILENAME= restart_adj.dat -% -% Output file flow (w/o extension) variables -VOLUME_FLOW_FILENAME= flow -% -% Output file adjoint (w/o extension) variables -VOLUME_ADJ_FILENAME= adjoint -% -% Output Objective function gradient (using continuous adjoint) -GRAD_OBJFUNC_FILENAME= of_grad.dat -% -% Output file surface flow coefficient (w/o extension) -SURFACE_FLOW_FILENAME= surface_flow -% -% Output file surface adjoint coefficient (w/o extension) -SURFACE_ADJ_FILENAME= surface_adjoint -% -% Writing solution file frequency -WRT_SOL_FREQ= 1000 -% -% Writing convergence history frequency -WRT_CON_FREQ= 1000 - -% --------------------- OPTIMAL SHAPE DESIGN DEFINITION -----------------------% -% Available flow based objective functions or constraint functions -% DRAG, LIFT, SIDEFORCE, EFFICIENCY, -% FORCE_X, FORCE_Y, FORCE_Z, -% MOMENT_X, MOMENT_Y, MOMENT_Z, -% THRUST, TORQUE, FIGURE_OF_MERIT, -% EQUIVALENT_AREA, NEARFIELD_PRESSURE, -% TOTAL_HEATFLUX, MAXIMUM_HEATFLUX, -% INVERSE_DESIGN_PRESSURE, INVERSE_DESIGN_HEATFLUX, -% -% Available geometrical based objective functions or constraint functions -% AIRFOIL_AREA, AIRFOIL_THICKNESS, AIRFOIL_CHORD, AIRFOIL_TOC, AIRFOIL_AOA, -% WING_VOLUME, WING_MIN_THICKNESS, WING_MAX_THICKNESS, WING_MAX_CHORD, WING_MIN_TOC, WING_MAX_TWIST, WING_MAX_CURVATURE, WING_MAX_DIHEDRAL -% STATION#_WIDTH, STATION#_AREA, STATION#_THICKNESS, STATION#_CHORD, STATION#_TOC, -% STATION#_TWIST (where # is the index of the station defined in GEO_LOCATION_STATIONS) -% -% Available design variables -% HICKS_HENNE ( 1, Scale | Mark. List | Lower(0)/Upper(1) side, x_Loc ) -% NACA_4DIGITS ( 4, Scale | Mark. List | 1st digit, 2nd digit, 3rd and 4th digit ) -% TRANSLATION ( 5, Scale | Mark. List | x_Disp, y_Disp, z_Disp ) -% ROTATION ( 6, Scale | Mark. List | x_Axis, y_Axis, z_Axis, x_Turn, y_Turn, z_Turn ) -% FFD_CONTROL_POINT_2D ( 15, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, x_Mov, y_Mov ) -% FFD_CAMBER_2D ( 16, Scale | Mark. List | FFD_Box_ID, i_Ind ) -% FFD_THICKNESS_2D ( 17, Scale | Mark. 
List | FFD_Box_ID, i_Ind ) -% -% Optimization objective function with scaling factor -% ex= Objective * Scale -% OPT_OBJECTIVE= DRAG * 0.001 -% -% Optimization constraint functions with scaling factors, separated by semicolons -% ex= (Objective = Value ) * Scale, use '>','<','=' -% OPT_CONSTRAINT= ( LIFT > 0.328188 ) * 0.001; ( MOMENT_Z > 0.034068 ) * 0.001; ( AIRFOIL_THICKNESS > 0.11 ) * 0.001 -% -% Optimization design variables, separated by semicolons -% DEFINITION_DV= ( 1, 1.0 | airfoil | 0, 0.05 ); ( 1, 1.0 | airfoil | 0, 0.10 ); ( 1, 1.0 | airfoil | 0, 0.15 ); ( 1, 1.0 | airfoil | 0, 0.20 ); ( 1, 1.0 | airfoil | 0, 0.25 ); ( 1, 1.0 | airfoil | 0, 0.30 ); ( 1, 1.0 | airfoil | 0, 0.35 ); ( 1, 1.0 | airfoil | 0, 0.40 ); ( 1, 1.0 | airfoil | 0, 0.45 ); ( 1, 1.0 | airfoil | 0, 0.50 ); ( 1, 1.0 | airfoil | 0, 0.55 ); ( 1, 1.0 | airfoil | 0, 0.60 ); ( 1, 1.0 | airfoil | 0, 0.65 ); ( 1, 1.0 | airfoil | 0, 0.70 ); ( 1, 1.0 | airfoil | 0, 0.75 ); ( 1, 1.0 | airfoil | 0, 0.80 ); ( 1, 1.0 | airfoil | 0, 0.85 ); ( 1, 1.0 | airfoil | 0, 0.90 ); ( 1, 1.0 | airfoil | 0, 0.95 ); ( 1, 1.0 | airfoil | 1, 0.05 ); ( 1, 1.0 | airfoil | 1, 0.10 ); ( 1, 1.0 | airfoil | 1, 0.15 ); ( 1, 1.0 | airfoil | 1, 0.20 ); ( 1, 1.0 | airfoil | 1, 0.25 ); ( 1, 1.0 | airfoil | 1, 0.30 ); ( 1, 1.0 | airfoil | 1, 0.35 ); ( 1, 1.0 | airfoil | 1, 0.40 ); ( 1, 1.0 | airfoil | 1, 0.45 ); ( 1, 1.0 | airfoil | 1, 0.50 ); ( 1, 1.0 | airfoil | 1, 0.55 ); ( 1, 1.0 | airfoil | 1, 0.60 ); ( 1, 1.0 | airfoil | 1, 0.65 ); ( 1, 1.0 | airfoil | 1, 0.70 ); ( 1, 1.0 | airfoil | 1, 0.75 ); ( 1, 1.0 | airfoil | 1, 0.80 ); ( 1, 1.0 | airfoil | 1, 0.85 ); ( 1, 1.0 | airfoil | 1, 0.90 ); ( 1, 1.0 | airfoil | 1, 0.95 ) - - - - -DIFF_INPUTS= COORDS_X, COORDS_Y, AOA, MACH -DIFF_OUTPUTS= VEL_X, VEL_Y, PRESSURE +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% SU2 configuration file % +% Case description: Transonic inviscid flow around a NACA0012 airfoil % +% Author: Thomas D. 
Economon % +% Institution: Stanford University % +% Date: 2014.06.11 % +% File Version 6.2.0 "Falcon" % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ------------- DIRECT, ADJOINT, AND LINEARIZED PROBLEM DEFINITION ------------% +% +% Physical governing equations (EULER, NAVIER_STOKES, +% WAVE_EQUATION, HEAT_EQUATION, FEM_ELASTICITY, +% POISSON_EQUATION) +PHYSICAL_PROBLEM= EULER +% +% Mathematical problem (DIRECT, CONTINUOUS_ADJOINT) +MATH_PROBLEM= DIRECT +% +% Restart solution (NO, YES) +RESTART_SOL= NO + +% ----------- COMPRESSIBLE AND INCOMPRESSIBLE FREE-STREAM DEFINITION ----------% +% +% Mach number (non-dimensional, based on the free-stream values) +MACH_NUMBER= 0.8 +% +% Angle of attack (degrees) +AOA= 1.25 +% +% Free-stream pressure (101325.0 N/m^2 by default, only Euler flows) +FREESTREAM_PRESSURE= 101325.0 +% +% Free-stream temperature (273.15 K by default) +FREESTREAM_TEMPERATURE= 273.15 + +% -------------- COMPRESSIBLE AND INCOMPRESSIBLE FLUID CONSTANTS --------------% +% +% Ratio of specific heats (1.4 (air), only for compressible flows) +GAMMA_VALUE= 1.4 +% +% Specific gas constant (287.87 J/kg*K (air), only for compressible flows) +GAS_CONSTANT= 287.87 + +% ---------------------- REFERENCE VALUE DEFINITION ---------------------------% +% +% Reference origin for moment computation +REF_ORIGIN_MOMENT_X = 0.25 +REF_ORIGIN_MOMENT_Y = 0.00 +REF_ORIGIN_MOMENT_Z = 0.00 +% +% Reference length for pitching, rolling, and yawing non-dimensional moment +REF_LENGTH= 1.0 +% +% Reference area for force coefficients (0 implies automatic calculation) +REF_AREA= 1.0 +% +% Flow non-dimensionalization (DIMENSIONAL, FREESTREAM_PRESS_EQ_ONE, +% FREESTREAM_VEL_EQ_MACH, FREESTREAM_VEL_EQ_ONE) +REF_DIMENSIONALIZATION= DIMENSIONAL + +% ----------------------- BOUNDARY CONDITION DEFINITION -----------------------% +% +% Marker of the Euler boundary (NONE = no marker) +MARKER_EULER= ( airfoil ) +% +% Marker of the far field (NONE = no marker) +MARKER_FAR= ( farfield ) + +% ------------------------ SURFACES IDENTIFICATION ----------------------------% +% +% Marker(s) of the surface in the surface flow solution file +MARKER_PLOTTING = ( airfoil ) +% +% Marker(s) of the surface where the non-dimensional coefficients are evaluated. +MARKER_MONITORING = ( airfoil ) +% +% Marker(s) of the surface where obj. func. 
(design problem) will be evaluated +MARKER_DESIGNING = ( airfoil ) + +% ------------- COMMON PARAMETERS TO DEFINE THE NUMERICAL METHOD --------------% +% +% Numerical method for spatial gradients (GREEN_GAUSS, WEIGHTED_LEAST_SQUARES) +NUM_METHOD_GRAD= WEIGHTED_LEAST_SQUARES +% +% Objective function in optimization problem (DRAG, LIFT, SIDEFORCE, MOMENT_X, +% MOMENT_Y, MOMENT_Z, EFFICIENCY, +% EQUIVALENT_AREA, NEARFIELD_PRESSURE, +% FORCE_X, FORCE_Y, FORCE_Z, THRUST, +% TORQUE, FREE_SURFACE, TOTAL_HEATFLUX, +% MAXIMUM_HEATFLUX, INVERSE_DESIGN_PRESSURE, +% INVERSE_DESIGN_HEATFLUX) +% OBJECTIVE_FUNCTION= DRAG +% +% Courant-Friedrichs-Lewy condition of the finest grid +%CFL_NUMBER= 4.0 +CFL_NUMBER= 1.0 +%CFL_NUMBER=0.1 +% +% Number of total iterations +EXT_ITER=200 +ITER= 200 + +% ------------------------ LINEAR SOLVER DEFINITION ---------------------------% +% +% Linear solver for implicit formulations (BCGSTAB, FGMRES) +LINEAR_SOLVER= FGMRES +% +% Preconditioner of the Krylov linear solver (JACOBI, LINELET, LU_SGS) +LINEAR_SOLVER_PREC= LU_SGS +% +% Minimum error of the linear solver for implicit formulations +LINEAR_SOLVER_ERROR= 1E-6 +% +% Max number of iterations of the linear solver for the implicit formulation +LINEAR_SOLVER_ITER= 5 + +% -------------------------- MULTIGRID PARAMETERS -----------------------------% +% +% Multi-Grid Levels (0 = no multi-grid) +MGLEVEL= 2 +% +% Multi-grid cycle (V_CYCLE, W_CYCLE, FULLMG_CYCLE) +MGCYCLE= W_CYCLE +% +% Multi-Grid PreSmoothing Level +MG_PRE_SMOOTH= ( 1, 2, 3, 3 ) +% +% Multi-Grid PostSmoothing Level +MG_POST_SMOOTH= ( 0, 0, 0, 0 ) +% +% Jacobi implicit smoothing of the correction +MG_CORRECTION_SMOOTH= ( 0, 0, 0, 0 ) +% +% Damping factor for the residual restriction +MG_DAMP_RESTRICTION= 1.0 +% +% Damping factor for the correction prolongation +MG_DAMP_PROLONGATION= 1.0 + +% -------------------- FLOW NUMERICAL METHOD DEFINITION -----------------------% +% +% Convective numerical method (JST, LAX-FRIEDRICH, CUSP, ROE, AUSM, HLLC, +% TURKEL_PREC, MSW) +CONV_NUM_METHOD_FLOW= JST +% +% Monotonic Upwind Scheme for Conservation Laws (TVD) in the flow equations. +% Required for 2nd order upwind schemes (NO, YES) +MUSCL_FLOW= YES +% +% Slope limiter (NONE, VENKATAKRISHNAN, VENKATAKRISHNAN_WANG, +% BARTH_JESPERSEN, VAN_ALBADA_EDGE) +SLOPE_LIMITER_FLOW= VENKATAKRISHNAN +% +% 2nd and 4th order artificial dissipation coefficients +JST_SENSOR_COEFF= ( 0.5, 0.02 ) +% +% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT, EULER_EXPLICIT) +TIME_DISCRE_FLOW= EULER_IMPLICIT + +% ---------------- ADJOINT-FLOW NUMERICAL METHOD DEFINITION -------------------% +% +% Convective numerical method (JST, LAX-FRIEDRICH, ROE) +CONV_NUM_METHOD_ADJFLOW= JST +% +% Monotonic Upwind Scheme for Conservation Laws (TVD) in the adjoint flow equations. 
+% Required for 2nd order upwind schemes (NO, YES) +MUSCL_ADJFLOW= YES +% +% Slope limiter (NONE, VENKATAKRISHNAN, BARTH_JESPERSEN, VAN_ALBADA_EDGE, +% SHARP_EDGES, WALL_DISTANCE) +SLOPE_LIMITER_ADJFLOW= NONE +% +% Reduction factor of the CFL coefficient in the adjoint problem +CFL_REDUCTION_ADJFLOW= 0.5 +% +% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT) +TIME_DISCRE_ADJFLOW= EULER_IMPLICIT + +% ----------------------- DESIGN VARIABLE PARAMETERS --------------------------% +% +% Kind of deformation (NO_DEFORMATION, TRANSLATION, ROTATION, SCALE, +% FFD_SETTING, FFD_NACELLE +% FFD_CONTROL_POINT, FFD_CAMBER, FFD_THICKNESS, FFD_TWIST +% FFD_CONTROL_POINT_2D, FFD_CAMBER_2D, FFD_THICKNESS_2D, FFD_TWIST_2D, +% HICKS_HENNE, SURFACE_BUMP) +DV_KIND= HICKS_HENNE +% +% Marker of the surface in which we are going apply the shape deformation +DV_MARKER= ( airfoil ) +% +% Parameters of the shape deformation +% - NO_DEFORMATION ( 1.0 ) +% - TRANSLATION ( x_Disp, y_Disp, z_Disp ), as a unit vector +% - ROTATION ( x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) +% - SCALE ( 1.0 ) +% - ANGLE_OF_ATTACK ( 1.0 ) +% - FFD_SETTING ( 1.0 ) +% - FFD_CONTROL_POINT ( FFD_BoxTag, i_Ind, j_Ind, k_Ind, x_Disp, y_Disp, z_Disp ) +% - FFD_NACELLE ( FFD_BoxTag, rho_Ind, theta_Ind, phi_Ind, rho_Disp, phi_Disp ) +% - FFD_GULL ( FFD_BoxTag, j_Ind ) +% - FFD_ANGLE_OF_ATTACK ( FFD_BoxTag, 1.0 ) +% - FFD_CAMBER ( FFD_BoxTag, i_Ind, j_Ind ) +% - FFD_THICKNESS ( FFD_BoxTag, i_Ind, j_Ind ) +% - FFD_TWIST ( FFD_BoxTag, j_Ind, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) +% - FFD_CONTROL_POINT_2D ( FFD_BoxTag, i_Ind, j_Ind, x_Disp, y_Disp ) +% - FFD_CAMBER_2D ( FFD_BoxTag, i_Ind ) +% - FFD_THICKNESS_2D ( FFD_BoxTag, i_Ind ) +% - FFD_TWIST_2D ( FFD_BoxTag, x_Orig, y_Orig ) +% - HICKS_HENNE ( Lower Surface (0)/Upper Surface (1)/Only one Surface (2), x_Loc ) +% - SURFACE_BUMP ( x_Start, x_End, x_Loc ) +DV_PARAM= ( 1, 0.5 ) +% +% Value of the shape deformation +DV_VALUE= 0.01 + +% ------------------------ GRID DEFORMATION PARAMETERS ------------------------% +% +% Number of smoothing iterations for FEA mesh deformation +DEFORM_LINEAR_ITER= 500 +% +% Number of nonlinear deformation iterations (surface deformation increments) +DEFORM_NONLINEAR_ITER= 1 +% +% Minimum residual criteria for the linear solver convergence of grid deformation +DEFORM_LINEAR_SOLVER_ERROR= 1E-14 +% +% Print the residuals during mesh deformation to the console (YES, NO) +DEFORM_CONSOLE_OUTPUT= YES +% +% Type of element stiffness imposed for FEA mesh deformation (INVERSE_VOLUME, +% WALL_DISTANCE, CONSTANT_STIFFNESS) +DEFORM_STIFFNESS_TYPE= INVERSE_VOLUME +% +% Visualize the surface deformation (NO, YES) +VISUALIZE_SURFACE_DEF= NO +% +% Visualize the volume deformation (NO, YES) +VISUALIZE_VOLUME_DEF= NO + +% --------------------------- CONVERGENCE PARAMETERS --------------------------% +% Convergence criteria (CAUCHY, RESIDUAL) +% +CONV_CRITERIA= RESIDUAL +% +% Residual reduction (order of magnitude with respect to the initial value) +RESIDUAL_REDUCTION= 6 +% +% Min value of the residual (log10 of the residual) +RESIDUAL_MINVAL= -8 +% +% Start Cauchy criteria at iteration number +STARTCONV_ITER= 10 +% +% Number of elements to apply the criteria +CAUCHY_ELEMS= 100 +% +% Epsilon to control the series convergence +CAUCHY_EPS= 1E-6 +% +% Function to apply the criteria (LIFT, DRAG, SENS_GEOMETRY, SENS_MACH, +% DELTA_LIFT, DELTA_DRAG) +CAUCHY_FUNC_FLOW= DRAG + +% ------------------------- INPUT/OUTPUT INFORMATION --------------------------% +% Mesh input file 
+%MESH_FILENAME=mesh_NACA0012_xcoarse.su2 +MESH_FILENAME=passed_as_flag_to_train.py.su2 +% +% Mesh input file format (SU2, CGNS, NETCDF_ASCII) +MESH_FORMAT= SU2 +% +% Mesh output file +MESH_OUT_FILENAME= mesh_out.su2 +% +% Restart flow input file +SOLUTION_FLOW_FILENAME= solution_flow.dat +% +% Restart adjoint input file +SOLUTION_ADJ_FILENAME= solution_adj.dat +% +% Output file format (TECPLOT, PARAVIEW, TECPLOT_BINARY) +%OUTPUT_FORMAT= TECPLOT_BINARY +% +% Output file convergence history (w/o extension) +CONV_FILENAME= history +% +% Output file restart flow +RESTART_FLOW_FILENAME= restart_flow.dat +% +% Output file restart adjoint +RESTART_ADJ_FILENAME= restart_adj.dat +% +% Output file flow (w/o extension) variables +VOLUME_FLOW_FILENAME= flow +% +% Output file adjoint (w/o extension) variables +VOLUME_ADJ_FILENAME= adjoint +% +% Output Objective function gradient (using continuous adjoint) +GRAD_OBJFUNC_FILENAME= of_grad.dat +% +% Output file surface flow coefficient (w/o extension) +SURFACE_FLOW_FILENAME= surface_flow +% +% Output file surface adjoint coefficient (w/o extension) +SURFACE_ADJ_FILENAME= surface_adjoint +% +% Writing solution file frequency +WRT_SOL_FREQ= 1000 +% +% Writing convergence history frequency +WRT_CON_FREQ= 1000 + +% --------------------- OPTIMAL SHAPE DESIGN DEFINITION -----------------------% +% Available flow based objective functions or constraint functions +% DRAG, LIFT, SIDEFORCE, EFFICIENCY, +% FORCE_X, FORCE_Y, FORCE_Z, +% MOMENT_X, MOMENT_Y, MOMENT_Z, +% THRUST, TORQUE, FIGURE_OF_MERIT, +% EQUIVALENT_AREA, NEARFIELD_PRESSURE, +% TOTAL_HEATFLUX, MAXIMUM_HEATFLUX, +% INVERSE_DESIGN_PRESSURE, INVERSE_DESIGN_HEATFLUX, +% +% Available geometrical based objective functions or constraint functions +% AIRFOIL_AREA, AIRFOIL_THICKNESS, AIRFOIL_CHORD, AIRFOIL_TOC, AIRFOIL_AOA, +% WING_VOLUME, WING_MIN_THICKNESS, WING_MAX_THICKNESS, WING_MAX_CHORD, WING_MIN_TOC, WING_MAX_TWIST, WING_MAX_CURVATURE, WING_MAX_DIHEDRAL +% STATION#_WIDTH, STATION#_AREA, STATION#_THICKNESS, STATION#_CHORD, STATION#_TOC, +% STATION#_TWIST (where # is the index of the station defined in GEO_LOCATION_STATIONS) +% +% Available design variables +% HICKS_HENNE ( 1, Scale | Mark. List | Lower(0)/Upper(1) side, x_Loc ) +% NACA_4DIGITS ( 4, Scale | Mark. List | 1st digit, 2nd digit, 3rd and 4th digit ) +% TRANSLATION ( 5, Scale | Mark. List | x_Disp, y_Disp, z_Disp ) +% ROTATION ( 6, Scale | Mark. List | x_Axis, y_Axis, z_Axis, x_Turn, y_Turn, z_Turn ) +% FFD_CONTROL_POINT_2D ( 15, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, x_Mov, y_Mov ) +% FFD_CAMBER_2D ( 16, Scale | Mark. List | FFD_Box_ID, i_Ind ) +% FFD_THICKNESS_2D ( 17, Scale | Mark. 
List | FFD_Box_ID, i_Ind ) +% +% Optimization objective function with scaling factor +% ex= Objective * Scale +% OPT_OBJECTIVE= DRAG * 0.001 +% +% Optimization constraint functions with scaling factors, separated by semicolons +% ex= (Objective = Value ) * Scale, use '>','<','=' +% OPT_CONSTRAINT= ( LIFT > 0.328188 ) * 0.001; ( MOMENT_Z > 0.034068 ) * 0.001; ( AIRFOIL_THICKNESS > 0.11 ) * 0.001 +% +% Optimization design variables, separated by semicolons +% DEFINITION_DV= ( 1, 1.0 | airfoil | 0, 0.05 ); ( 1, 1.0 | airfoil | 0, 0.10 ); ( 1, 1.0 | airfoil | 0, 0.15 ); ( 1, 1.0 | airfoil | 0, 0.20 ); ( 1, 1.0 | airfoil | 0, 0.25 ); ( 1, 1.0 | airfoil | 0, 0.30 ); ( 1, 1.0 | airfoil | 0, 0.35 ); ( 1, 1.0 | airfoil | 0, 0.40 ); ( 1, 1.0 | airfoil | 0, 0.45 ); ( 1, 1.0 | airfoil | 0, 0.50 ); ( 1, 1.0 | airfoil | 0, 0.55 ); ( 1, 1.0 | airfoil | 0, 0.60 ); ( 1, 1.0 | airfoil | 0, 0.65 ); ( 1, 1.0 | airfoil | 0, 0.70 ); ( 1, 1.0 | airfoil | 0, 0.75 ); ( 1, 1.0 | airfoil | 0, 0.80 ); ( 1, 1.0 | airfoil | 0, 0.85 ); ( 1, 1.0 | airfoil | 0, 0.90 ); ( 1, 1.0 | airfoil | 0, 0.95 ); ( 1, 1.0 | airfoil | 1, 0.05 ); ( 1, 1.0 | airfoil | 1, 0.10 ); ( 1, 1.0 | airfoil | 1, 0.15 ); ( 1, 1.0 | airfoil | 1, 0.20 ); ( 1, 1.0 | airfoil | 1, 0.25 ); ( 1, 1.0 | airfoil | 1, 0.30 ); ( 1, 1.0 | airfoil | 1, 0.35 ); ( 1, 1.0 | airfoil | 1, 0.40 ); ( 1, 1.0 | airfoil | 1, 0.45 ); ( 1, 1.0 | airfoil | 1, 0.50 ); ( 1, 1.0 | airfoil | 1, 0.55 ); ( 1, 1.0 | airfoil | 1, 0.60 ); ( 1, 1.0 | airfoil | 1, 0.65 ); ( 1, 1.0 | airfoil | 1, 0.70 ); ( 1, 1.0 | airfoil | 1, 0.75 ); ( 1, 1.0 | airfoil | 1, 0.80 ); ( 1, 1.0 | airfoil | 1, 0.85 ); ( 1, 1.0 | airfoil | 1, 0.90 ); ( 1, 1.0 | airfoil | 1, 0.95 ) + + + + +DIFF_INPUTS= COORDS_X, COORDS_Y, AOA, MACH +DIFF_OUTPUTS= VEL_X, VEL_Y, PRESSURE diff --git a/examples/cfdgcn/conf/cfdgcn.yaml b/examples/cfdgcn/conf/cfdgcn.yaml index 0ec91d6671..08c3c92379 100644 --- a/examples/cfdgcn/conf/cfdgcn.yaml +++ b/examples/cfdgcn/conf/cfdgcn.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -63,3 +64,69 @@ EVAL: batch_size: 1 pretrained_model_path: null eval_with_no_grad: true +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_cfdgcn/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2024 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set training data path +TRAIN_DATA_DIR: "./data/NACA0012_interpolate/outputs_train" +TRAIN_MESH_GRAPH_PATH: "./data/NACA0012_interpolate/mesh_fine.su2" + +# set evaluate data path +EVAL_DATA_DIR: "./data/NACA0012_interpolate/outputs_test" +EVAL_MESH_GRAPH_PATH: "./data/NACA0012_interpolate/mesh_fine.su2" + +# model settings +MODEL: + input_keys: ["input"] + output_keys: ["pred"] + config_file: "coarse.cfg" + coarse_mesh: "./meshes/mesh_NACA0012_xcoarse.su2" + freeze_mesh: false + num_convs: 6 + num_end_convs: 3 + hidden_channel: 512 + out_channel: 3 + +# training settings +TRAIN: + epochs: 500 + iters_per_epoch: 42 + save_freq: 50 + eval_during_train: true + eval_freq: 50 + learning_rate: 5.0e-4 + batch_size: 4 + 
pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 1 + pretrained_model_path: null + eval_with_no_grad: true +>>>>>>> Stashed changes diff --git a/examples/cfdgcn/su2paddle/__init__.py b/examples/cfdgcn/su2paddle/__init__.py index 515b21ce10..33b5754075 100755 --- a/examples/cfdgcn/su2paddle/__init__.py +++ b/examples/cfdgcn/su2paddle/__init__.py @@ -1,3 +1,3 @@ -from su2paddle.su2_function import SU2Module # noqa: F401 -from su2paddle.su2_function_mpi import activate_su2_mpi # noqa: F401 -from su2paddle.su2_numpy import SU2Numpy # noqa: F401 +from su2paddle.su2_function import SU2Module # noqa: F401 +from su2paddle.su2_function_mpi import activate_su2_mpi # noqa: F401 +from su2paddle.su2_numpy import SU2Numpy # noqa: F401 diff --git a/examples/cfdgcn/su2paddle/common.py b/examples/cfdgcn/su2paddle/common.py index 1dc98dc0e8..edc39471f3 100644 --- a/examples/cfdgcn/su2paddle/common.py +++ b/examples/cfdgcn/su2paddle/common.py @@ -1,73 +1,73 @@ -from typing import List - -import paddle - - -# https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/paddlespeech/audio/utils/tensor_utils.py#L40 -def pad_sequence( - sequences: List[paddle.Tensor], - batch_first: bool = False, - padding_value: float = 0.0, -) -> paddle.Tensor: - r"""Pad a list of variable length Tensors with `padding_value`. - - `pad_sequence` stacks a list of Tensors along a new dimension, - and pads them to equal length. For example, if the input is list of - sequences with size `L x *` and if batch_first is False, and `T x B x *` - otherwise. - - `B` is batch size. It is equal to the number of elements in `sequences`. - `T` is length of the longest sequence. - `L` is length of the sequence. - `*` is any number of trailing dimensions, including none. - - Example: - >>> a = paddle.ones(25, 300) - >>> b = paddle.ones(22, 300) - >>> c = paddle.ones(15, 300) - >>> pad_sequence([a, b, c]).shape - paddle.Tensor([25, 3, 300]) - - Note: - This function returns a Tensor of size `T x B x *` or `B x T x *` - where `T` is the length of the longest sequence. This function assumes - trailing dimensions and type of all the Tensors in sequences are same. - - Args: - sequences (list[Tensor]): list of variable length sequences. - batch_first (bool, optional): output will be in `B x T x *` if True, or in - `T x B x *` otherwise - padding_value (float, optional): value for padded elements. Default: 0. - - Returns: - Tensor of size `T x B x *` if :attr:`batch_first` is `False`. 
- Tensor of size `B x T x *` otherwise - """ - - # assuming trailing dimensions and type of all the Tensors - # in sequences are same and fetching those from sequences[0] - max_size = paddle.shape(sequences[0]) - trailing_dims = ( - tuple(max_size[1:].numpy().tolist()) if sequences[0].ndim >= 2 else () - ) - max_len = max([s.shape[0] for s in sequences]) - if batch_first: - out_dims = (len(sequences), max_len) + trailing_dims - else: - out_dims = (max_len, len(sequences)) + trailing_dims - out_tensor = paddle.full(out_dims, padding_value, sequences[0].dtype) - for i, tensor in enumerate(sequences): - length = tensor.shape[0] - # use index notation to prevent duplicate references to the tensor - if batch_first: - if length != 0: - out_tensor[i, :length] = tensor - else: - out_tensor[i, length] = tensor - else: - if length != 0: - out_tensor[:length, i] = tensor - else: - out_tensor[length, i] = tensor - - return out_tensor +from typing import List + +import paddle + + +# https://github.com/PaddlePaddle/PaddleSpeech/blob/develop/paddlespeech/audio/utils/tensor_utils.py#L40 +def pad_sequence( + sequences: List[paddle.Tensor], + batch_first: bool = False, + padding_value: float = 0.0, +) -> paddle.Tensor: + r"""Pad a list of variable length Tensors with `padding_value`. + + `pad_sequence` stacks a list of Tensors along a new dimension, + and pads them to equal length. For example, if the input is list of + sequences with size `L x *` and if batch_first is False, and `T x B x *` + otherwise. + + `B` is batch size. It is equal to the number of elements in `sequences`. + `T` is length of the longest sequence. + `L` is length of the sequence. + `*` is any number of trailing dimensions, including none. + + Example: + >>> a = paddle.ones(25, 300) + >>> b = paddle.ones(22, 300) + >>> c = paddle.ones(15, 300) + >>> pad_sequence([a, b, c]).shape + paddle.Tensor([25, 3, 300]) + + Note: + This function returns a Tensor of size `T x B x *` or `B x T x *` + where `T` is the length of the longest sequence. This function assumes + trailing dimensions and type of all the Tensors in sequences are same. + + Args: + sequences (list[Tensor]): list of variable length sequences. + batch_first (bool, optional): output will be in `B x T x *` if True, or in + `T x B x *` otherwise + padding_value (float, optional): value for padded elements. Default: 0. + + Returns: + Tensor of size `T x B x *` if :attr:`batch_first` is `False`. 
+ Tensor of size `B x T x *` otherwise + """ + + # assuming trailing dimensions and type of all the Tensors + # in sequences are same and fetching those from sequences[0] + max_size = paddle.shape(sequences[0]) + trailing_dims = ( + tuple(max_size[1:].numpy().tolist()) if sequences[0].ndim >= 2 else () + ) + max_len = max([s.shape[0] for s in sequences]) + if batch_first: + out_dims = (len(sequences), max_len) + trailing_dims + else: + out_dims = (max_len, len(sequences)) + trailing_dims + out_tensor = paddle.full(out_dims, padding_value, sequences[0].dtype) + for i, tensor in enumerate(sequences): + length = tensor.shape[0] + # use index notation to prevent duplicate references to the tensor + if batch_first: + if length != 0: + out_tensor[i, :length] = tensor + else: + out_tensor[i, length] = tensor + else: + if length != 0: + out_tensor[:length, i] = tensor + else: + out_tensor[length, i] = tensor + + return out_tensor diff --git a/examples/cfdgcn/su2paddle/su2_function.py b/examples/cfdgcn/su2paddle/su2_function.py index a11c4e7b79..cb10e472c5 100755 --- a/examples/cfdgcn/su2paddle/su2_function.py +++ b/examples/cfdgcn/su2paddle/su2_function.py @@ -1,177 +1,177 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import math -from typing import Tuple - -import paddle -from su2paddle import common -from su2paddle import su2_function_mpi - -from mpi4py import MPI # isort:skip -import pysu2 # isort:skip - - -_global_max_ppe = -1 - - -class SU2Module(paddle.nn.Layer): - def __init__( - self, config_file: str, mesh_file: str, dims: int = 2, num_zones: int = 1 - ) -> None: - """Initialize the SU2 configurations for the provided config file. - - Args: - config_file: str - The SU2 configuration file name. - mesh_file: str - Optional parameter, if not set defaults to the mesh filename set in the config file. - Can be used to run a batch with different meshes for each sample. - Passing in mesh_file with batch_index parameter in string format (e.g., 'b{batch_index}_mesh.su2') causes each element in batch to get assigned to the correct mesh file (0 indexed). - If running multiple processes in parallel, take care to name each mesh file uniquely to avoid conflicts - (e.g., unique = str(os.getpid()); mesh_file = 'b{batch_index}_' + unique + '_mesh.su2'). - dims: int - Number of dimensions for the problem (2D or 3D). - num_zones: int - Number of zones in the simulation (only 1 supported currently). - """ - super().__init__() - if num_zones != 1: - raise ValueError("Only supports 1 zone for now.") - if MPI.COMM_WORLD.Get_rank() != 0: - raise ValueError("Not rank 0 in comm") - if not _global_max_ppe > 0: - raise ValueError( - "Before running SU2Function, a (single) call to activate_su2_mpi is needed." 
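A runnable variant of the docstring example for `pad_sequence` above, assuming `su2paddle` is importable from the working directory (as in the cfdgcn example scripts); the shapes are illustrative only:

    import paddle
    from su2paddle import common

    # three variable-length sequences sharing the trailing dimension
    a = paddle.ones([25, 300])
    b = paddle.ones([22, 300])
    c = paddle.ones([15, 300])

    # batch_first=True stacks along a new leading batch axis and zero-pads to the longest length
    out = common.pad_sequence([a, b, c], batch_first=True, padding_value=0.0)
    print(out.shape)  # [3, 25, 300]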
- ) - self.num_zones = num_zones - self.dims = dims - self.mesh_file = mesh_file - - self.forward_config = config_file - self.forward_driver = None - - def forward(self, *inputs: paddle.Tensor) -> Tuple[paddle.Tensor, ...]: - return SU2Function.apply( - *inputs, - self.forward_config, - self.mesh_file, - self.num_zones, - self.dims, - self.set_forward_driver, - ) - - def get_forward_driver(self): - if self.forward_driver is None: - raise AttributeError("Forward driver is only set after running forward()") - return self.forward_driver - - def set_forward_driver(self, f): - if self.forward_driver is not None: - self.forward_driver.Postprocessing() - self.forward_driver = f - - def __del__(self): - """Close existing drivers and MPI communicators.""" - if hasattr(self, "forward_driver") and self.forward_driver is not None: - self.forward_driver.Postprocessing() - - -class SU2Function(paddle.autograd.PyLayer): - num_params = 5 - - @staticmethod - def forward(ctx, *inputs): - su2_function_mpi.non_busy_post(MPI.COMM_WORLD) - x = inputs[: -SU2Function.num_params] - forward_config, mesh_file, num_zones, dims, set_forward_driver_hook = inputs[ - -SU2Function.num_params : - ] - - if x[0].dim() < 2: - raise ValueError( - "Input is expected to have first dimension for batch, " - "e.g. x[0, :] is first item in batch." - ) - batch_size = x[0].shape[0] - max_ppe = _global_max_ppe - workers = MPI.COMM_WORLD.Get_size() - 1 - if 0 <= workers < batch_size: - raise ValueError( - "Batch size is larger than number of workers, not enough processes to run batch." - ) - - MPI.COMM_WORLD.bcast(su2_function_mpi.RunCode.RUN_FORWARD, root=0) - procs_per_example = min(max_ppe, math.ceil(workers / batch_size)) - - x = tuple([i.numpy() for i in x]) - - MPI.COMM_WORLD.bcast( - [num_zones, dims, forward_config, mesh_file, procs_per_example, x], root=0 - ) - - # instantiate forward_driver while workers work - worker_forward_config = MPI.COMM_WORLD.recv(source=1) - forward_driver = pysu2.CSinglezoneDriver( - worker_forward_config, num_zones, dims, MPI.COMM_SELF - ) - num_diff_inputs = forward_driver.GetnDiff_Inputs() - num_diff_outputs = forward_driver.GetnDiff_Outputs() - if num_diff_inputs <= 0 or num_diff_outputs <= 0: - raise ValueError( - "Need to define at least one differentiable input and output. To run without differentiation, use the SU2Numpy class." - ) - - if len(x) != num_diff_inputs: - raise ValueError( - f"{len(x)} inputs were provided, but the config file ({forward_config}) defines {num_diff_inputs} diff inputs." 
- ) - set_forward_driver_hook(forward_driver) - ctx.num_diff_inputs = num_diff_inputs - - outputs = [] - su2_function_mpi.non_busy_wait(MPI.COMM_WORLD) - for i in range(batch_size): - output = MPI.COMM_WORLD.recv(source=1 + i * procs_per_example) - outputs.append(output) - outputs = tuple( - common.pad_sequence( - [paddle.to_tensor(o[i], dtype=paddle.float32) for o in outputs], - batch_first=True, - ) - for i in range(num_diff_outputs) - ) - return outputs - - @staticmethod - def backward(ctx, *grad_outputs): - su2_function_mpi.non_busy_post(MPI.COMM_WORLD) - max_ppe = _global_max_ppe - workers = MPI.COMM_WORLD.Get_size() - 1 - MPI.COMM_WORLD.bcast(su2_function_mpi.RunCode.RUN_ADJOINT, root=0) - grad_outputs = tuple([i.numpy() for i in grad_outputs]) - MPI.COMM_WORLD.bcast(grad_outputs, root=0) - batch_size = grad_outputs[0].shape[0] - procs_per_example = min(max_ppe, math.ceil(workers / batch_size)) - su2_function_mpi.non_busy_wait(MPI.COMM_WORLD) - grads = [] - for i in range(batch_size): - grad = MPI.COMM_WORLD.recv(source=1 + i * procs_per_example) - grads.append(grad) - print("grads", len(grads), flush=True) - grads = tuple( - common.pad_sequence( - [paddle.to_tensor(g[i], dtype=paddle.float32) for g in grads], - batch_first=True, - ) - for i in range(ctx.num_diff_inputs) - ) - return tuple( - [grads[0], grads[1], None, None] - ) # + (None,) * SU2Function.num_params +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math +from typing import Tuple + +import paddle +from su2paddle import common +from su2paddle import su2_function_mpi + +from mpi4py import MPI # isort:skip +import pysu2 # isort:skip + + +_global_max_ppe = -1 + + +class SU2Module(paddle.nn.Layer): + def __init__( + self, config_file: str, mesh_file: str, dims: int = 2, num_zones: int = 1 + ) -> None: + """Initialize the SU2 configurations for the provided config file. + + Args: + config_file: str - The SU2 configuration file name. + mesh_file: str - Optional parameter, if not set defaults to the mesh filename set in the config file. + Can be used to run a batch with different meshes for each sample. + Passing in mesh_file with batch_index parameter in string format (e.g., 'b{batch_index}_mesh.su2') causes each element in batch to get assigned to the correct mesh file (0 indexed). + If running multiple processes in parallel, take care to name each mesh file uniquely to avoid conflicts + (e.g., unique = str(os.getpid()); mesh_file = 'b{batch_index}_' + unique + '_mesh.su2'). + dims: int - Number of dimensions for the problem (2D or 3D). + num_zones: int - Number of zones in the simulation (only 1 supported currently). + """ + super().__init__() + if num_zones != 1: + raise ValueError("Only supports 1 zone for now.") + if MPI.COMM_WORLD.Get_rank() != 0: + raise ValueError("Not rank 0 in comm") + if not _global_max_ppe > 0: + raise ValueError( + "Before running SU2Function, a (single) call to activate_su2_mpi is needed." 
+ ) + self.num_zones = num_zones + self.dims = dims + self.mesh_file = mesh_file + + self.forward_config = config_file + self.forward_driver = None + + def forward(self, *inputs: paddle.Tensor) -> Tuple[paddle.Tensor, ...]: + return SU2Function.apply( + *inputs, + self.forward_config, + self.mesh_file, + self.num_zones, + self.dims, + self.set_forward_driver, + ) + + def get_forward_driver(self): + if self.forward_driver is None: + raise AttributeError("Forward driver is only set after running forward()") + return self.forward_driver + + def set_forward_driver(self, f): + if self.forward_driver is not None: + self.forward_driver.Postprocessing() + self.forward_driver = f + + def __del__(self): + """Close existing drivers and MPI communicators.""" + if hasattr(self, "forward_driver") and self.forward_driver is not None: + self.forward_driver.Postprocessing() + + +class SU2Function(paddle.autograd.PyLayer): + num_params = 5 + + @staticmethod + def forward(ctx, *inputs): + su2_function_mpi.non_busy_post(MPI.COMM_WORLD) + x = inputs[: -SU2Function.num_params] + forward_config, mesh_file, num_zones, dims, set_forward_driver_hook = inputs[ + -SU2Function.num_params : + ] + + if x[0].dim() < 2: + raise ValueError( + "Input is expected to have first dimension for batch, " + "e.g. x[0, :] is first item in batch." + ) + batch_size = x[0].shape[0] + max_ppe = _global_max_ppe + workers = MPI.COMM_WORLD.Get_size() - 1 + if 0 <= workers < batch_size: + raise ValueError( + "Batch size is larger than number of workers, not enough processes to run batch." + ) + + MPI.COMM_WORLD.bcast(su2_function_mpi.RunCode.RUN_FORWARD, root=0) + procs_per_example = min(max_ppe, math.ceil(workers / batch_size)) + + x = tuple([i.numpy() for i in x]) + + MPI.COMM_WORLD.bcast( + [num_zones, dims, forward_config, mesh_file, procs_per_example, x], root=0 + ) + + # instantiate forward_driver while workers work + worker_forward_config = MPI.COMM_WORLD.recv(source=1) + forward_driver = pysu2.CSinglezoneDriver( + worker_forward_config, num_zones, dims, MPI.COMM_SELF + ) + num_diff_inputs = forward_driver.GetnDiff_Inputs() + num_diff_outputs = forward_driver.GetnDiff_Outputs() + if num_diff_inputs <= 0 or num_diff_outputs <= 0: + raise ValueError( + "Need to define at least one differentiable input and output. To run without differentiation, use the SU2Numpy class." + ) + + if len(x) != num_diff_inputs: + raise ValueError( + f"{len(x)} inputs were provided, but the config file ({forward_config}) defines {num_diff_inputs} diff inputs." 
+ ) + set_forward_driver_hook(forward_driver) + ctx.num_diff_inputs = num_diff_inputs + + outputs = [] + su2_function_mpi.non_busy_wait(MPI.COMM_WORLD) + for i in range(batch_size): + output = MPI.COMM_WORLD.recv(source=1 + i * procs_per_example) + outputs.append(output) + outputs = tuple( + common.pad_sequence( + [paddle.to_tensor(o[i], dtype=paddle.float32) for o in outputs], + batch_first=True, + ) + for i in range(num_diff_outputs) + ) + return outputs + + @staticmethod + def backward(ctx, *grad_outputs): + su2_function_mpi.non_busy_post(MPI.COMM_WORLD) + max_ppe = _global_max_ppe + workers = MPI.COMM_WORLD.Get_size() - 1 + MPI.COMM_WORLD.bcast(su2_function_mpi.RunCode.RUN_ADJOINT, root=0) + grad_outputs = tuple([i.numpy() for i in grad_outputs]) + MPI.COMM_WORLD.bcast(grad_outputs, root=0) + batch_size = grad_outputs[0].shape[0] + procs_per_example = min(max_ppe, math.ceil(workers / batch_size)) + su2_function_mpi.non_busy_wait(MPI.COMM_WORLD) + grads = [] + for i in range(batch_size): + grad = MPI.COMM_WORLD.recv(source=1 + i * procs_per_example) + grads.append(grad) + print("grads", len(grads), flush=True) + grads = tuple( + common.pad_sequence( + [paddle.to_tensor(g[i], dtype=paddle.float32) for g in grads], + batch_first=True, + ) + for i in range(ctx.num_diff_inputs) + ) + return tuple( + [grads[0], grads[1], None, None] + ) # + (None,) * SU2Function.num_params diff --git a/examples/cfdgcn/su2paddle/su2_function_mpi.py b/examples/cfdgcn/su2paddle/su2_function_mpi.py index 6065123b20..70e95a5e93 100755 --- a/examples/cfdgcn/su2paddle/su2_function_mpi.py +++ b/examples/cfdgcn/su2paddle/su2_function_mpi.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -368,3 +369,375 @@ def main(remove_temp_files: bool = True) -> None: if batch_rank == 0: MPI.COMM_WORLD.send(grads, dest=0) adjoint_driver.Postprocessing() +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
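The SU2Module/SU2Function pair above exposes an SU2 solve as a differentiable Paddle layer. A minimal sketch of how it is meant to be driven, assuming the script is launched under `mpirun` with one master rank plus at least one worker, and an SU2 config whose DIFF_INPUTS/DIFF_OUTPUTS match the coarse.cfg shown earlier; the node count and mesh file names are placeholders:

    import os
    import paddle
    from su2paddle import SU2Module, activate_su2_mpi

    # must be called exactly once before the first forward pass;
    # worker ranks enter their serving loop inside this call and never return
    activate_su2_mpi(remove_temp_files=True, max_procs_per_example=1)

    unique = str(os.getpid())
    model = SU2Module(
        config_file="coarse.cfg",                            # SU2 config defining DIFF_INPUTS/DIFF_OUTPUTS
        mesh_file="b{batch_index}_" + unique + "_mesh.su2",  # per-batch-element mesh, unique per process
    )
    # NOTE: the b{batch_index}_..._mesh.su2 files are assumed to have been written beforehand

    num_nodes = 354  # placeholder: must equal the node count of the mesh in use
    coords_x = paddle.rand([1, num_nodes])
    coords_y = paddle.rand([1, num_nodes])
    coords_x.stop_gradient = False  # let gradients flow back to the mesh coordinates
    coords_y.stop_gradient = False
    aoa = paddle.to_tensor([[8.0]])
    mach = paddle.to_tensor([[0.65]])

    # one output tensor per DIFF_OUTPUTS entry (VEL_X, VEL_Y, PRESSURE)
    vel_x, vel_y, pressure = model(coords_x, coords_y, aoa, mach)
    loss = pressure.mean()
    loss.backward()  # triggers the discrete-adjoint solve on the worker ranks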
+ +import atexit +import os +import shutil +import time +import warnings +from enum import IntEnum +from typing import Dict +from typing import Sequence +from typing import Tuple +from typing import TypeVar +from typing import Union + +import numpy as np +import paddle +import SU2 +from su2paddle import su2_function + +from mpi4py import MPI # isort:skip +import pysu2 # isort:skip +import pysu2ad # isort:skip + +warnings.filterwarnings("ignore", category=DeprecationWarning) + +GenTensor = TypeVar("GenTensor", paddle.Tensor, np.ndarray) + +_non_busy_wait_max_time = 0.1 + + +class RunCode(IntEnum): + """Run codes for communication with worker processes.""" + + STOP = -1 + RUN_FORWARD = 0 + RUN_ADJOINT = 1 + + +def run_forward( + comm: MPI.Intracomm, + forward_driver: pysu2.CSinglezoneDriver, + inputs: Sequence[GenTensor], +) -> Tuple[GenTensor, ...]: + """Runs a simulation with the provided driver, using the inputs to set the values + defined in DIFF_INPUTS in the config file. + + Args: + comm: The communicator for the processes running the simulation. + forward_driver: The driver for the simulation, created using the same comm as passed into this function. + inputs: The inputs used to set the DIFF_INPUTS as defined in the configuration file. + Returns: + The outputs of the simulation, as defined in DIFF_OUTPUTS in the config file. + """ + rank = comm.Get_rank() + for i, x in enumerate(inputs): + forward_driver.SetDiff_Inputs_Vars(x.flatten().tolist(), i) + forward_driver.ApplyDiff_Inputs_Vars() + + forward_driver.StartSolver() + comm.Barrier() + + is_numpy = len(inputs) == 0 or type(inputs[0]) is np.ndarray + if is_numpy: + array_func = np.array + cat_func = np.concatenate + else: + array_func = paddle.to_tensor(inputs[0]) + cat_func = paddle.concat + + num_diff_outputs = forward_driver.GetnDiff_Outputs() + outputs = [ + array_func(forward_driver.GetDiff_Outputs_Vars(i)) + for i in range(num_diff_outputs) + ] + + for i in range(num_diff_outputs): + if outputs[i].shape[0] > 1: + # if dealing with full-grid, reorder according to GlobalIndex + if comm.Get_size() > 1: + # gather outputs in rank 0 if more than one rank + outputs[i] = comm.gather(outputs[i], root=0) + global_inds = comm.gather(forward_driver.GetAllGlobalIndices(), root=0) + if rank == 0: + outputs[i] = cat_func(outputs[i]) + global_inds = list(sum(global_inds, tuple())) # join tuples + else: + global_inds = list(forward_driver.GetAllGlobalIndices()) + + if rank == 0: + # TODO Make the list integers on the C side + global_inds = np.array(global_inds, dtype=np.long) + if outputs[i].shape[0] != len(global_inds): + raise ValueError( + "Only full grid outputs supported by now (besides scalars)." + ) + # order by global_inds + outputs[i][global_inds] = ( + outputs[i].copy() if is_numpy else outputs[i].clone() + ) + else: + outputs[i] = None + return tuple(outputs) + + +def run_adjoint( + comm: MPI.Intracomm, + adjoint_driver: pysu2ad.CDiscAdjSinglezoneDriver, + inputs: Sequence[GenTensor], + grad_outputs: Sequence[GenTensor], +) -> Tuple[GenTensor, ...]: + """Runs a simulation with the provided driver, using the inputs to set the values defined in DIFF_INPUTS in the config file. + + Args: + comm: The communicator for the processes running the simulation. + adjoint_driver: The driver for the adjoint computation, created using the same comm as passed into this function. + inputs: The same inputs used to set the DIFF_INPUTS in the forward pass. 
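+            These are re-applied to the adjoint driver through SetDiff_Inputs_Vars before the adjoint solve starts.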
+ grad_outputs: Gradients of a scalar loss with respect to the forward outputs, see SU2Function's backward() method. + + Return: + The gradients of the loss with respect to the inputs. + """ + rank = comm.Get_rank() + for i, x in enumerate(inputs): + adjoint_driver.SetDiff_Inputs_Vars(x.flatten().tolist(), i) + adjoint_driver.ApplyDiff_Inputs_Vars() + for i, g in enumerate(grad_outputs): + adjoint_driver.SetBackprop_Derivs(g.flatten().tolist(), i) + + adjoint_driver.StartSolver() + + is_numpy = len(inputs) == 0 or type(inputs[0]) is np.ndarray + if is_numpy: + array_func = np.array + cat_func = np.concatenate + else: + array_func = paddle.to_tensor(inputs[0]) + cat_func = paddle.concat + + num_diff_inputs = adjoint_driver.GetnDiff_Inputs() + grads = [ + array_func(adjoint_driver.GetTotal_Sens_Diff_Inputs(i)) + for i in range(num_diff_inputs) + ] + for i in range(num_diff_inputs): + if grads[i].shape[0] > 1: + # if dealing with full-grid, reorder according to GlobalIndex + if comm.Get_size() > 1: + # gather outputs in rank 0 if more than one rank + grads[i] = comm.gather(grads[i], root=0) + global_inds = comm.gather(adjoint_driver.GetAllGlobalIndices(), root=0) + if rank == 0: + grads[i] = cat_func(grads[i]) + global_inds = list(sum(global_inds, tuple())) # join tuples + else: + global_inds = list(adjoint_driver.GetAllGlobalIndices()) + + if rank == 0: + global_inds = np.array(global_inds, dtype=np.long) + if grads[i].shape[0] != len(global_inds): + raise ValueError( + "Only full grid outputs supported by now (besides scalars)." + ) + # order by global_inds + grads[i][global_inds] = ( + grads[i].copy() if is_numpy else grads[i].clone() + ) + else: + grads[i] = None + return tuple(grads) + + +def modify_config( + config: SU2.io.Config, + new_params: Dict[str, str], + outfile: Union[str, os.PathLike, None] = None, +) -> SU2.io.Config: + """Modify a config, saving the modifications to outfile if provided.""" + temp_config = config.copy() + for k, v in new_params.items(): + temp_config[k] = v + if outfile is not None: + temp_config.write(outfile) + return temp_config + + +def activate_su2_mpi( + remove_temp_files: bool = True, + max_procs_per_example: int = 1, + non_busy_wait_max_time: float = 0.1, +) -> None: + if MPI.COMM_WORLD.Get_size() < 2: + raise ValueError( + 'Need at least 1 master and 1 worker process, run with "mpirun -np ...' + ) + + if MPI.COMM_WORLD.Get_rank() != 0: + global _non_busy_wait_max_time + _non_busy_wait_max_time = non_busy_wait_max_time + main(remove_temp_files=remove_temp_files) + exit(0) + + # Only rank 0 from here on + def stop(): + non_busy_post(MPI.COMM_WORLD) + MPI.COMM_WORLD.bcast(RunCode.STOP, root=0) + + atexit.register(stop) + su2_function._global_max_ppe = max_procs_per_example + + +def non_busy_wait(comm: MPI.Intracomm) -> None: + b = comm.Ibarrier() + start = time.time() + while not b.Get_status(): + time.sleep(min((time.time() - start) / 2, _non_busy_wait_max_time)) + + +def non_busy_post(comm: MPI.Intracomm) -> None: + comm.Ibarrier() + + +def main(remove_temp_files: bool = True) -> None: + """Runs a loop for the worker processes. + Can be signaled to run either a forward simulation or an adjoint computation using RunCodes. 
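+    Worker ranks stay in this loop until rank 0 broadcasts RunCode.STOP (registered as an atexit hook in activate_su2_mpi).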
+ """ + local_comm = MPI.COMM_WORLD.Create_group( + MPI.Group.Excl(MPI.COMM_WORLD.Get_group(), [0]) + ) + local_rank = local_comm.Get_rank() + local_size = local_comm.Get_size() + ppid = str(os.getppid()) + + x = inputs = batch_comm = batch_index = batch_rank = forward_config = None + num_zones = dims = batch_solution_filename = batch_restart_filename = None + batch_size = procs_per_example = 1 + + temp_data_dir = "temp_data" + os.makedirs(temp_data_dir, exist_ok=True) + + while True: + non_busy_wait(MPI.COMM_WORLD) + run_type = MPI.COMM_WORLD.bcast(None, root=0) + if run_type == RunCode.STOP: + # remove temporary files + if local_rank == 0 and remove_temp_files: + os.system(f"rm b*_{ppid}_* 2> /dev/null") + break + + if run_type == RunCode.RUN_FORWARD: + if procs_per_example != 1 and procs_per_example != local_size: + # disconnect batch_comm from previous run, if it was created + batch_comm.Disconnect() + ( + num_zones, + dims, + forward_config, + mesh_file, + procs_per_example, + inputs, + ) = MPI.COMM_WORLD.bcast(None, root=0) + batch_size = inputs[0].shape[0] + batch_index = local_rank // procs_per_example + if procs_per_example == 1: + batch_comm = MPI.COMM_SELF + elif procs_per_example == local_size: + batch_comm = local_comm + else: + batch_comm = local_comm.Split(batch_index, local_rank) + if local_rank >= batch_size * procs_per_example: + # these procs wont be used + non_busy_post(MPI.COMM_WORLD) + continue + batch_rank = batch_comm.Get_rank() + x = [z[batch_index] for z in inputs] + + batch_forward_config = os.path.join( + temp_data_dir, f"b{batch_index}_{ppid}_{forward_config}" + ) + if batch_rank == 0: + old_config = SU2.io.Config(forward_config) + restart_filename = old_config["RESTART_FLOW_FILENAME"] + batch_restart_filename = os.path.join( + temp_data_dir, f"b{batch_index}_{ppid}_{restart_filename}" + ) + mesh_file = ( + mesh_file.format(batch_index=batch_index) + if mesh_file + else old_config["MESH_FILENAME"] + ) + new_config = { + "RESTART_FLOW_FILENAME": batch_restart_filename, + "MESH_FILENAME": mesh_file, + } + shutil.copy(forward_config, batch_forward_config) + modify_config(old_config, new_config, outfile=batch_forward_config) + if local_rank == 0: + MPI.COMM_WORLD.send(batch_forward_config, dest=0) + batch_comm.Barrier() + + forward_driver = pysu2.CSinglezoneDriver( + batch_forward_config, num_zones, dims, batch_comm + ) + # TODO SetRestart_FlowFileName is not necessary anymore, remove from C++ + # forward_driver.SetRestart_FlowFileName(batch_restart_filename) + outputs = run_forward(batch_comm, forward_driver, x) + output_lengths = [o.shape[0] for o in outputs] + non_busy_post(MPI.COMM_WORLD) + if batch_rank == 0: + MPI.COMM_WORLD.send(outputs, dest=0) + # TODO Way to get results in-memory, without writing to file? + batch_solution_filename = batch_restart_filename.replace( + "restart", "solution" + ) + shutil.move(batch_restart_filename, batch_solution_filename) + forward_driver.Postprocessing() + + elif run_type == RunCode.RUN_ADJOINT: + # assert inputs is not None, 'Run forward simulation before running the adjoint.' 
+ inputs = None + grad_outputs = MPI.COMM_WORLD.bcast(None, root=0) + if local_rank >= batch_size * procs_per_example: + # these procs wont be used + non_busy_post(MPI.COMM_WORLD) + continue + dl = [ + z[batch_index, : output_lengths[i]] for i, z in enumerate(grad_outputs) + ] + + batch_adjoint_config = os.path.join( + temp_data_dir, f"b{batch_index}_{ppid}_adjoint_{forward_config}" + ) + + if batch_rank == 0: + old_config = SU2.io.Config(forward_config) + mesh_file = ( + mesh_file.format(batch_index=batch_index) + if mesh_file + else old_config["MESH_FILENAME"] + ) + new_config = { + "MATH_PROBLEM": "DISCRETE_ADJOINT", + "SOLUTION_FLOW_FILENAME": batch_solution_filename, + "RESTART_ADJ_FILENAME": batch_restart_filename.replace( + "flow", "adj" + ), + "MESH_FILENAME": mesh_file, + } + shutil.copy(forward_config, batch_adjoint_config) + modify_config(old_config, new_config, outfile=batch_adjoint_config) + batch_comm.Barrier() + adjoint_driver = pysu2ad.CDiscAdjSinglezoneDriver( + batch_adjoint_config, num_zones, dims, batch_comm + ) + grads = run_adjoint(batch_comm, adjoint_driver, x, dl) + non_busy_post(MPI.COMM_WORLD) + if batch_rank == 0: + MPI.COMM_WORLD.send(grads, dest=0) + adjoint_driver.Postprocessing() +>>>>>>> Stashed changes diff --git a/examples/cfdgcn/su2paddle/su2_numpy.py b/examples/cfdgcn/su2paddle/su2_numpy.py index 8494578d4b..d3938a282e 100755 --- a/examples/cfdgcn/su2paddle/su2_numpy.py +++ b/examples/cfdgcn/su2paddle/su2_numpy.py @@ -1,166 +1,166 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import math - -import numpy as np -from su2paddle import su2_function_mpi - -from mpi4py import MPI # isort:skip -import pysu2 # isort:skip - - -class SU2Numpy: - """Class that uses the SU2 in-memory python wrapper - to provide differentiable physics simulations. - - Usage example for scalar output case: - - # define differentiable inputs and outputs in the config - # with DIFF_INPUTS and DIFF_OUTPUTS fields - su2 = SU2Numpy('config.cfg') - inputs = np.array([1.0]) - outputs = su2(inputs) - # if output is a scalar, we can get the gradient of the output - # with respect to the inputs by simply doing - doutput_dinputs = loss.backward() - """ - - def __init__(self, config_file, dims=2, num_zones=1): - """Initialize the SU2 configurations for the provided config file. - - Args: - config_file: str - The SU2 configuration file name. - dims: int - Number of dimensions for the problem (2D or 3D). - num_zones: int - Number of zones in the simulation (only 1 supported currently). - max_procs: int - Maximum number of MPI processes to use for SU2. If set to -1 (default), - number of processes will equal batch size. Otherwise, will use floor(max_procs / batch_size) - processes per item in batch. - In this case max_procs must be larger than the size of the batch passed in. 
- """ - if num_zones != 1: - raise ValueError("Only supports 1 zone for now.") - if MPI.COMM_WORLD.Get_rank() != 0: - raise ValueError("Not rank 0 in comm") - - self.comm = MPI.COMM_WORLD - self.workers = self.comm.Get_size() - 1 - - if self.workers < 1: - raise ValueError("Need at least 1 master and 1 worker process.") - - self.num_zones = num_zones - self.dims = dims - self.outputs_shape = None - self.batch_size = -1 - - self.forward_config = config_file - self.forward_driver = pysu2.CSinglezoneDriver( - self.forward_config, self.num_zones, self.dims, MPI.COMM_SELF - ) - self.num_diff_inputs = self.forward_driver.GetnDiff_Inputs() - self.num_diff_outputs = self.forward_driver.GetnDiff_Outputs() - - def __call__(self, *inputs): - return self.forward(*inputs) - - def forward(self, *inputs): - """Runs a batch of SU2 simulations. - - Args: - inputs: The differentiable inputs for the batch of simulations. - Number of inputs depends on the number of DIFF_INPUTS set in the configuration file. - Each input is of shape BATCH_SIZE x SHAPE, where SHAPE is the shape of the given input. - For example, a batch of 10 scalars would have input shape 10 x 1, - a batch of 10 vectors of length N would have input shape 10 x N. - Return: - A tuple of tensors with the batch of differentiable outputs. - Number of outputs depends on the number of DIFF_OUTPUTS set in the configuration file. - As for the inputs, each output is of shape BATCH_SIZE x SHAPE, - where SHAPE is the shape of the given output. - Outputs are always either scalars or vectors. - """ - if len(inputs) != self.num_diff_inputs: - raise ValueError( - f"{len(inputs)} inputs were provided, but the config file ({self.forward_config}) defines {self.num_diff_inputs} diff inputs." - ) - if self.num_diff_inputs > 0 and inputs[0].ndim < 2: - raise ValueError( - "Input is expected to have first dimension for batch, " - "e.g. x[0, :] is first item in batch." - ) - self.batch_size = inputs[0].shape[0] if self.num_diff_inputs > 0 else 1 - if 0 <= self.workers < self.batch_size: - raise ValueError( - "Batch size is larger than number of workers, not enough processes to run batch." - ) - procs_per_example = math.ceil(self.workers / self.batch_size) - - self.comm.bcast(su2_function_mpi.RunCode.RUN_FORWARD, root=0) - self.comm.bcast( - [self.num_zones, self.dims, self.forward_config, inputs], root=0 - ) - outputs = [] - for i in range(self.batch_size): - output = self.comm.recv(source=1 + i * procs_per_example) - outputs.append(output) - outputs = tuple( - np.concatenate([np.expand_dims(o[i], axis=0) for o in outputs]) - for i in range(self.num_diff_outputs) - ) - self.outputs_shape = [o.shape for o in outputs] - return outputs - - def backward(self, *grad_outputs): - """Gives the gradient of some scalar loss with respect to the inputs of the previous - forward call when provided the gradients of this loss with respect to the outputs of - the forward call. - - Args: - grad_outputs: Gradients of a scalar loss with respect to the forward outputs. - For example, if the loss is the sum of the outputs, the grad_outputs should be a all ones. - This defaults to 1.0 when the output of the forward call is just a scalar (or batch of scalars). - Return: - The gradients of the loss with respect to the forward inputs. 
- """ - if ( - len(grad_outputs) == 0 - and len(self.outputs_shape) == 1 - and self.outputs_shape[0][1] == 1 - ): - # if no grad_outputs was provided and just one output scalar (or batch of scalars) - # was used, then use a default grad outputs of 1.0 - grad_outputs = [np.ones(self.outputs_shape[0])] - elif self.num_diff_outputs != len(grad_outputs): - raise ValueError( - "To run backward() you need to provide the gradients of a scalar loss " - "with respect to the outputs of the forward pass" - ) - - procs_per_example = math.ceil(self.workers / self.batch_size) - self.comm.bcast(su2_function_mpi.RunCode.RUN_ADJOINT, root=0) - self.comm.bcast(grad_outputs, root=0) - grads = [] - for i in range(self.batch_size): - grad = self.comm.recv(source=1 + i * procs_per_example) - grads.append(grad) - grads = tuple( - np.concatenate([np.expand_dims(g[i], axis=0) for g in grads]) - for i in range(self.num_diff_inputs) - ) - return grads - - def __del__(self): - """Close existing drivers and MPI communicators.""" - if self.forward_driver is not None: - self.forward_driver.Postprocessing() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import math + +import numpy as np +from su2paddle import su2_function_mpi + +from mpi4py import MPI # isort:skip +import pysu2 # isort:skip + + +class SU2Numpy: + """Class that uses the SU2 in-memory python wrapper + to provide differentiable physics simulations. + + Usage example for scalar output case: + + # define differentiable inputs and outputs in the config + # with DIFF_INPUTS and DIFF_OUTPUTS fields + su2 = SU2Numpy('config.cfg') + inputs = np.array([1.0]) + outputs = su2(inputs) + # if output is a scalar, we can get the gradient of the output + # with respect to the inputs by simply doing + doutput_dinputs = loss.backward() + """ + + def __init__(self, config_file, dims=2, num_zones=1): + """Initialize the SU2 configurations for the provided config file. + + Args: + config_file: str - The SU2 configuration file name. + dims: int - Number of dimensions for the problem (2D or 3D). + num_zones: int - Number of zones in the simulation (only 1 supported currently). + max_procs: int - Maximum number of MPI processes to use for SU2. If set to -1 (default), + number of processes will equal batch size. Otherwise, will use floor(max_procs / batch_size) + processes per item in batch. + In this case max_procs must be larger than the size of the batch passed in. 
+ """ + if num_zones != 1: + raise ValueError("Only supports 1 zone for now.") + if MPI.COMM_WORLD.Get_rank() != 0: + raise ValueError("Not rank 0 in comm") + + self.comm = MPI.COMM_WORLD + self.workers = self.comm.Get_size() - 1 + + if self.workers < 1: + raise ValueError("Need at least 1 master and 1 worker process.") + + self.num_zones = num_zones + self.dims = dims + self.outputs_shape = None + self.batch_size = -1 + + self.forward_config = config_file + self.forward_driver = pysu2.CSinglezoneDriver( + self.forward_config, self.num_zones, self.dims, MPI.COMM_SELF + ) + self.num_diff_inputs = self.forward_driver.GetnDiff_Inputs() + self.num_diff_outputs = self.forward_driver.GetnDiff_Outputs() + + def __call__(self, *inputs): + return self.forward(*inputs) + + def forward(self, *inputs): + """Runs a batch of SU2 simulations. + + Args: + inputs: The differentiable inputs for the batch of simulations. + Number of inputs depends on the number of DIFF_INPUTS set in the configuration file. + Each input is of shape BATCH_SIZE x SHAPE, where SHAPE is the shape of the given input. + For example, a batch of 10 scalars would have input shape 10 x 1, + a batch of 10 vectors of length N would have input shape 10 x N. + Return: + A tuple of tensors with the batch of differentiable outputs. + Number of outputs depends on the number of DIFF_OUTPUTS set in the configuration file. + As for the inputs, each output is of shape BATCH_SIZE x SHAPE, + where SHAPE is the shape of the given output. + Outputs are always either scalars or vectors. + """ + if len(inputs) != self.num_diff_inputs: + raise ValueError( + f"{len(inputs)} inputs were provided, but the config file ({self.forward_config}) defines {self.num_diff_inputs} diff inputs." + ) + if self.num_diff_inputs > 0 and inputs[0].ndim < 2: + raise ValueError( + "Input is expected to have first dimension for batch, " + "e.g. x[0, :] is first item in batch." + ) + self.batch_size = inputs[0].shape[0] if self.num_diff_inputs > 0 else 1 + if 0 <= self.workers < self.batch_size: + raise ValueError( + "Batch size is larger than number of workers, not enough processes to run batch." + ) + procs_per_example = math.ceil(self.workers / self.batch_size) + + self.comm.bcast(su2_function_mpi.RunCode.RUN_FORWARD, root=0) + self.comm.bcast( + [self.num_zones, self.dims, self.forward_config, inputs], root=0 + ) + outputs = [] + for i in range(self.batch_size): + output = self.comm.recv(source=1 + i * procs_per_example) + outputs.append(output) + outputs = tuple( + np.concatenate([np.expand_dims(o[i], axis=0) for o in outputs]) + for i in range(self.num_diff_outputs) + ) + self.outputs_shape = [o.shape for o in outputs] + return outputs + + def backward(self, *grad_outputs): + """Gives the gradient of some scalar loss with respect to the inputs of the previous + forward call when provided the gradients of this loss with respect to the outputs of + the forward call. + + Args: + grad_outputs: Gradients of a scalar loss with respect to the forward outputs. + For example, if the loss is the sum of the outputs, the grad_outputs should be a all ones. + This defaults to 1.0 when the output of the forward call is just a scalar (or batch of scalars). + Return: + The gradients of the loss with respect to the forward inputs. 
+ """ + if ( + len(grad_outputs) == 0 + and len(self.outputs_shape) == 1 + and self.outputs_shape[0][1] == 1 + ): + # if no grad_outputs was provided and just one output scalar (or batch of scalars) + # was used, then use a default grad outputs of 1.0 + grad_outputs = [np.ones(self.outputs_shape[0])] + elif self.num_diff_outputs != len(grad_outputs): + raise ValueError( + "To run backward() you need to provide the gradients of a scalar loss " + "with respect to the outputs of the forward pass" + ) + + procs_per_example = math.ceil(self.workers / self.batch_size) + self.comm.bcast(su2_function_mpi.RunCode.RUN_ADJOINT, root=0) + self.comm.bcast(grad_outputs, root=0) + grads = [] + for i in range(self.batch_size): + grad = self.comm.recv(source=1 + i * procs_per_example) + grads.append(grad) + grads = tuple( + np.concatenate([np.expand_dims(g[i], axis=0) for g in grads]) + for i in range(self.num_diff_inputs) + ) + return grads + + def __del__(self): + """Close existing drivers and MPI communicators.""" + if self.forward_driver is not None: + self.forward_driver.Postprocessing() diff --git a/examples/cfdgcn/utils.py b/examples/cfdgcn/utils.py index 3c843f4626..31da47999c 100644 --- a/examples/cfdgcn/utils.py +++ b/examples/cfdgcn/utils.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -287,3 +288,292 @@ def quad2tri(elems): else paddle.to_tensor([], dtype=paddle.int64) ) return new_elems, new_edges +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import math +import os +import pathlib +import warnings +from os import path as osp +from typing import BinaryIO +from typing import List +from typing import Optional +from typing import Text +from typing import Tuple +from typing import Union + +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import paddle +from paddle.vision import transforms as T +from PIL import Image + +matplotlib.use("Agg") + + +@paddle.no_grad() +def make_grid( + tensor: Union[paddle.Tensor, List[paddle.Tensor]], + nrow: int = 8, + padding: int = 2, + normalize: bool = False, + value_range: Optional[Tuple[int, int]] = None, + scale_each: bool = False, + pad_value: int = 0, + **kwargs, +) -> paddle.Tensor: + if not ( + isinstance(tensor, paddle.Tensor) + or ( + isinstance(tensor, list) + and all(isinstance(t, paddle.Tensor) for t in tensor) + ) + ): + raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}") + + if "range" in kwargs.keys(): + warning = "range will be deprecated, please use value_range instead." 
+ warnings.warn(warning) + value_range = kwargs["range"] + + # if list of tensors, convert to a 4D mini-batch Tensor + if isinstance(tensor, list): + tensor = paddle.stack(tensor, axis=0) + + if tensor.ndim == 2: # single image H x W + tensor = tensor.unsqueeze(0) + if tensor.ndim == 3: # single image + if tensor.shape[0] == 1: # if single-channel, convert to 3-channel + tensor = paddle.concat((tensor, tensor, tensor), 0) + tensor = tensor.unsqueeze(0) + if tensor.ndim == 4 and tensor.shape[1] == 1: # single-channel images + tensor = paddle.concat((tensor, tensor, tensor), 1) + + if normalize is True: + if value_range is not None: + if not isinstance(value_range, tuple): + raise TypeError( + "value_range has to be a tuple (min, max) if specified. min and max are numbers" + ) + + def norm_ip(img, low, high): + img.clip(min=low, max=high) + img = img - low + img = img / max(high - low, 1e-5) + + def norm_range(t, value_range): + if value_range is not None: + norm_ip(t, value_range[0], value_range[1]) + else: + norm_ip(t, float(t.min()), float(t.max())) + + if scale_each is True: + for t in tensor: # loop over mini-batch dimension + norm_range(t, value_range) + else: + norm_range(tensor, value_range) + + if tensor.shape[0] == 1: + return tensor.squeeze(0) + + # make the mini-batch of images into a grid + nmaps = tensor.shape[0] + xmaps = min(nrow, nmaps) + ymaps = int(math.ceil(float(nmaps) / xmaps)) + height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding) + num_channels = tensor.shape[1] + grid = paddle.full( + (num_channels, height * ymaps + padding, width * xmaps + padding), pad_value + ) + k = 0 + for y in range(ymaps): + for x in range(xmaps): + if k >= nmaps: + break + grid[ + :, + y * height + padding : (y + 1) * height, + x * width + padding : (x + 1) * width, + ] = tensor[k] + k = k + 1 + return grid + + +@paddle.no_grad() +def save_image( + tensor: Union[paddle.Tensor, List[paddle.Tensor]], + fp: Union[Text, pathlib.Path, BinaryIO], + format: Optional[str] = None, + **kwargs, +) -> None: + grid = make_grid(tensor, **kwargs) + ndarr = ( + paddle.clip(grid * 255 + 0.5, 0, 255).transpose([1, 2, 0]).cast("uint8").numpy() + ) + im = Image.fromarray(ndarr) + os.makedirs(osp.dirname(fp), exist_ok=True) + im.save(fp, format=format) + + +def log_images( + nodes, + pred, + true, + elems_list, + index, + mode, + aoa=0, + mach=0, + file="field.png", +): + for field in range(pred.shape[1]): + true_img = plot_field( + nodes, + elems_list, + true[:, field], + mode=mode, + col=field, + clim=(-0.8, 0.8), + title="true", + ) + true_img = T.ToTensor()(true_img) + + pred_img = plot_field( + nodes, + elems_list, + pred[:, field], + mode=mode, + col=field, + clim=(-0.8, 0.8), + title="pred", + ) + pred_img = T.ToTensor()(pred_img) + imgs = [pred_img, true_img] + grid = make_grid(paddle.stack(imgs), padding=0) + out_file = file + f"{field}" + if mode == "airfoil": + if aoa == 8.0 and mach == 0.65: + save_image( + grid, "./result/image/" + str(index) + out_file + "_field.png" + ) + save_image( + grid, "./result/image/airfoil/" + str(index) + out_file + "_field.png" + ) + elif mode == "cylinder": + if aoa == 39.0: + save_image( + grid, "./result/image/" + str(index) + out_file + "_field.png" + ) + save_image( + grid, "./result/image/cylinder/" + str(index) + out_file + "_field.png" + ) + else: + raise ValueError( + f"Argument 'mode' should be 'airfoil' or 'cylinder', but got {mode}." 
+ ) + + +def plot_field( + nodes: paddle.Tensor, + elems_list, + field: paddle.Tensor, + mode, + col, + contour=False, + clim=None, + zoom=True, + get_array=True, + out_file=None, + show=False, + title="", +): + elems_list = sum(elems_list, []) + tris, _ = quad2tri(elems_list) + tris = np.array(tris) + x, y = nodes[:, :2].t().detach().numpy() + field = field.detach().numpy() + fig = plt.figure(dpi=800) + if contour: + plt.tricontourf(x, y, tris, field) + else: + plt.tripcolor(x, y, tris, field) + if clim: + plt.clim(*clim) + colorbar = plt.colorbar() + if mode == "airfoil": + if col == 0: + colorbar.set_label("x-velocity", fontsize=16) + elif col == 1: + colorbar.set_label("pressure", fontsize=16) + elif col == 2: + colorbar.set_label("y-velocity", fontsize=16) + if mode == "cylinder": + if col == 0: + colorbar.set_label("pressure", fontsize=16) + elif col == 1: + colorbar.set_label("x-velocity", fontsize=16) + elif col == 2: + colorbar.set_label("y-velocity", fontsize=16) + if zoom: + if mode == "airfoil": + plt.xlim(left=-0.5, right=1.5) + plt.ylim(bottom=-0.5, top=0.5) + else: + plt.xlim(left=-5, right=5.0) + plt.ylim(bottom=-5, top=5.0) + + if title: + plt.title(title) + + if out_file is not None: + plt.savefig(out_file) + plt.close() + + if show: + plt.show() + + if get_array: + if mode == "airfoil": + plt.gca().invert_yaxis() + fig.canvas.draw() + array = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) + array = array.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + fig.clf() + fig.clear() + plt.close() + return array + + +def quad2tri(elems): + new_elems = [] + new_edges = [] + for e in elems: + if len(e) <= 3: + new_elems.append(e) + else: + new_elems.append([e[0], e[1], e[2]]) + new_elems.append([e[0], e[2], e[3]]) + new_edges.append(paddle.to_tensor(([[e[0]], [e[2]]]), dtype=paddle.int64)) + new_edges = ( + paddle.concat(new_edges, axis=1) + if new_edges + else paddle.to_tensor([], dtype=paddle.int64) + ) + return new_elems, new_edges +>>>>>>> Stashed changes diff --git a/examples/cgcnn/CGCNN.py b/examples/cgcnn/CGCNN.py index 03392d9462..eb419cab90 100644 --- a/examples/cgcnn/CGCNN.py +++ b/examples/cgcnn/CGCNN.py @@ -1,147 +1,147 @@ -import warnings - -import hydra -from omegaconf import DictConfig - -import ppsci -import ppsci.constraint.supervised_constraint -import ppsci.optimizer as optim -from ppsci.arch import CrystalGraphConvNet -from ppsci.data.dataset import CGCNNDataset -from ppsci.data.dataset.cgcnn_dataset import collate_pool - -warnings.filterwarnings("ignore") - - -def train(cfg: DictConfig): - - dataset = CGCNNDataset( - cfg.TRAIN_DIR, input_keys=("i",), label_keys=("l",), id_keys=("c",) - ) - - structures, _, _ = dataset.raw_data[0] - orig_atom_fea_len = structures[0].shape[-1] - nbr_fea_len = structures[1].shape[-1] - model = CrystalGraphConvNet( - orig_atom_fea_len, - nbr_fea_len, - atom_fea_len=cfg.MODEL.atom_fea_len, - n_conv=cfg.MODEL.n_conv, - h_fea_len=cfg.MODEL.h_fea_len, - n_h=cfg.MODEL.n_h, - ) - - cgcnn_constraint = ppsci.constraint.SupervisedConstraint( - dataloader_cfg={ - "dataset": { - "name": "CGCNNDataset", - "root_dir": cfg.TRAIN_DIR, - "input_keys": ("i",), - "label_keys": ("l",), - "id_keys": ("c",), - }, - "batch_size": cfg.TRAIN.batch_size, - "collate_fn": collate_pool, - }, - loss=ppsci.loss.MAELoss("mean"), - output_expr={"l": lambda out: out["out"]}, - name="cgcnn_constraint", - ) - - constraint = {cgcnn_constraint.name: cgcnn_constraint} - - cgcnn_valid = ppsci.validate.SupervisedValidator( - dataloader_cfg={ - "dataset": 
{ - "name": "CGCNNDataset", - "root_dir": cfg.VALID_DIR, - "input_keys": ("i",), - "label_keys": ("l",), - "id_keys": ("c",), - }, - "batch_size": cfg.TRAIN.batch_size, - "collate_fn": collate_pool, - }, - loss=ppsci.loss.MAELoss("mean"), - output_expr={"l": lambda out: out["out"]}, - metric={"MAE": ppsci.metric.MAE()}, - name="cgcnn_valid", - ) - validator = {cgcnn_valid.name: cgcnn_valid} - - optimizer = optim.Momentum( - learning_rate=cfg.TRAIN.lr, - momentum=cfg.TRAIN.momentum, - weight_decay=cfg.TRAIN.weight_decay, - )(model) - - solver = ppsci.solver.Solver( - model=model, - constraint=constraint, - optimizer=optimizer, - validator=validator, - cfg=cfg, - ) - - solver.train() - - solver.eval() - - -def evaluate(cfg: DictConfig): - - dataset = CGCNNDataset( - cfg.TEST_DIR, input_keys=("i",), label_keys=("l",), id_keys=("c",) - ) - - structures, _, _ = dataset.raw_data[0] - orig_atom_fea_len = structures[0].shape[-1] - nbr_fea_len = structures[1].shape[-1] - model = CrystalGraphConvNet( - orig_atom_fea_len, - nbr_fea_len, - atom_fea_len=cfg.MODEL.atom_fea_len, - n_conv=cfg.MODEL.n_conv, - h_fea_len=cfg.MODEL.h_fea_len, - n_h=cfg.MODEL.n_h, - ) - - cgcnn_evaluate = ppsci.validate.SupervisedValidator( - dataloader_cfg={ - "dataset": { - "name": "CGCNNDataset", - "root_dir": cfg.TEST_DIR, - "input_keys": ("i",), - "label_keys": ("l",), - "id_keys": ("c",), - }, - "batch_size": cfg.EVAL.batch_size, - "collate_fn": collate_pool, - }, - loss=ppsci.loss.MAELoss("mean"), - output_expr={"l": lambda out: out["out"]}, - metric={"MAE": ppsci.metric.MAE()}, - name="cgcnn_evaluate", - ) - validator = {cgcnn_evaluate.name: cgcnn_evaluate} - solver = ppsci.solver.Solver( - model, - validator=validator, - cfg=cfg, - ) - - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="CGCNN.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +import warnings + +import hydra +from omegaconf import DictConfig + +import ppsci +import ppsci.constraint.supervised_constraint +import ppsci.optimizer as optim +from ppsci.arch import CrystalGraphConvNet +from ppsci.data.dataset import CGCNNDataset +from ppsci.data.dataset.cgcnn_dataset import collate_pool + +warnings.filterwarnings("ignore") + + +def train(cfg: DictConfig): + + dataset = CGCNNDataset( + cfg.TRAIN_DIR, input_keys=("i",), label_keys=("l",), id_keys=("c",) + ) + + structures, _, _ = dataset.raw_data[0] + orig_atom_fea_len = structures[0].shape[-1] + nbr_fea_len = structures[1].shape[-1] + model = CrystalGraphConvNet( + orig_atom_fea_len, + nbr_fea_len, + atom_fea_len=cfg.MODEL.atom_fea_len, + n_conv=cfg.MODEL.n_conv, + h_fea_len=cfg.MODEL.h_fea_len, + n_h=cfg.MODEL.n_h, + ) + + cgcnn_constraint = ppsci.constraint.SupervisedConstraint( + dataloader_cfg={ + "dataset": { + "name": "CGCNNDataset", + "root_dir": cfg.TRAIN_DIR, + "input_keys": ("i",), + "label_keys": ("l",), + "id_keys": ("c",), + }, + "batch_size": cfg.TRAIN.batch_size, + "collate_fn": collate_pool, + }, + loss=ppsci.loss.MAELoss("mean"), + output_expr={"l": lambda out: out["out"]}, + name="cgcnn_constraint", + ) + + constraint = {cgcnn_constraint.name: cgcnn_constraint} + + cgcnn_valid = ppsci.validate.SupervisedValidator( + dataloader_cfg={ + "dataset": { + "name": "CGCNNDataset", + "root_dir": cfg.VALID_DIR, + "input_keys": ("i",), + "label_keys": ("l",), + "id_keys": 
("c",), + }, + "batch_size": cfg.TRAIN.batch_size, + "collate_fn": collate_pool, + }, + loss=ppsci.loss.MAELoss("mean"), + output_expr={"l": lambda out: out["out"]}, + metric={"MAE": ppsci.metric.MAE()}, + name="cgcnn_valid", + ) + validator = {cgcnn_valid.name: cgcnn_valid} + + optimizer = optim.Momentum( + learning_rate=cfg.TRAIN.lr, + momentum=cfg.TRAIN.momentum, + weight_decay=cfg.TRAIN.weight_decay, + )(model) + + solver = ppsci.solver.Solver( + model=model, + constraint=constraint, + optimizer=optimizer, + validator=validator, + cfg=cfg, + ) + + solver.train() + + solver.eval() + + +def evaluate(cfg: DictConfig): + + dataset = CGCNNDataset( + cfg.TEST_DIR, input_keys=("i",), label_keys=("l",), id_keys=("c",) + ) + + structures, _, _ = dataset.raw_data[0] + orig_atom_fea_len = structures[0].shape[-1] + nbr_fea_len = structures[1].shape[-1] + model = CrystalGraphConvNet( + orig_atom_fea_len, + nbr_fea_len, + atom_fea_len=cfg.MODEL.atom_fea_len, + n_conv=cfg.MODEL.n_conv, + h_fea_len=cfg.MODEL.h_fea_len, + n_h=cfg.MODEL.n_h, + ) + + cgcnn_evaluate = ppsci.validate.SupervisedValidator( + dataloader_cfg={ + "dataset": { + "name": "CGCNNDataset", + "root_dir": cfg.TEST_DIR, + "input_keys": ("i",), + "label_keys": ("l",), + "id_keys": ("c",), + }, + "batch_size": cfg.EVAL.batch_size, + "collate_fn": collate_pool, + }, + loss=ppsci.loss.MAELoss("mean"), + output_expr={"l": lambda out: out["out"]}, + metric={"MAE": ppsci.metric.MAE()}, + name="cgcnn_evaluate", + ) + validator = {cgcnn_evaluate.name: cgcnn_evaluate} + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + + solver.eval() + + +@hydra.main(version_base=None, config_path="./conf", config_name="CGCNN.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/cgcnn/conf/CGCNN.yaml b/examples/cgcnn/conf/CGCNN.yaml index 08cbbffd34..c7f336d0aa 100644 --- a/examples/cgcnn/conf/CGCNN.yaml +++ b/examples/cgcnn/conf/CGCNN.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -58,3 +59,63 @@ TRAIN: EVAL: pretrained_model_path: null batch_size: 64 +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_CGCNN/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +device: cpu +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 +use_tbd: false + +TRAIN_DIR: "./data/train/" +VALID_DIR: "./data/valid/" +TEST_DIR: null + +# model settings +MODEL: + atom_fea_len: 64 + n_conv: 3 + h_fea_len: 128 + n_h: 1 + +# training settings +TRAIN: + epochs: 30 + eval_during_train: true + eval_freq: 1 + batch_size: 64 + lr: 0.001 + momentum: 0.9 + weight_decay: 0.01 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + batch_size: 64 +>>>>>>> Stashed changes diff --git a/examples/chip_heat/chip_heat.py 
b/examples/chip_heat/chip_heat.py index 8323abe906..ada74fdfeb 100644 --- a/examples/chip_heat/chip_heat.py +++ b/examples/chip_heat/chip_heat.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -748,3 +749,754 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.fftpack +import scipy.io +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger + + +def fftind(size): + """ + Returns the momentum indices for the 2D Fast Fourier Transform (FFT). + + Args: + size (int): Size of the 2D array. + + Returns: + numpy.ndarray: Array of momentum indices for the 2D FFT. + """ + k_ind = np.mgrid[:size, :size] - int((size + 1) / 2) + k_ind = scipy.fftpack.fftshift(k_ind) + return k_ind + + +def GRF(alpha=3.0, size=128, flag_normalize=True): + """ + Generates a Gaussian random field(GRF) with a power law amplitude spectrum. + + Args: + alpha (float, optional): Power law exponent. Defaults to 3.0. + size (int, optional): Size of the output field. Defaults to 128. + flag_normalize (bool, optional): Flag indicating whether to normalize the field. Defaults to True. + + Returns: + numpy.ndarray: Generated Gaussian random field. 
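+
+    Example:
+        An illustrative sketch (editorial addition, not from the original file); the
+        sampled field is random, but the flattened output shape is deterministic for
+        a given ``size``:
+
+        >>> field = GRF(alpha=3.0, size=64)
+        >>> field.shape
+        (1, 4096)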
+ """ + # Defines momentum indices + k_idx = fftind(size) + # Defines the amplitude as a power law 1/|k|^(alpha/2) + amplitude = np.power(k_idx[0] ** 2 + k_idx[1] ** 2 + 1e-10, -alpha / 4.0) + amplitude[0, 0] = 0 + # Draws a complex gaussian random noise with normal + # (circular) distribution + noise = np.random.normal(size=(size, size)) + 1j * np.random.normal( + size=(size, size) + ) + # To real space + gfield = np.fft.ifft2(noise * amplitude).real + # Sets the standard deviation to one + if flag_normalize: + gfield = gfield - np.mean(gfield) + gfield = gfield / np.std(gfield) + return gfield.reshape([1, -1]) + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.ChipDeepONets(**cfg.MODEL) + # set geometry + NPOINT = cfg.NL * cfg.NW + geom = {"rect": ppsci.geometry.Rectangle((0, 0), (cfg.DL, cfg.DW))} + points = geom["rect"].sample_interior(NPOINT, evenly=True) + + # generate training data and validation data + data_u = np.ones([1, (cfg.NL - 2) * (cfg.NW - 2)]) + data_BC = np.ones([1, NPOINT]) + data_u = np.vstack((data_u, np.zeros([1, (cfg.NL - 2) * (cfg.NW - 2)]))) + data_BC = np.vstack((data_BC, np.zeros([1, NPOINT]))) + for i in range(cfg.NU - 2): + data_u = np.vstack((data_u, GRF(alpha=cfg.GRF.alpha, size=cfg.NL - 2))) + for i in range(cfg.NBC - 2): + data_BC = np.vstack((data_BC, GRF(alpha=cfg.GRF.alpha, size=cfg.NL))) + data_u = data_u.astype("float32") + data_BC = data_BC.astype("float32") + test_u = GRF(alpha=4, size=cfg.NL).astype("float32")[0] + + boundary_indices = np.where( + ( + (points["x"] == 0) + | (points["x"] == cfg.DW) + | (points["y"] == 0) + | (points["y"] == cfg.DL) + ) + ) + interior_indices = np.where( + ( + (points["x"] != 0) + & (points["x"] != cfg.DW) + & (points["y"] != 0) + & (points["y"] != cfg.DL) + ) + ) + + points["u"] = np.tile(test_u[interior_indices[0]], (NPOINT, 1)) + points["u_one"] = test_u.T.reshape([-1, 1]) + points["bc_data"] = np.tile(test_u[boundary_indices[0]], (NPOINT, 1)) + points["bc"] = np.zeros((NPOINT, 1), dtype="float32") + + top_indices = np.where(points["x"] == cfg.DW) + down_indices = np.where(points["x"] == 0) + left_indices = np.where( + (points["y"] == 0) & (points["x"] != 0) & (points["x"] != cfg.DW) + ) + right_indices = np.where( + ((points["y"] == cfg.DL) & (points["x"] != 0) & (points["x"] != cfg.DW)) + ) + + # generate validation data + ( + test_top_data, + test_down_data, + test_left_data, + test_right_data, + test_interior_data, + ) = [ + { + "x": points["x"][indices_[0]], + "y": points["y"][indices_[0]], + "u": points["u"][indices_[0]], + "u_one": points["u_one"][indices_[0]], + "bc": points["bc"][indices_[0]], + "bc_data": points["bc_data"][indices_[0]], + } + for indices_ in ( + top_indices, + down_indices, + left_indices, + right_indices, + interior_indices, + ) + ] + # generate train data + top_data = { + "x": test_top_data["x"], + "y": test_top_data["y"], + "u": data_u, + "u_one": data_BC[:, top_indices[0]].T.reshape([-1, 1]), + "bc": np.array([[0], [1], [2], [3]], dtype="float32"), + "bc_data": data_BC[:, boundary_indices[0]], + } + down_data = { + "x": test_down_data["x"], + "y": test_down_data["y"], + "u": data_u, + "u_one": data_BC[:, down_indices[0]].T.reshape([-1, 1]), + "bc": np.array([[0], [1], [2], [3]], dtype="float32"), + "bc_data": data_BC[:, boundary_indices[0]], + } + left_data = { + "x": test_left_data["x"], + "y": test_left_data["y"], + "u": data_u, + "u_one": data_BC[:, left_indices[0]].T.reshape([-1, 1]), + "bc": np.array([[0], [1], [2], [3]], dtype="float32"), + "bc_data": data_BC[:, 
boundary_indices[0]], + } + right_data = { + "x": test_right_data["x"], + "y": test_right_data["y"], + "u": data_u, + "u_one": data_BC[:, right_indices[0]].T.reshape([-1, 1]), + "bc": np.array([[0], [1], [2], [3]], dtype="float32"), + "bc_data": data_BC[:, boundary_indices[0]], + } + interior_data = { + "x": test_interior_data["x"], + "y": test_interior_data["y"], + "u": data_u, + "u_one": data_u.T.reshape([-1, 1]), + "bc": np.array([[0], [1], [2], [3]], dtype="float32"), + "bc_data": data_BC[:, boundary_indices[0]], + } + + # set constraint + index = ("x", "u", "bc", "bc_data") + label = {"chip": np.array([0], dtype="float32")} + weight = {"chip": np.array([cfg.TRAIN.weight], dtype="float32")} + top_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ChipHeatDataset", + "input": top_data, + "label": label, + "index": index, + "data_type": "bc_data", + "weight": weight, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + "chip": lambda out: paddle.where( + out["bc"] == 1, + jacobian(out["T"], out["x"]) - out["u_one"], + paddle.where( + out["bc"] == 0, + out["T"] - out["u_one"], + paddle.where( + out["bc"] == 2, + jacobian(out["T"], out["x"]) + out["u_one"] * (out["T"] - 1), + jacobian(out["T"], out["x"]) + + out["u_one"] + * (out["T"] ** 2 - 1) + * (out["T"] ** 2 + 1) + * 5.6 + / 50000, + ), + ), + ) + }, + name="top_sup", + ) + down_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ChipHeatDataset", + "input": down_data, + "label": label, + "index": index, + "data_type": "bc_data", + "weight": weight, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + "chip": lambda out: paddle.where( + out["bc"] == 1, + jacobian(out["T"], out["x"]) - out["u_one"], + paddle.where( + out["bc"] == 0, + out["T"] - out["u_one"], + paddle.where( + out["bc"] == 2, + jacobian(out["T"], out["x"]) + out["u_one"] * (out["T"] - 1), + jacobian(out["T"], out["x"]) + + out["u_one"] + * (out["T"] ** 2 - 1) + * (out["T"] ** 2 + 1) + * 5.6 + / 50000, + ), + ), + ) + }, + name="down_sup", + ) + left_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ChipHeatDataset", + "input": left_data, + "label": label, + "index": index, + "data_type": "bc_data", + "weight": weight, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + "chip": lambda out: paddle.where( + out["bc"] == 1, + jacobian(out["T"], out["y"]) - out["u_one"], + paddle.where( + out["bc"] == 0, + out["T"] - out["u_one"], + paddle.where( + out["bc"] == 2, + jacobian(out["T"], out["y"]) + out["u_one"] * (out["T"] - 1), + jacobian(out["T"], out["y"]) + + out["u_one"] + * (out["T"] ** 2 - 1) + * (out["T"] ** 2 + 1) + * 5.6 + / 50000, + ), + ), + ) + }, + name="left_sup", + ) + right_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ChipHeatDataset", + "input": right_data, + "label": label, + "index": index, + "data_type": "bc_data", + "weight": weight, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + "chip": lambda 
out: paddle.where( + out["bc"] == 1, + jacobian(out["T"], out["y"]) - out["u_one"], + paddle.where( + out["bc"] == 0, + out["T"] - out["u_one"], + paddle.where( + out["bc"] == 2, + jacobian(out["T"], out["y"]) + out["u_one"] * (out["T"] - 1), + jacobian(out["T"], out["y"]) + + out["u_one"] + * (out["T"] ** 2 - 1) + * (out["T"] ** 2 + 1) + * 5.6 + / 50000, + ), + ), + ) + }, + name="right_sup", + ) + interior_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ChipHeatDataset", + "input": interior_data, + "label": label, + "index": index, + "data_type": "u", + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + "chip": lambda out: hessian(out["T"], out["x"]) + + hessian(out["T"], out["y"]) + + 100 * out["u_one"] + }, + name="interior_sup", + ) + # wrap constraints together + constraint = { + down_sup_constraint.name: down_sup_constraint, + left_sup_constraint.name: left_sup_constraint, + right_sup_constraint.name: right_sup_constraint, + interior_sup_constraint.name: interior_sup_constraint, + top_sup_constraint.name: top_sup_constraint, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + top_down_label = {"chip": np.zeros([cfg.NL, 1], dtype="float32")} + left_right_label = {"chip": np.zeros([(cfg.NL - 2), 1], dtype="float32")} + interior_label = { + "thermal_condution": np.zeros( + [test_interior_data["x"].shape[0], 1], dtype="float32" + ) + } + top_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_top_data, + "label": top_down_label, + "weight": { + "chip": np.full([cfg.NL, 1], cfg.TRAIN.weight, dtype="float32") + }, + }, + "batch_size": cfg.NL, + }, + ppsci.loss.MSELoss("mean"), + output_expr={"chip": lambda out: out["T"] - out["u_one"]}, + metric={"MSE": ppsci.metric.MSE()}, + name="top_mse", + ) + down_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_down_data, + "label": top_down_label, + "weight": { + "chip": np.full([cfg.NL, 1], cfg.TRAIN.weight, dtype="float32") + }, + }, + "batch_size": cfg.NL, + }, + ppsci.loss.MSELoss("mean"), + output_expr={"chip": lambda out: out["T"] - out["u_one"]}, + metric={"MSE": ppsci.metric.MSE()}, + name="down_mse", + ) + left_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_left_data, + "label": left_right_label, + "weight": { + "chip": np.full([cfg.NL - 2, 1], cfg.TRAIN.weight, dtype="float32") + }, + }, + "batch_size": (cfg.NL - 2), + }, + ppsci.loss.MSELoss("mean"), + output_expr={"chip": lambda out: out["T"] - out["u_one"]}, + metric={"MSE": ppsci.metric.MSE()}, + name="left_mse", + ) + right_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_right_data, + "label": left_right_label, + "weight": { + "chip": np.full([cfg.NL - 2, 1], cfg.TRAIN.weight, dtype="float32") + }, + }, + "batch_size": (cfg.NL - 2), + }, + ppsci.loss.MSELoss("mean"), + output_expr={"chip": lambda out: out["T"] - out["u_one"]}, + metric={"MSE": ppsci.metric.MSE()}, + name="right_mse", + ) + interior_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_interior_data, + "label": interior_label, + }, + "batch_size": cfg.TRAIN.batch_size, + }, + 
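+        # Editorial note (not part of the original diff): this validator evaluates the
+        # interior residual d2T/dx2 + d2T/dy2 + 100 * u_one against a zero label, i.e. a
+        # Poisson-type equation laplace(T) = -100 * u_one on the sampled interior points.
+        # A minimal NumPy sanity check of the same residual on a uniform grid with spacing
+        # h (hypothetical arrays T and u_one, ignoring boundary handling) might look like:
+        #     lap = (np.roll(T, 1, 0) + np.roll(T, -1, 0)
+        #            + np.roll(T, 1, 1) + np.roll(T, -1, 1) - 4 * T) / h**2
+        #     residual = lap + 100 * u_one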
ppsci.loss.MSELoss("mean"), + output_expr={ + "thermal_condution": lambda out: ( + hessian(out["T"], out["x"]) + hessian(out["T"], out["y"]) + ) + + 100 * out["u_one"] + }, + metric={"MSE": ppsci.metric.MSE()}, + name="interior_mse", + ) + validator = { + down_validator.name: down_validator, + left_validator.name: left_validator, + right_validator.name: right_validator, + top_validator.name: top_validator, + interior_validator.name: interior_validator, + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + pred_points = geom["rect"].sample_interior(NPOINT, evenly=True) + pred_points["u"] = points["u"] + pred_points["bc_data"] = np.zeros_like(points["bc_data"]) + pred_points["bc"] = np.repeat( + np.array([[cfg.EVAL.bc_type]], dtype="float32"), NPOINT, axis=0 + ) + pred = solver.predict(pred_points) + logger.message("Now saving visual result to: visual/result.vtu, please wait...") + ppsci.visualize.save_vtu_from_dict( + osp.join(cfg.output_dir, "visual/result.vtu"), + { + "x": pred_points["x"], + "y": pred_points["y"], + "T": pred["T"], + }, + ( + "x", + "y", + ), + ("T"), + ) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.ChipDeepONets(**cfg.MODEL) + # set geometry + NPOINT = cfg.NL * cfg.NW + geom = {"rect": ppsci.geometry.Rectangle((0, 0), (cfg.DL, cfg.DW))} + points = geom["rect"].sample_interior(NPOINT, evenly=True) + + # generate validation data + test_u = GRF(alpha=4, size=cfg.NL).astype("float32")[0] + + boundary_indices = np.where( + ( + (points["x"] == 0) + | (points["x"] == cfg.DW) + | (points["y"] == 0) + | (points["y"] == cfg.DL) + ) + ) + interior_indices = np.where( + ( + (points["x"] != 0) + & (points["x"] != cfg.DW) + & (points["y"] != 0) + & (points["y"] != cfg.DL) + ) + ) + + points["u"] = np.tile(test_u[interior_indices[0]], (NPOINT, 1)) + points["u_one"] = test_u.T.reshape([-1, 1]) + points["bc_data"] = np.tile(test_u[boundary_indices[0]], (NPOINT, 1)) + points["bc"] = np.zeros((NPOINT, 1), dtype="float32") + + top_indices = np.where(points["x"] == cfg.DW) + down_indices = np.where(points["x"] == 0) + left_indices = np.where( + (points["y"] == 0) & (points["x"] != 0) & (points["x"] != cfg.DW) + ) + right_indices = np.where( + ((points["y"] == cfg.DL) & (points["x"] != 0) & (points["x"] != cfg.DW)) + ) + + # generate validation data + ( + test_top_data, + test_down_data, + test_left_data, + test_right_data, + test_interior_data, + ) = [ + { + "x": points["x"][indices_[0]], + "y": points["y"][indices_[0]], + "u": points["u"][indices_[0]], + "u_one": points["u_one"][indices_[0]], + "bc": points["bc"][indices_[0]], + "bc_data": points["bc_data"][indices_[0]], + } + for indices_ in ( + top_indices, + down_indices, + left_indices, + right_indices, + interior_indices, + ) + ] + + # set validator + top_down_label = {"chip": np.zeros([cfg.NL, 1], dtype="float32")} + left_right_label = {"chip": np.zeros([(cfg.NL - 2), 1], dtype="float32")} + interior_label = { + "thermal_condution": np.zeros( + [test_interior_data["x"].shape[0], 1], dtype="float32" + ) + } + top_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_top_data, + "label": 
top_down_label, + "weight": { + "chip": np.full([cfg.NL, 1], cfg.TRAIN.weight, dtype="float32") + }, + }, + "batch_size": cfg.NL, + }, + ppsci.loss.MSELoss("mean"), + output_expr={"chip": lambda out: out["T"] - out["u_one"]}, + metric={"MSE": ppsci.metric.MSE()}, + name="top_mse", + ) + down_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_down_data, + "label": top_down_label, + "weight": { + "chip": np.full([cfg.NL, 1], cfg.TRAIN.weight, dtype="float32") + }, + }, + "batch_size": cfg.NL, + }, + ppsci.loss.MSELoss("mean"), + output_expr={"chip": lambda out: out["T"] - out["u_one"]}, + metric={"MSE": ppsci.metric.MSE()}, + name="down_mse", + ) + left_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_left_data, + "label": left_right_label, + "weight": { + "chip": np.full([cfg.NL - 2, 1], cfg.TRAIN.weight, dtype="float32") + }, + }, + "batch_size": (cfg.NL - 2), + }, + ppsci.loss.MSELoss("mean"), + output_expr={"chip": lambda out: out["T"] - out["u_one"]}, + metric={"MSE": ppsci.metric.MSE()}, + name="left_mse", + ) + right_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_right_data, + "label": left_right_label, + "weight": { + "chip": np.full([cfg.NL - 2, 1], cfg.TRAIN.weight, dtype="float32") + }, + }, + "batch_size": (cfg.NL - 2), + }, + ppsci.loss.MSELoss("mean"), + output_expr={"chip": lambda out: out["T"] - out["u_one"]}, + metric={"MSE": ppsci.metric.MSE()}, + name="right_mse", + ) + interior_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_interior_data, + "label": interior_label, + }, + "batch_size": cfg.TRAIN.batch_size, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + "thermal_condution": lambda out: ( + hessian(out["T"], out["x"]) + hessian(out["T"], out["y"]) + ) + + 100 * out["u_one"] + }, + metric={"MSE": ppsci.metric.MSE()}, + name="interior_mse", + ) + validator = { + down_validator.name: down_validator, + left_validator.name: left_validator, + right_validator.name: right_validator, + top_validator.name: top_validator, + interior_validator.name: interior_validator, + } + + # directly evaluate pretrained model(optional) + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + # visualize prediction result + pred_points = geom["rect"].sample_interior(NPOINT, evenly=True) + pred_points["u"] = points["u"] + pred_points["bc_data"] = np.zeros_like(points["bc_data"]) + pred_points["bc"] = np.full((NPOINT, 1), cfg.EVAL.bc_type, dtype="float32") + pred = solver.predict(pred_points) + logger.message("Now saving visual result to: visual/result.vtu, please wait...") + ppsci.visualize.save_vtu_from_dict( + osp.join(cfg.output_dir, "visual/result.vtu"), + { + "x": pred_points["x"], + "y": pred_points["y"], + "T": pred["T"], + }, + ( + "x", + "y", + ), + ("T"), + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="chip_heat.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/chip_heat/conf/chip_heat.yaml b/examples/chip_heat/conf/chip_heat.yaml index 
edd1dd0eba..c6c85f0f93 100644 --- a/examples/chip_heat/conf/chip_heat.yaml +++ b/examples/chip_heat/conf/chip_heat.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -78,3 +79,84 @@ TRAIN: EVAL: pretrained_model_path: null bc_type: 0 # 0: Dirichlet, 1: Neumann, 2: convection,3: heat radiation +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_chip_heat/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +DL: 1.0 # lenth of the domain +DW: 1.0 # width of the domain +NL: 20 # number of length +NW: 20 # number of width +NU: 500 # number of heat source +NBC: 500 # number of BC + +# gaussian random field settings +GRF: + alpha: 4.0 + flag_normalize: true + +# model settings +MODEL: + branch_input_keys: ['u'] + BCtype_input_keys: ['bc'] + BC_input_keys: ['bc_data'] + trunk_input_keys: ["x", "y"] + output_keys: ["T"] + num_loc: 324 # 18*18 + bctype_loc: 1 + BC_num_loc: 76 + num_features: 400 + branch_num_layers: 9 + BC_num_layers: 9 + trunk_num_layers: 6 + branch_hidden_size: 256 + BC_hidden_size: 256 + trunk_hidden_size: 128 + branch_activation: "swish" + BC_activation: "swish" + trunk_activation: "swish" + use_bias: true + +# training settings +TRAIN: + epochs: 20000 + iters_per_epoch: 1 + eval_during_train: true + eval_freq: 1000 + learning_rate: 0.001 + batch_size: 1000 + weight: 500 + +# evaluation settings +EVAL: + pretrained_model_path: null + bc_type: 0 # 0: Dirichlet, 1: Neumann, 2: convection,3: heat radiation +>>>>>>> Stashed changes diff --git a/examples/control_arm/conf/forward_analysis.yaml b/examples/control_arm/conf/forward_analysis.yaml index cb60939b29..5ee693c382 100644 --- a/examples/control_arm/conf/forward_analysis.yaml +++ b/examples/control_arm/conf/forward_analysis.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -106,3 +107,114 @@ INFER: max_batch_size: 128 num_cpu_threads: 4 batch_size: 64 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_control_arm/forward/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - INFER.export_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 100 + +# set working condition +NU: 0.3 +E: 1 +# T: [0, 0, 0.0025] # +Z axis +T: [-0.0025, 0, 0] # -X axis + +# set geometry file path +GEOM_PATH: ./datasets/control_arm.stl + +# set geometry parameter +CIRCLE_LEFT_CENTER_XY: [-4.4, 0] +CIRCLE_LEFT_RADIUS: 
1.65 +CIRCLE_RIGHT_CENTER_XZ: [15.8, 0] +CIRCLE_RIGHT_RADIUS: 2.21 + +# model settings +MODEL: + disp_net: + input_keys: ["x", "y", "z"] + output_keys: ["u", "v", "w"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + stress_net: + input_keys: ["x", "y", "z"] + output_keys: ["sigma_xx", "sigma_yy", "sigma_zz", "sigma_xy", "sigma_xz", "sigma_yz"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + +# training settings +TRAIN: + epochs: 2000 + iters_per_epoch: 1000 + save_freq: 100 + eval_freq: 100 + eval_during_train: false + eval_with_no_grad: false + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-4 + gamma: 0.95 + decay_steps: 15000 + by_epoch: false + batch_size: + arm_left: 128 + arm_right: 256 + arm_surface: 4096 + arm_interior: 2048 + visualizer_vtu: 100000 + weight: + arm_right: {"u": 1, "v": 1, "w": 1} + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + eval_with_no_grad: true + pretrained_model_path: null + + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/control_arm/forward_x_axis_pretrained.pdparams + export_path: ./inference/forward_analysis + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 128 + num_cpu_threads: 4 + batch_size: 64 +>>>>>>> Stashed changes diff --git a/examples/control_arm/conf/inverse_parameter.yaml b/examples/control_arm/conf/inverse_parameter.yaml index 4d4652b9b6..43790e1f86 100644 --- a/examples/control_arm/conf/inverse_parameter.yaml +++ b/examples/control_arm/conf/inverse_parameter.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -117,3 +118,125 @@ INFER: max_batch_size: 128 num_cpu_threads: 4 batch_size: 64 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_control_arm/inverse/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - INFER.export_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 100 + +# set working condition +NU: 0.3 +E: 1 + +# set geometry file path +GEOM_PATH: ./datasets/control_arm.stl + +# set geometry parameter +CIRCLE_LEFT_CENTER_XY: [-4.4, 0] +CIRCLE_LEFT_RADIUS: 1.65 +CIRCLE_RIGHT_CENTER_XZ: [15.8, 0] +CIRCLE_RIGHT_RADIUS: 2.21 + +# model settings +MODEL: + disp_net: + input_keys: ["x", "y", "z"] + output_keys: ["u", "v", "w"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + stress_net: + input_keys: ["x", "y", "z"] + output_keys: ["sigma_xx", "sigma_yy", "sigma_zz", "sigma_xy", "sigma_xz", "sigma_yz"] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + inverse_lambda_net: + input_keys: ["x", "y", "z"] + output_keys: ["lambda_",] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + 
inverse_mu_net: + input_keys: ["x", "y", "z"] + output_keys: ["mu",] + num_layers: 6 + hidden_size: 512 + activation: "silu" + weight_norm: true + +# training settings +TRAIN: + epochs: 100 + iters_per_epoch: 100 + save_freq: 1 + eval_freq: 1 + eval_during_train: true + eval_with_no_grad: false + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-4 + gamma: 0.95 + decay_steps: 100 + by_epoch: false + batch_size: + arm_interior: 2000 + visualizer_vtu: 100000 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + total_size: + validator: 20000 + batch_size: + validator: 1024 + visualizer_vtu: 100000 + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/control_arm/inverse_x_axis_pretrained.pdparams + export_path: ./inference/inverse_parameter + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 128 + num_cpu_threads: 4 + batch_size: 64 +>>>>>>> Stashed changes diff --git a/examples/control_arm/forward_analysis.py b/examples/control_arm/forward_analysis.py index 31f138b6f9..373ca81dda 100644 --- a/examples/control_arm/forward_analysis.py +++ b/examples/control_arm/forward_analysis.py @@ -1,378 +1,378 @@ -from os import path as osp - -import hydra -import numpy as np -from omegaconf import DictConfig -from paddle import distributed as dist - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - # set parallel - enable_parallel = dist.get_world_size() > 1 - - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - # wrap to a model_list - model_list = ppsci.arch.ModelList((disp_net, stress_net)) - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model_list) - - # specify parameters - LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) - MU = cfg.E / (2 * (1 + cfg.NU)) - - # set equation - equation = { - "LinearElasticity": ppsci.equation.LinearElasticity( - E=None, nu=None, lambda_=LAMBDA_, mu=MU, dim=3 - ) - } - - # set geometry - control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) - geom = {"geo": control_arm} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds - - # set dataloader config - train_dataloader_cfg = { - "dataset": "NamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - arm_left_constraint = ppsci.constraint.BoundaryConstraint( - equation["LinearElasticity"].equations, - {"traction_x": cfg.T[0], "traction_y": cfg.T[1], "traction_z": cfg.T[2]}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.arm_left}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: np.sqrt( - np.square(x - cfg.CIRCLE_LEFT_CENTER_XY[0]) - + np.square(y - cfg.CIRCLE_LEFT_CENTER_XY[1]) - ) - <= cfg.CIRCLE_LEFT_RADIUS + 1e-1, - name="BC_LEFT", 
- ) - arm_right_constraint = ppsci.constraint.BoundaryConstraint( - {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, - {"u": 0, "v": 0, "w": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.arm_right}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: np.sqrt( - np.square(x - cfg.CIRCLE_RIGHT_CENTER_XZ[0]) - + np.square(z - cfg.CIRCLE_RIGHT_CENTER_XZ[1]) - ) - <= cfg.CIRCLE_RIGHT_RADIUS + 1e-1, - weight_dict=cfg.TRAIN.weight.arm_right, - name="BC_RIGHT", - ) - arm_surface_constraint = ppsci.constraint.BoundaryConstraint( - equation["LinearElasticity"].equations, - {"traction_x": 0, "traction_y": 0, "traction_z": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.arm_surface}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: np.sqrt( - np.square(x - cfg.CIRCLE_LEFT_CENTER_XY[0]) - + np.square(y - cfg.CIRCLE_LEFT_CENTER_XY[1]) - ) - > cfg.CIRCLE_LEFT_RADIUS + 1e-1, - name="BC_SURFACE", - ) - arm_interior_constraint = ppsci.constraint.InteriorConstraint( - equation["LinearElasticity"].equations, - { - "equilibrium_x": 0, - "equilibrium_y": 0, - "equilibrium_z": 0, - "stress_disp_xx": 0, - "stress_disp_yy": 0, - "stress_disp_zz": 0, - "stress_disp_xy": 0, - "stress_disp_xz": 0, - "stress_disp_yz": 0, - }, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.arm_interior}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - weight_dict={ - "equilibrium_x": "sdf", - "equilibrium_y": "sdf", - "equilibrium_z": "sdf", - "stress_disp_xx": "sdf", - "stress_disp_yy": "sdf", - "stress_disp_zz": "sdf", - "stress_disp_xy": "sdf", - "stress_disp_xz": "sdf", - "stress_disp_yz": "sdf", - }, - name="INTERIOR", - ) - - # re-assign to cfg.TRAIN.iters_per_epoch - if enable_parallel: - cfg.TRAIN.iters_per_epoch = len(arm_left_constraint.data_loader) - - # wrap constraints togetherg - constraint = { - arm_left_constraint.name: arm_left_constraint, - arm_right_constraint.name: arm_right_constraint, - arm_surface_constraint.name: arm_surface_constraint, - arm_interior_constraint.name: arm_interior_constraint, - } - - # set visualizer(optional) - # add inferencer data - samples = geom["geo"].sample_interior( - cfg.TRAIN.batch_size.visualizer_vtu, - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict = { - k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys - } - visualizer = { - "visulzie_u_v_w_sigmas": ppsci.visualize.VisualizerVtu( - pred_input_dict, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - "sigma_xx": lambda out: out["sigma_xx"], - "sigma_yy": lambda out: out["sigma_yy"], - "sigma_zz": lambda out: out["sigma_zz"], - "sigma_xy": lambda out: out["sigma_xy"], - "sigma_xz": lambda out: out["sigma_xz"], - "sigma_yz": lambda out: out["sigma_yz"], - }, - prefix="vis", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - seed=cfg.seed, - equation=equation, - geom=geom, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_freq=cfg.TRAIN.eval_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - 
eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, - visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - - # train model - solver.train() - - # plot losses - solver.plot_loss_history(by_epoch=True, smooth_step=1) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - # wrap to a model_list - model_list = ppsci.arch.ModelList((disp_net, stress_net)) - - # set geometry - control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) - # geometry bool operation - geo = control_arm - geom = {"geo": geo} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds - - # set visualizer(optional) - # add inferencer data - samples = geom["geo"].sample_interior( - cfg.TRAIN.batch_size.visualizer_vtu, - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict = { - k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys - } - visualizer = { - "visulzie_u_v_w_sigmas": ppsci.visualize.VisualizerVtu( - pred_input_dict, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - "sigma_xx": lambda out: out["sigma_xx"], - "sigma_yy": lambda out: out["sigma_yy"], - "sigma_zz": lambda out: out["sigma_zz"], - "sigma_xy": lambda out: out["sigma_xy"], - "sigma_xz": lambda out: out["sigma_xz"], - "sigma_yz": lambda out: out["sigma_yz"], - }, - prefix="vis", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - output_dir=cfg.output_dir, - seed=cfg.seed, - geom=geom, - log_freq=cfg.log_freq, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - - # visualize prediction after finished training - solver.visualize() - - -def export(cfg: DictConfig): - from paddle.static import InputSpec - - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - # wrap to a model_list - model_list = ppsci.arch.ModelList((disp_net, stress_net)) - - # load pretrained model - solver = ppsci.solver.Solver( - model=model_list, pretrained_model_path=cfg.INFER.pretrained_model_path - ) - - # export models - input_spec = [ - { - key: InputSpec([None, 1], "float32", name=key) - for key in cfg.MODEL.disp_net.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - from ppsci.visualize import vtu - - # set model predictor - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) - # geometry bool operation - geo = control_arm - geom = {"geo": geo} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds - - # set visualizer(optional) - # add inferencer data - samples = geom["geo"].sample_interior( - cfg.TRAIN.batch_size.visualizer_vtu, - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict = { - k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys - } - - output_dict = 
predictor.predict(pred_input_dict, cfg.INFER.batch_size) - - # mapping data to output_keys - output_keys = cfg.MODEL.disp_net.output_keys + cfg.MODEL.stress_net.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(output_keys, output_dict.keys()) - } - output_dict.update(pred_input_dict) - - vtu.save_vtu_from_dict( - osp.join(cfg.output_dir, "vis"), - output_dict, - cfg.MODEL.disp_net.input_keys, - output_keys, - 1, - ) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="forward_analysis.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +from os import path as osp + +import hydra +import numpy as np +from omegaconf import DictConfig +from paddle import distributed as dist + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + # set parallel + enable_parallel = dist.get_world_size() > 1 + + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + # wrap to a model_list + model_list = ppsci.arch.ModelList((disp_net, stress_net)) + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model_list) + + # specify parameters + LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) + MU = cfg.E / (2 * (1 + cfg.NU)) + + # set equation + equation = { + "LinearElasticity": ppsci.equation.LinearElasticity( + E=None, nu=None, lambda_=LAMBDA_, mu=MU, dim=3 + ) + } + + # set geometry + control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) + geom = {"geo": control_arm} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds + + # set dataloader config + train_dataloader_cfg = { + "dataset": "NamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + arm_left_constraint = ppsci.constraint.BoundaryConstraint( + equation["LinearElasticity"].equations, + {"traction_x": cfg.T[0], "traction_y": cfg.T[1], "traction_z": cfg.T[2]}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.arm_left}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: np.sqrt( + np.square(x - cfg.CIRCLE_LEFT_CENTER_XY[0]) + + np.square(y - cfg.CIRCLE_LEFT_CENTER_XY[1]) + ) + <= cfg.CIRCLE_LEFT_RADIUS + 1e-1, + name="BC_LEFT", + ) + arm_right_constraint = ppsci.constraint.BoundaryConstraint( + {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, + {"u": 0, "v": 0, "w": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.arm_right}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: np.sqrt( + np.square(x - cfg.CIRCLE_RIGHT_CENTER_XZ[0]) + + np.square(z - cfg.CIRCLE_RIGHT_CENTER_XZ[1]) + ) + <= cfg.CIRCLE_RIGHT_RADIUS + 1e-1, + weight_dict=cfg.TRAIN.weight.arm_right, + name="BC_RIGHT", + ) + arm_surface_constraint = ppsci.constraint.BoundaryConstraint( 
+ equation["LinearElasticity"].equations, + {"traction_x": 0, "traction_y": 0, "traction_z": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.arm_surface}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: np.sqrt( + np.square(x - cfg.CIRCLE_LEFT_CENTER_XY[0]) + + np.square(y - cfg.CIRCLE_LEFT_CENTER_XY[1]) + ) + > cfg.CIRCLE_LEFT_RADIUS + 1e-1, + name="BC_SURFACE", + ) + arm_interior_constraint = ppsci.constraint.InteriorConstraint( + equation["LinearElasticity"].equations, + { + "equilibrium_x": 0, + "equilibrium_y": 0, + "equilibrium_z": 0, + "stress_disp_xx": 0, + "stress_disp_yy": 0, + "stress_disp_zz": 0, + "stress_disp_xy": 0, + "stress_disp_xz": 0, + "stress_disp_yz": 0, + }, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.arm_interior}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + weight_dict={ + "equilibrium_x": "sdf", + "equilibrium_y": "sdf", + "equilibrium_z": "sdf", + "stress_disp_xx": "sdf", + "stress_disp_yy": "sdf", + "stress_disp_zz": "sdf", + "stress_disp_xy": "sdf", + "stress_disp_xz": "sdf", + "stress_disp_yz": "sdf", + }, + name="INTERIOR", + ) + + # re-assign to cfg.TRAIN.iters_per_epoch + if enable_parallel: + cfg.TRAIN.iters_per_epoch = len(arm_left_constraint.data_loader) + + # wrap constraints togetherg + constraint = { + arm_left_constraint.name: arm_left_constraint, + arm_right_constraint.name: arm_right_constraint, + arm_surface_constraint.name: arm_surface_constraint, + arm_interior_constraint.name: arm_interior_constraint, + } + + # set visualizer(optional) + # add inferencer data + samples = geom["geo"].sample_interior( + cfg.TRAIN.batch_size.visualizer_vtu, + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict = { + k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys + } + visualizer = { + "visulzie_u_v_w_sigmas": ppsci.visualize.VisualizerVtu( + pred_input_dict, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + "sigma_xx": lambda out: out["sigma_xx"], + "sigma_yy": lambda out: out["sigma_yy"], + "sigma_zz": lambda out: out["sigma_zz"], + "sigma_xy": lambda out: out["sigma_xy"], + "sigma_xz": lambda out: out["sigma_xz"], + "sigma_yz": lambda out: out["sigma_yz"], + }, + prefix="vis", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + seed=cfg.seed, + equation=equation, + geom=geom, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + eval_freq=cfg.TRAIN.eval_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, + visualizer=visualizer, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + + # train model + solver.train() + + # plot losses + solver.plot_loss_history(by_epoch=True, smooth_step=1) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + # wrap to a model_list + 
model_list = ppsci.arch.ModelList((disp_net, stress_net)) + + # set geometry + control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) + # geometry bool operation + geo = control_arm + geom = {"geo": geo} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds + + # set visualizer(optional) + # add inferencer data + samples = geom["geo"].sample_interior( + cfg.TRAIN.batch_size.visualizer_vtu, + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict = { + k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys + } + visualizer = { + "visulzie_u_v_w_sigmas": ppsci.visualize.VisualizerVtu( + pred_input_dict, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + "sigma_xx": lambda out: out["sigma_xx"], + "sigma_yy": lambda out: out["sigma_yy"], + "sigma_zz": lambda out: out["sigma_zz"], + "sigma_xy": lambda out: out["sigma_xy"], + "sigma_xz": lambda out: out["sigma_xz"], + "sigma_yz": lambda out: out["sigma_yz"], + }, + prefix="vis", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + output_dir=cfg.output_dir, + seed=cfg.seed, + geom=geom, + log_freq=cfg.log_freq, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + + # visualize prediction after finished training + solver.visualize() + + +def export(cfg: DictConfig): + from paddle.static import InputSpec + + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + # wrap to a model_list + model_list = ppsci.arch.ModelList((disp_net, stress_net)) + + # load pretrained model + solver = ppsci.solver.Solver( + model=model_list, pretrained_model_path=cfg.INFER.pretrained_model_path + ) + + # export models + input_spec = [ + { + key: InputSpec([None, 1], "float32", name=key) + for key in cfg.MODEL.disp_net.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + from ppsci.visualize import vtu + + # set model predictor + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) + # geometry bool operation + geo = control_arm + geom = {"geo": geo} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds + + # set visualizer(optional) + # add inferencer data + samples = geom["geo"].sample_interior( + cfg.TRAIN.batch_size.visualizer_vtu, + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict = { + k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys + } + + output_dict = predictor.predict(pred_input_dict, cfg.INFER.batch_size) + + # mapping data to output_keys + output_keys = cfg.MODEL.disp_net.output_keys + cfg.MODEL.stress_net.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(output_keys, output_dict.keys()) + } + output_dict.update(pred_input_dict) + + vtu.save_vtu_from_dict( + osp.join(cfg.output_dir, "vis"), + output_dict, + cfg.MODEL.disp_net.input_keys, + output_keys, + 1, + ) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="forward_analysis.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + 
train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/control_arm/inverse_parameter.py b/examples/control_arm/inverse_parameter.py index 77f17c4165..22d9c29b5e 100644 --- a/examples/control_arm/inverse_parameter.py +++ b/examples/control_arm/inverse_parameter.py @@ -1,367 +1,367 @@ -from os import path as osp - -import hydra -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - inverse_lambda_net = ppsci.arch.MLP(**cfg.MODEL.inverse_lambda_net) - inverse_mu_net = ppsci.arch.MLP(**cfg.MODEL.inverse_mu_net) - # freeze models - disp_net.freeze() - stress_net.freeze() - # wrap to a model_list - model = ppsci.arch.ModelList( - (disp_net, stress_net, inverse_lambda_net, inverse_mu_net) - ) - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)((inverse_lambda_net, inverse_mu_net)) - - # set equation - equation = { - "LinearElasticity": ppsci.equation.LinearElasticity( - E=None, nu=None, lambda_="lambda_", mu="mu", dim=3 - ) - } - - # set geometry - control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) - # geometry bool operation - geo = control_arm - geom = {"geo": geo} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds - - # set dataloader config - interior_constraint = ppsci.constraint.InteriorConstraint( - equation["LinearElasticity"].equations, - { - "stress_disp_xx": 0, - "stress_disp_yy": 0, - "stress_disp_zz": 0, - "stress_disp_xy": 0, - "stress_disp_xz": 0, - "stress_disp_yz": 0, - }, - geom["geo"], - { - "dataset": "NamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "num_workers": 1, - "batch_size": cfg.TRAIN.batch_size.arm_interior, - }, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - name="INTERIOR", - ) - constraint = {interior_constraint.name: interior_constraint} - - # set validator - LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) # 0.5769 - MU = cfg.E / (2 * (1 + cfg.NU)) # 0.3846 - geom_validator = ppsci.validate.GeometryValidator( - { - "lambda_": lambda out: out["lambda_"], - "mu": lambda out: out["mu"], - }, - { - "lambda_": LAMBDA_, - "mu": MU, - }, - geom["geo"], - { - "dataset": "NamedArrayDataset", - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "total_size": cfg.EVAL.total_size.validator, - "batch_size": cfg.EVAL.batch_size.validator, - }, - ppsci.loss.MSELoss("sum"), - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="geo_eval", - ) - validator = {geom_validator.name: geom_validator} - - # set visualizer(optional) - # add inferencer data - samples = geom["geo"].sample_interior( - cfg.TRAIN.batch_size.visualizer_vtu, - 
criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict = { - k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys - } - visualizer = { - "visulzie_lambda_mu": ppsci.visualize.VisualizerVtu( - pred_input_dict, - { - "lambda": lambda out: out["lambda_"], - "mu": lambda out: out["mu"], - }, - prefix="vis", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - seed=cfg.seed, - equation=equation, - geom=geom, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - eval_freq=cfg.TRAIN.eval_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - ) - - # train model - solver.train() - - # plot losses - solver.plot_loss_history(by_epoch=False, smooth_step=1, use_semilogy=True) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - inverse_lambda_net = ppsci.arch.MLP(**cfg.MODEL.inverse_lambda_net) - inverse_mu_net = ppsci.arch.MLP(**cfg.MODEL.inverse_mu_net) - # wrap to a model_list - model = ppsci.arch.ModelList( - (disp_net, stress_net, inverse_lambda_net, inverse_mu_net) - ) - - # set geometry - control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) - # geometry bool operation - geo = control_arm - geom = {"geo": geo} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds - - # set validator - LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) # 0.57692 - MU = cfg.E / (2 * (1 + cfg.NU)) # 0.38462 - geom_validator = ppsci.validate.GeometryValidator( - { - "lambda_": lambda out: out["lambda_"], - "mu": lambda out: out["mu"], - }, - { - "lambda_": LAMBDA_, - "mu": MU, - }, - geom["geo"], - { - "dataset": "NamedArrayDataset", - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "total_size": cfg.EVAL.total_size.validator, - "batch_size": cfg.EVAL.batch_size.validator, - }, - ppsci.loss.MSELoss("sum"), - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="geo_eval", - ) - validator = {geom_validator.name: geom_validator} - - # set visualizer(optional) - # add inferencer data - samples = geom["geo"].sample_interior( - cfg.EVAL.batch_size.visualizer_vtu, - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict = { - k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys - } - visualizer = { - "visulzie_lambda_mu": ppsci.visualize.VisualizerVtu( - pred_input_dict, - { - "lambda": lambda out: out["lambda_"], - "mu": lambda out: out["mu"], - }, - prefix="vis", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - seed=cfg.seed, - log_freq=cfg.log_freq, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - # evaluate after finished training - 
solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def export(cfg: DictConfig): - from paddle.static import InputSpec - - # set model - disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) - stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) - inverse_lambda_net = ppsci.arch.MLP(**cfg.MODEL.inverse_lambda_net) - inverse_mu_net = ppsci.arch.MLP(**cfg.MODEL.inverse_mu_net) - # wrap to a model_list - model = ppsci.arch.ModelList( - (disp_net, stress_net, inverse_lambda_net, inverse_mu_net) - ) - - # load pretrained model - solver = ppsci.solver.Solver( - model=model, pretrained_model_path=cfg.INFER.pretrained_model_path - ) - - # export models - input_spec = [ - { - key: InputSpec([None, 1], "float32", name=key) - for key in cfg.MODEL.disp_net.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - from ppsci.visualize import vtu - - # set model predictor - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) - # geometry bool operation - geo = control_arm - geom = {"geo": geo} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds - samples = geom["geo"].sample_interior( - cfg.EVAL.batch_size.visualizer_vtu, - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict = { - k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys - } - - output_dict = predictor.predict(pred_input_dict, cfg.INFER.batch_size) - - # mapping data to output_keys - output_keys = ( - cfg.MODEL.disp_net.output_keys - + cfg.MODEL.stress_net.output_keys - + cfg.MODEL.inverse_lambda_net.output_keys - + cfg.MODEL.inverse_mu_net.output_keys - ) - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(output_keys, output_dict.keys()) - } - output_dict.update(pred_input_dict) - vtu.save_vtu_from_dict( - osp.join(cfg.output_dir, "vis"), - output_dict, - cfg.MODEL.disp_net.input_keys, - output_keys, - 1, - ) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="inverse_parameter.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +from os import path as osp + +import hydra +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + inverse_lambda_net = ppsci.arch.MLP(**cfg.MODEL.inverse_lambda_net) + inverse_mu_net = ppsci.arch.MLP(**cfg.MODEL.inverse_mu_net) + # freeze models + disp_net.freeze() + stress_net.freeze() + # wrap to a model_list + model = ppsci.arch.ModelList( + (disp_net, stress_net, inverse_lambda_net, inverse_mu_net) + ) + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = 
ppsci.optimizer.Adam(lr_scheduler)((inverse_lambda_net, inverse_mu_net)) + + # set equation + equation = { + "LinearElasticity": ppsci.equation.LinearElasticity( + E=None, nu=None, lambda_="lambda_", mu="mu", dim=3 + ) + } + + # set geometry + control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) + # geometry bool operation + geo = control_arm + geom = {"geo": geo} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds + + # set dataloader config + interior_constraint = ppsci.constraint.InteriorConstraint( + equation["LinearElasticity"].equations, + { + "stress_disp_xx": 0, + "stress_disp_yy": 0, + "stress_disp_zz": 0, + "stress_disp_xy": 0, + "stress_disp_xz": 0, + "stress_disp_yz": 0, + }, + geom["geo"], + { + "dataset": "NamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "num_workers": 1, + "batch_size": cfg.TRAIN.batch_size.arm_interior, + }, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + name="INTERIOR", + ) + constraint = {interior_constraint.name: interior_constraint} + + # set validator + LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) # 0.5769 + MU = cfg.E / (2 * (1 + cfg.NU)) # 0.3846 + geom_validator = ppsci.validate.GeometryValidator( + { + "lambda_": lambda out: out["lambda_"], + "mu": lambda out: out["mu"], + }, + { + "lambda_": LAMBDA_, + "mu": MU, + }, + geom["geo"], + { + "dataset": "NamedArrayDataset", + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "total_size": cfg.EVAL.total_size.validator, + "batch_size": cfg.EVAL.batch_size.validator, + }, + ppsci.loss.MSELoss("sum"), + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="geo_eval", + ) + validator = {geom_validator.name: geom_validator} + + # set visualizer(optional) + # add inferencer data + samples = geom["geo"].sample_interior( + cfg.TRAIN.batch_size.visualizer_vtu, + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict = { + k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys + } + visualizer = { + "visulzie_lambda_mu": ppsci.visualize.VisualizerVtu( + pred_input_dict, + { + "lambda": lambda out: out["lambda_"], + "mu": lambda out: out["mu"], + }, + prefix="vis", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + seed=cfg.seed, + equation=equation, + geom=geom, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + eval_freq=cfg.TRAIN.eval_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_with_no_grad=cfg.TRAIN.eval_with_no_grad, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + ) + + # train model + solver.train() + + # plot losses + solver.plot_loss_history(by_epoch=False, smooth_step=1, use_semilogy=True) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + 
inverse_lambda_net = ppsci.arch.MLP(**cfg.MODEL.inverse_lambda_net) + inverse_mu_net = ppsci.arch.MLP(**cfg.MODEL.inverse_mu_net) + # wrap to a model_list + model = ppsci.arch.ModelList( + (disp_net, stress_net, inverse_lambda_net, inverse_mu_net) + ) + + # set geometry + control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) + # geometry bool operation + geo = control_arm + geom = {"geo": geo} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds + + # set validator + LAMBDA_ = cfg.NU * cfg.E / ((1 + cfg.NU) * (1 - 2 * cfg.NU)) # 0.57692 + MU = cfg.E / (2 * (1 + cfg.NU)) # 0.38462 + geom_validator = ppsci.validate.GeometryValidator( + { + "lambda_": lambda out: out["lambda_"], + "mu": lambda out: out["mu"], + }, + { + "lambda_": LAMBDA_, + "mu": MU, + }, + geom["geo"], + { + "dataset": "NamedArrayDataset", + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "total_size": cfg.EVAL.total_size.validator, + "batch_size": cfg.EVAL.batch_size.validator, + }, + ppsci.loss.MSELoss("sum"), + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="geo_eval", + ) + validator = {geom_validator.name: geom_validator} + + # set visualizer(optional) + # add inferencer data + samples = geom["geo"].sample_interior( + cfg.EVAL.batch_size.visualizer_vtu, + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict = { + k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys + } + visualizer = { + "visulzie_lambda_mu": ppsci.visualize.VisualizerVtu( + pred_input_dict, + { + "lambda": lambda out: out["lambda_"], + "mu": lambda out: out["mu"], + }, + prefix="vis", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + seed=cfg.seed, + log_freq=cfg.log_freq, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def export(cfg: DictConfig): + from paddle.static import InputSpec + + # set model + disp_net = ppsci.arch.MLP(**cfg.MODEL.disp_net) + stress_net = ppsci.arch.MLP(**cfg.MODEL.stress_net) + inverse_lambda_net = ppsci.arch.MLP(**cfg.MODEL.inverse_lambda_net) + inverse_mu_net = ppsci.arch.MLP(**cfg.MODEL.inverse_mu_net) + # wrap to a model_list + model = ppsci.arch.ModelList( + (disp_net, stress_net, inverse_lambda_net, inverse_mu_net) + ) + + # load pretrained model + solver = ppsci.solver.Solver( + model=model, pretrained_model_path=cfg.INFER.pretrained_model_path + ) + + # export models + input_spec = [ + { + key: InputSpec([None, 1], "float32", name=key) + for key in cfg.MODEL.disp_net.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + from ppsci.visualize import vtu + + # set model predictor + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + control_arm = ppsci.geometry.Mesh(cfg.GEOM_PATH) + # geometry bool operation + geo = control_arm + geom = {"geo": geo} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = control_arm.bounds + samples = geom["geo"].sample_interior( + cfg.EVAL.batch_size.visualizer_vtu, + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] 
< z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict = { + k: v for k, v in samples.items() if k in cfg.MODEL.disp_net.input_keys + } + + output_dict = predictor.predict(pred_input_dict, cfg.INFER.batch_size) + + # mapping data to output_keys + output_keys = ( + cfg.MODEL.disp_net.output_keys + + cfg.MODEL.stress_net.output_keys + + cfg.MODEL.inverse_lambda_net.output_keys + + cfg.MODEL.inverse_mu_net.output_keys + ) + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(output_keys, output_dict.keys()) + } + output_dict.update(pred_input_dict) + vtu.save_vtu_from_dict( + osp.join(cfg.output_dir, "vis"), + output_dict, + cfg.MODEL.disp_net.input_keys, + output_keys, + 1, + ) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="inverse_parameter.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml index 0c92c38550..e2c68b1266 100644 --- a/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml +++ b/examples/cylinder/2d_unsteady/conf/cylinder2d_unsteady_Re100.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -89,3 +90,95 @@ INFER: max_batch_size: 10240 num_cpu_threads: 10 batch_size: 10240 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_cylinder2d_unsteady/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# set constraint data path +DOMAIN_TRAIN_PATH: ./datasets/domain_train.csv +DOMAIN_INLET_CYLINDER_PATH: ./datasets/domain_inlet_cylinder.csv +DOMAIN_OUTLET_PATH: ./datasets/domain_outlet.csv +IC0_1_PATH: ./datasets/initial/ic0.1.csv +PROBE1_50_PATH: ./datasets/probe/probe1_50.csv + +# set validator data path +DOMAIN_EVAL_PATH: ./datasets/domain_eval.csv + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +VISCOSITY: 0.02 +DENSITY: 1.0 + +# timestamps +TIME_START: 1 +TIME_END: 50 +NUM_TIMESTAMPS: 50 +TRAIN_NUM_TIMESTAMPS: 30 + +NPOINT_PDE: 9420 +NPOINT_INLET_CYLINDER: 161 +NPOINT_OUTLET: 81 + +# model settings +MODEL: + input_keys: ["t", "x", "y"] + output_keys: ["u", "v", "p"] + num_layers: 5 + hidden_size: 50 + activation: "tanh" + +# training settings +TRAIN: + iters_per_epoch: 1 + epochs: 40000 + eval_freq: 400 + learning_rate: 0.001 + eval_during_train: true + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 10240 + pretrained_model_path: null + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/cylinder2d_unsteady_Re100/cylinder2d_unsteady_Re100_pretrained.pdparams + export_path: ./inference/cylinder2d_unsteady_Re100 + pdmodel_path: 
${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 10240 + num_cpu_threads: 10 + batch_size: 10240 +>>>>>>> Stashed changes diff --git a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py index 3796460901..89a34dae01 100644 --- a/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py +++ b/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py @@ -1,388 +1,388 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import hydra -import numpy as np -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger -from ppsci.utils import reader - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = { - "NavierStokes": ppsci.equation.NavierStokes(cfg.VISCOSITY, cfg.DENSITY, 2, True) - } - - # set timestamps - train_timestamps = np.linspace( - cfg.TIME_START, cfg.TIME_END, cfg.NUM_TIMESTAMPS, endpoint=True - ).astype("float32") - train_timestamps = np.random.choice(train_timestamps, cfg.TRAIN_NUM_TIMESTAMPS) - train_timestamps.sort() - t0 = np.array([cfg.TIME_START], dtype="float32") - - val_timestamps = np.linspace( - cfg.TIME_START, cfg.TIME_END, cfg.NUM_TIMESTAMPS, endpoint=True - ).astype("float32") - - logger.message(f"train_timestamps: {train_timestamps.tolist()}") - logger.message(f"val_timestamps: {val_timestamps.tolist()}") - - # set time-geometry - geom = { - "time_rect": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain( - cfg.TIME_START, - cfg.TIME_END, - timestamps=np.concatenate((t0, train_timestamps), axis=0), - ), - ppsci.geometry.PointCloud( - reader.load_csv_file( - cfg.DOMAIN_TRAIN_PATH, - ("x", "y"), - alias_dict={"x": "Points:0", "y": "Points:1"}, - ), - ("x", "y"), - ), - ), - "time_rect_eval": ppsci.geometry.PointCloud( - reader.load_csv_file( - cfg.DOMAIN_EVAL_PATH, - ("t", "x", "y"), - ), - ("t", "x", "y"), - ), - } - - # pde/bc/sup constraint use t1~tn, initial constraint use t0 - NTIME_PDE = len(train_timestamps) - ALIAS_DICT = {"x": "Points:0", "y": "Points:1", "u": "U:0", "v": "U:1"} - - # set constraint - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["time_rect"], - { - "dataset": "IterableNamedArrayDataset", - "batch_size": cfg.NPOINT_PDE * NTIME_PDE, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean"), - name="EQ", - ) - bc_inlet_cylinder = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - 
"name": "IterableCSVDataset", - "file_path": cfg.DOMAIN_INLET_CYLINDER_PATH, - "input_keys": ("x", "y"), - "label_keys": ("u", "v"), - "alias_dict": ALIAS_DICT, - "weight_dict": {"u": 10, "v": 10}, - "timestamps": train_timestamps, - }, - }, - ppsci.loss.MSELoss("mean"), - name="BC_inlet_cylinder", - ) - bc_outlet = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableCSVDataset", - "file_path": cfg.DOMAIN_OUTLET_PATH, - "input_keys": ("x", "y"), - "label_keys": ("p",), - "alias_dict": ALIAS_DICT, - "timestamps": train_timestamps, - }, - }, - ppsci.loss.MSELoss("mean"), - name="BC_outlet", - ) - ic = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableCSVDataset", - "file_path": cfg.IC0_1_PATH, - "input_keys": ("x", "y"), - "label_keys": ("u", "v", "p"), - "alias_dict": ALIAS_DICT, - "weight_dict": {"u": 10, "v": 10, "p": 10}, - "timestamps": t0, - }, - }, - ppsci.loss.MSELoss("mean"), - name="IC", - ) - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableCSVDataset", - "file_path": cfg.PROBE1_50_PATH, - "input_keys": ("t", "x", "y"), - "label_keys": ("u", "v"), - "alias_dict": ALIAS_DICT, - "weight_dict": {"u": 10, "v": 10}, - "timestamps": train_timestamps, - }, - }, - ppsci.loss.MSELoss("mean"), - name="Sup", - ) - - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - bc_inlet_cylinder.name: bc_inlet_cylinder, - bc_outlet.name: bc_outlet, - ic.name: ic, - sup_constraint.name: sup_constraint, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - NPOINT_EVAL = ( - cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET - ) * cfg.NUM_TIMESTAMPS - residual_validator = ppsci.validate.GeometryValidator( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["time_rect_eval"], - { - "dataset": "NamedArrayDataset", - "total_size": NPOINT_EVAL, - "batch_size": cfg.EVAL.batch_size, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("mean"), - metric={"MSE": ppsci.metric.MSE()}, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # set visualizer(optional) - vis_points = geom["time_rect_eval"].sample_interior( - (cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET) - * cfg.NUM_TIMESTAMPS, - evenly=True, - ) - visualizer = { - "visualize_u_v_p": ppsci.visualize.VisualizerVtu( - vis_points, - {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, - num_timestamps=cfg.NUM_TIMESTAMPS, - prefix="result_u_v_p", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = { - "NavierStokes": 
ppsci.equation.NavierStokes(cfg.VISCOSITY, cfg.DENSITY, 2, True) - } - - # set timestamps - val_timestamps = np.linspace( - cfg.TIME_START, cfg.TIME_END, cfg.NUM_TIMESTAMPS, endpoint=True - ).astype("float32") - - logger.message(f"val_timestamps: {val_timestamps.tolist()}") - - # set time-geometry - geom = { - "time_rect_eval": ppsci.geometry.PointCloud( - reader.load_csv_file( - cfg.DOMAIN_EVAL_PATH, - ("t", "x", "y"), - ), - ("t", "x", "y"), - ), - } - - # set validator - NPOINT_EVAL = ( - cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET - ) * cfg.NUM_TIMESTAMPS - residual_validator = ppsci.validate.GeometryValidator( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["time_rect_eval"], - { - "dataset": "NamedArrayDataset", - "total_size": NPOINT_EVAL, - "batch_size": cfg.EVAL.batch_size, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("mean"), - metric={"MSE": ppsci.metric.MSE()}, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # set visualizer(optional) - vis_points = geom["time_rect_eval"].sample_interior( - (cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET) - * cfg.NUM_TIMESTAMPS, - evenly=True, - ) - visualizer = { - "visualize_u_v_p": ppsci.visualize.VisualizerVtu( - vis_points, - {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, - num_timestamps=cfg.NUM_TIMESTAMPS, - prefix="result_u_v_p", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - geom=geom, - output_dir=cfg.output_dir, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - # evaluate - solver.eval() - # visualize prediction - solver.visualize() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - # set time-geometry - geom = { - "time_rect_eval": ppsci.geometry.PointCloud( - reader.load_csv_file( - cfg.DOMAIN_EVAL_PATH, - ("t", "x", "y"), - ), - ("t", "x", "y"), - ), - } - NPOINT_EVAL = ( - cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET - ) * cfg.NUM_TIMESTAMPS - input_dict = geom["time_rect_eval"].sample_interior(NPOINT_EVAL, evenly=True) - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - ppsci.visualize.save_vtu_from_dict( - "./cylinder2d_unsteady_Re100_pred.vtu", - {**input_dict, **output_dict}, - input_dict.keys(), - cfg.MODEL.output_keys, - cfg.NUM_TIMESTAMPS, - ) - - -@hydra.main( - version_base=None, - config_path="./conf", - config_name="cylinder2d_unsteady_Re100.yaml", -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" 
- ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp + +import hydra +import numpy as np +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger +from ppsci.utils import reader + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = { + "NavierStokes": ppsci.equation.NavierStokes(cfg.VISCOSITY, cfg.DENSITY, 2, True) + } + + # set timestamps + train_timestamps = np.linspace( + cfg.TIME_START, cfg.TIME_END, cfg.NUM_TIMESTAMPS, endpoint=True + ).astype("float32") + train_timestamps = np.random.choice(train_timestamps, cfg.TRAIN_NUM_TIMESTAMPS) + train_timestamps.sort() + t0 = np.array([cfg.TIME_START], dtype="float32") + + val_timestamps = np.linspace( + cfg.TIME_START, cfg.TIME_END, cfg.NUM_TIMESTAMPS, endpoint=True + ).astype("float32") + + logger.message(f"train_timestamps: {train_timestamps.tolist()}") + logger.message(f"val_timestamps: {val_timestamps.tolist()}") + + # set time-geometry + geom = { + "time_rect": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain( + cfg.TIME_START, + cfg.TIME_END, + timestamps=np.concatenate((t0, train_timestamps), axis=0), + ), + ppsci.geometry.PointCloud( + reader.load_csv_file( + cfg.DOMAIN_TRAIN_PATH, + ("x", "y"), + alias_dict={"x": "Points:0", "y": "Points:1"}, + ), + ("x", "y"), + ), + ), + "time_rect_eval": ppsci.geometry.PointCloud( + reader.load_csv_file( + cfg.DOMAIN_EVAL_PATH, + ("t", "x", "y"), + ), + ("t", "x", "y"), + ), + } + + # pde/bc/sup constraint use t1~tn, initial constraint use t0 + NTIME_PDE = len(train_timestamps) + ALIAS_DICT = {"x": "Points:0", "y": "Points:1", "u": "U:0", "v": "U:1"} + + # set constraint + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["time_rect"], + { + "dataset": "IterableNamedArrayDataset", + "batch_size": cfg.NPOINT_PDE * NTIME_PDE, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean"), + name="EQ", + ) + bc_inlet_cylinder = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableCSVDataset", + "file_path": cfg.DOMAIN_INLET_CYLINDER_PATH, + "input_keys": ("x", "y"), + "label_keys": ("u", "v"), + "alias_dict": ALIAS_DICT, + "weight_dict": {"u": 10, "v": 10}, + "timestamps": train_timestamps, + }, + }, + ppsci.loss.MSELoss("mean"), + name="BC_inlet_cylinder", + ) + bc_outlet = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableCSVDataset", + "file_path": cfg.DOMAIN_OUTLET_PATH, + "input_keys": ("x", "y"), + "label_keys": ("p",), + "alias_dict": ALIAS_DICT, + "timestamps": train_timestamps, + }, + }, + 
ppsci.loss.MSELoss("mean"), + name="BC_outlet", + ) + ic = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableCSVDataset", + "file_path": cfg.IC0_1_PATH, + "input_keys": ("x", "y"), + "label_keys": ("u", "v", "p"), + "alias_dict": ALIAS_DICT, + "weight_dict": {"u": 10, "v": 10, "p": 10}, + "timestamps": t0, + }, + }, + ppsci.loss.MSELoss("mean"), + name="IC", + ) + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableCSVDataset", + "file_path": cfg.PROBE1_50_PATH, + "input_keys": ("t", "x", "y"), + "label_keys": ("u", "v"), + "alias_dict": ALIAS_DICT, + "weight_dict": {"u": 10, "v": 10}, + "timestamps": train_timestamps, + }, + }, + ppsci.loss.MSELoss("mean"), + name="Sup", + ) + + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + bc_inlet_cylinder.name: bc_inlet_cylinder, + bc_outlet.name: bc_outlet, + ic.name: ic, + sup_constraint.name: sup_constraint, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + NPOINT_EVAL = ( + cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET + ) * cfg.NUM_TIMESTAMPS + residual_validator = ppsci.validate.GeometryValidator( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["time_rect_eval"], + { + "dataset": "NamedArrayDataset", + "total_size": NPOINT_EVAL, + "batch_size": cfg.EVAL.batch_size, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("mean"), + metric={"MSE": ppsci.metric.MSE()}, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # set visualizer(optional) + vis_points = geom["time_rect_eval"].sample_interior( + (cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET) + * cfg.NUM_TIMESTAMPS, + evenly=True, + ) + visualizer = { + "visualize_u_v_p": ppsci.visualize.VisualizerVtu( + vis_points, + {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, + num_timestamps=cfg.NUM_TIMESTAMPS, + prefix="result_u_v_p", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = { + "NavierStokes": ppsci.equation.NavierStokes(cfg.VISCOSITY, cfg.DENSITY, 2, True) + } + + # set timestamps + val_timestamps = np.linspace( + cfg.TIME_START, cfg.TIME_END, cfg.NUM_TIMESTAMPS, endpoint=True + ).astype("float32") + + logger.message(f"val_timestamps: {val_timestamps.tolist()}") + + # set time-geometry + geom = { + "time_rect_eval": ppsci.geometry.PointCloud( + reader.load_csv_file( + cfg.DOMAIN_EVAL_PATH, + ("t", "x", "y"), + ), + ("t", "x", "y"), + ), + } + + # set validator + NPOINT_EVAL = ( + cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET + ) * cfg.NUM_TIMESTAMPS + residual_validator 
= ppsci.validate.GeometryValidator( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["time_rect_eval"], + { + "dataset": "NamedArrayDataset", + "total_size": NPOINT_EVAL, + "batch_size": cfg.EVAL.batch_size, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("mean"), + metric={"MSE": ppsci.metric.MSE()}, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # set visualizer(optional) + vis_points = geom["time_rect_eval"].sample_interior( + (cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET) + * cfg.NUM_TIMESTAMPS, + evenly=True, + ) + visualizer = { + "visualize_u_v_p": ppsci.visualize.VisualizerVtu( + vis_points, + {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, + num_timestamps=cfg.NUM_TIMESTAMPS, + prefix="result_u_v_p", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + geom=geom, + output_dir=cfg.output_dir, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + # evaluate + solver.eval() + # visualize prediction + solver.visualize() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + # set time-geometry + geom = { + "time_rect_eval": ppsci.geometry.PointCloud( + reader.load_csv_file( + cfg.DOMAIN_EVAL_PATH, + ("t", "x", "y"), + ), + ("t", "x", "y"), + ), + } + NPOINT_EVAL = ( + cfg.NPOINT_PDE + cfg.NPOINT_INLET_CYLINDER + cfg.NPOINT_OUTLET + ) * cfg.NUM_TIMESTAMPS + input_dict = geom["time_rect_eval"].sample_interior(NPOINT_EVAL, evenly=True) + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + ppsci.visualize.save_vtu_from_dict( + "./cylinder2d_unsteady_Re100_pred.vtu", + {**input_dict, **output_dict}, + input_dict.keys(), + cfg.MODEL.output_keys, + cfg.NUM_TIMESTAMPS, + ) + + +@hydra.main( + version_base=None, + config_path="./conf", + config_name="cylinder2d_unsteady_Re100.yaml", +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/cylinder/2d_unsteady/download_dataset.py b/examples/cylinder/2d_unsteady/download_dataset.py index 0e002bf09e..20f27f339d 100644 --- a/examples/cylinder/2d_unsteady/download_dataset.py +++ b/examples/cylinder/2d_unsteady/download_dataset.py @@ -1,27 +1,27 @@ -# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""This *.py file is an example of downloading data of the 2d-unsteady-cylinder–flow case""" -import os -import tarfile - -import wget - -DATASETS = "https://paddle-org.bj.bcebos.com/paddlescience/datasets/cylinder2d_unsteady_Re100/cylinder2d_unsteady_Re100_dataset.tar" - -dirname = os.path.dirname(os.path.abspath(__file__)) + "/" -print("* Running [download_dataset.py]") -wget.download(DATASETS, out=dirname) - -with tarfile.open("cylinder2d_unsteady_Re100_dataset.tar") as tar: - tar.extractall() +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This *.py file is an example of downloading data of the 2d-unsteady-cylinder–flow case""" +import os +import tarfile + +import wget + +DATASETS = "https://paddle-org.bj.bcebos.com/paddlescience/datasets/cylinder2d_unsteady_Re100/cylinder2d_unsteady_Re100_dataset.tar" + +dirname = os.path.dirname(os.path.abspath(__file__)) + "/" +print("* Running [download_dataset.py]") +wget.download(DATASETS, out=dirname) + +with tarfile.open("cylinder2d_unsteady_Re100_dataset.tar") as tar: + tar.extractall() diff --git a/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml b/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml index dcf781294b..2d0615007a 100644 --- a/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml +++ b/examples/cylinder/2d_unsteady/transformer_physx/conf/enn.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -52,3 +53,58 @@ TRAIN: EVAL: batch_size: 8 pretrained_model_path: null +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_cylinder2d_unsteady_transformer_physx_enn + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +TRAIN_BLOCK_SIZE: 4 +VALID_BLOCK_SIZE: 32 +TRAIN_FILE_PATH: ./datasets/cylinder_training.hdf5 +VALID_FILE_PATH: ./datasets/cylinder_valid.hdf5 + +# model settings +MODEL: + input_keys: ["states", "visc"] + output_keys: ["pred_states", "recover_states"] + +# training settings +TRAIN: + epochs: 300 + batch_size: 64 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.001 + gamma: 0.995 + by_epoch: true + optimizer: + 
weight_decay: 1e-8 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 8 + pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml b/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml index 8dd2db6abe..12413f57d2 100644 --- a/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml +++ b/examples/cylinder/2d_unsteady/transformer_physx/conf/transformer.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -91,3 +92,88 @@ INFER: max_batch_size: 64 num_cpu_threads: 4 batch_size: 16 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_cylinder2d_unsteady_transformer_physx_transformer/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + - EMBEDDING_MODEL_PATH + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +TRAIN_BLOCK_SIZE: 16 +VALID_BLOCK_SIZE: 256 +TRAIN_FILE_PATH: ./datasets/cylinder_training.hdf5 +VALID_FILE_PATH: ./datasets/cylinder_valid.hdf5 +log_freq: 20 + +# set working condition +EMBEDDING_MODEL_PATH: ./outputs_cylinder2d_unsteady_transformer_physx_enn/checkpoints/latest +VIS_DATA_NUMS: 1 + +# model settings +MODEL: + input_keys: ["embeds"] + output_keys: ["pred_embeds"] + num_layers: 6 + num_ctx: 16 + embed_size: 128 + num_heads: 4 + +# training settings +TRAIN: + epochs: 200 + batch_size: 4 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.001 + T_0: 14 + T_mult: 2 + eta_min: 1.0e-9 + optimizer: + weight_decay: 1.0e-8 + eval_during_train: true + eval_freq: 50 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 16 + pretrained_model_path: null + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/cylinder/cylinder_transformer_pretrained.pdparams + export_path: ./inference/cylinder_transformer + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: false + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 64 + num_cpu_threads: 4 + batch_size: 16 +>>>>>>> Stashed changes diff --git a/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py b/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py index 021f6d0d4b..38f55aa92b 100644 --- a/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py +++ b/examples/cylinder/2d_unsteady/transformer_physx/train_enn.py @@ -1,278 +1,278 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Two-stage training -# 1. Train a embedding model by running train_enn.py. -# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. - -# This file is for step1: training a embedding model. -# This file is based on PaddleScience/ppsci API. -from os import path as osp - -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def get_mean_std(data: np.ndarray, visc: np.ndarray): - mean = np.asarray( - [ - np.mean(data[:, :, 0]), - np.mean(data[:, :, 1]), - np.mean(data[:, :, 2]), - np.mean(visc), - ] - ).reshape(1, 4, 1, 1) - std = np.asarray( - [ - np.std(data[:, :, 0]), - np.std(data[:, :, 1]), - np.std(data[:, :, 2]), - np.std(visc), - ] - ).reshape(1, 4, 1, 1) - return mean, std - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - weights = (10.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 10.0 * cfg.TRAIN_BLOCK_SIZE) - regularization_key = "k_matrix" - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "CylinderDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 16, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELossWithL2Decay( - regularization_dict={ - regularization_key: 1.0e-2 * (cfg.TRAIN_BLOCK_SIZE - 1) - } - ), - { - key: lambda out, k=key: out[k] - for key in cfg.MODEL.output_keys + (regularization_key,) - }, - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - - # manually init model - data_mean, data_std = get_mean_std( - sup_constraint.data_loader.dataset.data, sup_constraint.data_loader.dataset.visc - ) - model = ppsci.arch.CylinderEmbedding( - cfg.MODEL.input_keys, - cfg.MODEL.output_keys + (regularization_key,), - data_mean, - data_std, - ) - - # init optimizer and lr scheduler - clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - iters_per_epoch=ITERS_PER_EPOCH, - decay_steps=ITERS_PER_EPOCH, - **cfg.TRAIN.lr_scheduler, - )() - optimizer = ppsci.optimizer.Adam( - lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer - )(model) - - # manually build validator - weights = (10.0 * (cfg.VALID_BLOCK_SIZE - 1), 10.0 * cfg.VALID_BLOCK_SIZE) - eval_dataloader_cfg = { - "dataset": { - "name": "CylinderDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": 
cfg.VALID_BLOCK_SIZE, - "stride": 32, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, - eval_freq=50, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - weights = (10.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 10.0 * cfg.TRAIN_BLOCK_SIZE) - regularization_key = "k_matrix" - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "CylinderDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 16, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELossWithL2Decay( - regularization_dict={ - regularization_key: 1.0e-2 * (cfg.TRAIN_BLOCK_SIZE - 1) - } - ), - { - key: lambda out, k=key: out[k] - for key in cfg.MODEL.output_keys + (regularization_key,) - }, - name="Sup", - ) - - # manually init model - data_mean, data_std = get_mean_std( - sup_constraint.data_loader.dataset.data, sup_constraint.data_loader.dataset.visc - ) - model = ppsci.arch.CylinderEmbedding( - cfg.MODEL.input_keys, - cfg.MODEL.output_keys + (regularization_key,), - data_mean, - data_std, - ) - - # manually build validator - weights = (10.0 * (cfg.VALID_BLOCK_SIZE - 1), 10.0 * cfg.VALID_BLOCK_SIZE) - eval_dataloader_cfg = { - "dataset": { - "name": "CylinderDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 32, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="enn.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got 
'{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Two-stage training +# 1. Train a embedding model by running train_enn.py. +# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. + +# This file is for step1: training a embedding model. +# This file is based on PaddleScience/ppsci API. +from os import path as osp + +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def get_mean_std(data: np.ndarray, visc: np.ndarray): + mean = np.asarray( + [ + np.mean(data[:, :, 0]), + np.mean(data[:, :, 1]), + np.mean(data[:, :, 2]), + np.mean(visc), + ] + ).reshape(1, 4, 1, 1) + std = np.asarray( + [ + np.std(data[:, :, 0]), + np.std(data[:, :, 1]), + np.std(data[:, :, 2]), + np.std(visc), + ] + ).reshape(1, 4, 1, 1) + return mean, std + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + weights = (10.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 10.0 * cfg.TRAIN_BLOCK_SIZE) + regularization_key = "k_matrix" + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "CylinderDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 16, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELossWithL2Decay( + regularization_dict={ + regularization_key: 1.0e-2 * (cfg.TRAIN_BLOCK_SIZE - 1) + } + ), + { + key: lambda out, k=key: out[k] + for key in cfg.MODEL.output_keys + (regularization_key,) + }, + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + + # manually init model + data_mean, data_std = get_mean_std( + sup_constraint.data_loader.dataset.data, sup_constraint.data_loader.dataset.visc + ) + model = ppsci.arch.CylinderEmbedding( + cfg.MODEL.input_keys, + cfg.MODEL.output_keys + (regularization_key,), + data_mean, + data_std, + ) + + # init optimizer and lr scheduler + clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + iters_per_epoch=ITERS_PER_EPOCH, + decay_steps=ITERS_PER_EPOCH, + **cfg.TRAIN.lr_scheduler, + )() + optimizer = ppsci.optimizer.Adam( + lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer + )(model) + + # manually build validator + weights = (10.0 * 
(cfg.VALID_BLOCK_SIZE - 1), 10.0 * cfg.VALID_BLOCK_SIZE) + eval_dataloader_cfg = { + "dataset": { + "name": "CylinderDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 32, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=True, + eval_freq=50, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + weights = (10.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 10.0 * cfg.TRAIN_BLOCK_SIZE) + regularization_key = "k_matrix" + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "CylinderDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 16, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELossWithL2Decay( + regularization_dict={ + regularization_key: 1.0e-2 * (cfg.TRAIN_BLOCK_SIZE - 1) + } + ), + { + key: lambda out, k=key: out[k] + for key in cfg.MODEL.output_keys + (regularization_key,) + }, + name="Sup", + ) + + # manually init model + data_mean, data_std = get_mean_std( + sup_constraint.data_loader.dataset.data, sup_constraint.data_loader.dataset.visc + ) + model = ppsci.arch.CylinderEmbedding( + cfg.MODEL.input_keys, + cfg.MODEL.output_keys + (regularization_key,), + data_mean, + data_std, + ) + + # manually build validator + weights = (10.0 * (cfg.VALID_BLOCK_SIZE - 1), 10.0 * cfg.VALID_BLOCK_SIZE) + eval_dataloader_cfg = { + "dataset": { + "name": "CylinderDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 32, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + + 
+@hydra.main(version_base=None, config_path="./conf", config_name="enn.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py b/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py index 34eb6c288b..133a7497a8 100644 --- a/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py +++ b/examples/cylinder/2d_unsteady/transformer_physx/train_transformer.py @@ -1,356 +1,356 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Two-stage training -# 1. Train a embedding model by running train_enn.py. -# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. - -# This file is for step2: training a transformer model, based on frozen pretrained embedding model. -# This file is based on PaddleScience/ppsci API. -from os import path as osp -from typing import Dict - -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.arch import base -from ppsci.utils import logger -from ppsci.utils import save_load - - -def build_embedding_model(embedding_model_path: str) -> ppsci.arch.CylinderEmbedding: - input_keys = ("states", "visc") - output_keys = ("pred_states", "recover_states") - regularization_key = "k_matrix" - model = ppsci.arch.CylinderEmbedding( - input_keys, output_keys + (regularization_key,) - ) - save_load.load_pretrain(model, embedding_model_path) - return model - - -class OutputTransform(object): - def __init__(self, model: base.Arch): - self.model = model - self.model.eval() - - def __call__(self, x: Dict[str, paddle.Tensor]) -> Dict[str, paddle.Tensor]: - pred_embeds = x["pred_embeds"] - pred_states = self.model.decoder(pred_embeds) - # pred_states.shape=(B, T, C, H, W) - return pred_states - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - output_transform = OutputTransform(embedding_model) - - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "CylinderDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 4, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELoss(), - name="Sup", - ) - 
constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(constraint["Sup"].data_loader) - - # manually init model - model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) - - # init optimizer and lr scheduler - clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) - lr_scheduler = ppsci.optimizer.lr_scheduler.CosineWarmRestarts( - iters_per_epoch=ITERS_PER_EPOCH, **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam( - lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer - )(model) - - # manually build validator - eval_dataloader_cfg = { - "dataset": { - "name": "CylinderDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - # set visualizer(optional) - states = mse_validator.data_loader.dataset.data - embedding_data = mse_validator.data_loader.dataset.embedding_data - - vis_datas = { - "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1], - "states": states[: cfg.VIS_DATA_NUMS, 1:], - } - - visualizer = { - "visualize_states": ppsci.visualize.Visualizer2DPlot( - vis_datas, - { - "target_ux": lambda d: d["states"][:, :, 0], - "pred_ux": lambda d: output_transform(d)[:, :, 0], - "target_uy": lambda d: d["states"][:, :, 1], - "pred_uy": lambda d: output_transform(d)[:, :, 1], - "target_p": lambda d: d["states"][:, :, 2], - "preds_p": lambda d: output_transform(d)[:, :, 2], - }, - batch_size=1, - num_timestamps=10, - stride=20, - xticks=np.linspace(-2, 14, 9), - yticks=np.linspace(-4, 4, 5), - prefix="result_states", - ) - } - - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - validator=validator, - visualizer=visualizer, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # directly evaluate pretrained model(optional) - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - output_transform = OutputTransform(embedding_model) - - # manually init model - model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) - - # manually build validator - eval_dataloader_cfg = { - "dataset": { - "name": "CylinderDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - # set 
visualizer(optional) - states = mse_validator.data_loader.dataset.data - embedding_data = mse_validator.data_loader.dataset.embedding_data - vis_datas = { - "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1], - "states": states[: cfg.VIS_DATA_NUMS, 1:], - } - - visualizer = { - "visulzie_states": ppsci.visualize.Visualizer2DPlot( - vis_datas, - { - "target_ux": lambda d: d["states"][:, :, 0], - "pred_ux": lambda d: output_transform(d)[:, :, 0], - "target_uy": lambda d: d["states"][:, :, 1], - "pred_uy": lambda d: output_transform(d)[:, :, 1], - "target_p": lambda d: d["states"][:, :, 2], - "preds_p": lambda d: output_transform(d)[:, :, 2], - }, - batch_size=1, - num_timestamps=10, - stride=20, - xticks=np.linspace(-2, 14, 9), - yticks=np.linspace(-4, 4, 5), - prefix="result_states", - ) - } - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - # visualize prediction for pretrained model(optional) - solver.visualize() - - -def export(cfg: DictConfig): - # set model - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - model_cfg = { - **cfg.MODEL, - "embedding_model": embedding_model, - "input_keys": ["states"], - "output_keys": ["pred_states"], - } - model = ppsci.arch.PhysformerGPT2(**model_cfg) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - "states": InputSpec([1, 255, 3, 64, 128], "float32", name="states"), - "visc": InputSpec([1, 1], "float32", name="visc"), - }, - ] - - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy import python_infer - - predictor = python_infer.GeneralPredictor(cfg) - - dataset_cfg = { - "name": "CylinderDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - } - - dataset = ppsci.data.dataset.build_dataset(dataset_cfg) - - input_dict = { - "states": dataset.data[: cfg.VIS_DATA_NUMS, :-1], - "visc": dataset.visc[: cfg.VIS_DATA_NUMS], - } - - output_dict = predictor.predict(input_dict) - - # mapping data to cfg.INFER.output_keys - output_keys = ["pred_states"] - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(output_keys, output_dict.keys()) - } - for i in range(cfg.VIS_DATA_NUMS): - ppsci.visualize.plot.save_plot_from_2d_dict( - f"./cylinder_transformer_pred_{i}", - { - "pred_ux": output_dict["pred_states"][i][:, 0], - "pred_uy": output_dict["pred_states"][i][:, 1], - "pred_p": output_dict["pred_states"][i][:, 2], - }, - ("pred_ux", "pred_uy", "pred_p"), - 10, - 20, - np.linspace(-2, 14, 9), - np.linspace(-4, 4, 5), - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="transformer.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Two-stage training +# 1. Train a embedding model by running train_enn.py. +# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. + +# This file is for step2: training a transformer model, based on frozen pretrained embedding model. +# This file is based on PaddleScience/ppsci API. +from os import path as osp +from typing import Dict + +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.arch import base +from ppsci.utils import logger +from ppsci.utils import save_load + + +def build_embedding_model(embedding_model_path: str) -> ppsci.arch.CylinderEmbedding: + input_keys = ("states", "visc") + output_keys = ("pred_states", "recover_states") + regularization_key = "k_matrix" + model = ppsci.arch.CylinderEmbedding( + input_keys, output_keys + (regularization_key,) + ) + save_load.load_pretrain(model, embedding_model_path) + return model + + +class OutputTransform(object): + def __init__(self, model: base.Arch): + self.model = model + self.model.eval() + + def __call__(self, x: Dict[str, paddle.Tensor]) -> Dict[str, paddle.Tensor]: + pred_embeds = x["pred_embeds"] + pred_states = self.model.decoder(pred_embeds) + # pred_states.shape=(B, T, C, H, W) + return pred_states + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + output_transform = OutputTransform(embedding_model) + + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "CylinderDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 4, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELoss(), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(constraint["Sup"].data_loader) + + # manually init model + model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) + + # init optimizer and lr scheduler + clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) + lr_scheduler = ppsci.optimizer.lr_scheduler.CosineWarmRestarts( + iters_per_epoch=ITERS_PER_EPOCH, **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam( + lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer + )(model) + + # manually build validator + eval_dataloader_cfg = { + "dataset": { + "name": "CylinderDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": 
cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + # set visualizer(optional) + states = mse_validator.data_loader.dataset.data + embedding_data = mse_validator.data_loader.dataset.embedding_data + + vis_datas = { + "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1], + "states": states[: cfg.VIS_DATA_NUMS, 1:], + } + + visualizer = { + "visualize_states": ppsci.visualize.Visualizer2DPlot( + vis_datas, + { + "target_ux": lambda d: d["states"][:, :, 0], + "pred_ux": lambda d: output_transform(d)[:, :, 0], + "target_uy": lambda d: d["states"][:, :, 1], + "pred_uy": lambda d: output_transform(d)[:, :, 1], + "target_p": lambda d: d["states"][:, :, 2], + "preds_p": lambda d: output_transform(d)[:, :, 2], + }, + batch_size=1, + num_timestamps=10, + stride=20, + xticks=np.linspace(-2, 14, 9), + yticks=np.linspace(-4, 4, 5), + prefix="result_states", + ) + } + + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + validator=validator, + visualizer=visualizer, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # directly evaluate pretrained model(optional) + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + output_transform = OutputTransform(embedding_model) + + # manually init model + model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) + + # manually build validator + eval_dataloader_cfg = { + "dataset": { + "name": "CylinderDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + # set visualizer(optional) + states = mse_validator.data_loader.dataset.data + embedding_data = mse_validator.data_loader.dataset.embedding_data + vis_datas = { + "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1], + "states": states[: cfg.VIS_DATA_NUMS, 1:], + } + + visualizer = { + "visulzie_states": ppsci.visualize.Visualizer2DPlot( + vis_datas, + { + "target_ux": lambda d: d["states"][:, :, 0], + "pred_ux": lambda d: output_transform(d)[:, :, 0], + "target_uy": lambda d: d["states"][:, :, 1], + "pred_uy": lambda d: output_transform(d)[:, :, 1], + "target_p": lambda d: d["states"][:, :, 2], + "preds_p": lambda d: output_transform(d)[:, :, 2], + }, + batch_size=1, + num_timestamps=10, + stride=20, + xticks=np.linspace(-2, 14, 9), + 
yticks=np.linspace(-4, 4, 5), + prefix="result_states", + ) + } + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + # visualize prediction for pretrained model(optional) + solver.visualize() + + +def export(cfg: DictConfig): + # set model + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + model_cfg = { + **cfg.MODEL, + "embedding_model": embedding_model, + "input_keys": ["states"], + "output_keys": ["pred_states"], + } + model = ppsci.arch.PhysformerGPT2(**model_cfg) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + "states": InputSpec([1, 255, 3, 64, 128], "float32", name="states"), + "visc": InputSpec([1, 1], "float32", name="visc"), + }, + ] + + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy import python_infer + + predictor = python_infer.GeneralPredictor(cfg) + + dataset_cfg = { + "name": "CylinderDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + } + + dataset = ppsci.data.dataset.build_dataset(dataset_cfg) + + input_dict = { + "states": dataset.data[: cfg.VIS_DATA_NUMS, :-1], + "visc": dataset.visc[: cfg.VIS_DATA_NUMS], + } + + output_dict = predictor.predict(input_dict) + + # mapping data to cfg.INFER.output_keys + output_keys = ["pred_states"] + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(output_keys, output_dict.keys()) + } + for i in range(cfg.VIS_DATA_NUMS): + ppsci.visualize.plot.save_plot_from_2d_dict( + f"./cylinder_transformer_pred_{i}", + { + "pred_ux": output_dict["pred_states"][i][:, 0], + "pred_uy": output_dict["pred_states"][i][:, 1], + "pred_p": output_dict["pred_states"][i][:, 2], + }, + ("pred_ux", "pred_uy", "pred_p"), + 10, + 20, + np.linspace(-2, 14, 9), + np.linspace(-4, 4, 5), + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="transformer.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/darcy/conf/darcy2d.yaml b/examples/darcy/conf/darcy2d.yaml index c5d165717b..b7482e943c 100644 --- a/examples/darcy/conf/darcy2d.yaml +++ b/examples/darcy/conf/darcy2d.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -81,3 +82,87 @@ INFER: max_batch_size: 8192 num_cpu_threads: 10 batch_size: 8192 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_darcy2d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings 
+mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +NPOINT_PDE: 9801 # 99 ** 2 +NPOINT_BC: 400 # 100 * 4 + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["p"] + num_layers: 5 + hidden_size: 20 + activation: "stan" + +# training settings +TRAIN: + epochs: 10000 + iters_per_epoch: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + max_learning_rate: 1.0e-3 + end_learning_rate: 1.0e-7 + lbfgs: + iters_per_epoch: ${TRAIN.iters_per_epoch} + output_dir: ./outputs_darcy2d_L-BFGS + learning_rate: 1.0 + max_iter: 10 + eval_freq: ${TRAIN.eval_freq} + eval_during_train: ${TRAIN.eval_during_train} + eval_freq: 200 + eval_during_train: true + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: + residual_validator: 8192 + pretrained_model_path: null + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/darcy2d/darcy2d_pretrained.pdparams + export_path: ./inference/darcy2d + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 8192 + num_cpu_threads: 10 + batch_size: 8192 +>>>>>>> Stashed changes diff --git a/examples/darcy/darcy2d.py b/examples/darcy/darcy2d.py index 12e32af173..d9c8f61021 100644 --- a/examples/darcy/darcy2d.py +++ b/examples/darcy/darcy2d.py @@ -1,362 +1,362 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
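The settings above fix NPOINT_PDE: 9801 (99 ** 2) and NPOINT_BC: 400 (100 * 4), which darcy2d.py below feeds to evenly sampled interior and boundary constraints on the unit square. A self-contained sketch of where those counts come from — an illustration of the evenly spaced layout only, not ppsci.geometry.Rectangle's own sampler:

import numpy as np

n_side = 99
xs = np.linspace(0.0, 1.0, n_side + 2)[1:-1]          # 99 evenly spaced interior coords per axis
xx, yy = np.meshgrid(xs, xs)
interior = np.stack([xx.ravel(), yy.ravel()], axis=1)
print(interior.shape)                                  # (9801, 2) == NPOINT_PDE

edge = np.linspace(0.0, 1.0, 100)                      # 100 points per edge, 4 edges
boundary = np.concatenate([
    np.stack([edge, np.zeros_like(edge)], axis=1),     # y = 0
    np.stack([edge, np.ones_like(edge)], axis=1),      # y = 1
    np.stack([np.zeros_like(edge), edge], axis=1),     # x = 0
    np.stack([np.ones_like(edge), edge], axis=1),      # x = 1
])
print(boundary.shape)                                  # (400, 2) == NPOINT_BC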
- -from os import path as osp - -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import jacobian -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"Poisson": ppsci.equation.Poisson(2)} - - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))} - - # set dataloader config - train_dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - - # set constraint - def poisson_ref_compute_func(_in): - return ( - -8.0 - * (np.pi**2) - * np.sin(2.0 * np.pi * _in["x"]) - * np.cos(2.0 * np.pi * _in["y"]) - ) - - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["Poisson"].equations, - {"poisson": poisson_ref_compute_func}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": cfg.NPOINT_PDE}, - ppsci.loss.MSELoss("sum"), - evenly=True, - name="EQ", - ) - - bc = ppsci.constraint.BoundaryConstraint( - {"p": lambda out: out["p"]}, - { - "p": lambda _in: np.sin(2.0 * np.pi * _in["x"]) - * np.cos(2.0 * np.pi * _in["y"]) - }, - geom["rect"], - {**train_dataloader_cfg, "batch_size": cfg.NPOINT_BC}, - ppsci.loss.MSELoss("sum"), - name="BC", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - bc.name: bc, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.OneCycleLR(**cfg.TRAIN.lr_scheduler)() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - residual_validator = ppsci.validate.GeometryValidator( - equation["Poisson"].equations, - {"poisson": poisson_ref_compute_func}, - geom["rect"], - { - "dataset": "NamedArrayDataset", - "total_size": cfg.NPOINT_PDE, - "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("sum"), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # set visualizer(optional) - # manually collate input data for visualization, - vis_points = geom["rect"].sample_interior( - cfg.NPOINT_PDE + cfg.NPOINT_BC, evenly=True - ) - visualizer = { - "visualize_p_ux_uy": ppsci.visualize.VisualizerVtu( - vis_points, - { - "p": lambda d: d["p"], - "p_ref": lambda d: paddle.sin(2 * np.pi * d["x"]) - * paddle.cos(2 * np.pi * d["y"]), - "p_diff": lambda d: paddle.sin(2 * np.pi * d["x"]) - * paddle.cos(2 * np.pi * d["y"]) - - d["p"], - "ux": lambda d: jacobian(d["p"], d["x"]), - "ux_ref": lambda d: 2 - * np.pi - * paddle.cos(2 * np.pi * d["x"]) - * paddle.cos(2 * np.pi * d["y"]), - "ux_diff": lambda d: jacobian(d["p"], d["x"]) - - 2 - * np.pi - * paddle.cos(2 * np.pi * d["x"]) - * paddle.cos(2 * np.pi * d["y"]), - "uy": lambda d: jacobian(d["p"], d["y"]), - "uy_ref": lambda d: -2 - * np.pi - * paddle.sin(2 * np.pi * d["x"]) - * paddle.sin(2 * np.pi * d["y"]), - "uy_diff": lambda d: jacobian(d["p"], d["y"]) - - ( - -2 - * np.pi - * paddle.sin(2 * np.pi * d["x"]) - * paddle.sin(2 * np.pi * d["y"]) - ), - }, - prefix="result_p_ux_uy", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - 
eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - # fine-tuning pretrained model with L-BFGS - OUTPUT_DIR = cfg.TRAIN.lbfgs.output_dir - logger.init_logger("ppsci", osp.join(OUTPUT_DIR, f"{cfg.mode}.log"), "info") - EPOCHS = cfg.TRAIN.epochs // 10 - optimizer_lbfgs = ppsci.optimizer.LBFGS( - cfg.TRAIN.lbfgs.learning_rate, cfg.TRAIN.lbfgs.max_iter - )(model) - solver = ppsci.solver.Solver( - model, - constraint, - OUTPUT_DIR, - optimizer_lbfgs, - None, - EPOCHS, - cfg.TRAIN.lbfgs.iters_per_epoch, - eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, - eval_freq=cfg.TRAIN.lbfgs.eval_freq, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"Poisson": ppsci.equation.Poisson(2)} - - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))} - - # set constraint - def poisson_ref_compute_func(_in): - return ( - -8.0 - * (np.pi**2) - * np.sin(2.0 * np.pi * _in["x"]) - * np.cos(2.0 * np.pi * _in["y"]) - ) - - # set validator - residual_validator = ppsci.validate.GeometryValidator( - equation["Poisson"].equations, - {"poisson": poisson_ref_compute_func}, - geom["rect"], - { - "dataset": "NamedArrayDataset", - "total_size": cfg.NPOINT_PDE, - "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("sum"), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # set visualizer - # manually collate input data for visualization, - vis_points = geom["rect"].sample_interior( - cfg.NPOINT_PDE + cfg.NPOINT_BC, evenly=True - ) - visualizer = { - "visualize_p_ux_uy": ppsci.visualize.VisualizerVtu( - vis_points, - { - "p": lambda d: d["p"], - "p_ref": lambda d: paddle.sin(2 * np.pi * d["x"]) - * paddle.cos(2 * np.pi * d["y"]), - "p_diff": lambda d: paddle.sin(2 * np.pi * d["x"]) - * paddle.cos(2 * np.pi * d["y"]) - - d["p"], - "ux": lambda d: jacobian(d["p"], d["x"]), - "ux_ref": lambda d: 2 - * np.pi - * paddle.cos(2 * np.pi * d["x"]) - * paddle.cos(2 * np.pi * d["y"]), - "ux_diff": lambda d: jacobian(d["p"], d["x"]) - - 2 - * np.pi - * paddle.cos(2 * np.pi * d["x"]) - * paddle.cos(2 * np.pi * d["y"]), - "uy": lambda d: jacobian(d["p"], d["y"]), - "uy_ref": lambda d: -2 - * np.pi - * paddle.sin(2 * np.pi * d["x"]) - * paddle.sin(2 * np.pi * d["y"]), - "uy_diff": lambda d: jacobian(d["p"], d["y"]) - - ( - -2 - * np.pi - * paddle.sin(2 * np.pi * d["x"]) - * paddle.sin(2 * np.pi * d["y"]) - ), - }, - prefix="result_p_ux_uy", - ) - } - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - # visualize 
prediction - solver.visualize() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))} - # manually collate input data for visualization, - input_dict = geom["rect"].sample_interior( - cfg.NPOINT_PDE + cfg.NPOINT_BC, evenly=True - ) - output_dict = predictor.predict( - {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size - ) - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - ppsci.visualize.save_vtu_from_dict( - "./visual/darcy2d.vtu", - {**input_dict, **output_dict}, - input_dict.keys(), - cfg.MODEL.output_keys, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="darcy2d.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
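poisson_ref_compute_func and the *_ref expressions in the visualizer below all come from the manufactured solution p(x, y) = sin(2*pi*x) * cos(2*pi*y). A quick symbolic check that the source term and reference gradients are consistent with that choice (a standalone sketch using sympy, not part of the example script):

import sympy as sp

x, y = sp.symbols("x y")
p = sp.sin(2 * sp.pi * x) * sp.cos(2 * sp.pi * y)

# Laplacian of p should equal the reference source term -8*pi**2*sin(2*pi*x)*cos(2*pi*y)
laplacian = sp.diff(p, x, 2) + sp.diff(p, y, 2)
source = -8 * sp.pi**2 * sp.sin(2 * sp.pi * x) * sp.cos(2 * sp.pi * y)
print(sp.simplify(laplacian - source))      # 0

# dp/dx and dp/dy should match the "ux_ref"/"uy_ref" visualizer expressions
ux_ref = 2 * sp.pi * sp.cos(2 * sp.pi * x) * sp.cos(2 * sp.pi * y)
uy_ref = -2 * sp.pi * sp.sin(2 * sp.pi * x) * sp.sin(2 * sp.pi * y)
print(sp.simplify(sp.diff(p, x) - ux_ref))  # 0
print(sp.simplify(sp.diff(p, y) - uy_ref))  # 0

The boundary constraint pins p itself to the same closed form on the edges of the unit square, so the trained MLP can be compared field by field against it.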
+ +from os import path as osp + +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import jacobian +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"Poisson": ppsci.equation.Poisson(2)} + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))} + + # set dataloader config + train_dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + + # set constraint + def poisson_ref_compute_func(_in): + return ( + -8.0 + * (np.pi**2) + * np.sin(2.0 * np.pi * _in["x"]) + * np.cos(2.0 * np.pi * _in["y"]) + ) + + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["Poisson"].equations, + {"poisson": poisson_ref_compute_func}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": cfg.NPOINT_PDE}, + ppsci.loss.MSELoss("sum"), + evenly=True, + name="EQ", + ) + + bc = ppsci.constraint.BoundaryConstraint( + {"p": lambda out: out["p"]}, + { + "p": lambda _in: np.sin(2.0 * np.pi * _in["x"]) + * np.cos(2.0 * np.pi * _in["y"]) + }, + geom["rect"], + {**train_dataloader_cfg, "batch_size": cfg.NPOINT_BC}, + ppsci.loss.MSELoss("sum"), + name="BC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + bc.name: bc, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.OneCycleLR(**cfg.TRAIN.lr_scheduler)() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + residual_validator = ppsci.validate.GeometryValidator( + equation["Poisson"].equations, + {"poisson": poisson_ref_compute_func}, + geom["rect"], + { + "dataset": "NamedArrayDataset", + "total_size": cfg.NPOINT_PDE, + "batch_size": cfg.EVAL.batch_size.residual_validator, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("sum"), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # set visualizer(optional) + # manually collate input data for visualization, + vis_points = geom["rect"].sample_interior( + cfg.NPOINT_PDE + cfg.NPOINT_BC, evenly=True + ) + visualizer = { + "visualize_p_ux_uy": ppsci.visualize.VisualizerVtu( + vis_points, + { + "p": lambda d: d["p"], + "p_ref": lambda d: paddle.sin(2 * np.pi * d["x"]) + * paddle.cos(2 * np.pi * d["y"]), + "p_diff": lambda d: paddle.sin(2 * np.pi * d["x"]) + * paddle.cos(2 * np.pi * d["y"]) + - d["p"], + "ux": lambda d: jacobian(d["p"], d["x"]), + "ux_ref": lambda d: 2 + * np.pi + * paddle.cos(2 * np.pi * d["x"]) + * paddle.cos(2 * np.pi * d["y"]), + "ux_diff": lambda d: jacobian(d["p"], d["x"]) + - 2 + * np.pi + * paddle.cos(2 * np.pi * d["x"]) + * paddle.cos(2 * np.pi * d["y"]), + "uy": lambda d: jacobian(d["p"], d["y"]), + "uy_ref": lambda d: -2 + * np.pi + * paddle.sin(2 * np.pi * d["x"]) + * paddle.sin(2 * np.pi * d["y"]), + "uy_diff": lambda d: jacobian(d["p"], d["y"]) + - ( + -2 + * np.pi + * paddle.sin(2 * np.pi * d["x"]) + * paddle.sin(2 * np.pi * d["y"]) + ), + }, + prefix="result_p_ux_uy", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + 
eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + # fine-tuning pretrained model with L-BFGS + OUTPUT_DIR = cfg.TRAIN.lbfgs.output_dir + logger.init_logger("ppsci", osp.join(OUTPUT_DIR, f"{cfg.mode}.log"), "info") + EPOCHS = cfg.TRAIN.epochs // 10 + optimizer_lbfgs = ppsci.optimizer.LBFGS( + cfg.TRAIN.lbfgs.learning_rate, cfg.TRAIN.lbfgs.max_iter + )(model) + solver = ppsci.solver.Solver( + model, + constraint, + OUTPUT_DIR, + optimizer_lbfgs, + None, + EPOCHS, + cfg.TRAIN.lbfgs.iters_per_epoch, + eval_during_train=cfg.TRAIN.lbfgs.eval_during_train, + eval_freq=cfg.TRAIN.lbfgs.eval_freq, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"Poisson": ppsci.equation.Poisson(2)} + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))} + + # set constraint + def poisson_ref_compute_func(_in): + return ( + -8.0 + * (np.pi**2) + * np.sin(2.0 * np.pi * _in["x"]) + * np.cos(2.0 * np.pi * _in["y"]) + ) + + # set validator + residual_validator = ppsci.validate.GeometryValidator( + equation["Poisson"].equations, + {"poisson": poisson_ref_compute_func}, + geom["rect"], + { + "dataset": "NamedArrayDataset", + "total_size": cfg.NPOINT_PDE, + "batch_size": cfg.EVAL.batch_size.residual_validator, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("sum"), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # set visualizer + # manually collate input data for visualization, + vis_points = geom["rect"].sample_interior( + cfg.NPOINT_PDE + cfg.NPOINT_BC, evenly=True + ) + visualizer = { + "visualize_p_ux_uy": ppsci.visualize.VisualizerVtu( + vis_points, + { + "p": lambda d: d["p"], + "p_ref": lambda d: paddle.sin(2 * np.pi * d["x"]) + * paddle.cos(2 * np.pi * d["y"]), + "p_diff": lambda d: paddle.sin(2 * np.pi * d["x"]) + * paddle.cos(2 * np.pi * d["y"]) + - d["p"], + "ux": lambda d: jacobian(d["p"], d["x"]), + "ux_ref": lambda d: 2 + * np.pi + * paddle.cos(2 * np.pi * d["x"]) + * paddle.cos(2 * np.pi * d["y"]), + "ux_diff": lambda d: jacobian(d["p"], d["x"]) + - 2 + * np.pi + * paddle.cos(2 * np.pi * d["x"]) + * paddle.cos(2 * np.pi * d["y"]), + "uy": lambda d: jacobian(d["p"], d["y"]), + "uy_ref": lambda d: -2 + * np.pi + * paddle.sin(2 * np.pi * d["x"]) + * paddle.sin(2 * np.pi * d["y"]), + "uy_diff": lambda d: jacobian(d["p"], d["y"]) + - ( + -2 + * np.pi + * paddle.sin(2 * np.pi * d["x"]) + * paddle.sin(2 * np.pi * d["y"]) + ), + }, + prefix="result_p_ux_uy", + ) + } + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + # visualize 
prediction + solver.visualize() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))} + # manually collate input data for visualization, + input_dict = geom["rect"].sample_interior( + cfg.NPOINT_PDE + cfg.NPOINT_BC, evenly=True + ) + output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + ppsci.visualize.save_vtu_from_dict( + "./visual/darcy2d.vtu", + {**input_dict, **output_dict}, + input_dict.keys(), + cfg.MODEL.output_keys, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="darcy2d.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/deepcfd/conf/deepcfd.yaml b/examples/deepcfd/conf/deepcfd.yaml index 802a33cf0f..18c7dbc5e3 100644 --- a/examples/deepcfd/conf/deepcfd.yaml +++ b/examples/deepcfd/conf/deepcfd.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -78,3 +79,84 @@ INFER: max_batch_size: 100 num_cpu_threads: 4 batch_size: 100 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_deepcfd/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval/export/infer +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set data file path +DATAX_PATH: ./datasets/dataX.pkl +DATAY_PATH: ./datasets/dataY.pkl +SLIPT_RATIO: 0.7 # slipt dataset to train dataset and test datatset +SAMPLE_SIZE: 981 # the shape of dataX and dataY is [SAMPLE_SIZE, CHANNEL_SIZE, X_SIZE, Y_SIZE] +CHANNEL_SIZE: 3 +X_SIZE: 172 +Y_SIZE: 79 + +# model settings +MODEL: + input_key: "input" + output_key: "output" + in_channel: 3 + out_channel: 3 + kernel_size: 5 + filters: [8, 16, 32, 32] + weight_norm: false + batch_norm: false + +# training settings +TRAIN: + epochs: 1000 + learning_rate: 0.001 + weight_decay: 0.005 + eval_during_train: true + eval_freq: 50 + batch_size: 64 + pretrained_model_path: null + checkpoint_path: null + +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 8 + +INFER: + pretrained_model_path: 
"https://paddle-org.bj.bcebos.com/paddlescience/models/deepcfd/deepcfd_pretrained.pdparams" + export_path: ./inference/deepcfd + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 6000 + gpu_id: 0 + max_batch_size: 100 + num_cpu_threads: 4 + batch_size: 100 +>>>>>>> Stashed changes diff --git a/examples/deepcfd/deepcfd.py b/examples/deepcfd/deepcfd.py index a7bd511e06..f0ff3baad6 100644 --- a/examples/deepcfd/deepcfd.py +++ b/examples/deepcfd/deepcfd.py @@ -1,717 +1,717 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -import pickle -from typing import Dict -from typing import List -from typing import Tuple - -import hydra -import numpy as np -import paddle -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def split_tensors( - *tensors: List[np.array], ratio: float -) -> Tuple[List[np.array], List[np.array]]: - """Split tensors to two parts. - - Args: - tensors (List[np.array]): Non-empty tensor list. - ratio (float): Split ratio. For example, tensor list A is split to A1 and A2. - len(A1) / len(A) = ratio. - - Returns: - Tuple[List[np.array], List[np.array]]: Split tensors. - """ - if len(tensors) == 0: - raise ValueError("Tensors shouldn't be empty.") - - split1, split2 = [], [] - count = len(tensors[0]) - for tensor in tensors: - if len(tensor) != count: - raise ValueError("The size of tensor should be same.") - x = int(len(tensor) * ratio) - split1.append(tensor[:x]) - split2.append(tensor[x:]) - - if len(tensors) == 1: - split1, split2 = split1[0], split2[0] - return split1, split2 - - -def predict_and_save_plot( - x: np.ndarray, y: np.ndarray, index: int, solver: ppsci.solver.Solver, plot_dir: str -): - """Make prediction and save visualization of result. - - Args: - x (np.ndarray): Input of test dataset. - y (np.ndarray): Output of test dataset. - index (int): Index of data to visualizer. - solver (ppsci.solver.Solver): Trained solver. - plot_dir (str): Directory to save plot. 
- """ - min_u = np.min(y[index, 0, :, :]) - max_u = np.max(y[index, 0, :, :]) - - min_v = np.min(y[index, 1, :, :]) - max_v = np.max(y[index, 1, :, :]) - - min_p = np.min(y[index, 2, :, :]) - max_p = np.max(y[index, 2, :, :]) - - output = solver.predict({"input": x}, return_numpy=True) - pred_y = output["output"] - error = np.abs(y - pred_y) - - min_error_u = np.min(error[index, 0, :, :]) - max_error_u = np.max(error[index, 0, :, :]) - - min_error_v = np.min(error[index, 1, :, :]) - max_error_v = np.max(error[index, 1, :, :]) - - min_error_p = np.min(error[index, 2, :, :]) - max_error_p = np.max(error[index, 2, :, :]) - - plt.figure() - fig = plt.gcf() - fig.set_size_inches(15, 10) - plt.subplot(3, 3, 1) - plt.title("OpenFOAM", fontsize=18) - plt.imshow( - np.transpose(y[index, 0, :, :]), - cmap="jet", - vmin=min_u, - vmax=max_u, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.ylabel("Ux", fontsize=18) - plt.subplot(3, 3, 2) - plt.title("DeepCFD", fontsize=18) - plt.imshow( - np.transpose(pred_y[index, 0, :, :]), - cmap="jet", - vmin=min_u, - vmax=max_u, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.subplot(3, 3, 3) - plt.title("Error", fontsize=18) - plt.imshow( - np.transpose(error[index, 0, :, :]), - cmap="jet", - vmin=min_error_u, - vmax=max_error_u, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - - plt.subplot(3, 3, 4) - plt.imshow( - np.transpose(y[index, 1, :, :]), - cmap="jet", - vmin=min_v, - vmax=max_v, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.ylabel("Uy", fontsize=18) - plt.subplot(3, 3, 5) - plt.imshow( - np.transpose(pred_y[index, 1, :, :]), - cmap="jet", - vmin=min_v, - vmax=max_v, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.subplot(3, 3, 6) - plt.imshow( - np.transpose(error[index, 1, :, :]), - cmap="jet", - vmin=min_error_v, - vmax=max_error_v, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - - plt.subplot(3, 3, 7) - plt.imshow( - np.transpose(y[index, 2, :, :]), - cmap="jet", - vmin=min_p, - vmax=max_p, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.ylabel("p", fontsize=18) - plt.subplot(3, 3, 8) - plt.imshow( - np.transpose(pred_y[index, 2, :, :]), - cmap="jet", - vmin=min_p, - vmax=max_p, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.subplot(3, 3, 9) - plt.imshow( - np.transpose(error[index, 2, :, :]), - cmap="jet", - vmin=min_error_p, - vmax=max_error_p, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.tight_layout() - plt.savefig(os.path.join(plot_dir, f"cfd_{index}.png"), bbox_inches="tight") - plt.show() - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info") - - # initialize datasets - with open(cfg.DATAX_PATH, "rb") as file: - x = pickle.load(file) - with open(cfg.DATAY_PATH, "rb") as file: - y = pickle.load(file) - - # split dataset to train dataset and test dataset - train_dataset, test_dataset = split_tensors(x, y, ratio=cfg.SLIPT_RATIO) - train_x, train_y = train_dataset - test_x, test_y = test_dataset - - # initialize model - model = ppsci.arch.UNetEx(**cfg.MODEL) - - 
CHANNELS_WEIGHTS = np.reshape( - np.sqrt( - np.mean( - np.transpose(y, (0, 2, 3, 1)).reshape( - (cfg.SAMPLE_SIZE * cfg.X_SIZE * cfg.Y_SIZE, cfg.CHANNEL_SIZE) - ) - ** 2, - axis=0, - ) - ), - (1, -1, 1, 1), - ) - - # define loss - def loss_expr( - output_dict: Dict[str, np.ndarray], - label_dict: Dict[str, np.ndarray] = None, - weight_dict: Dict[str, np.ndarray] = None, - ) -> float: - output = output_dict["output"] - y = label_dict["output"] - loss_u = (output[:, 0:1, :, :] - y[:, 0:1, :, :]) ** 2 - loss_v = (output[:, 1:2, :, :] - y[:, 1:2, :, :]) ** 2 - loss_p = (output[:, 2:3, :, :] - y[:, 2:3, :, :]).abs() - loss = (loss_u + loss_v + loss_p) / CHANNELS_WEIGHTS - return {"output": loss.sum()} - - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"input": train_x}, - "label": {"output": train_y}, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - }, - ppsci.loss.FunctionalLoss(loss_expr), - name="sup_constraint", - ) - - # manually build constraint - constraint = {sup_constraint.name: sup_constraint} - - # initialize Adam optimizer - optimizer = ppsci.optimizer.Adam( - cfg.TRAIN.learning_rate, weight_decay=cfg.TRAIN.weight_decay - )(model) - - # manually build validator - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": {"input": test_x}, - "label": {"output": test_y}, - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - def metric_expr( - output_dict: Dict[str, np.ndarray], - label_dict: Dict[str, np.ndarray] = None, - weight_dict: Dict[str, np.ndarray] = None, - ) -> Dict[str, float]: - output = output_dict["output"] - y = label_dict["output"] - total_mse = ((output - y) ** 2).sum() / len(test_x) - ux_mse = ((output[:, 0, :, :] - test_y[:, 0, :, :]) ** 2).sum() / len(test_x) - uy_mse = ((output[:, 1, :, :] - test_y[:, 1, :, :]) ** 2).sum() / len(test_x) - p_mse = ((output[:, 2, :, :] - test_y[:, 2, :, :]) ** 2).sum() / len(test_x) - return { - "Total_MSE": total_mse, - "Ux_MSE": ux_mse, - "Uy_MSE": uy_mse, - "p_MSE": p_mse, - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.FunctionalLoss(loss_expr), - {"output": lambda out: out["output"]}, - {"MSE": ppsci.metric.FunctionalMetric(metric_expr)}, - name="mse_validator", - ) - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, - validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # train model - solver.train() - - # evaluate after finished training - solver.eval() - - PLOT_DIR = os.path.join(cfg.output_dir, "visual") - os.makedirs(PLOT_DIR, exist_ok=True) - - # visualize prediction after finished training - predict_and_save_plot(test_x, test_y, 0, solver, PLOT_DIR) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", os.path.join(cfg.output_dir, "eval.log"), "info") - - # initialize datasets - with open(cfg.DATAX_PATH, "rb") as file: - x = pickle.load(file) - with open(cfg.DATAY_PATH, "rb") as file: - y = 
pickle.load(file) - - # split dataset to train dataset and test dataset - train_dataset, test_dataset = split_tensors(x, y, ratio=cfg.SLIPT_RATIO) - train_x, train_y = train_dataset - test_x, test_y = test_dataset - - # initialize model - model = ppsci.arch.UNetEx(**cfg.MODEL) - - CHANNELS_WEIGHTS = np.reshape( - np.sqrt( - np.mean( - np.transpose(y, (0, 2, 3, 1)).reshape( - (cfg.SAMPLE_SIZE * cfg.X_SIZE * cfg.Y_SIZE, cfg.CHANNEL_SIZE) - ) - ** 2, - axis=0, - ) - ), - (1, -1, 1, 1), - ) - - # define loss - def loss_expr( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"] = None, - weight_dict: Dict[str, "paddle.Tensor"] = None, - ) -> Dict[str, "paddle.Tensor"]: - output = output_dict["output"] - y = label_dict["output"] - loss_u = (output[:, 0:1, :, :] - y[:, 0:1, :, :]) ** 2 - loss_v = (output[:, 1:2, :, :] - y[:, 1:2, :, :]) ** 2 - loss_p = (output[:, 2:3, :, :] - y[:, 2:3, :, :]).abs() - loss = (loss_u + loss_v + loss_p) / CHANNELS_WEIGHTS - return {"custom_loss": loss.sum()} - - # manually build validator - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": {"input": test_x}, - "label": {"output": test_y}, - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - def metric_expr( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"] = None, - weight_dict: Dict[str, "paddle.Tensor"] = None, - ) -> Dict[str, "paddle.Tensor"]: - output = output_dict["output"] - y = label_dict["output"] - total_mse = ((output - y) ** 2).sum() / len(test_x) - ux_mse = ((output[:, 0, :, :] - test_y[:, 0, :, :]) ** 2).sum() / len(test_x) - uy_mse = ((output[:, 1, :, :] - test_y[:, 1, :, :]) ** 2).sum() / len(test_x) - p_mse = ((output[:, 2, :, :] - test_y[:, 2, :, :]) ** 2).sum() / len(test_x) - return { - "Total_MSE": total_mse, - "Ux_MSE": ux_mse, - "Uy_MSE": uy_mse, - "p_MSE": p_mse, - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.FunctionalLoss(loss_expr), - {"output": lambda out: out["output"]}, - {"MSE": ppsci.metric.FunctionalMetric(metric_expr)}, - name="mse_validator", - ) - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # evaluate - solver.eval() - - PLOT_DIR = os.path.join(cfg.output_dir, "visual") - os.makedirs(PLOT_DIR, exist_ok=True) - - # visualize prediction - predict_and_save_plot(test_x, test_y, 0, solver, PLOT_DIR) - - -def export(cfg: DictConfig): - model = ppsci.arch.UNetEx(**cfg.MODEL) - - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec( - [None, cfg.CHANNEL_SIZE, cfg.X_SIZE, cfg.Y_SIZE], "float32", name=key - ) - for key in model.input_keys - }, - ] - - solver.export(input_spec, cfg.INFER.export_path) - print(f"Model has been exported to {cfg.INFER.export_path}") - - -def predict_and_save_plot_infer( - x: np.ndarray, - y: np.ndarray, - pred_y: np.ndarray, - index: int, - plot_dir: str, -): - """Make prediction and save visualization of result during inference. - - Args: - x (np.ndarray): Input of test dataset. - y (np.ndarray): Ground truth output of test dataset. 
- pred_y (np.ndarray): Predicted output from inference. - index (int): Index of data to visualize. - plot_dir (str): Directory to save plot. - """ - - # Extract the true and predicted values for each channel - u_true = y[index, 0, :, :] - v_true = y[index, 1, :, :] - p_true = y[index, 2, :, :] - - u_pred = pred_y[index, 0, :, :] - v_pred = pred_y[index, 1, :, :] - p_pred = pred_y[index, 2, :, :] - - # Compute the absolute error between true and predicted values - error_u = np.abs(u_true - u_pred) - error_v = np.abs(v_true - v_pred) - error_p = np.abs(p_true - p_pred) - - # Calculate the min and max values for each channel - min_u, max_u = u_true.min(), u_true.max() - min_v, max_v = v_true.min(), v_true.max() - min_p, max_p = p_true.min(), p_true.max() - - min_error_u, max_error_u = error_u.min(), error_u.max() - min_error_v, max_error_v = error_v.min(), error_v.max() - min_error_p, max_error_p = error_p.min(), error_p.max() - - # Start plotting - plt.figure(figsize=(15, 10)) - - # Plot Ux channel (True, Predicted, and Error) - plt.subplot(3, 3, 1) - plt.title("OpenFOAM Ux", fontsize=18) - plt.imshow( - np.transpose(u_true), - cmap="jet", - vmin=min_u, - vmax=max_u, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.ylabel("Ux", fontsize=18) - - plt.subplot(3, 3, 2) - plt.title("DeepCFD Ux", fontsize=18) - plt.imshow( - np.transpose(u_pred), - cmap="jet", - vmin=min_u, - vmax=max_u, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - - plt.subplot(3, 3, 3) - plt.title("Error Ux", fontsize=18) - plt.imshow( - np.transpose(error_u), - cmap="jet", - vmin=min_error_u, - vmax=max_error_u, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - - # Plot Uy channel (True, Predicted, and Error) - plt.subplot(3, 3, 4) - plt.imshow( - np.transpose(v_true), - cmap="jet", - vmin=min_v, - vmax=max_v, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.ylabel("Uy", fontsize=18) - - plt.subplot(3, 3, 5) - plt.imshow( - np.transpose(v_pred), - cmap="jet", - vmin=min_v, - vmax=max_v, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - - plt.subplot(3, 3, 6) - plt.imshow( - np.transpose(error_v), - cmap="jet", - vmin=min_error_v, - vmax=max_error_v, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - - # Plot pressure channel p (True, Predicted, and Error) - plt.subplot(3, 3, 7) - plt.imshow( - np.transpose(p_true), - cmap="jet", - vmin=min_p, - vmax=max_p, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - plt.ylabel("p", fontsize=18) - - plt.subplot(3, 3, 8) - plt.imshow( - np.transpose(p_pred), - cmap="jet", - vmin=min_p, - vmax=max_p, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - - plt.subplot(3, 3, 9) - plt.imshow( - np.transpose(error_p), - cmap="jet", - vmin=min_error_p, - vmax=max_error_p, - origin="lower", - extent=[0, 260, 0, 120], - ) - plt.colorbar(orientation="horizontal") - - plt.tight_layout() - plt.savefig(os.path.join(plot_dir, f"cfd_{index}.png"), bbox_inches="tight") - plt.close() - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - # Load test dataset from serialized files - with open(cfg.DATAX_PATH, "rb") as file: - x = pickle.load(file) - with open(cfg.DATAY_PATH, "rb") as file: - y = pickle.load(file) - - # Split data into 
training and test sets - _, test_dataset = split_tensors(x, y, ratio=cfg.SLIPT_RATIO) - test_x, test_y = test_dataset - - input_dict = {cfg.MODEL.input_key: test_x} - - # Initialize the PINN predictor model - predictor = pinn_predictor.PINNPredictor(cfg) - - # Run inference and get predictions - output_dict = predictor.predict(input_dict, batch_size=cfg.INFER.batch_size) - - # Handle model's output key structure - actual_output_key = cfg.MODEL.output_key - - output_keys = ( - actual_output_key - if isinstance(actual_output_key, (list, tuple)) - else [actual_output_key] - ) - if len(output_keys) != len(output_dict): - raise ValueError( - "The number of output_keys does not match the number of output_dict keys." - ) - - # Map model output keys to values - output_dict = { - origin: value for origin, value in zip(output_keys, output_dict.values()) - } - - concat_output = output_dict[actual_output_key] - - if concat_output.ndim != 4 or concat_output.shape[1] != 3: - raise ValueError( - f"Unexpected shape of '{actual_output_key}': {concat_output.shape}. Expected (batch_size, 3, x_size, y_size)." - ) - - try: - # Extract Ux, Uy, and pressure from the predicted output - u_pred = concat_output[:, 0, :, :] # Ux - v_pred = concat_output[:, 1, :, :] # Uy - p_pred = concat_output[:, 2, :, :] # p - except IndexError as e: - print(f"Error in splitting '{actual_output_key}': {e}") - raise - - # Combine the predictions into one array for further processing - pred_y = np.stack([u_pred, v_pred, p_pred], axis=1) - - PLOT_DIR = os.path.join(cfg.output_dir, "infer_visual") - os.makedirs(PLOT_DIR, exist_ok=True) - - # Visualize and save the first five predictions - for index in range(min(5, pred_y.shape[0])): - predict_and_save_plot_infer(test_x, test_y, pred_y, index, PLOT_DIR) - - print(f"Inference completed. Results are saved in {PLOT_DIR}") - - -@hydra.main(version_base=None, config_path="./conf", config_name="deepcfd.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import pickle +from typing import Dict +from typing import List +from typing import Tuple + +import hydra +import numpy as np +import paddle +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def split_tensors( + *tensors: List[np.array], ratio: float +) -> Tuple[List[np.array], List[np.array]]: + """Split tensors to two parts. + + Args: + tensors (List[np.array]): Non-empty tensor list. + ratio (float): Split ratio. For example, tensor list A is split to A1 and A2. + len(A1) / len(A) = ratio. 
+ + Returns: + Tuple[List[np.array], List[np.array]]: Split tensors. + """ + if len(tensors) == 0: + raise ValueError("Tensors shouldn't be empty.") + + split1, split2 = [], [] + count = len(tensors[0]) + for tensor in tensors: + if len(tensor) != count: + raise ValueError("The size of tensor should be same.") + x = int(len(tensor) * ratio) + split1.append(tensor[:x]) + split2.append(tensor[x:]) + + if len(tensors) == 1: + split1, split2 = split1[0], split2[0] + return split1, split2 + + +def predict_and_save_plot( + x: np.ndarray, y: np.ndarray, index: int, solver: ppsci.solver.Solver, plot_dir: str +): + """Make prediction and save visualization of result. + + Args: + x (np.ndarray): Input of test dataset. + y (np.ndarray): Output of test dataset. + index (int): Index of data to visualizer. + solver (ppsci.solver.Solver): Trained solver. + plot_dir (str): Directory to save plot. + """ + min_u = np.min(y[index, 0, :, :]) + max_u = np.max(y[index, 0, :, :]) + + min_v = np.min(y[index, 1, :, :]) + max_v = np.max(y[index, 1, :, :]) + + min_p = np.min(y[index, 2, :, :]) + max_p = np.max(y[index, 2, :, :]) + + output = solver.predict({"input": x}, return_numpy=True) + pred_y = output["output"] + error = np.abs(y - pred_y) + + min_error_u = np.min(error[index, 0, :, :]) + max_error_u = np.max(error[index, 0, :, :]) + + min_error_v = np.min(error[index, 1, :, :]) + max_error_v = np.max(error[index, 1, :, :]) + + min_error_p = np.min(error[index, 2, :, :]) + max_error_p = np.max(error[index, 2, :, :]) + + plt.figure() + fig = plt.gcf() + fig.set_size_inches(15, 10) + plt.subplot(3, 3, 1) + plt.title("OpenFOAM", fontsize=18) + plt.imshow( + np.transpose(y[index, 0, :, :]), + cmap="jet", + vmin=min_u, + vmax=max_u, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.ylabel("Ux", fontsize=18) + plt.subplot(3, 3, 2) + plt.title("DeepCFD", fontsize=18) + plt.imshow( + np.transpose(pred_y[index, 0, :, :]), + cmap="jet", + vmin=min_u, + vmax=max_u, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.subplot(3, 3, 3) + plt.title("Error", fontsize=18) + plt.imshow( + np.transpose(error[index, 0, :, :]), + cmap="jet", + vmin=min_error_u, + vmax=max_error_u, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + + plt.subplot(3, 3, 4) + plt.imshow( + np.transpose(y[index, 1, :, :]), + cmap="jet", + vmin=min_v, + vmax=max_v, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.ylabel("Uy", fontsize=18) + plt.subplot(3, 3, 5) + plt.imshow( + np.transpose(pred_y[index, 1, :, :]), + cmap="jet", + vmin=min_v, + vmax=max_v, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.subplot(3, 3, 6) + plt.imshow( + np.transpose(error[index, 1, :, :]), + cmap="jet", + vmin=min_error_v, + vmax=max_error_v, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + + plt.subplot(3, 3, 7) + plt.imshow( + np.transpose(y[index, 2, :, :]), + cmap="jet", + vmin=min_p, + vmax=max_p, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.ylabel("p", fontsize=18) + plt.subplot(3, 3, 8) + plt.imshow( + np.transpose(pred_y[index, 2, :, :]), + cmap="jet", + vmin=min_p, + vmax=max_p, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.subplot(3, 3, 9) + plt.imshow( + np.transpose(error[index, 2, :, :]), + 
cmap="jet", + vmin=min_error_p, + vmax=max_error_p, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.tight_layout() + plt.savefig(os.path.join(plot_dir, f"cfd_{index}.png"), bbox_inches="tight") + plt.show() + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", os.path.join(cfg.output_dir, "train.log"), "info") + + # initialize datasets + with open(cfg.DATAX_PATH, "rb") as file: + x = pickle.load(file) + with open(cfg.DATAY_PATH, "rb") as file: + y = pickle.load(file) + + # split dataset to train dataset and test dataset + train_dataset, test_dataset = split_tensors(x, y, ratio=cfg.SLIPT_RATIO) + train_x, train_y = train_dataset + test_x, test_y = test_dataset + + # initialize model + model = ppsci.arch.UNetEx(**cfg.MODEL) + + CHANNELS_WEIGHTS = np.reshape( + np.sqrt( + np.mean( + np.transpose(y, (0, 2, 3, 1)).reshape( + (cfg.SAMPLE_SIZE * cfg.X_SIZE * cfg.Y_SIZE, cfg.CHANNEL_SIZE) + ) + ** 2, + axis=0, + ) + ), + (1, -1, 1, 1), + ) + + # define loss + def loss_expr( + output_dict: Dict[str, np.ndarray], + label_dict: Dict[str, np.ndarray] = None, + weight_dict: Dict[str, np.ndarray] = None, + ) -> float: + output = output_dict["output"] + y = label_dict["output"] + loss_u = (output[:, 0:1, :, :] - y[:, 0:1, :, :]) ** 2 + loss_v = (output[:, 1:2, :, :] - y[:, 1:2, :, :]) ** 2 + loss_p = (output[:, 2:3, :, :] - y[:, 2:3, :, :]).abs() + loss = (loss_u + loss_v + loss_p) / CHANNELS_WEIGHTS + return {"output": loss.sum()} + + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": train_x}, + "label": {"output": train_y}, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.FunctionalLoss(loss_expr), + name="sup_constraint", + ) + + # manually build constraint + constraint = {sup_constraint.name: sup_constraint} + + # initialize Adam optimizer + optimizer = ppsci.optimizer.Adam( + cfg.TRAIN.learning_rate, weight_decay=cfg.TRAIN.weight_decay + )(model) + + # manually build validator + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": test_x}, + "label": {"output": test_y}, + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + def metric_expr( + output_dict: Dict[str, np.ndarray], + label_dict: Dict[str, np.ndarray] = None, + weight_dict: Dict[str, np.ndarray] = None, + ) -> Dict[str, float]: + output = output_dict["output"] + y = label_dict["output"] + total_mse = ((output - y) ** 2).sum() / len(test_x) + ux_mse = ((output[:, 0, :, :] - test_y[:, 0, :, :]) ** 2).sum() / len(test_x) + uy_mse = ((output[:, 1, :, :] - test_y[:, 1, :, :]) ** 2).sum() / len(test_x) + p_mse = ((output[:, 2, :, :] - test_y[:, 2, :, :]) ** 2).sum() / len(test_x) + return { + "Total_MSE": total_mse, + "Ux_MSE": ux_mse, + "Uy_MSE": uy_mse, + "p_MSE": p_mse, + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.FunctionalLoss(loss_expr), + {"output": lambda out: out["output"]}, + {"MSE": ppsci.metric.FunctionalMetric(metric_expr)}, + name="mse_validator", + ) + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + 
epochs=cfg.TRAIN.epochs, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + seed=cfg.seed, + validator=validator, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # train model + solver.train() + + # evaluate after finished training + solver.eval() + + PLOT_DIR = os.path.join(cfg.output_dir, "visual") + os.makedirs(PLOT_DIR, exist_ok=True) + + # visualize prediction after finished training + predict_and_save_plot(test_x, test_y, 0, solver, PLOT_DIR) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", os.path.join(cfg.output_dir, "eval.log"), "info") + + # initialize datasets + with open(cfg.DATAX_PATH, "rb") as file: + x = pickle.load(file) + with open(cfg.DATAY_PATH, "rb") as file: + y = pickle.load(file) + + # split dataset to train dataset and test dataset + train_dataset, test_dataset = split_tensors(x, y, ratio=cfg.SLIPT_RATIO) + train_x, train_y = train_dataset + test_x, test_y = test_dataset + + # initialize model + model = ppsci.arch.UNetEx(**cfg.MODEL) + + CHANNELS_WEIGHTS = np.reshape( + np.sqrt( + np.mean( + np.transpose(y, (0, 2, 3, 1)).reshape( + (cfg.SAMPLE_SIZE * cfg.X_SIZE * cfg.Y_SIZE, cfg.CHANNEL_SIZE) + ) + ** 2, + axis=0, + ) + ), + (1, -1, 1, 1), + ) + + # define loss + def loss_expr( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"] = None, + weight_dict: Dict[str, "paddle.Tensor"] = None, + ) -> Dict[str, "paddle.Tensor"]: + output = output_dict["output"] + y = label_dict["output"] + loss_u = (output[:, 0:1, :, :] - y[:, 0:1, :, :]) ** 2 + loss_v = (output[:, 1:2, :, :] - y[:, 1:2, :, :]) ** 2 + loss_p = (output[:, 2:3, :, :] - y[:, 2:3, :, :]).abs() + loss = (loss_u + loss_v + loss_p) / CHANNELS_WEIGHTS + return {"custom_loss": loss.sum()} + + # manually build validator + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": test_x}, + "label": {"output": test_y}, + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + def metric_expr( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"] = None, + weight_dict: Dict[str, "paddle.Tensor"] = None, + ) -> Dict[str, "paddle.Tensor"]: + output = output_dict["output"] + y = label_dict["output"] + total_mse = ((output - y) ** 2).sum() / len(test_x) + ux_mse = ((output[:, 0, :, :] - test_y[:, 0, :, :]) ** 2).sum() / len(test_x) + uy_mse = ((output[:, 1, :, :] - test_y[:, 1, :, :]) ** 2).sum() / len(test_x) + p_mse = ((output[:, 2, :, :] - test_y[:, 2, :, :]) ** 2).sum() / len(test_x) + return { + "Total_MSE": total_mse, + "Ux_MSE": ux_mse, + "Uy_MSE": uy_mse, + "p_MSE": p_mse, + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.FunctionalLoss(loss_expr), + {"output": lambda out: out["output"]}, + {"MSE": ppsci.metric.FunctionalMetric(metric_expr)}, + name="mse_validator", + ) + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # evaluate + solver.eval() + + PLOT_DIR = os.path.join(cfg.output_dir, "visual") + os.makedirs(PLOT_DIR, exist_ok=True) + + # visualize 
prediction + predict_and_save_plot(test_x, test_y, 0, solver, PLOT_DIR) + + +def export(cfg: DictConfig): + model = ppsci.arch.UNetEx(**cfg.MODEL) + + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec( + [None, cfg.CHANNEL_SIZE, cfg.X_SIZE, cfg.Y_SIZE], "float32", name=key + ) + for key in model.input_keys + }, + ] + + solver.export(input_spec, cfg.INFER.export_path) + print(f"Model has been exported to {cfg.INFER.export_path}") + + +def predict_and_save_plot_infer( + x: np.ndarray, + y: np.ndarray, + pred_y: np.ndarray, + index: int, + plot_dir: str, +): + """Make prediction and save visualization of result during inference. + + Args: + x (np.ndarray): Input of test dataset. + y (np.ndarray): Ground truth output of test dataset. + pred_y (np.ndarray): Predicted output from inference. + index (int): Index of data to visualize. + plot_dir (str): Directory to save plot. + """ + + # Extract the true and predicted values for each channel + u_true = y[index, 0, :, :] + v_true = y[index, 1, :, :] + p_true = y[index, 2, :, :] + + u_pred = pred_y[index, 0, :, :] + v_pred = pred_y[index, 1, :, :] + p_pred = pred_y[index, 2, :, :] + + # Compute the absolute error between true and predicted values + error_u = np.abs(u_true - u_pred) + error_v = np.abs(v_true - v_pred) + error_p = np.abs(p_true - p_pred) + + # Calculate the min and max values for each channel + min_u, max_u = u_true.min(), u_true.max() + min_v, max_v = v_true.min(), v_true.max() + min_p, max_p = p_true.min(), p_true.max() + + min_error_u, max_error_u = error_u.min(), error_u.max() + min_error_v, max_error_v = error_v.min(), error_v.max() + min_error_p, max_error_p = error_p.min(), error_p.max() + + # Start plotting + plt.figure(figsize=(15, 10)) + + # Plot Ux channel (True, Predicted, and Error) + plt.subplot(3, 3, 1) + plt.title("OpenFOAM Ux", fontsize=18) + plt.imshow( + np.transpose(u_true), + cmap="jet", + vmin=min_u, + vmax=max_u, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.ylabel("Ux", fontsize=18) + + plt.subplot(3, 3, 2) + plt.title("DeepCFD Ux", fontsize=18) + plt.imshow( + np.transpose(u_pred), + cmap="jet", + vmin=min_u, + vmax=max_u, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + + plt.subplot(3, 3, 3) + plt.title("Error Ux", fontsize=18) + plt.imshow( + np.transpose(error_u), + cmap="jet", + vmin=min_error_u, + vmax=max_error_u, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + + # Plot Uy channel (True, Predicted, and Error) + plt.subplot(3, 3, 4) + plt.imshow( + np.transpose(v_true), + cmap="jet", + vmin=min_v, + vmax=max_v, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + plt.ylabel("Uy", fontsize=18) + + plt.subplot(3, 3, 5) + plt.imshow( + np.transpose(v_pred), + cmap="jet", + vmin=min_v, + vmax=max_v, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + + plt.subplot(3, 3, 6) + plt.imshow( + np.transpose(error_v), + cmap="jet", + vmin=min_error_v, + vmax=max_error_v, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + + # Plot pressure channel p (True, Predicted, and Error) + plt.subplot(3, 3, 7) + plt.imshow( + np.transpose(p_true), + cmap="jet", + vmin=min_p, + vmax=max_p, + origin="lower", + extent=[0, 260, 0, 120], + ) + 
plt.colorbar(orientation="horizontal") + plt.ylabel("p", fontsize=18) + + plt.subplot(3, 3, 8) + plt.imshow( + np.transpose(p_pred), + cmap="jet", + vmin=min_p, + vmax=max_p, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + + plt.subplot(3, 3, 9) + plt.imshow( + np.transpose(error_p), + cmap="jet", + vmin=min_error_p, + vmax=max_error_p, + origin="lower", + extent=[0, 260, 0, 120], + ) + plt.colorbar(orientation="horizontal") + + plt.tight_layout() + plt.savefig(os.path.join(plot_dir, f"cfd_{index}.png"), bbox_inches="tight") + plt.close() + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + # Load test dataset from serialized files + with open(cfg.DATAX_PATH, "rb") as file: + x = pickle.load(file) + with open(cfg.DATAY_PATH, "rb") as file: + y = pickle.load(file) + + # Split data into training and test sets + _, test_dataset = split_tensors(x, y, ratio=cfg.SLIPT_RATIO) + test_x, test_y = test_dataset + + input_dict = {cfg.MODEL.input_key: test_x} + + # Initialize the PINN predictor model + predictor = pinn_predictor.PINNPredictor(cfg) + + # Run inference and get predictions + output_dict = predictor.predict(input_dict, batch_size=cfg.INFER.batch_size) + + # Handle model's output key structure + actual_output_key = cfg.MODEL.output_key + + output_keys = ( + actual_output_key + if isinstance(actual_output_key, (list, tuple)) + else [actual_output_key] + ) + if len(output_keys) != len(output_dict): + raise ValueError( + "The number of output_keys does not match the number of output_dict keys." + ) + + # Map model output keys to values + output_dict = { + origin: value for origin, value in zip(output_keys, output_dict.values()) + } + + concat_output = output_dict[actual_output_key] + + if concat_output.ndim != 4 or concat_output.shape[1] != 3: + raise ValueError( + f"Unexpected shape of '{actual_output_key}': {concat_output.shape}. Expected (batch_size, 3, x_size, y_size)." + ) + + try: + # Extract Ux, Uy, and pressure from the predicted output + u_pred = concat_output[:, 0, :, :] # Ux + v_pred = concat_output[:, 1, :, :] # Uy + p_pred = concat_output[:, 2, :, :] # p + except IndexError as e: + print(f"Error in splitting '{actual_output_key}': {e}") + raise + + # Combine the predictions into one array for further processing + pred_y = np.stack([u_pred, v_pred, p_pred], axis=1) + + PLOT_DIR = os.path.join(cfg.output_dir, "infer_visual") + os.makedirs(PLOT_DIR, exist_ok=True) + + # Visualize and save the first five predictions + for index in range(min(5, pred_y.shape[0])): + predict_and_save_plot_infer(test_x, test_y, pred_y, index, PLOT_DIR) + + print(f"Inference completed. Results are saved in {PLOT_DIR}") + + +@hydra.main(version_base=None, config_path="./conf", config_name="deepcfd.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/deephpms/burgers.py b/examples/deephpms/burgers.py index 296241775a..9ab8f8ce31 100644 --- a/examples/deephpms/burgers.py +++ b/examples/deephpms/burgers.py @@ -1,441 +1,441 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
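The DeepCFD `train` and `evaluate` functions above normalize the per-channel losses by `CHANNELS_WEIGHTS`, the root-mean-square magnitude of each output channel (Ux, Uy, p) computed over the whole label set. A minimal NumPy sketch of that computation, using an illustrative array shape in place of the pickled data from `DATAY_PATH`:

import numpy as np

# Illustrative labels: (samples, channels, x_size, y_size); the real values
# come from cfg.SAMPLE_SIZE / cfg.CHANNEL_SIZE / cfg.X_SIZE / cfg.Y_SIZE.
y = np.random.rand(100, 3, 172, 79).astype("float32")

samples, channels, x_size, y_size = y.shape
flat = np.transpose(y, (0, 2, 3, 1)).reshape(samples * x_size * y_size, channels)

# RMS per channel, reshaped so it broadcasts over (N, C, H, W) loss maps.
channels_weights = np.sqrt(np.mean(flat**2, axis=0)).reshape(1, -1, 1, 1)
assert channels_weights.shape == (1, 3, 1, 1)
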
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import hydra -import numpy as np -import paddle -import paddle.nn.functional as F -import plotting as plot_func -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger -from ppsci.utils import reader -from ppsci.utils import save_load - - -def pde_loss_func(output_dict, *args): - losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum") - return {"pde": losses} - - -def pde_l2_rel_func(output_dict, *args): - rel_l2 = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm( - output_dict["du_t"] - ) - metric_dict = {"f_pde": rel_l2} - return metric_dict - - -def boundary_loss_func(output_dict, *args): - u_b = output_dict["u_sol"] - u_lb, u_ub = paddle.split(u_b, 2, axis=0) - - x_b = output_dict["x"] - du_x = jacobian(u_b, x_b) - - du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0) - - losses = F.mse_loss(u_lb, u_ub, "sum") - losses += F.mse_loss(du_x_lb, du_x_ub, "sum") - return {"boundary": losses} - - -def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize burgers boundaries - t_lb = paddle.to_tensor(cfg.T_LB) - t_ub = paddle.to_tensor(cfg.T_UB) - x_lb = paddle.to_tensor(cfg.X_LB) - x_ub = paddle.to_tensor(cfg.T_UB) - - # initialize models - model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) - model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) - model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net) - - # initialize transform - def transform_u(_in): - t, x = _in["t"], _in["x"] - t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 - x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 - input_trans = {"t": t, "x": x} - return input_trans - - def transform_f(input, model, out_key): - in_idn = {"t": input["t"], "x": input["x"]} - x = input["x"] - u = model(in_idn)[out_key] - du_x = jacobian(u, x) - du_xx = hessian(u, x) - input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx} - return input_trans - - def transform_f_idn(_in): - return transform_f(_in, model_idn, "u_idn") - - def transform_f_sol(_in): - return transform_f(_in, model_sol, "u_sol") - - # register transform - model_idn.register_input_transform(transform_u) - model_pde.register_input_transform(transform_f_idn) - model_sol.register_input_transform(transform_u) - - # initialize model list - model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol)) - - # initialize optimizer - # Adam - optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn) - optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde) - optimizer_sol = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_sol) - - # LBFGS - # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_idn) - # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_pde) - # 
optimizer_sol = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_sol) - - # stage 1: training identification net - # manually build constraint(s) - train_dataloader_cfg_idn = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("u_idn",), - "alias_dict": {"t": "t_train", "x": "x_train", "u_idn": "u_train"}, - }, - } - - sup_constraint_idn = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {"u_idn": lambda out: out["u_idn"]}, - name="u_mse_sup", - ) - constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} - - # manually build validator - eval_dataloader_cfg_idn = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("u_idn",), - "alias_dict": {"t": "t_star", "x": "x_star", "u_idn": "u_star"}, - }, - } - - sup_validator_idn = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {"u_idn": lambda out: out["u_idn"]}, - {"l2": ppsci.metric.L2Rel()}, - name="u_L2_sup", - ) - validator_idn = {sup_validator_idn.name: sup_validator_idn} - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_idn, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 2: training pde net - # manually build constraint(s) - train_dataloader_cfg_pde = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": "t_train", "x": "x_train", "du_t": "t_train"}, - }, - } - - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - }, - name="f_mse_sup", - ) - constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} - - # manually build validator - eval_dataloader_cfg_pde = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": "t_star", "x": "x_star", "du_t": "t_star"}, - }, - } - - sup_validator_pde = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - }, - {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, - name="f_L2_sup", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_pde, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 3: training solution net - # re-register transform for model 2, fit for loss of stage 3 - model_pde.register_input_transform(transform_f_sol) - - # manually build constraint(s) - train_dataloader_cfg_sol_f = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": 
"t_f_train", "x": "x_f_train", "du_t": "t_f_train"}, - }, - } - train_dataloader_cfg_sol_init = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("u_sol",), - "alias_dict": {"t": "t0", "x": "x0", "u_sol": "u0"}, - }, - } - train_dataloader_cfg_sol_bc = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("x",), - "alias_dict": {"t": "tb", "x": "xb"}, - }, - } - - sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_f, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "f_pde": lambda out: out["f_pde"], - "du_t": lambda out: jacobian(out["u_sol"], out["t"]), - }, - name="f_mse_sup", - ) - sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_init, - ppsci.loss.MSELoss("sum"), - {"u_sol": lambda out: out["u_sol"]}, - name="u0_mse_sup", - ) - sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_bc, - ppsci.loss.FunctionalLoss(boundary_loss_func), - { - "x": lambda out: out["x"], - "u_sol": lambda out: out["u_sol"], - }, - name="ub_mse_sup", - ) - constraint_sol = { - sup_constraint_sol_f.name: sup_constraint_sol_f, - sup_constraint_sol_init.name: sup_constraint_sol_init, - sup_constraint_sol_bc.name: sup_constraint_sol_bc, - } - - # manually build validator - eval_dataloader_cfg_sol = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("u_sol",), - "alias_dict": {"t": "t_star", "x": "x_star", "u_sol": "u_star"}, - }, - } - - sup_validator_sol = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_sol, - ppsci.loss.MSELoss("sum"), - {"u_sol": lambda out: out["u_sol"]}, - {"l2": ppsci.metric.L2Rel()}, - name="u_L2_sup", - ) - validator_sol = {sup_validator_sol.name: sup_validator_sol} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_sol, - cfg.output_dir, - optimizer_sol, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_sol, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize burgers boundaries - t_lb = paddle.to_tensor(cfg.T_LB) - t_ub = paddle.to_tensor(cfg.T_UB) - x_lb = paddle.to_tensor(cfg.X_LB) - x_ub = paddle.to_tensor(cfg.T_UB) - - # initialize models - model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) - model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) - model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net) - - # initialize transform - def transform_u(_in): - t, x = _in["t"], _in["x"] - t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 - x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 - input_trans = {"t": t, "x": x} - return input_trans - - def transform_f(input, model, out_key): - in_idn = {"t": input["t"], "x": input["x"]} - x = input["x"] - u = model(in_idn)[out_key] - du_x = jacobian(u, x) - du_xx = hessian(u, x) - input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx} - return input_trans - - def transform_f_sol(_in): - return transform_f(_in, model_sol, "u_sol") - - # register transform - model_idn.register_input_transform(transform_u) - 
model_pde.register_input_transform(transform_f_sol) - model_sol.register_input_transform(transform_u) - - # initialize model list - model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol)) - - # stage 3: solution net - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # load dataset - dataset_val = reader.load_mat_file( - cfg.DATASET_PATH_SOL, - keys=("t", "x", "u_sol"), - alias_dict={ - "t": "t_ori", - "x": "x_ori", - "u_sol": "Exact_ori", - }, - ) - - t_sol, x_sol = np.meshgrid( - np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"]) - ) - t_sol_flatten = paddle.to_tensor( - t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - x_sol_flatten = paddle.to_tensor( - x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - u_sol_pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten}) - - # eval - l2_error = np.linalg.norm( - dataset_val["u_sol"] - u_sol_pred["u_sol"], 2 - ) / np.linalg.norm(dataset_val["u_sol"], 2) - logger.info(f"l2_error: {l2_error}") - - # plotting - plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy() - plot_func.draw_and_save( - figname="burgers_sol", - data_exact=dataset_val["u_sol"], - data_learned=u_sol_pred["u_sol"].numpy(), - boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB], - griddata_points=plot_points, - griddata_xi=(t_sol, x_sol), - save_path=cfg.output_dir, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="burgers.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
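In `transform_u`, both `t` and `x` are rescaled to [-1, 1] before being fed to the MLPs; note that the code builds `x_ub` from `cfg.T_UB`, while the symmetric form of the transform suggests `cfg.X_UB` is the intended bound. A minimal sketch of the rescaling under that assumption, with the domain bounds taken from `conf/burgers.yaml`:

import paddle

# Domain bounds from conf/burgers.yaml.
t_lb, t_ub = paddle.to_tensor(0.0), paddle.to_tensor(10.0)
x_lb, x_ub = paddle.to_tensor(-8.0), paddle.to_tensor(8.0)  # assumption: X_UB rather than T_UB


def transform_u(_in):
    # Map t and x from their physical ranges onto [-1, 1].
    t = 2.0 * (_in["t"] - t_lb) / (t_ub - t_lb) - 1.0
    x = 2.0 * (_in["x"] - x_lb) / (x_ub - x_lb) - 1.0
    return {"t": t, "x": x}


out = transform_u({"t": paddle.to_tensor([5.0]), "x": paddle.to_tensor([8.0])})
# out["t"] == 0.0 (middle of [0, 10]), out["x"] == 1.0 (upper end of [-8, 8])
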
+ +from os import path as osp + +import hydra +import numpy as np +import paddle +import paddle.nn.functional as F +import plotting as plot_func +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger +from ppsci.utils import reader +from ppsci.utils import save_load + + +def pde_loss_func(output_dict, *args): + losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum") + return {"pde": losses} + + +def pde_l2_rel_func(output_dict, *args): + rel_l2 = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm( + output_dict["du_t"] + ) + metric_dict = {"f_pde": rel_l2} + return metric_dict + + +def boundary_loss_func(output_dict, *args): + u_b = output_dict["u_sol"] + u_lb, u_ub = paddle.split(u_b, 2, axis=0) + + x_b = output_dict["x"] + du_x = jacobian(u_b, x_b) + + du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0) + + losses = F.mse_loss(u_lb, u_ub, "sum") + losses += F.mse_loss(du_x_lb, du_x_ub, "sum") + return {"boundary": losses} + + +def train(cfg: DictConfig): + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize burgers boundaries + t_lb = paddle.to_tensor(cfg.T_LB) + t_ub = paddle.to_tensor(cfg.T_UB) + x_lb = paddle.to_tensor(cfg.X_LB) + x_ub = paddle.to_tensor(cfg.T_UB) + + # initialize models + model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) + model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) + model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net) + + # initialize transform + def transform_u(_in): + t, x = _in["t"], _in["x"] + t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 + x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 + input_trans = {"t": t, "x": x} + return input_trans + + def transform_f(input, model, out_key): + in_idn = {"t": input["t"], "x": input["x"]} + x = input["x"] + u = model(in_idn)[out_key] + du_x = jacobian(u, x) + du_xx = hessian(u, x) + input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx} + return input_trans + + def transform_f_idn(_in): + return transform_f(_in, model_idn, "u_idn") + + def transform_f_sol(_in): + return transform_f(_in, model_sol, "u_sol") + + # register transform + model_idn.register_input_transform(transform_u) + model_pde.register_input_transform(transform_f_idn) + model_sol.register_input_transform(transform_u) + + # initialize model list + model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol)) + + # initialize optimizer + # Adam + optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn) + optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde) + optimizer_sol = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_sol) + + # LBFGS + # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_idn) + # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_pde) + # optimizer_sol = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_sol) + + # stage 1: training identification net + # manually build constraint(s) + train_dataloader_cfg_idn = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("u_idn",), + "alias_dict": {"t": "t_train", "x": "x_train", "u_idn": "u_train"}, + }, + } + + sup_constraint_idn = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {"u_idn": lambda out: out["u_idn"]}, + 
name="u_mse_sup", + ) + constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} + + # manually build validator + eval_dataloader_cfg_idn = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("u_idn",), + "alias_dict": {"t": "t_star", "x": "x_star", "u_idn": "u_star"}, + }, + } + + sup_validator_idn = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {"u_idn": lambda out: out["u_idn"]}, + {"l2": ppsci.metric.L2Rel()}, + name="u_L2_sup", + ) + validator_idn = {sup_validator_idn.name: sup_validator_idn} + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + constraint_idn, + cfg.output_dir, + optimizer_idn, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_idn, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 2: training pde net + # manually build constraint(s) + train_dataloader_cfg_pde = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_train", "x": "x_train", "du_t": "t_train"}, + }, + } + + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "f_pde": lambda out: out["f_pde"], + }, + name="f_mse_sup", + ) + constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} + + # manually build validator + eval_dataloader_cfg_pde = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_star", "x": "x_star", "du_t": "t_star"}, + }, + } + + sup_validator_pde = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "f_pde": lambda out: out["f_pde"], + }, + {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, + name="f_L2_sup", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_pde, + cfg.output_dir, + optimizer_pde, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_pde, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 3: training solution net + # re-register transform for model 2, fit for loss of stage 3 + model_pde.register_input_transform(transform_f_sol) + + # manually build constraint(s) + train_dataloader_cfg_sol_f = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_f_train", "x": "x_f_train", "du_t": "t_f_train"}, + }, + } + train_dataloader_cfg_sol_init = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("u_sol",), + "alias_dict": {"t": "t0", "x": "x0", "u_sol": "u0"}, + }, + } + train_dataloader_cfg_sol_bc = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("x",), + "alias_dict": {"t": "tb", "x": "xb"}, + }, + } + + sup_constraint_sol_f = 
ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_f, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "f_pde": lambda out: out["f_pde"], + "du_t": lambda out: jacobian(out["u_sol"], out["t"]), + }, + name="f_mse_sup", + ) + sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_init, + ppsci.loss.MSELoss("sum"), + {"u_sol": lambda out: out["u_sol"]}, + name="u0_mse_sup", + ) + sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_bc, + ppsci.loss.FunctionalLoss(boundary_loss_func), + { + "x": lambda out: out["x"], + "u_sol": lambda out: out["u_sol"], + }, + name="ub_mse_sup", + ) + constraint_sol = { + sup_constraint_sol_f.name: sup_constraint_sol_f, + sup_constraint_sol_init.name: sup_constraint_sol_init, + sup_constraint_sol_bc.name: sup_constraint_sol_bc, + } + + # manually build validator + eval_dataloader_cfg_sol = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("u_sol",), + "alias_dict": {"t": "t_star", "x": "x_star", "u_sol": "u_star"}, + }, + } + + sup_validator_sol = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_sol, + ppsci.loss.MSELoss("sum"), + {"u_sol": lambda out: out["u_sol"]}, + {"l2": ppsci.metric.L2Rel()}, + name="u_L2_sup", + ) + validator_sol = {sup_validator_sol.name: sup_validator_sol} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_sol, + cfg.output_dir, + optimizer_sol, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_sol, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize burgers boundaries + t_lb = paddle.to_tensor(cfg.T_LB) + t_ub = paddle.to_tensor(cfg.T_UB) + x_lb = paddle.to_tensor(cfg.X_LB) + x_ub = paddle.to_tensor(cfg.T_UB) + + # initialize models + model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) + model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) + model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net) + + # initialize transform + def transform_u(_in): + t, x = _in["t"], _in["x"] + t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 + x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 + input_trans = {"t": t, "x": x} + return input_trans + + def transform_f(input, model, out_key): + in_idn = {"t": input["t"], "x": input["x"]} + x = input["x"] + u = model(in_idn)[out_key] + du_x = jacobian(u, x) + du_xx = hessian(u, x) + input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx} + return input_trans + + def transform_f_sol(_in): + return transform_f(_in, model_sol, "u_sol") + + # register transform + model_idn.register_input_transform(transform_u) + model_pde.register_input_transform(transform_f_sol) + model_sol.register_input_transform(transform_u) + + # initialize model list + model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol)) + + # stage 3: solution net + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # load dataset + dataset_val = reader.load_mat_file( + cfg.DATASET_PATH_SOL, + keys=("t", "x", "u_sol"), + alias_dict={ + "t": "t_ori", + "x": "x_ori", + "u_sol": "Exact_ori", + }, + ) + + t_sol, x_sol = np.meshgrid( + np.squeeze(dataset_val["t"]), 
np.squeeze(dataset_val["x"]) + ) + t_sol_flatten = paddle.to_tensor( + t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + x_sol_flatten = paddle.to_tensor( + x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + u_sol_pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten}) + + # eval + l2_error = np.linalg.norm( + dataset_val["u_sol"] - u_sol_pred["u_sol"], 2 + ) / np.linalg.norm(dataset_val["u_sol"], 2) + logger.info(f"l2_error: {l2_error}") + + # plotting + plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy() + plot_func.draw_and_save( + figname="burgers_sol", + data_exact=dataset_val["u_sol"], + data_learned=u_sol_pred["u_sol"].numpy(), + boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB], + griddata_points=plot_points, + griddata_xi=(t_sol, x_sol), + save_path=cfg.output_dir, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="burgers.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/deephpms/conf/burgers.yaml b/examples/deephpms/conf/burgers.yaml index 5db1a1e73a..6ce2ada936 100644 --- a/examples/deephpms/conf/burgers.yaml +++ b/examples/deephpms/conf/burgers.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -67,3 +68,73 @@ TRAIN: # evaluation settings EVAL: pretrained_model_path: null +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_burgers/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +DATASET_PATH: ./datasets/burgers_sine.mat +DATASET_PATH_SOL: ./datasets/burgers_sine.mat + +# set working condition +T_LB: 0.0 +T_UB: 10.0 +X_LB: -8.0 +X_UB: 8.0 + +# model settings +MODEL: + idn_net: + input_keys: ["t", "x"] + output_keys: ["u_idn"] + num_layers: 4 + hidden_size: 50 + activation: "sin" + pde_net: + input_keys: ["u_x", "du_x", "du_xx"] + output_keys: ["f_pde"] + num_layers: 2 + hidden_size: 100 + activation: "sin" + sol_net: + input_keys: ["t", "x"] + output_keys: ["u_sol"] + num_layers: 4 + hidden_size: 50 + activation: "sin" + +# training settings +TRAIN: + epochs: 50000 # set 1 for LBFGS + iters_per_epoch: 1 + max_iter: 50000 # for LBFGS + learning_rate: 1.0e-3 + eval_during_train: false + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/deephpms/conf/korteweg_de_vries.yaml b/examples/deephpms/conf/korteweg_de_vries.yaml index 7a25585cae..0e44d9ecb3 100644 --- a/examples/deephpms/conf/korteweg_de_vries.yaml +++ b/examples/deephpms/conf/korteweg_de_vries.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -67,3 +68,73 @@ TRAIN: # evaluation settings EVAL: pretrained_model_path: null +======= +hydra: + run: + # dynamic output 
directory according to running time and override name + dir: outputs_korteweg_de_vries/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +DATASET_PATH: ./datasets/KdV_sine.mat +DATASET_PATH_SOL: ./datasets/KdV_sine.mat + +# set working condition +T_LB: 0.0 +T_UB: 40.0 +X_LB: -20.0 +X_UB: 20.0 + +# model settings +MODEL: + idn_net: + input_keys: ["t", "x"] + output_keys: ["u_idn"] + num_layers: 4 + hidden_size: 50 + activation: "sin" + pde_net: + input_keys: ["u_x", "du_x", "du_xx", "du_xxx"] + output_keys: ["f_pde"] + num_layers: 2 + hidden_size: 100 + activation: "sin" + sol_net: + input_keys: ["t", "x"] + output_keys: ["u_sol"] + num_layers: 4 + hidden_size: 50 + activation: "sin" + +# training settings +TRAIN: + epochs: 50000 + iters_per_epoch: 1 + max_iter: 50000 # for LBFGS + learning_rate: 1.0e-3 + eval_during_train: false + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/deephpms/conf/kuramoto_sivashinsky.yaml b/examples/deephpms/conf/kuramoto_sivashinsky.yaml index 8610631e51..d8e5e76d99 100644 --- a/examples/deephpms/conf/kuramoto_sivashinsky.yaml +++ b/examples/deephpms/conf/kuramoto_sivashinsky.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -61,3 +62,67 @@ TRAIN: # evaluation settings EVAL: pretrained_model_path: null +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_kuramoto_sivashinsky/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +DATASET_PATH: ./datasets/KS.mat +DATASET_PATH_SOL: ./datasets/KS.mat + +# set working condition +T_LB: 0.0 +T_UB: 50.0 +X_LB: -10.0 +X_UB: 10.0 + +# model settings +MODEL: + idn_net: + input_keys: ["t", "x"] + output_keys: ["u_idn"] + num_layers: 4 + hidden_size: 50 + activation: "sin" + pde_net: + input_keys: ["u_x", "du_x", "du_xx", "du_xxx", "du_xxxx"] + output_keys: ["f_pde"] + num_layers: 2 + hidden_size: 100 + activation: "sin" + +# training settings +TRAIN: + epochs: 50000 + iters_per_epoch: 1 + max_iter: 50000 # for LBFGS + learning_rate: 1.0e-4 + eval_during_train: false + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/deephpms/conf/navier_stokes.yaml b/examples/deephpms/conf/navier_stokes.yaml index af1eac8c88..2516ce3ccf 100644 --- a/examples/deephpms/conf/navier_stokes.yaml +++ b/examples/deephpms/conf/navier_stokes.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default 
- TRAIN: train_default @@ -61,3 +62,67 @@ TRAIN: # evaluation settings EVAL: pretrained_model_path: null +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_navier_stokes/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +DATASET_PATH: ./datasets/cylinder.mat +DATASET_PATH_SOL: ./datasets/cylinder.mat + +# set working condition +LB: [0.0, 1, -1.7] +UB: [30.0, 7.5, 1.7] + +# model settings +MODEL: + idn_net: + input_keys: ["t", "x", "y"] + output_keys: ["w_idn"] + num_layers: 4 + hidden_size: 200 + activation: "sin" + pde_net: + input_keys: ["u", "v", "w", "dw_x", "dw_y", "dw_xx", "dw_xy", "dw_yy"] + output_keys: ["f_pde"] + num_layers: 2 + hidden_size: 100 + activation: "sin" + +# training settings +TRAIN: + epochs: 50000 + iters_per_epoch: 1 + max_iter: 50000 # for LBFGS + learning_rate: 1.0e-4 + batch_size: + eval: 10000 + eval_during_train: false + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/deephpms/conf/schrodinger.yaml b/examples/deephpms/conf/schrodinger.yaml index d7b11c0a24..3b3df6bb9b 100644 --- a/examples/deephpms/conf/schrodinger.yaml +++ b/examples/deephpms/conf/schrodinger.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -75,3 +76,81 @@ TRAIN: # evaluation settings EVAL: pretrained_model_path: null +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_schrodinger/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +DATASET_PATH: ./datasets/NLS.mat +DATASET_PATH_SOL: ./datasets/NLS.mat + +# set working condition +T_LB: 0.0 +T_UB: 2.0 +X_LB: -5.0 +X_UB: 5.0 + +# model settings +MODEL: + idn_u_net: + input_keys: ["t", "x"] + output_keys: ["u_idn"] + num_layers: 4 + hidden_size: 50 + activation: "sin" + idn_v_net: + input_keys: ["t", "x"] + output_keys: ["v_idn"] + num_layers: 4 + hidden_size: 50 + activation: "sin" + pde_f_net: + input_keys: ["u", "v", "du_x", "dv_x", "du_xx", "dv_xx"] + output_keys: ["f_pde"] + num_layers: 2 + hidden_size: 100 + activation: "sin" + pde_g_net: + input_keys: ["u", "v", "du_x", "dv_x", "du_xx", "dv_xx"] + output_keys: ["g_pde"] + num_layers: 2 + hidden_size: 100 + activation: "sin" + +# training settings +TRAIN: + epochs: 50000 + iters_per_epoch: 1 + max_iter: 50000 # for LBFGS + learning_rate: 1.0e-4 + batch_size: + eval: 10000 + eval_during_train: false + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + 
pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/deephpms/korteweg_de_vries.py b/examples/deephpms/korteweg_de_vries.py index b491e0557a..371d7a5d4d 100644 --- a/examples/deephpms/korteweg_de_vries.py +++ b/examples/deephpms/korteweg_de_vries.py @@ -1,457 +1,457 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import hydra -import numpy as np -import paddle -import paddle.nn.functional as F -import plotting as plot_func -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger -from ppsci.utils import reader -from ppsci.utils import save_load - - -def pde_loss_func(output_dict, *args): - losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum") - return {"pde": losses} - - -def pde_l2_rel_func(output_dict, *args): - rel_l2 = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm( - output_dict["du_t"] - ) - metric_dict = {"f_pde": rel_l2} - return metric_dict - - -def boundary_loss_func(output_dict, *args): - u_b = output_dict["u_sol"] - u_lb, u_ub = paddle.split(u_b, 2, axis=0) - - x_b = output_dict["x"] - du_x = jacobian(u_b, x_b) - du_xx = hessian(u_b, x_b) - - du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0) - du_xx_lb, du_xx_ub = paddle.split(du_xx, 2, axis=0) - - losses = F.mse_loss(u_lb, u_ub, "sum") - losses += F.mse_loss(du_x_lb, du_x_ub, "sum") - losses += F.mse_loss(du_xx_lb, du_xx_ub, "sum") - return {"boundary": losses} - - -def train(cfg: DictConfig): - # open FLAG for higher order differential operator when order >= 4 - paddle.framework.core.set_prim_eager_enabled(True) - - ppsci.utils.misc.set_random_seed(42) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize boundaries - t_lb = paddle.to_tensor(cfg.T_LB) - t_ub = paddle.to_tensor(cfg.T_UB) - x_lb = paddle.to_tensor(cfg.X_LB) - x_ub = paddle.to_tensor(cfg.T_UB) - - # initialize models - model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) - model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) - model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net) - - # initialize transform - def transform_u(_in): - t, x = _in["t"], _in["x"] - t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 - x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 - input_trans = {"t": t, "x": x} - return input_trans - - def transform_f(input, model, out_key): - in_idn = {"t": input["t"], "x": input["x"]} - x = input["x"] - u = model(in_idn)[out_key] - du_x = jacobian(u, x) - du_xx = hessian(u, x) - du_xxx = jacobian(du_xx, x) - input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx, "du_xxx": du_xxx} - return input_trans - - def transform_f_idn(_in): - return transform_f(_in, model_idn, "u_idn") - - def transform_f_sol(_in): - return transform_f(_in, model_sol, "u_sol") - - # register transform - model_idn.register_input_transform(transform_u) - 
model_pde.register_input_transform(transform_f_idn) - model_sol.register_input_transform(transform_u) - - # initialize model list - model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol)) - - # initialize optimizer - # Adam - optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn) - optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde) - optimizer_sol = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_sol) - - # LBFGS - # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_idn) - # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_pde) - # optimizer_sol = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_sol) - - # stage 1: training identification net - # manually build constraint(s) - train_dataloader_cfg_idn = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("u_idn",), - "alias_dict": {"t": "t_train", "x": "x_train", "u_idn": "u_train"}, - }, - } - - sup_constraint_idn = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {"u_idn": lambda out: out["u_idn"]}, - name="u_mse_sup", - ) - constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} - - # manually build validator - eval_dataloader_cfg_idn = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("u_idn",), - "alias_dict": {"t": "t_star", "x": "x_star", "u_idn": "u_star"}, - }, - } - - sup_validator_idn = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {"u_idn": lambda out: out["u_idn"]}, - {"l2": ppsci.metric.L2Rel()}, - name="u_L2_sup", - ) - validator_idn = {sup_validator_idn.name: sup_validator_idn} - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_idn, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 2: training pde net - # manually build constraint(s) - train_dataloader_cfg_pde = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": "t_train", "x": "x_train", "du_t": "t_train"}, - }, - } - - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - }, - name="f_mse_sup", - ) - constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} - - # manually build validator - eval_dataloader_cfg_pde = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": "t_star", "x": "x_star", "du_t": "t_star"}, - }, - } - - sup_validator_pde = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - }, - {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, - name="f_L2_sup", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_pde, - 
cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_pde, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 3: training solution net - # re-register transform for model 2, fit for loss of stage 3 - model_pde.register_input_transform(transform_f_sol) - - # manually build constraint(s) - train_dataloader_cfg_sol_f = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": "t_f_train", "x": "x_f_train", "du_t": "t_f_train"}, - }, - } - train_dataloader_cfg_sol_init = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("u_sol",), - "alias_dict": {"t": "t0", "x": "x0", "u_sol": "u0"}, - }, - } - train_dataloader_cfg_sol_bc = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("x",), - "alias_dict": {"t": "tb", "x": "xb"}, - }, - } - - sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_f, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "f_pde": lambda out: out["f_pde"], - "du_t": lambda out: jacobian(out["u_sol"], out["t"]), - }, - name="f_mse_sup", - ) - sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_init, - ppsci.loss.MSELoss("sum"), - {"u_sol": lambda out: out["u_sol"]}, - name="u0_mse_sup", - ) - sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_bc, - ppsci.loss.FunctionalLoss(boundary_loss_func), - { - "x": lambda out: out["x"], - "u_sol": lambda out: out["u_sol"], - }, - name="ub_mse_sup", - ) - constraint_sol = { - sup_constraint_sol_f.name: sup_constraint_sol_f, - sup_constraint_sol_init.name: sup_constraint_sol_init, - sup_constraint_sol_bc.name: sup_constraint_sol_bc, - } - - # manually build validator - eval_dataloader_cfg_sol = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("u_sol",), - "alias_dict": {"t": "t_star", "x": "x_star", "u_sol": "u_star"}, - }, - } - - sup_validator_sol = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_sol, - ppsci.loss.MSELoss("sum"), - {"u_sol": lambda out: out["u_sol"]}, - {"l2": ppsci.metric.L2Rel()}, - name="u_L2_sup", - ) - validator_sol = {sup_validator_sol.name: sup_validator_sol} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_sol, - cfg.output_dir, - optimizer_sol, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_sol, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # open FLAG for higher order differential operator when order >= 4 - paddle.framework.core.set_prim_eager_enabled(True) - - ppsci.utils.misc.set_random_seed(42) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize boundaries - t_lb = paddle.to_tensor(cfg.T_LB) - t_ub = paddle.to_tensor(cfg.T_UB) - x_lb = paddle.to_tensor(cfg.X_LB) - x_ub = paddle.to_tensor(cfg.T_UB) - - # initialize models - model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) - model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) - 
model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net) - - # initialize transform - def transform_u(_in): - t, x = _in["t"], _in["x"] - t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 - x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 - input_trans = {"t": t, "x": x} - return input_trans - - def transform_f(input, model, out_key): - in_idn = {"t": input["t"], "x": input["x"]} - x = input["x"] - u = model(in_idn)[out_key] - du_x = jacobian(u, x) - du_xx = hessian(u, x) - du_xxx = jacobian(du_xx, x) - input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx, "du_xxx": du_xxx} - return input_trans - - def transform_f_sol(_in): - return transform_f(_in, model_sol, "u_sol") - - # register transform - model_idn.register_input_transform(transform_u) - model_pde.register_input_transform(transform_f_sol) - model_sol.register_input_transform(transform_u) - - # initialize model list - model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol)) - - # stage 3: solution net - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # load dataset - dataset_val = reader.load_mat_file( - cfg.DATASET_PATH_SOL, - keys=("t", "x", "u_sol"), - alias_dict={ - "t": "t_ori", - "x": "x_ori", - "u_sol": "Exact_ori", - }, - ) - - t_sol, x_sol = np.meshgrid( - np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"]) - ) - t_sol_flatten = paddle.to_tensor( - t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - x_sol_flatten = paddle.to_tensor( - x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - u_sol_pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten}) - - # eval - l2_error = np.linalg.norm( - dataset_val["u_sol"] - u_sol_pred["u_sol"], 2 - ) / np.linalg.norm(dataset_val["u_sol"], 2) - logger.info(f"l2_error: {l2_error}") - - # plotting - plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy() - plot_func.draw_and_save( - figname="korteweg_de_vries_sol", - data_exact=dataset_val["u_sol"], - data_learned=u_sol_pred["u_sol"].numpy(), - boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB], - griddata_points=plot_points, - griddata_xi=(t_sol, x_sol), - save_path=cfg.output_dir, - ) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="korteweg_de_vries.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
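Each evaluate() in these examples reports a relative L2 error between the reference field from the .mat file and the network prediction. A small self-contained sketch of that metric, assuming both arrays are flattened to a common shape before comparison:

import numpy as np

def relative_l2(u_exact: np.ndarray, u_pred: np.ndarray) -> float:
    # ||u_exact - u_pred||_2 / ||u_exact||_2, the same ratio evaluate() computes inline
    u_exact = np.asarray(u_exact).reshape([-1])
    u_pred = np.asarray(u_pred).reshape([-1])
    return float(np.linalg.norm(u_exact - u_pred, 2) / np.linalg.norm(u_exact, 2))

# usage (illustrative): relative_l2(dataset_val["u_sol"], u_sol_pred["u_sol"].numpy())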
+ +from os import path as osp + +import hydra +import numpy as np +import paddle +import paddle.nn.functional as F +import plotting as plot_func +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger +from ppsci.utils import reader +from ppsci.utils import save_load + + +def pde_loss_func(output_dict, *args): + losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum") + return {"pde": losses} + + +def pde_l2_rel_func(output_dict, *args): + rel_l2 = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm( + output_dict["du_t"] + ) + metric_dict = {"f_pde": rel_l2} + return metric_dict + + +def boundary_loss_func(output_dict, *args): + u_b = output_dict["u_sol"] + u_lb, u_ub = paddle.split(u_b, 2, axis=0) + + x_b = output_dict["x"] + du_x = jacobian(u_b, x_b) + du_xx = hessian(u_b, x_b) + + du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0) + du_xx_lb, du_xx_ub = paddle.split(du_xx, 2, axis=0) + + losses = F.mse_loss(u_lb, u_ub, "sum") + losses += F.mse_loss(du_x_lb, du_x_ub, "sum") + losses += F.mse_loss(du_xx_lb, du_xx_ub, "sum") + return {"boundary": losses} + + +def train(cfg: DictConfig): + # open FLAG for higher order differential operator when order >= 4 + paddle.framework.core.set_prim_eager_enabled(True) + + ppsci.utils.misc.set_random_seed(42) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize boundaries + t_lb = paddle.to_tensor(cfg.T_LB) + t_ub = paddle.to_tensor(cfg.T_UB) + x_lb = paddle.to_tensor(cfg.X_LB) + x_ub = paddle.to_tensor(cfg.T_UB) + + # initialize models + model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) + model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) + model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net) + + # initialize transform + def transform_u(_in): + t, x = _in["t"], _in["x"] + t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 + x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 + input_trans = {"t": t, "x": x} + return input_trans + + def transform_f(input, model, out_key): + in_idn = {"t": input["t"], "x": input["x"]} + x = input["x"] + u = model(in_idn)[out_key] + du_x = jacobian(u, x) + du_xx = hessian(u, x) + du_xxx = jacobian(du_xx, x) + input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx, "du_xxx": du_xxx} + return input_trans + + def transform_f_idn(_in): + return transform_f(_in, model_idn, "u_idn") + + def transform_f_sol(_in): + return transform_f(_in, model_sol, "u_sol") + + # register transform + model_idn.register_input_transform(transform_u) + model_pde.register_input_transform(transform_f_idn) + model_sol.register_input_transform(transform_u) + + # initialize model list + model_list = ppsci.arch.ModelList((model_idn, model_pde, model_sol)) + + # initialize optimizer + # Adam + optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn) + optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde) + optimizer_sol = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_sol) + + # LBFGS + # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_idn) + # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_pde) + # optimizer_sol = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_sol) + + # stage 1: training identification net + # manually build constraint(s) + train_dataloader_cfg_idn = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + 
"input_keys": ("t", "x"), + "label_keys": ("u_idn",), + "alias_dict": {"t": "t_train", "x": "x_train", "u_idn": "u_train"}, + }, + } + + sup_constraint_idn = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {"u_idn": lambda out: out["u_idn"]}, + name="u_mse_sup", + ) + constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} + + # manually build validator + eval_dataloader_cfg_idn = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("u_idn",), + "alias_dict": {"t": "t_star", "x": "x_star", "u_idn": "u_star"}, + }, + } + + sup_validator_idn = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {"u_idn": lambda out: out["u_idn"]}, + {"l2": ppsci.metric.L2Rel()}, + name="u_L2_sup", + ) + validator_idn = {sup_validator_idn.name: sup_validator_idn} + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + constraint_idn, + cfg.output_dir, + optimizer_idn, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_idn, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 2: training pde net + # manually build constraint(s) + train_dataloader_cfg_pde = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_train", "x": "x_train", "du_t": "t_train"}, + }, + } + + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "f_pde": lambda out: out["f_pde"], + }, + name="f_mse_sup", + ) + constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} + + # manually build validator + eval_dataloader_cfg_pde = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_star", "x": "x_star", "du_t": "t_star"}, + }, + } + + sup_validator_pde = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "f_pde": lambda out: out["f_pde"], + }, + {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, + name="f_L2_sup", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_pde, + cfg.output_dir, + optimizer_pde, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_pde, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 3: training solution net + # re-register transform for model 2, fit for loss of stage 3 + model_pde.register_input_transform(transform_f_sol) + + # manually build constraint(s) + train_dataloader_cfg_sol_f = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_f_train", "x": "x_f_train", "du_t": "t_f_train"}, + }, + } + train_dataloader_cfg_sol_init = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("u_sol",), + "alias_dict": {"t": "t0", 
"x": "x0", "u_sol": "u0"}, + }, + } + train_dataloader_cfg_sol_bc = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("x",), + "alias_dict": {"t": "tb", "x": "xb"}, + }, + } + + sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_f, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "f_pde": lambda out: out["f_pde"], + "du_t": lambda out: jacobian(out["u_sol"], out["t"]), + }, + name="f_mse_sup", + ) + sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_init, + ppsci.loss.MSELoss("sum"), + {"u_sol": lambda out: out["u_sol"]}, + name="u0_mse_sup", + ) + sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_bc, + ppsci.loss.FunctionalLoss(boundary_loss_func), + { + "x": lambda out: out["x"], + "u_sol": lambda out: out["u_sol"], + }, + name="ub_mse_sup", + ) + constraint_sol = { + sup_constraint_sol_f.name: sup_constraint_sol_f, + sup_constraint_sol_init.name: sup_constraint_sol_init, + sup_constraint_sol_bc.name: sup_constraint_sol_bc, + } + + # manually build validator + eval_dataloader_cfg_sol = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("u_sol",), + "alias_dict": {"t": "t_star", "x": "x_star", "u_sol": "u_star"}, + }, + } + + sup_validator_sol = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_sol, + ppsci.loss.MSELoss("sum"), + {"u_sol": lambda out: out["u_sol"]}, + {"l2": ppsci.metric.L2Rel()}, + name="u_L2_sup", + ) + validator_sol = {sup_validator_sol.name: sup_validator_sol} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_sol, + cfg.output_dir, + optimizer_sol, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_sol, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # open FLAG for higher order differential operator when order >= 4 + paddle.framework.core.set_prim_eager_enabled(True) + + ppsci.utils.misc.set_random_seed(42) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize boundaries + t_lb = paddle.to_tensor(cfg.T_LB) + t_ub = paddle.to_tensor(cfg.T_UB) + x_lb = paddle.to_tensor(cfg.X_LB) + x_ub = paddle.to_tensor(cfg.T_UB) + + # initialize models + model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) + model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) + model_sol = ppsci.arch.MLP(**cfg.MODEL.sol_net) + + # initialize transform + def transform_u(_in): + t, x = _in["t"], _in["x"] + t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 + x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 + input_trans = {"t": t, "x": x} + return input_trans + + def transform_f(input, model, out_key): + in_idn = {"t": input["t"], "x": input["x"]} + x = input["x"] + u = model(in_idn)[out_key] + du_x = jacobian(u, x) + du_xx = hessian(u, x) + du_xxx = jacobian(du_xx, x) + input_trans = {"u_x": u, "du_x": du_x, "du_xx": du_xx, "du_xxx": du_xxx} + return input_trans + + def transform_f_sol(_in): + return transform_f(_in, model_sol, "u_sol") + + # register transform + model_idn.register_input_transform(transform_u) + model_pde.register_input_transform(transform_f_sol) + model_sol.register_input_transform(transform_u) + + # initialize model list + model_list = 
ppsci.arch.ModelList((model_idn, model_pde, model_sol)) + + # stage 3: solution net + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # load dataset + dataset_val = reader.load_mat_file( + cfg.DATASET_PATH_SOL, + keys=("t", "x", "u_sol"), + alias_dict={ + "t": "t_ori", + "x": "x_ori", + "u_sol": "Exact_ori", + }, + ) + + t_sol, x_sol = np.meshgrid( + np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"]) + ) + t_sol_flatten = paddle.to_tensor( + t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + x_sol_flatten = paddle.to_tensor( + x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + u_sol_pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten}) + + # eval + l2_error = np.linalg.norm( + dataset_val["u_sol"] - u_sol_pred["u_sol"], 2 + ) / np.linalg.norm(dataset_val["u_sol"], 2) + logger.info(f"l2_error: {l2_error}") + + # plotting + plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy() + plot_func.draw_and_save( + figname="korteweg_de_vries_sol", + data_exact=dataset_val["u_sol"], + data_learned=u_sol_pred["u_sol"].numpy(), + boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB], + griddata_points=plot_points, + griddata_xi=(t_sol, x_sol), + save_path=cfg.output_dir, + ) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="korteweg_de_vries.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/deephpms/kuramoto_sivashinsky.py b/examples/deephpms/kuramoto_sivashinsky.py index 2ea2660c09..dfebd2b8bc 100644 --- a/examples/deephpms/kuramoto_sivashinsky.py +++ b/examples/deephpms/kuramoto_sivashinsky.py @@ -1,464 +1,464 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
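The train() functions above repeat one pattern per DeepHPM stage: assemble a constraint dict and a validator dict, wrap them in a fresh ppsci.solver.Solver together with that stage's optimizer, then train and evaluate. A compressed sketch of that loop, assuming the constraint/validator/optimizer objects are built exactly as in the example and passed in as a list of triples (the stages argument is illustrative):

import ppsci

def run_stages(cfg, model_list, stages):
    # stages: iterable of (constraint_dict, validator_dict, optimizer) triples, one per stage
    for constraint, validator, optimizer in stages:
        solver = ppsci.solver.Solver(
            model_list,
            constraint,
            cfg.output_dir,
            optimizer,
            None,
            cfg.TRAIN.epochs,
            cfg.TRAIN.iters_per_epoch,
            eval_during_train=cfg.TRAIN.eval_during_train,
            validator=validator,
        )
        solver.train()
        solver.eval()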
- -from os import path as osp - -import hydra -import numpy as np -import paddle -import paddle.nn.functional as F -import plotting as plot_func -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger -from ppsci.utils import reader -from ppsci.utils import save_load - - -def pde_loss_func(output_dict, *args): - losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum") - return {"pde": losses} - - -def pde_l2_rel_func(output_dict, *args): - rel_l2 = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm( - output_dict["du_t"] - ) - metric_dict = {"f_pde": rel_l2} - return metric_dict - - -def boundary_loss_func(output_dict, *args): - u_b = output_dict["u_idn"] - u_lb, u_ub = paddle.split(u_b, 2, axis=0) - - x_b = output_dict["x"] - du_x = jacobian(u_b, x_b) - du_xx = hessian(u_b, x_b) - du_xxx = jacobian(du_xx, x_b) - - du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0) - du_xx_lb, du_xx_ub = paddle.split(du_xx, 2, axis=0) - du_xxx_lb, du_xxx_ub = paddle.split(du_xxx, 2, axis=0) - - losses = F.mse_loss(u_lb, u_ub, "sum") - losses += F.mse_loss(du_x_lb, du_x_ub, "sum") - losses += F.mse_loss(du_xx_lb, du_xx_ub, "sum") - losses += F.mse_loss(du_xxx_lb, du_xxx_ub, "sum") - return {"boundary": losses} - - -def train(cfg: DictConfig): - # open FLAG for higher order differential operator when order >= 4 - paddle.framework.core.set_prim_eager_enabled(True) - - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize boundaries - t_lb = paddle.to_tensor(cfg.T_LB) - t_ub = paddle.to_tensor(cfg.T_UB) - x_lb = paddle.to_tensor(cfg.X_LB) - x_ub = paddle.to_tensor(cfg.T_UB) - - # initialize models - model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) - model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) - - # initialize transform - def transform_u(_in): - t, x = _in["t"], _in["x"] - t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 - x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 - input_trans = {"t": t, "x": x} - return input_trans - - def transform_f(input, model, out_key): - in_idn = {"t": input["t"], "x": input["x"]} - x = input["x"] - u = model(in_idn)[out_key] - du_x = jacobian(u, x) - du_xx = hessian(u, x) - du_xxx = jacobian(du_xx, x) - du_xxxx = hessian(du_xx, x) - input_trans = { - "u_x": u, - "du_x": du_x, - "du_xx": du_xx, - "du_xxx": du_xxx, - "du_xxxx": du_xxxx, - } - return input_trans - - def transform_f_idn(_in): - return transform_f(_in, model_idn, "u_idn") - - # register transform - model_idn.register_input_transform(transform_u) - model_pde.register_input_transform(transform_f_idn) - - # initialize model list - model_list = ppsci.arch.ModelList((model_idn, model_pde)) - - # initialize optimizer - # Adam - optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn) - optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde) - - # LBFGS - # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_idn) - # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_pde) - - # stage 1: training identification net - # manually build constraint(s) - train_dataloader_cfg_idn = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("u_idn",), - "alias_dict": {"t": "t_train", "x": "x_train", "u_idn": "u_train"}, - }, - } - - 
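boundary_loss_func in this file enforces periodic boundary conditions by stacking lower- and upper-boundary samples in one batch and penalising the mismatch of u and its spatial derivatives between the two halves. A minimal sketch of that idea for a single derivative order, assuming the first half of the batch holds x = x_lb points and the second half x = x_ub points:

import paddle
import paddle.nn.functional as F

def periodic_mismatch(u_b: paddle.Tensor, du_x: paddle.Tensor) -> paddle.Tensor:
    # split the stacked boundary batch into lower/upper halves and compare value and slope
    u_lb, u_ub = paddle.split(u_b, 2, axis=0)
    du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0)
    return F.mse_loss(u_lb, u_ub, "sum") + F.mse_loss(du_x_lb, du_x_ub, "sum")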
sup_constraint_idn = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {"u_idn": lambda out: out["u_idn"]}, - name="u_mse_sup", - ) - constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} - - # manually build validator - eval_dataloader_cfg_idn = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("u_idn",), - "alias_dict": {"t": "t_star", "x": "x_star", "u_idn": "u_star"}, - }, - } - - sup_validator_idn = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {"u_idn": lambda out: out["u_idn"]}, - {"l2": ppsci.metric.L2Rel()}, - name="u_L2_sup", - ) - validator_idn = {sup_validator_idn.name: sup_validator_idn} - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_idn, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 2: training pde net - # manually build constraint(s) - train_dataloader_cfg_pde = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": "t_train", "x": "x_train", "du_t": "t_train"}, - }, - } - - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - }, - name="f_mse_sup", - ) - constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} - - # manually build validator - eval_dataloader_cfg_pde = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": "t_star", "x": "x_star", "du_t": "t_star"}, - }, - } - - sup_validator_pde = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - }, - {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, - name="f_L2_sup", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_pde, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 3: training solution net, reuse identification net - # manually build constraint(s) - train_dataloader_cfg_sol_f = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("du_t",), - "alias_dict": {"t": "t_f_train", "x": "x_f_train", "du_t": "t_f_train"}, - }, - } - train_dataloader_cfg_sol_init = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("u0_sol",), - "alias_dict": {"t": "t0", "x": "x0", "u0_sol": "u0"}, - }, - } - train_dataloader_cfg_sol_bc = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("x",), - "alias_dict": {"t": 
"tb", "x": "xb"}, - }, - } - - sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_f, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "f_pde": lambda out: out["f_pde"], - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - }, - name="f_mse_sup", - ) - sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_init, - ppsci.loss.MSELoss("sum"), - {"u0_sol": lambda out: out["u_idn"]}, - name="u0_mse_sup", - ) - sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_bc, - ppsci.loss.FunctionalLoss(boundary_loss_func), - { - "x": lambda out: out["x"], - "ub_sol": lambda out: out["u_idn"], - }, - name="ub_mse_sup", - ) - constraint_sol = { - sup_constraint_sol_f.name: sup_constraint_sol_f, - sup_constraint_sol_init.name: sup_constraint_sol_init, - sup_constraint_sol_bc.name: sup_constraint_sol_bc, - } - - # manually build validator - eval_dataloader_cfg_sol = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("u_sol",), - "alias_dict": {"t": "t_star", "x": "x_star", "u_sol": "u_star"}, - }, - } - - sup_validator_sol = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_sol, - ppsci.loss.MSELoss("sum"), - {"u_sol": lambda out: out["u_idn"]}, - {"l2": ppsci.metric.L2Rel()}, - name="u_L2_sup", - ) - validator_sol = {sup_validator_sol.name: sup_validator_sol} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_sol, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # open FLAG for higher order differential operator when order >= 4 - paddle.framework.core.set_prim_eager_enabled(True) - - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize boundaries - t_lb = paddle.to_tensor(cfg.T_LB) - t_ub = paddle.to_tensor(cfg.T_UB) - x_lb = paddle.to_tensor(cfg.X_LB) - x_ub = paddle.to_tensor(cfg.T_UB) - - # initialize models - model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) - model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) - - # initialize transform - def transform_u(_in): - t, x = _in["t"], _in["x"] - t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 - x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 - input_trans = {"t": t, "x": x} - return input_trans - - def transform_f(input, model, out_key): - in_idn = {"t": input["t"], "x": input["x"]} - x = input["x"] - u = model(in_idn)[out_key] - du_x = jacobian(u, x) - du_xx = hessian(u, x) - du_xxx = jacobian(du_xx, x) - du_xxxx = hessian(du_xx, x) - input_trans = { - "u_x": u, - "du_x": du_x, - "du_xx": du_xx, - "du_xxx": du_xxx, - "du_xxxx": du_xxxx, - } - return input_trans - - def transform_f_idn(_in): - return transform_f(_in, model_idn, "u_idn") - - # register transform - model_idn.register_input_transform(transform_u) - model_pde.register_input_transform(transform_f_idn) - - # initialize model list - model_list = ppsci.arch.ModelList((model_idn, model_pde)) - - # stage 3: solution net - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # load pretrained model - save_load.load_pretrain(model_list, 
cfg.EVAL.pretrained_model_path) - - # load dataset - dataset_val = reader.load_mat_file( - cfg.DATASET_PATH_SOL, - keys=("t", "x", "u_sol"), - alias_dict={ - "t": "t_ori", - "x": "x_ori", - "u_sol": "Exact_ori", - }, - ) - - t_sol, x_sol = np.meshgrid( - np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"]) - ) - t_sol_flatten = paddle.to_tensor( - t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - x_sol_flatten = paddle.to_tensor( - x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - u_sol_pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten}) - - # eval - l2_error = np.linalg.norm( - dataset_val["u_sol"] - u_sol_pred["u_idn"], 2 - ) / np.linalg.norm( - dataset_val["u_sol"], 2 - ) # stage 1&3 use the same net in this example - logger.info(f"l2_error: {l2_error}") - - # plotting - plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy() - plot_func.draw_and_save( - figname="kuramoto_sivashinsky_sol", - data_exact=dataset_val["u_sol"], - data_learned=u_sol_pred["u_idn"].numpy(), - boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB], - griddata_points=plot_points, - griddata_xi=(t_sol, x_sol), - save_path=cfg.output_dir, - ) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="kuramoto_sivashinsky.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
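transform_f in this example feeds the PDE network with u and its spatial derivatives up to fourth order, chaining ppsci.autodiff calls (the hessian of du_xx yields du_xxxx), which is why the prim eager flag is switched on at the top of train() and evaluate(). A hedged sketch of that feature construction for a generic model, assuming the model returns a dict containing out_key and that x carries gradients:

import paddle
from ppsci.autodiff import hessian
from ppsci.autodiff import jacobian

def derivative_features(model, t: paddle.Tensor, x: paddle.Tensor, out_key: str) -> dict:
    u = model({"t": t, "x": x})[out_key]
    du_x = jacobian(u, x)        # u_x
    du_xx = hessian(u, x)        # u_xx
    du_xxx = jacobian(du_xx, x)  # u_xxx
    du_xxxx = hessian(du_xx, x)  # u_xxxx, needs the prim eager flag for order >= 4
    return {"u_x": u, "du_x": du_x, "du_xx": du_xx, "du_xxx": du_xxx, "du_xxxx": du_xxxx}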
+ +from os import path as osp + +import hydra +import numpy as np +import paddle +import paddle.nn.functional as F +import plotting as plot_func +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger +from ppsci.utils import reader +from ppsci.utils import save_load + + +def pde_loss_func(output_dict, *args): + losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum") + return {"pde": losses} + + +def pde_l2_rel_func(output_dict, *args): + rel_l2 = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm( + output_dict["du_t"] + ) + metric_dict = {"f_pde": rel_l2} + return metric_dict + + +def boundary_loss_func(output_dict, *args): + u_b = output_dict["u_idn"] + u_lb, u_ub = paddle.split(u_b, 2, axis=0) + + x_b = output_dict["x"] + du_x = jacobian(u_b, x_b) + du_xx = hessian(u_b, x_b) + du_xxx = jacobian(du_xx, x_b) + + du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0) + du_xx_lb, du_xx_ub = paddle.split(du_xx, 2, axis=0) + du_xxx_lb, du_xxx_ub = paddle.split(du_xxx, 2, axis=0) + + losses = F.mse_loss(u_lb, u_ub, "sum") + losses += F.mse_loss(du_x_lb, du_x_ub, "sum") + losses += F.mse_loss(du_xx_lb, du_xx_ub, "sum") + losses += F.mse_loss(du_xxx_lb, du_xxx_ub, "sum") + return {"boundary": losses} + + +def train(cfg: DictConfig): + # open FLAG for higher order differential operator when order >= 4 + paddle.framework.core.set_prim_eager_enabled(True) + + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize boundaries + t_lb = paddle.to_tensor(cfg.T_LB) + t_ub = paddle.to_tensor(cfg.T_UB) + x_lb = paddle.to_tensor(cfg.X_LB) + x_ub = paddle.to_tensor(cfg.T_UB) + + # initialize models + model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) + model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) + + # initialize transform + def transform_u(_in): + t, x = _in["t"], _in["x"] + t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 + x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 + input_trans = {"t": t, "x": x} + return input_trans + + def transform_f(input, model, out_key): + in_idn = {"t": input["t"], "x": input["x"]} + x = input["x"] + u = model(in_idn)[out_key] + du_x = jacobian(u, x) + du_xx = hessian(u, x) + du_xxx = jacobian(du_xx, x) + du_xxxx = hessian(du_xx, x) + input_trans = { + "u_x": u, + "du_x": du_x, + "du_xx": du_xx, + "du_xxx": du_xxx, + "du_xxxx": du_xxxx, + } + return input_trans + + def transform_f_idn(_in): + return transform_f(_in, model_idn, "u_idn") + + # register transform + model_idn.register_input_transform(transform_u) + model_pde.register_input_transform(transform_f_idn) + + # initialize model list + model_list = ppsci.arch.ModelList((model_idn, model_pde)) + + # initialize optimizer + # Adam + optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn) + optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde) + + # LBFGS + # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_idn) + # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_pde) + + # stage 1: training identification net + # manually build constraint(s) + train_dataloader_cfg_idn = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("u_idn",), + "alias_dict": {"t": "t_train", "x": "x_train", "u_idn": "u_train"}, + }, + } + + 
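The dataset blocks above rely on alias_dict to remap the raw .mat variable names (t_train, x_train, u_train, ...) onto the keys the models and losses expect. The same remapping can be checked outside a dataloader with the reader used in evaluate(); a hedged sketch, with an illustrative file path in place of cfg.DATASET_PATH:

from ppsci.utils import reader

file_path = "./datasets/KS.mat"  # illustrative; the example reads this from cfg.DATASET_PATH
data = reader.load_mat_file(
    file_path,
    keys=("t", "x", "u_idn"),
    alias_dict={"t": "t_train", "x": "x_train", "u_idn": "u_train"},
)
# data["t"], data["x"], data["u_idn"] now hold the arrays stored as t_train/x_train/u_train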
sup_constraint_idn = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {"u_idn": lambda out: out["u_idn"]}, + name="u_mse_sup", + ) + constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} + + # manually build validator + eval_dataloader_cfg_idn = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("u_idn",), + "alias_dict": {"t": "t_star", "x": "x_star", "u_idn": "u_star"}, + }, + } + + sup_validator_idn = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {"u_idn": lambda out: out["u_idn"]}, + {"l2": ppsci.metric.L2Rel()}, + name="u_L2_sup", + ) + validator_idn = {sup_validator_idn.name: sup_validator_idn} + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + constraint_idn, + cfg.output_dir, + optimizer_idn, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_idn, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 2: training pde net + # manually build constraint(s) + train_dataloader_cfg_pde = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_train", "x": "x_train", "du_t": "t_train"}, + }, + } + + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "f_pde": lambda out: out["f_pde"], + }, + name="f_mse_sup", + ) + constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} + + # manually build validator + eval_dataloader_cfg_pde = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_star", "x": "x_star", "du_t": "t_star"}, + }, + } + + sup_validator_pde = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "f_pde": lambda out: out["f_pde"], + }, + {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, + name="f_L2_sup", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_pde, + cfg.output_dir, + optimizer_pde, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_pde, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 3: training solution net, reuse identification net + # manually build constraint(s) + train_dataloader_cfg_sol_f = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("du_t",), + "alias_dict": {"t": "t_f_train", "x": "x_f_train", "du_t": "t_f_train"}, + }, + } + train_dataloader_cfg_sol_init = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("u0_sol",), + "alias_dict": {"t": "t0", "x": "x0", "u0_sol": "u0"}, + }, + } + train_dataloader_cfg_sol_bc = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("x",), + "alias_dict": {"t": 
"tb", "x": "xb"}, + }, + } + + sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_f, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "f_pde": lambda out: out["f_pde"], + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + }, + name="f_mse_sup", + ) + sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_init, + ppsci.loss.MSELoss("sum"), + {"u0_sol": lambda out: out["u_idn"]}, + name="u0_mse_sup", + ) + sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_bc, + ppsci.loss.FunctionalLoss(boundary_loss_func), + { + "x": lambda out: out["x"], + "ub_sol": lambda out: out["u_idn"], + }, + name="ub_mse_sup", + ) + constraint_sol = { + sup_constraint_sol_f.name: sup_constraint_sol_f, + sup_constraint_sol_init.name: sup_constraint_sol_init, + sup_constraint_sol_bc.name: sup_constraint_sol_bc, + } + + # manually build validator + eval_dataloader_cfg_sol = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("u_sol",), + "alias_dict": {"t": "t_star", "x": "x_star", "u_sol": "u_star"}, + }, + } + + sup_validator_sol = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_sol, + ppsci.loss.MSELoss("sum"), + {"u_sol": lambda out: out["u_idn"]}, + {"l2": ppsci.metric.L2Rel()}, + name="u_L2_sup", + ) + validator_sol = {sup_validator_sol.name: sup_validator_sol} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_sol, + cfg.output_dir, + optimizer_idn, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_sol, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # open FLAG for higher order differential operator when order >= 4 + paddle.framework.core.set_prim_eager_enabled(True) + + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize boundaries + t_lb = paddle.to_tensor(cfg.T_LB) + t_ub = paddle.to_tensor(cfg.T_UB) + x_lb = paddle.to_tensor(cfg.X_LB) + x_ub = paddle.to_tensor(cfg.T_UB) + + # initialize models + model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) + model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) + + # initialize transform + def transform_u(_in): + t, x = _in["t"], _in["x"] + t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 + x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 + input_trans = {"t": t, "x": x} + return input_trans + + def transform_f(input, model, out_key): + in_idn = {"t": input["t"], "x": input["x"]} + x = input["x"] + u = model(in_idn)[out_key] + du_x = jacobian(u, x) + du_xx = hessian(u, x) + du_xxx = jacobian(du_xx, x) + du_xxxx = hessian(du_xx, x) + input_trans = { + "u_x": u, + "du_x": du_x, + "du_xx": du_xx, + "du_xxx": du_xxx, + "du_xxxx": du_xxxx, + } + return input_trans + + def transform_f_idn(_in): + return transform_f(_in, model_idn, "u_idn") + + # register transform + model_idn.register_input_transform(transform_u) + model_pde.register_input_transform(transform_f_idn) + + # initialize model list + model_list = ppsci.arch.ModelList((model_idn, model_pde)) + + # stage 3: solution net + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # load pretrained model + save_load.load_pretrain(model_list, 
cfg.EVAL.pretrained_model_path) + + # load dataset + dataset_val = reader.load_mat_file( + cfg.DATASET_PATH_SOL, + keys=("t", "x", "u_sol"), + alias_dict={ + "t": "t_ori", + "x": "x_ori", + "u_sol": "Exact_ori", + }, + ) + + t_sol, x_sol = np.meshgrid( + np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"]) + ) + t_sol_flatten = paddle.to_tensor( + t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + x_sol_flatten = paddle.to_tensor( + x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + u_sol_pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten}) + + # eval + l2_error = np.linalg.norm( + dataset_val["u_sol"] - u_sol_pred["u_idn"], 2 + ) / np.linalg.norm( + dataset_val["u_sol"], 2 + ) # stage 1&3 use the same net in this example + logger.info(f"l2_error: {l2_error}") + + # plotting + plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy() + plot_func.draw_and_save( + figname="kuramoto_sivashinsky_sol", + data_exact=dataset_val["u_sol"], + data_learned=u_sol_pred["u_idn"].numpy(), + boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB], + griddata_points=plot_points, + griddata_xi=(t_sol, x_sol), + save_path=cfg.output_dir, + ) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="kuramoto_sivashinsky.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/deephpms/navier_stokes.py b/examples/deephpms/navier_stokes.py index b7bbffa012..53b661c73c 100644 --- a/examples/deephpms/navier_stokes.py +++ b/examples/deephpms/navier_stokes.py @@ -1,491 +1,491 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
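evaluate() above loads the stage-3 checkpoint, then rebuilds a dense (t, x) grid from the 1-D axes stored in the .mat file, flattens it into column tensors and keeps gradients enabled so the input transforms can still differentiate through them. Note that save_load.load_pretrain is called twice in a row on the same model list; one call is sufficient. A hedged sketch of the grid construction, with illustrative axes in place of the .mat data:

import numpy as np
import paddle

t_axis = np.linspace(0.0, 1.0, 11)   # illustrative; the example reads t_ori from the .mat file
x_axis = np.linspace(-1.0, 1.0, 21)  # illustrative; the example reads x_ori from the .mat file

t_grid, x_grid = np.meshgrid(t_axis, x_axis)
t_flat = paddle.to_tensor(
    t_grid.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False
)
x_flat = paddle.to_tensor(
    x_grid.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False
)
# model_list({"t": t_flat, "x": x_flat}) then yields predictions on every grid node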
- -from os import path as osp - -import hydra -import numpy as np -import paddle -import paddle.nn.functional as F -import plotting as plot_func -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger -from ppsci.utils import reader -from ppsci.utils import save_load - - -def pde_loss_func(output_dict, *args): - losses = F.mse_loss(output_dict["f_pde"], output_dict["dw_t"], "sum") - return {"pde": losses} - - -def pde_l2_rel_func(output_dict, *args): - rel_l2 = paddle.norm(output_dict["dw_t"] - output_dict["f_pde"]) / paddle.norm( - output_dict["dw_t"] - ) - metric_dict = {"f_pde": rel_l2} - return metric_dict - - -def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize boundaries - # t, x, y - lb = paddle.to_tensor(list(cfg.LB)) - ub = paddle.to_tensor(list(cfg.UB)) - - # initialize models - model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) - model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) - - # initialize transform - def transform_w(_in): - t, x, y = _in["t"], _in["x"], _in["y"] - X = paddle.concat([t, x, y], axis=1) - H = 2.0 * (X - lb) * paddle.pow((ub - lb), -1) - 1.0 - t, x, y = paddle.split(H, 3, axis=1) - input_trans = {"t": t, "x": x, "y": y} - return input_trans - - def transform_f(_in): - in_idn = {"t": _in["t"], "x": _in["x"], "y": _in["y"]} - x, y = _in["x"], _in["y"] - w = model_idn(in_idn)["w_idn"] - dw_x = jacobian(w, x) - dw_y = jacobian(w, y) - - dw_xx = hessian(w, x) - dw_yy = hessian(w, y) - dw_xy = jacobian(dw_x, y) - - input_trans = { - "u": _in["u"], - "v": _in["v"], - "w": w, - "dw_x": dw_x, - "dw_y": dw_y, - "dw_xx": dw_xx, - "dw_xy": dw_xy, - "dw_yy": dw_yy, - } - return input_trans - - # register transform - model_idn.register_input_transform(transform_w) - model_pde.register_input_transform(transform_f) - - # initialize model list - model_list = ppsci.arch.ModelList((model_idn, model_pde)) - - # initialize optimizer - # Adam - optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn) - optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde) - - # LBFGS - # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_idn) - # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_pde) - - # stage 1: training identification net - # manually build constraint(s) - train_dataloader_cfg_idn = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x", "y", "u", "v"), - "label_keys": ("w_idn",), - "alias_dict": { - "t": "t_train", - "x": "x_train", - "y": "y_train", - "u": "u_train", - "v": "v_train", - "w_idn": "w_train", - }, - }, - } - - sup_constraint_idn = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {"w_idn": lambda out: out["w_idn"]}, - name="w_mse_sup", - ) - constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} - - # manually build validator - eval_dataloader_cfg_idn = { - "dataset": { - "name": "MatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x", "y", "u", "v"), - "label_keys": ("w_idn",), - "alias_dict": { - "t": "t_star", - "x": "x_star", - "y": "y_star", - "u": "u_star", - "v": "v_star", - "w_idn": "w_star", - }, - }, - "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": 
False, - }, - } - - sup_validator_idn = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {"w_idn": lambda out: out["w_idn"]}, - {"l2": ppsci.metric.L2Rel()}, - name="w_L2_sup", - ) - validator_idn = {sup_validator_idn.name: sup_validator_idn} - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_idn, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 2: training pde net - # manually build constraint(s) - train_dataloader_cfg_pde = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x", "y", "u", "v"), - "label_keys": ("dw_t",), - "alias_dict": { - "t": "t_train", - "x": "x_train", - "y": "y_train", - "u": "u_train", - "v": "v_train", - "dw_t": "t_train", - }, - }, - } - - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "dw_t": lambda out: jacobian(out["w_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - }, - name="f_mse_sup", - ) - constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} - - # manually build validator - eval_dataloader_cfg_pde = { - "dataset": { - "name": "MatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x", "y", "u", "v"), - "label_keys": ("dw_t",), - "alias_dict": { - "t": "t_star", - "x": "x_star", - "y": "y_star", - "u": "u_star", - "v": "v_star", - "dw_t": "t_star", - }, - }, - "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - sup_validator_pde = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "dw_t": lambda out: jacobian(out["w_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - }, - {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, - name="f_L2_sup", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_pde, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 3: training solution net, reuse identification net - # manually build constraint(s) - train_dataloader_cfg_sol_f = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x", "y", "u", "v"), - "label_keys": ("dw_t",), - "alias_dict": { - "t": "t_f_train", - "x": "x_f_train", - "y": "y_f_train", - "u": "u_f_train", - "v": "v_f_train", - "dw_t": "t_f_train", - }, - }, - } - train_dataloader_cfg_sol_bc = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x", "y", "u", "v"), - "label_keys": ("wb_sol",), - "alias_dict": { - "t": "tb", - "x": "xb", - "y": "yb", - "wb_sol": "wb", - "u": "xb", - "v": "yb", - }, - }, - } - - sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_f, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "f_pde": lambda out: out["f_pde"], - "dw_t": lambda out: jacobian(out["w_idn"], out["t"]), - }, - name="f_mse_sup", - ) - 
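In the Kuramoto-Sivashinsky and Navier-Stokes examples stage 3 does not add a separate solution network: the stage-3 constraints and validators map their labels onto the identification net's outputs (out["w_idn"] here), so stages 1 and 3 share weights and evaluate() later reads the prediction from w_idn. A hedged sketch of such an output-expression mapping, assuming out is the merged output dict the constraint receives:

from ppsci.autodiff import jacobian

# label key on the left, expression over the model outputs on the right
output_expr_stage3 = {
    "wb_sol": lambda out: out["w_idn"],                    # boundary data supervised on the identified field
    "dw_t": lambda out: jacobian(out["w_idn"], out["t"]),  # time derivative for the PDE residual
}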
sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_bc, - ppsci.loss.MSELoss("sum"), - {"wb_sol": lambda out: out["w_idn"]}, - name="ub_mse_sup", - ) - constraint_sol = { - sup_constraint_sol_f.name: sup_constraint_sol_f, - sup_constraint_sol_bc.name: sup_constraint_sol_bc, - } - - # manually build validator - eval_dataloader_cfg_sol = { - "dataset": { - "name": "MatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x", "y", "u", "v"), - "label_keys": ("w_sol",), - "alias_dict": { - "t": "t_star", - "x": "x_star", - "y": "y_star", - "w_sol": "w_star", - "u": "u_star", - "v": "v_star", - }, - }, - "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - sup_validator_sol = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_sol, - ppsci.loss.MSELoss("sum"), - {"w_sol": lambda out: out["w_idn"]}, - {"l2": ppsci.metric.L2Rel()}, - name="w_L2_sup", - ) - validator_sol = {sup_validator_sol.name: sup_validator_sol} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_sol, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize boundaries - # t, x, y - lb = paddle.to_tensor(list(cfg.LB)) - ub = paddle.to_tensor(list(cfg.UB)) - - # initialize models - model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) - model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) - - # initialize transform - def transform_w(_in): - t, x, y = _in["t"], _in["x"], _in["y"] - X = paddle.concat([t, x, y], axis=1) - H = 2.0 * (X - lb) * paddle.pow((ub - lb), -1) - 1.0 - t, x, y = paddle.split(H, 3, axis=1) - input_trans = {"t": t, "x": x, "y": y} - return input_trans - - def transform_f(_in): - in_idn = {"t": _in["t"], "x": _in["x"], "y": _in["y"]} - x, y = _in["x"], _in["y"] - w = model_idn(in_idn)["w_idn"] - dw_x = jacobian(w, x) - dw_y = jacobian(w, y) - - dw_xx = hessian(w, x) - dw_yy = hessian(w, y) - dw_xy = jacobian(dw_x, y) - - input_trans = { - "u": _in["u"], - "v": _in["v"], - "w": w, - "dw_x": dw_x, - "dw_y": dw_y, - "dw_xx": dw_xx, - "dw_xy": dw_xy, - "dw_yy": dw_yy, - } - return input_trans - - # register transform - model_idn.register_input_transform(transform_w) - model_pde.register_input_transform(transform_f) - - # initialize model list - model_list = ppsci.arch.ModelList((model_idn, model_pde)) - - # stage 3: solution net - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # load dataset - dataset_val = reader.load_mat_file( - cfg.DATASET_PATH_SOL, - keys=("t", "x", "y", "w_sol", "grid_data"), - alias_dict={ - "t": "t_star", - "x": "x_star", - "y": "y_star", - "w_sol": "w_star", - "grid_data": "X_star", - }, - ) - input_dict = { - "t": paddle.to_tensor( - dataset_val["t"], dtype=paddle.get_default_dtype(), stop_gradient=False - ), - "x": paddle.to_tensor( - dataset_val["x"], dtype=paddle.get_default_dtype(), stop_gradient=False - ), - "y": paddle.to_tensor( - dataset_val["y"], dtype=paddle.get_default_dtype(), 
stop_gradient=False - ), - } - - w_sol_pred = model_idn(input_dict) - - # eval - l2_error = np.linalg.norm( - dataset_val["w_sol"] - w_sol_pred["w_idn"], 2 - ) / np.linalg.norm( - dataset_val["w_sol"], 2 - ) # stage 1&3 use the same net in this example - logger.info(f"l2_error: {l2_error}") - - # plotting - plot_func.draw_and_save_ns( - figname="navier_stokes_sol", - data_exact=dataset_val["w_sol"].reshape([-1, 151]), - data_learned=w_sol_pred["w_idn"].reshape([-1, 151]).numpy(), - grid_data=dataset_val["grid_data"].reshape([-1, 2]), - save_path=cfg.output_dir, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="navier_stokes.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp + +import hydra +import numpy as np +import paddle +import paddle.nn.functional as F +import plotting as plot_func +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger +from ppsci.utils import reader +from ppsci.utils import save_load + + +def pde_loss_func(output_dict, *args): + losses = F.mse_loss(output_dict["f_pde"], output_dict["dw_t"], "sum") + return {"pde": losses} + + +def pde_l2_rel_func(output_dict, *args): + rel_l2 = paddle.norm(output_dict["dw_t"] - output_dict["f_pde"]) / paddle.norm( + output_dict["dw_t"] + ) + metric_dict = {"f_pde": rel_l2} + return metric_dict + + +def train(cfg: DictConfig): + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize boundaries + # t, x, y + lb = paddle.to_tensor(list(cfg.LB)) + ub = paddle.to_tensor(list(cfg.UB)) + + # initialize models + model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) + model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) + + # initialize transform + def transform_w(_in): + t, x, y = _in["t"], _in["x"], _in["y"] + X = paddle.concat([t, x, y], axis=1) + H = 2.0 * (X - lb) * paddle.pow((ub - lb), -1) - 1.0 + t, x, y = paddle.split(H, 3, axis=1) + input_trans = {"t": t, "x": x, "y": y} + return input_trans + + def transform_f(_in): + in_idn = {"t": _in["t"], "x": _in["x"], "y": _in["y"]} + x, y = _in["x"], _in["y"] + w = model_idn(in_idn)["w_idn"] + dw_x = jacobian(w, x) + dw_y = jacobian(w, y) + + dw_xx = hessian(w, x) + dw_yy = hessian(w, y) + dw_xy = jacobian(dw_x, y) + + input_trans = { + "u": _in["u"], + "v": _in["v"], + "w": w, + "dw_x": dw_x, + "dw_y": dw_y, + "dw_xx": dw_xx, + "dw_xy": dw_xy, + "dw_yy": dw_yy, + } + return input_trans + + # register transform + model_idn.register_input_transform(transform_w) + model_pde.register_input_transform(transform_f) + + # initialize model list + 
model_list = ppsci.arch.ModelList((model_idn, model_pde)) + + # initialize optimizer + # Adam + optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_idn) + optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model_pde) + + # LBFGS + # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_idn) + # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)(model_pde) + + # stage 1: training identification net + # manually build constraint(s) + train_dataloader_cfg_idn = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x", "y", "u", "v"), + "label_keys": ("w_idn",), + "alias_dict": { + "t": "t_train", + "x": "x_train", + "y": "y_train", + "u": "u_train", + "v": "v_train", + "w_idn": "w_train", + }, + }, + } + + sup_constraint_idn = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {"w_idn": lambda out: out["w_idn"]}, + name="w_mse_sup", + ) + constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} + + # manually build validator + eval_dataloader_cfg_idn = { + "dataset": { + "name": "MatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x", "y", "u", "v"), + "label_keys": ("w_idn",), + "alias_dict": { + "t": "t_star", + "x": "x_star", + "y": "y_star", + "u": "u_star", + "v": "v_star", + "w_idn": "w_star", + }, + }, + "batch_size": cfg.TRAIN.batch_size.eval, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + sup_validator_idn = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {"w_idn": lambda out: out["w_idn"]}, + {"l2": ppsci.metric.L2Rel()}, + name="w_L2_sup", + ) + validator_idn = {sup_validator_idn.name: sup_validator_idn} + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + constraint_idn, + cfg.output_dir, + optimizer_idn, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_idn, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 2: training pde net + # manually build constraint(s) + train_dataloader_cfg_pde = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x", "y", "u", "v"), + "label_keys": ("dw_t",), + "alias_dict": { + "t": "t_train", + "x": "x_train", + "y": "y_train", + "u": "u_train", + "v": "v_train", + "dw_t": "t_train", + }, + }, + } + + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "dw_t": lambda out: jacobian(out["w_idn"], out["t"]), + "f_pde": lambda out: out["f_pde"], + }, + name="f_mse_sup", + ) + constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} + + # manually build validator + eval_dataloader_cfg_pde = { + "dataset": { + "name": "MatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x", "y", "u", "v"), + "label_keys": ("dw_t",), + "alias_dict": { + "t": "t_star", + "x": "x_star", + "y": "y_star", + "u": "u_star", + "v": "v_star", + "dw_t": "t_star", + }, + }, + "batch_size": cfg.TRAIN.batch_size.eval, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + sup_validator_pde = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "dw_t": lambda out: jacobian(out["w_idn"], out["t"]), + "f_pde": 
lambda out: out["f_pde"], + }, + {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, + name="f_L2_sup", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_pde, + cfg.output_dir, + optimizer_pde, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_pde, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 3: training solution net, reuse identification net + # manually build constraint(s) + train_dataloader_cfg_sol_f = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x", "y", "u", "v"), + "label_keys": ("dw_t",), + "alias_dict": { + "t": "t_f_train", + "x": "x_f_train", + "y": "y_f_train", + "u": "u_f_train", + "v": "v_f_train", + "dw_t": "t_f_train", + }, + }, + } + train_dataloader_cfg_sol_bc = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x", "y", "u", "v"), + "label_keys": ("wb_sol",), + "alias_dict": { + "t": "tb", + "x": "xb", + "y": "yb", + "wb_sol": "wb", + "u": "xb", + "v": "yb", + }, + }, + } + + sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_f, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "f_pde": lambda out: out["f_pde"], + "dw_t": lambda out: jacobian(out["w_idn"], out["t"]), + }, + name="f_mse_sup", + ) + sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_bc, + ppsci.loss.MSELoss("sum"), + {"wb_sol": lambda out: out["w_idn"]}, + name="ub_mse_sup", + ) + constraint_sol = { + sup_constraint_sol_f.name: sup_constraint_sol_f, + sup_constraint_sol_bc.name: sup_constraint_sol_bc, + } + + # manually build validator + eval_dataloader_cfg_sol = { + "dataset": { + "name": "MatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x", "y", "u", "v"), + "label_keys": ("w_sol",), + "alias_dict": { + "t": "t_star", + "x": "x_star", + "y": "y_star", + "w_sol": "w_star", + "u": "u_star", + "v": "v_star", + }, + }, + "batch_size": cfg.TRAIN.batch_size.eval, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + sup_validator_sol = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_sol, + ppsci.loss.MSELoss("sum"), + {"w_sol": lambda out: out["w_idn"]}, + {"l2": ppsci.metric.L2Rel()}, + name="w_L2_sup", + ) + validator_sol = {sup_validator_sol.name: sup_validator_sol} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_sol, + cfg.output_dir, + optimizer_idn, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_sol, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize boundaries + # t, x, y + lb = paddle.to_tensor(list(cfg.LB)) + ub = paddle.to_tensor(list(cfg.UB)) + + # initialize models + model_idn = ppsci.arch.MLP(**cfg.MODEL.idn_net) + model_pde = ppsci.arch.MLP(**cfg.MODEL.pde_net) + + # initialize transform + def transform_w(_in): + t, x, y = _in["t"], _in["x"], _in["y"] + X = paddle.concat([t, x, y], axis=1) + H = 2.0 * (X - lb) * paddle.pow((ub - lb), -1) - 1.0 + 
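# H holds the (t, x, y) coordinates rescaled to [-1, 1] via 2*(X - lb)/(ub - lb) - 1; split it back into per-key tensors + 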
t, x, y = paddle.split(H, 3, axis=1) + input_trans = {"t": t, "x": x, "y": y} + return input_trans + + def transform_f(_in): + in_idn = {"t": _in["t"], "x": _in["x"], "y": _in["y"]} + x, y = _in["x"], _in["y"] + w = model_idn(in_idn)["w_idn"] + dw_x = jacobian(w, x) + dw_y = jacobian(w, y) + + dw_xx = hessian(w, x) + dw_yy = hessian(w, y) + dw_xy = jacobian(dw_x, y) + + input_trans = { + "u": _in["u"], + "v": _in["v"], + "w": w, + "dw_x": dw_x, + "dw_y": dw_y, + "dw_xx": dw_xx, + "dw_xy": dw_xy, + "dw_yy": dw_yy, + } + return input_trans + + # register transform + model_idn.register_input_transform(transform_w) + model_pde.register_input_transform(transform_f) + + # initialize model list + model_list = ppsci.arch.ModelList((model_idn, model_pde)) + + # stage 3: solution net + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # load dataset + dataset_val = reader.load_mat_file( + cfg.DATASET_PATH_SOL, + keys=("t", "x", "y", "w_sol", "grid_data"), + alias_dict={ + "t": "t_star", + "x": "x_star", + "y": "y_star", + "w_sol": "w_star", + "grid_data": "X_star", + }, + ) + input_dict = { + "t": paddle.to_tensor( + dataset_val["t"], dtype=paddle.get_default_dtype(), stop_gradient=False + ), + "x": paddle.to_tensor( + dataset_val["x"], dtype=paddle.get_default_dtype(), stop_gradient=False + ), + "y": paddle.to_tensor( + dataset_val["y"], dtype=paddle.get_default_dtype(), stop_gradient=False + ), + } + + w_sol_pred = model_idn(input_dict) + + # eval + l2_error = np.linalg.norm( + dataset_val["w_sol"] - w_sol_pred["w_idn"], 2 + ) / np.linalg.norm( + dataset_val["w_sol"], 2 + ) # stage 1&3 use the same net in this example + logger.info(f"l2_error: {l2_error}") + + # plotting + plot_func.draw_and_save_ns( + figname="navier_stokes_sol", + data_exact=dataset_val["w_sol"].reshape([-1, 151]), + data_learned=w_sol_pred["w_idn"].reshape([-1, 151]).numpy(), + grid_data=dataset_val["grid_data"].reshape([-1, 2]), + save_path=cfg.output_dir, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="navier_stokes.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/deephpms/plotting.py b/examples/deephpms/plotting.py index 841315dd3b..908e4860b3 100644 --- a/examples/deephpms/plotting.py +++ b/examples/deephpms/plotting.py @@ -1,97 +1,97 @@ -from os import path as osp - -import matplotlib.gridspec as gridspec -import matplotlib.pyplot as plt -import numpy as np -from mpl_toolkits.axes_grid1 import make_axes_locatable -from scipy.interpolate import griddata - - -def _draw_subplot(subfigname, figdata, fig, gs, cmap, boundary, loc): - ax = plt.subplot(gs[:, loc]) - h = ax.imshow( - figdata, - interpolation="nearest", - cmap=cmap, - extent=boundary, # [cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB] - origin="lower", - aspect="auto", - ) - divider = make_axes_locatable(ax) - cax = divider.append_axes("right", size="5%", pad=0.05) - - fig.colorbar(h, cax=cax) - ax.set_xlabel("$t$") - ax.set_ylabel("$x$") - ax.set_aspect("auto", "box") - ax.set_title(subfigname, fontsize=10) - - -def draw_and_save( - figname, data_exact, data_learned, boundary, griddata_points, griddata_xi, save_path -): - fig = plt.figure(figname, figsize=(10, 6)) - gs = 
gridspec.GridSpec(1, 2) - gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5) - - # Exact p(t,x,y) - plot_data_label = griddata( - griddata_points, data_exact.flatten(), griddata_xi, method="cubic" - ) - _draw_subplot("Exact Dynamics", plot_data_label, fig, gs, "jet", boundary, loc=0) - # Predicted p(t,x,y) - plot_data_pred = griddata( - griddata_points, data_learned.flatten(), griddata_xi, method="cubic" - ) - _draw_subplot("Learned Dynamics", plot_data_pred, fig, gs, "jet", boundary, loc=1) - - plt.savefig(osp.join(save_path, figname)) - plt.close() - - -def draw_and_save_ns(figname, data_exact, data_learned, grid_data, save_path): - snap = 120 - nn = 200 - lb_x, lb_y = grid_data[:, 0].min(), grid_data[:, 1].min() - ub_x, ub_y = grid_data[:, 0].max(), grid_data[:, 1].max() - x_plot = np.linspace(lb_x, ub_x, nn) - y_plot = np.linspace(lb_y, ub_y, nn) - X_plot, Y_plot = np.meshgrid(x_plot, y_plot) - - fig = plt.figure(figname, figsize=(10, 6)) - gs = gridspec.GridSpec(1, 2) - gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5) - # Exact p(t,x,y) - plot_data_label = griddata( - grid_data, - data_exact[:, snap].flatten(), - (X_plot, Y_plot), - method="cubic", - ) - _draw_subplot( - "Exact Dynamics", - plot_data_label, - fig, - gs, - "seismic", - [lb_x, lb_y, ub_x, ub_y], - loc=0, - ) - # Predicted p(t,x,y) - plot_data_pred = griddata( - grid_data, - data_learned[:, snap].flatten(), - (X_plot, Y_plot), - method="cubic", - ) - _draw_subplot( - "Learned Dynamics", - plot_data_pred, - fig, - gs, - "seismic", - [lb_x, lb_y, ub_x, ub_y], - loc=1, - ) - plt.savefig(osp.join(save_path, figname)) - plt.close() +from os import path as osp + +import matplotlib.gridspec as gridspec +import matplotlib.pyplot as plt +import numpy as np +from mpl_toolkits.axes_grid1 import make_axes_locatable +from scipy.interpolate import griddata + + +def _draw_subplot(subfigname, figdata, fig, gs, cmap, boundary, loc): + ax = plt.subplot(gs[:, loc]) + h = ax.imshow( + figdata, + interpolation="nearest", + cmap=cmap, + extent=boundary, # [cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB] + origin="lower", + aspect="auto", + ) + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + + fig.colorbar(h, cax=cax) + ax.set_xlabel("$t$") + ax.set_ylabel("$x$") + ax.set_aspect("auto", "box") + ax.set_title(subfigname, fontsize=10) + + +def draw_and_save( + figname, data_exact, data_learned, boundary, griddata_points, griddata_xi, save_path +): + fig = plt.figure(figname, figsize=(10, 6)) + gs = gridspec.GridSpec(1, 2) + gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5) + + # Exact p(t,x,y) + plot_data_label = griddata( + griddata_points, data_exact.flatten(), griddata_xi, method="cubic" + ) + _draw_subplot("Exact Dynamics", plot_data_label, fig, gs, "jet", boundary, loc=0) + # Predicted p(t,x,y) + plot_data_pred = griddata( + griddata_points, data_learned.flatten(), griddata_xi, method="cubic" + ) + _draw_subplot("Learned Dynamics", plot_data_pred, fig, gs, "jet", boundary, loc=1) + + plt.savefig(osp.join(save_path, figname)) + plt.close() + + +def draw_and_save_ns(figname, data_exact, data_learned, grid_data, save_path): + snap = 120 + nn = 200 + lb_x, lb_y = grid_data[:, 0].min(), grid_data[:, 1].min() + ub_x, ub_y = grid_data[:, 0].max(), grid_data[:, 1].max() + x_plot = np.linspace(lb_x, ub_x, nn) + y_plot = np.linspace(lb_y, ub_y, nn) + X_plot, Y_plot = np.meshgrid(x_plot, y_plot) + + fig = plt.figure(figname, figsize=(10, 6)) + gs = gridspec.GridSpec(1, 2) 
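+    # 1 x 2 layout: exact dynamics in the left panel, learned dynamics in the right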
+ gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.5) + # Exact p(t,x,y) + plot_data_label = griddata( + grid_data, + data_exact[:, snap].flatten(), + (X_plot, Y_plot), + method="cubic", + ) + _draw_subplot( + "Exact Dynamics", + plot_data_label, + fig, + gs, + "seismic", + [lb_x, lb_y, ub_x, ub_y], + loc=0, + ) + # Predicted p(t,x,y) + plot_data_pred = griddata( + grid_data, + data_learned[:, snap].flatten(), + (X_plot, Y_plot), + method="cubic", + ) + _draw_subplot( + "Learned Dynamics", + plot_data_pred, + fig, + gs, + "seismic", + [lb_x, lb_y, ub_x, ub_y], + loc=1, + ) + plt.savefig(osp.join(save_path, figname)) + plt.close() diff --git a/examples/deephpms/schrodinger.py b/examples/deephpms/schrodinger.py index 4ab910b2b5..ea5cce6c6a 100644 --- a/examples/deephpms/schrodinger.py +++ b/examples/deephpms/schrodinger.py @@ -1,549 +1,549 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import hydra -import numpy as np -import paddle -import paddle.nn.functional as F -import plotting as plot_func -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger -from ppsci.utils import reader -from ppsci.utils import save_load - - -def pde_loss_func(output_dict, *args): - losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum") - losses += F.mse_loss(output_dict["g_pde"], output_dict["dv_t"], "sum") - return {"pde": losses} - - -def pde_l2_rel_func(output_dict, *args): - rel_l2_f = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm( - output_dict["du_t"] - ) - rel_l2_g = paddle.norm(output_dict["dv_t"] - output_dict["g_pde"]) / paddle.norm( - output_dict["dv_t"] - ) - metric_dict = {"f_pde_f": rel_l2_f, "f_pde_g": rel_l2_g} - return metric_dict - - -def boundary_loss_func(output_dict, *args): - u_b, v_b = output_dict["u_idn"], output_dict["v_idn"] - u_lb, u_ub = paddle.split(u_b, 2, axis=0) - v_lb, v_ub = paddle.split(v_b, 2, axis=0) - - x_b = output_dict["x"] - du_x = jacobian(u_b, x_b) - dv_x = jacobian(v_b, x_b) - - du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0) - dv_x_lb, dv_x_ub = paddle.split(dv_x, 2, axis=0) - - losses = F.mse_loss(u_lb, u_ub, "sum") - losses += F.mse_loss(v_lb, v_ub, "sum") - losses += F.mse_loss(du_x_lb, du_x_ub, "sum") - losses += F.mse_loss(dv_x_lb, dv_x_ub, "sum") - return {"boundary": losses} - - -def sol_l2_rel_func(output_dict, label_dict): - uv_pred = paddle.sqrt(output_dict["u_idn"] ** 2 + output_dict["v_idn"] ** 2) - uv_label = paddle.sqrt(label_dict["u_idn"] ** 2 + label_dict["u_idn"] ** 2) - rel_l2 = paddle.norm(uv_label - uv_pred) / paddle.norm(uv_pred) - metric_dict = {"uv_sol": rel_l2} - return metric_dict - - -def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize boundaries - t_lb = 
paddle.to_tensor(cfg.T_LB) - t_ub = paddle.to_tensor(np.pi / cfg.T_UB) - x_lb = paddle.to_tensor(cfg.X_LB) - x_ub = paddle.to_tensor(cfg.X_UB) - - # initialize models - model_idn_u = ppsci.arch.MLP(**cfg.MODEL.idn_u_net) - model_idn_v = ppsci.arch.MLP(**cfg.MODEL.idn_v_net) - model_pde_f = ppsci.arch.MLP(**cfg.MODEL.pde_f_net) - model_pde_g = ppsci.arch.MLP(**cfg.MODEL.pde_g_net) - - # initialize transform - def transform_uv(_in): - t, x = _in["t"], _in["x"] - t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 - x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 - input_trans = {"t": t, "x": x} - return input_trans - - def transform_fg(_in): - in_idn = {"t": _in["t"], "x": _in["x"]} - x = _in["x"] - u = model_idn_u(in_idn)["u_idn"] - v = model_idn_v(in_idn)["v_idn"] - - du_x = jacobian(u, x) - du_xx = hessian(u, x) - - dv_x = jacobian(v, x) - dv_xx = hessian(v, x) - - input_trans = { - "u": u, - "v": v, - "du_x": du_x, - "dv_x": dv_x, - "du_xx": du_xx, - "dv_xx": dv_xx, - } - return input_trans - - # register transform - model_idn_u.register_input_transform(transform_uv) - model_idn_v.register_input_transform(transform_uv) - model_pde_f.register_input_transform(transform_fg) - model_pde_g.register_input_transform(transform_fg) - - # initialize model list - model_list = ppsci.arch.ModelList( - (model_idn_u, model_idn_v, model_pde_f, model_pde_g) - ) - - # initialize optimizer - # Adam - optimizer_idn = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)( - (model_idn_u, model_idn_v) - ) - optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)( - (model_pde_f, model_pde_g) - ) - - # LBFGS - # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_idn_u, model_idn_v)) - # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_pde_f, model_pde_g)) - - # stage 1: training identification net - # manually build constraint(s) - train_dataloader_cfg_idn = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("u_idn", "v_idn"), - "alias_dict": { - "t": "t_train", - "x": "x_train", - "u_idn": "u_train", - "v_idn": "v_train", - }, - }, - } - - sup_constraint_idn = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {key: (lambda out, k=key: out[k]) for key in ("u_idn", "v_idn")}, - name="uv_mse_sup", - ) - constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} - - # manually build validator - eval_dataloader_cfg_idn = { - "dataset": { - "name": "MatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("u_idn", "v_idn"), - "alias_dict": { - "t": "t_star", - "x": "x_star", - "u_idn": "u_star", - "v_idn": "v_star", - }, - }, - "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - sup_validator_idn = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_idn, - ppsci.loss.MSELoss("sum"), - {key: (lambda out, k=key: out[k]) for key in ("u_idn", "v_idn")}, - {"l2": ppsci.metric.L2Rel()}, - name="uv_L2_sup", - ) - validator_idn = {sup_validator_idn.name: sup_validator_idn} - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - constraint_idn, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_idn, - ) - - # train model - solver.train() - # evaluate after finished training - 
solver.eval() - - # stage 2: training pde net - # manually build constraint(s) - train_dataloader_cfg_pde = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("du_t", "dv_t"), - "alias_dict": { - "t": "t_train", - "x": "x_train", - "du_t": "t_train", - "dv_t": "t_train", - }, - }, - } - - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "dv_t": lambda out: jacobian(out["v_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - "g_pde": lambda out: out["g_pde"], - }, - name="fg_mse_sup", - ) - constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} - - # manually build validator - eval_dataloader_cfg_pde = { - "dataset": { - "name": "MatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("t", "x"), - "label_keys": ("du_t", "dv_t"), - "alias_dict": { - "t": "t_star", - "x": "x_star", - "du_t": "t_star", - "dv_t": "t_star", - }, - }, - "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - sup_validator_pde = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_pde, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "dv_t": lambda out: jacobian(out["v_idn"], out["t"]), - "f_pde": lambda out: out["f_pde"], - "g_pde": lambda out: out["g_pde"], - }, - {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, - name="fg_L2_sup", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_pde, - cfg.output_dir, - optimizer_pde, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_pde, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # stage 3: training solution net - # if lbfgs: initialize a new opt with a small initial learning rate in case loss explosion - # optimizer_idn = ppsci.optimizer.LBFGS(learning_rate=0.01, max_iter=MAX_ITER)( - # [model_idn_u, model_idn_v] - # ) - # manually build constraint(s) - train_dataloader_cfg_sol_f = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("du_t", "dv_t"), - "alias_dict": { - "t": "t_f_train", - "x": "x_f_train", - "du_t": "t_f_train", - "dv_t": "t_f_train", - }, - }, - } - train_dataloader_cfg_sol_init = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("u_idn", "v_idn"), - "alias_dict": {"t": "t0", "x": "x0", "u_idn": "u0", "v_idn": "v0"}, - }, - } - train_dataloader_cfg_sol_bc = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("x",), - "alias_dict": {"t": "tb", "x": "xb"}, - }, - } - - sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_f, - ppsci.loss.FunctionalLoss(pde_loss_func), - { - "f_pde": lambda out: out["f_pde"], - "g_pde": lambda out: out["g_pde"], - "du_t": lambda out: jacobian(out["u_idn"], out["t"]), - "dv_t": lambda out: jacobian(out["v_idn"], out["t"]), - }, - name="fg_mse_sup", - ) - sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_init, - 
ppsci.loss.MSELoss("sum"), - {key: (lambda out, k=key: out[k]) for key in ("u_idn", "v_idn")}, - name="uv0_mse_sup", - ) - sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_sol_bc, - ppsci.loss.FunctionalLoss(boundary_loss_func), - { - "x": lambda out: out["x"], - "u_idn": lambda out: out["u_idn"], - "v_idn": lambda out: out["v_idn"], - }, - name="uvb_mse_sup", - ) - constraint_sol = { - sup_constraint_sol_f.name: sup_constraint_sol_f, - sup_constraint_sol_init.name: sup_constraint_sol_init, - sup_constraint_sol_bc.name: sup_constraint_sol_bc, - } - - # manually build validator - eval_dataloader_cfg_sol = { - "dataset": { - "name": "MatDataset", - "file_path": cfg.DATASET_PATH_SOL, - "input_keys": ("t", "x"), - "label_keys": ("u_idn", "v_idn"), - "alias_dict": { - "t": "t_star", - "x": "x_star", - "u_idn": "u_star", - "v_idn": "v_star", - }, - }, - "batch_size": cfg.TRAIN.batch_size.eval, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - sup_validator_sol = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_sol, - ppsci.loss.MSELoss("sum"), - {key: (lambda out, k=key: out[k]) for key in ("u_idn", "v_idn")}, - {"l2": ppsci.metric.FunctionalMetric(sol_l2_rel_func)}, - name="uv_L2_sup", - ) - validator_sol = {sup_validator_sol.name: sup_validator_sol} - - # update solver - solver = ppsci.solver.Solver( - model_list, - constraint_sol, - cfg.output_dir, - optimizer_idn, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_sol, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # initialize boundaries - t_lb = paddle.to_tensor(cfg.T_LB) - t_ub = paddle.to_tensor(np.pi / cfg.T_UB) - x_lb = paddle.to_tensor(cfg.X_LB) - x_ub = paddle.to_tensor(cfg.X_UB) - - # initialize models - model_idn_u = ppsci.arch.MLP(**cfg.MODEL.idn_u_net) - model_idn_v = ppsci.arch.MLP(**cfg.MODEL.idn_v_net) - model_pde_f = ppsci.arch.MLP(**cfg.MODEL.pde_f_net) - model_pde_g = ppsci.arch.MLP(**cfg.MODEL.pde_g_net) - - # initialize transform - def transform_uv(_in): - t, x = _in["t"], _in["x"] - t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 - x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 - input_trans = {"t": t, "x": x} - return input_trans - - def transform_fg(_in): - in_idn = {"t": _in["t"], "x": _in["x"]} - x = _in["x"] - u = model_idn_u(in_idn)["u_idn"] - v = model_idn_v(in_idn)["v_idn"] - - du_x = jacobian(u, x) - du_xx = hessian(u, x) - - dv_x = jacobian(v, x) - dv_xx = hessian(v, x) - - input_trans = { - "u": u, - "v": v, - "du_x": du_x, - "dv_x": dv_x, - "du_xx": du_xx, - "dv_xx": dv_xx, - } - return input_trans - - # register transform - model_idn_u.register_input_transform(transform_uv) - model_idn_v.register_input_transform(transform_uv) - model_pde_f.register_input_transform(transform_fg) - model_pde_g.register_input_transform(transform_fg) - - # initialize model list - model_list = ppsci.arch.ModelList( - (model_idn_u, model_idn_v, model_pde_f, model_pde_g) - ) - # stage 3: solution net - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # load dataset - 
dataset_val = reader.load_mat_file( - cfg.DATASET_PATH_SOL, - keys=("t", "x", "uv_sol", "u_sol", "v_sol"), - alias_dict={ - "t": "t_ori", - "x": "x_ori", - "uv_sol": "Exact_uv_ori", - "u_sol": "u_star", - "v_sol": "v_star", - }, - ) - - t_sol, x_sol = np.meshgrid( - np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"]) - ) - t_sol_flatten = paddle.to_tensor( - t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - x_sol_flatten = paddle.to_tensor( - x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False - ) - pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten}) - u_sol_pred = pred["u_idn"].numpy() - v_sol_pred = pred["v_idn"].numpy() - uv_sol_pred = np.sqrt(u_sol_pred**2 + v_sol_pred**2) - - # eval - uv_sol_star = np.sqrt(dataset_val["u_sol"] ** 2 + dataset_val["v_sol"] ** 2) - error_uv = np.linalg.norm(uv_sol_star - uv_sol_pred, 2) / np.linalg.norm( - uv_sol_star, 2 - ) - logger.info(f"l2_error_uv: {error_uv}") - - # plotting - plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy() - plot_func.draw_and_save( - figname="schrodinger_uv_sol", - data_exact=dataset_val["uv_sol"], - data_learned=uv_sol_pred, - boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB], - griddata_points=plot_points, - griddata_xi=(t_sol, x_sol), - save_path=cfg.output_dir, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="schrodinger.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
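+ 
+# DeepHPMs example for the Schrodinger equation: stage 1 fits the identification
+# nets (u_idn, v_idn) to data, stage 2 learns the hidden PDE residuals
+# (f_pde, g_pde), and stage 3 reuses the identification nets as the solution
+# nets on the DATASET_PATH_SOL data.
+# Typical usage (standard Hydra overrides, assuming conf/schrodinger.yaml defaults):
+#   python schrodinger.py            # train
+#   python schrodinger.py mode=eval  # evaluate using EVAL.pretrained_model_path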
+ +from os import path as osp + +import hydra +import numpy as np +import paddle +import paddle.nn.functional as F +import plotting as plot_func +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger +from ppsci.utils import reader +from ppsci.utils import save_load + + +def pde_loss_func(output_dict, *args): + losses = F.mse_loss(output_dict["f_pde"], output_dict["du_t"], "sum") + losses += F.mse_loss(output_dict["g_pde"], output_dict["dv_t"], "sum") + return {"pde": losses} + + +def pde_l2_rel_func(output_dict, *args): + rel_l2_f = paddle.norm(output_dict["du_t"] - output_dict["f_pde"]) / paddle.norm( + output_dict["du_t"] + ) + rel_l2_g = paddle.norm(output_dict["dv_t"] - output_dict["g_pde"]) / paddle.norm( + output_dict["dv_t"] + ) + metric_dict = {"f_pde_f": rel_l2_f, "f_pde_g": rel_l2_g} + return metric_dict + + +def boundary_loss_func(output_dict, *args): + u_b, v_b = output_dict["u_idn"], output_dict["v_idn"] + u_lb, u_ub = paddle.split(u_b, 2, axis=0) + v_lb, v_ub = paddle.split(v_b, 2, axis=0) + + x_b = output_dict["x"] + du_x = jacobian(u_b, x_b) + dv_x = jacobian(v_b, x_b) + + du_x_lb, du_x_ub = paddle.split(du_x, 2, axis=0) + dv_x_lb, dv_x_ub = paddle.split(dv_x, 2, axis=0) + + losses = F.mse_loss(u_lb, u_ub, "sum") + losses += F.mse_loss(v_lb, v_ub, "sum") + losses += F.mse_loss(du_x_lb, du_x_ub, "sum") + losses += F.mse_loss(dv_x_lb, dv_x_ub, "sum") + return {"boundary": losses} + + +def sol_l2_rel_func(output_dict, label_dict): + uv_pred = paddle.sqrt(output_dict["u_idn"] ** 2 + output_dict["v_idn"] ** 2) + uv_label = paddle.sqrt(label_dict["u_idn"] ** 2 + label_dict["u_idn"] ** 2) + rel_l2 = paddle.norm(uv_label - uv_pred) / paddle.norm(uv_pred) + metric_dict = {"uv_sol": rel_l2} + return metric_dict + + +def train(cfg: DictConfig): + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize boundaries + t_lb = paddle.to_tensor(cfg.T_LB) + t_ub = paddle.to_tensor(np.pi / cfg.T_UB) + x_lb = paddle.to_tensor(cfg.X_LB) + x_ub = paddle.to_tensor(cfg.X_UB) + + # initialize models + model_idn_u = ppsci.arch.MLP(**cfg.MODEL.idn_u_net) + model_idn_v = ppsci.arch.MLP(**cfg.MODEL.idn_v_net) + model_pde_f = ppsci.arch.MLP(**cfg.MODEL.pde_f_net) + model_pde_g = ppsci.arch.MLP(**cfg.MODEL.pde_g_net) + + # initialize transform + def transform_uv(_in): + t, x = _in["t"], _in["x"] + t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 + x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 + input_trans = {"t": t, "x": x} + return input_trans + + def transform_fg(_in): + in_idn = {"t": _in["t"], "x": _in["x"]} + x = _in["x"] + u = model_idn_u(in_idn)["u_idn"] + v = model_idn_v(in_idn)["v_idn"] + + du_x = jacobian(u, x) + du_xx = hessian(u, x) + + dv_x = jacobian(v, x) + dv_xx = hessian(v, x) + + input_trans = { + "u": u, + "v": v, + "du_x": du_x, + "dv_x": dv_x, + "du_xx": du_xx, + "dv_xx": dv_xx, + } + return input_trans + + # register transform + model_idn_u.register_input_transform(transform_uv) + model_idn_v.register_input_transform(transform_uv) + model_pde_f.register_input_transform(transform_fg) + model_pde_g.register_input_transform(transform_fg) + + # initialize model list + model_list = ppsci.arch.ModelList( + (model_idn_u, model_idn_v, model_pde_f, model_pde_g) + ) + + # initialize optimizer + # Adam + optimizer_idn = 
ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)( + (model_idn_u, model_idn_v) + ) + optimizer_pde = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)( + (model_pde_f, model_pde_g) + ) + + # LBFGS + # optimizer_idn = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_idn_u, model_idn_v)) + # optimizer_pde = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)((model_pde_f, model_pde_g)) + + # stage 1: training identification net + # manually build constraint(s) + train_dataloader_cfg_idn = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("u_idn", "v_idn"), + "alias_dict": { + "t": "t_train", + "x": "x_train", + "u_idn": "u_train", + "v_idn": "v_train", + }, + }, + } + + sup_constraint_idn = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {key: (lambda out, k=key: out[k]) for key in ("u_idn", "v_idn")}, + name="uv_mse_sup", + ) + constraint_idn = {sup_constraint_idn.name: sup_constraint_idn} + + # manually build validator + eval_dataloader_cfg_idn = { + "dataset": { + "name": "MatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("u_idn", "v_idn"), + "alias_dict": { + "t": "t_star", + "x": "x_star", + "u_idn": "u_star", + "v_idn": "v_star", + }, + }, + "batch_size": cfg.TRAIN.batch_size.eval, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + sup_validator_idn = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_idn, + ppsci.loss.MSELoss("sum"), + {key: (lambda out, k=key: out[k]) for key in ("u_idn", "v_idn")}, + {"l2": ppsci.metric.L2Rel()}, + name="uv_L2_sup", + ) + validator_idn = {sup_validator_idn.name: sup_validator_idn} + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + constraint_idn, + cfg.output_dir, + optimizer_idn, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_idn, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 2: training pde net + # manually build constraint(s) + train_dataloader_cfg_pde = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("du_t", "dv_t"), + "alias_dict": { + "t": "t_train", + "x": "x_train", + "du_t": "t_train", + "dv_t": "t_train", + }, + }, + } + + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "dv_t": lambda out: jacobian(out["v_idn"], out["t"]), + "f_pde": lambda out: out["f_pde"], + "g_pde": lambda out: out["g_pde"], + }, + name="fg_mse_sup", + ) + constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} + + # manually build validator + eval_dataloader_cfg_pde = { + "dataset": { + "name": "MatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("t", "x"), + "label_keys": ("du_t", "dv_t"), + "alias_dict": { + "t": "t_star", + "x": "x_star", + "du_t": "t_star", + "dv_t": "t_star", + }, + }, + "batch_size": cfg.TRAIN.batch_size.eval, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + sup_validator_pde = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_pde, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "dv_t": lambda out: jacobian(out["v_idn"], 
out["t"]), + "f_pde": lambda out: out["f_pde"], + "g_pde": lambda out: out["g_pde"], + }, + {"l2": ppsci.metric.FunctionalMetric(pde_l2_rel_func)}, + name="fg_L2_sup", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_pde, + cfg.output_dir, + optimizer_pde, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_pde, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # stage 3: training solution net + # if lbfgs: initialize a new opt with a small initial learning rate in case loss explosion + # optimizer_idn = ppsci.optimizer.LBFGS(learning_rate=0.01, max_iter=MAX_ITER)( + # [model_idn_u, model_idn_v] + # ) + # manually build constraint(s) + train_dataloader_cfg_sol_f = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("du_t", "dv_t"), + "alias_dict": { + "t": "t_f_train", + "x": "x_f_train", + "du_t": "t_f_train", + "dv_t": "t_f_train", + }, + }, + } + train_dataloader_cfg_sol_init = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("u_idn", "v_idn"), + "alias_dict": {"t": "t0", "x": "x0", "u_idn": "u0", "v_idn": "v0"}, + }, + } + train_dataloader_cfg_sol_bc = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("x",), + "alias_dict": {"t": "tb", "x": "xb"}, + }, + } + + sup_constraint_sol_f = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_f, + ppsci.loss.FunctionalLoss(pde_loss_func), + { + "f_pde": lambda out: out["f_pde"], + "g_pde": lambda out: out["g_pde"], + "du_t": lambda out: jacobian(out["u_idn"], out["t"]), + "dv_t": lambda out: jacobian(out["v_idn"], out["t"]), + }, + name="fg_mse_sup", + ) + sup_constraint_sol_init = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_init, + ppsci.loss.MSELoss("sum"), + {key: (lambda out, k=key: out[k]) for key in ("u_idn", "v_idn")}, + name="uv0_mse_sup", + ) + sup_constraint_sol_bc = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_sol_bc, + ppsci.loss.FunctionalLoss(boundary_loss_func), + { + "x": lambda out: out["x"], + "u_idn": lambda out: out["u_idn"], + "v_idn": lambda out: out["v_idn"], + }, + name="uvb_mse_sup", + ) + constraint_sol = { + sup_constraint_sol_f.name: sup_constraint_sol_f, + sup_constraint_sol_init.name: sup_constraint_sol_init, + sup_constraint_sol_bc.name: sup_constraint_sol_bc, + } + + # manually build validator + eval_dataloader_cfg_sol = { + "dataset": { + "name": "MatDataset", + "file_path": cfg.DATASET_PATH_SOL, + "input_keys": ("t", "x"), + "label_keys": ("u_idn", "v_idn"), + "alias_dict": { + "t": "t_star", + "x": "x_star", + "u_idn": "u_star", + "v_idn": "v_star", + }, + }, + "batch_size": cfg.TRAIN.batch_size.eval, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + sup_validator_sol = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_sol, + ppsci.loss.MSELoss("sum"), + {key: (lambda out, k=key: out[k]) for key in ("u_idn", "v_idn")}, + {"l2": ppsci.metric.FunctionalMetric(sol_l2_rel_func)}, + name="uv_L2_sup", + ) + validator_sol = {sup_validator_sol.name: sup_validator_sol} + + # update solver + solver = ppsci.solver.Solver( + model_list, + constraint_sol, + 
cfg.output_dir, + optimizer_idn, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_sol, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # initialize boundaries + t_lb = paddle.to_tensor(cfg.T_LB) + t_ub = paddle.to_tensor(np.pi / cfg.T_UB) + x_lb = paddle.to_tensor(cfg.X_LB) + x_ub = paddle.to_tensor(cfg.X_UB) + + # initialize models + model_idn_u = ppsci.arch.MLP(**cfg.MODEL.idn_u_net) + model_idn_v = ppsci.arch.MLP(**cfg.MODEL.idn_v_net) + model_pde_f = ppsci.arch.MLP(**cfg.MODEL.pde_f_net) + model_pde_g = ppsci.arch.MLP(**cfg.MODEL.pde_g_net) + + # initialize transform + def transform_uv(_in): + t, x = _in["t"], _in["x"] + t = 2.0 * (t - t_lb) * paddle.pow((t_ub - t_lb), -1) - 1.0 + x = 2.0 * (x - x_lb) * paddle.pow((x_ub - x_lb), -1) - 1.0 + input_trans = {"t": t, "x": x} + return input_trans + + def transform_fg(_in): + in_idn = {"t": _in["t"], "x": _in["x"]} + x = _in["x"] + u = model_idn_u(in_idn)["u_idn"] + v = model_idn_v(in_idn)["v_idn"] + + du_x = jacobian(u, x) + du_xx = hessian(u, x) + + dv_x = jacobian(v, x) + dv_xx = hessian(v, x) + + input_trans = { + "u": u, + "v": v, + "du_x": du_x, + "dv_x": dv_x, + "du_xx": du_xx, + "dv_xx": dv_xx, + } + return input_trans + + # register transform + model_idn_u.register_input_transform(transform_uv) + model_idn_v.register_input_transform(transform_uv) + model_pde_f.register_input_transform(transform_fg) + model_pde_g.register_input_transform(transform_fg) + + # initialize model list + model_list = ppsci.arch.ModelList( + (model_idn_u, model_idn_v, model_pde_f, model_pde_g) + ) + # stage 3: solution net + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # load dataset + dataset_val = reader.load_mat_file( + cfg.DATASET_PATH_SOL, + keys=("t", "x", "uv_sol", "u_sol", "v_sol"), + alias_dict={ + "t": "t_ori", + "x": "x_ori", + "uv_sol": "Exact_uv_ori", + "u_sol": "u_star", + "v_sol": "v_star", + }, + ) + + t_sol, x_sol = np.meshgrid( + np.squeeze(dataset_val["t"]), np.squeeze(dataset_val["x"]) + ) + t_sol_flatten = paddle.to_tensor( + t_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + x_sol_flatten = paddle.to_tensor( + x_sol.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False + ) + pred = model_list({"t": t_sol_flatten, "x": x_sol_flatten}) + u_sol_pred = pred["u_idn"].numpy() + v_sol_pred = pred["v_idn"].numpy() + uv_sol_pred = np.sqrt(u_sol_pred**2 + v_sol_pred**2) + + # eval + uv_sol_star = np.sqrt(dataset_val["u_sol"] ** 2 + dataset_val["v_sol"] ** 2) + error_uv = np.linalg.norm(uv_sol_star - uv_sol_pred, 2) / np.linalg.norm( + uv_sol_star, 2 + ) + logger.info(f"l2_error_uv: {error_uv}") + + # plotting + plot_points = paddle.concat([t_sol_flatten, x_sol_flatten], axis=-1).numpy() + plot_func.draw_and_save( + figname="schrodinger_uv_sol", + data_exact=dataset_val["uv_sol"], + data_learned=uv_sol_pred, + boundary=[cfg.T_LB, cfg.T_UB, cfg.X_LB, cfg.X_UB], + griddata_points=plot_points, + griddata_xi=(t_sol, x_sol), + save_path=cfg.output_dir, + ) + + +@hydra.main(version_base=None, config_path="./conf", 
config_name="schrodinger.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/dgmr/conf/dgmr.yaml b/examples/dgmr/conf/dgmr.yaml index 00b4b224dd..f6bf0f322d 100644 --- a/examples/dgmr/conf/dgmr.yaml +++ b/examples/dgmr/conf/dgmr.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -58,3 +59,64 @@ MODEL: # evaluation settings EVAL: pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/dgmr/dgmr_pretrained.pdparams +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_dgmr/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: eval # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} + +# dataset settings +DATASET: + input_keys: ['input_frames'] + label_keys: ['target_frames'] + split: validation # train or validation + num_input_frames: 4 + num_target_frames: 18 + dataset_path: openclimatefix/nimrod-uk-1km + +# model settings +MODEL: + input_keys: ['input_frames'] + output_keys: ['future_images'] + forecast_steps: 18 + input_channels: 1 + output_shape: 256 + gen_lr: 5e-05 + disc_lr: 0.0002 + conv_type: 'standard' + num_samples: 6 + grid_lambda: 20.0 + beta1: 0.0 + beta2: 0.999 + latent_channels: 768 + context_channels: 384 + generation_steps: 6 + +# evaluation settings +EVAL: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/dgmr/dgmr_pretrained.pdparams +>>>>>>> Stashed changes diff --git a/examples/dgmr/dgmr.py b/examples/dgmr/dgmr.py index 52bd413323..2edf2b4777 100644 --- a/examples/dgmr/dgmr.py +++ b/examples/dgmr/dgmr.py @@ -1,246 +1,246 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Reference: https://github.com/openclimatefix/skillful_nowcasting -""" -from os import path as osp -from typing import Dict -from typing import Tuple - -import hydra -import matplotlib.pyplot as plt -import numpy as np -import paddle -import paddle.nn as nn -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def visualize( - output_dir: str, - x: paddle.Tensor, - y: paddle.Tensor, - y_hat: paddle.Tensor, - batch_idx: int, -) -> None: - """ - Visualizes input, target, and generated images and saves them to the output directory. 
- - Args: - output_dir (str): Directory to save the visualization images. - x (paddle.Tensor): Input images tensor. - y (paddle.Tensor): Target images tensor. - y_hat (paddle.Tensor): Generated images tensor. - batch_idx (int): Batch index. - - Returns: - None - """ - images = x[0] - future_images = y[0] - generated_images = y_hat[0] - fig, axes = plt.subplots(2, 2) - for i, ax in enumerate(axes.flat): - alpha = images[i][0].numpy() - alpha[alpha < 1] = 0 - alpha[alpha > 1] = 1 - ax.imshow(images[i].transpose([1, 2, 0]).numpy(), alpha=alpha, cmap="viridis") - plt.subplots_adjust(hspace=0.1, wspace=0.1) - plt.savefig(osp.join(output_dir, f"Input_Image_Stack_Frame_{batch_idx}.png")) - fig, axes = plt.subplots(3, 3) - for i, ax in enumerate(axes.flat): - alpha = future_images[i][0].numpy() - alpha[alpha < 1] = 0 - alpha[alpha > 1] = 1 - ax.imshow( - future_images[i].transpose([1, 2, 0]).numpy(), alpha=alpha, cmap="viridis" - ) - plt.subplots_adjust(hspace=0.1, wspace=0.1) - plt.savefig(osp.join(output_dir, f"Target_Image_Frame_{batch_idx}.png")) - fig, axes = plt.subplots(3, 3) - for i, ax in enumerate(axes.flat): - alpha = generated_images[i][0].numpy() - alpha[alpha < 1] = 0 - alpha[alpha > 1] = 1 - ax.imshow( - generated_images[i].transpose([1, 2, 0]).numpy(), - alpha=alpha, - cmap="viridis", - ) - plt.subplots_adjust(hspace=0.1, wspace=0.1) - plt.savefig(osp.join(output_dir, f"Generated_Image_Frame_{batch_idx}.png")) - plt.close() - - -def validation( - cfg: DictConfig, - solver: ppsci.solver.Solver, - batch: Tuple[Dict[str, paddle.Tensor], ...], -): - """ - validation step. - - Args: - cfg (DictConfig): Configuration object. - solver (ppsci.solver.Solver): Solver object containing the model and related components. - batch (Tuple[Dict[str, paddle.Tensor], ...]): Input batch consisting of images and corresponding future images. - - Returns: - discriminator_loss: Loss incurred by the discriminator. - generator_loss: Loss incurred by the generator. - grid_cell_reg: Regularization term to encourage smooth transitions. 
- """ - images, future_images = batch - images_value = images[cfg.DATASET.input_keys[0]] - future_images_value = future_images[cfg.DATASET.label_keys[0]] - # Two discriminator steps per generator step - for _ in range(2): - predictions = solver.predict(images) - predictions_value = predictions[cfg.MODEL.output_keys[0]] - generated_sequence = paddle.concat(x=[images_value, predictions_value], axis=1) - real_sequence = paddle.concat(x=[images_value, future_images_value], axis=1) - concatenated_inputs = paddle.concat( - x=[real_sequence, generated_sequence], axis=0 - ) - concatenated_outputs = solver.model.discriminator(concatenated_inputs) - score_real, score_generated = paddle.split( - x=concatenated_outputs, - num_or_sections=[real_sequence.shape[0], generated_sequence.shape[0]], - axis=0, - ) - score_real_spatial, score_real_temporal = paddle.split( - x=score_real, num_or_sections=score_real.shape[1], axis=1 - ) - score_generated_spatial, score_generated_temporal = paddle.split( - x=score_generated, num_or_sections=score_generated.shape[1], axis=1 - ) - discriminator_loss = _loss_hinge_disc( - score_generated_spatial, score_real_spatial - ) + _loss_hinge_disc(score_generated_temporal, score_real_temporal) - - predictions_value = [ - solver.predict(images)[cfg.MODEL.output_keys[0]] for _ in range(6) - ] - grid_cell_reg = _grid_cell_regularizer( - paddle.stack(x=predictions_value, axis=0), future_images_value - ) - generated_sequence = [ - paddle.concat(x=[images_value, x], axis=1) for x in predictions_value - ] - real_sequence = paddle.concat(x=[images_value, future_images_value], axis=1) - generated_scores = [] - for g_seq in generated_sequence: - concatenated_inputs = paddle.concat(x=[real_sequence, g_seq], axis=0) - concatenated_outputs = solver.model.discriminator(concatenated_inputs) - score_real, score_generated = paddle.split( - x=concatenated_outputs, - num_or_sections=[real_sequence.shape[0], g_seq.shape[0]], - axis=0, - ) - generated_scores.append(score_generated) - generator_disc_loss = _loss_hinge_gen(paddle.concat(x=generated_scores, axis=0)) - generator_loss = generator_disc_loss + 20 * grid_cell_reg - - return discriminator_loss, generator_loss, grid_cell_reg - - -def _loss_hinge_disc(score_generated, score_real): - """Discriminator hinge loss.""" - l1 = nn.functional.relu(x=1.0 - score_real) - loss = paddle.mean(x=l1) - l2 = nn.functional.relu(x=1.0 + score_generated) - loss += paddle.mean(x=l2) - return loss - - -def _loss_hinge_gen(score_generated): - """Generator hinge loss.""" - loss = -paddle.mean(x=score_generated) - return loss - - -def _grid_cell_regularizer(generated_samples, batch_targets): - """Grid cell regularizer. - - Args: - generated_samples: Tensor of size [n_samples, batch_size, 18, 256, 256, 1]. - batch_targets: Tensor of size [batch_size, 18, 256, 256, 1]. - - Returns: - loss: A tensor of shape [batch_size]. 
- """ - gen_mean = paddle.mean(x=generated_samples, axis=0) - weights = paddle.clip(x=batch_targets, min=0.0, max=24.0) - loss = paddle.mean(x=paddle.abs(x=gen_mean - batch_targets) * weights) - return loss - - -def train(cfg: DictConfig): - raise NotImplementedError("Training of DGMR is not supported now.") - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.DGMR(**cfg.MODEL) - # load evaluate data - dataset = ppsci.data.dataset.DGMRDataset(**cfg.DATASET) - val_loader = paddle.io.DataLoader(dataset, batch_size=4) - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.model.eval() - - # evaluate pretrained model - d_loss = [] - g_loss = [] - grid_loss = [] - for batch_idx, batch in enumerate(val_loader): - with paddle.no_grad(): - out_dict = validation(cfg, solver, batch) - - # visualize - images = batch[0][cfg.DATASET.input_keys[0]] - future_images = batch[1][cfg.DATASET.label_keys[0]] - generated_images = solver.predict(batch[0])[cfg.MODEL.output_keys[0]] - if batch_idx % 50 == 0: - logger.message(f"Saving plot of image frame to {cfg.output_dir}") - visualize( - cfg.output_dir, images, future_images, generated_images, batch_idx - ) - - d_loss.append(out_dict[0]) - g_loss.append(out_dict[1]) - grid_loss.append(out_dict[2]) - logger.message(f"d_loss: {np.array(d_loss).mean()}") - logger.message(f"g_loss: {np.array(g_loss).mean()}") - logger.message(f"grid_loss: {np.array(grid_loss).mean()}") - - -@hydra.main(version_base=None, config_path="./conf", config_name="dgmr.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/openclimatefix/skillful_nowcasting +""" +from os import path as osp +from typing import Dict +from typing import Tuple + +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +import paddle.nn as nn +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def visualize( + output_dir: str, + x: paddle.Tensor, + y: paddle.Tensor, + y_hat: paddle.Tensor, + batch_idx: int, +) -> None: + """ + Visualizes input, target, and generated images and saves them to the output directory. + + Args: + output_dir (str): Directory to save the visualization images. + x (paddle.Tensor): Input images tensor. + y (paddle.Tensor): Target images tensor. + y_hat (paddle.Tensor): Generated images tensor. + batch_idx (int): Batch index. 
+ + Returns: + None + """ + images = x[0] + future_images = y[0] + generated_images = y_hat[0] + fig, axes = plt.subplots(2, 2) + for i, ax in enumerate(axes.flat): + alpha = images[i][0].numpy() + alpha[alpha < 1] = 0 + alpha[alpha > 1] = 1 + ax.imshow(images[i].transpose([1, 2, 0]).numpy(), alpha=alpha, cmap="viridis") + plt.subplots_adjust(hspace=0.1, wspace=0.1) + plt.savefig(osp.join(output_dir, f"Input_Image_Stack_Frame_{batch_idx}.png")) + fig, axes = plt.subplots(3, 3) + for i, ax in enumerate(axes.flat): + alpha = future_images[i][0].numpy() + alpha[alpha < 1] = 0 + alpha[alpha > 1] = 1 + ax.imshow( + future_images[i].transpose([1, 2, 0]).numpy(), alpha=alpha, cmap="viridis" + ) + plt.subplots_adjust(hspace=0.1, wspace=0.1) + plt.savefig(osp.join(output_dir, f"Target_Image_Frame_{batch_idx}.png")) + fig, axes = plt.subplots(3, 3) + for i, ax in enumerate(axes.flat): + alpha = generated_images[i][0].numpy() + alpha[alpha < 1] = 0 + alpha[alpha > 1] = 1 + ax.imshow( + generated_images[i].transpose([1, 2, 0]).numpy(), + alpha=alpha, + cmap="viridis", + ) + plt.subplots_adjust(hspace=0.1, wspace=0.1) + plt.savefig(osp.join(output_dir, f"Generated_Image_Frame_{batch_idx}.png")) + plt.close() + + +def validation( + cfg: DictConfig, + solver: ppsci.solver.Solver, + batch: Tuple[Dict[str, paddle.Tensor], ...], +): + """ + validation step. + + Args: + cfg (DictConfig): Configuration object. + solver (ppsci.solver.Solver): Solver object containing the model and related components. + batch (Tuple[Dict[str, paddle.Tensor], ...]): Input batch consisting of images and corresponding future images. + + Returns: + discriminator_loss: Loss incurred by the discriminator. + generator_loss: Loss incurred by the generator. + grid_cell_reg: Regularization term to encourage smooth transitions. 
+ """ + images, future_images = batch + images_value = images[cfg.DATASET.input_keys[0]] + future_images_value = future_images[cfg.DATASET.label_keys[0]] + # Two discriminator steps per generator step + for _ in range(2): + predictions = solver.predict(images) + predictions_value = predictions[cfg.MODEL.output_keys[0]] + generated_sequence = paddle.concat(x=[images_value, predictions_value], axis=1) + real_sequence = paddle.concat(x=[images_value, future_images_value], axis=1) + concatenated_inputs = paddle.concat( + x=[real_sequence, generated_sequence], axis=0 + ) + concatenated_outputs = solver.model.discriminator(concatenated_inputs) + score_real, score_generated = paddle.split( + x=concatenated_outputs, + num_or_sections=[real_sequence.shape[0], generated_sequence.shape[0]], + axis=0, + ) + score_real_spatial, score_real_temporal = paddle.split( + x=score_real, num_or_sections=score_real.shape[1], axis=1 + ) + score_generated_spatial, score_generated_temporal = paddle.split( + x=score_generated, num_or_sections=score_generated.shape[1], axis=1 + ) + discriminator_loss = _loss_hinge_disc( + score_generated_spatial, score_real_spatial + ) + _loss_hinge_disc(score_generated_temporal, score_real_temporal) + + predictions_value = [ + solver.predict(images)[cfg.MODEL.output_keys[0]] for _ in range(6) + ] + grid_cell_reg = _grid_cell_regularizer( + paddle.stack(x=predictions_value, axis=0), future_images_value + ) + generated_sequence = [ + paddle.concat(x=[images_value, x], axis=1) for x in predictions_value + ] + real_sequence = paddle.concat(x=[images_value, future_images_value], axis=1) + generated_scores = [] + for g_seq in generated_sequence: + concatenated_inputs = paddle.concat(x=[real_sequence, g_seq], axis=0) + concatenated_outputs = solver.model.discriminator(concatenated_inputs) + score_real, score_generated = paddle.split( + x=concatenated_outputs, + num_or_sections=[real_sequence.shape[0], g_seq.shape[0]], + axis=0, + ) + generated_scores.append(score_generated) + generator_disc_loss = _loss_hinge_gen(paddle.concat(x=generated_scores, axis=0)) + generator_loss = generator_disc_loss + 20 * grid_cell_reg + + return discriminator_loss, generator_loss, grid_cell_reg + + +def _loss_hinge_disc(score_generated, score_real): + """Discriminator hinge loss.""" + l1 = nn.functional.relu(x=1.0 - score_real) + loss = paddle.mean(x=l1) + l2 = nn.functional.relu(x=1.0 + score_generated) + loss += paddle.mean(x=l2) + return loss + + +def _loss_hinge_gen(score_generated): + """Generator hinge loss.""" + loss = -paddle.mean(x=score_generated) + return loss + + +def _grid_cell_regularizer(generated_samples, batch_targets): + """Grid cell regularizer. + + Args: + generated_samples: Tensor of size [n_samples, batch_size, 18, 256, 256, 1]. + batch_targets: Tensor of size [batch_size, 18, 256, 256, 1]. + + Returns: + loss: A tensor of shape [batch_size]. 
+ """ + gen_mean = paddle.mean(x=generated_samples, axis=0) + weights = paddle.clip(x=batch_targets, min=0.0, max=24.0) + loss = paddle.mean(x=paddle.abs(x=gen_mean - batch_targets) * weights) + return loss + + +def train(cfg: DictConfig): + raise NotImplementedError("Training of DGMR is not supported now.") + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.DGMR(**cfg.MODEL) + # load evaluate data + dataset = ppsci.data.dataset.DGMRDataset(**cfg.DATASET) + val_loader = paddle.io.DataLoader(dataset, batch_size=4) + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.model.eval() + + # evaluate pretrained model + d_loss = [] + g_loss = [] + grid_loss = [] + for batch_idx, batch in enumerate(val_loader): + with paddle.no_grad(): + out_dict = validation(cfg, solver, batch) + + # visualize + images = batch[0][cfg.DATASET.input_keys[0]] + future_images = batch[1][cfg.DATASET.label_keys[0]] + generated_images = solver.predict(batch[0])[cfg.MODEL.output_keys[0]] + if batch_idx % 50 == 0: + logger.message(f"Saving plot of image frame to {cfg.output_dir}") + visualize( + cfg.output_dir, images, future_images, generated_images, batch_idx + ) + + d_loss.append(out_dict[0]) + g_loss.append(out_dict[1]) + grid_loss.append(out_dict[2]) + logger.message(f"d_loss: {np.array(d_loss).mean()}") + logger.message(f"g_loss: {np.array(g_loss).mean()}") + logger.message(f"grid_loss: {np.array(grid_loss).mean()}") + + +@hydra.main(version_base=None, config_path="./conf", config_name="dgmr.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/earthformer/conf/earthformer_enso_pretrain.yaml b/examples/earthformer/conf/earthformer_enso_pretrain.yaml index 541b96d529..d757c8df55 100644 --- a/examples/earthformer/conf/earthformer_enso_pretrain.yaml +++ b/examples/earthformer/conf/earthformer_enso_pretrain.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -152,3 +153,158 @@ INFER: out_len: 14 out_stride: 1 samples_gap: 1 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_earthformer_pretrain + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval/export/infer +seed: 0 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set train and evaluate data path +FILE_PATH: ./datasets/enso/enso_round1_train_20210201 + +# dataset setting +DATASET: + label_keys: ["sst_target","nino_target"] + in_len: 12 + out_len: 14 + nino_window_t: 3 + in_stride: 1 + out_stride: 1 + train_samples_gap: 2 + eval_samples_gap: 1 + normalize_sst: true + +# model settings +MODEL: + input_keys: ["sst_data"] + output_keys: ["sst_target","nino_target"] + input_shape: [12, 24, 48, 1] + target_shape: [14, 24, 48, 1] + base_units: 64 + scale_alpha: 1.0 + + enc_depth: [1, 1] + dec_depth: [1, 1] + enc_use_inter_ffn: true + 
dec_use_inter_ffn: true + dec_hierarchical_pos_embed: false + + downsample: 2 + downsample_type: "patch_merge" + upsample_type: "upsample" + + num_global_vectors: 0 + use_dec_self_global: false + dec_self_update_global: true + use_dec_cross_global: false + use_global_vector_ffn: false + use_global_self_attn: false + separate_global_qkv: false + global_dim_ratio: 1 + + self_pattern: "axial" + cross_self_pattern: "axial" + cross_pattern: "cross_1x1" + dec_cross_last_n_frames: null + + attn_drop: 0.1 + proj_drop: 0.1 + ffn_drop: 0.1 + num_heads: 4 + + ffn_activation: "gelu" + gated_ffn: false + norm_layer: "layer_norm" + padding_type: "zeros" + pos_embed_type: "t+h+w" + use_relative_pos: true + self_attn_use_final_proj: true + dec_use_first_self_attn: false + + z_init_method: "zeros" + initial_downsample_type: "conv" + initial_downsample_activation: "leaky_relu" + initial_downsample_scale: [1, 1, 2] + initial_downsample_conv_layers: 2 + final_upsample_conv_layers: 1 + checkpoint_level: 2 + + attn_linear_init_mode: "0" + ffn_linear_init_mode: "0" + conv_init_mode: "0" + down_up_linear_init_mode: "0" + norm_init_mode: "0" + + +# training settings +TRAIN: + epochs: 100 + save_freq: 20 + eval_during_train: true + eval_freq: 10 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.0002 + by_epoch: true + min_lr_ratio: 1.0e-3 + wd: 1.0e-5 + batch_size: 16 + pretrained_model_path: null + checkpoint_path: null + + +# evaluation settings +EVAL: + pretrained_model_path: ./checkpoint/enso/earthformer_enso.pdparams + compute_metric_by_batch: false + eval_with_no_grad: true + batch_size: 1 + +INFER: + pretrained_model_path: ./checkpoint/enso/earthformer_enso.pdparams + export_path: ./inference/earthformer/enso + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 16 + num_cpu_threads: 4 + batch_size: 1 + data_path: ./datasets/enso/infer/SODA_train.nc + in_len: 12 + in_stride: 1 + out_len: 14 + out_stride: 1 + samples_gap: 1 +>>>>>>> Stashed changes diff --git a/examples/earthformer/conf/earthformer_sevir_pretrain.yaml b/examples/earthformer/conf/earthformer_sevir_pretrain.yaml index 29d31e9e1a..9b1318f97c 100644 --- a/examples/earthformer/conf/earthformer_sevir_pretrain.yaml +++ b/examples/earthformer/conf/earthformer_sevir_pretrain.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -184,3 +185,190 @@ INFER: interval_real_time: 5 data_type: "vil" rescale_method: "01" +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_earthformer_pretrain + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval/export/infer +seed: 0 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set train and evaluate data path +FILE_PATH: ./datasets/sevir/sevir_data + +# SEVIR dataset:raw_seq_len: 49,interval_real_time:5, img_height = 384,img_width = 384 +# SEVIR_lr dataset:raw_seq_len: 
25,interval_real_time:10, img_height = 128,img_width = 128 + +# dataset setting +DATASET: + label_keys: ["vil"] + data_types: ["vil"] + seq_len: 25 + raw_seq_len: 49 + sample_mode: "sequent" + stride: 12 + batch_size: 2 + layout: "NTHWC" + in_len: 13 + out_len: 12 + split_mode: "uneven" + + shuffle_seed: 1 + rescale_method: "01" + downsample_dict: null + verbose: false + preprocess: true + +# model settings +MODEL: + input_keys: ["input"] + output_keys: ["vil"] + input_shape: [13, 384, 384, 1] + target_shape: [12, 384, 384, 1] + base_units: 128 + scale_alpha: 1.0 + + enc_depth: [1, 1] + dec_depth: [1, 1] + enc_use_inter_ffn: true + dec_use_inter_ffn: true + dec_hierarchical_pos_embed: false + + downsample: 2 + downsample_type: "patch_merge" + upsample_type: "upsample" + + num_global_vectors: 8 + use_dec_self_global: false + dec_self_update_global: true + use_dec_cross_global: false + use_global_vector_ffn: false + use_global_self_attn: true + separate_global_qkv: true + global_dim_ratio: 1 + + self_pattern: "axial" + cross_self_pattern: "axial" + cross_pattern: "cross_1x1" + dec_cross_last_n_frames: null + + attn_drop: 0.1 + proj_drop: 0.1 + ffn_drop: 0.1 + num_heads: 4 + + ffn_activation: "gelu" + gated_ffn: false + norm_layer: "layer_norm" + padding_type: "zeros" + pos_embed_type: "t+h+w" + use_relative_pos: true + self_attn_use_final_proj: true + dec_use_first_self_attn: false + + z_init_method: "zeros" + initial_downsample_type: "stack_conv" + initial_downsample_activation: "leaky_relu" + initial_downsample_stack_conv_num_layers: 3 + initial_downsample_stack_conv_dim_list: [16, 64, 128] + initial_downsample_stack_conv_downscale_list: [3, 2, 2] + initial_downsample_stack_conv_num_conv_list: [2, 2, 2] + checkpoint_level: 2 + + attn_linear_init_mode: "0" + ffn_linear_init_mode: "0" + conv_init_mode: "0" + down_up_linear_init_mode: "0" + norm_init_mode: "0" + + +# training settings +TRAIN: + epochs: 100 + save_freq: 20 + eval_during_train: true + eval_freq: 10 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.001 + by_epoch: true + min_lr_ratio: 1.0e-3 + wd: 0.0 + batch_size: 1 + pretrained_model_path: null + checkpoint_path: null + start_date: null + end_date: [2019, 1, 1] + + +# evaluation settings +EVAL: + pretrained_model_path: ./checkpoint/sevir/earthformer_sevir.pdparams + compute_metric_by_batch: false + eval_with_no_grad: true + batch_size: 1 + end_date: [2019, 6, 1] + + metrics_mode: "0" + metrics_list: ["csi", "pod", "sucr", "bias"] + threshold_list: [16, 74, 133, 160, 181, 219] + + +TEST: + pretrained_model_path: ./checkpoint/sevir/earthformer_sevir.pdparams + compute_metric_by_batch: true + eval_with_no_grad: true + batch_size: 1 + start_date: [2019, 6, 1] + end_date: null + +INFER: + pretrained_model_path: ./checkpoint/sevir/earthformer_sevir.pdparams + export_path: ./inference/earthformer/sevir + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 16 + num_cpu_threads: 4 + batch_size: 1 + data_path: ./datasets/sevir/vil/2019/SEVIR_VIL_STORMEVENTS_2019_0701_1231.h5 + in_len: 13 + out_len: 12 + sevir_vis_save: ./inference/earthformer/sevir/vis + layout: "NTHWC" + plot_stride: 2 + logging_prefix: "Cuboid_SEVIR" + interval_real_time: 5 + data_type: "vil" + rescale_method: "01" +>>>>>>> Stashed changes diff --git 
a/examples/earthformer/earthformer_enso_train.py b/examples/earthformer/earthformer_enso_train.py index 120654c704..0a7c0204cd 100644 --- a/examples/earthformer/earthformer_enso_train.py +++ b/examples/earthformer/earthformer_enso_train.py @@ -1,282 +1,282 @@ -from os import path as osp - -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig -from paddle import nn - -import examples.earthformer.enso_metric as enso_metric -import ppsci -from ppsci.data.dataset import enso_dataset -from ppsci.utils import logger - -try: - import xarray as xr -except ModuleNotFoundError: - raise ModuleNotFoundError("Please install xarray with `pip install xarray`.") - - -def get_parameter_names(model, forbidden_layer_types): - result = [] - for name, child in model.named_children(): - result += [ - f"{name}.{n}" - for n in get_parameter_names(child, forbidden_layer_types) - if not isinstance(child, tuple(forbidden_layer_types)) - ] - # Add model specific parameters (defined with nn.Parameter) since they are not in any child. - result += list(model._parameters.keys()) - return result - - -def train(cfg: DictConfig): - # set train dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "ENSODataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "in_stride": cfg.DATASET.in_stride, - "out_stride": cfg.DATASET.out_stride, - "train_samples_gap": cfg.DATASET.train_samples_gap, - "eval_samples_gap": cfg.DATASET.eval_samples_gap, - "normalize_sst": cfg.DATASET.normalize_sst, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 8, - } - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(enso_metric.train_mse_func), - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - # set eval dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "ENSODataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "in_stride": cfg.DATASET.in_stride, - "out_stride": cfg.DATASET.out_stride, - "train_samples_gap": cfg.DATASET.train_samples_gap, - "eval_samples_gap": cfg.DATASET.eval_samples_gap, - "normalize_sst": cfg.DATASET.normalize_sst, - "training": "eval", - }, - "batch_size": cfg.EVAL.batch_size, - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(enso_metric.train_mse_func), - metric={ - "rmse": ppsci.metric.FunctionalMetric(enso_metric.eval_rmse_func), - }, - name="Sup_Validator", - ) - validator = {sup_validator.name: sup_validator} - - model = ppsci.arch.CuboidTransformer( - **cfg.MODEL, - ) - - decay_parameters = get_parameter_names(model, [nn.LayerNorm]) - decay_parameters = [name for name in decay_parameters if "bias" not in name] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if n in decay_parameters], - "weight_decay": cfg.TRAIN.wd, - }, - { - "params": [ - p for n, p in model.named_parameters() if n not in decay_parameters - ], - "weight_decay": 0.0, - }, - ] - - # # init optimizer and lr scheduler - lr_scheduler_cfg = 
dict(cfg.TRAIN.lr_scheduler) - lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( - **lr_scheduler_cfg, - iters_per_epoch=ITERS_PER_EPOCH, - eta_min=cfg.TRAIN.min_lr_ratio * cfg.TRAIN.lr_scheduler.learning_rate, - warmup_epoch=int(0.2 * cfg.TRAIN.epochs), - )() - optimizer = paddle.optimizer.AdamW( - lr_scheduler, parameters=optimizer_grouped_parameters - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, - validator=validator, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set eval dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "ENSODataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "in_stride": cfg.DATASET.in_stride, - "out_stride": cfg.DATASET.out_stride, - "train_samples_gap": cfg.DATASET.train_samples_gap, - "eval_samples_gap": cfg.DATASET.eval_samples_gap, - "normalize_sst": cfg.DATASET.normalize_sst, - "training": "test", - }, - "batch_size": cfg.EVAL.batch_size, - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(enso_metric.train_mse_func), - metric={ - "rmse": ppsci.metric.FunctionalMetric(enso_metric.eval_rmse_func), - }, - name="Sup_Validator", - ) - validator = {sup_validator.name: sup_validator} - - model = ppsci.arch.CuboidTransformer( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate - solver.eval() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.CuboidTransformer( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([1, 12, 24, 48, 1], "float32", name=key) - for key in model.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - import predictor - - predictor = predictor.EarthformerPredictor(cfg) - - train_cmip = xr.open_dataset(cfg.INFER.data_path).transpose( - "year", "month", "lat", "lon" - ) - # select longitudes - lon = train_cmip.lon.values - lon = lon[np.logical_and(lon >= 95, lon <= 330)] - train_cmip = train_cmip.sel(lon=lon) - data = train_cmip.sst.values - data = enso_dataset.fold(data) - - idx_sst = enso_dataset.prepare_inputs_targets( - len_time=data.shape[0], - input_length=cfg.INFER.in_len, - input_gap=cfg.INFER.in_stride, - pred_shift=cfg.INFER.out_len * cfg.INFER.out_stride, - pred_length=cfg.INFER.out_len, - samples_gap=cfg.INFER.samples_gap, - ) - data = data[idx_sst].astype("float32") - - sst_data = data[..., np.newaxis] - idx = np.random.choice(len(data), None, False) - in_seq = sst_data[idx, : cfg.INFER.in_len, ...] # ( in_len, lat, lon, 1) - in_seq = in_seq[np.newaxis, ...] 
- target_seq = sst_data[idx, cfg.INFER.in_len :, ...] # ( out_len, lat, lon, 1) - target_seq = target_seq[np.newaxis, ...] - - pred_data = predictor.predict(in_seq, cfg.INFER.batch_size) - - # save predict data - save_path = osp.join(cfg.output_dir, "result_enso_pred.npy") - np.save(save_path, pred_data) - logger.info(f"Save output to {save_path}") - - -@hydra.main( - version_base=None, - config_path="./conf", - config_name="earthformer_enso_pretrain.yaml", -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +from os import path as osp + +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig +from paddle import nn + +import examples.earthformer.enso_metric as enso_metric +import ppsci +from ppsci.data.dataset import enso_dataset +from ppsci.utils import logger + +try: + import xarray as xr +except ModuleNotFoundError: + raise ModuleNotFoundError("Please install xarray with `pip install xarray`.") + + +def get_parameter_names(model, forbidden_layer_types): + result = [] + for name, child in model.named_children(): + result += [ + f"{name}.{n}" + for n in get_parameter_names(child, forbidden_layer_types) + if not isinstance(child, tuple(forbidden_layer_types)) + ] + # Add model specific parameters (defined with nn.Parameter) since they are not in any child. + result += list(model._parameters.keys()) + return result + + +def train(cfg: DictConfig): + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "ENSODataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "in_stride": cfg.DATASET.in_stride, + "out_stride": cfg.DATASET.out_stride, + "train_samples_gap": cfg.DATASET.train_samples_gap, + "eval_samples_gap": cfg.DATASET.eval_samples_gap, + "normalize_sst": cfg.DATASET.normalize_sst, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 8, + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(enso_metric.train_mse_func), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ENSODataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "in_stride": cfg.DATASET.in_stride, + "out_stride": cfg.DATASET.out_stride, + "train_samples_gap": cfg.DATASET.train_samples_gap, + "eval_samples_gap": cfg.DATASET.eval_samples_gap, + "normalize_sst": cfg.DATASET.normalize_sst, + "training": "eval", + }, + "batch_size": cfg.EVAL.batch_size, + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(enso_metric.train_mse_func), + metric={ + "rmse": ppsci.metric.FunctionalMetric(enso_metric.eval_rmse_func), + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: 
sup_validator} + + model = ppsci.arch.CuboidTransformer( + **cfg.MODEL, + ) + + decay_parameters = get_parameter_names(model, [nn.LayerNorm]) + decay_parameters = [name for name in decay_parameters if "bias" not in name] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if n in decay_parameters], + "weight_decay": cfg.TRAIN.wd, + }, + { + "params": [ + p for n, p in model.named_parameters() if n not in decay_parameters + ], + "weight_decay": 0.0, + }, + ] + + # # init optimizer and lr scheduler + lr_scheduler_cfg = dict(cfg.TRAIN.lr_scheduler) + lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( + **lr_scheduler_cfg, + iters_per_epoch=ITERS_PER_EPOCH, + eta_min=cfg.TRAIN.min_lr_ratio * cfg.TRAIN.lr_scheduler.learning_rate, + warmup_epoch=int(0.2 * cfg.TRAIN.epochs), + )() + optimizer = paddle.optimizer.AdamW( + lr_scheduler, parameters=optimizer_grouped_parameters + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + seed=cfg.seed, + validator=validator, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ENSODataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "in_stride": cfg.DATASET.in_stride, + "out_stride": cfg.DATASET.out_stride, + "train_samples_gap": cfg.DATASET.train_samples_gap, + "eval_samples_gap": cfg.DATASET.eval_samples_gap, + "normalize_sst": cfg.DATASET.normalize_sst, + "training": "test", + }, + "batch_size": cfg.EVAL.batch_size, + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(enso_metric.train_mse_func), + metric={ + "rmse": ppsci.metric.FunctionalMetric(enso_metric.eval_rmse_func), + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + model = ppsci.arch.CuboidTransformer( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + solver.eval() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.CuboidTransformer( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([1, 12, 24, 48, 1], "float32", name=key) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + import predictor + + predictor = predictor.EarthformerPredictor(cfg) + + train_cmip = xr.open_dataset(cfg.INFER.data_path).transpose( + "year", "month", "lat", "lon" + ) + # select longitudes + lon = train_cmip.lon.values + lon = lon[np.logical_and(lon >= 95, lon <= 330)] + train_cmip = train_cmip.sel(lon=lon) + data = train_cmip.sst.values + data = 
enso_dataset.fold(data) + + idx_sst = enso_dataset.prepare_inputs_targets( + len_time=data.shape[0], + input_length=cfg.INFER.in_len, + input_gap=cfg.INFER.in_stride, + pred_shift=cfg.INFER.out_len * cfg.INFER.out_stride, + pred_length=cfg.INFER.out_len, + samples_gap=cfg.INFER.samples_gap, + ) + data = data[idx_sst].astype("float32") + + sst_data = data[..., np.newaxis] + idx = np.random.choice(len(data), None, False) + in_seq = sst_data[idx, : cfg.INFER.in_len, ...] # ( in_len, lat, lon, 1) + in_seq = in_seq[np.newaxis, ...] + target_seq = sst_data[idx, cfg.INFER.in_len :, ...] # ( out_len, lat, lon, 1) + target_seq = target_seq[np.newaxis, ...] + + pred_data = predictor.predict(in_seq, cfg.INFER.batch_size) + + # save predict data + save_path = osp.join(cfg.output_dir, "result_enso_pred.npy") + np.save(save_path, pred_data) + logger.info(f"Save output to {save_path}") + + +@hydra.main( + version_base=None, + config_path="./conf", + config_name="earthformer_enso_pretrain.yaml", +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/earthformer/earthformer_sevir_train.py b/examples/earthformer/earthformer_sevir_train.py index fdd20128a8..d8f64299a9 100644 --- a/examples/earthformer/earthformer_sevir_train.py +++ b/examples/earthformer/earthformer_sevir_train.py @@ -1,354 +1,354 @@ -import h5py -import hydra -import numpy as np -import paddle -import sevir_metric -import sevir_vis_seq -from omegaconf import DictConfig -from paddle import nn - -import ppsci - - -def get_parameter_names(model, forbidden_layer_types): - result = [] - for name, child in model.named_children(): - result += [ - f"{name}.{n}" - for n in get_parameter_names(child, forbidden_layer_types) - if not isinstance(child, tuple(forbidden_layer_types)) - ] - # Add model specific parameters (defined with nn.Parameter) since they are not in any child. 
- result += list(model._parameters.keys()) - return result - - -def train(cfg: DictConfig): - # set train dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "SEVIRDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "data_types": cfg.DATASET.data_types, - "seq_len": cfg.DATASET.seq_len, - "raw_seq_len": cfg.DATASET.raw_seq_len, - "sample_mode": cfg.DATASET.sample_mode, - "stride": cfg.DATASET.stride, - "batch_size": cfg.DATASET.batch_size, - "layout": cfg.DATASET.layout, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "split_mode": cfg.DATASET.split_mode, - "start_date": cfg.TRAIN.start_date, - "end_date": cfg.TRAIN.end_date, - "preprocess": cfg.DATASET.preprocess, - "rescale_method": cfg.DATASET.rescale_method, - "shuffle": True, - "verbose": False, - "training": True, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 8, - } - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(sevir_metric.train_mse_func), - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - # set eval dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "SEVIRDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "data_types": cfg.DATASET.data_types, - "seq_len": cfg.DATASET.seq_len, - "raw_seq_len": cfg.DATASET.raw_seq_len, - "sample_mode": cfg.DATASET.sample_mode, - "stride": cfg.DATASET.stride, - "batch_size": cfg.DATASET.batch_size, - "layout": cfg.DATASET.layout, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "split_mode": cfg.DATASET.split_mode, - "start_date": cfg.TRAIN.end_date, - "end_date": cfg.EVAL.end_date, - "preprocess": cfg.DATASET.preprocess, - "rescale_method": cfg.DATASET.rescale_method, - "shuffle": False, - "verbose": False, - "training": False, - }, - "batch_size": cfg.EVAL.batch_size, - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.MSELoss(), - metric={ - "rmse": ppsci.metric.FunctionalMetric( - sevir_metric.eval_rmse_func( - out_len=cfg.DATASET.seq_len, - layout=cfg.DATASET.layout, - metrics_mode=cfg.EVAL.metrics_mode, - metrics_list=cfg.EVAL.metrics_list, - threshold_list=cfg.EVAL.threshold_list, - ) - ), - }, - name="Sup_Validator", - ) - validator = {sup_validator.name: sup_validator} - - model = ppsci.arch.CuboidTransformer( - **cfg.MODEL, - ) - - decay_parameters = get_parameter_names(model, [nn.LayerNorm]) - decay_parameters = [name for name in decay_parameters if "bias" not in name] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if n in decay_parameters], - "weight_decay": cfg.TRAIN.wd, - }, - { - "params": [ - p for n, p in model.named_parameters() if n not in decay_parameters - ], - "weight_decay": 0.0, - }, - ] - - # init optimizer and lr scheduler - lr_scheduler_cfg = dict(cfg.TRAIN.lr_scheduler) - lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( - **lr_scheduler_cfg, - iters_per_epoch=ITERS_PER_EPOCH, - eta_min=cfg.TRAIN.min_lr_ratio * cfg.TRAIN.lr_scheduler.learning_rate, - warmup_epoch=int(0.2 * cfg.TRAIN.epochs), - )() - optimizer = paddle.optimizer.AdamW( - lr_scheduler, 
parameters=optimizer_grouped_parameters - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, - validator=validator, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - # evaluate after finished training - metric = sevir_metric.eval_rmse_func( - out_len=cfg.DATASET.seq_len, - layout=cfg.DATASET.layout, - metrics_mode=cfg.EVAL.metrics_mode, - metrics_list=cfg.EVAL.metrics_list, - threshold_list=cfg.EVAL.threshold_list, - ) - - with solver.no_grad_context_manager(True): - for index, (input_, label, _) in enumerate(sup_validator.data_loader): - truefield = label["vil"].squeeze(0) - prefield = model(input_)["vil"].squeeze(0) - metric.sevir_score.update(prefield, truefield) - - metric_dict = metric.sevir_score.compute() - print(metric_dict) - - -def evaluate(cfg: DictConfig): - # set eval dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "SEVIRDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "data_types": cfg.DATASET.data_types, - "seq_len": cfg.DATASET.seq_len, - "raw_seq_len": cfg.DATASET.raw_seq_len, - "sample_mode": cfg.DATASET.sample_mode, - "stride": cfg.DATASET.stride, - "batch_size": cfg.DATASET.batch_size, - "layout": cfg.DATASET.layout, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "split_mode": cfg.DATASET.split_mode, - "start_date": cfg.TEST.start_date, - "end_date": cfg.TEST.end_date, - "preprocess": cfg.DATASET.preprocess, - "rescale_method": cfg.DATASET.rescale_method, - "shuffle": False, - "verbose": False, - "training": False, - }, - "batch_size": cfg.EVAL.batch_size, - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.MSELoss(), - metric={ - "rmse": ppsci.metric.FunctionalMetric( - sevir_metric.eval_rmse_func( - out_len=cfg.DATASET.seq_len, - layout=cfg.DATASET.layout, - metrics_mode=cfg.EVAL.metrics_mode, - metrics_list=cfg.EVAL.metrics_list, - threshold_list=cfg.EVAL.threshold_list, - ) - ), - }, - name="Sup_Validator", - ) - validator = {sup_validator.name: sup_validator} - - model = ppsci.arch.CuboidTransformer( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate - metric = sevir_metric.eval_rmse_func( - out_len=cfg.DATASET.seq_len, - layout=cfg.DATASET.layout, - metrics_mode=cfg.EVAL.metrics_mode, - metrics_list=cfg.EVAL.metrics_list, - threshold_list=cfg.EVAL.threshold_list, - ) - - with solver.no_grad_context_manager(True): - for index, (input_, label, _) in enumerate(sup_validator.data_loader): - truefield = label["vil"].reshape([-1, *label["vil"].shape[2:]]) - prefield = model(input_)["vil"].reshape([-1, *label["vil"].shape[2:]]) - metric.sevir_score.update(prefield, truefield) - - metric_dict = metric.sevir_score.compute() - print(metric_dict) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.CuboidTransformer( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - 
pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([1, 13, 384, 384, 1], "float32", name=key) - for key in model.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - import predictor - - from ppsci.data.dataset import sevir_dataset - - predictor = predictor.EarthformerPredictor(cfg) - - if cfg.INFER.rescale_method == "sevir": - scale_dict = sevir_dataset.PREPROCESS_SCALE_SEVIR - offset_dict = sevir_dataset.PREPROCESS_OFFSET_SEVIR - elif cfg.INFER.rescale_method == "01": - scale_dict = sevir_dataset.PREPROCESS_SCALE_01 - offset_dict = sevir_dataset.PREPROCESS_OFFSET_01 - else: - raise ValueError(f"Invalid rescale option: {cfg.INFER.rescale_method}.") - - # read h5 data - h5data = h5py.File(cfg.INFER.data_path, "r") - data = np.array(h5data[cfg.INFER.data_type]).transpose([0, 3, 1, 2]) - - idx = np.random.choice(len(data), None, False) - data = ( - scale_dict[cfg.INFER.data_type] * data[idx] + offset_dict[cfg.INFER.data_type] - ) - - input_data = data[: cfg.INFER.in_len, ...] - input_data = input_data.reshape(1, *input_data.shape, 1).astype(np.float32) - target_data = data[cfg.INFER.in_len : cfg.INFER.in_len + cfg.INFER.out_len, ...] - target_data = target_data.reshape(1, *target_data.shape, 1).astype(np.float32) - - pred_data = predictor.predict(input_data, cfg.INFER.batch_size) - - sevir_vis_seq.save_example_vis_results( - save_dir=cfg.INFER.sevir_vis_save, - save_prefix=f"data_{idx}", - in_seq=input_data, - target_seq=target_data, - pred_seq=pred_data, - layout=cfg.INFER.layout, - plot_stride=cfg.INFER.plot_stride, - label=cfg.INFER.logging_prefix, - interval_real_time=cfg.INFER.interval_real_time, - ) - - -@hydra.main( - version_base=None, - config_path="./conf", - config_name="earthformer_sevir_pretrain.yaml", -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +import h5py +import hydra +import numpy as np +import paddle +import sevir_metric +import sevir_vis_seq +from omegaconf import DictConfig +from paddle import nn + +import ppsci + + +def get_parameter_names(model, forbidden_layer_types): + result = [] + for name, child in model.named_children(): + result += [ + f"{name}.{n}" + for n in get_parameter_names(child, forbidden_layer_types) + if not isinstance(child, tuple(forbidden_layer_types)) + ] + # Add model specific parameters (defined with nn.Parameter) since they are not in any child. 
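+    # Net effect: every parameter name is returned except those belonging to a
+    # (sub)module of a forbidden type, e.g. with forbidden_layer_types=[nn.LayerNorm]
+    # the Linear weights/biases are kept but the LayerNorm ones are dropped, so no
+    # weight decay is applied to them when the optimizer groups are built below.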
+ result += list(model._parameters.keys()) + return result + + +def train(cfg: DictConfig): + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "SEVIRDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "data_types": cfg.DATASET.data_types, + "seq_len": cfg.DATASET.seq_len, + "raw_seq_len": cfg.DATASET.raw_seq_len, + "sample_mode": cfg.DATASET.sample_mode, + "stride": cfg.DATASET.stride, + "batch_size": cfg.DATASET.batch_size, + "layout": cfg.DATASET.layout, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "split_mode": cfg.DATASET.split_mode, + "start_date": cfg.TRAIN.start_date, + "end_date": cfg.TRAIN.end_date, + "preprocess": cfg.DATASET.preprocess, + "rescale_method": cfg.DATASET.rescale_method, + "shuffle": True, + "verbose": False, + "training": True, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 8, + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(sevir_metric.train_mse_func), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "SEVIRDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "data_types": cfg.DATASET.data_types, + "seq_len": cfg.DATASET.seq_len, + "raw_seq_len": cfg.DATASET.raw_seq_len, + "sample_mode": cfg.DATASET.sample_mode, + "stride": cfg.DATASET.stride, + "batch_size": cfg.DATASET.batch_size, + "layout": cfg.DATASET.layout, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "split_mode": cfg.DATASET.split_mode, + "start_date": cfg.TRAIN.end_date, + "end_date": cfg.EVAL.end_date, + "preprocess": cfg.DATASET.preprocess, + "rescale_method": cfg.DATASET.rescale_method, + "shuffle": False, + "verbose": False, + "training": False, + }, + "batch_size": cfg.EVAL.batch_size, + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.MSELoss(), + metric={ + "rmse": ppsci.metric.FunctionalMetric( + sevir_metric.eval_rmse_func( + out_len=cfg.DATASET.seq_len, + layout=cfg.DATASET.layout, + metrics_mode=cfg.EVAL.metrics_mode, + metrics_list=cfg.EVAL.metrics_list, + threshold_list=cfg.EVAL.threshold_list, + ) + ), + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + model = ppsci.arch.CuboidTransformer( + **cfg.MODEL, + ) + + decay_parameters = get_parameter_names(model, [nn.LayerNorm]) + decay_parameters = [name for name in decay_parameters if "bias" not in name] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if n in decay_parameters], + "weight_decay": cfg.TRAIN.wd, + }, + { + "params": [ + p for n, p in model.named_parameters() if n not in decay_parameters + ], + "weight_decay": 0.0, + }, + ] + + # init optimizer and lr scheduler + lr_scheduler_cfg = dict(cfg.TRAIN.lr_scheduler) + lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( + **lr_scheduler_cfg, + iters_per_epoch=ITERS_PER_EPOCH, + eta_min=cfg.TRAIN.min_lr_ratio * cfg.TRAIN.lr_scheduler.learning_rate, + warmup_epoch=int(0.2 * cfg.TRAIN.epochs), + )() + optimizer = paddle.optimizer.AdamW( + lr_scheduler, 
parameters=optimizer_grouped_parameters + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + seed=cfg.seed, + validator=validator, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + # evaluate after finished training + metric = sevir_metric.eval_rmse_func( + out_len=cfg.DATASET.seq_len, + layout=cfg.DATASET.layout, + metrics_mode=cfg.EVAL.metrics_mode, + metrics_list=cfg.EVAL.metrics_list, + threshold_list=cfg.EVAL.threshold_list, + ) + + with solver.no_grad_context_manager(True): + for index, (input_, label, _) in enumerate(sup_validator.data_loader): + truefield = label["vil"].squeeze(0) + prefield = model(input_)["vil"].squeeze(0) + metric.sevir_score.update(prefield, truefield) + + metric_dict = metric.sevir_score.compute() + print(metric_dict) + + +def evaluate(cfg: DictConfig): + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "SEVIRDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "data_types": cfg.DATASET.data_types, + "seq_len": cfg.DATASET.seq_len, + "raw_seq_len": cfg.DATASET.raw_seq_len, + "sample_mode": cfg.DATASET.sample_mode, + "stride": cfg.DATASET.stride, + "batch_size": cfg.DATASET.batch_size, + "layout": cfg.DATASET.layout, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "split_mode": cfg.DATASET.split_mode, + "start_date": cfg.TEST.start_date, + "end_date": cfg.TEST.end_date, + "preprocess": cfg.DATASET.preprocess, + "rescale_method": cfg.DATASET.rescale_method, + "shuffle": False, + "verbose": False, + "training": False, + }, + "batch_size": cfg.EVAL.batch_size, + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.MSELoss(), + metric={ + "rmse": ppsci.metric.FunctionalMetric( + sevir_metric.eval_rmse_func( + out_len=cfg.DATASET.seq_len, + layout=cfg.DATASET.layout, + metrics_mode=cfg.EVAL.metrics_mode, + metrics_list=cfg.EVAL.metrics_list, + threshold_list=cfg.EVAL.threshold_list, + ) + ), + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + model = ppsci.arch.CuboidTransformer( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + metric = sevir_metric.eval_rmse_func( + out_len=cfg.DATASET.seq_len, + layout=cfg.DATASET.layout, + metrics_mode=cfg.EVAL.metrics_mode, + metrics_list=cfg.EVAL.metrics_list, + threshold_list=cfg.EVAL.threshold_list, + ) + + with solver.no_grad_context_manager(True): + for index, (input_, label, _) in enumerate(sup_validator.data_loader): + truefield = label["vil"].reshape([-1, *label["vil"].shape[2:]]) + prefield = model(input_)["vil"].reshape([-1, *label["vil"].shape[2:]]) + metric.sevir_score.update(prefield, truefield) + + metric_dict = metric.sevir_score.compute() + print(metric_dict) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.CuboidTransformer( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + 
pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([1, 13, 384, 384, 1], "float32", name=key) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + import predictor + + from ppsci.data.dataset import sevir_dataset + + predictor = predictor.EarthformerPredictor(cfg) + + if cfg.INFER.rescale_method == "sevir": + scale_dict = sevir_dataset.PREPROCESS_SCALE_SEVIR + offset_dict = sevir_dataset.PREPROCESS_OFFSET_SEVIR + elif cfg.INFER.rescale_method == "01": + scale_dict = sevir_dataset.PREPROCESS_SCALE_01 + offset_dict = sevir_dataset.PREPROCESS_OFFSET_01 + else: + raise ValueError(f"Invalid rescale option: {cfg.INFER.rescale_method}.") + + # read h5 data + h5data = h5py.File(cfg.INFER.data_path, "r") + data = np.array(h5data[cfg.INFER.data_type]).transpose([0, 3, 1, 2]) + + idx = np.random.choice(len(data), None, False) + data = ( + scale_dict[cfg.INFER.data_type] * data[idx] + offset_dict[cfg.INFER.data_type] + ) + + input_data = data[: cfg.INFER.in_len, ...] + input_data = input_data.reshape(1, *input_data.shape, 1).astype(np.float32) + target_data = data[cfg.INFER.in_len : cfg.INFER.in_len + cfg.INFER.out_len, ...] + target_data = target_data.reshape(1, *target_data.shape, 1).astype(np.float32) + + pred_data = predictor.predict(input_data, cfg.INFER.batch_size) + + sevir_vis_seq.save_example_vis_results( + save_dir=cfg.INFER.sevir_vis_save, + save_prefix=f"data_{idx}", + in_seq=input_data, + target_seq=target_data, + pred_seq=pred_data, + layout=cfg.INFER.layout, + plot_stride=cfg.INFER.plot_stride, + label=cfg.INFER.logging_prefix, + interval_real_time=cfg.INFER.interval_real_time, + ) + + +@hydra.main( + version_base=None, + config_path="./conf", + config_name="earthformer_sevir_pretrain.yaml", +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/earthformer/enso_metric.py b/examples/earthformer/enso_metric.py index 7e398e0b89..0bfcc14a9f 100644 --- a/examples/earthformer/enso_metric.py +++ b/examples/earthformer/enso_metric.py @@ -1,127 +1,127 @@ -from typing import Dict -from typing import Optional -from typing import Union - -import numpy as np -import paddle -from paddle.nn import functional as F - -from ppsci.data.dataset.enso_dataset import NINO_WINDOW_T -from ppsci.data.dataset.enso_dataset import scale_back_sst - - -def compute_enso_score( - y_pred: paddle.Tensor, - y_true: paddle.Tensor, - acc_weight: Optional[Union[str, np.ndarray, paddle.Tensor]] = None, -): - """Compute the accuracy and Root Mean Squared Error (RMSE) of enso dataset. - - Args: - y_pred (paddle.Tensor): The predict data. - y_true (paddle.Tensor): The label data. - acc_weight (Optional[Union[str, np.ndarray, paddle.Tensor]], optional): The wight of accuracy. Defaults to None.use - default acc_weight specified at https://tianchi.aliyun.com/competition/entrance/531871/information. 
- """ - - pred = y_pred - y_pred.mean(axis=0, keepdim=True) # (N, 24) - true = y_true - y_true.mean(axis=0, keepdim=True) # (N, 24) - cor = (pred * true).sum(axis=0) / ( - paddle.sqrt(paddle.sum(pred**2, axis=0) * paddle.sum(true**2, axis=0)) - + 1e-6 - ) - - if acc_weight is None: - acc = cor.sum() - else: - nino_out_len = y_true.shape[-1] - if acc_weight == "default": - acc_weight = paddle.to_tensor( - [1.5] * 4 + [2] * 7 + [3] * 7 + [4] * (nino_out_len - 18) - )[:nino_out_len] * paddle.log(paddle.arange(nino_out_len) + 1) - elif isinstance(acc_weight, np.ndarray): - acc_weight = paddle.to_tensor(acc_weight[:nino_out_len]) - elif isinstance(acc_weight, paddle.Tensor): - acc_weight = acc_weight[:nino_out_len] - else: - raise ValueError(f"Invalid acc_weight {acc_weight}!") - acc_weight = acc_weight.to(y_pred) - acc = (acc_weight * cor).sum() - rmse = paddle.mean((y_pred - y_true) ** 2, axis=0).sqrt().sum() - return acc, rmse - - -def sst_to_nino(sst: paddle.Tensor, normalize_sst: bool = True, detach: bool = True): - """Convert sst to nino index. - - Args: - sst (paddle.Tensor): The predict data for sst. Shape = (N, T, H, W) - normalize_sst (bool, optional): Whether to use normalize for sst. Defaults to True. - detach (bool, optional): Whether to detach the tensor. Defaults to True. - - Returns: - nino_index (paddle.Tensor): The nino index. Shape = (N, T-NINO_WINDOW_T+1) - """ - - if detach: - nino_index = sst.detach() - else: - nino_index = sst - if normalize_sst: - nino_index = scale_back_sst(nino_index) - nino_index = nino_index[:, :, 10:13, 19:30].mean(axis=[2, 3]) # (N, 26) - nino_index = nino_index.unfold(axis=1, size=NINO_WINDOW_T, step=1).mean( - axis=2 - ) # (N, 24) - - return nino_index - - -def train_mse_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - *args, -) -> paddle.Tensor: - return { - "sst_target": F.mse_loss(output_dict["sst_target"], label_dict["sst_target"]) - } - - -def eval_rmse_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - nino_out_len: int = 12, - *args, -) -> Dict[str, paddle.Tensor]: - pred = output_dict["sst_target"] - sst_target = label_dict["sst_target"] - nino_target = label_dict["nino_target"].astype("float32") - # mse - mae = F.l1_loss(pred, sst_target) - # mse - mse = F.mse_loss(pred, sst_target) - # rmse - nino_preds = sst_to_nino(sst=pred[..., 0]) - nino_preds_list, nino_target_list = map(list, zip((nino_preds, nino_target))) - nino_preds_list = paddle.concat(nino_preds_list, axis=0) - nino_target_list = paddle.concat(nino_target_list, axis=0) - - valid_acc, valid_nino_rmse = compute_enso_score( - y_pred=nino_preds_list, y_true=nino_target_list, acc_weight=None - ) - valid_weighted_acc, _ = compute_enso_score( - y_pred=nino_preds_list, y_true=nino_target_list, acc_weight="default" - ) - valid_acc /= nino_out_len - valid_nino_rmse /= nino_out_len - valid_weighted_acc /= nino_out_len - valid_loss = -valid_acc - - return { - "valid_loss_epoch": valid_loss, - "mse": mse, - "mae": mae, - "rmse": valid_nino_rmse, - "corr_nino3.4_epoch": valid_acc, - "corr_nino3.4_weighted_epoch": valid_weighted_acc, - } +from typing import Dict +from typing import Optional +from typing import Union + +import numpy as np +import paddle +from paddle.nn import functional as F + +from ppsci.data.dataset.enso_dataset import NINO_WINDOW_T +from ppsci.data.dataset.enso_dataset import scale_back_sst + + +def compute_enso_score( + y_pred: paddle.Tensor, + y_true: paddle.Tensor, + acc_weight: 
Optional[Union[str, np.ndarray, paddle.Tensor]] = None, +): + """Compute the accuracy and Root Mean Squared Error (RMSE) of enso dataset. + + Args: + y_pred (paddle.Tensor): The predict data. + y_true (paddle.Tensor): The label data. + acc_weight (Optional[Union[str, np.ndarray, paddle.Tensor]], optional): The wight of accuracy. Defaults to None.use + default acc_weight specified at https://tianchi.aliyun.com/competition/entrance/531871/information. + """ + + pred = y_pred - y_pred.mean(axis=0, keepdim=True) # (N, 24) + true = y_true - y_true.mean(axis=0, keepdim=True) # (N, 24) + cor = (pred * true).sum(axis=0) / ( + paddle.sqrt(paddle.sum(pred**2, axis=0) * paddle.sum(true**2, axis=0)) + + 1e-6 + ) + + if acc_weight is None: + acc = cor.sum() + else: + nino_out_len = y_true.shape[-1] + if acc_weight == "default": + acc_weight = paddle.to_tensor( + [1.5] * 4 + [2] * 7 + [3] * 7 + [4] * (nino_out_len - 18) + )[:nino_out_len] * paddle.log(paddle.arange(nino_out_len) + 1) + elif isinstance(acc_weight, np.ndarray): + acc_weight = paddle.to_tensor(acc_weight[:nino_out_len]) + elif isinstance(acc_weight, paddle.Tensor): + acc_weight = acc_weight[:nino_out_len] + else: + raise ValueError(f"Invalid acc_weight {acc_weight}!") + acc_weight = acc_weight.to(y_pred) + acc = (acc_weight * cor).sum() + rmse = paddle.mean((y_pred - y_true) ** 2, axis=0).sqrt().sum() + return acc, rmse + + +def sst_to_nino(sst: paddle.Tensor, normalize_sst: bool = True, detach: bool = True): + """Convert sst to nino index. + + Args: + sst (paddle.Tensor): The predict data for sst. Shape = (N, T, H, W) + normalize_sst (bool, optional): Whether to use normalize for sst. Defaults to True. + detach (bool, optional): Whether to detach the tensor. Defaults to True. + + Returns: + nino_index (paddle.Tensor): The nino index. 
Shape = (N, T-NINO_WINDOW_T+1) + """ + + if detach: + nino_index = sst.detach() + else: + nino_index = sst + if normalize_sst: + nino_index = scale_back_sst(nino_index) + nino_index = nino_index[:, :, 10:13, 19:30].mean(axis=[2, 3]) # (N, 26) + nino_index = nino_index.unfold(axis=1, size=NINO_WINDOW_T, step=1).mean( + axis=2 + ) # (N, 24) + + return nino_index + + +def train_mse_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + *args, +) -> paddle.Tensor: + return { + "sst_target": F.mse_loss(output_dict["sst_target"], label_dict["sst_target"]) + } + + +def eval_rmse_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + nino_out_len: int = 12, + *args, +) -> Dict[str, paddle.Tensor]: + pred = output_dict["sst_target"] + sst_target = label_dict["sst_target"] + nino_target = label_dict["nino_target"].astype("float32") + # mse + mae = F.l1_loss(pred, sst_target) + # mse + mse = F.mse_loss(pred, sst_target) + # rmse + nino_preds = sst_to_nino(sst=pred[..., 0]) + nino_preds_list, nino_target_list = map(list, zip((nino_preds, nino_target))) + nino_preds_list = paddle.concat(nino_preds_list, axis=0) + nino_target_list = paddle.concat(nino_target_list, axis=0) + + valid_acc, valid_nino_rmse = compute_enso_score( + y_pred=nino_preds_list, y_true=nino_target_list, acc_weight=None + ) + valid_weighted_acc, _ = compute_enso_score( + y_pred=nino_preds_list, y_true=nino_target_list, acc_weight="default" + ) + valid_acc /= nino_out_len + valid_nino_rmse /= nino_out_len + valid_weighted_acc /= nino_out_len + valid_loss = -valid_acc + + return { + "valid_loss_epoch": valid_loss, + "mse": mse, + "mae": mae, + "rmse": valid_nino_rmse, + "corr_nino3.4_epoch": valid_acc, + "corr_nino3.4_weighted_epoch": valid_weighted_acc, + } diff --git a/examples/earthformer/predictor.py b/examples/earthformer/predictor.py index a7dbb5ed06..063668d999 100644 --- a/examples/earthformer/predictor.py +++ b/examples/earthformer/predictor.py @@ -1,93 +1,93 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import numpy as np -from omegaconf import DictConfig - -from deploy.python_infer import base - - -class EarthformerPredictor(base.Predictor): - """General predictor for Earthformer model. - - Args: - cfg (DictConfig): Running configuration. 
- """ - - def __init__( - self, - cfg: DictConfig, - ): - super().__init__( - cfg.INFER.pdmodel_path, - cfg.INFER.pdiparams_path, - device=cfg.INFER.device, - engine=cfg.INFER.engine, - precision=cfg.INFER.precision, - onnx_path=cfg.INFER.onnx_path, - ir_optim=cfg.INFER.ir_optim, - min_subgraph_size=cfg.INFER.min_subgraph_size, - gpu_mem=cfg.INFER.gpu_mem, - gpu_id=cfg.INFER.gpu_id, - max_batch_size=cfg.INFER.max_batch_size, - num_cpu_threads=cfg.INFER.num_cpu_threads, - ) - self.log_freq = cfg.log_freq - - # get input names and data handles - self.input_names = self.predictor.get_input_names() - self.input_data_handle = self.predictor.get_input_handle(self.input_names[0]) - - # get output names and data handles - self.output_names = self.predictor.get_output_names() - self.output_handle = self.predictor.get_output_handle(self.output_names[0]) - - def predict( - self, - input_data: np.ndarray, - batch_size: int = 1, - ) -> np.ndarray: - """Predicts the output of the yinglong model for the given input. - - Args: - input_data (np.ndarray): Input data of shape (N, T, H, W). - batch_size (int, optional): Batch size, now only support 1. Defaults to 1. - Returns: - np.ndarray: Prediction. - """ - if batch_size != 1: - raise ValueError( - f"EarthformerPredictor only support batch_size=1, but got {batch_size}" - ) - # prepare input handle(s) - input_handles = {self.input_names[0]: self.input_data_handle} - # prepare output handle(s) - output_handles = {self.output_names[0]: self.output_handle} - - # prepare batch input dict - batch_input_dict = { - self.input_names[0]: input_data, - } - # send batch input data to input handle(s) - for name, handle in input_handles.items(): - handle.copy_from_cpu(batch_input_dict[name]) - - # run predictor - self.predictor.run() - - # receive batch output data from output handle(s) - pred = output_handles[self.output_names[0]].copy_to_cpu() - - return pred +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import numpy as np +from omegaconf import DictConfig + +from deploy.python_infer import base + + +class EarthformerPredictor(base.Predictor): + """General predictor for Earthformer model. + + Args: + cfg (DictConfig): Running configuration. 
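+
+    Examples:
+        A usage sketch; ``cfg`` is assumed to be the Hydra config loaded from one of
+        the ``earthformer_*_pretrain.yaml`` files with its ``INFER`` section set:
+
+        >>> predictor = EarthformerPredictor(cfg)
+        >>> pred = predictor.predict(input_data, batch_size=1)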
+ """ + + def __init__( + self, + cfg: DictConfig, + ): + super().__init__( + cfg.INFER.pdmodel_path, + cfg.INFER.pdiparams_path, + device=cfg.INFER.device, + engine=cfg.INFER.engine, + precision=cfg.INFER.precision, + onnx_path=cfg.INFER.onnx_path, + ir_optim=cfg.INFER.ir_optim, + min_subgraph_size=cfg.INFER.min_subgraph_size, + gpu_mem=cfg.INFER.gpu_mem, + gpu_id=cfg.INFER.gpu_id, + max_batch_size=cfg.INFER.max_batch_size, + num_cpu_threads=cfg.INFER.num_cpu_threads, + ) + self.log_freq = cfg.log_freq + + # get input names and data handles + self.input_names = self.predictor.get_input_names() + self.input_data_handle = self.predictor.get_input_handle(self.input_names[0]) + + # get output names and data handles + self.output_names = self.predictor.get_output_names() + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) + + def predict( + self, + input_data: np.ndarray, + batch_size: int = 1, + ) -> np.ndarray: + """Predicts the output of the yinglong model for the given input. + + Args: + input_data (np.ndarray): Input data of shape (N, T, H, W). + batch_size (int, optional): Batch size, now only support 1. Defaults to 1. + Returns: + np.ndarray: Prediction. + """ + if batch_size != 1: + raise ValueError( + f"EarthformerPredictor only support batch_size=1, but got {batch_size}" + ) + # prepare input handle(s) + input_handles = {self.input_names[0]: self.input_data_handle} + # prepare output handle(s) + output_handles = {self.output_names[0]: self.output_handle} + + # prepare batch input dict + batch_input_dict = { + self.input_names[0]: input_data, + } + # send batch input data to input handle(s) + for name, handle in input_handles.items(): + handle.copy_from_cpu(batch_input_dict[name]) + + # run predictor + self.predictor.run() + + # receive batch output data from output handle(s) + pred = output_handles[self.output_names[0]].copy_to_cpu() + + return pred diff --git a/examples/earthformer/sevir_cmap.py b/examples/earthformer/sevir_cmap.py index 27f8ac903a..c743753a4c 100644 --- a/examples/earthformer/sevir_cmap.py +++ b/examples/earthformer/sevir_cmap.py @@ -1,334 +1,334 @@ -"""Code is adapted from https://github.com/MIT-AI-Accelerator/neurips-2020-sevir. 
Their license is MIT License.""" - -from copy import deepcopy - -import numpy as np -from matplotlib.colors import BoundaryNorm -from matplotlib.colors import ListedColormap - -VIL_COLORS = [ - [0, 0, 0], - [0.30196078431372547, 0.30196078431372547, 0.30196078431372547], - [0.1568627450980392, 0.7450980392156863, 0.1568627450980392], - [0.09803921568627451, 0.5882352941176471, 0.09803921568627451], - [0.0392156862745098, 0.4117647058823529, 0.0392156862745098], - [0.0392156862745098, 0.29411764705882354, 0.0392156862745098], - [0.9607843137254902, 0.9607843137254902, 0.0], - [0.9294117647058824, 0.6745098039215687, 0.0], - [0.9411764705882353, 0.43137254901960786, 0.0], - [0.6274509803921569, 0.0, 0.0], - [0.9058823529411765, 0.0, 1.0], -] - -VIL_LEVELS = [0.0, 16.0, 31.0, 59.0, 74.0, 100.0, 133.0, 160.0, 181.0, 219.0, 255.0] - - -def get_cmap(type, encoded=True): - if type.lower() == "vis": - cmap, norm = vis_cmap(encoded) - vmin, vmax = (0, 10000) if encoded else (0, 1) - elif type.lower() == "vil": - cmap, norm = vil_cmap(encoded) - vmin, vmax = None, None - elif type.lower() == "ir069": - cmap, norm = c09_cmap(encoded) - vmin, vmax = (-8000, -1000) if encoded else (-80, -10) - elif type.lower() == "lght": - cmap, norm = "hot", None - vmin, vmax = 0, 5 - else: - cmap, norm = "jet", None - vmin, vmax = (-7000, 2000) if encoded else (-70, 20) - return cmap, norm, vmin, vmax - - -def vil_cmap(encoded=True): - cols = deepcopy(VIL_COLORS) - lev = deepcopy(VIL_LEVELS) - # Exactly the same error occurs in the original implementation (https://github.com/MIT-AI-Accelerator/neurips-2020-sevir/blob/master/src/display/display.py). - # ValueError: There are 10 color bins including extensions, but ncolors = 9; ncolors must equal or exceed the number of bins - # We can not replicate the visualization in notebook (https://github.com/MIT-AI-Accelerator/neurips-2020-sevir/blob/master/notebooks/AnalyzeNowcast.ipynb) without error. 
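The workaround in vil_cmap (keeping over = cols[-1] instead of popping it) exists because matplotlib's BoundaryNorm requires at least len(boundaries) - 1 colors; popping both the "bad" and the "over" entry from the 11-color palette, as the upstream display.py does, leaves 9 colors for 10 bins and raises the ValueError quoted in the comment above. A small self-contained illustration with a made-up 5-level palette:

import numpy as np
from matplotlib.colors import BoundaryNorm, ListedColormap

levels = [0.0, 16.0, 31.0, 59.0, 74.0]  # 5 boundaries -> 4 bins
palette = [[0, 0, 0], [0.3, 0.3, 0.3], [0.2, 0.7, 0.2], [0.1, 0.6, 0.1], [0.9, 0.0, 1.0]]

bad = palette.pop(0)             # reserve the first color for NaNs
cmap = ListedColormap(palette)   # cmap.N == 4 == len(levels) - 1, so BoundaryNorm accepts it
cmap.set_bad(bad)
cmap.set_under(palette[0])
cmap.set_over(palette[-1])       # reuse the last color rather than popping it off the list
norm = BoundaryNorm(levels, cmap.N)  # would raise ValueError if cmap.N were only 3
print(cmap.N, norm(np.array([10.0, 40.0, 200.0])))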
- nil = cols.pop(0) - under = cols[0] - # over = cols.pop() - over = cols[-1] - cmap = ListedColormap(cols) - cmap.set_bad(nil) - cmap.set_under(under) - cmap.set_over(over) - norm = BoundaryNorm(lev, cmap.N) - return cmap, norm - - -def vis_cmap(encoded=True): - cols = [ - [0, 0, 0], - [0.0392156862745098, 0.0392156862745098, 0.0392156862745098], - [0.0784313725490196, 0.0784313725490196, 0.0784313725490196], - [0.11764705882352941, 0.11764705882352941, 0.11764705882352941], - [0.1568627450980392, 0.1568627450980392, 0.1568627450980392], - [0.19607843137254902, 0.19607843137254902, 0.19607843137254902], - [0.23529411764705882, 0.23529411764705882, 0.23529411764705882], - [0.27450980392156865, 0.27450980392156865, 0.27450980392156865], - [0.3137254901960784, 0.3137254901960784, 0.3137254901960784], - [0.35294117647058826, 0.35294117647058826, 0.35294117647058826], - [0.39215686274509803, 0.39215686274509803, 0.39215686274509803], - [0.43137254901960786, 0.43137254901960786, 0.43137254901960786], - [0.47058823529411764, 0.47058823529411764, 0.47058823529411764], - [0.5098039215686274, 0.5098039215686274, 0.5098039215686274], - [0.5490196078431373, 0.5490196078431373, 0.5490196078431373], - [0.5882352941176471, 0.5882352941176471, 0.5882352941176471], - [0.6274509803921569, 0.6274509803921569, 0.6274509803921569], - [0.6666666666666666, 0.6666666666666666, 0.6666666666666666], - [0.7058823529411765, 0.7058823529411765, 0.7058823529411765], - [0.7450980392156863, 0.7450980392156863, 0.7450980392156863], - [0.7843137254901961, 0.7843137254901961, 0.7843137254901961], - [0.8235294117647058, 0.8235294117647058, 0.8235294117647058], - [0.8627450980392157, 0.8627450980392157, 0.8627450980392157], - [0.9019607843137255, 0.9019607843137255, 0.9019607843137255], - [0.9411764705882353, 0.9411764705882353, 0.9411764705882353], - [0.9803921568627451, 0.9803921568627451, 0.9803921568627451], - [0.9803921568627451, 0.9803921568627451, 0.9803921568627451], - ] - lev = np.array( - [ - 0.0, - 0.02, - 0.04, - 0.06, - 0.08, - 0.1, - 0.12, - 0.14, - 0.16, - 0.2, - 0.24, - 0.28, - 0.32, - 0.36, - 0.4, - 0.44, - 0.48, - 0.52, - 0.56, - 0.6, - 0.64, - 0.68, - 0.72, - 0.76, - 0.8, - 0.9, - 1.0, - ] - ) - if encoded: - lev *= 1e4 - nil = cols.pop(0) - under = cols[0] - over = cols.pop() - cmap = ListedColormap(cols) - cmap.set_bad(nil) - cmap.set_under(under) - cmap.set_over(over) - norm = BoundaryNorm(lev, cmap.N) - return cmap, norm - - -def ir_cmap(encoded=True): - cols = [ - [0, 0, 0], - [1.0, 1.0, 1.0], - [0.9803921568627451, 0.9803921568627451, 0.9803921568627451], - [0.9411764705882353, 0.9411764705882353, 0.9411764705882353], - [0.9019607843137255, 0.9019607843137255, 0.9019607843137255], - [0.8627450980392157, 0.8627450980392157, 0.8627450980392157], - [0.8235294117647058, 0.8235294117647058, 0.8235294117647058], - [0.7843137254901961, 0.7843137254901961, 0.7843137254901961], - [0.7450980392156863, 0.7450980392156863, 0.7450980392156863], - [0.7058823529411765, 0.7058823529411765, 0.7058823529411765], - [0.6666666666666666, 0.6666666666666666, 0.6666666666666666], - [0.6274509803921569, 0.6274509803921569, 0.6274509803921569], - [0.5882352941176471, 0.5882352941176471, 0.5882352941176471], - [0.5490196078431373, 0.5490196078431373, 0.5490196078431373], - [0.5098039215686274, 0.5098039215686274, 0.5098039215686274], - [0.47058823529411764, 0.47058823529411764, 0.47058823529411764], - [0.43137254901960786, 0.43137254901960786, 0.43137254901960786], - [0.39215686274509803, 0.39215686274509803, 
0.39215686274509803], - [0.35294117647058826, 0.35294117647058826, 0.35294117647058826], - [0.3137254901960784, 0.3137254901960784, 0.3137254901960784], - [0.27450980392156865, 0.27450980392156865, 0.27450980392156865], - [0.23529411764705882, 0.23529411764705882, 0.23529411764705882], - [0.19607843137254902, 0.19607843137254902, 0.19607843137254902], - [0.1568627450980392, 0.1568627450980392, 0.1568627450980392], - [0.11764705882352941, 0.11764705882352941, 0.11764705882352941], - [0.0784313725490196, 0.0784313725490196, 0.0784313725490196], - [0.0392156862745098, 0.0392156862745098, 0.0392156862745098], - [0.0, 0.803921568627451, 0.803921568627451], - ] - lev = np.array( - [ - -110.0, - -105.2, - -95.2, - -85.2, - -75.2, - -65.2, - -55.2, - -45.2, - -35.2, - -28.2, - -23.2, - -18.2, - -13.2, - -8.2, - -3.2, - 1.8, - 6.8, - 11.8, - 16.8, - 21.8, - 26.8, - 31.8, - 36.8, - 41.8, - 46.8, - 51.8, - 90.0, - 100.0, - ] - ) - if encoded: - lev *= 1e2 - nil = cols.pop(0) - under = cols[0] - over = cols.pop() - cmap = ListedColormap(cols) - cmap.set_bad(nil) - cmap.set_under(under) - cmap.set_over(over) - norm = BoundaryNorm(lev, cmap.N) - return cmap, norm - - -def c09_cmap(encoded=True): - cols = [ - [1.000000, 0.000000, 0.000000], - [1.000000, 0.031373, 0.000000], - [1.000000, 0.062745, 0.000000], - [1.000000, 0.094118, 0.000000], - [1.000000, 0.125490, 0.000000], - [1.000000, 0.156863, 0.000000], - [1.000000, 0.188235, 0.000000], - [1.000000, 0.219608, 0.000000], - [1.000000, 0.250980, 0.000000], - [1.000000, 0.282353, 0.000000], - [1.000000, 0.313725, 0.000000], - [1.000000, 0.349020, 0.003922], - [1.000000, 0.380392, 0.003922], - [1.000000, 0.411765, 0.003922], - [1.000000, 0.443137, 0.003922], - [1.000000, 0.474510, 0.003922], - [1.000000, 0.505882, 0.003922], - [1.000000, 0.537255, 0.003922], - [1.000000, 0.568627, 0.003922], - [1.000000, 0.600000, 0.003922], - [1.000000, 0.631373, 0.003922], - [1.000000, 0.666667, 0.007843], - [1.000000, 0.698039, 0.007843], - [1.000000, 0.729412, 0.007843], - [1.000000, 0.760784, 0.007843], - [1.000000, 0.792157, 0.007843], - [1.000000, 0.823529, 0.007843], - [1.000000, 0.854902, 0.007843], - [1.000000, 0.886275, 0.007843], - [1.000000, 0.917647, 0.007843], - [1.000000, 0.949020, 0.007843], - [1.000000, 0.984314, 0.011765], - [0.968627, 0.952941, 0.031373], - [0.937255, 0.921569, 0.050980], - [0.901961, 0.886275, 0.074510], - [0.870588, 0.854902, 0.094118], - [0.835294, 0.823529, 0.117647], - [0.803922, 0.788235, 0.137255], - [0.772549, 0.756863, 0.160784], - [0.737255, 0.725490, 0.180392], - [0.705882, 0.690196, 0.200000], - [0.670588, 0.658824, 0.223529], - [0.639216, 0.623529, 0.243137], - [0.607843, 0.592157, 0.266667], - [0.572549, 0.560784, 0.286275], - [0.541176, 0.525490, 0.309804], - [0.509804, 0.494118, 0.329412], - [0.474510, 0.462745, 0.349020], - [0.752941, 0.749020, 0.909804], - [0.800000, 0.800000, 0.929412], - [0.850980, 0.847059, 0.945098], - [0.898039, 0.898039, 0.964706], - [0.949020, 0.949020, 0.980392], - [1.000000, 1.000000, 1.000000], - [0.964706, 0.980392, 0.964706], - [0.929412, 0.960784, 0.929412], - [0.890196, 0.937255, 0.890196], - [0.854902, 0.917647, 0.854902], - [0.815686, 0.894118, 0.815686], - [0.780392, 0.874510, 0.780392], - [0.745098, 0.850980, 0.745098], - [0.705882, 0.831373, 0.705882], - [0.670588, 0.807843, 0.670588], - [0.631373, 0.788235, 0.631373], - [0.596078, 0.764706, 0.596078], - [0.560784, 0.745098, 0.560784], - [0.521569, 0.721569, 0.521569], - [0.486275, 0.701961, 0.486275], - [0.447059, 0.678431, 
0.447059], - [0.411765, 0.658824, 0.411765], - [0.376471, 0.635294, 0.376471], - [0.337255, 0.615686, 0.337255], - [0.301961, 0.592157, 0.301961], - [0.262745, 0.572549, 0.262745], - [0.227451, 0.549020, 0.227451], - [0.192157, 0.529412, 0.192157], - [0.152941, 0.505882, 0.152941], - [0.117647, 0.486275, 0.117647], - [0.078431, 0.462745, 0.078431], - [0.043137, 0.443137, 0.043137], - [0.003922, 0.419608, 0.003922], - [0.003922, 0.431373, 0.027451], - [0.003922, 0.447059, 0.054902], - [0.003922, 0.462745, 0.082353], - [0.003922, 0.478431, 0.109804], - [0.003922, 0.494118, 0.137255], - [0.003922, 0.509804, 0.164706], - [0.003922, 0.525490, 0.192157], - [0.003922, 0.541176, 0.215686], - [0.003922, 0.556863, 0.243137], - [0.007843, 0.568627, 0.270588], - [0.007843, 0.584314, 0.298039], - [0.007843, 0.600000, 0.325490], - [0.007843, 0.615686, 0.352941], - [0.007843, 0.631373, 0.380392], - [0.007843, 0.647059, 0.403922], - [0.007843, 0.662745, 0.431373], - [0.007843, 0.678431, 0.458824], - [0.007843, 0.694118, 0.486275], - [0.011765, 0.705882, 0.513725], - [0.011765, 0.721569, 0.541176], - [0.011765, 0.737255, 0.568627], - [0.011765, 0.752941, 0.596078], - [0.011765, 0.768627, 0.619608], - [0.011765, 0.784314, 0.647059], - [0.011765, 0.800000, 0.674510], - [0.011765, 0.815686, 0.701961], - [0.011765, 0.831373, 0.729412], - [0.015686, 0.843137, 0.756863], - [0.015686, 0.858824, 0.784314], - [0.015686, 0.874510, 0.807843], - [0.015686, 0.890196, 0.835294], - [0.015686, 0.905882, 0.862745], - [0.015686, 0.921569, 0.890196], - [0.015686, 0.937255, 0.917647], - [0.015686, 0.952941, 0.945098], - [0.015686, 0.968627, 0.972549], - [1.000000, 1.000000, 1.000000], - ] - return ListedColormap(cols), None +"""Code is adapted from https://github.com/MIT-AI-Accelerator/neurips-2020-sevir. Their license is MIT License.""" + +from copy import deepcopy + +import numpy as np +from matplotlib.colors import BoundaryNorm +from matplotlib.colors import ListedColormap + +VIL_COLORS = [ + [0, 0, 0], + [0.30196078431372547, 0.30196078431372547, 0.30196078431372547], + [0.1568627450980392, 0.7450980392156863, 0.1568627450980392], + [0.09803921568627451, 0.5882352941176471, 0.09803921568627451], + [0.0392156862745098, 0.4117647058823529, 0.0392156862745098], + [0.0392156862745098, 0.29411764705882354, 0.0392156862745098], + [0.9607843137254902, 0.9607843137254902, 0.0], + [0.9294117647058824, 0.6745098039215687, 0.0], + [0.9411764705882353, 0.43137254901960786, 0.0], + [0.6274509803921569, 0.0, 0.0], + [0.9058823529411765, 0.0, 1.0], +] + +VIL_LEVELS = [0.0, 16.0, 31.0, 59.0, 74.0, 100.0, 133.0, 160.0, 181.0, 219.0, 255.0] + + +def get_cmap(type, encoded=True): + if type.lower() == "vis": + cmap, norm = vis_cmap(encoded) + vmin, vmax = (0, 10000) if encoded else (0, 1) + elif type.lower() == "vil": + cmap, norm = vil_cmap(encoded) + vmin, vmax = None, None + elif type.lower() == "ir069": + cmap, norm = c09_cmap(encoded) + vmin, vmax = (-8000, -1000) if encoded else (-80, -10) + elif type.lower() == "lght": + cmap, norm = "hot", None + vmin, vmax = 0, 5 + else: + cmap, norm = "jet", None + vmin, vmax = (-7000, 2000) if encoded else (-70, 20) + return cmap, norm, vmin, vmax + + +def vil_cmap(encoded=True): + cols = deepcopy(VIL_COLORS) + lev = deepcopy(VIL_LEVELS) + # Exactly the same error occurs in the original implementation (https://github.com/MIT-AI-Accelerator/neurips-2020-sevir/blob/master/src/display/display.py). 
+ # ValueError: There are 10 color bins including extensions, but ncolors = 9; ncolors must equal or exceed the number of bins + # We can not replicate the visualization in notebook (https://github.com/MIT-AI-Accelerator/neurips-2020-sevir/blob/master/notebooks/AnalyzeNowcast.ipynb) without error. + nil = cols.pop(0) + under = cols[0] + # over = cols.pop() + over = cols[-1] + cmap = ListedColormap(cols) + cmap.set_bad(nil) + cmap.set_under(under) + cmap.set_over(over) + norm = BoundaryNorm(lev, cmap.N) + return cmap, norm + + +def vis_cmap(encoded=True): + cols = [ + [0, 0, 0], + [0.0392156862745098, 0.0392156862745098, 0.0392156862745098], + [0.0784313725490196, 0.0784313725490196, 0.0784313725490196], + [0.11764705882352941, 0.11764705882352941, 0.11764705882352941], + [0.1568627450980392, 0.1568627450980392, 0.1568627450980392], + [0.19607843137254902, 0.19607843137254902, 0.19607843137254902], + [0.23529411764705882, 0.23529411764705882, 0.23529411764705882], + [0.27450980392156865, 0.27450980392156865, 0.27450980392156865], + [0.3137254901960784, 0.3137254901960784, 0.3137254901960784], + [0.35294117647058826, 0.35294117647058826, 0.35294117647058826], + [0.39215686274509803, 0.39215686274509803, 0.39215686274509803], + [0.43137254901960786, 0.43137254901960786, 0.43137254901960786], + [0.47058823529411764, 0.47058823529411764, 0.47058823529411764], + [0.5098039215686274, 0.5098039215686274, 0.5098039215686274], + [0.5490196078431373, 0.5490196078431373, 0.5490196078431373], + [0.5882352941176471, 0.5882352941176471, 0.5882352941176471], + [0.6274509803921569, 0.6274509803921569, 0.6274509803921569], + [0.6666666666666666, 0.6666666666666666, 0.6666666666666666], + [0.7058823529411765, 0.7058823529411765, 0.7058823529411765], + [0.7450980392156863, 0.7450980392156863, 0.7450980392156863], + [0.7843137254901961, 0.7843137254901961, 0.7843137254901961], + [0.8235294117647058, 0.8235294117647058, 0.8235294117647058], + [0.8627450980392157, 0.8627450980392157, 0.8627450980392157], + [0.9019607843137255, 0.9019607843137255, 0.9019607843137255], + [0.9411764705882353, 0.9411764705882353, 0.9411764705882353], + [0.9803921568627451, 0.9803921568627451, 0.9803921568627451], + [0.9803921568627451, 0.9803921568627451, 0.9803921568627451], + ] + lev = np.array( + [ + 0.0, + 0.02, + 0.04, + 0.06, + 0.08, + 0.1, + 0.12, + 0.14, + 0.16, + 0.2, + 0.24, + 0.28, + 0.32, + 0.36, + 0.4, + 0.44, + 0.48, + 0.52, + 0.56, + 0.6, + 0.64, + 0.68, + 0.72, + 0.76, + 0.8, + 0.9, + 1.0, + ] + ) + if encoded: + lev *= 1e4 + nil = cols.pop(0) + under = cols[0] + over = cols.pop() + cmap = ListedColormap(cols) + cmap.set_bad(nil) + cmap.set_under(under) + cmap.set_over(over) + norm = BoundaryNorm(lev, cmap.N) + return cmap, norm + + +def ir_cmap(encoded=True): + cols = [ + [0, 0, 0], + [1.0, 1.0, 1.0], + [0.9803921568627451, 0.9803921568627451, 0.9803921568627451], + [0.9411764705882353, 0.9411764705882353, 0.9411764705882353], + [0.9019607843137255, 0.9019607843137255, 0.9019607843137255], + [0.8627450980392157, 0.8627450980392157, 0.8627450980392157], + [0.8235294117647058, 0.8235294117647058, 0.8235294117647058], + [0.7843137254901961, 0.7843137254901961, 0.7843137254901961], + [0.7450980392156863, 0.7450980392156863, 0.7450980392156863], + [0.7058823529411765, 0.7058823529411765, 0.7058823529411765], + [0.6666666666666666, 0.6666666666666666, 0.6666666666666666], + [0.6274509803921569, 0.6274509803921569, 0.6274509803921569], + [0.5882352941176471, 0.5882352941176471, 0.5882352941176471], + [0.5490196078431373, 
0.5490196078431373, 0.5490196078431373], + [0.5098039215686274, 0.5098039215686274, 0.5098039215686274], + [0.47058823529411764, 0.47058823529411764, 0.47058823529411764], + [0.43137254901960786, 0.43137254901960786, 0.43137254901960786], + [0.39215686274509803, 0.39215686274509803, 0.39215686274509803], + [0.35294117647058826, 0.35294117647058826, 0.35294117647058826], + [0.3137254901960784, 0.3137254901960784, 0.3137254901960784], + [0.27450980392156865, 0.27450980392156865, 0.27450980392156865], + [0.23529411764705882, 0.23529411764705882, 0.23529411764705882], + [0.19607843137254902, 0.19607843137254902, 0.19607843137254902], + [0.1568627450980392, 0.1568627450980392, 0.1568627450980392], + [0.11764705882352941, 0.11764705882352941, 0.11764705882352941], + [0.0784313725490196, 0.0784313725490196, 0.0784313725490196], + [0.0392156862745098, 0.0392156862745098, 0.0392156862745098], + [0.0, 0.803921568627451, 0.803921568627451], + ] + lev = np.array( + [ + -110.0, + -105.2, + -95.2, + -85.2, + -75.2, + -65.2, + -55.2, + -45.2, + -35.2, + -28.2, + -23.2, + -18.2, + -13.2, + -8.2, + -3.2, + 1.8, + 6.8, + 11.8, + 16.8, + 21.8, + 26.8, + 31.8, + 36.8, + 41.8, + 46.8, + 51.8, + 90.0, + 100.0, + ] + ) + if encoded: + lev *= 1e2 + nil = cols.pop(0) + under = cols[0] + over = cols.pop() + cmap = ListedColormap(cols) + cmap.set_bad(nil) + cmap.set_under(under) + cmap.set_over(over) + norm = BoundaryNorm(lev, cmap.N) + return cmap, norm + + +def c09_cmap(encoded=True): + cols = [ + [1.000000, 0.000000, 0.000000], + [1.000000, 0.031373, 0.000000], + [1.000000, 0.062745, 0.000000], + [1.000000, 0.094118, 0.000000], + [1.000000, 0.125490, 0.000000], + [1.000000, 0.156863, 0.000000], + [1.000000, 0.188235, 0.000000], + [1.000000, 0.219608, 0.000000], + [1.000000, 0.250980, 0.000000], + [1.000000, 0.282353, 0.000000], + [1.000000, 0.313725, 0.000000], + [1.000000, 0.349020, 0.003922], + [1.000000, 0.380392, 0.003922], + [1.000000, 0.411765, 0.003922], + [1.000000, 0.443137, 0.003922], + [1.000000, 0.474510, 0.003922], + [1.000000, 0.505882, 0.003922], + [1.000000, 0.537255, 0.003922], + [1.000000, 0.568627, 0.003922], + [1.000000, 0.600000, 0.003922], + [1.000000, 0.631373, 0.003922], + [1.000000, 0.666667, 0.007843], + [1.000000, 0.698039, 0.007843], + [1.000000, 0.729412, 0.007843], + [1.000000, 0.760784, 0.007843], + [1.000000, 0.792157, 0.007843], + [1.000000, 0.823529, 0.007843], + [1.000000, 0.854902, 0.007843], + [1.000000, 0.886275, 0.007843], + [1.000000, 0.917647, 0.007843], + [1.000000, 0.949020, 0.007843], + [1.000000, 0.984314, 0.011765], + [0.968627, 0.952941, 0.031373], + [0.937255, 0.921569, 0.050980], + [0.901961, 0.886275, 0.074510], + [0.870588, 0.854902, 0.094118], + [0.835294, 0.823529, 0.117647], + [0.803922, 0.788235, 0.137255], + [0.772549, 0.756863, 0.160784], + [0.737255, 0.725490, 0.180392], + [0.705882, 0.690196, 0.200000], + [0.670588, 0.658824, 0.223529], + [0.639216, 0.623529, 0.243137], + [0.607843, 0.592157, 0.266667], + [0.572549, 0.560784, 0.286275], + [0.541176, 0.525490, 0.309804], + [0.509804, 0.494118, 0.329412], + [0.474510, 0.462745, 0.349020], + [0.752941, 0.749020, 0.909804], + [0.800000, 0.800000, 0.929412], + [0.850980, 0.847059, 0.945098], + [0.898039, 0.898039, 0.964706], + [0.949020, 0.949020, 0.980392], + [1.000000, 1.000000, 1.000000], + [0.964706, 0.980392, 0.964706], + [0.929412, 0.960784, 0.929412], + [0.890196, 0.937255, 0.890196], + [0.854902, 0.917647, 0.854902], + [0.815686, 0.894118, 0.815686], + [0.780392, 0.874510, 0.780392], + [0.745098, 
0.850980, 0.745098], + [0.705882, 0.831373, 0.705882], + [0.670588, 0.807843, 0.670588], + [0.631373, 0.788235, 0.631373], + [0.596078, 0.764706, 0.596078], + [0.560784, 0.745098, 0.560784], + [0.521569, 0.721569, 0.521569], + [0.486275, 0.701961, 0.486275], + [0.447059, 0.678431, 0.447059], + [0.411765, 0.658824, 0.411765], + [0.376471, 0.635294, 0.376471], + [0.337255, 0.615686, 0.337255], + [0.301961, 0.592157, 0.301961], + [0.262745, 0.572549, 0.262745], + [0.227451, 0.549020, 0.227451], + [0.192157, 0.529412, 0.192157], + [0.152941, 0.505882, 0.152941], + [0.117647, 0.486275, 0.117647], + [0.078431, 0.462745, 0.078431], + [0.043137, 0.443137, 0.043137], + [0.003922, 0.419608, 0.003922], + [0.003922, 0.431373, 0.027451], + [0.003922, 0.447059, 0.054902], + [0.003922, 0.462745, 0.082353], + [0.003922, 0.478431, 0.109804], + [0.003922, 0.494118, 0.137255], + [0.003922, 0.509804, 0.164706], + [0.003922, 0.525490, 0.192157], + [0.003922, 0.541176, 0.215686], + [0.003922, 0.556863, 0.243137], + [0.007843, 0.568627, 0.270588], + [0.007843, 0.584314, 0.298039], + [0.007843, 0.600000, 0.325490], + [0.007843, 0.615686, 0.352941], + [0.007843, 0.631373, 0.380392], + [0.007843, 0.647059, 0.403922], + [0.007843, 0.662745, 0.431373], + [0.007843, 0.678431, 0.458824], + [0.007843, 0.694118, 0.486275], + [0.011765, 0.705882, 0.513725], + [0.011765, 0.721569, 0.541176], + [0.011765, 0.737255, 0.568627], + [0.011765, 0.752941, 0.596078], + [0.011765, 0.768627, 0.619608], + [0.011765, 0.784314, 0.647059], + [0.011765, 0.800000, 0.674510], + [0.011765, 0.815686, 0.701961], + [0.011765, 0.831373, 0.729412], + [0.015686, 0.843137, 0.756863], + [0.015686, 0.858824, 0.784314], + [0.015686, 0.874510, 0.807843], + [0.015686, 0.890196, 0.835294], + [0.015686, 0.905882, 0.862745], + [0.015686, 0.921569, 0.890196], + [0.015686, 0.937255, 0.917647], + [0.015686, 0.952941, 0.945098], + [0.015686, 0.968627, 0.972549], + [1.000000, 1.000000, 1.000000], + ] + return ListedColormap(cols), None diff --git a/examples/earthformer/sevir_metric.py b/examples/earthformer/sevir_metric.py index 47fc225560..bf333d1150 100644 --- a/examples/earthformer/sevir_metric.py +++ b/examples/earthformer/sevir_metric.py @@ -1,281 +1,281 @@ -from typing import Dict -from typing import Optional -from typing import Sequence - -import numpy as np -import paddle -from paddle.nn import functional as F - -from ppsci.data.dataset import sevir_dataset - - -def _threshold(target, pred, T): - """ - Returns binary tensors t,p the same shape as target & pred. t = 1 wherever - target > t. p =1 wherever pred > t. p and t are set to 0 wherever EITHER - t or p are nan. - This is useful for counts that don't involve correct rejections. - - Args: - target (paddle.Tensor): label - pred (paddle.Tensor): predict - T (numeric_type): threshold - Returns: - t - p - """ - - t = (target >= T).astype("float32") - p = (pred >= T).astype("float32") - is_nan = paddle.logical_or(paddle.isnan(target), paddle.isnan(pred)) - t[is_nan] = 0 - p[is_nan] = 0 - return t, p - - -class SEVIRSkillScore: - r""" - The calculation of skill scores in SEVIR challenge is slightly different: - `mCSI = sum(mCSI_t) / T` - See https://github.com/MIT-AI-Accelerator/sevir_challenges/blob/dev/radar_nowcasting/RadarNowcastBenchmarks.ipynb for more details. 
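The `mCSI = sum(mCSI_t) / T` convention and the mode options documented in the Args block that follows can be summarised in two lines of NumPy; the random score array below is a toy stand-in for per-threshold, per-step CSI values:

import numpy as np

csi = np.random.rand(6, 12)    # 6 thresholds x 12 lead times (toy values)
csi_t = csi.mean(axis=0)       # mode "1": average over thresholds, keep the seq_len dim -> (12,)
m_csi = csi_t.mean()           # mode "2": mCSI = sum(mCSI_t) / T, a single scalar
print(csi_t.shape, float(m_csi))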
- - Args: - seq_len (int): sequence length - layout (str): layout mode - mode (str): Should be in ("0", "1", "2") - "0": - cumulates hits/misses/fas of all test pixels - score_avg takes average over all thresholds - return - score_thresh shape = (1, ) - score_avg shape = (1, ) - "1": - cumulates hits/misses/fas of each step - score_avg takes average over all thresholds while keeps the seq_len dim - return - score_thresh shape = (seq_len, ) - score_avg shape = (seq_len, ) - "2": - cumulates hits/misses/fas of each step - score_avg takes average over all thresholds, then takes average over the seq_len dim - return - score_thresh shape = (1, ) - score_avg shape = (1, ) - preprocess_type (str): prepprocess type - threshold_list (Sequence[int]): threshold list - """ - - full_state_update: bool = True - - def __init__( - self, - layout: str = "NHWT", - mode: str = "0", - seq_len: Optional[int] = None, - preprocess_type: str = "sevir", - threshold_list: Sequence[int] = (16, 74, 133, 160, 181, 219), - metrics_list: Sequence[str] = ("csi", "bias", "sucr", "pod"), - eps: float = 1e-4, - dist_sync_on_step: bool = False, - ): - super().__init__() - self.layout = layout - self.preprocess_type = preprocess_type - self.threshold_list = threshold_list - self.metrics_list = metrics_list - self.eps = eps - self.mode = mode - self.seq_len = seq_len - - self.hits = paddle.zeros(shape=[len(self.threshold_list)]) - self.misses = paddle.zeros(shape=[len(self.threshold_list)]) - self.fas = paddle.zeros(shape=[len(self.threshold_list)]) - - if mode in ("0",): - self.keep_seq_len_dim = False - elif mode in ("1", "2"): - self.keep_seq_len_dim = True - assert isinstance( - self.seq_len, int - ), "seq_len must be provided when we need to keep seq_len dim." - - else: - raise NotImplementedError(f"mode {mode} not supported!") - - @staticmethod - def pod(hits, misses, fas, eps): - return hits / (hits + misses + eps) - - @staticmethod - def sucr(hits, misses, fas, eps): - return hits / (hits + fas + eps) - - @staticmethod - def csi(hits, misses, fas, eps): - return hits / (hits + misses + fas + eps) - - @staticmethod - def bias(hits, misses, fas, eps): - bias = (hits + fas) / (hits + misses + eps) - logbias = paddle.pow(bias / paddle.log(paddle.full([], 2.0)), 2.0) - return logbias - - @property - def hits_misses_fas_reduce_dims(self): - if not hasattr(self, "_hits_misses_fas_reduce_dims"): - seq_dim = self.layout.find("T") - self._hits_misses_fas_reduce_dims = list(range(len(self.layout))) - if self.keep_seq_len_dim: - self._hits_misses_fas_reduce_dims.pop(seq_dim) - return self._hits_misses_fas_reduce_dims - - def calc_seq_hits_misses_fas(self, pred, target, threshold): - """ - Args: - pred (paddle.Tensor): Predict data. - target (paddle.Tensor): True data. - threshold (int): The threshold to calculate hits, misses and fas. - - Returns: - hits (paddle.Tensor): Number of hits. - misses (paddle.Tensor): Number of misses. - fas (paddle.Tensor): Number of false positives. 
- each has shape (seq_len, ) - """ - - with paddle.no_grad(): - t, p = _threshold(target, pred, threshold) - hits = paddle.sum(t * p, axis=self.hits_misses_fas_reduce_dims).astype( - "int32" - ) - misses = paddle.sum( - t * (1 - p), axis=self.hits_misses_fas_reduce_dims - ).astype("int32") - fas = paddle.sum((1 - t) * p, axis=self.hits_misses_fas_reduce_dims).astype( - "int32" - ) - return hits, misses, fas - - def preprocess(self, pred, target): - if self.preprocess_type == "sevir": - pred = sevir_dataset.SEVIRDataset.process_data_dict_back( - data_dict={"vil": pred.detach().astype("float32")} - )["vil"] - target = sevir_dataset.SEVIRDataset.process_data_dict_back( - data_dict={"vil": target.detach().astype("float32")} - )["vil"] - else: - raise NotImplementedError(f"{self.preprocess_type} not supported") - return pred, target - - def update(self, pred: paddle.Tensor, target: paddle.Tensor): - pred, target = self.preprocess(pred, target) - for i, threshold in enumerate(self.threshold_list): - hits, misses, fas = self.calc_seq_hits_misses_fas(pred, target, threshold) - self.hits[i] += hits - self.misses[i] += misses - self.fas[i] += fas - - def compute(self, pred: paddle.Tensor, target: paddle.Tensor): - metrics_dict = { - "pod": self.pod, - "csi": self.csi, - "sucr": self.sucr, - "bias": self.bias, - } - ret = {} - for threshold in self.threshold_list: - ret[threshold] = {} - ret["avg"] = {} - for metrics in self.metrics_list: - if self.keep_seq_len_dim: - score_avg = np.zeros((self.seq_len,)) - else: - score_avg = 0 - # shape = (len(threshold_list), seq_len) if self.keep_seq_len_dim, - # else shape = (len(threshold_list),) - scores = metrics_dict[metrics](self.hits, self.misses, self.fas, self.eps) - scores = scores.detach().cpu().numpy() - for i, threshold in enumerate(self.threshold_list): - if self.keep_seq_len_dim: - score = scores[i] # shape = (seq_len, ) - else: - score = scores[i].item() # shape = (1, ) - if self.mode in ("0", "1"): - ret[threshold][metrics] = score - elif self.mode in ("2",): - ret[threshold][metrics] = np.mean(score).item() - else: - raise NotImplementedError(f"{self.mode} is invalid.") - score_avg += score - score_avg /= len(self.threshold_list) - if self.mode in ("0", "1"): - ret["avg"][metrics] = score_avg - elif self.mode in ("2",): - ret["avg"][metrics] = np.mean(score_avg).item() - else: - raise NotImplementedError(f"{self.mode} is invalid.") - - metrics = {} - metrics["csi_avg_loss"] = 0 - for metric in self.metrics_list: - for th in self.threshold_list: - metrics[f"{metric}_{th}"] = ret[th][metric] - metrics[f"{metric}_avg"] = ret["avg"][metric] - - metrics["csi_avg_loss"] = -metrics["csi_avg"] - return metrics - - -class eval_rmse_func: - def __init__( - self, - out_len=12, - layout="NTHWC", - metrics_mode="0", - metrics_list=["csi", "pod", "sucr", "bias"], - threshold_list=[16, 74, 133, 160, 181, 219], - *args, - ) -> Dict[str, paddle.Tensor]: - super().__init__() - self.out_len = out_len - self.layout = layout - self.metrics_mode = metrics_mode - self.metrics_list = metrics_list - self.threshold_list = threshold_list - - self.sevir_score = SEVIRSkillScore( - layout=self.layout, - mode=self.metrics_mode, - seq_len=self.out_len, - threshold_list=self.threshold_list, - metrics_list=self.metrics_list, - ) - - def __call__( - self, - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - ): - pred = output_dict["vil"] - vil_target = label_dict["vil"] - vil_target = vil_target.reshape([-1, *vil_target.shape[2:]]) - # mse - mae 
= F.l1_loss(pred, vil_target, "none") - mae = mae.mean(axis=tuple(range(1, mae.ndim))) - # mse - mse = F.mse_loss(pred, vil_target, "none") - mse = mse.mean(axis=tuple(range(1, mse.ndim))) - - return {"mse": mse, "mae": mae} - - -def train_mse_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - *args, -) -> paddle.Tensor: - pred = output_dict["vil"] - vil_target = label_dict["vil"] - target = vil_target.reshape([-1, *vil_target.shape[2:]]) - return {"vil": F.mse_loss(pred, target)} +from typing import Dict +from typing import Optional +from typing import Sequence + +import numpy as np +import paddle +from paddle.nn import functional as F + +from ppsci.data.dataset import sevir_dataset + + +def _threshold(target, pred, T): + """ + Returns binary tensors t,p the same shape as target & pred. t = 1 wherever + target > t. p =1 wherever pred > t. p and t are set to 0 wherever EITHER + t or p are nan. + This is useful for counts that don't involve correct rejections. + + Args: + target (paddle.Tensor): label + pred (paddle.Tensor): predict + T (numeric_type): threshold + Returns: + t + p + """ + + t = (target >= T).astype("float32") + p = (pred >= T).astype("float32") + is_nan = paddle.logical_or(paddle.isnan(target), paddle.isnan(pred)) + t[is_nan] = 0 + p[is_nan] = 0 + return t, p + + +class SEVIRSkillScore: + r""" + The calculation of skill scores in SEVIR challenge is slightly different: + `mCSI = sum(mCSI_t) / T` + See https://github.com/MIT-AI-Accelerator/sevir_challenges/blob/dev/radar_nowcasting/RadarNowcastBenchmarks.ipynb for more details. + + Args: + seq_len (int): sequence length + layout (str): layout mode + mode (str): Should be in ("0", "1", "2") + "0": + cumulates hits/misses/fas of all test pixels + score_avg takes average over all thresholds + return + score_thresh shape = (1, ) + score_avg shape = (1, ) + "1": + cumulates hits/misses/fas of each step + score_avg takes average over all thresholds while keeps the seq_len dim + return + score_thresh shape = (seq_len, ) + score_avg shape = (seq_len, ) + "2": + cumulates hits/misses/fas of each step + score_avg takes average over all thresholds, then takes average over the seq_len dim + return + score_thresh shape = (1, ) + score_avg shape = (1, ) + preprocess_type (str): prepprocess type + threshold_list (Sequence[int]): threshold list + """ + + full_state_update: bool = True + + def __init__( + self, + layout: str = "NHWT", + mode: str = "0", + seq_len: Optional[int] = None, + preprocess_type: str = "sevir", + threshold_list: Sequence[int] = (16, 74, 133, 160, 181, 219), + metrics_list: Sequence[str] = ("csi", "bias", "sucr", "pod"), + eps: float = 1e-4, + dist_sync_on_step: bool = False, + ): + super().__init__() + self.layout = layout + self.preprocess_type = preprocess_type + self.threshold_list = threshold_list + self.metrics_list = metrics_list + self.eps = eps + self.mode = mode + self.seq_len = seq_len + + self.hits = paddle.zeros(shape=[len(self.threshold_list)]) + self.misses = paddle.zeros(shape=[len(self.threshold_list)]) + self.fas = paddle.zeros(shape=[len(self.threshold_list)]) + + if mode in ("0",): + self.keep_seq_len_dim = False + elif mode in ("1", "2"): + self.keep_seq_len_dim = True + assert isinstance( + self.seq_len, int + ), "seq_len must be provided when we need to keep seq_len dim." 
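For reference, the contingency-table quantities that SEVIRSkillScore accumulates reduce to a few lines of NumPy. The sketch below mirrors _threshold and the pod/sucr/csi static methods in this hunk; the toy target/pred arrays are made up, and eps follows the class default of 1e-4:

import numpy as np

def contingency_counts(target, pred, thresh):
    """Hits / misses / false alarms at one threshold, zeroing NaN pixels as _threshold does."""
    t = (target >= thresh).astype(float)
    p = (pred >= thresh).astype(float)
    nan_mask = np.isnan(target) | np.isnan(pred)
    t[nan_mask] = 0.0
    p[nan_mask] = 0.0
    return (t * p).sum(), (t * (1 - p)).sum(), ((1 - t) * p).sum()

eps = 1e-4
target = np.array([[0.0, 80.0, 140.0], [200.0, np.nan, 20.0]])
pred = np.array([[10.0, 70.0, 150.0], [190.0, 90.0, 30.0]])
hits, misses, fas = contingency_counts(target, pred, thresh=74)
pod = hits / (hits + misses + eps)        # probability of detection
sucr = hits / (hits + fas + eps)          # success ratio
csi = hits / (hits + misses + fas + eps)  # critical success index
print(hits, misses, fas, round(pod, 3), round(sucr, 3), round(csi, 3))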
+ + else: + raise NotImplementedError(f"mode {mode} not supported!") + + @staticmethod + def pod(hits, misses, fas, eps): + return hits / (hits + misses + eps) + + @staticmethod + def sucr(hits, misses, fas, eps): + return hits / (hits + fas + eps) + + @staticmethod + def csi(hits, misses, fas, eps): + return hits / (hits + misses + fas + eps) + + @staticmethod + def bias(hits, misses, fas, eps): + bias = (hits + fas) / (hits + misses + eps) + logbias = paddle.pow(bias / paddle.log(paddle.full([], 2.0)), 2.0) + return logbias + + @property + def hits_misses_fas_reduce_dims(self): + if not hasattr(self, "_hits_misses_fas_reduce_dims"): + seq_dim = self.layout.find("T") + self._hits_misses_fas_reduce_dims = list(range(len(self.layout))) + if self.keep_seq_len_dim: + self._hits_misses_fas_reduce_dims.pop(seq_dim) + return self._hits_misses_fas_reduce_dims + + def calc_seq_hits_misses_fas(self, pred, target, threshold): + """ + Args: + pred (paddle.Tensor): Predict data. + target (paddle.Tensor): True data. + threshold (int): The threshold to calculate hits, misses and fas. + + Returns: + hits (paddle.Tensor): Number of hits. + misses (paddle.Tensor): Number of misses. + fas (paddle.Tensor): Number of false positives. + each has shape (seq_len, ) + """ + + with paddle.no_grad(): + t, p = _threshold(target, pred, threshold) + hits = paddle.sum(t * p, axis=self.hits_misses_fas_reduce_dims).astype( + "int32" + ) + misses = paddle.sum( + t * (1 - p), axis=self.hits_misses_fas_reduce_dims + ).astype("int32") + fas = paddle.sum((1 - t) * p, axis=self.hits_misses_fas_reduce_dims).astype( + "int32" + ) + return hits, misses, fas + + def preprocess(self, pred, target): + if self.preprocess_type == "sevir": + pred = sevir_dataset.SEVIRDataset.process_data_dict_back( + data_dict={"vil": pred.detach().astype("float32")} + )["vil"] + target = sevir_dataset.SEVIRDataset.process_data_dict_back( + data_dict={"vil": target.detach().astype("float32")} + )["vil"] + else: + raise NotImplementedError(f"{self.preprocess_type} not supported") + return pred, target + + def update(self, pred: paddle.Tensor, target: paddle.Tensor): + pred, target = self.preprocess(pred, target) + for i, threshold in enumerate(self.threshold_list): + hits, misses, fas = self.calc_seq_hits_misses_fas(pred, target, threshold) + self.hits[i] += hits + self.misses[i] += misses + self.fas[i] += fas + + def compute(self, pred: paddle.Tensor, target: paddle.Tensor): + metrics_dict = { + "pod": self.pod, + "csi": self.csi, + "sucr": self.sucr, + "bias": self.bias, + } + ret = {} + for threshold in self.threshold_list: + ret[threshold] = {} + ret["avg"] = {} + for metrics in self.metrics_list: + if self.keep_seq_len_dim: + score_avg = np.zeros((self.seq_len,)) + else: + score_avg = 0 + # shape = (len(threshold_list), seq_len) if self.keep_seq_len_dim, + # else shape = (len(threshold_list),) + scores = metrics_dict[metrics](self.hits, self.misses, self.fas, self.eps) + scores = scores.detach().cpu().numpy() + for i, threshold in enumerate(self.threshold_list): + if self.keep_seq_len_dim: + score = scores[i] # shape = (seq_len, ) + else: + score = scores[i].item() # shape = (1, ) + if self.mode in ("0", "1"): + ret[threshold][metrics] = score + elif self.mode in ("2",): + ret[threshold][metrics] = np.mean(score).item() + else: + raise NotImplementedError(f"{self.mode} is invalid.") + score_avg += score + score_avg /= len(self.threshold_list) + if self.mode in ("0", "1"): + ret["avg"][metrics] = score_avg + elif self.mode in ("2",): + 
ret["avg"][metrics] = np.mean(score_avg).item() + else: + raise NotImplementedError(f"{self.mode} is invalid.") + + metrics = {} + metrics["csi_avg_loss"] = 0 + for metric in self.metrics_list: + for th in self.threshold_list: + metrics[f"{metric}_{th}"] = ret[th][metric] + metrics[f"{metric}_avg"] = ret["avg"][metric] + + metrics["csi_avg_loss"] = -metrics["csi_avg"] + return metrics + + +class eval_rmse_func: + def __init__( + self, + out_len=12, + layout="NTHWC", + metrics_mode="0", + metrics_list=["csi", "pod", "sucr", "bias"], + threshold_list=[16, 74, 133, 160, 181, 219], + *args, + ) -> Dict[str, paddle.Tensor]: + super().__init__() + self.out_len = out_len + self.layout = layout + self.metrics_mode = metrics_mode + self.metrics_list = metrics_list + self.threshold_list = threshold_list + + self.sevir_score = SEVIRSkillScore( + layout=self.layout, + mode=self.metrics_mode, + seq_len=self.out_len, + threshold_list=self.threshold_list, + metrics_list=self.metrics_list, + ) + + def __call__( + self, + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + ): + pred = output_dict["vil"] + vil_target = label_dict["vil"] + vil_target = vil_target.reshape([-1, *vil_target.shape[2:]]) + # mse + mae = F.l1_loss(pred, vil_target, "none") + mae = mae.mean(axis=tuple(range(1, mae.ndim))) + # mse + mse = F.mse_loss(pred, vil_target, "none") + mse = mse.mean(axis=tuple(range(1, mse.ndim))) + + return {"mse": mse, "mae": mae} + + +def train_mse_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + *args, +) -> paddle.Tensor: + pred = output_dict["vil"] + vil_target = label_dict["vil"] + target = vil_target.reshape([-1, *vil_target.shape[2:]]) + return {"vil": F.mse_loss(pred, target)} diff --git a/examples/earthformer/sevir_vis_seq.py b/examples/earthformer/sevir_vis_seq.py index 3bacfb747b..b5c2516baa 100644 --- a/examples/earthformer/sevir_vis_seq.py +++ b/examples/earthformer/sevir_vis_seq.py @@ -1,247 +1,247 @@ -import os -from typing import List - -import numpy as np -import sevir_cmap -from matplotlib import pyplot as plt -from matplotlib.colors import ListedColormap -from matplotlib.patches import Patch - -from ppsci.data.dataset import sevir_dataset - -HMF_COLORS = ( - np.array([[82, 82, 82], [252, 141, 89], [255, 255, 191], [145, 191, 219]]) / 255 -) - -THRESHOLDS = (0, 16, 74, 133, 160, 181, 219, 255) - - -def plot_hit_miss_fa(ax, y_true, y_pred, thres): - mask = np.zeros_like(y_true) - mask[np.logical_and(y_true >= thres, y_pred >= thres)] = 4 - mask[np.logical_and(y_true >= thres, y_pred < thres)] = 3 - mask[np.logical_and(y_true < thres, y_pred >= thres)] = 2 - mask[np.logical_and(y_true < thres, y_pred < thres)] = 1 - cmap = ListedColormap(HMF_COLORS) - ax.imshow(mask, cmap=cmap) - - -def plot_hit_miss_fa_all_thresholds(ax, y_true, y_pred, **unused_kwargs): - fig = np.zeros(y_true.shape) - y_true_idx = np.searchsorted(THRESHOLDS, y_true) - y_pred_idx = np.searchsorted(THRESHOLDS, y_pred) - fig[y_true_idx == y_pred_idx] = 4 - fig[y_true_idx > y_pred_idx] = 3 - fig[y_true_idx < y_pred_idx] = 2 - # do not count results in these not challenging areas. 
- fig[np.logical_and(y_true < THRESHOLDS[1], y_pred < THRESHOLDS[1])] = 1 - cmap = ListedColormap(HMF_COLORS) - ax.imshow(fig, cmap=cmap) - - -def get_cmap_dict(s): - return { - "cmap": sevir_cmap.get_cmap(s, encoded=True)[0], - "norm": sevir_cmap.get_cmap(s, encoded=True)[1], - "vmin": sevir_cmap.get_cmap(s, encoded=True)[2], - "vmax": sevir_cmap.get_cmap(s, encoded=True)[3], - } - - -def visualize_result( - in_seq: np.array, - target_seq: np.array, - pred_seq_list: List[np.array], - label_list: List[str], - interval_real_time: float = 10.0, - idx=0, - norm=None, - plot_stride=2, - figsize=(24, 8), - fs=10, - vis_thresh=THRESHOLDS[2], - vis_hits_misses_fas=True, -): - """ - Args: - in_seq (np.array): - target_seq (np.array): - interval_real_time (float): The minutes of each plot interval - """ - - if norm is None: - norm = {"scale": 255, "shift": 0} - in_len = in_seq.shape[-1] - out_len = target_seq.shape[-1] - max_len = max(in_len, out_len) - ncols = (max_len - 1) // plot_stride + 1 - if vis_hits_misses_fas: - fig, ax = plt.subplots( - nrows=2 + 3 * len(pred_seq_list), ncols=ncols, figsize=figsize - ) - else: - fig, ax = plt.subplots( - nrows=2 + len(pred_seq_list), ncols=ncols, figsize=figsize - ) - - ax[0][0].set_ylabel("Inputs", fontsize=fs) - for i in range(0, max_len, plot_stride): - if i < in_len: - xt = in_seq[idx, :, :, i] * norm["scale"] + norm["shift"] - ax[0][i // plot_stride].imshow(xt, **get_cmap_dict("vil")) - else: - ax[0][i // plot_stride].axis("off") - - ax[1][0].set_ylabel("Target", fontsize=fs) - for i in range(0, max_len, plot_stride): - if i < out_len: - xt = target_seq[idx, :, :, i] * norm["scale"] + norm["shift"] - ax[1][i // plot_stride].imshow(xt, **get_cmap_dict("vil")) - else: - ax[1][i // plot_stride].axis("off") - - target_seq = target_seq[idx : idx + 1] * norm["scale"] + norm["shift"] - y_preds = [ - pred_seq[idx : idx + 1] * norm["scale"] + norm["shift"] - for pred_seq in pred_seq_list - ] - - # Plot model predictions - if vis_hits_misses_fas: - for k in range(len(pred_seq_list)): - for i in range(0, max_len, plot_stride): - if i < out_len: - ax[2 + 3 * k][i // plot_stride].imshow( - y_preds[k][0, :, :, i], **get_cmap_dict("vil") - ) - plot_hit_miss_fa( - ax[2 + 1 + 3 * k][i // plot_stride], - target_seq[0, :, :, i], - y_preds[k][0, :, :, i], - vis_thresh, - ) - plot_hit_miss_fa_all_thresholds( - ax[2 + 2 + 3 * k][i // plot_stride], - target_seq[0, :, :, i], - y_preds[k][0, :, :, i], - ) - else: - ax[2 + 3 * k][i // plot_stride].axis("off") - ax[2 + 1 + 3 * k][i // plot_stride].axis("off") - ax[2 + 2 + 3 * k][i // plot_stride].axis("off") - - ax[2 + 3 * k][0].set_ylabel(label_list[k] + "\nPrediction", fontsize=fs) - ax[2 + 1 + 3 * k][0].set_ylabel( - label_list[k] + f"\nScores\nThresh={vis_thresh}", fontsize=fs - ) - ax[2 + 2 + 3 * k][0].set_ylabel( - label_list[k] + "\nScores\nAll Thresh", fontsize=fs - ) - else: - for k in range(len(pred_seq_list)): - for i in range(0, max_len, plot_stride): - if i < out_len: - ax[2 + k][i // plot_stride].imshow( - y_preds[k][0, :, :, i], **get_cmap_dict("vil") - ) - else: - ax[2 + k][i // plot_stride].axis("off") - - ax[2 + k][0].set_ylabel(label_list[k] + "\nPrediction", fontsize=fs) - - for i in range(0, max_len, plot_stride): - if i < out_len: - ax[-1][i // plot_stride].set_title( - f"{int(interval_real_time * (i + plot_stride))} Minutes", y=-0.25 - ) - - for j in range(len(ax)): - for i in range(len(ax[j])): - ax[j][i].xaxis.set_ticks([]) - ax[j][i].yaxis.set_ticks([]) - - # Legend of thresholds - num_thresh_legend = 
len(sevir_cmap.VIL_LEVELS) - 1 - legend_elements = [ - Patch( - facecolor=sevir_cmap.VIL_COLORS[i], - label=f"{int(sevir_cmap.VIL_LEVELS[i - 1])}-{int(sevir_cmap.VIL_LEVELS[i])}", - ) - for i in range(1, num_thresh_legend + 1) - ] - ax[0][0].legend( - handles=legend_elements, - loc="center left", - bbox_to_anchor=(-1.2, -0.0), - borderaxespad=0, - frameon=False, - fontsize="10", - ) - if vis_hits_misses_fas: - # Legend of Hit, Miss and False Alarm - legend_elements = [ - Patch(facecolor=HMF_COLORS[3], edgecolor="k", label="Hit"), - Patch(facecolor=HMF_COLORS[2], edgecolor="k", label="Miss"), - Patch(facecolor=HMF_COLORS[1], edgecolor="k", label="False Alarm"), - ] - - ax[3][0].legend( - handles=legend_elements, - loc="center left", - bbox_to_anchor=(-2.2, -0.0), - borderaxespad=0, - frameon=False, - fontsize="16", - ) - - plt.subplots_adjust(hspace=0.05, wspace=0.05) - return fig, ax - - -def save_example_vis_results( - save_dir, - save_prefix, - in_seq, - target_seq, - pred_seq, - label, - layout="NHWT", - interval_real_time: float = 10.0, - idx=0, - plot_stride=2, - fs=10, - norm=None, -): - """ - Args: - in_seq (np.array): float value 0-1 - target_seq (np.array): float value 0-1 - pred_seq (np.array): float value 0-1 - interval_real_time (float): The minutes of each plot interval - """ - - in_seq = sevir_dataset.change_layout_np(in_seq, in_layout=layout).astype(np.float32) - target_seq = sevir_dataset.change_layout_np(target_seq, in_layout=layout).astype( - np.float32 - ) - pred_seq = sevir_dataset.change_layout_np(pred_seq, in_layout=layout).astype( - np.float32 - ) - fig_path = os.path.join(save_dir, f"{save_prefix}.png") - fig, ax = visualize_result( - in_seq=in_seq, - target_seq=target_seq, - pred_seq_list=[ - pred_seq, - ], - label_list=[ - label, - ], - interval_real_time=interval_real_time, - idx=idx, - plot_stride=plot_stride, - fs=fs, - norm=norm, - ) - plt.savefig(fig_path) - plt.close(fig) +import os +from typing import List + +import numpy as np +import sevir_cmap +from matplotlib import pyplot as plt +from matplotlib.colors import ListedColormap +from matplotlib.patches import Patch + +from ppsci.data.dataset import sevir_dataset + +HMF_COLORS = ( + np.array([[82, 82, 82], [252, 141, 89], [255, 255, 191], [145, 191, 219]]) / 255 +) + +THRESHOLDS = (0, 16, 74, 133, 160, 181, 219, 255) + + +def plot_hit_miss_fa(ax, y_true, y_pred, thres): + mask = np.zeros_like(y_true) + mask[np.logical_and(y_true >= thres, y_pred >= thres)] = 4 + mask[np.logical_and(y_true >= thres, y_pred < thres)] = 3 + mask[np.logical_and(y_true < thres, y_pred >= thres)] = 2 + mask[np.logical_and(y_true < thres, y_pred < thres)] = 1 + cmap = ListedColormap(HMF_COLORS) + ax.imshow(mask, cmap=cmap) + + +def plot_hit_miss_fa_all_thresholds(ax, y_true, y_pred, **unused_kwargs): + fig = np.zeros(y_true.shape) + y_true_idx = np.searchsorted(THRESHOLDS, y_true) + y_pred_idx = np.searchsorted(THRESHOLDS, y_pred) + fig[y_true_idx == y_pred_idx] = 4 + fig[y_true_idx > y_pred_idx] = 3 + fig[y_true_idx < y_pred_idx] = 2 + # do not count results in these not challenging areas. 
+ fig[np.logical_and(y_true < THRESHOLDS[1], y_pred < THRESHOLDS[1])] = 1 + cmap = ListedColormap(HMF_COLORS) + ax.imshow(fig, cmap=cmap) + + +def get_cmap_dict(s): + return { + "cmap": sevir_cmap.get_cmap(s, encoded=True)[0], + "norm": sevir_cmap.get_cmap(s, encoded=True)[1], + "vmin": sevir_cmap.get_cmap(s, encoded=True)[2], + "vmax": sevir_cmap.get_cmap(s, encoded=True)[3], + } + + +def visualize_result( + in_seq: np.array, + target_seq: np.array, + pred_seq_list: List[np.array], + label_list: List[str], + interval_real_time: float = 10.0, + idx=0, + norm=None, + plot_stride=2, + figsize=(24, 8), + fs=10, + vis_thresh=THRESHOLDS[2], + vis_hits_misses_fas=True, +): + """ + Args: + in_seq (np.array): + target_seq (np.array): + interval_real_time (float): The minutes of each plot interval + """ + + if norm is None: + norm = {"scale": 255, "shift": 0} + in_len = in_seq.shape[-1] + out_len = target_seq.shape[-1] + max_len = max(in_len, out_len) + ncols = (max_len - 1) // plot_stride + 1 + if vis_hits_misses_fas: + fig, ax = plt.subplots( + nrows=2 + 3 * len(pred_seq_list), ncols=ncols, figsize=figsize + ) + else: + fig, ax = plt.subplots( + nrows=2 + len(pred_seq_list), ncols=ncols, figsize=figsize + ) + + ax[0][0].set_ylabel("Inputs", fontsize=fs) + for i in range(0, max_len, plot_stride): + if i < in_len: + xt = in_seq[idx, :, :, i] * norm["scale"] + norm["shift"] + ax[0][i // plot_stride].imshow(xt, **get_cmap_dict("vil")) + else: + ax[0][i // plot_stride].axis("off") + + ax[1][0].set_ylabel("Target", fontsize=fs) + for i in range(0, max_len, plot_stride): + if i < out_len: + xt = target_seq[idx, :, :, i] * norm["scale"] + norm["shift"] + ax[1][i // plot_stride].imshow(xt, **get_cmap_dict("vil")) + else: + ax[1][i // plot_stride].axis("off") + + target_seq = target_seq[idx : idx + 1] * norm["scale"] + norm["shift"] + y_preds = [ + pred_seq[idx : idx + 1] * norm["scale"] + norm["shift"] + for pred_seq in pred_seq_list + ] + + # Plot model predictions + if vis_hits_misses_fas: + for k in range(len(pred_seq_list)): + for i in range(0, max_len, plot_stride): + if i < out_len: + ax[2 + 3 * k][i // plot_stride].imshow( + y_preds[k][0, :, :, i], **get_cmap_dict("vil") + ) + plot_hit_miss_fa( + ax[2 + 1 + 3 * k][i // plot_stride], + target_seq[0, :, :, i], + y_preds[k][0, :, :, i], + vis_thresh, + ) + plot_hit_miss_fa_all_thresholds( + ax[2 + 2 + 3 * k][i // plot_stride], + target_seq[0, :, :, i], + y_preds[k][0, :, :, i], + ) + else: + ax[2 + 3 * k][i // plot_stride].axis("off") + ax[2 + 1 + 3 * k][i // plot_stride].axis("off") + ax[2 + 2 + 3 * k][i // plot_stride].axis("off") + + ax[2 + 3 * k][0].set_ylabel(label_list[k] + "\nPrediction", fontsize=fs) + ax[2 + 1 + 3 * k][0].set_ylabel( + label_list[k] + f"\nScores\nThresh={vis_thresh}", fontsize=fs + ) + ax[2 + 2 + 3 * k][0].set_ylabel( + label_list[k] + "\nScores\nAll Thresh", fontsize=fs + ) + else: + for k in range(len(pred_seq_list)): + for i in range(0, max_len, plot_stride): + if i < out_len: + ax[2 + k][i // plot_stride].imshow( + y_preds[k][0, :, :, i], **get_cmap_dict("vil") + ) + else: + ax[2 + k][i // plot_stride].axis("off") + + ax[2 + k][0].set_ylabel(label_list[k] + "\nPrediction", fontsize=fs) + + for i in range(0, max_len, plot_stride): + if i < out_len: + ax[-1][i // plot_stride].set_title( + f"{int(interval_real_time * (i + plot_stride))} Minutes", y=-0.25 + ) + + for j in range(len(ax)): + for i in range(len(ax[j])): + ax[j][i].xaxis.set_ticks([]) + ax[j][i].yaxis.set_ticks([]) + + # Legend of thresholds + num_thresh_legend = 
len(sevir_cmap.VIL_LEVELS) - 1 + legend_elements = [ + Patch( + facecolor=sevir_cmap.VIL_COLORS[i], + label=f"{int(sevir_cmap.VIL_LEVELS[i - 1])}-{int(sevir_cmap.VIL_LEVELS[i])}", + ) + for i in range(1, num_thresh_legend + 1) + ] + ax[0][0].legend( + handles=legend_elements, + loc="center left", + bbox_to_anchor=(-1.2, -0.0), + borderaxespad=0, + frameon=False, + fontsize="10", + ) + if vis_hits_misses_fas: + # Legend of Hit, Miss and False Alarm + legend_elements = [ + Patch(facecolor=HMF_COLORS[3], edgecolor="k", label="Hit"), + Patch(facecolor=HMF_COLORS[2], edgecolor="k", label="Miss"), + Patch(facecolor=HMF_COLORS[1], edgecolor="k", label="False Alarm"), + ] + + ax[3][0].legend( + handles=legend_elements, + loc="center left", + bbox_to_anchor=(-2.2, -0.0), + borderaxespad=0, + frameon=False, + fontsize="16", + ) + + plt.subplots_adjust(hspace=0.05, wspace=0.05) + return fig, ax + + +def save_example_vis_results( + save_dir, + save_prefix, + in_seq, + target_seq, + pred_seq, + label, + layout="NHWT", + interval_real_time: float = 10.0, + idx=0, + plot_stride=2, + fs=10, + norm=None, +): + """ + Args: + in_seq (np.array): float value 0-1 + target_seq (np.array): float value 0-1 + pred_seq (np.array): float value 0-1 + interval_real_time (float): The minutes of each plot interval + """ + + in_seq = sevir_dataset.change_layout_np(in_seq, in_layout=layout).astype(np.float32) + target_seq = sevir_dataset.change_layout_np(target_seq, in_layout=layout).astype( + np.float32 + ) + pred_seq = sevir_dataset.change_layout_np(pred_seq, in_layout=layout).astype( + np.float32 + ) + fig_path = os.path.join(save_dir, f"{save_prefix}.png") + fig, ax = visualize_result( + in_seq=in_seq, + target_seq=target_seq, + pred_seq_list=[ + pred_seq, + ], + label_list=[ + label, + ], + interval_real_time=interval_real_time, + idx=idx, + plot_stride=plot_stride, + fs=fs, + norm=norm, + ) + plt.savefig(fig_path) + plt.close(fig) diff --git a/examples/epnn/conf/epnn.yaml b/examples/epnn/conf/epnn.yaml index 87853c24a6..e0e78211d2 100644 --- a/examples/epnn/conf/epnn.yaml +++ b/examples/epnn/conf/epnn.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -54,3 +55,60 @@ TRAIN: EVAL: pretrained_model_path: null eval_with_no_grad: true +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_epnn/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +DATASET_STATE: datasets/dstate-16-plas.dat +DATASET_STRESS: datasets/dstress-16-plas.dat +NTRAIN_SIZE: 40 + +# model settings +MODEL: + ihlayers: 3 + ineurons: 60 + +# training settings +TRAIN: + epochs: 10000 + iters_per_epoch: 1 + save_freq: 50 + eval_during_train: true + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + gamma: 0.97 + decay_steps: 10000000 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true +>>>>>>> Stashed changes diff --git 
a/examples/epnn/epnn.py b/examples/epnn/epnn.py index 0c161b1856..bf74e9df83 100755 --- a/examples/epnn/epnn.py +++ b/examples/epnn/epnn.py @@ -1,207 +1,207 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Reference: https://github.com/meghbali/ANNElastoplasticity -""" - -from os import path as osp - -import functions -import hydra -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - ( - input_dict_train, - label_dict_train, - input_dict_val, - label_dict_val, - ) = functions.get_data(cfg.DATASET_STATE, cfg.DATASET_STRESS, cfg.NTRAIN_SIZE) - model_list = functions.get_model_list( - cfg.MODEL.ihlayers, - cfg.MODEL.ineurons, - input_dict_train["state_x"][0].shape[1], - input_dict_train["state_y"][0].shape[1], - input_dict_train["stress_x"][0].shape[1], - ) - optimizer_list = functions.get_optimizer_list(model_list, cfg) - model_state_elasto, model_state_plastic, model_stress = model_list - model_list_obj = ppsci.arch.ModelList(model_list) - - def _transform_in_stress(_in): - return functions.transform_in_stress( - _in, model_state_elasto, "out_state_elasto" - ) - - model_state_elasto.register_input_transform(functions.transform_in) - model_state_plastic.register_input_transform(functions.transform_in) - model_stress.register_input_transform(_transform_in_stress) - model_stress.register_output_transform(functions.transform_out) - - output_keys = [ - "state_x", - "state_y", - "stress_x", - "stress_y", - "out_state_elasto", - "out_state_plastic", - "out_stress", - ] - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict_train, - "label": label_dict_train, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.train_loss_func), - {key: (lambda out, k=key: out[k]) for key in output_keys}, - name="sup_train", - ) - constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} - - sup_validator_pde = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict_val, - "label": label_dict_val, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.eval_loss_func), - {key: (lambda out, k=key: out[k]) for key in output_keys}, - metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, - name="sup_valid", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # initialize solver - solver = ppsci.solver.Solver( - model_list_obj, - constraint_pde, - cfg.output_dir, - optimizer_list, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator_pde, - 
eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # train model - solver.train() - functions.plotting(cfg.output_dir) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - ( - input_dict_train, - _, - input_dict_val, - label_dict_val, - ) = functions.get_data(cfg.DATASET_STATE, cfg.DATASET_STRESS, cfg.NTRAIN_SIZE) - model_list = functions.get_model_list( - cfg.MODEL.ihlayers, - cfg.MODEL.ineurons, - input_dict_train["state_x"][0].shape[1], - input_dict_train["state_y"][0].shape[1], - input_dict_train["stress_x"][0].shape[1], - ) - model_state_elasto, model_state_plastic, model_stress = model_list - model_list_obj = ppsci.arch.ModelList(model_list) - - def _transform_in_stress(_in): - return functions.transform_in_stress( - _in, model_state_elasto, "out_state_elasto" - ) - - model_state_elasto.register_input_transform(functions.transform_in) - model_state_plastic.register_input_transform(functions.transform_in) - model_stress.register_input_transform(_transform_in_stress) - model_stress.register_output_transform(functions.transform_out) - - output_keys = [ - "state_x", - "state_y", - "stress_x", - "stress_y", - "out_state_elasto", - "out_state_plastic", - "out_stress", - ] - sup_validator_pde = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict_val, - "label": label_dict_val, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.eval_loss_func), - {key: (lambda out, k=key: out[k]) for key in output_keys}, - metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, - name="sup_valid", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - functions.OUTPUT_DIR = cfg.output_dir - - # initialize solver - solver = ppsci.solver.Solver( - model_list_obj, - output_dir=cfg.output_dir, - validator=validator_pde, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="epnn.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
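Both the constraint and the validator above build their output expressions with `{key: (lambda out, k=key: out[k]) for key in output_keys}`; the `k=key` default argument is what ties each lambda to its own key instead of the comprehension's loop variable. A minimal, stand-alone sketch of the difference (the dictionaries below are illustrative only, not part of epnn.py):

    output_keys = ["state_x", "state_y", "out_stress"]   # illustrative subset
    sample = {"state_x": 1, "state_y": 2, "out_stress": 3}

    late_bound = {key: (lambda out: out[key]) for key in output_keys}
    default_bound = {key: (lambda out, k=key: out[k]) for key in output_keys}

    print(late_bound["state_x"](sample))     # 3 -- every closure reads the last value of `key`
    print(default_bound["state_x"](sample))  # 1 -- `k=key` freezes the key per iteration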
+ +""" +Reference: https://github.com/meghbali/ANNElastoplasticity +""" + +from os import path as osp + +import functions +import hydra +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + ( + input_dict_train, + label_dict_train, + input_dict_val, + label_dict_val, + ) = functions.get_data(cfg.DATASET_STATE, cfg.DATASET_STRESS, cfg.NTRAIN_SIZE) + model_list = functions.get_model_list( + cfg.MODEL.ihlayers, + cfg.MODEL.ineurons, + input_dict_train["state_x"][0].shape[1], + input_dict_train["state_y"][0].shape[1], + input_dict_train["stress_x"][0].shape[1], + ) + optimizer_list = functions.get_optimizer_list(model_list, cfg) + model_state_elasto, model_state_plastic, model_stress = model_list + model_list_obj = ppsci.arch.ModelList(model_list) + + def _transform_in_stress(_in): + return functions.transform_in_stress( + _in, model_state_elasto, "out_state_elasto" + ) + + model_state_elasto.register_input_transform(functions.transform_in) + model_state_plastic.register_input_transform(functions.transform_in) + model_stress.register_input_transform(_transform_in_stress) + model_stress.register_output_transform(functions.transform_out) + + output_keys = [ + "state_x", + "state_y", + "stress_x", + "stress_y", + "out_state_elasto", + "out_state_plastic", + "out_stress", + ] + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_train, + "label": label_dict_train, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.train_loss_func), + {key: (lambda out, k=key: out[k]) for key in output_keys}, + name="sup_train", + ) + constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} + + sup_validator_pde = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_val, + "label": label_dict_val, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.eval_loss_func), + {key: (lambda out, k=key: out[k]) for key in output_keys}, + metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, + name="sup_valid", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # initialize solver + solver = ppsci.solver.Solver( + model_list_obj, + constraint_pde, + cfg.output_dir, + optimizer_list, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator_pde, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # train model + solver.train() + functions.plotting(cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + ( + input_dict_train, + _, + input_dict_val, + label_dict_val, + ) = functions.get_data(cfg.DATASET_STATE, cfg.DATASET_STRESS, cfg.NTRAIN_SIZE) + model_list = functions.get_model_list( + cfg.MODEL.ihlayers, + cfg.MODEL.ineurons, + input_dict_train["state_x"][0].shape[1], + input_dict_train["state_y"][0].shape[1], + input_dict_train["stress_x"][0].shape[1], + ) + model_state_elasto, model_state_plastic, model_stress = model_list + 
model_list_obj = ppsci.arch.ModelList(model_list) + + def _transform_in_stress(_in): + return functions.transform_in_stress( + _in, model_state_elasto, "out_state_elasto" + ) + + model_state_elasto.register_input_transform(functions.transform_in) + model_state_plastic.register_input_transform(functions.transform_in) + model_stress.register_input_transform(_transform_in_stress) + model_stress.register_output_transform(functions.transform_out) + + output_keys = [ + "state_x", + "state_y", + "stress_x", + "stress_y", + "out_state_elasto", + "out_state_plastic", + "out_stress", + ] + sup_validator_pde = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_val, + "label": label_dict_val, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.eval_loss_func), + {key: (lambda out, k=key: out[k]) for key in output_keys}, + metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, + name="sup_valid", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + functions.OUTPUT_DIR = cfg.output_dir + + # initialize solver + solver = ppsci.solver.Solver( + model_list_obj, + output_dir=cfg.output_dir, + validator=validator_pde, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + solver.eval() + + +@hydra.main(version_base=None, config_path="./conf", config_name="epnn.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/epnn/functions.py b/examples/epnn/functions.py index 510ff29693..8762e2d34e 100644 --- a/examples/epnn/functions.py +++ b/examples/epnn/functions.py @@ -1,440 +1,440 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" Elasto-Plastic Neural Network (EPNN) - -DEVELOPED AT: - COMPUTATIONAL GEOMECHANICS LABORATORY - DEPARTMENT OF CIVIL ENGINEERING - UNIVERSITY OF CALGARY, AB, CANADA - DIRECTOR: Prof. Richard Wan - -DEVELOPED BY: - MAHDAD EGHBALIAN - -MIT License - -Copyright (c) 2022 Mahdad Eghbalian - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
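The loss computed in functions.py below maps normalized quantities back to their physical range via `(x + coeff2) * range / coeff1 + min` with `coeff1 = 2.0` and `coeff2 = 1.0`, i.e. what appears to be a [-1, 1] min-max convention. A small sketch of that mapping and its inverse, assuming `min`/`range` are the per-feature statistics loaded from the .dat files (the helper names are illustrative only):

    import numpy as np

    COEFF1, COEFF2 = 2.0, 1.0  # same constants as in loss_func

    def denormalize(x_norm, x_min, x_range):
        # [-1, 1] -> [x_min, x_min + x_range]
        return (x_norm + COEFF2) * x_range / COEFF1 + x_min

    def normalize(x_real, x_min, x_range):
        # inverse mapping: [x_min, x_min + x_range] -> [-1, 1]
        return COEFF1 * (x_real - x_min) / x_range - COEFF2

    x = np.linspace(-1.0, 1.0, 5)
    assert np.allclose(normalize(denormalize(x, 3.0, 4.0), 3.0, 4.0), x)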
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -""" - -import math -from typing import Dict - -import numpy as np -import paddle - -import ppsci -from ppsci.utils import logger - -# log for loss(total, state_elasto, state_plastic, stress), eval error(total, state_elasto, state_plastic, stress) -loss_log = {} # for plotting -eval_log = {} -plot_keys = {"total", "state_elasto", "state_plastic", "stress"} -for key in plot_keys: - loss_log[key] = [] - eval_log[key] = [] - - -# transform -def transform_in(input): - input_transformed = {} - for key in input: - input_transformed[key] = paddle.squeeze(input[key], axis=0) - return input_transformed - - -def transform_out(input, out): - # Add transformed input for computing loss - out.update(input) - return out - - -def transform_in_stress(input, model, out_key): - input_elasto = model(input)[out_key] - input_elasto = input_elasto.detach().clone() - input_transformed = {} - for key in input: - input_transformed[key] = paddle.squeeze(input[key], axis=0) - input_state_m = paddle.concat( - x=( - input_elasto, - paddle.index_select( - input_transformed["state_x"], - paddle.to_tensor([0, 1, 2, 3, 7, 8, 9, 10, 11, 12]), - axis=1, - ), - ), - axis=1, - ) - input_transformed["state_x_f"] = input_state_m - return input_transformed - - -common_param = [] -gkratio = paddle.to_tensor( - data=[[0.45]], dtype=paddle.get_default_dtype(), stop_gradient=False -) - - -def val_loss_criterion(x, y): - return { - "input": 100.0 - * ( - paddle.linalg.norm(x=x["input"] - y["input"]) - / paddle.linalg.norm(x=y["input"]) - ) - } - - -def train_loss_func(output_dict, *args) -> paddle.Tensor: - """For model calculation of loss in model.train(). - - Args: - output_dict (Dict[str, paddle.Tensor]): The output dict. - - Returns: - paddle.Tensor: Loss value. - """ - # Use ppsci.loss.MAELoss to replace paddle.nn.L1Loss - loss, loss_elasto, loss_plastic, loss_stress = loss_func( - output_dict, ppsci.loss.MAELoss() - ) - loss_log["total"].append(float(loss)) - loss_log["state_elasto"].append(float(loss_elasto)) - loss_log["state_plastic"].append(float(loss_plastic)) - loss_log["stress"].append(float(loss_stress)) - return {"train_loss": loss} - - -def eval_loss_func(output_dict, *args) -> paddle.Tensor: - """For model calculation of loss in model.eval(). - - Args: - output_dict (Dict[str, paddle.Tensor]): The output dict. - - Returns: - paddle.Tensor: Loss value. 
- """ - error, error_elasto, error_plastic, error_stress = loss_func( - output_dict, val_loss_criterion - ) - eval_log["total"].append(float(error)) - eval_log["state_elasto"].append(float(error_elasto)) - eval_log["state_plastic"].append(float(error_plastic)) - eval_log["stress"].append(float(error_stress)) - logger.message( - f"error(total): {float(error)}, error(error_elasto): {float(error_elasto)}, error(error_plastic): {float(error_plastic)}, error(error_stress): {float(error_stress)}" - ) - return {"eval_loss": error} - - -def metric_expr(output_dict, *args) -> Dict[str, paddle.Tensor]: - return {"dummy_loss": paddle.full([], 0.0)} - - -def loss_func(output_dict, criterion) -> paddle.Tensor: - ( - min_elasto, - min_plastic, - range_elasto, - range_plastic, - min_stress, - range_stress, - ) = common_param - - coeff1 = 2.0 - coeff2 = 1.0 - input_elasto = output_dict["out_state_elasto"] - input_plastic = output_dict["out_state_plastic"] - input_stress = output_dict["out_stress"] - target_elasto = output_dict["state_y"][:, 0:1] - target_plastic = output_dict["state_y"][:, 1:4] - loss_elasto = criterion({"input": input_elasto}, {"input": target_elasto}) - loss_plastic = criterion({"input": input_plastic}, {"input": target_plastic}) - oneten_state = paddle.ones(shape=[3, 1], dtype=paddle.get_default_dtype()) - oneten_stress = paddle.ones( - shape=[output_dict["stress_y"].shape[0], output_dict["stress_y"].shape[1]], - dtype=paddle.get_default_dtype(), - ) - dstrain = output_dict["state_x"][:, 10:] - dstrain_real = ( - paddle.multiply(x=dstrain + coeff2, y=paddle.to_tensor(range_stress)) / coeff1 - + min_stress - ) - # predict label - dstrainpl = target_plastic - dstrainpl_real = ( - paddle.multiply(x=dstrainpl + coeff2, y=paddle.to_tensor(range_elasto[1:4])) - / coeff1 - + min_elasto[1:4] - ) - # evaluate label - dstrainel = dstrain_real - dstrainpl_real - mu = paddle.multiply(x=gkratio, y=paddle.to_tensor(input_stress[:, 0:1])) - mu_dstrainel = 2.0 * paddle.multiply(x=mu, y=paddle.to_tensor(dstrainel)) - stress_dstrainel = paddle.multiply( - x=input_stress[:, 0:1] - 2.0 / 3.0 * mu, - y=paddle.to_tensor( - paddle.multiply( - x=paddle.matmul(x=dstrainel, y=oneten_state), - y=paddle.to_tensor(oneten_stress), - ) - ), - ) - input_stress = ( - coeff1 - * paddle.divide( - x=mu_dstrainel + stress_dstrainel - min_plastic, - y=paddle.to_tensor(range_plastic), - ) - - coeff2 - ) - target_stress = output_dict["stress_y"] - loss_stress = criterion({"input": input_stress}, {"input": target_stress}) - loss = loss_elasto["input"] + loss_plastic["input"] + loss_stress["input"] - return loss, loss_elasto["input"], loss_plastic["input"], loss_stress["input"] - - -class Dataset: - def __init__(self, data_state, data_stress, itrain): - self.data_state = data_state - self.data_stress = data_stress - self.itrain = itrain - - def _cvt_to_ndarray(self, list_dict): - for key in list_dict: - list_dict[key] = np.asarray(list_dict[key]) - return list_dict - - def get(self, epochs=1): - # Slow if using BatchSampler to obtain data - input_dict_train = { - "state_x": [], - "state_y": [], - "stress_x": [], - "stress_y": [], - } - input_dict_val = { - "state_x": [], - "state_y": [], - "stress_x": [], - "stress_y": [], - } - label_dict_train = {"dummy_loss": []} - label_dict_val = {"dummy_loss": []} - for i in range(epochs): - shuffled_indices = paddle.randperm( - n=self.data_state.x_train.shape[0] - ).numpy() - input_dict_train["state_x"].append( - self.data_state.x_train[shuffled_indices[0 : self.itrain]] - ) - 
input_dict_train["state_y"].append( - self.data_state.y_train[shuffled_indices[0 : self.itrain]] - ) - input_dict_train["stress_x"].append( - self.data_stress.x_train[shuffled_indices[0 : self.itrain]] - ) - input_dict_train["stress_y"].append( - self.data_stress.y_train[shuffled_indices[0 : self.itrain]] - ) - label_dict_train["dummy_loss"].append(0.0) - - shuffled_indices = paddle.randperm(n=self.data_state.x_valid.shape[0]).numpy() - input_dict_val["state_x"].append( - self.data_state.x_valid[shuffled_indices[0 : self.itrain]] - ) - input_dict_val["state_y"].append( - self.data_state.y_valid[shuffled_indices[0 : self.itrain]] - ) - input_dict_val["stress_x"].append( - self.data_stress.x_valid[shuffled_indices[0 : self.itrain]] - ) - input_dict_val["stress_y"].append( - self.data_stress.y_valid[shuffled_indices[0 : self.itrain]] - ) - label_dict_val["dummy_loss"].append(0.0) - input_dict_train = self._cvt_to_ndarray(input_dict_train) - label_dict_train = self._cvt_to_ndarray(label_dict_train) - input_dict_val = self._cvt_to_ndarray(input_dict_val) - label_dict_val = self._cvt_to_ndarray(label_dict_val) - return input_dict_train, label_dict_train, input_dict_val, label_dict_val - - -class Data: - def __init__(self, dataset_path, train_p=0.6, cross_valid_p=0.2, test_p=0.2): - data = ppsci.utils.reader.load_dat_file(dataset_path) - self.x = data["X"] - self.y = data["y"] - self.train_p = train_p - self.cross_valid_p = cross_valid_p - self.test_p = test_p - - def get_shuffled_data(self): - # Need to set the seed, otherwise the loss will not match the precision - ppsci.utils.misc.set_random_seed(seed=10) - shuffled_indices = paddle.randperm(n=self.x.shape[0]).numpy() - n_train = math.floor(self.train_p * self.x.shape[0]) - n_cross_valid = math.floor(self.cross_valid_p * self.x.shape[0]) - n_test = math.floor(self.test_p * self.x.shape[0]) - self.x_train = self.x[shuffled_indices[0:n_train]] - self.y_train = self.y[shuffled_indices[0:n_train]] - self.x_valid = self.x[shuffled_indices[n_train : n_train + n_cross_valid]] - self.y_valid = self.y[shuffled_indices[n_train : n_train + n_cross_valid]] - self.x_test = self.x[ - shuffled_indices[n_train + n_cross_valid : n_train + n_cross_valid + n_test] - ] - self.y_test = self.y[ - shuffled_indices[n_train + n_cross_valid : n_train + n_cross_valid + n_test] - ] - - -def get_data(dataset_state, dataset_stress, ntrain_size): - set_common_param(dataset_state, dataset_stress) - - data_state = Data(dataset_state) - data_stress = Data(dataset_stress) - data_state.get_shuffled_data() - data_stress.get_shuffled_data() - - train_size_log10 = np.linspace( - 1, np.log10(data_state.x_train.shape[0]), num=ntrain_size - ) - train_size_float = 10**train_size_log10 - train_size = train_size_float.astype(int) - itrain = train_size[ntrain_size - 1] - - return Dataset(data_state, data_stress, itrain).get(10) - - -def set_common_param(dataset_state, dataset_stress): - get_data = ppsci.utils.reader.load_dat_file(dataset_state) - min_state = paddle.to_tensor(data=get_data["miny"]) - range_state = paddle.to_tensor(data=get_data["rangey"]) - min_dstrain = paddle.to_tensor(data=get_data["minx"][10:]) - range_dstrain = paddle.to_tensor(data=get_data["rangex"][10:]) - get_data = ppsci.utils.reader.load_dat_file(dataset_stress) - min_stress = paddle.to_tensor(data=get_data["miny"]) - range_stress = paddle.to_tensor(data=get_data["rangey"]) - common_param.extend( - [ - min_state, - min_stress, - range_state, - range_stress, - min_dstrain, - range_dstrain, - ] - ) - - -def 
get_model_list( - nhlayers, nneurons, state_x_output_size, state_y_output_size, stress_x_output_size -): - NHLAYERS_PLASTIC = 4 - NNEURONS_PLASTIC = 75 - hl_nodes_elasto = [nneurons] * nhlayers - hl_nodes_plastic = [NNEURONS_PLASTIC] * NHLAYERS_PLASTIC - node_sizes_state_elasto = [state_x_output_size] - node_sizes_state_plastic = [state_x_output_size] - node_sizes_stress = [stress_x_output_size + state_y_output_size - 6] - node_sizes_state_elasto.extend(hl_nodes_elasto) - node_sizes_state_plastic.extend(hl_nodes_plastic) - node_sizes_stress.extend(hl_nodes_elasto) - node_sizes_state_elasto.extend([state_y_output_size - 3]) - node_sizes_state_plastic.extend([state_y_output_size - 1]) - node_sizes_stress.extend([1]) - - activation_elasto = "leaky_relu" - activation_plastic = "leaky_relu" - activations_elasto = [activation_elasto] - activations_plastic = [activation_plastic] - activations_elasto.extend([activation_elasto for ii in range(nhlayers)]) - activations_plastic.extend([activation_plastic for ii in range(NHLAYERS_PLASTIC)]) - activations_elasto.extend([activation_elasto]) - activations_plastic.extend([activation_plastic]) - drop_p = 0.0 - n_state_elasto = ppsci.arch.Epnn( - ("state_x",), - ("out_state_elasto",), - tuple(node_sizes_state_elasto), - tuple(activations_elasto), - drop_p, - ) - n_state_plastic = ppsci.arch.Epnn( - ("state_x",), - ("out_state_plastic",), - tuple(node_sizes_state_plastic), - tuple(activations_plastic), - drop_p, - ) - n_stress = ppsci.arch.Epnn( - ("state_x_f",), - ("out_stress",), - tuple(node_sizes_stress), - tuple(activations_elasto), - drop_p, - ) - return (n_state_elasto, n_state_plastic, n_stress) - - -def get_optimizer_list(model_list, cfg): - optimizer_list = [] - lr_list = [0.001, 0.001, 0.01] - for i, model in enumerate(model_list): - scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler, learning_rate=lr_list[i] - )() - optimizer_list.append( - ppsci.optimizer.Adam(learning_rate=scheduler, weight_decay=0.0)(model) - ) - - scheduler_ratio = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler, learning_rate=0.001 - )() - optimizer_list.append( - paddle.optimizer.Adam( - parameters=[gkratio], learning_rate=scheduler_ratio, weight_decay=0.0 - ) - ) - return ppsci.optimizer.OptimizerList(optimizer_list) - - -def plotting(output_dir): - ppsci.utils.misc.plot_curve( - data=eval_log, - xlabel="Epoch", - ylabel="Training Eval", - output_dir=output_dir, - smooth_step=1, - use_semilogy=True, - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" Elasto-Plastic Neural Network (EPNN) + +DEVELOPED AT: + COMPUTATIONAL GEOMECHANICS LABORATORY + DEPARTMENT OF CIVIL ENGINEERING + UNIVERSITY OF CALGARY, AB, CANADA + DIRECTOR: Prof. 
Richard Wan + +DEVELOPED BY: + MAHDAD EGHBALIAN + +MIT License + +Copyright (c) 2022 Mahdad Eghbalian + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" + +import math +from typing import Dict + +import numpy as np +import paddle + +import ppsci +from ppsci.utils import logger + +# log for loss(total, state_elasto, state_plastic, stress), eval error(total, state_elasto, state_plastic, stress) +loss_log = {} # for plotting +eval_log = {} +plot_keys = {"total", "state_elasto", "state_plastic", "stress"} +for key in plot_keys: + loss_log[key] = [] + eval_log[key] = [] + + +# transform +def transform_in(input): + input_transformed = {} + for key in input: + input_transformed[key] = paddle.squeeze(input[key], axis=0) + return input_transformed + + +def transform_out(input, out): + # Add transformed input for computing loss + out.update(input) + return out + + +def transform_in_stress(input, model, out_key): + input_elasto = model(input)[out_key] + input_elasto = input_elasto.detach().clone() + input_transformed = {} + for key in input: + input_transformed[key] = paddle.squeeze(input[key], axis=0) + input_state_m = paddle.concat( + x=( + input_elasto, + paddle.index_select( + input_transformed["state_x"], + paddle.to_tensor([0, 1, 2, 3, 7, 8, 9, 10, 11, 12]), + axis=1, + ), + ), + axis=1, + ) + input_transformed["state_x_f"] = input_state_m + return input_transformed + + +common_param = [] +gkratio = paddle.to_tensor( + data=[[0.45]], dtype=paddle.get_default_dtype(), stop_gradient=False +) + + +def val_loss_criterion(x, y): + return { + "input": 100.0 + * ( + paddle.linalg.norm(x=x["input"] - y["input"]) + / paddle.linalg.norm(x=y["input"]) + ) + } + + +def train_loss_func(output_dict, *args) -> paddle.Tensor: + """For model calculation of loss in model.train(). + + Args: + output_dict (Dict[str, paddle.Tensor]): The output dict. + + Returns: + paddle.Tensor: Loss value. + """ + # Use ppsci.loss.MAELoss to replace paddle.nn.L1Loss + loss, loss_elasto, loss_plastic, loss_stress = loss_func( + output_dict, ppsci.loss.MAELoss() + ) + loss_log["total"].append(float(loss)) + loss_log["state_elasto"].append(float(loss_elasto)) + loss_log["state_plastic"].append(float(loss_plastic)) + loss_log["stress"].append(float(loss_stress)) + return {"train_loss": loss} + + +def eval_loss_func(output_dict, *args) -> paddle.Tensor: + """For model calculation of loss in model.eval(). + + Args: + output_dict (Dict[str, paddle.Tensor]): The output dict. + + Returns: + paddle.Tensor: Loss value. 
+ """ + error, error_elasto, error_plastic, error_stress = loss_func( + output_dict, val_loss_criterion + ) + eval_log["total"].append(float(error)) + eval_log["state_elasto"].append(float(error_elasto)) + eval_log["state_plastic"].append(float(error_plastic)) + eval_log["stress"].append(float(error_stress)) + logger.message( + f"error(total): {float(error)}, error(error_elasto): {float(error_elasto)}, error(error_plastic): {float(error_plastic)}, error(error_stress): {float(error_stress)}" + ) + return {"eval_loss": error} + + +def metric_expr(output_dict, *args) -> Dict[str, paddle.Tensor]: + return {"dummy_loss": paddle.full([], 0.0)} + + +def loss_func(output_dict, criterion) -> paddle.Tensor: + ( + min_elasto, + min_plastic, + range_elasto, + range_plastic, + min_stress, + range_stress, + ) = common_param + + coeff1 = 2.0 + coeff2 = 1.0 + input_elasto = output_dict["out_state_elasto"] + input_plastic = output_dict["out_state_plastic"] + input_stress = output_dict["out_stress"] + target_elasto = output_dict["state_y"][:, 0:1] + target_plastic = output_dict["state_y"][:, 1:4] + loss_elasto = criterion({"input": input_elasto}, {"input": target_elasto}) + loss_plastic = criterion({"input": input_plastic}, {"input": target_plastic}) + oneten_state = paddle.ones(shape=[3, 1], dtype=paddle.get_default_dtype()) + oneten_stress = paddle.ones( + shape=[output_dict["stress_y"].shape[0], output_dict["stress_y"].shape[1]], + dtype=paddle.get_default_dtype(), + ) + dstrain = output_dict["state_x"][:, 10:] + dstrain_real = ( + paddle.multiply(x=dstrain + coeff2, y=paddle.to_tensor(range_stress)) / coeff1 + + min_stress + ) + # predict label + dstrainpl = target_plastic + dstrainpl_real = ( + paddle.multiply(x=dstrainpl + coeff2, y=paddle.to_tensor(range_elasto[1:4])) + / coeff1 + + min_elasto[1:4] + ) + # evaluate label + dstrainel = dstrain_real - dstrainpl_real + mu = paddle.multiply(x=gkratio, y=paddle.to_tensor(input_stress[:, 0:1])) + mu_dstrainel = 2.0 * paddle.multiply(x=mu, y=paddle.to_tensor(dstrainel)) + stress_dstrainel = paddle.multiply( + x=input_stress[:, 0:1] - 2.0 / 3.0 * mu, + y=paddle.to_tensor( + paddle.multiply( + x=paddle.matmul(x=dstrainel, y=oneten_state), + y=paddle.to_tensor(oneten_stress), + ) + ), + ) + input_stress = ( + coeff1 + * paddle.divide( + x=mu_dstrainel + stress_dstrainel - min_plastic, + y=paddle.to_tensor(range_plastic), + ) + - coeff2 + ) + target_stress = output_dict["stress_y"] + loss_stress = criterion({"input": input_stress}, {"input": target_stress}) + loss = loss_elasto["input"] + loss_plastic["input"] + loss_stress["input"] + return loss, loss_elasto["input"], loss_plastic["input"], loss_stress["input"] + + +class Dataset: + def __init__(self, data_state, data_stress, itrain): + self.data_state = data_state + self.data_stress = data_stress + self.itrain = itrain + + def _cvt_to_ndarray(self, list_dict): + for key in list_dict: + list_dict[key] = np.asarray(list_dict[key]) + return list_dict + + def get(self, epochs=1): + # Slow if using BatchSampler to obtain data + input_dict_train = { + "state_x": [], + "state_y": [], + "stress_x": [], + "stress_y": [], + } + input_dict_val = { + "state_x": [], + "state_y": [], + "stress_x": [], + "stress_y": [], + } + label_dict_train = {"dummy_loss": []} + label_dict_val = {"dummy_loss": []} + for i in range(epochs): + shuffled_indices = paddle.randperm( + n=self.data_state.x_train.shape[0] + ).numpy() + input_dict_train["state_x"].append( + self.data_state.x_train[shuffled_indices[0 : self.itrain]] + ) + 
input_dict_train["state_y"].append( + self.data_state.y_train[shuffled_indices[0 : self.itrain]] + ) + input_dict_train["stress_x"].append( + self.data_stress.x_train[shuffled_indices[0 : self.itrain]] + ) + input_dict_train["stress_y"].append( + self.data_stress.y_train[shuffled_indices[0 : self.itrain]] + ) + label_dict_train["dummy_loss"].append(0.0) + + shuffled_indices = paddle.randperm(n=self.data_state.x_valid.shape[0]).numpy() + input_dict_val["state_x"].append( + self.data_state.x_valid[shuffled_indices[0 : self.itrain]] + ) + input_dict_val["state_y"].append( + self.data_state.y_valid[shuffled_indices[0 : self.itrain]] + ) + input_dict_val["stress_x"].append( + self.data_stress.x_valid[shuffled_indices[0 : self.itrain]] + ) + input_dict_val["stress_y"].append( + self.data_stress.y_valid[shuffled_indices[0 : self.itrain]] + ) + label_dict_val["dummy_loss"].append(0.0) + input_dict_train = self._cvt_to_ndarray(input_dict_train) + label_dict_train = self._cvt_to_ndarray(label_dict_train) + input_dict_val = self._cvt_to_ndarray(input_dict_val) + label_dict_val = self._cvt_to_ndarray(label_dict_val) + return input_dict_train, label_dict_train, input_dict_val, label_dict_val + + +class Data: + def __init__(self, dataset_path, train_p=0.6, cross_valid_p=0.2, test_p=0.2): + data = ppsci.utils.reader.load_dat_file(dataset_path) + self.x = data["X"] + self.y = data["y"] + self.train_p = train_p + self.cross_valid_p = cross_valid_p + self.test_p = test_p + + def get_shuffled_data(self): + # Need to set the seed, otherwise the loss will not match the precision + ppsci.utils.misc.set_random_seed(seed=10) + shuffled_indices = paddle.randperm(n=self.x.shape[0]).numpy() + n_train = math.floor(self.train_p * self.x.shape[0]) + n_cross_valid = math.floor(self.cross_valid_p * self.x.shape[0]) + n_test = math.floor(self.test_p * self.x.shape[0]) + self.x_train = self.x[shuffled_indices[0:n_train]] + self.y_train = self.y[shuffled_indices[0:n_train]] + self.x_valid = self.x[shuffled_indices[n_train : n_train + n_cross_valid]] + self.y_valid = self.y[shuffled_indices[n_train : n_train + n_cross_valid]] + self.x_test = self.x[ + shuffled_indices[n_train + n_cross_valid : n_train + n_cross_valid + n_test] + ] + self.y_test = self.y[ + shuffled_indices[n_train + n_cross_valid : n_train + n_cross_valid + n_test] + ] + + +def get_data(dataset_state, dataset_stress, ntrain_size): + set_common_param(dataset_state, dataset_stress) + + data_state = Data(dataset_state) + data_stress = Data(dataset_stress) + data_state.get_shuffled_data() + data_stress.get_shuffled_data() + + train_size_log10 = np.linspace( + 1, np.log10(data_state.x_train.shape[0]), num=ntrain_size + ) + train_size_float = 10**train_size_log10 + train_size = train_size_float.astype(int) + itrain = train_size[ntrain_size - 1] + + return Dataset(data_state, data_stress, itrain).get(10) + + +def set_common_param(dataset_state, dataset_stress): + get_data = ppsci.utils.reader.load_dat_file(dataset_state) + min_state = paddle.to_tensor(data=get_data["miny"]) + range_state = paddle.to_tensor(data=get_data["rangey"]) + min_dstrain = paddle.to_tensor(data=get_data["minx"][10:]) + range_dstrain = paddle.to_tensor(data=get_data["rangex"][10:]) + get_data = ppsci.utils.reader.load_dat_file(dataset_stress) + min_stress = paddle.to_tensor(data=get_data["miny"]) + range_stress = paddle.to_tensor(data=get_data["rangey"]) + common_param.extend( + [ + min_state, + min_stress, + range_state, + range_stress, + min_dstrain, + range_dstrain, + ] + ) + + +def 
get_model_list( + nhlayers, nneurons, state_x_output_size, state_y_output_size, stress_x_output_size +): + NHLAYERS_PLASTIC = 4 + NNEURONS_PLASTIC = 75 + hl_nodes_elasto = [nneurons] * nhlayers + hl_nodes_plastic = [NNEURONS_PLASTIC] * NHLAYERS_PLASTIC + node_sizes_state_elasto = [state_x_output_size] + node_sizes_state_plastic = [state_x_output_size] + node_sizes_stress = [stress_x_output_size + state_y_output_size - 6] + node_sizes_state_elasto.extend(hl_nodes_elasto) + node_sizes_state_plastic.extend(hl_nodes_plastic) + node_sizes_stress.extend(hl_nodes_elasto) + node_sizes_state_elasto.extend([state_y_output_size - 3]) + node_sizes_state_plastic.extend([state_y_output_size - 1]) + node_sizes_stress.extend([1]) + + activation_elasto = "leaky_relu" + activation_plastic = "leaky_relu" + activations_elasto = [activation_elasto] + activations_plastic = [activation_plastic] + activations_elasto.extend([activation_elasto for ii in range(nhlayers)]) + activations_plastic.extend([activation_plastic for ii in range(NHLAYERS_PLASTIC)]) + activations_elasto.extend([activation_elasto]) + activations_plastic.extend([activation_plastic]) + drop_p = 0.0 + n_state_elasto = ppsci.arch.Epnn( + ("state_x",), + ("out_state_elasto",), + tuple(node_sizes_state_elasto), + tuple(activations_elasto), + drop_p, + ) + n_state_plastic = ppsci.arch.Epnn( + ("state_x",), + ("out_state_plastic",), + tuple(node_sizes_state_plastic), + tuple(activations_plastic), + drop_p, + ) + n_stress = ppsci.arch.Epnn( + ("state_x_f",), + ("out_stress",), + tuple(node_sizes_stress), + tuple(activations_elasto), + drop_p, + ) + return (n_state_elasto, n_state_plastic, n_stress) + + +def get_optimizer_list(model_list, cfg): + optimizer_list = [] + lr_list = [0.001, 0.001, 0.01] + for i, model in enumerate(model_list): + scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler, learning_rate=lr_list[i] + )() + optimizer_list.append( + ppsci.optimizer.Adam(learning_rate=scheduler, weight_decay=0.0)(model) + ) + + scheduler_ratio = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler, learning_rate=0.001 + )() + optimizer_list.append( + paddle.optimizer.Adam( + parameters=[gkratio], learning_rate=scheduler_ratio, weight_decay=0.0 + ) + ) + return ppsci.optimizer.OptimizerList(optimizer_list) + + +def plotting(output_dir): + ppsci.utils.misc.plot_curve( + data=eval_log, + xlabel="Epoch", + ylabel="Training Eval", + output_dir=output_dir, + smooth_step=1, + use_semilogy=True, + ) diff --git a/examples/euler_beam/conf/euler_beam.yaml b/examples/euler_beam/conf/euler_beam.yaml index 6827f22514..7d9a378193 100644 --- a/examples/euler_beam/conf/euler_beam.yaml +++ b/examples/euler_beam/conf/euler_beam.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -78,3 +79,86 @@ INFER: num_cpu_threads: 4 total_size: 100 batch_size: 100 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_euler_beam/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - INFER.export_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + 
dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 +to_static: false + +# set working condition +q: -1.0 +D: 1.0 + +# model settings +MODEL: + input_keys: ["x"] + output_keys: ["u"] + num_layers: 3 + hidden_size: 20 + +# training settings +TRAIN: + epochs: 10000 + iters_per_epoch: 1 + save_freq: 1000 + eval_during_train: true + eval_freq: 1000 + learning_rate: 1.0e-3 + batch_size: + pde: 100 + bc: 4 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + total_size: 100 + +INFER: + pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/euler_beam/euler_beam_pretrained.pdparams" + export_path: ./inference/euler_beam + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 100 + num_cpu_threads: 4 + total_size: 100 + batch_size: 100 +>>>>>>> Stashed changes diff --git a/examples/euler_beam/euler_beam.py b/examples/euler_beam/euler_beam.py index 48cf3ac63f..6bf0a60e4a 100644 --- a/examples/euler_beam/euler_beam.py +++ b/examples/euler_beam/euler_beam.py @@ -1,267 +1,267 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
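The config above leans on OmegaConf interpolation: `pdmodel_path: ${INFER.export_path}.pdmodel` stays in sync with whatever `INFER.export_path` is set or overridden to, whereas `${hydra:run.dir}` and `${now:...}` are resolvers that only exist while Hydra composes the file. A minimal sketch of the plain interpolation part (values copied from the block above; the override at the end is hypothetical):

    from omegaconf import OmegaConf

    cfg = OmegaConf.create(
        {
            "INFER": {
                "export_path": "./inference/euler_beam",
                "pdmodel_path": "${INFER.export_path}.pdmodel",
                "pdiparams_path": "${INFER.export_path}.pdiparams",
            }
        }
    )
    print(cfg.INFER.pdmodel_path)             # ./inference/euler_beam.pdmodel
    cfg.INFER.export_path = "./inference/beam_v2"
    print(cfg.INFER.pdmodel_path)             # ./inference/beam_v2.pdmodel -- resolved lazily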
- -import hydra -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set geometry - geom = {"interval": ppsci.geometry.Interval(0, 1)} - - # set equation(s) - equation = {"biharmonic": ppsci.equation.Biharmonic(dim=1, q=cfg.q, D=cfg.D)} - - # set dataloader config - dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - # set constraint - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["biharmonic"].equations, - {"biharmonic": 0}, - geom["interval"], - {**dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.pde}, - ppsci.loss.MSELoss(), - random="Hammersley", - name="EQ", - ) - bc = ppsci.constraint.BoundaryConstraint( - { - "u0": lambda d: d["u"][0:1], - "u__x": lambda d: jacobian(d["u"], d["x"])[1:2], - "u__x__x": lambda d: hessian(d["u"], d["x"])[2:3], - "u__x__x__x": lambda d: jacobian(hessian(d["u"], d["x"]), d["x"])[3:4], - }, - {"u0": 0, "u__x": 0, "u__x__x": 0, "u__x__x__x": 0}, - geom["interval"], - {**dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, - ppsci.loss.MSELoss("sum"), - evenly=True, - name="BC", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - bc.name: bc, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - def u_solution_func(out): - """compute ground truth for u as label data""" - x = out["x"] - return -(x**4) / 24 + x**3 / 6 - x**2 / 4 - - l2_rel_metric = ppsci.validate.GeometryValidator( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["interval"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": cfg.EVAL.total_size, - }, - ppsci.loss.MSELoss(), - evenly=True, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="L2Rel_Metric", - ) - validator = {l2_rel_metric.name: l2_rel_metric} - - # set visualizer(optional) - visu_points = geom["interval"].sample_interior(cfg.EVAL.total_size, evenly=True) - visualizer = { - "visualize_u": ppsci.visualize.VisualizerScatter1D( - visu_points, - ("x",), - { - "u_label": lambda d: u_solution_func(d), - "u_pred": lambda d: d["u"], - }, - num_timestamps=1, - prefix="result_u", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - seed=cfg.seed, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - to_static=cfg.to_static, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set geometry - geom = {"interval": ppsci.geometry.Interval(0, 1)} - - # set equation(s) - equation = {"biharmonic": ppsci.equation.Biharmonic(dim=1, q=cfg.q, D=cfg.D)} - - # set validator - def u_solution_func(out): - """compute ground truth for u as label data""" - x = out["x"] - return -(x**4) / 24 + x**3 / 6 - x**2 / 4 - - l2_rel_metric = ppsci.validate.GeometryValidator( - {"u": lambda out: out["u"]}, - {"u": 
u_solution_func}, - geom["interval"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": cfg.EVAL.total_size, - }, - ppsci.loss.MSELoss(), - evenly=True, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="L2Rel_Metric", - ) - validator = {l2_rel_metric.name: l2_rel_metric} - - # set visualizer(optional) - visu_points = geom["interval"].sample_interior(cfg.EVAL.total_size, evenly=True) - visualizer = { - "visualize_u": ppsci.visualize.VisualizerScatter1D( - visu_points, - ("x",), - { - "u_label": lambda d: u_solution_func(d), - "u_pred": lambda d: d["u"], - }, - num_timestamps=1, - prefix="result_u", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - None, - cfg.output_dir, - None, - seed=cfg.seed, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - to_static=cfg.to_static, - ) - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - geom = {"interval": ppsci.geometry.Interval(0, 1)} - input_dict = geom["interval"].sample_interior(cfg.INFER.total_size, evenly=True) - - output_dict = predictor.predict({"x": input_dict["x"]}, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - def u_solution_func(out): - """compute ground truth for u as label data""" - x = out["x"] - return -(x**4) / 24 + x**3 / 6 - x**2 / 4 - - ppsci.visualize.save_plot_from_1d_dict( - "./euler_beam_pred", - {**input_dict, **output_dict, "u_label": u_solution_func(input_dict)}, - ("x",), - ("u", "u_label"), - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="euler_beam.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
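The analytic solution used as label data in euler_beam.py, u(x) = -x^4/24 + x^3/6 - x^2/4, can be checked symbolically against the biharmonic residual D*u'''' - q under the working condition q = -1, D = 1 from the config, together with the four boundary values the BoundaryConstraint pins to zero (read here as the usual cantilever conditions u = u' = 0 at x = 0 and u'' = u''' = 0 at x = 1; the exact index-to-endpoint mapping in the code is an assumption). A quick check, assuming sympy is available:

    import sympy as sp

    x = sp.symbols("x")
    q, D = -1, 1                                  # working condition from euler_beam.yaml
    u = -x**4 / 24 + x**3 / 6 - x**2 / 4          # same expression as u_solution_func

    assert sp.simplify(D * sp.diff(u, x, 4) - q) == 0   # interior residual vanishes
    assert u.subs(x, 0) == 0                            # u(0)    = 0
    assert sp.diff(u, x).subs(x, 0) == 0                # u'(0)   = 0
    assert sp.diff(u, x, 2).subs(x, 1) == 0             # u''(1)  = 0
    assert sp.diff(u, x, 3).subs(x, 1) == 0             # u'''(1) = 0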
+ +import hydra +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set geometry + geom = {"interval": ppsci.geometry.Interval(0, 1)} + + # set equation(s) + equation = {"biharmonic": ppsci.equation.Biharmonic(dim=1, q=cfg.q, D=cfg.D)} + + # set dataloader config + dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + # set constraint + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["biharmonic"].equations, + {"biharmonic": 0}, + geom["interval"], + {**dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.pde}, + ppsci.loss.MSELoss(), + random="Hammersley", + name="EQ", + ) + bc = ppsci.constraint.BoundaryConstraint( + { + "u0": lambda d: d["u"][0:1], + "u__x": lambda d: jacobian(d["u"], d["x"])[1:2], + "u__x__x": lambda d: hessian(d["u"], d["x"])[2:3], + "u__x__x__x": lambda d: jacobian(hessian(d["u"], d["x"]), d["x"])[3:4], + }, + {"u0": 0, "u__x": 0, "u__x__x": 0, "u__x__x__x": 0}, + geom["interval"], + {**dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc}, + ppsci.loss.MSELoss("sum"), + evenly=True, + name="BC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + bc.name: bc, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + def u_solution_func(out): + """compute ground truth for u as label data""" + x = out["x"] + return -(x**4) / 24 + x**3 / 6 - x**2 / 4 + + l2_rel_metric = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["interval"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": cfg.EVAL.total_size, + }, + ppsci.loss.MSELoss(), + evenly=True, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="L2Rel_Metric", + ) + validator = {l2_rel_metric.name: l2_rel_metric} + + # set visualizer(optional) + visu_points = geom["interval"].sample_interior(cfg.EVAL.total_size, evenly=True) + visualizer = { + "visualize_u": ppsci.visualize.VisualizerScatter1D( + visu_points, + ("x",), + { + "u_label": lambda d: u_solution_func(d), + "u_pred": lambda d: d["u"], + }, + num_timestamps=1, + prefix="result_u", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + seed=cfg.seed, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + to_static=cfg.to_static, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set geometry + geom = {"interval": ppsci.geometry.Interval(0, 1)} + + # set equation(s) + equation = {"biharmonic": ppsci.equation.Biharmonic(dim=1, q=cfg.q, D=cfg.D)} + + # set validator + def u_solution_func(out): + """compute ground truth for u as label data""" + x = out["x"] + return -(x**4) / 24 + x**3 / 6 - x**2 / 4 + + l2_rel_metric = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": 
u_solution_func}, + geom["interval"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": cfg.EVAL.total_size, + }, + ppsci.loss.MSELoss(), + evenly=True, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="L2Rel_Metric", + ) + validator = {l2_rel_metric.name: l2_rel_metric} + + # set visualizer(optional) + visu_points = geom["interval"].sample_interior(cfg.EVAL.total_size, evenly=True) + visualizer = { + "visualize_u": ppsci.visualize.VisualizerScatter1D( + visu_points, + ("x",), + { + "u_label": lambda d: u_solution_func(d), + "u_pred": lambda d: d["u"], + }, + num_timestamps=1, + prefix="result_u", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + None, + cfg.output_dir, + None, + seed=cfg.seed, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + to_static=cfg.to_static, + ) + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + geom = {"interval": ppsci.geometry.Interval(0, 1)} + input_dict = geom["interval"].sample_interior(cfg.INFER.total_size, evenly=True) + + output_dict = predictor.predict({"x": input_dict["x"]}, cfg.INFER.batch_size) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + def u_solution_func(out): + """compute ground truth for u as label data""" + x = out["x"] + return -(x**4) / 24 + x**3 / 6 - x**2 / 4 + + ppsci.visualize.save_plot_from_1d_dict( + "./euler_beam_pred", + {**input_dict, **output_dict, "u_label": u_solution_func(input_dict)}, + ("x",), + ("u", "u_label"), + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="euler_beam.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/extformer_moe/conf/extformer_moe_enso_pretrain.yaml b/examples/extformer_moe/conf/extformer_moe_enso_pretrain.yaml index 450ffd21ab..c8544eba8c 100644 --- a/examples/extformer_moe/conf/extformer_moe_enso_pretrain.yaml +++ b/examples/extformer_moe/conf/extformer_moe_enso_pretrain.yaml @@ -1,153 +1,153 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_extformer_moe_pretrain - job: - name: ${mode} # name of logfile - chdir: 
false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 0 -output_dir: ${hydra:run.dir} -log_freq: 20 - -# set train and evaluate data path -FILE_PATH: /hpc2hdd/home/hni017/Workplace/data/weather_data/icar_enso_2021/enso_round1_train_20210201 - -# dataset setting -DATASET: - label_keys: ["sst_target","nino_target"] - in_len: 12 - out_len: 14 - nino_window_t: 3 - in_stride: 1 - out_stride: 1 - train_samples_gap: 1 - eval_samples_gap: 1 - normalize_sst: true - -# model settings -MODEL: - input_keys: ["sst_data"] - output_keys: ["sst_target","nino_target","aux_loss","rank_loss"] - input_shape: [12, 24, 48, 1] - target_shape: [14, 24, 48, 1] - base_units: 64 - scale_alpha: 1.0 - - enc_depth: [1, 1] - dec_depth: [1, 1] - enc_use_inter_ffn: true - dec_use_inter_ffn: true - dec_hierarchical_pos_embed: false - - downsample: 2 - downsample_type: "patch_merge" - upsample_type: "upsample" - - num_global_vectors: 0 - use_dec_self_global: false - dec_self_update_global: true - use_dec_cross_global: false - use_global_vector_ffn: false - use_global_self_attn: false - separate_global_qkv: false - global_dim_ratio: 1 - - self_pattern: "axial" - cross_self_pattern: "axial" - cross_pattern: "cross_1x1" - dec_cross_last_n_frames: null - - attn_drop: 0.1 - proj_drop: 0.1 - ffn_drop: 0.1 - num_heads: 4 - - ffn_activation: "gelu" - gated_ffn: false - norm_layer: "layer_norm" - padding_type: "zeros" - pos_embed_type: "t+h+w" - use_relative_pos: true - self_attn_use_final_proj: true - dec_use_first_self_attn: false - - z_init_method: "zeros" - initial_downsample_type: "conv" - initial_downsample_activation: "leaky_relu" - initial_downsample_scale: [1, 1, 2] - initial_downsample_conv_layers: 2 - final_upsample_conv_layers: 1 - checkpoint_level: 0 - - attn_linear_init_mode: "0" - ffn_linear_init_mode: "0" - conv_init_mode: "0" - down_up_linear_init_mode: "0" - norm_init_mode: "0" - -# moe settings -MOE: - use_linear_moe: false - use_ffn_moe: true - use_attn_moe: false - num_experts: 10 - out_planes: 4 - importance_weight: 0.0 - load_weight: 0.0 - gate_style: "cuboid-latent" # linear, spatial-latent, cuboid-latent, spatial-latent-linear, cuboid-latent-linear - dispatch_style: "dense" # sparse, dense - aux_loss_style: "all" # all, cell - -# rnc settings -RNC: - use_rnc: true - rank_imbalance_style: "batch+T+H+W" - feature_similarity_style: "l2" - rank_imbalance_temp: 2 - label_difference_style: "l1" - rank_reg_coeff: 0.01 - loss_cal_style: "computation-efficient" # computation-efficient, memory-efficient - -# training settings -TRAIN: - epochs: 100 - save_freq: 20 - eval_during_train: true - eval_freq: 10 - lr_scheduler: - epochs: ${TRAIN.epochs} - learning_rate: 0.0002 - by_epoch: true - min_lr_ratio: 1.0e-3 - wd: 1.0e-5 - batch_size: 16 - pretrained_model_path: null - checkpoint_path: null - update_freq: 1 - -# evaluation settings -EVAL: - pretrained_model_path: ./checkpoint/enso/extformer_moe_enso.pdparams - compute_metric_by_batch: false - eval_with_no_grad: true - batch_size: 1 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override 
name + dir: outputs_extformer_moe_pretrain + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 0 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set train and evaluate data path +FILE_PATH: /hpc2hdd/home/hni017/Workplace/data/weather_data/icar_enso_2021/enso_round1_train_20210201 + +# dataset setting +DATASET: + label_keys: ["sst_target","nino_target"] + in_len: 12 + out_len: 14 + nino_window_t: 3 + in_stride: 1 + out_stride: 1 + train_samples_gap: 1 + eval_samples_gap: 1 + normalize_sst: true + +# model settings +MODEL: + input_keys: ["sst_data"] + output_keys: ["sst_target","nino_target","aux_loss","rank_loss"] + input_shape: [12, 24, 48, 1] + target_shape: [14, 24, 48, 1] + base_units: 64 + scale_alpha: 1.0 + + enc_depth: [1, 1] + dec_depth: [1, 1] + enc_use_inter_ffn: true + dec_use_inter_ffn: true + dec_hierarchical_pos_embed: false + + downsample: 2 + downsample_type: "patch_merge" + upsample_type: "upsample" + + num_global_vectors: 0 + use_dec_self_global: false + dec_self_update_global: true + use_dec_cross_global: false + use_global_vector_ffn: false + use_global_self_attn: false + separate_global_qkv: false + global_dim_ratio: 1 + + self_pattern: "axial" + cross_self_pattern: "axial" + cross_pattern: "cross_1x1" + dec_cross_last_n_frames: null + + attn_drop: 0.1 + proj_drop: 0.1 + ffn_drop: 0.1 + num_heads: 4 + + ffn_activation: "gelu" + gated_ffn: false + norm_layer: "layer_norm" + padding_type: "zeros" + pos_embed_type: "t+h+w" + use_relative_pos: true + self_attn_use_final_proj: true + dec_use_first_self_attn: false + + z_init_method: "zeros" + initial_downsample_type: "conv" + initial_downsample_activation: "leaky_relu" + initial_downsample_scale: [1, 1, 2] + initial_downsample_conv_layers: 2 + final_upsample_conv_layers: 1 + checkpoint_level: 0 + + attn_linear_init_mode: "0" + ffn_linear_init_mode: "0" + conv_init_mode: "0" + down_up_linear_init_mode: "0" + norm_init_mode: "0" + +# moe settings +MOE: + use_linear_moe: false + use_ffn_moe: true + use_attn_moe: false + num_experts: 10 + out_planes: 4 + importance_weight: 0.0 + load_weight: 0.0 + gate_style: "cuboid-latent" # linear, spatial-latent, cuboid-latent, spatial-latent-linear, cuboid-latent-linear + dispatch_style: "dense" # sparse, dense + aux_loss_style: "all" # all, cell + +# rnc settings +RNC: + use_rnc: true + rank_imbalance_style: "batch+T+H+W" + feature_similarity_style: "l2" + rank_imbalance_temp: 2 + label_difference_style: "l1" + rank_reg_coeff: 0.01 + loss_cal_style: "computation-efficient" # computation-efficient, memory-efficient + +# training settings +TRAIN: + epochs: 100 + save_freq: 20 + eval_during_train: true + eval_freq: 10 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.0002 + by_epoch: true + min_lr_ratio: 1.0e-3 + wd: 1.0e-5 + batch_size: 16 + pretrained_model_path: null + checkpoint_path: null + update_freq: 1 + +# evaluation settings +EVAL: + pretrained_model_path: ./checkpoint/enso/extformer_moe_enso.pdparams + compute_metric_by_batch: false + eval_with_no_grad: true + batch_size: 1 diff --git a/examples/extformer_moe/enso_metric.py b/examples/extformer_moe/enso_metric.py index 204e49b91c..8b6a53d5f5 100644 --- a/examples/extformer_moe/enso_metric.py +++ b/examples/extformer_moe/enso_metric.py @@ -1,174 +1,174 
@@ -from typing import Dict -from typing import Optional -from typing import Union - -import numpy as np -import paddle -from paddle.nn import functional as F - -from ppsci.data.dataset.enso_dataset import NINO_WINDOW_T -from ppsci.data.dataset.enso_dataset import scale_back_sst - - -def compute_enso_score( - y_pred: paddle.Tensor, - y_true: paddle.Tensor, - acc_weight: Optional[Union[str, np.ndarray, paddle.Tensor]] = None, -): - """Compute the accuracy and Root Mean Squared Error (RMSE) of enso dataset. - - Args: - y_pred (paddle.Tensor): The predict data. - y_true (paddle.Tensor): The label data. - acc_weight (Optional[Union[str, np.ndarray, paddle.Tensor]], optional): The wight of accuracy. Defaults to None.use - default acc_weight specified at https://tianchi.aliyun.com/competition/entrance/531871/information. - - """ - - pred = y_pred - y_pred.mean(axis=0, keepdim=True) # (N, 24) - true = y_true - y_true.mean(axis=0, keepdim=True) # (N, 24) - cor = (pred * true).sum(axis=0) / ( - paddle.sqrt(paddle.sum(pred**2, axis=0) * paddle.sum(true**2, axis=0)) - + 1e-6 - ) - - if acc_weight is None: - acc = cor.sum() - else: - nino_out_len = y_true.shape[-1] - if acc_weight == "default": - acc_weight = paddle.to_tensor( - [1.5] * 4 + [2] * 7 + [3] * 7 + [4] * (nino_out_len - 18) - )[:nino_out_len] * paddle.log(paddle.arange(nino_out_len) + 1) - elif isinstance(acc_weight, np.ndarray): - acc_weight = paddle.to_tensor(acc_weight[:nino_out_len]) - elif isinstance(acc_weight, paddle.Tensor): - acc_weight = acc_weight[:nino_out_len] - else: - raise ValueError(f"Invalid acc_weight {acc_weight}!") - acc_weight = acc_weight.to(y_pred) - acc = (acc_weight * cor).sum() - rmse = paddle.mean((y_pred - y_true) ** 2, axis=0).sqrt().sum() - return acc, rmse - - -def sst_to_nino(sst: paddle.Tensor, normalize_sst: bool = True, detach: bool = True): - """Convert sst to nino index. - - Args: - sst (paddle.Tensor): The predict data for sst. Shape = (N, T, H, W) - normalize_sst (bool, optional): Whether to use normalize for sst. Defaults to True. - detach (bool, optional): Whether to detach the tensor. Defaults to True. - - Returns: - nino_index (paddle.Tensor): The nino index. 
Shape = (N, T-NINO_WINDOW_T+1) - """ - - if detach: - nino_index = sst.detach() - else: - nino_index = sst - if normalize_sst: - nino_index = scale_back_sst(nino_index) - nino_index = nino_index[:, :, 10:13, 19:30].mean(axis=[2, 3]) # (N, 26) - nino_index = nino_index.unfold(axis=1, size=NINO_WINDOW_T, step=1).mean( - axis=2 - ) # (N, 24) - - return nino_index - - -def train_mse_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - *args, -) -> paddle.Tensor: - return { - "sst_target": F.mse_loss(output_dict["sst_target"], label_dict["sst_target"]) - } - - -def train_extformer_moe_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - *args, -) -> paddle.Tensor: - - total_loss = F.mse_loss(output_dict["sst_target"], label_dict["sst_target"]) - - aux_loss = output_dict["aux_loss"] - if aux_loss is not None: - total_loss += aux_loss - - rank_loss = output_dict["rank_loss"] - if rank_loss is not None: - total_loss += rank_loss[0] - - return {"sst_target": total_loss} - - -def rnc_pretrain_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - *args, -) -> paddle.Tensor: - - rank_loss = output_dict["rank_loss"] - assert rank_loss is not None - - return rank_loss[0] - - -def eval_rmse_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - nino_out_len: int = 12, - *args, -) -> Dict[str, paddle.Tensor]: - pred = output_dict["sst_target"] - sst_target = label_dict["sst_target"] - nino_target = label_dict["nino_target"].astype("float32") - # mse - mae = F.l1_loss(pred, sst_target) - # mse - mse = F.mse_loss(pred, sst_target) - # rmse - nino_preds = sst_to_nino(sst=pred[..., 0]) - nino_preds_list, nino_target_list = map(list, zip((nino_preds, nino_target))) - nino_preds_list = paddle.concat(nino_preds_list, axis=0) - nino_target_list = paddle.concat(nino_target_list, axis=0) - - valid_acc, valid_nino_rmse = compute_enso_score( - y_pred=nino_preds_list, y_true=nino_target_list, acc_weight=None - ) - valid_weighted_acc, _ = compute_enso_score( - y_pred=nino_preds_list, y_true=nino_target_list, acc_weight="default" - ) - valid_acc /= nino_out_len - valid_nino_rmse /= nino_out_len - valid_weighted_acc /= nino_out_len - valid_loss = -valid_acc - - return { - "valid_loss_epoch": valid_loss, - "mse": mse, - "mae": mae, - "rmse": valid_nino_rmse, - "corr_nino3.4_epoch": valid_acc, - "corr_nino3.4_weighted_epoch": valid_weighted_acc, - } - - -def eval_rnc_pretrain_func( - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - nino_out_len: int = 12, - *args, -) -> Dict[str, paddle.Tensor]: - - rank_loss = output_dict["rank_loss"] - assert rank_loss is not None - - return { - "valid_loss_epoch": rank_loss[0], - } +from typing import Dict +from typing import Optional +from typing import Union + +import numpy as np +import paddle +from paddle.nn import functional as F + +from ppsci.data.dataset.enso_dataset import NINO_WINDOW_T +from ppsci.data.dataset.enso_dataset import scale_back_sst + + +def compute_enso_score( + y_pred: paddle.Tensor, + y_true: paddle.Tensor, + acc_weight: Optional[Union[str, np.ndarray, paddle.Tensor]] = None, +): + """Compute the accuracy and Root Mean Squared Error (RMSE) of enso dataset. + + Args: + y_pred (paddle.Tensor): The predict data. + y_true (paddle.Tensor): The label data. + acc_weight (Optional[Union[str, np.ndarray, paddle.Tensor]], optional): The wight of accuracy. 
Defaults to None.use + default acc_weight specified at https://tianchi.aliyun.com/competition/entrance/531871/information. + + """ + + pred = y_pred - y_pred.mean(axis=0, keepdim=True) # (N, 24) + true = y_true - y_true.mean(axis=0, keepdim=True) # (N, 24) + cor = (pred * true).sum(axis=0) / ( + paddle.sqrt(paddle.sum(pred**2, axis=0) * paddle.sum(true**2, axis=0)) + + 1e-6 + ) + + if acc_weight is None: + acc = cor.sum() + else: + nino_out_len = y_true.shape[-1] + if acc_weight == "default": + acc_weight = paddle.to_tensor( + [1.5] * 4 + [2] * 7 + [3] * 7 + [4] * (nino_out_len - 18) + )[:nino_out_len] * paddle.log(paddle.arange(nino_out_len) + 1) + elif isinstance(acc_weight, np.ndarray): + acc_weight = paddle.to_tensor(acc_weight[:nino_out_len]) + elif isinstance(acc_weight, paddle.Tensor): + acc_weight = acc_weight[:nino_out_len] + else: + raise ValueError(f"Invalid acc_weight {acc_weight}!") + acc_weight = acc_weight.to(y_pred) + acc = (acc_weight * cor).sum() + rmse = paddle.mean((y_pred - y_true) ** 2, axis=0).sqrt().sum() + return acc, rmse + + +def sst_to_nino(sst: paddle.Tensor, normalize_sst: bool = True, detach: bool = True): + """Convert sst to nino index. + + Args: + sst (paddle.Tensor): The predict data for sst. Shape = (N, T, H, W) + normalize_sst (bool, optional): Whether to use normalize for sst. Defaults to True. + detach (bool, optional): Whether to detach the tensor. Defaults to True. + + Returns: + nino_index (paddle.Tensor): The nino index. Shape = (N, T-NINO_WINDOW_T+1) + """ + + if detach: + nino_index = sst.detach() + else: + nino_index = sst + if normalize_sst: + nino_index = scale_back_sst(nino_index) + nino_index = nino_index[:, :, 10:13, 19:30].mean(axis=[2, 3]) # (N, 26) + nino_index = nino_index.unfold(axis=1, size=NINO_WINDOW_T, step=1).mean( + axis=2 + ) # (N, 24) + + return nino_index + + +def train_mse_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + *args, +) -> paddle.Tensor: + return { + "sst_target": F.mse_loss(output_dict["sst_target"], label_dict["sst_target"]) + } + + +def train_extformer_moe_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + *args, +) -> paddle.Tensor: + + total_loss = F.mse_loss(output_dict["sst_target"], label_dict["sst_target"]) + + aux_loss = output_dict["aux_loss"] + if aux_loss is not None: + total_loss += aux_loss + + rank_loss = output_dict["rank_loss"] + if rank_loss is not None: + total_loss += rank_loss[0] + + return {"sst_target": total_loss} + + +def rnc_pretrain_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + *args, +) -> paddle.Tensor: + + rank_loss = output_dict["rank_loss"] + assert rank_loss is not None + + return rank_loss[0] + + +def eval_rmse_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + nino_out_len: int = 12, + *args, +) -> Dict[str, paddle.Tensor]: + pred = output_dict["sst_target"] + sst_target = label_dict["sst_target"] + nino_target = label_dict["nino_target"].astype("float32") + # mse + mae = F.l1_loss(pred, sst_target) + # mse + mse = F.mse_loss(pred, sst_target) + # rmse + nino_preds = sst_to_nino(sst=pred[..., 0]) + nino_preds_list, nino_target_list = map(list, zip((nino_preds, nino_target))) + nino_preds_list = paddle.concat(nino_preds_list, axis=0) + nino_target_list = paddle.concat(nino_target_list, axis=0) + + valid_acc, valid_nino_rmse = compute_enso_score( + y_pred=nino_preds_list, y_true=nino_target_list, 
acc_weight=None + ) + valid_weighted_acc, _ = compute_enso_score( + y_pred=nino_preds_list, y_true=nino_target_list, acc_weight="default" + ) + valid_acc /= nino_out_len + valid_nino_rmse /= nino_out_len + valid_weighted_acc /= nino_out_len + valid_loss = -valid_acc + + return { + "valid_loss_epoch": valid_loss, + "mse": mse, + "mae": mae, + "rmse": valid_nino_rmse, + "corr_nino3.4_epoch": valid_acc, + "corr_nino3.4_weighted_epoch": valid_weighted_acc, + } + + +def eval_rnc_pretrain_func( + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + nino_out_len: int = 12, + *args, +) -> Dict[str, paddle.Tensor]: + + rank_loss = output_dict["rank_loss"] + assert rank_loss is not None + + return { + "valid_loss_epoch": rank_loss[0], + } diff --git a/examples/extformer_moe/extformer_moe_enso_train.py b/examples/extformer_moe/extformer_moe_enso_train.py index e0e570fb95..fad88d1c0a 100644 --- a/examples/extformer_moe/extformer_moe_enso_train.py +++ b/examples/extformer_moe/extformer_moe_enso_train.py @@ -1,203 +1,203 @@ -import enso_metric -import hydra -import paddle -from omegaconf import DictConfig -from omegaconf import OmegaConf -from paddle import nn - -import ppsci - - -def get_parameter_names(model, forbidden_layer_types): - result = [] - for name, child in model.named_children(): - result += [ - f"{name}.{n}" - for n in get_parameter_names(child, forbidden_layer_types) - if not isinstance(child, tuple(forbidden_layer_types)) - ] - # Add model specific parameters since they are not in any child. - result += list(model._parameters.keys()) - return result - - -def train(cfg: DictConfig): - # set train dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "ExtMoEENSODataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "in_stride": cfg.DATASET.in_stride, - "out_stride": cfg.DATASET.out_stride, - "train_samples_gap": cfg.DATASET.train_samples_gap, - "eval_samples_gap": cfg.DATASET.eval_samples_gap, - "normalize_sst": cfg.DATASET.normalize_sst, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 8, - } - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(enso_metric.train_extformer_moe_func), - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - # set eval dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "ExtMoEENSODataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "in_stride": cfg.DATASET.in_stride, - "out_stride": cfg.DATASET.out_stride, - "train_samples_gap": cfg.DATASET.train_samples_gap, - "eval_samples_gap": cfg.DATASET.eval_samples_gap, - "normalize_sst": cfg.DATASET.normalize_sst, - "training": "eval", - }, - "batch_size": cfg.EVAL.batch_size, - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(enso_metric.train_extformer_moe_func), - metric={ - "rmse": ppsci.metric.FunctionalMetric(enso_metric.eval_rmse_func), - }, - name="Sup_Validator", - ) - validator = {sup_validator.name: sup_validator} - - moe_config = 
OmegaConf.to_object(cfg.MOE) - rnc_config = OmegaConf.to_object(cfg.RNC) - model = ppsci.arch.ExtFormerMoECuboid( - **cfg.MODEL, moe_config=moe_config, rnc_config=rnc_config - ) - - decay_parameters = get_parameter_names(model, [nn.LayerNorm]) - decay_parameters = [name for name in decay_parameters if "bias" not in name] - optimizer_grouped_parameters = [ - { - "params": [p for n, p in model.named_parameters() if n in decay_parameters], - "weight_decay": cfg.TRAIN.wd, - }, - { - "params": [ - p for n, p in model.named_parameters() if n not in decay_parameters - ], - "weight_decay": 0.0, - }, - ] - - # # init optimizer and lr scheduler - lr_scheduler_cfg = dict(cfg.TRAIN.lr_scheduler) - lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( - **lr_scheduler_cfg, - iters_per_epoch=ITERS_PER_EPOCH, - eta_min=cfg.TRAIN.min_lr_ratio * cfg.TRAIN.lr_scheduler.learning_rate, - warmup_epoch=int(0.2 * cfg.TRAIN.epochs), - )() - optimizer = paddle.optimizer.AdamW( - lr_scheduler, parameters=optimizer_grouped_parameters - ) - - # initialize solver, eval_freq: int = 1 - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=ITERS_PER_EPOCH, - update_freq=cfg.TRAIN.update_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set eval dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "ExtMoEENSODataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "in_len": cfg.DATASET.in_len, - "out_len": cfg.DATASET.out_len, - "in_stride": cfg.DATASET.in_stride, - "out_stride": cfg.DATASET.out_stride, - "train_samples_gap": cfg.DATASET.train_samples_gap, - "eval_samples_gap": cfg.DATASET.eval_samples_gap, - "normalize_sst": cfg.DATASET.normalize_sst, - "training": "test", - }, - "batch_size": cfg.EVAL.batch_size, - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(enso_metric.train_extformer_moe_func), - metric={ - "rmse": ppsci.metric.FunctionalMetric(enso_metric.eval_rmse_func), - }, - name="Sup_Validator", - ) - validator = {sup_validator.name: sup_validator} - - moe_config = OmegaConf.to_object(cfg.MOE) - rnc_config = OmegaConf.to_object(cfg.RNC) - model = ppsci.arch.ExtFormerMoECuboid( - **cfg.MODEL, moe_config=moe_config, rnc_config=rnc_config - ) - - solver = ppsci.solver.Solver( - model, - validator=validator, - cfg=cfg, - ) - - # evaluate - solver.eval() - - -@hydra.main( - version_base=None, - config_path="./conf", - config_name="extformer_moe_enso_pretrain.yaml", -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +import enso_metric +import hydra +import paddle +from omegaconf import DictConfig +from omegaconf import OmegaConf +from paddle import nn + +import ppsci + + +def get_parameter_names(model, forbidden_layer_types): + result = [] + for name, child in model.named_children(): + result += [ + f"{name}.{n}" + for n in get_parameter_names(child, forbidden_layer_types) + if not isinstance(child, tuple(forbidden_layer_types)) + ] + # Add model 
specific parameters since they are not in any child. + result += list(model._parameters.keys()) + return result + + +def train(cfg: DictConfig): + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "ExtMoEENSODataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "in_stride": cfg.DATASET.in_stride, + "out_stride": cfg.DATASET.out_stride, + "train_samples_gap": cfg.DATASET.train_samples_gap, + "eval_samples_gap": cfg.DATASET.eval_samples_gap, + "normalize_sst": cfg.DATASET.normalize_sst, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 8, + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(enso_metric.train_extformer_moe_func), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ExtMoEENSODataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "in_stride": cfg.DATASET.in_stride, + "out_stride": cfg.DATASET.out_stride, + "train_samples_gap": cfg.DATASET.train_samples_gap, + "eval_samples_gap": cfg.DATASET.eval_samples_gap, + "normalize_sst": cfg.DATASET.normalize_sst, + "training": "eval", + }, + "batch_size": cfg.EVAL.batch_size, + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(enso_metric.train_extformer_moe_func), + metric={ + "rmse": ppsci.metric.FunctionalMetric(enso_metric.eval_rmse_func), + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + moe_config = OmegaConf.to_object(cfg.MOE) + rnc_config = OmegaConf.to_object(cfg.RNC) + model = ppsci.arch.ExtFormerMoECuboid( + **cfg.MODEL, moe_config=moe_config, rnc_config=rnc_config + ) + + decay_parameters = get_parameter_names(model, [nn.LayerNorm]) + decay_parameters = [name for name in decay_parameters if "bias" not in name] + optimizer_grouped_parameters = [ + { + "params": [p for n, p in model.named_parameters() if n in decay_parameters], + "weight_decay": cfg.TRAIN.wd, + }, + { + "params": [ + p for n, p in model.named_parameters() if n not in decay_parameters + ], + "weight_decay": 0.0, + }, + ] + + # # init optimizer and lr scheduler + lr_scheduler_cfg = dict(cfg.TRAIN.lr_scheduler) + lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( + **lr_scheduler_cfg, + iters_per_epoch=ITERS_PER_EPOCH, + eta_min=cfg.TRAIN.min_lr_ratio * cfg.TRAIN.lr_scheduler.learning_rate, + warmup_epoch=int(0.2 * cfg.TRAIN.epochs), + )() + optimizer = paddle.optimizer.AdamW( + lr_scheduler, parameters=optimizer_grouped_parameters + ) + + # initialize solver, eval_freq: int = 1 + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + update_freq=cfg.TRAIN.update_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # train model + solver.train() + # evaluate after 
finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ExtMoEENSODataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "in_len": cfg.DATASET.in_len, + "out_len": cfg.DATASET.out_len, + "in_stride": cfg.DATASET.in_stride, + "out_stride": cfg.DATASET.out_stride, + "train_samples_gap": cfg.DATASET.train_samples_gap, + "eval_samples_gap": cfg.DATASET.eval_samples_gap, + "normalize_sst": cfg.DATASET.normalize_sst, + "training": "test", + }, + "batch_size": cfg.EVAL.batch_size, + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(enso_metric.train_extformer_moe_func), + metric={ + "rmse": ppsci.metric.FunctionalMetric(enso_metric.eval_rmse_func), + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + moe_config = OmegaConf.to_object(cfg.MOE) + rnc_config = OmegaConf.to_object(cfg.RNC) + model = ppsci.arch.ExtFormerMoECuboid( + **cfg.MODEL, moe_config=moe_config, rnc_config=rnc_config + ) + + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + + # evaluate + solver.eval() + + +@hydra.main( + version_base=None, + config_path="./conf", + config_name="extformer_moe_enso_pretrain.yaml", +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/extformer_moe/requirements.txt b/examples/extformer_moe/requirements.txt index c0f424a290..2a7f2a2320 100644 --- a/examples/extformer_moe/requirements.txt +++ b/examples/extformer_moe/requirements.txt @@ -1,2 +1,2 @@ -h5netcdf -xarray==2024.2.0 +h5netcdf +xarray==2024.2.0 diff --git a/examples/fourcastnet/conf/fourcastnet_finetune.yaml b/examples/fourcastnet/conf/fourcastnet_finetune.yaml index 1dae49cbf9..0083e661f2 100644 --- a/examples/fourcastnet/conf/fourcastnet_finetune.yaml +++ b/examples/fourcastnet/conf/fourcastnet_finetune.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -97,3 +98,81 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 num_timestamps: 32 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_fourcastnet_finetune + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 1024 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set training hyper-parameters +IMG_H: 720 +IMG_W: 1440 +# FourCastNet use 20 atmospheric variable,their index in the dataset is from 0 to 19. +# The variable name is 'u10', 'v10', 't2m', 'sp', 'msl', 't850', 'u1000', 'v1000', 'z000', +# 'u850', 'v850', 'z850', 'u500', 'v500', 'z500', 't500', 'z50', 'r500', 'r850', 'tcwv'. 
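The comment above enumerates the 20 ERA5 channels selected by VARS_CHANNEL. Here is a rough numpy sketch of how such a channel list, the IMG_H x IMG_W crop, and the global mean/std statistics fit together; the actual loading lives in ERA5Dataset and the SqueezeData/CropData/Normalize transforms in ppsci and may differ in detail:

import numpy as np

# Channel order as documented in the comment above (indices 0..19).
ERA5_VARS = [
    "u10", "v10", "t2m", "sp", "msl", "t850", "u1000", "v1000", "z000",
    "u850", "v850", "z850", "u500", "v500", "z500", "t500", "z50",
    "r500", "r850", "tcwv",
]
NAME_TO_CHANNEL = {name: i for i, name in enumerate(ERA5_VARS)}

def select_and_normalize(fields, vars_channel, data_mean, data_std,
                         img_h=720, img_w=1440):
    # fields: (T, C, H, W) raw ERA5 array; data_mean/data_std broadcast over (H, W)
    x = fields[:, vars_channel, :img_h, :img_w]   # pick channels, crop the grid
    return (x - data_mean) / data_std             # per-channel normalization

# e.g. the two 10 m wind components combined into wind speed for visualization later
u10_idx, v10_idx = NAME_TO_CHANNEL["u10"], NAME_TO_CHANNEL["v10"]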
+# You can obtain detailed information about each variable from +# https://cds.climate.copernicus.eu/cdsapp#!/search?text=era5&type=dataset +VARS_CHANNEL: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + +# set train data path +TRAIN_FILE_PATH: ./datasets/era5/train +DATA_MEAN_PATH: ./datasets/era5/stat/global_means.npy +DATA_STD_PATH: ./datasets/era5/stat/global_stds.npy +DATA_TIME_MEAN_PATH: ./datasets/era5/stat/time_means.npy + +# set evaluate data path +VALID_FILE_PATH: ./datasets/era5/test + +# set test data path +TEST_FILE_PATH: ./datasets/era5/out_of_sample/2018.h5 + +# model settings +MODEL: + afno: + input_keys: ["input"] + +# training settings +TRAIN: + epochs: 50 + save_freq: 20 + eval_during_train: true + eval_freq: 20 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 1e-4 + by_epoch: true + batch_size: 1 + num_timestamps: 2 + pretrained_model_path: outputs_fourcastnet_pretrain/checkpoints/latest + checkpoint_path: null + +# evaluation settings +EVAL: + num_timestamps: 32 + pretrained_model_path: null + compute_metric_by_batch: true + eval_with_no_grad: true + batch_size: 1 +>>>>>>> Stashed changes diff --git a/examples/fourcastnet/conf/fourcastnet_precip.yaml b/examples/fourcastnet/conf/fourcastnet_precip.yaml index 4f4586bb92..a146051bde 100644 --- a/examples/fourcastnet/conf/fourcastnet_precip.yaml +++ b/examples/fourcastnet/conf/fourcastnet_precip.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -110,3 +111,92 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 num_timestamps: 6 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_fourcastnet_precip/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 1024 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set training hyper-parameters +IMG_H: 720 +IMG_W: 1440 +# FourCastNet use 20 atmospheric variable,their index in the dataset is from 0 to 19. +# The variable name is 'u10', 'v10', 't2m', 'sp', 'msl', 't850', 'u1000', 'v1000', 'z000', +# 'u850', 'v850', 'z850', 'u500', 'v500', 'z500', 't500', 'z50', 'r500', 'r850', 'tcwv'. 
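Returning to sst_to_nino in enso_metric.py earlier in this patch: the Nino index there is a spatial mean over the Nino3.4 box followed by a rolling mean over NINO_WINDOW_T time steps. A rough numpy equivalent, assuming the same [10:13, 19:30] grid slice and the nino_window_t: 3 value from the ENSO dataset config (the real code operates on paddle tensors and first scales the normalized SST back to physical units):

import numpy as np

def sst_to_nino_np(sst, window_t=3):
    # sst: (N, T, H, W) sea-surface temperature in physical units
    region = sst[:, :, 10:13, 19:30].mean(axis=(2, 3))   # (N, T) Nino3.4 box mean
    n_batch, t_len = region.shape
    nino = np.stack(
        [region[:, i:i + window_t].mean(axis=1) for i in range(t_len - window_t + 1)],
        axis=1,
    )
    return nino                                          # (N, T - window_t + 1)

With in_len 12 and out_len 14 as in the config, a 26-step SST sequence yields a 24-step index, matching the (N, 26) and (N, 24) shape comments in the function above.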
+# You can obtain detailed information about each variable from +# https://cds.climate.copernicus.eu/cdsapp#!/search?text=era5&type=dataset +VARS_CHANNEL: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + +# set train data path +WIND_TRAIN_FILE_PATH: ./datasets/era5/train +WIND_MEAN_PATH: ./datasets/era5/stat/global_means.npy +WIND_STD_PATH: ./datasets/era5/stat/global_stds.npy +WIND_TIME_MEAN_PATH: ./datasets/era5/stat/time_means.npy + +TRAIN_FILE_PATH: ./datasets/era5/precip/train +TIME_MEAN_PATH: ./datasets/era5/stat/precip/time_means.npy + +# set evaluate data path +WIND_VALID_FILE_PATH: ./datasets/era5/test +VALID_FILE_PATH: ./datasets/era5/precip/test + +# set test data path +WIND_TEST_FILE_PATH: ./datasets/era5/out_of_sample/2018.h5 +TEST_FILE_PATH: ./datasets/era5/precip/out_of_sample/2018.h5 + +# set wind model path +WIND_MODEL_PATH: outputs_fourcastnet_finetune/checkpoints/latest + +# model settings +MODEL: + afno: + input_keys: ["input"] + output_keys: ["output"] + precip: + input_keys: ["input"] + output_keys: ["output"] + +# training settings +TRAIN: + epochs: 25 + save_freq: 20 + eval_during_train: true + eval_freq: 20 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 2.5e-4 + by_epoch: true + batch_size: 1 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + num_timestamps: 6 + pretrained_model_path: null + compute_metric_by_batch: true + eval_with_no_grad: true + batch_size: 1 +>>>>>>> Stashed changes diff --git a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml index dd0a90217c..199fd408fa 100644 --- a/examples/fourcastnet/conf/fourcastnet_pretrain.yaml +++ b/examples/fourcastnet/conf/fourcastnet_pretrain.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -95,3 +96,78 @@ INFER: max_batch_size: 1024 num_cpu_threads: 10 num_timestamps: 32 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_fourcastnet_pretrain + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 1024 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set training hyper-parameters +IMG_H: 720 +IMG_W: 1440 +# FourCastNet use 20 atmospheric variable,their index in the dataset is from 0 to 19. +# The variable name is 'u10', 'v10', 't2m', 'sp', 'msl', 't850', 'u1000', 'v1000', 'z000', +# 'u850', 'v850', 'z850', 'u500', 'v500', 'z500', 't500', 'z50', 'r500', 'r850', 'tcwv'. 
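The extformer_moe training script earlier in this patch builds two AdamW parameter groups so that LayerNorm parameters and all biases are excluded from weight decay. A condensed sketch of that grouping, reusing the same get_parameter_names recursion (the model and wd values here are placeholders; the script passes cfg.TRAIN.wd):

import paddle
from paddle import nn

def get_parameter_names(model, forbidden_layer_types):
    result = []
    for name, child in model.named_children():
        result += [
            f"{name}.{n}"
            for n in get_parameter_names(child, forbidden_layer_types)
            if not isinstance(child, tuple(forbidden_layer_types))
        ]
    result += list(model._parameters.keys())  # parameters owned directly by `model`
    return result

def build_param_groups(model, wd):
    decay_names = [
        n for n in get_parameter_names(model, [nn.LayerNorm]) if "bias" not in n
    ]
    return [
        {"params": [p for n, p in model.named_parameters() if n in decay_names],
         "weight_decay": wd},
        {"params": [p for n, p in model.named_parameters() if n not in decay_names],
         "weight_decay": 0.0},
    ]

# usage sketch: only the Linear weights end up in the weight-decay group
model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 4))
optimizer = paddle.optimizer.AdamW(
    learning_rate=2e-4, parameters=build_param_groups(model, wd=1e-5)
)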
+# You can obtain detailed information about each variable from +# https://cds.climate.copernicus.eu/cdsapp#!/search?text=era5&type=dataset +VARS_CHANNEL: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] +USE_SAMPLED_DATA: false + +# set train data path +TRAIN_FILE_PATH: ./datasets/era5/train +DATA_MEAN_PATH: ./datasets/era5/stat/global_means.npy +DATA_STD_PATH: ./datasets/era5/stat/global_stds.npy +DATA_TIME_MEAN_PATH: ./datasets/era5/stat/time_means.npy + +# set evaluate data path +VALID_FILE_PATH: ./datasets/era5/test + +# model settings +MODEL: + afno: + input_keys: ["input"] + output_keys: ["output"] + +# training settings +TRAIN: + epochs: 150 + save_freq: 20 + eval_during_train: true + eval_freq: 20 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.0005 + by_epoch: true + batch_size: 1 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + compute_metric_by_batch: true + eval_with_no_grad: true + batch_size: 8 +>>>>>>> Stashed changes diff --git a/examples/fourcastnet/sample_data.py b/examples/fourcastnet/sample_data.py index 5f38bea650..50291f6cdd 100755 --- a/examples/fourcastnet/sample_data.py +++ b/examples/fourcastnet/sample_data.py @@ -1,138 +1,138 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import glob -import os -import shutil -from multiprocessing import Pool -from typing import Any -from typing import Dict -from typing import Tuple - -import h5py -from paddle import io -from tqdm import tqdm - -import examples.fourcastnet.utils as fourcast_utils -import ppsci -from ppsci.utils import logger - - -def sample_func( - dataset_cfg: Dict[str, Any], save_path: str, batch_idxs: Tuple[int, ...] -): - dataset = ppsci.data.dataset.build_dataset(dataset_cfg) - for idx in tqdm(batch_idxs): - input_dict, label_dict, weight_dict = dataset[idx] - fdest = h5py.File(f"{save_path}/{idx:0>8d}.h5", "w") - for key, value in input_dict.items(): - fdest.create_dataset(f"input_dict/{key}", data=value, dtype="f") - for key, value in label_dict.items(): - fdest.create_dataset(f"label_dict/{key}", data=value, dtype="f") - if weight_dict is not None: - for key, value in weight_dict.items(): - fdest.create_dataset(f"weight_dict/{key}", data=value, dtype="f") - - -def sample_data_epoch(epoch: int): - # initialize logger - logger.init_logger("ppsci") - # set dataset path and save path - TRAIN_FILE_PATH = "./datasets/era5/train" - PRECIP_FILE_PATH = None - DATA_MEAN_PATH = "./datasets/era5/stat/global_means.npy" - DATA_STD_PATH = "./datasets/era5/stat/global_stds.npy" - TMP_SAVE_PATH = "./datasets/era5/train_split_rank0/epoch_tmp" - save_path = f"./datasets/era5/train_split_rank0/epoch_{epoch}" - # set hyper-parameters - input_keys = ("input",) - output_keys = ("output",) - IMG_H, IMG_W = 720, 1440 - # FourCastNet use 20 atmospheric variable,their index in the dataset is from 0 to 19. 
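The TRAIN.lr_scheduler blocks in these configs, together with the Cosine(..., warmup_epoch=...) call in the ENSO script, describe a by-epoch cosine decay optionally preceded by a linear warmup. As a sanity check of what such a schedule produces, here is the standard closed form; the exact behaviour of ppsci.optimizer.lr_scheduler.Cosine (for example the warmup start value) is an assumption here:

import math

def cosine_lr(epoch, base_lr, total_epochs, eta_min=0.0, warmup_epochs=0):
    # linear warmup followed by cosine decay from base_lr down to eta_min
    if epoch < warmup_epochs:
        return base_lr * (epoch + 1) / warmup_epochs
    t = (epoch - warmup_epochs) / max(1, total_epochs - warmup_epochs)
    return eta_min + 0.5 * (base_lr - eta_min) * (1.0 + math.cos(math.pi * t))

# e.g. the ENSO pretraining settings: lr=2e-4, 100 epochs, min_lr_ratio=1e-3, 20% warmup
for e in (0, 10, 20, 60, 99):
    print(e, cosine_lr(e, 2e-4, 100, eta_min=2e-4 * 1e-3, warmup_epochs=20))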
- # The variable name is 'u10', 'v10', 't2m', 'sp', 'msl', 't850', 'u1000', 'v1000', 'z000', - # 'u850', 'v850', 'z850', 'u500', 'v500', 'z500', 't500', 'z50', 'r500', 'r850', 'tcwv'. - # You can obtain detailed information about each variable from - # https://cds.climate.copernicus.eu/cdsapp#!/search?text=era5&type=dataset - VARS_CHANNEL = list(range(20)) - NUM_TRAINER = 1 - RANK = 0 - PROCESSES = 16 - - if len(glob.glob(TMP_SAVE_PATH + "/*.h5")): - raise FileExistsError( - f"TMP_SAVE_PATH({TMP_SAVE_PATH}) is not an empty folder, please specify an empty folder." - ) - if len(glob.glob(save_path + "/*.h5")): - raise FileExistsError( - f"save_path({save_path}) is not an empty folder, please specify an empty folder." - ) - os.makedirs(TMP_SAVE_PATH, exist_ok=True) - - data_mean, data_std = fourcast_utils.get_mean_std( - DATA_MEAN_PATH, DATA_STD_PATH, VARS_CHANNEL - ) - transforms = [ - {"SqueezeData": {}}, - {"CropData": {"xmin": (0, 0), "xmax": (IMG_H, IMG_W)}}, - {"Normalize": {"mean": data_mean, "std": data_std}}, - ] - dataset_cfg = { - "name": "ERA5Dataset", - "file_path": TRAIN_FILE_PATH, - "input_keys": input_keys, - "label_keys": output_keys, - "PRECIP_FILE_PATH": PRECIP_FILE_PATH, - "vars_channel": VARS_CHANNEL, - "transforms": transforms, - } - dataset = ppsci.data.dataset.build_dataset(dataset_cfg) - - batch_sampler = io.DistributedBatchSampler( - dataset=dataset, - batch_size=1, - shuffle=False, - num_replicas=NUM_TRAINER, - rank=RANK, - ) - batch_sampler.set_epoch(epoch) - batch_idxs = [] - for data in tqdm(batch_sampler): - batch_idxs += data - - pool = Pool(processes=PROCESSES) - for st in range(0, len(batch_idxs), len(batch_idxs) // (PROCESSES - 1)): - end = st + len(batch_idxs) // (PROCESSES - 1) - result = pool.apply_async( - sample_func, (dataset_cfg, TMP_SAVE_PATH, batch_idxs[st:end]) - ) - pool.close() - pool.join() - if result.successful(): - logger.message("successful") - shutil.move(TMP_SAVE_PATH, save_path) - logger.message(f"move {TMP_SAVE_PATH} to {save_path}") - - -def main(): - EPOCHS = 0 - sample_data_epoch(EPOCHS) - - # if you want to sample every 5 epochs, you can use the following code - # EPOCHS = 150 - # for epoch in range(0, EPOCHS, 5): - # sample_data_epoch(epoch) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import os +import shutil +from multiprocessing import Pool +from typing import Any +from typing import Dict +from typing import Tuple + +import h5py +from paddle import io +from tqdm import tqdm + +import examples.fourcastnet.utils as fourcast_utils +import ppsci +from ppsci.utils import logger + + +def sample_func( + dataset_cfg: Dict[str, Any], save_path: str, batch_idxs: Tuple[int, ...] 
+): + dataset = ppsci.data.dataset.build_dataset(dataset_cfg) + for idx in tqdm(batch_idxs): + input_dict, label_dict, weight_dict = dataset[idx] + fdest = h5py.File(f"{save_path}/{idx:0>8d}.h5", "w") + for key, value in input_dict.items(): + fdest.create_dataset(f"input_dict/{key}", data=value, dtype="f") + for key, value in label_dict.items(): + fdest.create_dataset(f"label_dict/{key}", data=value, dtype="f") + if weight_dict is not None: + for key, value in weight_dict.items(): + fdest.create_dataset(f"weight_dict/{key}", data=value, dtype="f") + + +def sample_data_epoch(epoch: int): + # initialize logger + logger.init_logger("ppsci") + # set dataset path and save path + TRAIN_FILE_PATH = "./datasets/era5/train" + PRECIP_FILE_PATH = None + DATA_MEAN_PATH = "./datasets/era5/stat/global_means.npy" + DATA_STD_PATH = "./datasets/era5/stat/global_stds.npy" + TMP_SAVE_PATH = "./datasets/era5/train_split_rank0/epoch_tmp" + save_path = f"./datasets/era5/train_split_rank0/epoch_{epoch}" + # set hyper-parameters + input_keys = ("input",) + output_keys = ("output",) + IMG_H, IMG_W = 720, 1440 + # FourCastNet use 20 atmospheric variable,their index in the dataset is from 0 to 19. + # The variable name is 'u10', 'v10', 't2m', 'sp', 'msl', 't850', 'u1000', 'v1000', 'z000', + # 'u850', 'v850', 'z850', 'u500', 'v500', 'z500', 't500', 'z50', 'r500', 'r850', 'tcwv'. + # You can obtain detailed information about each variable from + # https://cds.climate.copernicus.eu/cdsapp#!/search?text=era5&type=dataset + VARS_CHANNEL = list(range(20)) + NUM_TRAINER = 1 + RANK = 0 + PROCESSES = 16 + + if len(glob.glob(TMP_SAVE_PATH + "/*.h5")): + raise FileExistsError( + f"TMP_SAVE_PATH({TMP_SAVE_PATH}) is not an empty folder, please specify an empty folder." + ) + if len(glob.glob(save_path + "/*.h5")): + raise FileExistsError( + f"save_path({save_path}) is not an empty folder, please specify an empty folder." 
+ ) + os.makedirs(TMP_SAVE_PATH, exist_ok=True) + + data_mean, data_std = fourcast_utils.get_mean_std( + DATA_MEAN_PATH, DATA_STD_PATH, VARS_CHANNEL + ) + transforms = [ + {"SqueezeData": {}}, + {"CropData": {"xmin": (0, 0), "xmax": (IMG_H, IMG_W)}}, + {"Normalize": {"mean": data_mean, "std": data_std}}, + ] + dataset_cfg = { + "name": "ERA5Dataset", + "file_path": TRAIN_FILE_PATH, + "input_keys": input_keys, + "label_keys": output_keys, + "PRECIP_FILE_PATH": PRECIP_FILE_PATH, + "vars_channel": VARS_CHANNEL, + "transforms": transforms, + } + dataset = ppsci.data.dataset.build_dataset(dataset_cfg) + + batch_sampler = io.DistributedBatchSampler( + dataset=dataset, + batch_size=1, + shuffle=False, + num_replicas=NUM_TRAINER, + rank=RANK, + ) + batch_sampler.set_epoch(epoch) + batch_idxs = [] + for data in tqdm(batch_sampler): + batch_idxs += data + + pool = Pool(processes=PROCESSES) + for st in range(0, len(batch_idxs), len(batch_idxs) // (PROCESSES - 1)): + end = st + len(batch_idxs) // (PROCESSES - 1) + result = pool.apply_async( + sample_func, (dataset_cfg, TMP_SAVE_PATH, batch_idxs[st:end]) + ) + pool.close() + pool.join() + if result.successful(): + logger.message("successful") + shutil.move(TMP_SAVE_PATH, save_path) + logger.message(f"move {TMP_SAVE_PATH} to {save_path}") + + +def main(): + EPOCHS = 0 + sample_data_epoch(EPOCHS) + + # if you want to sample every 5 epochs, you can use the following code + # EPOCHS = 150 + # for epoch in range(0, EPOCHS, 5): + # sample_data_epoch(epoch) + + +if __name__ == "__main__": + main() diff --git a/examples/fourcastnet/train_finetune.py b/examples/fourcastnet/train_finetune.py index 4208cfffd0..b9f461f779 100644 --- a/examples/fourcastnet/train_finetune.py +++ b/examples/fourcastnet/train_finetune.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -458,3 +459,354 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
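sample_data.py above pre-computes transformed ERA5 samples once per epoch and stores each one as a small HDF5 file, with the input/label/weight dicts written as nested datasets. A stripped-down sketch of that write path (the output directory and dummy arrays are placeholders):

import os

import h5py
import numpy as np

def write_sample(save_path, idx, input_dict, label_dict, weight_dict=None):
    # one .h5 file per sample, keyed the same way the dataset returns it
    with h5py.File(f"{save_path}/{idx:0>8d}.h5", "w") as fdest:
        for key, value in input_dict.items():
            fdest.create_dataset(f"input_dict/{key}", data=value, dtype="f")
        for key, value in label_dict.items():
            fdest.create_dataset(f"label_dict/{key}", data=value, dtype="f")
        if weight_dict is not None:
            for key, value in weight_dict.items():
                fdest.create_dataset(f"weight_dict/{key}", data=value, dtype="f")

out_dir = "./era5_split_demo"
os.makedirs(out_dir, exist_ok=True)
write_sample(out_dir, 0,
             {"input": np.zeros((20, 720, 1440), "float32")},
             {"output": np.zeros((20, 720, 1440), "float32")})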
+ +import functools +from os import path as osp +from typing import Tuple + +import h5py +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import examples.fourcastnet.utils as fourcast_utils +import ppsci +from ppsci.utils import logger + + +def get_vis_data( + file_path: str, + date_strings: Tuple[str, ...], + num_timestamps: int, + vars_channel: Tuple[int, ...], + img_h: int, + data_mean: np.ndarray, + data_std: np.ndarray, +): + _file = h5py.File(file_path, "r")["fields"] + data = [] + for date_str in date_strings: + hours_since_jan_01_epoch = fourcast_utils.date_to_hours(date_str) + ic = int(hours_since_jan_01_epoch / 6) + data.append(_file[ic : ic + num_timestamps + 1, vars_channel, 0:img_h]) + data = np.asarray(data) + + vis_data = {"input": (data[:, 0] - data_mean) / data_std} + for t in range(num_timestamps): + hour = (t + 1) * 6 + data_t = data[:, t + 1] + wind_data = [] + for i in range(data_t.shape[0]): + wind_data.append((data_t[i][0] ** 2 + data_t[i][1] ** 2) ** 0.5) + vis_data[f"target_{hour}h"] = np.asarray(wind_data) + return vis_data + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.set_random_seed(cfg.seed) + + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set training hyper-parameters + output_keys = tuple(f"output_{i}" for i in range(cfg.TRAIN.num_timestamps)) + + data_mean, data_std = fourcast_utils.get_mean_std( + cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL + ) + data_time_mean = fourcast_utils.get_time_mean( + cfg.DATA_TIME_MEAN_PATH, cfg.IMG_H, cfg.IMG_W, cfg.VARS_CHANNEL + ) + data_time_mean_normalize = np.expand_dims( + (data_time_mean[0] - data_mean) / data_std, 0 + ) + + # set transforms + transforms = [ + {"SqueezeData": {}}, + {"CropData": {"xmin": (0, 0), "xmax": (cfg.IMG_H, cfg.IMG_W)}}, + {"Normalize": {"mean": data_mean, "std": data_std}}, + ] + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.afno.input_keys, + "label_keys": output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "num_label_timestamps": cfg.TRAIN.num_timestamps, + "transforms": transforms, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 8, + } + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.L2RelLoss(), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.afno.input_keys, + "label_keys": output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "transforms": transforms, + "num_label_timestamps": cfg.TRAIN.num_timestamps, + "training": False, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set metric + metric = { + "MAE": ppsci.metric.MAE(keep_batch=True), + "LatitudeWeightedRMSE": ppsci.metric.LatitudeWeightedRMSE( + num_lat=cfg.IMG_H, + std=data_std, + keep_batch=True, + variable_dict={"u10": 0, "v10": 1}, + ), + "LatitudeWeightedACC": ppsci.metric.LatitudeWeightedACC( + num_lat=cfg.IMG_H, + mean=data_time_mean_normalize, + 
keep_batch=True, + variable_dict={"u10": 0, "v10": 1}, + ), + } + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.L2RelLoss(), + metric=metric, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + # set model + model_cfg = dict(cfg.MODEL.afno) + model_cfg.update( + {"output_keys": output_keys, "num_timestamps": cfg.TRAIN.num_timestamps} + ) + + model = ppsci.arch.AFNONet(**model_cfg) + + # init optimizer and lr scheduler + lr_scheduler_cfg = dict(cfg.TRAIN.lr_scheduler) + lr_scheduler_cfg.update({"iters_per_epoch": ITERS_PER_EPOCH}) + lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine(**lr_scheduler_cfg)() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=True, + validator=validator, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set testing hyper-parameters + output_keys = tuple(f"output_{i}" for i in range(cfg.EVAL.num_timestamps)) + + data_mean, data_std = fourcast_utils.get_mean_std( + cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL + ) + data_time_mean = fourcast_utils.get_time_mean( + cfg.DATA_TIME_MEAN_PATH, cfg.IMG_H, cfg.IMG_W, cfg.VARS_CHANNEL + ) + data_time_mean_normalize = np.expand_dims( + (data_time_mean[0] - data_mean) / data_std, 0 + ) + + # set transforms + transforms = [ + {"SqueezeData": {}}, + {"CropData": {"xmin": (0, 0), "xmax": (cfg.IMG_H, cfg.IMG_W)}}, + {"Normalize": {"mean": data_mean, "std": data_std}}, + ] + + # set model + model_cfg = dict(cfg.MODEL.afno) + model_cfg.update( + {"output_keys": output_keys, "num_timestamps": cfg.EVAL.num_timestamps} + ) + model = ppsci.arch.AFNONet(**model_cfg) + + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.TEST_FILE_PATH, + "input_keys": cfg.MODEL.afno.input_keys, + "label_keys": output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "transforms": transforms, + "num_label_timestamps": cfg.EVAL.num_timestamps, + "training": False, + "stride": 8, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set metirc + metric = { + "MAE": ppsci.metric.MAE(keep_batch=True), + "LatitudeWeightedRMSE": ppsci.metric.LatitudeWeightedRMSE( + num_lat=cfg.IMG_H, + std=data_std, + keep_batch=True, + variable_dict={"u10": 0, "v10": 1}, + ), + "LatitudeWeightedACC": ppsci.metric.LatitudeWeightedACC( + num_lat=cfg.IMG_H, + mean=data_time_mean_normalize, + keep_batch=True, + variable_dict={"u10": 0, "v10": 1}, + ), + } + + # set validator for testing + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.L2RelLoss(), + metric=metric, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + # set visualizer data + DATE_STRINGS = ("2018-09-08 00:00:00",) + vis_data = get_vis_data( + cfg.TEST_FILE_PATH, + DATE_STRINGS, + 
cfg.EVAL.num_timestamps, + cfg.VARS_CHANNEL, + cfg.IMG_H, + data_mean, + data_std, + ) + + def output_wind_func(d, var_name, data_mean, data_std): + output = (d[var_name] * data_std) + data_mean + wind_data = [] + for i in range(output.shape[0]): + wind_data.append((output[i][0] ** 2 + output[i][1] ** 2) ** 0.5) + return paddle.to_tensor(wind_data, paddle.get_default_dtype()) + + vis_output_expr = {} + for i in range(cfg.EVAL.num_timestamps): + hour = (i + 1) * 6 + vis_output_expr[f"output_{hour}h"] = functools.partial( + output_wind_func, + var_name=f"output_{i}", + data_mean=paddle.to_tensor(data_mean, paddle.get_default_dtype()), + data_std=paddle.to_tensor(data_std, paddle.get_default_dtype()), + ) + vis_output_expr[f"target_{hour}h"] = lambda d, hour=hour: d[f"target_{hour}h"] + # set visualizer + visualizer = { + "visualize_wind": ppsci.visualize.VisualizerWeather( + vis_data, + vis_output_expr, + xticks=np.linspace(0, 1439, 13), + xticklabels=[str(i) for i in range(360, -1, -30)], + yticks=np.linspace(0, 719, 7), + yticklabels=[str(i) for i in range(90, -91, -30)], + vmin=0, + vmax=25, + colorbar_label="m\s", + batch_size=cfg.EVAL.batch_size, + num_timestamps=cfg.EVAL.num_timestamps, + prefix="wind", + ) + } + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + solver.eval() + # visualize prediction from pretrained_model_path + solver.visualize() + + +@hydra.main( + version_base=None, config_path="./conf", config_name="fourcastnet_finetune.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/fourcastnet/train_precip.py b/examples/fourcastnet/train_precip.py index a695a10a42..2219516c52 100644 --- a/examples/fourcastnet/train_precip.py +++ b/examples/fourcastnet/train_precip.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -458,3 +459,351 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
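The metric dictionaries in train_finetune.py above rely on ppsci.metric.LatitudeWeightedRMSE and LatitudeWeightedACC. The usual FourCastNet-style formulation weights each latitude row by cos(lat), normalized so the weights average to one; a small numpy sketch of that idea (the library implementation additionally handles variable selection, de-normalization and the unlog option, and its latitude grid may be defined slightly differently):

import numpy as np

def latitude_weights(num_lat):
    # rows span +90 .. -90 degrees; weight each row proportionally to cos(lat)
    lat = np.deg2rad(np.linspace(90.0, -90.0, num_lat))
    w = np.cos(lat)
    return w / w.mean()

def lat_weighted_rmse(pred, target):
    # pred/target: (N, C, H, W); returns one RMSE value per channel
    w = latitude_weights(pred.shape[-2])[None, None, :, None]
    return np.sqrt(np.mean(w * (pred - target) ** 2, axis=(0, 2, 3)))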
+ +import functools +import os.path as osp +from typing import Tuple + +import h5py +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import examples.fourcastnet.utils as fourcast_utils +import ppsci +from ppsci.utils import logger + + +def get_vis_data( + wind_file_path: str, + file_path: str, + date_strings: Tuple[str, ...], + num_timestamps: int, + vars_channel: Tuple[int, ...], + img_h: int, + data_mean: np.ndarray, + data_std: np.ndarray, +): + __wind_file = h5py.File(wind_file_path, "r")["fields"] + _file = h5py.File(file_path, "r")["tp"] + wind_data = [] + data = [] + for date_str in date_strings: + hours_since_jan_01_epoch = fourcast_utils.date_to_hours(date_str) + ic = int(hours_since_jan_01_epoch / 6) + wind_data.append(__wind_file[ic, vars_channel, 0:img_h]) + data.append(_file[ic + 1 : ic + num_timestamps + 1, 0:img_h]) + wind_data = np.asarray(wind_data) + data = np.asarray(data) + + vis_data = {"input": (wind_data - data_mean) / data_std} + for t in range(num_timestamps): + hour = (t + 1) * 6 + data_t = data[:, t] + vis_data[f"target_{hour}h"] = np.asarray(data_t) + return vis_data + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", f"{cfg.output_dir}/train.log", "info") + + wind_data_mean, wind_data_std = fourcast_utils.get_mean_std( + cfg.WIND_MEAN_PATH, cfg.WIND_STD_PATH, cfg.VARS_CHANNEL + ) + data_time_mean = fourcast_utils.get_time_mean( + cfg.TIME_MEAN_PATH, cfg.IMG_H, cfg.IMG_W + ) + + # set train transforms + transforms = [ + {"SqueezeData": {}}, + {"CropData": {"xmin": (0, 0), "xmax": (cfg.IMG_H, cfg.IMG_W)}}, + { + "Normalize": { + "mean": wind_data_mean, + "std": wind_data_std, + "apply_keys": ("input",), + } + }, + {"Log1p": {"scale": 1e-5, "apply_keys": ("label",)}}, + ] + + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.WIND_TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.precip.input_keys, + "label_keys": cfg.MODEL.precip.output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "precip_file_path": cfg.TRAIN_FILE_PATH, + "transforms": transforms, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 8, + } + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.L2RelLoss(), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.WIND_VALID_FILE_PATH, + "input_keys": cfg.MODEL.precip.input_keys, + "label_keys": cfg.MODEL.precip.output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "precip_file_path": cfg.VALID_FILE_PATH, + "transforms": transforms, + "training": False, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set metric + metric = { + "MAE": ppsci.metric.MAE(keep_batch=True), + "LatitudeWeightedRMSE": ppsci.metric.LatitudeWeightedRMSE( + num_lat=cfg.IMG_H, keep_batch=True, unlog=True + ), + "LatitudeWeightedACC": ppsci.metric.LatitudeWeightedACC( + num_lat=cfg.IMG_H, mean=data_time_mean, keep_batch=True, unlog=True + ), + } + + # set validator + sup_validator = 
ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.L2RelLoss(), + metric=metric, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + # set model + wind_model = ppsci.arch.AFNONet(**cfg.MODEL.afno) + ppsci.utils.save_load.load_pretrain(wind_model, path=cfg.WIND_MODEL_PATH) + model_cfg = dict(cfg.MODEL.precip) + model_cfg.update({"wind_model": wind_model}) + model = ppsci.arch.PrecipNet(**model_cfg) + + # init optimizer and lr scheduler + lr_scheduler_cfg = dict(cfg.TRAIN.lr_scheduler) + lr_scheduler_cfg.update({"iters_per_epoch": ITERS_PER_EPOCH}) + lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine(**lr_scheduler_cfg)() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=True, + validator=validator, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set testing hyper-parameters + output_keys = tuple(f"output_{i}" for i in range(cfg.EVAL.num_timestamps)) + + # set model for testing + wind_model = ppsci.arch.AFNONet(**cfg.MODEL.afno) + ppsci.utils.save_load.load_pretrain(wind_model, path=cfg.WIND_MODEL_PATH) + model_cfg = dict(cfg.MODEL.precip) + model_cfg.update( + { + "output_keys": output_keys, + "num_timestamps": cfg.EVAL.num_timestamps, + "wind_model": wind_model, + } + ) + model = ppsci.arch.PrecipNet(**model_cfg) + + wind_data_mean, wind_data_std = fourcast_utils.get_mean_std( + cfg.WIND_MEAN_PATH, cfg.WIND_STD_PATH, cfg.VARS_CHANNEL + ) + data_time_mean = fourcast_utils.get_time_mean( + cfg.TIME_MEAN_PATH, cfg.IMG_H, cfg.IMG_W + ) + + # set train transforms + transforms = [ + {"SqueezeData": {}}, + {"CropData": {"xmin": (0, 0), "xmax": (cfg.IMG_H, cfg.IMG_W)}}, + { + "Normalize": { + "mean": wind_data_mean, + "std": wind_data_std, + "apply_keys": ("input",), + } + }, + {"Log1p": {"scale": 1e-5, "apply_keys": ("label",)}}, + ] + + eval_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.WIND_TEST_FILE_PATH, + "input_keys": cfg.MODEL.precip.input_keys, + "label_keys": output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "precip_file_path": cfg.TEST_FILE_PATH, + "num_label_timestamps": cfg.EVAL.num_timestamps, + "stride": 8, + "transforms": transforms, + "training": False, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + } + # set metirc + metric = { + "MAE": ppsci.metric.MAE(keep_batch=True), + "LatitudeWeightedRMSE": ppsci.metric.LatitudeWeightedRMSE( + num_lat=cfg.IMG_H, keep_batch=True, unlog=True + ), + "LatitudeWeightedACC": ppsci.metric.LatitudeWeightedACC( + num_lat=cfg.IMG_H, mean=data_time_mean, keep_batch=True, unlog=True + ), + } + + # set validator for testing + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.L2RelLoss(), + metric=metric, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + # set set visualizer data + DATE_STRINGS = ("2018-04-04 00:00:00",) + vis_data = get_vis_data( + 
cfg.WIND_TEST_FILE_PATH, + cfg.TEST_FILE_PATH, + DATE_STRINGS, + cfg.EVAL.num_timestamps, + cfg.VARS_CHANNEL, + cfg.IMG_H, + wind_data_mean, + wind_data_std, + ) + + def output_precip_func(d, var_name): + output = 1e-2 * paddle.expm1(d[var_name][0]) + return output + + visu_output_expr = {} + for i in range(cfg.EVAL.num_timestamps): + hour = (i + 1) * 6 + visu_output_expr[f"output_{hour}h"] = functools.partial( + output_precip_func, + var_name=f"output_{i}", + ) + visu_output_expr[f"target_{hour}h"] = ( + lambda d, hour=hour: d[f"target_{hour}h"] * 1000 + ) + # set visualizer + visualizer = { + "visualize_precip": ppsci.visualize.VisualizerWeather( + vis_data, + visu_output_expr, + xticks=np.linspace(0, 1439, 13), + xticklabels=[str(i) for i in range(360, -1, -30)], + yticks=np.linspace(0, 719, 7), + yticklabels=[str(i) for i in range(90, -91, -30)], + vmin=0.001, + vmax=130, + colorbar_label="mm", + log_norm=True, + batch_size=cfg.EVAL.batch_size, + num_timestamps=cfg.EVAL.num_timestamps, + prefix="precip", + ) + } + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + solver.eval() + # visualize prediction + solver.visualize() + + +@hydra.main( + version_base=None, config_path="./conf", config_name="fourcastnet_precip.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/fourcastnet/train_pretrain.py b/examples/fourcastnet/train_pretrain.py index c56bd90750..8556d178e4 100644 --- a/examples/fourcastnet/train_pretrain.py +++ b/examples/fourcastnet/train_pretrain.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -389,3 +390,284 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from os import path as osp + +import hydra +import numpy as np +import paddle.distributed as dist +from omegaconf import DictConfig + +import examples.fourcastnet.utils as fourcast_utils +import ppsci +from ppsci.utils import logger + + +def get_data_stat(cfg: DictConfig): + data_mean, data_std = fourcast_utils.get_mean_std( + cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL + ) + data_time_mean = fourcast_utils.get_time_mean( + cfg.DATA_TIME_MEAN_PATH, cfg.IMG_H, cfg.IMG_W, cfg.VARS_CHANNEL + ) + data_time_mean_normalize = np.expand_dims( + (data_time_mean[0] - data_mean) / data_std, 0 + ) + return data_mean, data_std, data_time_mean_normalize + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + data_mean, data_std = fourcast_utils.get_mean_std( + cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL + ) + data_time_mean = fourcast_utils.get_time_mean( + cfg.DATA_TIME_MEAN_PATH, cfg.IMG_H, cfg.IMG_W, cfg.VARS_CHANNEL + ) + data_time_mean_normalize = np.expand_dims( + (data_time_mean[0] - data_mean) / data_std, 0 + ) + # set train transforms + transforms = [ + {"SqueezeData": {}}, + {"CropData": {"xmin": (0, 0), "xmax": (cfg.IMG_H, cfg.IMG_W)}}, + {"Normalize": {"mean": data_mean, "std": data_std}}, + ] + + # set train dataloader config + if not cfg.USE_SAMPLED_DATA: + train_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.afno.input_keys, + "label_keys": cfg.MODEL.afno.output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "transforms": transforms, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 8, + } + else: + NUM_GPUS_PER_NODE = 8 + train_dataloader_cfg = { + "dataset": { + "name": "ERA5SampledDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.afno.input_keys, + "label_keys": cfg.MODEL.afno.output_keys, + }, + "sampler": { + "name": "DistributedBatchSampler", + "drop_last": True, + "shuffle": True, + "num_replicas": NUM_GPUS_PER_NODE, + "rank": dist.get_rank() % NUM_GPUS_PER_NODE, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 8, + } + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.L2RelLoss(), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.afno.input_keys, + "label_keys": cfg.MODEL.afno.output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "transforms": transforms, + "training": False, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.L2RelLoss(), + metric={ + "MAE": ppsci.metric.MAE(keep_batch=True), + "LatitudeWeightedRMSE": ppsci.metric.LatitudeWeightedRMSE( + num_lat=cfg.IMG_H, + std=data_std, + keep_batch=True, + variable_dict={"u10": 0, "v10": 1}, + ), + "LatitudeWeightedACC": ppsci.metric.LatitudeWeightedACC( + num_lat=cfg.IMG_H, + mean=data_time_mean_normalize, + 
keep_batch=True, + variable_dict={"u10": 0, "v10": 1}, + ), + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + # set model + model = ppsci.arch.AFNONet(**cfg.MODEL.afno) + + # init optimizer and lr scheduler + lr_scheduler_cfg = dict(cfg.TRAIN.lr_scheduler) + lr_scheduler_cfg.update({"iters_per_epoch": ITERS_PER_EPOCH}) + lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine(**lr_scheduler_cfg)() + + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=True, + seed=cfg.seed, + validator=validator, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + data_mean, data_std = fourcast_utils.get_mean_std( + cfg.DATA_MEAN_PATH, cfg.DATA_STD_PATH, cfg.VARS_CHANNEL + ) + data_time_mean = fourcast_utils.get_time_mean( + cfg.DATA_TIME_MEAN_PATH, cfg.IMG_H, cfg.IMG_W, cfg.VARS_CHANNEL + ) + data_time_mean_normalize = np.expand_dims( + (data_time_mean[0] - data_mean) / data_std, 0 + ) + # set train transforms + transforms = [ + {"SqueezeData": {}}, + {"CropData": {"xmin": (0, 0), "xmax": (cfg.IMG_H, cfg.IMG_W)}}, + {"Normalize": {"mean": data_mean, "std": data_std}}, + ] + + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "ERA5Dataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.afno.input_keys, + "label_keys": cfg.MODEL.afno.output_keys, + "vars_channel": cfg.VARS_CHANNEL, + "transforms": transforms, + "training": False, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.L2RelLoss(), + metric={ + "MAE": ppsci.metric.MAE(keep_batch=True), + "LatitudeWeightedRMSE": ppsci.metric.LatitudeWeightedRMSE( + num_lat=cfg.IMG_H, + std=data_std, + keep_batch=True, + variable_dict={"u10": 0, "v10": 1}, + ), + "LatitudeWeightedACC": ppsci.metric.LatitudeWeightedACC( + num_lat=cfg.IMG_H, + mean=data_time_mean_normalize, + keep_batch=True, + variable_dict={"u10": 0, "v10": 1}, + ), + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + # set model + model = ppsci.arch.AFNONet(**cfg.MODEL.afno) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + solver.eval() + + +@hydra.main( + version_base=None, config_path="./conf", config_name="fourcastnet_pretrain.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/fourcastnet/utils.py 
b/examples/fourcastnet/utils.py index 88f3a96105..7306ff9696 100644 --- a/examples/fourcastnet/utils.py +++ b/examples/fourcastnet/utils.py @@ -1,35 +1,35 @@ -from datetime import datetime -from typing import Optional -from typing import Tuple - -import numpy as np - - -def date_to_hours(date: str): - date_obj = datetime.strptime(date, "%Y-%m-%d %H:%M:%S") - day_of_year = date_obj.timetuple().tm_yday - 1 - hour_of_day = date_obj.timetuple().tm_hour - hours_since_jan_01_epoch = 24 * day_of_year + hour_of_day - return hours_since_jan_01_epoch - - -def get_mean_std(mean_path: str, std_path: str, vars_channel: Tuple[int, ...]): - data_mean = np.load(mean_path).squeeze(0).astype(np.float32) - data_mean = data_mean[vars_channel] - data_std = np.load(std_path).squeeze(0).astype(np.float32) - data_std = data_std[vars_channel] - return data_mean, data_std - - -def get_time_mean( - time_mean_path: str, - img_h: int, - img_w: int, - vars_channel: Optional[Tuple[int, ...]] = None, -): - time_mean = np.load(time_mean_path).astype(np.float32) - if vars_channel is not None: - time_mean = time_mean[:, vars_channel, :img_h, :img_w] - else: - time_mean = time_mean[:, :img_h, :img_w] - return time_mean +from datetime import datetime +from typing import Optional +from typing import Tuple + +import numpy as np + + +def date_to_hours(date: str): + date_obj = datetime.strptime(date, "%Y-%m-%d %H:%M:%S") + day_of_year = date_obj.timetuple().tm_yday - 1 + hour_of_day = date_obj.timetuple().tm_hour + hours_since_jan_01_epoch = 24 * day_of_year + hour_of_day + return hours_since_jan_01_epoch + + +def get_mean_std(mean_path: str, std_path: str, vars_channel: Tuple[int, ...]): + data_mean = np.load(mean_path).squeeze(0).astype(np.float32) + data_mean = data_mean[vars_channel] + data_std = np.load(std_path).squeeze(0).astype(np.float32) + data_std = data_std[vars_channel] + return data_mean, data_std + + +def get_time_mean( + time_mean_path: str, + img_h: int, + img_w: int, + vars_channel: Optional[Tuple[int, ...]] = None, +): + time_mean = np.load(time_mean_path).astype(np.float32) + if vars_channel is not None: + time_mean = time_mean[:, vars_channel, :img_h, :img_w] + else: + time_mean = time_mean[:, :img_h, :img_w] + return time_mean diff --git a/examples/fpde/conf/fractional_poisson_2d.yaml b/examples/fpde/conf/fractional_poisson_2d.yaml index 7ef0d7cd3e..7c78aad5ba 100644 --- a/examples/fpde/conf/fractional_poisson_2d.yaml +++ b/examples/fpde/conf/fractional_poisson_2d.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -58,3 +59,73 @@ EVAL: pretrained_model_path: null eval_with_no_grad: true batch_size: 128 +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_fractional_poisson_2d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 
+output_dir: ${hydra:run.dir} +log_freq: 100 + +ALPHA: 1.8 +NPOINT_INTERIOR: 100 +NPOINT_BC: 1 +NPOINT_EVAL: 1000 + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u"] + num_layers: 4 + hidden_size: 20 + activation: "tanh" + +# training settings +TRAIN: + epochs: 20000 + iters_per_epoch: 1 + save_freq: 100 + eval_during_train: true + eval_freq: 1000 + learning_rate: 0.001 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 128 +>>>>>>> Stashed changes diff --git a/examples/fpde/fractional_poisson_2d.py b/examples/fpde/fractional_poisson_2d.py index 194a09934a..c5f60ff3d4 100644 --- a/examples/fpde/fractional_poisson_2d.py +++ b/examples/fpde/fractional_poisson_2d.py @@ -1,282 +1,282 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Reference: https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/fractional_Poisson_2d.py - -import math -from typing import Dict -from typing import Tuple -from typing import Union - -import hydra -import numpy as np -import paddle -from matplotlib import cm -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci - - -def plot(x, y, input_data, output_data, label_data): - fig = plt.figure() - # plot prediction - ax1 = fig.add_subplot(121, projection="3d") - surf1 = ax1.plot_surface( - x, y, output_data["u"], cmap=cm.jet, linewidth=0, antialiased=False - ) - ax1.set_zlim(0, 1.2) - ax1.set_xlabel(r"$x$") - ax1.set_ylabel(r"$y$") - ax1.set_zlabel(r"$z$") - ax1.set_title(r"$u(x,y), label$") - fig.colorbar(surf1, ax=ax1, aspect=5, orientation="horizontal") - - # plot label - ax2 = fig.add_subplot(122, projection="3d") - surf2 = ax2.plot_surface( - x, y, label_data, cmap=cm.jet, linewidth=0, antialiased=False - ) - ax2.set_zlim(0, 1.2) - ax2.set_xlabel("x") - ax2.set_ylabel("y") - ax2.set_zlabel("z") - ax2.set_title(r"$u(x,y), prediction$") - - # Add a color bar which maps values to colors. 
- fig.colorbar(surf2, ax=ax2, aspect=5, orientation="horizontal") - fig.subplots_adjust(wspace=0.5, hspace=0.5) - plt.savefig("fractional_poisson_2d_result.png", dpi=400) - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - def output_transform(in_, out): - return {"u": (1 - (in_["x"] ** 2 + in_["y"] ** 2)) * out["u"]} - - model.register_output_transform(output_transform) - - # set geometry - geom = {"disk": ppsci.geometry.Disk((0, 0), 1)} - - # set equation - equation = { - "fpde": ppsci.equation.FractionalPoisson(cfg.ALPHA, geom["disk"], [8, 100]) - } - - # set constraint - def u_solution_func( - out: Dict[str, Union[paddle.Tensor, np.ndarray]] - ) -> Union[paddle.Tensor, np.ndarray]: - if isinstance(out["x"], paddle.Tensor): - return paddle.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** ( - 1 + cfg.ALPHA / 2 - ) - return np.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** (1 + cfg.ALPHA / 2) - - # set transform for input data - def input_data_fpde_transform( - input: Dict[str, np.ndarray], - weight: Dict[str, np.ndarray], - label: Dict[str, np.ndarray], - ) -> Tuple[ - Dict[str, paddle.Tensor], Dict[str, paddle.Tensor], Dict[str, paddle.Tensor] - ]: - """Get sampling points for integral. - - Args: - input (Dict[str, paddle.Tensor]): Raw input dict. - weight (Dict[str, paddle.Tensor]): Raw weight dict. - label (Dict[str, paddle.Tensor]): Raw label dict. - - Returns: - Tuple[ Dict[str, paddle.Tensor], Dict[str, paddle.Tensor], Dict[str, paddle.Tensor] ]: - Input dict contained sampling points, weight dict and label dict. - """ - points = np.concatenate((input["x"].numpy(), input["y"].numpy()), axis=1) - x = equation["fpde"].get_x(points) - return ( - { - **input, - **{k: paddle.to_tensor(v) for k, v in x.items()}, - }, - weight, - label, - ) - - fpde_constraint = ppsci.constraint.InteriorConstraint( - equation["fpde"].equations, - {"fpde": 0}, - geom["disk"], - { - "dataset": { - "name": "IterableNamedArrayDataset", - "transforms": ( - { - "FunctionalTransform": { - "transform_func": input_data_fpde_transform, - }, - }, - ), - }, - "batch_size": cfg.NPOINT_INTERIOR, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean"), - random="Hammersley", - criteria=lambda x, y: ~geom["disk"].on_boundary(np.hstack((x, y))), - name="FPDE", - ) - bc = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["disk"], - { - "dataset": {"name": "IterableNamedArrayDataset"}, - "batch_size": cfg.NPOINT_BC, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean"), - criteria=lambda x, y: np.isclose(x, -1), - name="BC", - ) - # wrap constraints together - constraint = { - fpde_constraint.name: fpde_constraint, - bc.name: bc, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - l2rel_metric = ppsci.validate.GeometryValidator( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["disk"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": cfg.NPOINT_EVAL, - }, - ppsci.loss.MSELoss(), - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="L2Rel_Metric", - ) - validator = {l2rel_metric.name: l2rel_metric} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - optimizer=optimizer, - equation=equation, - validator=validator, - cfg=cfg, - ) - # train model - solver.train() - - # visualize prediction after finished training - theta = np.arange(0, 2 * math.pi, 0.04, 
dtype=paddle.get_default_dtype()) - rho = np.arange(0, 1, 0.005, dtype=paddle.get_default_dtype()) - mt, mr = np.meshgrid(theta, rho) - x = mr * np.cos(mt) - y = mr * np.sin(mt) - - input_data = { - "x": x.reshape([-1, 1]), - "y": y.reshape([-1, 1]), - } - label_data = u_solution_func(input_data).reshape([x.shape[0], -1]) - output_data = solver.predict(input_data, return_numpy=True) - output_data = {k: v.reshape([x.shape[0], -1]) for k, v in output_data.items()} - plot(x, y, input_data, output_data, label_data) - - -def evaluate(cfg: DictConfig): - # load model - model = ppsci.load_model(cfg.pretrained_model_path) - # set geometry - geom = { - "disk": ppsci.geometry.Disk(np.array([0, 0]), np.array([1]), np.array([[0]])), - } - - def u_solution_func( - out: Dict[str, Union[paddle.Tensor, np.ndarray]] - ) -> Union[paddle.Tensor, np.ndarray]: - if isinstance(out["x"], paddle.Tensor): - return paddle.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** ( - 1 + cfg.ALPHA / 2 - ) - return np.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** (1 + cfg.ALPHA / 2) - - # set validator - l2rel_metric = ppsci.validate.GeometryValidator( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["disk"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": cfg.NPOINT_EVAL, - }, - ppsci.loss.MSELoss(), - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="L2Rel_Metric", - ) - validator = {l2rel_metric.name: l2rel_metric} - - # initialize solver - solver = ppsci.solver.Solver( - model, - validator=validator, - cfg=cfg, - ) - # train model - solver.train() - - # visualize prediction after finished training - theta = np.arange(0, 2 * math.pi, 0.04, dtype=paddle.get_default_dtype()) - rho = np.arange(0, 1, 0.005, dtype=paddle.get_default_dtype()) - mt, mr = np.meshgrid(theta, rho) - x = mr * np.cos(mt) - y = mr * np.sin(mt) - - input_data = { - "x": x.reshape([-1, 1]), - "y": y.reshape([-1, 1]), - } - - label_data = u_solution_func(input_data).reshape([x.shape[0], -1]) - output_data = solver.predict(input_data, return_numpy=True) - output_data = {k: v.reshape([x.shape[0], -1]) for k, v in output_data.items()} - - plot(x, y, input_data, output_data, label_data) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="fractional_poisson_2d.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Reference: https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/fractional_Poisson_2d.py + +import math +from typing import Dict +from typing import Tuple +from typing import Union + +import hydra +import numpy as np +import paddle +from matplotlib import cm +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci + + +def plot(x, y, input_data, output_data, label_data): + fig = plt.figure() + # plot prediction + ax1 = fig.add_subplot(121, projection="3d") + surf1 = ax1.plot_surface( + x, y, output_data["u"], cmap=cm.jet, linewidth=0, antialiased=False + ) + ax1.set_zlim(0, 1.2) + ax1.set_xlabel(r"$x$") + ax1.set_ylabel(r"$y$") + ax1.set_zlabel(r"$z$") + ax1.set_title(r"$u(x,y), label$") + fig.colorbar(surf1, ax=ax1, aspect=5, orientation="horizontal") + + # plot label + ax2 = fig.add_subplot(122, projection="3d") + surf2 = ax2.plot_surface( + x, y, label_data, cmap=cm.jet, linewidth=0, antialiased=False + ) + ax2.set_zlim(0, 1.2) + ax2.set_xlabel("x") + ax2.set_ylabel("y") + ax2.set_zlabel("z") + ax2.set_title(r"$u(x,y), prediction$") + + # Add a color bar which maps values to colors. + fig.colorbar(surf2, ax=ax2, aspect=5, orientation="horizontal") + fig.subplots_adjust(wspace=0.5, hspace=0.5) + plt.savefig("fractional_poisson_2d_result.png", dpi=400) + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + def output_transform(in_, out): + return {"u": (1 - (in_["x"] ** 2 + in_["y"] ** 2)) * out["u"]} + + model.register_output_transform(output_transform) + + # set geometry + geom = {"disk": ppsci.geometry.Disk((0, 0), 1)} + + # set equation + equation = { + "fpde": ppsci.equation.FractionalPoisson(cfg.ALPHA, geom["disk"], [8, 100]) + } + + # set constraint + def u_solution_func( + out: Dict[str, Union[paddle.Tensor, np.ndarray]] + ) -> Union[paddle.Tensor, np.ndarray]: + if isinstance(out["x"], paddle.Tensor): + return paddle.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** ( + 1 + cfg.ALPHA / 2 + ) + return np.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** (1 + cfg.ALPHA / 2) + + # set transform for input data + def input_data_fpde_transform( + input: Dict[str, np.ndarray], + weight: Dict[str, np.ndarray], + label: Dict[str, np.ndarray], + ) -> Tuple[ + Dict[str, paddle.Tensor], Dict[str, paddle.Tensor], Dict[str, paddle.Tensor] + ]: + """Get sampling points for integral. + + Args: + input (Dict[str, paddle.Tensor]): Raw input dict. + weight (Dict[str, paddle.Tensor]): Raw weight dict. + label (Dict[str, paddle.Tensor]): Raw label dict. + + Returns: + Tuple[ Dict[str, paddle.Tensor], Dict[str, paddle.Tensor], Dict[str, paddle.Tensor] ]: + Input dict contained sampling points, weight dict and label dict. 
+ """ + points = np.concatenate((input["x"].numpy(), input["y"].numpy()), axis=1) + x = equation["fpde"].get_x(points) + return ( + { + **input, + **{k: paddle.to_tensor(v) for k, v in x.items()}, + }, + weight, + label, + ) + + fpde_constraint = ppsci.constraint.InteriorConstraint( + equation["fpde"].equations, + {"fpde": 0}, + geom["disk"], + { + "dataset": { + "name": "IterableNamedArrayDataset", + "transforms": ( + { + "FunctionalTransform": { + "transform_func": input_data_fpde_transform, + }, + }, + ), + }, + "batch_size": cfg.NPOINT_INTERIOR, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean"), + random="Hammersley", + criteria=lambda x, y: ~geom["disk"].on_boundary(np.hstack((x, y))), + name="FPDE", + ) + bc = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["disk"], + { + "dataset": {"name": "IterableNamedArrayDataset"}, + "batch_size": cfg.NPOINT_BC, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean"), + criteria=lambda x, y: np.isclose(x, -1), + name="BC", + ) + # wrap constraints together + constraint = { + fpde_constraint.name: fpde_constraint, + bc.name: bc, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + l2rel_metric = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["disk"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": cfg.NPOINT_EVAL, + }, + ppsci.loss.MSELoss(), + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="L2Rel_Metric", + ) + validator = {l2rel_metric.name: l2rel_metric} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + cfg=cfg, + ) + # train model + solver.train() + + # visualize prediction after finished training + theta = np.arange(0, 2 * math.pi, 0.04, dtype=paddle.get_default_dtype()) + rho = np.arange(0, 1, 0.005, dtype=paddle.get_default_dtype()) + mt, mr = np.meshgrid(theta, rho) + x = mr * np.cos(mt) + y = mr * np.sin(mt) + + input_data = { + "x": x.reshape([-1, 1]), + "y": y.reshape([-1, 1]), + } + label_data = u_solution_func(input_data).reshape([x.shape[0], -1]) + output_data = solver.predict(input_data, return_numpy=True) + output_data = {k: v.reshape([x.shape[0], -1]) for k, v in output_data.items()} + plot(x, y, input_data, output_data, label_data) + + +def evaluate(cfg: DictConfig): + # load model + model = ppsci.load_model(cfg.pretrained_model_path) + # set geometry + geom = { + "disk": ppsci.geometry.Disk(np.array([0, 0]), np.array([1]), np.array([[0]])), + } + + def u_solution_func( + out: Dict[str, Union[paddle.Tensor, np.ndarray]] + ) -> Union[paddle.Tensor, np.ndarray]: + if isinstance(out["x"], paddle.Tensor): + return paddle.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** ( + 1 + cfg.ALPHA / 2 + ) + return np.abs(1 - (out["x"] ** 2 + out["y"] ** 2)) ** (1 + cfg.ALPHA / 2) + + # set validator + l2rel_metric = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["disk"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": cfg.NPOINT_EVAL, + }, + ppsci.loss.MSELoss(), + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="L2Rel_Metric", + ) + validator = {l2rel_metric.name: l2rel_metric} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + # train model + solver.train() + + # visualize prediction after finished 
training + theta = np.arange(0, 2 * math.pi, 0.04, dtype=paddle.get_default_dtype()) + rho = np.arange(0, 1, 0.005, dtype=paddle.get_default_dtype()) + mt, mr = np.meshgrid(theta, rho) + x = mr * np.cos(mt) + y = mr * np.sin(mt) + + input_data = { + "x": x.reshape([-1, 1]), + "y": y.reshape([-1, 1]), + } + + label_data = u_solution_func(input_data).reshape([x.shape[0], -1]) + output_data = solver.predict(input_data, return_numpy=True) + output_data = {k: v.reshape([x.shape[0], -1]) for k, v in output_data.items()} + + plot(x, y, input_data, output_data, label_data) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="fractional_poisson_2d.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/fsi/conf/viv.yaml b/examples/fsi/conf/viv.yaml index 56037e231f..44c6e34e79 100644 --- a/examples/fsi/conf/viv.yaml +++ b/examples/fsi/conf/viv.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -80,3 +81,86 @@ INFER: max_batch_size: 64 num_cpu_threads: 4 batch_size: 16 +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_VIV/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +VIV_DATA_PATH: "./VIV_Training_Neta100.mat" + +# model settings +MODEL: + input_keys: ["t_f"] + output_keys: ["eta"] + num_layers: 5 + hidden_size: 50 + activation: "tanh" + +# training settings +TRAIN: + epochs: 100000 + iters_per_epoch: 1 + save_freq: 10000 + eval_during_train: true + eval_freq: 1000 + batch_size: 100 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + step_size: 20000 + gamma: 0.9 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + batch_size: 32 + +# inference settings +INFER: + pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/viv/viv_pretrained.pdparams" + export_path: ./inference/viv + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + input_keys: ${MODEL.input_keys} + output_keys: ["eta", "f"] + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 64 + num_cpu_threads: 4 + batch_size: 16 +>>>>>>> Stashed changes diff --git a/examples/fsi/viv.py b/examples/fsi/viv.py index 2d94dc6443..257f799bd8 100644 --- a/examples/fsi/viv.py +++ b/examples/fsi/viv.py @@ -1,262 +1,262 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import hydra -from omegaconf import DictConfig - -import ppsci - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"VIV": ppsci.equation.Vibration(2, -4, 0)} - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "MatDataset", - "file_path": cfg.VIV_DATA_PATH, - "input_keys": ("t_f",), - "label_keys": ("eta", "f"), - "weight_dict": {"eta": 100}, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - }, - ppsci.loss.MSELoss("mean"), - {"eta": lambda out: out["eta"], **equation["VIV"].equations}, - name="Sup", - ) - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.Step(**cfg.TRAIN.lr_scheduler)() - optimizer = ppsci.optimizer.Adam(lr_scheduler)((model,) + tuple(equation.values())) - - # set validator - eta_l2_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "MatDataset", - "file_path": cfg.VIV_DATA_PATH, - "input_keys": ("t_f",), - "label_keys": ("eta", "f"), - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"eta": lambda out: out["eta"], **equation["VIV"].equations}, - metric={"MSE": ppsci.metric.L2Rel()}, - name="eta_l2", - ) - validator = {eta_l2_validator.name: eta_l2_validator} - - # set visualizer(optional) - visu_mat = ppsci.utils.reader.load_mat_file( - cfg.VIV_DATA_PATH, - ("t_f", "eta_gt", "f_gt"), - alias_dict={"eta_gt": "eta", "f_gt": "f"}, - ) - visualizer = { - "visualize_u": ppsci.visualize.VisualizerScatter1D( - visu_mat, - ("t_f",), - { - r"$\eta$": lambda d: d["eta"], # plot with latex title - r"$\eta_{gt}$": lambda d: d["eta_gt"], # plot with latex title - r"$f$": equation["VIV"].equations["f"], # plot with latex title - r"$f_{gt}$": lambda d: d["f_gt"], # plot with latex title - }, - num_timestamps=1, - prefix="viv_pred", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - optimizer=optimizer, - equation=equation, - validator=validator, - visualizer=visualizer, - cfg=cfg, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"VIV": ppsci.equation.Vibration(2, -4, 0)} - - # set validator - eta_l2_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "MatDataset", - "file_path": cfg.VIV_DATA_PATH, - "input_keys": ("t_f",), - "label_keys": ("eta", "f"), - }, - "batch_size": cfg.EVAL.batch_size, - }, - ppsci.loss.MSELoss("mean"), - {"eta": lambda out: out["eta"], **equation["VIV"].equations}, - metric={"MSE": ppsci.metric.L2Rel()}, - name="eta_l2", - ) - validator = {eta_l2_validator.name: eta_l2_validator} - - # set visualizer(optional) - visu_mat = ppsci.utils.reader.load_mat_file( - cfg.VIV_DATA_PATH, - ("t_f", "eta_gt", "f_gt"), - 
alias_dict={"eta_gt": "eta", "f_gt": "f"}, - ) - - visualizer = { - "visualize_u": ppsci.visualize.VisualizerScatter1D( - visu_mat, - ("t_f",), - { - r"$\eta$": lambda d: d["eta"], # plot with latex title - r"$\eta_{gt}$": lambda d: d["eta_gt"], # plot with latex title - r"$f$": equation["VIV"].equations["f"], # plot with latex title - r"$f_{gt}$": lambda d: d["f_gt"], # plot with latex title - }, - num_timestamps=1, - prefix="viv_pred", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - equation=equation, - validator=validator, - visualizer=visualizer, - cfg=cfg, - ) - - # evaluate - solver.eval() - # visualize prediction - solver.visualize() - - -def export(cfg: DictConfig): - from paddle import nn - from paddle.static import InputSpec - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - # initialize equation - equation = {"VIV": ppsci.equation.Vibration(2, -4, 0)} - # initialize solver - solver = ppsci.solver.Solver( - model, - equation=equation, - cfg=cfg, - ) - # Convert equation to func - f_func = ppsci.lambdify( - solver.equation["VIV"].equations["f"], - solver.model, - list(solver.equation["VIV"].learnable_parameters), - ) - - class Wrapped_Model(nn.Layer): - def __init__(self, model, func): - super().__init__() - self.model = model - self.func = func - - def forward(self, x): - x = {**x} - model_out = self.model(x) - func_out = self.func(x) - return {**model_out, "f": func_out} - - solver.model = Wrapped_Model(model, f_func) - # export models - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path, skip_prune_program=True) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - # set model predictor - predictor = pinn_predictor.PINNPredictor(cfg) - - infer_mat = ppsci.utils.reader.load_mat_file( - cfg.VIV_DATA_PATH, - ("t_f", "eta_gt", "f_gt"), - alias_dict={"eta_gt": "eta", "f_gt": "f"}, - ) - - input_dict = {key: infer_mat[key] for key in cfg.INFER.input_keys} - - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.INFER.output_keys, output_dict.keys()) - } - infer_mat.update(output_dict) - - ppsci.visualize.plot.save_plot_from_1d_dict( - "./viv_pred", infer_mat, ("t_f",), ("eta", "eta_gt", "f", "f_gt") - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="viv.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import hydra +from omegaconf import DictConfig + +import ppsci + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"VIV": ppsci.equation.Vibration(2, -4, 0)} + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "MatDataset", + "file_path": cfg.VIV_DATA_PATH, + "input_keys": ("t_f",), + "label_keys": ("eta", "f"), + "weight_dict": {"eta": 100}, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + {"eta": lambda out: out["eta"], **equation["VIV"].equations}, + name="Sup", + ) + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.Step(**cfg.TRAIN.lr_scheduler)() + optimizer = ppsci.optimizer.Adam(lr_scheduler)((model,) + tuple(equation.values())) + + # set validator + eta_l2_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "MatDataset", + "file_path": cfg.VIV_DATA_PATH, + "input_keys": ("t_f",), + "label_keys": ("eta", "f"), + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"eta": lambda out: out["eta"], **equation["VIV"].equations}, + metric={"MSE": ppsci.metric.L2Rel()}, + name="eta_l2", + ) + validator = {eta_l2_validator.name: eta_l2_validator} + + # set visualizer(optional) + visu_mat = ppsci.utils.reader.load_mat_file( + cfg.VIV_DATA_PATH, + ("t_f", "eta_gt", "f_gt"), + alias_dict={"eta_gt": "eta", "f_gt": "f"}, + ) + visualizer = { + "visualize_u": ppsci.visualize.VisualizerScatter1D( + visu_mat, + ("t_f",), + { + r"$\eta$": lambda d: d["eta"], # plot with latex title + r"$\eta_{gt}$": lambda d: d["eta_gt"], # plot with latex title + r"$f$": equation["VIV"].equations["f"], # plot with latex title + r"$f_{gt}$": lambda d: d["f_gt"], # plot with latex title + }, + num_timestamps=1, + prefix="viv_pred", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + visualizer=visualizer, + cfg=cfg, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"VIV": ppsci.equation.Vibration(2, -4, 0)} + + # set validator + eta_l2_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "MatDataset", + "file_path": cfg.VIV_DATA_PATH, + "input_keys": ("t_f",), + "label_keys": ("eta", "f"), + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"eta": lambda out: out["eta"], **equation["VIV"].equations}, + metric={"MSE": ppsci.metric.L2Rel()}, + name="eta_l2", + ) + validator = {eta_l2_validator.name: eta_l2_validator} + + # set visualizer(optional) + visu_mat = ppsci.utils.reader.load_mat_file( + cfg.VIV_DATA_PATH, + ("t_f", "eta_gt", "f_gt"), + alias_dict={"eta_gt": "eta", "f_gt": "f"}, + ) + + visualizer = { + "visualize_u": ppsci.visualize.VisualizerScatter1D( + visu_mat, + ("t_f",), + { + r"$\eta$": lambda d: d["eta"], # plot with latex title + r"$\eta_{gt}$": lambda d: d["eta_gt"], # plot with latex title + r"$f$": equation["VIV"].equations["f"], # plot with latex title + r"$f_{gt}$": lambda d: d["f_gt"], # plot with latex title + }, + 
num_timestamps=1, + prefix="viv_pred", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + equation=equation, + validator=validator, + visualizer=visualizer, + cfg=cfg, + ) + + # evaluate + solver.eval() + # visualize prediction + solver.visualize() + + +def export(cfg: DictConfig): + from paddle import nn + from paddle.static import InputSpec + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + # initialize equation + equation = {"VIV": ppsci.equation.Vibration(2, -4, 0)} + # initialize solver + solver = ppsci.solver.Solver( + model, + equation=equation, + cfg=cfg, + ) + # Convert equation to func + f_func = ppsci.lambdify( + solver.equation["VIV"].equations["f"], + solver.model, + list(solver.equation["VIV"].learnable_parameters), + ) + + class Wrapped_Model(nn.Layer): + def __init__(self, model, func): + super().__init__() + self.model = model + self.func = func + + def forward(self, x): + x = {**x} + model_out = self.model(x) + func_out = self.func(x) + return {**model_out, "f": func_out} + + solver.model = Wrapped_Model(model, f_func) + # export models + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, skip_prune_program=True) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + # set model predictor + predictor = pinn_predictor.PINNPredictor(cfg) + + infer_mat = ppsci.utils.reader.load_mat_file( + cfg.VIV_DATA_PATH, + ("t_f", "eta_gt", "f_gt"), + alias_dict={"eta_gt": "eta", "f_gt": "f"}, + ) + + input_dict = {key: infer_mat[key] for key in cfg.INFER.input_keys} + + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.INFER.output_keys, output_dict.keys()) + } + infer_mat.update(output_dict) + + ppsci.visualize.plot.save_plot_from_1d_dict( + "./viv_pred", infer_mat, ("t_f",), ("eta", "eta_gt", "f", "f_gt") + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="viv.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/gpinn/conf/poisson_1d.yaml b/examples/gpinn/conf/poisson_1d.yaml index 49a5d38faa..63dcaddf45 100644 --- a/examples/gpinn/conf/poisson_1d.yaml +++ b/examples/gpinn/conf/poisson_1d.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -51,3 +52,57 @@ EVAL: batch_size: l2rel_validator: ${NPOINT_PDE_EVAL} pretrained_model_path: null +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_poisson_1d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} + +# set working 
condition +NPOINT_PDE: 15 +NPOINT_PDE_EVAL: 100 + +# model settings +MODEL: + input_keys: ["x"] + output_keys: ["u"] + num_layers: 3 + hidden_size: 20 + activation: "tanh" + +# training settings +TRAIN: + epochs: 20000 + iters_per_epoch: 1 + eval_freq: 1000 + eval_during_train: true + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: + l2rel_validator: ${NPOINT_PDE_EVAL} + pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/gpinn/poisson_1d.py b/examples/gpinn/poisson_1d.py index a8cb3da1b2..ac2c50ed12 100644 --- a/examples/gpinn/poisson_1d.py +++ b/examples/gpinn/poisson_1d.py @@ -1,346 +1,346 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Reference: https://github.com/lu-group/gpinn/blob/main/src/poisson_1d.py - -from os import path as osp -from typing import Dict - -import hydra -import numpy as np -import paddle -import sympy as sp -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import jacobian -from ppsci.utils import logger - - -class gPINN1D(ppsci.equation.PDE): - def __init__(self, invar: str, outvar: str): - super().__init__() - x = self.create_symbols(invar) - u = self.create_function(outvar, (x,)) - - dy_xx = u.diff(x, 2) - dy_xxx = u.diff(x, 3) - - f = 8 * sp.sin(8 * x) - for i in range(1, 5): - f += i * sp.sin(i * x) - - df_x = ( - sp.cos(x) - + 4 * sp.cos(2 * x) - + 9 * sp.cos(3 * x) - + 16 * sp.cos(4 * x) - + 64 * sp.cos(8 * x) - ) - - self.add_equation("res1", -dy_xx - f) - self.add_equation("res2", -dy_xxx - df_x) - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - invar: str = cfg.MODEL.input_keys[0] - outvar: str = cfg.MODEL.output_keys[0] - - def output_transform( - in_: Dict[str, paddle.Tensor], out: Dict[str, paddle.Tensor] - ) -> Dict[str, paddle.Tensor]: - x = in_[invar] - u = out[outvar] - return { - outvar: x + paddle.tanh(x) * paddle.tanh(np.pi - x) * u, - } - - model.register_output_transform(output_transform) - - # set equation - equation = {"gPINN": gPINN1D(invar, outvar)} - - # set geometry - geom = {"line": ppsci.geometry.Interval(0, np.pi)} - - # set dataloader config - train_dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - - # set constraint - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["gPINN"].equations, - {"res1": 0, "res2": 0}, - geom["line"], - {**train_dataloader_cfg, "batch_size": cfg.NPOINT_PDE}, - ppsci.loss.MSELoss("mean", weight={"res2": 0.01}), - evenly=True, - name="EQ", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(0.001)(model) - - # 
set validator - def u_solution(in_): - x = in_[invar] - sol = x + 1 / 8 * np.sin(8 * x) - for i in range(1, 5): - sol += 1 / i * np.sin(i * x) - return sol - - l2rel_validator = ppsci.validate.GeometryValidator( - {outvar: lambda out: out[outvar]}, - {outvar: u_solution}, - geom["line"], - { - "dataset": "NamedArrayDataset", - "total_size": cfg.NPOINT_PDE_EVAL, - "batch_size": cfg.EVAL.batch_size.l2rel_validator, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("mean"), - evenly=True, - metric={f"L2Rel({outvar})": ppsci.metric.L2Rel()}, - name="L2Rel", - ) - validator = {l2rel_validator.name: l2rel_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # visualize prediction for outvar - x = geom["line"].uniform_points(1000) - plt.figure() - plt.plot(x, u_solution({invar: x}), label="Exact", color="black") - plt.plot( - x, - solver.predict({invar: x}, return_numpy=True)[outvar], - label="gPINN, w = 0.01", - color="red", - linestyle="dashed", - ) - plt.legend(frameon=False) - plt.xlabel(invar) - plt.ylabel(outvar) - - x = geom["line"].uniform_points(15, boundary=False) - plt.plot(x, u_solution({invar: x}), color="black", marker="o", linestyle="none") - # save visualization result for prediction of outvar - plt.savefig(osp.join(cfg.output_dir, f"pred_{outvar}.png")) - plt.clf() - - # visualize prediction for du/dx - x = geom["line"].uniform_points(1000) - plt.figure() - - def du_x(x: np.ndarray) -> np.ndarray: - return ( - 1 - + np.cos(x) - + np.cos(2 * x) - + np.cos(3 * x) - + np.cos(4 * x) - + np.cos(8 * x) - ) - - plt.plot(x, du_x(x), label="Exact", color="black") - plt.plot( - x, - solver.predict( - {invar: x}, - return_numpy=True, - expr_dict={ - f"d{outvar}d{invar}": lambda out: jacobian(out[outvar], out[invar]) - }, - no_grad=False, - )[f"d{outvar}d{invar}"], - label="gPINN, w = 0.01", - color="red", - linestyle="dashed", - ) - x = geom["line"].uniform_points(15, boundary=False) - plt.plot(x, du_x(x), color="black", marker="o", linestyle="none") - plt.legend(frameon=False) - plt.xlabel(invar) - plt.ylabel(outvar) - # save visualization result of prediction 'du/dx' - plt.savefig(osp.join(cfg.output_dir, f"pred_d{outvar}d{invar}.png")) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - invar: str = cfg.MODEL.input_keys[0] - outvar: str = cfg.MODEL.output_keys[0] - - def output_transform(in_, out): - x = in_[invar] - u = out[outvar] - return { - outvar: x + paddle.tanh(x) * paddle.tanh(np.pi - x) * u, - } - - model.register_output_transform(output_transform) - - # set geometry - geom = {"line": ppsci.geometry.Interval(0, np.pi)} - - # set validator - def u_solution(in_): - x = in_[invar] - sol = x + 1 / 8 * np.sin(8 * x) - for i in range(1, 5): - sol += 1 / i * np.sin(i * x) - return sol - - l2rel_validator = ppsci.validate.GeometryValidator( - {outvar: lambda out: out[outvar]}, - {outvar: 
u_solution}, - geom["line"], - { - "dataset": "NamedArrayDataset", - "total_size": cfg.NPOINT_PDE, - "batch_size": cfg.EVAL.batch_size.l2rel_validator, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("mean"), - evenly=True, - metric={f"L2Rel({outvar})": ppsci.metric.L2Rel()}, - name="L2Rel", - ) - validator = {l2rel_validator.name: l2rel_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - geom=geom, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - # evaluate after finished training - solver.eval() - - # visualize prediction for outvar - x = geom["line"].uniform_points(1000) - plt.figure() - plt.plot(x, u_solution({invar: x}), label="Exact", color="black") - plt.plot( - x, - solver.predict({invar: x}, return_numpy=True)[outvar], - label="gPINN, w = 0.01", - color="red", - linestyle="dashed", - ) - plt.legend(frameon=False) - plt.xlabel(invar) - plt.ylabel(outvar) - - x = geom["line"].uniform_points(15, boundary=False) - plt.plot(x, u_solution({invar: x}), color="black", marker="o", linestyle="none") - # save visualization result for prediction of outvar - plt.savefig(osp.join(cfg.output_dir, f"pred_{outvar}.png")) - plt.clf() - - # visualize prediction for du/dx - x = geom["line"].uniform_points(1000) - plt.figure() - - def du_x(x): - return ( - 1 - + np.cos(x) - + np.cos(2 * x) - + np.cos(3 * x) - + np.cos(4 * x) - + np.cos(8 * x) - ) - - plt.plot(x, du_x(x), label="Exact", color="black") - plt.plot( - x, - solver.predict( - {invar: x}, - return_numpy=True, - expr_dict={ - f"d{outvar}d{invar}": lambda out: jacobian(out[outvar], out[invar]) - }, - no_grad=False, - )[f"d{outvar}d{invar}"], - label="gPINN, w = 0.01", - color="red", - linestyle="dashed", - ) - x = geom["line"].uniform_points(15, boundary=False) - plt.plot(x, du_x(x), color="black", marker="o", linestyle="none") - plt.legend(frameon=False) - plt.xlabel(invar) - plt.ylabel(outvar) - # save visualization result of prediction 'du/dx' - plt.savefig(osp.join(cfg.output_dir, f"pred_d{outvar}d{invar}.png")) - - -@hydra.main(version_base=None, config_path="./conf", config_name="poisson_1d.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
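# Background for the gPINN1D class below: a gradient-enhanced PINN (gPINN)
# trains on the Poisson residual res1 = -u''(x) - f(x) together with its
# spatial derivative res2 = -u'''(x) - f'(x); res2 is down-weighted by 0.01
# in the MSE loss (see train()), and u(0) = 0, u(pi) = pi are hard-enforced
# by the output transform. A minimal, self-contained sympy check of the
# hand-written f'(x) used below (an illustrative sketch, independent of the
# PaddleScience API):
import sympy as _sp

_x = _sp.Symbol("x")
_f = 8 * _sp.sin(8 * _x) + sum(i * _sp.sin(i * _x) for i in range(1, 5))
_df_x = (
    _sp.cos(_x)
    + 4 * _sp.cos(2 * _x)
    + 9 * _sp.cos(3 * _x)
    + 16 * _sp.cos(4 * _x)
    + 64 * _sp.cos(8 * _x)
)
# f'(x) matches the df_x expression built in gPINN1D.__init__ below
assert _sp.simplify(_sp.diff(_f, _x) - _df_x) == 0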
+ +# Reference: https://github.com/lu-group/gpinn/blob/main/src/poisson_1d.py + +from os import path as osp +from typing import Dict + +import hydra +import numpy as np +import paddle +import sympy as sp +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import jacobian +from ppsci.utils import logger + + +class gPINN1D(ppsci.equation.PDE): + def __init__(self, invar: str, outvar: str): + super().__init__() + x = self.create_symbols(invar) + u = self.create_function(outvar, (x,)) + + dy_xx = u.diff(x, 2) + dy_xxx = u.diff(x, 3) + + f = 8 * sp.sin(8 * x) + for i in range(1, 5): + f += i * sp.sin(i * x) + + df_x = ( + sp.cos(x) + + 4 * sp.cos(2 * x) + + 9 * sp.cos(3 * x) + + 16 * sp.cos(4 * x) + + 64 * sp.cos(8 * x) + ) + + self.add_equation("res1", -dy_xx - f) + self.add_equation("res2", -dy_xxx - df_x) + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + invar: str = cfg.MODEL.input_keys[0] + outvar: str = cfg.MODEL.output_keys[0] + + def output_transform( + in_: Dict[str, paddle.Tensor], out: Dict[str, paddle.Tensor] + ) -> Dict[str, paddle.Tensor]: + x = in_[invar] + u = out[outvar] + return { + outvar: x + paddle.tanh(x) * paddle.tanh(np.pi - x) * u, + } + + model.register_output_transform(output_transform) + + # set equation + equation = {"gPINN": gPINN1D(invar, outvar)} + + # set geometry + geom = {"line": ppsci.geometry.Interval(0, np.pi)} + + # set dataloader config + train_dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + + # set constraint + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["gPINN"].equations, + {"res1": 0, "res2": 0}, + geom["line"], + {**train_dataloader_cfg, "batch_size": cfg.NPOINT_PDE}, + ppsci.loss.MSELoss("mean", weight={"res2": 0.01}), + evenly=True, + name="EQ", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(0.001)(model) + + # set validator + def u_solution(in_): + x = in_[invar] + sol = x + 1 / 8 * np.sin(8 * x) + for i in range(1, 5): + sol += 1 / i * np.sin(i * x) + return sol + + l2rel_validator = ppsci.validate.GeometryValidator( + {outvar: lambda out: out[outvar]}, + {outvar: u_solution}, + geom["line"], + { + "dataset": "NamedArrayDataset", + "total_size": cfg.NPOINT_PDE_EVAL, + "batch_size": cfg.EVAL.batch_size.l2rel_validator, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("mean"), + evenly=True, + metric={f"L2Rel({outvar})": ppsci.metric.L2Rel()}, + name="L2Rel", + ) + validator = {l2rel_validator.name: l2rel_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # visualize prediction for outvar + x = geom["line"].uniform_points(1000) + plt.figure() + plt.plot(x, u_solution({invar: x}), label="Exact", color="black") + 
plt.plot( + x, + solver.predict({invar: x}, return_numpy=True)[outvar], + label="gPINN, w = 0.01", + color="red", + linestyle="dashed", + ) + plt.legend(frameon=False) + plt.xlabel(invar) + plt.ylabel(outvar) + + x = geom["line"].uniform_points(15, boundary=False) + plt.plot(x, u_solution({invar: x}), color="black", marker="o", linestyle="none") + # save visualization result for prediction of outvar + plt.savefig(osp.join(cfg.output_dir, f"pred_{outvar}.png")) + plt.clf() + + # visualize prediction for du/dx + x = geom["line"].uniform_points(1000) + plt.figure() + + def du_x(x: np.ndarray) -> np.ndarray: + return ( + 1 + + np.cos(x) + + np.cos(2 * x) + + np.cos(3 * x) + + np.cos(4 * x) + + np.cos(8 * x) + ) + + plt.plot(x, du_x(x), label="Exact", color="black") + plt.plot( + x, + solver.predict( + {invar: x}, + return_numpy=True, + expr_dict={ + f"d{outvar}d{invar}": lambda out: jacobian(out[outvar], out[invar]) + }, + no_grad=False, + )[f"d{outvar}d{invar}"], + label="gPINN, w = 0.01", + color="red", + linestyle="dashed", + ) + x = geom["line"].uniform_points(15, boundary=False) + plt.plot(x, du_x(x), color="black", marker="o", linestyle="none") + plt.legend(frameon=False) + plt.xlabel(invar) + plt.ylabel(outvar) + # save visualization result of prediction 'du/dx' + plt.savefig(osp.join(cfg.output_dir, f"pred_d{outvar}d{invar}.png")) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + invar: str = cfg.MODEL.input_keys[0] + outvar: str = cfg.MODEL.output_keys[0] + + def output_transform(in_, out): + x = in_[invar] + u = out[outvar] + return { + outvar: x + paddle.tanh(x) * paddle.tanh(np.pi - x) * u, + } + + model.register_output_transform(output_transform) + + # set geometry + geom = {"line": ppsci.geometry.Interval(0, np.pi)} + + # set validator + def u_solution(in_): + x = in_[invar] + sol = x + 1 / 8 * np.sin(8 * x) + for i in range(1, 5): + sol += 1 / i * np.sin(i * x) + return sol + + l2rel_validator = ppsci.validate.GeometryValidator( + {outvar: lambda out: out[outvar]}, + {outvar: u_solution}, + geom["line"], + { + "dataset": "NamedArrayDataset", + "total_size": cfg.NPOINT_PDE, + "batch_size": cfg.EVAL.batch_size.l2rel_validator, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("mean"), + evenly=True, + metric={f"L2Rel({outvar})": ppsci.metric.L2Rel()}, + name="L2Rel", + ) + validator = {l2rel_validator.name: l2rel_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + geom=geom, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + # evaluate after finished training + solver.eval() + + # visualize prediction for outvar + x = geom["line"].uniform_points(1000) + plt.figure() + plt.plot(x, u_solution({invar: x}), label="Exact", color="black") + plt.plot( + x, + solver.predict({invar: x}, return_numpy=True)[outvar], + label="gPINN, w = 0.01", + color="red", + linestyle="dashed", + ) + plt.legend(frameon=False) + plt.xlabel(invar) + plt.ylabel(outvar) + + x = geom["line"].uniform_points(15, boundary=False) + plt.plot(x, u_solution({invar: x}), color="black", marker="o", linestyle="none") + # save visualization result for prediction of outvar + plt.savefig(osp.join(cfg.output_dir, f"pred_{outvar}.png")) + plt.clf() + + # visualize prediction for du/dx + x = 
geom["line"].uniform_points(1000) + plt.figure() + + def du_x(x): + return ( + 1 + + np.cos(x) + + np.cos(2 * x) + + np.cos(3 * x) + + np.cos(4 * x) + + np.cos(8 * x) + ) + + plt.plot(x, du_x(x), label="Exact", color="black") + plt.plot( + x, + solver.predict( + {invar: x}, + return_numpy=True, + expr_dict={ + f"d{outvar}d{invar}": lambda out: jacobian(out[outvar], out[invar]) + }, + no_grad=False, + )[f"d{outvar}d{invar}"], + label="gPINN, w = 0.01", + color="red", + linestyle="dashed", + ) + x = geom["line"].uniform_points(15, boundary=False) + plt.plot(x, du_x(x), color="black", marker="o", linestyle="none") + plt.legend(frameon=False) + plt.xlabel(invar) + plt.ylabel(outvar) + # save visualization result of prediction 'du/dx' + plt.savefig(osp.join(cfg.output_dir, f"pred_d{outvar}d{invar}.png")) + + +@hydra.main(version_base=None, config_path="./conf", config_name="poisson_1d.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/graphcast/conf/graphcast_small.yaml b/examples/graphcast/conf/graphcast_small.yaml index b0f1b5550d..8358457173 100644 --- a/examples/graphcast/conf/graphcast_small.yaml +++ b/examples/graphcast/conf/graphcast_small.yaml @@ -1,67 +1,67 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_graphcast_small/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: eval # running mode: train/eval -seed: 2024 -output_dir: ${hydra:run.dir} -log_freq: 20 - -DATA: - data_path: "data/dataset/source-era5_date-2022-01-01_res-1.0_levels-13_steps-01.nc" - mean_path: "data/stats/mean_by_level.nc" - stddev_diffs_path: "data/stats/diffs_stddev_by_level.nc" - stddev_path: "data/stats/stddev_by_level.nc" - type: "graphcast_small" - mesh_size: 5 - mesh2grid_edge_normalization_factor: 0.6180338738074472 - radius_query_fraction_edge_length: 0.6 - resolution: 1.0 - -MODEL: - input_keys: ["input"] - output_keys: ["pred"] - grid_node_dim: 186 - grid_node_num: 65160 - grid_node_emb_dim: 512 - mesh_node_dim: 186 - mesh_node_num: 10242 - mesh_edge_dim: 4 - mesh_node_emb_dim: 512 - mesh_edge_emb_dim: 512 - mesh2grid_edge_dim: 4 - mesh2grid_edge_emb_dim: 512 - grid2mesh_edge_dim: 4 - grid2mesh_edge_emb_dim: 512 - gnn_msg_steps: 16 - node_output_dim: 83 - -TRAIN: - epochs: 1 - -EVAL: - batch_size: 1 - pretrained_model_path: null - eval_with_no_grad: true +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_graphcast_small/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of 
logfile + chdir: false # keep current working direcotry unchaned + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: eval # running mode: train/eval +seed: 2024 +output_dir: ${hydra:run.dir} +log_freq: 20 + +DATA: + data_path: "data/dataset/source-era5_date-2022-01-01_res-1.0_levels-13_steps-01.nc" + mean_path: "data/stats/mean_by_level.nc" + stddev_diffs_path: "data/stats/diffs_stddev_by_level.nc" + stddev_path: "data/stats/stddev_by_level.nc" + type: "graphcast_small" + mesh_size: 5 + mesh2grid_edge_normalization_factor: 0.6180338738074472 + radius_query_fraction_edge_length: 0.6 + resolution: 1.0 + +MODEL: + input_keys: ["input"] + output_keys: ["pred"] + grid_node_dim: 186 + grid_node_num: 65160 + grid_node_emb_dim: 512 + mesh_node_dim: 186 + mesh_node_num: 10242 + mesh_edge_dim: 4 + mesh_node_emb_dim: 512 + mesh_edge_emb_dim: 512 + mesh2grid_edge_dim: 4 + mesh2grid_edge_emb_dim: 512 + grid2mesh_edge_dim: 4 + grid2mesh_edge_emb_dim: 512 + gnn_msg_steps: 16 + node_output_dim: 83 + +TRAIN: + epochs: 1 + +EVAL: + batch_size: 1 + pretrained_model_path: null + eval_with_no_grad: true diff --git a/examples/graphcast/graphcast.py b/examples/graphcast/graphcast.py index 7d0ca83f7c..f32a4dc4b9 100644 --- a/examples/graphcast/graphcast.py +++ b/examples/graphcast/graphcast.py @@ -1,130 +1,130 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
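# A quick sanity check of the MODEL sizes in graphcast_small.yaml above
# (a sketch assuming the usual 1.0-degree lat-lon grid with both poles and an
# icosahedron refined `mesh_size` times; not taken from the PaddleScience code):
def _grid_node_count(resolution_deg: float = 1.0) -> int:
    n_lat = int(180 / resolution_deg) + 1  # 181 latitudes at 1.0 degree
    n_lon = int(360 / resolution_deg)      # 360 longitudes
    return n_lat * n_lon


def _mesh_node_count(mesh_size: int = 5) -> int:
    # a k-times refined icosahedron has 10 * 4**k + 2 vertices
    return 10 * 4**mesh_size + 2


assert _grid_node_count(1.0) == 65160  # MODEL.grid_node_num
assert _mesh_node_count(5) == 10242    # MODEL.mesh_node_num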
- -from typing import Dict - -import hydra -import numpy as np -import paddle -import plot -from omegaconf import DictConfig - -import ppsci -from ppsci.data.dataset import atmospheric_dataset - - -def eval(cfg: DictConfig): - model = ppsci.arch.GraphCastNet(**cfg.MODEL) - - # set dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "GridMeshAtmosphericDataset", - "input_keys": ("input",), - "label_keys": ("label",), - **cfg.DATA, - }, - "batch_size": cfg.EVAL.batch_size, - } - - # set validator - error_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=None, - output_expr={"pred": lambda out: out["pred"]}, - metric=None, - name="error_validator", - ) - - def loss( - output_dict: Dict[str, paddle.Tensor], - label_dict: Dict[str, paddle.Tensor], - *args, - ) -> Dict[str, paddle.Tensor]: - graph = output_dict["pred"] - pred = dataset.denormalize(graph.grid_node_feat.numpy()) - pred = graph.grid_node_outputs_to_prediction(pred, dataset.targets_template) - - target = graph.grid_node_outputs_to_prediction( - label_dict["label"][0].numpy(), dataset.targets_template - ) - - pred = atmospheric_dataset.dataset_to_stacked(pred) - target = atmospheric_dataset.dataset_to_stacked(target) - loss = np.average(np.square(pred.data - target.data)) - loss = paddle.full([], loss) - return {"loss": loss} - - def metric( - output_dict: Dict[str, paddle.Tensor], - label_dict: Dict[str, paddle.Tensor], - *args, - ) -> Dict[str, paddle.Tensor]: - graph = output_dict["pred"][0] - pred = dataset.denormalize(graph.grid_node_feat.numpy()) - pred = graph.grid_node_outputs_to_prediction(pred, dataset.targets_template) - - target = graph.grid_node_outputs_to_prediction( - label_dict["label"][0].numpy(), dataset.targets_template - ) - - metric_dic = { - var_name: np.average(target[var_name].data - pred[var_name].data) - for var_name in list(target) - } - return metric_dic - - dataset = error_validator.data_loader.dataset - error_validator.loss = ppsci.loss.FunctionalLoss(loss) - error_validator.metric = {"error": ppsci.metric.FunctionalMetric(metric)} - - validator = {error_validator.name: error_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - validator=validator, - cfg=cfg, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # evaluate model - solver.eval() - - # visualize prediction - with solver.no_grad_context_manager(True): - for index, (input_, label_, _) in enumerate(error_validator.data_loader): - output_ = model(input_) - graph = output_["pred"] - pred = dataset.denormalize(graph.grid_node_feat.numpy()) - pred = graph.grid_node_outputs_to_prediction(pred, dataset.targets_template) - - target = graph.grid_node_outputs_to_prediction( - label_["label"][0].numpy(), dataset.targets_template - ) - - plot.log_images(target, pred, "2m_temperature", level=50, file="result.png") - - -@hydra.main(version_base=None, config_path="./conf", config_name="graphcast_small.yaml") -def main(cfg: DictConfig): - if cfg.mode == "eval": - eval(cfg) - else: - raise ValueError(f"cfg.mode should in ['eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
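# Note on the evaluation flow below: the validator is first constructed with
# loss=None and metric=None, then patched with FunctionalLoss and
# FunctionalMetric once the dataset is available from its data loader, since
# both closures need the dataset for denormalization and the xarray targets
# template. The loss is a plain MSE over the stacked xarray fields, wrapped
# back into a paddle scalar; the metric reports the per-variable mean
# difference between target and prediction.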
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict + +import hydra +import numpy as np +import paddle +import plot +from omegaconf import DictConfig + +import ppsci +from ppsci.data.dataset import atmospheric_dataset + + +def eval(cfg: DictConfig): + model = ppsci.arch.GraphCastNet(**cfg.MODEL) + + # set dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "GridMeshAtmosphericDataset", + "input_keys": ("input",), + "label_keys": ("label",), + **cfg.DATA, + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set validator + error_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=None, + output_expr={"pred": lambda out: out["pred"]}, + metric=None, + name="error_validator", + ) + + def loss( + output_dict: Dict[str, paddle.Tensor], + label_dict: Dict[str, paddle.Tensor], + *args, + ) -> Dict[str, paddle.Tensor]: + graph = output_dict["pred"] + pred = dataset.denormalize(graph.grid_node_feat.numpy()) + pred = graph.grid_node_outputs_to_prediction(pred, dataset.targets_template) + + target = graph.grid_node_outputs_to_prediction( + label_dict["label"][0].numpy(), dataset.targets_template + ) + + pred = atmospheric_dataset.dataset_to_stacked(pred) + target = atmospheric_dataset.dataset_to_stacked(target) + loss = np.average(np.square(pred.data - target.data)) + loss = paddle.full([], loss) + return {"loss": loss} + + def metric( + output_dict: Dict[str, paddle.Tensor], + label_dict: Dict[str, paddle.Tensor], + *args, + ) -> Dict[str, paddle.Tensor]: + graph = output_dict["pred"][0] + pred = dataset.denormalize(graph.grid_node_feat.numpy()) + pred = graph.grid_node_outputs_to_prediction(pred, dataset.targets_template) + + target = graph.grid_node_outputs_to_prediction( + label_dict["label"][0].numpy(), dataset.targets_template + ) + + metric_dic = { + var_name: np.average(target[var_name].data - pred[var_name].data) + for var_name in list(target) + } + return metric_dic + + dataset = error_validator.data_loader.dataset + error_validator.loss = ppsci.loss.FunctionalLoss(loss) + error_validator.metric = {"error": ppsci.metric.FunctionalMetric(metric)} + + validator = {error_validator.name: error_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # evaluate model + solver.eval() + + # visualize prediction + with solver.no_grad_context_manager(True): + for index, (input_, label_, _) in enumerate(error_validator.data_loader): + output_ = model(input_) + graph = output_["pred"] + pred = dataset.denormalize(graph.grid_node_feat.numpy()) + pred = graph.grid_node_outputs_to_prediction(pred, dataset.targets_template) + + target = graph.grid_node_outputs_to_prediction( + label_["label"][0].numpy(), dataset.targets_template + ) + + plot.log_images(target, pred, "2m_temperature", level=50, file="result.png") + + +@hydra.main(version_base=None, config_path="./conf", config_name="graphcast_small.yaml") +def main(cfg: DictConfig): + if cfg.mode == "eval": + eval(cfg) + else: + raise ValueError(f"cfg.mode should in 
['eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/graphcast/plot.py b/examples/graphcast/plot.py index 85f5f8d5d2..4f7d0954c5 100644 --- a/examples/graphcast/plot.py +++ b/examples/graphcast/plot.py @@ -1,178 +1,178 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import math -from typing import Dict -from typing import Optional -from typing import Tuple - -import matplotlib -import matplotlib.animation as animation -import matplotlib.pyplot as plt -import numpy as np - -from ppsci.utils import checker - -if not checker.dynamic_import_to_globals("IPython"): - raise ImportError( - "Could not import IPython python package. " - "Please install it with pip install IPython." - ) -import IPython - -if not checker.dynamic_import_to_globals("xarray"): - raise ImportError( - "Could not import xarray python package. " - "Please install it with pip install xarray." - ) -import xarray - - -def select( - data: xarray.Dataset, - variable: str, - level: Optional[int] = None, - max_steps: Optional[int] = None, -) -> xarray.Dataset: - data = data[variable] - if "batch" in data.dims: - data = data.isel(batch=0) - if ( - max_steps is not None - and "time" in data.sizes - and max_steps < data.sizes["time"] - ): - data = data.isel(time=range(0, max_steps)) - if level is not None and "level" in data.coords: - data = data.sel(level=level) - return data - - -def scale( - data: xarray.Dataset, - center: Optional[float] = None, - robust: bool = False, -) -> Tuple[xarray.Dataset, matplotlib.colors.Normalize, str]: - vmin = np.nanpercentile(data, (2 if robust else 0)) - vmax = np.nanpercentile(data, (98 if robust else 100)) - if center is not None: - diff = max(vmax - center, center - vmin) - vmin = center - diff - vmax = center + diff - return ( - data, - matplotlib.colors.Normalize(vmin, vmax), - ("RdBu_r" if center is not None else "viridis"), - ) - - -def plot_data( - data: Dict[str, xarray.Dataset], - fig_title: str, - plot_size: float = 5, - robust: bool = False, - cols: int = 4, - file: str = "result.png", -) -> Tuple[xarray.Dataset, matplotlib.colors.Normalize, str]: - - first_data = next(iter(data.values()))[0] - max_steps = first_data.sizes.get("time", 1) - assert all(max_steps == d.sizes.get("time", 1) for d, _, _ in data.values()) - - cols = min(cols, len(data)) - rows = math.ceil(len(data) / cols) - figure = plt.figure(figsize=(plot_size * 2 * cols, plot_size * rows)) - figure.suptitle(fig_title, fontsize=16) - figure.subplots_adjust(wspace=0, hspace=0) - figure.tight_layout() - - images = [] - for i, (title, (plot_data, norm, cmap)) in enumerate(data.items()): - ax = figure.add_subplot(rows, cols, i + 1) - ax.set_xticks([]) - ax.set_yticks([]) - ax.set_title(title) - im = ax.imshow( - plot_data.isel(time=0, missing_dims="ignore"), - norm=norm, - origin="lower", - cmap=cmap, - ) - plt.colorbar( - mappable=im, - ax=ax, - orientation="vertical", - pad=0.02, - aspect=16, - 
shrink=0.75, - cmap=cmap, - extend=("both" if robust else "neither"), - ) - images.append(im) - - def _update(frame): - if "time" in first_data.dims: - td = datetime.timedelta( - microseconds=first_data["time"][frame].item() / 1000 - ) - figure.suptitle(f"{fig_title}, {td}", fontsize=16) - else: - figure.suptitle(fig_title, fontsize=16) - for im, (plot_data, norm, cmap) in zip(images, data.values()): - im.set_data(plot_data.isel(time=frame, missing_dims="ignore")) - - ani = animation.FuncAnimation( - fig=figure, func=_update, frames=max_steps, interval=250 - ) - plt.savefig( - file, - bbox_inches="tight", - ) - plt.close(figure.number) - return IPython.display.HTML(ani.to_jshtml()) - - -def log_images( - target: xarray.Dataset, - pred: xarray.Dataset, - variable_name: str, - level: int, - robust=True, - file="result.png", -): - plot_size = 5 - plot_max_steps = pred.sizes["time"] - - data = { - "Targets": scale( - select(target, variable_name, level, plot_max_steps), robust=robust - ), - "Predictions": scale( - select(pred, variable_name, level, plot_max_steps), robust=robust - ), - "Diff": scale( - ( - select(target, variable_name, level, plot_max_steps) - - select(pred, variable_name, level, plot_max_steps) - ), - robust=robust, - center=0, - ), - } - fig_title = variable_name - if "level" in pred[variable_name].coords: - fig_title += f" at {level} hPa" - - plot_data(data, fig_title, plot_size, robust, file=file) +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import math +from typing import Dict +from typing import Optional +from typing import Tuple + +import matplotlib +import matplotlib.animation as animation +import matplotlib.pyplot as plt +import numpy as np + +from ppsci.utils import checker + +if not checker.dynamic_import_to_globals("IPython"): + raise ImportError( + "Could not import IPython python package. " + "Please install it with pip install IPython." + ) +import IPython + +if not checker.dynamic_import_to_globals("xarray"): + raise ImportError( + "Could not import xarray python package. " + "Please install it with pip install xarray." 
+ ) +import xarray + + +def select( + data: xarray.Dataset, + variable: str, + level: Optional[int] = None, + max_steps: Optional[int] = None, +) -> xarray.Dataset: + data = data[variable] + if "batch" in data.dims: + data = data.isel(batch=0) + if ( + max_steps is not None + and "time" in data.sizes + and max_steps < data.sizes["time"] + ): + data = data.isel(time=range(0, max_steps)) + if level is not None and "level" in data.coords: + data = data.sel(level=level) + return data + + +def scale( + data: xarray.Dataset, + center: Optional[float] = None, + robust: bool = False, +) -> Tuple[xarray.Dataset, matplotlib.colors.Normalize, str]: + vmin = np.nanpercentile(data, (2 if robust else 0)) + vmax = np.nanpercentile(data, (98 if robust else 100)) + if center is not None: + diff = max(vmax - center, center - vmin) + vmin = center - diff + vmax = center + diff + return ( + data, + matplotlib.colors.Normalize(vmin, vmax), + ("RdBu_r" if center is not None else "viridis"), + ) + + +def plot_data( + data: Dict[str, xarray.Dataset], + fig_title: str, + plot_size: float = 5, + robust: bool = False, + cols: int = 4, + file: str = "result.png", +) -> Tuple[xarray.Dataset, matplotlib.colors.Normalize, str]: + + first_data = next(iter(data.values()))[0] + max_steps = first_data.sizes.get("time", 1) + assert all(max_steps == d.sizes.get("time", 1) for d, _, _ in data.values()) + + cols = min(cols, len(data)) + rows = math.ceil(len(data) / cols) + figure = plt.figure(figsize=(plot_size * 2 * cols, plot_size * rows)) + figure.suptitle(fig_title, fontsize=16) + figure.subplots_adjust(wspace=0, hspace=0) + figure.tight_layout() + + images = [] + for i, (title, (plot_data, norm, cmap)) in enumerate(data.items()): + ax = figure.add_subplot(rows, cols, i + 1) + ax.set_xticks([]) + ax.set_yticks([]) + ax.set_title(title) + im = ax.imshow( + plot_data.isel(time=0, missing_dims="ignore"), + norm=norm, + origin="lower", + cmap=cmap, + ) + plt.colorbar( + mappable=im, + ax=ax, + orientation="vertical", + pad=0.02, + aspect=16, + shrink=0.75, + cmap=cmap, + extend=("both" if robust else "neither"), + ) + images.append(im) + + def _update(frame): + if "time" in first_data.dims: + td = datetime.timedelta( + microseconds=first_data["time"][frame].item() / 1000 + ) + figure.suptitle(f"{fig_title}, {td}", fontsize=16) + else: + figure.suptitle(fig_title, fontsize=16) + for im, (plot_data, norm, cmap) in zip(images, data.values()): + im.set_data(plot_data.isel(time=frame, missing_dims="ignore")) + + ani = animation.FuncAnimation( + fig=figure, func=_update, frames=max_steps, interval=250 + ) + plt.savefig( + file, + bbox_inches="tight", + ) + plt.close(figure.number) + return IPython.display.HTML(ani.to_jshtml()) + + +def log_images( + target: xarray.Dataset, + pred: xarray.Dataset, + variable_name: str, + level: int, + robust=True, + file="result.png", +): + plot_size = 5 + plot_max_steps = pred.sizes["time"] + + data = { + "Targets": scale( + select(target, variable_name, level, plot_max_steps), robust=robust + ), + "Predictions": scale( + select(pred, variable_name, level, plot_max_steps), robust=robust + ), + "Diff": scale( + ( + select(target, variable_name, level, plot_max_steps) + - select(pred, variable_name, level, plot_max_steps) + ), + robust=robust, + center=0, + ), + } + fig_title = variable_name + if "level" in pred[variable_name].coords: + fig_title += f" at {level} hPa" + + plot_data(data, fig_title, plot_size, robust, file=file) diff --git a/examples/heart/conf/forward.yaml 
b/examples/heart/conf/forward.yaml index 3d77d1ab69..b81a21f3bb 100644 --- a/examples/heart/conf/forward.yaml +++ b/examples/heart/conf/forward.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -82,3 +83,97 @@ EVAL: eval_with_no_grad: true batch_size: 1000 num_vis: 100000 +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_heart/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +EVAL_CSV_PATH: ./data/label.csv +DATA_CSV_PATH: ./data/forward.csv +# general settings +mode: train # running mode: train/eval +seed: 2024 +output_dir: ${hydra:run.dir} +log_freq: 200 + +# set geometry +GEOM_PATH: ./stl/heart.stl +BASE_PATH: ./stl/base.stl +ENDO_PATH: ./stl/endo.stl +EPI_PATH: ./stl/epi.stl + +# set working condition +E: 9 # kPa +nu: 0.45 +P: 1.064 # kPa + +# model settings +MODEL: + input_keys: ["x","y","z"] + output_keys: ["u","v","w"] + num_layers: 10 + hidden_size: 20 + activation: "silu" + weight_norm: true + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.95 + decay_steps: 3000 + by_epoch: false + batch_size: + bc_base: 256 + bc_endo: 2048 + bc_epi: 32 + interior: 8000 + weight: + bc_base: {"u": 0.2, "v": 0.2, "w": 0.2} + bc_endo: {"traction_x": 0.1, "traction_y": 0.1, "traction_z": 0.1} + bc_epi: {"traction": 0.2} + interior: {"hooke_x": 0.2, "hooke_y": 0.2, "hooke_z": 0.2} + save_freq: 20 + eval_freq: 20 + eval_during_train: true + eval_with_no_grad: true + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 1000 + num_vis: 100000 +>>>>>>> Stashed changes diff --git a/examples/heart/conf/inverse.yaml b/examples/heart/conf/inverse.yaml index 46238b2ebf..982c43deff 100644 --- a/examples/heart/conf/inverse.yaml +++ b/examples/heart/conf/inverse.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -86,3 +87,92 @@ EVAL: num_vis: 100000 # path for saved param E param_E_path: null +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_heart/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2024 
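# Notes on the working-condition block further below (a sketch based on
# equation.py and inverse.py in this same change set):
#   - E is the reference ("truth") Young's modulus in kPa; inverse.py does not
#     appear to read it during training, but instead recreates E as a learnable
#     paddle parameter (initialized to 0) and recovers it from the displacement data.
#   - the Hooke equation works with the shear modulus G = E / (2 * (1 + nu)),
#     i.e. roughly 9 / (2 * 1.45) ≈ 3.10 kPa at the reference values.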
+output_dir: ${hydra:run.dir} +log_freq: 200 + +# set geometry +GEOM_PATH: ./stl/heart.stl +BASE_PATH: ./stl/base.stl +ENDO_PATH: ./stl/endo.stl +EPI_PATH: ./stl/epi.stl +DATA_PATH: ./data/inverse.csv + +# set working condition +E: 9 # truth +nu: 0.45 +P: 1.064 # kPa + +# model settings +MODEL: + input_keys: ["x","y","z"] + output_keys: ["u","v","w"] + num_layers: 10 + hidden_size: 20 + activation: "silu" + weight_norm: true + +# training settings +TRAIN: + epochs: 100 + iters_per_epoch: 1000 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.95 + decay_steps: 3000 + by_epoch: false + batch_size: + bc_base: 256 + bc_endo: 2048 + bc_epi: 32 + interior: 8000 + weight: + bc_base: {"u": 0.1, "v": 0.1, "w": 0.1} + bc_endo: {"traction_x": 0.1, "traction_y": 0.1, "traction_z": 0.1} + bc_epi: {"traction": 0.1} + interior: {"hooke_x": 0.1, "hooke_y": 0.1, "hooke_z": 0.1} + save_freq: 20 + eval_freq: 20 + eval_during_train: true + eval_with_no_grad: true + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 1000 + num_vis: 100000 + # path for saved param E + param_E_path: null +>>>>>>> Stashed changes diff --git a/examples/heart/equation.py b/examples/heart/equation.py index 203833cae9..7c6fe09c69 100644 --- a/examples/heart/equation.py +++ b/examples/heart/equation.py @@ -1,164 +1,164 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Optional -from typing import Tuple -from typing import Union - -import paddle -import sympy as sp - -from ppsci.equation.pde import base - - -class Hooke(base.PDE): - r"""equations for umbrella opening force. - Use either (E, nu) or (lambda_, mu) to define the material properties. - - $$ - \begin{pmatrix} - t_{xx} \\ t_{yy} \\ t_{zz} \\ t_{xy} \\ t_{xz} \\ t_{yz} \\ - \end{pmatrix} - = - \begin{bmatrix} - \frac{1}{E} & -\frac{\nu}{E} & -\frac{\nu}{E} & 0 & 0 & 0 \\ - -\frac{\nu}{E} & \frac{1}{E} & -\frac{\nu}{E} & 0 & 0 & 0 \\ - -\frac{\nu}{E} & -\frac{\nu}{E} & \frac{1}{E} & 0 & 0 & 0 \\ - 0 & 0 & 0 & \frac{1}{G} & 0 & 0 \\ - 0 & 0 & 0 & 0 & \frac{1}{G} & 0 \\ - 0 & 0 & 0 & 0 & 0 & \frac{1}{G} \\ - \end{bmatrix} - \begin{pmatrix} - \varepsilon _{xx} \\ \varepsilon _{yy} \\ \varepsilon _{zz} \\ \varepsilon _{xy} \\ \varepsilon _{xz} \\ \varepsilon _{yz} \\ - \end{pmatrix} - $$ - - Args: - E (paddle.base.framework.EagerParamBase): The Young's modulus. Learnable parameter. - nu (Union[float, str]): The Poisson's ratio. - P (Union[float, str]): Left ventricular cavity pressure. - dim (int, optional): Dimension of the linear elasticity (2 or 3). Defaults to 3. - time (bool, optional): Whether contains time data. Defaults to False. - detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. 
- - - Examples: - >>> import ppsci - >>> E = paddle.create_parameter( - ... shape=[], - ... dtype=paddle.get_default_dtype(), - ... default_initializer=initializer.Constant(), - ... ) - >>> pde = ppsci.equation.Hooke( - ... E=E, nu=cfg.nu, P=cfg.P, dim=3 - ... ) - """ - - def __init__( - self, - E: Union[float, str, paddle.base.framework.EagerParamBase], - nu: Union[float, str], - P: Union[float, str], - dim: int = 3, - time: bool = False, - detach_keys: Optional[Tuple[str, ...]] = None, - ): - super().__init__() - self.detach_keys = detach_keys - self.dim = dim - self.time = time - - t, x, y, z = self.create_symbols("t x y z") - normal_x, normal_y, normal_z = self.create_symbols("normal_x normal_y normal_z") - invars = (x, y) - if time: - invars = (t,) + invars - if self.dim == 3: - invars += (z,) - - u = self.create_function("u", invars) - v = self.create_function("v", invars) - w = self.create_function("w", invars) if dim == 3 else sp.Number(0) - - if isinstance(nu, str): - nu = self.create_function(nu, invars) - if isinstance(P, str): - P = self.create_function(P, invars) - if isinstance(E, str): - E = self.create_function(E, invars) - self.E = E - elif isinstance(E, paddle.base.framework.EagerParamBase): - self.E = E - self.learnable_parameters.append(self.E) - E = self.create_symbols(self.E.name) - - self.nu = nu - self.P = P - - # compute sigma - sigma_xx = u.diff(x) - sigma_yy = v.diff(y) - sigma_zz = w.diff(z) if dim == 3 else sp.Number(0) - sigma_xy = 0.5 * (u.diff(y) + v.diff(x)) - sigma_xz = 0.5 * (u.diff(z) + w.diff(x)) if dim == 3 else sp.Number(0) - sigma_yz = 0.5 * (v.diff(z) + w.diff(y)) if dim == 3 else sp.Number(0) - - # compute stress tensor t - G = E / (2 * (1 + nu)) - e = sigma_xx + sigma_yy + sigma_zz - t_xx = 2 * G * (sigma_xx + nu / (1 - 2 * nu) * e) - t_yy = 2 * G * (sigma_yy + nu / (1 - 2 * nu) * e) - t_zz = 2 * G * (sigma_zz + nu / (1 - 2 * nu) * e) - t_xy = 2 * sigma_xy * G - t_xz = 2 * sigma_xz * G - t_yz = 2 * sigma_yz * G - - # compute stress - hooke_x = t_xx.diff(x) + t_xy.diff(y) + t_xz.diff(z) - hooke_y = t_xy.diff(x) + t_yy.diff(y) + t_yz.diff(z) - hooke_z = t_xz.diff(x) + t_yz.diff(y) + t_zz.diff(z) - - # compute traction splitly - traction_x = t_xx * normal_x + t_xy * normal_y + t_xz * normal_z + P * normal_x - traction_y = t_xy * normal_x + t_yy * normal_y + t_yz * normal_z + P * normal_y - traction_z = t_xz * normal_x + t_yz * normal_y + t_zz * normal_z + P * normal_z - - # compute traction - traction_x_ = t_xx * normal_x + t_xy * normal_y + t_xz * normal_z - traction_y_ = t_xy * normal_x + t_yy * normal_y + t_yz * normal_z - traction_z_ = t_xz * normal_x + t_yz * normal_y + t_zz * normal_z - - traction = ( - traction_x_ * normal_x + traction_y_ * normal_y + traction_z_ * normal_z - ) - - # add hooke equations - self.add_equation("hooke_x", hooke_x) - self.add_equation("hooke_y", hooke_y) - if self.dim == 3: - self.add_equation("hooke_z", hooke_z) - - # add traction equations - self.add_equation("traction_x", traction_x) - self.add_equation("traction_y", traction_y) - if self.dim == 3: - self.add_equation("traction_z", traction_z) - - # add combined traction equations - self.add_equation("traction", traction) - - self._apply_detach() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle +import sympy as sp + +from ppsci.equation.pde import base + + +class Hooke(base.PDE): + r"""equations for umbrella opening force. + Use either (E, nu) or (lambda_, mu) to define the material properties. + + $$ + \begin{pmatrix} + t_{xx} \\ t_{yy} \\ t_{zz} \\ t_{xy} \\ t_{xz} \\ t_{yz} \\ + \end{pmatrix} + = + \begin{bmatrix} + \frac{1}{E} & -\frac{\nu}{E} & -\frac{\nu}{E} & 0 & 0 & 0 \\ + -\frac{\nu}{E} & \frac{1}{E} & -\frac{\nu}{E} & 0 & 0 & 0 \\ + -\frac{\nu}{E} & -\frac{\nu}{E} & \frac{1}{E} & 0 & 0 & 0 \\ + 0 & 0 & 0 & \frac{1}{G} & 0 & 0 \\ + 0 & 0 & 0 & 0 & \frac{1}{G} & 0 \\ + 0 & 0 & 0 & 0 & 0 & \frac{1}{G} \\ + \end{bmatrix} + \begin{pmatrix} + \varepsilon _{xx} \\ \varepsilon _{yy} \\ \varepsilon _{zz} \\ \varepsilon _{xy} \\ \varepsilon _{xz} \\ \varepsilon _{yz} \\ + \end{pmatrix} + $$ + + Args: + E (paddle.base.framework.EagerParamBase): The Young's modulus. Learnable parameter. + nu (Union[float, str]): The Poisson's ratio. + P (Union[float, str]): Left ventricular cavity pressure. + dim (int, optional): Dimension of the linear elasticity (2 or 3). Defaults to 3. + time (bool, optional): Whether contains time data. Defaults to False. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + + Examples: + >>> import ppsci + >>> E = paddle.create_parameter( + ... shape=[], + ... dtype=paddle.get_default_dtype(), + ... default_initializer=initializer.Constant(), + ... ) + >>> pde = ppsci.equation.Hooke( + ... E=E, nu=cfg.nu, P=cfg.P, dim=3 + ... 
) + """ + + def __init__( + self, + E: Union[float, str, paddle.base.framework.EagerParamBase], + nu: Union[float, str], + P: Union[float, str], + dim: int = 3, + time: bool = False, + detach_keys: Optional[Tuple[str, ...]] = None, + ): + super().__init__() + self.detach_keys = detach_keys + self.dim = dim + self.time = time + + t, x, y, z = self.create_symbols("t x y z") + normal_x, normal_y, normal_z = self.create_symbols("normal_x normal_y normal_z") + invars = (x, y) + if time: + invars = (t,) + invars + if self.dim == 3: + invars += (z,) + + u = self.create_function("u", invars) + v = self.create_function("v", invars) + w = self.create_function("w", invars) if dim == 3 else sp.Number(0) + + if isinstance(nu, str): + nu = self.create_function(nu, invars) + if isinstance(P, str): + P = self.create_function(P, invars) + if isinstance(E, str): + E = self.create_function(E, invars) + self.E = E + elif isinstance(E, paddle.base.framework.EagerParamBase): + self.E = E + self.learnable_parameters.append(self.E) + E = self.create_symbols(self.E.name) + + self.nu = nu + self.P = P + + # compute sigma + sigma_xx = u.diff(x) + sigma_yy = v.diff(y) + sigma_zz = w.diff(z) if dim == 3 else sp.Number(0) + sigma_xy = 0.5 * (u.diff(y) + v.diff(x)) + sigma_xz = 0.5 * (u.diff(z) + w.diff(x)) if dim == 3 else sp.Number(0) + sigma_yz = 0.5 * (v.diff(z) + w.diff(y)) if dim == 3 else sp.Number(0) + + # compute stress tensor t + G = E / (2 * (1 + nu)) + e = sigma_xx + sigma_yy + sigma_zz + t_xx = 2 * G * (sigma_xx + nu / (1 - 2 * nu) * e) + t_yy = 2 * G * (sigma_yy + nu / (1 - 2 * nu) * e) + t_zz = 2 * G * (sigma_zz + nu / (1 - 2 * nu) * e) + t_xy = 2 * sigma_xy * G + t_xz = 2 * sigma_xz * G + t_yz = 2 * sigma_yz * G + + # compute stress + hooke_x = t_xx.diff(x) + t_xy.diff(y) + t_xz.diff(z) + hooke_y = t_xy.diff(x) + t_yy.diff(y) + t_yz.diff(z) + hooke_z = t_xz.diff(x) + t_yz.diff(y) + t_zz.diff(z) + + # compute traction splitly + traction_x = t_xx * normal_x + t_xy * normal_y + t_xz * normal_z + P * normal_x + traction_y = t_xy * normal_x + t_yy * normal_y + t_yz * normal_z + P * normal_y + traction_z = t_xz * normal_x + t_yz * normal_y + t_zz * normal_z + P * normal_z + + # compute traction + traction_x_ = t_xx * normal_x + t_xy * normal_y + t_xz * normal_z + traction_y_ = t_xy * normal_x + t_yy * normal_y + t_yz * normal_z + traction_z_ = t_xz * normal_x + t_yz * normal_y + t_zz * normal_z + + traction = ( + traction_x_ * normal_x + traction_y_ * normal_y + traction_z_ * normal_z + ) + + # add hooke equations + self.add_equation("hooke_x", hooke_x) + self.add_equation("hooke_y", hooke_y) + if self.dim == 3: + self.add_equation("hooke_z", hooke_z) + + # add traction equations + self.add_equation("traction_x", traction_x) + self.add_equation("traction_y", traction_y) + if self.dim == 3: + self.add_equation("traction_z", traction_z) + + # add combined traction equations + self.add_equation("traction", traction) + + self._apply_detach() diff --git a/examples/heart/forward.py b/examples/heart/forward.py index bc4a860e42..635bd1b449 100644 --- a/examples/heart/forward.py +++ b/examples/heart/forward.py @@ -1,294 +1,294 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
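# A quick check of the constitutive law assembled in the Hooke class above
# (equation.py): the sigma_* symbols there are the strain components and the
# t_* symbols the stresses, and 2*G*(eps_xx + nu/(1-2*nu)*e) is the usual
# Lame form lambda*e + 2*G*eps_xx with lambda = E*nu / ((1+nu)*(1-2*nu)).
# A small sympy sketch of that identity (illustrative only):
import sympy as _sp

_E, _nu, _e, _eps_xx = _sp.symbols("E nu e eps_xx", positive=True)
_G = _E / (2 * (1 + _nu))
_t_xx_code = 2 * _G * (_eps_xx + _nu / (1 - 2 * _nu) * _e)  # as in equation.py
_lam = _E * _nu / ((1 + _nu) * (1 - 2 * _nu))
_t_xx_lame = _lam * _e + 2 * _G * _eps_xx                   # textbook Lame form
assert _sp.simplify(_t_xx_code - _t_xx_lame) == 0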
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import equation as eq_func -import hydra -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import reader - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set equation - equation = {"Hooke": eq_func.Hooke(E=cfg.E, nu=cfg.nu, P=cfg.P, dim=3)} - - # set geometry - heart = ppsci.geometry.Mesh(cfg.GEOM_PATH) - base = ppsci.geometry.Mesh(cfg.BASE_PATH) - endo = ppsci.geometry.Mesh(cfg.ENDO_PATH) - epi = ppsci.geometry.Mesh(cfg.EPI_PATH) - geom = {"geo": heart, "base": base, "endo": endo, "epi": epi} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = heart.bounds - - # set dataloader config - train_dataloader_cfg = { - "dataset": "NamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - bc_base = ppsci.constraint.BoundaryConstraint( - {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, - {"u": 0, "v": 0, "w": 0}, - geom["base"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_base}, - ppsci.loss.MSELoss("mean"), - weight_dict=cfg.TRAIN.weight.bc_base, - name="BC_BASE", - ) - bc_endo = ppsci.constraint.BoundaryConstraint( - equation["Hooke"].equations, - {"traction": -cfg.P}, - geom["endo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_endo}, - ppsci.loss.MSELoss("mean"), - weight_dict=cfg.TRAIN.weight.bc_endo, - name="BC_ENDO", - ) - bc_epi = ppsci.constraint.BoundaryConstraint( - equation["Hooke"].equations, - {"traction": 0}, - geom["epi"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_epi}, - ppsci.loss.MSELoss("mean"), - weight_dict=cfg.TRAIN.weight.bc_epi, - name="BC_EPI", - ) - interior = ppsci.constraint.InteriorConstraint( - equation["Hooke"].equations, - {"hooke_x": 0, "hooke_y": 0, "hooke_z": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.interior}, - ppsci.loss.MSELoss("mean"), - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - weight_dict=cfg.TRAIN.weight.interior, - name="INTERIOR", - ) - data = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableCSVDataset", - "file_path": cfg.DATA_CSV_PATH, - "input_keys": ("x", "y", "z"), - "label_keys": ("u", "v", "w"), - }, - }, - ppsci.loss.MSELoss("sum"), - name="DATA", - ) - - # wrap constraints together - constraint = { - bc_base.name: bc_base, - bc_endo.name: bc_endo, - bc_epi.name: bc_epi, - interior.name: interior, - data.name: data, - } - - # set validator - eval_data_dict = reader.load_csv_file( - cfg.EVAL_CSV_PATH, - ("x", "y", "z", "u", "v", "w"), - { - "x": "x", - "y": "y", - "z": "z", - "u": "u", - "v": "v", - "w": "w", - }, - ) - - input_dict = { - "x": eval_data_dict["x"], - "y": eval_data_dict["y"], - "z": 
eval_data_dict["z"], - } - - label_dict = { - "u": eval_data_dict["u"], - "v": eval_data_dict["v"], - "w": eval_data_dict["w"], - } - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - }, - "num_workers": 1, - } - sup_validator = ppsci.validate.SupervisedValidator( - {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, - ppsci.loss.MSELoss("mean"), - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="ref_u_v_w", - ) - validator = {sup_validator.name: sup_validator} - - # set visualizer(optional) - visualizer = { - "visualize_u_v_w": ppsci.visualize.VisualizerVtu( - input_dict, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - batch_size=cfg.EVAL.batch_size, - prefix="result_u_v_w", - ), - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - optimizer=optimizer, - equation=equation, - validator=validator, - visualizer=visualizer, - cfg=cfg, - ) - - # train - solver.train() - # eval - solver.eval() - # visualize prediction after finished training - solver.visualize() - # plot loss - solver.plot_loss_history(by_epoch=True) - - -def evaluate(cfg: DictConfig): - # set models - model = ppsci.arch.MLP(**cfg.MODEL) - - # set validator - eval_data_dict = reader.load_csv_file( - cfg.EVAL_CSV_PATH, - ("x", "y", "z", "u", "v", "w"), - { - "x": "x", - "y": "y", - "z": "z", - "u": "u", - "v": "v", - "w": "w", - }, - ) - - input_dict = { - "x": eval_data_dict["x"], - "y": eval_data_dict["y"], - "z": eval_data_dict["z"], - } - - label_dict = { - "u": eval_data_dict["u"], - "v": eval_data_dict["v"], - "w": eval_data_dict["w"], - } - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - }, - "num_workers": 1, - } - sup_validator = ppsci.validate.SupervisedValidator( - {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, - ppsci.loss.MSELoss("mean"), - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="ref_u_v_w", - ) - validator = {sup_validator.name: sup_validator} - - # set visualizer - visualizer = { - "visualize_u_v_w": ppsci.visualize.VisualizerVtu( - input_dict, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - batch_size=cfg.EVAL.batch_size, - prefix="result_u_v_w", - ), - } - - # initialize solver - solver = ppsci.solver.Solver( - model=model, - validator=validator, - visualizer=visualizer, - cfg=cfg, - ) - - # evaluate - solver.eval() - # visualize prediction - solver.visualize() - - -@hydra.main(version_base=None, config_path="./conf", config_name="forward.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
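# Summary of the boundary-value problem set up in train() below (restating the
# constraints and forward.yaml): the base surface is clamped (u = v = w = 0),
# the endocardial surface carries the cavity pressure via the combined
# traction condition traction = -P, the epicardial surface is traction-free
# (traction = 0), interior points satisfy the equilibrium residuals
# hooke_x = hooke_y = hooke_z = 0, and a supervised constraint additionally
# fits the reference displacements from DATA_CSV_PATH.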
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import equation as eq_func +import hydra +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import reader + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set equation + equation = {"Hooke": eq_func.Hooke(E=cfg.E, nu=cfg.nu, P=cfg.P, dim=3)} + + # set geometry + heart = ppsci.geometry.Mesh(cfg.GEOM_PATH) + base = ppsci.geometry.Mesh(cfg.BASE_PATH) + endo = ppsci.geometry.Mesh(cfg.ENDO_PATH) + epi = ppsci.geometry.Mesh(cfg.EPI_PATH) + geom = {"geo": heart, "base": base, "endo": endo, "epi": epi} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = heart.bounds + + # set dataloader config + train_dataloader_cfg = { + "dataset": "NamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + bc_base = ppsci.constraint.BoundaryConstraint( + {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, + {"u": 0, "v": 0, "w": 0}, + geom["base"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_base}, + ppsci.loss.MSELoss("mean"), + weight_dict=cfg.TRAIN.weight.bc_base, + name="BC_BASE", + ) + bc_endo = ppsci.constraint.BoundaryConstraint( + equation["Hooke"].equations, + {"traction": -cfg.P}, + geom["endo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_endo}, + ppsci.loss.MSELoss("mean"), + weight_dict=cfg.TRAIN.weight.bc_endo, + name="BC_ENDO", + ) + bc_epi = ppsci.constraint.BoundaryConstraint( + equation["Hooke"].equations, + {"traction": 0}, + geom["epi"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_epi}, + ppsci.loss.MSELoss("mean"), + weight_dict=cfg.TRAIN.weight.bc_epi, + name="BC_EPI", + ) + interior = ppsci.constraint.InteriorConstraint( + equation["Hooke"].equations, + {"hooke_x": 0, "hooke_y": 0, "hooke_z": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.interior}, + ppsci.loss.MSELoss("mean"), + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + weight_dict=cfg.TRAIN.weight.interior, + name="INTERIOR", + ) + data = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableCSVDataset", + "file_path": cfg.DATA_CSV_PATH, + "input_keys": ("x", "y", "z"), + "label_keys": ("u", "v", "w"), + }, + }, + ppsci.loss.MSELoss("sum"), + name="DATA", + ) + + # wrap constraints together + constraint = { + bc_base.name: bc_base, + bc_endo.name: bc_endo, + bc_epi.name: bc_epi, + interior.name: interior, + data.name: data, + } + + # set validator + eval_data_dict = reader.load_csv_file( + cfg.EVAL_CSV_PATH, + ("x", "y", "z", "u", "v", "w"), + { + "x": "x", + "y": "y", + "z": "z", + "u": "u", + "v": "v", + "w": "w", + }, + ) + + input_dict = { + "x": eval_data_dict["x"], + "y": eval_data_dict["y"], + "z": 
eval_data_dict["z"], + } + + label_dict = { + "u": eval_data_dict["u"], + "v": eval_data_dict["v"], + "w": eval_data_dict["w"], + } + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict, + "label": label_dict, + }, + "num_workers": 1, + } + sup_validator = ppsci.validate.SupervisedValidator( + {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, + ppsci.loss.MSELoss("mean"), + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="ref_u_v_w", + ) + validator = {sup_validator.name: sup_validator} + + # set visualizer(optional) + visualizer = { + "visualize_u_v_w": ppsci.visualize.VisualizerVtu( + input_dict, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + batch_size=cfg.EVAL.batch_size, + prefix="result_u_v_w", + ), + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + visualizer=visualizer, + cfg=cfg, + ) + + # train + solver.train() + # eval + solver.eval() + # visualize prediction after finished training + solver.visualize() + # plot loss + solver.plot_loss_history(by_epoch=True) + + +def evaluate(cfg: DictConfig): + # set models + model = ppsci.arch.MLP(**cfg.MODEL) + + # set validator + eval_data_dict = reader.load_csv_file( + cfg.EVAL_CSV_PATH, + ("x", "y", "z", "u", "v", "w"), + { + "x": "x", + "y": "y", + "z": "z", + "u": "u", + "v": "v", + "w": "w", + }, + ) + + input_dict = { + "x": eval_data_dict["x"], + "y": eval_data_dict["y"], + "z": eval_data_dict["z"], + } + + label_dict = { + "u": eval_data_dict["u"], + "v": eval_data_dict["v"], + "w": eval_data_dict["w"], + } + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict, + "label": label_dict, + }, + "num_workers": 1, + } + sup_validator = ppsci.validate.SupervisedValidator( + {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, + ppsci.loss.MSELoss("mean"), + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="ref_u_v_w", + ) + validator = {sup_validator.name: sup_validator} + + # set visualizer + visualizer = { + "visualize_u_v_w": ppsci.visualize.VisualizerVtu( + input_dict, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + batch_size=cfg.EVAL.batch_size, + prefix="result_u_v_w", + ), + } + + # initialize solver + solver = ppsci.solver.Solver( + model=model, + validator=validator, + visualizer=visualizer, + cfg=cfg, + ) + + # evaluate + solver.eval() + # visualize prediction + solver.visualize() + + +@hydra.main(version_base=None, config_path="./conf", config_name="forward.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/heart/inverse.py b/examples/heart/inverse.py index 1fe3b2e0ff..85b892d7f9 100644 --- a/examples/heart/inverse.py +++ b/examples/heart/inverse.py @@ -1,423 +1,423 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import equation as eq_func -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig -from paddle.nn import initializer - -import ppsci -from ppsci.metric import L2Rel -from ppsci.utils import logger -from ppsci.utils import reader - - -def train(cfg: DictConfig): - # set equation - E = paddle.create_parameter( - shape=[], - dtype=paddle.get_default_dtype(), - default_initializer=initializer.Constant(0.0), - ) - equation = {"Hooke": eq_func.Hooke(E=E, nu=cfg.nu, P=cfg.P, dim=3)} - - # set models - model = ppsci.arch.MLP(**cfg.MODEL) - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)((model,) + tuple(equation.values())) - - # set geometry - heart = ppsci.geometry.Mesh(cfg.GEOM_PATH) - base = ppsci.geometry.Mesh(cfg.BASE_PATH) - endo = ppsci.geometry.Mesh(cfg.ENDO_PATH) - epi = ppsci.geometry.Mesh(cfg.EPI_PATH) - geom = {"geo": heart, "base": base, "endo": endo, "epi": epi} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = heart.bounds - - # set dataloader config - train_dataloader_cfg = { - "dataset": "NamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "num_workers": 1, - } - - # set constraint - bc_base = ppsci.constraint.BoundaryConstraint( - {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, - {"u": 0, "v": 0, "w": 0}, - geom["base"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_base}, - ppsci.loss.MSELoss("sum"), - weight_dict=cfg.TRAIN.weight.bc_base, - name="BC_BASE", - ) - bc_endo = ppsci.constraint.BoundaryConstraint( - equation["Hooke"].equations, - {"traction_x": -cfg.P, "traction_y": -cfg.P, "traction_z": -cfg.P}, - geom["endo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_endo}, - ppsci.loss.MSELoss("sum"), - weight_dict=cfg.TRAIN.weight.bc_endo, - name="BC_ENDO", - ) - bc_epi = ppsci.constraint.BoundaryConstraint( - equation["Hooke"].equations, - {"traction_x": 0, "traction_y": 0, "traction_z": 0}, - geom["epi"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_epi}, - ppsci.loss.MSELoss("sum"), - weight_dict=cfg.TRAIN.weight.bc_endo, - name="BC_EPI", - ) - interior = ppsci.constraint.InteriorConstraint( - equation["Hooke"].equations, - {"hooke_x": 0, "hooke_y": 0, "hooke_z": 0}, - geom["geo"], - {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.interior}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - weight_dict=cfg.TRAIN.weight.interior, - name="INTERIOR", - ) - data = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableCSVDataset", - "file_path": cfg.DATA_PATH, - "input_keys": ("x", "y", "z"), - "label_keys": ("u", "v", "w"), - }, - }, - ppsci.loss.MSELoss("sum"), - name="DATA", - ) - - # wrap constraints together - constraint = { - 
bc_base.name: bc_base, - bc_endo.name: bc_endo, - bc_epi.name: bc_epi, - interior.name: interior, - data.name: data, - } - - # set validator - eval_data_dict = reader.load_csv_file( - cfg.DATA_PATH, - ("x", "y", "z", "u", "v", "w"), - { - "x": "x", - "y": "y", - "z": "z", - "u": "u", - "v": "v", - "w": "w", - }, - ) - input_dict = { - "x": eval_data_dict["x"], - "y": eval_data_dict["y"], - "z": eval_data_dict["z"], - } - label_dict = { - "u": eval_data_dict["u"], - "v": eval_data_dict["v"], - "w": eval_data_dict["w"], - } - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - }, - "num_workers": 1, - } - sup_validator = ppsci.validate.SupervisedValidator( - {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, - ppsci.loss.MSELoss("mean"), - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="ref_u_v_w", - ) - - fake_input = np.full((1, 1), 1, dtype=np.float32) - E_label = np.full((1, 1), cfg.E, dtype=np.float32) - param_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": { - "x": fake_input, - "y": fake_input, - "z": fake_input, - }, - "label": {"E": E_label}, - }, - "batch_size": 1, - "num_workers": 1, - }, - ppsci.loss.MSELoss("mean"), - { - "E": lambda out: E.reshape([1, 1]), - }, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="param_E", - ) - - validator = { - sup_validator.name: sup_validator, - param_validator.name: param_validator, - } - - # set visualizer(optional) - visualizer = { - "visualize_u_v_w": ppsci.visualize.VisualizerVtu( - input_dict, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - batch_size=cfg.EVAL.batch_size, - prefix="result_u_v_w", - ), - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - optimizer=optimizer, - equation=equation, - validator=validator, - visualizer=visualizer, - cfg=cfg, - ) - - # train - solver.train() - # eval - solver.eval() - # visualize prediction after finished training - solver.visualize() - # plot loss - solver.plot_loss_history(by_epoch=True) - - # save parameter E separately - paddle.save({"E": E}, osp.join(cfg.output_dir, "param_E.pdparams")) - - -def evaluate(cfg: DictConfig): - # set models - model = ppsci.arch.MLP(**cfg.MODEL) - - # set geometry - heart = ppsci.geometry.Mesh(cfg.GEOM_PATH) - base = ppsci.geometry.Mesh(cfg.BASE_PATH) - endo = ppsci.geometry.Mesh(cfg.ENDO_PATH) - epi = ppsci.geometry.Mesh(cfg.EPI_PATH) - # test = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - geom = {"geo": heart, "base": base, "endo": endo, "epi": epi} - # set bounds - BOUNDS_X, BOUNDS_Y, BOUNDS_Z = heart.bounds - - # set validator - eval_data_dict = reader.load_csv_file( - cfg.DATA_PATH, - ("x", "y", "z", "u", "v", "w"), - { - "x": "x", - "y": "y", - "z": "z", - "u": "u", - "v": "v", - "w": "w", - }, - ) - input_dict = { - "x": eval_data_dict["x"], - "y": eval_data_dict["y"], - "z": eval_data_dict["z"], - } - label_dict = { - "u": eval_data_dict["u"], - "v": eval_data_dict["v"], - "w": eval_data_dict["w"], - } - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - }, - "num_workers": 1, - } - sup_validator = ppsci.validate.SupervisedValidator( - {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, - ppsci.loss.MSELoss("mean"), - { - "u": lambda out: out["u"], - "v": lambda out: 
out["v"], - "w": lambda out: out["w"], - }, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="ref_u_v_w", - ) - validator = {sup_validator.name: sup_validator} - - # set visualizer(optional) - # add inferencer data endo - samples_endo = geom["endo"].sample_boundary( - cfg.EVAL.num_vis, - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict_endo = { - "x": samples_endo["x"], - "y": samples_endo["y"], - "z": samples_endo["z"], - } - visualizer_endo = ppsci.visualize.VisualizerVtu( - pred_input_dict_endo, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - prefix="vtu_u_v_w_endo", - ) - # add inferencer data epi - samples_epi = geom["epi"].sample_boundary( - cfg.EVAL.num_vis, - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict_epi = { - "x": samples_epi["x"], - "y": samples_epi["y"], - "z": samples_epi["z"], - } - visualizer_epi = ppsci.visualize.VisualizerVtu( - pred_input_dict_epi, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - prefix="vtu_u_v_w_epi", - ) - # add inferencer data - samples_geom = geom["geo"].sample_interior( - cfg.EVAL.num_vis, - criteria=lambda x, y, z: ( - (BOUNDS_X[0] < x) - & (x < BOUNDS_X[1]) - & (BOUNDS_Y[0] < y) - & (y < BOUNDS_Y[1]) - & (BOUNDS_Z[0] < z) - & (z < BOUNDS_Z[1]) - ), - ) - pred_input_dict_geom = { - "x": samples_geom["x"], - "y": samples_geom["y"], - "z": samples_geom["z"], - } - visualizer_geom = ppsci.visualize.VisualizerVtu( - pred_input_dict_geom, - { - "u": lambda out: out["u"], - "v": lambda out: out["v"], - "w": lambda out: out["w"], - }, - prefix="vtu_u_v_w_geom", - ) - - # wrap visualizers together - visualizer = { - "vis_eval_endo": visualizer_endo, - "visualizer_epi": visualizer_epi, - "vis_eval_geom": visualizer_geom, - } - - # initialize solver - solver = ppsci.solver.Solver( - model=model, - validator=validator, - visualizer=visualizer, - cfg=cfg, - ) - # evaluate - solver.eval() - # visualize prediction after finished training - solver.visualize() - - # evaluate E - E_truth = paddle.to_tensor(cfg.E, dtype=paddle.get_default_dtype()).reshape([1, 1]) - E_pred = paddle.load(cfg.EVAL.param_E_path)["E"].reshape([1, 1]) - l2_error = L2Rel()({"E": E_pred}, {"E": E_truth})["E"] - logger.info( - f"E_truth: {cfg.E}, E_pred: {float(E_pred)}, L2_Error: {float(l2_error)}" - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="inverse.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp + +import equation as eq_func +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig +from paddle.nn import initializer + +import ppsci +from ppsci.metric import L2Rel +from ppsci.utils import logger +from ppsci.utils import reader + + +def train(cfg: DictConfig): + # set equation + E = paddle.create_parameter( + shape=[], + dtype=paddle.get_default_dtype(), + default_initializer=initializer.Constant(0.0), + ) + equation = {"Hooke": eq_func.Hooke(E=E, nu=cfg.nu, P=cfg.P, dim=3)} + + # set models + model = ppsci.arch.MLP(**cfg.MODEL) + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)((model,) + tuple(equation.values())) + + # set geometry + heart = ppsci.geometry.Mesh(cfg.GEOM_PATH) + base = ppsci.geometry.Mesh(cfg.BASE_PATH) + endo = ppsci.geometry.Mesh(cfg.ENDO_PATH) + epi = ppsci.geometry.Mesh(cfg.EPI_PATH) + geom = {"geo": heart, "base": base, "endo": endo, "epi": epi} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = heart.bounds + + # set dataloader config + train_dataloader_cfg = { + "dataset": "NamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "num_workers": 1, + } + + # set constraint + bc_base = ppsci.constraint.BoundaryConstraint( + {"u": lambda d: d["u"], "v": lambda d: d["v"], "w": lambda d: d["w"]}, + {"u": 0, "v": 0, "w": 0}, + geom["base"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_base}, + ppsci.loss.MSELoss("sum"), + weight_dict=cfg.TRAIN.weight.bc_base, + name="BC_BASE", + ) + bc_endo = ppsci.constraint.BoundaryConstraint( + equation["Hooke"].equations, + {"traction_x": -cfg.P, "traction_y": -cfg.P, "traction_z": -cfg.P}, + geom["endo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_endo}, + ppsci.loss.MSELoss("sum"), + weight_dict=cfg.TRAIN.weight.bc_endo, + name="BC_ENDO", + ) + bc_epi = ppsci.constraint.BoundaryConstraint( + equation["Hooke"].equations, + {"traction_x": 0, "traction_y": 0, "traction_z": 0}, + geom["epi"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.bc_epi}, + ppsci.loss.MSELoss("sum"), + weight_dict=cfg.TRAIN.weight.bc_endo, + name="BC_EPI", + ) + interior = ppsci.constraint.InteriorConstraint( + equation["Hooke"].equations, + {"hooke_x": 0, "hooke_y": 0, "hooke_z": 0}, + geom["geo"], + {**train_dataloader_cfg, "batch_size": cfg.TRAIN.batch_size.interior}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + weight_dict=cfg.TRAIN.weight.interior, + name="INTERIOR", + ) + data = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableCSVDataset", + "file_path": cfg.DATA_PATH, + "input_keys": ("x", "y", "z"), + "label_keys": ("u", "v", "w"), + }, + }, + ppsci.loss.MSELoss("sum"), + name="DATA", + ) + + # wrap constraints together + constraint = { + bc_base.name: bc_base, + bc_endo.name: bc_endo, + bc_epi.name: bc_epi, + interior.name: interior, + data.name: data, + } + + # set validator + eval_data_dict = reader.load_csv_file( + cfg.DATA_PATH, + ("x", "y", "z", "u", "v", "w"), + { + "x": "x", + "y": "y", + "z": "z", + "u": "u", + "v": "v", + "w": "w", + }, + ) 
+ input_dict = { + "x": eval_data_dict["x"], + "y": eval_data_dict["y"], + "z": eval_data_dict["z"], + } + label_dict = { + "u": eval_data_dict["u"], + "v": eval_data_dict["v"], + "w": eval_data_dict["w"], + } + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict, + "label": label_dict, + }, + "num_workers": 1, + } + sup_validator = ppsci.validate.SupervisedValidator( + {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, + ppsci.loss.MSELoss("mean"), + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="ref_u_v_w", + ) + + fake_input = np.full((1, 1), 1, dtype=np.float32) + E_label = np.full((1, 1), cfg.E, dtype=np.float32) + param_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "x": fake_input, + "y": fake_input, + "z": fake_input, + }, + "label": {"E": E_label}, + }, + "batch_size": 1, + "num_workers": 1, + }, + ppsci.loss.MSELoss("mean"), + { + "E": lambda out: E.reshape([1, 1]), + }, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="param_E", + ) + + validator = { + sup_validator.name: sup_validator, + param_validator.name: param_validator, + } + + # set visualizer(optional) + visualizer = { + "visualize_u_v_w": ppsci.visualize.VisualizerVtu( + input_dict, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + batch_size=cfg.EVAL.batch_size, + prefix="result_u_v_w", + ), + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + visualizer=visualizer, + cfg=cfg, + ) + + # train + solver.train() + # eval + solver.eval() + # visualize prediction after finished training + solver.visualize() + # plot loss + solver.plot_loss_history(by_epoch=True) + + # save parameter E separately + paddle.save({"E": E}, osp.join(cfg.output_dir, "param_E.pdparams")) + + +def evaluate(cfg: DictConfig): + # set models + model = ppsci.arch.MLP(**cfg.MODEL) + + # set geometry + heart = ppsci.geometry.Mesh(cfg.GEOM_PATH) + base = ppsci.geometry.Mesh(cfg.BASE_PATH) + endo = ppsci.geometry.Mesh(cfg.ENDO_PATH) + epi = ppsci.geometry.Mesh(cfg.EPI_PATH) + # test = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + geom = {"geo": heart, "base": base, "endo": endo, "epi": epi} + # set bounds + BOUNDS_X, BOUNDS_Y, BOUNDS_Z = heart.bounds + + # set validator + eval_data_dict = reader.load_csv_file( + cfg.DATA_PATH, + ("x", "y", "z", "u", "v", "w"), + { + "x": "x", + "y": "y", + "z": "z", + "u": "u", + "v": "v", + "w": "w", + }, + ) + input_dict = { + "x": eval_data_dict["x"], + "y": eval_data_dict["y"], + "z": eval_data_dict["z"], + } + label_dict = { + "u": eval_data_dict["u"], + "v": eval_data_dict["v"], + "w": eval_data_dict["w"], + } + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict, + "label": label_dict, + }, + "num_workers": 1, + } + sup_validator = ppsci.validate.SupervisedValidator( + {**eval_dataloader_cfg, "batch_size": cfg.EVAL.batch_size}, + ppsci.loss.MSELoss("mean"), + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="ref_u_v_w", + ) + validator = {sup_validator.name: sup_validator} + + # set visualizer(optional) + # add inferencer data endo + samples_endo = geom["endo"].sample_boundary( + cfg.EVAL.num_vis, + criteria=lambda x, y, z: ( + 
(BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict_endo = { + "x": samples_endo["x"], + "y": samples_endo["y"], + "z": samples_endo["z"], + } + visualizer_endo = ppsci.visualize.VisualizerVtu( + pred_input_dict_endo, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + prefix="vtu_u_v_w_endo", + ) + # add inferencer data epi + samples_epi = geom["epi"].sample_boundary( + cfg.EVAL.num_vis, + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict_epi = { + "x": samples_epi["x"], + "y": samples_epi["y"], + "z": samples_epi["z"], + } + visualizer_epi = ppsci.visualize.VisualizerVtu( + pred_input_dict_epi, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + prefix="vtu_u_v_w_epi", + ) + # add inferencer data + samples_geom = geom["geo"].sample_interior( + cfg.EVAL.num_vis, + criteria=lambda x, y, z: ( + (BOUNDS_X[0] < x) + & (x < BOUNDS_X[1]) + & (BOUNDS_Y[0] < y) + & (y < BOUNDS_Y[1]) + & (BOUNDS_Z[0] < z) + & (z < BOUNDS_Z[1]) + ), + ) + pred_input_dict_geom = { + "x": samples_geom["x"], + "y": samples_geom["y"], + "z": samples_geom["z"], + } + visualizer_geom = ppsci.visualize.VisualizerVtu( + pred_input_dict_geom, + { + "u": lambda out: out["u"], + "v": lambda out: out["v"], + "w": lambda out: out["w"], + }, + prefix="vtu_u_v_w_geom", + ) + + # wrap visualizers together + visualizer = { + "vis_eval_endo": visualizer_endo, + "visualizer_epi": visualizer_epi, + "vis_eval_geom": visualizer_geom, + } + + # initialize solver + solver = ppsci.solver.Solver( + model=model, + validator=validator, + visualizer=visualizer, + cfg=cfg, + ) + # evaluate + solver.eval() + # visualize prediction after finished training + solver.visualize() + + # evaluate E + E_truth = paddle.to_tensor(cfg.E, dtype=paddle.get_default_dtype()).reshape([1, 1]) + E_pred = paddle.load(cfg.EVAL.param_E_path)["E"].reshape([1, 1]) + l2_error = L2Rel()({"E": E_pred}, {"E": E_truth})["E"] + logger.info( + f"E_truth: {cfg.E}, E_pred: {float(E_pred)}, L2_Error: {float(l2_error)}" + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="inverse.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/heat_exchanger/conf/heat_exchanger.yaml b/examples/heat_exchanger/conf/heat_exchanger.yaml index 34e9e449d0..c03bc611ca 100644 --- a/examples/heat_exchanger/conf/heat_exchanger.yaml +++ b/examples/heat_exchanger/conf/heat_exchanger.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -110,3 +111,116 @@ INFER: num_cpu_threads: 10 batch_size: 1000 input_keys: ['qm_h','qm_c',"x",'t'] +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory 
for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +DL: 1.0 # lenth of the domain +cp_c: 1.0 # specific heat capacity of cold boundary +cp_h: 1.0 # specific heat capacity of hot boundary +cp_w: 1.0 # specific heat capacity of wall +v_h: 1.0 # flow rate of hot boundary +v_c: 1.0 # flow rate of cold boundary +alpha_h: 1.0 # surface efficiency*heat transfer coefficient*heat transfer area of hot boundary +alpha_c: 1.0 # surface efficiency*heat transfer coefficient*heat transfer area of cold boundary +L: 1.0 # flow length +M: 1.0 # heat transfer structural quality +T_hin: 10.0 # initial temperature of hot boundary +T_cin: 1.0 # initial temperature of cold boundary +T_win: 5.5 # initial temperature of wall +NTIME: 20 # number of time steps +NPOINT: 101 # number of points in the domain +NQM: 60 # Number of branch network samples + +# model settings +MODEL: + heat_input_keys: ['qm_h'] + cold_input_keys: ['qm_c'] + trunk_input_keys: ["x",'t'] + output_keys: ["T_h",'T_c','T_w'] + heat_num_loc: 1 + cold_num_loc: 1 + num_features: 100 + branch_num_layers: 9 + trunk_num_layers: 6 + branch_hidden_size: 256 + trunk_hidden_size: 128 + branch_activation: "swish" + trunk_activation: "swish" + use_bias: true + +# training settings +TRAIN: + epochs: 10000 + iters_per_epoch: 1 + eval_during_train: true + eval_freq: 1000 + learning_rate: 0.001 + batch_size: 1000 + weight: + left_sup_constraint: + T_h: 20 + right_sup_constraint: + T_h: 20 + interior_sup_constraint: + heat_boundary: 1 + cold_boundary: 1 + wall: 20 + initial_sup_constraint: + T_h: 1 + T_c: 1 + T_w: 20 + +# evaluation settings +EVAL: + pretrained_model_path: null + +# visualization settings +qm_h: 1 +qm_c: 1 +eta_true: 0.5 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/HEDeepONet/HEDeepONet_pretrained.pdparams + export_path: ./inference/ldc2d_steady_Re10 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1000 + num_cpu_threads: 10 + batch_size: 1000 + input_keys: ['qm_h','qm_c',"x",'t'] +>>>>>>> Stashed changes diff --git a/examples/heat_exchanger/heat_exchanger.py b/examples/heat_exchanger/heat_exchanger.py index f3955a1de6..45e31a4dbc 100644 --- a/examples/heat_exchanger/heat_exchanger.py +++ b/examples/heat_exchanger/heat_exchanger.py @@ -1,672 +1,672 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from os import path as osp - -import hydra -import matplotlib.pyplot as plt -import numpy as np -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - model = ppsci.arch.HEDeepONets(**cfg.MODEL) - - # set time-geometry - timestamps = np.linspace(0.0, 2, cfg.NTIME + 1, endpoint=True) - geom = { - "time_rect": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(0.0, 1, timestamps=timestamps), - ppsci.geometry.Interval(0, cfg.DL), - ) - } - - # Generate train data and eval data - visu_input = geom["time_rect"].sample_interior(cfg.NPOINT * cfg.NTIME, evenly=True) - data_h = np.random.rand(cfg.NQM).reshape([-1, 1]) * 2 - data_c = np.random.rand(cfg.NQM).reshape([-1, 1]) * 2 - data_h = data_h.astype("float32") - data_c = data_c.astype("float32") - test_h = np.random.rand(1).reshape([-1, 1]).astype("float32") - test_c = np.random.rand(1).reshape([-1, 1]).astype("float32") - # rearrange train data and eval data - points = visu_input.copy() - points["t"] = np.repeat(points["t"], cfg.NQM, axis=0) - points["x"] = np.repeat(points["x"], cfg.NQM, axis=0) - points["qm_h"] = np.tile(data_h, (cfg.NPOINT * cfg.NTIME, 1)) - points["t"] = np.repeat(points["t"], cfg.NQM, axis=0) - points["x"] = np.repeat(points["x"], cfg.NQM, axis=0) - points["qm_h"] = np.repeat(points["qm_h"], cfg.NQM, axis=0) - points["qm_c"] = np.tile(data_c, (cfg.NPOINT * cfg.NTIME * cfg.NQM, 1)) - visu_input["qm_h"] = np.tile(test_h, (cfg.NPOINT * cfg.NTIME, 1)) - visu_input["qm_c"] = np.tile(test_c, (cfg.NPOINT * cfg.NTIME, 1)) - - left_indices = visu_input["x"] == 0 - right_indices = visu_input["x"] == cfg.DL - interior_indices = (visu_input["x"] != 0) & (visu_input["x"] != cfg.DL) - left_indices = np.where(left_indices) - right_indices = np.where(right_indices) - interior_indices = np.where(interior_indices) - - left_indices1 = points["x"] == 0 - right_indices1 = points["x"] == cfg.DL - interior_indices1 = (points["x"] != 0) & (points["x"] != cfg.DL) - initial_indices1 = points["t"] == points["t"][0] - left_indices1 = np.where(left_indices1) - right_indices1 = np.where(right_indices1) - interior_indices1 = np.where(interior_indices1) - initial_indices1 = np.where(initial_indices1) - - # Classification train data - left_data = { - "x": points["x"][left_indices1[0]], - "t": points["t"][left_indices1[0]], - "qm_h": points["qm_h"][left_indices1[0]], - "qm_c": points["qm_c"][left_indices1[0]], - } - right_data = { - "x": points["x"][right_indices1[0]], - "t": points["t"][right_indices1[0]], - "qm_h": points["qm_h"][right_indices1[0]], - "qm_c": points["qm_c"][right_indices1[0]], - } - interior_data = { - "x": points["x"], - "t": points["t"], - "qm_h": points["qm_h"], - "qm_c": points["qm_c"], - } - initial_data = { - "x": points["x"][initial_indices1[0]], - "t": points["t"][initial_indices1[0]] * 0, - "qm_h": points["qm_h"][initial_indices1[0]], - "qm_c": points["qm_c"][initial_indices1[0]], - } - # Classification eval data - test_left_data = { - "x": visu_input["x"][left_indices[0]], - "t": visu_input["t"][left_indices[0]], - "qm_h": visu_input["qm_h"][left_indices[0]], - "qm_c": visu_input["qm_c"][left_indices[0]], - } - test_right_data = { - "x": visu_input["x"][right_indices[0]], - "t": visu_input["t"][right_indices[0]], - "qm_h": 
visu_input["qm_h"][right_indices[0]], - "qm_c": visu_input["qm_c"][right_indices[0]], - } - test_interior_data = { - "x": visu_input["x"], - "t": visu_input["t"], - "qm_h": visu_input["qm_h"], - "qm_c": visu_input["qm_c"], - } - - # set equation - equation = { - "heat_exchanger": ppsci.equation.HeatExchanger( - cfg.alpha_h / (cfg.L * cfg.cp_h), - cfg.alpha_c / (cfg.L * cfg.cp_c), - cfg.v_h, - cfg.v_c, - cfg.alpha_h / (cfg.M * cfg.cp_w), - cfg.alpha_c / (cfg.M * cfg.cp_w), - ) - } - - # set constraint - bc_label = { - "T_h": np.zeros([left_data["x"].shape[0], 1], dtype="float32"), - } - interior_label = { - "heat_boundary": np.zeros([interior_data["x"].shape[0], 1], dtype="float32"), - "cold_boundary": np.zeros([interior_data["x"].shape[0], 1], dtype="float32"), - "wall": np.zeros([interior_data["x"].shape[0], 1], dtype="float32"), - } - initial_label = { - "T_h": np.zeros([initial_data["x"].shape[0], 1], dtype="float32"), - "T_c": np.zeros([initial_data["x"].shape[0], 1], dtype="float32"), - "T_w": np.zeros([initial_data["x"].shape[0], 1], dtype="float32"), - } - - left_sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": left_data, - "label": bc_label, - "weight": { - "T_h": np.full_like( - left_data["x"], cfg.TRAIN.weight.left_sup_constraint.T_h - ) - }, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr={"T_h": lambda out: out["T_h"] - cfg.T_hin}, - name="left_sup", - ) - right_sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": right_data, - "label": bc_label, - "weight": { - "T_h": np.full_like( - right_data["x"], cfg.TRAIN.weight.right_sup_constraint.T_h - ) - }, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr={"T_h": lambda out: out["T_c"] - cfg.T_cin}, - name="right_sup", - ) - interior_sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": interior_data, - "label": interior_label, - "weight": { - "heat_boundary": np.full_like( - interior_data["x"], - cfg.TRAIN.weight.interior_sup_constraint.heat_boundary, - ), - "cold_boundary": np.full_like( - interior_data["x"], - cfg.TRAIN.weight.interior_sup_constraint.cold_boundary, - ), - "wall": np.full_like( - interior_data["x"], - cfg.TRAIN.weight.interior_sup_constraint.wall, - ), - }, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr=equation["heat_exchanger"].equations, - name="interior_sup", - ) - initial_sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": initial_data, - "label": initial_label, - "weight": { - "T_h": np.full_like( - initial_data["x"], cfg.TRAIN.weight.initial_sup_constraint.T_h - ), - "T_c": np.full_like( - initial_data["x"], cfg.TRAIN.weight.initial_sup_constraint.T_c - ), - "T_w": np.full_like( - initial_data["x"], cfg.TRAIN.weight.initial_sup_constraint.T_w - ), - }, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr={ - "T_h": lambda out: out["T_h"] - 
cfg.T_hin, - "T_c": lambda out: out["T_c"] - cfg.T_cin, - "T_w": lambda out: out["T_w"] - cfg.T_win, - }, - name="initial_sup", - ) - # wrap constraints together - constraint = { - left_sup_constraint.name: left_sup_constraint, - right_sup_constraint.name: right_sup_constraint, - interior_sup_constraint.name: interior_sup_constraint, - initial_sup_constraint.name: initial_sup_constraint, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - test_bc_label = { - "T_h": np.zeros([test_left_data["x"].shape[0], 1], dtype="float32"), - } - test_interior_label = { - "heat_boundary": np.zeros( - [test_interior_data["x"].shape[0], 1], dtype="float32" - ), - "cold_boundary": np.zeros( - [test_interior_data["x"].shape[0], 1], dtype="float32" - ), - "wall": np.zeros([test_interior_data["x"].shape[0], 1], dtype="float32"), - } - left_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": test_left_data, - "label": test_bc_label, - }, - "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr={"T_h": lambda out: out["T_h"] - cfg.T_hin}, - metric={"MSE": ppsci.metric.MSE()}, - name="left_mse", - ) - right_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": test_right_data, - "label": test_bc_label, - }, - "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr={"T_h": lambda out: out["T_c"] - cfg.T_cin}, - metric={"MSE": ppsci.metric.MSE()}, - name="right_mse", - ) - interior_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": test_interior_data, - "label": test_interior_label, - }, - "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr=equation["heat_exchanger"].equations, - metric={"MSE": ppsci.metric.MSE()}, - name="interior_mse", - ) - validator = { - left_validator.name: left_validator, - right_validator.name: right_validator, - interior_validator.name: interior_validator, - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # plotting iteration/epoch-loss curve. 
- solver.plot_loss_history() - - # visualize prediction after finished training - visu_input["qm_c"] = np.full_like(visu_input["qm_c"], cfg.qm_h) - visu_input["qm_h"] = np.full_like(visu_input["qm_c"], cfg.qm_c) - pred = solver.predict(visu_input, return_numpy=True) - plot(visu_input, pred, cfg) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - model = ppsci.arch.HEDeepONets(**cfg.MODEL) - - # set time-geometry - timestamps = np.linspace(0.0, 2, cfg.NTIME + 1, endpoint=True) - geom = { - "time_rect": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(0.0, 1, timestamps=timestamps), - ppsci.geometry.Interval(0, cfg.DL), - ) - } - - # Generate eval data - visu_input = geom["time_rect"].sample_interior(cfg.NPOINT * cfg.NTIME, evenly=True) - test_h = np.random.rand(1).reshape([-1, 1]).astype("float32") - test_c = np.random.rand(1).reshape([-1, 1]).astype("float32") - # rearrange train data and eval data - visu_input["qm_h"] = np.tile(test_h, (cfg.NPOINT * cfg.NTIME, 1)) - visu_input["qm_c"] = np.tile(test_c, (cfg.NPOINT * cfg.NTIME, 1)) - - left_indices = visu_input["x"] == 0 - right_indices = visu_input["x"] == cfg.DL - interior_indices = (visu_input["x"] != 0) & (visu_input["x"] != cfg.DL) - left_indices = np.where(left_indices) - right_indices = np.where(right_indices) - interior_indices = np.where(interior_indices) - - # Classification eval data - test_left_data = { - "x": visu_input["x"][left_indices[0]], - "t": visu_input["t"][left_indices[0]], - "qm_h": visu_input["qm_h"][left_indices[0]], - "qm_c": visu_input["qm_c"][left_indices[0]], - } - test_right_data = { - "x": visu_input["x"][right_indices[0]], - "t": visu_input["t"][right_indices[0]], - "qm_h": visu_input["qm_h"][right_indices[0]], - "qm_c": visu_input["qm_c"][right_indices[0]], - } - test_interior_data = { - "x": visu_input["x"], - "t": visu_input["t"], - "qm_h": visu_input["qm_h"], - "qm_c": visu_input["qm_c"], - } - - # set equation - equation = { - "heat_exchanger": ppsci.equation.HeatExchanger( - cfg.alpha_h / (cfg.L * cfg.cp_h), - cfg.alpha_c / (cfg.L * cfg.cp_c), - cfg.v_h, - cfg.v_c, - cfg.alpha_h / (cfg.M * cfg.cp_w), - cfg.alpha_c / (cfg.M * cfg.cp_w), - ) - } - - # set validator - test_bc_label = { - "T_h": np.zeros([test_left_data["x"].shape[0], 1], dtype="float32"), - } - test_interior_label = { - "heat_boundary": np.zeros( - [test_interior_data["x"].shape[0], 1], dtype="float32" - ), - "cold_boundary": np.zeros( - [test_interior_data["x"].shape[0], 1], dtype="float32" - ), - "wall": np.zeros([test_interior_data["x"].shape[0], 1], dtype="float32"), - } - left_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": test_left_data, - "label": test_bc_label, - }, - "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr={ - "T_h": lambda out: out["T_h"] - cfg.T_hin, - }, - metric={"left_MSE": ppsci.metric.MSE()}, - name="left_mse", - ) - right_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": test_right_data, - "label": test_bc_label, - }, - "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr={ - 
"T_h": lambda out: out["T_c"] - cfg.T_cin, - }, - metric={"right_MSE": ppsci.metric.MSE()}, - name="right_mse", - ) - interior_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": test_interior_data, - "label": test_interior_label, - }, - "batch_size": cfg.NTIME, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.MSELoss("mean"), - output_expr=equation["heat_exchanger"].equations, - metric={"interior_MSE": ppsci.metric.MSE()}, - name="interior_mse", - ) - validator = { - left_validator.name: left_validator, - right_validator.name: right_validator, - interior_validator.name: interior_validator, - } - - # directly evaluate pretrained model(optional) - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - equation=equation, - geom=geom, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - - # visualize prediction after finished training - visu_input["qm_c"] = np.full_like(visu_input["qm_c"], cfg.qm_h) - visu_input["qm_h"] = np.full_like(visu_input["qm_c"], cfg.qm_c) - pred = solver.predict(visu_input, return_numpy=True) - plot(visu_input, pred, cfg) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.HEDeepONets(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set time-geometry - timestamps = np.linspace(0.0, 2, cfg.NTIME + 1, endpoint=True) - geom = { - "time_rect": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(0.0, 1, timestamps=timestamps), - ppsci.geometry.Interval(0, cfg.DL), - ) - } - input_dict = geom["time_rect"].sample_interior(cfg.NPOINT * cfg.NTIME, evenly=True) - test_h = np.random.rand(1).reshape([-1, 1]).astype("float32") - test_c = np.random.rand(1).reshape([-1, 1]).astype("float32") - # rearrange train data and eval data - input_dict["qm_h"] = np.tile(test_h, (cfg.NPOINT * cfg.NTIME, 1)) - input_dict["qm_c"] = np.tile(test_c, (cfg.NPOINT * cfg.NTIME, 1)) - input_dict["qm_c"] = np.full_like(input_dict["qm_c"], cfg.qm_h) - input_dict["qm_h"] = np.full_like(input_dict["qm_c"], cfg.qm_c) - output_dict = predictor.predict( - {key: input_dict[key] for key in cfg.INFER.input_keys}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - plot(input_dict, output_dict, cfg) - - -def plot(visu_input, pred, cfg: DictConfig): - x = visu_input["x"][: cfg.NPOINT] - # plot temperature of heat boundary - plt.figure() - y = np.full_like(pred["T_h"][: cfg.NPOINT], cfg.T_hin) - plt.plot(x, y, label="t = 0.0 s") - for i in range(10): - y = pred["T_h"][cfg.NPOINT * i * 2 : cfg.NPOINT * (i * 2 + 1)] - plt.plot(x, y, label=f"t = {(i+1)*0.1:,.1f} s") - plt.xlabel("A") - plt.ylabel(r"$T_h$") - plt.legend() - plt.grid() - plt.savefig("T_h.png") - # plot temperature of cold boundary - plt.figure() - y = np.full_like(pred["T_c"][: cfg.NPOINT], cfg.T_cin) - plt.plot(x, y, label="t = 0.0 s") - for i in range(10): - y = 
pred["T_c"][cfg.NPOINT * i * 2 : cfg.NPOINT * (i * 2 + 1)] - plt.plot(x, y, label=f"t = {(i+1)*0.1:,.1f} s") - plt.xlabel("A") - plt.ylabel(r"$T_c$") - plt.legend() - plt.grid() - plt.savefig("T_c.png") - # plot temperature of wall - plt.figure() - y = np.full_like(pred["T_w"][: cfg.NPOINT], cfg.T_win) - plt.plot(x, y, label="t = 0.0 s") - for i in range(10): - y = pred["T_w"][cfg.NPOINT * i * 2 : cfg.NPOINT * (i * 2 + 1)] - plt.plot(x, y, label=f"t = {(i+1)*0.1:,.1f} s") - plt.xlabel("A") - plt.ylabel(r"$T_w$") - plt.legend() - plt.grid() - plt.savefig("T_w.png") - # plot the heat exchanger efficiency as a function of time. - plt.figure() - qm_min = np.min((visu_input["qm_h"][0], visu_input["qm_c"][0])) - eta = ( - visu_input["qm_h"][0] - * (pred["T_h"][:: cfg.NPOINT] - pred["T_h"][cfg.NPOINT - 1 :: cfg.NPOINT]) - / ( - qm_min - * (pred["T_h"][:: cfg.NPOINT] - pred["T_c"][cfg.NPOINT - 1 :: cfg.NPOINT]) - ) - ) - x = list(range(1, cfg.NTIME + 1)) - plt.plot(x, eta) - plt.xlabel("time") - plt.ylabel(r"$\eta$") - plt.grid() - plt.savefig("eta.png") - error = np.square(eta[-1] - cfg.eta_true) - logger.info( - f"The L2 norm error between the actual heat exchanger efficiency and the predicted heat exchanger efficiency is {error}" - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="heat_exchanger.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from os import path as osp + +import hydra +import matplotlib.pyplot as plt +import numpy as np +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + model = ppsci.arch.HEDeepONets(**cfg.MODEL) + + # set time-geometry + timestamps = np.linspace(0.0, 2, cfg.NTIME + 1, endpoint=True) + geom = { + "time_rect": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(0.0, 1, timestamps=timestamps), + ppsci.geometry.Interval(0, cfg.DL), + ) + } + + # Generate train data and eval data + visu_input = geom["time_rect"].sample_interior(cfg.NPOINT * cfg.NTIME, evenly=True) + data_h = np.random.rand(cfg.NQM).reshape([-1, 1]) * 2 + data_c = np.random.rand(cfg.NQM).reshape([-1, 1]) * 2 + data_h = data_h.astype("float32") + data_c = data_c.astype("float32") + test_h = np.random.rand(1).reshape([-1, 1]).astype("float32") + test_c = np.random.rand(1).reshape([-1, 1]).astype("float32") + # rearrange train data and eval data + points = visu_input.copy() + points["t"] = np.repeat(points["t"], cfg.NQM, axis=0) + points["x"] = np.repeat(points["x"], cfg.NQM, axis=0) + points["qm_h"] = np.tile(data_h, (cfg.NPOINT * cfg.NTIME, 1)) + points["t"] = np.repeat(points["t"], cfg.NQM, axis=0) + points["x"] = np.repeat(points["x"], cfg.NQM, axis=0) + points["qm_h"] = np.repeat(points["qm_h"], cfg.NQM, axis=0) + points["qm_c"] = np.tile(data_c, (cfg.NPOINT * cfg.NTIME * cfg.NQM, 1)) + visu_input["qm_h"] = np.tile(test_h, (cfg.NPOINT * cfg.NTIME, 1)) + visu_input["qm_c"] = np.tile(test_c, (cfg.NPOINT * cfg.NTIME, 1)) + + left_indices = visu_input["x"] == 0 + right_indices = visu_input["x"] == cfg.DL + interior_indices = (visu_input["x"] != 0) & (visu_input["x"] != cfg.DL) + left_indices = np.where(left_indices) + right_indices = np.where(right_indices) + interior_indices = np.where(interior_indices) + + left_indices1 = points["x"] == 0 + right_indices1 = points["x"] == cfg.DL + interior_indices1 = (points["x"] != 0) & (points["x"] != cfg.DL) + initial_indices1 = points["t"] == points["t"][0] + left_indices1 = np.where(left_indices1) + right_indices1 = np.where(right_indices1) + interior_indices1 = np.where(interior_indices1) + initial_indices1 = np.where(initial_indices1) + + # Classification train data + left_data = { + "x": points["x"][left_indices1[0]], + "t": points["t"][left_indices1[0]], + "qm_h": points["qm_h"][left_indices1[0]], + "qm_c": points["qm_c"][left_indices1[0]], + } + right_data = { + "x": points["x"][right_indices1[0]], + "t": points["t"][right_indices1[0]], + "qm_h": points["qm_h"][right_indices1[0]], + "qm_c": points["qm_c"][right_indices1[0]], + } + interior_data = { + "x": points["x"], + "t": points["t"], + "qm_h": points["qm_h"], + "qm_c": points["qm_c"], + } + initial_data = { + "x": points["x"][initial_indices1[0]], + "t": points["t"][initial_indices1[0]] * 0, + "qm_h": points["qm_h"][initial_indices1[0]], + "qm_c": points["qm_c"][initial_indices1[0]], + } + # Classification eval data + test_left_data = { + "x": visu_input["x"][left_indices[0]], + "t": visu_input["t"][left_indices[0]], + "qm_h": visu_input["qm_h"][left_indices[0]], + "qm_c": visu_input["qm_c"][left_indices[0]], + } + test_right_data = { + "x": visu_input["x"][right_indices[0]], + "t": visu_input["t"][right_indices[0]], + "qm_h": 
visu_input["qm_h"][right_indices[0]], + "qm_c": visu_input["qm_c"][right_indices[0]], + } + test_interior_data = { + "x": visu_input["x"], + "t": visu_input["t"], + "qm_h": visu_input["qm_h"], + "qm_c": visu_input["qm_c"], + } + + # set equation + equation = { + "heat_exchanger": ppsci.equation.HeatExchanger( + cfg.alpha_h / (cfg.L * cfg.cp_h), + cfg.alpha_c / (cfg.L * cfg.cp_c), + cfg.v_h, + cfg.v_c, + cfg.alpha_h / (cfg.M * cfg.cp_w), + cfg.alpha_c / (cfg.M * cfg.cp_w), + ) + } + + # set constraint + bc_label = { + "T_h": np.zeros([left_data["x"].shape[0], 1], dtype="float32"), + } + interior_label = { + "heat_boundary": np.zeros([interior_data["x"].shape[0], 1], dtype="float32"), + "cold_boundary": np.zeros([interior_data["x"].shape[0], 1], dtype="float32"), + "wall": np.zeros([interior_data["x"].shape[0], 1], dtype="float32"), + } + initial_label = { + "T_h": np.zeros([initial_data["x"].shape[0], 1], dtype="float32"), + "T_c": np.zeros([initial_data["x"].shape[0], 1], dtype="float32"), + "T_w": np.zeros([initial_data["x"].shape[0], 1], dtype="float32"), + } + + left_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": left_data, + "label": bc_label, + "weight": { + "T_h": np.full_like( + left_data["x"], cfg.TRAIN.weight.left_sup_constraint.T_h + ) + }, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={"T_h": lambda out: out["T_h"] - cfg.T_hin}, + name="left_sup", + ) + right_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": right_data, + "label": bc_label, + "weight": { + "T_h": np.full_like( + right_data["x"], cfg.TRAIN.weight.right_sup_constraint.T_h + ) + }, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={"T_h": lambda out: out["T_c"] - cfg.T_cin}, + name="right_sup", + ) + interior_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": interior_data, + "label": interior_label, + "weight": { + "heat_boundary": np.full_like( + interior_data["x"], + cfg.TRAIN.weight.interior_sup_constraint.heat_boundary, + ), + "cold_boundary": np.full_like( + interior_data["x"], + cfg.TRAIN.weight.interior_sup_constraint.cold_boundary, + ), + "wall": np.full_like( + interior_data["x"], + cfg.TRAIN.weight.interior_sup_constraint.wall, + ), + }, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr=equation["heat_exchanger"].equations, + name="interior_sup", + ) + initial_sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": initial_data, + "label": initial_label, + "weight": { + "T_h": np.full_like( + initial_data["x"], cfg.TRAIN.weight.initial_sup_constraint.T_h + ), + "T_c": np.full_like( + initial_data["x"], cfg.TRAIN.weight.initial_sup_constraint.T_c + ), + "T_w": np.full_like( + initial_data["x"], cfg.TRAIN.weight.initial_sup_constraint.T_w + ), + }, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + "T_h": lambda out: out["T_h"] - 
cfg.T_hin, + "T_c": lambda out: out["T_c"] - cfg.T_cin, + "T_w": lambda out: out["T_w"] - cfg.T_win, + }, + name="initial_sup", + ) + # wrap constraints together + constraint = { + left_sup_constraint.name: left_sup_constraint, + right_sup_constraint.name: right_sup_constraint, + interior_sup_constraint.name: interior_sup_constraint, + initial_sup_constraint.name: initial_sup_constraint, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + test_bc_label = { + "T_h": np.zeros([test_left_data["x"].shape[0], 1], dtype="float32"), + } + test_interior_label = { + "heat_boundary": np.zeros( + [test_interior_data["x"].shape[0], 1], dtype="float32" + ), + "cold_boundary": np.zeros( + [test_interior_data["x"].shape[0], 1], dtype="float32" + ), + "wall": np.zeros([test_interior_data["x"].shape[0], 1], dtype="float32"), + } + left_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_left_data, + "label": test_bc_label, + }, + "batch_size": cfg.NTIME, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={"T_h": lambda out: out["T_h"] - cfg.T_hin}, + metric={"MSE": ppsci.metric.MSE()}, + name="left_mse", + ) + right_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_right_data, + "label": test_bc_label, + }, + "batch_size": cfg.NTIME, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={"T_h": lambda out: out["T_c"] - cfg.T_cin}, + metric={"MSE": ppsci.metric.MSE()}, + name="right_mse", + ) + interior_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_interior_data, + "label": test_interior_label, + }, + "batch_size": cfg.NTIME, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr=equation["heat_exchanger"].equations, + metric={"MSE": ppsci.metric.MSE()}, + name="interior_mse", + ) + validator = { + left_validator.name: left_validator, + right_validator.name: right_validator, + interior_validator.name: interior_validator, + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # plotting iteration/epoch-loss curve. 
+ solver.plot_loss_history() + + # visualize prediction after finished training + visu_input["qm_c"] = np.full_like(visu_input["qm_c"], cfg.qm_h) + visu_input["qm_h"] = np.full_like(visu_input["qm_c"], cfg.qm_c) + pred = solver.predict(visu_input, return_numpy=True) + plot(visu_input, pred, cfg) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + model = ppsci.arch.HEDeepONets(**cfg.MODEL) + + # set time-geometry + timestamps = np.linspace(0.0, 2, cfg.NTIME + 1, endpoint=True) + geom = { + "time_rect": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(0.0, 1, timestamps=timestamps), + ppsci.geometry.Interval(0, cfg.DL), + ) + } + + # Generate eval data + visu_input = geom["time_rect"].sample_interior(cfg.NPOINT * cfg.NTIME, evenly=True) + test_h = np.random.rand(1).reshape([-1, 1]).astype("float32") + test_c = np.random.rand(1).reshape([-1, 1]).astype("float32") + # rearrange train data and eval data + visu_input["qm_h"] = np.tile(test_h, (cfg.NPOINT * cfg.NTIME, 1)) + visu_input["qm_c"] = np.tile(test_c, (cfg.NPOINT * cfg.NTIME, 1)) + + left_indices = visu_input["x"] == 0 + right_indices = visu_input["x"] == cfg.DL + interior_indices = (visu_input["x"] != 0) & (visu_input["x"] != cfg.DL) + left_indices = np.where(left_indices) + right_indices = np.where(right_indices) + interior_indices = np.where(interior_indices) + + # Classification eval data + test_left_data = { + "x": visu_input["x"][left_indices[0]], + "t": visu_input["t"][left_indices[0]], + "qm_h": visu_input["qm_h"][left_indices[0]], + "qm_c": visu_input["qm_c"][left_indices[0]], + } + test_right_data = { + "x": visu_input["x"][right_indices[0]], + "t": visu_input["t"][right_indices[0]], + "qm_h": visu_input["qm_h"][right_indices[0]], + "qm_c": visu_input["qm_c"][right_indices[0]], + } + test_interior_data = { + "x": visu_input["x"], + "t": visu_input["t"], + "qm_h": visu_input["qm_h"], + "qm_c": visu_input["qm_c"], + } + + # set equation + equation = { + "heat_exchanger": ppsci.equation.HeatExchanger( + cfg.alpha_h / (cfg.L * cfg.cp_h), + cfg.alpha_c / (cfg.L * cfg.cp_c), + cfg.v_h, + cfg.v_c, + cfg.alpha_h / (cfg.M * cfg.cp_w), + cfg.alpha_c / (cfg.M * cfg.cp_w), + ) + } + + # set validator + test_bc_label = { + "T_h": np.zeros([test_left_data["x"].shape[0], 1], dtype="float32"), + } + test_interior_label = { + "heat_boundary": np.zeros( + [test_interior_data["x"].shape[0], 1], dtype="float32" + ), + "cold_boundary": np.zeros( + [test_interior_data["x"].shape[0], 1], dtype="float32" + ), + "wall": np.zeros([test_interior_data["x"].shape[0], 1], dtype="float32"), + } + left_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_left_data, + "label": test_bc_label, + }, + "batch_size": cfg.NTIME, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + "T_h": lambda out: out["T_h"] - cfg.T_hin, + }, + metric={"left_MSE": ppsci.metric.MSE()}, + name="left_mse", + ) + right_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_right_data, + "label": test_bc_label, + }, + "batch_size": cfg.NTIME, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr={ + 
"T_h": lambda out: out["T_c"] - cfg.T_cin, + }, + metric={"right_MSE": ppsci.metric.MSE()}, + name="right_mse", + ) + interior_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": test_interior_data, + "label": test_interior_label, + }, + "batch_size": cfg.NTIME, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.MSELoss("mean"), + output_expr=equation["heat_exchanger"].equations, + metric={"interior_MSE": ppsci.metric.MSE()}, + name="interior_mse", + ) + validator = { + left_validator.name: left_validator, + right_validator.name: right_validator, + interior_validator.name: interior_validator, + } + + # directly evaluate pretrained model(optional) + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + equation=equation, + geom=geom, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + + # visualize prediction after finished training + visu_input["qm_c"] = np.full_like(visu_input["qm_c"], cfg.qm_h) + visu_input["qm_h"] = np.full_like(visu_input["qm_c"], cfg.qm_c) + pred = solver.predict(visu_input, return_numpy=True) + plot(visu_input, pred, cfg) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.HEDeepONets(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set time-geometry + timestamps = np.linspace(0.0, 2, cfg.NTIME + 1, endpoint=True) + geom = { + "time_rect": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(0.0, 1, timestamps=timestamps), + ppsci.geometry.Interval(0, cfg.DL), + ) + } + input_dict = geom["time_rect"].sample_interior(cfg.NPOINT * cfg.NTIME, evenly=True) + test_h = np.random.rand(1).reshape([-1, 1]).astype("float32") + test_c = np.random.rand(1).reshape([-1, 1]).astype("float32") + # rearrange train data and eval data + input_dict["qm_h"] = np.tile(test_h, (cfg.NPOINT * cfg.NTIME, 1)) + input_dict["qm_c"] = np.tile(test_c, (cfg.NPOINT * cfg.NTIME, 1)) + input_dict["qm_c"] = np.full_like(input_dict["qm_c"], cfg.qm_h) + input_dict["qm_h"] = np.full_like(input_dict["qm_c"], cfg.qm_c) + output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.INFER.input_keys}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + plot(input_dict, output_dict, cfg) + + +def plot(visu_input, pred, cfg: DictConfig): + x = visu_input["x"][: cfg.NPOINT] + # plot temperature of heat boundary + plt.figure() + y = np.full_like(pred["T_h"][: cfg.NPOINT], cfg.T_hin) + plt.plot(x, y, label="t = 0.0 s") + for i in range(10): + y = pred["T_h"][cfg.NPOINT * i * 2 : cfg.NPOINT * (i * 2 + 1)] + plt.plot(x, y, label=f"t = {(i+1)*0.1:,.1f} s") + plt.xlabel("A") + plt.ylabel(r"$T_h$") + plt.legend() + plt.grid() + plt.savefig("T_h.png") + # plot temperature of cold boundary + plt.figure() + y = np.full_like(pred["T_c"][: cfg.NPOINT], cfg.T_cin) + plt.plot(x, y, label="t = 0.0 s") + for i in range(10): + y = 
pred["T_c"][cfg.NPOINT * i * 2 : cfg.NPOINT * (i * 2 + 1)] + plt.plot(x, y, label=f"t = {(i+1)*0.1:,.1f} s") + plt.xlabel("A") + plt.ylabel(r"$T_c$") + plt.legend() + plt.grid() + plt.savefig("T_c.png") + # plot temperature of wall + plt.figure() + y = np.full_like(pred["T_w"][: cfg.NPOINT], cfg.T_win) + plt.plot(x, y, label="t = 0.0 s") + for i in range(10): + y = pred["T_w"][cfg.NPOINT * i * 2 : cfg.NPOINT * (i * 2 + 1)] + plt.plot(x, y, label=f"t = {(i+1)*0.1:,.1f} s") + plt.xlabel("A") + plt.ylabel(r"$T_w$") + plt.legend() + plt.grid() + plt.savefig("T_w.png") + # plot the heat exchanger efficiency as a function of time. + plt.figure() + qm_min = np.min((visu_input["qm_h"][0], visu_input["qm_c"][0])) + eta = ( + visu_input["qm_h"][0] + * (pred["T_h"][:: cfg.NPOINT] - pred["T_h"][cfg.NPOINT - 1 :: cfg.NPOINT]) + / ( + qm_min + * (pred["T_h"][:: cfg.NPOINT] - pred["T_c"][cfg.NPOINT - 1 :: cfg.NPOINT]) + ) + ) + x = list(range(1, cfg.NTIME + 1)) + plt.plot(x, eta) + plt.xlabel("time") + plt.ylabel(r"$\eta$") + plt.grid() + plt.savefig("eta.png") + error = np.square(eta[-1] - cfg.eta_true) + logger.info( + f"The L2 norm error between the actual heat exchanger efficiency and the predicted heat exchanger efficiency is {error}" + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="heat_exchanger.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/heat_pinn/conf/heat_pinn.yaml b/examples/heat_pinn/conf/heat_pinn.yaml index 540602e045..a4ee57b700 100644 --- a/examples/heat_pinn/conf/heat_pinn.yaml +++ b/examples/heat_pinn/conf/heat_pinn.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -69,3 +70,75 @@ INFER: max_batch_size: 128 num_cpu_threads: 4 batch_size: 128 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_heat_pinn/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u"] + num_layers: 9 + hidden_size: 20 + activation: "tanh" + +# training settings +TRAIN: + epochs: 1000 + iters_per_epoch: 1 + save_freq: 20 + learning_rate: 5.0e-4 + weight: + bc_top: 0.25 + bc_bottom: 0.25 + bc_left: 0.25 + bc_right: 0.25 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + +# inference settings +INFER: + pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/heat_pinn/heat_pinn_pretrained.pdparams" + export_path: ./inference/heat_pinn + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true 
+ min_subgraph_size: 10 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 128 + num_cpu_threads: 4 + batch_size: 128 +>>>>>>> Stashed changes diff --git a/examples/heat_pinn/fdm.py b/examples/heat_pinn/fdm.py index 16a156592c..920e2e6e7b 100644 --- a/examples/heat_pinn/fdm.py +++ b/examples/heat_pinn/fdm.py @@ -1,59 +1,59 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import itertools - -import numpy as np - - -def solve(n: int, l: float) -> np.ndarray: - """ - Solves the heat equation using the finite difference method. - Reference: https://github.com/314arhaam/heat-pinn/blob/main/codes/heatman.ipynb - - Args: - n (int): The number of grid points in each direction. - l (float): The length of the square domain. - - Returns: - np.ndarray: A 2D array containing the temperature values at each grid point. - """ - bc = {"x=-l": 75.0, "x=+l": 0.0, "y=-l": 50.0, "y=+l": 0.0} - B = np.zeros([n, n]) - T = np.zeros([n**2, n**2]) - for k, (i, j) in enumerate(itertools.product(range(n), range(n))): - M = np.zeros([n, n]) - M[i, j] = -4 - if i != 0: - M[i - 1, j] = 1 - else: - B[i, j] += -bc["y=-l"] - if i != n - 1: - M[i + 1, j] = 1 - else: - B[i, j] += -bc["y=+l"] - if j != 0: - M[i, j - 1] = 1 - else: - B[i, j] += -bc["x=-l"] - if j != n - 1: - M[i, j + 1] = 1 - else: - B[i, j] += -bc["x=+l"] - m = np.reshape(M, (1, n**2)) - T[k, :] = m - b = np.reshape(B, (n**2, 1)) - T = np.matmul(np.linalg.inv(T), b) - T = T.reshape([n, n]) - return T +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools + +import numpy as np + + +def solve(n: int, l: float) -> np.ndarray: + """ + Solves the heat equation using the finite difference method. + Reference: https://github.com/314arhaam/heat-pinn/blob/main/codes/heatman.ipynb + + Args: + n (int): The number of grid points in each direction. + l (float): The length of the square domain. + + Returns: + np.ndarray: A 2D array containing the temperature values at each grid point. 
+ """ + bc = {"x=-l": 75.0, "x=+l": 0.0, "y=-l": 50.0, "y=+l": 0.0} + B = np.zeros([n, n]) + T = np.zeros([n**2, n**2]) + for k, (i, j) in enumerate(itertools.product(range(n), range(n))): + M = np.zeros([n, n]) + M[i, j] = -4 + if i != 0: + M[i - 1, j] = 1 + else: + B[i, j] += -bc["y=-l"] + if i != n - 1: + M[i + 1, j] = 1 + else: + B[i, j] += -bc["y=+l"] + if j != 0: + M[i, j - 1] = 1 + else: + B[i, j] += -bc["x=-l"] + if j != n - 1: + M[i, j + 1] = 1 + else: + B[i, j] += -bc["x=+l"] + m = np.reshape(M, (1, n**2)) + T[k, :] = m + b = np.reshape(B, (n**2, 1)) + T = np.matmul(np.linalg.inv(T), b) + T = T.reshape([n, n]) + return T diff --git a/examples/heat_pinn/heat_pinn.py b/examples/heat_pinn/heat_pinn.py index cfdf0b4b48..7772658339 100644 --- a/examples/heat_pinn/heat_pinn.py +++ b/examples/heat_pinn/heat_pinn.py @@ -1,307 +1,307 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import fdm -import hydra -import matplotlib.pyplot as plt -import numpy as np -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def plot(input_data, N_EVAL, pinn_output, fdm_output, cfg): - x = input_data["x"].reshape(N_EVAL, N_EVAL) - y = input_data["y"].reshape(N_EVAL, N_EVAL) - - plt.subplot(2, 1, 1) - plt.pcolormesh(x, y, pinn_output * 75.0, cmap="magma") - plt.colorbar() - plt.title("PINN") - plt.xlabel("x") - plt.ylabel("y") - plt.tight_layout() - plt.axis("square") - - plt.subplot(2, 1, 2) - plt.pcolormesh(x, y, fdm_output, cmap="magma") - plt.colorbar() - plt.xlabel("x") - plt.ylabel("y") - plt.title("FDM") - plt.tight_layout() - plt.axis("square") - plt.savefig(osp.join(cfg.output_dir, "pinn_fdm_comparison.png")) - plt.close() - - frames_val = np.array([-0.75, -0.5, -0.25, 0.0, +0.25, +0.5, +0.75]) - frames = [*map(int, (frames_val + 1) / 2 * (N_EVAL - 1))] - height = 3 - plt.figure("", figsize=(len(frames) * height, 2 * height)) - - for i, var_index in enumerate(frames): - plt.subplot(2, len(frames), i + 1) - plt.title(f"y = {frames_val[i]:.2f}") - plt.plot( - x[:, var_index], - pinn_output[:, var_index] * 75.0, - "r--", - lw=4.0, - label="pinn", - ) - plt.plot(x[:, var_index], fdm_output[:, var_index], "b", lw=2.0, label="FDM") - plt.ylim(0.0, 100.0) - plt.xlim(-1.0, +1.0) - plt.xlabel("x") - plt.ylabel("T") - plt.tight_layout() - plt.legend() - - for i, var_index in enumerate(frames): - plt.subplot(2, len(frames), len(frames) + i + 1) - plt.title(f"x = {frames_val[i]:.2f}") - plt.plot( - y[var_index, :], - pinn_output[var_index, :] * 75.0, - "r--", - lw=4.0, - label="pinn", - ) - plt.plot(y[var_index, :], fdm_output[var_index, :], "b", lw=2.0, label="FDM") - plt.ylim(0.0, 100.0) - plt.xlim(-1.0, +1.0) - plt.xlabel("y") - plt.ylabel("T") - plt.tight_layout() - plt.legend() - - plt.savefig(osp.join(cfg.output_dir, "profiles.png")) - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # set output 
directory - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"heat": ppsci.equation.Laplace(dim=2)} - - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((-1.0, -1.0), (1.0, 1.0))} - - # set train dataloader config - train_dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - - # set constraint - NPOINT_PDE = 99**2 - NPOINT_TOP = 25 - NPOINT_BOTTOM = 25 - NPOINT_LEFT = 25 - NPOINT_RIGHT = 25 - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["heat"].equations, - {"laplace": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_PDE}, - ppsci.loss.MSELoss("mean"), - evenly=True, - name="EQ", - ) - bc_top = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"]}, - {"u": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_TOP}, - ppsci.loss.MSELoss("mean"), - weight_dict={"u": cfg.TRAIN.weight.bc_top}, - criteria=lambda x, y: np.isclose(y, 1), - name="BC_top", - ) - bc_bottom = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"]}, - {"u": 50 / 75}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_BOTTOM}, - ppsci.loss.MSELoss("mean"), - weight_dict={"u": cfg.TRAIN.weight.bc_bottom}, - criteria=lambda x, y: np.isclose(y, -1), - name="BC_bottom", - ) - bc_left = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"]}, - {"u": 1}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_LEFT}, - ppsci.loss.MSELoss("mean"), - weight_dict={"u": cfg.TRAIN.weight.bc_left}, - criteria=lambda x, y: np.isclose(x, -1), - name="BC_left", - ) - bc_right = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"]}, - {"u": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_RIGHT}, - ppsci.loss.MSELoss("mean"), - weight_dict={"u": cfg.TRAIN.weight.bc_right}, - criteria=lambda x, y: np.isclose(x, 1), - name="BC_right", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - bc_top.name: bc_top, - bc_bottom.name: bc_bottom, - bc_left.name: bc_left, - bc_right.name: bc_right, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, - equation=equation, - geom=geom, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - # train model - solver.train() - - # begin eval - N_EVAL = 100 - input_data = geom["rect"].sample_interior(N_EVAL**2, evenly=True) - pinn_output = solver.predict(input_data, return_numpy=True)["u"].reshape( - N_EVAL, N_EVAL - ) - fdm_output = fdm.solve(N_EVAL, 1).T - mse_loss = np.mean(np.square(pinn_output - (fdm_output / 75.0))) - logger.info(f"The norm MSE loss between the FDM and PINN is {mse_loss}") - plot(input_data, N_EVAL, pinn_output, fdm_output, cfg) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # set output directory - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((-1.0, 
-1.0), (1.0, 1.0))} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - # begin eval - N_EVAL = 100 - input_data = geom["rect"].sample_interior(N_EVAL**2, evenly=True) - pinn_output = solver.predict(input_data, no_grad=True, return_numpy=True)[ - "u" - ].reshape(N_EVAL, N_EVAL) - fdm_output = fdm.solve(N_EVAL, 1).T - mse_loss = np.mean(np.square(pinn_output - (fdm_output / 75.0))) - logger.info(f"The norm MSE loss between the FDM and PINN is {mse_loss:.5e}") - plot(input_data, N_EVAL, pinn_output, fdm_output, cfg) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - cfg=cfg, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((-1.0, -1.0), (1.0, 1.0))} - # begin eval - N_EVAL = 100 - input_data = geom["rect"].sample_interior(N_EVAL**2, evenly=True) - output_data = predictor.predict( - {key: input_data[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_data = { - store_key: output_data[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_data.keys()) - }["u"].reshape(N_EVAL, N_EVAL) - fdm_output = fdm.solve(N_EVAL, 1).T - mse_loss = np.mean(np.square(output_data - (fdm_output / 75.0))) - logger.info(f"The norm MSE loss between the FDM and PINN is {mse_loss:.5e}") - plot(input_data, N_EVAL, output_data, fdm_output, cfg) - - -@hydra.main(version_base=None, config_path="./conf", config_name="heat_pinn.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from os import path as osp + +import fdm +import hydra +import matplotlib.pyplot as plt +import numpy as np +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def plot(input_data, N_EVAL, pinn_output, fdm_output, cfg): + x = input_data["x"].reshape(N_EVAL, N_EVAL) + y = input_data["y"].reshape(N_EVAL, N_EVAL) + + plt.subplot(2, 1, 1) + plt.pcolormesh(x, y, pinn_output * 75.0, cmap="magma") + plt.colorbar() + plt.title("PINN") + plt.xlabel("x") + plt.ylabel("y") + plt.tight_layout() + plt.axis("square") + + plt.subplot(2, 1, 2) + plt.pcolormesh(x, y, fdm_output, cmap="magma") + plt.colorbar() + plt.xlabel("x") + plt.ylabel("y") + plt.title("FDM") + plt.tight_layout() + plt.axis("square") + plt.savefig(osp.join(cfg.output_dir, "pinn_fdm_comparison.png")) + plt.close() + + frames_val = np.array([-0.75, -0.5, -0.25, 0.0, +0.25, +0.5, +0.75]) + frames = [*map(int, (frames_val + 1) / 2 * (N_EVAL - 1))] + height = 3 + plt.figure("", figsize=(len(frames) * height, 2 * height)) + + for i, var_index in enumerate(frames): + plt.subplot(2, len(frames), i + 1) + plt.title(f"y = {frames_val[i]:.2f}") + plt.plot( + x[:, var_index], + pinn_output[:, var_index] * 75.0, + "r--", + lw=4.0, + label="pinn", + ) + plt.plot(x[:, var_index], fdm_output[:, var_index], "b", lw=2.0, label="FDM") + plt.ylim(0.0, 100.0) + plt.xlim(-1.0, +1.0) + plt.xlabel("x") + plt.ylabel("T") + plt.tight_layout() + plt.legend() + + for i, var_index in enumerate(frames): + plt.subplot(2, len(frames), len(frames) + i + 1) + plt.title(f"x = {frames_val[i]:.2f}") + plt.plot( + y[var_index, :], + pinn_output[var_index, :] * 75.0, + "r--", + lw=4.0, + label="pinn", + ) + plt.plot(y[var_index, :], fdm_output[var_index, :], "b", lw=2.0, label="FDM") + plt.ylim(0.0, 100.0) + plt.xlim(-1.0, +1.0) + plt.xlabel("y") + plt.ylabel("T") + plt.tight_layout() + plt.legend() + + plt.savefig(osp.join(cfg.output_dir, "profiles.png")) + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # set output directory + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"heat": ppsci.equation.Laplace(dim=2)} + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((-1.0, -1.0), (1.0, 1.0))} + + # set train dataloader config + train_dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + + # set constraint + NPOINT_PDE = 99**2 + NPOINT_TOP = 25 + NPOINT_BOTTOM = 25 + NPOINT_LEFT = 25 + NPOINT_RIGHT = 25 + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["heat"].equations, + {"laplace": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_PDE}, + ppsci.loss.MSELoss("mean"), + evenly=True, + name="EQ", + ) + bc_top = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"]}, + {"u": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_TOP}, + ppsci.loss.MSELoss("mean"), + weight_dict={"u": cfg.TRAIN.weight.bc_top}, + criteria=lambda x, y: np.isclose(y, 1), + name="BC_top", + ) + bc_bottom = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"]}, + {"u": 50 / 75}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_BOTTOM}, + ppsci.loss.MSELoss("mean"), + weight_dict={"u": cfg.TRAIN.weight.bc_bottom}, + criteria=lambda x, y: np.isclose(y, -1), + name="BC_bottom", + ) + bc_left = 
ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"]}, + {"u": 1}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_LEFT}, + ppsci.loss.MSELoss("mean"), + weight_dict={"u": cfg.TRAIN.weight.bc_left}, + criteria=lambda x, y: np.isclose(x, -1), + name="BC_left", + ) + bc_right = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"]}, + {"u": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_RIGHT}, + ppsci.loss.MSELoss("mean"), + weight_dict={"u": cfg.TRAIN.weight.bc_right}, + criteria=lambda x, y: np.isclose(x, 1), + name="BC_right", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + bc_top.name: bc_top, + bc_bottom.name: bc_bottom, + bc_left.name: bc_left, + bc_right.name: bc_right, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + seed=cfg.seed, + equation=equation, + geom=geom, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + # train model + solver.train() + + # begin eval + N_EVAL = 100 + input_data = geom["rect"].sample_interior(N_EVAL**2, evenly=True) + pinn_output = solver.predict(input_data, return_numpy=True)["u"].reshape( + N_EVAL, N_EVAL + ) + fdm_output = fdm.solve(N_EVAL, 1).T + mse_loss = np.mean(np.square(pinn_output - (fdm_output / 75.0))) + logger.info(f"The norm MSE loss between the FDM and PINN is {mse_loss}") + plot(input_data, N_EVAL, pinn_output, fdm_output, cfg) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # set output directory + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((-1.0, -1.0), (1.0, 1.0))} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + # begin eval + N_EVAL = 100 + input_data = geom["rect"].sample_interior(N_EVAL**2, evenly=True) + pinn_output = solver.predict(input_data, no_grad=True, return_numpy=True)[ + "u" + ].reshape(N_EVAL, N_EVAL) + fdm_output = fdm.solve(N_EVAL, 1).T + mse_loss = np.mean(np.square(pinn_output - (fdm_output / 75.0))) + logger.info(f"The norm MSE loss between the FDM and PINN is {mse_loss:.5e}") + plot(input_data, N_EVAL, pinn_output, fdm_output, cfg) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + cfg=cfg, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((-1.0, -1.0), (1.0, 1.0))} + # begin eval + N_EVAL = 100 + input_data = geom["rect"].sample_interior(N_EVAL**2, evenly=True) + output_data = predictor.predict( + {key: input_data[key] for key in 
cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_data = { + store_key: output_data[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_data.keys()) + }["u"].reshape(N_EVAL, N_EVAL) + fdm_output = fdm.solve(N_EVAL, 1).T + mse_loss = np.mean(np.square(output_data - (fdm_output / 75.0))) + logger.info(f"The norm MSE loss between the FDM and PINN is {mse_loss:.5e}") + plot(input_data, N_EVAL, output_data, fdm_output, cfg) + + +@hydra.main(version_base=None, config_path="./conf", config_name="heat_pinn.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/hpinns/conf/hpinns.yaml b/examples/hpinns/conf/hpinns.yaml index c5e1942af5..33798faf31 100644 --- a/examples/hpinns/conf/hpinns.yaml +++ b/examples/hpinns/conf/hpinns.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -86,3 +87,92 @@ INFER: batch_size: 128 max_batch_size: 128 num_cpu_threads: 4 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_hpinns/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +DATASET_PATH: ./datasets/hpinns_holo_train.mat +DATASET_PATH_VALID: ./datasets/hpinns_holo_valid.mat +log_freq: 20 + +# set working condition +TRAIN_MODE: aug_lag # "soft", "penalty", "aug_lag" +TRAIN_K: 9 + +# model settings +MODEL: + re_net: + input_keys: ['x_cos_1', 'x_sin_1', 'x_cos_2', 'x_sin_2', 'x_cos_3', 'x_sin_3', 'x_cos_4', 'x_sin_4', 'x_cos_5', 'x_sin_5', 'x_cos_6', 'x_sin_6', 'y', 'y_cos_1', 'y_sin_1'] + output_keys: ["e_re"] + num_layers: 4 + hidden_size: 48 + activation: "tanh" + im_net: + input_keys: ${MODEL.re_net.input_keys} + output_keys: ["e_im"] + num_layers: 4 + hidden_size: 48 + activation: "tanh" + eps_net: + input_keys: ${MODEL.re_net.input_keys} + output_keys: ["eps"] + num_layers: 4 + hidden_size: 48 + activation: "tanh" + +# training settings +TRAIN: + epochs: 20000 + iters_per_epoch: 1 + eval_during_train: false + learning_rate: 0.001 + max_iter: 15000 + epochs_lbfgs: 1 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + +# inference settings +INFER: + pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/hPINNs/hpinns_pretrained.pdparams" + export_path: ./inference/hpinns + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + output_keys: ["e_re", "e_im", "eps"] + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 8000 + gpu_id: 0 + batch_size: 128 + max_batch_size: 128 + num_cpu_threads: 4 +>>>>>>> Stashed changes diff --git 
a/examples/hpinns/functions.py b/examples/hpinns/functions.py index 27bfb4e654..73a1152e61 100644 --- a/examples/hpinns/functions.py +++ b/examples/hpinns/functions.py @@ -1,336 +1,336 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This module is heavily adapted from https://github.com/lululxvi/hpinn -""" - -from typing import Dict -from typing import List - -import numpy as np -import paddle -import paddle.nn.functional as F - -"""All functions used in hpinns example, including functions of transform and loss.""" - -# define constants -BOX = np.array([[-2, -2], [2, 3]]) -DPML = 1 -OMEGA = 2 * np.pi -SIGMA0 = -np.log(1e-20) / (4 * DPML**3 / 3) -l_BOX = BOX + np.array([[-DPML, -DPML], [DPML, DPML]]) -beta = 2.0 -mu = 2 - -# define variables which will be updated during training -lambda_re: np.ndarray = None -lambda_im: np.ndarray = None -loss_weight: List[float] = None -train_mode: str = None - -# define log variables for plotting -loss_log = [] # record all losses, [pde, lag, obj] -loss_obj = 0.0 # record last objective loss of each k -lambda_log = [] # record all lambdas - - -# transform -def transform_in(input): - # Periodic BC in x - P = BOX[1][0] - BOX[0][0] + 2 * DPML - w = 2 * np.pi / P - x, y = input["x"], input["y"] - input_transformed = {} - for t in range(1, 7): - input_transformed[f"x_cos_{t}"] = paddle.cos(t * w * x) - input_transformed[f"x_sin_{t}"] = paddle.sin(t * w * x) - input_transformed["y"] = y - input_transformed["y_cos_1"] = paddle.cos(OMEGA * y) - input_transformed["y_sin_1"] = paddle.sin(OMEGA * y) - - return input_transformed - - -def transform_out_all(input, var): - y = input["y"] - # Zero Dirichlet BC - a, b = BOX[0][1] - DPML, BOX[1][1] + DPML - t = (1 - paddle.exp(a - y)) * (1 - paddle.exp(y - b)) - return t * var - - -def transform_out_real_part(input, out): - re = out["e_re"] - trans_out = transform_out_all(input, re) - return {"e_real": trans_out} - - -def transform_out_imaginary_part(input, out): - im = out["e_im"] - trans_out = transform_out_all(input, im) - return {"e_imaginary": trans_out} - - -def transform_out_epsilon(input, out): - eps = out["eps"] - # 1 <= eps <= 12 - eps = F.sigmoid(eps) * 11 + 1 - return {"epsilon": eps} - - -# loss -def init_lambda(output_dict: Dict[str, paddle.Tensor], bound: int): - """Init lambdas of Lagrangian and weights of losses. - - Args: - output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. - bound (int): The bound of the data range that should be used. 
- """ - global lambda_re, lambda_im, loss_weight - x, y = output_dict["x"], output_dict["y"] - lambda_re = np.zeros((len(x[bound:]), 1), paddle.get_default_dtype()) - lambda_im = np.zeros((len(y[bound:]), 1), paddle.get_default_dtype()) - # loss_weight: [PDE loss 1, PDE loss 2, Lagrangian loss 1, Lagrangian loss 2, objective loss] - if train_mode == "aug_lag": - loss_weight = [0.5 * mu] * 2 + [1.0, 1.0] + [1.0] - else: - loss_weight = [0.5 * mu] * 2 + [0.0, 0.0] + [1.0] - - -def update_lambda(output_dict: Dict[str, paddle.Tensor], bound: int): - """Update lambdas of Lagrangian. - - Args: - output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. - bound (int): The bound of the data range that should be used. - """ - global lambda_re, lambda_im, lambda_log - loss_re, loss_im = compute_real_and_imaginary_loss(output_dict) - loss_re = loss_re[bound:] - loss_im = loss_im[bound:] - lambda_re += mu * loss_re.numpy() - lambda_im += mu * loss_im.numpy() - lambda_log.append([lambda_re.copy().squeeze(), lambda_im.copy().squeeze()]) - - -def update_mu(): - """Update mu.""" - global mu, loss_weight - mu *= beta - loss_weight[:2] = [0.5 * mu] * 2 - - -def _sigma_1(d): - return SIGMA0 * d**2 * np.heaviside(d, 0) - - -def _sigma_2(d): - return 2 * SIGMA0 * d * np.heaviside(d, 0) - - -def sigma(x, a, b): - """sigma(x) = 0 if a < x < b, else grows cubically from zero.""" - return _sigma_1(a - x) + _sigma_1(x - b) - - -def dsigma(x, a, b): - return -_sigma_2(a - x) + _sigma_2(x - b) - - -def perfectly_matched_layers(x: paddle.Tensor, y: paddle.Tensor): - """Apply the technique of perfectly matched layers(PMLs) proposed by paper arXiv:2108.05348. - - Args: - x (paddle.Tensor): one of input contains tensor. - y (paddle.Tensor): one of input contains tensor. - - Returns: - np.ndarray: Parameters of pde formula. - """ - x = x.numpy() - y = y.numpy() - - sigma_x = sigma(x, BOX[0][0], BOX[1][0]) - AB1 = 1 / (1 + 1j / OMEGA * sigma_x) ** 2 - A1, B1 = AB1.real, AB1.imag - - dsigma_x = dsigma(x, BOX[0][0], BOX[1][0]) - AB2 = -1j / OMEGA * dsigma_x * AB1 / (1 + 1j / OMEGA * sigma_x) - A2, B2 = AB2.real, AB2.imag - - sigma_y = sigma(y, BOX[0][1], BOX[1][1]) - AB3 = 1 / (1 + 1j / OMEGA * sigma_y) ** 2 - A3, B3 = AB3.real, AB3.imag - - dsigma_y = dsigma(y, BOX[0][1], BOX[1][1]) - AB4 = -1j / OMEGA * dsigma_y * AB3 / (1 + 1j / OMEGA * sigma_y) - A4, B4 = AB4.real, AB4.imag - return A1, B1, A2, B2, A3, B3, A4, B4 - - -def obj_func_J(y): - # Approximate the objective function - y = y.numpy() + 1.5 - h = 0.2 - return 1 / (h * np.pi**0.5) * np.exp(-((y / h) ** 2)) * (np.abs(y) < 0.5) - - -def compute_real_and_imaginary_loss( - output_dict: Dict[str, paddle.Tensor] -) -> paddle.Tensor: - """Compute real and imaginary_loss. - - Args: - output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. - - Returns: - paddle.Tensor: Real and imaginary_loss. 
- """ - x, y = output_dict["x"], output_dict["y"] - e_re = output_dict["e_real"] - e_im = output_dict["e_imaginary"] - eps = output_dict["epsilon"] - - condition = np.logical_and(y.numpy() < 0, y.numpy() > -1).astype( - paddle.get_default_dtype() - ) - - eps = eps * condition + 1 - condition - - de_re_x = output_dict["de_re_x"] - de_re_y = output_dict["de_re_y"] - de_re_xx = output_dict["de_re_xx"] - de_re_yy = output_dict["de_re_yy"] - de_im_x = output_dict["de_im_x"] - de_im_y = output_dict["de_im_y"] - de_im_xx = output_dict["de_im_xx"] - de_im_yy = output_dict["de_im_yy"] - - a1, b1, a2, b2, a3, b3, a4, b4 = perfectly_matched_layers(x, y) - - loss_re = ( - (a1 * de_re_xx + a2 * de_re_x + a3 * de_re_yy + a4 * de_re_y) / OMEGA - - (b1 * de_im_xx + b2 * de_im_x + b3 * de_im_yy + b4 * de_im_y) / OMEGA - + eps * OMEGA * e_re - ) - loss_im = ( - (a1 * de_im_xx + a2 * de_im_x + a3 * de_im_yy + a4 * de_im_y) / OMEGA - + (b1 * de_re_xx + b2 * de_re_x + b3 * de_re_yy + b4 * de_re_y) / OMEGA - + eps * OMEGA * e_im - + obj_func_J(y) - ) - return loss_re, loss_im - - -def pde_loss_fun(output_dict: Dict[str, paddle.Tensor], *args) -> paddle.Tensor: - """Compute pde loss and lagrangian loss. - - Args: - output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. - - Returns: - paddle.Tensor: PDE loss (and lagrangian loss if using Augmented Lagrangian method). - """ - global loss_log - bound = int(output_dict["bound"]) - loss_re, loss_im = compute_real_and_imaginary_loss(output_dict) - loss_re = loss_re[bound:] - loss_im = loss_im[bound:] - - loss_eqs1 = paddle.mean(loss_re**2) - loss_eqs2 = paddle.mean(loss_im**2) - # augmented_Lagrangian - if lambda_im is None: - init_lambda(output_dict, bound) - loss_lag1 = paddle.mean(loss_re * lambda_re) - loss_lag2 = paddle.mean(loss_im * lambda_im) - - losses = ( - loss_weight[0] * loss_eqs1 - + loss_weight[1] * loss_eqs2 - + loss_weight[2] * loss_lag1 - + loss_weight[3] * loss_lag2 - ) - loss_log.append(float(loss_eqs1 + loss_eqs2)) # for plotting - loss_log.append(float(loss_lag1 + loss_lag2)) # for plotting - return {"pde": losses} - - -def obj_loss_fun(output_dict: Dict[str, paddle.Tensor], *args) -> paddle.Tensor: - """Compute objective loss. - - Args: - output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. - - Returns: - paddle.Tensor: Objective loss. - """ - global loss_log, loss_obj - x, y = output_dict["x"], output_dict["y"] - bound = int(output_dict["bound"]) - e_re = output_dict["e_real"] - e_im = output_dict["e_imaginary"] - - f1 = paddle.heaviside((x + 0.5) * (0.5 - x), paddle.full([], 0.5)) - f2 = paddle.heaviside((y - 1) * (2 - y), paddle.full([], 0.5)) - j = e_re[:bound] ** 2 + e_im[:bound] ** 2 - f1[:bound] * f2[:bound] - loss_opt_area = paddle.mean(j**2) - - if lambda_im is None: - init_lambda(output_dict, bound) - losses = loss_weight[4] * loss_opt_area - loss_log.append(float(loss_opt_area)) # for plotting - loss_obj = float(loss_opt_area) # for plotting - return {"obj": losses} - - -def eval_loss_fun(output_dict: Dict[str, paddle.Tensor], *args) -> paddle.Tensor: - """Compute objective loss for evaluation. - - Args: - output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. - - Returns: - paddle.Tensor: Objective loss. 
- """ - x, y = output_dict["x"], output_dict["y"] - e_re = output_dict["e_real"] - e_im = output_dict["e_imaginary"] - - f1 = paddle.heaviside((x + 0.5) * (0.5 - x), paddle.full([], 0.5)) - f2 = paddle.heaviside((y - 1) * (2 - y), paddle.full([], 0.5)) - j = e_re**2 + e_im**2 - f1 * f2 - losses = paddle.mean(j**2) - - return {"eval": losses} - - -def eval_metric_fun( - output_dict: Dict[str, paddle.Tensor], *args -) -> Dict[str, paddle.Tensor]: - """Compute metric for evaluation. - - Args: - output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. - - Returns: - Dict[str, paddle.Tensor]: MSE metric. - """ - loss_re, loss_im = compute_real_and_imaginary_loss(output_dict) - eps_opt = paddle.concat([loss_re, loss_im], axis=-1) - metric = paddle.mean(eps_opt**2) - - metric_dict = {"eval_metric": metric} - return metric_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module is heavily adapted from https://github.com/lululxvi/hpinn +""" + +from typing import Dict +from typing import List + +import numpy as np +import paddle +import paddle.nn.functional as F + +"""All functions used in hpinns example, including functions of transform and loss.""" + +# define constants +BOX = np.array([[-2, -2], [2, 3]]) +DPML = 1 +OMEGA = 2 * np.pi +SIGMA0 = -np.log(1e-20) / (4 * DPML**3 / 3) +l_BOX = BOX + np.array([[-DPML, -DPML], [DPML, DPML]]) +beta = 2.0 +mu = 2 + +# define variables which will be updated during training +lambda_re: np.ndarray = None +lambda_im: np.ndarray = None +loss_weight: List[float] = None +train_mode: str = None + +# define log variables for plotting +loss_log = [] # record all losses, [pde, lag, obj] +loss_obj = 0.0 # record last objective loss of each k +lambda_log = [] # record all lambdas + + +# transform +def transform_in(input): + # Periodic BC in x + P = BOX[1][0] - BOX[0][0] + 2 * DPML + w = 2 * np.pi / P + x, y = input["x"], input["y"] + input_transformed = {} + for t in range(1, 7): + input_transformed[f"x_cos_{t}"] = paddle.cos(t * w * x) + input_transformed[f"x_sin_{t}"] = paddle.sin(t * w * x) + input_transformed["y"] = y + input_transformed["y_cos_1"] = paddle.cos(OMEGA * y) + input_transformed["y_sin_1"] = paddle.sin(OMEGA * y) + + return input_transformed + + +def transform_out_all(input, var): + y = input["y"] + # Zero Dirichlet BC + a, b = BOX[0][1] - DPML, BOX[1][1] + DPML + t = (1 - paddle.exp(a - y)) * (1 - paddle.exp(y - b)) + return t * var + + +def transform_out_real_part(input, out): + re = out["e_re"] + trans_out = transform_out_all(input, re) + return {"e_real": trans_out} + + +def transform_out_imaginary_part(input, out): + im = out["e_im"] + trans_out = transform_out_all(input, im) + return {"e_imaginary": trans_out} + + +def transform_out_epsilon(input, out): + eps = out["eps"] + # 1 <= eps <= 12 + eps = F.sigmoid(eps) * 11 + 1 + return {"epsilon": eps} + + +# loss +def init_lambda(output_dict: Dict[str, paddle.Tensor], bound: int): + """Init lambdas of 
Lagrangian and weights of losses. + + Args: + output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. + bound (int): The bound of the data range that should be used. + """ + global lambda_re, lambda_im, loss_weight + x, y = output_dict["x"], output_dict["y"] + lambda_re = np.zeros((len(x[bound:]), 1), paddle.get_default_dtype()) + lambda_im = np.zeros((len(y[bound:]), 1), paddle.get_default_dtype()) + # loss_weight: [PDE loss 1, PDE loss 2, Lagrangian loss 1, Lagrangian loss 2, objective loss] + if train_mode == "aug_lag": + loss_weight = [0.5 * mu] * 2 + [1.0, 1.0] + [1.0] + else: + loss_weight = [0.5 * mu] * 2 + [0.0, 0.0] + [1.0] + + +def update_lambda(output_dict: Dict[str, paddle.Tensor], bound: int): + """Update lambdas of Lagrangian. + + Args: + output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. + bound (int): The bound of the data range that should be used. + """ + global lambda_re, lambda_im, lambda_log + loss_re, loss_im = compute_real_and_imaginary_loss(output_dict) + loss_re = loss_re[bound:] + loss_im = loss_im[bound:] + lambda_re += mu * loss_re.numpy() + lambda_im += mu * loss_im.numpy() + lambda_log.append([lambda_re.copy().squeeze(), lambda_im.copy().squeeze()]) + + +def update_mu(): + """Update mu.""" + global mu, loss_weight + mu *= beta + loss_weight[:2] = [0.5 * mu] * 2 + + +def _sigma_1(d): + return SIGMA0 * d**2 * np.heaviside(d, 0) + + +def _sigma_2(d): + return 2 * SIGMA0 * d * np.heaviside(d, 0) + + +def sigma(x, a, b): + """sigma(x) = 0 if a < x < b, else grows cubically from zero.""" + return _sigma_1(a - x) + _sigma_1(x - b) + + +def dsigma(x, a, b): + return -_sigma_2(a - x) + _sigma_2(x - b) + + +def perfectly_matched_layers(x: paddle.Tensor, y: paddle.Tensor): + """Apply the technique of perfectly matched layers(PMLs) proposed by paper arXiv:2108.05348. + + Args: + x (paddle.Tensor): one of input contains tensor. + y (paddle.Tensor): one of input contains tensor. + + Returns: + np.ndarray: Parameters of pde formula. + """ + x = x.numpy() + y = y.numpy() + + sigma_x = sigma(x, BOX[0][0], BOX[1][0]) + AB1 = 1 / (1 + 1j / OMEGA * sigma_x) ** 2 + A1, B1 = AB1.real, AB1.imag + + dsigma_x = dsigma(x, BOX[0][0], BOX[1][0]) + AB2 = -1j / OMEGA * dsigma_x * AB1 / (1 + 1j / OMEGA * sigma_x) + A2, B2 = AB2.real, AB2.imag + + sigma_y = sigma(y, BOX[0][1], BOX[1][1]) + AB3 = 1 / (1 + 1j / OMEGA * sigma_y) ** 2 + A3, B3 = AB3.real, AB3.imag + + dsigma_y = dsigma(y, BOX[0][1], BOX[1][1]) + AB4 = -1j / OMEGA * dsigma_y * AB3 / (1 + 1j / OMEGA * sigma_y) + A4, B4 = AB4.real, AB4.imag + return A1, B1, A2, B2, A3, B3, A4, B4 + + +def obj_func_J(y): + # Approximate the objective function + y = y.numpy() + 1.5 + h = 0.2 + return 1 / (h * np.pi**0.5) * np.exp(-((y / h) ** 2)) * (np.abs(y) < 0.5) + + +def compute_real_and_imaginary_loss( + output_dict: Dict[str, paddle.Tensor] +) -> paddle.Tensor: + """Compute real and imaginary_loss. + + Args: + output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. + + Returns: + paddle.Tensor: Real and imaginary_loss. 
+ """ + x, y = output_dict["x"], output_dict["y"] + e_re = output_dict["e_real"] + e_im = output_dict["e_imaginary"] + eps = output_dict["epsilon"] + + condition = np.logical_and(y.numpy() < 0, y.numpy() > -1).astype( + paddle.get_default_dtype() + ) + + eps = eps * condition + 1 - condition + + de_re_x = output_dict["de_re_x"] + de_re_y = output_dict["de_re_y"] + de_re_xx = output_dict["de_re_xx"] + de_re_yy = output_dict["de_re_yy"] + de_im_x = output_dict["de_im_x"] + de_im_y = output_dict["de_im_y"] + de_im_xx = output_dict["de_im_xx"] + de_im_yy = output_dict["de_im_yy"] + + a1, b1, a2, b2, a3, b3, a4, b4 = perfectly_matched_layers(x, y) + + loss_re = ( + (a1 * de_re_xx + a2 * de_re_x + a3 * de_re_yy + a4 * de_re_y) / OMEGA + - (b1 * de_im_xx + b2 * de_im_x + b3 * de_im_yy + b4 * de_im_y) / OMEGA + + eps * OMEGA * e_re + ) + loss_im = ( + (a1 * de_im_xx + a2 * de_im_x + a3 * de_im_yy + a4 * de_im_y) / OMEGA + + (b1 * de_re_xx + b2 * de_re_x + b3 * de_re_yy + b4 * de_re_y) / OMEGA + + eps * OMEGA * e_im + + obj_func_J(y) + ) + return loss_re, loss_im + + +def pde_loss_fun(output_dict: Dict[str, paddle.Tensor], *args) -> paddle.Tensor: + """Compute pde loss and lagrangian loss. + + Args: + output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. + + Returns: + paddle.Tensor: PDE loss (and lagrangian loss if using Augmented Lagrangian method). + """ + global loss_log + bound = int(output_dict["bound"]) + loss_re, loss_im = compute_real_and_imaginary_loss(output_dict) + loss_re = loss_re[bound:] + loss_im = loss_im[bound:] + + loss_eqs1 = paddle.mean(loss_re**2) + loss_eqs2 = paddle.mean(loss_im**2) + # augmented_Lagrangian + if lambda_im is None: + init_lambda(output_dict, bound) + loss_lag1 = paddle.mean(loss_re * lambda_re) + loss_lag2 = paddle.mean(loss_im * lambda_im) + + losses = ( + loss_weight[0] * loss_eqs1 + + loss_weight[1] * loss_eqs2 + + loss_weight[2] * loss_lag1 + + loss_weight[3] * loss_lag2 + ) + loss_log.append(float(loss_eqs1 + loss_eqs2)) # for plotting + loss_log.append(float(loss_lag1 + loss_lag2)) # for plotting + return {"pde": losses} + + +def obj_loss_fun(output_dict: Dict[str, paddle.Tensor], *args) -> paddle.Tensor: + """Compute objective loss. + + Args: + output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. + + Returns: + paddle.Tensor: Objective loss. + """ + global loss_log, loss_obj + x, y = output_dict["x"], output_dict["y"] + bound = int(output_dict["bound"]) + e_re = output_dict["e_real"] + e_im = output_dict["e_imaginary"] + + f1 = paddle.heaviside((x + 0.5) * (0.5 - x), paddle.full([], 0.5)) + f2 = paddle.heaviside((y - 1) * (2 - y), paddle.full([], 0.5)) + j = e_re[:bound] ** 2 + e_im[:bound] ** 2 - f1[:bound] * f2[:bound] + loss_opt_area = paddle.mean(j**2) + + if lambda_im is None: + init_lambda(output_dict, bound) + losses = loss_weight[4] * loss_opt_area + loss_log.append(float(loss_opt_area)) # for plotting + loss_obj = float(loss_opt_area) # for plotting + return {"obj": losses} + + +def eval_loss_fun(output_dict: Dict[str, paddle.Tensor], *args) -> paddle.Tensor: + """Compute objective loss for evaluation. + + Args: + output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. + + Returns: + paddle.Tensor: Objective loss. 
+ """ + x, y = output_dict["x"], output_dict["y"] + e_re = output_dict["e_real"] + e_im = output_dict["e_imaginary"] + + f1 = paddle.heaviside((x + 0.5) * (0.5 - x), paddle.full([], 0.5)) + f2 = paddle.heaviside((y - 1) * (2 - y), paddle.full([], 0.5)) + j = e_re**2 + e_im**2 - f1 * f2 + losses = paddle.mean(j**2) + + return {"eval": losses} + + +def eval_metric_fun( + output_dict: Dict[str, paddle.Tensor], *args +) -> Dict[str, paddle.Tensor]: + """Compute metric for evaluation. + + Args: + output_dict (Dict[str, paddle.Tensor]): Dict of outputs contains tensor. + + Returns: + Dict[str, paddle.Tensor]: MSE metric. + """ + loss_re, loss_im = compute_real_and_imaginary_loss(output_dict) + eps_opt = paddle.concat([loss_re, loss_im], axis=-1) + metric = paddle.mean(eps_opt**2) + + metric_dict = {"eval_metric": metric} + return metric_dict diff --git a/examples/hpinns/holography.py b/examples/hpinns/holography.py index 290f2b3c2a..63d5d46edb 100644 --- a/examples/hpinns/holography.py +++ b/examples/hpinns/holography.py @@ -1,495 +1,495 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This module is heavily adapted from https://github.com/lululxvi/hpinn -""" - -from os import path as osp - -import functions as func_module -import hydra -import numpy as np -import paddle -import plotting as plot_module -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # open FLAG for higher order differential operator - paddle.framework.core.set_prim_eager_enabled(True) - - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - model_re = ppsci.arch.MLP(**cfg.MODEL.re_net) - model_im = ppsci.arch.MLP(**cfg.MODEL.im_net) - model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net) - - # initialize params - func_module.train_mode = cfg.TRAIN_MODE - loss_log_obj = [] - - # register transform - model_re.register_input_transform(func_module.transform_in) - model_im.register_input_transform(func_module.transform_in) - model_eps.register_input_transform(func_module.transform_in) - - model_re.register_output_transform(func_module.transform_out_real_part) - model_im.register_output_transform(func_module.transform_out_imaginary_part) - model_eps.register_output_transform(func_module.transform_out_epsilon) - - model_list = ppsci.arch.ModelList((model_re, model_im, model_eps)) - - # initialize Adam optimizer - optimizer_adam = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)( - (model_re, model_im, model_eps) - ) - - # manually build constraint(s) - label_keys = ("x", "y", "bound", "e_real", "e_imaginary", "epsilon") - label_keys_derivative = ( - "de_re_x", - "de_re_y", - "de_re_xx", - "de_re_yy", - "de_im_x", - "de_im_y", - "de_im_xx", - "de_im_yy", - ) - output_expr = { - "x": lambda out: out["x"], - "y": lambda out: out["y"], - 
"bound": lambda out: out["bound"], - "e_real": lambda out: out["e_real"], - "e_imaginary": lambda out: out["e_imaginary"], - "epsilon": lambda out: out["epsilon"], - "de_re_x": lambda out: jacobian(out["e_real"], out["x"]), - "de_re_y": lambda out: jacobian(out["e_real"], out["y"]), - "de_re_xx": lambda out: hessian(out["e_real"], out["x"]), - "de_re_yy": lambda out: hessian(out["e_real"], out["y"]), - "de_im_x": lambda out: jacobian(out["e_imaginary"], out["x"]), - "de_im_y": lambda out: jacobian(out["e_imaginary"], out["y"]), - "de_im_xx": lambda out: hessian(out["e_imaginary"], out["x"]), - "de_im_yy": lambda out: hessian(out["e_imaginary"], out["y"]), - } - - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("x", "y", "bound"), - "label_keys": label_keys + label_keys_derivative, - "alias_dict": { - "e_real": "x", - "e_imaginary": "x", - "epsilon": "x", - **{k: "x" for k in label_keys_derivative}, - }, - }, - }, - ppsci.loss.FunctionalLoss(func_module.pde_loss_fun), - output_expr, - name="sup_constraint_pde", - ) - sup_constraint_obj = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH, - "input_keys": ("x", "y", "bound"), - "label_keys": label_keys, - "alias_dict": {"e_real": "x", "e_imaginary": "x", "epsilon": "x"}, - }, - }, - ppsci.loss.FunctionalLoss(func_module.obj_loss_fun), - {key: lambda out, k=key: out[k] for key in label_keys}, - name="sup_constraint_obj", - ) - constraint = { - sup_constraint_pde.name: sup_constraint_pde, - sup_constraint_obj.name: sup_constraint_obj, - } - - # manually build validator - sup_validator_opt = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_VALID, - "input_keys": ("x", "y", "bound"), - "label_keys": label_keys + label_keys_derivative, - "alias_dict": { - "x": "x_opt", - "y": "y_opt", - "e_real": "x_opt", - "e_imaginary": "x_opt", - "epsilon": "x_opt", - **{k: "x_opt" for k in label_keys_derivative}, - }, - }, - }, - ppsci.loss.FunctionalLoss(func_module.eval_loss_fun), - output_expr, - {"mse": ppsci.metric.FunctionalMetric(func_module.eval_metric_fun)}, - name="opt_sup", - ) - sup_validator_val = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_VALID, - "input_keys": ("x", "y", "bound"), - "label_keys": label_keys + label_keys_derivative, - "alias_dict": { - "x": "x_val", - "y": "y_val", - "e_real": "x_val", - "e_imaginary": "x_val", - "epsilon": "x_val", - **{k: "x_val" for k in label_keys_derivative}, - }, - }, - }, - ppsci.loss.FunctionalLoss(func_module.eval_loss_fun), - output_expr, - {"mse": ppsci.metric.FunctionalMetric(func_module.eval_metric_fun)}, - name="val_sup", - ) - validator = { - sup_validator_opt.name: sup_validator_opt, - sup_validator_val.name: sup_validator_val, - } - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - constraint, - cfg.output_dir, - optimizer_adam, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # initialize LBFGS optimizer - optimizer_lbfgs = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)( - (model_re, model_im, model_eps) - ) - - # train: soft 
constraint, epoch=1 for lbfgs - if cfg.TRAIN_MODE == "soft": - solver = ppsci.solver.Solver( - model_list, - constraint, - cfg.output_dir, - optimizer_lbfgs, - None, - cfg.TRAIN.epochs_lbfgs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - # append objective loss for plot - loss_log_obj.append(func_module.loss_obj) - - # penalty and augmented Lagrangian, difference between the two is updating of lambda - if cfg.TRAIN_MODE != "soft": - train_dict = ppsci.utils.reader.load_mat_file( - cfg.DATASET_PATH, ("x", "y", "bound") - ) - in_dict = {"x": train_dict["x"], "y": train_dict["y"]} - expr_dict = output_expr.copy() - expr_dict.pop("bound") - - func_module.init_lambda(in_dict, int(train_dict["bound"])) - func_module.lambda_log.append( - [ - func_module.lambda_re.copy().squeeze(), - func_module.lambda_im.copy().squeeze(), - ] - ) - - for i in range(1, cfg.TRAIN_K + 1): - pred_dict = solver.predict( - in_dict, - expr_dict, - batch_size=np.shape(train_dict["x"])[0], - no_grad=False, - ) - func_module.update_lambda(pred_dict, int(train_dict["bound"])) - - func_module.update_mu() - logger.message(f"Iteration {i}: mu = {func_module.mu}\n") - - solver = ppsci.solver.Solver( - model_list, - constraint, - cfg.output_dir, - optimizer_lbfgs, - None, - cfg.TRAIN.epochs_lbfgs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - validator=validator, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - - # train model - solver.train() - # evaluate - solver.eval() - # append objective loss for plot - loss_log_obj.append(func_module.loss_obj) - - ################# plotting ################### - # log of loss - loss_log = np.array(func_module.loss_log).reshape(-1, 3) - - plot_module.set_params( - cfg.TRAIN_MODE, cfg.output_dir, cfg.DATASET_PATH, cfg.DATASET_PATH_VALID - ) - plot_module.plot_6a(loss_log) - if cfg.TRAIN_MODE != "soft": - plot_module.prepare_data(solver, expr_dict) - plot_module.plot_6b(loss_log_obj) - plot_module.plot_6c7c(func_module.lambda_log) - plot_module.plot_6d(func_module.lambda_log) - plot_module.plot_6ef(func_module.lambda_log) - - -def evaluate(cfg: DictConfig): - # open FLAG for higher order differential operator - paddle.framework.core.set_prim_eager_enabled(True) - - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - model_re = ppsci.arch.MLP(**cfg.MODEL.re_net) - model_im = ppsci.arch.MLP(**cfg.MODEL.im_net) - model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net) - - # initialize params - func_module.train_mode = cfg.TRAIN_MODE - - # register transform - model_re.register_input_transform(func_module.transform_in) - model_im.register_input_transform(func_module.transform_in) - model_eps.register_input_transform(func_module.transform_in) - - model_re.register_output_transform(func_module.transform_out_real_part) - model_im.register_output_transform(func_module.transform_out_imaginary_part) - model_eps.register_output_transform(func_module.transform_out_epsilon) - - model_list = ppsci.arch.ModelList((model_re, model_im, model_eps)) - - # manually build constraint(s) - label_keys = ("x", "y", "bound", "e_real", "e_imaginary", "epsilon") - label_keys_derivative = ( - "de_re_x", - "de_re_y", - "de_re_xx", - "de_re_yy", - "de_im_x", - "de_im_y", - "de_im_xx", - "de_im_yy", - ) - 
output_expr = { - "x": lambda out: out["x"], - "y": lambda out: out["y"], - "bound": lambda out: out["bound"], - "e_real": lambda out: out["e_real"], - "e_imaginary": lambda out: out["e_imaginary"], - "epsilon": lambda out: out["epsilon"], - "de_re_x": lambda out: jacobian(out["e_real"], out["x"]), - "de_re_y": lambda out: jacobian(out["e_real"], out["y"]), - "de_re_xx": lambda out: hessian(out["e_real"], out["x"]), - "de_re_yy": lambda out: hessian(out["e_real"], out["y"]), - "de_im_x": lambda out: jacobian(out["e_imaginary"], out["x"]), - "de_im_y": lambda out: jacobian(out["e_imaginary"], out["y"]), - "de_im_xx": lambda out: hessian(out["e_imaginary"], out["x"]), - "de_im_yy": lambda out: hessian(out["e_imaginary"], out["y"]), - } - - # manually build validator - sup_validator_opt = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_VALID, - "input_keys": ("x", "y", "bound"), - "label_keys": label_keys + label_keys_derivative, - "alias_dict": { - "x": "x_opt", - "y": "y_opt", - "e_real": "x_opt", - "e_imaginary": "x_opt", - "epsilon": "x_opt", - **{k: "x_opt" for k in label_keys_derivative}, - }, - }, - }, - ppsci.loss.FunctionalLoss(func_module.eval_loss_fun), - output_expr, - {"mse": ppsci.metric.FunctionalMetric(func_module.eval_metric_fun)}, - name="opt_sup", - ) - sup_validator_val = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATASET_PATH_VALID, - "input_keys": ("x", "y", "bound"), - "label_keys": label_keys + label_keys_derivative, - "alias_dict": { - "x": "x_val", - "y": "y_val", - "e_real": "x_val", - "e_imaginary": "x_val", - "epsilon": "x_val", - **{k: "x_val" for k in label_keys_derivative}, - }, - }, - }, - ppsci.loss.FunctionalLoss(func_module.eval_loss_fun), - output_expr, - {"mse": ppsci.metric.FunctionalMetric(func_module.eval_metric_fun)}, - name="val_sup", - ) - validator = { - sup_validator_opt.name: sup_validator_opt, - sup_validator_val.name: sup_validator_val, - } - - solver = ppsci.solver.Solver( - model_list, - output_dir=cfg.output_dir, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - - # evaluate - solver.eval() - - -def export(cfg: DictConfig): - # set model - model_re = ppsci.arch.MLP(**cfg.MODEL.re_net) - model_im = ppsci.arch.MLP(**cfg.MODEL.im_net) - model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net) - - # register transform - model_re.register_input_transform(func_module.transform_in) - model_im.register_input_transform(func_module.transform_in) - model_eps.register_input_transform(func_module.transform_in) - - model_re.register_output_transform(func_module.transform_out_real_part) - model_im.register_output_transform(func_module.transform_out_imaginary_part) - model_eps.register_output_transform(func_module.transform_out_epsilon) - - # wrap to a model_list - model_list = ppsci.arch.ModelList((model_re, model_im, model_eps)) - - # initialize solver - solver = ppsci.solver.Solver( - model_list, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in ["x", "y"]}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - valid_dict = ppsci.utils.reader.load_mat_file( - cfg.DATASET_PATH_VALID, ("x_val", 
"y_val", "bound") - ) - input_dict = {"x": valid_dict["x_val"], "y": valid_dict["y_val"]} - - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.INFER.output_keys, output_dict.keys()) - } - - # plotting E and eps - N = ((func_module.l_BOX[1] - func_module.l_BOX[0]) / 0.05).astype(int) - input_eval = np.stack((input_dict["x"], input_dict["y"]), axis=-1).reshape( - N[0], N[1], 2 - ) - e_re = output_dict["e_re"].reshape(N[0], N[1]) - e_im = output_dict["e_im"].reshape(N[0], N[1]) - eps = output_dict["eps"].reshape(N[0], N[1]) - v_visual = e_re**2 + e_im**2 - field_visual = np.stack((v_visual, eps), axis=-1) - plot_module.field_name = ["Fig7_E", "Fig7_eps"] - plot_module.FIGNAME = "hpinns_pred" - plot_module.plot_field_holo(input_eval, field_visual) - - -@hydra.main(version_base=None, config_path="./conf", config_name="hpinns.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +This module is heavily adapted from https://github.com/lululxvi/hpinn +""" + +from os import path as osp + +import functions as func_module +import hydra +import numpy as np +import paddle +import plotting as plot_module +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # open FLAG for higher order differential operator + paddle.framework.core.set_prim_eager_enabled(True) + + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + model_re = ppsci.arch.MLP(**cfg.MODEL.re_net) + model_im = ppsci.arch.MLP(**cfg.MODEL.im_net) + model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net) + + # initialize params + func_module.train_mode = cfg.TRAIN_MODE + loss_log_obj = [] + + # register transform + model_re.register_input_transform(func_module.transform_in) + model_im.register_input_transform(func_module.transform_in) + model_eps.register_input_transform(func_module.transform_in) + + model_re.register_output_transform(func_module.transform_out_real_part) + model_im.register_output_transform(func_module.transform_out_imaginary_part) + model_eps.register_output_transform(func_module.transform_out_epsilon) + + model_list = ppsci.arch.ModelList((model_re, model_im, model_eps)) + + # initialize Adam optimizer + optimizer_adam = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)( + (model_re, model_im, model_eps) + ) + + # manually build constraint(s) + label_keys = ("x", "y", "bound", "e_real", "e_imaginary", "epsilon") + label_keys_derivative = ( + "de_re_x", + "de_re_y", + "de_re_xx", + "de_re_yy", + "de_im_x", + "de_im_y", + "de_im_xx", + "de_im_yy", + ) + output_expr = { + "x": lambda out: out["x"], + "y": lambda out: out["y"], + "bound": lambda out: out["bound"], + "e_real": lambda out: out["e_real"], + "e_imaginary": lambda out: out["e_imaginary"], + "epsilon": lambda out: out["epsilon"], + "de_re_x": lambda out: jacobian(out["e_real"], out["x"]), + "de_re_y": lambda out: jacobian(out["e_real"], out["y"]), + "de_re_xx": lambda out: hessian(out["e_real"], out["x"]), + "de_re_yy": lambda out: hessian(out["e_real"], out["y"]), + "de_im_x": lambda out: jacobian(out["e_imaginary"], out["x"]), + "de_im_y": lambda out: jacobian(out["e_imaginary"], out["y"]), + "de_im_xx": lambda out: hessian(out["e_imaginary"], out["x"]), + "de_im_yy": lambda out: hessian(out["e_imaginary"], out["y"]), + } + + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("x", "y", "bound"), + "label_keys": label_keys + label_keys_derivative, + "alias_dict": { + "e_real": "x", + "e_imaginary": "x", + "epsilon": "x", + **{k: "x" for k in label_keys_derivative}, + }, + }, + }, + ppsci.loss.FunctionalLoss(func_module.pde_loss_fun), + output_expr, + name="sup_constraint_pde", + ) + sup_constraint_obj = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH, + "input_keys": ("x", "y", "bound"), + "label_keys": label_keys, + "alias_dict": {"e_real": "x", "e_imaginary": "x", "epsilon": "x"}, + }, + }, + ppsci.loss.FunctionalLoss(func_module.obj_loss_fun), + {key: lambda out, k=key: out[k] for key in label_keys}, + name="sup_constraint_obj", + ) + constraint = { + sup_constraint_pde.name: sup_constraint_pde, + sup_constraint_obj.name: 
sup_constraint_obj, + } + + # manually build validator + sup_validator_opt = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_VALID, + "input_keys": ("x", "y", "bound"), + "label_keys": label_keys + label_keys_derivative, + "alias_dict": { + "x": "x_opt", + "y": "y_opt", + "e_real": "x_opt", + "e_imaginary": "x_opt", + "epsilon": "x_opt", + **{k: "x_opt" for k in label_keys_derivative}, + }, + }, + }, + ppsci.loss.FunctionalLoss(func_module.eval_loss_fun), + output_expr, + {"mse": ppsci.metric.FunctionalMetric(func_module.eval_metric_fun)}, + name="opt_sup", + ) + sup_validator_val = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_VALID, + "input_keys": ("x", "y", "bound"), + "label_keys": label_keys + label_keys_derivative, + "alias_dict": { + "x": "x_val", + "y": "y_val", + "e_real": "x_val", + "e_imaginary": "x_val", + "epsilon": "x_val", + **{k: "x_val" for k in label_keys_derivative}, + }, + }, + }, + ppsci.loss.FunctionalLoss(func_module.eval_loss_fun), + output_expr, + {"mse": ppsci.metric.FunctionalMetric(func_module.eval_metric_fun)}, + name="val_sup", + ) + validator = { + sup_validator_opt.name: sup_validator_opt, + sup_validator_val.name: sup_validator_val, + } + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + constraint, + cfg.output_dir, + optimizer_adam, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # initialize LBFGS optimizer + optimizer_lbfgs = ppsci.optimizer.LBFGS(max_iter=cfg.TRAIN.max_iter)( + (model_re, model_im, model_eps) + ) + + # train: soft constraint, epoch=1 for lbfgs + if cfg.TRAIN_MODE == "soft": + solver = ppsci.solver.Solver( + model_list, + constraint, + cfg.output_dir, + optimizer_lbfgs, + None, + cfg.TRAIN.epochs_lbfgs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + # append objective loss for plot + loss_log_obj.append(func_module.loss_obj) + + # penalty and augmented Lagrangian, difference between the two is updating of lambda + if cfg.TRAIN_MODE != "soft": + train_dict = ppsci.utils.reader.load_mat_file( + cfg.DATASET_PATH, ("x", "y", "bound") + ) + in_dict = {"x": train_dict["x"], "y": train_dict["y"]} + expr_dict = output_expr.copy() + expr_dict.pop("bound") + + func_module.init_lambda(in_dict, int(train_dict["bound"])) + func_module.lambda_log.append( + [ + func_module.lambda_re.copy().squeeze(), + func_module.lambda_im.copy().squeeze(), + ] + ) + + for i in range(1, cfg.TRAIN_K + 1): + pred_dict = solver.predict( + in_dict, + expr_dict, + batch_size=np.shape(train_dict["x"])[0], + no_grad=False, + ) + func_module.update_lambda(pred_dict, int(train_dict["bound"])) + + func_module.update_mu() + logger.message(f"Iteration {i}: mu = {func_module.mu}\n") + + solver = ppsci.solver.Solver( + model_list, + constraint, + cfg.output_dir, + optimizer_lbfgs, + None, + cfg.TRAIN.epochs_lbfgs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + validator=validator, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + + # train model + solver.train() + # evaluate + 
solver.eval() + # append objective loss for plot + loss_log_obj.append(func_module.loss_obj) + + ################# plotting ################### + # log of loss + loss_log = np.array(func_module.loss_log).reshape(-1, 3) + + plot_module.set_params( + cfg.TRAIN_MODE, cfg.output_dir, cfg.DATASET_PATH, cfg.DATASET_PATH_VALID + ) + plot_module.plot_6a(loss_log) + if cfg.TRAIN_MODE != "soft": + plot_module.prepare_data(solver, expr_dict) + plot_module.plot_6b(loss_log_obj) + plot_module.plot_6c7c(func_module.lambda_log) + plot_module.plot_6d(func_module.lambda_log) + plot_module.plot_6ef(func_module.lambda_log) + + +def evaluate(cfg: DictConfig): + # open FLAG for higher order differential operator + paddle.framework.core.set_prim_eager_enabled(True) + + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + model_re = ppsci.arch.MLP(**cfg.MODEL.re_net) + model_im = ppsci.arch.MLP(**cfg.MODEL.im_net) + model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net) + + # initialize params + func_module.train_mode = cfg.TRAIN_MODE + + # register transform + model_re.register_input_transform(func_module.transform_in) + model_im.register_input_transform(func_module.transform_in) + model_eps.register_input_transform(func_module.transform_in) + + model_re.register_output_transform(func_module.transform_out_real_part) + model_im.register_output_transform(func_module.transform_out_imaginary_part) + model_eps.register_output_transform(func_module.transform_out_epsilon) + + model_list = ppsci.arch.ModelList((model_re, model_im, model_eps)) + + # manually build constraint(s) + label_keys = ("x", "y", "bound", "e_real", "e_imaginary", "epsilon") + label_keys_derivative = ( + "de_re_x", + "de_re_y", + "de_re_xx", + "de_re_yy", + "de_im_x", + "de_im_y", + "de_im_xx", + "de_im_yy", + ) + output_expr = { + "x": lambda out: out["x"], + "y": lambda out: out["y"], + "bound": lambda out: out["bound"], + "e_real": lambda out: out["e_real"], + "e_imaginary": lambda out: out["e_imaginary"], + "epsilon": lambda out: out["epsilon"], + "de_re_x": lambda out: jacobian(out["e_real"], out["x"]), + "de_re_y": lambda out: jacobian(out["e_real"], out["y"]), + "de_re_xx": lambda out: hessian(out["e_real"], out["x"]), + "de_re_yy": lambda out: hessian(out["e_real"], out["y"]), + "de_im_x": lambda out: jacobian(out["e_imaginary"], out["x"]), + "de_im_y": lambda out: jacobian(out["e_imaginary"], out["y"]), + "de_im_xx": lambda out: hessian(out["e_imaginary"], out["x"]), + "de_im_yy": lambda out: hessian(out["e_imaginary"], out["y"]), + } + + # manually build validator + sup_validator_opt = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_VALID, + "input_keys": ("x", "y", "bound"), + "label_keys": label_keys + label_keys_derivative, + "alias_dict": { + "x": "x_opt", + "y": "y_opt", + "e_real": "x_opt", + "e_imaginary": "x_opt", + "epsilon": "x_opt", + **{k: "x_opt" for k in label_keys_derivative}, + }, + }, + }, + ppsci.loss.FunctionalLoss(func_module.eval_loss_fun), + output_expr, + {"mse": ppsci.metric.FunctionalMetric(func_module.eval_metric_fun)}, + name="opt_sup", + ) + sup_validator_val = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATASET_PATH_VALID, + "input_keys": ("x", "y", "bound"), + "label_keys": label_keys + label_keys_derivative, + "alias_dict": { + "x": "x_val", + "y": "y_val", + "e_real": "x_val", + 
"e_imaginary": "x_val", + "epsilon": "x_val", + **{k: "x_val" for k in label_keys_derivative}, + }, + }, + }, + ppsci.loss.FunctionalLoss(func_module.eval_loss_fun), + output_expr, + {"mse": ppsci.metric.FunctionalMetric(func_module.eval_metric_fun)}, + name="val_sup", + ) + validator = { + sup_validator_opt.name: sup_validator_opt, + sup_validator_val.name: sup_validator_val, + } + + solver = ppsci.solver.Solver( + model_list, + output_dir=cfg.output_dir, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + + # evaluate + solver.eval() + + +def export(cfg: DictConfig): + # set model + model_re = ppsci.arch.MLP(**cfg.MODEL.re_net) + model_im = ppsci.arch.MLP(**cfg.MODEL.im_net) + model_eps = ppsci.arch.MLP(**cfg.MODEL.eps_net) + + # register transform + model_re.register_input_transform(func_module.transform_in) + model_im.register_input_transform(func_module.transform_in) + model_eps.register_input_transform(func_module.transform_in) + + model_re.register_output_transform(func_module.transform_out_real_part) + model_im.register_output_transform(func_module.transform_out_imaginary_part) + model_eps.register_output_transform(func_module.transform_out_epsilon) + + # wrap to a model_list + model_list = ppsci.arch.ModelList((model_re, model_im, model_eps)) + + # initialize solver + solver = ppsci.solver.Solver( + model_list, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in ["x", "y"]}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + valid_dict = ppsci.utils.reader.load_mat_file( + cfg.DATASET_PATH_VALID, ("x_val", "y_val", "bound") + ) + input_dict = {"x": valid_dict["x_val"], "y": valid_dict["y_val"]} + + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.INFER.output_keys, output_dict.keys()) + } + + # plotting E and eps + N = ((func_module.l_BOX[1] - func_module.l_BOX[0]) / 0.05).astype(int) + input_eval = np.stack((input_dict["x"], input_dict["y"]), axis=-1).reshape( + N[0], N[1], 2 + ) + e_re = output_dict["e_re"].reshape(N[0], N[1]) + e_im = output_dict["e_im"].reshape(N[0], N[1]) + eps = output_dict["eps"].reshape(N[0], N[1]) + v_visual = e_re**2 + e_im**2 + field_visual = np.stack((v_visual, eps), axis=-1) + plot_module.field_name = ["Fig7_E", "Fig7_eps"] + plot_module.FIGNAME = "hpinns_pred" + plot_module.plot_field_holo(input_eval, field_visual) + + +@hydra.main(version_base=None, config_path="./conf", config_name="hpinns.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/hpinns/plotting.py b/examples/hpinns/plotting.py index c7f958b0ae..3d5698130f 100644 --- a/examples/hpinns/plotting.py +++ b/examples/hpinns/plotting.py @@ -1,346 +1,346 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This module is heavily adapted from https://github.com/lululxvi/hpinn -""" - -import os -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional - -import functions as func_module -import matplotlib.pyplot as plt -import numpy as np -import seaborn as sns -from matplotlib import ticker - -import ppsci - -"""All plotting functions.""" - -# define constants -font = {"weight": "normal", "size": 10} -input_name = ("x", "y") -field_name = [ - "Fig7_E", - "Fig7_eps", - "Fig_6C_lambda_re_1", - "Fig_6C_lambda_im_1", - "Fig_6C_lambda_re_4", - "Fig_6C_lambda_im_4", - "Fig_6C_lambda_re_9", - "Fig_6C_lambda_im_9", -] - -# define constants which will be assigned later -FIGNAME: str = "" -OUTPUT_DIR: str = "" -DATASET_PATH: str = "" -DATASET_PATH_VALID: str = "" -input_valid: np.ndarray = None -output_valid: np.ndarray = None -input_train: np.ndarray = None - - -def set_params(figname, output_dir, dataset_path, dataset_path_valid): - global FIGNAME, OUTPUT_DIR, DATASET_PATH, DATASET_PATH_VALID - FIGNAME = figname - OUTPUT_DIR = output_dir + "figure/" - os.makedirs(OUTPUT_DIR, exist_ok=True) - DATASET_PATH = dataset_path - DATASET_PATH_VALID = dataset_path_valid - - -def prepare_data(solver: ppsci.solver.Solver, expr_dict: Dict[str, Callable]): - """Prepare data of input of training and validation and generate - output of validation by predicting. - - Args: - solver (ppsci.solver.Solver): Object of ppsci.solver.Solver(). - expr_dict (Dict[str, Callable]): Expression dict, which guide to - compute equation variable with callable function. - """ - global input_valid, output_valid, input_train - # train data - train_dict = ppsci.utils.reader.load_mat_file(DATASET_PATH, ("x", "y", "bound")) - - bound = int(train_dict["bound"]) - x_train = train_dict["x"][bound:] - y_train = train_dict["y"][bound:] - input_train = np.stack((x_train, y_train), axis=-1).reshape(-1, 2) - - # valid data - N = ((func_module.l_BOX[1] - func_module.l_BOX[0]) / 0.05).astype(int) - - valid_dict = ppsci.utils.reader.load_mat_file( - DATASET_PATH_VALID, ("x_val", "y_val", "bound") - ) - in_dict_val = {"x": valid_dict["x_val"], "y": valid_dict["y_val"]} - func_module.init_lambda(in_dict_val, int(valid_dict["bound"])) - - pred_dict_val = solver.predict( - in_dict_val, - expr_dict, - batch_size=np.shape(valid_dict["x_val"])[0], - no_grad=False, - return_numpy=True, - ) - - input_valid = np.stack((valid_dict["x_val"], valid_dict["y_val"]), axis=-1).reshape( - N[0], N[1], 2 - ) - output_valid = np.array( - [ - pred_dict_val["e_real"], - pred_dict_val["e_imaginary"], - pred_dict_val["epsilon"], - ] - ).T.reshape(N[0], N[1], 3) - - -def plot_field_holo( - coord_visual: np.ndarray, - field_visual: np.ndarray, - coord_lambda: Optional[np.ndarray] = None, - field_lambda: Optional[np.ndarray] = None, -): - """Plot fields of of holography example. - - Args: - coord_visual (np.ndarray): The coord of epsilon and |E|**2. 
- field_visual (np.ndarray): The filed of epsilon and |E|**2. - coord_lambda (Optional[np.ndarray], optional): The coord of lambda. Defaults to None. - field_lambda (Optional[np.ndarray], optional): The filed of lambda. Defaults to None. - """ - fmin, fmax = np.array([0, 1.0]), np.array([0.6, 12]) - cmin, cmax = coord_visual.min(axis=(0, 1)), coord_visual.max(axis=(0, 1)) - emin, emax = np.array([-3, -1]), np.array([3, 0]) - x_pos = coord_visual[:, :, 0] - y_pos = coord_visual[:, :, 1] - - for fi in range(len(field_name)): - if fi == 0: - # Fig7_E - plt.figure(101, figsize=(8, 6)) - plt.clf() - plt.rcParams["font.size"] = 20 - f_true = field_visual[..., fi] - plt.pcolormesh( - x_pos, - y_pos, - f_true, - cmap="rainbow", - shading="gouraud", - antialiased=True, - snap=True, - ) - cb = plt.colorbar() - plt.axis((cmin[0], cmax[0], cmin[1], cmax[1])) - plt.clim(vmin=fmin[fi], vmax=fmax[fi]) - elif fi == 1: - # Fig7_eps - plt.figure(201, figsize=(8, 1.5)) - plt.clf() - plt.rcParams["font.size"] = 20 - f_true = field_visual[..., fi] - plt.pcolormesh( - x_pos, - y_pos, - f_true, - cmap="rainbow", - shading="gouraud", - antialiased=True, - snap=True, - ) - cb = plt.colorbar() - plt.axis((emin[0], emax[0], emin[1], emax[1])) - plt.clim(vmin=fmin[fi], vmax=fmax[fi]) - elif coord_lambda is not None and field_lambda is not None: - # Fig_6C_lambda_ - plt.figure(fi * 100 + 101, figsize=(8, 6)) - plt.clf() - plt.rcParams["font.size"] = 20 - f_true = field_lambda[..., fi - 2] - plt.scatter( - coord_lambda[..., 0], - coord_lambda[..., 1], - c=f_true, - cmap="rainbow", - alpha=0.6, - ) - cb = plt.colorbar() - plt.axis((cmin[0], cmax[0], cmin[1], cmax[1])) - - # colorbar settings - cb.ax.tick_params(labelsize=20) - tick_locator = ticker.MaxNLocator( - nbins=5 - ) # the number of scale values ​​on the colorbar - cb.locator = tick_locator - cb.update_ticks() - - plt.xlabel(f"${str(input_name[0])}$", fontdict=font) - plt.ylabel(f"${str(input_name[1])}$", fontdict=font) - plt.yticks(size=10) - plt.xticks(size=10) - plt.savefig( - os.path.join( - OUTPUT_DIR, - f"{FIGNAME}_{str(field_name[fi])}.jpg", - ) - ) - - -def plot_6a(log_loss: np.ndarray): - """Plot Fig.6 A of paper. - - Args: - log_loss (np.ndarray): Losses of all training's iterations. - """ - plt.figure(300, figsize=(8, 6)) - smooth_step = 100 # how many steps of loss are squeezed to one point, num_points is epoch/smooth_step - if log_loss.shape[0] % smooth_step != 0: - vis_loss_ = log_loss[: -(log_loss.shape[0] % smooth_step), :].reshape( - -1, smooth_step, log_loss.shape[1] - ) - else: - vis_loss_ = log_loss.reshape(-1, smooth_step, log_loss.shape[1]) - - vis_loss = vis_loss_.mean(axis=1).reshape(-1, 3) - vis_loss_total = vis_loss[:, :].sum(axis=1) - vis_loss[:, 1] = vis_loss[:, 2] - vis_loss[:, 2] = vis_loss_total - for i in range(vis_loss.shape[1]): - plt.semilogy(np.arange(vis_loss.shape[0]) * smooth_step, vis_loss[:, i]) - plt.legend( - ["PDE loss", "Objective loss", "Total loss"], - loc="lower left", - prop=font, - ) - plt.xlabel("Iteration ", fontdict=font) - plt.ylabel("Loss ", fontdict=font) - plt.grid() - plt.yticks(size=10) - plt.xticks(size=10) - plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_A.jpg")) - - -def plot_6b(log_loss_obj: List[float]): - """Plot Fig.6 B of paper. - - Args: - log_loss_obj (List[float]): Objective losses of last iteration of each k. 
- """ - plt.figure(400, figsize=(10, 6)) - plt.clf() - plt.plot(np.arange(len(log_loss_obj)), log_loss_obj, "bo-") - plt.xlabel("k", fontdict=font) - plt.ylabel("Objective", fontdict=font) - plt.grid() - plt.yticks(size=10) - plt.xticks(size=10) - plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_B.jpg")) - - -def plot_6c7c(log_lambda: List[np.ndarray]): - """Plot Fig.6 Cs and Fig.7.Cs of paper. - - Args: - log_lambda (List[np.ndarray]): Lambdas of each k. - """ - # plot Fig.6 Cs and Fig.7.Cs of paper - global input_valid, output_valid, input_train - - field_lambda = np.concatenate( - [log_lambda[1], log_lambda[4], log_lambda[9]], axis=0 - ).T - v_visual = output_valid[..., 0] ** 2 + output_valid[..., 1] ** 2 - field_visual = np.stack((v_visual, output_valid[..., -1]), axis=-1) - plot_field_holo(input_valid, field_visual, input_train, field_lambda) - - -def plot_6d(log_lambda: List[np.ndarray]): - """Plot Fig.6 D of paper. - - Args: - log_lambda (List[np.ndarray]): Lambdas of each k. - """ - # lambda/mu - mu_ = 2 ** np.arange(1, 11) - log_lambda = np.array(log_lambda) / mu_[:, None, None] - # randomly pick 3 lambda points to represent all points of each k - ind = np.random.randint(low=0, high=np.shape(log_lambda)[-1], size=3) - la_mu_ind = log_lambda[:, :, ind] - marker = ["ro-", "bo:", "r*-", "b*:", "rp-", "bp:"] - plt.figure(500, figsize=(7, 5)) - plt.clf() - for i in range(6): - plt.plot( - np.arange(0, 10), - la_mu_ind[:, int(i % 2), int(i / 2)], - marker[i], - linewidth=2, - ) - plt.legend( - ["Re, 1", "Im, 1", "Re, 2", "Im, 2", "Re, 3", "Im, 3"], - loc="upper right", - prop=font, - ) - plt.grid() - plt.xlabel("k", fontdict=font) - plt.ylabel(r"$ \lambda^k / \mu^k_F$", fontdict=font) - plt.yticks(size=12) - plt.xticks(size=12) - plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_D_lambda.jpg")) - - -def plot_6ef(log_lambda: List[np.ndarray]): - """Plot Fig.6 E and Fig.6.F of paper. - - Args: - log_lambda (List[np.ndarray]): Lambdas of each k. - """ - # lambda/mu - mu_ = 2 ** np.arange(1, 11) - log_lambda = np.array(log_lambda) / mu_[:, None, None] - # pick k=1,4,6,9 - iter_ind = [1, 4, 6, 9] - plt.figure(600, figsize=(5, 5)) - plt.clf() - for i in iter_ind: - sns.kdeplot(log_lambda[i, 0, :], label="k = " + str(i), cut=0, linewidth=2) - plt.legend(prop=font) - plt.grid() - plt.xlim([-0.1, 0.1]) - plt.xlabel(r"$ \lambda^k_{Re} / \mu^k_F$", fontdict=font) - plt.ylabel("Frequency", fontdict=font) - plt.yticks(size=12) - plt.xticks(size=12) - plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_E.jpg")) - - plt.figure(700, figsize=(5, 5)) - plt.clf() - for i in iter_ind: - sns.kdeplot(log_lambda[i, 1, :], label="k = " + str(i), cut=0, linewidth=2) - plt.legend(prop=font) - plt.grid() - plt.xlim([-0.1, 0.1]) - plt.xlabel(r"$ \lambda^k_{Im} / \mu^k_F$", fontdict=font) - plt.ylabel("Frequency", fontdict=font) - plt.yticks(size=12) - plt.xticks(size=12) - plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_F.jpg")) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module is heavily adapted from https://github.com/lululxvi/hpinn +""" + +import os +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional + +import functions as func_module +import matplotlib.pyplot as plt +import numpy as np +import seaborn as sns +from matplotlib import ticker + +import ppsci + +"""All plotting functions.""" + +# define constants +font = {"weight": "normal", "size": 10} +input_name = ("x", "y") +field_name = [ + "Fig7_E", + "Fig7_eps", + "Fig_6C_lambda_re_1", + "Fig_6C_lambda_im_1", + "Fig_6C_lambda_re_4", + "Fig_6C_lambda_im_4", + "Fig_6C_lambda_re_9", + "Fig_6C_lambda_im_9", +] + +# define constants which will be assigned later +FIGNAME: str = "" +OUTPUT_DIR: str = "" +DATASET_PATH: str = "" +DATASET_PATH_VALID: str = "" +input_valid: np.ndarray = None +output_valid: np.ndarray = None +input_train: np.ndarray = None + + +def set_params(figname, output_dir, dataset_path, dataset_path_valid): + global FIGNAME, OUTPUT_DIR, DATASET_PATH, DATASET_PATH_VALID + FIGNAME = figname + OUTPUT_DIR = output_dir + "figure/" + os.makedirs(OUTPUT_DIR, exist_ok=True) + DATASET_PATH = dataset_path + DATASET_PATH_VALID = dataset_path_valid + + +def prepare_data(solver: ppsci.solver.Solver, expr_dict: Dict[str, Callable]): + """Prepare data of input of training and validation and generate + output of validation by predicting. + + Args: + solver (ppsci.solver.Solver): Object of ppsci.solver.Solver(). + expr_dict (Dict[str, Callable]): Expression dict, which guide to + compute equation variable with callable function. + """ + global input_valid, output_valid, input_train + # train data + train_dict = ppsci.utils.reader.load_mat_file(DATASET_PATH, ("x", "y", "bound")) + + bound = int(train_dict["bound"]) + x_train = train_dict["x"][bound:] + y_train = train_dict["y"][bound:] + input_train = np.stack((x_train, y_train), axis=-1).reshape(-1, 2) + + # valid data + N = ((func_module.l_BOX[1] - func_module.l_BOX[0]) / 0.05).astype(int) + + valid_dict = ppsci.utils.reader.load_mat_file( + DATASET_PATH_VALID, ("x_val", "y_val", "bound") + ) + in_dict_val = {"x": valid_dict["x_val"], "y": valid_dict["y_val"]} + func_module.init_lambda(in_dict_val, int(valid_dict["bound"])) + + pred_dict_val = solver.predict( + in_dict_val, + expr_dict, + batch_size=np.shape(valid_dict["x_val"])[0], + no_grad=False, + return_numpy=True, + ) + + input_valid = np.stack((valid_dict["x_val"], valid_dict["y_val"]), axis=-1).reshape( + N[0], N[1], 2 + ) + output_valid = np.array( + [ + pred_dict_val["e_real"], + pred_dict_val["e_imaginary"], + pred_dict_val["epsilon"], + ] + ).T.reshape(N[0], N[1], 3) + + +def plot_field_holo( + coord_visual: np.ndarray, + field_visual: np.ndarray, + coord_lambda: Optional[np.ndarray] = None, + field_lambda: Optional[np.ndarray] = None, +): + """Plot fields of of holography example. + + Args: + coord_visual (np.ndarray): The coord of epsilon and |E|**2. + field_visual (np.ndarray): The filed of epsilon and |E|**2. + coord_lambda (Optional[np.ndarray], optional): The coord of lambda. Defaults to None. + field_lambda (Optional[np.ndarray], optional): The filed of lambda. Defaults to None. 
+ """ + fmin, fmax = np.array([0, 1.0]), np.array([0.6, 12]) + cmin, cmax = coord_visual.min(axis=(0, 1)), coord_visual.max(axis=(0, 1)) + emin, emax = np.array([-3, -1]), np.array([3, 0]) + x_pos = coord_visual[:, :, 0] + y_pos = coord_visual[:, :, 1] + + for fi in range(len(field_name)): + if fi == 0: + # Fig7_E + plt.figure(101, figsize=(8, 6)) + plt.clf() + plt.rcParams["font.size"] = 20 + f_true = field_visual[..., fi] + plt.pcolormesh( + x_pos, + y_pos, + f_true, + cmap="rainbow", + shading="gouraud", + antialiased=True, + snap=True, + ) + cb = plt.colorbar() + plt.axis((cmin[0], cmax[0], cmin[1], cmax[1])) + plt.clim(vmin=fmin[fi], vmax=fmax[fi]) + elif fi == 1: + # Fig7_eps + plt.figure(201, figsize=(8, 1.5)) + plt.clf() + plt.rcParams["font.size"] = 20 + f_true = field_visual[..., fi] + plt.pcolormesh( + x_pos, + y_pos, + f_true, + cmap="rainbow", + shading="gouraud", + antialiased=True, + snap=True, + ) + cb = plt.colorbar() + plt.axis((emin[0], emax[0], emin[1], emax[1])) + plt.clim(vmin=fmin[fi], vmax=fmax[fi]) + elif coord_lambda is not None and field_lambda is not None: + # Fig_6C_lambda_ + plt.figure(fi * 100 + 101, figsize=(8, 6)) + plt.clf() + plt.rcParams["font.size"] = 20 + f_true = field_lambda[..., fi - 2] + plt.scatter( + coord_lambda[..., 0], + coord_lambda[..., 1], + c=f_true, + cmap="rainbow", + alpha=0.6, + ) + cb = plt.colorbar() + plt.axis((cmin[0], cmax[0], cmin[1], cmax[1])) + + # colorbar settings + cb.ax.tick_params(labelsize=20) + tick_locator = ticker.MaxNLocator( + nbins=5 + ) # the number of scale values ​​on the colorbar + cb.locator = tick_locator + cb.update_ticks() + + plt.xlabel(f"${str(input_name[0])}$", fontdict=font) + plt.ylabel(f"${str(input_name[1])}$", fontdict=font) + plt.yticks(size=10) + plt.xticks(size=10) + plt.savefig( + os.path.join( + OUTPUT_DIR, + f"{FIGNAME}_{str(field_name[fi])}.jpg", + ) + ) + + +def plot_6a(log_loss: np.ndarray): + """Plot Fig.6 A of paper. + + Args: + log_loss (np.ndarray): Losses of all training's iterations. + """ + plt.figure(300, figsize=(8, 6)) + smooth_step = 100 # how many steps of loss are squeezed to one point, num_points is epoch/smooth_step + if log_loss.shape[0] % smooth_step != 0: + vis_loss_ = log_loss[: -(log_loss.shape[0] % smooth_step), :].reshape( + -1, smooth_step, log_loss.shape[1] + ) + else: + vis_loss_ = log_loss.reshape(-1, smooth_step, log_loss.shape[1]) + + vis_loss = vis_loss_.mean(axis=1).reshape(-1, 3) + vis_loss_total = vis_loss[:, :].sum(axis=1) + vis_loss[:, 1] = vis_loss[:, 2] + vis_loss[:, 2] = vis_loss_total + for i in range(vis_loss.shape[1]): + plt.semilogy(np.arange(vis_loss.shape[0]) * smooth_step, vis_loss[:, i]) + plt.legend( + ["PDE loss", "Objective loss", "Total loss"], + loc="lower left", + prop=font, + ) + plt.xlabel("Iteration ", fontdict=font) + plt.ylabel("Loss ", fontdict=font) + plt.grid() + plt.yticks(size=10) + plt.xticks(size=10) + plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_A.jpg")) + + +def plot_6b(log_loss_obj: List[float]): + """Plot Fig.6 B of paper. + + Args: + log_loss_obj (List[float]): Objective losses of last iteration of each k. + """ + plt.figure(400, figsize=(10, 6)) + plt.clf() + plt.plot(np.arange(len(log_loss_obj)), log_loss_obj, "bo-") + plt.xlabel("k", fontdict=font) + plt.ylabel("Objective", fontdict=font) + plt.grid() + plt.yticks(size=10) + plt.xticks(size=10) + plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_B.jpg")) + + +def plot_6c7c(log_lambda: List[np.ndarray]): + """Plot Fig.6 Cs and Fig.7.Cs of paper. 
+ + Args: + log_lambda (List[np.ndarray]): Lambdas of each k. + """ + # plot Fig.6 Cs and Fig.7.Cs of paper + global input_valid, output_valid, input_train + + field_lambda = np.concatenate( + [log_lambda[1], log_lambda[4], log_lambda[9]], axis=0 + ).T + v_visual = output_valid[..., 0] ** 2 + output_valid[..., 1] ** 2 + field_visual = np.stack((v_visual, output_valid[..., -1]), axis=-1) + plot_field_holo(input_valid, field_visual, input_train, field_lambda) + + +def plot_6d(log_lambda: List[np.ndarray]): + """Plot Fig.6 D of paper. + + Args: + log_lambda (List[np.ndarray]): Lambdas of each k. + """ + # lambda/mu + mu_ = 2 ** np.arange(1, 11) + log_lambda = np.array(log_lambda) / mu_[:, None, None] + # randomly pick 3 lambda points to represent all points of each k + ind = np.random.randint(low=0, high=np.shape(log_lambda)[-1], size=3) + la_mu_ind = log_lambda[:, :, ind] + marker = ["ro-", "bo:", "r*-", "b*:", "rp-", "bp:"] + plt.figure(500, figsize=(7, 5)) + plt.clf() + for i in range(6): + plt.plot( + np.arange(0, 10), + la_mu_ind[:, int(i % 2), int(i / 2)], + marker[i], + linewidth=2, + ) + plt.legend( + ["Re, 1", "Im, 1", "Re, 2", "Im, 2", "Re, 3", "Im, 3"], + loc="upper right", + prop=font, + ) + plt.grid() + plt.xlabel("k", fontdict=font) + plt.ylabel(r"$ \lambda^k / \mu^k_F$", fontdict=font) + plt.yticks(size=12) + plt.xticks(size=12) + plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_D_lambda.jpg")) + + +def plot_6ef(log_lambda: List[np.ndarray]): + """Plot Fig.6 E and Fig.6.F of paper. + + Args: + log_lambda (List[np.ndarray]): Lambdas of each k. + """ + # lambda/mu + mu_ = 2 ** np.arange(1, 11) + log_lambda = np.array(log_lambda) / mu_[:, None, None] + # pick k=1,4,6,9 + iter_ind = [1, 4, 6, 9] + plt.figure(600, figsize=(5, 5)) + plt.clf() + for i in iter_ind: + sns.kdeplot(log_lambda[i, 0, :], label="k = " + str(i), cut=0, linewidth=2) + plt.legend(prop=font) + plt.grid() + plt.xlim([-0.1, 0.1]) + plt.xlabel(r"$ \lambda^k_{Re} / \mu^k_F$", fontdict=font) + plt.ylabel("Frequency", fontdict=font) + plt.yticks(size=12) + plt.xticks(size=12) + plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_E.jpg")) + + plt.figure(700, figsize=(5, 5)) + plt.clf() + for i in iter_ind: + sns.kdeplot(log_lambda[i, 1, :], label="k = " + str(i), cut=0, linewidth=2) + plt.legend(prop=font) + plt.grid() + plt.xlim([-0.1, 0.1]) + plt.xlabel(r"$ \lambda^k_{Im} / \mu^k_F$", fontdict=font) + plt.ylabel("Frequency", fontdict=font) + plt.yticks(size=12) + plt.xticks(size=12) + plt.savefig(os.path.join(OUTPUT_DIR, f"{FIGNAME}_Fig6_F.jpg")) diff --git a/examples/ide/conf/volterra_ide.yaml b/examples/ide/conf/volterra_ide.yaml index 0067535773..fa18c5cea4 100644 --- a/examples/ide/conf/volterra_ide.yaml +++ b/examples/ide/conf/volterra_ide.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -79,3 +80,85 @@ INFER: max_batch_size: 64 num_cpu_threads: 4 batch_size: 16 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_volterra_IDE/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval 
+seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set geometry +BOUNDS: [0, 5] + +# model settings +MODEL: + input_keys: ["x"] + output_keys: ["u"] + num_layers: 3 + hidden_size: 20 + activation: "tanh" + +# training settings +TRAIN: + epochs: 1 + iters_per_epoch: 1 + save_freq: 1 + eval_during_train: true + eval_freq: 1 + optimizer: + learning_rate: 1 + max_iter: 15000 + max_eval: 1250 + tolerance_grad: 1.0e-8 + tolerance_change: 0 + history_size: 100 + quad_deg: 20 + npoint_interior: 12 + npoint_ic: 1 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + npoint_eval: 100 + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/volterra_ide/volterra_ide_pretrained.pdparams + export_path: ./inference/volterra_ide + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 64 + num_cpu_threads: 4 + batch_size: 16 +>>>>>>> Stashed changes diff --git a/examples/ide/volterra_ide.py b/examples/ide/volterra_ide.py index 2f4c473ed4..e431091482 100644 --- a/examples/ide/volterra_ide.py +++ b/examples/ide/volterra_ide.py @@ -1,334 +1,334 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Reference: https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/Volterra_IDE.py - -from os import path as osp -from typing import Dict -from typing import Tuple - -import hydra -import numpy as np -import paddle -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.autodiff import jacobian -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # set output directory - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set geometry - geom = {"timedomain": ppsci.geometry.TimeDomain(*cfg.BOUNDS)} - - # set equation - def kernel_func(x, s): - return np.exp(s - x) - - def func(out): - x, u = out["x"], out["u"] - return jacobian(u, x) + u - - equation = { - "volterra": ppsci.equation.Volterra( - cfg.BOUNDS[0], - cfg.TRAIN.npoint_interior, - cfg.TRAIN.quad_deg, - kernel_func, - func, - ) - } - - # set constraint - # set transform for input data - def input_data_quad_transform( - input: Dict[str, np.ndarray], - weight: Dict[str, np.ndarray], - label: Dict[str, np.ndarray], - ) -> Tuple[ - Dict[str, paddle.Tensor], Dict[str, paddle.Tensor], Dict[str, paddle.Tensor] - ]: - """Get sampling points for integral. - - Args: - input (Dict[str, paddle.Tensor]): Raw input dict. - weight (Dict[str, paddle.Tensor]): Raw weight dict. - label (Dict[str, paddle.Tensor]): Raw label dict. 
- - Returns: - Tuple[ Dict[str, paddle.Tensor], Dict[str, paddle.Tensor], Dict[str, paddle.Tensor] ]: - Input dict contained sampling points, weight dict and label dict. - """ - x = input["x"] # N points. - x_quad = equation["volterra"].get_quad_points(x).reshape([-1, 1]) # NxQ - x_quad = paddle.concat((x, x_quad), axis=0) # M+MxQ: [M|Q1|Q2,...,QM|] - return ( - { - **input, - "x": x_quad, - }, - weight, - label, - ) - - # interior constraint - ide_constraint = ppsci.constraint.InteriorConstraint( - equation["volterra"].equations, - {"volterra": 0}, - geom["timedomain"], - { - "dataset": { - "name": "IterableNamedArrayDataset", - "transforms": ( - { - "FunctionalTransform": { - "transform_func": input_data_quad_transform, - }, - }, - ), - }, - "batch_size": cfg.TRAIN.npoint_interior, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean"), - evenly=True, - name="EQ", - ) - - # initial condition - def u_solution_func(in_): - if isinstance(in_["x"], paddle.Tensor): - return paddle.exp(-in_["x"]) * paddle.cosh(in_["x"]) - return np.exp(-in_["x"]) * np.cosh(in_["x"]) - - ic = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["timedomain"], - { - "dataset": {"name": "IterableNamedArrayDataset"}, - "batch_size": cfg.TRAIN.npoint_ic, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean"), - criteria=geom["timedomain"].on_initial, - name="IC", - ) - # wrap constraints together - constraint = { - ide_constraint.name: ide_constraint, - ic.name: ic, - } - - # set optimizer - optimizer = ppsci.optimizer.LBFGS(**cfg.TRAIN.optimizer)(model) - - # set validator - l2rel_validator = ppsci.validate.GeometryValidator( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["timedomain"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": cfg.EVAL.npoint_eval, - }, - ppsci.loss.L2RelLoss(), - evenly=True, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="L2Rel_Validator", - ) - validator = {l2rel_validator.name: l2rel_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - - # visualize prediction after finished training - input_data = geom["timedomain"].uniform_points(100) - label_data = u_solution_func({"x": input_data}) - output_data = solver.predict({"x": input_data}, return_numpy=True)["u"] - - plt.plot(input_data, label_data, "-", label=r"$u(t)$") - plt.plot(input_data, output_data, "o", label=r"$\hat{u}(t)$", markersize=4.0) - plt.legend() - plt.xlabel(r"$t$") - plt.ylabel(r"$u$") - plt.title(r"$u-t$") - plt.savefig(osp.join(cfg.output_dir, "./Volterra_IDE.png"), dpi=200) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # set output directory - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set geometry - geom = {"timedomain": ppsci.geometry.TimeDomain(*cfg.BOUNDS)} - # set validator - - def u_solution_func(in_) -> np.ndarray: - if isinstance(in_["x"], 
paddle.Tensor): - return paddle.exp(-in_["x"]) * paddle.cosh(in_["x"]) - return np.exp(-in_["x"]) * np.cosh(in_["x"]) - - l2rel_validator = ppsci.validate.GeometryValidator( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["timedomain"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": cfg.EVAL.npoint_eval, - }, - ppsci.loss.L2RelLoss(), - evenly=True, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="L2Rel_Validator", - ) - validator = {l2rel_validator.name: l2rel_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - geom=geom, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate model - solver.eval() - - # visualize prediction - input_data = geom["timedomain"].uniform_points(cfg.EVAL.npoint_eval) - label_data = u_solution_func({"x": input_data}) - output_data = solver.predict({"x": input_data}, return_numpy=True)["u"] - - plt.plot(input_data, label_data, "-", label=r"$u(t)$") - plt.plot(input_data, output_data, "o", label=r"$\hat{u}(t)$", markersize=4.0) - plt.legend() - plt.xlabel(r"$t$") - plt.ylabel(r"$u$") - plt.title(r"$u-t$") - plt.savefig(osp.join(cfg.output_dir, "./Volterra_IDE.png"), dpi=200) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([None, 1], "float32", name=key) - for key in cfg.MODEL.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - geom = {"timedomain": ppsci.geometry.TimeDomain(*cfg.BOUNDS)} - - input_data = geom["timedomain"].uniform_points(cfg.EVAL.npoint_eval) - input_dict = {"x": input_data} - - output_dict = predictor.predict( - {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - def u_solution_func(in_) -> np.ndarray: - if isinstance(in_["x"], paddle.Tensor): - return paddle.exp(-in_["x"]) * paddle.cosh(in_["x"]) - return np.exp(-in_["x"]) * np.cosh(in_["x"]) - - label_data = u_solution_func({"x": input_data}) - output_data = output_dict["u"] - - # save result - plt.plot(input_data, label_data, "-", label=r"$u(t)$") - plt.plot(input_data, output_data, "o", label=r"$\hat{u}(t)$", markersize=4.0) - plt.legend() - plt.xlabel(r"$t$") - plt.ylabel(r"$u$") - plt.title(r"$u-t$") - plt.savefig("./Volterra_IDE_pred.png", dpi=200) - - -@hydra.main(version_base=None, config_path="./conf", config_name="volterra_ide.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
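For context, the problem this example solves (read off kernel_func, func and u_solution_func below) is the first-order Volterra integro-differential equation du/dx + u(x) = integral from 0 to x of e^(s - x) u(s) ds on [0, 5] with u(0) = 1, whose closed-form solution is u(x) = e^(-x) cosh(x). A standalone NumPy check of that identity with the same 20-point Gauss-Legendre rule as quad_deg (illustrative; during training ppsci.equation.Volterra supplies the quadrature points itself):

import numpy as np

def u(x):
    return np.exp(-x) * np.cosh(x)                      # exact solution, as in u_solution_func

x = 2.0
nodes, weights = np.polynomial.legendre.leggauss(20)    # 20-point rule on [-1, 1]
s = 0.5 * x * (nodes + 1.0)                             # map nodes to [0, x]
integral = 0.5 * x * np.sum(weights * np.exp(s - x) * u(s))
du_dx = -np.exp(-2.0 * x)                               # derivative of exp(-x) * cosh(x)
print(abs(du_dx + u(x) - integral))                     # ~1e-16: u satisfies the equation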
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Reference: https://github.com/lululxvi/deepxde/blob/master/examples/pinn_forward/Volterra_IDE.py + +from os import path as osp +from typing import Dict +from typing import Tuple + +import hydra +import numpy as np +import paddle +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.autodiff import jacobian +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # set output directory + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set geometry + geom = {"timedomain": ppsci.geometry.TimeDomain(*cfg.BOUNDS)} + + # set equation + def kernel_func(x, s): + return np.exp(s - x) + + def func(out): + x, u = out["x"], out["u"] + return jacobian(u, x) + u + + equation = { + "volterra": ppsci.equation.Volterra( + cfg.BOUNDS[0], + cfg.TRAIN.npoint_interior, + cfg.TRAIN.quad_deg, + kernel_func, + func, + ) + } + + # set constraint + # set transform for input data + def input_data_quad_transform( + input: Dict[str, np.ndarray], + weight: Dict[str, np.ndarray], + label: Dict[str, np.ndarray], + ) -> Tuple[ + Dict[str, paddle.Tensor], Dict[str, paddle.Tensor], Dict[str, paddle.Tensor] + ]: + """Get sampling points for integral. + + Args: + input (Dict[str, paddle.Tensor]): Raw input dict. + weight (Dict[str, paddle.Tensor]): Raw weight dict. + label (Dict[str, paddle.Tensor]): Raw label dict. + + Returns: + Tuple[ Dict[str, paddle.Tensor], Dict[str, paddle.Tensor], Dict[str, paddle.Tensor] ]: + Input dict contained sampling points, weight dict and label dict. + """ + x = input["x"] # N points. 
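+        # get_quad_points returns the quadrature points used to approximate the
+        # integral term of the Volterra IDE; they are concatenated after the raw
+        # sampling points so the model evaluates both in a single forward pass.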
+ x_quad = equation["volterra"].get_quad_points(x).reshape([-1, 1]) # NxQ + x_quad = paddle.concat((x, x_quad), axis=0) # M+MxQ: [M|Q1|Q2,...,QM|] + return ( + { + **input, + "x": x_quad, + }, + weight, + label, + ) + + # interior constraint + ide_constraint = ppsci.constraint.InteriorConstraint( + equation["volterra"].equations, + {"volterra": 0}, + geom["timedomain"], + { + "dataset": { + "name": "IterableNamedArrayDataset", + "transforms": ( + { + "FunctionalTransform": { + "transform_func": input_data_quad_transform, + }, + }, + ), + }, + "batch_size": cfg.TRAIN.npoint_interior, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean"), + evenly=True, + name="EQ", + ) + + # initial condition + def u_solution_func(in_): + if isinstance(in_["x"], paddle.Tensor): + return paddle.exp(-in_["x"]) * paddle.cosh(in_["x"]) + return np.exp(-in_["x"]) * np.cosh(in_["x"]) + + ic = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["timedomain"], + { + "dataset": {"name": "IterableNamedArrayDataset"}, + "batch_size": cfg.TRAIN.npoint_ic, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean"), + criteria=geom["timedomain"].on_initial, + name="IC", + ) + # wrap constraints together + constraint = { + ide_constraint.name: ide_constraint, + ic.name: ic, + } + + # set optimizer + optimizer = ppsci.optimizer.LBFGS(**cfg.TRAIN.optimizer)(model) + + # set validator + l2rel_validator = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["timedomain"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": cfg.EVAL.npoint_eval, + }, + ppsci.loss.L2RelLoss(), + evenly=True, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="L2Rel_Validator", + ) + validator = {l2rel_validator.name: l2rel_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + + # visualize prediction after finished training + input_data = geom["timedomain"].uniform_points(100) + label_data = u_solution_func({"x": input_data}) + output_data = solver.predict({"x": input_data}, return_numpy=True)["u"] + + plt.plot(input_data, label_data, "-", label=r"$u(t)$") + plt.plot(input_data, output_data, "o", label=r"$\hat{u}(t)$", markersize=4.0) + plt.legend() + plt.xlabel(r"$t$") + plt.ylabel(r"$u$") + plt.title(r"$u-t$") + plt.savefig(osp.join(cfg.output_dir, "./Volterra_IDE.png"), dpi=200) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # set output directory + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set geometry + geom = {"timedomain": ppsci.geometry.TimeDomain(*cfg.BOUNDS)} + # set validator + + def u_solution_func(in_) -> np.ndarray: + if isinstance(in_["x"], paddle.Tensor): + return paddle.exp(-in_["x"]) * paddle.cosh(in_["x"]) + return np.exp(-in_["x"]) * np.cosh(in_["x"]) + + l2rel_validator = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": 
u_solution_func}, + geom["timedomain"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": cfg.EVAL.npoint_eval, + }, + ppsci.loss.L2RelLoss(), + evenly=True, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="L2Rel_Validator", + ) + validator = {l2rel_validator.name: l2rel_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + geom=geom, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate model + solver.eval() + + # visualize prediction + input_data = geom["timedomain"].uniform_points(cfg.EVAL.npoint_eval) + label_data = u_solution_func({"x": input_data}) + output_data = solver.predict({"x": input_data}, return_numpy=True)["u"] + + plt.plot(input_data, label_data, "-", label=r"$u(t)$") + plt.plot(input_data, output_data, "o", label=r"$\hat{u}(t)$", markersize=4.0) + plt.legend() + plt.xlabel(r"$t$") + plt.ylabel(r"$u$") + plt.title(r"$u-t$") + plt.savefig(osp.join(cfg.output_dir, "./Volterra_IDE.png"), dpi=200) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 1], "float32", name=key) + for key in cfg.MODEL.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + geom = {"timedomain": ppsci.geometry.TimeDomain(*cfg.BOUNDS)} + + input_data = geom["timedomain"].uniform_points(cfg.EVAL.npoint_eval) + input_dict = {"x": input_data} + + output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + def u_solution_func(in_) -> np.ndarray: + if isinstance(in_["x"], paddle.Tensor): + return paddle.exp(-in_["x"]) * paddle.cosh(in_["x"]) + return np.exp(-in_["x"]) * np.cosh(in_["x"]) + + label_data = u_solution_func({"x": input_data}) + output_data = output_dict["u"] + + # save result + plt.plot(input_data, label_data, "-", label=r"$u(t)$") + plt.plot(input_data, output_data, "o", label=r"$\hat{u}(t)$", markersize=4.0) + plt.legend() + plt.xlabel(r"$t$") + plt.ylabel(r"$u$") + plt.title(r"$u-t$") + plt.savefig("./Volterra_IDE_pred.png", dpi=200) + + +@hydra.main(version_base=None, config_path="./conf", config_name="volterra_ide.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/iops/iops.py b/examples/iops/iops.py index e364769a03..32a6374bf8 100644 --- a/examples/iops/iops.py +++ b/examples/iops/iops.py @@ -1,189 +1,189 @@ -import pickle - -import numpy as np -import paddle -import paddle.nn as nn -import paddle.optimizer as optim -import pandas as pd -from sklearn.metrics import classification_report -from sklearn.model_selection import train_test_split -from 
sklearn.preprocessing import LabelEncoder -from sklearn.preprocessing import StandardScaler -from sklearn.utils.class_weight import compute_class_weight - -import ppsci - - -# 数据预处理函数 -def preprocess_data(file_path): - # 读取数据 - df = pd.read_excel(file_path) - - # 编码污染类型 - label_encoder = LabelEncoder() - df["pollution_type"] = label_encoder.fit_transform(df["pollution_type"]) - - # 特征和标签 - X = df[["PM2.5", "PM10", "SO2", "NO2", "CO"]].values - y = df["pollution_type"].values - - # 标准化特征 - scaler = StandardScaler() - X_scaled = scaler.fit_transform(X) - - # 数据集划分 - X_train, X_test, y_train, y_test = train_test_split( - X_scaled, y, test_size=0.2, random_state=42, stratify=y - ) - - return X_train, X_test, y_train, y_test, scaler, label_encoder - - -# 模型训练函数 -def train_model( - X_train, - y_train, - X_test, - y_test, - class_weights, - label_classes, - batch_size=32, - epochs=100, - patience=10, -): - # 转换为 Paddle tensor - X_train_tensor = paddle.to_tensor(X_train, dtype="float32") - y_train_tensor = paddle.to_tensor(y_train, dtype="int64") - X_test_tensor = paddle.to_tensor(X_test, dtype="float32") - y_test_tensor = paddle.to_tensor(y_test, dtype="int64") - - # 使用 MLP 模型 - model = ppsci.arch.MLP( - input_keys=["input"], # 输入键 - output_keys=["output"], # 输出键 - input_dim=5, # 输入特征维度 - output_dim=len(label_classes), # 输出分类数 - num_layers=3, # 网络层数 - hidden_size=64, # 隐藏层单元数量 - activation="ReLU", # 激活函数 - ) - - # 定义损失函数和优化器 - class_weights_tensor = paddle.to_tensor(class_weights, dtype="float32") - loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor) - scheduler = paddle.optimizer.lr.StepDecay( - learning_rate=0.001, step_size=50, gamma=0.5 - ) - optimizer = optim.Adam(parameters=model.parameters(), learning_rate=scheduler) - - # 早停参数 - best_val_loss = float("inf") - early_stop_count = 0 - - for epoch in range(epochs): - model.train() - indices = np.random.permutation(len(X_train_tensor)) - X_train_tensor = X_train_tensor[indices] - y_train_tensor = y_train_tensor[indices] - - # 批量训练 - for i in range(0, len(X_train_tensor), batch_size): - X_batch = X_train_tensor[i : i + batch_size] - y_batch = y_train_tensor[i : i + batch_size] - - # 前向传播 - logits = model({"input": X_batch})["output"] - loss = loss_fn(logits, y_batch) - - # 反向传播和优化 - loss.backward() - optimizer.step() - optimizer.clear_gradients() - - # 验证阶段 - model.eval() - with paddle.no_grad(): - val_logits = model({"input": X_test_tensor})["output"] - val_loss = loss_fn(val_logits, y_test_tensor).numpy() - val_predictions = paddle.argmax(val_logits, axis=1).numpy() - val_accuracy = np.mean(val_predictions == y_test) - - print( - f"Epoch [{epoch+1}/{epochs}], Train Loss: {loss.numpy():.4f}, Val Loss: {val_loss:.4f}, Val Accuracy: {val_accuracy * 100:.2f}%" - ) - - # 早停机制 - if val_loss < best_val_loss: - best_val_loss = val_loss - early_stop_count = 0 - paddle.save(model.state_dict(), "best_model.pdparams") - else: - early_stop_count += 1 - if early_stop_count > patience: - print("早停机制触发,停止训练") - break - - return model - - -# 模型评估和保存函数 -def evaluate_and_save_model(model, X_test, y_test, label_encoder, scaler): - # 测试集评估 - model.eval() - with paddle.no_grad(): - X_test_tensor = paddle.to_tensor(X_test, dtype="float32") - test_logits = model({"input": X_test_tensor})["output"] - test_predictions = paddle.argmax(test_logits, axis=1).numpy() - - # 打印分类报告 - print( - classification_report( - y_test, test_predictions, target_names=label_encoder.classes_ - ) - ) - - # 保存模型和处理器 - paddle.save(model.state_dict(), "pollution_model.pdparams") - with 
open("scaler.pkl", "wb") as f: - pickle.dump(scaler, f) - with open("label_encoder.pkl", "wb") as f: - pickle.dump(label_encoder, f) - - -# 模型推理函数 -def predict_pollution(sample, model, scaler, label_encoder): - sample_scaled = scaler.transform(sample) - sample_tensor = paddle.to_tensor(sample_scaled, dtype="float32") - - with paddle.no_grad(): - prediction = model({"input": sample_tensor})["output"] - predicted_class = paddle.argmax(prediction, axis=1).numpy()[0] - - predicted_label = label_encoder.inverse_transform([predicted_class])[0] - return predicted_label - - -# 主程序 -if __name__ == "__main__": - # 数据预处理 - X_train, X_test, y_train, y_test, scaler, label_encoder = preprocess_data( - "./trainData.xlsx" - ) - - # 计算类别权重 - class_weights = compute_class_weight( - "balanced", classes=np.unique(y_train), y=y_train - ) - - # 模型训练 - model = train_model( - X_train, y_train, X_test, y_test, class_weights, label_encoder.classes_ - ) - - # 模型评估和保存 - evaluate_and_save_model(model, X_test, y_test, label_encoder, scaler) - - # 加载测试样本 - test_sample = [[28, 52, 3, 46, 0.5]] # 样本数据 - predicted_label = predict_pollution(test_sample, model, scaler, label_encoder) - print(f"预测的污染类型为: {predicted_label}") +import pickle + +import numpy as np +import paddle +import paddle.nn as nn +import paddle.optimizer as optim +import pandas as pd +from sklearn.metrics import classification_report +from sklearn.model_selection import train_test_split +from sklearn.preprocessing import LabelEncoder +from sklearn.preprocessing import StandardScaler +from sklearn.utils.class_weight import compute_class_weight + +import ppsci + + +# 数据预处理函数 +def preprocess_data(file_path): + # 读取数据 + df = pd.read_excel(file_path) + + # 编码污染类型 + label_encoder = LabelEncoder() + df["pollution_type"] = label_encoder.fit_transform(df["pollution_type"]) + + # 特征和标签 + X = df[["PM2.5", "PM10", "SO2", "NO2", "CO"]].values + y = df["pollution_type"].values + + # 标准化特征 + scaler = StandardScaler() + X_scaled = scaler.fit_transform(X) + + # 数据集划分 + X_train, X_test, y_train, y_test = train_test_split( + X_scaled, y, test_size=0.2, random_state=42, stratify=y + ) + + return X_train, X_test, y_train, y_test, scaler, label_encoder + + +# 模型训练函数 +def train_model( + X_train, + y_train, + X_test, + y_test, + class_weights, + label_classes, + batch_size=32, + epochs=100, + patience=10, +): + # 转换为 Paddle tensor + X_train_tensor = paddle.to_tensor(X_train, dtype="float32") + y_train_tensor = paddle.to_tensor(y_train, dtype="int64") + X_test_tensor = paddle.to_tensor(X_test, dtype="float32") + y_test_tensor = paddle.to_tensor(y_test, dtype="int64") + + # 使用 MLP 模型 + model = ppsci.arch.MLP( + input_keys=["input"], # 输入键 + output_keys=["output"], # 输出键 + input_dim=5, # 输入特征维度 + output_dim=len(label_classes), # 输出分类数 + num_layers=3, # 网络层数 + hidden_size=64, # 隐藏层单元数量 + activation="ReLU", # 激活函数 + ) + + # 定义损失函数和优化器 + class_weights_tensor = paddle.to_tensor(class_weights, dtype="float32") + loss_fn = nn.CrossEntropyLoss(weight=class_weights_tensor) + scheduler = paddle.optimizer.lr.StepDecay( + learning_rate=0.001, step_size=50, gamma=0.5 + ) + optimizer = optim.Adam(parameters=model.parameters(), learning_rate=scheduler) + + # 早停参数 + best_val_loss = float("inf") + early_stop_count = 0 + + for epoch in range(epochs): + model.train() + indices = np.random.permutation(len(X_train_tensor)) + X_train_tensor = X_train_tensor[indices] + y_train_tensor = y_train_tensor[indices] + + # 批量训练 + for i in range(0, len(X_train_tensor), batch_size): + X_batch = X_train_tensor[i : i 
+ batch_size] + y_batch = y_train_tensor[i : i + batch_size] + + # 前向传播 + logits = model({"input": X_batch})["output"] + loss = loss_fn(logits, y_batch) + + # 反向传播和优化 + loss.backward() + optimizer.step() + optimizer.clear_gradients() + + # 验证阶段 + model.eval() + with paddle.no_grad(): + val_logits = model({"input": X_test_tensor})["output"] + val_loss = loss_fn(val_logits, y_test_tensor).numpy() + val_predictions = paddle.argmax(val_logits, axis=1).numpy() + val_accuracy = np.mean(val_predictions == y_test) + + print( + f"Epoch [{epoch+1}/{epochs}], Train Loss: {loss.numpy():.4f}, Val Loss: {val_loss:.4f}, Val Accuracy: {val_accuracy * 100:.2f}%" + ) + + # 早停机制 + if val_loss < best_val_loss: + best_val_loss = val_loss + early_stop_count = 0 + paddle.save(model.state_dict(), "best_model.pdparams") + else: + early_stop_count += 1 + if early_stop_count > patience: + print("早停机制触发,停止训练") + break + + return model + + +# 模型评估和保存函数 +def evaluate_and_save_model(model, X_test, y_test, label_encoder, scaler): + # 测试集评估 + model.eval() + with paddle.no_grad(): + X_test_tensor = paddle.to_tensor(X_test, dtype="float32") + test_logits = model({"input": X_test_tensor})["output"] + test_predictions = paddle.argmax(test_logits, axis=1).numpy() + + # 打印分类报告 + print( + classification_report( + y_test, test_predictions, target_names=label_encoder.classes_ + ) + ) + + # 保存模型和处理器 + paddle.save(model.state_dict(), "pollution_model.pdparams") + with open("scaler.pkl", "wb") as f: + pickle.dump(scaler, f) + with open("label_encoder.pkl", "wb") as f: + pickle.dump(label_encoder, f) + + +# 模型推理函数 +def predict_pollution(sample, model, scaler, label_encoder): + sample_scaled = scaler.transform(sample) + sample_tensor = paddle.to_tensor(sample_scaled, dtype="float32") + + with paddle.no_grad(): + prediction = model({"input": sample_tensor})["output"] + predicted_class = paddle.argmax(prediction, axis=1).numpy()[0] + + predicted_label = label_encoder.inverse_transform([predicted_class])[0] + return predicted_label + + +# 主程序 +if __name__ == "__main__": + # 数据预处理 + X_train, X_test, y_train, y_test, scaler, label_encoder = preprocess_data( + "./trainData.xlsx" + ) + + # 计算类别权重 + class_weights = compute_class_weight( + "balanced", classes=np.unique(y_train), y=y_train + ) + + # 模型训练 + model = train_model( + X_train, y_train, X_test, y_test, class_weights, label_encoder.classes_ + ) + + # 模型评估和保存 + evaluate_and_save_model(model, X_test, y_test, label_encoder, scaler) + + # 加载测试样本 + test_sample = [[28, 52, 3, 46, 0.5]] # 样本数据 + predicted_label = predict_pollution(test_sample, model, scaler, label_encoder) + print(f"预测的污染类型为: {predicted_label}") diff --git a/examples/laplace/conf/laplace2d.yaml b/examples/laplace/conf/laplace2d.yaml index 20591a6f36..1b88915616 100644 --- a/examples/laplace/conf/laplace2d.yaml +++ b/examples/laplace/conf/laplace2d.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -72,3 +73,78 @@ INFER: max_batch_size: 64 num_cpu_threads: 4 batch_size: 64 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: output_laplace2d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: 
ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2024 +log_freq: 20 +output_dir: ${hydra:run.dir} +NPOINT_INTERIOR: 9801 +NPOINT_BC: 400 + +# set geometry +DIAGONAL_COORD: + xmin: [0.0, 0.0] + xmax: [1.0, 1.0] + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u"] + num_layers: 5 + hidden_size: 20 + +# training settings +TRAIN: + epochs: 20000 + iters_per_epoch: 1 + eval_during_train: true + eval_freq: 200 + learning_rate: 0.001 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/laplace2d/laplace2d_pretrained.pdparams + export_path: ./inference/laplace2d + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 64 + num_cpu_threads: 4 + batch_size: 64 +>>>>>>> Stashed changes diff --git a/examples/laplace/laplace2d.py b/examples/laplace/laplace2d.py index 39d6f959a0..ae8d9f7e07 100644 --- a/examples/laplace/laplace2d.py +++ b/examples/laplace/laplace2d.py @@ -1,262 +1,262 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import hydra -import numpy as np -from omegaconf import DictConfig - -import ppsci - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"laplace": ppsci.equation.Laplace(dim=2)} - - # set geometry - geom = { - "rect": ppsci.geometry.Rectangle( - cfg.DIAGONAL_COORD.xmin, cfg.DIAGONAL_COORD.xmax - ) - } - - # compute ground truth function - def u_solution_func(out): - """compute ground truth for u as label data""" - x, y = out["x"], out["y"] - return np.cos(x) * np.cosh(y) - - # set train dataloader config - train_dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - - NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC - - # set constraint - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["laplace"].equations, - {"laplace": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_TOTAL}, - ppsci.loss.MSELoss("sum"), - evenly=True, - name="EQ", - ) - bc = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": cfg.NPOINT_BC}, - ppsci.loss.MSELoss("sum"), - name="BC", - ) - # wrap constraints together - constraint = { - pde_constraint.name: pde_constraint, - bc.name: bc, - } - - # set optimizer - optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) - - # set validator - mse_metric = ppsci.validate.GeometryValidator( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["rect"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": NPOINT_TOTAL, - }, - ppsci.loss.MSELoss(), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - with_initial=True, - name="MSE_Metric", - ) - validator = {mse_metric.name: mse_metric} - - # set visualizer(optional) - vis_points = geom["rect"].sample_interior(NPOINT_TOTAL, evenly=True) - visualizer = { - "visualize_u": ppsci.visualize.VisualizerVtu( - vis_points, - {"u": lambda d: d["u"]}, - num_timestamps=1, - prefix="result_u", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"laplace": ppsci.equation.Laplace(dim=2)} - - # set geometry - geom = { - "rect": ppsci.geometry.Rectangle( - cfg.DIAGONAL_COORD.xmin, cfg.DIAGONAL_COORD.xmax - ) - } - - # compute ground truth function - def u_solution_func(out): - """compute ground truth for u as label data""" - x, y = out["x"], out["y"] - return np.cos(x) * np.cosh(y) - - NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC - - # set validator - mse_metric = ppsci.validate.GeometryValidator( - {"u": lambda out: out["u"]}, - {"u": u_solution_func}, - geom["rect"], - { - "dataset": "IterableNamedArrayDataset", - "total_size": NPOINT_TOTAL, - }, - ppsci.loss.MSELoss(), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - with_initial=True, - name="MSE_Metric", - ) - validator = {mse_metric.name: mse_metric} - - # set visualizer(optional) - vis_points = 
geom["rect"].sample_interior(NPOINT_TOTAL, evenly=True) - visualizer = { - "visualize_u": ppsci.visualize.VisualizerVtu( - vis_points, - {"u": lambda d: d["u"]}, - num_timestamps=1, - prefix="result_u", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - seed=cfg.seed, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - # visualize prediction - solver.visualize() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - geom = { - "rect": ppsci.geometry.Rectangle( - cfg.DIAGONAL_COORD.xmin, cfg.DIAGONAL_COORD.xmax - ) - } - NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC - input_dict = geom["rect"].sample_interior(NPOINT_TOTAL, evenly=True) - - output_dict = predictor.predict( - {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - # save result - ppsci.visualize.save_vtu_from_dict( - "./laplace2d_pred.vtu", - {**input_dict, **output_dict}, - input_dict.keys(), - cfg.MODEL.output_keys, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="laplace2d.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import hydra +import numpy as np +from omegaconf import DictConfig + +import ppsci + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"laplace": ppsci.equation.Laplace(dim=2)} + + # set geometry + geom = { + "rect": ppsci.geometry.Rectangle( + cfg.DIAGONAL_COORD.xmin, cfg.DIAGONAL_COORD.xmax + ) + } + + # compute ground truth function + def u_solution_func(out): + """compute ground truth for u as label data""" + x, y = out["x"], out["y"] + return np.cos(x) * np.cosh(y) + + # set train dataloader config + train_dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + + NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC + + # set constraint + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["laplace"].equations, + {"laplace": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_TOTAL}, + ppsci.loss.MSELoss("sum"), + evenly=True, + name="EQ", + ) + bc = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": cfg.NPOINT_BC}, + ppsci.loss.MSELoss("sum"), + name="BC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + bc.name: bc, + } + + # set optimizer + optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) + + # set validator + mse_metric = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["rect"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": NPOINT_TOTAL, + }, + ppsci.loss.MSELoss(), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + with_initial=True, + name="MSE_Metric", + ) + validator = {mse_metric.name: mse_metric} + + # set visualizer(optional) + vis_points = geom["rect"].sample_interior(NPOINT_TOTAL, evenly=True) + visualizer = { + "visualize_u": ppsci.visualize.VisualizerVtu( + vis_points, + {"u": lambda d: d["u"]}, + num_timestamps=1, + prefix="result_u", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"laplace": ppsci.equation.Laplace(dim=2)} + + # set geometry + geom = { + "rect": ppsci.geometry.Rectangle( + cfg.DIAGONAL_COORD.xmin, cfg.DIAGONAL_COORD.xmax + ) + } + + # compute ground truth function + def u_solution_func(out): + """compute ground truth for u as label data""" + x, y = out["x"], out["y"] + return np.cos(x) * np.cosh(y) + + NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC + + # set validator + mse_metric = ppsci.validate.GeometryValidator( + {"u": lambda out: out["u"]}, + {"u": u_solution_func}, + geom["rect"], + { + "dataset": "IterableNamedArrayDataset", + "total_size": NPOINT_TOTAL, + }, + ppsci.loss.MSELoss(), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + with_initial=True, + name="MSE_Metric", + ) + validator = {mse_metric.name: mse_metric} + + # set visualizer(optional) + vis_points = 
geom["rect"].sample_interior(NPOINT_TOTAL, evenly=True) + visualizer = { + "visualize_u": ppsci.visualize.VisualizerVtu( + vis_points, + {"u": lambda d: d["u"]}, + num_timestamps=1, + prefix="result_u", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + seed=cfg.seed, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + # visualize prediction + solver.visualize() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + geom = { + "rect": ppsci.geometry.Rectangle( + cfg.DIAGONAL_COORD.xmin, cfg.DIAGONAL_COORD.xmax + ) + } + NPOINT_TOTAL = cfg.NPOINT_INTERIOR + cfg.NPOINT_BC + input_dict = geom["rect"].sample_interior(NPOINT_TOTAL, evenly=True) + + output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + # save result + ppsci.visualize.save_vtu_from_dict( + "./laplace2d_pred.vtu", + {**input_dict, **output_dict}, + input_dict.keys(), + cfg.MODEL.output_keys, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="laplace2d.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/ldc/conf/ldc2d_steady_Re10.yaml b/examples/ldc/conf/ldc2d_steady_Re10.yaml index 261e51bc54..e6198def2f 100644 --- a/examples/ldc/conf/ldc2d_steady_Re10.yaml +++ b/examples/ldc/conf/ldc2d_steady_Re10.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -76,3 +77,81 @@ INFER: max_batch_size: 8192 num_cpu_threads: 10 batch_size: 8192 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: output_ldc2d_steady_Re10/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +NU: 0.01 +RHO: 1.0 + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u", "v", "p"] + num_layers: 9 + hidden_size: 50 + activation: "tanh" + +# training settings +TRAIN: + epochs: 20000 + iters_per_epoch: 1 + 
eval_during_train: true + eval_freq: 200 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + weight: + pde: {"continuity": 0.0001,"momentum_x": 0.0001,"momentum_y": 0.0001} + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + batch_size: + residual_validator: 8192 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/ldc2d_steady_Re10/ldc2d_steady_Re10_pretrained.pdparams + export_path: ./inference/ldc2d_steady_Re10 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 8192 + num_cpu_threads: 10 + batch_size: 8192 +>>>>>>> Stashed changes diff --git a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml index 957e2b790f..befda9e711 100644 --- a/examples/ldc/conf/ldc2d_unsteady_Re10.yaml +++ b/examples/ldc/conf/ldc2d_unsteady_Re10.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -77,3 +78,82 @@ INFER: max_batch_size: 8192 num_cpu_threads: 10 batch_size: 8192 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: output_ldc2d_unsteady_Re10/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +NU: 0.01 +RHO: 1.0 +NTIME_ALL: 16 + +# model settings +MODEL: + input_keys: ["t", "x", "y"] + output_keys: ["u", "v", "p"] + num_layers: 9 + hidden_size: 50 + activation: "tanh" + +# training settings +TRAIN: + epochs: 20000 + iters_per_epoch: 1 + eval_during_train: true + eval_freq: 200 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 0.001 + weight: + pde: {"continuity": 0.0001,"momentum_x": 0.0001,"momentum_y": 0.0001} + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + batch_size: + residual_validator: 8192 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/ldc2d_unsteady_Re10/ldc2d_unsteady_Re10_pretrained.pdparams + export_path: ./inference/ldc2d_unsteady_Re10 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 8192 + num_cpu_threads: 10 + batch_size: 8192 +>>>>>>> Stashed changes diff --git a/examples/ldc/conf/ldc_2d_Re1000_plain.yaml b/examples/ldc/conf/ldc_2d_Re1000_plain.yaml index fc9c45c1be..975fa02e19 100644 --- a/examples/ldc/conf/ldc_2d_Re1000_plain.yaml +++ b/examples/ldc/conf/ldc_2d_Re1000_plain.yaml @@ -1,88 +1,88 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - 
- TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ldc_2d_Re1000_plain/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 -use_tbd: false - -# working conditions -Re: [100, 400, 1000] -epochs: [20, 40, 140] -EVAL_DATA_PATH: ./data/ldc_Re1000.mat - -# model settings -MODEL: - input_keys: ["x", "y"] - output_keys: ["u", "v", "p"] - num_layers: 4 - hidden_size: 256 - activation: tanh - -# training settings -TRAIN: - epochs: 20 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${sum:${epochs}} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 2000 - by_epoch: false - batch_size: - pde: 1024 - bc: 256 - pretrained_model_path: null - checkpoint_path: null - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 4096 - -# inference settings -INFER: - pretrained_model_path: null - export_path: ./inference/ldc_2d_re3200 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 1024 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ldc_2d_Re1000_plain/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 +use_tbd: false + +# working conditions +Re: [100, 400, 1000] +epochs: [20, 40, 140] +EVAL_DATA_PATH: ./data/ldc_Re1000.mat + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u", "v", "p"] + num_layers: 4 + hidden_size: 256 + activation: tanh + +# training settings +TRAIN: + epochs: 20 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${sum:${epochs}} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 2000 + by_epoch: false + batch_size: + pde: 1024 + bc: 256 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/ldc_2d_re3200 + pdmodel_path: 
${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 diff --git a/examples/ldc/conf/ldc_2d_Re3200_piratenet.yaml b/examples/ldc/conf/ldc_2d_Re3200_piratenet.yaml index a5c38aa55f..9d92403f5e 100644 --- a/examples/ldc/conf/ldc_2d_Re3200_piratenet.yaml +++ b/examples/ldc/conf/ldc_2d_Re3200_piratenet.yaml @@ -1,99 +1,99 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ldc_2d_Re3200_piratenet/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 -use_tbd: false - -# working conditions -Re: [100, 400, 1000, 1600, 3200] -epochs: [10, 20, 50, 50, 500] -EVAL_DATA_PATH: ./data/ldc_Re3200.mat - -# model settings -MODEL: - input_keys: ["x", "y"] - output_keys: ["u", "v", "p"] - num_blocks: 4 - hidden_size: 256 - activation: tanh - fourier: - scale: 15.0 - dim: 256 - random_weight: - mean: 1.0 - std: 0.1 - -# training settings -TRAIN: - epochs: 10 - iters_per_epoch: 1000 - save_freq: 100 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${sum:${epochs}} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 10000 - warmup_epoch: 5 - by_epoch: false - batch_size: - pde: 4096 - bc: 256 - pretrained_model_path: null - checkpoint_path: null - grad_norm: - update_freq: 1000 - momentum: 0.9 - init_weights: [10, 1, 1, 100, 100] - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 4096 - -# inference settings -INFER: - pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/ldc/ldc_re3200_piratenet_pretrained.pdparams - export_path: ./inference/ldc_2d_re3200 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 1024 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ldc_2d_Re3200_piratenet/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # 
running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 +use_tbd: false + +# working conditions +Re: [100, 400, 1000, 1600, 3200] +epochs: [10, 20, 50, 50, 500] +EVAL_DATA_PATH: ./data/ldc_Re3200.mat + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u", "v", "p"] + num_blocks: 4 + hidden_size: 256 + activation: tanh + fourier: + scale: 15.0 + dim: 256 + random_weight: + mean: 1.0 + std: 0.1 + +# training settings +TRAIN: + epochs: 10 + iters_per_epoch: 1000 + save_freq: 100 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${sum:${epochs}} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 10000 + warmup_epoch: 5 + by_epoch: false + batch_size: + pde: 4096 + bc: 256 + pretrained_model_path: null + checkpoint_path: null + grad_norm: + update_freq: 1000 + momentum: 0.9 + init_weights: [10, 1, 1, 100, 100] + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/ldc/ldc_re3200_piratenet_pretrained.pdparams + export_path: ./inference/ldc_2d_re3200 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 diff --git a/examples/ldc/conf/ldc_2d_Re3200_sota.yaml b/examples/ldc/conf/ldc_2d_Re3200_sota.yaml index 36afb2cd77..9538d534fc 100644 --- a/examples/ldc/conf/ldc_2d_Re3200_sota.yaml +++ b/examples/ldc/conf/ldc_2d_Re3200_sota.yaml @@ -1,97 +1,97 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ldc_2d_Re3200_sota/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 -use_tbd: false - -# working conditions -Re: [100, 400, 1000, 3200] -epochs: [50, 50, 100, 500] -EVAL_DATA_PATH: ./data/ldc_Re1000.mat - -# model settings -MODEL: - input_keys: ["x", "y"] - output_keys: ["u", "v", "p"] - num_layers: 5 - hidden_size: 256 - activation: tanh - fourier: - scale: 10.0 - dim: 128 - random_weight: - mean: 1.0 - std: 0.1 - -# training settings -TRAIN: - epochs: 50 - iters_per_epoch: 1000 - save_freq: 100 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${sum:${epochs}} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 10000 - by_epoch: false - batch_size: - pde: 8192 - bc: 256 - pretrained_model_path: null - checkpoint_path: null - grad_norm: - update_freq: 1000 - momentum: 0.9 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 4096 - -# inference settings -INFER: - pretrained_model_path: 
https://paddle-org.bj.bcebos.com/paddlescience/models/ldc/ldc_re1000_sota_pretrained.pdparams - export_path: ./inference/ldc_2d_re1000 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 1024 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ldc_2d_Re3200_sota/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 +use_tbd: false + +# working conditions +Re: [100, 400, 1000, 3200] +epochs: [50, 50, 100, 500] +EVAL_DATA_PATH: ./data/ldc_Re1000.mat + +# model settings +MODEL: + input_keys: ["x", "y"] + output_keys: ["u", "v", "p"] + num_layers: 5 + hidden_size: 256 + activation: tanh + fourier: + scale: 10.0 + dim: 128 + random_weight: + mean: 1.0 + std: 0.1 + +# training settings +TRAIN: + epochs: 50 + iters_per_epoch: 1000 + save_freq: 100 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${sum:${epochs}} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 10000 + by_epoch: false + batch_size: + pde: 8192 + bc: 256 + pretrained_model_path: null + checkpoint_path: null + grad_norm: + update_freq: 1000 + momentum: 0.9 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 4096 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/ldc/ldc_re1000_sota_pretrained.pdparams + export_path: ./inference/ldc_2d_re1000 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 diff --git a/examples/ldc/ldc2d_steady_Re10.py b/examples/ldc/ldc2d_steady_Re10.py index 3c65079326..01b97e6887 100644 --- a/examples/ldc/ldc2d_steady_Re10.py +++ b/examples/ldc/ldc2d_steady_Re10.py @@ -1,307 +1,307 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from os import path as osp - -import hydra -import numpy as np -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"NavierStokes": ppsci.equation.NavierStokes(cfg.NU, cfg.RHO, 2, False)} - - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05))} - - # set dataloader config - train_dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - - NPOINT_PDE = 99**2 - NPOINT_TOP = 101 - NPOINT_BOTTOM = 101 - NPOINT_LEFT = 99 - NPOINT_RIGHT = 99 - - # set constraint - pde = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_PDE}, - ppsci.loss.MSELoss("sum"), - evenly=True, - weight_dict=cfg.TRAIN.weight.pde, - name="EQ", - ) - bc_top = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 1, "v": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_TOP}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y: np.isclose(y, 0.05), - name="BC_top", - ) - bc_bottom = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 0, "v": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_BOTTOM}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y: np.isclose(y, -0.05), - name="BC_bottom", - ) - bc_left = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 0, "v": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_LEFT}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y: np.isclose(x, -0.05), - name="BC_left", - ) - bc_right = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 0, "v": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_RIGHT}, - ppsci.loss.MSELoss("sum"), - criteria=lambda x, y: np.isclose(x, 0.05), - name="BC_right", - ) - # wrap constraints together - constraint = { - pde.name: pde, - bc_top.name: bc_top, - bc_bottom.name: bc_bottom, - bc_left.name: bc_left, - bc_right.name: bc_right, - } - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( - **cfg.TRAIN.lr_scheduler, - warmup_epoch=int(0.05 * cfg.TRAIN.epochs), - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - NPOINT_EVAL = NPOINT_PDE - residual_validator = ppsci.validate.GeometryValidator( - equation["NavierStokes"].equations, - {"momentum_x": 0, "continuity": 0, "momentum_y": 0}, - geom["rect"], - { - "dataset": "NamedArrayDataset", - "total_size": NPOINT_EVAL, - "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("sum"), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # set visualizer(optional) - # manually collate input data for visualization, - NPOINT_BC = NPOINT_TOP + NPOINT_BOTTOM + NPOINT_LEFT + NPOINT_RIGHT - vis_points = geom["rect"].sample_interior(NPOINT_PDE + NPOINT_BC, evenly=True) - visualizer = { - 
"visualize_u_v": ppsci.visualize.VisualizerVtu( - vis_points, - {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, - prefix="result_u_v", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"NavierStokes": ppsci.equation.NavierStokes(cfg.NU, cfg.RHO, 2, False)} - - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05))} - - NPOINT_PDE = 99**2 - NPOINT_TOP = 101 - NPOINT_BOTTOM = 101 - NPOINT_LEFT = 99 - NPOINT_RIGHT = 99 - - # set validator - NPOINT_EVAL = NPOINT_PDE - residual_validator = ppsci.validate.GeometryValidator( - equation["NavierStokes"].equations, - {"momentum_x": 0, "continuity": 0, "momentum_y": 0}, - geom["rect"], - { - "dataset": "NamedArrayDataset", - "total_size": NPOINT_EVAL, - "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("sum"), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # set visualizer(optional) - # manually collate input data for visualization, - NPOINT_BC = NPOINT_TOP + NPOINT_BOTTOM + NPOINT_LEFT + NPOINT_RIGHT - vis_points = geom["rect"].sample_interior(NPOINT_PDE + NPOINT_BC, evenly=True) - visualizer = { - "visualize_u_v": ppsci.visualize.VisualizerVtu( - vis_points, - {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, - prefix="result_u_v", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - # visualize prediction for pretrained model(optional) - solver.visualize() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set geometry - geom = {"rect": ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05))} - # manually collate input data for inference - NPOINT_PDE = 99**2 - NPOINT_TOP = 101 - NPOINT_BOTTOM = 101 - NPOINT_LEFT = 99 - NPOINT_RIGHT = 99 - NPOINT_BC = NPOINT_TOP + NPOINT_BOTTOM + NPOINT_LEFT + NPOINT_RIGHT - input_dict = geom["rect"].sample_interior(NPOINT_PDE + NPOINT_BC, evenly=True) - output_dict = 
predictor.predict( - {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - ppsci.visualize.save_vtu_from_dict( - "./ldc2d_steady_Re10.vtu", - {**input_dict, **output_dict}, - input_dict.keys(), - cfg.MODEL.output_keys, - ) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="ldc2d_steady_Re10.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from os import path as osp + +import hydra +import numpy as np +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"NavierStokes": ppsci.equation.NavierStokes(cfg.NU, cfg.RHO, 2, False)} + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05))} + + # set dataloader config + train_dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + + NPOINT_PDE = 99**2 + NPOINT_TOP = 101 + NPOINT_BOTTOM = 101 + NPOINT_LEFT = 99 + NPOINT_RIGHT = 99 + + # set constraint + pde = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_PDE}, + ppsci.loss.MSELoss("sum"), + evenly=True, + weight_dict=cfg.TRAIN.weight.pde, + name="EQ", + ) + bc_top = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 1, "v": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_TOP}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y: np.isclose(y, 0.05), + name="BC_top", + ) + bc_bottom = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 0, "v": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_BOTTOM}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y: np.isclose(y, -0.05), + name="BC_bottom", + ) + bc_left = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 0, "v": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_LEFT}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y: np.isclose(x, 
-0.05), + name="BC_left", + ) + bc_right = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 0, "v": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_RIGHT}, + ppsci.loss.MSELoss("sum"), + criteria=lambda x, y: np.isclose(x, 0.05), + name="BC_right", + ) + # wrap constraints together + constraint = { + pde.name: pde, + bc_top.name: bc_top, + bc_bottom.name: bc_bottom, + bc_left.name: bc_left, + bc_right.name: bc_right, + } + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( + **cfg.TRAIN.lr_scheduler, + warmup_epoch=int(0.05 * cfg.TRAIN.epochs), + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + NPOINT_EVAL = NPOINT_PDE + residual_validator = ppsci.validate.GeometryValidator( + equation["NavierStokes"].equations, + {"momentum_x": 0, "continuity": 0, "momentum_y": 0}, + geom["rect"], + { + "dataset": "NamedArrayDataset", + "total_size": NPOINT_EVAL, + "batch_size": cfg.EVAL.batch_size.residual_validator, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("sum"), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # set visualizer(optional) + # manually collate input data for visualization, + NPOINT_BC = NPOINT_TOP + NPOINT_BOTTOM + NPOINT_LEFT + NPOINT_RIGHT + vis_points = geom["rect"].sample_interior(NPOINT_PDE + NPOINT_BC, evenly=True) + visualizer = { + "visualize_u_v": ppsci.visualize.VisualizerVtu( + vis_points, + {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, + prefix="result_u_v", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"NavierStokes": ppsci.equation.NavierStokes(cfg.NU, cfg.RHO, 2, False)} + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05))} + + NPOINT_PDE = 99**2 + NPOINT_TOP = 101 + NPOINT_BOTTOM = 101 + NPOINT_LEFT = 99 + NPOINT_RIGHT = 99 + + # set validator + NPOINT_EVAL = NPOINT_PDE + residual_validator = ppsci.validate.GeometryValidator( + equation["NavierStokes"].equations, + {"momentum_x": 0, "continuity": 0, "momentum_y": 0}, + geom["rect"], + { + "dataset": "NamedArrayDataset", + "total_size": NPOINT_EVAL, + "batch_size": cfg.EVAL.batch_size.residual_validator, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("sum"), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # set visualizer(optional) + # manually collate input data for visualization, + NPOINT_BC = NPOINT_TOP + NPOINT_BOTTOM + NPOINT_LEFT + NPOINT_RIGHT + vis_points = geom["rect"].sample_interior(NPOINT_PDE + NPOINT_BC, 
evenly=True) + visualizer = { + "visualize_u_v": ppsci.visualize.VisualizerVtu( + vis_points, + {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, + prefix="result_u_v", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + # visualize prediction for pretrained model(optional) + solver.visualize() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set geometry + geom = {"rect": ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05))} + # manually collate input data for inference + NPOINT_PDE = 99**2 + NPOINT_TOP = 101 + NPOINT_BOTTOM = 101 + NPOINT_LEFT = 99 + NPOINT_RIGHT = 99 + NPOINT_BC = NPOINT_TOP + NPOINT_BOTTOM + NPOINT_LEFT + NPOINT_RIGHT + input_dict = geom["rect"].sample_interior(NPOINT_PDE + NPOINT_BC, evenly=True) + output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + ppsci.visualize.save_vtu_from_dict( + "./ldc2d_steady_Re10.vtu", + {**input_dict, **output_dict}, + input_dict.keys(), + cfg.MODEL.output_keys, + ) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="ldc2d_steady_Re10.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/ldc/ldc2d_unsteady_Re10.py b/examples/ldc/ldc2d_unsteady_Re10.py index aeb88868c4..3601c7f4d6 100644 --- a/examples/ldc/ldc2d_unsteady_Re10.py +++ b/examples/ldc/ldc2d_unsteady_Re10.py @@ -1,409 +1,409 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from os import path as osp - -import hydra -import numpy as np -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"NavierStokes": ppsci.equation.NavierStokes(cfg.NU, cfg.RHO, 2, True)} - - # set timestamps(including initial t0) - timestamps = np.linspace(0.0, 1.5, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_rect": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(0.0, 1.5, timestamps=timestamps), - ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05)), - ) - } - - # set dataloader config - train_dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - } - - # pde/bc constraint use t1~tn, initial constraint use t0 - NPOINT_PDE, NTIME_PDE = 99**2, cfg.NTIME_ALL - 1 - NPOINT_TOP, NTIME_TOP = 101, cfg.NTIME_ALL - 1 - NPOINT_DOWN, NTIME_DOWN = 101, cfg.NTIME_ALL - 1 - NPOINT_LEFT, NTIME_LEFT = 99, cfg.NTIME_ALL - 1 - NPOINT_RIGHT, NTIME_RIGHT = 99, cfg.NTIME_ALL - 1 - NPOINT_IC, NTIME_IC = 99**2, 1 - - # set constraint - pde = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["time_rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_PDE * NTIME_PDE}, - ppsci.loss.MSELoss("sum"), - evenly=True, - weight_dict=cfg.TRAIN.weight.pde, # (1) - name="EQ", - ) - bc_top = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 1, "v": 0}, - geom["time_rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_TOP * NTIME_TOP}, - ppsci.loss.MSELoss("sum"), - criteria=lambda t, x, y: np.isclose(y, 0.05), - name="BC_top", - ) - bc_down = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 0, "v": 0}, - geom["time_rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_DOWN * NTIME_DOWN}, - ppsci.loss.MSELoss("sum"), - criteria=lambda t, x, y: np.isclose(y, -0.05), - name="BC_down", - ) - bc_left = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 0, "v": 0}, - geom["time_rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_LEFT * NTIME_LEFT}, - ppsci.loss.MSELoss("sum"), - criteria=lambda t, x, y: np.isclose(x, -0.05), - name="BC_left", - ) - bc_right = ppsci.constraint.BoundaryConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 0, "v": 0}, - geom["time_rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_RIGHT * NTIME_RIGHT}, - ppsci.loss.MSELoss("sum"), - criteria=lambda t, x, y: np.isclose(x, 0.05), - name="BC_right", - ) - ic = ppsci.constraint.InitialConstraint( - {"u": lambda out: out["u"], "v": lambda out: out["v"]}, - {"u": 0, "v": 0}, - geom["time_rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_IC * NTIME_IC}, - ppsci.loss.MSELoss("sum"), - evenly=True, - name="IC", - ) - # wrap constraints together - constraint = { - pde.name: pde, - bc_top.name: bc_top, - bc_down.name: bc_down, - bc_left.name: bc_left, - bc_right.name: bc_right, - ic.name: ic, - } - - # set training hyper-parameters - lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( - **cfg.TRAIN.lr_scheduler, - warmup_epoch=int(0.05 * 
cfg.TRAIN.epochs), - )() - - # set optimizer - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - # set validator - NPOINT_EVAL = NPOINT_PDE * cfg.NTIME_ALL - residual_validator = ppsci.validate.GeometryValidator( - equation["NavierStokes"].equations, - {"momentum_x": 0, "continuity": 0, "momentum_y": 0}, - geom["time_rect"], - { - "dataset": "NamedArrayDataset", - "total_size": NPOINT_EVAL, - "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("sum"), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - with_initial=True, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - # set visualizer(optional) - NPOINT_BC = NPOINT_TOP + NPOINT_DOWN + NPOINT_LEFT + NPOINT_RIGHT - vis_initial_points = geom["time_rect"].sample_initial_interior( - (NPOINT_IC + NPOINT_BC), evenly=True - ) - vis_pde_points = geom["time_rect"].sample_interior( - (NPOINT_PDE + NPOINT_BC) * NTIME_PDE, evenly=True - ) - vis_points = vis_initial_points - # manually collate input data for visualization, - # (interior+boundary) x all timestamps - for t in range(NTIME_PDE): - for key in geom["time_rect"].dim_keys: - vis_points[key] = np.concatenate( - ( - vis_points[key], - vis_pde_points[key][ - t - * (NPOINT_PDE + NPOINT_BC) : (t + 1) - * (NPOINT_PDE + NPOINT_BC) - ], - ) - ) - - visualizer = { - "visualize_u_v": ppsci.visualize.VisualizerVtu( - vis_points, - {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, - num_timestamps=cfg.NTIME_ALL, - prefix="result_u_v", - ) - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"NavierStokes": ppsci.equation.NavierStokes(cfg.NU, cfg.RHO, 2, True)} - - # set timestamps(including initial t0) - timestamps = np.linspace(0.0, 1.5, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_rect": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(0.0, 1.5, timestamps=timestamps), - ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05)), - ) - } - - # pde/bc constraint use t1~tn, initial constraint use t0 - NPOINT_PDE = 99**2 - NPOINT_TOP = 101 - NPOINT_DOWN = 101 - NPOINT_LEFT = 99 - NPOINT_RIGHT = 99 - NPOINT_IC = 99**2 - NTIME_PDE = cfg.NTIME_ALL - 1 - - # set validator - NPOINT_EVAL = NPOINT_PDE * cfg.NTIME_ALL - residual_validator = ppsci.validate.GeometryValidator( - equation["NavierStokes"].equations, - {"momentum_x": 0, "continuity": 0, "momentum_y": 0}, - geom["time_rect"], - { - "dataset": "NamedArrayDataset", - "total_size": NPOINT_EVAL, - "batch_size": cfg.EVAL.batch_size.residual_validator, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("sum"), - evenly=True, - metric={"MSE": ppsci.metric.MSE()}, - with_initial=True, - name="Residual", - ) - validator = 
{residual_validator.name: residual_validator} - - # set visualizer(optional) - NPOINT_BC = NPOINT_TOP + NPOINT_DOWN + NPOINT_LEFT + NPOINT_RIGHT - vis_initial_points = geom["time_rect"].sample_initial_interior( - (NPOINT_IC + NPOINT_BC), evenly=True - ) - vis_pde_points = geom["time_rect"].sample_interior( - (NPOINT_PDE + NPOINT_BC) * NTIME_PDE, evenly=True - ) - vis_points = vis_initial_points - # manually collate input data for visualization, - # (interior+boundary) x all timestamps - for t in range(NTIME_PDE): - for key in geom["time_rect"].dim_keys: - vis_points[key] = np.concatenate( - ( - vis_points[key], - vis_pde_points[key][ - t - * (NPOINT_PDE + NPOINT_BC) : (t + 1) - * (NPOINT_PDE + NPOINT_BC) - ], - ) - ) - - visualizer = { - "visualize_u_v": ppsci.visualize.VisualizerVtu( - vis_points, - {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, - num_timestamps=cfg.NTIME_ALL, - prefix="result_u_v", - ) - } - - # directly evaluate pretrained model(optional) - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - equation=equation, - geom=geom, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - # visualize prediction for pretrained model(optional) - solver.visualize() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # set timestamps(including initial t0) - timestamps = np.linspace(0.0, 1.5, cfg.NTIME_ALL, endpoint=True) - # set time-geometry - geom = { - "time_rect": ppsci.geometry.TimeXGeometry( - ppsci.geometry.TimeDomain(0.0, 1.5, timestamps=timestamps), - ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05)), - ) - } - # manually collate input data for inference - NPOINT_PDE = 99**2 - NPOINT_TOP = 101 - NPOINT_DOWN = 101 - NPOINT_LEFT = 99 - NPOINT_RIGHT = 99 - NPOINT_IC = 99**2 - NTIME_PDE = cfg.NTIME_ALL - 1 - NPOINT_BC = NPOINT_TOP + NPOINT_DOWN + NPOINT_LEFT + NPOINT_RIGHT - input_dict = geom["time_rect"].sample_initial_interior( - (NPOINT_IC + NPOINT_BC), evenly=True - ) - input_pde_dict = geom["time_rect"].sample_interior( - (NPOINT_PDE + NPOINT_BC) * NTIME_PDE, evenly=True - ) - # (interior+boundary) x all timestamps - for t in range(NTIME_PDE): - for key in geom["time_rect"].dim_keys: - input_dict[key] = np.concatenate( - ( - input_dict[key], - input_pde_dict[key][ - t - * (NPOINT_PDE + NPOINT_BC) : (t + 1) - * (NPOINT_PDE + NPOINT_BC) - ], - ) - ) - output_dict = predictor.predict( - {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size - ) - - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - ppsci.visualize.save_vtu_from_dict( - "./ldc2d_unsteady_Re10_pred.vtu", - {**input_dict, **output_dict}, - input_dict.keys(), - cfg.MODEL.output_keys, - cfg.NTIME_ALL, - ) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="ldc2d_unsteady_Re10.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == 
"train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from os import path as osp + +import hydra +import numpy as np +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"NavierStokes": ppsci.equation.NavierStokes(cfg.NU, cfg.RHO, 2, True)} + + # set timestamps(including initial t0) + timestamps = np.linspace(0.0, 1.5, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_rect": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(0.0, 1.5, timestamps=timestamps), + ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05)), + ) + } + + # set dataloader config + train_dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + } + + # pde/bc constraint use t1~tn, initial constraint use t0 + NPOINT_PDE, NTIME_PDE = 99**2, cfg.NTIME_ALL - 1 + NPOINT_TOP, NTIME_TOP = 101, cfg.NTIME_ALL - 1 + NPOINT_DOWN, NTIME_DOWN = 101, cfg.NTIME_ALL - 1 + NPOINT_LEFT, NTIME_LEFT = 99, cfg.NTIME_ALL - 1 + NPOINT_RIGHT, NTIME_RIGHT = 99, cfg.NTIME_ALL - 1 + NPOINT_IC, NTIME_IC = 99**2, 1 + + # set constraint + pde = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["time_rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_PDE * NTIME_PDE}, + ppsci.loss.MSELoss("sum"), + evenly=True, + weight_dict=cfg.TRAIN.weight.pde, # (1) + name="EQ", + ) + bc_top = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 1, "v": 0}, + geom["time_rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_TOP * NTIME_TOP}, + ppsci.loss.MSELoss("sum"), + criteria=lambda t, x, y: np.isclose(y, 0.05), + name="BC_top", + ) + bc_down = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 0, "v": 0}, + geom["time_rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_DOWN * NTIME_DOWN}, + ppsci.loss.MSELoss("sum"), + criteria=lambda t, x, y: np.isclose(y, -0.05), + name="BC_down", + ) + bc_left = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 0, "v": 0}, + geom["time_rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_LEFT * NTIME_LEFT}, + ppsci.loss.MSELoss("sum"), + criteria=lambda t, x, y: np.isclose(x, -0.05), + name="BC_left", + ) + 
bc_right = ppsci.constraint.BoundaryConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 0, "v": 0}, + geom["time_rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_RIGHT * NTIME_RIGHT}, + ppsci.loss.MSELoss("sum"), + criteria=lambda t, x, y: np.isclose(x, 0.05), + name="BC_right", + ) + ic = ppsci.constraint.InitialConstraint( + {"u": lambda out: out["u"], "v": lambda out: out["v"]}, + {"u": 0, "v": 0}, + geom["time_rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_IC * NTIME_IC}, + ppsci.loss.MSELoss("sum"), + evenly=True, + name="IC", + ) + # wrap constraints together + constraint = { + pde.name: pde, + bc_top.name: bc_top, + bc_down.name: bc_down, + bc_left.name: bc_left, + bc_right.name: bc_right, + ic.name: ic, + } + + # set training hyper-parameters + lr_scheduler = ppsci.optimizer.lr_scheduler.Cosine( + **cfg.TRAIN.lr_scheduler, + warmup_epoch=int(0.05 * cfg.TRAIN.epochs), + )() + + # set optimizer + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # set validator + NPOINT_EVAL = NPOINT_PDE * cfg.NTIME_ALL + residual_validator = ppsci.validate.GeometryValidator( + equation["NavierStokes"].equations, + {"momentum_x": 0, "continuity": 0, "momentum_y": 0}, + geom["time_rect"], + { + "dataset": "NamedArrayDataset", + "total_size": NPOINT_EVAL, + "batch_size": cfg.EVAL.batch_size.residual_validator, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("sum"), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + with_initial=True, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # set visualizer(optional) + NPOINT_BC = NPOINT_TOP + NPOINT_DOWN + NPOINT_LEFT + NPOINT_RIGHT + vis_initial_points = geom["time_rect"].sample_initial_interior( + (NPOINT_IC + NPOINT_BC), evenly=True + ) + vis_pde_points = geom["time_rect"].sample_interior( + (NPOINT_PDE + NPOINT_BC) * NTIME_PDE, evenly=True + ) + vis_points = vis_initial_points + # manually collate input data for visualization, + # (interior+boundary) x all timestamps + for t in range(NTIME_PDE): + for key in geom["time_rect"].dim_keys: + vis_points[key] = np.concatenate( + ( + vis_points[key], + vis_pde_points[key][ + t + * (NPOINT_PDE + NPOINT_BC) : (t + 1) + * (NPOINT_PDE + NPOINT_BC) + ], + ) + ) + + visualizer = { + "visualize_u_v": ppsci.visualize.VisualizerVtu( + vis_points, + {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, + num_timestamps=cfg.NTIME_ALL, + prefix="result_u_v", + ) + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"NavierStokes": ppsci.equation.NavierStokes(cfg.NU, cfg.RHO, 2, True)} + + # set timestamps(including initial t0) + timestamps = np.linspace(0.0, 1.5, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_rect": 
ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(0.0, 1.5, timestamps=timestamps), + ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05)), + ) + } + + # pde/bc constraint use t1~tn, initial constraint use t0 + NPOINT_PDE = 99**2 + NPOINT_TOP = 101 + NPOINT_DOWN = 101 + NPOINT_LEFT = 99 + NPOINT_RIGHT = 99 + NPOINT_IC = 99**2 + NTIME_PDE = cfg.NTIME_ALL - 1 + + # set validator + NPOINT_EVAL = NPOINT_PDE * cfg.NTIME_ALL + residual_validator = ppsci.validate.GeometryValidator( + equation["NavierStokes"].equations, + {"momentum_x": 0, "continuity": 0, "momentum_y": 0}, + geom["time_rect"], + { + "dataset": "NamedArrayDataset", + "total_size": NPOINT_EVAL, + "batch_size": cfg.EVAL.batch_size.residual_validator, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("sum"), + evenly=True, + metric={"MSE": ppsci.metric.MSE()}, + with_initial=True, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + # set visualizer(optional) + NPOINT_BC = NPOINT_TOP + NPOINT_DOWN + NPOINT_LEFT + NPOINT_RIGHT + vis_initial_points = geom["time_rect"].sample_initial_interior( + (NPOINT_IC + NPOINT_BC), evenly=True + ) + vis_pde_points = geom["time_rect"].sample_interior( + (NPOINT_PDE + NPOINT_BC) * NTIME_PDE, evenly=True + ) + vis_points = vis_initial_points + # manually collate input data for visualization, + # (interior+boundary) x all timestamps + for t in range(NTIME_PDE): + for key in geom["time_rect"].dim_keys: + vis_points[key] = np.concatenate( + ( + vis_points[key], + vis_pde_points[key][ + t + * (NPOINT_PDE + NPOINT_BC) : (t + 1) + * (NPOINT_PDE + NPOINT_BC) + ], + ) + ) + + visualizer = { + "visualize_u_v": ppsci.visualize.VisualizerVtu( + vis_points, + {"u": lambda d: d["u"], "v": lambda d: d["v"], "p": lambda d: d["p"]}, + num_timestamps=cfg.NTIME_ALL, + prefix="result_u_v", + ) + } + + # directly evaluate pretrained model(optional) + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + equation=equation, + geom=geom, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + # visualize prediction for pretrained model(optional) + solver.visualize() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # set timestamps(including initial t0) + timestamps = np.linspace(0.0, 1.5, cfg.NTIME_ALL, endpoint=True) + # set time-geometry + geom = { + "time_rect": ppsci.geometry.TimeXGeometry( + ppsci.geometry.TimeDomain(0.0, 1.5, timestamps=timestamps), + ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05)), + ) + } + # manually collate input data for inference + NPOINT_PDE = 99**2 + NPOINT_TOP = 101 + NPOINT_DOWN = 101 + NPOINT_LEFT = 99 + NPOINT_RIGHT = 99 + NPOINT_IC = 99**2 + NTIME_PDE = cfg.NTIME_ALL - 1 + NPOINT_BC = NPOINT_TOP + NPOINT_DOWN + NPOINT_LEFT + NPOINT_RIGHT + input_dict = geom["time_rect"].sample_initial_interior( + (NPOINT_IC + NPOINT_BC), evenly=True + ) + input_pde_dict = geom["time_rect"].sample_interior( + (NPOINT_PDE + NPOINT_BC) * NTIME_PDE, 
evenly=True + ) + # (interior+boundary) x all timestamps + for t in range(NTIME_PDE): + for key in geom["time_rect"].dim_keys: + input_dict[key] = np.concatenate( + ( + input_dict[key], + input_pde_dict[key][ + t + * (NPOINT_PDE + NPOINT_BC) : (t + 1) + * (NPOINT_PDE + NPOINT_BC) + ], + ) + ) + output_dict = predictor.predict( + {key: input_dict[key] for key in cfg.MODEL.input_keys}, cfg.INFER.batch_size + ) + + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + ppsci.visualize.save_vtu_from_dict( + "./ldc2d_unsteady_Re10_pred.vtu", + {**input_dict, **output_dict}, + input_dict.keys(), + cfg.MODEL.output_keys, + cfg.NTIME_ALL, + ) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="ldc2d_unsteady_Re10.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/ldc/ldc_2d_Re1000_plain.py b/examples/ldc/ldc_2d_Re1000_plain.py index aa077a5a10..f15c8e9386 100644 --- a/examples/ldc/ldc_2d_Re1000_plain.py +++ b/examples/ldc/ldc_2d_Re1000_plain.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream """ Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/ldc """ @@ -308,3 +309,313 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +""" +Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/ldc +""" + +from __future__ import annotations + +import copy +import os +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot(U_pred: np.ndarray, output_dir: str): + os.makedirs(output_dir, exist_ok=True) + fig_path = osp.join(output_dir, "ac.png") + + fig = plt.figure() + plt.pcolor(U_pred.T, cmap="jet") + plt.colorbar() + fig.savefig(fig_path, bbox_inches="tight", dpi=400) + plt.close() + ppsci.utils.logger.info(f"Saving figure to {fig_path}") + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + def sample_points_on_square_boundary(num_pts_per_side, eps): + # Sample points along the top side (x=1 to x=0, y=1) + top_coords = np.linspace(0, 1, num_pts_per_side) + top = np.column_stack((top_coords, np.ones_like(top_coords))) + + # Sample points along the bottom side (x=0 to x=1, y=0) + bottom_coords = np.linspace(0, 1, num_pts_per_side) + bottom = np.column_stack((bottom_coords, np.zeros_like(bottom_coords))) + + # Sample points along the left side (x=0, y=1 to y=0) + left_coords = np.linspace(0, 1 - eps, num_pts_per_side) + left = np.column_stack((np.zeros_like(left_coords), left_coords)) + + # Sample points along the right side (x=1, y=0 to y=1) + right_coords = np.linspace(0, 1 - eps, num_pts_per_side) + right = np.column_stack((np.ones_like(right_coords), right_coords)) + + # Combine the points from all sides + points = np.vstack((top, bottom, left, 
right)) + + return points + + def train_curriculum(cfg, idx): + cfg_t = copy.deepcopy(cfg) + Re = cfg_t.Re[idx] + cfg_t.output_dir = osp.join(cfg_t.output_dir, f"Re_{int(Re)}") + cfg_t.TRAIN.epochs = cfg_t.epochs[idx] + ppsci.utils.logger.message( + f"Training curriculum {idx + 1}/{len(cfg_t.epochs)} Re={Re:.5g} epochs={cfg_t.epochs[idx]}" + ) + + # set equation + equation = { + "NavierStokes": ppsci.equation.NavierStokes(1 / Re, 1, dim=2, time=False) + } + + # set constraint + data = sio.loadmat(f"./data/ldc_Re{Re}.mat") + u_ref = data["u"].astype(dtype) + v_ref = data["v"].astype(dtype) + U_ref = np.sqrt(u_ref**2 + v_ref**2).reshape(-1, 1) + x_star = data["x"].flatten().astype(dtype) + y_star = data["y"].flatten().astype(dtype) + x0 = x_star[0] + x1 = x_star[-1] + y0 = y_star[0] + y1 = y_star[-1] + + # set N-S pde constraint + def gen_input_batch(): + tx = np.random.uniform( + [x0, y0], + [x1, y1], + (cfg_t.TRAIN.batch_size.pde, 2), + ).astype(dtype) + return {"x": tx[:, 0:1], "y": tx[:, 1:2]} + + def gen_label_batch(input_batch): + return { + "continuity": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + "momentum_x": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + "momentum_y": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + } + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["NavierStokes"].equations, + loss=ppsci.loss.MSELoss("mean"), + name="PDE", + ) + + # set boundary conditions + x_bc = sample_points_on_square_boundary( + cfg_t.TRAIN.batch_size.bc, eps=0.01 + ).astype( + dtype + ) # avoid singularity a right corner for u velocity + v_bc = np.zeros((cfg_t.TRAIN.batch_size.bc * 4, 1), dtype) + u_bc = copy.deepcopy(v_bc) + u_bc[: cfg_t.TRAIN.batch_size.bc] = 1.0 + bc = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": { + "x": x_bc[:, 0:1], + "y": x_bc[:, 1:2], + }, + "label": {"u": u_bc, "v": v_bc}, + }, + }, + output_expr={"u": lambda out: out["u"], "v": lambda out: out["v"]}, + loss=ppsci.loss.MSELoss("mean"), + name="BC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + bc.name: bc, + } + + # set validator + xy_star = misc.cartesian_product(x_star, y_star).astype(dtype) + eval_data = {"x": xy_star[:, 0:1], "y": xy_star[:, 1:2]} + eval_label = {"U": U_ref.reshape([-1, 1])} + U_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg_t.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"U": lambda out: (out["u"] ** 2 + out["v"] ** 2).sqrt()}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="U_validator", + ) + validator = {U_validator.name: U_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + cfg=cfg_t, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + pred_dict = solver.predict( + eval_data, batch_size=cfg._tEVAL.batch_size, return_numpy=True + ) + U_pred = np.sqrt(pred_dict["u"] ** 2 + pred_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + plot(U_pred, cfg_t.output_dir) + + for idx in range(len(cfg.Re)): + train_curriculum(cfg, idx) + + +def evaluate(cfg: DictConfig): + # set model + model = 
ppsci.arch.MLP(**cfg.MODEL) + + data = sio.loadmat(cfg.EVAL_DATA_PATH) + data = dict(data) + u_ref = data["u"].astype(dtype) + v_ref = data["v"].astype(dtype) + U_ref = np.sqrt(u_ref**2 + v_ref**2).reshape(-1, 1) + x_star = data["x"].flatten().astype(dtype) # [nx, ] + y_star = data["y"].flatten().astype(dtype) # [ny, ] + + # set validator + xy_star = misc.cartesian_product(x_star, y_star).astype(dtype) + eval_data = {"x": xy_star[:, 0:1], "y": xy_star[:, 1:2]} + eval_label = {"U": U_ref.reshape([-1, 1])} + U_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"U": lambda out: (out["u"] ** 2 + out["v"] ** 2).sqrt()}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="U_validator", + ) + validator = {U_validator.name: U_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + pred_dict = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + ) + U_pred = np.sqrt(pred_dict["u"] ** 2 + pred_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + # plot + plot(U_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver(model, cfg=cfg) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.EVAL_DATA_PATH) + t_star = data["t"].flatten().astype(dtype) # [nt, ] + x_star = data["x"].flatten().astype(dtype) # [nx, ] + y_star = data["y"].flatten().astype(dtype) # [ny, ] + tx_star = misc.cartesian_product(t_star, x_star).astype(dtype) + + input_dict = {"t": tx_star[:, 0:1], "x": tx_star[:, 1:2]} + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + U_pred = np.sqrt(output_dict["u"] ** 2 + output_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + plot(U_pred, cfg.output_dir) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="ldc_2d_Re1000_plain.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/ldc/ldc_2d_Re3200_piratenet.py b/examples/ldc/ldc_2d_Re3200_piratenet.py index 099c548a19..ced00975a4 100644 --- a/examples/ldc/ldc_2d_Re3200_piratenet.py +++ b/examples/ldc/ldc_2d_Re3200_piratenet.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream """ Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/ldc """ @@ -323,3 +324,328 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +""" +Reference: 
https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/ldc +""" + +from __future__ import annotations + +import copy +import os +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.loss import mtl +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot(U_pred: np.ndarray, output_dir: str): + os.makedirs(output_dir, exist_ok=True) + fig_path = osp.join(output_dir, "ac.png") + + fig = plt.figure() + plt.pcolor(U_pred.T, cmap="jet") + plt.xlabel("x") + plt.ylabel("y") + plt.colorbar() + plt.title(r"Prediction of $U=\sqrt{{u^2+v^2}}$") + fig.savefig(fig_path, bbox_inches="tight") + ppsci.utils.logger.info(f"Saving figure to {fig_path}") + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.PirateNet(**cfg.MODEL) + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + grad_norm = mtl.GradNorm( + model, + 5, + update_freq=cfg.TRAIN.grad_norm.update_freq, + momentum=cfg.TRAIN.grad_norm.momentum, + init_weights=list(cfg.TRAIN.grad_norm.init_weights), + ) + + def sample_points_on_square_boundary(num_pts_per_side, eps): + # Sample points along the top side (x=1 to x=0, y=1) + top_coords = np.linspace(0, 1, num_pts_per_side) + top = np.column_stack((top_coords, np.ones_like(top_coords))) + + # Sample points along the bottom side (x=0 to x=1, y=0) + bottom_coords = np.linspace(0, 1, num_pts_per_side) + bottom = np.column_stack((bottom_coords, np.zeros_like(bottom_coords))) + + # Sample points along the left side (x=0, y=1 to y=0) + left_coords = np.linspace(0, 1 - eps, num_pts_per_side) + left = np.column_stack((np.zeros_like(left_coords), left_coords)) + + # Sample points along the right side (x=1, y=0 to y=1) + right_coords = np.linspace(0, 1 - eps, num_pts_per_side) + right = np.column_stack((np.ones_like(right_coords), right_coords)) + + # Combine the points from all sides + points = np.vstack((top, bottom, left, right)) + + return points + + def train_curriculum(cfg, idx): + cfg_t = copy.deepcopy(cfg) + Re = cfg_t.Re[idx] + cfg_t.output_dir = osp.join(cfg_t.output_dir, f"Re_{int(Re)}") + cfg_t.TRAIN.epochs = cfg_t.epochs[idx] + ppsci.utils.logger.message( + f"Training curriculum {idx + 1}/{len(cfg_t.epochs)} Re={Re:.5g} epochs={cfg_t.epochs[idx]}" + ) + + # set equation + equation = { + "NavierStokes": ppsci.equation.NavierStokes(1 / Re, 1, dim=2, time=False) + } + + # load data + data = sio.loadmat(f"./data/ldc_Re{Re}.mat") + u_ref = data["u"].astype(dtype) + v_ref = data["v"].astype(dtype) + U_ref = np.sqrt(u_ref**2 + v_ref**2).reshape(-1, 1) + x_star = data["x"].flatten().astype(dtype) + y_star = data["y"].flatten().astype(dtype) + x0 = x_star[0] + x1 = x_star[-1] + y0 = y_star[0] + y1 = y_star[-1] + + # set N-S pde constraint + def gen_input_batch(): + tx = np.random.uniform( + [x0, y0], + [x1, y1], + (cfg_t.TRAIN.batch_size.pde, 2), + ).astype(dtype) + return {"x": tx[:, 0:1], "y": tx[:, 1:2]} + + def gen_label_batch(input_batch): + return { + "continuity": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + "momentum_x": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + "momentum_y": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + } + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + 
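+                    # `input`/`label` are callables here: the dataset invokes them to draw a
+                    # fresh batch of random interior collocation points (with zero-valued
+                    # PDE residual labels) rather than iterating over a fixed array.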
"input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["NavierStokes"].equations, + loss=ppsci.loss.MSELoss("mean"), + name="PDE", + ) + + # set boundary conditions + x_bc = sample_points_on_square_boundary( + cfg_t.TRAIN.batch_size.bc, eps=0.0 + ).astype( + dtype + ) # avoid singularity a right corner for u velocity + v_bc = np.zeros((cfg_t.TRAIN.batch_size.bc * 4, 1), dtype) + u_bc = copy.deepcopy(v_bc) + lid_bc_fn = lambda x: 1 - np.cosh(50 * (x - 0.5)) / np.cosh(50 * 0.5) + u_bc[: cfg_t.TRAIN.batch_size.bc] = lid_bc_fn( + x_bc[: cfg_t.TRAIN.batch_size.bc, 0:1] + ) + bc = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": { + "x": x_bc[:, 0:1], + "y": x_bc[:, 1:2], + }, + "label": {"u": u_bc, "v": v_bc}, + }, + }, + output_expr={"u": lambda out: out["u"], "v": lambda out: out["v"]}, + loss=ppsci.loss.MSELoss("mean"), + name="BC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + bc.name: bc, + } + + # set validator + xy_star = misc.cartesian_product(x_star, y_star).astype(dtype) + eval_data = {"x": xy_star[:, 0:1], "y": xy_star[:, 1:2]} + eval_label = {"U": U_ref.reshape([-1, 1])} + U_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg_t.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"U": lambda out: (out["u"] ** 2 + out["v"] ** 2).sqrt()}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="U_validator", + ) + validator = {U_validator.name: U_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + loss_aggregator=grad_norm, + cfg=cfg_t, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + pred_dict = solver.predict( + eval_data, batch_size=cfg_t.EVAL.batch_size, return_numpy=True + ) + U_pred = np.sqrt(pred_dict["u"] ** 2 + pred_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + plot(U_pred, cfg_t.output_dir) + + for idx in range(len(cfg.Re)): + train_curriculum(cfg, idx) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.PirateNet(**cfg.MODEL) + + data = sio.loadmat(cfg.EVAL_DATA_PATH) + data = dict(data) + u_ref = data["u"].astype(dtype) + v_ref = data["v"].astype(dtype) + U_ref = np.sqrt(u_ref**2 + v_ref**2).reshape(-1, 1) + x_star = data["x"].flatten().astype(dtype) # [nx, ] + y_star = data["y"].flatten().astype(dtype) # [ny, ] + + # set validator + xy_star = misc.cartesian_product(x_star, y_star).astype(dtype) + eval_data = {"x": xy_star[:, 0:1], "y": xy_star[:, 1:2]} + eval_label = {"U": U_ref.reshape([-1, 1])} + U_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"U": lambda out: (out["u"] ** 2 + out["v"] ** 2).sqrt()}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="U_validator", + ) + validator = {U_validator.name: U_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + pred_dict = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + ) + U_pred = 
np.sqrt(pred_dict["u"] ** 2 + pred_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + # plot + plot(U_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.PirateNet(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver(model, cfg=cfg) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.EVAL_DATA_PATH) + data = dict(data) + x_star = data["x"].flatten().astype(dtype) # [nx, ] + y_star = data["y"].flatten().astype(dtype) # [ny, ] + xy_star = misc.cartesian_product(x_star, y_star).astype(dtype) + input_dict = {"x": xy_star[:, 0:1], "y": xy_star[:, 1:2]} + + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + U_pred = np.sqrt(output_dict["u"] ** 2 + output_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + plot(U_pred, cfg.output_dir) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="ldc_2d_Re3200_piratenet.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/ldc/ldc_2d_Re3200_sota.py b/examples/ldc/ldc_2d_Re3200_sota.py index ebb0c4aff2..b350bb8407 100644 --- a/examples/ldc/ldc_2d_Re3200_sota.py +++ b/examples/ldc/ldc_2d_Re3200_sota.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream """ Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/ldc """ @@ -319,3 +320,324 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +""" +Reference: https://github.com/PredictiveIntelligenceLab/jaxpi/tree/main/examples/ldc +""" + +from __future__ import annotations + +import copy +import os +from os import path as osp + +import hydra +import numpy as np +import paddle +import scipy.io as sio +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.loss import mtl +from ppsci.utils import misc + +dtype = paddle.get_default_dtype() + + +def plot(U_pred: np.ndarray, output_dir: str): + os.makedirs(output_dir, exist_ok=True) + fig_path = osp.join(output_dir, "ac.png") + + fig = plt.figure() + plt.pcolor(U_pred.T, cmap="jet") + plt.xlabel("x") + plt.ylabel("y") + plt.colorbar() + plt.title(r"Prediction of $U=\sqrt{{u^2+v^2}}$") + fig.savefig(fig_path, bbox_inches="tight") + ppsci.utils.logger.info(f"Saving figure to {fig_path}") + plt.close() + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.ModifiedMLP(**cfg.MODEL) + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + grad_norm = mtl.GradNorm( + model, + 5, + update_freq=cfg.TRAIN.grad_norm.update_freq, + momentum=cfg.TRAIN.grad_norm.momentum, + ) + + def 
sample_points_on_square_boundary(num_pts_per_side, eps): + # Sample points along the top side (x=1 to x=0, y=1) + top_coords = np.linspace(0, 1, num_pts_per_side) + top = np.column_stack((top_coords, np.ones_like(top_coords))) + + # Sample points along the bottom side (x=0 to x=1, y=0) + bottom_coords = np.linspace(0, 1, num_pts_per_side) + bottom = np.column_stack((bottom_coords, np.zeros_like(bottom_coords))) + + # Sample points along the left side (x=0, y=1 to y=0) + left_coords = np.linspace(0, 1 - eps, num_pts_per_side) + left = np.column_stack((np.zeros_like(left_coords), left_coords)) + + # Sample points along the right side (x=1, y=0 to y=1) + right_coords = np.linspace(0, 1 - eps, num_pts_per_side) + right = np.column_stack((np.ones_like(right_coords), right_coords)) + + # Combine the points from all sides + points = np.vstack((top, bottom, left, right)) + + return points + + def train_curriculum(cfg, idx): + cfg_t = copy.deepcopy(cfg) + Re = cfg_t.Re[idx] + cfg_t.output_dir = osp.join(cfg_t.output_dir, f"Re_{int(Re)}") + cfg_t.TRAIN.epochs = cfg_t.epochs[idx] + ppsci.utils.logger.message( + f"Training curriculum {idx + 1}/{len(cfg_t.epochs)} Re={Re:.5g} epochs={cfg_t.epochs[idx]}" + ) + + # set equation + equation = { + "NavierStokes": ppsci.equation.NavierStokes(1 / Re, 1, dim=2, time=False) + } + + # load data + data = sio.loadmat(f"./data/ldc_Re{Re}.mat") + u_ref = data["u"].astype(dtype) + v_ref = data["v"].astype(dtype) + U_ref = np.sqrt(u_ref**2 + v_ref**2).reshape(-1, 1) + x_star = data["x"].flatten().astype(dtype) + y_star = data["y"].flatten().astype(dtype) + x0 = x_star[0] + x1 = x_star[-1] + y0 = y_star[0] + y1 = y_star[-1] + + # set N-S pde constraint + def gen_input_batch(): + tx = np.random.uniform( + [x0, y0], + [x1, y1], + (cfg_t.TRAIN.batch_size.pde, 2), + ).astype(dtype) + return {"x": tx[:, 0:1], "y": tx[:, 1:2]} + + def gen_label_batch(input_batch): + return { + "continuity": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + "momentum_x": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + "momentum_y": np.zeros([cfg_t.TRAIN.batch_size.pde, 1], dtype), + } + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": gen_input_batch, + "label": gen_label_batch, + }, + }, + output_expr=equation["NavierStokes"].equations, + loss=ppsci.loss.MSELoss("mean"), + name="PDE", + ) + + # set boundary conditions + x_bc = sample_points_on_square_boundary( + cfg_t.TRAIN.batch_size.bc, eps=0.01 + ).astype( + dtype + ) # avoid singularity a right corner for u velocity + v_bc = np.zeros((cfg_t.TRAIN.batch_size.bc * 4, 1), dtype) + u_bc = copy.deepcopy(v_bc) + u_bc[: cfg_t.TRAIN.batch_size.bc] = 1.0 + bc = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": { + "x": x_bc[:, 0:1], + "y": x_bc[:, 1:2], + }, + "label": {"u": u_bc, "v": v_bc}, + }, + }, + output_expr={"u": lambda out: out["u"], "v": lambda out: out["v"]}, + loss=ppsci.loss.MSELoss("mean"), + name="BC", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + bc.name: bc, + } + + # set validator + xy_star = misc.cartesian_product(x_star, y_star).astype(dtype) + eval_data = {"x": xy_star[:, 0:1], "y": xy_star[:, 1:2]} + eval_label = {"U": U_ref.reshape([-1, 1])} + U_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg_t.EVAL.batch_size, + }, + 
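+            # The validator checks the predicted speed U = sqrt(u^2 + v^2) against the
+            # reference solution and reports a relative L2 error via the L2Rel metric.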
ppsci.loss.MSELoss("mean"), + {"U": lambda out: (out["u"] ** 2 + out["v"] ** 2).sqrt()}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="U_validator", + ) + validator = {U_validator.name: U_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + validator=validator, + loss_aggregator=grad_norm, + cfg=cfg_t, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + pred_dict = solver.predict( + eval_data, batch_size=cfg_t.EVAL.batch_size, return_numpy=True + ) + U_pred = np.sqrt(pred_dict["u"] ** 2 + pred_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + plot(U_pred, cfg_t.output_dir) + + for idx in range(len(cfg.Re)): + train_curriculum(cfg, idx) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.ModifiedMLP(**cfg.MODEL) + + data = sio.loadmat(cfg.EVAL_DATA_PATH) + data = dict(data) + u_ref = data["u"].astype(dtype) + v_ref = data["v"].astype(dtype) + U_ref = np.sqrt(u_ref**2 + v_ref**2).reshape(-1, 1) + x_star = data["x"].flatten().astype(dtype) # [nx, ] + y_star = data["y"].flatten().astype(dtype) # [ny, ] + + # set validator + xy_star = misc.cartesian_product(x_star, y_star).astype(dtype) + eval_data = {"x": xy_star[:, 0:1], "y": xy_star[:, 1:2]} + eval_label = {"U": U_ref.reshape([-1, 1])} + U_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": eval_data, + "label": eval_label, + }, + "batch_size": cfg.EVAL.batch_size, + }, + ppsci.loss.MSELoss("mean"), + {"U": lambda out: (out["u"] ** 2 + out["v"] ** 2).sqrt()}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="U_validator", + ) + validator = {U_validator.name: U_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + pred_dict = solver.predict( + eval_data, batch_size=cfg.EVAL.batch_size, return_numpy=True + ) + U_pred = np.sqrt(pred_dict["u"] ** 2 + pred_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + # plot + plot(U_pred, cfg.output_dir) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.ModifiedMLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver(model, cfg=cfg) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = sio.loadmat(cfg.EVAL_DATA_PATH) + data = dict(data) + x_star = data["x"].flatten().astype(dtype) # [nx, ] + y_star = data["y"].flatten().astype(dtype) # [ny, ] + xy_star = misc.cartesian_product(x_star, y_star).astype(dtype) + input_dict = {"x": xy_star[:, 0:1], "y": xy_star[:, 1:2]} + + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + U_pred = np.sqrt(output_dict["u"] ** 2 + output_dict["v"] ** 2).reshape( + [len(x_star), len(y_star)] + ) + plot(U_pred, cfg.output_dir) + + +@hydra.main( + version_base=None, config_path="./conf", 
config_name="ldc_2d_Re3200_sota.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/lorenz/conf/enn.yaml b/examples/lorenz/conf/enn.yaml index 1157613623..164432ebd6 100644 --- a/examples/lorenz/conf/enn.yaml +++ b/examples/lorenz/conf/enn.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -52,3 +53,58 @@ TRAIN: EVAL: batch_size: 512 pretrained_model_path: null +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_lorenz_enn + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +TRAIN_BLOCK_SIZE: 16 +VALID_BLOCK_SIZE: 32 +TRAIN_FILE_PATH: ./datasets/lorenz_training_rk.hdf5 +VALID_FILE_PATH: ./datasets/lorenz_valid_rk.hdf5 + +# model settings +MODEL: + input_keys: ["states"] + output_keys: ["pred_states", "recover_states"] + +# training settings +TRAIN: + epochs: 300 + batch_size: 512 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.001 + gamma: 0.995 + by_epoch: true + optimizer: + weight_decay: 1e-8 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 512 + pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/lorenz/conf/transformer.yaml b/examples/lorenz/conf/transformer.yaml index 055f439bc3..ef67895a18 100644 --- a/examples/lorenz/conf/transformer.yaml +++ b/examples/lorenz/conf/transformer.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -92,3 +93,89 @@ INFER: max_batch_size: 64 num_cpu_threads: 4 batch_size: 16 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_lorenz_transformer/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + - EMBEDDING_MODEL_PATH + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 +TRAIN_BLOCK_SIZE: 64 +VALID_BLOCK_SIZE: 256 +TRAIN_FILE_PATH: ./datasets/lorenz_training_rk.hdf5 +VALID_FILE_PATH: ./datasets/lorenz_valid_rk.hdf5 + +# set working condition +EMBEDDING_MODEL_PATH: ./outputs_lorenz_enn/checkpoints/latest +VIS_DATA_NUMS: 16 + +# model settings +MODEL: + input_keys: ["embeds"] + output_keys: ["pred_embeds"] + num_layers: 4 + num_ctx: 64 + embed_size: 32 + num_heads: 4 + +# training settings +TRAIN: + epochs: 200 + batch_size: 16 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.001 + T_0: 
14 + T_mult: 2 + eta_min: 1.0e-9 + optimizer: + weight_decay: 1.0e-8 + eval_during_train: true + eval_freq: 50 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 16 + pretrained_model_path: null + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/lorenz/lorenz_transformer_pretrained.pdparams + export_path: ./inference/lorenz_transformer + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: false + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 64 + num_cpu_threads: 4 + batch_size: 16 +>>>>>>> Stashed changes diff --git a/examples/lorenz/train_enn.py b/examples/lorenz/train_enn.py index d3450e2976..81146fbb6a 100644 --- a/examples/lorenz/train_enn.py +++ b/examples/lorenz/train_enn.py @@ -1,263 +1,263 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Two-stage training -# 1. Train a embedding model by running train_enn.py. -# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. - -# This file is for step1: training a embedding model. -# This file is based on PaddleScience/ppsci API. 
-from os import path as osp - -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def get_mean_std(data: np.ndarray): - mean = np.asarray( - [np.mean(data[:, :, 0]), np.mean(data[:, :, 1]), np.mean(data[:, :, 2])] - ).reshape(1, 3) - std = np.asarray( - [np.std(data[:, :, 0]), np.std(data[:, :, 1]), np.std(data[:, :, 2])] - ).reshape(1, 3) - return mean, std - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) - regularization_key = "k_matrix" - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "LorenzDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 16, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELossWithL2Decay( - regularization_dict={ - regularization_key: 1.0e-1 * (cfg.TRAIN_BLOCK_SIZE - 1) - } - ), - { - key: lambda out, k=key: out[k] - for key in cfg.MODEL.output_keys + (regularization_key,) - }, - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - - # manually init model - data_mean, data_std = get_mean_std(sup_constraint.data_loader.dataset.data) - model = ppsci.arch.LorenzEmbedding( - cfg.MODEL.input_keys, - cfg.MODEL.output_keys + (regularization_key,), - data_mean, - data_std, - ) - - # init optimizer and lr scheduler - clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - iters_per_epoch=ITERS_PER_EPOCH, - decay_steps=ITERS_PER_EPOCH, - **cfg.TRAIN.lr_scheduler, - )() - optimizer = ppsci.optimizer.Adam( - lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer - )(model) - - # manually build validator - weights = (1.0 * (cfg.VALID_BLOCK_SIZE - 1), 1.0e4 * cfg.VALID_BLOCK_SIZE) - eval_dataloader_cfg = { - "dataset": { - "name": "LorenzDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 32, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set random seed for 
reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) - regularization_key = "k_matrix" - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "LorenzDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 16, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELossWithL2Decay( - regularization_dict={ - regularization_key: 1.0e-1 * (cfg.TRAIN_BLOCK_SIZE - 1) - } - ), - { - key: lambda out, k=key: out[k] - for key in cfg.MODEL.output_keys + (regularization_key,) - }, - name="Sup", - ) - - # manually init model - data_mean, data_std = get_mean_std(sup_constraint.data_loader.dataset.data) - model = ppsci.arch.LorenzEmbedding( - cfg.MODEL.input_keys, - cfg.MODEL.output_keys + (regularization_key,), - data_mean, - data_std, - ) - - # manually build validator - weights = (1.0 * (cfg.VALID_BLOCK_SIZE - 1), 1.0e4 * cfg.VALID_BLOCK_SIZE) - eval_dataloader_cfg = { - "dataset": { - "name": "LorenzDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 32, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="enn.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Two-stage training +# 1. Train a embedding model by running train_enn.py. +# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. + +# This file is for step1: training a embedding model. 
+# This file is based on PaddleScience/ppsci API. +from os import path as osp + +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def get_mean_std(data: np.ndarray): + mean = np.asarray( + [np.mean(data[:, :, 0]), np.mean(data[:, :, 1]), np.mean(data[:, :, 2])] + ).reshape(1, 3) + std = np.asarray( + [np.std(data[:, :, 0]), np.std(data[:, :, 1]), np.std(data[:, :, 2])] + ).reshape(1, 3) + return mean, std + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) + regularization_key = "k_matrix" + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "LorenzDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 16, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELossWithL2Decay( + regularization_dict={ + regularization_key: 1.0e-1 * (cfg.TRAIN_BLOCK_SIZE - 1) + } + ), + { + key: lambda out, k=key: out[k] + for key in cfg.MODEL.output_keys + (regularization_key,) + }, + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + + # manually init model + data_mean, data_std = get_mean_std(sup_constraint.data_loader.dataset.data) + model = ppsci.arch.LorenzEmbedding( + cfg.MODEL.input_keys, + cfg.MODEL.output_keys + (regularization_key,), + data_mean, + data_std, + ) + + # init optimizer and lr scheduler + clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + iters_per_epoch=ITERS_PER_EPOCH, + decay_steps=ITERS_PER_EPOCH, + **cfg.TRAIN.lr_scheduler, + )() + optimizer = ppsci.optimizer.Adam( + lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer + )(model) + + # manually build validator + weights = (1.0 * (cfg.VALID_BLOCK_SIZE - 1), 1.0e4 * cfg.VALID_BLOCK_SIZE) + eval_dataloader_cfg = { + "dataset": { + "name": "LorenzDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 32, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=True, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def 
evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e4 * cfg.TRAIN_BLOCK_SIZE) + regularization_key = "k_matrix" + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "LorenzDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 16, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELossWithL2Decay( + regularization_dict={ + regularization_key: 1.0e-1 * (cfg.TRAIN_BLOCK_SIZE - 1) + } + ), + { + key: lambda out, k=key: out[k] + for key in cfg.MODEL.output_keys + (regularization_key,) + }, + name="Sup", + ) + + # manually init model + data_mean, data_std = get_mean_std(sup_constraint.data_loader.dataset.data) + model = ppsci.arch.LorenzEmbedding( + cfg.MODEL.input_keys, + cfg.MODEL.output_keys + (regularization_key,), + data_mean, + data_std, + ) + + # manually build validator + weights = (1.0 * (cfg.VALID_BLOCK_SIZE - 1), 1.0e4 * cfg.VALID_BLOCK_SIZE) + eval_dataloader_cfg = { + "dataset": { + "name": "LorenzDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 32, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + + +@hydra.main(version_base=None, config_path="./conf", config_name="enn.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/lorenz/train_transformer.py b/examples/lorenz/train_transformer.py index a68c404b9e..0985428339 100644 --- a/examples/lorenz/train_transformer.py +++ b/examples/lorenz/train_transformer.py @@ -1,335 +1,335 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# Two-stage training -# 1. Train a embedding model by running train_enn.py. -# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. - -# This file is for step2: training a transformer model, based on frozen pretrained embedding model. -# This file is based on PaddleScience/ppsci API. -from os import path as osp -from typing import Dict - -import hydra -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.arch import base -from ppsci.utils import logger -from ppsci.utils import save_load - - -def build_embedding_model(embedding_model_path: str) -> ppsci.arch.LorenzEmbedding: - input_keys = ("states",) - output_keys = ("pred_states", "recover_states") - regularization_key = "k_matrix" - model = ppsci.arch.LorenzEmbedding(input_keys, output_keys + (regularization_key,)) - save_load.load_pretrain(model, embedding_model_path) - return model - - -class OutputTransform(object): - def __init__(self, model: base.Arch): - self.model = model - self.model.eval() - - def __call__(self, x: Dict[str, paddle.Tensor]): - pred_embeds = x["pred_embeds"] - pred_states = self.model.decoder(pred_embeds) - - return pred_states - - -def train(cfg: DictConfig): - # train time-series: 2048 time-steps: 256 block-size: 64 stride: 64 - # valid time-series: 64 time-steps: 1024 block-size: 256 stride: 1024 - # test time-series: 256 time-steps: 1024 - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - output_transform = OutputTransform(embedding_model) - - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "LorenzDataset", - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "file_path": cfg.TRAIN_FILE_PATH, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 64, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELoss(), - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(constraint["Sup"].data_loader) - - # manually init model - model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) - - # init optimizer and lr scheduler - clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) - lr_scheduler = ppsci.optimizer.lr_scheduler.CosineWarmRestarts( - iters_per_epoch=ITERS_PER_EPOCH, **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam( - lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer - )(model) - - # manually build validator - eval_dataloader_cfg = { - "dataset": { - "name": "LorenzDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - 
metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - # set visualizer(optional) - states = mse_validator.data_loader.dataset.data - embedding_data = mse_validator.data_loader.dataset.embedding_data - vis_data = { - "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1, :], - "states": states[: cfg.VIS_DATA_NUMS, 1:, :], - } - - visualizer = { - "visualize_states": ppsci.visualize.VisualizerScatter3D( - vis_data, - { - "pred_states": lambda d: output_transform(d), - "states": lambda d: d["states"], - }, - num_timestamps=1, - prefix="result_states", - ) - } - - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - validator=validator, - visualizer=visualizer, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # directly evaluate pretrained model(optional) - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - output_transform = OutputTransform(embedding_model) - - # manually init model - model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) - - # manually build validator - eval_dataloader_cfg = { - "dataset": { - "name": "LorenzDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - # set visualizer(optional) - states = mse_validator.data_loader.dataset.data - embedding_data = mse_validator.data_loader.dataset.embedding_data - vis_datas = { - "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1, :], - "states": states[: cfg.VIS_DATA_NUMS, 1:, :], - } - - visualizer = { - "visulzie_states": ppsci.visualize.VisualizerScatter3D( - vis_datas, - { - "pred_states": lambda d: output_transform(d), - "states": lambda d: d["states"], - }, - num_timestamps=1, - prefix="result_states", - ) - } - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - # visualize prediction for pretrained model(optional) - solver.visualize() - - -def export(cfg: DictConfig): - # set model - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - model_cfg = { - **cfg.MODEL, - "embedding_model": embedding_model, - "input_keys": ["states"], - "output_keys": ["pred_states"], - } - model = ppsci.arch.PhysformerGPT2(**model_cfg) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([None, 255, 3], "float32", name=key) - for key in model.input_keys - }, - ] - - solver.export(input_spec, cfg.INFER.export_path) - - -def 
inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - dataset_cfg = { - "name": "LorenzDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - } - - dataset = ppsci.data.dataset.build_dataset(dataset_cfg) - - input_dict = { - "states": dataset.data[: cfg.VIS_DATA_NUMS, :-1, :], - } - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_keys = ["pred_states"] - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(output_keys, output_dict.keys()) - } - - input_dict = { - "states": dataset.data[: cfg.VIS_DATA_NUMS, 1:, :], - } - - data_dict = {**input_dict, **output_dict} - for i in range(cfg.VIS_DATA_NUMS): - ppsci.visualize.save_plot_from_3d_dict( - f"./lorenz_transformer_pred_{i}", - {key: value[i] for key, value in data_dict.items()}, - ("states", "pred_states"), - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="transformer.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Two-stage training +# 1. Train a embedding model by running train_enn.py. +# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. + +# This file is for step2: training a transformer model, based on frozen pretrained embedding model. +# This file is based on PaddleScience/ppsci API. 
+from os import path as osp +from typing import Dict + +import hydra +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.arch import base +from ppsci.utils import logger +from ppsci.utils import save_load + + +def build_embedding_model(embedding_model_path: str) -> ppsci.arch.LorenzEmbedding: + input_keys = ("states",) + output_keys = ("pred_states", "recover_states") + regularization_key = "k_matrix" + model = ppsci.arch.LorenzEmbedding(input_keys, output_keys + (regularization_key,)) + save_load.load_pretrain(model, embedding_model_path) + return model + + +class OutputTransform(object): + def __init__(self, model: base.Arch): + self.model = model + self.model.eval() + + def __call__(self, x: Dict[str, paddle.Tensor]): + pred_embeds = x["pred_embeds"] + pred_states = self.model.decoder(pred_embeds) + + return pred_states + + +def train(cfg: DictConfig): + # train time-series: 2048 time-steps: 256 block-size: 64 stride: 64 + # valid time-series: 64 time-steps: 1024 block-size: 256 stride: 1024 + # test time-series: 256 time-steps: 1024 + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + output_transform = OutputTransform(embedding_model) + + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "LorenzDataset", + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "file_path": cfg.TRAIN_FILE_PATH, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 64, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELoss(), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(constraint["Sup"].data_loader) + + # manually init model + model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) + + # init optimizer and lr scheduler + clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) + lr_scheduler = ppsci.optimizer.lr_scheduler.CosineWarmRestarts( + iters_per_epoch=ITERS_PER_EPOCH, **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam( + lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer + )(model) + + # manually build validator + eval_dataloader_cfg = { + "dataset": { + "name": "LorenzDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + # set visualizer(optional) + states = mse_validator.data_loader.dataset.data + embedding_data = mse_validator.data_loader.dataset.embedding_data + vis_data = { + "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1, :], + "states": states[: cfg.VIS_DATA_NUMS, 1:, :], + } + + visualizer = { + "visualize_states": 
ppsci.visualize.VisualizerScatter3D( + vis_data, + { + "pred_states": lambda d: output_transform(d), + "states": lambda d: d["states"], + }, + num_timestamps=1, + prefix="result_states", + ) + } + + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + validator=validator, + visualizer=visualizer, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # directly evaluate pretrained model(optional) + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + output_transform = OutputTransform(embedding_model) + + # manually init model + model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) + + # manually build validator + eval_dataloader_cfg = { + "dataset": { + "name": "LorenzDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + # set visualizer(optional) + states = mse_validator.data_loader.dataset.data + embedding_data = mse_validator.data_loader.dataset.embedding_data + vis_datas = { + "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1, :], + "states": states[: cfg.VIS_DATA_NUMS, 1:, :], + } + + visualizer = { + "visulzie_states": ppsci.visualize.VisualizerScatter3D( + vis_datas, + { + "pred_states": lambda d: output_transform(d), + "states": lambda d: d["states"], + }, + num_timestamps=1, + prefix="result_states", + ) + } + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + # visualize prediction for pretrained model(optional) + solver.visualize() + + +def export(cfg: DictConfig): + # set model + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + model_cfg = { + **cfg.MODEL, + "embedding_model": embedding_model, + "input_keys": ["states"], + "output_keys": ["pred_states"], + } + model = ppsci.arch.PhysformerGPT2(**model_cfg) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 255, 3], "float32", name=key) + for key in model.input_keys + }, + ] + + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + dataset_cfg = { + "name": "LorenzDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + } + + dataset = ppsci.data.dataset.build_dataset(dataset_cfg) + + input_dict = { + "states": 
dataset.data[: cfg.VIS_DATA_NUMS, :-1, :], + } + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to cfg.INFER.output_keys + output_keys = ["pred_states"] + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(output_keys, output_dict.keys()) + } + + input_dict = { + "states": dataset.data[: cfg.VIS_DATA_NUMS, 1:, :], + } + + data_dict = {**input_dict, **output_dict} + for i in range(cfg.VIS_DATA_NUMS): + ppsci.visualize.save_plot_from_3d_dict( + f"./lorenz_transformer_pred_{i}", + {key: value[i] for key, value in data_dict.items()}, + ("states", "pred_states"), + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="transformer.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/moflow/conf/moflow_optimize.yaml b/examples/moflow/conf/moflow_optimize.yaml index 34b948fbd4..8a8d0ee840 100644 --- a/examples/moflow/conf/moflow_optimize.yaml +++ b/examples/moflow/conf/moflow_optimize.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -106,3 +107,112 @@ OPTIMIZE: temperature: 1.0 consopt: true sim_cutoff: 0.0 +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_moflow_optimize/${data_name} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +data_name: zinc250k # data select:qm9/zinc250k +seed: 1 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set training hyper-parameters +qm9: + b_n_flow: 10 + b_n_block: 1 + b_hidden_ch: [128,128] + a_n_flow: 27 + a_n_block: 1 + a_hidden_gnn: [64] + a_hidden_lin: [128,64] + mask_row_size_list: [1] + mask_row_stride_list: [1] + learn_dist: True + noise_scale: 0.6 + b_conv_lu: 1 + atomic_num_list: [6, 7, 8, 9, 0] + b_n_type: 4 + b_n_squeeze: 3 + a_n_node: 9 + valid_idx: valid_idx_qm9.json + label_keys: ['A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv'] + smiles_col: SMILES1 + +zinc250k: + b_n_flow: 10 + b_n_block: 1 + b_hidden_ch: [512,512] + a_n_flow: 38 + a_n_block: 1 + a_hidden_gnn: [256] + a_hidden_lin: [512,64] + mask_row_size_list: [1] + mask_row_stride_list: [1] + learn_dist: True + noise_scale: 0.6 + b_conv_lu: 2 + atomic_num_list: [6, 7, 8, 9, 15, 16, 17, 35, 53, 0] + b_n_type: 4 + b_n_squeeze: 19 + a_n_node: 38 + valid_idx: valid_idx_zinc.json + label_keys: ['logP', 'qed', 'SAS'] + smiles_col: smiles + +# set data path +FILE_PATH: ./datasets/moflow + +# model settings +MODEL: + input_keys: ["nodes", "edges"] + output_keys: ["output", "sum_log_det"] + hyper_params: null + +MODEL_Prop: + input_keys: ["nodes", "edges"] + output_keys: ["output", "sum_log_det"] + model: null + hidden_size: null + +# evaluation settings +EVAL: + pretrained_model_path: null + 
compute_metric_by_batch: false + eval_with_no_grad: true + batch_size: 100 + +# optimize settings +OPTIMIZE: + property_name: plogp # qed/plogp + batch_size: 256 + topk: 800 + debug: false + topscore: false + max_epochs: 3 + learning_rate: 0.001 + weight_decay: 1e-2 + hidden: [16] # Hidden dimension list for output regression + temperature: 1.0 + consopt: true + sim_cutoff: 0.0 +>>>>>>> Stashed changes diff --git a/examples/moflow/conf/moflow_test.yaml b/examples/moflow/conf/moflow_test.yaml index 2f87d32ca1..2c3a977497 100644 --- a/examples/moflow/conf/moflow_test.yaml +++ b/examples/moflow/conf/moflow_test.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -125,3 +126,131 @@ Intergrid: #在潜在空间进行插值,分子网格进行可视化生成分 inter_times: 40 correct_validity: true n_experiments: 0 +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_moflow_test/${data_name} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: eval # running mode: train/eval +data_name: zinc250k # data select:qm9/zinc250k +seed: 1 +output_dir: ${hydra:run.dir} +save_score: true + +# set testing hyper-parameters +qm9: + b_n_flow: 10 + b_n_block: 1 + b_hidden_ch: [128,128] + a_n_flow: 27 + a_n_block: 1 + a_hidden_gnn: [64] + a_hidden_lin: [128,64] + mask_row_size_list: [1] + mask_row_stride_list: [1] + learn_dist: True + noise_scale: 0.6 + b_conv_lu: 1 + atomic_num_list: [6, 7, 8, 9, 0] + b_n_type: 4 + b_n_squeeze: 3 + a_n_node: 9 + valid_idx: valid_idx_qm9.json + label_keys: ['A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv'] + smiles_col: SMILES1 + +zinc250k: + b_n_flow: 10 + b_n_block: 1 + b_hidden_ch: [512,512] + a_n_flow: 38 + a_n_block: 1 + a_hidden_gnn: [256] + a_hidden_lin: [512,64] + mask_row_size_list: [1] + mask_row_stride_list: [1] + learn_dist: True + noise_scale: 0.6 + b_conv_lu: 2 + atomic_num_list: [6, 7, 8, 9, 15, 16, 17, 35, 53, 0] + b_n_type: 4 + b_n_squeeze: 19 + a_n_node: 38 + valid_idx: valid_idx_zinc.json + label_keys: ['logP', 'qed', 'SAS'] + smiles_col: smiles + +# set data path +FILE_PATH: ./datasets/moflow + +# model settings +MODEL: + input_keys: ["nodes", "edges"] + output_keys: ["output", "sum_log_det"] + hyper_params: null + +# evaluation settings +EVAL: + pretrained_model_path: null + batch_size: 256 + num_workers: 0 + reconstruct: false + int2point: false + intgrid: false + inter_times: 5 + correct_validity: true + temperature: 1.0 + delta: 0.1 + n_experiments: + save_fig: true + +EVAL_mode: Intergrid #select EVAL_mode: Reconstruct/Random/Inter2point/Intergrid + +Reconstruct: #重建生成,针对不同数据集的分子进行重建生成 + batch_size: 256 + reconstruct: true + n_experiments: 0 + +Random: #随机生成,针对不同的数据集从潜在空间进行随机生成,10000个样本生成5次 + batch_size: 10000 + temperature: 0.85 + delta: 0.05 + n_experiments: 5 + save_fig: false + correct_validity: true + +Inter2point: #在潜在空间进行插值,两个分子之间插值可视化生成分子图 + batch_size: 1000 + int2point: true + temperature: 0.65 + inter_times: 50 + correct_validity: true + n_experiments: 0 + +Intergrid: #在潜在空间进行插值,分子网格进行可视化生成分子图 + batch_size: 1000 + temperature: 0.65 + delta: 5 + 
intgrid: true + inter_times: 40 + correct_validity: true + n_experiments: 0 +>>>>>>> Stashed changes diff --git a/examples/moflow/conf/moflow_train.yaml b/examples/moflow/conf/moflow_train.yaml index 9c52976887..382918823a 100644 --- a/examples/moflow/conf/moflow_train.yaml +++ b/examples/moflow/conf/moflow_train.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -98,3 +99,104 @@ EVAL: compute_metric_by_batch: false eval_with_no_grad: true batch_size: 100 +======= +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_moflow_train/${data_name} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +data_name: qm9 # data select:qm9/zinc250k +seed: 1 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set training hyper-parameters +qm9: + b_n_flow: 10 + b_n_block: 1 + b_hidden_ch: [128,128] + a_n_flow: 27 + a_n_block: 1 + a_hidden_gnn: [64] + a_hidden_lin: [128,64] + mask_row_size_list: [1] + mask_row_stride_list: [1] + learn_dist: True + noise_scale: 0.6 + b_conv_lu: 1 + atomic_num_list: [6, 7, 8, 9, 0] + b_n_type: 4 + b_n_squeeze: 3 + a_n_node: 9 + valid_idx: valid_idx_qm9.json + label_keys: ['A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv'] + smiles_col: SMILES1 + +zinc250k: + b_n_flow: 10 + b_n_block: 1 + b_hidden_ch: [512,512] + a_n_flow: 38 + a_n_block: 1 + a_hidden_gnn: [256] + a_hidden_lin: [512,64] + mask_row_size_list: [1] + mask_row_stride_list: [1] + learn_dist: True + noise_scale: 0.6 + b_conv_lu: 2 + atomic_num_list: [6, 7, 8, 9, 15, 16, 17, 35, 53, 0] + b_n_type: 4 + b_n_squeeze: 19 + a_n_node: 38 + valid_idx: valid_idx_zinc.json + label_keys: ['logP', 'qed', 'SAS'] + smiles_col: smiles + +# set data path +FILE_PATH: ./datasets/moflow + +# model settings +MODEL: + input_keys: ["nodes", "edges"] + output_keys: ["output", "sum_log_det"] + hyper_params: null + +# training settings +TRAIN: + epochs: 200 + save_freq: 50 + eval_during_train: true + eval_freq: 20 + learning_rate: 0.001 + lr_decay: 0.999995 + batch_size: 256 + num_workers: 8 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + compute_metric_by_batch: false + eval_with_no_grad: true + batch_size: 100 +>>>>>>> Stashed changes diff --git a/examples/moflow/moflow_train.py b/examples/moflow/moflow_train.py index b005c044f6..ce84106275 100644 --- a/examples/moflow/moflow_train.py +++ b/examples/moflow/moflow_train.py @@ -1,244 +1,244 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import hydra -import moflow_transform -import numpy as np -import paddle -from moflow_utils import Hyperparameters -from moflow_utils import check_validity -from omegaconf import DictConfig -from tabulate import tabulate - -import ppsci -from ppsci.utils import logger - - -def infer(model, batch_size=20, temp=0.7, z_mu=None, true_adj=None): - """generate mols - - Args: - model (object): Generated eval Moflownet model - batch_size (int, optional): Batch size during evaling per GPU. Defaults to 20. - temp (float, optional): temperature of the gaussian distribution. Defaults to 0.7. - z_mu (int, optional): latent vector of a molecule. Defaults to None. - true_adj (paddle.Tensor, optional): True Adjacency. Defaults to None. - - Returns: - Tuple(paddle.Tensor, paddle.Tensor): Adjacency and nodes - """ - z_dim = model.b_size + model.a_size - mu = np.zeros(z_dim) - sigma_diag = np.ones(z_dim) - if model.hyper_params.learn_dist: - if len(model.ln_var) == 1: - sigma_diag = np.sqrt(np.exp(model.ln_var.item())) * sigma_diag - elif len(model.ln_var) == 2: - sigma_diag[: model.b_size] = ( - np.sqrt(np.exp(model.ln_var[0].item())) * sigma_diag[: model.b_size] - ) - sigma_diag[model.b_size + 1 :] = ( - np.sqrt(np.exp(model.ln_var[1].item())) * sigma_diag[model.b_size + 1 :] - ) - sigma = temp * sigma_diag - with paddle.no_grad(): - if z_mu is not None: - mu = z_mu - sigma = 0.01 * np.eye(z_dim) - z = np.random.normal(mu, sigma, (batch_size, z_dim)) - z = paddle.to_tensor(data=z).astype(paddle.get_default_dtype()) - adj, x = model.reverse(z, true_adj=true_adj) - return adj, x - - -class eval_func: - def __init__( - self, - metrics_mode, - batch_size, - atomic_num_list, - *args, - ): - super().__init__() - self.metrics_mode = metrics_mode - self.batch_size = batch_size - self.atomic_num_list = atomic_num_list - - def __call__( - self, - output_dict, - label_dict, - ): - self.metrics_mode.eval() - adj, x = infer(self.metrics_mode, self.batch_size) - validity_info = check_validity(adj, x, self.atomic_num_list) - self.metrics_mode.train() - results = dict() - results["valid"] = validity_info["valid_ratio"] - results["unique"] = validity_info["unique_ratio"] - results["abs_unique"] = validity_info["abs_unique_ratio"] - return results - - -def train(cfg: DictConfig): - # set training hyper-parameters - b_hidden_ch = cfg.get(cfg.data_name).b_hidden_ch - a_hidden_gnn = cfg.get(cfg.data_name).a_hidden_gnn - a_hidden_lin = cfg.get(cfg.data_name).a_hidden_lin - mask_row_size_list = list(cfg.get(cfg.data_name).mask_row_size_list) - mask_row_stride_list = list(cfg.get(cfg.data_name).mask_row_stride_list) - a_n_type = len(cfg.get(cfg.data_name).atomic_num_list) - atomic_num_list = list(cfg.get(cfg.data_name).atomic_num_list) - - model_params = Hyperparameters( - b_n_type=cfg.get(cfg.data_name).b_n_type, - b_n_flow=cfg.get(cfg.data_name).b_n_flow, - b_n_block=cfg.get(cfg.data_name).b_n_block, - b_n_squeeze=cfg.get(cfg.data_name).b_n_squeeze, - b_hidden_ch=b_hidden_ch, - b_affine=True, - b_conv_lu=cfg.get(cfg.data_name).b_conv_lu, - a_n_node=cfg.get(cfg.data_name).a_n_node, - a_n_type=a_n_type, - a_hidden_gnn=a_hidden_gnn, - a_hidden_lin=a_hidden_lin, - a_n_flow=cfg.get(cfg.data_name).a_n_flow, - a_n_block=cfg.get(cfg.data_name).a_n_block, - mask_row_size_list=mask_row_size_list, - mask_row_stride_list=mask_row_stride_list, - a_affine=True, - 
learn_dist=cfg.get(cfg.data_name).learn_dist, - seed=cfg.seed, - noise_scale=cfg.get(cfg.data_name).noise_scale, - ) - - logger.info("Model params:\n" + tabulate(model_params.print())) - - # set transforms - if cfg.data_name == "qm9": - transform_fn = moflow_transform.transform_fn - elif cfg.data_name == "zinc250k": - transform_fn = moflow_transform.transform_fn_zinc250k - - # set select eval data - valid_idx_path = osp.join(cfg.FILE_PATH, cfg.get(cfg.data_name).valid_idx) - valid_idx = moflow_transform.get_val_ids(valid_idx_path, cfg.data_name) - - # set train dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "MOlFLOWDataset", - "file_path": cfg.FILE_PATH, - "data_name": cfg.data_name, - "mode": cfg.mode, - "valid_idx": valid_idx, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.get(cfg.data_name).label_keys, - "smiles_col": cfg.get(cfg.data_name).smiles_col, - "transform_fn": transform_fn, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": cfg.TRAIN.num_workers, - } - - # set model - model_cfg = dict(cfg.MODEL) - model_cfg.update({"hyper_params": model_params}) - model = ppsci.arch.MoFlowNet(**model_cfg) - - # set constraint - output_keys = cfg.MODEL.output_keys - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.FunctionalLoss(model.log_prob_loss), - {key: (lambda out, k=key: out[k]) for key in output_keys}, - name="Sup_constraint", - ) - - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - - # init optimizer and lr scheduler - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set eval dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "MOlFLOWDataset", - "file_path": cfg.FILE_PATH, - "data_name": cfg.data_name, - "mode": "eval", - "valid_idx": valid_idx, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.get(cfg.data_name).label_keys, - "smiles_col": cfg.get(cfg.data_name).smiles_col, - "transform_fn": transform_fn, - }, - "batch_size": cfg.EVAL.batch_size, - } - - # set validator - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.FunctionalLoss(model.log_prob_loss), - {key: (lambda out, k=key: out[k]) for key in output_keys}, - metric={ - "Valid": ppsci.metric.FunctionalMetric( - eval_func(model, cfg.EVAL.batch_size, atomic_num_list) - ) - }, - name="Sup_Validator", - ) - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - seed=cfg.seed, - validator=validator, - save_freq=cfg.TRAIN.save_freq, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # train model - solver.train() - - # validation for training - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="moflow_train.yaml") -def main(cfg: DictConfig): - train(cfg) - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp + +import hydra +import moflow_transform +import numpy as np +import paddle +from moflow_utils import Hyperparameters +from moflow_utils import check_validity +from omegaconf import DictConfig +from tabulate import tabulate + +import ppsci +from ppsci.utils import logger + + +def infer(model, batch_size=20, temp=0.7, z_mu=None, true_adj=None): + """generate mols + + Args: + model (object): Generated eval Moflownet model + batch_size (int, optional): Batch size during evaling per GPU. Defaults to 20. + temp (float, optional): temperature of the gaussian distribution. Defaults to 0.7. + z_mu (int, optional): latent vector of a molecule. Defaults to None. + true_adj (paddle.Tensor, optional): True Adjacency. Defaults to None. + + Returns: + Tuple(paddle.Tensor, paddle.Tensor): Adjacency and nodes + """ + z_dim = model.b_size + model.a_size + mu = np.zeros(z_dim) + sigma_diag = np.ones(z_dim) + if model.hyper_params.learn_dist: + if len(model.ln_var) == 1: + sigma_diag = np.sqrt(np.exp(model.ln_var.item())) * sigma_diag + elif len(model.ln_var) == 2: + sigma_diag[: model.b_size] = ( + np.sqrt(np.exp(model.ln_var[0].item())) * sigma_diag[: model.b_size] + ) + sigma_diag[model.b_size + 1 :] = ( + np.sqrt(np.exp(model.ln_var[1].item())) * sigma_diag[model.b_size + 1 :] + ) + sigma = temp * sigma_diag + with paddle.no_grad(): + if z_mu is not None: + mu = z_mu + sigma = 0.01 * np.eye(z_dim) + z = np.random.normal(mu, sigma, (batch_size, z_dim)) + z = paddle.to_tensor(data=z).astype(paddle.get_default_dtype()) + adj, x = model.reverse(z, true_adj=true_adj) + return adj, x + + +class eval_func: + def __init__( + self, + metrics_mode, + batch_size, + atomic_num_list, + *args, + ): + super().__init__() + self.metrics_mode = metrics_mode + self.batch_size = batch_size + self.atomic_num_list = atomic_num_list + + def __call__( + self, + output_dict, + label_dict, + ): + self.metrics_mode.eval() + adj, x = infer(self.metrics_mode, self.batch_size) + validity_info = check_validity(adj, x, self.atomic_num_list) + self.metrics_mode.train() + results = dict() + results["valid"] = validity_info["valid_ratio"] + results["unique"] = validity_info["unique_ratio"] + results["abs_unique"] = validity_info["abs_unique_ratio"] + return results + + +def train(cfg: DictConfig): + # set training hyper-parameters + b_hidden_ch = cfg.get(cfg.data_name).b_hidden_ch + a_hidden_gnn = cfg.get(cfg.data_name).a_hidden_gnn + a_hidden_lin = cfg.get(cfg.data_name).a_hidden_lin + mask_row_size_list = list(cfg.get(cfg.data_name).mask_row_size_list) + mask_row_stride_list = list(cfg.get(cfg.data_name).mask_row_stride_list) + a_n_type = len(cfg.get(cfg.data_name).atomic_num_list) + atomic_num_list = list(cfg.get(cfg.data_name).atomic_num_list) + + model_params = Hyperparameters( + b_n_type=cfg.get(cfg.data_name).b_n_type, + b_n_flow=cfg.get(cfg.data_name).b_n_flow, + b_n_block=cfg.get(cfg.data_name).b_n_block, + b_n_squeeze=cfg.get(cfg.data_name).b_n_squeeze, + b_hidden_ch=b_hidden_ch, + b_affine=True, + b_conv_lu=cfg.get(cfg.data_name).b_conv_lu, + a_n_node=cfg.get(cfg.data_name).a_n_node, + 
a_n_type=a_n_type, + a_hidden_gnn=a_hidden_gnn, + a_hidden_lin=a_hidden_lin, + a_n_flow=cfg.get(cfg.data_name).a_n_flow, + a_n_block=cfg.get(cfg.data_name).a_n_block, + mask_row_size_list=mask_row_size_list, + mask_row_stride_list=mask_row_stride_list, + a_affine=True, + learn_dist=cfg.get(cfg.data_name).learn_dist, + seed=cfg.seed, + noise_scale=cfg.get(cfg.data_name).noise_scale, + ) + + logger.info("Model params:\n" + tabulate(model_params.print())) + + # set transforms + if cfg.data_name == "qm9": + transform_fn = moflow_transform.transform_fn + elif cfg.data_name == "zinc250k": + transform_fn = moflow_transform.transform_fn_zinc250k + + # set select eval data + valid_idx_path = osp.join(cfg.FILE_PATH, cfg.get(cfg.data_name).valid_idx) + valid_idx = moflow_transform.get_val_ids(valid_idx_path, cfg.data_name) + + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "MOlFLOWDataset", + "file_path": cfg.FILE_PATH, + "data_name": cfg.data_name, + "mode": cfg.mode, + "valid_idx": valid_idx, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.get(cfg.data_name).label_keys, + "smiles_col": cfg.get(cfg.data_name).smiles_col, + "transform_fn": transform_fn, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": cfg.TRAIN.num_workers, + } + + # set model + model_cfg = dict(cfg.MODEL) + model_cfg.update({"hyper_params": model_params}) + model = ppsci.arch.MoFlowNet(**model_cfg) + + # set constraint + output_keys = cfg.MODEL.output_keys + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.FunctionalLoss(model.log_prob_loss), + {key: (lambda out, k=key: out[k]) for key in output_keys}, + name="Sup_constraint", + ) + + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + + # init optimizer and lr scheduler + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "MOlFLOWDataset", + "file_path": cfg.FILE_PATH, + "data_name": cfg.data_name, + "mode": "eval", + "valid_idx": valid_idx, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.get(cfg.data_name).label_keys, + "smiles_col": cfg.get(cfg.data_name).smiles_col, + "transform_fn": transform_fn, + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.FunctionalLoss(model.log_prob_loss), + {key: (lambda out, k=key: out[k]) for key in output_keys}, + metric={ + "Valid": ppsci.metric.FunctionalMetric( + eval_func(model, cfg.EVAL.batch_size, atomic_num_list) + ) + }, + name="Sup_Validator", + ) + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + seed=cfg.seed, + validator=validator, + save_freq=cfg.TRAIN.save_freq, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # train model + solver.train() + + # validation for training + solver.eval() + + +@hydra.main(version_base=None, config_path="./conf", config_name="moflow_train.yaml") +def main(cfg: DictConfig): + train(cfg) + + +if __name__ == 
"__main__": + main() diff --git a/examples/moflow/moflow_transform.py b/examples/moflow/moflow_transform.py index efd4904edd..dba9969a49 100644 --- a/examples/moflow/moflow_transform.py +++ b/examples/moflow/moflow_transform.py @@ -1,74 +1,74 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Copyright 2020 Chengxi Zang - -import json - -import numpy as np - -from ppsci.utils import logger - -zinc250_atomic_num_list = [6, 7, 8, 9, 15, 16, 17, 35, 53, 0] - - -def one_hot(data, out_size=9, num_max_id=5): - assert tuple(data.shape)[0] == out_size - b = np.zeros((out_size, num_max_id)) - indices = np.where(data >= 6, data - 6, num_max_id - 1) - b[np.arange(out_size), indices] = 1 - return b - - -def transform_fn(data): - """ - :param data: ((9,), (4,9,9), (15,)) - :return: - """ - node, adj, label = data - node = one_hot(node).astype(np.float32) - adj = np.concatenate( - [adj[:3], 1 - np.sum(adj[:3], axis=0, keepdims=True)], axis=0 - ).astype(np.float32) - return node, adj, label - - -def one_hot_zinc250k(data, out_size=38): - num_max_id = len(zinc250_atomic_num_list) - assert tuple(data.shape)[0] == out_size - b = np.zeros((out_size, num_max_id), dtype=np.float32) - for i in range(out_size): - ind = zinc250_atomic_num_list.index(data[i]) - b[i, ind] = 1.0 - return b - - -def transform_fn_zinc250k(data): - node, adj, label = data - node = one_hot_zinc250k(node).astype(np.float32) - adj = np.concatenate( - [adj[:3], 1 - np.sum(adj[:3], axis=0, keepdims=True)], axis=0 - ).astype(np.float32) - return node, adj, label - - -def get_val_ids(file_path, data_name): - logger.message("loading train/valid split information from: {}".format(file_path)) - with open(file_path) as json_data: - data = json.load(json_data) - if data_name == "qm9": - val_ids = [(int(idx) - 1) for idx in data["valid_idxs"]] - elif data_name == "zinc250k": - val_ids = [(idx - 1) for idx in data] - return val_ids +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Copyright 2020 Chengxi Zang + +import json + +import numpy as np + +from ppsci.utils import logger + +zinc250_atomic_num_list = [6, 7, 8, 9, 15, 16, 17, 35, 53, 0] + + +def one_hot(data, out_size=9, num_max_id=5): + assert tuple(data.shape)[0] == out_size + b = np.zeros((out_size, num_max_id)) + indices = np.where(data >= 6, data - 6, num_max_id - 1) + b[np.arange(out_size), indices] = 1 + return b + + +def transform_fn(data): + """ + :param data: ((9,), (4,9,9), (15,)) + :return: + """ + node, adj, label = data + node = one_hot(node).astype(np.float32) + adj = np.concatenate( + [adj[:3], 1 - np.sum(adj[:3], axis=0, keepdims=True)], axis=0 + ).astype(np.float32) + return node, adj, label + + +def one_hot_zinc250k(data, out_size=38): + num_max_id = len(zinc250_atomic_num_list) + assert tuple(data.shape)[0] == out_size + b = np.zeros((out_size, num_max_id), dtype=np.float32) + for i in range(out_size): + ind = zinc250_atomic_num_list.index(data[i]) + b[i, ind] = 1.0 + return b + + +def transform_fn_zinc250k(data): + node, adj, label = data + node = one_hot_zinc250k(node).astype(np.float32) + adj = np.concatenate( + [adj[:3], 1 - np.sum(adj[:3], axis=0, keepdims=True)], axis=0 + ).astype(np.float32) + return node, adj, label + + +def get_val_ids(file_path, data_name): + logger.message("loading train/valid split information from: {}".format(file_path)) + with open(file_path) as json_data: + data = json.load(json_data) + if data_name == "qm9": + val_ids = [(int(idx) - 1) for idx in data["valid_idxs"]] + elif data_name == "zinc250k": + val_ids = [(idx - 1) for idx in data] + return val_ids diff --git a/examples/moflow/moflow_utils.py b/examples/moflow/moflow_utils.py index 5be4001b3a..a273b34961 100644 --- a/examples/moflow/moflow_utils.py +++ b/examples/moflow/moflow_utils.py @@ -1,556 +1,556 @@ -import json -import math -import os -import pickle as cPickle -import re - -import networkx as nx -import numpy as np -import paddle -from rdkit import Chem -from rdkit.Chem import Draw -from rdkit.Chem import rdMolDescriptors -from rdkit.six import iteritems - -from ppsci.utils import download -from ppsci.utils import logger - -atom_decoder_m = {0: 6, 1: 7, 2: 8, 3: 9} -bond_decoder_m = { - 1: Chem.rdchem.BondType.SINGLE, - 2: Chem.rdchem.BondType.DOUBLE, - 3: Chem.rdchem.BondType.TRIPLE, -} -ATOM_VALENCY = { - 6: 4, - 7: 3, - 8: 2, - 9: 1, - 15: 3, - 16: 2, - 17: 1, - 35: 1, - 53: 1, -} - -_fscores = None - - -class Hyperparameters: - def __init__( - self, - b_n_type=4, - b_n_flow=-1, - b_n_block=-1, - b_n_squeeze=-1, - b_hidden_ch=None, - b_affine=True, - b_conv_lu=2, - a_n_node=-1, - a_n_type=-1, - a_hidden_gnn=None, - a_hidden_lin=None, - a_n_flow=-1, - a_n_block=1, - mask_row_size_list=None, - mask_row_stride_list=None, - a_affine=True, - path=None, - learn_dist=True, - seed=1, - noise_scale=0.6, - ): - """Model Hyperparameters - Args: - b_n_type (int, optional): Number of bond types/channels. - b_n_flow (int, optional): Number of masked glow coupling layers per block for bond tensor. - b_n_block (int, optional): Number of glow blocks for bond tensor. - b_n_squeeze (int, optional): Squeeze divisor, 3 for qm9, 2 for zinc250k. - b_hidden_ch (list[int,...], optional): Hidden channel list for bonds tensor, delimited list input. - b_affine (bool, optional): Using affine coupling layers for bonds glow. - b_conv_lu (int, optional): Using L-U decomposition trick for 1-1 conv in bonds glow. - a_n_node (int, optional): _Maximum number of atoms in a molecule. 
- a_n_type (int, optional): _Number of atom types. - a_hidden_gnn (object, optional): Hidden dimension list for graph convolution for atoms matrix, delimited list input. - a_hidden_lin (object, optional): Hidden dimension list for linear transformation for atoms, delimited list input. - a_n_flow (int, optional): _dNumber of masked flow coupling layers per block for atom matrix. - a_n_block (int, optional): Number of flow blocks for atom matrix. - mask_row_size_list (list[int,...], optional): Mask row list for atom matrix, delimited list input. - mask_row_stride_list (list[int,...], optional): _Mask row stride list for atom matrix, delimited list input. - a_affine (bool, optional): Using affine coupling layers for atom conditional graph flow. - path (str, optional): Hyperparameters save path. - learn_dist (bool, optional): learn the distribution of feature matrix. - seed (int, optional): Random seed to use. - noise_scale (float, optional): x + torch.rand(x.shape) * noise_scale. - - """ - self.b_n_type = b_n_type - self.b_n_flow = b_n_flow - self.b_n_block = b_n_block - self.b_n_squeeze = b_n_squeeze - self.b_hidden_ch = b_hidden_ch - self.b_affine = b_affine - self.b_conv_lu = b_conv_lu - self.a_n_node = a_n_node - self.a_n_type = a_n_type - self.a_hidden_gnn = a_hidden_gnn - self.a_hidden_lin = a_hidden_lin - self.a_n_flow = a_n_flow - self.a_n_block = a_n_block - self.mask_row_size_list = mask_row_size_list - self.mask_row_stride_list = mask_row_stride_list - self.a_affine = a_affine - self.path = path - self.learn_dist = learn_dist - self.seed = seed - self.noise_scale = noise_scale - if path is not None: - if os.path.exists(path) and os.path.isfile(path): - with open(path, "r") as f: - obj = json.load(f) - for key, value in obj.items(): - setattr(self, key, value) - else: - raise Exception("{} does not exist".format(path)) - - def save(self, path): - self.path = path - with open(path, "w") as f: - json.dump(self.__dict__, f, indent=4, sort_keys=True, cls=NumpyEncoder) - - def print(self): - rows = [] - for key, value in self.__dict__.items(): - rows.append([key, value]) - return rows - - -class NumpyEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, np.ndarray): - return obj.tolist() - elif isinstance(obj, paddle.Tensor): - return obj.numpy().tolist() - return json.JSONEncoder.default(self, obj) - - -def flatten_graph_data(adj, x): - return paddle.concat( - x=(adj.reshape([tuple(adj.shape)[0], -1]), x.reshape([tuple(x.shape)[0], -1])), - axis=1, - ) - - -def split_channel(x): - n = tuple(x.shape)[1] // 2 - return x[:, :n], x[:, n:] - - -def get_graph_data(x, num_nodes, num_relations, num_features): - """Converts a vector of shape [b, num_nodes, m] to Adjacency matrix - of shape [b, num_relations, num_nodes, num_nodes] - and a feature matrix of shape [b, num_nodes, num_features]. - - Args: - x (paddle.Tensor): Adjacency. - num_nodes (int): nodes number. - num_relations (int): relations number. - num_features (int): features number. - - Returns: - Tuple[paddle.Tensor, ...]: Adjacency and A feature matrix. 
- """ - adj = x[:, : num_nodes * num_nodes * num_relations].reshape( - [-1, num_relations, num_nodes, num_nodes] - ) - feat_mat = x[:, num_nodes * num_nodes * num_relations :].reshape( - [-1, num_nodes, num_features] - ) - return adj, feat_mat - - -def Tensor2Mol(A, x): - mol = Chem.RWMol() - atoms = np.argmax(x, 1) - atoms_exist = atoms != 4 - atoms = atoms[atoms_exist] - atoms += 6 - adj = np.argmax(A, 0) - adj = np.array(adj) - adj = adj[atoms_exist, :][:, atoms_exist] - adj[adj == 3] = -1 - adj += 1 - for atom in atoms: - mol.AddAtom(Chem.Atom(int(atom))) - for start, end in zip(*np.nonzero(adj)): - if start > end: - mol.AddBond(int(start), int(end), bond_decoder_m[adj[start, end]]) - return mol - - -def construct_mol(x, A, atomic_num_list): - """ - - Args: - x (paddle.Tensor): nodes. - A (paddle.Tensor): Adjacency. - atomic_num_list (list): atomic list number. - Returns: - rdkit mol object - - """ - mol = Chem.RWMol() - atoms = np.argmax(x, axis=1) - atoms_exist = atoms != len(atomic_num_list) - 1 - atoms = atoms[atoms_exist] - for atom in atoms: - mol.AddAtom(Chem.Atom(int(atomic_num_list[atom]))) - adj = np.argmax(A, axis=0) - adj = np.array(adj) - adj = adj[atoms_exist, :][:, atoms_exist] - adj[adj == 3] = -1 - adj += 1 - for start, end in zip(*np.nonzero(adj)): - if start > end: - mol.AddBond(int(start), int(end), bond_decoder_m[adj[start, end]]) - flag, atomid_valence = check_valency(mol) - if flag: - continue - else: - assert len(atomid_valence) == 2 - idx = atomid_valence[0] - v = atomid_valence[1] - an = mol.GetAtomWithIdx(idx).GetAtomicNum() - if an in (7, 8, 16) and v - ATOM_VALENCY[an] == 1: - mol.GetAtomWithIdx(idx).SetFormalCharge(1) - return mol - - -def construct_mol_with_validation(x, A, atomic_num_list): - """ - Args: - x (paddle.Tensor): nodes. - A (paddle.Tensor): Adjacency. - atomic_num_list (list): atomic list number. - - Returns: - rdkit mol object - - """ - mol = Chem.RWMol() - atoms = np.argmax(x, axis=1) - atoms_exist = atoms != len(atomic_num_list) - 1 - atoms = atoms[atoms_exist] - for atom in atoms: - mol.AddAtom(Chem.Atom(int(atomic_num_list[atom]))) - adj = np.argmax(A, axis=0) - adj = np.array(adj) - adj = adj[atoms_exist, :][:, atoms_exist] - adj[adj == 3] = -1 - adj += 1 - for start, end in zip(*np.nonzero(adj)): - if start > end: - mol.AddBond(int(start), int(end), bond_decoder_m[adj[start, end]]) - t = adj[start, end] - while not valid_mol_can_with_seg(mol): - mol.RemoveBond(int(start), int(end)) - t = t - 1 - if t >= 1: - mol.AddBond(int(start), int(end), bond_decoder_m[t]) - return mol - - -def valid_mol(x): - s = ( - Chem.MolFromSmiles(Chem.MolToSmiles(x, isomericSmiles=True)) - if x is not None - else None - ) - if s is not None and "." not in Chem.MolToSmiles(s, isomericSmiles=True): - return s - return None - - -def valid_mol_can_with_seg(x, largest_connected_comp=True): - if x is None: - return None - sm = Chem.MolToSmiles(x, isomericSmiles=True) - mol = Chem.MolFromSmiles(sm) - if largest_connected_comp and "." in sm: - vsm = [(s, len(s)) for s in sm.split(".")] - vsm.sort(key=lambda tup: tup[1], reverse=True) - mol = Chem.MolFromSmiles(vsm[0][0]) - return mol - - -def check_valency(mol): - """ - Checks that no atoms in the mol have exceeded their possible - valency. - Args: - mol (object): rdkit mol object. - Returns: - True if no valency issues, False otherwise. 
- """ - try: - Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES) - return True, None - except ValueError as e: - e = str(e) - p = e.find("#") - e_sub = e[p:] - atomid_valence = list(map(int, re.findall("\\d+", e_sub))) - return False, atomid_valence - - -def correct_mol(x): - # xsm = Chem.MolToSmiles(x, isomericSmiles=True) - mol = x - while True: - flag, atomid_valence = check_valency(mol) - if flag: - break - else: - assert len(atomid_valence) == 2 - idx = atomid_valence[0] - # v = atomid_valence[1] - queue = [] - for b in mol.GetAtomWithIdx(idx).GetBonds(): - queue.append( - ( - b.GetIdx(), - int(b.GetBondType()), - b.GetBeginAtomIdx(), - b.GetEndAtomIdx(), - ) - ) - queue.sort(key=lambda tup: tup[1], reverse=True) - if len(queue) > 0: - start = queue[0][2] - end = queue[0][3] - t = queue[0][1] - 1 - mol.RemoveBond(start, end) - if t >= 1: - mol.AddBond(start, end, bond_decoder_m[t]) - return mol - - -def check_tensor(x): - return valid_mol(Tensor2Mol(*x)) - - -def adj_to_smiles(adj, x, atomic_num_list): - valid = [ - Chem.MolToSmiles( - construct_mol(x_elem, adj_elem, atomic_num_list), isomericSmiles=True - ) - for x_elem, adj_elem in zip(x, adj) - ] - return valid - - -def check_validity( - adj, - x, - atomic_num_list, - return_unique=True, - correct_validity=True, - largest_connected_comp=True, - debug=False, -): - """ - - Args: - adj (paddle.Tensor): Adjacency. - x (paddle.Tensor): nodes. - atomic_num_list (list): atomic list number. - return_unique (bool): if return unique - correct_validity (bool): if apply validity correction after the generation. - largest_connected_comp (bool): largest connected compare. - debug (bool): To run with more information. - - """ - adj = _to_numpy_array(adj) - x = _to_numpy_array(x) - if correct_validity: - valid = [] - for x_elem, adj_elem in zip(x, adj): - mol = construct_mol(x_elem, adj_elem, atomic_num_list) - cmol = correct_mol(mol) - vcmol = valid_mol_can_with_seg( - cmol, largest_connected_comp=largest_connected_comp - ) - valid.append(vcmol) - else: - valid = [ - valid_mol(construct_mol(x_elem, adj_elem, atomic_num_list)) - for x_elem, adj_elem in zip(x, adj) - ] - valid = [mol for mol in valid if mol is not None] - if debug: - logger.info("valid molecules: {}/{}".format(len(valid), tuple(adj.shape)[0])) - for i, mol in enumerate(valid): - logger.info( - "[{}] {}".format(i, Chem.MolToSmiles(mol, isomericSmiles=False)) - ) - n_mols = tuple(x.shape)[0] - valid_ratio = len(valid) / n_mols - valid_smiles = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in valid] - unique_smiles = list(set(valid_smiles)) - unique_ratio = 0.0 - if len(valid) > 0: - unique_ratio = len(unique_smiles) / len(valid) - if return_unique: - valid_smiles = unique_smiles - valid_mols = [Chem.MolFromSmiles(s) for s in valid_smiles] - abs_unique_ratio = len(unique_smiles) / n_mols - if debug: - logger.info( - "valid: {:.3f}%, unique: {:.3f}%, abs unique: {:.3f}%".format( - valid_ratio * 100, unique_ratio * 100, abs_unique_ratio * 100 - ) - ) - results = dict() - results["valid_mols"] = valid_mols - results["valid_smiles"] = valid_smiles - results["valid_ratio"] = valid_ratio * 100 - results["unique_ratio"] = unique_ratio * 100 - results["abs_unique_ratio"] = abs_unique_ratio * 100 - return results - - -def check_novelty(gen_smiles, train_smiles, n_generated_mols): - if len(gen_smiles) == 0: - novel_ratio = 0.0 - else: - duplicates = [(1) for mol in gen_smiles if mol in train_smiles] - novel = len(gen_smiles) - sum(duplicates) - novel_ratio = novel 
* 100.0 / len(gen_smiles) - abs_novel_ratio = novel * 100.0 / n_generated_mols - print("novelty: {:.3f}%, abs novelty: {:.3f}%".format(novel_ratio, abs_novel_ratio)) - return novel_ratio, abs_novel_ratio - - -def _to_numpy_array(a): - if isinstance(a, paddle.Tensor): - a = a.cpu().detach().numpy() - elif isinstance(a, np.ndarray): - pass - else: - raise TypeError("a ({}) is not a paddle.Tensor".format(type(a))) - return a - - -def save_mol_png(mol, filepath, size=(600, 600)): - Draw.MolToFile(mol, filepath, size=size) - - -def readFragmentScores(name="fpscores"): - import gzip - - global _fscores - if name == "fpscores": - name = os.path.join(os.path.dirname(__file__), name) - if not os.path.exists(name): - download._download( - "https://paddle-org.bj.bcebos.com/paddlescience/models/MoFlow/fpscores.pkl.gz", - "./", - ) - _fscores = cPickle.load(gzip.open("%s.pkl.gz" % name)) - outDict = {} - for i in _fscores: - for j in range(1, len(i)): - outDict[i[j]] = float(i[0]) - _fscores = outDict - - -def numBridgeheadsAndSpiro(mol, ri=None): - nSpiro = rdMolDescriptors.CalcNumSpiroAtoms(mol) - nBridgehead = rdMolDescriptors.CalcNumBridgeheadAtoms(mol) - return nBridgehead, nSpiro - - -def calculateScore(m): - if _fscores is None: - readFragmentScores() - fp = rdMolDescriptors.GetMorganFingerprint(m, 2) - fps = fp.GetNonzeroElements() - score1 = 0.0 - nf = 0 - for bitId, v in iteritems(fps): - nf += v - sfp = bitId - score1 += _fscores.get(sfp, -4) * v - score1 /= nf - nAtoms = m.GetNumAtoms() - nChiralCenters = len(Chem.FindMolChiralCenters(m, includeUnassigned=True)) - ri = m.GetRingInfo() - nBridgeheads, nSpiro = numBridgeheadsAndSpiro(m, ri) - nMacrocycles = 0 - for x in ri.AtomRings(): - if len(x) > 8: - nMacrocycles += 1 - sizePenalty = nAtoms**1.005 - nAtoms - stereoPenalty = math.log10(nChiralCenters + 1) - spiroPenalty = math.log10(nSpiro + 1) - bridgePenalty = math.log10(nBridgeheads + 1) - macrocyclePenalty = 0.0 - if nMacrocycles > 0: - macrocyclePenalty = math.log10(2) - score2 = ( - 0.0 - - sizePenalty - - stereoPenalty - - spiroPenalty - - bridgePenalty - - macrocyclePenalty - ) - score3 = 0.0 - if nAtoms > len(fps): - score3 = math.log(float(nAtoms) / len(fps)) * 0.5 - sascore = score1 + score2 + score3 - min = -4.0 - max = 2.5 - sascore = 11.0 - (sascore - min + 1) / (max - min) * 9.0 - if sascore > 8.0: - sascore = 8.0 + math.log(sascore + 1.0 - 9.0) - if sascore > 10.0: - sascore = 10.0 - elif sascore < 1.0: - sascore = 1.0 - return sascore - - -def penalized_logp(mol): - """Reward that consists of log p penalized by SA and # long cycles, - as described in (Kusner et al. 2017). Scores are normalized based on the - statistics of 250k_rndm_zinc_drugs_clean.smi dataset. - - Args: - mol (object): rdkit mol object. - - Returns: - float: Scores are normalized based on the statistics. 
- """ - logP_mean = 2.4570953396190123 - logP_std = 1.434324401111988 - SA_mean = -3.0525811293166134 - SA_std = 0.8335207024513095 - cycle_mean = -0.0485696876403053 - cycle_std = 0.2860212110245455 - log_p = Chem.Descriptors.MolLogP(mol) - SA = -calculateScore(mol) - cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol))) - if len(cycle_list) == 0: - cycle_length = 0 - else: - cycle_length = max([len(j) for j in cycle_list]) - if cycle_length <= 6: - cycle_length = 0 - else: - cycle_length = cycle_length - 6 - cycle_score = -cycle_length - normalized_log_p = (log_p - logP_mean) / logP_std - normalized_SA = (SA - SA_mean) / SA_std - normalized_cycle = (cycle_score - cycle_mean) / cycle_std - return normalized_log_p + normalized_SA + normalized_cycle +import json +import math +import os +import pickle as cPickle +import re + +import networkx as nx +import numpy as np +import paddle +from rdkit import Chem +from rdkit.Chem import Draw +from rdkit.Chem import rdMolDescriptors +from rdkit.six import iteritems + +from ppsci.utils import download +from ppsci.utils import logger + +atom_decoder_m = {0: 6, 1: 7, 2: 8, 3: 9} +bond_decoder_m = { + 1: Chem.rdchem.BondType.SINGLE, + 2: Chem.rdchem.BondType.DOUBLE, + 3: Chem.rdchem.BondType.TRIPLE, +} +ATOM_VALENCY = { + 6: 4, + 7: 3, + 8: 2, + 9: 1, + 15: 3, + 16: 2, + 17: 1, + 35: 1, + 53: 1, +} + +_fscores = None + + +class Hyperparameters: + def __init__( + self, + b_n_type=4, + b_n_flow=-1, + b_n_block=-1, + b_n_squeeze=-1, + b_hidden_ch=None, + b_affine=True, + b_conv_lu=2, + a_n_node=-1, + a_n_type=-1, + a_hidden_gnn=None, + a_hidden_lin=None, + a_n_flow=-1, + a_n_block=1, + mask_row_size_list=None, + mask_row_stride_list=None, + a_affine=True, + path=None, + learn_dist=True, + seed=1, + noise_scale=0.6, + ): + """Model Hyperparameters + Args: + b_n_type (int, optional): Number of bond types/channels. + b_n_flow (int, optional): Number of masked glow coupling layers per block for bond tensor. + b_n_block (int, optional): Number of glow blocks for bond tensor. + b_n_squeeze (int, optional): Squeeze divisor, 3 for qm9, 2 for zinc250k. + b_hidden_ch (list[int,...], optional): Hidden channel list for bonds tensor, delimited list input. + b_affine (bool, optional): Using affine coupling layers for bonds glow. + b_conv_lu (int, optional): Using L-U decomposition trick for 1-1 conv in bonds glow. + a_n_node (int, optional): _Maximum number of atoms in a molecule. + a_n_type (int, optional): _Number of atom types. + a_hidden_gnn (object, optional): Hidden dimension list for graph convolution for atoms matrix, delimited list input. + a_hidden_lin (object, optional): Hidden dimension list for linear transformation for atoms, delimited list input. + a_n_flow (int, optional): _dNumber of masked flow coupling layers per block for atom matrix. + a_n_block (int, optional): Number of flow blocks for atom matrix. + mask_row_size_list (list[int,...], optional): Mask row list for atom matrix, delimited list input. + mask_row_stride_list (list[int,...], optional): _Mask row stride list for atom matrix, delimited list input. + a_affine (bool, optional): Using affine coupling layers for atom conditional graph flow. + path (str, optional): Hyperparameters save path. + learn_dist (bool, optional): learn the distribution of feature matrix. + seed (int, optional): Random seed to use. + noise_scale (float, optional): x + torch.rand(x.shape) * noise_scale. 
+ + """ + self.b_n_type = b_n_type + self.b_n_flow = b_n_flow + self.b_n_block = b_n_block + self.b_n_squeeze = b_n_squeeze + self.b_hidden_ch = b_hidden_ch + self.b_affine = b_affine + self.b_conv_lu = b_conv_lu + self.a_n_node = a_n_node + self.a_n_type = a_n_type + self.a_hidden_gnn = a_hidden_gnn + self.a_hidden_lin = a_hidden_lin + self.a_n_flow = a_n_flow + self.a_n_block = a_n_block + self.mask_row_size_list = mask_row_size_list + self.mask_row_stride_list = mask_row_stride_list + self.a_affine = a_affine + self.path = path + self.learn_dist = learn_dist + self.seed = seed + self.noise_scale = noise_scale + if path is not None: + if os.path.exists(path) and os.path.isfile(path): + with open(path, "r") as f: + obj = json.load(f) + for key, value in obj.items(): + setattr(self, key, value) + else: + raise Exception("{} does not exist".format(path)) + + def save(self, path): + self.path = path + with open(path, "w") as f: + json.dump(self.__dict__, f, indent=4, sort_keys=True, cls=NumpyEncoder) + + def print(self): + rows = [] + for key, value in self.__dict__.items(): + rows.append([key, value]) + return rows + + +class NumpyEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.ndarray): + return obj.tolist() + elif isinstance(obj, paddle.Tensor): + return obj.numpy().tolist() + return json.JSONEncoder.default(self, obj) + + +def flatten_graph_data(adj, x): + return paddle.concat( + x=(adj.reshape([tuple(adj.shape)[0], -1]), x.reshape([tuple(x.shape)[0], -1])), + axis=1, + ) + + +def split_channel(x): + n = tuple(x.shape)[1] // 2 + return x[:, :n], x[:, n:] + + +def get_graph_data(x, num_nodes, num_relations, num_features): + """Converts a vector of shape [b, num_nodes, m] to Adjacency matrix + of shape [b, num_relations, num_nodes, num_nodes] + and a feature matrix of shape [b, num_nodes, num_features]. + + Args: + x (paddle.Tensor): Adjacency. + num_nodes (int): nodes number. + num_relations (int): relations number. + num_features (int): features number. + + Returns: + Tuple[paddle.Tensor, ...]: Adjacency and A feature matrix. + """ + adj = x[:, : num_nodes * num_nodes * num_relations].reshape( + [-1, num_relations, num_nodes, num_nodes] + ) + feat_mat = x[:, num_nodes * num_nodes * num_relations :].reshape( + [-1, num_nodes, num_features] + ) + return adj, feat_mat + + +def Tensor2Mol(A, x): + mol = Chem.RWMol() + atoms = np.argmax(x, 1) + atoms_exist = atoms != 4 + atoms = atoms[atoms_exist] + atoms += 6 + adj = np.argmax(A, 0) + adj = np.array(adj) + adj = adj[atoms_exist, :][:, atoms_exist] + adj[adj == 3] = -1 + adj += 1 + for atom in atoms: + mol.AddAtom(Chem.Atom(int(atom))) + for start, end in zip(*np.nonzero(adj)): + if start > end: + mol.AddBond(int(start), int(end), bond_decoder_m[adj[start, end]]) + return mol + + +def construct_mol(x, A, atomic_num_list): + """ + + Args: + x (paddle.Tensor): nodes. + A (paddle.Tensor): Adjacency. + atomic_num_list (list): atomic list number. 
+ Returns: + rdkit mol object + + """ + mol = Chem.RWMol() + atoms = np.argmax(x, axis=1) + atoms_exist = atoms != len(atomic_num_list) - 1 + atoms = atoms[atoms_exist] + for atom in atoms: + mol.AddAtom(Chem.Atom(int(atomic_num_list[atom]))) + adj = np.argmax(A, axis=0) + adj = np.array(adj) + adj = adj[atoms_exist, :][:, atoms_exist] + adj[adj == 3] = -1 + adj += 1 + for start, end in zip(*np.nonzero(adj)): + if start > end: + mol.AddBond(int(start), int(end), bond_decoder_m[adj[start, end]]) + flag, atomid_valence = check_valency(mol) + if flag: + continue + else: + assert len(atomid_valence) == 2 + idx = atomid_valence[0] + v = atomid_valence[1] + an = mol.GetAtomWithIdx(idx).GetAtomicNum() + if an in (7, 8, 16) and v - ATOM_VALENCY[an] == 1: + mol.GetAtomWithIdx(idx).SetFormalCharge(1) + return mol + + +def construct_mol_with_validation(x, A, atomic_num_list): + """ + Args: + x (paddle.Tensor): nodes. + A (paddle.Tensor): Adjacency. + atomic_num_list (list): atomic list number. + + Returns: + rdkit mol object + + """ + mol = Chem.RWMol() + atoms = np.argmax(x, axis=1) + atoms_exist = atoms != len(atomic_num_list) - 1 + atoms = atoms[atoms_exist] + for atom in atoms: + mol.AddAtom(Chem.Atom(int(atomic_num_list[atom]))) + adj = np.argmax(A, axis=0) + adj = np.array(adj) + adj = adj[atoms_exist, :][:, atoms_exist] + adj[adj == 3] = -1 + adj += 1 + for start, end in zip(*np.nonzero(adj)): + if start > end: + mol.AddBond(int(start), int(end), bond_decoder_m[adj[start, end]]) + t = adj[start, end] + while not valid_mol_can_with_seg(mol): + mol.RemoveBond(int(start), int(end)) + t = t - 1 + if t >= 1: + mol.AddBond(int(start), int(end), bond_decoder_m[t]) + return mol + + +def valid_mol(x): + s = ( + Chem.MolFromSmiles(Chem.MolToSmiles(x, isomericSmiles=True)) + if x is not None + else None + ) + if s is not None and "." not in Chem.MolToSmiles(s, isomericSmiles=True): + return s + return None + + +def valid_mol_can_with_seg(x, largest_connected_comp=True): + if x is None: + return None + sm = Chem.MolToSmiles(x, isomericSmiles=True) + mol = Chem.MolFromSmiles(sm) + if largest_connected_comp and "." in sm: + vsm = [(s, len(s)) for s in sm.split(".")] + vsm.sort(key=lambda tup: tup[1], reverse=True) + mol = Chem.MolFromSmiles(vsm[0][0]) + return mol + + +def check_valency(mol): + """ + Checks that no atoms in the mol have exceeded their possible + valency. + Args: + mol (object): rdkit mol object. + Returns: + True if no valency issues, False otherwise. 
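    Examples:
        Illustrative only (benzene sanitizes cleanly, so no offending atom is
        reported):

            ok, atomid_valence = check_valency(Chem.MolFromSmiles("c1ccccc1"))
            # ok is True and atomid_valence is None; for an over-valent atom the
            # function returns False together with [atom_index, valence].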
+ """ + try: + Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES) + return True, None + except ValueError as e: + e = str(e) + p = e.find("#") + e_sub = e[p:] + atomid_valence = list(map(int, re.findall("\\d+", e_sub))) + return False, atomid_valence + + +def correct_mol(x): + # xsm = Chem.MolToSmiles(x, isomericSmiles=True) + mol = x + while True: + flag, atomid_valence = check_valency(mol) + if flag: + break + else: + assert len(atomid_valence) == 2 + idx = atomid_valence[0] + # v = atomid_valence[1] + queue = [] + for b in mol.GetAtomWithIdx(idx).GetBonds(): + queue.append( + ( + b.GetIdx(), + int(b.GetBondType()), + b.GetBeginAtomIdx(), + b.GetEndAtomIdx(), + ) + ) + queue.sort(key=lambda tup: tup[1], reverse=True) + if len(queue) > 0: + start = queue[0][2] + end = queue[0][3] + t = queue[0][1] - 1 + mol.RemoveBond(start, end) + if t >= 1: + mol.AddBond(start, end, bond_decoder_m[t]) + return mol + + +def check_tensor(x): + return valid_mol(Tensor2Mol(*x)) + + +def adj_to_smiles(adj, x, atomic_num_list): + valid = [ + Chem.MolToSmiles( + construct_mol(x_elem, adj_elem, atomic_num_list), isomericSmiles=True + ) + for x_elem, adj_elem in zip(x, adj) + ] + return valid + + +def check_validity( + adj, + x, + atomic_num_list, + return_unique=True, + correct_validity=True, + largest_connected_comp=True, + debug=False, +): + """ + + Args: + adj (paddle.Tensor): Adjacency. + x (paddle.Tensor): nodes. + atomic_num_list (list): atomic list number. + return_unique (bool): if return unique + correct_validity (bool): if apply validity correction after the generation. + largest_connected_comp (bool): largest connected compare. + debug (bool): To run with more information. + + """ + adj = _to_numpy_array(adj) + x = _to_numpy_array(x) + if correct_validity: + valid = [] + for x_elem, adj_elem in zip(x, adj): + mol = construct_mol(x_elem, adj_elem, atomic_num_list) + cmol = correct_mol(mol) + vcmol = valid_mol_can_with_seg( + cmol, largest_connected_comp=largest_connected_comp + ) + valid.append(vcmol) + else: + valid = [ + valid_mol(construct_mol(x_elem, adj_elem, atomic_num_list)) + for x_elem, adj_elem in zip(x, adj) + ] + valid = [mol for mol in valid if mol is not None] + if debug: + logger.info("valid molecules: {}/{}".format(len(valid), tuple(adj.shape)[0])) + for i, mol in enumerate(valid): + logger.info( + "[{}] {}".format(i, Chem.MolToSmiles(mol, isomericSmiles=False)) + ) + n_mols = tuple(x.shape)[0] + valid_ratio = len(valid) / n_mols + valid_smiles = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in valid] + unique_smiles = list(set(valid_smiles)) + unique_ratio = 0.0 + if len(valid) > 0: + unique_ratio = len(unique_smiles) / len(valid) + if return_unique: + valid_smiles = unique_smiles + valid_mols = [Chem.MolFromSmiles(s) for s in valid_smiles] + abs_unique_ratio = len(unique_smiles) / n_mols + if debug: + logger.info( + "valid: {:.3f}%, unique: {:.3f}%, abs unique: {:.3f}%".format( + valid_ratio * 100, unique_ratio * 100, abs_unique_ratio * 100 + ) + ) + results = dict() + results["valid_mols"] = valid_mols + results["valid_smiles"] = valid_smiles + results["valid_ratio"] = valid_ratio * 100 + results["unique_ratio"] = unique_ratio * 100 + results["abs_unique_ratio"] = abs_unique_ratio * 100 + return results + + +def check_novelty(gen_smiles, train_smiles, n_generated_mols): + if len(gen_smiles) == 0: + novel_ratio = 0.0 + else: + duplicates = [(1) for mol in gen_smiles if mol in train_smiles] + novel = len(gen_smiles) - sum(duplicates) + novel_ratio = novel 
* 100.0 / len(gen_smiles) + abs_novel_ratio = novel * 100.0 / n_generated_mols + print("novelty: {:.3f}%, abs novelty: {:.3f}%".format(novel_ratio, abs_novel_ratio)) + return novel_ratio, abs_novel_ratio + + +def _to_numpy_array(a): + if isinstance(a, paddle.Tensor): + a = a.cpu().detach().numpy() + elif isinstance(a, np.ndarray): + pass + else: + raise TypeError("a ({}) is not a paddle.Tensor".format(type(a))) + return a + + +def save_mol_png(mol, filepath, size=(600, 600)): + Draw.MolToFile(mol, filepath, size=size) + + +def readFragmentScores(name="fpscores"): + import gzip + + global _fscores + if name == "fpscores": + name = os.path.join(os.path.dirname(__file__), name) + if not os.path.exists(name): + download._download( + "https://paddle-org.bj.bcebos.com/paddlescience/models/MoFlow/fpscores.pkl.gz", + "./", + ) + _fscores = cPickle.load(gzip.open("%s.pkl.gz" % name)) + outDict = {} + for i in _fscores: + for j in range(1, len(i)): + outDict[i[j]] = float(i[0]) + _fscores = outDict + + +def numBridgeheadsAndSpiro(mol, ri=None): + nSpiro = rdMolDescriptors.CalcNumSpiroAtoms(mol) + nBridgehead = rdMolDescriptors.CalcNumBridgeheadAtoms(mol) + return nBridgehead, nSpiro + + +def calculateScore(m): + if _fscores is None: + readFragmentScores() + fp = rdMolDescriptors.GetMorganFingerprint(m, 2) + fps = fp.GetNonzeroElements() + score1 = 0.0 + nf = 0 + for bitId, v in iteritems(fps): + nf += v + sfp = bitId + score1 += _fscores.get(sfp, -4) * v + score1 /= nf + nAtoms = m.GetNumAtoms() + nChiralCenters = len(Chem.FindMolChiralCenters(m, includeUnassigned=True)) + ri = m.GetRingInfo() + nBridgeheads, nSpiro = numBridgeheadsAndSpiro(m, ri) + nMacrocycles = 0 + for x in ri.AtomRings(): + if len(x) > 8: + nMacrocycles += 1 + sizePenalty = nAtoms**1.005 - nAtoms + stereoPenalty = math.log10(nChiralCenters + 1) + spiroPenalty = math.log10(nSpiro + 1) + bridgePenalty = math.log10(nBridgeheads + 1) + macrocyclePenalty = 0.0 + if nMacrocycles > 0: + macrocyclePenalty = math.log10(2) + score2 = ( + 0.0 + - sizePenalty + - stereoPenalty + - spiroPenalty + - bridgePenalty + - macrocyclePenalty + ) + score3 = 0.0 + if nAtoms > len(fps): + score3 = math.log(float(nAtoms) / len(fps)) * 0.5 + sascore = score1 + score2 + score3 + min = -4.0 + max = 2.5 + sascore = 11.0 - (sascore - min + 1) / (max - min) * 9.0 + if sascore > 8.0: + sascore = 8.0 + math.log(sascore + 1.0 - 9.0) + if sascore > 10.0: + sascore = 10.0 + elif sascore < 1.0: + sascore = 1.0 + return sascore + + +def penalized_logp(mol): + """Reward that consists of log p penalized by SA and # long cycles, + as described in (Kusner et al. 2017). Scores are normalized based on the + statistics of 250k_rndm_zinc_drugs_clean.smi dataset. + + Args: + mol (object): rdkit mol object. + + Returns: + float: Scores are normalized based on the statistics. 
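    Examples:
        Illustrative only; the exact value depends on the fpscores table that
        readFragmentScores() downloads for the SA term:

            mol = Chem.MolFromSmiles("CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1")
            score = penalized_logp(mol)  # sum of normalized logP, SA and cycle terms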
+ """ + logP_mean = 2.4570953396190123 + logP_std = 1.434324401111988 + SA_mean = -3.0525811293166134 + SA_std = 0.8335207024513095 + cycle_mean = -0.0485696876403053 + cycle_std = 0.2860212110245455 + log_p = Chem.Descriptors.MolLogP(mol) + SA = -calculateScore(mol) + cycle_list = nx.cycle_basis(nx.Graph(Chem.rdmolops.GetAdjacencyMatrix(mol))) + if len(cycle_list) == 0: + cycle_length = 0 + else: + cycle_length = max([len(j) for j in cycle_list]) + if cycle_length <= 6: + cycle_length = 0 + else: + cycle_length = cycle_length - 6 + cycle_score = -cycle_length + normalized_log_p = (log_p - logP_mean) / logP_std + normalized_SA = (SA - SA_mean) / SA_std + normalized_cycle = (cycle_score - cycle_mean) / cycle_std + return normalized_log_p + normalized_SA + normalized_cycle diff --git a/examples/moflow/optimize_moflow.py b/examples/moflow/optimize_moflow.py index 538f43cbdb..4d4fc26cb8 100644 --- a/examples/moflow/optimize_moflow.py +++ b/examples/moflow/optimize_moflow.py @@ -1,594 +1,594 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import hydra -import moflow_transform -import numpy as np -import paddle -import pandas as pd -from moflow_utils import Hyperparameters -from moflow_utils import adj_to_smiles -from moflow_utils import check_validity -from moflow_utils import penalized_logp -from omegaconf import DictConfig -from rdkit import Chem -from rdkit import DataStructs -from rdkit.Chem import AllChem -from rdkit.Chem import Descriptors -from tabulate import tabulate - -import ppsci -from ppsci.data.dataset.moflow_dataset import MolGraph -from ppsci.utils import logger - - -def load_property_csv(filepath, normalize=True): - """Use qed and plogp in zinc250k_property.csv which are recalculated by rdkit - the recalculated qed results are in tiny inconsistent with qed in zinc250k.csv - e.g - zinc250k_property.csv: - qed,plogp,smile - 0.7319008436872337,3.1399057164163766,CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1 - 0.9411116113894995,0.17238635659148804,C[C@@H]1CC(Nc2cncc(-c3nncn3C)c2)C[C@@H](C)C1 - import rdkit - m = rdkit.Chem.MolFromSmiles('CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1') - rdkit.Chem.QED.qed(m): 0.7319008436872337 - from mflow.utils.environment import penalized_logp - penalized_logp(m): 3.1399057164163766 - However, in oringinal: - zinc250k.csv - ,smiles,logP,qed,SAS - 0,CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1,5.0506,0.702012232801,2.0840945720726807 - 1,C[C@@H]1CC(Nc2cncc(-c3nncn3C)c2)C[C@@H](C)C1,3.1137,0.928975488089,3.4320038192747795 - - 0.7319008436872337 v.s. 0.702012232801 - and no plogp in zinc250k.csv dataset! 
- """ - df = pd.read_csv(filepath) - if normalize: - # m = df["plogp"].mean() - # std = df["plogp"].std() - # mn = df["plogp"].min() - mx = df["plogp"].max() - lower = -10 - df["plogp"] = df["plogp"].clip(lower=lower, upper=5) - df["plogp"] = (df["plogp"] - lower) / (mx - lower) - tuples = [tuple(x) for x in df.values] - logger.info("Load {} done, length: {}".format(filepath, len(tuples))) - return tuples - - -def smiles_to_adj(mol_smiles, data_name="qm9"): - """Use simles to adj, atoms - - Args: - mol_smiles: eg. CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1 - """ - if data_name == "qm9": - out_size = 9 - transform_fn = moflow_transform.transform_fn - elif data_name == "zinc250k": - out_size = 38 - transform_fn = moflow_transform.transform_fn_zinc250k - - preprocessor = MolGraph(out_size=out_size, kekulize=True) - canonical_smiles, mol = preprocessor.prepare_smiles_and_mol( - Chem.MolFromSmiles(mol_smiles) - ) - atoms, adj = preprocessor.get_input_features(mol) - atoms, adj, _ = transform_fn((atoms, adj, None)) - adj = np.expand_dims(adj, axis=0) - atoms = np.expand_dims(atoms, axis=0) - adj = paddle.to_tensor(data=adj) - atoms = paddle.to_tensor(data=atoms) - return adj, atoms - - -def optimize_mol( - model, - property_model, - smiles, - sim_cutoff, - lr=2.0, - num_iter=20, - data_name="qm9", - atomic_num_list=[6, 7, 8, 9, 0], - property_name="qed", - debug=True, - random=False, -): - """General for Optimize model. - - Args: - model: MoFlowNet pre-trained model - property_model: Optimize qed or plogp model - smiles: eg. CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1 - sim_cutoff: add similarity property - lr: learning rate - num_iter: learning total step - data_name: dataset name - atomic_num_list: atom list in smiles - property_name: Optimize qed or plogp model name - debug: To run optimization with more information - random: Random Generation from sampling or not - """ - if property_name == "qed": - propf = Descriptors.qed - elif property_name == "plogp": - propf = penalized_logp - else: - raise ValueError("Wrong property_name{}".format(property_name)) - model.eval() - property_model.eval() - with paddle.no_grad(): - bond, atoms = smiles_to_adj(smiles, data_name) - x = {"nodes": atoms, "edges": bond} - mol_vec, _ = property_model.encode(x) - if debug: - adj_rev, x_rev = property_model.reverse(mol_vec) - reverse_smiles = adj_to_smiles(adj_rev.cpu(), x_rev.cpu(), atomic_num_list) - logger.info(smiles, reverse_smiles) - output_dict = model(x) - z = output_dict["output"] - # sum_log_det_jacs = output_dict["sum_log_det"] - z0 = z[0].reshape([tuple(z[0].shape)[0], -1]) - z1 = z[1].reshape([tuple(z[1].shape)[0], -1]) - adj_rev, x_rev = model.reverse(paddle.concat(x=[z0, z1], axis=1)) - reverse_smiles2 = adj_to_smiles(adj_rev.cpu(), x_rev.cpu(), atomic_num_list) - train_smiles2 = adj_to_smiles(bond.cpu(), atoms.cpu(), atomic_num_list) - logger.info(train_smiles2, reverse_smiles2) - mol = Chem.MolFromSmiles(smiles) - fp1 = AllChem.GetMorganFingerprint(mol, 2) - start = smiles, propf(mol), None - out_0 = mol_vec.clone().detach() - out_0.stop_gradient = False - cur_vec = out_0 - out_1 = mol_vec.clone().detach() - out_1.stop_gradient = False - start_vec = out_1 - visited = [] - for step in range(num_iter): - prop_val = property_model.propNN(cur_vec).squeeze() - grad = paddle.grad(outputs=prop_val, inputs=cur_vec)[0] - if random: - rad = paddle.randn(shape=cur_vec.data.shape, dtype=cur_vec.data.dtype) - cur_vec = start_vec.data + lr * rad / paddle.sqrt(x=rad * rad) - else: - cur_vec = cur_vec.data + lr * 
grad.data / paddle.sqrt( - x=grad.data * grad.data - ) - out_2 = cur_vec.clone().detach() - out_2.stop_gradient = False - cur_vec = out_2 - visited.append(cur_vec) - hidden_z = paddle.concat(x=visited, axis=0) - adj, x = property_model.reverse(hidden_z) - val_res = check_validity(adj, x, atomic_num_list, debug=debug) - valid_mols = val_res["valid_mols"] - valid_smiles = val_res["valid_smiles"] - results = [] - sm_set = set() - sm_set.add(smiles) - for m, s in zip(valid_mols, valid_smiles): - if s in sm_set: - continue - sm_set.add(s) - p = propf(m) - fp2 = AllChem.GetMorganFingerprint(m, 2) - sim = DataStructs.TanimotoSimilarity(fp1, fp2) - if sim >= sim_cutoff: - results.append((s, p, sim, smiles)) - results.sort(key=lambda tup: tup[1], reverse=True) - return results, start - - -def fit_model( - model, - data, - data_prop, - N, - property_name="qed", - max_epochs=10, - learning_rate=0.001, - weight_decay=1e-05, -): - """Train for Optimize model. - - Args: - model: MoFlowNet pre-trained model - data: dataloader - data_prop: true smiles list - N: dataset number - property_name: Optimize qed or plogp model name - smiles: eg. CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1 - max_epochs: train epochs - learning_rate: train learning rate - weight_decay: train weight_decay - """ - model.train() - metrics = paddle.nn.MSELoss() - optimizer = paddle.optimizer.Adam( - parameters=model.parameters(), - learning_rate=learning_rate, - weight_decay=weight_decay, - ) - assert len(data_prop) == N - iter_per_epoch = len(data) - log_step = 20 - if property_name == "qed": - col = 0 - elif property_name == "plogp": - col = 1 - else: - raise ValueError("Wrong property_name{}".format(property_name)) - for epoch in range(max_epochs): - for i, batch in enumerate(data): - x = batch[0]["nodes"] - bs = tuple(x.shape)[0] - ps = i * bs - pe = min((i + 1) * bs, N) - true_y = [[tt[col]] for tt in data_prop[ps:pe]] - true_y = ( - paddle.to_tensor(data=true_y) - .astype(dtype="float32") - .cuda(blocking=True) - ) - optimizer.clear_grad() - output_dict = model(batch[0]) - y = output_dict["output"][1] - loss = metrics(y, true_y) - loss.backward() - optimizer.step() - - if (i + 1) % log_step == 0: - logger.info( - "Epoch [{}/{}], Iter [{}/{}], loss: {:.5f},".format( - epoch + 1, max_epochs, i + 1, iter_per_epoch, loss.item() - ) - ) - return model - - -def find_top_score_smiles( - model, - property_model, - data_name, - property_name, - train_prop, - topk, - atomic_num_list, - debug, - file_path, -): - """ - Args: - model: MoFlowNet pre-trained model - property_model: Optimize qed or plogp model - data_name: dataset name - property_name: Optimize qed or plogp model name - train_prop: true smiles list - topk: Top k smiles as seeds - atomic_num_list: atom list in smiles - debug: To run optimization with more information - file_path: result save path - """ - if property_name == "qed": - col = 0 - elif property_name == "plogp": - col = 1 - logger.info("Finding top {} score".format(property_name)) - train_prop_sorted = sorted(train_prop, key=lambda tup: tup[col], reverse=True) - result_list = [] - for i, r in enumerate(train_prop_sorted): - if i >= topk: - break - if i % 50 == 0: - logger.info("Optimization {}/{}".format(i, topk)) - qed, plogp, smile = r - results, ori = optimize_mol( - model, - property_model, - smile, - sim_cutoff=0, - lr=0.005, - num_iter=100, - data_name=data_name, - atomic_num_list=atomic_num_list, - property_name=property_name, - random=False, - debug=debug, - ) - result_list.extend(results) - 
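# A minimal sketch (shapes and values are illustrative) of the latent update that
# optimize_mol applies above: the gradient of the property head w.r.t. the latent
# vector is normalized element-wise, so each step moves by about +/- lr per
# dimension. For qm9 the latent size works out to roughly 4*9*9 + 9*5 = 369.
import paddle

z = paddle.randn([1, 369])           # hypothetical latent vector (b_size + a_size)
z.stop_gradient = False
prop = (z * z).sum()                 # stand-in for property_model.propNN(z)
g = paddle.grad(outputs=prop, inputs=z)[0]
z_next = z + 0.005 * g / paddle.sqrt(g * g)  # element-wise sign step, lr = 0.005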
result_list.sort(key=lambda tup: tup[1], reverse=True) - train_smile = set() - for i, r in enumerate(train_prop_sorted): - qed, plogp, smile = r - train_smile.add(smile) - mol = Chem.MolFromSmiles(smile) - smile2 = Chem.MolToSmiles(mol, isomericSmiles=True) - train_smile.add(smile2) - result_list_novel = [] - for i, r in enumerate(result_list): - smile, score, sim, smile_original = r - if smile not in train_smile: - result_list_novel.append(r) - save_file_path = osp.join(file_path, property_name + "_discovered_sorted.csv") - f = open(save_file_path, "w") - for r in result_list_novel: - smile, score, sim, smile_original = r - f.write("{},{},{},{}\n".format(score, smile, sim, smile_original)) - f.flush() - f.close() - logger.message("Dump done!") - - -def constrain_optimization_smiles( - model, - property_model, - data_name, - property_name, - train_prop, - topk, - atomic_num_list, - debug, - file_path, - sim_cutoff=0.0, -): - """ - Args: - model: MoFlowNet pre-trained model - property_model: Optimize qed or plogp model - data_name: dataset name - property_name: Optimize qed or plogp model name - train_prop: true smiles list - topk: Top k smiles as seeds - atomic_num_list: atom list in smiles - debug: To run optimization with more information - file_path: result save path - sim_cutoff: add similarity property - """ - if property_name == "qed": - col = 0 - elif property_name == "plogp": - col = 1 - logger.message("Constrained optimization of {} score".format(property_name)) - train_prop_sorted = sorted(train_prop, key=lambda tup: tup[col]) - result_list = [] - nfail = 0 - for i, r in enumerate(train_prop_sorted): - if i >= topk: - break - if i % 50 == 0: - logger.info("Optimization {}/{},".format(i, topk)) - qed, plogp, smile = r - results, ori = optimize_mol( - model, - property_model, - smile, - sim_cutoff=sim_cutoff, - lr=0.005, - num_iter=100, - data_name=data_name, - atomic_num_list=atomic_num_list, - property_name=property_name, - random=False, - debug=debug, - ) - if len(results) > 0: - smile2, property2, sim, _ = results[0] - plogp_delta = property2 - plogp - if plogp_delta >= 0: - result_list.append( - (smile2, property2, sim, smile, qed, plogp, plogp_delta) - ) - else: - nfail += 1 - logger.info("Failure:{}:{}".format(i, smile)) - else: - nfail += 1 - logger.info("Failure:{}:{}".format(i, smile)) - df = pd.DataFrame( - result_list, - columns=[ - "smile_new", - "prop_new", - "sim", - "smile_old", - "qed_old", - "plogp_old", - "plogp_delta", - ], - ) - logger.info(df.describe()) - save_file_path = osp.join(file_path, property_name + "_constrain_optimization.csv") - df.to_csv(save_file_path, index=False) - logger.message("Dump done!") - logger.info("nfail:{} in total:{}".format(nfail, topk)) - logger.info("success rate: {}".format((topk - nfail) * 1.0 / topk)) - - -def optimize(cfg: DictConfig): - # set hyper-parameters - b_hidden_ch = cfg.get(cfg.data_name).b_hidden_ch - a_hidden_gnn = cfg.get(cfg.data_name).a_hidden_gnn - a_hidden_lin = cfg.get(cfg.data_name).a_hidden_lin - mask_row_size_list = list(cfg.get(cfg.data_name).mask_row_size_list) - mask_row_stride_list = list(cfg.get(cfg.data_name).mask_row_stride_list) - a_n_type = len(cfg.get(cfg.data_name).atomic_num_list) - atomic_num_list = list(cfg.get(cfg.data_name).atomic_num_list) - - model_params = Hyperparameters( - b_n_type=cfg.get(cfg.data_name).b_n_type, - b_n_flow=cfg.get(cfg.data_name).b_n_flow, - b_n_block=cfg.get(cfg.data_name).b_n_block, - b_n_squeeze=cfg.get(cfg.data_name).b_n_squeeze, - b_hidden_ch=b_hidden_ch, - 
b_affine=True, - b_conv_lu=cfg.get(cfg.data_name).b_conv_lu, - a_n_node=cfg.get(cfg.data_name).a_n_node, - a_n_type=a_n_type, - a_hidden_gnn=a_hidden_gnn, - a_hidden_lin=a_hidden_lin, - a_n_flow=cfg.get(cfg.data_name).a_n_flow, - a_n_block=cfg.get(cfg.data_name).a_n_block, - mask_row_size_list=mask_row_size_list, - mask_row_stride_list=mask_row_stride_list, - a_affine=True, - learn_dist=cfg.get(cfg.data_name).learn_dist, - seed=cfg.seed, - noise_scale=cfg.get(cfg.data_name).noise_scale, - ) - - logger.info("Model params:\n" + tabulate(model_params.print())) - - hidden = cfg.OPTIMIZE.hidden - logger.info("Hidden dim for output regression:{}".format(hidden)) - - # set transforms - if cfg.data_name == "qm9": - transform_fn = moflow_transform.transform_fn - elif cfg.data_name == "zinc250k": - transform_fn = moflow_transform.transform_fn_zinc250k - - # set select eval data - valid_idx_path = osp.join(cfg.FILE_PATH, cfg.get(cfg.data_name).valid_idx) - valid_idx = moflow_transform.get_val_ids(valid_idx_path, cfg.data_name) - - # set dataloader config - dataloader_cfg = { - "dataset": { - "name": "MOlFLOWDataset", - "file_path": cfg.FILE_PATH, - "data_name": cfg.data_name, - "mode": cfg.mode, - "valid_idx": valid_idx, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.get(cfg.data_name).label_keys, - "smiles_col": cfg.get(cfg.data_name).smiles_col, - "transform_fn": transform_fn, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "batch_size": cfg.OPTIMIZE.batch_size, - "num_workers": 0, - } - - # set model - model_cfg = dict(cfg.MODEL) - model_cfg.update({"hyper_params": model_params}) - model = ppsci.arch.MoFlowNet(**model_cfg) - ppsci.utils.save_load.load_pretrain(model, path=cfg.TRAIN.pretrained_model_path) - - model_prop_cfg = dict(cfg.MODEL_Prop) - model_prop_cfg.update( - { - "model": model, - "hidden_size": hidden, - } - ) - property_model = ppsci.arch.MoFlowProp(**model_prop_cfg) - train = ppsci.data.dataset.build_dataset(dataloader_cfg["dataset"]) - train_dataloader = ppsci.data.build_dataloader(train, dataloader_cfg) - train_idx = train.train_idx - property_model_path = osp.join( - cfg.output_dir, "{}_model.pdparams".format(cfg.OPTIMIZE.property_name) - ) - - if not osp.exists(property_model_path): - logger.message("Training regression model over molecular embedding:") - property_csv_path = osp.join( - cfg.FILE_PATH, "{}_property.csv".format(cfg.data_name) - ) - prop_list = load_property_csv(property_csv_path, normalize=True) - train_prop = [prop_list[i] for i in train_idx] - # test_prop = [prop_list[i] for i in valid_idx] - - N = len(train) - property_model = fit_model( - property_model, - train_dataloader, - train_prop, - N, - property_name=cfg.OPTIMIZE.property_name, - max_epochs=cfg.OPTIMIZE.max_epochs, - learning_rate=cfg.OPTIMIZE.learning_rate, - weight_decay=cfg.OPTIMIZE.weight_decay, - ) - logger.message( - "saving {} regression model to: {}".format( - cfg.OPTIMIZE.property_name, property_model_path - ) - ) - paddle.save(obj=property_model.state_dict(), path=property_model_path) - - else: - logger.message("Loading trained regression model for optimization") - property_csv_path = osp.join( - cfg.FILE_PATH, "{}_property.csv".format(cfg.data_name) - ) - prop_list = load_property_csv(property_csv_path, normalize=True) - train_prop = [prop_list[i] for i in train_idx] - # test_prop = [prop_list[i] for i in valid_idx] - - logger.message( - "loading {} regression model from: {}".format( - cfg.OPTIMIZE.property_name, property_model_path - 
) - ) - - state_dict = paddle.load(path=property_model_path) - property_model.set_state_dict(state_dict) - property_model.eval() - model.eval() - if cfg.OPTIMIZE.topscore: - logger.message("Finding top score:") - find_top_score_smiles( - model, - property_model, - cfg.data_name, - cfg.OPTIMIZE.property_name, - train_prop, - cfg.OPTIMIZE.topk, - atomic_num_list, - cfg.OPTIMIZE.debug, - cfg.output_dir, - ) - if cfg.OPTIMIZE.consopt: - logger.message("Constrained optimization:") - constrain_optimization_smiles( - model, - property_model, - cfg.data_name, - cfg.OPTIMIZE.property_name, - train_prop, - cfg.OPTIMIZE.topk, - atomic_num_list, - cfg.OPTIMIZE.debug, - cfg.output_dir, - sim_cutoff=cfg.OPTIMIZE.sim_cutoff, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="moflow_optimize.yaml") -def main(cfg: DictConfig): - optimize(cfg) - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp + +import hydra +import moflow_transform +import numpy as np +import paddle +import pandas as pd +from moflow_utils import Hyperparameters +from moflow_utils import adj_to_smiles +from moflow_utils import check_validity +from moflow_utils import penalized_logp +from omegaconf import DictConfig +from rdkit import Chem +from rdkit import DataStructs +from rdkit.Chem import AllChem +from rdkit.Chem import Descriptors +from tabulate import tabulate + +import ppsci +from ppsci.data.dataset.moflow_dataset import MolGraph +from ppsci.utils import logger + + +def load_property_csv(filepath, normalize=True): + """Use qed and plogp in zinc250k_property.csv which are recalculated by rdkit + the recalculated qed results are in tiny inconsistent with qed in zinc250k.csv + e.g + zinc250k_property.csv: + qed,plogp,smile + 0.7319008436872337,3.1399057164163766,CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1 + 0.9411116113894995,0.17238635659148804,C[C@@H]1CC(Nc2cncc(-c3nncn3C)c2)C[C@@H](C)C1 + import rdkit + m = rdkit.Chem.MolFromSmiles('CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1') + rdkit.Chem.QED.qed(m): 0.7319008436872337 + from mflow.utils.environment import penalized_logp + penalized_logp(m): 3.1399057164163766 + However, in oringinal: + zinc250k.csv + ,smiles,logP,qed,SAS + 0,CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1,5.0506,0.702012232801,2.0840945720726807 + 1,C[C@@H]1CC(Nc2cncc(-c3nncn3C)c2)C[C@@H](C)C1,3.1137,0.928975488089,3.4320038192747795 + + 0.7319008436872337 v.s. 0.702012232801 + and no plogp in zinc250k.csv dataset! 
+ """ + df = pd.read_csv(filepath) + if normalize: + # m = df["plogp"].mean() + # std = df["plogp"].std() + # mn = df["plogp"].min() + mx = df["plogp"].max() + lower = -10 + df["plogp"] = df["plogp"].clip(lower=lower, upper=5) + df["plogp"] = (df["plogp"] - lower) / (mx - lower) + tuples = [tuple(x) for x in df.values] + logger.info("Load {} done, length: {}".format(filepath, len(tuples))) + return tuples + + +def smiles_to_adj(mol_smiles, data_name="qm9"): + """Use simles to adj, atoms + + Args: + mol_smiles: eg. CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1 + """ + if data_name == "qm9": + out_size = 9 + transform_fn = moflow_transform.transform_fn + elif data_name == "zinc250k": + out_size = 38 + transform_fn = moflow_transform.transform_fn_zinc250k + + preprocessor = MolGraph(out_size=out_size, kekulize=True) + canonical_smiles, mol = preprocessor.prepare_smiles_and_mol( + Chem.MolFromSmiles(mol_smiles) + ) + atoms, adj = preprocessor.get_input_features(mol) + atoms, adj, _ = transform_fn((atoms, adj, None)) + adj = np.expand_dims(adj, axis=0) + atoms = np.expand_dims(atoms, axis=0) + adj = paddle.to_tensor(data=adj) + atoms = paddle.to_tensor(data=atoms) + return adj, atoms + + +def optimize_mol( + model, + property_model, + smiles, + sim_cutoff, + lr=2.0, + num_iter=20, + data_name="qm9", + atomic_num_list=[6, 7, 8, 9, 0], + property_name="qed", + debug=True, + random=False, +): + """General for Optimize model. + + Args: + model: MoFlowNet pre-trained model + property_model: Optimize qed or plogp model + smiles: eg. CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1 + sim_cutoff: add similarity property + lr: learning rate + num_iter: learning total step + data_name: dataset name + atomic_num_list: atom list in smiles + property_name: Optimize qed or plogp model name + debug: To run optimization with more information + random: Random Generation from sampling or not + """ + if property_name == "qed": + propf = Descriptors.qed + elif property_name == "plogp": + propf = penalized_logp + else: + raise ValueError("Wrong property_name{}".format(property_name)) + model.eval() + property_model.eval() + with paddle.no_grad(): + bond, atoms = smiles_to_adj(smiles, data_name) + x = {"nodes": atoms, "edges": bond} + mol_vec, _ = property_model.encode(x) + if debug: + adj_rev, x_rev = property_model.reverse(mol_vec) + reverse_smiles = adj_to_smiles(adj_rev.cpu(), x_rev.cpu(), atomic_num_list) + logger.info(smiles, reverse_smiles) + output_dict = model(x) + z = output_dict["output"] + # sum_log_det_jacs = output_dict["sum_log_det"] + z0 = z[0].reshape([tuple(z[0].shape)[0], -1]) + z1 = z[1].reshape([tuple(z[1].shape)[0], -1]) + adj_rev, x_rev = model.reverse(paddle.concat(x=[z0, z1], axis=1)) + reverse_smiles2 = adj_to_smiles(adj_rev.cpu(), x_rev.cpu(), atomic_num_list) + train_smiles2 = adj_to_smiles(bond.cpu(), atoms.cpu(), atomic_num_list) + logger.info(train_smiles2, reverse_smiles2) + mol = Chem.MolFromSmiles(smiles) + fp1 = AllChem.GetMorganFingerprint(mol, 2) + start = smiles, propf(mol), None + out_0 = mol_vec.clone().detach() + out_0.stop_gradient = False + cur_vec = out_0 + out_1 = mol_vec.clone().detach() + out_1.stop_gradient = False + start_vec = out_1 + visited = [] + for step in range(num_iter): + prop_val = property_model.propNN(cur_vec).squeeze() + grad = paddle.grad(outputs=prop_val, inputs=cur_vec)[0] + if random: + rad = paddle.randn(shape=cur_vec.data.shape, dtype=cur_vec.data.dtype) + cur_vec = start_vec.data + lr * rad / paddle.sqrt(x=rad * rad) + else: + cur_vec = cur_vec.data + lr * 
grad.data / paddle.sqrt( + x=grad.data * grad.data + ) + out_2 = cur_vec.clone().detach() + out_2.stop_gradient = False + cur_vec = out_2 + visited.append(cur_vec) + hidden_z = paddle.concat(x=visited, axis=0) + adj, x = property_model.reverse(hidden_z) + val_res = check_validity(adj, x, atomic_num_list, debug=debug) + valid_mols = val_res["valid_mols"] + valid_smiles = val_res["valid_smiles"] + results = [] + sm_set = set() + sm_set.add(smiles) + for m, s in zip(valid_mols, valid_smiles): + if s in sm_set: + continue + sm_set.add(s) + p = propf(m) + fp2 = AllChem.GetMorganFingerprint(m, 2) + sim = DataStructs.TanimotoSimilarity(fp1, fp2) + if sim >= sim_cutoff: + results.append((s, p, sim, smiles)) + results.sort(key=lambda tup: tup[1], reverse=True) + return results, start + + +def fit_model( + model, + data, + data_prop, + N, + property_name="qed", + max_epochs=10, + learning_rate=0.001, + weight_decay=1e-05, +): + """Train for Optimize model. + + Args: + model: MoFlowNet pre-trained model + data: dataloader + data_prop: true smiles list + N: dataset number + property_name: Optimize qed or plogp model name + smiles: eg. CC(C)(C)c1ccc2occ(CC(=O)Nc3ccccc3F)c2c1 + max_epochs: train epochs + learning_rate: train learning rate + weight_decay: train weight_decay + """ + model.train() + metrics = paddle.nn.MSELoss() + optimizer = paddle.optimizer.Adam( + parameters=model.parameters(), + learning_rate=learning_rate, + weight_decay=weight_decay, + ) + assert len(data_prop) == N + iter_per_epoch = len(data) + log_step = 20 + if property_name == "qed": + col = 0 + elif property_name == "plogp": + col = 1 + else: + raise ValueError("Wrong property_name{}".format(property_name)) + for epoch in range(max_epochs): + for i, batch in enumerate(data): + x = batch[0]["nodes"] + bs = tuple(x.shape)[0] + ps = i * bs + pe = min((i + 1) * bs, N) + true_y = [[tt[col]] for tt in data_prop[ps:pe]] + true_y = ( + paddle.to_tensor(data=true_y) + .astype(dtype="float32") + .cuda(blocking=True) + ) + optimizer.clear_grad() + output_dict = model(batch[0]) + y = output_dict["output"][1] + loss = metrics(y, true_y) + loss.backward() + optimizer.step() + + if (i + 1) % log_step == 0: + logger.info( + "Epoch [{}/{}], Iter [{}/{}], loss: {:.5f},".format( + epoch + 1, max_epochs, i + 1, iter_per_epoch, loss.item() + ) + ) + return model + + +def find_top_score_smiles( + model, + property_model, + data_name, + property_name, + train_prop, + topk, + atomic_num_list, + debug, + file_path, +): + """ + Args: + model: MoFlowNet pre-trained model + property_model: Optimize qed or plogp model + data_name: dataset name + property_name: Optimize qed or plogp model name + train_prop: true smiles list + topk: Top k smiles as seeds + atomic_num_list: atom list in smiles + debug: To run optimization with more information + file_path: result save path + """ + if property_name == "qed": + col = 0 + elif property_name == "plogp": + col = 1 + logger.info("Finding top {} score".format(property_name)) + train_prop_sorted = sorted(train_prop, key=lambda tup: tup[col], reverse=True) + result_list = [] + for i, r in enumerate(train_prop_sorted): + if i >= topk: + break + if i % 50 == 0: + logger.info("Optimization {}/{}".format(i, topk)) + qed, plogp, smile = r + results, ori = optimize_mol( + model, + property_model, + smile, + sim_cutoff=0, + lr=0.005, + num_iter=100, + data_name=data_name, + atomic_num_list=atomic_num_list, + property_name=property_name, + random=False, + debug=debug, + ) + result_list.extend(results) + 
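+
+    # Rank every optimized candidate by its property score (descending), then
+    # drop any SMILES already present in the training set (raw or canonical
+    # form) before writing the remaining novel molecules to
+    # <property_name>_discovered_sorted.csv.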
result_list.sort(key=lambda tup: tup[1], reverse=True) + train_smile = set() + for i, r in enumerate(train_prop_sorted): + qed, plogp, smile = r + train_smile.add(smile) + mol = Chem.MolFromSmiles(smile) + smile2 = Chem.MolToSmiles(mol, isomericSmiles=True) + train_smile.add(smile2) + result_list_novel = [] + for i, r in enumerate(result_list): + smile, score, sim, smile_original = r + if smile not in train_smile: + result_list_novel.append(r) + save_file_path = osp.join(file_path, property_name + "_discovered_sorted.csv") + f = open(save_file_path, "w") + for r in result_list_novel: + smile, score, sim, smile_original = r + f.write("{},{},{},{}\n".format(score, smile, sim, smile_original)) + f.flush() + f.close() + logger.message("Dump done!") + + +def constrain_optimization_smiles( + model, + property_model, + data_name, + property_name, + train_prop, + topk, + atomic_num_list, + debug, + file_path, + sim_cutoff=0.0, +): + """ + Args: + model: MoFlowNet pre-trained model + property_model: Optimize qed or plogp model + data_name: dataset name + property_name: Optimize qed or plogp model name + train_prop: true smiles list + topk: Top k smiles as seeds + atomic_num_list: atom list in smiles + debug: To run optimization with more information + file_path: result save path + sim_cutoff: add similarity property + """ + if property_name == "qed": + col = 0 + elif property_name == "plogp": + col = 1 + logger.message("Constrained optimization of {} score".format(property_name)) + train_prop_sorted = sorted(train_prop, key=lambda tup: tup[col]) + result_list = [] + nfail = 0 + for i, r in enumerate(train_prop_sorted): + if i >= topk: + break + if i % 50 == 0: + logger.info("Optimization {}/{},".format(i, topk)) + qed, plogp, smile = r + results, ori = optimize_mol( + model, + property_model, + smile, + sim_cutoff=sim_cutoff, + lr=0.005, + num_iter=100, + data_name=data_name, + atomic_num_list=atomic_num_list, + property_name=property_name, + random=False, + debug=debug, + ) + if len(results) > 0: + smile2, property2, sim, _ = results[0] + plogp_delta = property2 - plogp + if plogp_delta >= 0: + result_list.append( + (smile2, property2, sim, smile, qed, plogp, plogp_delta) + ) + else: + nfail += 1 + logger.info("Failure:{}:{}".format(i, smile)) + else: + nfail += 1 + logger.info("Failure:{}:{}".format(i, smile)) + df = pd.DataFrame( + result_list, + columns=[ + "smile_new", + "prop_new", + "sim", + "smile_old", + "qed_old", + "plogp_old", + "plogp_delta", + ], + ) + logger.info(df.describe()) + save_file_path = osp.join(file_path, property_name + "_constrain_optimization.csv") + df.to_csv(save_file_path, index=False) + logger.message("Dump done!") + logger.info("nfail:{} in total:{}".format(nfail, topk)) + logger.info("success rate: {}".format((topk - nfail) * 1.0 / topk)) + + +def optimize(cfg: DictConfig): + # set hyper-parameters + b_hidden_ch = cfg.get(cfg.data_name).b_hidden_ch + a_hidden_gnn = cfg.get(cfg.data_name).a_hidden_gnn + a_hidden_lin = cfg.get(cfg.data_name).a_hidden_lin + mask_row_size_list = list(cfg.get(cfg.data_name).mask_row_size_list) + mask_row_stride_list = list(cfg.get(cfg.data_name).mask_row_stride_list) + a_n_type = len(cfg.get(cfg.data_name).atomic_num_list) + atomic_num_list = list(cfg.get(cfg.data_name).atomic_num_list) + + model_params = Hyperparameters( + b_n_type=cfg.get(cfg.data_name).b_n_type, + b_n_flow=cfg.get(cfg.data_name).b_n_flow, + b_n_block=cfg.get(cfg.data_name).b_n_block, + b_n_squeeze=cfg.get(cfg.data_name).b_n_squeeze, + b_hidden_ch=b_hidden_ch, + 
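+        # b_* entries configure the bond/adjacency flow and a_* entries the
+        # atom flow (the two coupling branches of MoFlow).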
b_affine=True, + b_conv_lu=cfg.get(cfg.data_name).b_conv_lu, + a_n_node=cfg.get(cfg.data_name).a_n_node, + a_n_type=a_n_type, + a_hidden_gnn=a_hidden_gnn, + a_hidden_lin=a_hidden_lin, + a_n_flow=cfg.get(cfg.data_name).a_n_flow, + a_n_block=cfg.get(cfg.data_name).a_n_block, + mask_row_size_list=mask_row_size_list, + mask_row_stride_list=mask_row_stride_list, + a_affine=True, + learn_dist=cfg.get(cfg.data_name).learn_dist, + seed=cfg.seed, + noise_scale=cfg.get(cfg.data_name).noise_scale, + ) + + logger.info("Model params:\n" + tabulate(model_params.print())) + + hidden = cfg.OPTIMIZE.hidden + logger.info("Hidden dim for output regression:{}".format(hidden)) + + # set transforms + if cfg.data_name == "qm9": + transform_fn = moflow_transform.transform_fn + elif cfg.data_name == "zinc250k": + transform_fn = moflow_transform.transform_fn_zinc250k + + # set select eval data + valid_idx_path = osp.join(cfg.FILE_PATH, cfg.get(cfg.data_name).valid_idx) + valid_idx = moflow_transform.get_val_ids(valid_idx_path, cfg.data_name) + + # set dataloader config + dataloader_cfg = { + "dataset": { + "name": "MOlFLOWDataset", + "file_path": cfg.FILE_PATH, + "data_name": cfg.data_name, + "mode": cfg.mode, + "valid_idx": valid_idx, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.get(cfg.data_name).label_keys, + "smiles_col": cfg.get(cfg.data_name).smiles_col, + "transform_fn": transform_fn, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "batch_size": cfg.OPTIMIZE.batch_size, + "num_workers": 0, + } + + # set model + model_cfg = dict(cfg.MODEL) + model_cfg.update({"hyper_params": model_params}) + model = ppsci.arch.MoFlowNet(**model_cfg) + ppsci.utils.save_load.load_pretrain(model, path=cfg.TRAIN.pretrained_model_path) + + model_prop_cfg = dict(cfg.MODEL_Prop) + model_prop_cfg.update( + { + "model": model, + "hidden_size": hidden, + } + ) + property_model = ppsci.arch.MoFlowProp(**model_prop_cfg) + train = ppsci.data.dataset.build_dataset(dataloader_cfg["dataset"]) + train_dataloader = ppsci.data.build_dataloader(train, dataloader_cfg) + train_idx = train.train_idx + property_model_path = osp.join( + cfg.output_dir, "{}_model.pdparams".format(cfg.OPTIMIZE.property_name) + ) + + if not osp.exists(property_model_path): + logger.message("Training regression model over molecular embedding:") + property_csv_path = osp.join( + cfg.FILE_PATH, "{}_property.csv".format(cfg.data_name) + ) + prop_list = load_property_csv(property_csv_path, normalize=True) + train_prop = [prop_list[i] for i in train_idx] + # test_prop = [prop_list[i] for i in valid_idx] + + N = len(train) + property_model = fit_model( + property_model, + train_dataloader, + train_prop, + N, + property_name=cfg.OPTIMIZE.property_name, + max_epochs=cfg.OPTIMIZE.max_epochs, + learning_rate=cfg.OPTIMIZE.learning_rate, + weight_decay=cfg.OPTIMIZE.weight_decay, + ) + logger.message( + "saving {} regression model to: {}".format( + cfg.OPTIMIZE.property_name, property_model_path + ) + ) + paddle.save(obj=property_model.state_dict(), path=property_model_path) + + else: + logger.message("Loading trained regression model for optimization") + property_csv_path = osp.join( + cfg.FILE_PATH, "{}_property.csv".format(cfg.data_name) + ) + prop_list = load_property_csv(property_csv_path, normalize=True) + train_prop = [prop_list[i] for i in train_idx] + # test_prop = [prop_list[i] for i in valid_idx] + + logger.message( + "loading {} regression model from: {}".format( + cfg.OPTIMIZE.property_name, property_model_path + 
) + ) + + state_dict = paddle.load(path=property_model_path) + property_model.set_state_dict(state_dict) + property_model.eval() + model.eval() + if cfg.OPTIMIZE.topscore: + logger.message("Finding top score:") + find_top_score_smiles( + model, + property_model, + cfg.data_name, + cfg.OPTIMIZE.property_name, + train_prop, + cfg.OPTIMIZE.topk, + atomic_num_list, + cfg.OPTIMIZE.debug, + cfg.output_dir, + ) + if cfg.OPTIMIZE.consopt: + logger.message("Constrained optimization:") + constrain_optimization_smiles( + model, + property_model, + cfg.data_name, + cfg.OPTIMIZE.property_name, + train_prop, + cfg.OPTIMIZE.topk, + atomic_num_list, + cfg.OPTIMIZE.debug, + cfg.output_dir, + sim_cutoff=cfg.OPTIMIZE.sim_cutoff, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="moflow_optimize.yaml") +def main(cfg: DictConfig): + optimize(cfg) + + +if __name__ == "__main__": + main() diff --git a/examples/moflow/requirements.txt b/examples/moflow/requirements.txt index b71d0d0a2f..53dfa383ab 100644 --- a/examples/moflow/requirements.txt +++ b/examples/moflow/requirements.txt @@ -1,7 +1,7 @@ -CairoSVG -networkx -pandas==2.2.2 -rdkit==2022.3.3 -scipy -tabulate -tqdm +CairoSVG +networkx +pandas==2.2.2 +rdkit==2022.3.3 +scipy +tabulate +tqdm diff --git a/examples/moflow/test_generate.py b/examples/moflow/test_generate.py index a5232ab536..acdf73385a 100644 --- a/examples/moflow/test_generate.py +++ b/examples/moflow/test_generate.py @@ -1,634 +1,634 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -from os import path as osp - -import cairosvg -import hydra -import moflow_transform -import numpy as np -import paddle -import pandas as pd -from moflow_utils import Hyperparameters -from moflow_utils import _to_numpy_array -from moflow_utils import adj_to_smiles -from moflow_utils import check_novelty -from moflow_utils import check_validity -from moflow_utils import construct_mol -from moflow_utils import correct_mol -from moflow_utils import penalized_logp -from moflow_utils import valid_mol -from moflow_utils import valid_mol_can_with_seg -from omegaconf import DictConfig -from rdkit import Chem -from rdkit import DataStructs -from rdkit.Chem import AllChem -from rdkit.Chem import Descriptors -from rdkit.Chem import Draw -from tabulate import tabulate - -import ppsci -from ppsci.utils import logger - - -def generate_mols(model, batch_size=20, temp=0.7, z_mu=None, true_adj=None): - """generate mols - - Args: - model (object): Generated eval Moflownet model - batch_size (int, optional): Batch size during evaling per GPU. Defaults to 20. - temp (float, optional): temperature of the gaussian distribution. Defaults to 0.7. - z_mu (int, optional): latent vector of a molecule. Defaults to None. - true_adj (paddle.Tensor, optional): True Adjacency. Defaults to None. 
- - Returns: - Tuple(paddle.Tensor, paddle.Tensor): Adjacency and nodes - """ - z_dim = model.b_size + model.a_size - mu = np.zeros(z_dim) - sigma_diag = np.ones(z_dim) - if model.hyper_params.learn_dist: - if len(model.ln_var) == 1: - sigma_diag = np.sqrt(np.exp(model.ln_var.item())) * sigma_diag - elif len(model.ln_var) == 2: - sigma_diag[: model.b_size] = ( - np.sqrt(np.exp(model.ln_var[0].item())) * sigma_diag[: model.b_size] - ) - sigma_diag[model.b_size + 1 :] = ( - np.sqrt(np.exp(model.ln_var[1].item())) * sigma_diag[model.b_size + 1 :] - ) - sigma = temp * sigma_diag - with paddle.no_grad(): - if z_mu is not None: - mu = z_mu - sigma = 0.01 * np.eye(z_dim) - z = np.random.normal(mu, sigma, (batch_size, z_dim)) - z = paddle.to_tensor(data=z).astype(paddle.get_default_dtype()) - adj, x = model.reverse(z, true_adj=true_adj) - return adj, x - - -def generate_mols_interpolation_grid( - model, z0=None, true_adj=None, seed=0, mols_per_row=13, delta=1.0 -): - np.random.seed(seed) - latent_size = model.b_size + model.a_size - if z0 is None: - mu = np.zeros([latent_size], dtype=np.float32) - sigma = 0.02 * np.eye(latent_size, dtype=np.float32) - z0 = np.random.multivariate_normal(mu, sigma).astype(np.float32) - x = np.random.randn(latent_size) - x /= np.linalg.norm(x) - y = np.random.randn(latent_size) - y -= y.dot(x) * x - y /= np.linalg.norm(y) - num_mols_to_edge = mols_per_row // 2 - z_list = [] - for dx in range(-num_mols_to_edge, num_mols_to_edge + 1): - for dy in range(-num_mols_to_edge, num_mols_to_edge + 1): - z = z0 + x * delta * dx + y * delta * dy - z_list.append(z) - z_array = paddle.to_tensor(data=z_list).astype(dtype="float32") - adj, xf = model.reverse(z_array, true_adj=true_adj) - return adj, xf - - -def visualize_interpolation_between_2_points( - filepath, - model, - mol_smiles=None, - mols_per_row=15, - n_interpolation=100, - seed=0, - atomic_num_list=[6, 7, 8, 9, 0], - true_data=None, - device=None, - data_name="qm9", -): - if mol_smiles is not None: - raise NotImplementedError - else: - with paddle.no_grad(): - np.random.seed(seed) - mol_index = np.random.randint(0, len(true_data["edges"]), 2) - adj0 = np.expand_dims(true_data["edges"][mol_index[0]], axis=0) - x0 = np.expand_dims(true_data["nodes"][mol_index[0]], axis=0) - adj0 = paddle.to_tensor(data=adj0) - x0 = paddle.to_tensor(data=x0) - smile0 = adj_to_smiles(adj0, x0, atomic_num_list)[0] - mol0 = Chem.MolFromSmiles(smile0) - fp0 = AllChem.GetMorganFingerprint(mol0, 2) - adj1 = np.expand_dims(true_data["edges"][mol_index[1]], axis=0) - x1 = np.expand_dims(true_data["nodes"][mol_index[1]], axis=0) - adj1 = paddle.to_tensor(data=adj1) - x1 = paddle.to_tensor(data=x1) - smile1 = adj_to_smiles(adj1, x1, atomic_num_list)[0] - # mol1 = Chem.MolFromSmiles(smile1) - # fp1 = AllChem.GetMorganFingerprint(mol1, 2) - logger.info("seed smile0: {}, seed smile1: {}".format(smile0, smile1)) - x_tumple0 = {"nodes": x0, "edges": adj0} - # x_tumple1 = {"nodes": x1, "edges": adj1} - output_dict = model(x_tumple0) - z0 = output_dict["output"] - z0[0] = z0[0].reshape([tuple(z0[0].shape)[0], -1]) - z0[1] = z0[1].reshape([tuple(z0[1].shape)[0], -1]) - z0 = paddle.concat(x=(z0[0], z0[1]), axis=1).squeeze(axis=0) - z0 = _to_numpy_array(z0) - - output_dict = model(x_tumple0) - z1 = output_dict["output"] - z1[0] = z1[0].reshape([tuple(z1[0].shape)[0], -1]) - z1[1] = z1[1].reshape([tuple(z1[1].shape)[0], -1]) - z1 = paddle.concat(x=(z1[0], z1[1]), axis=1).squeeze(axis=0) - z1 = _to_numpy_array(z1) - d = z1 - z0 - z_list = [ - (z0 + i * 1.0 / 
(n_interpolation + 1) * d) for i in range(n_interpolation + 2) - ] - z_array = paddle.to_tensor(data=z_list).astype(dtype="float32") - - adjm, xm = model.reverse(z_array) - adjm = _to_numpy_array(adjm) - xm = _to_numpy_array(xm) - interpolation_mols = [ - valid_mol(construct_mol(x_elem, adj_elem, atomic_num_list)) - for x_elem, adj_elem in zip(xm, adjm) - ] - valid_mols = [mol for mol in interpolation_mols if mol is not None] - valid_mols_smiles = [Chem.MolToSmiles(mol) for mol in valid_mols] - valid_mols_smiles_unique = list(set(valid_mols_smiles)) - valid_mols_unique = [Chem.MolFromSmiles(s) for s in valid_mols_smiles_unique] - valid_mols_smiles_unique_label = [] - for s, m in zip(valid_mols_smiles_unique, valid_mols_unique): - fp = AllChem.GetMorganFingerprint(m, 2) - sim = DataStructs.TanimotoSimilarity(fp, fp0) - s = "{:.2f}\n".format(sim) + s - if s == smile0: - s = "***[" + s + "]***" - valid_mols_smiles_unique_label.append(s) - logger.info( - "interpolation_mols valid {} / {}".format( - len(valid_mols), len(interpolation_mols) - ) - ) - if data_name == "qm9": - psize = 200, 200 - else: - psize = 200, 200 - img = Draw.MolsToGridImage( - valid_mols_unique, - legends=valid_mols_smiles_unique_label, - molsPerRow=mols_per_row, - subImgSize=psize, - ) - img.save(filepath + "_.png") - svg = Draw.MolsToGridImage( - valid_mols_unique, - legends=valid_mols_smiles_unique_label, - molsPerRow=mols_per_row, - subImgSize=psize, - useSVG=True, - ) - cairosvg.svg2pdf(bytestring=svg.encode("utf-8"), write_to=filepath + ".pdf") - cairosvg.svg2png(bytestring=svg.encode("utf-8"), write_to=filepath + ".png") - logger.message("Dump {}.png/pdf done".format(filepath)) - - -def visualize_interpolation( - filepath, - model, - mol_smiles=None, - mols_per_row=13, - delta=0.1, - seed=0, - atomic_num_list=[6, 7, 8, 9, 0], - true_data=None, - data_name="qm9", - keep_duplicate=False, - correct=True, -): - if mol_smiles is not None: - raise NotImplementedError - else: - with paddle.no_grad(): - np.random.seed(seed) - mol_index = np.random.randint(0, len(true_data)) - adj = np.expand_dims(true_data["edges"][mol_index], axis=0) - x = np.expand_dims(true_data["nodes"][mol_index], axis=0) - # adj = paddle.to_tensor(data=adj) - # x = paddle.to_tensor(data=x) - smile0 = adj_to_smiles(adj, x, atomic_num_list)[0] - mol0 = Chem.MolFromSmiles(smile0) - fp0 = AllChem.GetMorganFingerprint(mol0, 2) - logger.info("seed smile: {}".format(smile0)) - x_tumple = {"nodes": paddle.to_tensor(x), "edges": paddle.to_tensor(adj)} - output_dict = model(x_tumple) - z0 = output_dict["output"] - z0[0] = z0[0].reshape([tuple(z0[0].shape)[0], -1]) - z0[1] = z0[1].reshape([tuple(z0[1].shape)[0], -1]) - z0 = paddle.concat(x=(z0[0], z0[1]), axis=1).squeeze(axis=0) - z0 = _to_numpy_array(z0) - adjm, xm = generate_mols_interpolation_grid( - model, z0=z0, mols_per_row=mols_per_row, delta=delta, seed=seed - ) - adjm = _to_numpy_array(adjm) - xm = _to_numpy_array(xm) - if correct: - interpolation_mols = [] - for x_elem, adj_elem in zip(xm, adjm): - mol = construct_mol(x_elem, adj_elem, atomic_num_list) - cmol = correct_mol(mol) - vcmol = valid_mol_can_with_seg(cmol) - interpolation_mols.append(vcmol) - else: - interpolation_mols = [ - valid_mol(construct_mol(x_elem, adj_elem, atomic_num_list)) - for x_elem, adj_elem in zip(xm, adjm) - ] - valid_mols = [mol for mol in interpolation_mols if mol is not None] - valid_mols_smiles = [Chem.MolToSmiles(mol) for mol in valid_mols] - if keep_duplicate: - valid_mols_smiles_unique = valid_mols_smiles - else: - 
valid_mols_smiles_unique = list(set(valid_mols_smiles)) - valid_mols_unique = [Chem.MolFromSmiles(s) for s in valid_mols_smiles_unique] - valid_mols_smiles_unique_label = [] - logger.info( - "interpolation_mols:{}, valid_mols:{}, valid_mols_smiles_unique:{}".format( - len(interpolation_mols), len(valid_mols), len(valid_mols_smiles_unique) - ) - ) - for s, m in zip(valid_mols_smiles_unique, valid_mols_unique): - fp = AllChem.GetMorganFingerprint(m, 2) - sim = DataStructs.TanimotoSimilarity(fp, fp0) - s = " {:.2f}".format(sim) - valid_mols_smiles_unique_label.append(s) - if keep_duplicate: - molsPerRow = mols_per_row - else: - molsPerRow = 9 - k = len(valid_mols_smiles_unique) - logger.info( - "interpolation_mols valid {} / {}".format( - len(valid_mols), len(interpolation_mols) - ) - ) - if data_name == "qm9": - psize = 150, 150 - else: - psize = 150, 150 - img = Draw.MolsToGridImage( - valid_mols_unique[:k], - molsPerRow=molsPerRow, - legends=valid_mols_smiles_unique_label[:k], - subImgSize=psize, - ) - img.save(filepath + "_.png") - svg = Draw.MolsToGridImage( - valid_mols_unique[:k], - molsPerRow=molsPerRow, - legends=valid_mols_smiles_unique_label[:k], - subImgSize=psize, - useSVG=True, - ) - cairosvg.svg2pdf(bytestring=svg.encode("utf-8"), write_to=filepath + ".pdf") - cairosvg.svg2png(bytestring=svg.encode("utf-8"), write_to=filepath + ".png") - logger.info("Dump {}.png/pdf done".format(filepath)) - - -def evaluate(cfg: DictConfig): - # set training hyper-parameters - b_hidden_ch = cfg.get(cfg.data_name).b_hidden_ch - a_hidden_gnn = cfg.get(cfg.data_name).a_hidden_gnn - a_hidden_lin = cfg.get(cfg.data_name).a_hidden_lin - mask_row_size_list = list(cfg.get(cfg.data_name).mask_row_size_list) - mask_row_stride_list = list(cfg.get(cfg.data_name).mask_row_stride_list) - a_n_type = len(cfg.get(cfg.data_name).atomic_num_list) - atomic_num_list = list(cfg.get(cfg.data_name).atomic_num_list) - - model_params = Hyperparameters( - b_n_type=cfg.get(cfg.data_name).b_n_type, - b_n_flow=cfg.get(cfg.data_name).b_n_flow, - b_n_block=cfg.get(cfg.data_name).b_n_block, - b_n_squeeze=cfg.get(cfg.data_name).b_n_squeeze, - b_hidden_ch=b_hidden_ch, - b_affine=True, - b_conv_lu=cfg.get(cfg.data_name).b_conv_lu, - a_n_node=cfg.get(cfg.data_name).a_n_node, - a_n_type=a_n_type, - a_hidden_gnn=a_hidden_gnn, - a_hidden_lin=a_hidden_lin, - a_n_flow=cfg.get(cfg.data_name).a_n_flow, - a_n_block=cfg.get(cfg.data_name).a_n_block, - mask_row_size_list=mask_row_size_list, - mask_row_stride_list=mask_row_stride_list, - a_affine=True, - learn_dist=cfg.get(cfg.data_name).learn_dist, - seed=cfg.seed, - noise_scale=cfg.get(cfg.data_name).noise_scale, - ) - - logger.info("Model params:\n" + tabulate(model_params.print())) - - batch_size = cfg.EVAL.batch_size - - # set model for testing - model_cfg = dict(cfg.MODEL) - model_cfg.update({"hyper_params": model_params}) - model = ppsci.arch.MoFlowNet(**model_cfg) - ppsci.utils.save_load.load_pretrain(model, path=cfg.EVAL.pretrained_model_path) - model.eval() - - # set transforms - if cfg.data_name == "qm9": - transform_fn = moflow_transform.transform_fn - elif cfg.data_name == "zinc250k": - transform_fn = moflow_transform.transform_fn_zinc250k - cfg.Random.update({"delta": 0.1}) - - # set select eval model - cfg.EVAL.update(cfg.get(cfg.EVAL_mode)) - # set select eval data - valid_idx_path = osp.join(cfg.FILE_PATH, cfg.get(cfg.data_name).valid_idx) - valid_idx = moflow_transform.get_val_ids(valid_idx_path, cfg.data_name) - - # set dataloader config - dataloader_cfg = { - "dataset": { 
- "name": "MOlFLOWDataset", - "file_path": cfg.FILE_PATH, - "data_name": cfg.data_name, - "mode": cfg.mode, - "valid_idx": valid_idx, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.get(cfg.data_name).label_keys, - "smiles_col": cfg.get(cfg.data_name).smiles_col, - "transform_fn": transform_fn, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": cfg.EVAL.num_workers, - } - - test = ppsci.data.dataset.build_dataset(dataloader_cfg["dataset"]) - dataloader_cfg["dataset"].update({"mode": "train"}) - train = ppsci.data.dataset.build_dataset(dataloader_cfg["dataset"]) - logger.info( - "{} in total, {} training data, {} testing data, {} batchsize, train/batchsize {}".format( - len(train) + len(test), - len(train), - len(test), - batch_size, - len(train) / batch_size, - ) - ) - - if cfg.EVAL.reconstruct: - train_dataloader = ppsci.data.build_dataloader(train, dataloader_cfg) - reconstruction_rate_list = [] - max_iter = len(train_dataloader) - input_keys = cfg.MODEL.input_keys - output_keys = cfg.MODEL.output_keys - for i, batch in enumerate(train_dataloader, start=0): - output_dict = model(batch[0]) - x = batch[0][input_keys[0]] - adj = batch[0][input_keys[1]] - z = output_dict[output_keys[0]] - z0 = z[0].reshape([tuple(z[0].shape)[0], -1]) - z1 = z[1].reshape([tuple(z[1].shape)[0], -1]) - adj_rev, x_rev = model.reverse(paddle.concat(x=[z0, z1], axis=1)) - reverse_smiles = adj_to_smiles(adj_rev.cpu(), x_rev.cpu(), atomic_num_list) - train_smiles = adj_to_smiles(adj.cpu(), x.cpu(), atomic_num_list) - lb = np.array([int(a != b) for a, b in zip(train_smiles, reverse_smiles)]) - idx = np.where(lb)[0] - if len(idx) > 0: - for k in idx: - logger.info( - "{}, train: {}, reverse: {}".format( - i * batch_size + k, train_smiles[k], reverse_smiles[k] - ) - ) - reconstruction_rate = 1.0 - lb.mean() - reconstruction_rate_list.append(reconstruction_rate) - logger.message( - "iter/total: {}/{}, reconstruction_rate:{}".format( - i, max_iter, reconstruction_rate - ) - ) - reconstruction_rate_total = np.array(reconstruction_rate_list).mean() - logger.message( - "reconstruction_rate for all the train data:{} in {}".format( - reconstruction_rate_total, len(train) - ) - ) - exit(0) - - if cfg.EVAL.int2point: - inputs = train.input - labels = train.label - items = [] - for idx in range(len(train)): - input_item = [value[idx] for key, value in inputs.items()] - label_item = [value[idx] for key, value in labels.items()] - item = input_item + label_item - item = transform_fn(item) - items.append(item) - items = np.array(items, dtype=object).T - inputs = {key: np.stack(items[i], axis=0) for i, key in enumerate(inputs)} - - mol_smiles = None - gen_dir = osp.join(cfg.output_dir, cfg.EVAL_mode) - logger.message("Dump figure in {}".format(gen_dir)) - if not osp.exists(gen_dir): - os.makedirs(gen_dir) - for seed in range(cfg.EVAL.inter_times): - filepath = osp.join( - gen_dir, "2points_interpolation-2point_molecules_seed{}".format(seed) - ) - visualize_interpolation_between_2_points( - filepath, - model, - mol_smiles=mol_smiles, - mols_per_row=15, - n_interpolation=50, - atomic_num_list=atomic_num_list, - seed=seed, - true_data=inputs, - data_name=cfg.data_name, - ) - exit(0) - - if cfg.EVAL.intgrid: - inputs = train.input - labels = train.label - items = [] - for idx in range(len(train)): - input_item = [value[idx] for key, value in inputs.items()] - label_item = [value[idx] for key, value in labels.items()] - item = 
input_item + label_item - item = transform_fn(item) - items.append(item) - items = np.array(items, dtype=object).T - inputs = {key: np.stack(items[i], axis=0) for i, key in enumerate(inputs)} - - mol_smiles = None - gen_dir = os.path.join(cfg.output_dir, cfg.EVAL_mode) - logger.message("Dump figure in {}".format(gen_dir)) - if not os.path.exists(gen_dir): - os.makedirs(gen_dir) - for seed in range(cfg.EVAL.inter_times): - filepath = os.path.join( - gen_dir, "generated_interpolation-grid_molecules_seed{}".format(seed) - ) - visualize_interpolation( - filepath, - model, - mol_smiles=mol_smiles, - mols_per_row=9, - delta=cfg.EVAL.delta, - atomic_num_list=atomic_num_list, - seed=seed, - true_data=inputs, - data_name=cfg.data_name, - keep_duplicate=True, - ) - filepath = os.path.join( - gen_dir, - "generated_interpolation-grid_molecules_seed{}_unique".format(seed), - ) - visualize_interpolation( - filepath, - model, - mol_smiles=mol_smiles, - mols_per_row=9, - delta=cfg.EVAL.delta, - atomic_num_list=atomic_num_list, - seed=seed, - true_data=inputs, - data_name=cfg.data_name, - keep_duplicate=False, - ) - exit(0) - - inputs = train.input - labels = train.label - items = [] - for idx in range(len(train)): - input_item = [value[idx] for key, value in inputs.items()] - label_item = [value[idx] for key, value in labels.items()] - item = input_item + label_item - item = transform_fn(item) - items.append(item) - items = np.array(items, dtype=object).T - inputs = {key: np.stack(items[i], axis=0) for i, key in enumerate(inputs)} - - train_x = [a for a in inputs["nodes"]] - train_adj = [a for a in inputs["edges"]] - train_smiles = adj_to_smiles(train_adj, train_x, atomic_num_list) - - valid_ratio = [] - unique_ratio = [] - novel_ratio = [] - abs_unique_ratio = [] - abs_novel_ratio = [] - for i in range(cfg.EVAL.n_experiments): - adj, x = generate_mols( - model, batch_size=batch_size, true_adj=None, temp=cfg.EVAL.temperature - ) - val_res = check_validity( - adj, x, atomic_num_list, correct_validity=cfg.EVAL.correct_validity - ) - novel_r, abs_novel_r = check_novelty( - val_res["valid_smiles"], train_smiles, tuple(x.shape)[0] - ) - novel_ratio.append(novel_r) - abs_novel_ratio.append(abs_novel_r) - unique_ratio.append(val_res["unique_ratio"]) - abs_unique_ratio.append(val_res["abs_unique_ratio"]) - valid_ratio.append(val_res["valid_ratio"]) - # n_valid = len(val_res["valid_mols"]) - if cfg.save_score: - assert len(val_res["valid_smiles"]) == len(val_res["valid_mols"]) - smiles_qed_plogp = [ - (sm, Descriptors.qed(mol), penalized_logp(mol)) - for sm, mol in zip(val_res["valid_smiles"], val_res["valid_mols"]) - ] - smiles_qed_plogp.sort(key=lambda tup: tup[2], reverse=True) - gen_dir = os.path.join(cfg.output_dir, cfg.EVAL_mode) - os.makedirs(gen_dir, exist_ok=True) - filepath = os.path.join( - gen_dir, "smiles_qed_plogp_{}_RankedByPlogp.csv".format(i) - ) - df = pd.DataFrame( - smiles_qed_plogp, columns=["Smiles", "QED", "Penalized_logp"] - ) - df.to_csv(filepath, index=None, header=True) - smiles_qed_plogp.sort(key=lambda tup: tup[1], reverse=True) - filepath2 = os.path.join( - gen_dir, "smiles_qed_plogp_{}_RankedByQED.csv".format(i) - ) - df2 = pd.DataFrame( - smiles_qed_plogp, columns=["Smiles", "QED", "Penalized_logp"] - ) - df2.to_csv(filepath2, index=None, header=True) - if cfg.EVAL.save_fig: - gen_dir = os.path.join(cfg.output_dir, cfg.EVAL_mode) - os.makedirs(gen_dir, exist_ok=True) - filepath = os.path.join(gen_dir, "generated_mols_{}.png".format(i)) - img = Draw.MolsToGridImage( - 
val_res["valid_mols"], - legends=val_res["valid_smiles"], - molsPerRow=20, - subImgSize=(300, 300), - ) - img.save(filepath) - logger.info( - "validity: mean={:.2f}%, sd={:.2f}%, vals={}".format( - np.mean(valid_ratio), np.std(valid_ratio), valid_ratio - ) - ) - logger.info( - "novelty: mean={:.2f}%, sd={:.2f}%, vals={}".format( - np.mean(novel_ratio), np.std(novel_ratio), novel_ratio - ) - ) - logger.info( - "uniqueness: mean={:.2f}%, sd={:.2f}%, vals={}".format( - np.mean(unique_ratio), np.std(unique_ratio), unique_ratio - ) - ) - logger.info( - "abs_novelty: mean={:.2f}%, sd={:.2f}%, vals={}".format( - np.mean(abs_novel_ratio), np.std(abs_novel_ratio), abs_novel_ratio - ) - ) - logger.info( - "abs_uniqueness: mean={:.2f}%, sd={:.2f}%, vals={}".format( - np.mean(abs_unique_ratio), np.std(abs_unique_ratio), abs_unique_ratio - ) - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="moflow_test.yaml") -def main(cfg: DictConfig): - evaluate(cfg) - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from os import path as osp + +import cairosvg +import hydra +import moflow_transform +import numpy as np +import paddle +import pandas as pd +from moflow_utils import Hyperparameters +from moflow_utils import _to_numpy_array +from moflow_utils import adj_to_smiles +from moflow_utils import check_novelty +from moflow_utils import check_validity +from moflow_utils import construct_mol +from moflow_utils import correct_mol +from moflow_utils import penalized_logp +from moflow_utils import valid_mol +from moflow_utils import valid_mol_can_with_seg +from omegaconf import DictConfig +from rdkit import Chem +from rdkit import DataStructs +from rdkit.Chem import AllChem +from rdkit.Chem import Descriptors +from rdkit.Chem import Draw +from tabulate import tabulate + +import ppsci +from ppsci.utils import logger + + +def generate_mols(model, batch_size=20, temp=0.7, z_mu=None, true_adj=None): + """generate mols + + Args: + model (object): Generated eval Moflownet model + batch_size (int, optional): Batch size during evaling per GPU. Defaults to 20. + temp (float, optional): temperature of the gaussian distribution. Defaults to 0.7. + z_mu (int, optional): latent vector of a molecule. Defaults to None. + true_adj (paddle.Tensor, optional): True Adjacency. Defaults to None. 
+ + Returns: + Tuple(paddle.Tensor, paddle.Tensor): Adjacency and nodes + """ + z_dim = model.b_size + model.a_size + mu = np.zeros(z_dim) + sigma_diag = np.ones(z_dim) + if model.hyper_params.learn_dist: + if len(model.ln_var) == 1: + sigma_diag = np.sqrt(np.exp(model.ln_var.item())) * sigma_diag + elif len(model.ln_var) == 2: + sigma_diag[: model.b_size] = ( + np.sqrt(np.exp(model.ln_var[0].item())) * sigma_diag[: model.b_size] + ) + sigma_diag[model.b_size + 1 :] = ( + np.sqrt(np.exp(model.ln_var[1].item())) * sigma_diag[model.b_size + 1 :] + ) + sigma = temp * sigma_diag + with paddle.no_grad(): + if z_mu is not None: + mu = z_mu + sigma = 0.01 * np.eye(z_dim) + z = np.random.normal(mu, sigma, (batch_size, z_dim)) + z = paddle.to_tensor(data=z).astype(paddle.get_default_dtype()) + adj, x = model.reverse(z, true_adj=true_adj) + return adj, x + + +def generate_mols_interpolation_grid( + model, z0=None, true_adj=None, seed=0, mols_per_row=13, delta=1.0 +): + np.random.seed(seed) + latent_size = model.b_size + model.a_size + if z0 is None: + mu = np.zeros([latent_size], dtype=np.float32) + sigma = 0.02 * np.eye(latent_size, dtype=np.float32) + z0 = np.random.multivariate_normal(mu, sigma).astype(np.float32) + x = np.random.randn(latent_size) + x /= np.linalg.norm(x) + y = np.random.randn(latent_size) + y -= y.dot(x) * x + y /= np.linalg.norm(y) + num_mols_to_edge = mols_per_row // 2 + z_list = [] + for dx in range(-num_mols_to_edge, num_mols_to_edge + 1): + for dy in range(-num_mols_to_edge, num_mols_to_edge + 1): + z = z0 + x * delta * dx + y * delta * dy + z_list.append(z) + z_array = paddle.to_tensor(data=z_list).astype(dtype="float32") + adj, xf = model.reverse(z_array, true_adj=true_adj) + return adj, xf + + +def visualize_interpolation_between_2_points( + filepath, + model, + mol_smiles=None, + mols_per_row=15, + n_interpolation=100, + seed=0, + atomic_num_list=[6, 7, 8, 9, 0], + true_data=None, + device=None, + data_name="qm9", +): + if mol_smiles is not None: + raise NotImplementedError + else: + with paddle.no_grad(): + np.random.seed(seed) + mol_index = np.random.randint(0, len(true_data["edges"]), 2) + adj0 = np.expand_dims(true_data["edges"][mol_index[0]], axis=0) + x0 = np.expand_dims(true_data["nodes"][mol_index[0]], axis=0) + adj0 = paddle.to_tensor(data=adj0) + x0 = paddle.to_tensor(data=x0) + smile0 = adj_to_smiles(adj0, x0, atomic_num_list)[0] + mol0 = Chem.MolFromSmiles(smile0) + fp0 = AllChem.GetMorganFingerprint(mol0, 2) + adj1 = np.expand_dims(true_data["edges"][mol_index[1]], axis=0) + x1 = np.expand_dims(true_data["nodes"][mol_index[1]], axis=0) + adj1 = paddle.to_tensor(data=adj1) + x1 = paddle.to_tensor(data=x1) + smile1 = adj_to_smiles(adj1, x1, atomic_num_list)[0] + # mol1 = Chem.MolFromSmiles(smile1) + # fp1 = AllChem.GetMorganFingerprint(mol1, 2) + logger.info("seed smile0: {}, seed smile1: {}".format(smile0, smile1)) + x_tumple0 = {"nodes": x0, "edges": adj0} + # x_tumple1 = {"nodes": x1, "edges": adj1} + output_dict = model(x_tumple0) + z0 = output_dict["output"] + z0[0] = z0[0].reshape([tuple(z0[0].shape)[0], -1]) + z0[1] = z0[1].reshape([tuple(z0[1].shape)[0], -1]) + z0 = paddle.concat(x=(z0[0], z0[1]), axis=1).squeeze(axis=0) + z0 = _to_numpy_array(z0) + + output_dict = model(x_tumple0) + z1 = output_dict["output"] + z1[0] = z1[0].reshape([tuple(z1[0].shape)[0], -1]) + z1[1] = z1[1].reshape([tuple(z1[1].shape)[0], -1]) + z1 = paddle.concat(x=(z1[0], z1[1]), axis=1).squeeze(axis=0) + z1 = _to_numpy_array(z1) + d = z1 - z0 + z_list = [ + (z0 + i * 1.0 / 
(n_interpolation + 1) * d) for i in range(n_interpolation + 2) + ] + z_array = paddle.to_tensor(data=z_list).astype(dtype="float32") + + adjm, xm = model.reverse(z_array) + adjm = _to_numpy_array(adjm) + xm = _to_numpy_array(xm) + interpolation_mols = [ + valid_mol(construct_mol(x_elem, adj_elem, atomic_num_list)) + for x_elem, adj_elem in zip(xm, adjm) + ] + valid_mols = [mol for mol in interpolation_mols if mol is not None] + valid_mols_smiles = [Chem.MolToSmiles(mol) for mol in valid_mols] + valid_mols_smiles_unique = list(set(valid_mols_smiles)) + valid_mols_unique = [Chem.MolFromSmiles(s) for s in valid_mols_smiles_unique] + valid_mols_smiles_unique_label = [] + for s, m in zip(valid_mols_smiles_unique, valid_mols_unique): + fp = AllChem.GetMorganFingerprint(m, 2) + sim = DataStructs.TanimotoSimilarity(fp, fp0) + s = "{:.2f}\n".format(sim) + s + if s == smile0: + s = "***[" + s + "]***" + valid_mols_smiles_unique_label.append(s) + logger.info( + "interpolation_mols valid {} / {}".format( + len(valid_mols), len(interpolation_mols) + ) + ) + if data_name == "qm9": + psize = 200, 200 + else: + psize = 200, 200 + img = Draw.MolsToGridImage( + valid_mols_unique, + legends=valid_mols_smiles_unique_label, + molsPerRow=mols_per_row, + subImgSize=psize, + ) + img.save(filepath + "_.png") + svg = Draw.MolsToGridImage( + valid_mols_unique, + legends=valid_mols_smiles_unique_label, + molsPerRow=mols_per_row, + subImgSize=psize, + useSVG=True, + ) + cairosvg.svg2pdf(bytestring=svg.encode("utf-8"), write_to=filepath + ".pdf") + cairosvg.svg2png(bytestring=svg.encode("utf-8"), write_to=filepath + ".png") + logger.message("Dump {}.png/pdf done".format(filepath)) + + +def visualize_interpolation( + filepath, + model, + mol_smiles=None, + mols_per_row=13, + delta=0.1, + seed=0, + atomic_num_list=[6, 7, 8, 9, 0], + true_data=None, + data_name="qm9", + keep_duplicate=False, + correct=True, +): + if mol_smiles is not None: + raise NotImplementedError + else: + with paddle.no_grad(): + np.random.seed(seed) + mol_index = np.random.randint(0, len(true_data)) + adj = np.expand_dims(true_data["edges"][mol_index], axis=0) + x = np.expand_dims(true_data["nodes"][mol_index], axis=0) + # adj = paddle.to_tensor(data=adj) + # x = paddle.to_tensor(data=x) + smile0 = adj_to_smiles(adj, x, atomic_num_list)[0] + mol0 = Chem.MolFromSmiles(smile0) + fp0 = AllChem.GetMorganFingerprint(mol0, 2) + logger.info("seed smile: {}".format(smile0)) + x_tumple = {"nodes": paddle.to_tensor(x), "edges": paddle.to_tensor(adj)} + output_dict = model(x_tumple) + z0 = output_dict["output"] + z0[0] = z0[0].reshape([tuple(z0[0].shape)[0], -1]) + z0[1] = z0[1].reshape([tuple(z0[1].shape)[0], -1]) + z0 = paddle.concat(x=(z0[0], z0[1]), axis=1).squeeze(axis=0) + z0 = _to_numpy_array(z0) + adjm, xm = generate_mols_interpolation_grid( + model, z0=z0, mols_per_row=mols_per_row, delta=delta, seed=seed + ) + adjm = _to_numpy_array(adjm) + xm = _to_numpy_array(xm) + if correct: + interpolation_mols = [] + for x_elem, adj_elem in zip(xm, adjm): + mol = construct_mol(x_elem, adj_elem, atomic_num_list) + cmol = correct_mol(mol) + vcmol = valid_mol_can_with_seg(cmol) + interpolation_mols.append(vcmol) + else: + interpolation_mols = [ + valid_mol(construct_mol(x_elem, adj_elem, atomic_num_list)) + for x_elem, adj_elem in zip(xm, adjm) + ] + valid_mols = [mol for mol in interpolation_mols if mol is not None] + valid_mols_smiles = [Chem.MolToSmiles(mol) for mol in valid_mols] + if keep_duplicate: + valid_mols_smiles_unique = valid_mols_smiles + else: + 
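+            # keep_duplicate=False: collapse repeated SMILES so each distinct
+            # molecule is drawn only once in the output grid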
valid_mols_smiles_unique = list(set(valid_mols_smiles)) + valid_mols_unique = [Chem.MolFromSmiles(s) for s in valid_mols_smiles_unique] + valid_mols_smiles_unique_label = [] + logger.info( + "interpolation_mols:{}, valid_mols:{}, valid_mols_smiles_unique:{}".format( + len(interpolation_mols), len(valid_mols), len(valid_mols_smiles_unique) + ) + ) + for s, m in zip(valid_mols_smiles_unique, valid_mols_unique): + fp = AllChem.GetMorganFingerprint(m, 2) + sim = DataStructs.TanimotoSimilarity(fp, fp0) + s = " {:.2f}".format(sim) + valid_mols_smiles_unique_label.append(s) + if keep_duplicate: + molsPerRow = mols_per_row + else: + molsPerRow = 9 + k = len(valid_mols_smiles_unique) + logger.info( + "interpolation_mols valid {} / {}".format( + len(valid_mols), len(interpolation_mols) + ) + ) + if data_name == "qm9": + psize = 150, 150 + else: + psize = 150, 150 + img = Draw.MolsToGridImage( + valid_mols_unique[:k], + molsPerRow=molsPerRow, + legends=valid_mols_smiles_unique_label[:k], + subImgSize=psize, + ) + img.save(filepath + "_.png") + svg = Draw.MolsToGridImage( + valid_mols_unique[:k], + molsPerRow=molsPerRow, + legends=valid_mols_smiles_unique_label[:k], + subImgSize=psize, + useSVG=True, + ) + cairosvg.svg2pdf(bytestring=svg.encode("utf-8"), write_to=filepath + ".pdf") + cairosvg.svg2png(bytestring=svg.encode("utf-8"), write_to=filepath + ".png") + logger.info("Dump {}.png/pdf done".format(filepath)) + + +def evaluate(cfg: DictConfig): + # set training hyper-parameters + b_hidden_ch = cfg.get(cfg.data_name).b_hidden_ch + a_hidden_gnn = cfg.get(cfg.data_name).a_hidden_gnn + a_hidden_lin = cfg.get(cfg.data_name).a_hidden_lin + mask_row_size_list = list(cfg.get(cfg.data_name).mask_row_size_list) + mask_row_stride_list = list(cfg.get(cfg.data_name).mask_row_stride_list) + a_n_type = len(cfg.get(cfg.data_name).atomic_num_list) + atomic_num_list = list(cfg.get(cfg.data_name).atomic_num_list) + + model_params = Hyperparameters( + b_n_type=cfg.get(cfg.data_name).b_n_type, + b_n_flow=cfg.get(cfg.data_name).b_n_flow, + b_n_block=cfg.get(cfg.data_name).b_n_block, + b_n_squeeze=cfg.get(cfg.data_name).b_n_squeeze, + b_hidden_ch=b_hidden_ch, + b_affine=True, + b_conv_lu=cfg.get(cfg.data_name).b_conv_lu, + a_n_node=cfg.get(cfg.data_name).a_n_node, + a_n_type=a_n_type, + a_hidden_gnn=a_hidden_gnn, + a_hidden_lin=a_hidden_lin, + a_n_flow=cfg.get(cfg.data_name).a_n_flow, + a_n_block=cfg.get(cfg.data_name).a_n_block, + mask_row_size_list=mask_row_size_list, + mask_row_stride_list=mask_row_stride_list, + a_affine=True, + learn_dist=cfg.get(cfg.data_name).learn_dist, + seed=cfg.seed, + noise_scale=cfg.get(cfg.data_name).noise_scale, + ) + + logger.info("Model params:\n" + tabulate(model_params.print())) + + batch_size = cfg.EVAL.batch_size + + # set model for testing + model_cfg = dict(cfg.MODEL) + model_cfg.update({"hyper_params": model_params}) + model = ppsci.arch.MoFlowNet(**model_cfg) + ppsci.utils.save_load.load_pretrain(model, path=cfg.EVAL.pretrained_model_path) + model.eval() + + # set transforms + if cfg.data_name == "qm9": + transform_fn = moflow_transform.transform_fn + elif cfg.data_name == "zinc250k": + transform_fn = moflow_transform.transform_fn_zinc250k + cfg.Random.update({"delta": 0.1}) + + # set select eval model + cfg.EVAL.update(cfg.get(cfg.EVAL_mode)) + # set select eval data + valid_idx_path = osp.join(cfg.FILE_PATH, cfg.get(cfg.data_name).valid_idx) + valid_idx = moflow_transform.get_val_ids(valid_idx_path, cfg.data_name) + + # set dataloader config + dataloader_cfg = { + "dataset": { 
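+            # this dataset config is built twice below: first as-is for the
+            # "test" dataset, then with "mode" overridden to "train"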
+ "name": "MOlFLOWDataset", + "file_path": cfg.FILE_PATH, + "data_name": cfg.data_name, + "mode": cfg.mode, + "valid_idx": valid_idx, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.get(cfg.data_name).label_keys, + "smiles_col": cfg.get(cfg.data_name).smiles_col, + "transform_fn": transform_fn, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": cfg.EVAL.num_workers, + } + + test = ppsci.data.dataset.build_dataset(dataloader_cfg["dataset"]) + dataloader_cfg["dataset"].update({"mode": "train"}) + train = ppsci.data.dataset.build_dataset(dataloader_cfg["dataset"]) + logger.info( + "{} in total, {} training data, {} testing data, {} batchsize, train/batchsize {}".format( + len(train) + len(test), + len(train), + len(test), + batch_size, + len(train) / batch_size, + ) + ) + + if cfg.EVAL.reconstruct: + train_dataloader = ppsci.data.build_dataloader(train, dataloader_cfg) + reconstruction_rate_list = [] + max_iter = len(train_dataloader) + input_keys = cfg.MODEL.input_keys + output_keys = cfg.MODEL.output_keys + for i, batch in enumerate(train_dataloader, start=0): + output_dict = model(batch[0]) + x = batch[0][input_keys[0]] + adj = batch[0][input_keys[1]] + z = output_dict[output_keys[0]] + z0 = z[0].reshape([tuple(z[0].shape)[0], -1]) + z1 = z[1].reshape([tuple(z[1].shape)[0], -1]) + adj_rev, x_rev = model.reverse(paddle.concat(x=[z0, z1], axis=1)) + reverse_smiles = adj_to_smiles(adj_rev.cpu(), x_rev.cpu(), atomic_num_list) + train_smiles = adj_to_smiles(adj.cpu(), x.cpu(), atomic_num_list) + lb = np.array([int(a != b) for a, b in zip(train_smiles, reverse_smiles)]) + idx = np.where(lb)[0] + if len(idx) > 0: + for k in idx: + logger.info( + "{}, train: {}, reverse: {}".format( + i * batch_size + k, train_smiles[k], reverse_smiles[k] + ) + ) + reconstruction_rate = 1.0 - lb.mean() + reconstruction_rate_list.append(reconstruction_rate) + logger.message( + "iter/total: {}/{}, reconstruction_rate:{}".format( + i, max_iter, reconstruction_rate + ) + ) + reconstruction_rate_total = np.array(reconstruction_rate_list).mean() + logger.message( + "reconstruction_rate for all the train data:{} in {}".format( + reconstruction_rate_total, len(train) + ) + ) + exit(0) + + if cfg.EVAL.int2point: + inputs = train.input + labels = train.label + items = [] + for idx in range(len(train)): + input_item = [value[idx] for key, value in inputs.items()] + label_item = [value[idx] for key, value in labels.items()] + item = input_item + label_item + item = transform_fn(item) + items.append(item) + items = np.array(items, dtype=object).T + inputs = {key: np.stack(items[i], axis=0) for i, key in enumerate(inputs)} + + mol_smiles = None + gen_dir = osp.join(cfg.output_dir, cfg.EVAL_mode) + logger.message("Dump figure in {}".format(gen_dir)) + if not osp.exists(gen_dir): + os.makedirs(gen_dir) + for seed in range(cfg.EVAL.inter_times): + filepath = osp.join( + gen_dir, "2points_interpolation-2point_molecules_seed{}".format(seed) + ) + visualize_interpolation_between_2_points( + filepath, + model, + mol_smiles=mol_smiles, + mols_per_row=15, + n_interpolation=50, + atomic_num_list=atomic_num_list, + seed=seed, + true_data=inputs, + data_name=cfg.data_name, + ) + exit(0) + + if cfg.EVAL.intgrid: + inputs = train.input + labels = train.label + items = [] + for idx in range(len(train)): + input_item = [value[idx] for key, value in inputs.items()] + label_item = [value[idx] for key, value in labels.items()] + item = 
input_item + label_item + item = transform_fn(item) + items.append(item) + items = np.array(items, dtype=object).T + inputs = {key: np.stack(items[i], axis=0) for i, key in enumerate(inputs)} + + mol_smiles = None + gen_dir = os.path.join(cfg.output_dir, cfg.EVAL_mode) + logger.message("Dump figure in {}".format(gen_dir)) + if not os.path.exists(gen_dir): + os.makedirs(gen_dir) + for seed in range(cfg.EVAL.inter_times): + filepath = os.path.join( + gen_dir, "generated_interpolation-grid_molecules_seed{}".format(seed) + ) + visualize_interpolation( + filepath, + model, + mol_smiles=mol_smiles, + mols_per_row=9, + delta=cfg.EVAL.delta, + atomic_num_list=atomic_num_list, + seed=seed, + true_data=inputs, + data_name=cfg.data_name, + keep_duplicate=True, + ) + filepath = os.path.join( + gen_dir, + "generated_interpolation-grid_molecules_seed{}_unique".format(seed), + ) + visualize_interpolation( + filepath, + model, + mol_smiles=mol_smiles, + mols_per_row=9, + delta=cfg.EVAL.delta, + atomic_num_list=atomic_num_list, + seed=seed, + true_data=inputs, + data_name=cfg.data_name, + keep_duplicate=False, + ) + exit(0) + + inputs = train.input + labels = train.label + items = [] + for idx in range(len(train)): + input_item = [value[idx] for key, value in inputs.items()] + label_item = [value[idx] for key, value in labels.items()] + item = input_item + label_item + item = transform_fn(item) + items.append(item) + items = np.array(items, dtype=object).T + inputs = {key: np.stack(items[i], axis=0) for i, key in enumerate(inputs)} + + train_x = [a for a in inputs["nodes"]] + train_adj = [a for a in inputs["edges"]] + train_smiles = adj_to_smiles(train_adj, train_x, atomic_num_list) + + valid_ratio = [] + unique_ratio = [] + novel_ratio = [] + abs_unique_ratio = [] + abs_novel_ratio = [] + for i in range(cfg.EVAL.n_experiments): + adj, x = generate_mols( + model, batch_size=batch_size, true_adj=None, temp=cfg.EVAL.temperature + ) + val_res = check_validity( + adj, x, atomic_num_list, correct_validity=cfg.EVAL.correct_validity + ) + novel_r, abs_novel_r = check_novelty( + val_res["valid_smiles"], train_smiles, tuple(x.shape)[0] + ) + novel_ratio.append(novel_r) + abs_novel_ratio.append(abs_novel_r) + unique_ratio.append(val_res["unique_ratio"]) + abs_unique_ratio.append(val_res["abs_unique_ratio"]) + valid_ratio.append(val_res["valid_ratio"]) + # n_valid = len(val_res["valid_mols"]) + if cfg.save_score: + assert len(val_res["valid_smiles"]) == len(val_res["valid_mols"]) + smiles_qed_plogp = [ + (sm, Descriptors.qed(mol), penalized_logp(mol)) + for sm, mol in zip(val_res["valid_smiles"], val_res["valid_mols"]) + ] + smiles_qed_plogp.sort(key=lambda tup: tup[2], reverse=True) + gen_dir = os.path.join(cfg.output_dir, cfg.EVAL_mode) + os.makedirs(gen_dir, exist_ok=True) + filepath = os.path.join( + gen_dir, "smiles_qed_plogp_{}_RankedByPlogp.csv".format(i) + ) + df = pd.DataFrame( + smiles_qed_plogp, columns=["Smiles", "QED", "Penalized_logp"] + ) + df.to_csv(filepath, index=None, header=True) + smiles_qed_plogp.sort(key=lambda tup: tup[1], reverse=True) + filepath2 = os.path.join( + gen_dir, "smiles_qed_plogp_{}_RankedByQED.csv".format(i) + ) + df2 = pd.DataFrame( + smiles_qed_plogp, columns=["Smiles", "QED", "Penalized_logp"] + ) + df2.to_csv(filepath2, index=None, header=True) + if cfg.EVAL.save_fig: + gen_dir = os.path.join(cfg.output_dir, cfg.EVAL_mode) + os.makedirs(gen_dir, exist_ok=True) + filepath = os.path.join(gen_dir, "generated_mols_{}.png".format(i)) + img = Draw.MolsToGridImage( + 
val_res["valid_mols"], + legends=val_res["valid_smiles"], + molsPerRow=20, + subImgSize=(300, 300), + ) + img.save(filepath) + logger.info( + "validity: mean={:.2f}%, sd={:.2f}%, vals={}".format( + np.mean(valid_ratio), np.std(valid_ratio), valid_ratio + ) + ) + logger.info( + "novelty: mean={:.2f}%, sd={:.2f}%, vals={}".format( + np.mean(novel_ratio), np.std(novel_ratio), novel_ratio + ) + ) + logger.info( + "uniqueness: mean={:.2f}%, sd={:.2f}%, vals={}".format( + np.mean(unique_ratio), np.std(unique_ratio), unique_ratio + ) + ) + logger.info( + "abs_novelty: mean={:.2f}%, sd={:.2f}%, vals={}".format( + np.mean(abs_novel_ratio), np.std(abs_novel_ratio), abs_novel_ratio + ) + ) + logger.info( + "abs_uniqueness: mean={:.2f}%, sd={:.2f}%, vals={}".format( + np.mean(abs_unique_ratio), np.std(abs_unique_ratio), abs_unique_ratio + ) + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="moflow_test.yaml") +def main(cfg: DictConfig): + evaluate(cfg) + + +if __name__ == "__main__": + main() diff --git a/examples/neuraloperator/conf/sfno_swe_pretrain.yaml b/examples/neuraloperator/conf/sfno_swe_pretrain.yaml index f0a8b811f0..3f45eb609b 100644 --- a/examples/neuraloperator/conf/sfno_swe_pretrain.yaml +++ b/examples/neuraloperator/conf/sfno_swe_pretrain.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -118,3 +119,124 @@ INFER: num_cpu_threads: 4 batch_size: 1 data_path: ./datasets/SWE/test_SWE_32x64.npy +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_sfno_pretrain + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval/export/infer +seed: 666 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set train and evaluate data path +FILE_PATH: ./datasets/SWE/ + +# dataset setting +DATASET: + label_keys: ["y"] + train_resolution: "32x64" + test_resolutions: ["32x64","64x128"] + + +# model settings +MODEL: + input_keys: ["x"] + output_keys: ["y"] + in_channels: 3 + out_channels: 3 + n_modes: [32, 32] + hidden_channels: 32 + projection_channels: 64 + n_layers: 4 + + use_mlp: false + mlp: + expansion: 0.5 + dropout: 0.0 + norm: 'group_norm' + fno_skip: "linear" + mlp_skip: "soft-gating" + separable: false + preactivation: false + factorization: null + rank: 1.0 + joint_factorization: false + fixed_rank_modes: null + implementation: "factorized" + domain_padding: null #0.078125 + domain_padding_mode: "one-sided" #symmetric + fft_norm: 'forward' + patching_levels: 0 + + +# training settings +TRAIN: + epochs: 300 + save_freq: 20 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 5e-3 + by_epoch: True + type: "StepDecay" + step_size: 60 + gamma: 0.5 + # ReduceOnPlateau only + scheduler_patience: 5 + + # CosineAnnealingLR + scheduler_T_max: 30 + wd: 1e-4 + batch_size: 4 + pretrained_model_path: null + checkpoint_path: null + + +# evaluation settings +EVAL: + pretrained_model_path: ./outputs_sfno_pretrain/checkpoints/best_model.pdparams + compute_metric_by_batch: false + eval_with_no_grad: true + batch_size: 10 
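+
+# inference settings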
+ +INFER: + pretrained_model_path: ./outputs_sfno_pretrain/checkpoints/best_model.pdparams + export_path: ./inference/sfno/sfno_darcyflow + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 16 + num_cpu_threads: 4 + batch_size: 1 + data_path: ./datasets/SWE/test_SWE_32x64.npy +>>>>>>> Stashed changes diff --git a/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml b/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml index a7fb74cf4b..2f25ce0510 100644 --- a/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml +++ b/examples/neuraloperator/conf/tfno_darcyflow_pretrain.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -124,3 +125,132 @@ INFER: batch_size: 1 data_path: ./datasets/darcyflow/darcy_test_16.npy grid_boundaries: [[0, 1], [0, 1]] +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_tfno_pretrain + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval/export/infer +seed: 666 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set train and evaluate data path +FILE_PATH: ./datasets/darcyflow/ + +# dataset setting +DATASET: + label_keys: ["y"] + train_resolution: 16 + test_resolutions: [16,32] + grid_boundaries: [[0, 1], [0, 1]] + positional_encoding: True + encode_input: False + encode_output: False + encoding: "channel-wise" + channel_dim: 1 + +# model settings +MODEL: + input_keys: ["x"] + output_keys: ["y"] + n_modes_height: 16 + n_modes_width: 16 + in_channels: 3 + out_channels: 1 + hidden_channels: 32 + projection_channels: 64 + n_layers: 4 + + use_mlp: False + mlp: + expansion: 0.5 + dropout: 0.0 + norm: "group_norm" + fno_skip: "linear" + mlp_skip: "soft-gating" + separable: false + preactivation: false + factorization: 'dense' + rank: 1.0 + joint_factorization: false + fixed_rank_modes: null + implementation: "factorized" + domain_padding: null #0.078125 + domain_padding_mode: "one-sided" #symmetric + fft_norm: 'forward' + patching_levels: 0 + + +# training settings +TRAIN: + epochs: 300 + save_freq: 20 + eval_during_train: true + eval_freq: 1 + training_loss: "h1" + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 5e-3 + by_epoch: True + type: "StepDecay" + step_size: 60 + gamma: 0.5 + # ReduceOnPlateau only + scheduler_patience: 5 + + # CosineAnnealingLR + scheduler_T_max: 500 + wd: 1.0e-4 + batch_size: 16 + pretrained_model_path: null + checkpoint_path: null + + +# evaluation settings +EVAL: + pretrained_model_path: ./outputs_tfno_pretrain/checkpoints/best_model.pdparams + compute_metric_by_batch: false + eval_with_no_grad: true + batch_size: 16 + +INFER: + pretrained_model_path: ./outputs_tfno_pretrain/checkpoints/best_model.pdparams + export_path: ./inference/tfno/tfno_darcyflow + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + 
precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 16 + num_cpu_threads: 1 + batch_size: 1 + data_path: ./datasets/darcyflow/darcy_test_16.npy + grid_boundaries: [[0, 1], [0, 1]] +>>>>>>> Stashed changes diff --git a/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml b/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml index 05fdf92090..43705cd140 100644 --- a/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml +++ b/examples/neuraloperator/conf/uno_darcyflow_pretrain.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -129,3 +130,137 @@ INFER: batch_size: 1 data_path: ./datasets/darcyflow/darcy_test_16.npy grid_boundaries: [[0, 1], [0, 1]] +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_uno_pretrain + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval/export/infer +seed: 666 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set train and evaluate data path +FILE_PATH: ./datasets/darcyflow/ + +# dataset setting +DATASET: + label_keys: ["y"] + train_resolution: 16 + test_resolutions: [16,32] + grid_boundaries: [[0, 1], [0, 1]] + positional_encoding: True + encode_input: False + encode_output: False + encoding: "channel-wise" + channel_dim: 1 + +# model settings +MODEL: + input_keys: ["x"] + output_keys: ["y"] + in_channels: 3 + out_channels: 1 + hidden_channels: 64 + projection_channels: 64 + n_layers: 5 + uno_out_channels: [32,64,64,64,32] + uno_n_modes: [[16,16],[8,8],[8,8],[8,8],[16,16]] + uno_scalings: [[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]] + horizontal_skips_map: null + incremental_n_modes: null + + use_mlp: false + mlp: + expansion: 0.5 + dropout: 0.0 + norm: "group_norm" + fno_skip: "linear" + horizontal_skip: "linear" + mlp_skip: "soft-gating" + separable: false + preactivation: false + factorization: null + rank: 1.0 + joint_factorization: false + fixed_rank_modes: null + implementation: "factorized" + domain_padding: 0.2 #0.078125 + domain_padding_mode: "one-sided" #symmetric + fft_norm: 'forward' + patching_levels: 0 + + +# training settings +TRAIN: + epochs: 300 + save_freq: 20 + eval_during_train: true + eval_freq: 1 + training_loss: "h1" + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 5e-3 + by_epoch: True + type: "StepDecay" + step_size: 60 + gamma: 0.5 + # ReduceOnPlateau only + scheduler_patience: 5 + + # CosineAnnealingLR + scheduler_T_max: 30 + wd: 1.0e-4 + batch_size: 16 + # /home/aistudio/darcy_flow_small.pdparams + pretrained_model_path: null + checkpoint_path: null + + +# evaluation settings +EVAL: + pretrained_model_path: ./outputs_uno_pretrain/checkpoints/best_model.pdparams + compute_metric_by_batch: false + eval_with_no_grad: true + batch_size: 16 + +INFER: + pretrained_model_path: ./outputs_uno_pretrain/checkpoints/best_model.pdparams + export_path: ./inference/uno/uno_darcyflow + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + 
precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 16 + num_cpu_threads: 4 + batch_size: 1 + data_path: ./datasets/darcyflow/darcy_test_16.npy + grid_boundaries: [[0, 1], [0, 1]] +>>>>>>> Stashed changes diff --git a/examples/neuraloperator/metric.py b/examples/neuraloperator/metric.py index 756bf22a5d..3ab75d12fd 100644 --- a/examples/neuraloperator/metric.py +++ b/examples/neuraloperator/metric.py @@ -1,407 +1,407 @@ -import math -from typing import Dict - -import paddle - - -def central_diff_1d(x, h, fix_x_bnd=False): - dx = (paddle.roll(x, shifts=-1, axis=-1) - paddle.roll(x, shifts=1, axis=-1)) / ( - 2.0 * h - ) - - if fix_x_bnd: - dx[..., 0] = (x[..., 1] - x[..., 0]) / h - dx[..., -1] = (x[..., -1] - x[..., -2]) / h - - return dx - - -def central_diff_2d(x, h, fix_x_bnd=False, fix_y_bnd=False): - if isinstance(h, float): - h = [h, h] - dx = (paddle.roll(x, shifts=-1, axis=-2) - paddle.roll(x, shifts=1, axis=-2)) / ( - 2.0 * h[0] - ) - dy = (paddle.roll(x, shifts=-1, axis=-1) - paddle.roll(x, shifts=1, axis=-1)) / ( - 2.0 * h[1] - ) - - if fix_x_bnd: - dx[..., 0, :] = (x[..., 1, :] - x[..., 0, :]) / h[0] - dx[..., -1, :] = (x[..., -1, :] - x[..., -2, :]) / h[0] - - if fix_y_bnd: - dy[..., :, 0] = (x[..., :, 1] - x[..., :, 0]) / h[1] - dy[..., :, -1] = (x[..., :, -1] - x[..., :, -2]) / h[1] - - return dx, dy - - -def central_diff_3d(x, h, fix_x_bnd=False, fix_y_bnd=False, fix_z_bnd=False): - if isinstance(h, float): - h = [h, h, h] - - dx = (paddle.roll(x, shifts=-1, axis=-3) - paddle.roll(x, shifts=1, axis=-3)) / ( - 2.0 * h[0] - ) - dy = (paddle.roll(x, shifts=-1, axis=-2) - paddle.roll(x, shifts=1, axis=-2)) / ( - 2.0 * h[1] - ) - dz = (paddle.roll(x, shifts=-1, axis=-1) - paddle.roll(x, shifts=1, axis=-1)) / ( - 2.0 * h[2] - ) - - if fix_x_bnd: - dx[..., 0, :, :] = (x[..., 1, :, :] - x[..., 0, :, :]) / h[0] - dx[..., -1, :, :] = (x[..., -1, :, :] - x[..., -2, :, :]) / h[0] - - if fix_y_bnd: - dy[..., :, 0, :] = (x[..., :, 1, :] - x[..., :, 0, :]) / h[1] - dy[..., :, -1, :] = (x[..., :, -1, :] - x[..., :, -2, :]) / h[1] - - if fix_z_bnd: - dz[..., :, :, 0] = (x[..., :, :, 1] - x[..., :, :, 0]) / h[2] - dz[..., :, :, -1] = (x[..., :, :, -1] - x[..., :, :, -2]) / h[2] - - return dx, dy, dz - - -class LpLoss(object): - """loss function with rel/abs Lp loss - - Args: - d (int, optional): The scaling factor of loss. Defaults to 1. - p (int, optional): The scaling factor of diff. Defaults to 2. - L (math, optional): The founction of loss. Defaults to 2*math.pi. - reduce_dims (int, optional): The dims of reduction. Defaults to 0. - reductions (str, optional): The type of reduction. Defaults to 'sum'. 
- """ - - def __init__(self, d=1, p=2, L=2 * math.pi, reduce_dims=0, reductions="sum"): - super().__init__() - - self.d = d - self.p = p - - if isinstance(reduce_dims, int): - self.reduce_dims = [reduce_dims] - else: - self.reduce_dims = reduce_dims - - if self.reduce_dims is not None: - if isinstance(reductions, str): - assert reductions == "sum" or reductions == "mean" - self.reductions = [reductions] * len(self.reduce_dims) - else: - for j in range(len(reductions)): - assert reductions[j] == "sum" or reductions[j] == "mean" - self.reductions = reductions - - if isinstance(L, float): - self.L = [L] * self.d - else: - self.L = L - - def uniform_h(self, x): - h = [0.0] * self.d - for j in range(self.d, 0, -1): - h[-j] = self.L[-j] / x.size(-j) - - return h - - def reduce_all(self, x): - for j in range(len(self.reduce_dims)): - if self.reductions[j] == "sum": - x = paddle.sum(x, axis=self.reduce_dims[j], keepdim=True) - else: - x = paddle.mean(x, axis=self.reduce_dims[j], keepdim=True) - return x - - def abs(self, x, y, h=None): - # Assume uniform mesh - if h is None: - h = self.uniform_h(x) - else: - if isinstance(h, float): - h = [h] * self.d - - const = math.prod(h) ** (1.0 / self.p) - diff = const * paddle.norm( - paddle.flatten(x, start_axis=-self.d) - - paddle.flatten(y, start_axis=-self.d), - p=self.p, - axis=-1, - keepdim=False, - ) - - if self.reduce_dims is not None: - diff = self.reduce_all(diff).squeeze() - - return diff - - def rel(self, x, y): - diff = paddle.norm( - paddle.flatten(x, start_axis=-self.d) - - paddle.flatten(y, start_axis=-self.d), - p=self.p, - axis=-1, - keepdim=False, - ) - ynorm = paddle.norm( - paddle.flatten(y, start_axis=-self.d), p=self.p, axis=-1, keepdim=False - ) - diff = diff / ynorm - - if self.reduce_dims is not None: - diff = self.reduce_all(diff).squeeze() - return diff - - def __call__( - self, - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - ): - x = output_dict["y"] - y = label_dict["y"] - return {"l2": self.rel(x, y) / x.shape[0]} - - -class LpLoss_train(LpLoss): - def __init__(self, d=1, p=2, L=2 * math.pi, reduce_dims=0, reductions="sum"): - super().__init__(d=d, p=p, L=L, reduce_dims=reduce_dims, reductions=reductions) - - def __call__( - self, - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - weight_dict=None, - ): - x = output_dict["y"] - y = label_dict["y"] - return {"y": self.rel(x, y)} - - -class H1Loss(object): - """loss function with rel/abs H1 loss - - Args: - d (int, optional): The scaling factor of loss. Defaults to 1. - L (math, optional): The founction of loss. Defaults to 2*math.pi. - reduce_dims (int, optional): The dims of reduction. Defaults to 0. - reductions (str, optional): The type of reduction. Defaults to 'sum'. - fix_x_bnd (bool, optional): Whether to fix the x boundaries. Defaults to False. - fix_y_bnd (bool, optional): Whether to fix the y boundaries. Defaults to False. - fix_z_bnd (bool, optional): Whether to fix the z boundaries. Defaults to False. - """ - - def __init__( - self, - d=1, - L=2 * math.pi, - reduce_dims=0, - reductions="sum", - fix_x_bnd=False, - fix_y_bnd=False, - fix_z_bnd=False, - ): - - super().__init__() - - assert d > 0 and d < 4, "Currently only implemented for 1, 2, and 3-D." 
- - self.d = d - self.fix_x_bnd = fix_x_bnd - self.fix_y_bnd = fix_y_bnd - self.fix_z_bnd = fix_z_bnd - - if isinstance(reduce_dims, int): - self.reduce_dims = [reduce_dims] - else: - self.reduce_dims = reduce_dims - - if self.reduce_dims is not None: - if isinstance(reductions, str): - assert reductions == "sum" or reductions == "mean" - self.reductions = [reductions] * len(self.reduce_dims) - else: - for j in range(len(reductions)): - assert reductions[j] == "sum" or reductions[j] == "mean" - self.reductions = reductions - - if isinstance(L, float): - self.L = [L] * self.d - else: - self.L = L - - def compute_terms(self, x, y, h): - dict_x = {} - dict_y = {} - - if self.d == 1: - dict_x[0] = x - dict_y[0] = y - - x_x = central_diff_1d(x, h[0], fix_x_bnd=self.fix_x_bnd) - y_x = central_diff_1d(y, h[0], fix_x_bnd=self.fix_x_bnd) - - dict_x[1] = x_x - dict_y[1] = y_x - - elif self.d == 2: - dict_x[0] = paddle.flatten(x, start_axis=-2) - dict_y[0] = paddle.flatten(y, start_axis=-2) - - x_x, x_y = central_diff_2d( - x, h, fix_x_bnd=self.fix_x_bnd, fix_y_bnd=self.fix_y_bnd - ) - y_x, y_y = central_diff_2d( - y, h, fix_x_bnd=self.fix_x_bnd, fix_y_bnd=self.fix_y_bnd - ) - - dict_x[1] = paddle.flatten(x_x, start_axis=-2) - dict_x[2] = paddle.flatten(x_y, start_axis=-2) - - dict_y[1] = paddle.flatten(y_x, start_axis=-2) - dict_y[2] = paddle.flatten(y_y, start_axis=-2) - - else: - dict_x[0] = paddle.flatten(x, start_axis=-3) - dict_y[0] = paddle.flatten(y, start_axis=-3) - - x_x, x_y, x_z = central_diff_3d( - x, - h, - fix_x_bnd=self.fix_x_bnd, - fix_y_bnd=self.fix_y_bnd, - fix_z_bnd=self.fix_z_bnd, - ) - y_x, y_y, y_z = central_diff_3d( - y, - h, - fix_x_bnd=self.fix_x_bnd, - fix_y_bnd=self.fix_y_bnd, - fix_z_bnd=self.fix_z_bnd, - ) - - dict_x[1] = paddle.flatten(x_x, start_axis=-3) - dict_x[2] = paddle.flatten(x_y, start_axis=-3) - dict_x[3] = paddle.flatten(x_z, start_axis=-3) - - dict_y[1] = paddle.flatten(y_x, start_axis=-3) - dict_y[2] = paddle.flatten(y_y, start_axis=-3) - dict_y[3] = paddle.flatten(y_z, start_axis=-3) - - return dict_x, dict_y - - def uniform_h(self, x): - h = [0.0] * self.d - for j in range(self.d, 0, -1): - h[-j] = self.L[-j] / x.shape[-j] - - return h - - def reduce_all(self, x): - for j in range(len(self.reduce_dims)): - if self.reductions[j] == "sum": - x = paddle.sum(x, axis=self.reduce_dims[j], keepdim=True) - else: - x = paddle.mean(x, axis=self.reduce_dims[j], keepdim=True) - - return x - - def abs(self, x, y, h=None): - # Assume uniform mesh - if h is None: - h = self.uniform_h(x) - else: - if isinstance(h, float): - h = [h] * self.d - - dict_x, dict_y = self.compute_terms(x, y, h) - - const = math.prod(h) - diff = ( - const * paddle.norm(dict_x[0] - dict_y[0], p=2, axis=-1, keepdim=False) ** 2 - ) - - for j in range(1, self.d + 1): - diff += ( - const - * paddle.norm(dict_x[j] - dict_y[j], p=2, axis=-1, keepdim=False) ** 2 - ) - - diff = diff**0.5 - - if self.reduce_dims is not None: - diff = self.reduce_all(diff).squeeze() - - return diff - - def rel(self, x, y, h=None): - # Assume uniform mesh - if h is None: - h = self.uniform_h(x) - else: - if isinstance(h, float): - h = [h] * self.d - - dict_x, dict_y = self.compute_terms(x, y, h) - - diff = paddle.norm(dict_x[0] - dict_y[0], p=2, axis=-1, keepdim=False) ** 2 - ynorm = paddle.norm(dict_y[0], p=2, axis=-1, keepdim=False) ** 2 - - for j in range(1, self.d + 1): - diff += paddle.norm(dict_x[j] - dict_y[j], p=2, axis=-1, keepdim=False) ** 2 - ynorm += paddle.norm(dict_y[j], p=2, axis=-1, keepdim=False) ** 2 - - 
diff = (diff**0.5) / (ynorm**0.5) - - if self.reduce_dims is not None: - diff = self.reduce_all(diff).squeeze() - - return diff - - def __call__( - self, - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - weight_dict: Dict[str, "paddle.Tensor"] = None, - h=None, - ): - x = output_dict["y"] - y = label_dict["y"] - return {"h1": self.rel(x, y, h=h) / x.shape[0]} - - -class H1Loss_train(H1Loss): - def __init__( - self, - d=1, - L=2 * math.pi, - reduce_dims=0, - reductions="sum", - fix_x_bnd=False, - fix_y_bnd=False, - fix_z_bnd=False, - ): - super().__init__( - d=d, - L=L, - reduce_dims=reduce_dims, - reductions=reductions, - fix_x_bnd=fix_x_bnd, - fix_y_bnd=fix_y_bnd, - fix_z_bnd=fix_z_bnd, - ) - - def __call__( - self, - output_dict: Dict[str, "paddle.Tensor"], - label_dict: Dict[str, "paddle.Tensor"], - weight_dict: Dict[str, "paddle.Tensor"] = None, - h=None, - ): - x = output_dict["y"] - y = label_dict["y"] - - return {"y": self.rel(x, y, h=h)} +import math +from typing import Dict + +import paddle + + +def central_diff_1d(x, h, fix_x_bnd=False): + dx = (paddle.roll(x, shifts=-1, axis=-1) - paddle.roll(x, shifts=1, axis=-1)) / ( + 2.0 * h + ) + + if fix_x_bnd: + dx[..., 0] = (x[..., 1] - x[..., 0]) / h + dx[..., -1] = (x[..., -1] - x[..., -2]) / h + + return dx + + +def central_diff_2d(x, h, fix_x_bnd=False, fix_y_bnd=False): + if isinstance(h, float): + h = [h, h] + dx = (paddle.roll(x, shifts=-1, axis=-2) - paddle.roll(x, shifts=1, axis=-2)) / ( + 2.0 * h[0] + ) + dy = (paddle.roll(x, shifts=-1, axis=-1) - paddle.roll(x, shifts=1, axis=-1)) / ( + 2.0 * h[1] + ) + + if fix_x_bnd: + dx[..., 0, :] = (x[..., 1, :] - x[..., 0, :]) / h[0] + dx[..., -1, :] = (x[..., -1, :] - x[..., -2, :]) / h[0] + + if fix_y_bnd: + dy[..., :, 0] = (x[..., :, 1] - x[..., :, 0]) / h[1] + dy[..., :, -1] = (x[..., :, -1] - x[..., :, -2]) / h[1] + + return dx, dy + + +def central_diff_3d(x, h, fix_x_bnd=False, fix_y_bnd=False, fix_z_bnd=False): + if isinstance(h, float): + h = [h, h, h] + + dx = (paddle.roll(x, shifts=-1, axis=-3) - paddle.roll(x, shifts=1, axis=-3)) / ( + 2.0 * h[0] + ) + dy = (paddle.roll(x, shifts=-1, axis=-2) - paddle.roll(x, shifts=1, axis=-2)) / ( + 2.0 * h[1] + ) + dz = (paddle.roll(x, shifts=-1, axis=-1) - paddle.roll(x, shifts=1, axis=-1)) / ( + 2.0 * h[2] + ) + + if fix_x_bnd: + dx[..., 0, :, :] = (x[..., 1, :, :] - x[..., 0, :, :]) / h[0] + dx[..., -1, :, :] = (x[..., -1, :, :] - x[..., -2, :, :]) / h[0] + + if fix_y_bnd: + dy[..., :, 0, :] = (x[..., :, 1, :] - x[..., :, 0, :]) / h[1] + dy[..., :, -1, :] = (x[..., :, -1, :] - x[..., :, -2, :]) / h[1] + + if fix_z_bnd: + dz[..., :, :, 0] = (x[..., :, :, 1] - x[..., :, :, 0]) / h[2] + dz[..., :, :, -1] = (x[..., :, :, -1] - x[..., :, :, -2]) / h[2] + + return dx, dy, dz + + +class LpLoss(object): + """loss function with rel/abs Lp loss + + Args: + d (int, optional): The scaling factor of loss. Defaults to 1. + p (int, optional): The scaling factor of diff. Defaults to 2. + L (math, optional): The founction of loss. Defaults to 2*math.pi. + reduce_dims (int, optional): The dims of reduction. Defaults to 0. + reductions (str, optional): The type of reduction. Defaults to 'sum'. 
+ """ + + def __init__(self, d=1, p=2, L=2 * math.pi, reduce_dims=0, reductions="sum"): + super().__init__() + + self.d = d + self.p = p + + if isinstance(reduce_dims, int): + self.reduce_dims = [reduce_dims] + else: + self.reduce_dims = reduce_dims + + if self.reduce_dims is not None: + if isinstance(reductions, str): + assert reductions == "sum" or reductions == "mean" + self.reductions = [reductions] * len(self.reduce_dims) + else: + for j in range(len(reductions)): + assert reductions[j] == "sum" or reductions[j] == "mean" + self.reductions = reductions + + if isinstance(L, float): + self.L = [L] * self.d + else: + self.L = L + + def uniform_h(self, x): + h = [0.0] * self.d + for j in range(self.d, 0, -1): + h[-j] = self.L[-j] / x.size(-j) + + return h + + def reduce_all(self, x): + for j in range(len(self.reduce_dims)): + if self.reductions[j] == "sum": + x = paddle.sum(x, axis=self.reduce_dims[j], keepdim=True) + else: + x = paddle.mean(x, axis=self.reduce_dims[j], keepdim=True) + return x + + def abs(self, x, y, h=None): + # Assume uniform mesh + if h is None: + h = self.uniform_h(x) + else: + if isinstance(h, float): + h = [h] * self.d + + const = math.prod(h) ** (1.0 / self.p) + diff = const * paddle.norm( + paddle.flatten(x, start_axis=-self.d) + - paddle.flatten(y, start_axis=-self.d), + p=self.p, + axis=-1, + keepdim=False, + ) + + if self.reduce_dims is not None: + diff = self.reduce_all(diff).squeeze() + + return diff + + def rel(self, x, y): + diff = paddle.norm( + paddle.flatten(x, start_axis=-self.d) + - paddle.flatten(y, start_axis=-self.d), + p=self.p, + axis=-1, + keepdim=False, + ) + ynorm = paddle.norm( + paddle.flatten(y, start_axis=-self.d), p=self.p, axis=-1, keepdim=False + ) + diff = diff / ynorm + + if self.reduce_dims is not None: + diff = self.reduce_all(diff).squeeze() + return diff + + def __call__( + self, + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + ): + x = output_dict["y"] + y = label_dict["y"] + return {"l2": self.rel(x, y) / x.shape[0]} + + +class LpLoss_train(LpLoss): + def __init__(self, d=1, p=2, L=2 * math.pi, reduce_dims=0, reductions="sum"): + super().__init__(d=d, p=p, L=L, reduce_dims=reduce_dims, reductions=reductions) + + def __call__( + self, + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + weight_dict=None, + ): + x = output_dict["y"] + y = label_dict["y"] + return {"y": self.rel(x, y)} + + +class H1Loss(object): + """loss function with rel/abs H1 loss + + Args: + d (int, optional): The scaling factor of loss. Defaults to 1. + L (math, optional): The founction of loss. Defaults to 2*math.pi. + reduce_dims (int, optional): The dims of reduction. Defaults to 0. + reductions (str, optional): The type of reduction. Defaults to 'sum'. + fix_x_bnd (bool, optional): Whether to fix the x boundaries. Defaults to False. + fix_y_bnd (bool, optional): Whether to fix the y boundaries. Defaults to False. + fix_z_bnd (bool, optional): Whether to fix the z boundaries. Defaults to False. + """ + + def __init__( + self, + d=1, + L=2 * math.pi, + reduce_dims=0, + reductions="sum", + fix_x_bnd=False, + fix_y_bnd=False, + fix_z_bnd=False, + ): + + super().__init__() + + assert d > 0 and d < 4, "Currently only implemented for 1, 2, and 3-D." 
+ + self.d = d + self.fix_x_bnd = fix_x_bnd + self.fix_y_bnd = fix_y_bnd + self.fix_z_bnd = fix_z_bnd + + if isinstance(reduce_dims, int): + self.reduce_dims = [reduce_dims] + else: + self.reduce_dims = reduce_dims + + if self.reduce_dims is not None: + if isinstance(reductions, str): + assert reductions == "sum" or reductions == "mean" + self.reductions = [reductions] * len(self.reduce_dims) + else: + for j in range(len(reductions)): + assert reductions[j] == "sum" or reductions[j] == "mean" + self.reductions = reductions + + if isinstance(L, float): + self.L = [L] * self.d + else: + self.L = L + + def compute_terms(self, x, y, h): + dict_x = {} + dict_y = {} + + if self.d == 1: + dict_x[0] = x + dict_y[0] = y + + x_x = central_diff_1d(x, h[0], fix_x_bnd=self.fix_x_bnd) + y_x = central_diff_1d(y, h[0], fix_x_bnd=self.fix_x_bnd) + + dict_x[1] = x_x + dict_y[1] = y_x + + elif self.d == 2: + dict_x[0] = paddle.flatten(x, start_axis=-2) + dict_y[0] = paddle.flatten(y, start_axis=-2) + + x_x, x_y = central_diff_2d( + x, h, fix_x_bnd=self.fix_x_bnd, fix_y_bnd=self.fix_y_bnd + ) + y_x, y_y = central_diff_2d( + y, h, fix_x_bnd=self.fix_x_bnd, fix_y_bnd=self.fix_y_bnd + ) + + dict_x[1] = paddle.flatten(x_x, start_axis=-2) + dict_x[2] = paddle.flatten(x_y, start_axis=-2) + + dict_y[1] = paddle.flatten(y_x, start_axis=-2) + dict_y[2] = paddle.flatten(y_y, start_axis=-2) + + else: + dict_x[0] = paddle.flatten(x, start_axis=-3) + dict_y[0] = paddle.flatten(y, start_axis=-3) + + x_x, x_y, x_z = central_diff_3d( + x, + h, + fix_x_bnd=self.fix_x_bnd, + fix_y_bnd=self.fix_y_bnd, + fix_z_bnd=self.fix_z_bnd, + ) + y_x, y_y, y_z = central_diff_3d( + y, + h, + fix_x_bnd=self.fix_x_bnd, + fix_y_bnd=self.fix_y_bnd, + fix_z_bnd=self.fix_z_bnd, + ) + + dict_x[1] = paddle.flatten(x_x, start_axis=-3) + dict_x[2] = paddle.flatten(x_y, start_axis=-3) + dict_x[3] = paddle.flatten(x_z, start_axis=-3) + + dict_y[1] = paddle.flatten(y_x, start_axis=-3) + dict_y[2] = paddle.flatten(y_y, start_axis=-3) + dict_y[3] = paddle.flatten(y_z, start_axis=-3) + + return dict_x, dict_y + + def uniform_h(self, x): + h = [0.0] * self.d + for j in range(self.d, 0, -1): + h[-j] = self.L[-j] / x.shape[-j] + + return h + + def reduce_all(self, x): + for j in range(len(self.reduce_dims)): + if self.reductions[j] == "sum": + x = paddle.sum(x, axis=self.reduce_dims[j], keepdim=True) + else: + x = paddle.mean(x, axis=self.reduce_dims[j], keepdim=True) + + return x + + def abs(self, x, y, h=None): + # Assume uniform mesh + if h is None: + h = self.uniform_h(x) + else: + if isinstance(h, float): + h = [h] * self.d + + dict_x, dict_y = self.compute_terms(x, y, h) + + const = math.prod(h) + diff = ( + const * paddle.norm(dict_x[0] - dict_y[0], p=2, axis=-1, keepdim=False) ** 2 + ) + + for j in range(1, self.d + 1): + diff += ( + const + * paddle.norm(dict_x[j] - dict_y[j], p=2, axis=-1, keepdim=False) ** 2 + ) + + diff = diff**0.5 + + if self.reduce_dims is not None: + diff = self.reduce_all(diff).squeeze() + + return diff + + def rel(self, x, y, h=None): + # Assume uniform mesh + if h is None: + h = self.uniform_h(x) + else: + if isinstance(h, float): + h = [h] * self.d + + dict_x, dict_y = self.compute_terms(x, y, h) + + diff = paddle.norm(dict_x[0] - dict_y[0], p=2, axis=-1, keepdim=False) ** 2 + ynorm = paddle.norm(dict_y[0], p=2, axis=-1, keepdim=False) ** 2 + + for j in range(1, self.d + 1): + diff += paddle.norm(dict_x[j] - dict_y[j], p=2, axis=-1, keepdim=False) ** 2 + ynorm += paddle.norm(dict_y[j], p=2, axis=-1, keepdim=False) ** 2 + + 
diff = (diff**0.5) / (ynorm**0.5) + + if self.reduce_dims is not None: + diff = self.reduce_all(diff).squeeze() + + return diff + + def __call__( + self, + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + weight_dict: Dict[str, "paddle.Tensor"] = None, + h=None, + ): + x = output_dict["y"] + y = label_dict["y"] + return {"h1": self.rel(x, y, h=h) / x.shape[0]} + + +class H1Loss_train(H1Loss): + def __init__( + self, + d=1, + L=2 * math.pi, + reduce_dims=0, + reductions="sum", + fix_x_bnd=False, + fix_y_bnd=False, + fix_z_bnd=False, + ): + super().__init__( + d=d, + L=L, + reduce_dims=reduce_dims, + reductions=reductions, + fix_x_bnd=fix_x_bnd, + fix_y_bnd=fix_y_bnd, + fix_z_bnd=fix_z_bnd, + ) + + def __call__( + self, + output_dict: Dict[str, "paddle.Tensor"], + label_dict: Dict[str, "paddle.Tensor"], + weight_dict: Dict[str, "paddle.Tensor"] = None, + h=None, + ): + x = output_dict["y"] + y = label_dict["y"] + + return {"y": self.rel(x, y, h=h)} diff --git a/examples/neuraloperator/predictor.py b/examples/neuraloperator/predictor.py index ddd3df6278..3e97c9654f 100644 --- a/examples/neuraloperator/predictor.py +++ b/examples/neuraloperator/predictor.py @@ -1,176 +1,176 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import numpy as np -import paddle -from omegaconf import DictConfig - -from deploy.python_infer import base -from ppsci.data.dataset import darcyflow_dataset - - -class FNOPredictor(base.Predictor): - """General predictor for Fourier Neural Operator model. - - Args: - cfg (DictConfig): Running configuration. - """ - - def __init__( - self, - cfg: DictConfig, - ): - super().__init__( - cfg.INFER.pdmodel_path, - cfg.INFER.pdiparams_path, - device=cfg.INFER.device, - engine=cfg.INFER.engine, - precision=cfg.INFER.precision, - onnx_path=cfg.INFER.onnx_path, - ir_optim=cfg.INFER.ir_optim, - min_subgraph_size=cfg.INFER.min_subgraph_size, - gpu_mem=cfg.INFER.gpu_mem, - gpu_id=cfg.INFER.gpu_id, - max_batch_size=cfg.INFER.max_batch_size, - num_cpu_threads=cfg.INFER.num_cpu_threads, - ) - self.log_freq = cfg.log_freq - - # get input names and data handles - self.input_names = self.predictor.get_input_names() - self.input_data_handle = self.predictor.get_input_handle(self.input_names[0]) - - # get output names and data handles - self.output_names = self.predictor.get_output_names() - self.output_handle = self.predictor.get_output_handle(self.output_names[0]) - - # preprocess - self.transform_x = darcyflow_dataset.PositionalEmbedding2D( - cfg.INFER.grid_boundaries - ) - - def predict( - self, - input_data: np.ndarray, - batch_size: int = 1, - ) -> np.ndarray: - """Predicts the output of the yinglong model for the given input. - - Args: - input_data (np.ndarray): Input data of shape (N, T, H, W). - batch_size (int, optional): Batch size, now only support 1. Defaults to 1. - Returns: - np.ndarray: Prediction. 
- """ - if batch_size != 1: - raise ValueError( - f"FNOPredictor only support batch_size=1, but got {batch_size}" - ) - # prepare input handle(s) - input_handles = {self.input_names[0]: self.input_data_handle} - # prepare output handle(s) - output_handles = {self.output_names[0]: self.output_handle} - - input_data = self.transform_x(paddle.to_tensor(input_data)).unsqueeze(0).numpy() - - # prepare batch input dict - batch_input_dict = { - self.input_names[0]: input_data, - } - # send batch input data to input handle(s) - for name, handle in input_handles.items(): - handle.copy_from_cpu(batch_input_dict[name]) - - # run predictor - self.predictor.run() - - # receive batch output data from output handle(s) - pred = output_handles[self.output_names[0]].copy_to_cpu() - - return pred - - -class SFNOPredictor(base.Predictor): - """General predictor for Spherical Fourier Neural Operator model. - - Args: - cfg (DictConfig): Running configuration. - """ - - def __init__( - self, - cfg: DictConfig, - ): - super().__init__( - cfg.INFER.pdmodel_path, - cfg.INFER.pdiparams_path, - device=cfg.INFER.device, - engine=cfg.INFER.engine, - precision=cfg.INFER.precision, - onnx_path=cfg.INFER.onnx_path, - ir_optim=cfg.INFER.ir_optim, - min_subgraph_size=cfg.INFER.min_subgraph_size, - gpu_mem=cfg.INFER.gpu_mem, - gpu_id=cfg.INFER.gpu_id, - max_batch_size=cfg.INFER.max_batch_size, - num_cpu_threads=cfg.INFER.num_cpu_threads, - ) - self.log_freq = cfg.log_freq - - # get input names and data handles - self.input_names = self.predictor.get_input_names() - self.input_data_handle = self.predictor.get_input_handle(self.input_names[0]) - - # get output names and data handles - self.output_names = self.predictor.get_output_names() - self.output_handle = self.predictor.get_output_handle(self.output_names[0]) - - def predict( - self, - input_data: np.ndarray, - batch_size: int = 1, - ) -> np.ndarray: - """Predicts the output of the yinglong model for the given input. - - Args: - input_data (np.ndarray): Input data of shape (N, T, H, W). - batch_size (int, optional): Batch size, now only support 1. Defaults to 1. - Returns: - np.ndarray: Prediction. - """ - if batch_size != 1: - raise ValueError( - f"SFNOPredictor only support batch_size=1, but got {batch_size}" - ) - # prepare input handle(s) - input_handles = {self.input_names[0]: self.input_data_handle} - # prepare output handle(s) - output_handles = {self.output_names[0]: self.output_handle} - - # prepare batch input dict - batch_input_dict = { - self.input_names[0]: input_data, - } - # send batch input data to input handle(s) - for name, handle in input_handles.items(): - handle.copy_from_cpu(batch_input_dict[name]) - - # run predictor - self.predictor.run() - - # receive batch output data from output handle(s) - pred = output_handles[self.output_names[0]].copy_to_cpu() - - return pred +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import numpy as np +import paddle +from omegaconf import DictConfig + +from deploy.python_infer import base +from ppsci.data.dataset import darcyflow_dataset + + +class FNOPredictor(base.Predictor): + """General predictor for Fourier Neural Operator model. + + Args: + cfg (DictConfig): Running configuration. + """ + + def __init__( + self, + cfg: DictConfig, + ): + super().__init__( + cfg.INFER.pdmodel_path, + cfg.INFER.pdiparams_path, + device=cfg.INFER.device, + engine=cfg.INFER.engine, + precision=cfg.INFER.precision, + onnx_path=cfg.INFER.onnx_path, + ir_optim=cfg.INFER.ir_optim, + min_subgraph_size=cfg.INFER.min_subgraph_size, + gpu_mem=cfg.INFER.gpu_mem, + gpu_id=cfg.INFER.gpu_id, + max_batch_size=cfg.INFER.max_batch_size, + num_cpu_threads=cfg.INFER.num_cpu_threads, + ) + self.log_freq = cfg.log_freq + + # get input names and data handles + self.input_names = self.predictor.get_input_names() + self.input_data_handle = self.predictor.get_input_handle(self.input_names[0]) + + # get output names and data handles + self.output_names = self.predictor.get_output_names() + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) + + # preprocess + self.transform_x = darcyflow_dataset.PositionalEmbedding2D( + cfg.INFER.grid_boundaries + ) + + def predict( + self, + input_data: np.ndarray, + batch_size: int = 1, + ) -> np.ndarray: + """Predicts the output of the yinglong model for the given input. + + Args: + input_data (np.ndarray): Input data of shape (N, T, H, W). + batch_size (int, optional): Batch size, now only support 1. Defaults to 1. + Returns: + np.ndarray: Prediction. + """ + if batch_size != 1: + raise ValueError( + f"FNOPredictor only support batch_size=1, but got {batch_size}" + ) + # prepare input handle(s) + input_handles = {self.input_names[0]: self.input_data_handle} + # prepare output handle(s) + output_handles = {self.output_names[0]: self.output_handle} + + input_data = self.transform_x(paddle.to_tensor(input_data)).unsqueeze(0).numpy() + + # prepare batch input dict + batch_input_dict = { + self.input_names[0]: input_data, + } + # send batch input data to input handle(s) + for name, handle in input_handles.items(): + handle.copy_from_cpu(batch_input_dict[name]) + + # run predictor + self.predictor.run() + + # receive batch output data from output handle(s) + pred = output_handles[self.output_names[0]].copy_to_cpu() + + return pred + + +class SFNOPredictor(base.Predictor): + """General predictor for Spherical Fourier Neural Operator model. + + Args: + cfg (DictConfig): Running configuration. 
+ """ + + def __init__( + self, + cfg: DictConfig, + ): + super().__init__( + cfg.INFER.pdmodel_path, + cfg.INFER.pdiparams_path, + device=cfg.INFER.device, + engine=cfg.INFER.engine, + precision=cfg.INFER.precision, + onnx_path=cfg.INFER.onnx_path, + ir_optim=cfg.INFER.ir_optim, + min_subgraph_size=cfg.INFER.min_subgraph_size, + gpu_mem=cfg.INFER.gpu_mem, + gpu_id=cfg.INFER.gpu_id, + max_batch_size=cfg.INFER.max_batch_size, + num_cpu_threads=cfg.INFER.num_cpu_threads, + ) + self.log_freq = cfg.log_freq + + # get input names and data handles + self.input_names = self.predictor.get_input_names() + self.input_data_handle = self.predictor.get_input_handle(self.input_names[0]) + + # get output names and data handles + self.output_names = self.predictor.get_output_names() + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) + + def predict( + self, + input_data: np.ndarray, + batch_size: int = 1, + ) -> np.ndarray: + """Predicts the output of the yinglong model for the given input. + + Args: + input_data (np.ndarray): Input data of shape (N, T, H, W). + batch_size (int, optional): Batch size, now only support 1. Defaults to 1. + Returns: + np.ndarray: Prediction. + """ + if batch_size != 1: + raise ValueError( + f"SFNOPredictor only support batch_size=1, but got {batch_size}" + ) + # prepare input handle(s) + input_handles = {self.input_names[0]: self.input_data_handle} + # prepare output handle(s) + output_handles = {self.output_names[0]: self.output_handle} + + # prepare batch input dict + batch_input_dict = { + self.input_names[0]: input_data, + } + # send batch input data to input handle(s) + for name, handle in input_handles.items(): + handle.copy_from_cpu(batch_input_dict[name]) + + # run predictor + self.predictor.run() + + # receive batch output data from output handle(s) + pred = output_handles[self.output_names[0]].copy_to_cpu() + + return pred diff --git a/examples/neuraloperator/train_sfno.py b/examples/neuraloperator/train_sfno.py index 50843c00e9..3dadcd2c0c 100644 --- a/examples/neuraloperator/train_sfno.py +++ b/examples/neuraloperator/train_sfno.py @@ -1,315 +1,315 @@ -import hydra -import metric -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set train dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "SphericalSWEDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "data_split": "train", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 0, - } - - # set loss - train_loss = metric.LpLoss_train(d=2, p=2, reduce_dims=[0, 1]) - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_loss), - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - # set eval dataloader config - eval_dataloader_cfg_32 = { - "dataset": { - "name": "SphericalSWEDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "data_split": 
"test_32x64", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - eval_dataloader_cfg_64 = { - "dataset": { - "name": "SphericalSWEDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "data_split": "test_64x128", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - l2_eval_metric = metric.LpLoss(d=2, p=2, reduce_dims=[0, 1]) - sup_validator_32 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_32, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={"l2": ppsci.metric.FunctionalMetric(l2_eval_metric)}, - name="Sup_Validator_32x64", - ) - - sup_validator_64 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_64, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={"l2": ppsci.metric.FunctionalMetric(l2_eval_metric)}, - name="Sup_Validator_64x128", - ) - - validator = { - sup_validator_32.name: sup_validator_32, - sup_validator_64.name: sup_validator_64, - } - - model = ppsci.arch.SFNONet( - **cfg.MODEL, - ) - - # init optimizer and lr scheduler - if cfg.TRAIN.lr_scheduler.type == "ReduceOnPlateau": - lr_scheduler = paddle.optimizer.lr.ReduceOnPlateau( - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - factor=cfg.TRAIN.lr_scheduler.gamma, - patience=cfg.TRAIN.lr_scheduler.scheduler_patience, - mode="min", - ) - elif cfg.TRAIN.lr_scheduler.type == "CosineAnnealingDecay": - lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - T_max=cfg.TRAIN.lr_scheduler.scheduler_T_max, - )() - elif cfg.TRAIN.lr_scheduler.type == "StepDecay": - lr_scheduler = ppsci.optimizer.lr_scheduler.Step( - epochs=cfg.TRAIN.lr_scheduler.epochs, - iters_per_epoch=ITERS_PER_EPOCH, - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - step_size=cfg.TRAIN.lr_scheduler.step_size, - gamma=cfg.TRAIN.lr_scheduler.gamma, - )() - else: - raise ValueError(f"Got scheduler={cfg.TRAIN.lr_scheduler.type}") - optimizer = ppsci.optimizer.Adam(lr_scheduler, weight_decay=cfg.TRAIN.wd)(model) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, - validator=validator, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set eval dataloader config - eval_dataloader_cfg_32 = { - "dataset": { - "name": "SphericalSWEDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "data_split": "test_32x64", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - eval_dataloader_cfg_64 = { - "dataset": { - "name": "SphericalSWEDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": 
cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "data_split": "test_64x128", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - train_loss = metric.LpLoss_train(d=2, p=2, reduce_dims=[0, 1]) - - l2_eval_metric = metric.LpLoss(d=2, p=2, reduce_dims=[0, 1]) - sup_validator_32 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_32, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={"l2": ppsci.metric.FunctionalMetric(l2_eval_metric)}, - name="Sup_Validator_32x64", - ) - - sup_validator_64 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_64, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={"l2": ppsci.metric.FunctionalMetric(l2_eval_metric)}, - name="Sup_Validator_64x128", - ) - - validator = { - sup_validator_32.name: sup_validator_32, - sup_validator_64.name: sup_validator_64, - } - - model = ppsci.arch.SFNONet( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate - solver.eval() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.SFNONet( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([None, 3, 32, 64], "float32", name=key) - for key in model.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - import matplotlib.pyplot as plt - import predictor - - predictor = predictor.SFNOPredictor(cfg) - - data = np.load(cfg.INFER.data_path, allow_pickle=True).item() - input_data = data["x"][0].reshape(1, *data["x"].shape[1:]).astype("float32") - label = data["y"][0][0, ...].astype("float32") - - pred_data = predictor.predict(input_data, cfg.INFER.batch_size) - - fig = plt.figure(figsize=(7, 7)) - ax = fig.add_subplot(1, 3, 1) - ax.imshow(input_data.squeeze()[0, ...]) - ax.set_title("k(x)") - plt.xticks([], []) - plt.yticks([], []) - - ax = fig.add_subplot(1, 3, 2) - ax.imshow(label) - ax.set_title("Ground-truth y") - plt.xticks([], []) - plt.yticks([], []) - - ax = fig.add_subplot(1, 3, 3) - ax.imshow(pred_data.squeeze()[0, ...]) - ax.set_title("Model prediction") - plt.xticks([], []) - plt.yticks([], []) - plt.savefig(cfg.output_dir) - logger.message("save success") - plt.close(fig) - - -@hydra.main( - version_base=None, - config_path="./conf", - config_name="sfno_swe_pretrain.yaml", -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +import hydra +import metric +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "SphericalSWEDataset", + 
"data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "data_split": "train", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 0, + } + + # set loss + train_loss = metric.LpLoss_train(d=2, p=2, reduce_dims=[0, 1]) + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_loss), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + # set eval dataloader config + eval_dataloader_cfg_32 = { + "dataset": { + "name": "SphericalSWEDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "data_split": "test_32x64", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + eval_dataloader_cfg_64 = { + "dataset": { + "name": "SphericalSWEDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "data_split": "test_64x128", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + l2_eval_metric = metric.LpLoss(d=2, p=2, reduce_dims=[0, 1]) + sup_validator_32 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_32, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={"l2": ppsci.metric.FunctionalMetric(l2_eval_metric)}, + name="Sup_Validator_32x64", + ) + + sup_validator_64 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_64, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={"l2": ppsci.metric.FunctionalMetric(l2_eval_metric)}, + name="Sup_Validator_64x128", + ) + + validator = { + sup_validator_32.name: sup_validator_32, + sup_validator_64.name: sup_validator_64, + } + + model = ppsci.arch.SFNONet( + **cfg.MODEL, + ) + + # init optimizer and lr scheduler + if cfg.TRAIN.lr_scheduler.type == "ReduceOnPlateau": + lr_scheduler = paddle.optimizer.lr.ReduceOnPlateau( + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + factor=cfg.TRAIN.lr_scheduler.gamma, + patience=cfg.TRAIN.lr_scheduler.scheduler_patience, + mode="min", + ) + elif cfg.TRAIN.lr_scheduler.type == "CosineAnnealingDecay": + lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + T_max=cfg.TRAIN.lr_scheduler.scheduler_T_max, + )() + elif cfg.TRAIN.lr_scheduler.type == "StepDecay": + lr_scheduler = ppsci.optimizer.lr_scheduler.Step( + epochs=cfg.TRAIN.lr_scheduler.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + step_size=cfg.TRAIN.lr_scheduler.step_size, + gamma=cfg.TRAIN.lr_scheduler.gamma, + )() + else: + raise ValueError(f"Got scheduler={cfg.TRAIN.lr_scheduler.type}") + optimizer = ppsci.optimizer.Adam(lr_scheduler, weight_decay=cfg.TRAIN.wd)(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + 
cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + seed=cfg.seed, + validator=validator, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set eval dataloader config + eval_dataloader_cfg_32 = { + "dataset": { + "name": "SphericalSWEDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "data_split": "test_32x64", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + eval_dataloader_cfg_64 = { + "dataset": { + "name": "SphericalSWEDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "data_split": "test_64x128", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + train_loss = metric.LpLoss_train(d=2, p=2, reduce_dims=[0, 1]) + + l2_eval_metric = metric.LpLoss(d=2, p=2, reduce_dims=[0, 1]) + sup_validator_32 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_32, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={"l2": ppsci.metric.FunctionalMetric(l2_eval_metric)}, + name="Sup_Validator_32x64", + ) + + sup_validator_64 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_64, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={"l2": ppsci.metric.FunctionalMetric(l2_eval_metric)}, + name="Sup_Validator_64x128", + ) + + validator = { + sup_validator_32.name: sup_validator_32, + sup_validator_64.name: sup_validator_64, + } + + model = ppsci.arch.SFNONet( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + solver.eval() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.SFNONet( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 3, 32, 64], "float32", name=key) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + import matplotlib.pyplot as plt + import predictor + + predictor = predictor.SFNOPredictor(cfg) + + data = np.load(cfg.INFER.data_path, allow_pickle=True).item() + input_data = data["x"][0].reshape(1, *data["x"].shape[1:]).astype("float32") + label = data["y"][0][0, ...].astype("float32") + + pred_data = predictor.predict(input_data, cfg.INFER.batch_size) + + fig = plt.figure(figsize=(7, 7)) + ax = fig.add_subplot(1, 3, 1) + ax.imshow(input_data.squeeze()[0, ...]) + ax.set_title("k(x)") + plt.xticks([], []) + 
plt.yticks([], []) + + ax = fig.add_subplot(1, 3, 2) + ax.imshow(label) + ax.set_title("Ground-truth y") + plt.xticks([], []) + plt.yticks([], []) + + ax = fig.add_subplot(1, 3, 3) + ax.imshow(pred_data.squeeze()[0, ...]) + ax.set_title("Model prediction") + plt.xticks([], []) + plt.yticks([], []) + plt.savefig(cfg.output_dir) + logger.message("save success") + plt.close(fig) + + +@hydra.main( + version_base=None, + config_path="./conf", + config_name="sfno_swe_pretrain.yaml", +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/neuraloperator/train_tfno.py b/examples/neuraloperator/train_tfno.py index 1612ae5489..a34636d660 100644 --- a/examples/neuraloperator/train_tfno.py +++ b/examples/neuraloperator/train_tfno.py @@ -1,365 +1,365 @@ -import hydra -import metric -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set train dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "train", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 0, - } - - # set loss - l2loss = metric.LpLoss_train(d=2, p=2) - h1loss = metric.H1Loss_train(d=2) - if cfg.TRAIN.training_loss == "l2": - train_loss = l2loss - if cfg.TRAIN.training_loss == "h1": - train_loss = h1loss - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_loss), - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - # set eval dataloader config - eval_dataloader_cfg_16 = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "test_16x16", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - eval_dataloader_cfg_32 = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - 
"encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "test_32x32", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - h1_eval_metric = metric.H1Loss(d=2) - l2_eval_metric = metric.LpLoss(d=2, p=2) - sup_validator_16 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_16, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={ - "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), - "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), - }, - name="Sup_Validator_16x16", - ) - - sup_validator_32 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_32, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={ - "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), - "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), - }, - name="Sup_Validator_32x32", - ) - - validator = { - sup_validator_16.name: sup_validator_16, - sup_validator_32.name: sup_validator_32, - } - - model = ppsci.arch.TFNO2dNet( - **cfg.MODEL, - ) - # init optimizer and lr scheduler - if cfg.TRAIN.lr_scheduler.type == "ReduceOnPlateau": - lr_scheduler = paddle.optimizer.lr.ReduceOnPlateau( - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - factor=cfg.TRAIN.lr_scheduler.gamma, - patience=cfg.TRAIN.lr_scheduler.scheduler_patience, - mode="min", - ) - elif cfg.TRAIN.lr_scheduler.type == "CosineAnnealingDecay": - lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - T_max=cfg.TRAIN.lr_scheduler.scheduler_T_max, - )() - elif cfg.TRAIN.lr_scheduler.type == "StepDecay": - lr_scheduler = ppsci.optimizer.lr_scheduler.Step( - epochs=cfg.TRAIN.lr_scheduler.epochs, - iters_per_epoch=ITERS_PER_EPOCH, - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - step_size=cfg.TRAIN.lr_scheduler.step_size, - gamma=cfg.TRAIN.lr_scheduler.gamma, - )() - else: - raise ValueError(f"Got scheduler={cfg.TRAIN.lr_scheduler.type}") - optimizer = ppsci.optimizer.Adam(lr_scheduler, weight_decay=cfg.TRAIN.wd)(model) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, - validator=validator, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set eval dataloader config - eval_dataloader_cfg_16 = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "test_16x16", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - eval_dataloader_cfg_32 = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": 
cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "test_32x32", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - # set loss - l2loss = metric.LpLoss_train(d=2, p=2) - h1loss = metric.H1Loss_train(d=2) - if cfg.TRAIN.training_loss == "l2": - train_loss = l2loss - if cfg.TRAIN.training_loss == "h1": - train_loss = h1loss - - h1_eval_metric = metric.H1Loss(d=2) - l2_eval_metric = metric.LpLoss(d=2, p=2) - sup_validator_16 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_16, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={ - "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), - "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), - }, - name="Sup_Validator_16x16", - ) - - sup_validator_32 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_32, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={ - "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), - "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), - }, - name="Sup_Validator_32x32", - ) - validator = { - sup_validator_16.name: sup_validator_16, - sup_validator_32.name: sup_validator_32, - } - - model = ppsci.arch.TFNO2dNet( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate - solver.eval() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.TFNO2dNet( - **cfg.MODEL, - ) - model.eval() - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([None, 3, 16, 16], "float32", name=key) - for key in model.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - import matplotlib.pyplot as plt - import predictor - - predictor = predictor.FNOPredictor(cfg) - - data = np.load(cfg.INFER.data_path, allow_pickle=True).item() - - input_data = data["x"][0].reshape(-1, 1, *data["x"].shape[1:]).astype("float32") - label = data["y"][0].astype("float32") - - pred_data = predictor.predict(input_data, cfg.INFER.batch_size) - - fig = plt.figure(figsize=(7, 7)) - - ax = fig.add_subplot(1, 3, 1) - ax.imshow(input_data.squeeze(), cmap="gray") - ax.set_title("k(x)") - plt.xticks([], []) - plt.yticks([], []) - - ax = fig.add_subplot(1, 3, 2) - ax.imshow(label) - ax.set_title("Ground-truth y") - plt.xticks([], []) - plt.yticks([], []) - - ax = fig.add_subplot(1, 3, 3) - ax.imshow(pred_data.squeeze()) - ax.set_title("Model prediction") - plt.xticks([], []) - plt.yticks([], []) - plt.savefig(cfg.output_dir) - logger.message("save success") - plt.close(fig) - - -@hydra.main( - version_base=None, - config_path="./conf", - config_name="tfno_darcyflow_pretrain.yaml", -) -def main(cfg: DictConfig): - if cfg.mode == "train": - 
train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +import hydra +import metric +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "train", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 0, + } + + # set loss + l2loss = metric.LpLoss_train(d=2, p=2) + h1loss = metric.H1Loss_train(d=2) + if cfg.TRAIN.training_loss == "l2": + train_loss = l2loss + if cfg.TRAIN.training_loss == "h1": + train_loss = h1loss + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_loss), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + # set eval dataloader config + eval_dataloader_cfg_16 = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "test_16x16", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + eval_dataloader_cfg_32 = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "test_32x32", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + h1_eval_metric = metric.H1Loss(d=2) + l2_eval_metric = metric.LpLoss(d=2, p=2) + sup_validator_16 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_16, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={ + "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), + "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), + }, + name="Sup_Validator_16x16", + ) + + sup_validator_32 = 
ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_32, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={ + "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), + "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), + }, + name="Sup_Validator_32x32", + ) + + validator = { + sup_validator_16.name: sup_validator_16, + sup_validator_32.name: sup_validator_32, + } + + model = ppsci.arch.TFNO2dNet( + **cfg.MODEL, + ) + # init optimizer and lr scheduler + if cfg.TRAIN.lr_scheduler.type == "ReduceOnPlateau": + lr_scheduler = paddle.optimizer.lr.ReduceOnPlateau( + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + factor=cfg.TRAIN.lr_scheduler.gamma, + patience=cfg.TRAIN.lr_scheduler.scheduler_patience, + mode="min", + ) + elif cfg.TRAIN.lr_scheduler.type == "CosineAnnealingDecay": + lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + T_max=cfg.TRAIN.lr_scheduler.scheduler_T_max, + )() + elif cfg.TRAIN.lr_scheduler.type == "StepDecay": + lr_scheduler = ppsci.optimizer.lr_scheduler.Step( + epochs=cfg.TRAIN.lr_scheduler.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + step_size=cfg.TRAIN.lr_scheduler.step_size, + gamma=cfg.TRAIN.lr_scheduler.gamma, + )() + else: + raise ValueError(f"Got scheduler={cfg.TRAIN.lr_scheduler.type}") + optimizer = ppsci.optimizer.Adam(lr_scheduler, weight_decay=cfg.TRAIN.wd)(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + seed=cfg.seed, + validator=validator, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set eval dataloader config + eval_dataloader_cfg_16 = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "test_16x16", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + eval_dataloader_cfg_32 = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "test_32x32", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + # set loss + l2loss = metric.LpLoss_train(d=2, p=2) + h1loss = metric.H1Loss_train(d=2) + if cfg.TRAIN.training_loss == "l2": + train_loss = l2loss + 
if cfg.TRAIN.training_loss == "h1": + train_loss = h1loss + + h1_eval_metric = metric.H1Loss(d=2) + l2_eval_metric = metric.LpLoss(d=2, p=2) + sup_validator_16 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_16, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={ + "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), + "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), + }, + name="Sup_Validator_16x16", + ) + + sup_validator_32 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_32, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={ + "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), + "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), + }, + name="Sup_Validator_32x32", + ) + validator = { + sup_validator_16.name: sup_validator_16, + sup_validator_32.name: sup_validator_32, + } + + model = ppsci.arch.TFNO2dNet( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + solver.eval() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.TFNO2dNet( + **cfg.MODEL, + ) + model.eval() + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 3, 16, 16], "float32", name=key) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + import matplotlib.pyplot as plt + import predictor + + predictor = predictor.FNOPredictor(cfg) + + data = np.load(cfg.INFER.data_path, allow_pickle=True).item() + + input_data = data["x"][0].reshape(-1, 1, *data["x"].shape[1:]).astype("float32") + label = data["y"][0].astype("float32") + + pred_data = predictor.predict(input_data, cfg.INFER.batch_size) + + fig = plt.figure(figsize=(7, 7)) + + ax = fig.add_subplot(1, 3, 1) + ax.imshow(input_data.squeeze(), cmap="gray") + ax.set_title("k(x)") + plt.xticks([], []) + plt.yticks([], []) + + ax = fig.add_subplot(1, 3, 2) + ax.imshow(label) + ax.set_title("Ground-truth y") + plt.xticks([], []) + plt.yticks([], []) + + ax = fig.add_subplot(1, 3, 3) + ax.imshow(pred_data.squeeze()) + ax.set_title("Model prediction") + plt.xticks([], []) + plt.yticks([], []) + plt.savefig(cfg.output_dir) + logger.message("save success") + plt.close(fig) + + +@hydra.main( + version_base=None, + config_path="./conf", + config_name="tfno_darcyflow_pretrain.yaml", +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/neuraloperator/train_uno.py b/examples/neuraloperator/train_uno.py index f73c2a1038..8211a34167 100644 --- a/examples/neuraloperator/train_uno.py +++ b/examples/neuraloperator/train_uno.py @@ -1,365 +1,365 @@ -import hydra -import metric -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set train dataloader config - 
train_dataloader_cfg = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "train", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 0, - } - - # set loss - l2loss = metric.LpLoss_train(d=2, p=2) - h1loss = metric.H1Loss_train(d=2) - if cfg.TRAIN.training_loss == "l2": - train_loss = l2loss - if cfg.TRAIN.training_loss == "h1": - train_loss = h1loss - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(train_loss), - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - # set eval dataloader config - eval_dataloader_cfg_16 = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "test_16x16", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - eval_dataloader_cfg_32 = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "test_32x32", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - h1_eval_metric = metric.H1Loss(d=2) - l2_eval_metric = metric.LpLoss(d=2, p=2) - sup_validator_16 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_16, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={ - "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), - "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), - }, - name="Sup_Validator_16x16", - ) - - sup_validator_32 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_32, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={ - "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), - "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), - }, - name="Sup_Validator_32x32", - ) - - validator = { - sup_validator_16.name: sup_validator_16, - sup_validator_32.name: sup_validator_32, - } - - model = ppsci.arch.UNONet( - **cfg.MODEL, - ) - # init optimizer and lr scheduler - if cfg.TRAIN.lr_scheduler.type == "ReduceOnPlateau": - lr_scheduler = 
paddle.optimizer.lr.ReduceOnPlateau( - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - factor=cfg.TRAIN.lr_scheduler.gamma, - patience=cfg.TRAIN.lr_scheduler.scheduler_patience, - mode="min", - ) - elif cfg.TRAIN.lr_scheduler.type == "CosineAnnealingDecay": - lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - T_max=cfg.TRAIN.lr_scheduler.scheduler_T_max, - )() - elif cfg.TRAIN.lr_scheduler.type == "StepDecay": - lr_scheduler = ppsci.optimizer.lr_scheduler.Step( - epochs=cfg.TRAIN.lr_scheduler.epochs, - iters_per_epoch=ITERS_PER_EPOCH, - learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, - step_size=cfg.TRAIN.lr_scheduler.step_size, - gamma=cfg.TRAIN.lr_scheduler.gamma, - )() - else: - raise ValueError(f"Got scheduler={cfg.TRAIN.lr_scheduler.type}") - optimizer = ppsci.optimizer.Adam(lr_scheduler, weight_decay=cfg.TRAIN.wd)(model) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, - validator=validator, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set eval dataloader config - eval_dataloader_cfg_16 = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "test_16x16", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - eval_dataloader_cfg_32 = { - "dataset": { - "name": "DarcyFlowDataset", - "data_dir": cfg.FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.DATASET.label_keys, - "train_resolution": cfg.DATASET.train_resolution, - "test_resolutions": cfg.DATASET.test_resolutions, - "grid_boundaries": cfg.DATASET.grid_boundaries, - "encode_input": cfg.DATASET.encode_input, - "encode_output": cfg.DATASET.encode_output, - "encoding": cfg.DATASET.encoding, - "channel_dim": cfg.DATASET.channel_dim, - "data_split": "test_32x32", - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 0, - } - - # set loss - l2loss = metric.LpLoss_train(d=2, p=2) - h1loss = metric.H1Loss_train(d=2) - if cfg.TRAIN.training_loss == "l2": - train_loss = l2loss - if cfg.TRAIN.training_loss == "h1": - train_loss = h1loss - - h1_eval_metric = metric.H1Loss(d=2) - l2_eval_metric = metric.LpLoss(d=2, p=2) - sup_validator_16 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_16, - loss=ppsci.loss.FunctionalLoss(train_loss), - metric={ - "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), - "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), - }, - name="Sup_Validator_16x16", - ) - - sup_validator_32 = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg_32, - 
loss=ppsci.loss.FunctionalLoss(train_loss), - metric={ - "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), - "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), - }, - name="Sup_Validator_32x32", - ) - validator = { - sup_validator_16.name: sup_validator_16, - sup_validator_32.name: sup_validator_32, - } - - model = ppsci.arch.UNONet( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # evaluate - solver.eval() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.UNONet( - **cfg.MODEL, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([None, 3, 16, 16], "float32", name=key) - for key in model.input_keys - }, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - import matplotlib.pyplot as plt - import predictor - - predictor = predictor.FNOPredictor(cfg) - - data = np.load(cfg.INFER.data_path, allow_pickle=True).item() - - input_data = data["x"][0].reshape(-1, 1, *data["x"].shape[1:]).astype("float32") - label = data["y"][0].astype("float32") - - pred_data = predictor.predict(input_data, cfg.INFER.batch_size) - - fig = plt.figure(figsize=(7, 7)) - - ax = fig.add_subplot(1, 3, 1) - ax.imshow(input_data.squeeze(), cmap="gray") - ax.set_title("k(x)") - plt.xticks([], []) - plt.yticks([], []) - - ax = fig.add_subplot(1, 3, 2) - ax.imshow(label) - ax.set_title("Ground-truth y") - plt.xticks([], []) - plt.yticks([], []) - - ax = fig.add_subplot(1, 3, 3) - ax.imshow(pred_data.squeeze()) - ax.set_title("Model prediction") - plt.xticks([], []) - plt.yticks([], []) - plt.savefig(cfg.output_dir) - logger.message("save success") - plt.close(fig) - - -@hydra.main( - version_base=None, - config_path="./conf", - config_name="uno_darcyflow_pretrain.yaml", -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +import hydra +import metric +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "train", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 0, + } + + # set loss + l2loss = metric.LpLoss_train(d=2, p=2) + h1loss = metric.H1Loss_train(d=2) + if 
cfg.TRAIN.training_loss == "l2": + train_loss = l2loss + if cfg.TRAIN.training_loss == "h1": + train_loss = h1loss + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(train_loss), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + # set eval dataloader config + eval_dataloader_cfg_16 = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "test_16x16", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + eval_dataloader_cfg_32 = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "test_32x32", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + h1_eval_metric = metric.H1Loss(d=2) + l2_eval_metric = metric.LpLoss(d=2, p=2) + sup_validator_16 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_16, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={ + "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), + "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), + }, + name="Sup_Validator_16x16", + ) + + sup_validator_32 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_32, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={ + "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), + "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), + }, + name="Sup_Validator_32x32", + ) + + validator = { + sup_validator_16.name: sup_validator_16, + sup_validator_32.name: sup_validator_32, + } + + model = ppsci.arch.UNONet( + **cfg.MODEL, + ) + # init optimizer and lr scheduler + if cfg.TRAIN.lr_scheduler.type == "ReduceOnPlateau": + lr_scheduler = paddle.optimizer.lr.ReduceOnPlateau( + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + factor=cfg.TRAIN.lr_scheduler.gamma, + patience=cfg.TRAIN.lr_scheduler.scheduler_patience, + mode="min", + ) + elif cfg.TRAIN.lr_scheduler.type == "CosineAnnealingDecay": + lr_scheduler = paddle.optimizer.lr.CosineAnnealingDecay( + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + T_max=cfg.TRAIN.lr_scheduler.scheduler_T_max, + )() + elif cfg.TRAIN.lr_scheduler.type == "StepDecay": + lr_scheduler = ppsci.optimizer.lr_scheduler.Step( + epochs=cfg.TRAIN.lr_scheduler.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + learning_rate=cfg.TRAIN.lr_scheduler.learning_rate, + step_size=cfg.TRAIN.lr_scheduler.step_size, + gamma=cfg.TRAIN.lr_scheduler.gamma, + )() + else: + raise 
ValueError(f"Got scheduler={cfg.TRAIN.lr_scheduler.type}") + optimizer = ppsci.optimizer.Adam(lr_scheduler, weight_decay=cfg.TRAIN.wd)(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + seed=cfg.seed, + validator=validator, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set eval dataloader config + eval_dataloader_cfg_16 = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "test_16x16", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + eval_dataloader_cfg_32 = { + "dataset": { + "name": "DarcyFlowDataset", + "data_dir": cfg.FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.DATASET.label_keys, + "train_resolution": cfg.DATASET.train_resolution, + "test_resolutions": cfg.DATASET.test_resolutions, + "grid_boundaries": cfg.DATASET.grid_boundaries, + "encode_input": cfg.DATASET.encode_input, + "encode_output": cfg.DATASET.encode_output, + "encoding": cfg.DATASET.encoding, + "channel_dim": cfg.DATASET.channel_dim, + "data_split": "test_32x32", + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 0, + } + + # set loss + l2loss = metric.LpLoss_train(d=2, p=2) + h1loss = metric.H1Loss_train(d=2) + if cfg.TRAIN.training_loss == "l2": + train_loss = l2loss + if cfg.TRAIN.training_loss == "h1": + train_loss = h1loss + + h1_eval_metric = metric.H1Loss(d=2) + l2_eval_metric = metric.LpLoss(d=2, p=2) + sup_validator_16 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_16, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={ + "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), + "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), + }, + name="Sup_Validator_16x16", + ) + + sup_validator_32 = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg_32, + loss=ppsci.loss.FunctionalLoss(train_loss), + metric={ + "h1": ppsci.metric.FunctionalMetric(h1_eval_metric), + "l2": ppsci.metric.FunctionalMetric(l2_eval_metric), + }, + name="Sup_Validator_32x32", + ) + validator = { + sup_validator_16.name: sup_validator_16, + sup_validator_32.name: sup_validator_32, + } + + model = ppsci.arch.UNONet( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + compute_metric_by_batch=cfg.EVAL.compute_metric_by_batch, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # evaluate + solver.eval() + + +def export(cfg: DictConfig): + # set model + model = 
ppsci.arch.UNONet( + **cfg.MODEL, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 3, 16, 16], "float32", name=key) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + import matplotlib.pyplot as plt + import predictor + + predictor = predictor.FNOPredictor(cfg) + + data = np.load(cfg.INFER.data_path, allow_pickle=True).item() + + input_data = data["x"][0].reshape(-1, 1, *data["x"].shape[1:]).astype("float32") + label = data["y"][0].astype("float32") + + pred_data = predictor.predict(input_data, cfg.INFER.batch_size) + + fig = plt.figure(figsize=(7, 7)) + + ax = fig.add_subplot(1, 3, 1) + ax.imshow(input_data.squeeze(), cmap="gray") + ax.set_title("k(x)") + plt.xticks([], []) + plt.yticks([], []) + + ax = fig.add_subplot(1, 3, 2) + ax.imshow(label) + ax.set_title("Ground-truth y") + plt.xticks([], []) + plt.yticks([], []) + + ax = fig.add_subplot(1, 3, 3) + ax.imshow(pred_data.squeeze()) + ax.set_title("Model prediction") + plt.xticks([], []) + plt.yticks([], []) + plt.savefig(cfg.output_dir) + logger.message("save success") + plt.close(fig) + + +@hydra.main( + version_base=None, + config_path="./conf", + config_name="uno_darcyflow_pretrain.yaml", +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/nowcastnet/conf/nowcastnet.yaml b/examples/nowcastnet/conf/nowcastnet.yaml index d9d4516bf2..6c595a58fe 100644 --- a/examples/nowcastnet/conf/nowcastnet.yaml +++ b/examples/nowcastnet/conf/nowcastnet.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -74,3 +75,82 @@ INFER: max_batch_size: 16 num_cpu_threads: 4 batch_size: 1 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_nowcastnet/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - INFER.export_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: eval # running mode: train/eval +seed: 42 +log_freq: 20 +output_dir: ${hydra:run.dir} +NORMAL_DATASET_PATH: datasets/mrms/figure +LARGE_DATASET_PATH: datasets/mrms/large_figure + +# set working condition +CASE_TYPE: normal # normal/large +NUM_SAVE_SAMPLES: 10 +CPU_WORKER: 1 + +# model settings +MODEL: + normal: + input_keys: ["input"] + output_keys: ["output"] + input_length: 9 + total_length: 29 + image_width: 512 + image_height: 512 + image_ch: 2 + ngf: 32 + large: + input_keys: ["input"] + output_keys: ["output"] + input_length: 9 + total_length: 29 + image_width: 1024 + image_height: 1024 + image_ch: 2 + ngf: 32 + +# evaluation settings +EVAL: + pretrained_model_path: checkpoints/paddle_mrms_model + +INFER: + pretrained_model_path: 
https://paddle-org.bj.bcebos.com/paddlescience/models/nowcastnet/nowcastnet_pretrained.pdparams + export_path: ./inference/nowcastnet + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 16 + num_cpu_threads: 4 + batch_size: 1 +>>>>>>> Stashed changes diff --git a/examples/nowcastnet/nowcastnet.py b/examples/nowcastnet/nowcastnet.py index 9156907ac6..244de75ba7 100644 --- a/examples/nowcastnet/nowcastnet.py +++ b/examples/nowcastnet/nowcastnet.py @@ -1,194 +1,194 @@ -""" -Reference: https://codeocean.com/capsule/3935105/tree/v1 -""" -from os import path as osp - -import hydra -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - print("Not supported.") - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - if cfg.CASE_TYPE == "large": - dataset_path = cfg.LARGE_DATASET_PATH - model_cfg = cfg.MODEL.large - output_dir = osp.join(cfg.output_dir, "large") - elif cfg.CASE_TYPE == "normal": - dataset_path = cfg.NORMAL_DATASET_PATH - model_cfg = cfg.MODEL.normal - output_dir = osp.join(cfg.output_dir, "normal") - else: - raise ValueError( - f"cfg.CASE_TYPE should in ['normal', 'large'], but got '{cfg.mode}'" - ) - model = ppsci.arch.NowcastNet(**model_cfg) - - input_keys = ("radar_frames",) - dataset_param = { - "input_keys": input_keys, - "label_keys": (), - "image_width": model_cfg.image_width, - "image_height": model_cfg.image_height, - "total_length": model_cfg.total_length, - "dataset_path": dataset_path, - "data_type": paddle.get_default_dtype(), - } - test_data_loader = paddle.io.DataLoader( - ppsci.data.dataset.RadarDataset(**dataset_param), - batch_size=1, - shuffle=False, - num_workers=cfg.CPU_WORKER, - drop_last=True, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=output_dir, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - - for batch_id, test_ims in enumerate(test_data_loader): - test_ims = test_ims[0][input_keys[0]].numpy() - frames_tensor = paddle.to_tensor( - data=test_ims, dtype=paddle.get_default_dtype() - ) - if batch_id <= cfg.NUM_SAVE_SAMPLES: - visualizer = { - "v_nowcastnet": ppsci.visualize.VisualizerRadar( - {"input": frames_tensor}, - { - "output": lambda out: out["output"], - }, - prefix="v_nowcastnet", - case_type=cfg.CASE_TYPE, - total_length=model_cfg.total_length, - ) - } - solver.visualizer = visualizer - # visualize prediction - solver.visualize(batch_id) - - -def export(cfg: DictConfig): - from paddle.static import InputSpec - - # set models - if cfg.CASE_TYPE == "large": - model_cfg = cfg.MODEL.large - elif cfg.CASE_TYPE == "normal": - model_cfg = cfg.MODEL.normal - else: - raise ValueError( - f"cfg.CASE_TYPE should in ['normal', 'large'], but got '{cfg.mode}'" - ) - model = ppsci.arch.NowcastNet(**model_cfg) - - # load pretrained model - solver = ppsci.solver.Solver( - model=model, pretrained_model_path=cfg.INFER.pretrained_model_path - ) - # export models - input_spec = [ - { - key: InputSpec( - [None, 29, model_cfg.image_width, model_cfg.image_height, 2], - "float32", - name=key, - ) - for key in model_cfg.input_keys - }, - ] - solver.export(input_spec, 
cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - import os.path as osp - - from deploy.python_infer import pinn_predictor - - # set model predictor - predictor = pinn_predictor.PINNPredictor(cfg) - - if cfg.CASE_TYPE == "large": - dataset_path = cfg.LARGE_DATASET_PATH - model_cfg = cfg.MODEL.large - output_dir = osp.join(cfg.output_dir, "large") - elif cfg.CASE_TYPE == "normal": - dataset_path = cfg.NORMAL_DATASET_PATH - model_cfg = cfg.MODEL.normal - output_dir = osp.join(cfg.output_dir, "normal") - else: - raise ValueError( - f"cfg.CASE_TYPE should in ['normal', 'large'], but got '{cfg.mode}'" - ) - - input_keys = ("radar_frames",) - dataset_param = { - "input_keys": input_keys, - "label_keys": (), - "image_width": model_cfg.image_width, - "image_height": model_cfg.image_height, - "total_length": model_cfg.total_length, - "dataset_path": dataset_path, - } - test_data_loader = paddle.io.DataLoader( - ppsci.data.dataset.RadarDataset(**dataset_param), - batch_size=cfg.INFER.batch_size, - num_workers=cfg.CPU_WORKER, - drop_last=True, - ) - for batch_id, test_ims in enumerate(test_data_loader): - if batch_id > cfg.NUM_SAVE_SAMPLES: - break - test_ims = {"input": test_ims[0][input_keys[0]].numpy()} - output_dict = predictor.predict(test_ims, cfg.INFER.batch_size) - # mapping data to model_cfg.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(model_cfg.output_keys, output_dict.keys()) - } - - visualizer = ppsci.visualize.VisualizerRadar( - test_ims, - { - "output": lambda out: out["output"], - }, - prefix="v_nowcastnet", - case_type=cfg.CASE_TYPE, - total_length=model_cfg.total_length, - ) - test_ims.update(output_dict) - visualizer.save(osp.join(output_dir, f"epoch_{batch_id}"), test_ims) - - -@hydra.main(version_base=None, config_path="./conf", config_name="nowcastnet.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://codeocean.com/capsule/3935105/tree/v1 +""" +from os import path as osp + +import hydra +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + print("Not supported.") + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + if cfg.CASE_TYPE == "large": + dataset_path = cfg.LARGE_DATASET_PATH + model_cfg = cfg.MODEL.large + output_dir = osp.join(cfg.output_dir, "large") + elif cfg.CASE_TYPE == "normal": + dataset_path = cfg.NORMAL_DATASET_PATH + model_cfg = cfg.MODEL.normal + output_dir = osp.join(cfg.output_dir, "normal") + else: + raise ValueError( + f"cfg.CASE_TYPE should in ['normal', 'large'], but got '{cfg.mode}'" + ) + model = ppsci.arch.NowcastNet(**model_cfg) + + input_keys = ("radar_frames",) + dataset_param = { + "input_keys": input_keys, + "label_keys": (), + "image_width": model_cfg.image_width, + "image_height": model_cfg.image_height, + "total_length": model_cfg.total_length, + "dataset_path": dataset_path, + "data_type": paddle.get_default_dtype(), + } + test_data_loader = paddle.io.DataLoader( + 
ppsci.data.dataset.RadarDataset(**dataset_param), + batch_size=1, + shuffle=False, + num_workers=cfg.CPU_WORKER, + drop_last=True, + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=output_dir, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + + for batch_id, test_ims in enumerate(test_data_loader): + test_ims = test_ims[0][input_keys[0]].numpy() + frames_tensor = paddle.to_tensor( + data=test_ims, dtype=paddle.get_default_dtype() + ) + if batch_id <= cfg.NUM_SAVE_SAMPLES: + visualizer = { + "v_nowcastnet": ppsci.visualize.VisualizerRadar( + {"input": frames_tensor}, + { + "output": lambda out: out["output"], + }, + prefix="v_nowcastnet", + case_type=cfg.CASE_TYPE, + total_length=model_cfg.total_length, + ) + } + solver.visualizer = visualizer + # visualize prediction + solver.visualize(batch_id) + + +def export(cfg: DictConfig): + from paddle.static import InputSpec + + # set models + if cfg.CASE_TYPE == "large": + model_cfg = cfg.MODEL.large + elif cfg.CASE_TYPE == "normal": + model_cfg = cfg.MODEL.normal + else: + raise ValueError( + f"cfg.CASE_TYPE should in ['normal', 'large'], but got '{cfg.mode}'" + ) + model = ppsci.arch.NowcastNet(**model_cfg) + + # load pretrained model + solver = ppsci.solver.Solver( + model=model, pretrained_model_path=cfg.INFER.pretrained_model_path + ) + # export models + input_spec = [ + { + key: InputSpec( + [None, 29, model_cfg.image_width, model_cfg.image_height, 2], + "float32", + name=key, + ) + for key in model_cfg.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + import os.path as osp + + from deploy.python_infer import pinn_predictor + + # set model predictor + predictor = pinn_predictor.PINNPredictor(cfg) + + if cfg.CASE_TYPE == "large": + dataset_path = cfg.LARGE_DATASET_PATH + model_cfg = cfg.MODEL.large + output_dir = osp.join(cfg.output_dir, "large") + elif cfg.CASE_TYPE == "normal": + dataset_path = cfg.NORMAL_DATASET_PATH + model_cfg = cfg.MODEL.normal + output_dir = osp.join(cfg.output_dir, "normal") + else: + raise ValueError( + f"cfg.CASE_TYPE should in ['normal', 'large'], but got '{cfg.mode}'" + ) + + input_keys = ("radar_frames",) + dataset_param = { + "input_keys": input_keys, + "label_keys": (), + "image_width": model_cfg.image_width, + "image_height": model_cfg.image_height, + "total_length": model_cfg.total_length, + "dataset_path": dataset_path, + } + test_data_loader = paddle.io.DataLoader( + ppsci.data.dataset.RadarDataset(**dataset_param), + batch_size=cfg.INFER.batch_size, + num_workers=cfg.CPU_WORKER, + drop_last=True, + ) + for batch_id, test_ims in enumerate(test_data_loader): + if batch_id > cfg.NUM_SAVE_SAMPLES: + break + test_ims = {"input": test_ims[0][input_keys[0]].numpy()} + output_dict = predictor.predict(test_ims, cfg.INFER.batch_size) + # mapping data to model_cfg.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(model_cfg.output_keys, output_dict.keys()) + } + + visualizer = ppsci.visualize.VisualizerRadar( + test_ims, + { + "output": lambda out: out["output"], + }, + prefix="v_nowcastnet", + case_type=cfg.CASE_TYPE, + total_length=model_cfg.total_length, + ) + test_ims.update(output_dict) + visualizer.save(osp.join(output_dir, f"epoch_{batch_id}"), test_ims) + + +@hydra.main(version_base=None, config_path="./conf", config_name="nowcastnet.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif 
cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/ns/conf/ns_cvit_16x16.yaml b/examples/ns/conf/ns_cvit_16x16.yaml index f99356d168..441eeaf916 100644 --- a/examples/ns/conf/ns_cvit_16x16.yaml +++ b/examples/ns/conf/ns_cvit_16x16.yaml @@ -1,113 +1,113 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ns_cvit_16x16/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA: - path: "./NavierStokes-2D" - components: ["u", "vx", "vy"] - prev_steps: 10 - pred_steps: 1 - downsample: 1 - rollout_steps: 4 - -# model settings -MODEL: - input_keys: [u, y] - output_keys: [s] - in_dim: 3 - coords_dim: 2 - spatial_dims: [10, 128, 128] - patch_size: [1, 16, 16] - grid_size: [128, 128] - latent_dim: 512 - emb_dim: 768 - depth: 15 - num_heads: 12 - dec_emb_dim: 512 - dec_num_heads: 16 - dec_depth: 1 - num_mlp_layers: 1 - mlp_ratio: 2 - out_dim: 3 - layer_norm_eps: 1.0e-5 - embedding_type: grid - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 5000 - by_epoch: false - warmup_epoch: 5 - warmup_start_lr: 0.0 - weight_decay: 1.0e-5 - grad_clip: 1.0 - batch_size: 64 - pretrained_model_path: null - checkpoint_path: null - train_samples: 6500 - num_query_points: 1024 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - # compute_metric_by_batch: true - batch_size: 8 - rollout_steps: 4 - test_samples: 1000 - -# inference settings -INFER: - pretrained_model_path: null - export_path: ./inference/ns_cvit_16x16 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 8 - test_samples: 1000 - rollout_steps: 4 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ns_cvit_16x16/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: 
${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA: + path: "./NavierStokes-2D" + components: ["u", "vx", "vy"] + prev_steps: 10 + pred_steps: 1 + downsample: 1 + rollout_steps: 4 + +# model settings +MODEL: + input_keys: [u, y] + output_keys: [s] + in_dim: 3 + coords_dim: 2 + spatial_dims: [10, 128, 128] + patch_size: [1, 16, 16] + grid_size: [128, 128] + latent_dim: 512 + emb_dim: 768 + depth: 15 + num_heads: 12 + dec_emb_dim: 512 + dec_num_heads: 16 + dec_depth: 1 + num_mlp_layers: 1 + mlp_ratio: 2 + out_dim: 3 + layer_norm_eps: 1.0e-5 + embedding_type: grid + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 5000 + by_epoch: false + warmup_epoch: 5 + warmup_start_lr: 0.0 + weight_decay: 1.0e-5 + grad_clip: 1.0 + batch_size: 64 + pretrained_model_path: null + checkpoint_path: null + train_samples: 6500 + num_query_points: 1024 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + # compute_metric_by_batch: true + batch_size: 8 + rollout_steps: 4 + test_samples: 1000 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/ns_cvit_16x16 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 8 + test_samples: 1000 + rollout_steps: 4 diff --git a/examples/ns/conf/ns_cvit_32x32.yaml b/examples/ns/conf/ns_cvit_32x32.yaml index 96c2492f54..cf46de317f 100644 --- a/examples/ns/conf/ns_cvit_32x32.yaml +++ b/examples/ns/conf/ns_cvit_32x32.yaml @@ -1,113 +1,113 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ns_cvit_32x32/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA: - path: "./NavierStokes-2D" - components: ["u", "vx", "vy"] - prev_steps: 10 - pred_steps: 1 - downsample: 1 - rollout_steps: 4 - -# model settings -MODEL: - input_keys: [u, y] - output_keys: [s] - in_dim: 3 - coords_dim: 2 - spatial_dims: [10, 128, 128] - patch_size: [1, 32, 32] - grid_size: [128, 128] - latent_dim: 512 - emb_dim: 768 - depth: 15 - num_heads: 12 - dec_emb_dim: 512 - dec_num_heads: 16 - dec_depth: 1 - num_mlp_layers: 1 - mlp_ratio: 2 - out_dim: 3 - layer_norm_eps: 1.0e-5 - embedding_type: grid - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - 
learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 5000 - by_epoch: false - warmup_epoch: 5 - warmup_start_lr: 0.0 - weight_decay: 1.0e-5 - grad_clip: 1.0 - batch_size: 64 - pretrained_model_path: null - checkpoint_path: null - train_samples: 6500 - num_query_points: 1024 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - # compute_metric_by_batch: true - batch_size: 8 - rollout_steps: 4 - test_samples: 1000 - -# inference settings -INFER: - pretrained_model_path: null - export_path: ./inference/ns_cvit_32x32 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 8 - test_samples: 1000 - rollout_steps: 4 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ns_cvit_32x32/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA: + path: "./NavierStokes-2D" + components: ["u", "vx", "vy"] + prev_steps: 10 + pred_steps: 1 + downsample: 1 + rollout_steps: 4 + +# model settings +MODEL: + input_keys: [u, y] + output_keys: [s] + in_dim: 3 + coords_dim: 2 + spatial_dims: [10, 128, 128] + patch_size: [1, 32, 32] + grid_size: [128, 128] + latent_dim: 512 + emb_dim: 768 + depth: 15 + num_heads: 12 + dec_emb_dim: 512 + dec_num_heads: 16 + dec_depth: 1 + num_mlp_layers: 1 + mlp_ratio: 2 + out_dim: 3 + layer_norm_eps: 1.0e-5 + embedding_type: grid + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 5000 + by_epoch: false + warmup_epoch: 5 + warmup_start_lr: 0.0 + weight_decay: 1.0e-5 + grad_clip: 1.0 + batch_size: 64 + pretrained_model_path: null + checkpoint_path: null + train_samples: 6500 + num_query_points: 1024 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + # compute_metric_by_batch: true + batch_size: 8 + rollout_steps: 4 + test_samples: 1000 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/ns_cvit_32x32 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 8 + test_samples: 1000 + rollout_steps: 4 diff --git a/examples/ns/conf/ns_cvit_4x4.yaml b/examples/ns/conf/ns_cvit_4x4.yaml index e3a72c999e..7c33765489 100644 --- a/examples/ns/conf/ns_cvit_4x4.yaml +++ b/examples/ns/conf/ns_cvit_4x4.yaml @@ -1,113 
+1,113 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ns_cvit_4x4/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA: - path: "./NavierStokes-2D" - components: ["u", "vx", "vy"] - prev_steps: 10 - pred_steps: 1 - downsample: 1 - rollout_steps: 4 - -# model settings -MODEL: - input_keys: [u, y] - output_keys: [s] - in_dim: 3 - coords_dim: 2 - spatial_dims: [10, 128, 128] - patch_size: [1, 4, 4] - grid_size: [128, 128] - latent_dim: 512 - emb_dim: 768 - depth: 15 - num_heads: 12 - dec_emb_dim: 512 - dec_num_heads: 16 - dec_depth: 1 - num_mlp_layers: 1 - mlp_ratio: 2 - out_dim: 3 - layer_norm_eps: 1.0e-5 - embedding_type: grid - -# training settings -TRAIN: - epochs: 400 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 5000 - by_epoch: false - warmup_epoch: 5 - warmup_start_lr: 0.0 - weight_decay: 1.0e-5 - grad_clip: 10.0 - batch_size: 24 - pretrained_model_path: null - checkpoint_path: null - train_samples: 6500 - num_query_points: 1024 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - # compute_metric_by_batch: true - batch_size: 8 - rollout_steps: 4 - test_samples: 1000 - -# inference settings -INFER: - pretrained_model_path: null - export_path: ./inference/ns_cvit_4x4 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 8 - test_samples: 1000 - rollout_steps: 4 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ns_cvit_4x4/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA: + path: "./NavierStokes-2D" + components: ["u", "vx", "vy"] + prev_steps: 10 + pred_steps: 1 + downsample: 1 + rollout_steps: 4 + +# model settings +MODEL: + input_keys: [u, y] + output_keys: [s] + in_dim: 3 + coords_dim: 2 + spatial_dims: [10, 128, 128] + patch_size: [1, 4, 4] + grid_size: [128, 128] + 
latent_dim: 512 + emb_dim: 768 + depth: 15 + num_heads: 12 + dec_emb_dim: 512 + dec_num_heads: 16 + dec_depth: 1 + num_mlp_layers: 1 + mlp_ratio: 2 + out_dim: 3 + layer_norm_eps: 1.0e-5 + embedding_type: grid + +# training settings +TRAIN: + epochs: 400 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 5000 + by_epoch: false + warmup_epoch: 5 + warmup_start_lr: 0.0 + weight_decay: 1.0e-5 + grad_clip: 10.0 + batch_size: 24 + pretrained_model_path: null + checkpoint_path: null + train_samples: 6500 + num_query_points: 1024 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + # compute_metric_by_batch: true + batch_size: 8 + rollout_steps: 4 + test_samples: 1000 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/ns_cvit_4x4 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 8 + test_samples: 1000 + rollout_steps: 4 diff --git a/examples/ns/conf/ns_cvit_8x8.yaml b/examples/ns/conf/ns_cvit_8x8.yaml index 15361bbedb..356c265362 100644 --- a/examples/ns/conf/ns_cvit_8x8.yaml +++ b/examples/ns/conf/ns_cvit_8x8.yaml @@ -1,113 +1,113 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ns_cvit_8x8/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA: - path: "./NavierStokes-2D" - components: ["u", "vx", "vy"] - prev_steps: 10 - pred_steps: 1 - downsample: 1 - rollout_steps: 4 - -# model settings -MODEL: - input_keys: [u, y] - output_keys: [s] - in_dim: 3 - coords_dim: 2 - spatial_dims: [10, 128, 128] - patch_size: [1, 8, 8] - grid_size: [128, 128] - latent_dim: 512 - emb_dim: 768 - depth: 15 - num_heads: 12 - dec_emb_dim: 512 - dec_num_heads: 16 - dec_depth: 1 - num_mlp_layers: 1 - mlp_ratio: 2 - out_dim: 3 - layer_norm_eps: 1.0e-5 - embedding_type: grid - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 5000 - by_epoch: false - warmup_epoch: 5 - warmup_start_lr: 0.0 - weight_decay: 1.0e-5 - grad_clip: 1.0 - batch_size: 64 - pretrained_model_path: null - checkpoint_path: null - train_samples: 6500 - num_query_points: 1024 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - # compute_metric_by_batch: true - batch_size: 8 - rollout_steps: 4 - test_samples: 1000 - -# inference settings 
-INFER: - pretrained_model_path: null - export_path: ./inference/ns_cvit_8x8 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 8 - test_samples: 1000 - rollout_steps: 4 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ns_cvit_8x8/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA: + path: "./NavierStokes-2D" + components: ["u", "vx", "vy"] + prev_steps: 10 + pred_steps: 1 + downsample: 1 + rollout_steps: 4 + +# model settings +MODEL: + input_keys: [u, y] + output_keys: [s] + in_dim: 3 + coords_dim: 2 + spatial_dims: [10, 128, 128] + patch_size: [1, 8, 8] + grid_size: [128, 128] + latent_dim: 512 + emb_dim: 768 + depth: 15 + num_heads: 12 + dec_emb_dim: 512 + dec_num_heads: 16 + dec_depth: 1 + num_mlp_layers: 1 + mlp_ratio: 2 + out_dim: 3 + layer_norm_eps: 1.0e-5 + embedding_type: grid + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 5000 + by_epoch: false + warmup_epoch: 5 + warmup_start_lr: 0.0 + weight_decay: 1.0e-5 + grad_clip: 1.0 + batch_size: 64 + pretrained_model_path: null + checkpoint_path: null + train_samples: 6500 + num_query_points: 1024 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + # compute_metric_by_batch: true + batch_size: 8 + rollout_steps: 4 + test_samples: 1000 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/ns_cvit_8x8 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 8 + test_samples: 1000 + rollout_steps: 4 diff --git a/examples/ns/conf/ns_cvit_base_8x8.yaml b/examples/ns/conf/ns_cvit_base_8x8.yaml index 4135bc8778..d9fe61ef42 100644 --- a/examples/ns/conf/ns_cvit_base_8x8.yaml +++ b/examples/ns/conf/ns_cvit_base_8x8.yaml @@ -1,113 +1,113 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ns_cvit_base_8x8/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of 
logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA: - path: "./NavierStokes-2D" - components: ["u", "vx", "vy"] - prev_steps: 10 - pred_steps: 1 - downsample: 1 - rollout_steps: 4 - -# model settings -MODEL: - input_keys: [u, y] - output_keys: [s] - in_dim: 3 - coords_dim: 2 - spatial_dims: [10, 128, 128] - patch_size: [1, 8, 8] - grid_size: [128, 128] - latent_dim: 512 - emb_dim: 512 - depth: 10 - num_heads: 8 - dec_emb_dim: 512 - dec_num_heads: 16 - dec_depth: 1 - num_mlp_layers: 1 - mlp_ratio: 1 - out_dim: 3 - layer_norm_eps: 1.0e-5 - embedding_type: grid - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 5000 - by_epoch: false - warmup_epoch: 5 - warmup_start_lr: 0.0 - weight_decay: 1.0e-5 - grad_clip: 1.0 - batch_size: 64 - pretrained_model_path: null - checkpoint_path: null - train_samples: 6500 - num_query_points: 1024 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - # compute_metric_by_batch: true - batch_size: 8 - rollout_steps: 4 - test_samples: 1000 - -# inference settings -INFER: - pretrained_model_path: null - export_path: ./inference/ns_cvit_base_8x8 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 8 - test_samples: 1000 - rollout_steps: 4 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ns_cvit_base_8x8/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA: + path: "./NavierStokes-2D" + components: ["u", "vx", "vy"] + prev_steps: 10 + pred_steps: 1 + downsample: 1 + rollout_steps: 4 + +# model settings +MODEL: + input_keys: [u, y] + output_keys: [s] + in_dim: 3 + coords_dim: 2 + spatial_dims: [10, 128, 128] + patch_size: [1, 8, 8] + grid_size: [128, 128] + latent_dim: 512 + emb_dim: 512 + depth: 10 + num_heads: 8 + dec_emb_dim: 512 + dec_num_heads: 16 + dec_depth: 1 + num_mlp_layers: 1 + mlp_ratio: 1 + out_dim: 3 + layer_norm_eps: 1.0e-5 + embedding_type: grid + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + 
decay_steps: 5000 + by_epoch: false + warmup_epoch: 5 + warmup_start_lr: 0.0 + weight_decay: 1.0e-5 + grad_clip: 1.0 + batch_size: 64 + pretrained_model_path: null + checkpoint_path: null + train_samples: 6500 + num_query_points: 1024 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + # compute_metric_by_batch: true + batch_size: 8 + rollout_steps: 4 + test_samples: 1000 + +# inference settings +INFER: + pretrained_model_path: null + export_path: ./inference/ns_cvit_base_8x8 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 8 + test_samples: 1000 + rollout_steps: 4 diff --git a/examples/ns/conf/ns_cvit_small_8x8.yaml b/examples/ns/conf/ns_cvit_small_8x8.yaml index 7d1835b0ac..2f2da4f447 100644 --- a/examples/ns/conf/ns_cvit_small_8x8.yaml +++ b/examples/ns/conf/ns_cvit_small_8x8.yaml @@ -1,111 +1,111 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_ns_cvit_small_8x8/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 100 - -DATA: - path: "./NavierStokes-2D" - components: ["u", "vx", "vy"] - prev_steps: 10 - pred_steps: 1 - downsample: 1 - rollout_steps: 4 - -# model settings -MODEL: - input_keys: [u, y] - output_keys: [s] - in_dim: 3 - coords_dim: 2 - spatial_dims: [10, 128, 128] # t, h, w - grid_size: [128, 128] - latent_dim: 512 - emb_dim: 384 - patch_size: [1, 8, 8] - depth: 5 - num_heads: 6 - dec_emb_dim: 512 - dec_num_heads: 16 - dec_depth: 1 - num_mlp_layers: 1 - mlp_ratio: 1 - out_dim: 3 - embedding_type: grid - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: true - eval_freq: 1 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 5000 - by_epoch: false - warmup_epoch: 5 - warmup_start_lr: 0.0 - weight_decay: 1.0e-5 - grad_clip: 1.0 - batch_size: 64 - pretrained_model_path: null - checkpoint_path: null - train_samples: 6500 - num_query_points: 1024 - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 8 - test_samples: 1000 - rollout_steps: 4 - -# inference settings -INFER: - pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/cvit/ns_cvit_pretrained.pdparams - export_path: ./inference/ns_cvit_small_8x8 - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 8 - 
test_samples: 1000 - rollout_steps: 4 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_ns_cvit_small_8x8/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 100 + +DATA: + path: "./NavierStokes-2D" + components: ["u", "vx", "vy"] + prev_steps: 10 + pred_steps: 1 + downsample: 1 + rollout_steps: 4 + +# model settings +MODEL: + input_keys: [u, y] + output_keys: [s] + in_dim: 3 + coords_dim: 2 + spatial_dims: [10, 128, 128] # t, h, w + grid_size: [128, 128] + latent_dim: 512 + emb_dim: 384 + patch_size: [1, 8, 8] + depth: 5 + num_heads: 6 + dec_emb_dim: 512 + dec_num_heads: 16 + dec_depth: 1 + num_mlp_layers: 1 + mlp_ratio: 1 + out_dim: 3 + embedding_type: grid + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: true + eval_freq: 1 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 5000 + by_epoch: false + warmup_epoch: 5 + warmup_start_lr: 0.0 + weight_decay: 1.0e-5 + grad_clip: 1.0 + batch_size: 64 + pretrained_model_path: null + checkpoint_path: null + train_samples: 6500 + num_query_points: 1024 + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 8 + test_samples: 1000 + rollout_steps: 4 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/cvit/ns_cvit_pretrained.pdparams + export_path: ./inference/ns_cvit_small_8x8 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 8 + test_samples: 1000 + rollout_steps: 4 diff --git a/examples/ns/ns_cvit.py b/examples/ns/ns_cvit.py index bd4e1b4c9f..0188b49f4e 100644 --- a/examples/ns/ns_cvit.py +++ b/examples/ns/ns_cvit.py @@ -1,458 +1,458 @@ -""" -Reference: https://github.com/PredictiveIntelligenceLab/cvit/tree/main/ns/ -""" - -import os -import re -from os import path as osp -from typing import Dict -from typing import Sequence -from typing import Tuple - -import einops -import h5py -import hydra -import numpy as np -import paddle -import tqdm -from matplotlib import pyplot as plt -from numpy.lib.stride_tricks import sliding_window_view -from omegaconf import DictConfig - -import ppsci - -dtype = paddle.get_default_dtype() - - -# Construct the full dataset -def prepare_ns_dataset( - directory: str, - mode: str, - keys: Sequence[str], - prev_steps: int, - pred_steps: int, - num_samples: int, - downsample: int = 1, -): - # Use list comprehension for efficiency - file_names = [ - osp.join(directory, f) - for f in os.listdir(directory) - if re.match(f"^NavierStokes2D_{mode}", f) - ] - - # 
Initialize dictionaries to hold the inputs and outputs - data_dict = {key: [] for key in keys} - - num_files = len(file_names) - - f = h5py.File(file_names[0], "r") - s = f[mode][keys[0]].shape[0] - for i in tqdm.trange(min(num_files, num_samples // s + 1), desc="Reading files"): - with h5py.File(file_names[i], "r") as f: - data_group = f[mode] - - for key in keys: - # Use memory-mapping to reduce memory usage - data_dict[key].append(np.array(data_group[key], dtype=dtype)) - - for key in keys: - data_dict[key] = np.vstack(data_dict[key]) - - data = np.concatenate( - [np.expand_dims(arr, axis=-1) for arr in data_dict.values()], axis=-1 - ) - data = data[:num_samples, :, ::downsample, ::downsample, :] - - # Use sliding window to generate inputs and outputs - sliding_data = sliding_window_view( - data, window_shape=prev_steps + pred_steps, axis=1 - ) - sliding_data = einops.rearrange(sliding_data, "n m h w c s -> (n m) s h w c") - - inputs = sliding_data[:, :prev_steps, ...] - outputs = sliding_data[:, prev_steps : prev_steps + pred_steps, ...] - - return inputs, outputs # (B, T, H, W, C) (B, T', H, W, C) - - -def train(cfg: DictConfig): - # set model - model = ppsci.arch.CVit(**cfg.MODEL) - - # prepare training data - train_inputs, train_outputs = prepare_ns_dataset( - cfg.DATA.path, - "train", - cfg.DATA.components, - cfg.DATA.prev_steps, - cfg.DATA.pred_steps, - cfg.TRAIN.train_samples, - cfg.DATA.downsample, - ) - print("training input ", train_inputs.shape, "training label", train_outputs.shape) - train_outputs = einops.rearrange(train_outputs, "b t h w c -> b (t h w) c") - h, w = train_inputs.shape[2:4] - x_star = np.linspace(0, 1, h, dtype=dtype) - y_star = np.linspace(0, 1, w, dtype=dtype) - x_star, y_star = np.meshgrid(x_star, y_star, indexing="ij") - train_coords = np.hstack([x_star.flatten()[:, None], y_star.flatten()[:, None]]) - train_coords = np.broadcast_to( - train_coords[None, :], [len(train_inputs), train_outputs.shape[1], 2] - ) - - # set constraint - def random_query( - input_dict: Dict[str, np.ndarray], - label_dict: Dict[str, np.ndarray], - weight_dict: Dict[str, np.ndarray], - ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Dict[str, np.ndarray]]: - y_key = cfg.MODEL.input_keys[1] - s_key = cfg.MODEL.output_keys[0] - # random select coords and labels - npos = input_dict[y_key].shape[1] - assert cfg.TRAIN.num_query_points <= npos, ( - f"Number of query points({cfg.TRAIN.num_query_points}) must be " - f"less than or equal to number of positions({npos})." 
- ) - random_pos = np.random.choice(npos, cfg.TRAIN.num_query_points, replace=False) - input_dict[y_key] = input_dict[y_key][0, random_pos] - label_dict[s_key] = label_dict[s_key][:, random_pos] - return (input_dict, label_dict, weight_dict) - - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"u": train_inputs, "y": train_coords}, - "label": {"s": train_outputs}, - "transforms": [ - { - "FunctionalTransform": { - "transform_func": random_query, - }, - }, - ], - }, - "batch_size": cfg.TRAIN.batch_size, - "auto_collation": False, # NOTE: Explicitly disable auto collation - }, - output_expr={"s": lambda out: out["s"]}, - loss=ppsci.loss.MSELoss("mean"), - name="Sup", - ) - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set optimizer - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.AdamW( - lr_scheduler, - weight_decay=cfg.TRAIN.weight_decay, - grad_clip=paddle.nn.ClipGradByGlobalNorm(cfg.TRAIN.grad_clip), - )(model) - - # set validator - test_inputs, test_outputs = prepare_ns_dataset( - cfg.DATA.path, - "test", - cfg.DATA.components, - cfg.DATA.prev_steps, - cfg.DATA.pred_steps, - cfg.EVAL.test_samples, - cfg.DATA.downsample, - ) - print("testing input ", test_inputs.shape, "testing label", test_outputs.shape) - test_outputs = einops.rearrange(test_outputs, "b t h w c -> b (t h w) c") - h, w = test_inputs.shape[2:4] - x_star = np.linspace(0, 1, h, dtype=dtype) - y_star = np.linspace(0, 1, w, dtype=dtype) - x_star, y_star = np.meshgrid(x_star, y_star, indexing="ij") - test_coords = np.hstack([x_star.flatten()[:, None], y_star.flatten()[:, None]]) - test_coords = np.broadcast_to( - test_coords[None, :], [len(test_inputs), test_outputs.shape[1], 2] - ) - - def l2_err_func( - output_dict: Dict[str, np.ndarray], - label_dict: Dict[str, np.ndarray], - ) -> paddle.Tensor: - s_key = cfg.MODEL.output_keys[0] - l2_error = ( - (output_dict[s_key] - label_dict[s_key]).norm(axis=1) - / label_dict[s_key].norm(axis=1) - ).mean() # average along batch and channels - return {"s_l2_err": l2_error} - - s_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"u": test_inputs, "y": test_coords}, - "label": {"s": test_outputs}, - }, - "batch_size": cfg.EVAL.batch_size, - }, - loss=ppsci.loss.MSELoss("mean"), - metric={"s": ppsci.metric.FunctionalMetric(l2_err_func)}, - name="s_validator", - ) - validator = {s_validator.name: s_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - validator=validator, - optimizer=optimizer, - cfg=cfg, - ) - # train model - solver.train() - - -def evaluate(cfg: DictConfig): - # set model - model = ppsci.arch.CVit(**cfg.MODEL) - - # init validator - test_inputs, test_outputs = prepare_ns_dataset( - cfg.DATA.path, - "test", - cfg.DATA.components, - cfg.DATA.prev_steps, - cfg.DATA.pred_steps, - cfg.EVAL.test_samples, - cfg.DATA.downsample, - ) - print("test data", test_inputs.shape, test_outputs.shape) - test_outputs = einops.rearrange(test_outputs, "b t h w c -> b (t h w) c") - h, w = test_inputs.shape[2:4] - x_star = np.linspace(0, 1, h, dtype=dtype) - y_star = np.linspace(0, 1, w, dtype=dtype) - x_star, y_star = np.meshgrid(x_star, y_star, indexing="ij") - test_coords = np.hstack([x_star.flatten()[:, None], y_star.flatten()[:, None]]) - test_coords = np.broadcast_to( - test_coords[None, :], 
[len(test_inputs), test_outputs.shape[1], 2] - ) - - def l2_err_func( - output_dict: Dict[str, np.ndarray], - label_dict: Dict[str, np.ndarray], - ) -> paddle.Tensor: - s_key = cfg.MODEL.output_keys[0] - l2_error = ( - (output_dict[s_key] - label_dict[s_key]).norm(axis=1) - / label_dict[s_key].norm(axis=1) - ).mean() # average along batch and channels - return {"s_l2_err": l2_error} - - s_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"u": test_inputs, "y": test_coords}, - "label": {"s": test_outputs}, - }, - "batch_size": cfg.EVAL.batch_size, - }, - loss=ppsci.loss.MSELoss("mean"), - metric={"s_err": ppsci.metric.FunctionalMetric(l2_err_func)}, - name="s_validator", - ) - validator = {s_validator.name: s_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - validator=validator, - cfg=cfg, - ) - # train model - solver.eval() - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.CVit(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver(model, cfg=cfg) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - model.input_keys[0]: InputSpec( - [None, *cfg.MODEL.spatial_dims, cfg.MODEL.in_dim], - "float32", - name=model.input_keys[0], - ), - model.input_keys[1]: InputSpec( - [None, cfg.MODEL.coords_dim], "float32", name=model.input_keys[1] - ), - }, - ] - solver.export( - input_spec, cfg.INFER.export_path, with_onnx=False, ignore_modules=[einops] - ) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - test_inputs, test_outputs = prepare_ns_dataset( - cfg.DATA.path, - "test", - cfg.DATA.components, - cfg.DATA.prev_steps, - cfg.DATA.rollout_steps, - cfg.INFER.test_samples, - cfg.DATA.downsample, - ) - print("test data", test_inputs.shape, test_outputs.shape) - test_outputs = einops.rearrange(test_outputs, "b t h w c -> b (t h w) c") - h, w = test_inputs.shape[2:4] - x_star = np.linspace(0, 1, h, dtype=dtype) - y_star = np.linspace(0, 1, w, dtype=dtype) - x_star, y_star = np.meshgrid(x_star, y_star, indexing="ij") - test_coords = np.hstack([x_star.flatten()[:, None], y_star.flatten()[:, None]]) - s_key = cfg.MODEL.output_keys[0] - - def rollout(x, coords, prev_steps=2, pred_steps=1, rollout_steps=5): - b, _, h, w, c = x.shape - pred_list = [] - for k in range(rollout_steps): - input_dict = {"u": x, "y": coords} - pred = predictor.predict(input_dict, batch_size=None) - # mapping data to cfg.INFER.output_keys - pred = { - store_key: pred[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, pred.keys()) - }[s_key] - pred = pred.reshape(b, pred_steps, h, w, c) - pred_list.append(pred) - - # auto regression step - x = np.concatenate([x, pred], axis=1) - x = x[:, -prev_steps:] - - pred = np.concatenate(pred_list, axis=1) - return pred - - l2_error_list = [] - for i in range(0, len(test_inputs), cfg.INFER.batch_size): - st, ed = i, min(i + cfg.INFER.batch_size, len(test_inputs)) - pred = rollout( - test_inputs[st:ed], - test_coords, - prev_steps=cfg.DATA.prev_steps, - pred_steps=cfg.DATA.pred_steps, - rollout_steps=cfg.DATA.rollout_steps, - ) - pred = einops.rearrange(pred, "B T H W C-> B (T H W) C") - y = test_outputs[st:ed] - - diff_norms = np.linalg.norm(pred - y, axis=1) - y_norms = np.linalg.norm(y, axis=1) - - l2_error = (diff_norms / y_norms).mean(axis=1) - l2_error_list.append(l2_error) - - l2_error = np.mean(np.array(l2_error_list)) - 
print(f"{cfg.INFER.rollout_steps}-step l2_error:", l2_error) - - # plot prediction of the first sample - plt.rcParams.update( - { - # "text.usetex": True, # NOTE: This may cause error when using latex - "font.family": "serif", - "font.serif": ["Computer Modern Roman"], - "font.size": 24, - } - ) - pred = einops.rearrange( - pred, "B (T H W) C -> B T H W C", T=cfg.INFER.rollout_steps, W=w, H=h - ) - y = einops.rearrange( - y, "B (T H W) C -> B T H W C", T=cfg.INFER.rollout_steps, W=w, H=h - ) - - from mpl_toolkits.axes_grid1 import make_axes_locatable - - def plot(pred, ref, filename): - fig, axes = plt.subplots( - 3, - cfg.INFER.rollout_steps, - figsize=((cfg.INFER.rollout_steps) * 5, 3 * 5), - gridspec_kw={"width_ratios": [1, 1, 1, 1.2]}, - ) - - # plot reference - for t in range(cfg.INFER.rollout_steps): - res = pred[t] - im = axes[0, t].imshow( - res, cmap="turbo", vmin=res.min(), vmax=res.max(), aspect="auto" - ) - axes[0, t].set_yticks([]) - axes[0, t].xaxis.set_visible(False) - axes[0, 0].set_ylabel("Reference", size="large", labelpad=20) - divider = make_axes_locatable(axes[0, -1]) - cax = divider.append_axes("right", size="5%", pad=0.5) - fig.colorbar(im, cax=cax) - # plot prediction - for t in range(cfg.INFER.rollout_steps): - res = ref[t] - im = axes[1, t].imshow( - res, cmap="turbo", vmin=res.min(), vmax=res.max(), aspect="auto" - ) - axes[1, t].set_yticks([]) - axes[1, t].xaxis.set_visible(False) - axes[1, 0].set_ylabel("Prediction", size="large", labelpad=20) - divider = make_axes_locatable(axes[1, -1]) - cax = divider.append_axes("right", size="5%", pad=0.5) - fig.colorbar(im, cax=cax) - # plot abs error - for t in range(cfg.INFER.rollout_steps): - res = pred[t] - ref[t] - im = axes[2, t].imshow( - res, cmap="turbo", vmin=res.min(), vmax=res.max(), aspect="auto" - ) - axes[2, t].set_yticks([]) - axes[2, t].xaxis.set_visible(False) - axes[2, 0].set_ylabel("Abs. 
Error", size="large", labelpad=20) - divider = make_axes_locatable(axes[2, -1]) - cax = divider.append_axes("right", size="5%", pad=0.5) - fig.colorbar(im, cax=cax) - plt.tight_layout() - plt.savefig(filename) - plt.close() - - plot(pred[0, ..., 0], y[0, ..., 0], "./ns_u.png") - plot(pred[0, ..., 1], y[0, ..., 1], "./ns_ux.png") - plot(pred[0, ..., 2], y[0, ..., 2], "./ns_uy.png") - - -@hydra.main( - version_base=None, config_path="./conf", config_name="ns_cvit_small_8x8.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://github.com/PredictiveIntelligenceLab/cvit/tree/main/ns/ +""" + +import os +import re +from os import path as osp +from typing import Dict +from typing import Sequence +from typing import Tuple + +import einops +import h5py +import hydra +import numpy as np +import paddle +import tqdm +from matplotlib import pyplot as plt +from numpy.lib.stride_tricks import sliding_window_view +from omegaconf import DictConfig + +import ppsci + +dtype = paddle.get_default_dtype() + + +# Construct the full dataset +def prepare_ns_dataset( + directory: str, + mode: str, + keys: Sequence[str], + prev_steps: int, + pred_steps: int, + num_samples: int, + downsample: int = 1, +): + # Use list comprehension for efficiency + file_names = [ + osp.join(directory, f) + for f in os.listdir(directory) + if re.match(f"^NavierStokes2D_{mode}", f) + ] + + # Initialize dictionaries to hold the inputs and outputs + data_dict = {key: [] for key in keys} + + num_files = len(file_names) + + f = h5py.File(file_names[0], "r") + s = f[mode][keys[0]].shape[0] + for i in tqdm.trange(min(num_files, num_samples // s + 1), desc="Reading files"): + with h5py.File(file_names[i], "r") as f: + data_group = f[mode] + + for key in keys: + # Use memory-mapping to reduce memory usage + data_dict[key].append(np.array(data_group[key], dtype=dtype)) + + for key in keys: + data_dict[key] = np.vstack(data_dict[key]) + + data = np.concatenate( + [np.expand_dims(arr, axis=-1) for arr in data_dict.values()], axis=-1 + ) + data = data[:num_samples, :, ::downsample, ::downsample, :] + + # Use sliding window to generate inputs and outputs + sliding_data = sliding_window_view( + data, window_shape=prev_steps + pred_steps, axis=1 + ) + sliding_data = einops.rearrange(sliding_data, "n m h w c s -> (n m) s h w c") + + inputs = sliding_data[:, :prev_steps, ...] + outputs = sliding_data[:, prev_steps : prev_steps + pred_steps, ...] 
+ + return inputs, outputs # (B, T, H, W, C) (B, T', H, W, C) + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.CVit(**cfg.MODEL) + + # prepare training data + train_inputs, train_outputs = prepare_ns_dataset( + cfg.DATA.path, + "train", + cfg.DATA.components, + cfg.DATA.prev_steps, + cfg.DATA.pred_steps, + cfg.TRAIN.train_samples, + cfg.DATA.downsample, + ) + print("training input ", train_inputs.shape, "training label", train_outputs.shape) + train_outputs = einops.rearrange(train_outputs, "b t h w c -> b (t h w) c") + h, w = train_inputs.shape[2:4] + x_star = np.linspace(0, 1, h, dtype=dtype) + y_star = np.linspace(0, 1, w, dtype=dtype) + x_star, y_star = np.meshgrid(x_star, y_star, indexing="ij") + train_coords = np.hstack([x_star.flatten()[:, None], y_star.flatten()[:, None]]) + train_coords = np.broadcast_to( + train_coords[None, :], [len(train_inputs), train_outputs.shape[1], 2] + ) + + # set constraint + def random_query( + input_dict: Dict[str, np.ndarray], + label_dict: Dict[str, np.ndarray], + weight_dict: Dict[str, np.ndarray], + ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Dict[str, np.ndarray]]: + y_key = cfg.MODEL.input_keys[1] + s_key = cfg.MODEL.output_keys[0] + # random select coords and labels + npos = input_dict[y_key].shape[1] + assert cfg.TRAIN.num_query_points <= npos, ( + f"Number of query points({cfg.TRAIN.num_query_points}) must be " + f"less than or equal to number of positions({npos})." + ) + random_pos = np.random.choice(npos, cfg.TRAIN.num_query_points, replace=False) + input_dict[y_key] = input_dict[y_key][0, random_pos] + label_dict[s_key] = label_dict[s_key][:, random_pos] + return (input_dict, label_dict, weight_dict) + + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"u": train_inputs, "y": train_coords}, + "label": {"s": train_outputs}, + "transforms": [ + { + "FunctionalTransform": { + "transform_func": random_query, + }, + }, + ], + }, + "batch_size": cfg.TRAIN.batch_size, + "auto_collation": False, # NOTE: Explicitly disable auto collation + }, + output_expr={"s": lambda out: out["s"]}, + loss=ppsci.loss.MSELoss("mean"), + name="Sup", + ) + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.AdamW( + lr_scheduler, + weight_decay=cfg.TRAIN.weight_decay, + grad_clip=paddle.nn.ClipGradByGlobalNorm(cfg.TRAIN.grad_clip), + )(model) + + # set validator + test_inputs, test_outputs = prepare_ns_dataset( + cfg.DATA.path, + "test", + cfg.DATA.components, + cfg.DATA.prev_steps, + cfg.DATA.pred_steps, + cfg.EVAL.test_samples, + cfg.DATA.downsample, + ) + print("testing input ", test_inputs.shape, "testing label", test_outputs.shape) + test_outputs = einops.rearrange(test_outputs, "b t h w c -> b (t h w) c") + h, w = test_inputs.shape[2:4] + x_star = np.linspace(0, 1, h, dtype=dtype) + y_star = np.linspace(0, 1, w, dtype=dtype) + x_star, y_star = np.meshgrid(x_star, y_star, indexing="ij") + test_coords = np.hstack([x_star.flatten()[:, None], y_star.flatten()[:, None]]) + test_coords = np.broadcast_to( + test_coords[None, :], [len(test_inputs), test_outputs.shape[1], 2] + ) + + def l2_err_func( + output_dict: Dict[str, np.ndarray], + label_dict: Dict[str, np.ndarray], + ) -> paddle.Tensor: + s_key = cfg.MODEL.output_keys[0] + l2_error = ( + (output_dict[s_key] - label_dict[s_key]).norm(axis=1) 
+ / label_dict[s_key].norm(axis=1) + ).mean() # average along batch and channels + return {"s_l2_err": l2_error} + + s_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"u": test_inputs, "y": test_coords}, + "label": {"s": test_outputs}, + }, + "batch_size": cfg.EVAL.batch_size, + }, + loss=ppsci.loss.MSELoss("mean"), + metric={"s": ppsci.metric.FunctionalMetric(l2_err_func)}, + name="s_validator", + ) + validator = {s_validator.name: s_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + validator=validator, + optimizer=optimizer, + cfg=cfg, + ) + # train model + solver.train() + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.CVit(**cfg.MODEL) + + # init validator + test_inputs, test_outputs = prepare_ns_dataset( + cfg.DATA.path, + "test", + cfg.DATA.components, + cfg.DATA.prev_steps, + cfg.DATA.pred_steps, + cfg.EVAL.test_samples, + cfg.DATA.downsample, + ) + print("test data", test_inputs.shape, test_outputs.shape) + test_outputs = einops.rearrange(test_outputs, "b t h w c -> b (t h w) c") + h, w = test_inputs.shape[2:4] + x_star = np.linspace(0, 1, h, dtype=dtype) + y_star = np.linspace(0, 1, w, dtype=dtype) + x_star, y_star = np.meshgrid(x_star, y_star, indexing="ij") + test_coords = np.hstack([x_star.flatten()[:, None], y_star.flatten()[:, None]]) + test_coords = np.broadcast_to( + test_coords[None, :], [len(test_inputs), test_outputs.shape[1], 2] + ) + + def l2_err_func( + output_dict: Dict[str, np.ndarray], + label_dict: Dict[str, np.ndarray], + ) -> paddle.Tensor: + s_key = cfg.MODEL.output_keys[0] + l2_error = ( + (output_dict[s_key] - label_dict[s_key]).norm(axis=1) + / label_dict[s_key].norm(axis=1) + ).mean() # average along batch and channels + return {"s_l2_err": l2_error} + + s_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"u": test_inputs, "y": test_coords}, + "label": {"s": test_outputs}, + }, + "batch_size": cfg.EVAL.batch_size, + }, + loss=ppsci.loss.MSELoss("mean"), + metric={"s_err": ppsci.metric.FunctionalMetric(l2_err_func)}, + name="s_validator", + ) + validator = {s_validator.name: s_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator, + cfg=cfg, + ) + # train model + solver.eval() + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.CVit(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver(model, cfg=cfg) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + model.input_keys[0]: InputSpec( + [None, *cfg.MODEL.spatial_dims, cfg.MODEL.in_dim], + "float32", + name=model.input_keys[0], + ), + model.input_keys[1]: InputSpec( + [None, cfg.MODEL.coords_dim], "float32", name=model.input_keys[1] + ), + }, + ] + solver.export( + input_spec, cfg.INFER.export_path, with_onnx=False, ignore_modules=[einops] + ) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + test_inputs, test_outputs = prepare_ns_dataset( + cfg.DATA.path, + "test", + cfg.DATA.components, + cfg.DATA.prev_steps, + cfg.DATA.rollout_steps, + cfg.INFER.test_samples, + cfg.DATA.downsample, + ) + print("test data", test_inputs.shape, test_outputs.shape) + test_outputs = einops.rearrange(test_outputs, "b t h w c -> b (t h w) c") + h, w = test_inputs.shape[2:4] + x_star = np.linspace(0, 1, h, dtype=dtype) + y_star = np.linspace(0, 1, w, 
dtype=dtype) + x_star, y_star = np.meshgrid(x_star, y_star, indexing="ij") + test_coords = np.hstack([x_star.flatten()[:, None], y_star.flatten()[:, None]]) + s_key = cfg.MODEL.output_keys[0] + + def rollout(x, coords, prev_steps=2, pred_steps=1, rollout_steps=5): + b, _, h, w, c = x.shape + pred_list = [] + for k in range(rollout_steps): + input_dict = {"u": x, "y": coords} + pred = predictor.predict(input_dict, batch_size=None) + # mapping data to cfg.INFER.output_keys + pred = { + store_key: pred[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, pred.keys()) + }[s_key] + pred = pred.reshape(b, pred_steps, h, w, c) + pred_list.append(pred) + + # auto regression step + x = np.concatenate([x, pred], axis=1) + x = x[:, -prev_steps:] + + pred = np.concatenate(pred_list, axis=1) + return pred + + l2_error_list = [] + for i in range(0, len(test_inputs), cfg.INFER.batch_size): + st, ed = i, min(i + cfg.INFER.batch_size, len(test_inputs)) + pred = rollout( + test_inputs[st:ed], + test_coords, + prev_steps=cfg.DATA.prev_steps, + pred_steps=cfg.DATA.pred_steps, + rollout_steps=cfg.DATA.rollout_steps, + ) + pred = einops.rearrange(pred, "B T H W C-> B (T H W) C") + y = test_outputs[st:ed] + + diff_norms = np.linalg.norm(pred - y, axis=1) + y_norms = np.linalg.norm(y, axis=1) + + l2_error = (diff_norms / y_norms).mean(axis=1) + l2_error_list.append(l2_error) + + l2_error = np.mean(np.array(l2_error_list)) + print(f"{cfg.INFER.rollout_steps}-step l2_error:", l2_error) + + # plot prediction of the first sample + plt.rcParams.update( + { + # "text.usetex": True, # NOTE: This may cause error when using latex + "font.family": "serif", + "font.serif": ["Computer Modern Roman"], + "font.size": 24, + } + ) + pred = einops.rearrange( + pred, "B (T H W) C -> B T H W C", T=cfg.INFER.rollout_steps, W=w, H=h + ) + y = einops.rearrange( + y, "B (T H W) C -> B T H W C", T=cfg.INFER.rollout_steps, W=w, H=h + ) + + from mpl_toolkits.axes_grid1 import make_axes_locatable + + def plot(pred, ref, filename): + fig, axes = plt.subplots( + 3, + cfg.INFER.rollout_steps, + figsize=((cfg.INFER.rollout_steps) * 5, 3 * 5), + gridspec_kw={"width_ratios": [1, 1, 1, 1.2]}, + ) + + # plot reference + for t in range(cfg.INFER.rollout_steps): + res = pred[t] + im = axes[0, t].imshow( + res, cmap="turbo", vmin=res.min(), vmax=res.max(), aspect="auto" + ) + axes[0, t].set_yticks([]) + axes[0, t].xaxis.set_visible(False) + axes[0, 0].set_ylabel("Reference", size="large", labelpad=20) + divider = make_axes_locatable(axes[0, -1]) + cax = divider.append_axes("right", size="5%", pad=0.5) + fig.colorbar(im, cax=cax) + # plot prediction + for t in range(cfg.INFER.rollout_steps): + res = ref[t] + im = axes[1, t].imshow( + res, cmap="turbo", vmin=res.min(), vmax=res.max(), aspect="auto" + ) + axes[1, t].set_yticks([]) + axes[1, t].xaxis.set_visible(False) + axes[1, 0].set_ylabel("Prediction", size="large", labelpad=20) + divider = make_axes_locatable(axes[1, -1]) + cax = divider.append_axes("right", size="5%", pad=0.5) + fig.colorbar(im, cax=cax) + # plot abs error + for t in range(cfg.INFER.rollout_steps): + res = pred[t] - ref[t] + im = axes[2, t].imshow( + res, cmap="turbo", vmin=res.min(), vmax=res.max(), aspect="auto" + ) + axes[2, t].set_yticks([]) + axes[2, t].xaxis.set_visible(False) + axes[2, 0].set_ylabel("Abs. 
Error", size="large", labelpad=20) + divider = make_axes_locatable(axes[2, -1]) + cax = divider.append_axes("right", size="5%", pad=0.5) + fig.colorbar(im, cax=cax) + plt.tight_layout() + plt.savefig(filename) + plt.close() + + plot(pred[0, ..., 0], y[0, ..., 0], "./ns_u.png") + plot(pred[0, ..., 1], y[0, ..., 1], "./ns_ux.png") + plot(pred[0, ..., 2], y[0, ..., 2], "./ns_uy.png") + + +@hydra.main( + version_base=None, config_path="./conf", config_name="ns_cvit_small_8x8.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/nsfnet/VP_NSFNet2.py b/examples/nsfnet/VP_NSFNet2.py index 94419e5735..f65d2c12bd 100644 --- a/examples/nsfnet/VP_NSFNet2.py +++ b/examples/nsfnet/VP_NSFNet2.py @@ -1,503 +1,503 @@ -import hydra -import matplotlib.pyplot as plt -import numpy as np -import paddle -import scipy -from omegaconf import DictConfig -from scipy.interpolate import griddata - -import ppsci -from ppsci.utils import logger - - -@hydra.main(version_base=None, config_path="./conf", config_name="VP_NSFNet2.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -def load_data(path, N_TRAIN, NB_TRAIN, N0_TRAIN): - data = scipy.io.loadmat(path) - - U_star = data["U_star"].astype("float32") # N x 2 x T - P_star = data["p_star"].astype("float32") # N x T - t_star = data["t"].astype("float32") # T x 1 - X_star = data["X_star"].astype("float32") # N x 2 - - N = X_star.shape[0] - T = t_star.shape[0] - - # rearrange data - XX = np.tile(X_star[:, 0:1], (1, T)) # N x T - YY = np.tile(X_star[:, 1:2], (1, T)) # N x T - TT = np.tile(t_star, (1, N)).T # N x T - - UU = U_star[:, 0, :] # N x T - VV = U_star[:, 1, :] # N x T - PP = P_star # N x T - - x = XX.flatten()[:, None] # NT x 1 - y = YY.flatten()[:, None] # NT x 1 - t = TT.flatten()[:, None] # NT x 1 - - u = UU.flatten()[:, None] # NT x 1 - v = VV.flatten()[:, None] # NT x 1 - p = PP.flatten()[:, None] # NT x 1 - - data1 = np.concatenate([x, y, t, u, v, p], 1) - data2 = data1[:, :][data1[:, 2] <= 7] - data3 = data2[:, :][data2[:, 0] >= 1] - data4 = data3[:, :][data3[:, 0] <= 8] - data5 = data4[:, :][data4[:, 1] >= -2] - data_domain = data5[:, :][data5[:, 1] <= 2] - data_t0 = data_domain[:, :][data_domain[:, 2] == 0] - data_y1 = data_domain[:, :][data_domain[:, 0] == 1] - data_y8 = data_domain[:, :][data_domain[:, 0] == 8] - data_x = data_domain[:, :][data_domain[:, 1] == -2] - data_x2 = data_domain[:, :][data_domain[:, 1] == 2] - data_sup_b_train = np.concatenate([data_y1, data_y8, data_x, data_x2], 0) - idx = np.random.choice(data_domain.shape[0], N_TRAIN, replace=False) - - x_train = data_domain[idx, 0].reshape(data_domain[idx, 0].shape[0], 1) - y_train = data_domain[idx, 1].reshape(data_domain[idx, 1].shape[0], 1) - t_train = data_domain[idx, 2].reshape(data_domain[idx, 2].shape[0], 1) - - x0_train = data_t0[:, 0].reshape(data_t0[:, 0].shape[0], 1) - y0_train = data_t0[:, 1].reshape(data_t0[:, 1].shape[0], 1) - t0_train = data_t0[:, 2].reshape(data_t0[:, 2].shape[0], 1) - u0_train = data_t0[:, 3].reshape(data_t0[:, 3].shape[0], 1) - v0_train = data_t0[:, 4].reshape(data_t0[:, 
4].shape[0], 1) - - xb_train = data_sup_b_train[:, 0].reshape(data_sup_b_train[:, 0].shape[0], 1) - yb_train = data_sup_b_train[:, 1].reshape(data_sup_b_train[:, 1].shape[0], 1) - tb_train = data_sup_b_train[:, 2].reshape(data_sup_b_train[:, 2].shape[0], 1) - ub_train = data_sup_b_train[:, 3].reshape(data_sup_b_train[:, 3].shape[0], 1) - vb_train = data_sup_b_train[:, 4].reshape(data_sup_b_train[:, 4].shape[0], 1) - - # set test set - snap = np.array([0]) - x_star = X_star[:, 0:1] - y_star = X_star[:, 1:2] - t_star = TT[:, snap] - - u_star = U_star[:, 0, snap] - v_star = U_star[:, 1, snap] - p_star = P_star[:, snap] - - return ( - x_train, - y_train, - t_train, - x0_train, - y0_train, - t0_train, - u0_train, - v0_train, - xb_train, - yb_train, - tb_train, - ub_train, - vb_train, - x_star, - y_star, - t_star, - u_star, - v_star, - p_star, - ) - - -def train(cfg: DictConfig): - OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - - # set random seed for reproducibility - SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) - ITERS_PER_EPOCH = cfg.iters_per_epoch - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set the number of residual samples - N_TRAIN = cfg.ntrain - - # set the number of boundary samples - NB_TRAIN = cfg.nb_train - - # set the number of initial samples - N0_TRAIN = cfg.n0_train - - ( - x_train, - y_train, - t_train, - x0_train, - y0_train, - t0_train, - u0_train, - v0_train, - xb_train, - yb_train, - tb_train, - ub_train, - vb_train, - x_star, - y_star, - t_star, - u_star, - v_star, - p_star, - ) = load_data(cfg.data_dir, N_TRAIN, NB_TRAIN, N0_TRAIN) - # set dataloader config - train_dataloader_cfg_b = { - "dataset": { - "name": "NamedArrayDataset", - "input": {"x": xb_train, "y": yb_train, "t": tb_train}, - "label": {"u": ub_train, "v": vb_train}, - }, - "batch_size": NB_TRAIN, - "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - train_dataloader_cfg_0 = { - "dataset": { - "name": "NamedArrayDataset", - "input": {"x": x0_train, "y": y0_train, "t": t0_train}, - "label": {"u": u0_train, "v": v0_train}, - }, - "batch_size": N0_TRAIN, - "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - valida_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": {"x": x_star, "y": y_star, "t": t_star}, - "label": {"u": u_star, "v": v_star, "p": p_star}, - }, - "total_size": u_star.shape[0], - "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - geom = ppsci.geometry.PointCloud( - {"x": x_train, "y": y_train, "t": t_train}, ("x", "y", "t") - ) - - # supervised constraint s.t ||u-u_b|| - sup_constraint_b = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_b, - ppsci.loss.MSELoss("mean"), - name="Sup_b", - ) - - # supervised constraint s.t ||u-u_0|| - sup_constraint_0 = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg_0, - ppsci.loss.MSELoss("mean"), - name="Sup_0", - ) - - # set equation constarint s.t. 
||F(u)|| - equation = { - "NavierStokes": ppsci.equation.NavierStokes( - nu=1.0 / cfg.re, rho=1.0, dim=2, time=True - ), - } - - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom, - { - "dataset": {"name": "IterableNamedArrayDataset"}, - "batch_size": N_TRAIN, - "iters_per_epoch": ITERS_PER_EPOCH, - }, - ppsci.loss.MSELoss("mean"), - name="EQ", - ) - - constraint = { - pde_constraint.name: pde_constraint, - sup_constraint_b.name: sup_constraint_b, - sup_constraint_0.name: sup_constraint_0, - } - - residual_validator = ppsci.validate.SupervisedValidator( - valida_dataloader_cfg, - ppsci.loss.L2RelLoss(), - output_expr={ - "u": lambda d: d["u"], - "v": lambda d: d["v"], - "p": lambda d: d["p"] - d["p"].min() + p_star.min(), - }, - metric={"L2R": ppsci.metric.L2Rel()}, - name="Residual", - ) - - # wrap validator - validator = {residual_validator.name: residual_validator} - - # set optimizer - epoch_list = [5000, 5000, 50000, 50000] - new_epoch_list = [] - for i, _ in enumerate(epoch_list): - new_epoch_list.append(sum(epoch_list[: i + 1])) - EPOCHS = new_epoch_list[-1] - lr_list = [1e-3, 1e-4, 1e-5, 1e-6, 1e-7] - lr_scheduler = ppsci.optimizer.lr_scheduler.Piecewise( - EPOCHS, ITERS_PER_EPOCH, new_epoch_list, lr_list - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) - - logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info") - # initialize solver - solver = ppsci.solver.Solver( - model=model, - constraint=constraint, - optimizer=optimizer, - epochs=EPOCHS, - lr_scheduler=lr_scheduler, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=True, - log_freq=cfg.log_freq, - eval_freq=cfg.eval_freq, - seed=SEED, - equation=equation, - geom=geom, - validator=validator, - visualizer=None, - eval_with_no_grad=False, - ) - # train model - solver.train() - - # evaluate after finished training - solver.eval() - - solver.plot_loss_history() - - -def evaluate(cfg: DictConfig): - OUTPUT_DIR = cfg.output_dir - logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - - # set random seed for reproducibility - SEED = cfg.seed - ppsci.utils.misc.set_random_seed(SEED) - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) - - # set the number of residual samples - N_TRAIN = cfg.ntrain - - data = scipy.io.loadmat(cfg.data_dir) - - U_star = data["U_star"].astype("float32") # N x 2 x T - P_star = data["p_star"].astype("float32") # N x T - t_star = data["t"].astype("float32") # T x 1 - X_star = data["X_star"].astype("float32") # N x 2 - - N = X_star.shape[0] - T = t_star.shape[0] - - # rearrange data - XX = np.tile(X_star[:, 0:1], (1, T)) # N x T - YY = np.tile(X_star[:, 1:2], (1, T)) # N x T - TT = np.tile(t_star, (1, N)).T # N x T - - UU = U_star[:, 0, :] # N x T - VV = U_star[:, 1, :] # N x T - PP = P_star # N x T - - x = XX.flatten()[:, None] # NT x 1 - y = YY.flatten()[:, None] # NT x 1 - t = TT.flatten()[:, None] # NT x 1 - - u = UU.flatten()[:, None] # NT x 1 - v = VV.flatten()[:, None] # NT x 1 - p = PP.flatten()[:, None] # NT x 1 - - data1 = np.concatenate([x, y, t, u, v, p], 1) - data2 = data1[:, :][data1[:, 2] <= 7] - data3 = data2[:, :][data2[:, 0] >= 1] - data4 = data3[:, :][data3[:, 0] <= 8] - data5 = data4[:, :][data4[:, 1] >= -2] - data_domain = data5[:, :][data5[:, 1] <= 2] - - idx = np.random.choice(data_domain.shape[0], N_TRAIN, replace=False) - - x_train = data_domain[idx, 0].reshape(data_domain[idx, 
0].shape[0], 1) - y_train = data_domain[idx, 1].reshape(data_domain[idx, 1].shape[0], 1) - t_train = data_domain[idx, 2].reshape(data_domain[idx, 2].shape[0], 1) - - snap = np.array([0]) - x_star = X_star[:, 0:1] - y_star = X_star[:, 1:2] - t_star = TT[:, snap] - - u_star = U_star[:, 0, snap] - v_star = U_star[:, 1, snap] - p_star = P_star[:, snap] - - valida_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": {"x": x_star, "y": y_star, "t": t_star}, - "label": {"u": u_star, "v": v_star, "p": p_star}, - }, - "total_size": u_star.shape[0], - "batch_size": u_star.shape[0], - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - } - - geom = ppsci.geometry.PointCloud( - {"x": x_train, "y": y_train, "t": t_train}, ("x", "y", "t") - ) - - # set equation constarint s.t. ||F(u)|| - equation = { - "NavierStokes": ppsci.equation.NavierStokes(nu=0.01, rho=1.0, dim=2, time=True), - } - - residual_validator = ppsci.validate.SupervisedValidator( - valida_dataloader_cfg, - ppsci.loss.L2RelLoss(), - output_expr={ - "u": lambda d: d["u"], - "v": lambda d: d["v"], - "p": lambda d: d["p"] - d["p"].min() + p_star.min(), - }, - metric={"L2R": ppsci.metric.L2Rel()}, - name="Residual", - ) - - # wrap validator - validator = {residual_validator.name: residual_validator} - - solver = ppsci.solver.Solver( - model, - equation=equation, - geom=geom, - validator=validator, - ) - - # eval - ## eval validate set - solver.eval() - - ## eval every time - us = [] - vs = [] - for i in range(0, 70): - snap = np.array([i]) - x_star = X_star[:, 0:1] - y_star = X_star[:, 1:2] - t_star = TT[:, snap] - u_star = paddle.to_tensor(U_star[:, 0, snap]) - v_star = paddle.to_tensor(U_star[:, 1, snap]) - p_star = paddle.to_tensor(P_star[:, snap]) - - solution = solver.predict({"x": x_star, "y": y_star, "t": t_star}) - u_pred = solution["u"] - v_pred = solution["v"] - p_pred = solution["p"] - p_pred = p_pred - p_pred.mean() + p_star.mean() - error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) - error_v = np.linalg.norm(v_star - v_pred, 2) / np.linalg.norm(v_star, 2) - error_p = np.linalg.norm(p_star - p_pred, 2) / np.linalg.norm(p_star, 2) - us.append(error_u) - vs.append(error_v) - print("t={:.2f},relative error of u: {:.3e}".format(t_star[0].item(), error_u)) - print("t={:.2f},relative error of v: {:.3e}".format(t_star[0].item(), error_v)) - print("t={:.2f},relative error of p: {:.3e}".format(t_star[0].item(), error_p)) - - # plot - ## vorticity - grid_x, grid_y = np.mgrid[1.0:8.0:1000j, -2.0:2.0:1000j] - x_star = paddle.to_tensor(grid_x.reshape(-1, 1).astype("float32")) - y_star = paddle.to_tensor(grid_y.reshape(-1, 1).astype("float32")) - t_star = paddle.to_tensor((4.0) * np.ones(x_star.shape).astype("float32")) - x_star.stop_gradient = False - y_star.stop_gradient = False - t_star.stop_gradient = False - sol = model.forward({"x": x_star, "y": y_star, "t": t_star}) - u_y = paddle.grad(sol["u"], y_star) - v_x = paddle.grad(sol["v"], x_star) - w = np.array(v_x) - np.array(u_y) - w = w.reshape(1000, 1000) - l1 = np.arange(-4, 0, 0.25) - l2 = np.arange(0.25, 4, 0.25) - fig = plt.figure(figsize=(16, 8), dpi=80) - plt.contour(grid_x, grid_y, w, levels=np.concatenate([l1, l2]), cmap="jet") - plt.savefig(f"{OUTPUT_DIR}/vorticity_t=4.png") - - ## relative error - t_snap = [] - for i in range(70): - t_snap.append(i / 10) - fig, ax = plt.subplots(1, 2, figsize=(12, 3)) - ax[0].plot(t_snap, us) - ax[1].plot(t_snap, vs) - ax[0].set_title("u") - ax[1].set_title("v") - 
fig.savefig(f"{OUTPUT_DIR}/l2_error.png") - - ## velocity - grid_x, grid_y = np.mgrid[0.0:8.0:1000j, -2.0:2.0:1000j] - for i in range(70): - snap = np.array([i]) - x_star = X_star[:, 0:1] - y_star = X_star[:, 1:2] - t_star = TT[:, snap] - points = np.concatenate([x_star, y_star], -1) - u_star = U_star[:, 0, snap] - v_star = U_star[:, 1, snap] - - solution = solver.predict({"x": x_star, "y": y_star, "t": t_star}) - u_pred = solution["u"] - v_pred = solution["v"] - u_star_ = griddata(points, u_star, (grid_x, grid_y), method="cubic") - u_pred_ = griddata(points, u_pred, (grid_x, grid_y), method="cubic") - v_star_ = griddata(points, v_star, (grid_x, grid_y), method="cubic") - v_pred_ = griddata(points, v_pred, (grid_x, grid_y), method="cubic") - fig, ax = plt.subplots(2, 2, figsize=(12, 8)) - ax[0, 0].contourf(grid_x, grid_y, u_star_[:, :, 0]) - ax[0, 1].contourf(grid_x, grid_y, u_pred_[:, :, 0]) - ax[1, 0].contourf(grid_x, grid_y, v_star_[:, :, 0]) - ax[1, 1].contourf(grid_x, grid_y, v_pred_[:, :, 0]) - ax[0, 0].set_title("u_exact") - ax[0, 1].set_title("u_pred") - ax[1, 0].set_title("v_exact") - ax[1, 1].set_title("v_pred") - fig.savefig(OUTPUT_DIR + f"/velocity_t={t_star[i]}.png") - - -if __name__ == "__main__": - main() +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +import scipy +from omegaconf import DictConfig +from scipy.interpolate import griddata + +import ppsci +from ppsci.utils import logger + + +@hydra.main(version_base=None, config_path="./conf", config_name="VP_NSFNet2.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +def load_data(path, N_TRAIN, NB_TRAIN, N0_TRAIN): + data = scipy.io.loadmat(path) + + U_star = data["U_star"].astype("float32") # N x 2 x T + P_star = data["p_star"].astype("float32") # N x T + t_star = data["t"].astype("float32") # T x 1 + X_star = data["X_star"].astype("float32") # N x 2 + + N = X_star.shape[0] + T = t_star.shape[0] + + # rearrange data + XX = np.tile(X_star[:, 0:1], (1, T)) # N x T + YY = np.tile(X_star[:, 1:2], (1, T)) # N x T + TT = np.tile(t_star, (1, N)).T # N x T + + UU = U_star[:, 0, :] # N x T + VV = U_star[:, 1, :] # N x T + PP = P_star # N x T + + x = XX.flatten()[:, None] # NT x 1 + y = YY.flatten()[:, None] # NT x 1 + t = TT.flatten()[:, None] # NT x 1 + + u = UU.flatten()[:, None] # NT x 1 + v = VV.flatten()[:, None] # NT x 1 + p = PP.flatten()[:, None] # NT x 1 + + data1 = np.concatenate([x, y, t, u, v, p], 1) + data2 = data1[:, :][data1[:, 2] <= 7] + data3 = data2[:, :][data2[:, 0] >= 1] + data4 = data3[:, :][data3[:, 0] <= 8] + data5 = data4[:, :][data4[:, 1] >= -2] + data_domain = data5[:, :][data5[:, 1] <= 2] + data_t0 = data_domain[:, :][data_domain[:, 2] == 0] + data_y1 = data_domain[:, :][data_domain[:, 0] == 1] + data_y8 = data_domain[:, :][data_domain[:, 0] == 8] + data_x = data_domain[:, :][data_domain[:, 1] == -2] + data_x2 = data_domain[:, :][data_domain[:, 1] == 2] + data_sup_b_train = np.concatenate([data_y1, data_y8, data_x, data_x2], 0) + idx = np.random.choice(data_domain.shape[0], N_TRAIN, replace=False) + + x_train = data_domain[idx, 0].reshape(data_domain[idx, 0].shape[0], 1) + y_train = data_domain[idx, 1].reshape(data_domain[idx, 1].shape[0], 1) + t_train = data_domain[idx, 2].reshape(data_domain[idx, 2].shape[0], 1) + + x0_train = data_t0[:, 0].reshape(data_t0[:, 0].shape[0], 1) + y0_train = data_t0[:, 
1].reshape(data_t0[:, 1].shape[0], 1) + t0_train = data_t0[:, 2].reshape(data_t0[:, 2].shape[0], 1) + u0_train = data_t0[:, 3].reshape(data_t0[:, 3].shape[0], 1) + v0_train = data_t0[:, 4].reshape(data_t0[:, 4].shape[0], 1) + + xb_train = data_sup_b_train[:, 0].reshape(data_sup_b_train[:, 0].shape[0], 1) + yb_train = data_sup_b_train[:, 1].reshape(data_sup_b_train[:, 1].shape[0], 1) + tb_train = data_sup_b_train[:, 2].reshape(data_sup_b_train[:, 2].shape[0], 1) + ub_train = data_sup_b_train[:, 3].reshape(data_sup_b_train[:, 3].shape[0], 1) + vb_train = data_sup_b_train[:, 4].reshape(data_sup_b_train[:, 4].shape[0], 1) + + # set test set + snap = np.array([0]) + x_star = X_star[:, 0:1] + y_star = X_star[:, 1:2] + t_star = TT[:, snap] + + u_star = U_star[:, 0, snap] + v_star = U_star[:, 1, snap] + p_star = P_star[:, snap] + + return ( + x_train, + y_train, + t_train, + x0_train, + y0_train, + t0_train, + u0_train, + v0_train, + xb_train, + yb_train, + tb_train, + ub_train, + vb_train, + x_star, + y_star, + t_star, + u_star, + v_star, + p_star, + ) + + +def train(cfg: DictConfig): + OUTPUT_DIR = cfg.output_dir + logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") + + # set random seed for reproducibility + SEED = cfg.seed + ppsci.utils.misc.set_random_seed(SEED) + ITERS_PER_EPOCH = cfg.iters_per_epoch + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set the number of residual samples + N_TRAIN = cfg.ntrain + + # set the number of boundary samples + NB_TRAIN = cfg.nb_train + + # set the number of initial samples + N0_TRAIN = cfg.n0_train + + ( + x_train, + y_train, + t_train, + x0_train, + y0_train, + t0_train, + u0_train, + v0_train, + xb_train, + yb_train, + tb_train, + ub_train, + vb_train, + x_star, + y_star, + t_star, + u_star, + v_star, + p_star, + ) = load_data(cfg.data_dir, N_TRAIN, NB_TRAIN, N0_TRAIN) + # set dataloader config + train_dataloader_cfg_b = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"x": xb_train, "y": yb_train, "t": tb_train}, + "label": {"u": ub_train, "v": vb_train}, + }, + "batch_size": NB_TRAIN, + "iters_per_epoch": ITERS_PER_EPOCH, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + train_dataloader_cfg_0 = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"x": x0_train, "y": y0_train, "t": t0_train}, + "label": {"u": u0_train, "v": v0_train}, + }, + "batch_size": N0_TRAIN, + "iters_per_epoch": ITERS_PER_EPOCH, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + valida_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"x": x_star, "y": y_star, "t": t_star}, + "label": {"u": u_star, "v": v_star, "p": p_star}, + }, + "total_size": u_star.shape[0], + "batch_size": u_star.shape[0], + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + geom = ppsci.geometry.PointCloud( + {"x": x_train, "y": y_train, "t": t_train}, ("x", "y", "t") + ) + + # supervised constraint s.t ||u-u_b|| + sup_constraint_b = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_b, + ppsci.loss.MSELoss("mean"), + name="Sup_b", + ) + + # supervised constraint s.t ||u-u_0|| + sup_constraint_0 = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_0, + ppsci.loss.MSELoss("mean"), + name="Sup_0", + ) + + # set equation constarint s.t. 
||F(u)|| + equation = { + "NavierStokes": ppsci.equation.NavierStokes( + nu=1.0 / cfg.re, rho=1.0, dim=2, time=True + ), + } + + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom, + { + "dataset": {"name": "IterableNamedArrayDataset"}, + "batch_size": N_TRAIN, + "iters_per_epoch": ITERS_PER_EPOCH, + }, + ppsci.loss.MSELoss("mean"), + name="EQ", + ) + + constraint = { + pde_constraint.name: pde_constraint, + sup_constraint_b.name: sup_constraint_b, + sup_constraint_0.name: sup_constraint_0, + } + + residual_validator = ppsci.validate.SupervisedValidator( + valida_dataloader_cfg, + ppsci.loss.L2RelLoss(), + output_expr={ + "u": lambda d: d["u"], + "v": lambda d: d["v"], + "p": lambda d: d["p"] - d["p"].min() + p_star.min(), + }, + metric={"L2R": ppsci.metric.L2Rel()}, + name="Residual", + ) + + # wrap validator + validator = {residual_validator.name: residual_validator} + + # set optimizer + epoch_list = [5000, 5000, 50000, 50000] + new_epoch_list = [] + for i, _ in enumerate(epoch_list): + new_epoch_list.append(sum(epoch_list[: i + 1])) + EPOCHS = new_epoch_list[-1] + lr_list = [1e-3, 1e-4, 1e-5, 1e-6, 1e-7] + lr_scheduler = ppsci.optimizer.lr_scheduler.Piecewise( + EPOCHS, ITERS_PER_EPOCH, new_epoch_list, lr_list + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info") + # initialize solver + solver = ppsci.solver.Solver( + model=model, + constraint=constraint, + optimizer=optimizer, + epochs=EPOCHS, + lr_scheduler=lr_scheduler, + iters_per_epoch=ITERS_PER_EPOCH, + eval_during_train=True, + log_freq=cfg.log_freq, + eval_freq=cfg.eval_freq, + seed=SEED, + equation=equation, + geom=geom, + validator=validator, + visualizer=None, + eval_with_no_grad=False, + ) + # train model + solver.train() + + # evaluate after finished training + solver.eval() + + solver.plot_loss_history() + + +def evaluate(cfg: DictConfig): + OUTPUT_DIR = cfg.output_dir + logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") + + # set random seed for reproducibility + SEED = cfg.seed + ppsci.utils.misc.set_random_seed(SEED) + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) + + # set the number of residual samples + N_TRAIN = cfg.ntrain + + data = scipy.io.loadmat(cfg.data_dir) + + U_star = data["U_star"].astype("float32") # N x 2 x T + P_star = data["p_star"].astype("float32") # N x T + t_star = data["t"].astype("float32") # T x 1 + X_star = data["X_star"].astype("float32") # N x 2 + + N = X_star.shape[0] + T = t_star.shape[0] + + # rearrange data + XX = np.tile(X_star[:, 0:1], (1, T)) # N x T + YY = np.tile(X_star[:, 1:2], (1, T)) # N x T + TT = np.tile(t_star, (1, N)).T # N x T + + UU = U_star[:, 0, :] # N x T + VV = U_star[:, 1, :] # N x T + PP = P_star # N x T + + x = XX.flatten()[:, None] # NT x 1 + y = YY.flatten()[:, None] # NT x 1 + t = TT.flatten()[:, None] # NT x 1 + + u = UU.flatten()[:, None] # NT x 1 + v = VV.flatten()[:, None] # NT x 1 + p = PP.flatten()[:, None] # NT x 1 + + data1 = np.concatenate([x, y, t, u, v, p], 1) + data2 = data1[:, :][data1[:, 2] <= 7] + data3 = data2[:, :][data2[:, 0] >= 1] + data4 = data3[:, :][data3[:, 0] <= 8] + data5 = data4[:, :][data4[:, 1] >= -2] + data_domain = data5[:, :][data5[:, 1] <= 2] + + idx = np.random.choice(data_domain.shape[0], N_TRAIN, replace=False) + + x_train = data_domain[idx, 0].reshape(data_domain[idx, 
0].shape[0], 1) + y_train = data_domain[idx, 1].reshape(data_domain[idx, 1].shape[0], 1) + t_train = data_domain[idx, 2].reshape(data_domain[idx, 2].shape[0], 1) + + snap = np.array([0]) + x_star = X_star[:, 0:1] + y_star = X_star[:, 1:2] + t_star = TT[:, snap] + + u_star = U_star[:, 0, snap] + v_star = U_star[:, 1, snap] + p_star = P_star[:, snap] + + valida_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"x": x_star, "y": y_star, "t": t_star}, + "label": {"u": u_star, "v": v_star, "p": p_star}, + }, + "total_size": u_star.shape[0], + "batch_size": u_star.shape[0], + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + geom = ppsci.geometry.PointCloud( + {"x": x_train, "y": y_train, "t": t_train}, ("x", "y", "t") + ) + + # set equation constarint s.t. ||F(u)|| + equation = { + "NavierStokes": ppsci.equation.NavierStokes(nu=0.01, rho=1.0, dim=2, time=True), + } + + residual_validator = ppsci.validate.SupervisedValidator( + valida_dataloader_cfg, + ppsci.loss.L2RelLoss(), + output_expr={ + "u": lambda d: d["u"], + "v": lambda d: d["v"], + "p": lambda d: d["p"] - d["p"].min() + p_star.min(), + }, + metric={"L2R": ppsci.metric.L2Rel()}, + name="Residual", + ) + + # wrap validator + validator = {residual_validator.name: residual_validator} + + solver = ppsci.solver.Solver( + model, + equation=equation, + geom=geom, + validator=validator, + ) + + # eval + ## eval validate set + solver.eval() + + ## eval every time + us = [] + vs = [] + for i in range(0, 70): + snap = np.array([i]) + x_star = X_star[:, 0:1] + y_star = X_star[:, 1:2] + t_star = TT[:, snap] + u_star = paddle.to_tensor(U_star[:, 0, snap]) + v_star = paddle.to_tensor(U_star[:, 1, snap]) + p_star = paddle.to_tensor(P_star[:, snap]) + + solution = solver.predict({"x": x_star, "y": y_star, "t": t_star}) + u_pred = solution["u"] + v_pred = solution["v"] + p_pred = solution["p"] + p_pred = p_pred - p_pred.mean() + p_star.mean() + error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) + error_v = np.linalg.norm(v_star - v_pred, 2) / np.linalg.norm(v_star, 2) + error_p = np.linalg.norm(p_star - p_pred, 2) / np.linalg.norm(p_star, 2) + us.append(error_u) + vs.append(error_v) + print("t={:.2f},relative error of u: {:.3e}".format(t_star[0].item(), error_u)) + print("t={:.2f},relative error of v: {:.3e}".format(t_star[0].item(), error_v)) + print("t={:.2f},relative error of p: {:.3e}".format(t_star[0].item(), error_p)) + + # plot + ## vorticity + grid_x, grid_y = np.mgrid[1.0:8.0:1000j, -2.0:2.0:1000j] + x_star = paddle.to_tensor(grid_x.reshape(-1, 1).astype("float32")) + y_star = paddle.to_tensor(grid_y.reshape(-1, 1).astype("float32")) + t_star = paddle.to_tensor((4.0) * np.ones(x_star.shape).astype("float32")) + x_star.stop_gradient = False + y_star.stop_gradient = False + t_star.stop_gradient = False + sol = model.forward({"x": x_star, "y": y_star, "t": t_star}) + u_y = paddle.grad(sol["u"], y_star) + v_x = paddle.grad(sol["v"], x_star) + w = np.array(v_x) - np.array(u_y) + w = w.reshape(1000, 1000) + l1 = np.arange(-4, 0, 0.25) + l2 = np.arange(0.25, 4, 0.25) + fig = plt.figure(figsize=(16, 8), dpi=80) + plt.contour(grid_x, grid_y, w, levels=np.concatenate([l1, l2]), cmap="jet") + plt.savefig(f"{OUTPUT_DIR}/vorticity_t=4.png") + + ## relative error + t_snap = [] + for i in range(70): + t_snap.append(i / 10) + fig, ax = plt.subplots(1, 2, figsize=(12, 3)) + ax[0].plot(t_snap, us) + ax[1].plot(t_snap, vs) + ax[0].set_title("u") + ax[1].set_title("v") + 
fig.savefig(f"{OUTPUT_DIR}/l2_error.png") + + ## velocity + grid_x, grid_y = np.mgrid[0.0:8.0:1000j, -2.0:2.0:1000j] + for i in range(70): + snap = np.array([i]) + x_star = X_star[:, 0:1] + y_star = X_star[:, 1:2] + t_star = TT[:, snap] + points = np.concatenate([x_star, y_star], -1) + u_star = U_star[:, 0, snap] + v_star = U_star[:, 1, snap] + + solution = solver.predict({"x": x_star, "y": y_star, "t": t_star}) + u_pred = solution["u"] + v_pred = solution["v"] + u_star_ = griddata(points, u_star, (grid_x, grid_y), method="cubic") + u_pred_ = griddata(points, u_pred, (grid_x, grid_y), method="cubic") + v_star_ = griddata(points, v_star, (grid_x, grid_y), method="cubic") + v_pred_ = griddata(points, v_pred, (grid_x, grid_y), method="cubic") + fig, ax = plt.subplots(2, 2, figsize=(12, 8)) + ax[0, 0].contourf(grid_x, grid_y, u_star_[:, :, 0]) + ax[0, 1].contourf(grid_x, grid_y, u_pred_[:, :, 0]) + ax[1, 0].contourf(grid_x, grid_y, v_star_[:, :, 0]) + ax[1, 1].contourf(grid_x, grid_y, v_pred_[:, :, 0]) + ax[0, 0].set_title("u_exact") + ax[0, 1].set_title("u_pred") + ax[1, 0].set_title("v_exact") + ax[1, 1].set_title("v_pred") + fig.savefig(OUTPUT_DIR + f"/velocity_t={t_star[i]}.png") + + +if __name__ == "__main__": + main() diff --git a/examples/nsfnet/VP_NSFNet3.py b/examples/nsfnet/VP_NSFNet3.py index da0ea5f0c0..c6bb93e071 100644 --- a/examples/nsfnet/VP_NSFNet3.py +++ b/examples/nsfnet/VP_NSFNet3.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream import hydra import matplotlib.pyplot as plt import numpy as np @@ -539,3 +540,545 @@ def evaluate(cfg: DictConfig): if __name__ == "__main__": main() +======= +import hydra +import matplotlib.pyplot as plt +import numpy as np +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def analytic_solution_generate(x, y, z, t): + a, d = 1, 1 + u = ( + -a + * ( + np.exp(a * x) * np.sin(a * y + d * z) + + np.exp(a * z) * np.cos(a * x + d * y) + ) + * np.exp(-d * d * t) + ) + v = ( + -a + * ( + np.exp(a * y) * np.sin(a * z + d * x) + + np.exp(a * x) * np.cos(a * y + d * z) + ) + * np.exp(-d * d * t) + ) + w = ( + -a + * ( + np.exp(a * z) * np.sin(a * x + d * y) + + np.exp(a * y) * np.cos(a * z + d * x) + ) + * np.exp(-d * d * t) + ) + p = ( + -0.5 + * a + * a + * ( + np.exp(2 * a * x) + + np.exp(2 * a * y) + + np.exp(2 * a * z) + + 2 * np.sin(a * x + d * y) * np.cos(a * z + d * x) * np.exp(a * (y + z)) + + 2 * np.sin(a * y + d * z) * np.cos(a * x + d * y) * np.exp(a * (z + x)) + + 2 * np.sin(a * z + d * x) * np.cos(a * y + d * z) * np.exp(a * (x + y)) + ) + * np.exp(-2 * d * d * t) + ) + + return u, v, w, p + + +def generate_data(N_TRAIN): + # generate boundary data + x1 = np.linspace(-1, 1, 31) + y1 = np.linspace(-1, 1, 31) + z1 = np.linspace(-1, 1, 31) + t1 = np.linspace(0, 1, 11) + b0 = np.array([-1] * 900) + b1 = np.array([1] * 900) + + xt = np.tile(x1[0:30], 30) + yt = np.tile(y1[0:30], 30) + xt1 = np.tile(x1[1:31], 30) + yt1 = np.tile(y1[1:31], 30) + + yr = y1[0:30].repeat(30) + zr = z1[0:30].repeat(30) + yr1 = y1[1:31].repeat(30) + zr1 = z1[1:31].repeat(30) + + train1x = np.concatenate([b1, b0, xt1, xt, xt1, xt], 0).repeat(t1.shape[0]) + train1y = np.concatenate([yt, yt1, b1, b0, yr1, yr], 0).repeat(t1.shape[0]) + train1z = np.concatenate([zr, zr1, zr, zr1, b1, b0], 0).repeat(t1.shape[0]) + train1t = np.tile(t1, 5400) + + train1ub, train1vb, train1wb, train1pb = analytic_solution_generate( + train1x, train1y, train1z, train1t + ) + + xb_train = train1x.reshape(train1x.shape[0], 1).astype("float32") + yb_train = 
train1y.reshape(train1y.shape[0], 1).astype("float32") + zb_train = train1z.reshape(train1z.shape[0], 1).astype("float32") + tb_train = train1t.reshape(train1t.shape[0], 1).astype("float32") + ub_train = train1ub.reshape(train1ub.shape[0], 1).astype("float32") + vb_train = train1vb.reshape(train1vb.shape[0], 1).astype("float32") + wb_train = train1wb.reshape(train1wb.shape[0], 1).astype("float32") + + # generate initial data + x_0 = np.tile(x1, 31 * 31) + y_0 = np.tile(y1.repeat(31), 31) + z_0 = z1.repeat(31 * 31) + t_0 = np.array([0] * x_0.shape[0]) + u_0, v_0, w_0, p_0 = analytic_solution_generate(x_0, y_0, z_0, t_0) + u0_train = u_0.reshape(u_0.shape[0], 1).astype("float32") + v0_train = v_0.reshape(v_0.shape[0], 1).astype("float32") + w0_train = w_0.reshape(w_0.shape[0], 1).astype("float32") + x0_train = x_0.reshape(x_0.shape[0], 1).astype("float32") + y0_train = y_0.reshape(y_0.shape[0], 1).astype("float32") + z0_train = z_0.reshape(z_0.shape[0], 1).astype("float32") + t0_train = t_0.reshape(t_0.shape[0], 1).astype("float32") + + # unsupervised part + xx = np.random.randint(31, size=N_TRAIN) / 15 - 1 + yy = np.random.randint(31, size=N_TRAIN) / 15 - 1 + zz = np.random.randint(31, size=N_TRAIN) / 15 - 1 + tt = np.random.randint(11, size=N_TRAIN) / 10 + + x_train = xx.reshape(xx.shape[0], 1).astype("float32") + y_train = yy.reshape(yy.shape[0], 1).astype("float32") + z_train = zz.reshape(zz.shape[0], 1).astype("float32") + t_train = tt.reshape(tt.shape[0], 1).astype("float32") + + # test data + x_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") + y_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") + z_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") + t_star = (np.random.randint(11, size=(1000, 1)) / 10).astype("float32") + + u_star, v_star, w_star, p_star = analytic_solution_generate( + x_star, y_star, z_star, t_star + ) + + return ( + x_train, + y_train, + z_train, + t_train, + x0_train, + y0_train, + z0_train, + t0_train, + u0_train, + v0_train, + w0_train, + xb_train, + yb_train, + zb_train, + tb_train, + ub_train, + vb_train, + wb_train, + x_star, + y_star, + z_star, + t_star, + u_star, + v_star, + w_star, + p_star, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="VP_NSFNet3.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +def train(cfg: DictConfig): + OUTPUT_DIR = cfg.output_dir + logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") + + # set random seed for reproducibility + SEED = cfg.seed + ppsci.utils.misc.set_random_seed(SEED) + ITERS_PER_EPOCH = cfg.iters_per_epoch + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set the number of residual samples + N_TRAIN = cfg.ntrain + + # set the number of boundary samples + NB_TRAIN = cfg.nb_train + + # set the number of initial samples + N0_TRAIN = cfg.n0_train + ALPHA = cfg.alpha + BETA = cfg.beta + ( + x_train, + y_train, + z_train, + t_train, + x0_train, + y0_train, + z0_train, + t0_train, + u0_train, + v0_train, + w0_train, + xb_train, + yb_train, + zb_train, + tb_train, + ub_train, + vb_train, + wb_train, + x_star, + y_star, + z_star, + t_star, + u_star, + v_star, + w_star, + p_star, + ) = generate_data(N_TRAIN) + + # set dataloader config + train_dataloader_cfg_b = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"x": xb_train, "y": yb_train, "z": zb_train, "t": 
tb_train}, + "label": {"u": ub_train, "v": vb_train, "w": wb_train}, + }, + "batch_size": NB_TRAIN, + "iters_per_epoch": ITERS_PER_EPOCH, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + train_dataloader_cfg_0 = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"x": x0_train, "y": y0_train, "z": z0_train, "t": t0_train}, + "label": {"u": u0_train, "v": v0_train, "w": w0_train}, + }, + "batch_size": N0_TRAIN, + "iters_per_epoch": ITERS_PER_EPOCH, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + + valida_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"x": x_star, "y": y_star, "z": z_star, "t": t_star}, + "label": {"u": u_star, "v": v_star, "w": w_star, "p": p_star}, + }, + "total_size": u_star.shape[0], + "batch_size": u_star.shape[0], + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + geom = ppsci.geometry.PointCloud( + {"x": x_train, "y": y_train, "z": z_train, "t": t_train}, ("x", "y", "z", "t") + ) + + # supervised constraint s.t ||u-u_b|| + sup_constraint_b = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_b, + ppsci.loss.MSELoss("mean", ALPHA), + name="Sup_b", + ) + + # supervised constraint s.t ||u-u_0|| + sup_constraint_0 = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg_0, + ppsci.loss.MSELoss("mean", BETA), + name="Sup_0", + ) + + # set equation constarint s.t. ||F(u)|| + equation = { + "NavierStokes": ppsci.equation.NavierStokes( + nu=1.0 / cfg.re, rho=1.0, dim=3, time=True + ), + } + + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0}, + geom, + { + "dataset": {"name": "IterableNamedArrayDataset"}, + "batch_size": N_TRAIN, + "iters_per_epoch": ITERS_PER_EPOCH, + }, + ppsci.loss.MSELoss("mean"), + name="EQ", + ) + + # wrap constraint + constraint = { + pde_constraint.name: pde_constraint, + sup_constraint_b.name: sup_constraint_b, + sup_constraint_0.name: sup_constraint_0, + } + + residual_validator = ppsci.validate.SupervisedValidator( + valida_dataloader_cfg, + ppsci.loss.L2RelLoss(), + output_expr={ + "u": lambda d: d["u"], + "v": lambda d: d["v"], + "p": lambda d: d["p"] - d["p"].min() + p_star.min(), + }, + metric={"L2R": ppsci.metric.L2Rel()}, + name="Residual", + ) + + # wrap validator + validator = {residual_validator.name: residual_validator} + + # set optimizer + epoch_list = [5000, 5000, 50000, 50000] + new_epoch_list = [] + for i, _ in enumerate(epoch_list): + new_epoch_list.append(sum(epoch_list[: i + 1])) + EPOCHS = new_epoch_list[-1] + lr_list = [1e-3, 1e-4, 1e-5, 1e-6, 1e-7] + lr_scheduler = ppsci.optimizer.lr_scheduler.Piecewise( + EPOCHS, ITERS_PER_EPOCH, new_epoch_list, lr_list + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + logger.init_logger("ppsci", f"{OUTPUT_DIR}/eval.log", "info") + # initialize solver + solver = ppsci.solver.Solver( + model=model, + constraint=constraint, + optimizer=optimizer, + epochs=EPOCHS, + lr_scheduler=lr_scheduler, + iters_per_epoch=ITERS_PER_EPOCH, + eval_during_train=True, + log_freq=cfg.log_freq, + eval_freq=cfg.eval_freq, + seed=SEED, + equation=equation, + geom=geom, + validator=validator, + visualizer=None, + eval_with_no_grad=False, + ) + # train model + solver.train() + + # evaluate after finished training + solver.eval() + solver.plot_loss_history() + + +def evaluate(cfg: DictConfig): + OUTPUT_DIR 
= cfg.output_dir + logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") + + # set random seed for reproducibility + SEED = cfg.seed + ppsci.utils.misc.set_random_seed(SEED) + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + ppsci.utils.load_pretrain(model, cfg.pretrained_model_path) + + # set the number of residual samples + N_TRAIN = cfg.ntrain + + # unsupervised part + xx = np.random.randint(31, size=N_TRAIN) / 15 - 1 + yy = np.random.randint(31, size=N_TRAIN) / 15 - 1 + zz = np.random.randint(31, size=N_TRAIN) / 15 - 1 + tt = np.random.randint(11, size=N_TRAIN) / 10 + + x_train = xx.reshape(xx.shape[0], 1).astype("float32") + y_train = yy.reshape(yy.shape[0], 1).astype("float32") + z_train = zz.reshape(zz.shape[0], 1).astype("float32") + t_train = tt.reshape(tt.shape[0], 1).astype("float32") + + # test data + x_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") + y_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") + z_star = ((np.random.rand(1000, 1) - 1 / 2) * 2).astype("float32") + t_star = (np.random.randint(11, size=(1000, 1)) / 10).astype("float32") + + u_star, v_star, w_star, p_star = analytic_solution_generate( + x_star, y_star, z_star, t_star + ) + + valida_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": {"x": x_star, "y": y_star, "z": z_star, "t": t_star}, + "label": {"u": u_star, "v": v_star, "w": w_star, "p": p_star}, + }, + "total_size": u_star.shape[0], + "batch_size": u_star.shape[0], + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + } + geom = ppsci.geometry.PointCloud( + {"x": x_train, "y": y_train, "z": z_train, "t": t_train}, ("x", "y", "z", "t") + ) + + equation = { + "NavierStokes": ppsci.equation.NavierStokes( + nu=1.0 / cfg.re, rho=1.0, dim=3, time=True + ), + } + residual_validator = ppsci.validate.SupervisedValidator( + valida_dataloader_cfg, + ppsci.loss.L2RelLoss(), + output_expr={ + "u": lambda d: d["u"], + "v": lambda d: d["v"], + "p": lambda d: d["p"] - d["p"].min() + p_star.min(), + }, + metric={"L2R": ppsci.metric.L2Rel()}, + name="Residual", + ) + + # wrap validator + validator = {residual_validator.name: residual_validator} + + # load solver + solver = ppsci.solver.Solver( + model, + equation=equation, + geom=geom, + validator=validator, + ) + + # print the relative error + us = [] + vs = [] + ws = [] + for i in [0, 0.25, 0.5, 0.75, 1.0]: + x_star, y_star, z_star = np.mgrid[-1.0:1.0:100j, -1.0:1.0:100j, -1.0:1.0:100j] + x_star, y_star, z_star = ( + x_star.reshape(-1, 1), + y_star.reshape(-1, 1), + z_star.reshape(-1, 1), + ) + t_star = i * np.ones(x_star.shape) + u_star, v_star, w_star, p_star = analytic_solution_generate( + x_star, y_star, z_star, t_star + ) + + solution = solver.predict({"x": x_star, "y": y_star, "z": z_star, "t": t_star}) + u_pred = solution["u"] + v_pred = solution["v"] + w_pred = solution["w"] + p_pred = solution["p"] + p_pred = p_pred - p_pred.mean() + p_star.mean() + error_u = np.linalg.norm(u_star - u_pred, 2) / np.linalg.norm(u_star, 2) + error_v = np.linalg.norm(v_star - v_pred, 2) / np.linalg.norm(v_star, 2) + error_w = np.linalg.norm(w_star - w_pred, 2) / np.linalg.norm(w_star, 2) + error_p = np.linalg.norm(p_star - p_pred, 2) / np.linalg.norm(p_star, 2) + us.append(error_u) + vs.append(error_v) + ws.append(error_w) + print("t={:.2f},relative error of u: {:.3e}".format(t_star[0].item(), error_u)) + print("t={:.2f},relative error of v: {:.3e}".format(t_star[0].item(), error_v)) + print("t={:.2f},relative error of w: 
{:.3e}".format(t_star[0].item(), error_w)) + print("t={:.2f},relative error of p: {:.3e}".format(t_star[0].item(), error_p)) + + ## plot vorticity + grid_x, grid_y = np.mgrid[-1.0:1.0:1000j, -1.0:1.0:1000j] + grid_x = grid_x.reshape(-1, 1) + grid_y = grid_y.reshape(-1, 1) + grid_z = np.zeros(grid_x.shape) + T = np.linspace(0, 1, 101) + for i in T: + t_star = i * np.ones(x_star.shape) + u_star, v_star, w_star, p_star = analytic_solution_generate( + grid_x, grid_y, grid_z, t_star + ) + + solution = solver.predict({"x": grid_x, "y": grid_y, "z": grid_z, "t": t_star}) + u_pred = np.array(solution["u"]) + v_pred = np.array(solution["v"]) + w_pred = np.array(solution["w"]) + p_pred = p_pred - p_pred.mean() + p_star.mean() + fig, ax = plt.subplots(3, 2, figsize=(12, 12)) + ax[0, 0].contourf( + grid_x.reshape(1000, 1000), + grid_y.reshape(1000, 1000), + u_star.reshape(1000, 1000), + cmap=plt.get_cmap("RdYlBu"), + ) + ax[0, 1].contourf( + grid_x.reshape(1000, 1000), + grid_y.reshape(1000, 1000), + u_pred.reshape(1000, 1000), + cmap=plt.get_cmap("RdYlBu"), + ) + ax[1, 0].contourf( + grid_x.reshape(1000, 1000), + grid_y.reshape(1000, 1000), + v_star.reshape(1000, 1000), + cmap=plt.get_cmap("RdYlBu"), + ) + ax[1, 1].contourf( + grid_x.reshape(1000, 1000), + grid_y.reshape(1000, 1000), + v_pred.reshape(1000, 1000), + cmap=plt.get_cmap("RdYlBu"), + ) + ax[2, 0].contourf( + grid_x.reshape(1000, 1000), + grid_y.reshape(1000, 1000), + w_star.reshape(1000, 1000), + cmap=plt.get_cmap("RdYlBu"), + ) + ax[2, 1].contourf( + grid_x.reshape(1000, 1000), + grid_y.reshape(1000, 1000), + w_pred.reshape(1000, 1000), + cmap=plt.get_cmap("RdYlBu"), + ) + ax[0, 0].set_title("u_exact") + ax[0, 1].set_title("u_pred") + ax[1, 0].set_title("v_exact") + ax[1, 1].set_title("v_pred") + ax[2, 0].set_title("w_exact") + ax[2, 1].set_title("w_pred") + time = "%.3f" % i + fig.savefig(OUTPUT_DIR + f"/velocity_t={str(time)}.png") + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/nsfnet/conf/VP_NSFNet2.yaml b/examples/nsfnet/conf/VP_NSFNet2.yaml index 7356f6d055..587b3bf5df 100644 --- a/examples/nsfnet/conf/VP_NSFNet2.yaml +++ b/examples/nsfnet/conf/VP_NSFNet2.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream seed: 1234 output_dir: ./output_NSFNet2 @@ -28,3 +29,35 @@ log_freq: 5000 eval_freq: 100 pretrained_model_path: null +======= +seed: 1234 + +output_dir: ./output_NSFNet2 + +data_dir: ./data/cylinder_nektar_wake.mat + +iters_per_epoch: 1 + +MODEL: + input_keys: ["x", "y","t"] + output_keys: ["u", "v", "p"] + num_layers: 10 + hidden_size: 100 + activation: "tanh" + +mode: train + +ntrain: 140000 + +nb_train: 21300 + +n0_train: 5000 + +re: 100 + +log_freq: 5000 + +eval_freq: 100 + +pretrained_model_path: null +>>>>>>> Stashed changes diff --git a/examples/nsfnet/conf/VP_NSFNet3.yaml b/examples/nsfnet/conf/VP_NSFNet3.yaml index d725bf6777..85f19e5017 100644 --- a/examples/nsfnet/conf/VP_NSFNet3.yaml +++ b/examples/nsfnet/conf/VP_NSFNet3.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream seed: 1234 output_dir: ./output_NSFNet3 @@ -30,3 +31,37 @@ log_freq: 5000 eval_freq: 5000 pretrained_model_path: null +======= +seed: 1234 + +output_dir: ./output_NSFNet3 + +iters_per_epoch: 1 + +MODEL: + input_keys: ["x", "y","z","t"] + output_keys: ["u", "v", "w","p"] + num_layers: 10 + hidden_size: 100 + activation: "tanh" + +mode: train + +ntrain: 70000 + +nb_train: 59400 + +n0_train: 29791 + +alpha: 100 + +beta: 100 + +re: 1 + +log_freq: 5000 + +eval_freq: 5000 + +pretrained_model_path: null +>>>>>>> Stashed changes 
diff --git a/examples/operator_learning/conf/deeponet.yaml b/examples/operator_learning/conf/deeponet.yaml index 2dd3def696..cdf368a50d 100644 --- a/examples/operator_learning/conf/deeponet.yaml +++ b/examples/operator_learning/conf/deeponet.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -79,3 +80,85 @@ INFER: max_batch_size: 128 num_cpu_threads: 4 batch_size: 128 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_deeponet/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 +TRAIN_FILE_PATH: ./antiderivative_unaligned_train.npz +VALID_FILE_PATH: ./antiderivative_unaligned_test.npz + +# set working condition +NUM_Y: 1000 # number of y point for G(u) to be visualized + +# model settings +MODEL: + u_key: "u" + y_key: "y" + G_key: "G" + num_loc: 100 + num_features: 40 + branch_num_layers: 1 + trunk_num_layers: 1 + branch_hidden_size: 40 + trunk_hidden_size: 40 + branch_activation: relu + trunk_activation: relu + use_bias: true + +# training settings +TRAIN: + epochs: 10000 + iters_per_epoch: 1 + learning_rate: 1.0e-3 + save_freq: 500 + eval_freq: 500 + eval_during_train: true + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + +# inference settings +INFER: + pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/deeponet/deeponet_pretrained.pdparams" + export_path: ./inference/deeponet + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 128 + num_cpu_threads: 4 + batch_size: 128 +>>>>>>> Stashed changes diff --git a/examples/operator_learning/deeponet.py b/examples/operator_learning/deeponet.py index 331b4428dc..b998ac55a8 100644 --- a/examples/operator_learning/deeponet.py +++ b/examples/operator_learning/deeponet.py @@ -1,263 +1,263 @@ -""" -Reference: https://deepxde.readthedocs.io/en/latest/demos/operator/antiderivative_unaligned.html -""" - -import os -from os import path as osp -from typing import Callable -from typing import Tuple - -import hydra -import numpy as np -import paddle -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - model = ppsci.arch.DeepONet(**cfg.MODEL) - - # set dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "IterableNPZDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": ("u", "y"), - "label_keys": ("G",), - "alias_dict": {"u": "X_train0", "y": "X_train1", "G": "y_train"}, - }, - } - - sup_constraint = 
ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELoss(), - {"G": lambda out: out["G"]}, - ) - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "IterableNPZDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": ("u", "y"), - "label_keys": ("G",), - "alias_dict": {"u": "X_test0", "y": "X_test1", "G": "y_test"}, - }, - } - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - {"G": lambda out: out["G"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="G_eval", - ) - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - eval_freq=cfg.TRAIN.eval_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator, - eval_during_train=cfg.TRAIN.eval_during_train, - checkpoint_path=cfg.TRAIN.checkpoint_path, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - def predict_func(input_dict): - return solver.predict(input_dict, return_numpy=True)[cfg.MODEL.G_key] - - plot(cfg, predict_func) - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # set model - model = ppsci.arch.DeepONet(**cfg.MODEL) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "IterableNPZDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": ("u", "y"), - "label_keys": ("G",), - "alias_dict": {"u": "X_test0", "y": "X_test1", "G": "y_test"}, - }, - } - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - {"G": lambda out: out["G"]}, - metric={"L2Rel": ppsci.metric.L2Rel()}, - name="G_eval", - ) - validator = {sup_validator.name: sup_validator} - - solver = ppsci.solver.Solver( - model, - None, - cfg.output_dir, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - solver.eval() - - def predict_func(input_dict): - return solver.predict(input_dict, return_numpy=True)[cfg.MODEL.G_key] - - plot(cfg, predict_func) - - -def export(cfg: DictConfig): - # set model - model = ppsci.arch.DeepONet(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - - # export model - from paddle.static import InputSpec - - input_spec = [ - { - model.input_keys[0]: InputSpec( - [None, 1000], "float32", name=model.input_keys[0] - ), - model.input_keys[1]: InputSpec( - [None, 1], "float32", name=model.input_keys[1] - ), - } - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy import python_infer - - predictor = python_infer.GeneralPredictor(cfg) - - def predict_func(input_dict): - return next(iter(predictor.predict(input_dict).values())) - - plot(cfg, predict_func) - - -def plot(cfg: DictConfig, predict_func: Callable): - # visualize prediction for different functions u and corresponding G(u) - dtype = paddle.get_default_dtype() - - def generate_y_u_G_ref( - u_func: Callable, G_u_func: 
Callable - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """Generate discretized data of given function u and corresponding G(u). - - Args: - u_func (Callable): Function u. - G_u_func (Callable): Function G(u). - - Returns: - Tuple[np.ndarray, np.ndarray, np.ndarray]: Discretized data of u, y and G(u). - """ - x = np.linspace(0, 1, cfg.MODEL.num_loc, dtype=dtype).reshape( - [1, cfg.MODEL.num_loc] - ) - u = u_func(x) - u = np.tile(u, [cfg.NUM_Y, 1]) - - y = np.linspace(0, 1, cfg.NUM_Y, dtype=dtype).reshape([cfg.NUM_Y, 1]) - G_ref = G_u_func(y) - return u, y, G_ref - - func_u_G_pair = [ - # (title_string, func_u, func_G(u)), s.t. dG/dx == u and G(u)(0) = 0 - (r"$u=\cos(x), G(u)=sin(x$)", lambda x: np.cos(x), lambda y: np.sin(y)), # 1 - ( - r"$u=sec^2(x), G(u)=tan(x$)", - lambda x: (1 / np.cos(x)) ** 2, - lambda y: np.tan(y), - ), # 2 - ( - r"$u=sec(x)tan(x), G(u)=sec(x) - 1$", - lambda x: (1 / np.cos(x) * np.tan(x)), - lambda y: 1 / np.cos(y) - 1, - ), # 3 - ( - r"$u=1.5^x\ln{1.5}, G(u)=1.5^x-1$", - lambda x: 1.5**x * np.log(1.5), - lambda y: 1.5**y - 1, - ), # 4 - (r"$u=3x^2, G(u)=x^3$", lambda x: 3 * x**2, lambda y: y**3), # 5 - (r"$u=4x^3, G(u)=x^4$", lambda x: 4 * x**3, lambda y: y**4), # 6 - (r"$u=5x^4, G(u)=x^5$", lambda x: 5 * x**4, lambda y: y**5), # 7 - (r"$u=6x^5, G(u)=x^6$", lambda x: 5 * x**4, lambda y: y**5), # 8 - (r"$u=e^x, G(u)=e^x-1$", lambda x: np.exp(x), lambda y: np.exp(y) - 1), # 9 - ] - - os.makedirs(os.path.join(cfg.output_dir, "visual"), exist_ok=True) - for i, (title, u_func, G_func) in enumerate(func_u_G_pair): - u, y, G_ref = generate_y_u_G_ref(u_func, G_func) - G_pred = predict_func({"u": u, "y": y}) - plt.plot(y, G_pred, label=r"$G(u)(y)_{ref}$") - plt.plot(y, G_ref, label=r"$G(u)(y)_{pred}$") - plt.legend() - plt.title(title) - plt.savefig(os.path.join(cfg.output_dir, "visual", f"func_{i}_result.png")) - logger.message( - f"Saved result of function {i} to {cfg.output_dir}/visual/func_{i}_result.png" - ) - plt.clf() - plt.close() - - -@hydra.main(version_base=None, config_path="./conf", config_name="deeponet.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +""" +Reference: https://deepxde.readthedocs.io/en/latest/demos/operator/antiderivative_unaligned.html +""" + +import os +from os import path as osp +from typing import Callable +from typing import Tuple + +import hydra +import numpy as np +import paddle +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + model = ppsci.arch.DeepONet(**cfg.MODEL) + + # set dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "IterableNPZDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": ("u", "y"), + "label_keys": ("G",), + "alias_dict": {"u": "X_train0", "y": "X_train1", "G": "y_train"}, + }, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELoss(), + {"G": lambda out: out["G"]}, + ) + # wrap constraints together + constraint = 
{sup_constraint.name: sup_constraint} + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "IterableNPZDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": ("u", "y"), + "label_keys": ("G",), + "alias_dict": {"u": "X_test0", "y": "X_test1", "G": "y_test"}, + }, + } + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + {"G": lambda out: out["G"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="G_eval", + ) + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + eval_freq=cfg.TRAIN.eval_freq, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator, + eval_during_train=cfg.TRAIN.eval_during_train, + checkpoint_path=cfg.TRAIN.checkpoint_path, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + def predict_func(input_dict): + return solver.predict(input_dict, return_numpy=True)[cfg.MODEL.G_key] + + plot(cfg, predict_func) + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # set model + model = ppsci.arch.DeepONet(**cfg.MODEL) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "IterableNPZDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": ("u", "y"), + "label_keys": ("G",), + "alias_dict": {"u": "X_test0", "y": "X_test1", "G": "y_test"}, + }, + } + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + {"G": lambda out: out["G"]}, + metric={"L2Rel": ppsci.metric.L2Rel()}, + name="G_eval", + ) + validator = {sup_validator.name: sup_validator} + + solver = ppsci.solver.Solver( + model, + None, + cfg.output_dir, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + solver.eval() + + def predict_func(input_dict): + return solver.predict(input_dict, return_numpy=True)[cfg.MODEL.G_key] + + plot(cfg, predict_func) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.DeepONet(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + + # export model + from paddle.static import InputSpec + + input_spec = [ + { + model.input_keys[0]: InputSpec( + [None, 1000], "float32", name=model.input_keys[0] + ), + model.input_keys[1]: InputSpec( + [None, 1], "float32", name=model.input_keys[1] + ), + } + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy import python_infer + + predictor = python_infer.GeneralPredictor(cfg) + + def predict_func(input_dict): + return next(iter(predictor.predict(input_dict).values())) + + plot(cfg, predict_func) + + +def plot(cfg: DictConfig, predict_func: Callable): + # visualize prediction for different functions u and corresponding G(u) + dtype = paddle.get_default_dtype() + + def generate_y_u_G_ref( + u_func: Callable, G_u_func: Callable + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Generate discretized data of given function u and corresponding G(u). 
+ + Args: + u_func (Callable): Function u. + G_u_func (Callable): Function G(u). + + Returns: + Tuple[np.ndarray, np.ndarray, np.ndarray]: Discretized data of u, y and G(u). + """ + x = np.linspace(0, 1, cfg.MODEL.num_loc, dtype=dtype).reshape( + [1, cfg.MODEL.num_loc] + ) + u = u_func(x) + u = np.tile(u, [cfg.NUM_Y, 1]) + + y = np.linspace(0, 1, cfg.NUM_Y, dtype=dtype).reshape([cfg.NUM_Y, 1]) + G_ref = G_u_func(y) + return u, y, G_ref + + func_u_G_pair = [ + # (title_string, func_u, func_G(u)), s.t. dG/dx == u and G(u)(0) = 0 + (r"$u=\cos(x), G(u)=sin(x$)", lambda x: np.cos(x), lambda y: np.sin(y)), # 1 + ( + r"$u=sec^2(x), G(u)=tan(x$)", + lambda x: (1 / np.cos(x)) ** 2, + lambda y: np.tan(y), + ), # 2 + ( + r"$u=sec(x)tan(x), G(u)=sec(x) - 1$", + lambda x: (1 / np.cos(x) * np.tan(x)), + lambda y: 1 / np.cos(y) - 1, + ), # 3 + ( + r"$u=1.5^x\ln{1.5}, G(u)=1.5^x-1$", + lambda x: 1.5**x * np.log(1.5), + lambda y: 1.5**y - 1, + ), # 4 + (r"$u=3x^2, G(u)=x^3$", lambda x: 3 * x**2, lambda y: y**3), # 5 + (r"$u=4x^3, G(u)=x^4$", lambda x: 4 * x**3, lambda y: y**4), # 6 + (r"$u=5x^4, G(u)=x^5$", lambda x: 5 * x**4, lambda y: y**5), # 7 + (r"$u=6x^5, G(u)=x^6$", lambda x: 5 * x**4, lambda y: y**5), # 8 + (r"$u=e^x, G(u)=e^x-1$", lambda x: np.exp(x), lambda y: np.exp(y) - 1), # 9 + ] + + os.makedirs(os.path.join(cfg.output_dir, "visual"), exist_ok=True) + for i, (title, u_func, G_func) in enumerate(func_u_G_pair): + u, y, G_ref = generate_y_u_G_ref(u_func, G_func) + G_pred = predict_func({"u": u, "y": y}) + plt.plot(y, G_pred, label=r"$G(u)(y)_{ref}$") + plt.plot(y, G_ref, label=r"$G(u)(y)_{pred}$") + plt.legend() + plt.title(title) + plt.savefig(os.path.join(cfg.output_dir, "visual", f"func_{i}_result.png")) + logger.message( + f"Saved result of function {i} to {cfg.output_dir}/visual/func_{i}_result.png" + ) + plt.clf() + plt.close() + + +@hydra.main(version_base=None, config_path="./conf", config_name="deeponet.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/perovskite_solar_cells/conf/psc_nn.yaml b/examples/perovskite_solar_cells/conf/psc_nn.yaml new file mode 100644 index 0000000000..2b8ba88f0f --- /dev/null +++ b/examples/perovskite_solar_cells/conf/psc_nn.yaml @@ -0,0 +1,60 @@ +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + dir: outputs_allen_cahn_piratenet/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} + chdir: false + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + dir: ${hydra.run.dir} + subdir: ./ + +mode: "train" +seed: 42 +output_dir: ${hydra:run.dir} + +data: + train_features_path: "./data/cleaned/training.csv" + train_labels_path: "./data/cleaned/training_labels.csv" + val_features_path: "./data/cleaned/validation.csv" + val_labels_path: "./data/cleaned/validation_labels.csv" + +model: + num_layers: 4 + hidden_size: [128, 96, 64, 32] + activation: "relu" + input_dim: 2808 + output_dim: 1 + +TRAIN: + epochs: 10 + search_epochs: 3 + batch_size: 64 + learning_rate: 0.001 + 
eval_during_train: true + eval_freq: 5 + save_freq: 10 + log_freq: 50 + lr_scheduler: + gamma: 0.95 + decay_steps: 5 + warmup_epoch: 2 + warmup_start_lr: 1.0e-6 + +eval: + batch_size: 64 + eval_with_no_grad: true + pretrained_model_path: null + log_freq: 50 diff --git a/examples/perovskite_solar_cells/psc_nn.py b/examples/perovskite_solar_cells/psc_nn.py new file mode 100644 index 0000000000..92cac686d2 --- /dev/null +++ b/examples/perovskite_solar_cells/psc_nn.py @@ -0,0 +1,364 @@ +import os +from os import path as osp + +import hydra +import numpy as np +import optuna +import paddle +import pandas as pd +from matplotlib import pyplot as plt +from omegaconf import DictConfig +from sklearn.metrics import mean_absolute_percentage_error +from sklearn.metrics import mean_squared_error +from sklearn.metrics import r2_score +from sklearn.model_selection import train_test_split + +import ppsci +from ppsci.constraint import SupervisedConstraint +from ppsci.optimizer import lr_scheduler +from ppsci.optimizer import optimizer +from ppsci.solver import Solver +from ppsci.validate import SupervisedValidator + + +def weighted_loss(output_dict, target_dict, weight_dict=None): + pred = output_dict["target"] + true = target_dict["target"] + epsilon = 1e-06 + n = len(true) + weights = true / (paddle.sum(x=true) + epsilon) + squared = (true - pred) ** 2 + weighted = squared * weights + loss = paddle.sum(x=weighted) / n + return {"weighted_mse": loss} + + +def create_tensor_dict(X, y): + """Create Tensor Dictionary for Input and Labels""" + return { + "input": paddle.to_tensor(X.values, dtype="float32"), + "label": {"target": paddle.to_tensor(y.values, dtype="float32")}, + } + + +def create_constraint(input_dict, batch_size, shuffle=True): + """Create supervision constraints""" + return SupervisedConstraint( + dataloader_cfg={ + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": input_dict["input"]}, + "label": input_dict["label"], + }, + "batch_size": batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": shuffle, + }, + }, + loss=weighted_loss, + output_expr={"target": lambda out: out["target"]}, + name="train_constraint", + ) + + +def create_validator(input_dict, batch_size, name="validator"): + """Create an evaluator""" + return SupervisedValidator( + dataloader_cfg={ + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": input_dict["input"]}, + "label": input_dict["label"], + }, + "batch_size": batch_size, + }, + loss=weighted_loss, + output_expr={"target": lambda out: out["target"]}, + metric={"RMSE": ppsci.metric.RMSE(), "MAE": ppsci.metric.MAE()}, + name=name, + ) + + +def create_optimizer(model, optimizer_name, lr, train_cfg, data_size): + """Create optimizer and learning rate scheduler""" + schedule = lr_scheduler.ExponentialDecay( + epochs=train_cfg.epochs, + iters_per_epoch=data_size // train_cfg.batch_size, + learning_rate=lr, + gamma=train_cfg.lr_scheduler.gamma, + decay_steps=train_cfg.lr_scheduler.decay_steps, + warmup_epoch=train_cfg.lr_scheduler.warmup_epoch, + warmup_start_lr=train_cfg.lr_scheduler.warmup_start_lr, + )() + + if optimizer_name == "Adam": + return optimizer.Adam(learning_rate=schedule)(model) + elif optimizer_name == "RMSProp": + return optimizer.RMSProp(learning_rate=schedule)(model) + else: + return optimizer.SGD(learning_rate=schedule)(model) + + +def define_model(trial, input_dim, output_dim): + n_layers = trial.suggest_int("n_layers", 4, 6) + hidden_sizes = [] + for i in range(n_layers): + out_features = 
trial.suggest_int(f"n_units_l{i}", 10, input_dim // 2) + hidden_sizes.append(out_features) + + model = ppsci.arch.MLP( + input_keys=("input",), + output_keys=("target",), + num_layers=None, + hidden_size=hidden_sizes, + activation="relu", + input_dim=input_dim, + output_dim=output_dim, + ) + return model + + +def train(cfg: DictConfig): + # Read and preprocess data + X_train = pd.read_csv(cfg.data.train_features_path) + y_train = pd.read_csv(cfg.data.train_labels_path) + X_val = pd.read_csv(cfg.data.val_features_path) + y_val = pd.read_csv(cfg.data.val_labels_path) + + for col in X_train.columns: + if "[" in col or "]" in col: + old_name = col + new_name = col.replace("[", "(").replace("]", ")") + X_train = X_train.rename(columns={old_name: new_name}) + X_val = X_val.rename(columns={old_name: new_name}) + + X_train, X_verif, y_train, y_verif = train_test_split( + X_train, y_train, test_size=0.1, random_state=42 + ) + + for df in [X_train, y_train, X_verif, y_verif, X_val, y_val]: + df.reset_index(drop=True, inplace=True) + + def objective(trial): + model = define_model(trial, cfg.model.input_dim, cfg.model.output_dim) + + optimizer_name = trial.suggest_categorical( + "optimizer", ["Adam", "RMSProp", "SGD"] + ) + lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True) + + train_dict = create_tensor_dict(X_train, y_train) + verif_dict = create_tensor_dict(X_verif, y_verif) + + opt = create_optimizer(model, optimizer_name, lr, cfg.TRAIN, len(X_train)) + + train_constraint = create_constraint(train_dict, cfg.TRAIN.batch_size) + verif_validator = create_validator( + verif_dict, cfg.eval.batch_size, "verif_validator" + ) + + solver = Solver( + model=model, + constraint={"train": train_constraint}, + optimizer=opt, + validator={"verif": verif_validator}, + output_dir=cfg.output_dir, + epochs=cfg.TRAIN.search_epochs, + iters_per_epoch=len(X_train) // cfg.TRAIN.batch_size, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + save_freq=cfg.TRAIN.save_freq, + eval_with_no_grad=cfg.eval.eval_with_no_grad, + log_freq=cfg.TRAIN.log_freq, + ) + + solver.train() + + verif_preds = solver.predict({"input": verif_dict["input"]}, return_numpy=True)[ + "target" + ] + + verif_rmse = np.sqrt(mean_squared_error(y_verif.values, verif_preds)) + + return verif_rmse + + study = optuna.create_study() + study.optimize(objective, n_trials=50) + + best_params = study.best_trial.params + print("\nBest hyperparameters: " + str(best_params)) + + # Save the optimal model structure + hidden_sizes = [] + for i in range(best_params["n_layers"]): + hidden_sizes.append(best_params[f"n_units_l{i}"]) + + # Create and train the final model + final_model = define_model( + study.best_trial, cfg.model.input_dim, cfg.model.output_dim + ) + opt = create_optimizer( + final_model, + best_params["optimizer"], + best_params["lr"], + cfg.TRAIN, + len(X_train), + ) + + train_dict = create_tensor_dict(X_train, y_train) + val_dict = create_tensor_dict(X_val, y_val) + + train_constraint = create_constraint(train_dict, cfg.TRAIN.batch_size) + val_validator = create_validator(val_dict, cfg.eval.batch_size, "val_validator") + + solver = Solver( + model=final_model, + constraint={"train": train_constraint}, + optimizer=opt, + validator={"valid": val_validator}, + output_dir=cfg.output_dir, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=len(X_train) // cfg.TRAIN.batch_size, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + save_freq=cfg.TRAIN.save_freq, + 
eval_with_no_grad=cfg.eval.eval_with_no_grad, + log_freq=cfg.TRAIN.log_freq, + ) + + solver.train() + + # Save model structure and weights + model_dict = { + "state_dict": final_model.state_dict(), + "hidden_size": hidden_sizes, + "n_layers": best_params["n_layers"], + "optimizer": best_params["optimizer"], + "lr": best_params["lr"], + } + paddle.save( + model_dict, os.path.join(cfg.output_dir, "checkpoints", "best_model.pdparams") + ) + print( + "Saved model structure and weights to " + + os.path.join(cfg.output_dir, "checkpoints", "best_model.pdparams") + ) + + solver.plot_loss_history(by_epoch=True, smooth_step=1) + solver.eval() + + visualize_results(solver, X_val, y_val, cfg.output_dir) + + +def evaluate(cfg: DictConfig): + # Read and preprocess data + X_val = pd.read_csv(cfg.data.val_features_path) + y_val = pd.read_csv(cfg.data.val_labels_path) + + for col in X_val.columns: + if "[" in col or "]" in col: + old_name = col + new_name = col.replace("[", "(").replace("]", ")") + X_val = X_val.rename(columns={old_name: new_name}) + + # Loading model structure and weights + print(f"Loading model from {cfg.eval.pretrained_model_path}") + model_dict = paddle.load(cfg.eval.pretrained_model_path) + hidden_size = model_dict["hidden_size"] + print(f"Loaded model structure with hidden sizes: {hidden_size}") + + model = ppsci.arch.MLP( + input_keys=("input",), + output_keys=("target",), + num_layers=None, + hidden_size=hidden_size, + activation="relu", + input_dim=cfg.model.input_dim, + output_dim=cfg.model.output_dim, + ) + + # Load model weights + model.set_state_dict(model_dict["state_dict"]) + print("Successfully loaded model weights") + + valid_dict = create_tensor_dict(X_val, y_val) + valid_validator = create_validator( + valid_dict, cfg.eval.batch_size, "valid_validator" + ) + + solver = Solver( + model=model, + output_dir=cfg.output_dir, + validator={"valid": valid_validator}, + eval_with_no_grad=cfg.eval.eval_with_no_grad, + ) + + # evaluation model + print("Evaluating model...") + solver.eval() + + # Generate prediction results + predictions = solver.predict({"input": valid_dict["input"]}, return_numpy=True)[ + "target" + ] + + # Calculate multiple evaluation indicators + rmse = np.sqrt(mean_squared_error(y_val.values, predictions)) + r2 = r2_score(y_val.values, predictions) + mape = mean_absolute_percentage_error(y_val.values, predictions) + + print("Evaluation metrics:") + print(f"RMSE: {rmse:.5f}") + print(f"R2 Score: {r2:.5f}") + print(f"MAPE: {mape:.5f}") + + # Visualization results + print("Generating visualization...") + visualize_results(solver, X_val, y_val, cfg.output_dir) + print("Evaluation completed.") + + +def visualize_results(solver, X_val, y_val, output_dir): + pred_dict = solver.predict( + {"input": paddle.to_tensor(X_val.values, dtype="float32")}, return_numpy=True + ) + val_preds = pred_dict["target"] + val_true = y_val.values + + plt.figure(figsize=(10, 6)) + plt.grid(True, linestyle="--", alpha=0.7) + plt.hist(val_true, bins=30, alpha=0.6, label="True Jsc", color="tab:blue") + plt.hist(val_preds, bins=30, alpha=0.6, label="Predicted Jsc", color="orange") + + pred_mean = np.mean(val_preds) + pred_std = np.std(val_preds) + plt.axvline(pred_mean, color="black", linestyle="--") + plt.axvline(pred_mean + pred_std, color="red", linestyle="--") + plt.axvline(pred_mean - pred_std, color="red", linestyle="--") + + val_rmse = np.sqrt(mean_squared_error(val_true, val_preds)) + plt.title(f"Distribution of True Jsc vs Pred Jsc: RMSE {val_rmse:.5f}", pad=20) + plt.xlabel("Jsc 
(mA/cm²)") + plt.ylabel("Counts") + plt.legend(fontsize=10) + plt.tight_layout() + plt.savefig( + osp.join(output_dir, "jsc_distribution.png"), dpi=300, bbox_inches="tight" + ) + plt.close() + + +@hydra.main(version_base=None, config_path="./conf", config_name="psc_nn.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/perovskite_solar_cells/psc_nn_overview.png b/examples/perovskite_solar_cells/psc_nn_overview.png new file mode 100644 index 0000000000..f8afd96416 Binary files /dev/null and b/examples/perovskite_solar_cells/psc_nn_overview.png differ diff --git a/examples/perovskite_solar_cells/requirements.txt b/examples/perovskite_solar_cells/requirements.txt new file mode 100644 index 0000000000..a45c8c6d76 --- /dev/null +++ b/examples/perovskite_solar_cells/requirements.txt @@ -0,0 +1,10 @@ +paddlepaddle-gpu>=3.0.0 +paddlesci>=0.0.1 +numpy>=1.26.0 +pandas>=2.2.0 +matplotlib>=3.9.0 +scikit-learn>=1.4.0 +hydra-core>=1.3.0 +omegaconf>=2.3.0 +optuna>=4.0.0 +h5py>=3.12.0 \ No newline at end of file diff --git a/examples/perovskite_solar_cells/solar_cell_pretrained.pdparams b/examples/perovskite_solar_cells/solar_cell_pretrained.pdparams new file mode 100644 index 0000000000..b22937a19b Binary files /dev/null and b/examples/perovskite_solar_cells/solar_cell_pretrained.pdparams differ diff --git a/examples/phycrnet/functions.py b/examples/phycrnet/functions.py index 0d4cd4ca84..dc0c5876b7 100644 --- a/examples/phycrnet/functions.py +++ b/examples/phycrnet/functions.py @@ -1,422 +1,422 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import math -import os -from typing import Dict - -import matplotlib.pyplot as plt -import numpy as np -import paddle -import paddle.nn as nn - -from ppsci.arch import phycrnet - -dt = None -dx = None -num_time_batch = None -uv = None -time_steps = None - -# transform -def transform_in(input): - shape = input["initial_state_shape"][0] - input_transformed = { - "initial_state": input["initial_state"][0].reshape(shape.tolist()), - "input": input["input"][0], - } - return input_transformed - - -def transform_out(input, out, model): - # Stop the transform to avoid circulation - model.enable_transform = False - - loss_func = phycrnet.loss_generator(dt, dx) - batch_loss = 0 - state_detached = [] - prev_output = [] - for time_batch_id in range(num_time_batch): - # update the first input for each time batch - if time_batch_id == 0: - hidden_state = input["initial_state"] - u0 = input["input"] - else: - hidden_state = state_detached - u0 = prev_output[-2:-1].detach() # second last output - out = model({"initial_state": hidden_state, "input": u0}) - - # output is a list - output = out["outputs"] - second_last_state = out["second_last_state"] - - # [t, c, height (Y), width (X)] - output = paddle.concat(tuple(output), axis=0) - - # concatenate the initial state to the output for central diff - output = paddle.concat((u0.cuda(), output), axis=0) - - # get loss - loss = compute_loss(output, loss_func) - batch_loss += loss - - # update the state and output for next batch - prev_output = output - state_detached = [] - for i in range(len(second_last_state)): - (h, c) = second_last_state[i] - state_detached.append((h.detach(), c.detach())) # hidden state - - model.enable_transform = True - return {"loss": batch_loss} - - -def tranform_output_val(input, out, name="results.npz"): - output = out["outputs"] - input = input["input"] - - # shape: [t, c, h, w] - output = paddle.concat(tuple(output), axis=0) - output = paddle.concat((input.cuda(), output), axis=0) - - # Padding x and y axis due to periodic boundary condition - output = paddle.concat((output[:, :, :, -1:], output, output[:, :, :, 0:2]), axis=3) - output = paddle.concat((output[:, :, -1:, :], output, output[:, :, 0:2, :]), axis=2) - - # [t, c, h, w] - truth = uv[0:time_steps, :, :, :] - - # [101, 2, 131, 131] - truth = np.concatenate((truth[:, :, :, -1:], truth, truth[:, :, :, 0:2]), axis=3) - truth = np.concatenate((truth[:, :, -1:, :], truth, truth[:, :, 0:2, :]), axis=2) - truth = paddle.to_tensor(truth) - # post-process - ten_true = [] - ten_pred = [] - for i in range(0, 1001): - u_star, u_pred, v_star, v_pred = post_process( - output, - truth, - num=i, - ) - - ten_true.append(paddle.stack([u_star, v_star])) - ten_pred.append(paddle.stack([u_pred, v_pred])) - ten_true = paddle.stack(ten_true) - ten_pred = paddle.stack(ten_pred) - # compute the error - # a-RMSE - error = ( - paddle.sum((ten_pred - ten_true) ** 2, axis=(1, 2, 3)) - / ten_true.shape[2] - / ten_true.shape[3] - ) - N = error.shape[0] - M = 0 - for i in range(N): - M = M + np.eye(N, k=-i) - M = M.T / np.arange(N) - M[:, 0] = 0 - M[0, :] = 0 - M = paddle.to_tensor(M) - aRMSE = paddle.sqrt(M.T @ error) - np.savez( - name, - error=np.array(error), - ten_true=ten_true, - ten_pred=ten_pred, - aRMSE=np.array(aRMSE), - ) - error = paddle.linalg.norm(error) - return {"loss": paddle.to_tensor([error])} - - -def train_loss_func(result_dict, *args) -> paddle.Tensor: - """For model calculation of loss. - - Args: - result_dict (Dict[str, paddle.Tensor]): The result dict. 
- - Returns: - paddle.Tensor: Loss value. - """ - return {"residual": result_dict["loss"]} - - -def val_loss_func(result_dict, *args) -> paddle.Tensor: - return {"residual": result_dict["loss"]} - - -def metric_expr(output_dict, *args) -> Dict[str, paddle.Tensor]: - return {"dummy_loss": paddle.to_tensor(0.0)} - - -class GaussianRF(object): - def __init__(self, dim, size, alpha=2, tau=3, sigma=None, boundary="periodic"): - self.dim = dim - - if sigma is None: - sigma = tau ** (0.5 * (2 * alpha - self.dim)) - - k_max = size // 2 - - if dim == 1: - k = paddle.concat( - ( - paddle.arange(start=0, end=k_max, step=1), - paddle.arange(start=-k_max, end=0, step=1), - ), - 0, - ) - - self.sqrt_eig = ( - size - * math.sqrt(2.0) - * sigma - * ((4 * (math.pi**2) * (k**2) + tau**2) ** (-alpha / 2.0)) - ) - self.sqrt_eig[0] = 0.0 - - elif dim == 2: - wavenumers = paddle.concat( - ( - paddle.arange(start=0, end=k_max, step=1), - paddle.arange(start=-k_max, end=0, step=1), - ), - 0, - ).tile((size, 1)) - - perm = list(range(wavenumers.ndim)) - perm[1] = 0 - perm[0] = 1 - k_x = wavenumers.transpose(perm=perm) - k_y = wavenumers - - self.sqrt_eig = ( - (size**2) - * math.sqrt(2.0) - * sigma - * ( - (4 * (math.pi**2) * (k_x**2 + k_y**2) + tau**2) - ** (-alpha / 2.0) - ) - ) - self.sqrt_eig[0, 0] = 0.0 - - elif dim == 3: - wavenumers = paddle.concat( - ( - paddle.arange(start=0, end=k_max, step=1), - paddle.arange(start=-k_max, end=0, step=1), - ), - 0, - ).tile((size, size, 1)) - - perm = list(range(wavenumers.ndim)) - perm[1] = 2 - perm[2] = 1 - k_x = wavenumers.transpose(perm=perm) - k_y = wavenumers - - perm = list(range(wavenumers.ndim)) - perm[0] = 2 - perm[2] = 0 - k_z = wavenumers.transpose(perm=perm) - - self.sqrt_eig = ( - (size**3) - * math.sqrt(2.0) - * sigma - * ( - (4 * (math.pi**2) * (k_x**2 + k_y**2 + k_z**2) + tau**2) - ** (-alpha / 2.0) - ) - ) - self.sqrt_eig[0, 0, 0] = 0.0 - - self.size = [] - for j in range(self.dim): - self.size.append(size) - - self.size = tuple(self.size) - - def sample(self, N): - - coeff = paddle.randn((N, *self.size, 2)) - - coeff[..., 0] = self.sqrt_eig * coeff[..., 0] - coeff[..., 1] = self.sqrt_eig * coeff[..., 1] - - if self.dim == 2: - u = paddle.as_real(paddle.fft.ifft2(paddle.as_complex(coeff))) - else: - raise f"self.dim not in (2): {self.dim}" - - u = u[..., 0] - - return u - - -def compute_loss(output, loss_func): - """calculate the physics loss""" - - # Padding x axis due to periodic boundary condition - output = paddle.concat((output[:, :, :, -2:], output, output[:, :, :, 0:3]), axis=3) - - # Padding y axis due to periodic boundary condition - output = paddle.concat((output[:, :, -2:, :], output, output[:, :, 0:3, :]), axis=2) - - # get physics loss - mse_loss = nn.MSELoss() - f_u, f_v = loss_func.get_phy_Loss(output) - loss = mse_loss(f_u, paddle.zeros_like(f_u).cuda()) + mse_loss( - f_v, paddle.zeros_like(f_v).cuda() - ) - - return loss - - -def post_process(output, true, num): - """ - num: Number of time step - """ - u_star = true[num, 0, 1:-1, 1:-1] - u_pred = output[num, 0, 1:-1, 1:-1].detach() - - v_star = true[num, 1, 1:-1, 1:-1] - v_pred = output[num, 1, 1:-1, 1:-1].detach() - - return u_star, u_pred, v_star, v_pred - - -class Dataset: - def __init__(self, initial_state, input): - self.initial_state = initial_state - self.input = input - - def get(self, epochs=1): - input_dict_train = { - "initial_state": [], - "initial_state_shape": [], - "input": [], - } - label_dict_train = {"dummy_loss": []} - input_dict_val = { - "initial_state": [], - 
"initial_state_shape": [], - "input": [], - } - label_dict_val = {"dummy_loss": []} - for i in range(epochs): - shape = self.initial_state.shape - input_dict_train["initial_state"].append(self.initial_state.reshape((-1,))) - input_dict_train["initial_state_shape"].append(paddle.to_tensor(shape)) - input_dict_train["input"].append(self.input) - label_dict_train["dummy_loss"].append(paddle.to_tensor(0.0)) - - if i == epochs - 1: - shape = self.initial_state.shape - input_dict_val["initial_state"].append( - self.initial_state.reshape((-1,)) - ) - input_dict_val["initial_state_shape"].append(paddle.to_tensor(shape)) - input_dict_val["input"].append(self.input) - label_dict_val["dummy_loss"].append(paddle.to_tensor(0.0)) - - return input_dict_train, label_dict_train, input_dict_val, label_dict_val - - -def output_graph(model, input_dataset, fig_save_path, case_name): - with paddle.no_grad(): - output_dataset = model(input_dataset) - output = output_dataset["outputs"] - input = input_dataset["input"][0] - output = paddle.concat(tuple(output), axis=0) - output = paddle.concat((input.cuda(), output), axis=0) - - # Padding x and y axis due to periodic boundary condition - output = paddle.concat((output[:, :, :, -1:], output, output[:, :, :, 0:2]), axis=3) - output = paddle.concat((output[:, :, -1:, :], output, output[:, :, 0:2, :]), axis=2) - truth = uv[0:2001, :, :, :] - truth = np.concatenate((truth[:, :, :, -1:], truth, truth[:, :, :, 0:2]), axis=3) - truth = np.concatenate((truth[:, :, -1:, :], truth, truth[:, :, 0:2, :]), axis=2) - - # post-process - ten_true = [] - ten_pred = [] - - for i in range(0, 100): - u_star, u_pred, v_star, v_pred = post_process(output, truth, num=20 * i) - ten_true.append([u_star, v_star]) - ten_pred.append([u_pred, v_pred]) - - ten_true = np.stack(ten_true) - ten_pred = np.stack(ten_pred) - - # compute the error - # a-RMSE - error = ( - np.sum((ten_pred - ten_true) ** 2, axis=(1, 2, 3)) - / ten_true.shape[2] - / ten_true.shape[3] - ) - N = error.shape[0] - M = 0 - for i in range(N): - M = M + np.eye(N, k=-i) - M = M.T / np.arange(N) - M[:, 0] = 0 - M[0, :] = 0 - - M = paddle.to_tensor(M) - aRMSE = paddle.sqrt(M.T @ error).numpy() - t = np.linspace(0, 4, N) - plt.plot(t, aRMSE, color="r") - plt.yscale("log") - plt.xlabel("t") - plt.ylabel("a-RMSE") - plt.ylim((1e-4, 10)) - plt.xlim((0, 4)) - plt.legend( - [ - "PhyCRNet", - ], - loc="upper left", - ) - plt.title(case_name) - plt.savefig(os.path.join(fig_save_path, "error.jpg")) - - _, ax = plt.subplots(3, 4, figsize=(18, 12)) - ax[0, 0].contourf(ten_true[25, 0]) - ax[0, 0].set_title("t=1") - ax[0, 0].set_ylabel("truth") - ax[1, 0].contourf(ten_pred[25, 0]) - ax[1, 0].set_ylabel("pred") - ax[2, 0].contourf(ten_true[25, 0] - ten_pred[25, 0]) - ax[2, 0].set_ylabel("error") - ax[0, 1].contourf(ten_true[50, 0]) - ax[0, 1].set_title("t=2") - ax[1, 1].contourf(ten_pred[50, 0]) - ax[2, 1].contourf(ten_true[50, 0] - ten_pred[50, 0]) - ax[0, 2].contourf(ten_true[75, 0]) - ax[0, 2].set_title("t=3") - ax[1, 2].contourf(ten_pred[75, 0]) - ax[2, 2].contourf(ten_true[75, 0] - ten_pred[75, 0]) - ax[0, 3].contourf(ten_true[99, 0]) - ax[0, 3].set_title("t=4") - ax[1, 3].contourf(ten_pred[99, 0]) - ax[2, 3].contourf(ten_true[99, 0] - ten_pred[99, 0]) - plt.title(case_name) - plt.savefig(os.path.join(fig_save_path, "Burgers.jpg")) - plt.close() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import os +from typing import Dict + +import matplotlib.pyplot as plt +import numpy as np +import paddle +import paddle.nn as nn + +from ppsci.arch import phycrnet + +dt = None +dx = None +num_time_batch = None +uv = None +time_steps = None + +# transform +def transform_in(input): + shape = input["initial_state_shape"][0] + input_transformed = { + "initial_state": input["initial_state"][0].reshape(shape.tolist()), + "input": input["input"][0], + } + return input_transformed + + +def transform_out(input, out, model): + # Stop the transform to avoid circulation + model.enable_transform = False + + loss_func = phycrnet.loss_generator(dt, dx) + batch_loss = 0 + state_detached = [] + prev_output = [] + for time_batch_id in range(num_time_batch): + # update the first input for each time batch + if time_batch_id == 0: + hidden_state = input["initial_state"] + u0 = input["input"] + else: + hidden_state = state_detached + u0 = prev_output[-2:-1].detach() # second last output + out = model({"initial_state": hidden_state, "input": u0}) + + # output is a list + output = out["outputs"] + second_last_state = out["second_last_state"] + + # [t, c, height (Y), width (X)] + output = paddle.concat(tuple(output), axis=0) + + # concatenate the initial state to the output for central diff + output = paddle.concat((u0.cuda(), output), axis=0) + + # get loss + loss = compute_loss(output, loss_func) + batch_loss += loss + + # update the state and output for next batch + prev_output = output + state_detached = [] + for i in range(len(second_last_state)): + (h, c) = second_last_state[i] + state_detached.append((h.detach(), c.detach())) # hidden state + + model.enable_transform = True + return {"loss": batch_loss} + + +def tranform_output_val(input, out, name="results.npz"): + output = out["outputs"] + input = input["input"] + + # shape: [t, c, h, w] + output = paddle.concat(tuple(output), axis=0) + output = paddle.concat((input.cuda(), output), axis=0) + + # Padding x and y axis due to periodic boundary condition + output = paddle.concat((output[:, :, :, -1:], output, output[:, :, :, 0:2]), axis=3) + output = paddle.concat((output[:, :, -1:, :], output, output[:, :, 0:2, :]), axis=2) + + # [t, c, h, w] + truth = uv[0:time_steps, :, :, :] + + # [101, 2, 131, 131] + truth = np.concatenate((truth[:, :, :, -1:], truth, truth[:, :, :, 0:2]), axis=3) + truth = np.concatenate((truth[:, :, -1:, :], truth, truth[:, :, 0:2, :]), axis=2) + truth = paddle.to_tensor(truth) + # post-process + ten_true = [] + ten_pred = [] + for i in range(0, 1001): + u_star, u_pred, v_star, v_pred = post_process( + output, + truth, + num=i, + ) + + ten_true.append(paddle.stack([u_star, v_star])) + ten_pred.append(paddle.stack([u_pred, v_pred])) + ten_true = paddle.stack(ten_true) + ten_pred = paddle.stack(ten_pred) + # compute the error + # a-RMSE + error = ( + paddle.sum((ten_pred - ten_true) ** 2, axis=(1, 2, 3)) + / ten_true.shape[2] + / ten_true.shape[3] + ) + N = error.shape[0] + M 
= 0 + for i in range(N): + M = M + np.eye(N, k=-i) + M = M.T / np.arange(N) + M[:, 0] = 0 + M[0, :] = 0 + M = paddle.to_tensor(M) + aRMSE = paddle.sqrt(M.T @ error) + np.savez( + name, + error=np.array(error), + ten_true=ten_true, + ten_pred=ten_pred, + aRMSE=np.array(aRMSE), + ) + error = paddle.linalg.norm(error) + return {"loss": paddle.to_tensor([error])} + + +def train_loss_func(result_dict, *args) -> paddle.Tensor: + """For model calculation of loss. + + Args: + result_dict (Dict[str, paddle.Tensor]): The result dict. + + Returns: + paddle.Tensor: Loss value. + """ + return {"residual": result_dict["loss"]} + + +def val_loss_func(result_dict, *args) -> paddle.Tensor: + return {"residual": result_dict["loss"]} + + +def metric_expr(output_dict, *args) -> Dict[str, paddle.Tensor]: + return {"dummy_loss": paddle.to_tensor(0.0)} + + +class GaussianRF(object): + def __init__(self, dim, size, alpha=2, tau=3, sigma=None, boundary="periodic"): + self.dim = dim + + if sigma is None: + sigma = tau ** (0.5 * (2 * alpha - self.dim)) + + k_max = size // 2 + + if dim == 1: + k = paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1), + paddle.arange(start=-k_max, end=0, step=1), + ), + 0, + ) + + self.sqrt_eig = ( + size + * math.sqrt(2.0) + * sigma + * ((4 * (math.pi**2) * (k**2) + tau**2) ** (-alpha / 2.0)) + ) + self.sqrt_eig[0] = 0.0 + + elif dim == 2: + wavenumers = paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1), + paddle.arange(start=-k_max, end=0, step=1), + ), + 0, + ).tile((size, 1)) + + perm = list(range(wavenumers.ndim)) + perm[1] = 0 + perm[0] = 1 + k_x = wavenumers.transpose(perm=perm) + k_y = wavenumers + + self.sqrt_eig = ( + (size**2) + * math.sqrt(2.0) + * sigma + * ( + (4 * (math.pi**2) * (k_x**2 + k_y**2) + tau**2) + ** (-alpha / 2.0) + ) + ) + self.sqrt_eig[0, 0] = 0.0 + + elif dim == 3: + wavenumers = paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1), + paddle.arange(start=-k_max, end=0, step=1), + ), + 0, + ).tile((size, size, 1)) + + perm = list(range(wavenumers.ndim)) + perm[1] = 2 + perm[2] = 1 + k_x = wavenumers.transpose(perm=perm) + k_y = wavenumers + + perm = list(range(wavenumers.ndim)) + perm[0] = 2 + perm[2] = 0 + k_z = wavenumers.transpose(perm=perm) + + self.sqrt_eig = ( + (size**3) + * math.sqrt(2.0) + * sigma + * ( + (4 * (math.pi**2) * (k_x**2 + k_y**2 + k_z**2) + tau**2) + ** (-alpha / 2.0) + ) + ) + self.sqrt_eig[0, 0, 0] = 0.0 + + self.size = [] + for j in range(self.dim): + self.size.append(size) + + self.size = tuple(self.size) + + def sample(self, N): + + coeff = paddle.randn((N, *self.size, 2)) + + coeff[..., 0] = self.sqrt_eig * coeff[..., 0] + coeff[..., 1] = self.sqrt_eig * coeff[..., 1] + + if self.dim == 2: + u = paddle.as_real(paddle.fft.ifft2(paddle.as_complex(coeff))) + else: + raise f"self.dim not in (2): {self.dim}" + + u = u[..., 0] + + return u + + +def compute_loss(output, loss_func): + """calculate the physics loss""" + + # Padding x axis due to periodic boundary condition + output = paddle.concat((output[:, :, :, -2:], output, output[:, :, :, 0:3]), axis=3) + + # Padding y axis due to periodic boundary condition + output = paddle.concat((output[:, :, -2:, :], output, output[:, :, 0:3, :]), axis=2) + + # get physics loss + mse_loss = nn.MSELoss() + f_u, f_v = loss_func.get_phy_Loss(output) + loss = mse_loss(f_u, paddle.zeros_like(f_u).cuda()) + mse_loss( + f_v, paddle.zeros_like(f_v).cuda() + ) + + return loss + + +def post_process(output, true, num): + """ + num: Number of time step + """ + u_star = 
true[num, 0, 1:-1, 1:-1] + u_pred = output[num, 0, 1:-1, 1:-1].detach() + + v_star = true[num, 1, 1:-1, 1:-1] + v_pred = output[num, 1, 1:-1, 1:-1].detach() + + return u_star, u_pred, v_star, v_pred + + +class Dataset: + def __init__(self, initial_state, input): + self.initial_state = initial_state + self.input = input + + def get(self, epochs=1): + input_dict_train = { + "initial_state": [], + "initial_state_shape": [], + "input": [], + } + label_dict_train = {"dummy_loss": []} + input_dict_val = { + "initial_state": [], + "initial_state_shape": [], + "input": [], + } + label_dict_val = {"dummy_loss": []} + for i in range(epochs): + shape = self.initial_state.shape + input_dict_train["initial_state"].append(self.initial_state.reshape((-1,))) + input_dict_train["initial_state_shape"].append(paddle.to_tensor(shape)) + input_dict_train["input"].append(self.input) + label_dict_train["dummy_loss"].append(paddle.to_tensor(0.0)) + + if i == epochs - 1: + shape = self.initial_state.shape + input_dict_val["initial_state"].append( + self.initial_state.reshape((-1,)) + ) + input_dict_val["initial_state_shape"].append(paddle.to_tensor(shape)) + input_dict_val["input"].append(self.input) + label_dict_val["dummy_loss"].append(paddle.to_tensor(0.0)) + + return input_dict_train, label_dict_train, input_dict_val, label_dict_val + + +def output_graph(model, input_dataset, fig_save_path, case_name): + with paddle.no_grad(): + output_dataset = model(input_dataset) + output = output_dataset["outputs"] + input = input_dataset["input"][0] + output = paddle.concat(tuple(output), axis=0) + output = paddle.concat((input.cuda(), output), axis=0) + + # Padding x and y axis due to periodic boundary condition + output = paddle.concat((output[:, :, :, -1:], output, output[:, :, :, 0:2]), axis=3) + output = paddle.concat((output[:, :, -1:, :], output, output[:, :, 0:2, :]), axis=2) + truth = uv[0:2001, :, :, :] + truth = np.concatenate((truth[:, :, :, -1:], truth, truth[:, :, :, 0:2]), axis=3) + truth = np.concatenate((truth[:, :, -1:, :], truth, truth[:, :, 0:2, :]), axis=2) + + # post-process + ten_true = [] + ten_pred = [] + + for i in range(0, 100): + u_star, u_pred, v_star, v_pred = post_process(output, truth, num=20 * i) + ten_true.append([u_star, v_star]) + ten_pred.append([u_pred, v_pred]) + + ten_true = np.stack(ten_true) + ten_pred = np.stack(ten_pred) + + # compute the error + # a-RMSE + error = ( + np.sum((ten_pred - ten_true) ** 2, axis=(1, 2, 3)) + / ten_true.shape[2] + / ten_true.shape[3] + ) + N = error.shape[0] + M = 0 + for i in range(N): + M = M + np.eye(N, k=-i) + M = M.T / np.arange(N) + M[:, 0] = 0 + M[0, :] = 0 + + M = paddle.to_tensor(M) + aRMSE = paddle.sqrt(M.T @ error).numpy() + t = np.linspace(0, 4, N) + plt.plot(t, aRMSE, color="r") + plt.yscale("log") + plt.xlabel("t") + plt.ylabel("a-RMSE") + plt.ylim((1e-4, 10)) + plt.xlim((0, 4)) + plt.legend( + [ + "PhyCRNet", + ], + loc="upper left", + ) + plt.title(case_name) + plt.savefig(os.path.join(fig_save_path, "error.jpg")) + + _, ax = plt.subplots(3, 4, figsize=(18, 12)) + ax[0, 0].contourf(ten_true[25, 0]) + ax[0, 0].set_title("t=1") + ax[0, 0].set_ylabel("truth") + ax[1, 0].contourf(ten_pred[25, 0]) + ax[1, 0].set_ylabel("pred") + ax[2, 0].contourf(ten_true[25, 0] - ten_pred[25, 0]) + ax[2, 0].set_ylabel("error") + ax[0, 1].contourf(ten_true[50, 0]) + ax[0, 1].set_title("t=2") + ax[1, 1].contourf(ten_pred[50, 0]) + ax[2, 1].contourf(ten_true[50, 0] - ten_pred[50, 0]) + ax[0, 2].contourf(ten_true[75, 0]) + ax[0, 2].set_title("t=3") + ax[1, 
2].contourf(ten_pred[75, 0]) + ax[2, 2].contourf(ten_true[75, 0] - ten_pred[75, 0]) + ax[0, 3].contourf(ten_true[99, 0]) + ax[0, 3].set_title("t=4") + ax[1, 3].contourf(ten_pred[99, 0]) + ax[2, 3].contourf(ten_true[99, 0] - ten_pred[99, 0]) + plt.title(case_name) + plt.savefig(os.path.join(fig_save_path, "Burgers.jpg")) + plt.close() diff --git a/examples/phygeonet/heat_equation.py b/examples/phygeonet/heat_equation.py index 91818e4758..4158bf3c5c 100644 --- a/examples/phygeonet/heat_equation.py +++ b/examples/phygeonet/heat_equation.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream import os.path as osp from typing import Dict @@ -252,3 +253,261 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +import os.path as osp +from typing import Dict + +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +import utils +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + data = np.load(cfg.data_dir) + coords = data["coords"] + jinvs = data["jinvs"] + dxdxis = data["dxdxis"] + dydxis = data["dydxis"] + dxdetas = data["dxdetas"] + dydetas = data["dydetas"] + + model = ppsci.arch.USCNN(**cfg.MODEL) + + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + iters_per_epoch = coords.shape[0] + sup_constraint_res = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "coords": coords, + "jinvs": jinvs, + "dxdxis": dxdxis, + "dydxis": dydxis, + "dxdetas": dxdetas, + "dydetas": dydetas, + }, + }, + "batch_size": cfg.TRAIN.batch_size, + "iters_per_epoch": iters_per_epoch, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss( + lambda out, label, weight: {"residual": out["residual"]} + ), + name="residual", + ) + sup_constraint = {sup_constraint_res.name: sup_constraint_res} + + def _transform_out( + _input: Dict[str, paddle.Tensor], + _output: Dict[str, paddle.Tensor], + pad_singleside: int = cfg.MODEL.pad_singleside, + ): + """Calculation residual. + + Args: + _input (Dict[str, paddle.Tensor]): The input of the model. + _output (Dict[str, paddle.Tensor]): The output of the model. + pad_singleside (int, optional): Pad size. Defaults to cfg.MODEL.pad_singleside. 
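+
+        Returns:
+            Dict[str, paddle.Tensor]: Mean squared Laplace residual of the predicted field, stored under the key "residual".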
+ """ + output_v = _output["output_v"] + jinv = _input["jinvs"] + dxdxi = _input["dxdxis"] + dydxi = _input["dydxis"] + dxdeta = _input["dxdetas"] + dydeta = _input["dydetas"] + output_v[:, 0, -pad_singleside:, pad_singleside:-pad_singleside] = 0 + output_v[:, 0, :pad_singleside, pad_singleside:-pad_singleside] = 1 + output_v[:, 0, pad_singleside:-pad_singleside, -pad_singleside:] = 1 + output_v[:, 0, pad_singleside:-pad_singleside, 0:pad_singleside] = 1 + output_v[:, 0, 0, 0] = 0.5 * (output_v[:, 0, 0, 1] + output_v[:, 0, 1, 0]) + output_v[:, 0, 0, -1] = 0.5 * (output_v[:, 0, 0, -2] + output_v[:, 0, 1, -1]) + dvdx = utils.dfdx(output_v, dydeta, dydxi, jinv) + d2vdx2 = utils.dfdx(dvdx, dydeta, dydxi, jinv) + dvdy = utils.dfdy(output_v, dxdxi, dxdeta, jinv) + d2vdy2 = utils.dfdy(dvdy, dxdxi, dxdeta, jinv) + continuity = d2vdy2 + d2vdx2 + return {"residual": paddle.mean(continuity**2)} + + model.register_output_transform(_transform_out) + solver = ppsci.solver.Solver( + model, + sup_constraint, + cfg.output_dir, + optimizer, + epochs=cfg.epochs, + iters_per_epoch=iters_per_epoch, + ) + solver.train() + solver.plot_loss_history() + + +def evaluate(cfg: DictConfig): + data = np.load(cfg.data_dir) + coords = data["coords"] + + ofv_sb = paddle.to_tensor(data["OFV_sb"]) + + ## create model + pad_singleside = cfg.MODEL.pad_singleside + model = ppsci.arch.USCNN(**cfg.MODEL) + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.EVAL.pretrained_model_path, ### the path of the model + ) + output_v = solver.predict({"coords": paddle.to_tensor(coords)}) + output_v = output_v["output_v"] + + output_v[0, 0, -pad_singleside:, pad_singleside:-pad_singleside] = 0 + output_v[0, 0, :pad_singleside, pad_singleside:-pad_singleside] = 1 + output_v[0, 0, pad_singleside:-pad_singleside, -pad_singleside:] = 1 + output_v[0, 0, pad_singleside:-pad_singleside, 0:pad_singleside] = 1 + output_v[0, 0, 0, 0] = 0.5 * (output_v[0, 0, 0, 1] + output_v[0, 0, 1, 0]) + output_v[0, 0, 0, -1] = 0.5 * (output_v[0, 0, 0, -2] + output_v[0, 0, 1, -1]) + + ev = paddle.sqrt( + paddle.mean((ofv_sb - output_v[0, 0]) ** 2) / paddle.mean(ofv_sb**2) + ).item() + logger.info(f"ev: {ev}") + + output_v = output_v.numpy() + ofv_sb = ofv_sb.numpy() + fig = plt.figure() + ax = plt.subplot(1, 2, 1) + utils.visualize( + ax, + coords[0, 0, 1:-1, 1:-1], + coords[0, 1, 1:-1, 1:-1], + output_v[0, 0, 1:-1, 1:-1], + "horizontal", + [0, 1], + ) + utils.set_axis_label(ax, "p") + ax.set_title("CNN " + r"$T$") + ax.set_aspect("equal") + ax = plt.subplot(1, 2, 2) + utils.visualize( + ax, + coords[0, 0, 1:-1, 1:-1], + coords[0, 1, 1:-1, 1:-1], + ofv_sb[1:-1, 1:-1], + "horizontal", + [0, 1], + ) + utils.set_axis_label(ax, "p") + ax.set_aspect("equal") + ax.set_title("FV " + r"$T$") + fig.tight_layout(pad=1) + fig.savefig(f"{cfg.output_dir}/result.png", bbox_inches="tight") + plt.close(fig) + + +def export(cfg: DictConfig): + model = ppsci.arch.USCNN(**cfg.MODEL) + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 2, 19, 84], "float32", name=key) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + data = np.load(cfg.data_dir) + coords = data["coords"] + ofv_sb = data["OFV_sb"] + + ## create model + 
pad_singleside = cfg.MODEL.pad_singleside + input_spec = {"coords": coords} + + output_v = predictor.predict(input_spec, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_v = { + store_key: output_v[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_v.keys()) + } + + output_v = output_v["output_v"] + + output_v[0, 0, -pad_singleside:, pad_singleside:-pad_singleside] = 0 + output_v[0, 0, :pad_singleside, pad_singleside:-pad_singleside] = 1 + output_v[0, 0, pad_singleside:-pad_singleside, -pad_singleside:] = 1 + output_v[0, 0, pad_singleside:-pad_singleside, 0:pad_singleside] = 1 + output_v[0, 0, 0, 0] = 0.5 * (output_v[0, 0, 0, 1] + output_v[0, 0, 1, 0]) + output_v[0, 0, 0, -1] = 0.5 * (output_v[0, 0, 0, -2] + output_v[0, 0, 1, -1]) + + ev = paddle.sqrt( + paddle.mean((ofv_sb - output_v[0, 0]) ** 2) / paddle.mean(ofv_sb**2) + ).item() + logger.info(f"ev: {ev}") + + fig = plt.figure() + ax = plt.subplot(1, 2, 1) + utils.visualize( + ax, + coords[0, 0, 1:-1, 1:-1], + coords[0, 1, 1:-1, 1:-1], + output_v[0, 0, 1:-1, 1:-1], + "horizontal", + [0, 1], + ) + utils.set_axis_label(ax, "p") + ax.set_title("CNN " + r"$T$") + ax.set_aspect("equal") + ax = plt.subplot(1, 2, 2) + utils.visualize( + ax, + coords[0, 0, 1:-1, 1:-1], + coords[0, 1, 1:-1, 1:-1], + ofv_sb[1:-1, 1:-1], + "horizontal", + [0, 1], + ) + utils.set_axis_label(ax, "p") + ax.set_aspect("equal") + ax.set_title("FV " + r"$T$") + fig.tight_layout(pad=1) + fig.savefig(osp.join(cfg.output_dir, "result.png"), bbox_inches="tight") + plt.close(fig) + + +@hydra.main(version_base=None, config_path="./conf", config_name="heat_equation.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/phygeonet/heat_equation_with_bc.py b/examples/phygeonet/heat_equation_with_bc.py index 1005f1a9bb..118038b4e3 100644 --- a/examples/phygeonet/heat_equation_with_bc.py +++ b/examples/phygeonet/heat_equation_with_bc.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream from os import path as osp from typing import Dict @@ -319,3 +320,328 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +from os import path as osp +from typing import Dict + +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +import utils +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + model = ppsci.arch.USCNN(**cfg.MODEL) + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + data = np.load(cfg.data_dir) + coords = data["coords"] + jinvs = data["jinvs"] + dxdxis = data["dxdxis"] + dydxis = data["dydxis"] + dxdetas = data["dxdetas"] + dydetas = data["dydetas"] + + iters_per_epoch = coords.shape[0] + sup_constraint_res = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "coords": coords, + "jinvs": jinvs, + "dxdxis": dxdxis, + "dydxis": dydxis, + "dxdetas": dxdetas, + "dydetas": dydetas, + }, + }, + "batch_size": cfg.TRAIN.batch_size, + "iters_per_epoch": iters_per_epoch, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss( + lambda out, label, weight: {"residual": out["residual"]} + ), + name="residual", + ) + 
sup_constraint = {sup_constraint_res.name: sup_constraint_res} + + def _transform_out( + _input: Dict[str, paddle.Tensor], + _output: Dict[str, paddle.Tensor], + pad_singleside: int = cfg.MODEL.pad_singleside, + ): + """Calculation residual. + + Args: + _input (Dict[str, paddle.Tensor]): The input of the model. + _output (Dict[str, paddle.Tensor]): The output of the model. + pad_singleside (int, optional): Pad size. Defaults to cfg.MODEL.pad_singleside. + """ + output_v = _output["output_v"] + batch_size = output_v.shape[0] + jinv = _input["jinvs"] + dxdxi = _input["dxdxis"] + dydxi = _input["dydxis"] + dxdeta = _input["dxdetas"] + dydeta = _input["dydetas"] + Para = _input["coords"] + for j in range(batch_size): + output_v[j, 0, -pad_singleside:, pad_singleside:-pad_singleside] = output_v[ + j, 0, 1:2, pad_singleside:-pad_singleside + ] + output_v[j, 0, :pad_singleside, pad_singleside:-pad_singleside] = output_v[ + j, 0, -2:-1, pad_singleside:-pad_singleside + ] + output_v[j, 0, :, -pad_singleside:] = 0 + output_v[j, 0, :, 0:pad_singleside] = Para[j, 0, 0, 0] + dvdx = utils.dfdx(output_v, dydeta, dydxi, jinv) + d2vdx2 = utils.dfdx(dvdx, dydeta, dydxi, jinv) + dvdy = utils.dfdy(output_v, dxdxi, dxdeta, jinv) + d2vdy2 = utils.dfdy(dvdy, dxdxi, dxdeta, jinv) + continuity = d2vdy2 + d2vdx2 + return {"residual": paddle.mean(continuity**2)} + + model.register_output_transform(_transform_out) + solver = ppsci.solver.Solver( + model, + sup_constraint, + cfg.output_dir, + optimizer, + epochs=cfg.epochs, + iters_per_epoch=iters_per_epoch, + ) + + solver.train() + solver.plot_loss_history() + + +def evaluate(cfg: DictConfig): + pad_singleside = cfg.MODEL.pad_singleside + model = ppsci.arch.USCNN(**cfg.MODEL) + + data = np.load(cfg.test_data_dir) + paras = paddle.to_tensor(data["paras"]) + truths = paddle.to_tensor(data["truths"]) + coords = paddle.to_tensor(data["coords"]) + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.EVAL.pretrained_model_path, ### the path of the model + ) + + paras = paras.reshape([paras.shape[0], 1, paras.shape[1], paras.shape[2]]) + output = solver.predict({"coords": paras}) + output_v = output["output_v"] + num_sample = output_v.shape[0] + for j in range(num_sample): + # Impose BC + output_v[j, 0, -pad_singleside:, pad_singleside:-pad_singleside] = output_v[ + j, 0, 1:2, pad_singleside:-pad_singleside + ] + output_v[j, 0, :pad_singleside, pad_singleside:-pad_singleside] = output_v[ + j, 0, -2:-1, pad_singleside:-pad_singleside + ] + output_v[j, 0, :, -pad_singleside:] = 0 + output_v[j, 0, :, 0:pad_singleside] = paras[j, 0, 0, 0] + + error = paddle.sqrt( + paddle.mean((truths - output_v) ** 2) / paddle.mean(truths**2) + ).item() + logger.info(f"The average error: {error / num_sample}") + output_vs = output_v.numpy() + PARALIST = [1, 2, 3, 4, 5, 6, 7] + for i in range(len(PARALIST)): + truth = truths[i].numpy() + coord = coords[i].numpy() + output_v = output_vs[i] + truth = truth.reshape(1, 1, truth.shape[0], truth.shape[1]) + coord = coord.reshape(1, 2, coord.shape[2], coord.shape[3]) + fig1 = plt.figure() + xylabelsize = 20 + xytickssize = 20 + titlesize = 20 + ax = plt.subplot(1, 2, 1) + _, cbar = utils.visualize( + ax, + coord[0, 0, :, :], + coord[0, 1, :, :], + output_v[0, :, :], + "horizontal", + [0, max(PARALIST)], + ) + ax.set_aspect("equal") + utils.set_axis_label(ax, "p") + ax.set_title("PhyGeoNet " + r"$T$", fontsize=titlesize) + ax.set_xlabel(xlabel=r"$x$", fontsize=xylabelsize) + ax.set_ylabel(ylabel=r"$y$", fontsize=xylabelsize) + 
ax.set_xticks([-1, 0, 1]) + ax.set_yticks([-1, 0, 1]) + ax.tick_params(axis="x", labelsize=xytickssize) + ax.tick_params(axis="y", labelsize=xytickssize) + cbar.set_ticks([0, 1, 2, 3, 4, 5, 6, 7]) + cbar.ax.tick_params(labelsize=xytickssize) + ax = plt.subplot(1, 2, 2) + _, cbar = utils.visualize( + ax, + coord[0, 0, :, :], + coord[0, 1, :, :], + truth[0, 0, :, :], + "horizontal", + [0, max(PARALIST)], + ) + ax.set_aspect("equal") + utils.set_axis_label(ax, "p") + ax.set_title("FV " + r"$T$", fontsize=titlesize) + ax.set_xlabel(xlabel=r"$x$", fontsize=xylabelsize) + ax.set_ylabel(ylabel=r"$y$", fontsize=xylabelsize) + ax.set_xticks([-1, 0, 1]) + ax.set_yticks([-1, 0, 1]) + ax.tick_params(axis="x", labelsize=xytickssize) + ax.tick_params(axis="y", labelsize=xytickssize) + cbar.set_ticks([0, 1, 2, 3, 4, 5, 6, 7]) + cbar.ax.tick_params(labelsize=xytickssize) + fig1.tight_layout(pad=1) + fig1.savefig(osp.join(cfg.output_dir, f"Para{i}T.png"), bbox_inches="tight") + plt.close(fig1) + + +def export(cfg: DictConfig): + model = ppsci.arch.USCNN(**cfg.MODEL) + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 1, 19, 84], "float32", name=key) + for key in model.input_keys + }, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + pad_singleside = cfg.MODEL.pad_singleside + + data = np.load(cfg.test_data_dir) + paras = data["paras"] + truths = data["truths"] + coords = data["coords"] + + paras = paras.reshape([paras.shape[0], 1, paras.shape[1], paras.shape[2]]) + input_spec = {"coords": paras} + output_v = predictor.predict(input_spec, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_v = { + store_key: output_v[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_v.keys()) + } + output_v = output_v["output_v"] + num_sample = output_v.shape[0] + for j in range(num_sample): + # Impose BC + output_v[j, 0, -pad_singleside:, pad_singleside:-pad_singleside] = output_v[ + j, 0, 1:2, pad_singleside:-pad_singleside + ] + output_v[j, 0, :pad_singleside, pad_singleside:-pad_singleside] = output_v[ + j, 0, -2:-1, pad_singleside:-pad_singleside + ] + output_v[j, 0, :, -pad_singleside:] = 0 + output_v[j, 0, :, 0:pad_singleside] = paras[j, 0, 0, 0] + + error = paddle.sqrt( + paddle.mean((truths - output_v) ** 2) / paddle.mean(truths**2) + ).item() + logger.info(f"The average error: {error / num_sample}") + + output_vs = output_v + PARALIST = [1, 2, 3, 4, 5, 6, 7] + for i in range(len(PARALIST)): + truth = truths[i] + coord = coords[i] + output_v = output_vs[i] + truth = truth.reshape(1, 1, truth.shape[0], truth.shape[1]) + coord = coord.reshape(1, 2, coord.shape[2], coord.shape[3]) + fig1 = plt.figure() + xylabelsize = 20 + xytickssize = 20 + titlesize = 20 + ax = plt.subplot(1, 2, 1) + _, cbar = utils.visualize( + ax, + coord[0, 0, :, :], + coord[0, 1, :, :], + output_v[0, :, :], + "horizontal", + [0, max(PARALIST)], + ) + ax.set_aspect("equal") + utils.set_axis_label(ax, "p") + ax.set_title("PhyGeoNet " + r"$T$", fontsize=titlesize) + ax.set_xlabel(xlabel=r"$x$", fontsize=xylabelsize) + ax.set_ylabel(ylabel=r"$y$", fontsize=xylabelsize) + ax.set_xticks([-1, 0, 1]) + ax.set_yticks([-1, 0, 1]) + ax.tick_params(axis="x", labelsize=xytickssize) + 
ax.tick_params(axis="y", labelsize=xytickssize) + cbar.set_ticks([0, 1, 2, 3, 4, 5, 6, 7]) + cbar.ax.tick_params(labelsize=xytickssize) + ax = plt.subplot(1, 2, 2) + _, cbar = utils.visualize( + ax, + coord[0, 0, :, :], + coord[0, 1, :, :], + truth[0, 0, :, :], + "horizontal", + [0, max(PARALIST)], + ) + ax.set_aspect("equal") + utils.set_axis_label(ax, "p") + ax.set_title("FV " + r"$T$", fontsize=titlesize) + ax.set_xlabel(xlabel=r"$x$", fontsize=xylabelsize) + ax.set_ylabel(ylabel=r"$y$", fontsize=xylabelsize) + ax.set_xticks([-1, 0, 1]) + ax.set_yticks([-1, 0, 1]) + ax.tick_params(axis="x", labelsize=xytickssize) + ax.tick_params(axis="y", labelsize=xytickssize) + cbar.set_ticks([0, 1, 2, 3, 4, 5, 6, 7]) + cbar.ax.tick_params(labelsize=xytickssize) + fig1.tight_layout(pad=1) + fig1.savefig(osp.join(cfg.output_dir, f"Para{i}T.png"), bbox_inches="tight") + plt.close(fig1) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="heat_equation_with_bc.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/phygeonet/utils.py b/examples/phygeonet/utils.py index f4172ac5b5..7fcd4ab661 100644 --- a/examples/phygeonet/utils.py +++ b/examples/phygeonet/utils.py @@ -1,239 +1,239 @@ -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import paddle -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon - - -def dfdx( - f: paddle.Tensor, - dydeta: paddle.Tensor, - dydxi: paddle.Tensor, - jinv: paddle.Tensor, - h: int = 0.01, -): - """Calculate the derivative of the given function f in the x direction - using the form of discrete difference. - - Args: - f (paddle.Tensor): The matrix that needs to calculate differentiation. - dydeta (paddle.Tensor): The dydeta data. - dydxi (paddle.Tensor): The dydxi data. - jinv (paddle.Tensor): Jacobian matrix. - h (int, optional): Differential interval. Defaults to 0.01. 
- """ - dfdxi_internal = ( - ( - -f[:, :, :, 4:] - + 8 * f[:, :, :, 3:-1] - - 8 * f[:, :, :, 1:-3] - + f[:, :, :, 0:-4] - ) - / 12 - / h - ) - dfdxi_left = ( - ( - -11 * f[:, :, :, 0:-3] - + 18 * f[:, :, :, 1:-2] - - 9 * f[:, :, :, 2:-1] - + 2 * f[:, :, :, 3:] - ) - / 6 - / h - ) - dfdxi_right = ( - ( - 11 * f[:, :, :, 3:] - - 18 * f[:, :, :, 2:-1] - + 9 * f[:, :, :, 1:-2] - - 2 * f[:, :, :, 0:-3] - ) - / 6 - / h - ) - dfdxi = paddle.concat( - (dfdxi_left[:, :, :, 0:2], dfdxi_internal, dfdxi_right[:, :, :, -2:]), 3 - ) - dfdeta_internal = ( - ( - -f[:, :, 4:, :] - + 8 * f[:, :, 3:-1, :] - - 8 * f[:, :, 1:-3, :] - + f[:, :, 0:-4, :] - ) - / 12 - / h - ) - dfdeta_low = ( - ( - -11 * f[:, :, 0:-3, :] - + 18 * f[:, :, 1:-2, :] - - 9 * f[:, :, 2:-1, :] - + 2 * f[:, :, 3:, :] - ) - / 6 - / h - ) - dfdeta_up = ( - ( - 11 * f[:, :, 3:, :] - - 18 * f[:, :, 2:-1, :] - + 9 * f[:, :, 1:-2, :] - - 2 * f[:, :, 0:-3, :] - ) - / 6 - / h - ) - dfdeta = paddle.concat( - (dfdeta_low[:, :, 0:2, :], dfdeta_internal, dfdeta_up[:, :, -2:, :]), 2 - ) - dfdx = jinv * (dfdxi * dydeta - dfdeta * dydxi) - return dfdx - - -def dfdy( - f: paddle.Tensor, - dxdxi: paddle.Tensor, - dxdeta: paddle.Tensor, - jinv: paddle.Tensor, - h: int = 0.01, -): - """Calculate the derivative of the given function f in the y direction - using the form of discrete difference. - - Args: - f (paddle.Tensor): The matrix that needs to calculate differentiation. - dxdxi (paddle.Tensor): The dxdxi data. - dxdeta (paddle.Tensor): The dxdeta data. - jinv (paddle.Tensor): Jacobian matrix. - h (int, optional): Differential interval. Defaults to 0.01. - """ - dfdxi_internal = ( - ( - -f[:, :, :, 4:] - + 8 * f[:, :, :, 3:-1] - - 8 * f[:, :, :, 1:-3] - + f[:, :, :, 0:-4] - ) - / 12 - / h - ) - dfdxi_left = ( - ( - -11 * f[:, :, :, 0:-3] - + 18 * f[:, :, :, 1:-2] - - 9 * f[:, :, :, 2:-1] - + 2 * f[:, :, :, 3:] - ) - / 6 - / h - ) - dfdxi_right = ( - ( - 11 * f[:, :, :, 3:] - - 18 * f[:, :, :, 2:-1] - + 9 * f[:, :, :, 1:-2] - - 2 * f[:, :, :, 0:-3] - ) - / 6 - / h - ) - dfdxi = paddle.concat( - (dfdxi_left[:, :, :, 0:2], dfdxi_internal, dfdxi_right[:, :, :, -2:]), 3 - ) - - dfdeta_internal = ( - ( - -f[:, :, 4:, :] - + 8 * f[:, :, 3:-1, :] - - 8 * f[:, :, 1:-3, :] - + f[:, :, 0:-4, :] - ) - / 12 - / h - ) - dfdeta_low = ( - ( - -11 * f[:, :, 0:-3, :] - + 18 * f[:, :, 1:-2, :] - - 9 * f[:, :, 2:-1, :] - + 2 * f[:, :, 3:, :] - ) - / 6 - / h - ) - dfdeta_up = ( - ( - 11 * f[:, :, 3:, :] - - 18 * f[:, :, 2:-1, :] - + 9 * f[:, :, 1:-2, :] - - 2 * f[:, :, 0:-3, :] - ) - / 6 - / h - ) - dfdeta = paddle.concat( - (dfdeta_low[:, :, 0:2, :], dfdeta_internal, dfdeta_up[:, :, -2:, :]), 2 - ) - dfdy = jinv * (dfdeta * dxdxi - dfdxi * dxdeta) - return dfdy - - -def set_axis_label(ax, type): - if type == "p": - ax.set_xlabel(r"$x$") - ax.set_ylabel(r"$y$") - elif type == "r": - ax.set_xlabel(r"$\xi$") - ax.set_ylabel(r"$\eta$") - else: - raise ValueError("The axis type only can be reference or physical") - - -def gen_e2vcg(x: np.ndarray): - """Generate adjacent coordinate indices for each point based on the shape of x. - - Args: - x (np.ndarray): Input coordinate array. 
- """ - nelemx = x.shape[1] - 1 - nelemy = x.shape[0] - 1 - nelem = nelemx * nelemy - nnx = x.shape[1] - e2vcg = np.zeros([4, nelem]) - for j in range(nelemy): - for i in range(nelemx): - e2vcg[:, j * nelemx + i] = np.asarray( - [j * nnx + i, j * nnx + i + 1, (j + 1) * nnx + i, (j + 1) * nnx + i + 1] - ) - return e2vcg.astype("int64") - - -def visualize(ax, x, y, u, colorbarPosition="vertical", colorlimit=None): - xdg0 = np.vstack([x.flatten(order="C"), y.flatten(order="C")]) - udg0 = u.flatten(order="C") - idx = np.asarray([0, 1, 3, 2]) - nelemx = x.shape[1] - 1 - nelemy = x.shape[0] - 1 - nelem = nelemx * nelemy - e2vcg0 = gen_e2vcg(x) - udg_ref = udg0[e2vcg0] - cmap = matplotlib.cm.coolwarm - polygon_list = [] - for i in range(nelem): - polygon_ = Polygon(xdg0[:, e2vcg0[idx, i]].T) - polygon_list.append(polygon_) - polygon_ensemble = PatchCollection(polygon_list, cmap=cmap, alpha=1) - polygon_ensemble.set_edgecolor("face") - polygon_ensemble.set_array(np.mean(udg_ref, axis=0)) - if colorlimit is None: - pass - else: - polygon_ensemble.set_clim(colorlimit) - ax.add_collection(polygon_ensemble) - ax.set_xlim(np.min(xdg0[0, :]), np.max(xdg0[0, :])) - ax.set_ylim(np.min(xdg0[1, :]), np.max(xdg0[1, :])) - cbar = plt.colorbar(polygon_ensemble, orientation=colorbarPosition) - return ax, cbar +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import paddle +from matplotlib.collections import PatchCollection +from matplotlib.patches import Polygon + + +def dfdx( + f: paddle.Tensor, + dydeta: paddle.Tensor, + dydxi: paddle.Tensor, + jinv: paddle.Tensor, + h: int = 0.01, +): + """Calculate the derivative of the given function f in the x direction + using the form of discrete difference. + + Args: + f (paddle.Tensor): The matrix that needs to calculate differentiation. + dydeta (paddle.Tensor): The dydeta data. + dydxi (paddle.Tensor): The dydxi data. + jinv (paddle.Tensor): Jacobian matrix. + h (int, optional): Differential interval. Defaults to 0.01. + """ + dfdxi_internal = ( + ( + -f[:, :, :, 4:] + + 8 * f[:, :, :, 3:-1] + - 8 * f[:, :, :, 1:-3] + + f[:, :, :, 0:-4] + ) + / 12 + / h + ) + dfdxi_left = ( + ( + -11 * f[:, :, :, 0:-3] + + 18 * f[:, :, :, 1:-2] + - 9 * f[:, :, :, 2:-1] + + 2 * f[:, :, :, 3:] + ) + / 6 + / h + ) + dfdxi_right = ( + ( + 11 * f[:, :, :, 3:] + - 18 * f[:, :, :, 2:-1] + + 9 * f[:, :, :, 1:-2] + - 2 * f[:, :, :, 0:-3] + ) + / 6 + / h + ) + dfdxi = paddle.concat( + (dfdxi_left[:, :, :, 0:2], dfdxi_internal, dfdxi_right[:, :, :, -2:]), 3 + ) + dfdeta_internal = ( + ( + -f[:, :, 4:, :] + + 8 * f[:, :, 3:-1, :] + - 8 * f[:, :, 1:-3, :] + + f[:, :, 0:-4, :] + ) + / 12 + / h + ) + dfdeta_low = ( + ( + -11 * f[:, :, 0:-3, :] + + 18 * f[:, :, 1:-2, :] + - 9 * f[:, :, 2:-1, :] + + 2 * f[:, :, 3:, :] + ) + / 6 + / h + ) + dfdeta_up = ( + ( + 11 * f[:, :, 3:, :] + - 18 * f[:, :, 2:-1, :] + + 9 * f[:, :, 1:-2, :] + - 2 * f[:, :, 0:-3, :] + ) + / 6 + / h + ) + dfdeta = paddle.concat( + (dfdeta_low[:, :, 0:2, :], dfdeta_internal, dfdeta_up[:, :, -2:, :]), 2 + ) + dfdx = jinv * (dfdxi * dydeta - dfdeta * dydxi) + return dfdx + + +def dfdy( + f: paddle.Tensor, + dxdxi: paddle.Tensor, + dxdeta: paddle.Tensor, + jinv: paddle.Tensor, + h: int = 0.01, +): + """Calculate the derivative of the given function f in the y direction + using the form of discrete difference. + + Args: + f (paddle.Tensor): The matrix that needs to calculate differentiation. + dxdxi (paddle.Tensor): The dxdxi data. + dxdeta (paddle.Tensor): The dxdeta data. 
+ jinv (paddle.Tensor): Jacobian matrix. + h (int, optional): Differential interval. Defaults to 0.01. + """ + dfdxi_internal = ( + ( + -f[:, :, :, 4:] + + 8 * f[:, :, :, 3:-1] + - 8 * f[:, :, :, 1:-3] + + f[:, :, :, 0:-4] + ) + / 12 + / h + ) + dfdxi_left = ( + ( + -11 * f[:, :, :, 0:-3] + + 18 * f[:, :, :, 1:-2] + - 9 * f[:, :, :, 2:-1] + + 2 * f[:, :, :, 3:] + ) + / 6 + / h + ) + dfdxi_right = ( + ( + 11 * f[:, :, :, 3:] + - 18 * f[:, :, :, 2:-1] + + 9 * f[:, :, :, 1:-2] + - 2 * f[:, :, :, 0:-3] + ) + / 6 + / h + ) + dfdxi = paddle.concat( + (dfdxi_left[:, :, :, 0:2], dfdxi_internal, dfdxi_right[:, :, :, -2:]), 3 + ) + + dfdeta_internal = ( + ( + -f[:, :, 4:, :] + + 8 * f[:, :, 3:-1, :] + - 8 * f[:, :, 1:-3, :] + + f[:, :, 0:-4, :] + ) + / 12 + / h + ) + dfdeta_low = ( + ( + -11 * f[:, :, 0:-3, :] + + 18 * f[:, :, 1:-2, :] + - 9 * f[:, :, 2:-1, :] + + 2 * f[:, :, 3:, :] + ) + / 6 + / h + ) + dfdeta_up = ( + ( + 11 * f[:, :, 3:, :] + - 18 * f[:, :, 2:-1, :] + + 9 * f[:, :, 1:-2, :] + - 2 * f[:, :, 0:-3, :] + ) + / 6 + / h + ) + dfdeta = paddle.concat( + (dfdeta_low[:, :, 0:2, :], dfdeta_internal, dfdeta_up[:, :, -2:, :]), 2 + ) + dfdy = jinv * (dfdeta * dxdxi - dfdxi * dxdeta) + return dfdy + + +def set_axis_label(ax, type): + if type == "p": + ax.set_xlabel(r"$x$") + ax.set_ylabel(r"$y$") + elif type == "r": + ax.set_xlabel(r"$\xi$") + ax.set_ylabel(r"$\eta$") + else: + raise ValueError("The axis type only can be reference or physical") + + +def gen_e2vcg(x: np.ndarray): + """Generate adjacent coordinate indices for each point based on the shape of x. + + Args: + x (np.ndarray): Input coordinate array. + """ + nelemx = x.shape[1] - 1 + nelemy = x.shape[0] - 1 + nelem = nelemx * nelemy + nnx = x.shape[1] + e2vcg = np.zeros([4, nelem]) + for j in range(nelemy): + for i in range(nelemx): + e2vcg[:, j * nelemx + i] = np.asarray( + [j * nnx + i, j * nnx + i + 1, (j + 1) * nnx + i, (j + 1) * nnx + i + 1] + ) + return e2vcg.astype("int64") + + +def visualize(ax, x, y, u, colorbarPosition="vertical", colorlimit=None): + xdg0 = np.vstack([x.flatten(order="C"), y.flatten(order="C")]) + udg0 = u.flatten(order="C") + idx = np.asarray([0, 1, 3, 2]) + nelemx = x.shape[1] - 1 + nelemy = x.shape[0] - 1 + nelem = nelemx * nelemy + e2vcg0 = gen_e2vcg(x) + udg_ref = udg0[e2vcg0] + cmap = matplotlib.cm.coolwarm + polygon_list = [] + for i in range(nelem): + polygon_ = Polygon(xdg0[:, e2vcg0[idx, i]].T) + polygon_list.append(polygon_) + polygon_ensemble = PatchCollection(polygon_list, cmap=cmap, alpha=1) + polygon_ensemble.set_edgecolor("face") + polygon_ensemble.set_array(np.mean(udg_ref, axis=0)) + if colorlimit is None: + pass + else: + polygon_ensemble.set_clim(colorlimit) + ax.add_collection(polygon_ensemble) + ax.set_xlim(np.min(xdg0[0, :]), np.max(xdg0[0, :])) + ax.set_ylim(np.min(xdg0[1, :]), np.max(xdg0[1, :])) + cbar = plt.colorbar(polygon_ensemble, orientation=colorbarPosition) + return ax, cbar diff --git a/examples/phylstm/conf/phylstm2.yaml b/examples/phylstm/conf/phylstm2.yaml index d896194f1e..b40ced799d 100644 --- a/examples/phylstm/conf/phylstm2.yaml +++ b/examples/phylstm/conf/phylstm2.yaml @@ -1,50 +1,50 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: 
outputs_PhyLSTM2/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 20 - -# set data file path -DATA_FILE_PATH: data_boucwen.mat - -# model settings -MODEL: - input_size: 1 - hidden_size: 100 - model_type: 2 - -# training settings -TRAIN: - epochs: 100 - iters_per_epoch: 1 - save_freq: 50 - learning_rate: 0.001 - pretrained_model_path: null - checkpoint_path: null - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_PhyLSTM2/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set data file path +DATA_FILE_PATH: data_boucwen.mat + +# model settings +MODEL: + input_size: 1 + hidden_size: 100 + model_type: 2 + +# training settings +TRAIN: + epochs: 100 + iters_per_epoch: 1 + save_freq: 50 + learning_rate: 0.001 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true diff --git a/examples/phylstm/conf/phylstm3.yaml b/examples/phylstm/conf/phylstm3.yaml index 222917d350..5bc71536eb 100644 --- a/examples/phylstm/conf/phylstm3.yaml +++ b/examples/phylstm/conf/phylstm3.yaml @@ -1,50 +1,50 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_PhyLSTM3/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 42 -output_dir: ${hydra:run.dir} -log_freq: 20 - -# set data file path -DATA_FILE_PATH: data_boucwen.mat - -# model settings -MODEL: - input_size: 1 - hidden_size: 100 - model_type: 3 - -# training settings -TRAIN: - epochs: 200 - iters_per_epoch: 1 - save_freq: 50 - learning_rate: 0.001 - pretrained_model_path: null - checkpoint_path: null - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: 
outputs_PhyLSTM3/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set data file path +DATA_FILE_PATH: data_boucwen.mat + +# model settings +MODEL: + input_size: 1 + hidden_size: 100 + model_type: 3 + +# training settings +TRAIN: + epochs: 200 + iters_per_epoch: 1 + save_freq: 50 + learning_rate: 0.001 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true diff --git a/examples/phylstm/functions.py b/examples/phylstm/functions.py index 04f4c274d1..7bbada0bfc 100644 --- a/examples/phylstm/functions.py +++ b/examples/phylstm/functions.py @@ -1,191 +1,191 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import random -from typing import Dict - -import numpy as np -import paddle -import paddle.nn.functional as F - - -def metric_expr(output_dict, *args) -> Dict[str, paddle.Tensor]: - return {"dummy_loss": paddle.full([], 0.0)} - - -# transform -def transform_in(input): - input_transformed = { - "ag": input["ag"][0], - "eta": input["eta"][0], - "eta_t": input["eta_t"][0], - "g": input["g"][0], - "lift": input["lift"][0], - "ag_c": input["ag_c"][0], - "phi": input["phi"][0], - } - return input_transformed - - -def transform_out(input, out): - # Add transformed input for computing loss - out.update(input) - return out - - -def train_loss_func2(result_dict, *args) -> paddle.Tensor: - """For phylstm2 calculation of loss. - - Args: - result_dict (Dict[str, paddle.Tensor]): The result dict. - - Returns: - paddle.Tensor: Loss value. - """ - # for measurements - loss_u = F.mse_loss(result_dict["eta"], result_dict["eta_pred"]) - loss_udot = F.mse_loss(result_dict["eta_t"], result_dict["eta_dot_pred"]) - - # for collocations - loss_ut_c = F.mse_loss(result_dict["eta_t_pred_c"], result_dict["eta_dot_pred_c"]) - loss_e = F.mse_loss( - paddle.matmul( - result_dict["lift"], - paddle.ones( - [result_dict["lift"].shape[0], 1, result_dict["eta"].shape[2]], - dtype=paddle.get_default_dtype(), - ), - ), - result_dict["lift_pred_c"], - ) - - # total loss - loss = loss_u + loss_udot + loss_ut_c + loss_e - loss = paddle.square(loss) - return {"loss2": loss} - - -def train_loss_func3(result_dict, *args) -> paddle.Tensor: - """For phylstm3 calculation of loss. - - Args: - result_dict (Dict[str, paddle.Tensor]): The result dict. - - Returns: - paddle.Tensor: Loss value. 
- """ - # for measurements - loss_u = F.mse_loss(result_dict["eta"], result_dict["eta_pred"]) - loss_udot = F.mse_loss(result_dict["eta_t"], result_dict["eta_dot_pred"]) - - # for collocations - loss_ut_c = F.mse_loss(result_dict["eta_t_pred_c"], result_dict["eta_dot_pred_c"]) - loss_gt_c = F.mse_loss(result_dict["g_t_pred_c"], result_dict["g_dot_pred_c"]) - - loss_e = F.mse_loss( - paddle.matmul( - result_dict["lift"], - paddle.ones( - [result_dict["lift"].shape[0], 1, result_dict["eta"].shape[2]], - dtype=paddle.get_default_dtype(), - ), - ), - result_dict["lift_pred_c"], - ) - - loss = loss_u + loss_udot + loss_ut_c + loss_gt_c + loss_e - loss = paddle.square(loss) - return {"loss3": loss} - - -class Dataset: - def __init__(self, eta, eta_t, g, ag, ag_c, lift, phi_t, ratio_split=0.8): - self.eta = np.asarray(eta, dtype=paddle.get_default_dtype()) - self.eta_t = np.asarray(eta_t, dtype=paddle.get_default_dtype()) - self.g = np.asarray(g, dtype=paddle.get_default_dtype()) - self.ag = np.asarray(ag, dtype=paddle.get_default_dtype()) - self.lift = np.asarray(lift, dtype=paddle.get_default_dtype()) - self.ag_c = np.asarray(ag_c, dtype=paddle.get_default_dtype()) - self.phi_t = np.asarray(phi_t, dtype=paddle.get_default_dtype()) - self.ratio_split = ratio_split - - def get(self, epochs=1): - input_dict_train = { - "ag": [], - "eta": [], - "eta_t": [], - "g": [], - "lift": [], - "ag_c": [], - "phi": [], - } - label_dict_train = {"dummy_loss": []} - input_dict_val = { - "ag": [], - "eta": [], - "eta_t": [], - "g": [], - "lift": [], - "ag_c": [], - "phi": [], - } - label_dict_val = {"dummy_loss": []} - for i in range(epochs): - ind = list(range(self.ag.shape[0])) - random.shuffle(ind) - ratio_split = self.ratio_split - ind_tr = ind[0 : round(ratio_split * self.ag.shape[0])] - ind_val = ind[round(ratio_split * self.ag.shape[0]) :] - - self.ag_tr = self.ag[ind_tr] - self.eta_tr = self.eta[ind_tr] - self.eta_t_tr = self.eta_t[ind_tr] - self.g_tr = self.g[ind_tr] - - self.ag_val = self.ag[ind_val] - self.eta_val = self.eta[ind_val] - self.eta_t_val = self.eta_t[ind_val] - self.g_val = self.g[ind_val] - - input_dict_train["ag"].append(self.ag_tr) - input_dict_train["eta"].append(self.eta_tr) - input_dict_train["eta_t"].append(self.eta_t_tr) - input_dict_train["g"].append(self.g_tr) - input_dict_train["lift"].append(self.lift) - input_dict_train["ag_c"].append(self.ag_c) - input_dict_train["phi"].append(self.phi_t) - label_dict_train["dummy_loss"].append( - np.asarray(0.0, dtype=paddle.get_default_dtype()) - ) - - if i == epochs - 1: - input_dict_val["ag"].append(self.ag_val) - input_dict_val["eta"].append(self.eta_val) - input_dict_val["eta_t"].append(self.eta_t_val) - input_dict_val["g"].append(self.g_val) - input_dict_val["lift"].append(self.lift) - input_dict_val["ag_c"].append(self.ag_c) - input_dict_val["phi"].append(self.phi_t) - label_dict_val["dummy_loss"].append( - np.asarray(0.0, dtype=paddle.get_default_dtype()) - ) - - def to_numpy_dict(dct): - return {k: np.asarray(v, dtype="float32") for k, v in dct.items()} - - input_dict_train = to_numpy_dict(input_dict_train) - label_dict_train = to_numpy_dict(label_dict_train) - input_dict_val = to_numpy_dict(input_dict_val) - label_dict_val = to_numpy_dict(label_dict_val) - return input_dict_train, label_dict_train, input_dict_val, label_dict_val +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from typing import Dict + +import numpy as np +import paddle +import paddle.nn.functional as F + + +def metric_expr(output_dict, *args) -> Dict[str, paddle.Tensor]: + return {"dummy_loss": paddle.full([], 0.0)} + + +# transform +def transform_in(input): + input_transformed = { + "ag": input["ag"][0], + "eta": input["eta"][0], + "eta_t": input["eta_t"][0], + "g": input["g"][0], + "lift": input["lift"][0], + "ag_c": input["ag_c"][0], + "phi": input["phi"][0], + } + return input_transformed + + +def transform_out(input, out): + # Add transformed input for computing loss + out.update(input) + return out + + +def train_loss_func2(result_dict, *args) -> paddle.Tensor: + """For phylstm2 calculation of loss. + + Args: + result_dict (Dict[str, paddle.Tensor]): The result dict. + + Returns: + paddle.Tensor: Loss value. + """ + # for measurements + loss_u = F.mse_loss(result_dict["eta"], result_dict["eta_pred"]) + loss_udot = F.mse_loss(result_dict["eta_t"], result_dict["eta_dot_pred"]) + + # for collocations + loss_ut_c = F.mse_loss(result_dict["eta_t_pred_c"], result_dict["eta_dot_pred_c"]) + loss_e = F.mse_loss( + paddle.matmul( + result_dict["lift"], + paddle.ones( + [result_dict["lift"].shape[0], 1, result_dict["eta"].shape[2]], + dtype=paddle.get_default_dtype(), + ), + ), + result_dict["lift_pred_c"], + ) + + # total loss + loss = loss_u + loss_udot + loss_ut_c + loss_e + loss = paddle.square(loss) + return {"loss2": loss} + + +def train_loss_func3(result_dict, *args) -> paddle.Tensor: + """For phylstm3 calculation of loss. + + Args: + result_dict (Dict[str, paddle.Tensor]): The result dict. + + Returns: + paddle.Tensor: Loss value. 
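# A minimal, self-contained sketch (not part of the diff above; the shapes and
# the assumption that it is run from examples/phylstm/ so `functions` is
# importable are mine) showing how the functional loss defined just above,
# functions.train_loss_func2, can be exercised with dummy tensors. Note that
# despite the `-> paddle.Tensor` annotation it returns a dict with one scalar.
import paddle
import functions  # examples/phylstm/functions.py from this diff

B, T, D = 2, 16, 1  # hypothetical batch size, sequence length, feature dim
keys = [
    "eta", "eta_pred", "eta_t", "eta_dot_pred",
    "eta_t_pred_c", "eta_dot_pred_c", "lift", "lift_pred_c",
]
dummy = {k: paddle.rand([B, T, D]) for k in keys}
print(functions.train_loss_func2(dummy))  # {"loss2": Tensor(shape=[], ...)}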
+ """ + # for measurements + loss_u = F.mse_loss(result_dict["eta"], result_dict["eta_pred"]) + loss_udot = F.mse_loss(result_dict["eta_t"], result_dict["eta_dot_pred"]) + + # for collocations + loss_ut_c = F.mse_loss(result_dict["eta_t_pred_c"], result_dict["eta_dot_pred_c"]) + loss_gt_c = F.mse_loss(result_dict["g_t_pred_c"], result_dict["g_dot_pred_c"]) + + loss_e = F.mse_loss( + paddle.matmul( + result_dict["lift"], + paddle.ones( + [result_dict["lift"].shape[0], 1, result_dict["eta"].shape[2]], + dtype=paddle.get_default_dtype(), + ), + ), + result_dict["lift_pred_c"], + ) + + loss = loss_u + loss_udot + loss_ut_c + loss_gt_c + loss_e + loss = paddle.square(loss) + return {"loss3": loss} + + +class Dataset: + def __init__(self, eta, eta_t, g, ag, ag_c, lift, phi_t, ratio_split=0.8): + self.eta = np.asarray(eta, dtype=paddle.get_default_dtype()) + self.eta_t = np.asarray(eta_t, dtype=paddle.get_default_dtype()) + self.g = np.asarray(g, dtype=paddle.get_default_dtype()) + self.ag = np.asarray(ag, dtype=paddle.get_default_dtype()) + self.lift = np.asarray(lift, dtype=paddle.get_default_dtype()) + self.ag_c = np.asarray(ag_c, dtype=paddle.get_default_dtype()) + self.phi_t = np.asarray(phi_t, dtype=paddle.get_default_dtype()) + self.ratio_split = ratio_split + + def get(self, epochs=1): + input_dict_train = { + "ag": [], + "eta": [], + "eta_t": [], + "g": [], + "lift": [], + "ag_c": [], + "phi": [], + } + label_dict_train = {"dummy_loss": []} + input_dict_val = { + "ag": [], + "eta": [], + "eta_t": [], + "g": [], + "lift": [], + "ag_c": [], + "phi": [], + } + label_dict_val = {"dummy_loss": []} + for i in range(epochs): + ind = list(range(self.ag.shape[0])) + random.shuffle(ind) + ratio_split = self.ratio_split + ind_tr = ind[0 : round(ratio_split * self.ag.shape[0])] + ind_val = ind[round(ratio_split * self.ag.shape[0]) :] + + self.ag_tr = self.ag[ind_tr] + self.eta_tr = self.eta[ind_tr] + self.eta_t_tr = self.eta_t[ind_tr] + self.g_tr = self.g[ind_tr] + + self.ag_val = self.ag[ind_val] + self.eta_val = self.eta[ind_val] + self.eta_t_val = self.eta_t[ind_val] + self.g_val = self.g[ind_val] + + input_dict_train["ag"].append(self.ag_tr) + input_dict_train["eta"].append(self.eta_tr) + input_dict_train["eta_t"].append(self.eta_t_tr) + input_dict_train["g"].append(self.g_tr) + input_dict_train["lift"].append(self.lift) + input_dict_train["ag_c"].append(self.ag_c) + input_dict_train["phi"].append(self.phi_t) + label_dict_train["dummy_loss"].append( + np.asarray(0.0, dtype=paddle.get_default_dtype()) + ) + + if i == epochs - 1: + input_dict_val["ag"].append(self.ag_val) + input_dict_val["eta"].append(self.eta_val) + input_dict_val["eta_t"].append(self.eta_t_val) + input_dict_val["g"].append(self.g_val) + input_dict_val["lift"].append(self.lift) + input_dict_val["ag_c"].append(self.ag_c) + input_dict_val["phi"].append(self.phi_t) + label_dict_val["dummy_loss"].append( + np.asarray(0.0, dtype=paddle.get_default_dtype()) + ) + + def to_numpy_dict(dct): + return {k: np.asarray(v, dtype="float32") for k, v in dct.items()} + + input_dict_train = to_numpy_dict(input_dict_train) + label_dict_train = to_numpy_dict(label_dict_train) + input_dict_val = to_numpy_dict(input_dict_val) + label_dict_val = to_numpy_dict(label_dict_val) + return input_dict_train, label_dict_train, input_dict_val, label_dict_val diff --git a/examples/phylstm/phylstm2.py b/examples/phylstm/phylstm2.py index 0d320e77f8..87b11bdf29 100755 --- a/examples/phylstm/phylstm2.py +++ b/examples/phylstm/phylstm2.py @@ -1,322 +1,322 @@ -# 
Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Reference: https://github.com/zhry10/PhyLSTM.git -""" - -from os import path as osp - -import functions -import hydra -import numpy as np -import scipy.io -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) - ag_data = mat["input_tf"] # ag, ad, av - u_data = mat["target_X_tf"] - ut_data = mat["target_Xd_tf"] - utt_data = mat["target_Xdd_tf"] - ag_data = ag_data.reshape([ag_data.shape[0], ag_data.shape[1], 1]) - u_data = u_data.reshape([u_data.shape[0], u_data.shape[1], 1]) - ut_data = ut_data.reshape([ut_data.shape[0], ut_data.shape[1], 1]) - utt_data = utt_data.reshape([utt_data.shape[0], utt_data.shape[1], 1]) - - t = mat["time"] - dt = t[0, 1] - t[0, 0] - - ag_all = ag_data - u_all = u_data - u_t_all = ut_data - u_tt_all = utt_data - - # finite difference - N = u_data.shape[1] - phi1 = np.concatenate( - [ - np.array([-3 / 2, 2, -1 / 2]), - np.zeros([N - 3]), - ] - ) - temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1) - temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1) - phi2 = temp1 + temp2 - phi3 = np.concatenate( - [ - np.zeros([N - 3]), - np.array([1 / 2, -2, 3 / 2]), - ] - ) - phi_t0 = ( - 1 - / dt - * np.concatenate( - [ - np.reshape(phi1, [1, phi1.shape[0]]), - phi2, - np.reshape(phi3, [1, phi3.shape[0]]), - ], - axis=0, - ) - ) - phi_t0 = np.reshape(phi_t0, [1, N, N]) - - ag_star = ag_all[0:10] - eta_star = u_all[0:10] - eta_t_star = u_t_all[0:10] - eta_tt_star = u_tt_all[0:10] - ag_c_star = ag_all[0:50] - lift_star = -ag_c_star - - eta = eta_star - ag = ag_star - lift = lift_star - eta_t = eta_t_star - eta_tt = eta_tt_star - ag_c = ag_c_star - g = -eta_tt - ag - phi_t = np.repeat(phi_t0, ag_c_star.shape[0], axis=0) - - model = ppsci.arch.DeepPhyLSTM( - cfg.MODEL.input_size, - eta.shape[2], - cfg.MODEL.hidden_size, - cfg.MODEL.model_type, - ) - model.register_input_transform(functions.transform_in) - model.register_output_transform(functions.transform_out) - - dataset_obj = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t) - - ( - input_dict_train, - label_dict_train, - input_dict_val, - label_dict_val, - ) = dataset_obj.get(cfg.TRAIN.epochs) - - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict_train, - "label": label_dict_train, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.train_loss_func2), - { - "eta_pred": lambda out: out["eta_pred"], - "eta_dot_pred": lambda out: out["eta_dot_pred"], - "g_pred": 
lambda out: out["g_pred"], - "eta_t_pred_c": lambda out: out["eta_t_pred_c"], - "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], - "lift_pred_c": lambda out: out["lift_pred_c"], - }, - name="sup_train", - ) - constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} - - sup_validator_pde = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict_val, - "label": label_dict_val, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.train_loss_func2), - { - "eta_pred": lambda out: out["eta_pred"], - "eta_dot_pred": lambda out: out["eta_dot_pred"], - "g_pred": lambda out: out["g_pred"], - "eta_t_pred_c": lambda out: out["eta_t_pred_c"], - "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], - "lift_pred_c": lambda out: out["lift_pred_c"], - }, - metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, - name="sup_valid", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # initialize solver - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - solver = ppsci.solver.Solver( - model, - constraint_pde, - optimizer=optimizer, - validator=validator_pde, - cfg=cfg, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) - ag_data = mat["input_tf"] # ag, ad, av - u_data = mat["target_X_tf"] - ut_data = mat["target_Xd_tf"] - utt_data = mat["target_Xdd_tf"] - ag_data = ag_data.reshape([ag_data.shape[0], ag_data.shape[1], 1]) - u_data = u_data.reshape([u_data.shape[0], u_data.shape[1], 1]) - ut_data = ut_data.reshape([ut_data.shape[0], ut_data.shape[1], 1]) - utt_data = utt_data.reshape([utt_data.shape[0], utt_data.shape[1], 1]) - - t = mat["time"] - dt = t[0, 1] - t[0, 0] - - ag_all = ag_data - u_all = u_data - u_t_all = ut_data - u_tt_all = utt_data - - # finite difference - N = u_data.shape[1] - phi1 = np.concatenate( - [ - np.array([-3 / 2, 2, -1 / 2]), - np.zeros([N - 3]), - ] - ) - temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1) - temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1) - phi2 = temp1 + temp2 - phi3 = np.concatenate( - [ - np.zeros([N - 3]), - np.array([1 / 2, -2, 3 / 2]), - ] - ) - phi_t0 = ( - 1 - / dt - * np.concatenate( - [ - np.reshape(phi1, [1, phi1.shape[0]]), - phi2, - np.reshape(phi3, [1, phi3.shape[0]]), - ], - axis=0, - ) - ) - phi_t0 = np.reshape(phi_t0, [1, N, N]) - - ag_star = ag_all[0:10] - eta_star = u_all[0:10] - eta_t_star = u_t_all[0:10] - eta_tt_star = u_tt_all[0:10] - ag_c_star = ag_all[0:50] - lift_star = -ag_c_star - - eta = eta_star - ag = ag_star - lift = lift_star - eta_t = eta_t_star - eta_tt = eta_tt_star - ag_c = ag_c_star - g = -eta_tt - ag - phi_t = np.repeat(phi_t0, ag_c_star.shape[0], axis=0) - - model = ppsci.arch.DeepPhyLSTM( - cfg.MODEL.input_size, - eta.shape[2], - cfg.MODEL.hidden_size, - cfg.MODEL.model_type, - ) - model.register_input_transform(functions.transform_in) - model.register_output_transform(functions.transform_out) - - dataset_obj = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t) - - ( - _, - _, - input_dict_val, - label_dict_val, - ) = dataset_obj.get(1) - - sup_validator_pde = ppsci.validate.SupervisedValidator( - { - 
"dataset": { - "name": "NamedArrayDataset", - "input": input_dict_val, - "label": label_dict_val, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.train_loss_func2), - { - "eta_pred": lambda out: out["eta_pred"], - "eta_dot_pred": lambda out: out["eta_dot_pred"], - "g_pred": lambda out: out["g_pred"], - "eta_t_pred_c": lambda out: out["eta_t_pred_c"], - "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], - "lift_pred_c": lambda out: out["lift_pred_c"], - }, - metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, - name="sup_valid", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # initialize solver - solver = ppsci.solver.Solver( - model, - validator=validator_pde, - cfg=cfg, - ) - # evaluate - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="phylstm2.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/zhry10/PhyLSTM.git +""" + +from os import path as osp + +import functions +import hydra +import numpy as np +import scipy.io +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) + ag_data = mat["input_tf"] # ag, ad, av + u_data = mat["target_X_tf"] + ut_data = mat["target_Xd_tf"] + utt_data = mat["target_Xdd_tf"] + ag_data = ag_data.reshape([ag_data.shape[0], ag_data.shape[1], 1]) + u_data = u_data.reshape([u_data.shape[0], u_data.shape[1], 1]) + ut_data = ut_data.reshape([ut_data.shape[0], ut_data.shape[1], 1]) + utt_data = utt_data.reshape([utt_data.shape[0], utt_data.shape[1], 1]) + + t = mat["time"] + dt = t[0, 1] - t[0, 0] + + ag_all = ag_data + u_all = u_data + u_t_all = ut_data + u_tt_all = utt_data + + # finite difference + N = u_data.shape[1] + phi1 = np.concatenate( + [ + np.array([-3 / 2, 2, -1 / 2]), + np.zeros([N - 3]), + ] + ) + temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1) + temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1) + phi2 = temp1 + temp2 + phi3 = np.concatenate( + [ + np.zeros([N - 3]), + np.array([1 / 2, -2, 3 / 2]), + ] + ) + phi_t0 = ( + 1 + / dt + * np.concatenate( + [ + np.reshape(phi1, [1, phi1.shape[0]]), + phi2, + np.reshape(phi3, [1, phi3.shape[0]]), + ], + axis=0, + ) + ) + phi_t0 = np.reshape(phi_t0, [1, N, N]) + + ag_star = ag_all[0:10] + eta_star = u_all[0:10] + eta_t_star = u_t_all[0:10] + eta_tt_star = u_tt_all[0:10] + ag_c_star = ag_all[0:50] + 
lift_star = -ag_c_star + + eta = eta_star + ag = ag_star + lift = lift_star + eta_t = eta_t_star + eta_tt = eta_tt_star + ag_c = ag_c_star + g = -eta_tt - ag + phi_t = np.repeat(phi_t0, ag_c_star.shape[0], axis=0) + + model = ppsci.arch.DeepPhyLSTM( + cfg.MODEL.input_size, + eta.shape[2], + cfg.MODEL.hidden_size, + cfg.MODEL.model_type, + ) + model.register_input_transform(functions.transform_in) + model.register_output_transform(functions.transform_out) + + dataset_obj = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t) + + ( + input_dict_train, + label_dict_train, + input_dict_val, + label_dict_val, + ) = dataset_obj.get(cfg.TRAIN.epochs) + + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_train, + "label": label_dict_train, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.train_loss_func2), + { + "eta_pred": lambda out: out["eta_pred"], + "eta_dot_pred": lambda out: out["eta_dot_pred"], + "g_pred": lambda out: out["g_pred"], + "eta_t_pred_c": lambda out: out["eta_t_pred_c"], + "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], + "lift_pred_c": lambda out: out["lift_pred_c"], + }, + name="sup_train", + ) + constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} + + sup_validator_pde = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_val, + "label": label_dict_val, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.train_loss_func2), + { + "eta_pred": lambda out: out["eta_pred"], + "eta_dot_pred": lambda out: out["eta_dot_pred"], + "g_pred": lambda out: out["g_pred"], + "eta_t_pred_c": lambda out: out["eta_t_pred_c"], + "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], + "lift_pred_c": lambda out: out["lift_pred_c"], + }, + metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, + name="sup_valid", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # initialize solver + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + solver = ppsci.solver.Solver( + model, + constraint_pde, + optimizer=optimizer, + validator=validator_pde, + cfg=cfg, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) + ag_data = mat["input_tf"] # ag, ad, av + u_data = mat["target_X_tf"] + ut_data = mat["target_Xd_tf"] + utt_data = mat["target_Xdd_tf"] + ag_data = ag_data.reshape([ag_data.shape[0], ag_data.shape[1], 1]) + u_data = u_data.reshape([u_data.shape[0], u_data.shape[1], 1]) + ut_data = ut_data.reshape([ut_data.shape[0], ut_data.shape[1], 1]) + utt_data = utt_data.reshape([utt_data.shape[0], utt_data.shape[1], 1]) + + t = mat["time"] + dt = t[0, 1] - t[0, 0] + + ag_all = ag_data + u_all = u_data + u_t_all = ut_data + u_tt_all = utt_data + + # finite difference + N = u_data.shape[1] + phi1 = np.concatenate( + [ + np.array([-3 / 2, 2, -1 / 2]), + np.zeros([N - 3]), + ] + ) + temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1) + temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1) + phi2 
= temp1 + temp2 + phi3 = np.concatenate( + [ + np.zeros([N - 3]), + np.array([1 / 2, -2, 3 / 2]), + ] + ) + phi_t0 = ( + 1 + / dt + * np.concatenate( + [ + np.reshape(phi1, [1, phi1.shape[0]]), + phi2, + np.reshape(phi3, [1, phi3.shape[0]]), + ], + axis=0, + ) + ) + phi_t0 = np.reshape(phi_t0, [1, N, N]) + + ag_star = ag_all[0:10] + eta_star = u_all[0:10] + eta_t_star = u_t_all[0:10] + eta_tt_star = u_tt_all[0:10] + ag_c_star = ag_all[0:50] + lift_star = -ag_c_star + + eta = eta_star + ag = ag_star + lift = lift_star + eta_t = eta_t_star + eta_tt = eta_tt_star + ag_c = ag_c_star + g = -eta_tt - ag + phi_t = np.repeat(phi_t0, ag_c_star.shape[0], axis=0) + + model = ppsci.arch.DeepPhyLSTM( + cfg.MODEL.input_size, + eta.shape[2], + cfg.MODEL.hidden_size, + cfg.MODEL.model_type, + ) + model.register_input_transform(functions.transform_in) + model.register_output_transform(functions.transform_out) + + dataset_obj = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t) + + ( + _, + _, + input_dict_val, + label_dict_val, + ) = dataset_obj.get(1) + + sup_validator_pde = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_val, + "label": label_dict_val, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.train_loss_func2), + { + "eta_pred": lambda out: out["eta_pred"], + "eta_dot_pred": lambda out: out["eta_dot_pred"], + "g_pred": lambda out: out["g_pred"], + "eta_t_pred_c": lambda out: out["eta_t_pred_c"], + "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], + "lift_pred_c": lambda out: out["lift_pred_c"], + }, + metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, + name="sup_valid", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # initialize solver + solver = ppsci.solver.Solver( + model, + validator=validator_pde, + cfg=cfg, + ) + # evaluate + solver.eval() + + +@hydra.main(version_base=None, config_path="./conf", config_name="phylstm2.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/phylstm/phylstm3.py b/examples/phylstm/phylstm3.py index 071ecbeedb..2f5f05f0e7 100755 --- a/examples/phylstm/phylstm3.py +++ b/examples/phylstm/phylstm3.py @@ -1,360 +1,360 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
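# A quick numerical check (N and dt here are made up, not taken from the data
# file) of the finite-difference operator phi_t0 assembled in phylstm2.py above
# and reused in phylstm3.py below: a second-order forward difference on the
# first row, central differences in the middle rows, and a backward difference
# on the last row, so phi_t0 @ f approximates df/dt.
import numpy as np

N, dt = 20, 0.02  # hypothetical series length and time step
phi1 = np.concatenate([np.array([-3 / 2, 2, -1 / 2]), np.zeros([N - 3])])
temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1)
temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1)
phi2 = temp1 + temp2
phi3 = np.concatenate([np.zeros([N - 3]), np.array([1 / 2, -2, 3 / 2])])
phi_t0 = np.concatenate([phi1[None, :], phi2, phi3[None, :]], axis=0) / dt

t = np.arange(N) * dt
err = phi_t0 @ np.sin(t) - np.cos(t)
print(np.abs(err).max())  # ~1e-4, consistent with a second-order scheme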
- -""" -Reference: https://github.com/zhry10/PhyLSTM.git -""" - -from os import path as osp - -import functions -import hydra -import numpy as np -import scipy.io -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) - t = mat["time"] - dt = 0.02 - n1 = int(dt / 0.005) - t = t[::n1] - - ag_data = mat["input_tf"][:, ::n1] # ag, ad, av - u_data = mat["target_X_tf"][:, ::n1] - ut_data = mat["target_Xd_tf"][:, ::n1] - utt_data = mat["target_Xdd_tf"][:, ::n1] - ag_data = ag_data.reshape([ag_data.shape[0], ag_data.shape[1], 1]) - u_data = u_data.reshape([u_data.shape[0], u_data.shape[1], 1]) - ut_data = ut_data.reshape([ut_data.shape[0], ut_data.shape[1], 1]) - utt_data = utt_data.reshape([utt_data.shape[0], utt_data.shape[1], 1]) - - ag_pred = mat["input_pred_tf"][:, ::n1] # ag, ad, av - u_pred = mat["target_pred_X_tf"][:, ::n1] - ut_pred = mat["target_pred_Xd_tf"][:, ::n1] - utt_pred = mat["target_pred_Xdd_tf"][:, ::n1] - ag_pred = ag_pred.reshape([ag_pred.shape[0], ag_pred.shape[1], 1]) - u_pred = u_pred.reshape([u_pred.shape[0], u_pred.shape[1], 1]) - ut_pred = ut_pred.reshape([ut_pred.shape[0], ut_pred.shape[1], 1]) - utt_pred = utt_pred.reshape([utt_pred.shape[0], utt_pred.shape[1], 1]) - - N = u_data.shape[1] - phi1 = np.concatenate( - [ - np.array([-3 / 2, 2, -1 / 2]), - np.zeros([N - 3]), - ] - ) - temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1) - temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1) - phi2 = temp1 + temp2 - phi3 = np.concatenate( - [ - np.zeros([N - 3]), - np.array([1 / 2, -2, 3 / 2]), - ] - ) - phi_t0 = ( - 1 - / dt - * np.concatenate( - [ - np.reshape(phi1, [1, phi1.shape[0]]), - phi2, - np.reshape(phi3, [1, phi3.shape[0]]), - ], - axis=0, - ) - ) - phi_t0 = np.reshape(phi_t0, [1, N, N]) - - ag_star = ag_data - eta_star = u_data - eta_t_star = ut_data - eta_tt_star = utt_data - ag_c_star = np.concatenate([ag_data, ag_pred[0:53]]) - lift_star = -ag_c_star - - eta = eta_star - ag = ag_star - lift = lift_star - eta_t = eta_t_star - eta_tt = eta_tt_star - g = -eta_tt - ag - ag_c = ag_c_star - - phi_t = np.repeat(phi_t0, ag_c_star.shape[0], axis=0) - - model = ppsci.arch.DeepPhyLSTM( - cfg.MODEL.input_size, - eta.shape[2], - cfg.MODEL.hidden_size, - cfg.MODEL.model_type, - ) - model.register_input_transform(functions.transform_in) - model.register_output_transform(functions.transform_out) - - dataset_obj = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t) - ( - input_dict_train, - label_dict_train, - input_dict_val, - label_dict_val, - ) = dataset_obj.get(cfg.TRAIN.epochs) - - sup_constraint_pde = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict_train, - "label": label_dict_train, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.train_loss_func3), - { - "eta_pred": lambda out: out["eta_pred"], - "eta_dot_pred": lambda out: out["eta_dot_pred"], - "g_pred": lambda out: out["g_pred"], - "eta_t_pred_c": lambda out: out["eta_t_pred_c"], - "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], - "lift_pred_c": lambda out: 
out["lift_pred_c"], - "g_t_pred_c": lambda out: out["g_t_pred_c"], - "g_dot_pred_c": lambda out: out["g_dot_pred_c"], - }, - name="sup_train", - ) - constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} - - sup_validator_pde = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict_val, - "label": label_dict_val, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.train_loss_func3), - { - "eta_pred": lambda out: out["eta_pred"], - "eta_dot_pred": lambda out: out["eta_dot_pred"], - "g_pred": lambda out: out["g_pred"], - "eta_t_pred_c": lambda out: out["eta_t_pred_c"], - "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], - "lift_pred_c": lambda out: out["lift_pred_c"], - "g_t_pred_c": lambda out: out["g_t_pred_c"], - "g_dot_pred_c": lambda out: out["g_dot_pred_c"], - }, - metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, - name="sup_valid", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # initialize solver - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - solver = ppsci.solver.Solver( - model, - constraint_pde, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, - validator=validator_pde, - checkpoint_path=cfg.TRAIN.checkpoint_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) - t = mat["time"] - dt = 0.02 - n1 = int(dt / 0.005) - t = t[::n1] - - ag_data = mat["input_tf"][:, ::n1] # ag, ad, av - u_data = mat["target_X_tf"][:, ::n1] - ut_data = mat["target_Xd_tf"][:, ::n1] - utt_data = mat["target_Xdd_tf"][:, ::n1] - ag_data = ag_data.reshape([ag_data.shape[0], ag_data.shape[1], 1]) - u_data = u_data.reshape([u_data.shape[0], u_data.shape[1], 1]) - ut_data = ut_data.reshape([ut_data.shape[0], ut_data.shape[1], 1]) - utt_data = utt_data.reshape([utt_data.shape[0], utt_data.shape[1], 1]) - - ag_pred = mat["input_pred_tf"][:, ::n1] # ag, ad, av - u_pred = mat["target_pred_X_tf"][:, ::n1] - ut_pred = mat["target_pred_Xd_tf"][:, ::n1] - utt_pred = mat["target_pred_Xdd_tf"][:, ::n1] - ag_pred = ag_pred.reshape([ag_pred.shape[0], ag_pred.shape[1], 1]) - u_pred = u_pred.reshape([u_pred.shape[0], u_pred.shape[1], 1]) - ut_pred = ut_pred.reshape([ut_pred.shape[0], ut_pred.shape[1], 1]) - utt_pred = utt_pred.reshape([utt_pred.shape[0], utt_pred.shape[1], 1]) - - N = u_data.shape[1] - phi1 = np.concatenate( - [ - np.array([-3 / 2, 2, -1 / 2]), - np.zeros([N - 3]), - ] - ) - temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1) - temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1) - phi2 = temp1 + temp2 - phi3 = np.concatenate( - [ - np.zeros([N - 3]), - np.array([1 / 2, -2, 3 / 2]), - ] - ) - phi_t0 = ( - 1 - / dt - * np.concatenate( - [ - np.reshape(phi1, [1, phi1.shape[0]]), - phi2, - np.reshape(phi3, [1, phi3.shape[0]]), - ], - axis=0, - ) - ) - phi_t0 = np.reshape(phi_t0, [1, N, N]) - - ag_star = ag_data - eta_star = u_data - 
eta_t_star = ut_data - eta_tt_star = utt_data - ag_c_star = np.concatenate([ag_data, ag_pred[0:53]]) - lift_star = -ag_c_star - - eta = eta_star - ag = ag_star - lift = lift_star - eta_t = eta_t_star - eta_tt = eta_tt_star - g = -eta_tt - ag - ag_c = ag_c_star - - phi_t = np.repeat(phi_t0, ag_c_star.shape[0], axis=0) - - model = ppsci.arch.DeepPhyLSTM( - cfg.MODEL.input_size, - eta.shape[2], - cfg.MODEL.hidden_size, - cfg.MODEL.model_type, - ) - model.register_input_transform(functions.transform_in) - model.register_output_transform(functions.transform_out) - - dataset_obj = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t) - ( - _, - _, - input_dict_val, - label_dict_val, - ) = dataset_obj.get(1) - - sup_validator_pde = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": input_dict_val, - "label": label_dict_val, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": 1, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(functions.train_loss_func3), - { - "eta_pred": lambda out: out["eta_pred"], - "eta_dot_pred": lambda out: out["eta_dot_pred"], - "g_pred": lambda out: out["g_pred"], - "eta_t_pred_c": lambda out: out["eta_t_pred_c"], - "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], - "lift_pred_c": lambda out: out["lift_pred_c"], - "g_t_pred_c": lambda out: out["g_t_pred_c"], - "g_dot_pred_c": lambda out: out["g_dot_pred_c"], - }, - metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, - name="sup_valid", - ) - validator_pde = {sup_validator_pde.name: sup_validator_pde} - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - seed=cfg.seed, - validator=validator_pde, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - - # evaluate - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="phylstm3.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
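# A small sketch (array sizes are made up; assumes it is run from
# examples/phylstm/ so `functions` is importable) of how functions.Dataset.get()
# feeds the supervised constraints in phylstm2.py/phylstm3.py: it stacks one
# random 80/20 train/val split per epoch along a new leading axis, so with
# batch_size=1 and iters_per_epoch=1 each dataloader sample carries a full
# split, and transform_in's `[0]` indexing appears to strip that singleton
# batch dimension.
import numpy as np
import functions  # examples/phylstm/functions.py from this diff

n, n_c, N = 10, 50, 20  # hypothetical measured series, collocation series, steps
eta = np.random.rand(n, N, 1)
eta_t = np.random.rand(n, N, 1)
g = np.random.rand(n, N, 1)
ag = np.random.rand(n, N, 1)
ag_c = np.random.rand(n_c, N, 1)
lift = -ag_c
phi_t = np.random.rand(n_c, N, N)

ds = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t)
in_tr, lbl_tr, in_val, lbl_val = ds.get(epochs=3)
print(in_tr["ag"].shape, in_val["ag"].shape)  # (3, 8, 20, 1) (1, 2, 20, 1)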
+ +""" +Reference: https://github.com/zhry10/PhyLSTM.git +""" + +from os import path as osp + +import functions +import hydra +import numpy as np +import scipy.io +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) + t = mat["time"] + dt = 0.02 + n1 = int(dt / 0.005) + t = t[::n1] + + ag_data = mat["input_tf"][:, ::n1] # ag, ad, av + u_data = mat["target_X_tf"][:, ::n1] + ut_data = mat["target_Xd_tf"][:, ::n1] + utt_data = mat["target_Xdd_tf"][:, ::n1] + ag_data = ag_data.reshape([ag_data.shape[0], ag_data.shape[1], 1]) + u_data = u_data.reshape([u_data.shape[0], u_data.shape[1], 1]) + ut_data = ut_data.reshape([ut_data.shape[0], ut_data.shape[1], 1]) + utt_data = utt_data.reshape([utt_data.shape[0], utt_data.shape[1], 1]) + + ag_pred = mat["input_pred_tf"][:, ::n1] # ag, ad, av + u_pred = mat["target_pred_X_tf"][:, ::n1] + ut_pred = mat["target_pred_Xd_tf"][:, ::n1] + utt_pred = mat["target_pred_Xdd_tf"][:, ::n1] + ag_pred = ag_pred.reshape([ag_pred.shape[0], ag_pred.shape[1], 1]) + u_pred = u_pred.reshape([u_pred.shape[0], u_pred.shape[1], 1]) + ut_pred = ut_pred.reshape([ut_pred.shape[0], ut_pred.shape[1], 1]) + utt_pred = utt_pred.reshape([utt_pred.shape[0], utt_pred.shape[1], 1]) + + N = u_data.shape[1] + phi1 = np.concatenate( + [ + np.array([-3 / 2, 2, -1 / 2]), + np.zeros([N - 3]), + ] + ) + temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1) + temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1) + phi2 = temp1 + temp2 + phi3 = np.concatenate( + [ + np.zeros([N - 3]), + np.array([1 / 2, -2, 3 / 2]), + ] + ) + phi_t0 = ( + 1 + / dt + * np.concatenate( + [ + np.reshape(phi1, [1, phi1.shape[0]]), + phi2, + np.reshape(phi3, [1, phi3.shape[0]]), + ], + axis=0, + ) + ) + phi_t0 = np.reshape(phi_t0, [1, N, N]) + + ag_star = ag_data + eta_star = u_data + eta_t_star = ut_data + eta_tt_star = utt_data + ag_c_star = np.concatenate([ag_data, ag_pred[0:53]]) + lift_star = -ag_c_star + + eta = eta_star + ag = ag_star + lift = lift_star + eta_t = eta_t_star + eta_tt = eta_tt_star + g = -eta_tt - ag + ag_c = ag_c_star + + phi_t = np.repeat(phi_t0, ag_c_star.shape[0], axis=0) + + model = ppsci.arch.DeepPhyLSTM( + cfg.MODEL.input_size, + eta.shape[2], + cfg.MODEL.hidden_size, + cfg.MODEL.model_type, + ) + model.register_input_transform(functions.transform_in) + model.register_output_transform(functions.transform_out) + + dataset_obj = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t) + ( + input_dict_train, + label_dict_train, + input_dict_val, + label_dict_val, + ) = dataset_obj.get(cfg.TRAIN.epochs) + + sup_constraint_pde = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_train, + "label": label_dict_train, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.train_loss_func3), + { + "eta_pred": lambda out: out["eta_pred"], + "eta_dot_pred": lambda out: out["eta_dot_pred"], + "g_pred": lambda out: out["g_pred"], + "eta_t_pred_c": lambda out: out["eta_t_pred_c"], + "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], + "lift_pred_c": lambda out: 
out["lift_pred_c"], + "g_t_pred_c": lambda out: out["g_t_pred_c"], + "g_dot_pred_c": lambda out: out["g_dot_pred_c"], + }, + name="sup_train", + ) + constraint_pde = {sup_constraint_pde.name: sup_constraint_pde} + + sup_validator_pde = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_val, + "label": label_dict_val, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.train_loss_func3), + { + "eta_pred": lambda out: out["eta_pred"], + "eta_dot_pred": lambda out: out["eta_dot_pred"], + "g_pred": lambda out: out["g_pred"], + "eta_t_pred_c": lambda out: out["eta_t_pred_c"], + "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], + "lift_pred_c": lambda out: out["lift_pred_c"], + "g_t_pred_c": lambda out: out["g_t_pred_c"], + "g_dot_pred_c": lambda out: out["g_dot_pred_c"], + }, + metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, + name="sup_valid", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # initialize solver + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + solver = ppsci.solver.Solver( + model, + constraint_pde, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + seed=cfg.seed, + validator=validator_pde, + checkpoint_path=cfg.TRAIN.checkpoint_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + mat = scipy.io.loadmat(cfg.DATA_FILE_PATH) + t = mat["time"] + dt = 0.02 + n1 = int(dt / 0.005) + t = t[::n1] + + ag_data = mat["input_tf"][:, ::n1] # ag, ad, av + u_data = mat["target_X_tf"][:, ::n1] + ut_data = mat["target_Xd_tf"][:, ::n1] + utt_data = mat["target_Xdd_tf"][:, ::n1] + ag_data = ag_data.reshape([ag_data.shape[0], ag_data.shape[1], 1]) + u_data = u_data.reshape([u_data.shape[0], u_data.shape[1], 1]) + ut_data = ut_data.reshape([ut_data.shape[0], ut_data.shape[1], 1]) + utt_data = utt_data.reshape([utt_data.shape[0], utt_data.shape[1], 1]) + + ag_pred = mat["input_pred_tf"][:, ::n1] # ag, ad, av + u_pred = mat["target_pred_X_tf"][:, ::n1] + ut_pred = mat["target_pred_Xd_tf"][:, ::n1] + utt_pred = mat["target_pred_Xdd_tf"][:, ::n1] + ag_pred = ag_pred.reshape([ag_pred.shape[0], ag_pred.shape[1], 1]) + u_pred = u_pred.reshape([u_pred.shape[0], u_pred.shape[1], 1]) + ut_pred = ut_pred.reshape([ut_pred.shape[0], ut_pred.shape[1], 1]) + utt_pred = utt_pred.reshape([utt_pred.shape[0], utt_pred.shape[1], 1]) + + N = u_data.shape[1] + phi1 = np.concatenate( + [ + np.array([-3 / 2, 2, -1 / 2]), + np.zeros([N - 3]), + ] + ) + temp1 = np.concatenate([-1 / 2 * np.identity(N - 2), np.zeros([N - 2, 2])], axis=1) + temp2 = np.concatenate([np.zeros([N - 2, 2]), 1 / 2 * np.identity(N - 2)], axis=1) + phi2 = temp1 + temp2 + phi3 = np.concatenate( + [ + np.zeros([N - 3]), + np.array([1 / 2, -2, 3 / 2]), + ] + ) + phi_t0 = ( + 1 + / dt + * np.concatenate( + [ + np.reshape(phi1, [1, phi1.shape[0]]), + phi2, + np.reshape(phi3, [1, phi3.shape[0]]), + ], + axis=0, + ) + ) + phi_t0 = np.reshape(phi_t0, [1, N, N]) + + ag_star = ag_data + eta_star = u_data + 
eta_t_star = ut_data + eta_tt_star = utt_data + ag_c_star = np.concatenate([ag_data, ag_pred[0:53]]) + lift_star = -ag_c_star + + eta = eta_star + ag = ag_star + lift = lift_star + eta_t = eta_t_star + eta_tt = eta_tt_star + g = -eta_tt - ag + ag_c = ag_c_star + + phi_t = np.repeat(phi_t0, ag_c_star.shape[0], axis=0) + + model = ppsci.arch.DeepPhyLSTM( + cfg.MODEL.input_size, + eta.shape[2], + cfg.MODEL.hidden_size, + cfg.MODEL.model_type, + ) + model.register_input_transform(functions.transform_in) + model.register_output_transform(functions.transform_out) + + dataset_obj = functions.Dataset(eta, eta_t, g, ag, ag_c, lift, phi_t) + ( + _, + _, + input_dict_val, + label_dict_val, + ) = dataset_obj.get(1) + + sup_validator_pde = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": input_dict_val, + "label": label_dict_val, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": 1, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(functions.train_loss_func3), + { + "eta_pred": lambda out: out["eta_pred"], + "eta_dot_pred": lambda out: out["eta_dot_pred"], + "g_pred": lambda out: out["g_pred"], + "eta_t_pred_c": lambda out: out["eta_t_pred_c"], + "eta_dot_pred_c": lambda out: out["eta_dot_pred_c"], + "lift_pred_c": lambda out: out["lift_pred_c"], + "g_t_pred_c": lambda out: out["g_t_pred_c"], + "g_dot_pred_c": lambda out: out["g_dot_pred_c"], + }, + metric={"metric": ppsci.metric.FunctionalMetric(functions.metric_expr)}, + name="sup_valid", + ) + validator_pde = {sup_validator_pde.name: sup_validator_pde} + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + seed=cfg.seed, + validator=validator_pde, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + + # evaluate + solver.eval() + + +@hydra.main(version_base=None, config_path="./conf", config_name="phylstm3.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/pipe/conf/poiseuille_flow.yaml b/examples/pipe/conf/poiseuille_flow.yaml index 0cfb688863..e9d1308f10 100644 --- a/examples/pipe/conf/poiseuille_flow.yaml +++ b/examples/pipe/conf/poiseuille_flow.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -97,3 +98,103 @@ INFER: max_batch_size: 256 num_cpu_threads: 4 batch_size: 256 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_poiseuille_flow/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +NU_MEAN: 0.001 +NU_STD: 0.9 +L: 1.0 # length of pipe +R: 0.05 # radius of pipe +RHO: 1 # density +P_OUT: 0 # pressure at the outlet of pipe +P_IN: 0.1 # pressure at 
the inlet of pipe +N_x: 10 +N_y: 50 +N_p: 50 +X_IN: 0 + +# model settings +MODEL: + u_net: + input_keys: ["sin(x)", "cos(x)", "y", "nu"] + output_keys: ["u"] + num_layers: 3 + hidden_size: 50 + activation: "swish" + v_net: + input_keys: ["sin(x)", "cos(x)", "y", "nu"] + output_keys: ["v"] + num_layers: 3 + hidden_size: 50 + activation: "swish" + p_net: + input_keys: ["sin(x)", "cos(x)", "y", "nu"] + output_keys: ["p"] + num_layers: 3 + hidden_size: 50 + activation: "swish" + output_keys: ["v", "u", "p"] + +# training settings +TRAIN: + epochs: 3000 + batch_size: + pde_constraint: 128 + learning_rate: 5.0e-3 + eval_during_train: false + save_freq: 10 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + +# inference settings +INFER: + pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/LabelFree-DNN-Surrogate/poiseuille_flow_pretrained.pdparams" + export_path: ./inference/poiseuile_flow + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 256 + num_cpu_threads: 4 + batch_size: 256 +>>>>>>> Stashed changes diff --git a/examples/pipe/poiseuille_flow.py b/examples/pipe/poiseuille_flow.py index 450a40f86f..f217f22f9f 100644 --- a/examples/pipe/poiseuille_flow.py +++ b/examples/pipe/poiseuille_flow.py @@ -1,671 +1,671 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
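# A worked example (constants copied from poiseuille_flow.yaml above) of the
# analytical Hagen-Poiseuille profile that evaluate() in poiseuille_flow.py
# below compares the surrogate against:
#     u(y) = (R^2 - y^2) * (P_IN - P_OUT) / (2 * L * nu * RHO)
import numpy as np

R, L, RHO, P_IN, P_OUT, NU_MEAN = 0.05, 1.0, 1.0, 0.1, 0.0, 0.001
y = np.linspace(-R, R, 51)  # 51 points so the centerline y = 0 is included
u = (R**2 - y**2) * (P_IN - P_OUT) / (2 * L * NU_MEAN * RHO)
print(u.max())  # centerline velocity, approximately 0.125 for the nominal nu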
- -""" -Reference: https://github.com/Jianxun-Wang/LabelFree-DNN-Surrogate -""" - -import copy -import os -from os import path as osp - -import hydra -import matplotlib.pyplot as plt -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import checker - -if not checker.dynamic_import_to_globals("seaborn"): - raise ModuleNotFoundError("Please install seaborn with `pip install seaborn>=0.13.0`.") # fmt: skip - -import seaborn as sns - - -def train(cfg: DictConfig): - X_OUT = cfg.X_IN + cfg.L - Y_START = -cfg.R - Y_END = Y_START + 2 * cfg.R - NU_START = cfg.NU_MEAN - cfg.NU_MEAN * cfg.NU_STD # 0.0001 - NU_END = cfg.NU_MEAN + cfg.NU_MEAN * cfg.NU_STD # 0.1 - - ## prepare data with (?, 2) - data_1d_x = np.linspace( - cfg.X_IN, X_OUT, cfg.N_x, endpoint=True, dtype=paddle.get_default_dtype() - ) - data_1d_y = np.linspace( - Y_START, Y_END, cfg.N_y, endpoint=True, dtype=paddle.get_default_dtype() - ) - data_1d_nu = np.linspace( - NU_START, NU_END, cfg.N_p, endpoint=True, dtype=paddle.get_default_dtype() - ) - - data_2d_xy = ( - np.array(np.meshgrid(data_1d_x, data_1d_y, data_1d_nu)).reshape(3, -1).T - ) - data_2d_xy_shuffle = copy.deepcopy(data_2d_xy) - np.random.shuffle(data_2d_xy_shuffle) - - input_x = data_2d_xy_shuffle[:, 0].reshape(data_2d_xy_shuffle.shape[0], 1) - input_y = data_2d_xy_shuffle[:, 1].reshape(data_2d_xy_shuffle.shape[0], 1) - input_nu = data_2d_xy_shuffle[:, 2].reshape(data_2d_xy_shuffle.shape[0], 1) - - interior_geom = ppsci.geometry.PointCloud( - interior={"x": input_x, "y": input_y, "nu": input_nu}, - coord_keys=("x", "y", "nu"), - ) - - # set model - model_u = ppsci.arch.MLP(**cfg.MODEL.u_net) - model_v = ppsci.arch.MLP(**cfg.MODEL.v_net) - model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) - - def input_trans(input): - x, y = input["x"], input["y"] - nu = input["nu"] - b = 2 * np.pi / (X_OUT - cfg.X_IN) - c = np.pi * (cfg.X_IN + X_OUT) / (cfg.X_IN - X_OUT) - sin_x = cfg.X_IN * paddle.sin(b * x + c) - cos_x = cfg.X_IN * paddle.cos(b * x + c) - return {"sin(x)": sin_x, "cos(x)": cos_x, "x": x, "y": y, "nu": nu} - - def output_trans_u(input, out): - return {"u": out["u"] * (cfg.R**2 - input["y"] ** 2)} - - def output_trans_v(input, out): - return {"v": (cfg.R**2 - input["y"] ** 2) * out["v"]} - - def output_trans_p(input, out): - return { - "p": ( - (cfg.P_IN - cfg.P_OUT) * (X_OUT - input["x"]) / cfg.L - + (cfg.X_IN - input["x"]) * (X_OUT - input["x"]) * out["p"] - ) - } - - model_u.register_input_transform(input_trans) - model_v.register_input_transform(input_trans) - model_p.register_input_transform(input_trans) - model_u.register_output_transform(output_trans_u) - model_v.register_output_transform(output_trans_v) - model_p.register_output_transform(output_trans_p) - model = ppsci.arch.ModelList((model_u, model_v, model_p)) - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) - - # set euqation - equation = { - "NavierStokes": ppsci.equation.NavierStokes( - nu="nu", rho=cfg.RHO, dim=2, time=False - ) - } - - # set constraint - ITERS_PER_EPOCH = int( - (cfg.N_x * cfg.N_y * cfg.N_p) / cfg.TRAIN.batch_size.pde_constraint - ) - - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom=interior_geom, - dataloader_cfg={ - "dataset": "NamedArrayDataset", - "num_workers": 1, - "batch_size": cfg.TRAIN.batch_size.pde_constraint, - "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - 
"shuffle": False, - "drop_last": False, - }, - }, - loss=ppsci.loss.MSELoss("mean"), - evenly=True, - name="EQ", - ) - # wrap constraints together - constraint = {pde_constraint.name: pde_constraint} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - save_freq=cfg.TRAIN.save_freq, - equation=equation, - ) - solver.train() - - -def evaluate(cfg: DictConfig): - NU_MEAN = 0.001 - NU_STD = 0.9 - L = 1.0 # length of pipe - R = 0.05 # radius of pipe - RHO = 1 # density - P_OUT = 0 # pressure at the outlet of pipe - P_IN = 0.1 # pressure at the inlet of pipe - N_x = 10 - N_y = 50 - N_p = 50 - X_IN = 0 - X_OUT = X_IN + L - Y_START = -R - Y_END = Y_START + 2 * R - NU_START = NU_MEAN - NU_MEAN * NU_STD # 0.0001 - NU_END = NU_MEAN + NU_MEAN * NU_STD # 0.1 - - ## prepare data with (?, 2) - data_1d_x = np.linspace( - X_IN, X_OUT, N_x, endpoint=True, dtype=paddle.get_default_dtype() - ) - data_1d_y = np.linspace( - Y_START, Y_END, N_y, endpoint=True, dtype=paddle.get_default_dtype() - ) - data_1d_nu = np.linspace( - NU_START, NU_END, N_p, endpoint=True, dtype=paddle.get_default_dtype() - ) - data_2d_xy = ( - np.array(np.meshgrid(data_1d_x, data_1d_y, data_1d_nu)).reshape(3, -1).T - ) - - # set model - model_u = ppsci.arch.MLP(("sin(x)", "cos(x)", "y", "nu"), ("u",), 3, 50, "swish") - model_v = ppsci.arch.MLP(("sin(x)", "cos(x)", "y", "nu"), ("v",), 3, 50, "swish") - model_p = ppsci.arch.MLP(("sin(x)", "cos(x)", "y", "nu"), ("p",), 3, 50, "swish") - - class Transform: - def input_trans(self, input): - self.input = input - x, y = input["x"], input["y"] - nu = input["nu"] - b = 2 * np.pi / (X_OUT - X_IN) - c = np.pi * (X_IN + X_OUT) / (X_IN - X_OUT) - sin_x = X_IN * paddle.sin(b * x + c) - cos_x = X_IN * paddle.cos(b * x + c) - return {"sin(x)": sin_x, "cos(x)": cos_x, "y": y, "nu": nu} - - def output_trans_u(self, input, out): - return {"u": out["u"] * (R**2 - self.input["y"] ** 2)} - - def output_trans_v(self, input, out): - return {"v": (R**2 - self.input["y"] ** 2) * out["v"]} - - def output_trans_p(self, input, out): - return { - "p": ( - (P_IN - P_OUT) * (X_OUT - self.input["x"]) / L - + (X_IN - self.input["x"]) * (X_OUT - self.input["x"]) * out["p"] - ) - } - - transform = Transform() - model_u.register_input_transform(transform.input_trans) - model_v.register_input_transform(transform.input_trans) - model_p.register_input_transform(transform.input_trans) - model_u.register_output_transform(transform.output_trans_u) - model_v.register_output_transform(transform.output_trans_v) - model_p.register_output_transform(transform.output_trans_p) - model = ppsci.arch.ModelList((model_u, model_v, model_p)) - - # Validator vel - input_dict = { - "x": data_2d_xy[:, 0:1], - "y": data_2d_xy[:, 1:2], - "nu": data_2d_xy[:, 2:3], - } - u_analytical = np.zeros([N_y, N_x, N_p]) - dP = P_IN - P_OUT - - for i in range(N_p): - uy = (R**2 - data_1d_y**2) * dP / (2 * L * data_1d_nu[i] * RHO) - u_analytical[:, :, i] = np.tile(uy.reshape([N_y, 1]), N_x) - - label_dict = {"u": np.ones_like(input_dict["x"])} - weight_dict = {"u": np.ones_like(input_dict["x"])} - - # Validator KL - num_test = 500 - data_1d_nu_distribution = np.random.normal(NU_MEAN, 0.2 * NU_MEAN, num_test) - data_2d_xy_test = ( - np.array( - np.meshgrid((X_IN - X_OUT) / 2.0, 0, data_1d_nu_distribution), np.float32 - ) - .reshape(3, -1) - .T - ) - input_dict_KL = { - "x": data_2d_xy_test[:, 
0:1], - "y": data_2d_xy_test[:, 1:2], - "nu": data_2d_xy_test[:, 2:3], - } - u_max_a = (R**2) * dP / (2 * L * data_1d_nu_distribution * RHO) - label_dict_KL = {"u": np.ones_like(input_dict_KL["x"])} - weight_dict_KL = {"u": np.ones_like(input_dict_KL["x"])} - - class Cross_section_velocity_profile_metric(ppsci.metric.base.Metric): - def __init__(self, keep_batch: bool = False): - super().__init__(keep_batch) - - @paddle.no_grad() - def forward(self, output_dict, label_dict): - u_pred = output_dict["u"].numpy().reshape(N_y, N_x, N_p) - metric_dict = {} - for nu in range(N_p): - err = ( - u_analytical[:, int(round(N_x / 2)), nu] - - u_pred[:, int(round(N_x / 2)), nu] - ) - metric_dict[f"nu = {data_1d_nu[nu]:.2g}"] = np.abs(err).sum() - return metric_dict - - # Kullback-Leibler Divergence - class KL_divergence(ppsci.metric.base.Metric): - def __init__(self, keep_batch: bool = False): - super().__init__(keep_batch) - - @paddle.no_grad() - def forward(self, output_dict, label_dict): - u_max_pred = output_dict["u"].numpy().flatten() - import scipy - - print(f"KL = {scipy.stats.entropy(u_max_a, u_max_pred)}") - return {"KL divergence": scipy.stats.entropy(u_max_a, u_max_pred)} - - dataset_vel = { - "name": "NamedArrayDataset", - "input": input_dict, - "label": label_dict, - "weight": weight_dict, - } - dataset_kl = { - "name": "NamedArrayDataset", - "input": input_dict_KL, - "label": label_dict_KL, - "weight": weight_dict_KL, - } - eval_cfg = { - "sampler": { - "name": "BatchSampler", - "shuffle": False, - "drop_last": False, - }, - "batch_size": 2000, - } - eval_cfg["dataset"] = dataset_vel - velocity_validator = ppsci.validate.SupervisedValidator( - eval_cfg, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - {"Cross_section_velocity_profile_MAE": Cross_section_velocity_profile_metric()}, - name="Cross_section_velocity_profile_MAE", - ) - eval_cfg["dataset"] = dataset_kl - kl_validator = ppsci.validate.SupervisedValidator( - eval_cfg, - ppsci.loss.MSELoss("mean"), - {"u": lambda out: out["u"]}, - {"KL_divergence": KL_divergence()}, - name="KL_divergence", - ) - validator = { - velocity_validator.name: velocity_validator, - kl_validator.name: kl_validator, - } - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - solver.eval() - - output_dict = solver.predict(input_dict, return_numpy=True) - u_pred = output_dict["u"].reshape(N_y, N_x, N_p) - fontsize = 16 - idx_X = int(round(N_x / 2)) # pipe velocity section at L/2 - nu_index = [3, 6, 9, 12, 14, 20, 49] # pick 7 nu samples - ytext = [0.55, 0.5, 0.4, 0.28, 0.1, 0.05, 0.001] # text y position - - # Plot - PLOT_DIR = osp.join(cfg.output_dir, "visu") - os.makedirs(PLOT_DIR, exist_ok=True) - plt.figure(1) - plt.clf() - for idxP in range(len(nu_index)): - ax1 = plt.subplot(111) - plt.plot( - data_1d_y, - u_analytical[:, idx_X, nu_index[idxP]], - color="darkblue", - linestyle="-", - lw=3.0, - alpha=1.0, - ) - plt.plot( - data_1d_y, - u_pred[:, idx_X, nu_index[idxP]], - color="red", - linestyle="--", - dashes=(5, 5), - lw=2.0, - alpha=1.0, - ) - plt.text( - -0.012, - ytext[idxP], - rf"$\nu = $ {data_1d_nu[nu_index[idxP]]:.2g}", - {"color": "k", "fontsize": fontsize - 4}, - ) - - plt.ylabel(r"$u(y)$", fontsize=fontsize) - plt.xlabel(r"$y$", fontsize=fontsize) - ax1.tick_params(axis="x", labelsize=fontsize) - ax1.tick_params(axis="y", labelsize=fontsize) - 
ax1.set_xlim([-0.05, 0.05]) - ax1.set_ylim([0.0, 0.62]) - plt.savefig(osp.join(PLOT_DIR, "pipe_uProfiles.png"), bbox_inches="tight") - - # Distribution of center velocity - # Predicted result - input_dict_test = { - "x": data_2d_xy_test[:, 0:1], - "y": data_2d_xy_test[:, 1:2], - "nu": data_2d_xy_test[:, 2:3], - } - output_dict_test = solver.predict(input_dict_test, return_numpy=True) - u_max_pred = output_dict_test["u"] - - # Analytical result, y = 0 - u_max_a = (R**2) * dP / (2 * L * data_1d_nu_distribution * RHO) - - # Plot - plt.figure(2) - plt.clf() - ax1 = plt.subplot(111) - sns.kdeplot( - u_max_a, - fill=True, - color="black", - label="Analytical", - linestyle="-", - linewidth=3, - ) - sns.kdeplot( - u_max_pred, - fill=False, - color="red", - label="DNN", - linestyle="--", - linewidth=3.5, - ) - plt.legend(prop={"size": fontsize}) - plt.xlabel(r"$u_c$", fontsize=fontsize) - plt.ylabel(r"PDF", fontsize=fontsize) - ax1.tick_params(axis="x", labelsize=fontsize) - ax1.tick_params(axis="y", labelsize=fontsize) - plt.savefig(osp.join(PLOT_DIR, "pipe_unformUQ.png"), bbox_inches="tight") - - -def export(cfg: DictConfig): - from paddle.static import InputSpec - - model_u = ppsci.arch.MLP(**cfg.MODEL.u_net) - model_v = ppsci.arch.MLP(**cfg.MODEL.v_net) - model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) - X_OUT = cfg.X_IN + cfg.L - - class Transform: - def input_trans(self, input): - self.input = input - x, y = input["x"], input["y"] - nu = input["nu"] - b = 2 * np.pi / (X_OUT - cfg.X_IN) - c = np.pi * (cfg.X_IN + X_OUT) / (cfg.X_IN - X_OUT) - sin_x = cfg.X_IN * paddle.sin(b * x + c) - cos_x = cfg.X_IN * paddle.cos(b * x + c) - return {"sin(x)": sin_x, "cos(x)": cos_x, "y": y, "nu": nu} - - def output_trans_u(self, input, out): - return {"u": out["u"] * (cfg.R**2 - self.input["y"] ** 2)} - - def output_trans_v(self, input, out): - return {"v": (cfg.R**2 - self.input["y"] ** 2) * out["v"]} - - def output_trans_p(self, input, out): - return { - "p": ( - (cfg.P_IN - cfg.P_OUT) * (X_OUT - self.input["x"]) / cfg.L - + (cfg.X_IN - self.input["x"]) - * (X_OUT - self.input["x"]) - * out["p"] - ) - } - - transform = Transform() - model_u.register_input_transform(transform.input_trans) - model_v.register_input_transform(transform.input_trans) - model_p.register_input_transform(transform.input_trans) - model_u.register_output_transform(transform.output_trans_u) - model_v.register_output_transform(transform.output_trans_v) - model_p.register_output_transform(transform.output_trans_p) - model = ppsci.arch.ModelList((model_u, model_v, model_p)) - - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - input_keys = ["x", "y", "nu"] - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - NU_MEAN = 0.001 - NU_STD = 0.9 - L = 1.0 # length of pipe - R = 0.05 # radius of pipe - RHO = 1 # density - P_OUT = 0 # pressure at the outlet of pipe - P_IN = 0.1 # pressure at the inlet of pipe - N_x = 10 - N_y = 50 - N_p = 50 - X_IN = 0 - X_OUT = X_IN + L - Y_START = -R - Y_END = Y_START + 2 * R - NU_START = NU_MEAN - NU_MEAN * NU_STD # 0.0001 - NU_END = NU_MEAN + NU_MEAN * NU_STD # 0.1 - - ## prepare data with (?, 2) - data_1d_x = np.linspace( - X_IN, X_OUT, N_x, endpoint=True, dtype=paddle.get_default_dtype() - ) - data_1d_y = np.linspace( - Y_START, Y_END, N_y, endpoint=True, dtype=paddle.get_default_dtype() - ) - data_1d_nu = np.linspace( - NU_START, 
NU_END, N_p, endpoint=True, dtype=paddle.get_default_dtype() - ) - data_2d_xy = ( - np.array(np.meshgrid(data_1d_x, data_1d_y, data_1d_nu)).reshape(3, -1).T - ) - - # Initialize your custom predictor - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - # Prepare input data - input_dict = { - "x": data_2d_xy[:, 0:1], - "y": data_2d_xy[:, 1:2], - "nu": data_2d_xy[:, 2:3], - } - - u_analytical = np.zeros([N_y, N_x, N_p]) - dP = P_IN - P_OUT - - for i in range(N_p): - uy = (R**2 - data_1d_y**2) * dP / (2 * L * data_1d_nu[i] * RHO) - u_analytical[:, :, i] = np.tile(uy.reshape([N_y, 1]), N_x) - - # Validator KL - num_test = 500 - data_1d_nu_distribution = np.random.normal(NU_MEAN, 0.2 * NU_MEAN, num_test) - data_2d_xy_test = ( - np.array( - np.meshgrid((X_IN - X_OUT) / 2.0, 0, data_1d_nu_distribution), np.float32 - ) - .reshape(3, -1) - .T - ) - - # Perform inference - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - # mapping data to cfg.INFER.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - # Process and reshape output as needed - u_pred = output_dict["u"].reshape(N_y, N_x, N_p) - fontsize = 16 - idx_X = int(round(N_x / 2)) # pipe velocity section at L/2 - nu_index = [3, 6, 9, 12, 14, 20, 49] # pick 7 nu samples - ytext = [0.55, 0.5, 0.4, 0.28, 0.1, 0.05, 0.001] # text y position - - # Plot - PLOT_DIR = osp.join(cfg.output_dir, "visu") - os.makedirs(PLOT_DIR, exist_ok=True) - plt.figure(1) - plt.clf() - for idxP in range(len(nu_index)): - ax1 = plt.subplot(111) - plt.plot( - data_1d_y, - u_analytical[:, idx_X, nu_index[idxP]], - color="darkblue", - linestyle="-", - lw=3.0, - alpha=1.0, - ) - plt.plot( - data_1d_y, - u_pred[:, idx_X, nu_index[idxP]], - color="red", - linestyle="--", - dashes=(5, 5), - lw=2.0, - alpha=1.0, - ) - plt.text( - -0.012, - ytext[idxP], - rf"$\nu = $ {data_1d_nu[nu_index[idxP]]:.2g}", - {"color": "k", "fontsize": fontsize - 4}, - ) - - plt.ylabel(r"$u(y)$", fontsize=fontsize) - plt.xlabel(r"$y$", fontsize=fontsize) - ax1.tick_params(axis="x", labelsize=fontsize) - ax1.tick_params(axis="y", labelsize=fontsize) - ax1.set_xlim([-0.05, 0.05]) - ax1.set_ylim([0.0, 0.62]) - plt.savefig(osp.join(PLOT_DIR, "pipe_uProfiles.png"), bbox_inches="tight") - - # Distribution of center velocity - num_test = 500 - data_1d_nu_distribution = np.random.normal(NU_MEAN, 0.2 * NU_MEAN, num_test) - data_2d_xy_test = ( - np.array( - np.meshgrid((X_IN - X_OUT) / 2.0, 0, data_1d_nu_distribution), np.float32 - ) - .reshape(3, -1) - .T - ) - # Predicted result - input_dict_test = { - "x": data_2d_xy_test[:, 0:1], - "y": data_2d_xy_test[:, 1:2], - "nu": data_2d_xy_test[:, 2:3], - } - output_dict_test = predictor.predict(input_dict_test, cfg.INFER.batch_size) - # mapping data to cfg.INFER.output_keys - output_dict_test = { - store_key: output_dict_test[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict_test.keys()) - } - u_max_pred = output_dict_test["u"] - u_max_a = (R**2) * dP / (2 * L * data_1d_nu_distribution * RHO) - - plt.figure(2) - plt.clf() - ax1 = plt.subplot(111) - sns.kdeplot( - u_max_a, - fill=True, - color="black", - label="Analytical", - linestyle="-", - linewidth=3, - ) - sns.kdeplot( - u_max_pred, - fill=False, - color="red", - label="DNN", - linestyle="--", - linewidth=3.5, - ) - plt.legend(prop={"size": fontsize}) - plt.xlabel(r"$u_c$", fontsize=fontsize) - plt.ylabel(r"PDF", 
fontsize=fontsize) - ax1.tick_params(axis="x", labelsize=fontsize) - ax1.tick_params(axis="y", labelsize=fontsize) - plt.savefig(osp.join(PLOT_DIR, "pipe_unformUQ.png"), bbox_inches="tight") - - -@hydra.main(version_base=None, config_path="./conf", config_name="poiseuille_flow.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/Jianxun-Wang/LabelFree-DNN-Surrogate +""" + +import copy +import os +from os import path as osp + +import hydra +import matplotlib.pyplot as plt +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import checker + +if not checker.dynamic_import_to_globals("seaborn"): + raise ModuleNotFoundError("Please install seaborn with `pip install seaborn>=0.13.0`.") # fmt: skip + +import seaborn as sns + + +def train(cfg: DictConfig): + X_OUT = cfg.X_IN + cfg.L + Y_START = -cfg.R + Y_END = Y_START + 2 * cfg.R + NU_START = cfg.NU_MEAN - cfg.NU_MEAN * cfg.NU_STD # 0.0001 + NU_END = cfg.NU_MEAN + cfg.NU_MEAN * cfg.NU_STD # 0.1 + + ## prepare data with (?, 2) + data_1d_x = np.linspace( + cfg.X_IN, X_OUT, cfg.N_x, endpoint=True, dtype=paddle.get_default_dtype() + ) + data_1d_y = np.linspace( + Y_START, Y_END, cfg.N_y, endpoint=True, dtype=paddle.get_default_dtype() + ) + data_1d_nu = np.linspace( + NU_START, NU_END, cfg.N_p, endpoint=True, dtype=paddle.get_default_dtype() + ) + + data_2d_xy = ( + np.array(np.meshgrid(data_1d_x, data_1d_y, data_1d_nu)).reshape(3, -1).T + ) + data_2d_xy_shuffle = copy.deepcopy(data_2d_xy) + np.random.shuffle(data_2d_xy_shuffle) + + input_x = data_2d_xy_shuffle[:, 0].reshape(data_2d_xy_shuffle.shape[0], 1) + input_y = data_2d_xy_shuffle[:, 1].reshape(data_2d_xy_shuffle.shape[0], 1) + input_nu = data_2d_xy_shuffle[:, 2].reshape(data_2d_xy_shuffle.shape[0], 1) + + interior_geom = ppsci.geometry.PointCloud( + interior={"x": input_x, "y": input_y, "nu": input_nu}, + coord_keys=("x", "y", "nu"), + ) + + # set model + model_u = ppsci.arch.MLP(**cfg.MODEL.u_net) + model_v = ppsci.arch.MLP(**cfg.MODEL.v_net) + model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) + + def input_trans(input): + x, y = input["x"], input["y"] + nu = input["nu"] + b = 2 * np.pi / (X_OUT - cfg.X_IN) + c = np.pi * (cfg.X_IN + X_OUT) / (cfg.X_IN - X_OUT) + sin_x = cfg.X_IN * paddle.sin(b * x + c) + cos_x = cfg.X_IN * paddle.cos(b * x + c) + return {"sin(x)": sin_x, "cos(x)": cos_x, "x": x, "y": y, "nu": nu} + + def output_trans_u(input, out): + return {"u": out["u"] * (cfg.R**2 - input["y"] ** 2)} + + def output_trans_v(input, out): + return {"v": (cfg.R**2 - input["y"] ** 2) * out["v"]} + + def 
output_trans_p(input, out): + return { + "p": ( + (cfg.P_IN - cfg.P_OUT) * (X_OUT - input["x"]) / cfg.L + + (cfg.X_IN - input["x"]) * (X_OUT - input["x"]) * out["p"] + ) + } + + model_u.register_input_transform(input_trans) + model_v.register_input_transform(input_trans) + model_p.register_input_transform(input_trans) + model_u.register_output_transform(output_trans_u) + model_v.register_output_transform(output_trans_v) + model_p.register_output_transform(output_trans_p) + model = ppsci.arch.ModelList((model_u, model_v, model_p)) + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(model) + + # set euqation + equation = { + "NavierStokes": ppsci.equation.NavierStokes( + nu="nu", rho=cfg.RHO, dim=2, time=False + ) + } + + # set constraint + ITERS_PER_EPOCH = int( + (cfg.N_x * cfg.N_y * cfg.N_p) / cfg.TRAIN.batch_size.pde_constraint + ) + + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom=interior_geom, + dataloader_cfg={ + "dataset": "NamedArrayDataset", + "num_workers": 1, + "batch_size": cfg.TRAIN.batch_size.pde_constraint, + "iters_per_epoch": ITERS_PER_EPOCH, + "sampler": { + "name": "BatchSampler", + "shuffle": False, + "drop_last": False, + }, + }, + loss=ppsci.loss.MSELoss("mean"), + evenly=True, + name="EQ", + ) + # wrap constraints together + constraint = {pde_constraint.name: pde_constraint} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + save_freq=cfg.TRAIN.save_freq, + equation=equation, + ) + solver.train() + + +def evaluate(cfg: DictConfig): + NU_MEAN = 0.001 + NU_STD = 0.9 + L = 1.0 # length of pipe + R = 0.05 # radius of pipe + RHO = 1 # density + P_OUT = 0 # pressure at the outlet of pipe + P_IN = 0.1 # pressure at the inlet of pipe + N_x = 10 + N_y = 50 + N_p = 50 + X_IN = 0 + X_OUT = X_IN + L + Y_START = -R + Y_END = Y_START + 2 * R + NU_START = NU_MEAN - NU_MEAN * NU_STD # 0.0001 + NU_END = NU_MEAN + NU_MEAN * NU_STD # 0.1 + + ## prepare data with (?, 2) + data_1d_x = np.linspace( + X_IN, X_OUT, N_x, endpoint=True, dtype=paddle.get_default_dtype() + ) + data_1d_y = np.linspace( + Y_START, Y_END, N_y, endpoint=True, dtype=paddle.get_default_dtype() + ) + data_1d_nu = np.linspace( + NU_START, NU_END, N_p, endpoint=True, dtype=paddle.get_default_dtype() + ) + data_2d_xy = ( + np.array(np.meshgrid(data_1d_x, data_1d_y, data_1d_nu)).reshape(3, -1).T + ) + + # set model + model_u = ppsci.arch.MLP(("sin(x)", "cos(x)", "y", "nu"), ("u",), 3, 50, "swish") + model_v = ppsci.arch.MLP(("sin(x)", "cos(x)", "y", "nu"), ("v",), 3, 50, "swish") + model_p = ppsci.arch.MLP(("sin(x)", "cos(x)", "y", "nu"), ("p",), 3, 50, "swish") + + class Transform: + def input_trans(self, input): + self.input = input + x, y = input["x"], input["y"] + nu = input["nu"] + b = 2 * np.pi / (X_OUT - X_IN) + c = np.pi * (X_IN + X_OUT) / (X_IN - X_OUT) + sin_x = X_IN * paddle.sin(b * x + c) + cos_x = X_IN * paddle.cos(b * x + c) + return {"sin(x)": sin_x, "cos(x)": cos_x, "y": y, "nu": nu} + + def output_trans_u(self, input, out): + return {"u": out["u"] * (R**2 - self.input["y"] ** 2)} + + def output_trans_v(self, input, out): + return {"v": (R**2 - self.input["y"] ** 2) * out["v"]} + + def output_trans_p(self, input, out): + return { + "p": ( + (P_IN - P_OUT) * (X_OUT - self.input["x"]) / L + + (X_IN - 
self.input["x"]) * (X_OUT - self.input["x"]) * out["p"] + ) + } + + transform = Transform() + model_u.register_input_transform(transform.input_trans) + model_v.register_input_transform(transform.input_trans) + model_p.register_input_transform(transform.input_trans) + model_u.register_output_transform(transform.output_trans_u) + model_v.register_output_transform(transform.output_trans_v) + model_p.register_output_transform(transform.output_trans_p) + model = ppsci.arch.ModelList((model_u, model_v, model_p)) + + # Validator vel + input_dict = { + "x": data_2d_xy[:, 0:1], + "y": data_2d_xy[:, 1:2], + "nu": data_2d_xy[:, 2:3], + } + u_analytical = np.zeros([N_y, N_x, N_p]) + dP = P_IN - P_OUT + + for i in range(N_p): + uy = (R**2 - data_1d_y**2) * dP / (2 * L * data_1d_nu[i] * RHO) + u_analytical[:, :, i] = np.tile(uy.reshape([N_y, 1]), N_x) + + label_dict = {"u": np.ones_like(input_dict["x"])} + weight_dict = {"u": np.ones_like(input_dict["x"])} + + # Validator KL + num_test = 500 + data_1d_nu_distribution = np.random.normal(NU_MEAN, 0.2 * NU_MEAN, num_test) + data_2d_xy_test = ( + np.array( + np.meshgrid((X_IN - X_OUT) / 2.0, 0, data_1d_nu_distribution), np.float32 + ) + .reshape(3, -1) + .T + ) + input_dict_KL = { + "x": data_2d_xy_test[:, 0:1], + "y": data_2d_xy_test[:, 1:2], + "nu": data_2d_xy_test[:, 2:3], + } + u_max_a = (R**2) * dP / (2 * L * data_1d_nu_distribution * RHO) + label_dict_KL = {"u": np.ones_like(input_dict_KL["x"])} + weight_dict_KL = {"u": np.ones_like(input_dict_KL["x"])} + + class Cross_section_velocity_profile_metric(ppsci.metric.base.Metric): + def __init__(self, keep_batch: bool = False): + super().__init__(keep_batch) + + @paddle.no_grad() + def forward(self, output_dict, label_dict): + u_pred = output_dict["u"].numpy().reshape(N_y, N_x, N_p) + metric_dict = {} + for nu in range(N_p): + err = ( + u_analytical[:, int(round(N_x / 2)), nu] + - u_pred[:, int(round(N_x / 2)), nu] + ) + metric_dict[f"nu = {data_1d_nu[nu]:.2g}"] = np.abs(err).sum() + return metric_dict + + # Kullback-Leibler Divergence + class KL_divergence(ppsci.metric.base.Metric): + def __init__(self, keep_batch: bool = False): + super().__init__(keep_batch) + + @paddle.no_grad() + def forward(self, output_dict, label_dict): + u_max_pred = output_dict["u"].numpy().flatten() + import scipy + + print(f"KL = {scipy.stats.entropy(u_max_a, u_max_pred)}") + return {"KL divergence": scipy.stats.entropy(u_max_a, u_max_pred)} + + dataset_vel = { + "name": "NamedArrayDataset", + "input": input_dict, + "label": label_dict, + "weight": weight_dict, + } + dataset_kl = { + "name": "NamedArrayDataset", + "input": input_dict_KL, + "label": label_dict_KL, + "weight": weight_dict_KL, + } + eval_cfg = { + "sampler": { + "name": "BatchSampler", + "shuffle": False, + "drop_last": False, + }, + "batch_size": 2000, + } + eval_cfg["dataset"] = dataset_vel + velocity_validator = ppsci.validate.SupervisedValidator( + eval_cfg, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + {"Cross_section_velocity_profile_MAE": Cross_section_velocity_profile_metric()}, + name="Cross_section_velocity_profile_MAE", + ) + eval_cfg["dataset"] = dataset_kl + kl_validator = ppsci.validate.SupervisedValidator( + eval_cfg, + ppsci.loss.MSELoss("mean"), + {"u": lambda out: out["u"]}, + {"KL_divergence": KL_divergence()}, + name="KL_divergence", + ) + validator = { + velocity_validator.name: velocity_validator, + kl_validator.name: kl_validator, + } + + # initialize solver + solver = ppsci.solver.Solver( + model, + 
output_dir=cfg.output_dir, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + solver.eval() + + output_dict = solver.predict(input_dict, return_numpy=True) + u_pred = output_dict["u"].reshape(N_y, N_x, N_p) + fontsize = 16 + idx_X = int(round(N_x / 2)) # pipe velocity section at L/2 + nu_index = [3, 6, 9, 12, 14, 20, 49] # pick 7 nu samples + ytext = [0.55, 0.5, 0.4, 0.28, 0.1, 0.05, 0.001] # text y position + + # Plot + PLOT_DIR = osp.join(cfg.output_dir, "visu") + os.makedirs(PLOT_DIR, exist_ok=True) + plt.figure(1) + plt.clf() + for idxP in range(len(nu_index)): + ax1 = plt.subplot(111) + plt.plot( + data_1d_y, + u_analytical[:, idx_X, nu_index[idxP]], + color="darkblue", + linestyle="-", + lw=3.0, + alpha=1.0, + ) + plt.plot( + data_1d_y, + u_pred[:, idx_X, nu_index[idxP]], + color="red", + linestyle="--", + dashes=(5, 5), + lw=2.0, + alpha=1.0, + ) + plt.text( + -0.012, + ytext[idxP], + rf"$\nu = $ {data_1d_nu[nu_index[idxP]]:.2g}", + {"color": "k", "fontsize": fontsize - 4}, + ) + + plt.ylabel(r"$u(y)$", fontsize=fontsize) + plt.xlabel(r"$y$", fontsize=fontsize) + ax1.tick_params(axis="x", labelsize=fontsize) + ax1.tick_params(axis="y", labelsize=fontsize) + ax1.set_xlim([-0.05, 0.05]) + ax1.set_ylim([0.0, 0.62]) + plt.savefig(osp.join(PLOT_DIR, "pipe_uProfiles.png"), bbox_inches="tight") + + # Distribution of center velocity + # Predicted result + input_dict_test = { + "x": data_2d_xy_test[:, 0:1], + "y": data_2d_xy_test[:, 1:2], + "nu": data_2d_xy_test[:, 2:3], + } + output_dict_test = solver.predict(input_dict_test, return_numpy=True) + u_max_pred = output_dict_test["u"] + + # Analytical result, y = 0 + u_max_a = (R**2) * dP / (2 * L * data_1d_nu_distribution * RHO) + + # Plot + plt.figure(2) + plt.clf() + ax1 = plt.subplot(111) + sns.kdeplot( + u_max_a, + fill=True, + color="black", + label="Analytical", + linestyle="-", + linewidth=3, + ) + sns.kdeplot( + u_max_pred, + fill=False, + color="red", + label="DNN", + linestyle="--", + linewidth=3.5, + ) + plt.legend(prop={"size": fontsize}) + plt.xlabel(r"$u_c$", fontsize=fontsize) + plt.ylabel(r"PDF", fontsize=fontsize) + ax1.tick_params(axis="x", labelsize=fontsize) + ax1.tick_params(axis="y", labelsize=fontsize) + plt.savefig(osp.join(PLOT_DIR, "pipe_unformUQ.png"), bbox_inches="tight") + + +def export(cfg: DictConfig): + from paddle.static import InputSpec + + model_u = ppsci.arch.MLP(**cfg.MODEL.u_net) + model_v = ppsci.arch.MLP(**cfg.MODEL.v_net) + model_p = ppsci.arch.MLP(**cfg.MODEL.p_net) + X_OUT = cfg.X_IN + cfg.L + + class Transform: + def input_trans(self, input): + self.input = input + x, y = input["x"], input["y"] + nu = input["nu"] + b = 2 * np.pi / (X_OUT - cfg.X_IN) + c = np.pi * (cfg.X_IN + X_OUT) / (cfg.X_IN - X_OUT) + sin_x = cfg.X_IN * paddle.sin(b * x + c) + cos_x = cfg.X_IN * paddle.cos(b * x + c) + return {"sin(x)": sin_x, "cos(x)": cos_x, "y": y, "nu": nu} + + def output_trans_u(self, input, out): + return {"u": out["u"] * (cfg.R**2 - self.input["y"] ** 2)} + + def output_trans_v(self, input, out): + return {"v": (cfg.R**2 - self.input["y"] ** 2) * out["v"]} + + def output_trans_p(self, input, out): + return { + "p": ( + (cfg.P_IN - cfg.P_OUT) * (X_OUT - self.input["x"]) / cfg.L + + (cfg.X_IN - self.input["x"]) + * (X_OUT - self.input["x"]) + * out["p"] + ) + } + + transform = Transform() + model_u.register_input_transform(transform.input_trans) + model_v.register_input_transform(transform.input_trans) + 
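# A small standalone sketch (plain numpy, made-up raw network outputs) of how the
# registered output transforms enforce the boundary conditions by construction:
# u vanishes at the wall y = +/-R, and p matches P_IN / P_OUT at the pipe ends.
import numpy as np

R, L, X_IN, P_IN, P_OUT = 0.05, 1.0, 0.0, 0.1, 0.0
X_OUT = X_IN + L
y = np.array([-R, 0.0, R])
raw_u = np.ones_like(y)                  # stand-in for the raw MLP output
u = raw_u * (R**2 - y**2)                # -> [0, R**2, 0]: no-slip at the wall
x = np.array([X_IN, X_OUT])
raw_p = np.ones_like(x)
p = (P_IN - P_OUT) * (X_OUT - x) / L + (X_IN - x) * (X_OUT - x) * raw_p
# p -> [P_IN, P_OUT]: the learned term vanishes at both ends of the pipe.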
model_p.register_input_transform(transform.input_trans) + model_u.register_output_transform(transform.output_trans_u) + model_v.register_output_transform(transform.output_trans_v) + model_p.register_output_transform(transform.output_trans_p) + model = ppsci.arch.ModelList((model_u, model_v, model_p)) + + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + input_keys = ["x", "y", "nu"] + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + NU_MEAN = 0.001 + NU_STD = 0.9 + L = 1.0 # length of pipe + R = 0.05 # radius of pipe + RHO = 1 # density + P_OUT = 0 # pressure at the outlet of pipe + P_IN = 0.1 # pressure at the inlet of pipe + N_x = 10 + N_y = 50 + N_p = 50 + X_IN = 0 + X_OUT = X_IN + L + Y_START = -R + Y_END = Y_START + 2 * R + NU_START = NU_MEAN - NU_MEAN * NU_STD # 0.0001 + NU_END = NU_MEAN + NU_MEAN * NU_STD # 0.1 + + ## prepare data with (?, 2) + data_1d_x = np.linspace( + X_IN, X_OUT, N_x, endpoint=True, dtype=paddle.get_default_dtype() + ) + data_1d_y = np.linspace( + Y_START, Y_END, N_y, endpoint=True, dtype=paddle.get_default_dtype() + ) + data_1d_nu = np.linspace( + NU_START, NU_END, N_p, endpoint=True, dtype=paddle.get_default_dtype() + ) + data_2d_xy = ( + np.array(np.meshgrid(data_1d_x, data_1d_y, data_1d_nu)).reshape(3, -1).T + ) + + # Initialize your custom predictor + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # Prepare input data + input_dict = { + "x": data_2d_xy[:, 0:1], + "y": data_2d_xy[:, 1:2], + "nu": data_2d_xy[:, 2:3], + } + + u_analytical = np.zeros([N_y, N_x, N_p]) + dP = P_IN - P_OUT + + for i in range(N_p): + uy = (R**2 - data_1d_y**2) * dP / (2 * L * data_1d_nu[i] * RHO) + u_analytical[:, :, i] = np.tile(uy.reshape([N_y, 1]), N_x) + + # Validator KL + num_test = 500 + data_1d_nu_distribution = np.random.normal(NU_MEAN, 0.2 * NU_MEAN, num_test) + data_2d_xy_test = ( + np.array( + np.meshgrid((X_IN - X_OUT) / 2.0, 0, data_1d_nu_distribution), np.float32 + ) + .reshape(3, -1) + .T + ) + + # Perform inference + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + # Process and reshape output as needed + u_pred = output_dict["u"].reshape(N_y, N_x, N_p) + fontsize = 16 + idx_X = int(round(N_x / 2)) # pipe velocity section at L/2 + nu_index = [3, 6, 9, 12, 14, 20, 49] # pick 7 nu samples + ytext = [0.55, 0.5, 0.4, 0.28, 0.1, 0.05, 0.001] # text y position + + # Plot + PLOT_DIR = osp.join(cfg.output_dir, "visu") + os.makedirs(PLOT_DIR, exist_ok=True) + plt.figure(1) + plt.clf() + for idxP in range(len(nu_index)): + ax1 = plt.subplot(111) + plt.plot( + data_1d_y, + u_analytical[:, idx_X, nu_index[idxP]], + color="darkblue", + linestyle="-", + lw=3.0, + alpha=1.0, + ) + plt.plot( + data_1d_y, + u_pred[:, idx_X, nu_index[idxP]], + color="red", + linestyle="--", + dashes=(5, 5), + lw=2.0, + alpha=1.0, + ) + plt.text( + -0.012, + ytext[idxP], + rf"$\nu = $ {data_1d_nu[nu_index[idxP]]:.2g}", + {"color": "k", "fontsize": fontsize - 4}, + ) + + plt.ylabel(r"$u(y)$", fontsize=fontsize) + plt.xlabel(r"$y$", fontsize=fontsize) + ax1.tick_params(axis="x", labelsize=fontsize) + ax1.tick_params(axis="y", labelsize=fontsize) + ax1.set_xlim([-0.05, 0.05]) 
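# A minimal sketch, with dummy data, of the reshape/indexing used in the plotting code:
# the flat prediction is reshaped to (N_y, N_x, N_p) and sliced at the mid-pipe section.
import numpy as np

N_x, N_y, N_p = 10, 50, 50
flat_u = np.arange(N_y * N_x * N_p, dtype=float)  # stand-in for output_dict["u"]
u = flat_u.reshape(N_y, N_x, N_p)
idx_X = int(round(N_x / 2))                       # velocity section at L/2
profile = u[:, idx_X, 3]                          # one nu sample, shape (N_y,)
assert profile.shape == (N_y,)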
+ ax1.set_ylim([0.0, 0.62]) + plt.savefig(osp.join(PLOT_DIR, "pipe_uProfiles.png"), bbox_inches="tight") + + # Distribution of center velocity + num_test = 500 + data_1d_nu_distribution = np.random.normal(NU_MEAN, 0.2 * NU_MEAN, num_test) + data_2d_xy_test = ( + np.array( + np.meshgrid((X_IN - X_OUT) / 2.0, 0, data_1d_nu_distribution), np.float32 + ) + .reshape(3, -1) + .T + ) + # Predicted result + input_dict_test = { + "x": data_2d_xy_test[:, 0:1], + "y": data_2d_xy_test[:, 1:2], + "nu": data_2d_xy_test[:, 2:3], + } + output_dict_test = predictor.predict(input_dict_test, cfg.INFER.batch_size) + # mapping data to cfg.INFER.output_keys + output_dict_test = { + store_key: output_dict_test[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict_test.keys()) + } + u_max_pred = output_dict_test["u"] + u_max_a = (R**2) * dP / (2 * L * data_1d_nu_distribution * RHO) + + plt.figure(2) + plt.clf() + ax1 = plt.subplot(111) + sns.kdeplot( + u_max_a, + fill=True, + color="black", + label="Analytical", + linestyle="-", + linewidth=3, + ) + sns.kdeplot( + u_max_pred, + fill=False, + color="red", + label="DNN", + linestyle="--", + linewidth=3.5, + ) + plt.legend(prop={"size": fontsize}) + plt.xlabel(r"$u_c$", fontsize=fontsize) + plt.ylabel(r"PDF", fontsize=fontsize) + ax1.tick_params(axis="x", labelsize=fontsize) + ax1.tick_params(axis="y", labelsize=fontsize) + plt.savefig(osp.join(PLOT_DIR, "pipe_unformUQ.png"), bbox_inches="tight") + + +@hydra.main(version_base=None, config_path="./conf", config_name="poiseuille_flow.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/quick_start/case1.py b/examples/quick_start/case1.py index e959c54a89..5405428a65 100644 --- a/examples/quick_start/case1.py +++ b/examples/quick_start/case1.py @@ -1,89 +1,89 @@ -import numpy as np - -import ppsci -from ppsci.utils import logger - -# set random seed(42) for reproducibility -ppsci.utils.misc.set_random_seed(42) - -# set output directory -OUTPUT_DIR = "./output_quick_start_case1" - -# initialize logger while create output directory -logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - -# set 1D-geometry domain([-π, π]) -l_limit, r_limit = -np.pi, np.pi -x_domain = ppsci.geometry.Interval(l_limit, r_limit) - -# set model to 3-layer MLP -model = ppsci.arch.MLP(("x",), ("u",), 3, 64) - -# standard solution of sin(x) -def sin_compute_func(data: dict): - return np.sin(data["x"]) - - -# set constraint on 1D-geometry([-π, π]) -ITERS_PER_EPOCH = 100 # use 100 iterations per training epoch -interior_constraint = ppsci.constraint.InteriorConstraint( - output_expr={"u": lambda out: out["u"]}, - label_dict={"u": sin_compute_func}, - geom=x_domain, - dataloader_cfg={ - "dataset": "NamedArrayDataset", - "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "shuffle": True, - }, - "batch_size": 32, # use 32 samples(points) per iteration for interior constraint - }, - loss=ppsci.loss.MSELoss(), -) -# wrap constraint(s) into one dict -constraint = { - interior_constraint.name: interior_constraint, -} - -# set training hyper-parameters -EPOCHS = 10 -# set optimizer -optimizer = ppsci.optimizer.Adam(2e-3)(model) - -# set visualizer -visual_input_dict = { 
- "x": np.linspace(l_limit, r_limit, 1000, dtype="float32").reshape(1000, 1) -} -visual_input_dict["u_ref"] = np.sin(visual_input_dict["x"]) -visualizer = { - "visualize_u": ppsci.visualize.VisualizerScatter1D( - visual_input_dict, - ("x",), - {"u_pred": lambda out: out["u"], "u_ref": lambda out: out["u_ref"]}, - prefix="u=sin(x)", - ), -} - -# initialize solver -solver = ppsci.solver.Solver( - model, - constraint, - OUTPUT_DIR, - optimizer, - epochs=EPOCHS, - iters_per_epoch=ITERS_PER_EPOCH, - visualizer=visualizer, -) -# train model -solver.train() - -# compute l2-relative error of trained model -pred_u = solver.predict(visual_input_dict, return_numpy=True)["u"] -l2_rel = np.linalg.norm(pred_u - visual_input_dict["u_ref"]) / np.linalg.norm( - visual_input_dict["u_ref"] -) -logger.info(f"l2_rel = {l2_rel:.5f}") - -# visualize prediction after finished training -solver.visualize() +import numpy as np + +import ppsci +from ppsci.utils import logger + +# set random seed(42) for reproducibility +ppsci.utils.misc.set_random_seed(42) + +# set output directory +OUTPUT_DIR = "./output_quick_start_case1" + +# initialize logger while create output directory +logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") + +# set 1D-geometry domain([-π, π]) +l_limit, r_limit = -np.pi, np.pi +x_domain = ppsci.geometry.Interval(l_limit, r_limit) + +# set model to 3-layer MLP +model = ppsci.arch.MLP(("x",), ("u",), 3, 64) + +# standard solution of sin(x) +def sin_compute_func(data: dict): + return np.sin(data["x"]) + + +# set constraint on 1D-geometry([-π, π]) +ITERS_PER_EPOCH = 100 # use 100 iterations per training epoch +interior_constraint = ppsci.constraint.InteriorConstraint( + output_expr={"u": lambda out: out["u"]}, + label_dict={"u": sin_compute_func}, + geom=x_domain, + dataloader_cfg={ + "dataset": "NamedArrayDataset", + "iters_per_epoch": ITERS_PER_EPOCH, + "sampler": { + "name": "BatchSampler", + "shuffle": True, + }, + "batch_size": 32, # use 32 samples(points) per iteration for interior constraint + }, + loss=ppsci.loss.MSELoss(), +) +# wrap constraint(s) into one dict +constraint = { + interior_constraint.name: interior_constraint, +} + +# set training hyper-parameters +EPOCHS = 10 +# set optimizer +optimizer = ppsci.optimizer.Adam(2e-3)(model) + +# set visualizer +visual_input_dict = { + "x": np.linspace(l_limit, r_limit, 1000, dtype="float32").reshape(1000, 1) +} +visual_input_dict["u_ref"] = np.sin(visual_input_dict["x"]) +visualizer = { + "visualize_u": ppsci.visualize.VisualizerScatter1D( + visual_input_dict, + ("x",), + {"u_pred": lambda out: out["u"], "u_ref": lambda out: out["u_ref"]}, + prefix="u=sin(x)", + ), +} + +# initialize solver +solver = ppsci.solver.Solver( + model, + constraint, + OUTPUT_DIR, + optimizer, + epochs=EPOCHS, + iters_per_epoch=ITERS_PER_EPOCH, + visualizer=visualizer, +) +# train model +solver.train() + +# compute l2-relative error of trained model +pred_u = solver.predict(visual_input_dict, return_numpy=True)["u"] +l2_rel = np.linalg.norm(pred_u - visual_input_dict["u_ref"]) / np.linalg.norm( + visual_input_dict["u_ref"] +) +logger.info(f"l2_rel = {l2_rel:.5f}") + +# visualize prediction after finished training +solver.visualize() diff --git a/examples/quick_start/case2.py b/examples/quick_start/case2.py index 16ddabdf16..1da61837b7 100644 --- a/examples/quick_start/case2.py +++ b/examples/quick_start/case2.py @@ -1,112 +1,112 @@ -import numpy as np - -import ppsci -from ppsci.autodiff import jacobian -from ppsci.utils import logger - -# set random 
seed(42) for reproducibility -ppsci.utils.misc.set_random_seed(42) - -# set output directory -OUTPUT_DIR = "./output_quick_start_case2" - -# initialize logger while create output directory -logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") - -# set 1D-geometry domain([-π, π]) -l_limit, r_limit = -np.pi, np.pi -x_domain = ppsci.geometry.Interval(l_limit, r_limit) - -# set model to 3-layer MLP -model = ppsci.arch.MLP(("x",), ("u",), 3, 64) - -# standard solution of sin(x) -def sin_compute_func(data: dict): - return np.sin(data["x"]) - - -# standard solution of cos(x) -def cos_compute_func(data: dict): - return np.cos(data["x"]) - - -# set constraint on 1D-geometry([-π, π]) -ITERS_PER_EPOCH = 100 # use 100 iterations per training epoch -interior_constraint = ppsci.constraint.InteriorConstraint( - output_expr={"du_dx": lambda out: jacobian(out["u"], out["x"])}, - label_dict={"du_dx": cos_compute_func}, - geom=x_domain, - dataloader_cfg={ - "dataset": "NamedArrayDataset", - "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "shuffle": True, - }, - "batch_size": 32, # use 32 samples(points) per iteration for interior constraint - }, - loss=ppsci.loss.MSELoss(), -) -bc_constraint = ppsci.constraint.BoundaryConstraint( - {"u": lambda d: d["u"]}, - {"u": lambda d: sin_compute_func(d) + 2}, # (1) - x_domain, - dataloader_cfg={ - "dataset": "NamedArrayDataset", - "iters_per_epoch": ITERS_PER_EPOCH, - "sampler": { - "name": "BatchSampler", - "shuffle": True, - }, - "batch_size": 1, # use 1 sample(point) per iteration for boundary constraint - }, - loss=ppsci.loss.MSELoss(), - criteria=lambda x: np.isclose(x, l_limit), -) -# wrap constraint(s) into one dict -constraint = { - interior_constraint.name: interior_constraint, - bc_constraint.name: bc_constraint, -} - -# set training hyper-parameters -EPOCHS = 10 -# set optimizer -optimizer = ppsci.optimizer.Adam(2e-3)(model) - -# set visualizer -visual_input_dict = { - "x": np.linspace(l_limit, r_limit, 1000, dtype="float32").reshape(1000, 1) -} -visual_input_dict["u_ref"] = np.sin(visual_input_dict["x"]) + 2.0 -visualizer = { - "visualize_u": ppsci.visualize.VisualizerScatter1D( - visual_input_dict, - ("x",), - {"u_pred": lambda out: out["u"], "u_ref": lambda out: out["u_ref"]}, - prefix="u=sin(x)", - ), -} - -# initialize solver -solver = ppsci.solver.Solver( - model, - constraint, - OUTPUT_DIR, - optimizer, - epochs=EPOCHS, - iters_per_epoch=ITERS_PER_EPOCH, - visualizer=visualizer, -) -# train model -solver.train() - -# compute l2-relative error of trained model -pred_u = solver.predict(visual_input_dict, return_numpy=True)["u"] -l2_rel = np.linalg.norm(pred_u - visual_input_dict["u_ref"]) / np.linalg.norm( - visual_input_dict["u_ref"] -) -logger.info(f"l2_rel = {l2_rel:.5f}") - -# visualize prediction after finished training -solver.visualize() +import numpy as np + +import ppsci +from ppsci.autodiff import jacobian +from ppsci.utils import logger + +# set random seed(42) for reproducibility +ppsci.utils.misc.set_random_seed(42) + +# set output directory +OUTPUT_DIR = "./output_quick_start_case2" + +# initialize logger while create output directory +logger.init_logger("ppsci", f"{OUTPUT_DIR}/train.log", "info") + +# set 1D-geometry domain([-π, π]) +l_limit, r_limit = -np.pi, np.pi +x_domain = ppsci.geometry.Interval(l_limit, r_limit) + +# set model to 3-layer MLP +model = ppsci.arch.MLP(("x",), ("u",), 3, 64) + +# standard solution of sin(x) +def sin_compute_func(data: dict): + return np.sin(data["x"]) + + +# 
standard solution of cos(x) +def cos_compute_func(data: dict): + return np.cos(data["x"]) + + +# set constraint on 1D-geometry([-π, π]) +ITERS_PER_EPOCH = 100 # use 100 iterations per training epoch +interior_constraint = ppsci.constraint.InteriorConstraint( + output_expr={"du_dx": lambda out: jacobian(out["u"], out["x"])}, + label_dict={"du_dx": cos_compute_func}, + geom=x_domain, + dataloader_cfg={ + "dataset": "NamedArrayDataset", + "iters_per_epoch": ITERS_PER_EPOCH, + "sampler": { + "name": "BatchSampler", + "shuffle": True, + }, + "batch_size": 32, # use 32 samples(points) per iteration for interior constraint + }, + loss=ppsci.loss.MSELoss(), +) +bc_constraint = ppsci.constraint.BoundaryConstraint( + {"u": lambda d: d["u"]}, + {"u": lambda d: sin_compute_func(d) + 2}, # (1) + x_domain, + dataloader_cfg={ + "dataset": "NamedArrayDataset", + "iters_per_epoch": ITERS_PER_EPOCH, + "sampler": { + "name": "BatchSampler", + "shuffle": True, + }, + "batch_size": 1, # use 1 sample(point) per iteration for boundary constraint + }, + loss=ppsci.loss.MSELoss(), + criteria=lambda x: np.isclose(x, l_limit), +) +# wrap constraint(s) into one dict +constraint = { + interior_constraint.name: interior_constraint, + bc_constraint.name: bc_constraint, +} + +# set training hyper-parameters +EPOCHS = 10 +# set optimizer +optimizer = ppsci.optimizer.Adam(2e-3)(model) + +# set visualizer +visual_input_dict = { + "x": np.linspace(l_limit, r_limit, 1000, dtype="float32").reshape(1000, 1) +} +visual_input_dict["u_ref"] = np.sin(visual_input_dict["x"]) + 2.0 +visualizer = { + "visualize_u": ppsci.visualize.VisualizerScatter1D( + visual_input_dict, + ("x",), + {"u_pred": lambda out: out["u"], "u_ref": lambda out: out["u_ref"]}, + prefix="u=sin(x)", + ), +} + +# initialize solver +solver = ppsci.solver.Solver( + model, + constraint, + OUTPUT_DIR, + optimizer, + epochs=EPOCHS, + iters_per_epoch=ITERS_PER_EPOCH, + visualizer=visualizer, +) +# train model +solver.train() + +# compute l2-relative error of trained model +pred_u = solver.predict(visual_input_dict, return_numpy=True)["u"] +l2_rel = np.linalg.norm(pred_u - visual_input_dict["u_ref"]) / np.linalg.norm( + visual_input_dict["u_ref"] +) +logger.info(f"l2_rel = {l2_rel:.5f}") + +# visualize prediction after finished training +solver.visualize() diff --git a/examples/quick_start/case3.ipynb b/examples/quick_start/case3.ipynb index 452502ceea..ce0c6fd1e3 100644 --- a/examples/quick_start/case3.ipynb +++ b/examples/quick_start/case3.ipynb @@ -1,593 +1,593 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 简介\n", - "\n", - "本项目来源于共创计划开发性课题CAE领域的基于飞桨+DeepXDE/PaddleScience的复杂结构受力分析。深度学习擅长数据驱动,而工程结构有各种控制方程,PINN(Physics-informed Neural Network)方法利用控制方程加速深度学习神经网络收敛,甚至在无训练数据的情况下实现无监督学习。\n", - "\n", - "板是工程结构中常见构件,板控制方程存在高阶微分,这个问题的解决可以为后续解决复杂结构问题打下良好基础。从标准教科书中可以获得薄板小挠度理论的基本方程以及相关的边界条件表达式,教科书可参考《钱伟长,叶开沅,弹性力学,科学出版社,1956》。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "薄板小挠度理论的基本方程为:\n", - "$$\n", - "\\frac{\\partial^4 w}{\\partial x^4}+2 \\frac{\\partial^4 w}{\\partial x^2 \\partial y^2}+\\frac{\\partial^4 w}{\\partial y^4}=\\frac{q}{D}\n", - "$$\n", - "\n", - "其中 $w(x,y)$ 表示薄板的挠度,即薄板在垂直载荷作用下的变形或偏移量,$x,y$ 表示薄板在平面内的坐标,$D$ 为薄板的弯曲刚度,$q$ 是作用在薄板上的面载荷,表示每单位面积上的外部载荷。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "在本问题中,矩形薄板 $x$ 方向长 $2m$,$y$ 方向宽 $1m$,板厚 $10mm$,$x$ 方向左右两边处于简支状态(可以转动但不能位移),$y$ 方向上下两边自由(没有任何约束,可以自由移动和转动)。\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, 
- "source": [ - "左右两边 $(x=-1 \\mid x=+1)$ 为简支边界条件,因此挠度 $w$ 和弯矩 $M_x$ 都为 $0$ :\n", - "\n", - "$$\n", - "(w)_{x=-1 \\mid x=+1}=0, \\quad\\left(M_x\\right)_{x=-1 \\mid x=+1}=0\n", - "$$\n", - "\n", - "\n", - "由于 $M_x=-D\\left(\\frac{\\partial^2 w}{\\partial x^2}+\\mu \\frac{\\partial^2 w}{\\partial y^2}\\right)$, 且 $\\frac{\\partial^2 w}{\\partial y^2}=0$, 所以简支边界条件可化简为:\n", - "\n", - "$$\n", - "(w)_{x=-1 \\mid x=+1}=0, \\quad\\left(\\frac{\\partial^2 w}{\\partial x^2}\\right)_{x=-1 \\mid x=+1}=0\n", - "$$\n", - "\n", - "\n", - "上下两边 $(y=-0.5 \\mid y=+0.5)$ 为自由边界条件, 弯矩、扭矩、横向剪切力都为 $0$ :\n", - "\n", - "$$\n", - "\\left(M_y\\right)_{\\mathrm{y}=-0.5 \\mid \\mathrm{y}=+0.5}=0, \\quad\\left(M_{x y}\\right)_{\\mathrm{y}=-0.5 \\mid \\mathrm{y}=+0.5}=0, \\quad\\left(Q_y\\right)_{\\mathrm{y}=-0.5 \\mid \\mathrm{y}=+0.5}=0\n", - "$$\n", - "\n", - "\n", - "由于 $M_y=-D\\left(\\frac{\\partial^2 w}{\\partial y^2}+\\mu \\frac{\\partial^2 w}{\\partial x^2}\\right), \\quad M_{x y}=-D(1-\\mu) \\frac{\\partial^2 w}{\\partial x \\partial y}, \\quad Q_y=-D \\frac{\\partial}{\\partial y}\\left(\\frac{\\partial^2 w}{\\partial x^2}+\\frac{\\partial^2 w}{\\partial y^2}\\right)$ ,且扭矩可以变换为等效剪力, 扭矩和横向剪力合并为 $\\left(Q_y+\\frac{\\partial M_{x y}}{\\partial x}\\right)_{\\mathrm{y}=-0.5 \\mid \\mathrm{y}=+0.5}=0$, 所以自由边界条件用挠度表示为\n", - "\n", - "$$\n", - "\\left(\\frac{\\partial^2 w}{\\partial y^2}+\\mu \\frac{\\partial^2 w}{\\partial x^2}\\right)_{y=-0.5 \\mid y=+0.5}=0, \\quad\\left(\\frac{\\partial^3 w}{\\partial y^3}+(2-\\mu) \\frac{\\partial^3 w}{\\partial x^2 \\partial y}\\right)_{y=-0.5 \\mid y=+0.5}=0\n", - "$$\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 1. 设置计算域" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from matplotlib import pyplot as plt\n", - "\n", - "import ppsci\n", - "import sympy as sp\n", - "import numpy as np\n", - "\n", - "# 设置薄板计算域长、宽参数\n", - "Lx = 2.0 # 薄板x方向长度(m)\n", - "Ly = 1.0 # 薄板y方向宽度(m)\n", - "\n", - "# 设置方程参数\n", - "E = 210000.0e6 # 弹性模量(Pa)\n", - "mu = 0.28 # 薄板泊松比(无量纲)\n", - "h = 0.01 # 薄板厚度(m)\n", - "D = E * (h**3) / (12 * (1 - mu**2)) # 薄板弯曲刚度(kN*m^2)\n", - "q = 1000.0 # 均布载荷(N/m^2)\n", - "\n", - "rectangle = ppsci.geometry.Rectangle([-Lx / 2, -Ly / 2], [Lx / 2, Ly / 2]) # 创建薄板几何形状" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. 编写方程中的表达式" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/latex": [ - "$\\displaystyle \\frac{\\partial^{4}}{\\partial x^{4}} w{\\left(x,y \\right)} + \\frac{\\partial^{4}}{\\partial y^{4}} w{\\left(x,y \\right)} + 2 \\frac{\\partial^{4}}{\\partial y^{2}\\partial x^{2}} w{\\left(x,y \\right)} - 0.0526628571428571$" - ], - "text/plain": [ - "Derivative(w(x, y), (x, 4)) + Derivative(w(x, y), (y, 4)) + 2*Derivative(w(x, y), (x, 2), (y, 2)) - 0.0526628571428571" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# 使用sympy库计算符号公式\n", - "x, y = sp.symbols(\"x y\") # 定义符号变量x, y\n", - "w = sp.Function(\"w\")(x, y) # 定义函数 w(x,y)\n", - "left = w.diff(x, 4) + 2 * w.diff(x, 2).diff(y, 2) + w.diff(y, 4) # 定义薄板弯曲的双调和方程的左侧部分\n", - "right = q / D # 方程右侧的载荷项,表示均布载荷 q 除以板的弯曲刚度 D。这是薄板在载荷下的响应。\n", - "res = left - right # 定义方程残差\n", - "res # 可视化显示方程残差" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. 
初始化神经网络模型" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W1117 17:10:41.976225 7764 gpu_resources.cc:119] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 12.0, Runtime API Version: 11.8\n", - "W1117 17:10:41.977502 7764 gpu_resources.cc:164] device: 0, cuDNN Version: 8.7.\n" - ] - }, - { - "data": { - "text/plain": [ - "MLP(\n", - " (linears): LayerList(\n", - " (0): Linear(in_features=2, out_features=50, dtype=float32)\n", - " (1): Linear(in_features=50, out_features=50, dtype=float32)\n", - " (2): Linear(in_features=50, out_features=50, dtype=float32)\n", - " (3): Linear(in_features=50, out_features=50, dtype=float32)\n", - " )\n", - " (acts): LayerList(\n", - " (0): Tanh()\n", - " (1): Tanh()\n", - " (2): Tanh()\n", - " (3): Tanh()\n", - " )\n", - " (last_fc): Linear(in_features=50, out_features=1, dtype=float32)\n", - ")" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "model = ppsci.arch.MLP([\"x\", \"y\"], [\"w\"], 4, 50)\n", - "model # 可视化显示模型结构" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4. 初始化控制方程和边界条件\n", - "\n", - "接下来讲解如何将开头简介中的控制方程和边界条件转换为深度学习代码。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4.1 控制方程\n", - "\n", - "控制方程表示在矩形薄板区域内部,挠度和弯矩所满足的微分方程。因此可以在矩形内部采样足够多的配点(collation points)用于模型训练,如下所示:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkIAAAHHCAYAAABTMjf2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAADfYUlEQVR4nOy9fXxV1ZU+/iShQIIEvCSgJihCGQTBWrWCtBnjkIpWnShhAqgIrca2VkpAOqOVgcRi1YoSRFs1teL4gkaITduxaqGkkxYFX6r1BfiCklpQIBAlFFKQm/v7w99Jzz05L2vtvfa5N3Cefvx8ys099+yzz9prr71enpWRSCQSiBAhQoQIESJEOAaRmeoBRIgQIUKECBEipAqRIRQhQoQIESJEOGYRGUIRIkSIECFChGMWkSEUIUKECBEiRDhmERlCESJEiBAhQoRjFpEhFCFChAgRIkQ4ZhEZQhEiRIgQIUKEYxaRIRQhQoQIESJEOGYRGUIRIkSIECFChGMWkSEUIUI3wPLly5GRkYHm5uZUD0UbGRkZqKqqUrp2yJAhmDlzpuh4UoV0fKd33303hg4diqysLJx55pmpHk6ECKEgMoQiRPj/YW1Mr732GvvagwcPoqqqCo2NjfIDCxnPP/+8sqESIRy89957qKqqEjWiXnrpJfznf/4nvvrVr+LRRx/Fj3/8Y7HfdsPMmTORkZHR+V9ubi6+9KUv4Z577sGhQ4c6v1dVVZX0vZycHJx88sm47LLL8OijjyZ91+u37f/17t3b6HNF6H7okeoBRIhwNODgwYOorq4GABQXF4v//vTp0zF16lT06tVL/LedeP755/HAAw8YM4ba29vRo4ea6tm8eTMyM4+O85vOO33vvfdQXV2N4uJiDBkyRGQ8v//975GZmYlHHnkEPXv2FPnNIPTq1Qs///nPAQCffvopVq1ahXnz5uHVV1/F008/nfTdn/3sZzjuuONw6NAh7NixAy+++CK+9a1voaamBr/5zW8wePBgz9+2Iysry9wDReiWiAyhCBHSGAcOHECfPn2QlZUlqsAPHjyInJwc7d85cuQIOjo6WBunzok8DEMwLEi/U13s3r0b2dnZYkZQIpHAP/7xD2RnZ3t+p0ePHrj66qs7/33DDTdg7NixeOaZZ3DvvffipJNO6vzb5MmTkZeX1/nvBQsW4Mknn8Q111yD//iP/8Arr7zi+9sRInjh6DhaRYhgCDNnzsRxxx2HHTt24PLLL8dxxx2H/Px8zJs3D/F4HADQ3NyM/Px8AEB1dXWnC97uUdm0aRMmT56MWCyG3r1745xzzsGvfvWrpHtZobk//OEPuOGGGzBw4EAUFhYm/c0ZCvnpT3+K008/Hb169cJJJ52E733ve/j000+TvlNcXIzRo0fj9ddfx7/+678iJycHP/zhDz2f94EHHgCApHCC9ZwZGRlYvHgxampqMGzYMPTq1QvvvfceDh8+jAULFuDss89Gv3790KdPHxQVFWHt2rVd7uGcGyv0sXXrVsycORP9+/dHv3798M1vfhMHDx5MutaZI2TNy5/+9CfMnTsX+fn56NOnD6644gq0tLQkXdvR0YGqqiqcdNJJyMnJwQUXXID33nuPlHdkf/YlS5bglFNOQXZ2Ns4//3y88847Xb7/+9//HkVFRejTpw/69++P0tJSbNy4Mek7bu90yJAhuPTSS/HHP/4R5557Lnr37o2hQ4fif/7nf5Ku+4//+A8AwAUXXND5jqyw7GuvvYaJEyciLy8P2dnZOPXUU/Gtb33L9/kyMjLw6KOP4sCBA52/t3z5cgCfG7s/+tGPOt/3kCFD8MMf/rBLSMoa+4svvohzzjkH2dnZeOihh3zv60RmZmanR5US9rvqqqtw3XXXYf369fjd737HuleECBYij1CECAGIx+OYOHEixo4di8WLF2P16tW4
5557MGzYMHz3u99Ffn4+fvazn+G73/0urrjiCkyaNAkAcMYZZwAA3n33XXz1q19FQUEBbr75ZvTp0wd1dXW4/PLLsWrVKlxxxRVJ97vhhhuQn5+PBQsW4MCBA57jqqqqQnV1NUpKSvDd734Xmzdvxs9+9jO8+uqr+NOf/oQvfOELnd/du3cvLr74YkydOhVXX301Bg0a5Pqb3/72t/HRRx/hd7/7HR5//HHX7zz66KP4xz/+geuvvx69evVCLBZDW1sbfv7zn2PatGmoqKjA/v378cgjj2DixInYsGEDKfG2vLwcp556Ku644w688cYb+PnPf46BAwfirrvuCrx21qxZOP7447Fw4UI0NzejpqYGN954I5555pnO79xyyy34yU9+gssuuwwTJ07EW2+9hYkTJ+If//hH4O9b+J//+R/s378f3/ve9/CPf/wDS5cuxb/927/h7bff7pzT1atX4+KLL8bQoUNRVVWF9vZ2LFu2DF/96lfxxhtvBIaytm7dismTJ+Paa6/FjBkz8Itf/AIzZ87E2WefjdNPPx3/+q//iu9///u477778MMf/hAjR44EAIwcORK7d+/GhRdeiPz8fNx8883o378/mpubUV9f73vPxx9/HA8//DA2bNjQGU4aP348AOC6667DY489hsmTJ+Omm27C+vXrcccdd2Djxo147rnnkn5n8+bNmDZtGr797W+joqICI0aMIM+thffffx8AMGDAANL3p0+fjocffhgvvfQSvv71ryf9bc+ePV2+37NnT+Tm5rLHFeEoRiJChAiJRCKRePTRRxMAEq+++mrnZzNmzEgASNx2221J3/3yl7+cOPvsszv/3dLSkgCQWLhwYZffnTBhQmLMmDGJf/zjH52fdXR0JMaPH58YPnx4l/t/7WtfSxw5csR1bNu2bUskEonE7t27Ez179kxceOGFiXg83vm9+++/PwEg8Ytf/KLzs/PPPz8BIPHggw+S5uF73/tewk01bNu2LQEgkZubm9i9e3fS344cOZI4dOhQ0meffPJJYtCgQYlvfetbSZ8752nhwoUJAF2+d8UVVyQGDBiQ9Nkpp5ySmDFjRue/rXkpKSlJdHR0dH4+Z86cRFZWVuLTTz9NJBKJxM6dOxM9evRIXH755Um/V1VVlQCQ9JtusJ49Ozs7sX379s7P169fnwCQmDNnTudnZ555ZmLgwIGJvXv3dn721ltvJTIzMxPXXHNNl7Fb79R6PgCJ//u//+v8bPfu3YlevXolbrrpps7Pnn322QSAxNq1a5PG+dxzz3WRYSpmzJiR6NOnT9Jnb775ZgJA4rrrrkv6fN68eQkAid///vddxv7CCy+w7tfS0pJoaWlJbN26NfHjH/84kZGRkTjjjDM6v2fJR0tLi+vvfPLJJwkAiSuuuCLptwG4/jdx4kTS+CIcO4hCYxEiEPCd73wn6d9FRUX44IMPAq9rbW3F73//e5SXl2P//v3Ys2cP9uzZg71792LixInYsmULduzYkXRNRUVFYO7I6tWrcfjwYVRWViYlD1dUVCA3Nxf/+7//m/T9Xr164Zvf/GbgeCkoKyvrDAVayMrK6swt6ejoQGtrK44cOYJzzjkHb7zxBul33eZ47969aGtrC7z2+uuv7wzhWdfG43H89a9/BQCsWbMGR44cwQ033JB03axZs0hjs3D55ZejoKCg89/nnnsuxo4di+effx4A8PHHH+PNN9/EzJkzEYvFOr93xhln4Otf/3rn9/wwatQoFBUVdf47Pz8fI0aMIMlb//79AQC/+c1v8Nlnn1EfyxPWeOfOnZv0+U033QQAXeTs1FNPxcSJE8m/f+DAAeTn5yM/Px9f/OIX8cMf/hDnnXdeF0+TH4477jgAwP79+5M+7927N373u991+e/OO+8k/3aEYwNRaCxChAD07t27y8Z//PHH45NPPgm8duvWrUgkEvjv//5v/Pd//7frd3bv3p20uZ566qmBv2tt8M7QQ8+ePTF06NDOv1soKCgQS4L1Gt9jjz2Ge+65B5s2bUrahCnPAwAnn3xy0r+PP/54AMAnn3wSGMrwuxb453x98YtfTPpeLBbr/C4Fw4cP7/LZv/zLv6Curi7pPm4hoZEjR+LFF1/sTID3gvNZALq8nX/++SgrK0N1dTWWLFmC4uJiXH755bjyyiuVEs3/+te/IjMzs8u8nXDCCejfv38XOaO+awu9e/fGr3/9awCfG+unnnpqZ14cFX//+98BAH379k36PCsrCyUlJazfinBsIjKEIkQIgE5lT0dHBwBg3rx5nidl5ybjV2WjCsnfdPutJ554AjNnzsTll1+OH/zgBxg4cCCysrJwxx13dOZ8BMFrnhOJhNFr0w06z5KRkYGVK1filVdewa9//evOEvN77rkHr7zySqf3hAu7t80PXDmTMFasZHXnOooQgYrIEIoQQQBeG8XQoUMBAF/4whdET6ennHIKgM+TU617AMDhw4exbds2rXtRNz07Vq5ciaFDh6K+vj7p+oULFyqPQxLWfG3dujXJa7F3716Sp8XCli1bunz2//7f/+tMgLa/Fyc2bdqEvLw8X28QFUHvaNy4cRg3bhxuv/12PPXUU7jqqqvw9NNP47rrrmPd55RTTkFHRwe2bNnSmZQNALt27cKnn37a+byphJXUzwnJRYhgR5QjFCGCACxOHmfp+sCBA1FcXIyHHnoIH3/8cZfrnCXeVJSUlKBnz5647777kjwFjzzyCPbt24dLLrlE6XcBdG7Uzmfxg+XFsI9l/fr1ePnll5XHIYkJEyagR48e+NnPfpb0+f3338/6nV/+8pdJOV0bNmzA+vXrcfHFFwMATjzxRJx55pl47LHHkubvnXfewUsvvYRvfOMb6g9hg9c7+uSTT7p4jqyKPTcG5iBY462pqUn6/N577wUALTmTwFNPPYWf//znOO+88zBhwoSUjiVC90XkEYoQQQDZ2dkYNWoUnnnmGfzLv/wLYrEYRo8ejdGjR+OBBx7A1772NYwZMwYVFRUYOnQodu3ahZdffhnbt2/HW2+9xb5ffn4+brnlFlRXV+Oiiy7Cv//7v2Pz5s346U9/iq985StaRHJnn302AOD73/8+Jk6ciKysLEydOtX3mksvvRT19fW44oorcMkll2Dbtm148MEHMWrUqM4cjlRi0KBBmD17Nu655x78+7//Oy666CK89dZb+O1vf4u8vDyyF+yLX/wivva1r+G73/0uDh06hJqaGgwYMAD/+Z//2fmdu+++GxdffDHOO+88XHvttZ3l8/369RNj6z7zzDORlZWFu+66C/v27UOvXr3wb//2b3jqqafw05/+FFdccQWGDRuG/fv3o7a2Frm5uUpG2Je+9CXMmDEDDz/8MD799FOcf/752LBhAx577DFcfvnluOCCC0Seh4KVK1fiuOOOw+HDhzuZpf/0pz/hS1/6Ep599tku3z9y5AieeOIJ19+64oorRDxzEY4ORIZQhAhC+PnPf45Zs2Zhzpw5OHz4MBYuXIjRo0dj1KhReO2111BdXY3ly5dj7969GDhwIL785S9jwYI
FyverqqpCfn4+7r//fsyZMwexWAzXX389fvzjHydxCHExadIkzJo1C08//TSeeOIJJBKJQENo5syZ2LlzJx566CG8+OKLGDVqFJ544gk8++yzadN/7a677kJOTg5qa2uxevVqnHfeeXjppZfwta99jcx2fc011yAzMxM1NTXYvXs3zj33XNx///048cQTO79TUlKCF154AQsXLsSCBQvwhS98Aeeffz7uuusudjKxF0444QQ8+OCDuOOOO3DttdciHo9j7dq1nYbK008/jV27dqFfv34499xz8eSTTyrf++c//zmGDh2K5cuX47nnnsMJJ5yAW265JfSw53e/+10AnydY5+Xl4cwzz8QvfvELz0TwQ4cOYfr06a6/tW3btsgQitCJjER3zCaMECFCBAF8+umnOP7447Fo0SLceuutnt9rbm7Gqaeeirvvvhvz5s0LcYQRIkQwjShHKEKECMcE2tvbu3xm5b6YaJQbIUKE7oEoNBYhQoRjAs888wyWL1+Ob3zjGzjuuOPwxz/+EStWrMCFF16Ir371q6keXoQIEVKEyBCKECHCMYEzzjgDPXr0wE9+8hO0tbV1JlAvWrQo1UOLECFCChHlCEWIECFChAgRjllEOUIRIkSIECFChGMWkSEUIUKECBEiRDhmEeUIBaCjowMfffQR+vbtq9R6IEKECBEiRIgQPhKJBPbv34+TTjoJmZnefp/IEArARx99hMGDB6d6GBEiRIgQIUIEBfztb39DYWGh598jQygAffv2BfD5RObm5qZ4NBEiRIgQIUIECtra2jB48ODOfdwLkSEUACsclpubGxlCESJEiBAhQjdDUFpLlCwdIUKECBEiRDhmERlCESJEiBAhQoRjFpEhFCFChAgRIkQ4ZhEZQhEiRIgQIUKEYxaRIRQhQoQIESJEOGYRGUIRIkSIECFChGMWkSEUIUKECBEiRDhmERlCESJEiBAhQoRjFpEhFCFChAgRIkQ4ZhExS0dgI94RR9OHTfh4/8c4se+JKDq5CFmZWakeVoQIEQQQre8IxxoiQygCC/Ub6zH7hdnY3ra987PC3EIsvWgpJo2clMKRHZ2INqUIYSJa3+owvVYjXWAOGYlEIpHqQaQz2tra0K9fP+zbt++Y7zVWv7Eek+smI4FkkcnA531cVpavFFOW0aJPzaYUzXsyjqX5CHN9H20wvVYjA1UN1P07MoQCEBlCnyPeEceQpUOSFqIdGchAYW4hts3epr1RRIs+NZtSGPPenQyLY8kQDXN9m0Qq5s/0Wo0MVHVEhpAQ0tkQCnPRNzY34oLHLgj83toZa1E8pFj5PtGiT82mFMa8dycD92g1RL0Q1vrWQZC+S5XhanKtHi0GaqpA3b+jqrFuivqN9RiydAgueOwCXFl/JS547AIMWToE9RvrEe+Io7G5ESveXoHG5kbEO+La9/t4/8ei33NDvCOO2S/M7rL5AOj8rPKFSu3nMTE/kmj6sMlT8QGfz8Xf2v6Gpg+bRJ4ljHm3DAvnc+1o24HJdZNRv7Fe+belEZYc2pHq+QljfevAT99Zf0/F/HHWajr8frrrvlQhSpbuhvA6re5o24GyujIMyB6Ave17Oz+XOBWd2PdE0e+5gbPoVU+l3cErQd1sGjY1YPpz07WfhTrvyzYsw6xzZ7FPnkGGRQYyUPlCJUpHlKbFqTYMObRDen5UPMVhrG9V+Om7yXWTUTe5DnNempMS+TJtQEr+/rEU6uUi8gh1M1BOq3YjCAg+FVFOCUUnF6Ewt7AzNOBEBjIwOHcwik4u4j5SJ0wrlVSfuqmgbjY162tEnoU6n3NenJN0CqfC9KlZGmF7RyTnJ8hz4oUw1rcKKPruhudvSJl8cQxIFW+MlIGaCt2nKoupQGQIdTMEKU03+LnzqcKalZmFpRctBYAuytL6d81FNVrWvslTaSrCHapu6KBNCQCyMtznWeVZOPNp0tD6eP/HaeG6D9s7ImV46Wx2nPUd5juiGIktB1tIv2UirEc1IFsOtKTMQD0WQ71cRIZQN4PqYnY7FXGFddLISVhZvhIFuQVJnxfmFookj5o8leqculUUv85piLIpxRPeY+CegCmGl/23ATOG1pbWLWlxggzbOyJheElsdl7ru6BvAaqKq3DoyCHc9ofbcErNKaG9I0njxURYj7JWp46eiikrpxg3UL1gyiPrpRdTYXjpIjKEuhl0F7OlWFSFddLISWie3Yy1M9biqUlPYe2Mtdg2e1ugEUQxJkx6nVRP3SoGjcRpyM/orBxXqfQsXvCbdzdIG1oZyMCA7AGoaqwSOUHqeiy4cqh7PwnDS2qzc67v6uJqAMDCxoW4sv5KLGxciB37dyRdY/KUT9V3eTl5KQvr+a3Vusl1WPHOCiMGKvUAaiLU66cXu1soHIgMoZRAR3FyTu9usBSLjrBmZWaheEgxpo2ZhuIhxYGGCceYMOV1Ujl1qxg0kqchL6OzdEQp+1ko93Kbdz+4KU432Q4yLKx5kZgzqbwEqhxK3E/iACC52Vnru1ePXp8bp/v9Q/HOdyQZOqMaiT/9xk87/+38O6Aftg+C21rdOmsrtu/fbsRApR5AAflQb5BebNjcQPqdVFUguiHiEQqANI+QROa+JYiA++bhBiffxIq3V+DK+isDr3tq0lOYNmYa6R5+Y+XysUhXG1h8HDvadrjOmXN+VPk7wuBj4TwLANY8xjviWLZhGea8OIf9DEGy7fb3wbmDcd1Z12Fh40L2/Zwwwf3jJ4fS9/Oan5qLagJ/R1ruguTfC9XF1ah9o1a0MslL3znnWWf+KODoJLex+EFXz/qBq/v8fqexuRHlK8vR2t7q+p0MZCAvJ4+UtxUGJ1VEqCgESUNIUnG6LTSrbN5+yvb6/TA37HQhA6MqVABY88EalDxeEvibzvkJ28AEvJ8FgJLRraI4qbLttpnUvVunPWdhy5qp+znnZ3zheKzbvi5w85Xa7CxQ9QMFEiSUVCPHVLk25wDrtRb8YNoo4Og+r+s5hl1+Tj72HNwjIos6iAgV0wzSCWRurtJd83ZhVfkqUlgpjITQdIsVc8Id5SvLSb/pdO9uad1Cuk431yvoWQCEUkEE8GTbLawq4bqnylpjc6NI2MaUbNvnp7W9FcOWDSOF3aTz6yTDFhIJstTQEDdsT4FfKKisrgxzXpjTKUt+a8ENYdES6KQceD2/H64acxWA1IUquYg8QgGQ8giFSWFPPRVRTwmqp6ywvCNcqIQ7vGB/X5RrTXgmnM8CQMRbQT2F68q2hDeDKmux7FiSW181bMOR7fLTy9nrR9V7LBUekvQI2bFk4hIM6jMorcn17OCECAtzC1FxVgUpzAukpm0QV5erhkjXzliL1vZWo6FKCqj7d8QsHRK4XCo67l3rVBQE65Tg5vK1hFUnpyld2Wq95odzmrM2Z8vwoF6bQEL0NOT2LI3NjWRvRdHJRZ6yNmnkJJSOKA2URd1EXcubMblusmdYN2jOqDLkzG2wPGTczYhLCcBZPzpM09R3FgTLY+xlnNrhfGd+sOeepRujuxs4vG072naQjSAgWc9SEdbeYI
HLW2fXi1mZWSKyGAYiQygkmFScOvBTnEHU9kGbR5AydRoTVJjKA+AuevvmTL22urjauOKXbNFBUZwSBq+XUV6QW4CKsypw6MghNDY3er5rzsZth5thQZEvimzHsmOoaqxirx/dFh/czc4NfsapE4W5heSEdztUjdAwwQkRcuRuycQl7HY1QYdSE3qR8/xuhxYJWQwDUWgsAFKhMYr733LbSyRT6y4KqWRQ3SQ9t98z1S+HE16pvaw26X7pFAbUCWuoyppUoq5dbre0bsHDrz+cxFvj965Vqint8HLnByXFOu9nGQ7Onn92+M1JOsmS13qrOKsCw2PDu4RjuYZomImzKjpROkSo+rxBodJ54+dhxTsrxPUi5/nDDntRECVLpxm6G5eKJEGbFC+Qadp2qmejbnJdl3Gb7jnEQdgtOiiJuteddR3q3q0LfF4nhw2HvM9L1mLZMdJzNGxuEGNary6u9jSCAP/1k04hZbck5ebZzVhw/oKkhGQuKacFlaTyMJnedXjbpBKFg0KlCSRw97q7jehFyvMPyB6A1dNXk3mN0hGRRygAYfAIpSOXivSpNJUeKuq9dTwb1GvvufAezH1prvHQZ5C3ggJu4r4XpQOQ3AiYkiOj4410vu94R5xEhZCXk4c9B/ew7ylNCSBdCh9mB3BumbUFqh5R8Qjr6kQVT6Mbn5Kqx0TXw6vrceN49dOt23zEIyQEaUMIkFec9t+V4jYJs8qNAs547AnAUuEVitIMunbe+HlYvG6xSOiTAi+ju2xUGWpeqQm8nrI5+fHebGnd4mrcBz2vKXJAP8NCmgRO9xmCNt/KcZ/nNAVtNCZDyV6wy8SuA7uUyDndoGLQSIb4KQaeDpmpF6iHUj/o6mlKNWIqZC0IkSEkBBOGkBsklD/1N1ZPX42szCzfBSp9KtUFVRlUjq3Eyo0rfRWWyRJkr2vvvfBezHlpTujkkm5Gd9OHTSKGhp/iKx1RqrwBmciRCTJSZ4+djZr1NWL3lFg/bvOblZGV1HCXYtSHZXi7QZLVOGymdy8jv2Fzg+tBwtS8SuQpSeSThcmyLoUoR6ibQYLgkJrhX76yPDBWbrIBqgqo+RA162sCT21BeTA6fX1M9xziwo1gTkLWgvK1bm+6Xfl5TeTIBOWqlZ4m279NYv3YZalybCUAJBlBgHceiDSBq2pem5QeUc1ZlGy2PGzZMLS2t2LJxCVYVb4KhbmFSdfo9kP0mmPd/pIAXW793rMXWWV37DbvRFQ+nyZIRy4VCs+QF6RjxZTyaOdp2Q8mS5Dt19ZvrMewZcPIOROSjL5e70BX1ig8N0vXLyWN0e15TdEu+FFFxDvi5HtSZVtn/ViwDNfpz013/bsXr5BuCb4duiEP3XmId8Sx5oM1gfcBusqTTrPlINoDSY6coDmmUhk4wVkrqu9ZUtZShSg0FoCwQmMWdMIyQW5oP0gkG/s9g0SsWCIB2AmTJcip7DlEeQcqssZpykoBN0fGpKvdVP823UOBSnhHKrwoGfJQmQdu8rVTnrihucNHDqNgSYFS0rwqqHPstV6njp6KxesWA1BfKzrvOZ3oHpyIcoSEELYhBKg3XgRkuFR0k+pMxop1E4CdMJXszaWml1SwnHdgsqN2LDuGT9o/Ec2RMc1V4ndPACnJg1Bp57HmgzVY1LQo8Bo/+afm5WydtZWsnzjgHCT85InTvf47v/lOqJ3TublPXutV4gCtmr+YboU1dkSGkBBSYQjZoVou6rzG2WPJC6q9kYDwOoDrJABLj0ViPJIbqal3oOLdqi6uRlVjFQD1k2oqynFN9m9TAXWjcSvZ9gJlvNT75ufkJxkPEt5fzkGCWtHpZyhw5VvKuyFpRKiuFU6RzYShE1zvm06FNXZEvcaOAqi2uHCLX1O5VHRafHA6gAdVrfnBLX+H2xsJkEn29jJUJ4+cTP4NlZ5DXjARr1fpqF2YW4hbi27F6IGjtXNkwj5FmuzfpgKddh5uoMo/NV/N6UGRaJ3BaXdDkaeg/DCOfANyZJa6ffrscJNbinHEKbJxMupb99XNb001IkMoTaHTeBHouigoyaCqvZGsxbbqvVWkZytfWS7SAdwObm8kCcPDz1CllGIDaj2H/CCpWC1wNiWn4pNOKuVCyqMk2b+Ni6CNxo+V3g1U+Vfd7Cn6KQjU+Z5fNB9VxVWke3gZ1Vz5piQfU+XOJIs4NZrAKbLx2gckCgNSiSg0FoBUhcZMxF1N9EZSZZJ1/jYgExqi9kaSCIcFhUoyMzI9q9hMuYupcjO/aD4mDJ1AmgsOoVuq+w3p9CrzQ9j929ygy0pvvXNqzqFO8YUF1byQMPNOOPKdgQwSEzU1ncFUWImbJ0h9z1xW94hZ+ihBqgwhU5n4ki0+VPJGvCDZNiCMxcjZGL3cxSaSa7mbF8UwoD6rtHeLC4pRrjr3lHn1o28wmR/HYaXv1aMXK+dQt/hCNecwzLwTTi7Ug5c+SGrHwUmol6yQtHiAnF5351icc8fV5alIfFZBRKjYzWHKZepFFjg8Npx0veWyVomr+4FDKBjUQNGL+EsSVNd95bhKkYazVHCbX1IaM1IJGC0jyHRTWTd4ETw6oUrwRiEG9OOwsufH6cyNm2xTdcCW1i3spsVeRJT5Ofnke6o0Ow2T0JVCWJifk4/tc7YHcqepEAtKNaa29GLJ4yW+hTFuutYaA7VBsSTfWTog8ggFIFUeIcoJNJYdQ93kOpHNnuuK5oYKOFVrYfGaqILDpePsfRaWu5gTsqSWslNLkFPR24pDVWBB5VSrS9/gXAeSFVZ+3pOC3AIkEomkMKHzO5yQx/jC8Ri2bFhgzmFre6vWWtUtC+dQQ+h6ZXTDeTqebBXvvJuuXfPBGlJRTeQRSjEeeOABDBkyBL1798bYsWOxYcMG0nVPP/00MjIycPnll5sdoBAoJ/vW9laUPF5COmEFgdt2gXoiuPErN2LtjLWom1xH+r7f6TYdqNytU1eQEWSfrzA8VEAyPX4sO4b3Z72PtTPWYn7RfN/rKN44yqk1qO2Grox6gZPsaofKqdbLo1o6gtaiw4vVXWduKN6TirMqPI0gIFgGnDLcs0dP33v6JXAHrVUvOQ5qd2O/7rY/3IZTak4he6IkvDK6hQqqekLVO++ma4uHFGu33+mO6FZVY8888wzmzp2LBx98EGPHjkVNTQ0mTpyIzZs3Y+DAgZ7XNTc3Y968eSgq6l4vzysT3wmJclVuCSTVHV82qgzFQ4pZLQy8wCkNN+GFoZ66dFz3qqdCP0/MqPxRpHsHKXLVEmSJKiKdcXtBtSpKl77BDpW5cZORoKqdQ0cOkcbDmUu/ewblHHrROPjJcZCnWFdP6lY4mqwA8wP3IOCna02VwqdbErUT3coQuvfee1FRUYFvfvObAIAHH3wQ//u//4tf/OIXuPnmm12vicfjuOqqq1BdXY2mpiZ8+umnIY5YH9bi9EuAk9poOCWQ3H5QEv2tqP2GTJQwc05dqiWjqmGlIL6pquIq0v0pClq1BNlUv6F4Rxy7DuxiXaPaq8wPHPoGJyT7fnlt5I3NjaSxqOQcut2z7
[base64 PNG data omitted: scatter plot "Interior training points for PDE" of the 1000 sampled interior points, axes "X-axis" / "Y-axis"]",
-      "text/plain": [
-       ""
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
-    "interior_points = rectangle.sample_interior(1000, random='Halton')\n",
-    "px1, py1 = interior_points[\"x\"], interior_points[\"y\"]\n",
-    "plt.scatter(px1, py1, color='green')\n",
-    "plt.title('Interior training points for PDE')\n",
-    "plt.xlabel('X-axis')\n",
-    "plt.ylabel('Y-axis')\n",
-    "plt.show()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, convert the governing equation into the constraint actually required by the deep learning model: the interior constraint\n",
-    "> $$\n",
-    "\\frac{\\partial^4 w}{\\partial x^4}+2 \\frac{\\partial^4 w}{\\partial x^2 \\partial y^2}+\\frac{\\partial^4 w}{\\partial y^4}=\\frac{q}{D}\n",
-    "$$"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33m[2024/11/17 17:10:44] ppsci WARNING: Logger has already been automatically initialized as `log_file` is set to None by default, information will only be printed to terminal without writting to any file.\u001b[0m\n"
-     ]
-    }
-   ],
-   "source": [
-    "pde_contraint = ppsci.constraint.InteriorConstraint(\n",
-    "    {\"kirchhoff_res\": res},  # residual expression\n",
-    "    {\"kirchhoff_res\": 0.0},  # target value of the residual\n",
-    "    rectangle,  # constraint region: the rectangular thin plate\n",
-    "    {\n",
-    "        \"dataset\": \"IterableNamedArrayDataset\",\n",
-    "        \"iters_per_epoch\": 1,\n",
-    "        \"batch_size\": 20000,  # sample 20,000 collocation points for training\n",
-    "    },\n",
-    "    random=\"Halton\",\n",
-    "    loss=ppsci.loss.MSELoss(),  # mean squared error (MSE) loss function\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### 4.2 Simply supported boundary conditions\n",
-    "\n",
-    "Next, we explain how to turn the simply supported boundary conditions on the left and right edges, described in the introduction, into deep learning code."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Likewise, first preview the training points on the left and right boundaries of the rectangle that correspond to the simply supported boundary conditions."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAkIAAAHHCAYAAABTMjf2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAABPcElEQVR4nO3dd1gU1/4/8PcCsoA0DSCiBBWxoyQqxhY0EiGigok3tgCWmGKJLSSWxJpcNRo1CtafmsRobDdCrrHGcr1RbsDejRK7YgkKItjg/P7Y725cloVdGGYH9v16nn1WZs/MnDMzO/tx5nPmqIQQAkRERERWyMbSFSAiIiKyFAZCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkJERERktRgIERERkdViIERERERWi4EQERERWS0GQhKZPHkyVCoV7t69a+mqlEitWrXQv39/S1ejTGn3kVI8e/YMn3zyCXx9fWFjY4OoqChLVwkqlQqTJ0+2yLr79++PWrVqWWTdZMiUY2Hv3r1QqVTYuHGjPJWSGI+5omn37969e3XTzNlmSjvnGsNAiMgEBw4cwOTJk3H//n3JlrlixQrMmjULPXv2xHfffYdRo0ZJtmyyDmvWrMG8efMsXQ2yYjk5OZg8ebJesFTeMBAiq/HZZ58hNze3RPMeOHAAU6ZMkTQQ2r17N2rUqIG5c+ciOjoaISEhki27pHJzc/HZZ59ZuhpkIgZCJLdly5bh3Llzur9zcnIwZcqUQgOh0pxz5cRAiCT36NEj5OfnW7oaBuzs7ODg4GDpaujcvn0b7u7uki0vPz8fjx49KtUyHBwcYGdnJ1GNqKw8fPjQ0lWgAqT4/pUHlSpVglqtNqms0s65xjAQktjdu3fx9ttvw9XVFS+88AJGjBhh8OV49uwZpk2bBn9/f6jVatSqVQvjx4/H48eP9coZu0dfMJ/n22+/hUqlwv79+zF69Gh4enqicuXK6NGjB+7cuaM3rxACX3zxBWrWrAknJyd07NgRp06dMlhHRkYGPv74YwQGBsLZ2Rmurq544403cOzYMb1y2nvIa9euxWeffYYaNWrAyckJR48ehUqlwty5cw2WfeDAAahUKvz4449Gt6N2uevWrcP48ePh7e2NypUro3v37rh69apB+Q0bNqB58+ZwdHSEh4cH3nnnHVy/fl2vTGH3q1UqFYYNG4bExEQ0adIEarUajRs3xrZt2/Tmi4uLAwDUrl0bKpUKKpUKly5dAgDs3LkT7dq1g7u7O5ydnVG/fn2MHz/eaNsuXboElUqFPXv24NSpU7rlaf9H9fDhQ4wZMwa+vr5Qq9WoX78+Zs+eDSFEoXVfvXo1GjduDLVarVfvgg4ePIiwsDB4eHjA0dERtWvXxsCBAw2W+fwxp91mf/zxB9555x24ubnB09MTn3/+OYQQuHr1KiIjI+Hq6gpvb298/fXXesszdz9qCSFQq1YtREZGGnz26NEjuLm54f333zc6P1D8ftF+b7T7sWCdn/8fbocOHdCkSRMcOnQIbdq00W2/xYsXl6q9phy3/fv3h7OzM9LS0tClSxe4uLigX79+6NChA3755RdcvnxZdww9n7vx+PFjTJo0CXXr1oVarYavry8++eQTg/PM48ePMWrUKHh6esLFxQXdu3fHtWvXity2BeXl5UnW3g4dOqBDhw4G8xbMTdF+j2bPno2lS5fqzqctW7ZEamqqwfza77iDgwOaNGmCTZs2FdqW2bNno02bNnjhhRfg6OiI5s2bF5oDVdj3b+vWraU+bgHghx9+QHBwMJycnFClShW8+uqr2LFjh16ZhQsX6tbr4+ODoUOHGlyx1h63p0+fRseOHeHk5IQaNWrgq6++MljntWvXEBUVhcqVK8PLywujRo0yOFYA/f1w6dIleHp6AgCmTJmiOw6155DCzrmm/v7VqlULXbt2xW+//Ybg4GA4ODigTp06+P7774vdfmYTJIlJkyYJACIwMFB069ZNxMfHi3feeUcAENHR0XplY2NjBQDRs2dPkZCQIGJiYgQAERUVpVcOgJg0aZLBuvz8/ERsbKzu75UrVwoA4qWXXhKvvfaaWLBggRgzZoywtbUVb7/9tt68n332mQAgunTpIuLj48XAgQOFj4+P8PDw0Ftmamqq8Pf3F2PHjhVLliwRU6dOFTVq1BBubm7i+vXrunJ79uwRAESjRo1EUFCQmDNnjpg+fbp4+PChaNu2rWjevLlB/YcMGSJcXFzEw4cPjW5P7XIDAwNF06ZNxZw5c8TYsWOFg4ODqFevnsjJyTFof8uWLcXcuXPF2LFjhaOjo6hVq5a4d++ewT4quI2bNWsmqlevLqZNmybmzZsn6tSpI5ycnMTdu3eFEEIcO3ZM9OnTRwAQc+fOFatWrRKrVq0S2dnZ4uTJk8Le3l60aNFCfPPNN2Lx4sXi448/Fq+++qrRtmVnZ4tVq1aJBg0aiJo1a+qWl56eLvLz88Vrr70mVCqVePfdd0V8fLzo1q2bACBGjhxpUPeGDRsKT09PMWXKFJGQkCCOHDlS6Dpv3bolqlSpIurVqydmzZolli1bJiZMmCAaNmxosMznjzntNgsKChJ9+vQRCxcuFBEREQKAmDNnjqhfv7748MMPxcKFC0Xbtm0FAPGf//ynRPsxNjZW+Pn56f6eMGGCqFSpkvjrr7/06rh+/XoBQOzbt8/oNjZlv2iPm4sXL+rNq63znj17dNNCQkKEj4+P8PLyEsOGDRPz588X7dq1EwDE8uXLS9ReU4/b2NhYoVarhb+/v4iNjRWLFy8W33//vdixY4cICgoSHh4eumNo06ZNQggh8vLyROfOnYWTk5MYOXKkWLJkiRg2bJiws7MTkZGReu3Vnqf69u0r4uPjxZtvvimaNm1q9PxT2LaSsr0hISEiJCTEYF0Fj4+LFy/qznt169YVM2fOFF999ZXw8PAQNWvWFE+ePNGV3b59u7CxsRFNmjQRc+bMERMmTBBubm6icePGessUQoiaNWuKIUOGiPj4eDFnzhwRHBwsAIjNmzfrlTP2/SvNcSuEEJMnTxYARJs2bcSsWbPEN998I/r27Ss+/fRTXRnt9zI0NFQsWLBADBs2TNja2oqWLVvqtVt73Pr6+ooRI0aIhQsXitdee00AEFu2bNGVy8nJEfXq1RMODg7ik08+EfPmzRPNmzfXHQfPfxee3w/Z2dli0aJFAoDo0aOH7jg8duyYXj0L7kdTfv/8/PxE/fr1RbVq1cT48eNFfHy8ePnll4VKpRInT54schuai4GQRLQ7vHv37nrThwwZIgDoDoyjR48KAOLdd9/VK/fxxx8LAGL37t26aeYGQqGhoSI/P183fdSoUcLW1lbcv39fCCHE7du3hb29vYiIiNArN378eAFAb5mPHj0SeXl5euu9ePGiUKvVYurUqbpp2hNhnTp19E56QgixZMkSAUCcOXNGN+3JkycGQVdhtMutUaOGyMrK0k3Xnky++eYb3fK8vLxEkyZNRG5urq7c5s2bBQAxceJE3TRjgZC9vb24cOGCbtqxY8cEALFgwQLdtF
mzZhX6ozl37lwBQNy5c6fI9hQmJCRENG7cWG9aYmKiACC++OILvek9e/YUKpVKr54AhI2NjTh16lSx69q0aZMAIFJTU4ssZywQeu+993TTnj17JmrWrClUKpWYMWOGbvq9e/eEo6Oj3r41dT8KYfhDd+7cOQFALFq0SK+O3bt3F7Vq1dI7hgsyZb+YGwgBEF9//bVu2uPHj0VQUJDw8vLS/fiUxXGr/eEYO3asQRsiIiIMfsiFEGLVqlXCxsZG/Pe//9WbvnjxYgFA7N+/Xwjx9/loyJAheuX69u1rViAkZXvNDYReeOEFkZGRoZuelJQkAIh///vfumlBQUGievXqunOhEELs2LFDADDYfgXPY0+ePBFNmjQRr732mt50Y9+/0hy358+fFzY2NqJHjx4G51/tfNrzeOfOnfXKxMfHCwBixYoVumna4/b777/XTXv8+LHw9vYWb731lm7avHnzBACxfv163bSHDx+KunXrFhkICSHEnTt3jB4rBc+55vz++fn5GQSOt2/fFmq1WowZM8ZgXaXBW2MSGzp0qN7fw4cPBwBs2bJF73306NF65caMGQMA+OWXX0q87vfee0/vMmT79u2Rl5eHy5cvAwB+/fVXPHnyBMOHD9crN3LkSINlqdVq2NhoDo+8vDz89ddfutsLhw8fNigfGxsLR0dHvWlvv/02HBwcsHr1at207du34+7du3jnnXdMalNMTAxcXFx0f/fs2RPVq1fXbceDBw/i9u3bGDJkiN696IiICDRo0MCk7RkaGgp/f3/d302bNoWrqyv+/PPPYufV5vgkJSVJkhe1ZcsW2Nra4qOPPtKbPmbMGAghsHXrVr3pISEhaNSokcn13Lx5M54+fWp2vd59913dv21tbdGiRQsIITBo0CC9ddSvX7/Q7VbcfixMvXr10KpVK73jJyMjA1u3bkW/fv2K7JYr9X4BNPkOz9/WsLe3x/vvv4/bt2/j0KFDemXL4rj98MMPTa7rhg0b0LBhQzRo0AB3797VvV577TUAwJ49ewD8fT4qeLwVdk4oihzfU2N69eqFKlWq6P5u3749AOiOw5s3b+Lo0aOIjY2Fm5ubrtzrr79e6Hfn+fPYvXv3kJmZifbt2xd63ivs+1ea4zYxMRH5+fmYOHGi7vyrpZ1Pex4fOXKkXpnBgwfD1dXVYFs6OzvrnW/t7e0RHBys9z3dsmULqlevjp49e+qmOTk54b333jNa15Iw9/evUaNGuv0JAJ6enkbPMaXBQEhiAQEBen/7+/vDxsZGl4dw+fJl2NjYoG7dunrlvL294e7urgtaSuLFF1/U+1t7crh3755u3YXV0dPTU+9EAmgS/+bOnYuAgACo1Wp4eHjA09MTx48fR2ZmpsG6a9eubTDN3d0d3bp1w5o1a3TTVq9ejRo1auhOyMUpWFeVSoW6devqbU8AqF+/vsG8DRo0MGl7FtxugGbbabdbUXr16oW2bdvi3XffRbVq1dC7d2+sX7++xD++ly9fho+Pj96PCgA0bNhQ9/nzCtvuhQkJCcFbb72FKVOmwMPDA5GRkVi5cmWhOQCFKbiN3Nzc4ODgAA8PD4PphW234vajMTExMdi/f7+u3Rs2bMDTp08RHR1d5HxS7xcA8PHxQeXKlfWm1atXDwAM2iH1cWtnZ4eaNWuaXNfz58/j1KlT8PT01Htp63v79m1dPWxsbPT+I2CsXkWR43tqTEnPe8bqs3nzZrzyyitwcHBA1apV4enpiUWLFpl83gNKftympaXBxsamyP/cGNuW9vb2qFOnjsG2rFmzpkHwVfD8dvnyZdStW9egnLnHQXHM/f0rzbnZHAyEypix6L80D5nKy8srdLqtrW2h00WBJFtT/POf/8To0aPx6quv4ocffsD27duxc+dONG7cuNAfk4JXg7RiYmLw559/4sCBA3jw4AF+/vln9OnTx+B/O5ZUmu3m6OiIffv24ddff0V0dDSOHz+OXr164fXXXze6n6RkbLsXpH3oXXJyMoYNG4br169j4MCBaN68ObKzs4udv7BtJOXxZkzv3r1RqVIl3f+uf/jhB7Ro0aLYE7Qp+8XYd1CO/Wau56/QmiI/Px+BgYHYuXNnoa8hQ4aUYW1Lx9z9IuVx+N///hfdu3eHg4MDFi5ciC1btmDnzp3o27dvocsz9v0r6XFbFuT4nprL1N8/uequnF+jCuL8+fN6f1+4cAH5+fm6LHs/Pz/k5+cblLt16xbu378PPz8/3bQqVaoY9AJ48uQJbt68WaK6aZddcN137twxiLA3btyIjh07Yvny5ejduzc6d+6M0NBQs5+jEx4eDk9PT6xevRqbNm1CTk5Osf8rel7BugohcOHCBb3tCUDvuRZa586d09uepVHUF9fGxgadOnXCnDlzcPr0aXz55ZfYvXu37vaDOfz8/HDjxg08ePBAb/rZs2d1n5fGK6+8gi+//BIHDx7E6tWrcerUKaxdu7ZUyzRFcfvRmKpVqyIiIgKrV6/G5cuXsX//fpOPn+L2i/bKQcFj2tjViRs3bhh0W//jjz8AwKAdch23xo5Lf39/ZGRkoFOnTggNDTV4aX+QteejtLQ0gzqYQ8r2FnbeA4zvl+IYO+8VVp9//etfcHBwwPbt2zFw4EC88cYbCA0NNXudJT1u/f39kZ+fj9OnTxstY2xbPnnyBBcvXizROcLPzw9paWkGAYYpx4E5/6k35/dPTgyEJJaQkKD394IFCwAAb7zxBgCgS5cuAGDwELQ5c+YA0Nwz1/L398e+ffv0yi1durTE/2MNDQ1FpUqVsGDBAr0DvrAHstna2hp8KTZs2GDQ1bU4dnZ26NOnD9avX49vv/0WgYGBaNq0qcnzf//993pBwcaNG3Hz5k3d9mzRogW8vLywePFivds8W7duxZkzZ/S2Z2lob4kUPEFnZGQYlA0KCgIAk287Pa9Lly7Iy8tDfHy83vS5c+dCpVLp2m2ue/fuGezP0tTTXMXtx6JER0fj9OnTiIuLg62tLXr37l3sPKbsF+3toOe/Y3l5eVi6dGmhy3z27BmWLFmi+/vJkydYsmQJPD090bx5c72ych23lStXLvSWzdtvv43r169j2bJlBp/l5ubqAjptfebPn69XxtyHNErZXn9/f5w9e1bv0R/Hjh3D/v37zaqTVvXq1REUFITvvvtOb1vt3LnTIOCwtbWFSqXSO8deunQJiYmJZq+3JMdtVFQUbGxsMHXqVIMr79rvb2hoKOzt7TF//ny97/Ty5cuRmZlZonNely5dcOPGDb3HBOTk5Bj9LjzPyckJgOG50dh6ANN+/+TEJ6dJ7OLFi+jevTvCw8ORnJyMH374AX379kWzZs0AAM2aNUNsbCyWLl2K+/fvIyQkBCkpKfjuu+8QFRWFjh076pb17rvv4oMPPsBbb72F119/HceOHcP27dsN8jJM5enpiY8//hjTp09H165d0aVLFxw5cgRbt241WGbXrl0xdepUDBgwAG3atMGJEyewevVq1KlTx+z1xsTEYP78+dizZw9mzpxp1rxVq1ZFu3btMGDAANy6dQvz5s1D3
bp1MXjwYACah3vNnDkTAwYMQEhICPr06YNbt27hm2++Qa1atSQbtkL7QzdhwgTdZe9u3bph6tSp2LdvHyIiIuDn54fbt29j4cKFqFmzJtq1a2f2erp164aOHTtiwoQJuHTpEpo1a4YdO3YgKSkJI0eONMjlMNV3332HhQsXokePHvD398eDBw+wbNkyuLq66k5OZam4/ViUiIgIvPDCC9iwYQPeeOMNeHl5FTuPKfulcePGeOWVVzBu3DhkZGSgatWqWLt2LZ49e1boMn18fDBz5kxcunQJ9erVw7p163D06FEsXboUlSpVMqu9Uh23zZs3x7p16zB69Gi0bNkSzs7O6NatG6Kjo7F+/Xp88MEH2LNnD9q2bYu8vDycPXsW69evx/bt29GiRQsEBQWhT58+WLhwITIzM9GmTRvs2rULFy5cMGn9ZdHegQMHYs6cOQgLC8OgQYNw+/ZtLF68GI0bN0ZWVpZZ9dKaPn06IiIi0K5dOwwcOBAZGRlYsGABGjdurHdrOCIiAnPmzEF4eDj69u2L27dvIyEhAXXr1sXx48fNWmdJjtu6detiwoQJmDZtGtq3b48333wTarUaqamp8PHxwfTp0+Hp6Ylx48ZhypQpCA8PR/fu3XHu3DksXLgQLVu2NLkjyvMGDx6M+Ph4xMTE4NChQ6hevTpWrVqlC3KK4ujoiEaNGmHdunWoV68eqlatiiZNmqBJkyYGZc35/ZOVpH3QrJi2m+Dp06dFz549hYuLi6hSpYoYNmyYXndRIYR4+vSpmDJliqhdu7aoVKmS8PX1FePGjROPHj3SK5eXlyc+/fRT4eHhIZycnERYWJi4cOGC0e7zBbtGF9YNOC8vT0yZMkVUr15dODo6ig4dOoiTJ08aLPPRo0dizJgxunJt27YVycnJBl1btevYsGFDkduncePGwsbGRly7ds2k7ald7o8//ijGjRsnvLy8hKOjo4iIiBCXL182KL9u3Trx0ksvCbVaLapWrSr69etnsC5j3eeHDh1qsLyC20MIIaZNmyZq1KghbGxsdN2ud+3aJSIjI4WPj4+wt7cXPj4+ok+fPuKPP/4oto2FdZ8XQogHDx6IUaNGCR8fH1GpUiUREBAgZs2aZdDt1ljdC3P48GHRp08f8eKLLwq1Wi28vLxE165dxcGDBw2WWVj3+YLd0GNjY0XlypWLbZM5+7Fgt9znaR9DsWbNGpPaa+p+SUtLE6GhoUKtVuueV7Jz585Cu883btxYHDx4ULRu3Vo4ODgIPz8/ER8fr7e8sjhujW1rITTPcenbt69wd3c36Ar+5MkTMXPmTNG4cWOhVqtFlSpVRPPmzcWUKVNEZmamrlxubq746KOPxAsvvCAqV64sunXrJq5evWpW93kp2yuEED/88IOoU6eOsLe3F0FBQWL79u1Gu8/PmjXLYP7C6v6vf/1LNGzYUKjVatGoUSPx008/FXrMLV++XAQEBAi1Wi0aNGggVq5cada543nmHrdaK1as0G2nKlWqiJCQELFz5069MvHx8aJBgwaiUqVKolq1auLDDz/Uex6TEMbPMYW1+/Lly6J79+7CyclJeHh4iBEjRoht27YV231eCCEOHDggmjdvLuzt7fW2fWHbzdTfPz8/PxEREWFQd2OPVygNlRAWzJgiq/HSSy+hatWq2LVrl0nl9+7di44dO2LDhg16XTqpfJFqP44aNQrLly9Henq6Sf9LlVqHDh1w9+5dnDx5sshyPG7peZY+bsk0zBGiMnfw4EEcPXoUMTExlq4KlUOPHj3CDz/8gLfeeos/JlRu8LgtP5gjRGXm5MmTOHToEL7++mtUr14dvXr1snSVqBy5ffs2fv31V2zcuBF//fUXRowYYekqERWLx235w0CIyszGjRsxdepU1K9fHz/++GO5GIWYlOP06dPo168fvLy8MH/+fF2vLyIl43Fb/jBHiIiIiKwWc4SIiIjIajEQIiIiIqvFHKFi5Ofn48aNG3BxcSnV+GBEREQkHyEEHjx4AB8fnyLH6mMgVIwbN27A19fX0tUgIiKiErh69Spq1qxp9HMGQsVwcXEBoNmQrq6uFq4NERERmSIrKwu+vr6633FjGAgVQ3s7zNXVlYEQERFROVNcWguTpYmIiMhqMRAiIiIiq8VAiIiIiKwWAyEiIiKyWgyEiIiIyGoxECIiIiKrxUCIiIiIrBYDISIiIrJaDISIiIjIajEQIiIiItllZwM9egBNm2res7MtUw8OsUFERESyCg4GUlP//vvECcDFBWjZEkhJkbcuvCJEREREsikYBD0vNVXzuZwYCBEREZEssrONB0Faqany3iZjIERERESyiI6WtpwUGAgRERGRLNLSpC0nBQZCREREJAt/f2nLSYGBEBEREcli1Sppy0mBgRARERHJwtlZ00W+KC1basrJhYEQERERySYlxXgwZInnCPGBikRERCSrlBRNF/noaE1itL+/5naYnFeCtBgIERERkeycnYFNmyxdC94aIyIiIivGQIiIiIisFgMhIiIisloMhIiIiMhqMRAiIiIiq8VAiIiIiKwWAyEiIiKyWgyEiIiIyGoxECIiIiKrxUCIiIiIrBYDIQvIzQWGDQPCwjTvubmWrhEREZF1YiAks6gowMkJSEgAduzQvDs5aaYTERGRvBgIySgqCkhKKvyzpCQGQ0RERHJjICST3FzjQZBWUhJvkxEREcmJgZBM4uKkLUdERESlx0BIJufPS1uOiIiISo+BkEwCAqQtR0RERKWnEkIIS1dCybKysuDm5obMzEy4urqWeDm5uZreYcXJyQEcHUu8GiIiIoLpv9+8IiQTR0cgMrLoMpGRDIKIiIjkxEBIRomJxoOhyEjN50RERCQfO0tXwNokJmpuk8XFaRKjAwKAWbN4JYiIiMgSGAhZgKMjEB9v6VoQERERb40RERGR7JQy7iYDISIiIpKVksbdLHeBUEJCAmrVqgUHBwe0atUKKSkpJs23du1aqFQqRHFALyIiIotR2rib5SoQWrduHUaPHo1Jkybh8OHDaNasGcLCwnD79u0i57t06RI+/vhjtG/fXqaaEhERUUFKHHezXAVCc+bMweDBgzFgwAA0atQIixcvhpOTE1asWGF0nry8PPTr1w9TpkxBnTp1ZKwtERERPU+J426Wm0DoyZMnOHToEEJDQ3XTbGxsEBoaiuTkZKPzTZ06FV5eXhg0aJAc1TSJUhLEiIiI5KTEcTfLTff5u3fvIi8vD9WqVdObXq1aNZw9e7bQeX777TcsX74cR48eNXk9jx8/xuPHj3V/Z2Vllai+xhS8N6pNEuMDFYmIqKILCND87plSTi7l5oqQuR48eIDo6GgsW7YMHh4eJs83ffp0uLm56V6+vr6S1UlpCWJERERymjVL2nJSKDeBkIeHB2xtbXHr1i296bdu3YK3t7dB+bS0NFy6dAndunWDnZ0d7Ozs8P333+Pnn3+GnZ0d0tLSCl3PuHHjkJmZqXtdvXpVkvorMUGMiIhITkocd7PcBEL29vZo3rw5du3apZuWn5+PXbt2oXXr1gblGzRogBMn
TuDo0aO6V/fu3dGxY0ccPXrU6JUetVoNV1dXvZcUlJggRkREJDeljbtZbnKEAGD06NGIjY1FixYtEBwcjHnz5uHhw4cYMGAAACAmJgY1atTA9OnT4eDggCZNmujN7+7uDgAG0+WgxAQxIiIiS1DSuJvlKhDq1asX7ty5g4kTJyI9PR1BQUHYtm2bLoH6ypUrsLFR5kUuJSaIERERWYpSxt1UCSGEpSuhZFlZWXBzc0NmZmapbpPl5moeH16cnByORE9ERFRapv5+K/PySQWkxAQxIiIia8dASEZKSxAjIiKyduUqR6giUFKCGBERkbVjIGQBSkkQIyIishSlXBTgrTEiIiKSVVSUpgNRQsLfQ005OVlmhAUGQkRERCQbpQ03xUCIiIiIZKHE4aYYCBEREZEslDjcFAMhIiIikoUSh5tiIGQBubnAsGFAWJjmnSPOExGRNTB1GCk5h5viEBvFkGqIDS1jSWJ8oCIREVV0cg43xSE2FEhpmfJERERyUuJwUwyEZKLETHkiIiK5KW24KQZCMlFipjwREZElJCZqbn8NHQp07qx5z8mxTIoIh9iQiRIz5YmIiCxFKcNN8YqQTJSYKU9ERGTt2GusGFL1GpMzU56IiMjasdeYwigxU56IiMjaMRCSkdIy5YmIiKwdk6VllpiouU0WF6dJjA4IAGbN4pUgIiIiS2AgZAFKyZQnIiKyFKVcFOCtMSIiIpJVVJSmA1FCArBjh+bdyckyIywwECIiIiLZKG24KQZCREREJAslDjfFQIiIiIhkocThphgIERERkSyUONwUAyEiIiKShRKHm+IQG8WQaogNIiIiayfncFMcYoOIiIgURYnDTTEQIiIiItkobbgpPlmaiIiIZKWk4aYYCBEREZHslDLcFG+NERERkdViIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkIWkJ0N9OgBNG2qec/OtnSNiIiIrBMHXZVZcDCQmvr33ydOAC4uQMuWQEqK5epFRERkjXhFSEYFg6DnpaZqPiciIiL5MBCSSXa28SBIKzWVt8mIiIjkxEBIJtHR0pYjIiKi0mMgJJO0NGnLERERUekxEJKJv7+05YiIiKj0GAjJZNUqacsRERFR6TEQkomzs6aLfFFattSUIyIiInkwEJJRSorxYIjPESIiIpIfH6gos5QUTRf56GhNYrS/v+Z2GK8EERERyY+BkAU4OwObNlm6FkRERJaTmwvExQHnzwMBAcCsWYCjo/z14K0xIiIiklVUFODkBCQkADt2aN6dnDTT5cZAiIiIiGQTFQUkJRX+WVKS/MEQAyEiIiKSRW6u8SBIKylJU04uDISIiIhIFnFx0paTAgMhC8jNBYYNA8LCNO9yRr5ERESWcv68tOWkwEBIZkpKECMiIpJTQIC05aRQ7gKhhIQE1KpVCw4ODmjVqhVSingK4bJly9C+fXtUqVIFVapUQWhoaJHly5rSEsSIiIjkNGuWtOWkUK4CoXXr1mH06NGYNGkSDh8+jGbNmiEsLAy3b98utPzevXvRp08f7NmzB8nJyfD19UXnzp1x/fp1mWuuzAQxIiIiOTk6ApGRRZeJjJT3eUIqIYSQb3Wl06pVK7Rs2RLx8fEAgPz8fPj6+mL48OEYO3ZssfPn5eWhSpUqiI+PR0xMjEnrzMrKgpubGzIzM+Hq6lriug8bprkNVpyhQ4H/ax4REVGFZOwOSWQkkJgozTpM/f0uN0+WfvLkCQ4dOoRx48bpptnY2CA0NBTJyckmLSMnJwdPnz5F1apVjZZ5/PgxHj9+rPs7Kyur5JV+jhITxIiIiCwhMVE5T5YuN4HQ3bt3kZeXh2rVqulNr1atGs6ePWvSMj799FP4+PggNDTUaJnp06djypQppaprYQICNMnRppQjIiKq6BwdlXEHpFzlCJXGjBkzsHbtWmzatAkODg5Gy40bNw6ZmZm619WrVyVZvxITxIiIiKxduQmEPDw8YGtri1u3bulNv3XrFry9vYucd/bs2ZgxYwZ27NiBpk2bFllWrVbD1dVV7yUFJSaIERERWbtyEwjZ29ujefPm2LVrl25afn4+du3ahdatWxud76uvvsK0adOwbds2tGjRQo6qGpWYaDwYkjJBjIiIiExTbnKEAGD06NGIjY1FixYtEBwcjHnz5uHhw4cYMGAAACAmJgY1atTA9OnTAQAzZ87ExIkTsWbNGtSqVQvp6ekAAGdnZzg7O1ukDUpKECMiIrJ25SoQ6tWrF+7cuYOJEyciPT0dQUFB2LZtmy6B+sqVK7Cx+fsi16JFi/DkyRP07NlTbzmTJk3C5MmT5ay6HqUkiBEREVm7cvUcIUuQ6jlCRERE9Leyvjti6u93uckRIiIioopBSeNuMhAiIiIi2Sht3E0GQkRERCQLJY67yUCIiIiIZBEXJ205KTAQsoDcXM0grGFhmneOOE9ERNZAieNuMhCSmZISxIiIiORk6niaco67ye7zxZCy+3xRCWIAny5NREQVW26u5j//xcnJKX1XenafVxglJogRERHJSYnjbjIQkokSE8SIiIjkprRxN8vVEBvlmRITxIiIiCxBSeNuMhCSSUCAJjnalHJEREQVnVLG3WSydDGkSpaWM0GMiIjI2jFZWmGUmCBGRERk7RgIyUhpCWJERETWjjlCMlNSghgREZG1YyBkAUpJECMiIrKU7GwgOhpISwP8/YFVqwBnZ/nrwUCIiIiIZBUcDKSm/v33iROAiwvQsiWQkiJvXZgjRERERLIpGAQ9LzVV87mcGAgRERGRLLKzjQdBWqmpmnJyYSBEREREsoiOlracFBgIERERkSzS0qQtJwUGQkRERCQLf39py0mBgRARERHJYtUqactJgYEQERERycLZWdNFvigtW8r7PCEGQkRERCSblBTjwZAlniPEByoSERGRrFJS+GRpIiIismLOzsCmTZauBW+NERERkRVjIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCREREZLUYCBEREZHVMjsQ2rZtG3777Tfd3wkJCQgKCkLfvn1x7949SStHREREVJbMDoTi4uKQlZUFADhx4gTGjBmDLl264OLFixg9erTkFSQiIiIqK2aPNXbx4kU0atQIAPCvf/0LXbt2xT//+U8cPnwYXbp0kbyCFZFSBpojIiKydmYHQvb29sjJyQEA/Prrr4iJiQEAVK1aVXeliIwLDgZSU//++8QJwMUFaNlSMxovERERycfsQKhdu3YYPXo02rZti5SUFKxbtw4A8Mcff6BmzZqSV7AiKRgEPS81VfM5gyEiIiL5mJ0jFB8fDzs7O2zcuBGLFi1CjRo1AABbt25FeHi45BWsKLKzjQdBWqmpmnJEREQkD5UQQli6EkqWlZUFNzc3ZGZmwtXVtcTL6dEDSEwsvlxUFLBpU4lXQ0RERDD999ukW2NZWVm6hRSXB1SaYKEiS0uTthwRERGVnkmBUJU
qVXDz5k14eXnB3d0dKpXKoIwQAiqVCnl5eZJXsiLw99ckRptSjoiIiORhUiC0e/duVK1aVffvwgIhKtqqVZreYaaUIyIiInmYFAiFhITo/t2hQ4eyqkuF5uys6SJfVMJ0y5Z8nhAREZGczO41NnnyZOTn5xtMz8zMRJ8+fSSpVEWVkqIJdgrD5wgRERHJz+xAaPny5WjXrh3+/PNP3bS9e/ciMDAQacz0LVZKCvDggaZ3WGCg5v3BAwZBRERElmB2IHT8+HHUrFkTQUFBWLZsGeLi4tC5c2dER0fjwIEDZVHHCsfZWdNF/vhxzTtvhxERkbXJzQWGDQPCwjTvubmWqUeJnyM0fvx4zJgxA3Z2dti6dSs6deokdd0UQarnCBEREZFGVBSQlGQ4PTLStGfumcLU32+zrwgBwIIFC/DNN9+gT58+qFOnDj766CMcO3asxJUlIiIi62AsCAI006Oi5KxNCQKh8PBwTJkyBd999x1Wr16NI0eO4NVXX8Urr7yCr776qizqSERERBVAbq7xIEgrKUne22RmB0J5eXk4fvw4evbsCQBwdHTEokWLsHHjRsydO1fyChIREVHFEBcnbTkpmD36/M6dOwudHhERgROmPDqZiIiIrNL589KWk0KJcoSM8fDwkHJxFZZSMuWJiIjkFBAgbTkpmN1rLC8vD3PnzsX69etx5coVPHnyRO/zjIwMSStoaVL3GpMjU56IiEiJcnMBJ6fiy+XkAI6OpVtXmfUamzJlCubMmYNevXohMzMTo0ePxptvvgkbGxtMnjy5NHWu8JSWKU9ERCQnR0fNf/yLEhlZ+iDIHGZfEfL398f8+fMREREBFxcXHD16VDftf//7H9asWVNWdbUIqa4IyRkFExERKVm5fo5Qeno6AgMDAQDOzs7IzMwEAHTt2hW//PJLCatb8SkxU56IiMgSEhM1//EfOhTo3FnznpNjmRQRs3uN1axZEzdv3sSLL74If39/7NixAy+//DJSU1OhVqvLoo4VghIz5YmIiCzF0RGIj7d0LUpwRahHjx7YtWsXAGD48OH4/PPPERAQgJiYGAwcOFDyChaUkJCAWrVqwcHBAa1atUJKMaOVbtiwAQ0aNICDgwMCAwOxZcuWMq9jYZSYKU9ERGTtSjzWmFZycjKSk5MREBCAbt26SVWvQq1btw4xMTFYvHgxWrVqhXnz5mHDhg04d+4cvLy8DMofOHAAr776KqZPn46uXbtizZo1mDlzJg4fPowmTZqYtE7mCBEREZU/pv5+lzoQklOrVq3QsmVLxP/ftbT8/Hz4+vpi+PDhGDt2rEH5Xr164eHDh9i8ebNu2iuvvIKgoCAsXrzYpHVK2X2+qF5jALvQExERSaVMB13VcnV1xZ9//lmaRZjsyZMnOHToEEJDQ3XTbGxsEBoaiuTk5ELnSU5O1isPAGFhYUbLA8Djx4+RlZWl95JKYqLxboMMgoiIiORnciB048YNg2lyXky6e/cu8vLyUK1aNb3p1apVQ3p6eqHzpKenm1UeAKZPnw43Nzfdy9fXt/SVf46SMuWJiIisncmBUOPGjSvcM4IKM27cOGRmZupeV69elXwd2kz57ds178wJIiIia6OU4aZMDoS+/PJLvP/++/jHP/6hG0bjnXfekWTYCVN4eHjA1tYWt27d0pt+69YteHt7FzqPt7e3WeUBQK1Ww9XVVe9FRERE0omK0nQgSkgAduzQvDs5WWaEBZMDoSFDhuD48eP466+/0KhRI/z73//GokWLZBto1d7eHs2bN9d13Qc0ydK7du1C69atC52ndevWeuUBYOfOnUbLExERUdlS2nBTZj1QsXbt2ti9ezfi4+Px5ptvomHDhrCz01/E4cOHJa3g80aPHo3Y2Fi0aNECwcHBmDdvHh4+fIgBAwYAAGJiYlCjRg1Mnz4dADBixAiEhITg66+/RkREBNauXYuDBw9i6dKlZVZHIiIiKlxubtG9pwHN57m58qWNmP1k6cuXL+Onn35ClSpVEBkZaRAIlaVevXrhzp07mDhxItLT0xEUFIRt27bpEqKvXLkCG5u/L3K1adMGa9aswWeffYbx48cjICAAiYmJJj9DiIiIiKRjznBTcj112qznCC1btgxjxoxBaGgolixZAk9Pz7KsmyJI+RwhrdxczU4+f17zJOlZs5gwTUREFV9YmCYnqDidO2s6FJWGqb/fJl/OCQ8PR0pKCuLj4xETE1O62lmxgvdGtUlifI4QERFVdAEBpgVCcg43ZXKydF5eHo4fP84gqBSUliBGREQkp1mzpC0nBZMDoZ07d6JmzZplWZcKzZwEMSIioorI0dH4CAtakZHypouUaogNMp05CWJEREQVldKGm5Kvy5eVO39e2nJERETlVWKicjoOMRCSiRITxIiIiCxFO9yUpZnVfd4aSdV9PjdX8/jw4uTksCs9ERFRaZn6+80cIZkoMUGMiIjI2jEQkpHSEsSIiIisHXOEZKakBDEiIiJrx0DIApSSIEZERGTteGuMiIiIZJedDfToATRtqnnPzrZMPXhFiIiIiGQVHAykpv7994kTgIsL0LIlkJIib114RYiIiIhkUzAIel5qquZzOTEQIiIiIllkZxsPgrRSU+W9TcZAiIiIiGQRHS1tOSkwECIiIiJZpKVJW04KDISIiIhIFv7+0paTAgMhIiIiksWqVdKWkwIDISIiIpKFs7Omi3xRWrbUlJMLAyEiIiKSTUqK8WDIEs8R4gMViYiISFYpKZou8tHRmsRof3/N7TA5rwRpMRAiIiIi2Tk7A5s2WboWvDVGREREVoyBEBEREVktBkJERERktRgIERERkdViIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCFpCdDfToATRtqnnPzrZ0jYiIiKwTxxqTWXAwkJr6998nTgAuLpYZcZeIiMja8YqQjAoGQc9LTdV8TkRERPJhICST7GzjQZBWaipvkxEREcmJgZBMoqOlLUdERESlx0BIJmlp0pYjIiKi0mMgJBN/f2nLERERUekxEJLJqlXSliMiIqLSYyAkE2dnTRf5orRsqSlHRERE8mAgJKOUFOPBEJ8jREREJD8+UFFmKSmaLvLR0ZrEaH9/ze0wXgkiIiKSHwMhC3B2BjZtsnQtiIiILCc3F4iLA86fBwICgFmzAEdH+evBW2NEREQkq6gowMkJSEgAduzQvDs5aabLjYEQERERySYqCkhKKvyzpCT5gyEGQkRERCSL3FzjQZBWUpKmnFwYCBEREZEs4uKkLScFBkJEREQki/PnpS0nBQZCFpCbCwwbBoSFad7lvARIRERkKQEB0paTgkoIIeRbXfmTlZUFNzc3ZGZmwtXVtdTLM5YkFhkJJCaWevFERESKlZur6R1WnJyc0nelN/X3m1eEZKS0THkiIiI5OTpq/uNflMhIeZ8nxEBIJkrMlCciIpJbYqLxYMgSd0cYCMlEiZnyRERElpCYqLn9NXQo0Lmz5j0nxzIpIhxiQyZKzJQnIiKyFEdHID7e0rXgFSHZKDFTno
iIyNqx11gxpOo1JmemPBERkbVjrzGFUWKmPBERkbVjICQjpWXKExERWTsmS8ssMVFzmywuTpMYHRAAzJrFK0FERESWUG6uCGVkZKBfv35wdXWFu7s7Bg0ahOzs7CLLDx8+HPXr14ejoyNefPFFfPTRR8jMzJSx1oXTZspv3655ZxBERETWRinDTZWbQKhfv344deoUdu7cic2bN2Pfvn147733jJa/ceMGbty4gdmzZ+PkyZP49ttvsW3bNgwaNEjGWhMREVFBUVGaDkQJCcCOHZp3JyfLjLBQLnqNnTlzBo0aNUJqaipatGgBANi2bRu6dOmCa9euwcfHx6TlbNiwAe+88w4ePnwIOzvT7gpKPdYYERGRNStquClAupzZCtVrLDk5Ge7u7rogCABCQ0NhY2OD33//3eTlaDdGUUHQ48ePkZWVpfciIiKi0lPicFPlIhBKT0+Hl5eX3jQ7OztUrVoV6enpJi3j7t27mDZtWpG30wBg+vTpcHNz0718fX1LXG8iIiL6mxKHm7JoIDR27FioVKoiX2fPni31erKyshAREYFGjRph8uTJRZYdN24cMjMzda+rV6+Wev1ERESkzOGmLNp9fsyYMejfv3+RZerUqQNvb2/cvn1bb/qzZ8+QkZEBb2/vIud/8OABwsPD4eLigk2bNqFSpUpFller1VCr1SbVv6TYfZ6IiKxRQIAmOdqUcnIpV8nSBw8eRPPmzQEAO3bsQHh4eJHJ0llZWQgLC4NarcaWLVvgZMoYF4UsQ8pkaWNJYnygIhERVXRyDjdVoZKlGzZsiPDwcAwePBgpKSnYv38/hg0bht69e+uCoOvXr6NBgwZISUkBoNkAnTt3xsOHD7F8+XJkZWUhPT0d6enpyMvLs0g7isqUT0qyTLdBIiIiuShxuKlyEQgBwOrVq9GgQQN06tQJXbp0Qbt27bB06VLd50+fPsW5c+eQk5MDADh8+DB+//13nDhxAnXr1kX16tV1L0vk/SgxU56IiEhuShtuqlzcGrMkqW6NDRumeWBUcYYO1TxtmoiIqCIr63xZU3+/OdaYTJSYKU9ERGQp2uGmLK3c3Bor70zNgJczU56IiMja8dZYMaS6NSZnpjwREZG1q1C9xioCJWbKExERWTsGQjJSWqY8ERGRtWOytMwSE/lkaSIiIqVgIGQBSsmUJyIispTsbCA6GkhLA/z9gVWrAGdn+evBQIiIiIhkFRwMpKb+/feJE4CLC9CyJfB/A0TIhjlCREREJJuCQdDzUlM1n8uJgRARERHJIjvbeBCklZqqKScXBkJEREQki+hoactJgYEQERERySItTdpyUmAgRERERLLw95e2nBQYCBEREZEsVq2StpwUGAgRERGRLJydNV3ki9KypbzPE2IgRERERLJJSTEeDFniOUJ8oCIRERHJKiWFT5YmIiIiK+bsDGzaZOla8NYYERERWTEGQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkJERERktRgIWUBuLjBsGBAWpnnPzbV0jYiIiKwTAyGZRUUBTk5AQgKwY4fm3clJM52IiIjkxUBIRlFRQFJS4Z8lJTEYIiIikhsDIZnk5hoPgrSSknibjIiISE4MhGQSFydtOSIiIio9BkIyOX9e2nJERERUegyEZBIQIG05IiIiKj2VEEJYuhJKlpWVBTc3N2RmZsLV1bXEy8nN1fQOK05ODuDoWOLVEBEREUz//eYVIZk4OgKRkUWXiYxkEERERCQnBkIySkw0HgxFRmo+JyIiIvnYWboC1iYxUXObLC5OkxgdEADMmsUrQURERJbAQMgCHB2B+HhL14KIiMhylHJRgLfGiIiISFZKGm6KgRARERHJRmnDTTEQIiIiIlkocbgpBkJEREQkCyUON8VAiIiIiGShxOGmGAhZQG4uMGwYEBameeeI80REZA2UONwUh9gohlRDbGgZSxLjAxWJiKiik3O4KQ6xoUBKy5QnIiKSkxKHm2IgJBMlZsoTERHJTWnDTTEQkokSM+WJiIgsITFRc/tr6FCgc2fNe06OZVJEOMSGTJSYKU9ERGQpShluileEZKLETHkiIiJrx15jxZCq15icmfJERETWjr3GFEaJmfJERETWjoGQjJSWKU9ERGTtmCwts8REzW2yuDhNYnRAADBrFq8EERERWQIDIQtQSqY8ERGRpSjlogBvjREREZGsoqI0HYgSEoAdOzTvTk6WGWGBgRARERHJRmnDTTEQIiIiIlkocbipchMIZWRkoF+/fnB1dYW7uzsGDRqE7Oxsk+YVQuCNN96ASqVCIrtmERERWYQSh5sqN4FQv379cOrUKezcuRObN2/Gvn378N5775k077x586BSqcq4hkRERFQUJQ43VS4CoTNnzmDbtm34f//v/6FVq1Zo164dFixYgLVr1+LGjRtFznv06FF8/fXXWLFihUy1LV5uLjBsGBAWpnnniPNERGQNlDjcVLkIhJKTk+Hu7o4WLVropoWGhsLGxga///670flycnLQt29fJCQkwNvb26R1PX78GFlZWXovKSkpU56IiEhOs2ZJW04K5SIQSk9Ph5eXl940Ozs7VK1aFenp6UbnGzVqFNq0aYPI4sa2eM706dPh5uame/n6+pa43gUpLVOeiIhITkocbsqigdDYsWOhUqmKfJ09e7ZEy/7555+xe/duzJs3z6z5xo0bh8zMTN3r6tWrJVp/QUrMlCciIpKb0oabsuiTpceMGYP+/fsXWaZOnTrw9vbG7du39aY/e/YMGRkZRm957d69G2lpaXB3d9eb/tZbb6F9+/bYu3dvofOp1Wqo1WpTm2AyczLl+dRpIiKqyJQ03JRFAyFPT094enoWW65169a4f/8+Dh06hObNmwPQBDr5+flo1apVofOMHTsW7777rt60wMBAzJ07F926dSt95c2kxEx5IiIiS1HKcFPlIkeoYcOGCA8Px+DBg5GSkoL9+/dj2LBh6N27N3x8fAAA169fR4MGDZCSkgIA8Pb2RpMmTfReAPDiiy+idu3asrdBiZnyRERE1q5cBEIAsHr1ajRo0ACdOnVCly5d0K5dOyxdulT3+dOnT3Hu3Dnk5ORYsJbGKTFTnoiIyNqVm9Hnq1atijVr1hj9vFatWhBCFLmM4j4vS9pM+aISpuXOlCciIrJ25eaKUEWgtEx5IiIia1durghVFErKlCciIrJ2DIQsQCmZ8kRERJailIsCvDVGREREslLScFMMhIiIiEg2ShtuioEQERERyUKJw00xECIiIiJZmDPclFwYCBEREZEslDjcFAMhIiIikoUSh5tSCUs+brkcyMrKgpubGzIzM+Hq6mrp6hAREZVbubma3mHFyckpfVd6U3+/eUWIiIiIZKEdbqoocg83xUCIiIiIZKO04ab4ZGkiIiKSlZKGm2IgRERERLJTynBTvDVGREREVouBEBEREVktBkJERERktRgIERERkdViIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERER
FaLT5YuhhACgGYUWyIiIioftL/b2t9xYxgIFePBgwcAAF9fXwvXhIiIiMz14MEDuLm5Gf1cJYoLlaxcfn4+bty4ARcXF6hUKsmWm5WVBV9fX1y9ehWurq6SLVdJKnob2b7yr6K3saK3D6j4bWT7Sk4IgQcPHsDHxwc2NsYzgXhFqBg2NjaoWbNmmS3f1dW1Qh7cz6vobWT7yr+K3saK3j6g4reR7SuZoq4EaTFZmoiIiKwWAyEiIiKyWgyELEStVmPSpElQq9WWrkqZqehtZPvKv4rexorePqDit5HtK3tMliYiIiKrxStCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkJl6Msvv0SbNm3g5OQEd3d3k+YRQmDixImoXr06HB0dERoaivPnz+uVycjIQL9+/eDq6gp3d3cMGjQI2dnZZdCCoplbj0uXLkGlUhX62rBhg65cYZ+vXbtWjibpKcl27tChg0HdP/jgA70yV65cQUREBJycnODl5YW4uDg8e/asLJtilLltzMjIwPDhw1G/fn04OjrixRdfxEcffYTMzEy9cpbahwkJCahVqxYcHBzQqlUrpKSkFFl+w4YNaNCgARwcHBAYGIgtW7bofW7K91Fu5rRx2bJlaN++PapUqYIqVaogNDTUoHz//v0N9lV4eHhZN8Moc9r37bffGtTdwcFBr4zS9qE57SvsfKJSqRAREaEro6T9t2/fPnTr1g0+Pj5QqVRITEwsdp69e/fi5ZdfhlqtRt26dfHtt98alDH3e202QWVm4sSJYs6cOWL06NHCzc3NpHlmzJgh3NzcRGJiojh27Jjo3r27qF27tsjNzdWVCQ8PF82aNRP/+9//xH//+19Rt25d0adPnzJqhXHm1uPZs2fi5s2beq8pU6YIZ2dn8eDBA105AGLlypV65Z5vv1xKsp1DQkLE4MGD9eqemZmp+/zZs2eiSZMmIjQ0VBw5ckRs2bJFeHh4iHHjxpV1cwplbhtPnDgh3nzzTfHzzz+LCxcuiF27domAgADx1ltv6ZWzxD5cu3atsLe3FytWrBCnTp0SgwcPFu7u7uLWrVuFlt+/f7+wtbUVX331lTh9+rT47LPPRKVKlcSJEyd0ZUz5PsrJ3Db27dtXJCQkiCNHjogzZ86I/v37Czc3N3Ht2jVdmdjYWBEeHq63rzIyMuRqkh5z27dy5Urh6uqqV/f09HS9Mkrah+a276+//tJr28mTJ4Wtra1YuXKlroyS9t+WLVvEhAkTxE8//SQAiE2bNhVZ/s8//xROTk5i9OjR4vTp02LBggXC1tZWbNu2TVfG3G1WEgyEZLBy5UqTAqH8/Hzh7e0tZs2apZt2//59oVarxY8//iiEEOL06dMCgEhNTdWV2bp1q1CpVOL69euS190YqeoRFBQkBg4cqDfNlC9QWStp+0JCQsSIESOMfr5lyxZhY2Ojd7JetGiRcHV1FY8fP5ak7qaSah+uX79e2Nvbi6dPn+qmWWIfBgcHi6FDh+r+zsvLEz4+PmL69OmFln/77bdFRESE3rRWrVqJ999/Xwhh2vdRbua2saBnz54JFxcX8d133+mmxcbGisjISKmrWiLmtq+4c6vS9mFp99/cuXOFi4uLyM7O1k1T0v57ninngE8++UQ0btxYb1qvXr1EWFiY7u/SbjNT8NaYgly8eBHp6ekIDQ3VTXNzc0OrVq2QnJwMAEhOToa7uztatGihKxMaGgobGxv8/vvvstVVinocOnQIR48exaBBgww+Gzp0KDw8PBAcHIwVK1ZAyPy4q9K0b/Xq1fDw8ECTJk0wbtw45OTk6C03MDAQ1apV000LCwtDVlYWTp06JX1DiiDVsZSZmQlXV1fY2ekPXSjnPnzy5AkOHTqk992xsbFBaGio7rtTUHJysl55QLMvtOVN+T7KqSRtLCgnJwdPnz5F1apV9abv3bsXXl5eqF+/Pj788EP89ddfktbdFCVtX3Z2Nvz8/ODr64vIyEi975GS9qEU+2/58uXo3bs3KleurDddCfuvJIr7DkqxzUzBQVcVJD09HQD0fiS1f2s/S09Ph5eXl97ndnZ2qFq1qq6MHKSox/Lly9GwYUO0adNGb/rUqVPx2muvwcnJCTt27MCQIUOQnZ2Njz76SLL6F6ek7evbty/8/Pzg4+OD48eP49NPP8W5c+fw008/6ZZb2P7VfiYnKfbh3bt3MW3aNLz33nt60+Xeh3fv3kVeXl6h2/bs2bOFzmNsXzz/XdNOM1ZGTiVpY0GffvopfHx89H5YwsPD8eabb6J27dpIS0vD+PHj8cYbbyA5ORm2traStqEoJWlf/fr1sWLFCjRt2hSZmZmYPXs22rRpg1OnTqFmzZqK2oel3X8pKSk4efIkli9frjddKfuvJIx9B7OyspCbm4t79+6V+pg3BQMhM40dOxYzZ84sssyZM2fQoEEDmWokLVPbV1q5ublYs2YNPv/8c4PPnp/20ksv4eHDh5g1a5YkP6Jl3b7nA4LAwEBUr14dnTp1QlpaGvz9/Uu8XHPItQ+zsrIQERGBRo0aYfLkyXqfleU+pJKZMWMG1q5di7179+olFPfu3Vv378DAQDRt2hT+/v7Yu3cvOnXqZImqmqx169Zo3bq17u82bdqgYcOGWLJkCaZNm2bBmklv+fLlCAwMRHBwsN708rz/lIKBkJnGjBmD/v37F1mmTp06JVq2t7c3AODWrVuoXr26bvqtW7cQFBSkK3P79m29+Z49e4aMjAzd/KVhavtKW4+NGzciJycHMTExxZZt1aoVpk2bhsePH5d6PBq52qfVqlUrAMCFCxfg7+8Pb29vgx4Pt27dAgBJ9h8gTxsfPHiA8PBwuLi4YNOmTahUqVKR5aXch4Xx8PCAra2tbltq3bp1y2hbvL29iyxvyvdRTiVpo9bs2bMxY8YM/Prrr2jatGmRZevUqQMPDw9cuHBB1h/S0rRPq1KlSnjppZdw4cIFAMrah6Vp38OHD7F27VpMnTq12PVYav+VhLHvoKurKxwdHWFra1vqY8IkkmUbkVHmJkvPnj1bNy0zM7PQZOmDBw/qymzfvt1iydIlrUdISIhBTyNjvvjiC1GlSpUS17UkpNrOv/32mwAgjh07JoT4O1n6+R4PS5YsEa6uruLRo0fSNcAEJW1jZmameOWVV0RISIh4+PChSeuSYx8GBweLYcOG6f7Oy8sTNWrUKDJZumvXrnrTWrdubZAsXdT3UW7mtlEIIWbOnClcXV1FcnKySeu4evWqUKlUIikpqdT1NVdJ2ve8Z8+eifr164tRo0YJIZS3D0vavpUrVwq1Wi3u3r1b7Dosuf+eBxOTpZs0aaI3rU+fPgbJ0qU5Jkyqq2RLIgOXL18WR44c0XURP3LkiDhy5IheV/H69euLn376Sff3jBkzhLu7u0hKShLHjx8XkZGRhXaff+mll8Tvv/8ufvvtNxEQEGCx7vNF1ePatWuifv364vfff9eb7/z580KlUomtW7caLPPnn38Wy5YtEydOnBDnz58XCxcuFE5OTmLixIll3p6CzG3fhQsXxNSpU8XB
gwfFxYsXRVJSkqhTp4549dVXdfNou8937txZHD16VGzbtk14enpatPu8OW3MzMwUrVq1EoGBgeLChQt6XXafPXsmhLDcPly7dq1Qq9Xi22+/FadPnxbvvfeecHd31/XQi46OFmPHjtWV379/v7CzsxOzZ88WZ86cEZMmTSq0+3xx30c5mdvGGTNmCHt7e7Fx40a9faU9Bz148EB8/PHHIjk5WVy8eFH8+uuv4uWXXxYBAQGyB+Ylad+UKVPE9u3bRVpamjh06JDo3bu3cHBwEKdOndKVUdI+NLd9Wu3atRO9evUymK60/ffgwQPd7xwAMWfOHHHkyBFx+fJlIYQQY8eOFdHR0bry2u7zcXFx4syZMyIhIaHQ7vNFbTMpMBAqQ7GxsQKAwWvPnj26Mvi/561o5efni88//1xUq1ZNqNVq0alTJ3Hu3Dm95f7111+iT58+wtnZWbi6uooBAwboBVdyKa4eFy9eNGivEEKMGzdO+Pr6iry8PINlbt26VQQFBQlnZ2dRuXJl0axZM7F48eJCy5Y1c9t35coV8eqrr4qqVasKtVot6tatK+Li4vSeIySEEJcuXRJvvPGGcHR0FB4eHmLMmDF6Xc/lZG4b9+zZU+gxDUBcvHhRCGHZfbhgwQLx4osvCnt7exEcHCz+97//6T4LCQkRsbGxeuXXr18v6tWrJ+zt7UXjxo3FL7/8ove5Kd9HuZnTRj8/v0L31aRJk4QQQuTk5IjOnTsLT09PUalSJeHn5ycGDx4s6Y+Mucxp38iRI3Vlq1WrJrp06SIOHz6stzyl7UNzj9GzZ88KAGLHjh0Gy1La/jN2ftC2KTY2VoSEhBjMExQUJOzt7UWdOnX0fg+1itpmUlAJIXO/ZCIiIiKF4HOEiIiIyGoxECIiIiKrxUCIiIiIrBYDISIiIrJaDISIiIjIajEQIiIiIqvFQIiIiIisFgMhIqJi7N27FyqVCvfv37d0VYhIYgyEiKjcyMvLQ5s2bfDmm2/qTc/MzISvry8mTJhQJutt06YNbt68CTc3tzJZPhFZDp8sTUTlyh9//IGgoCAsW7YM/fr1AwDExMTg2LFjSE1Nhb29vYVrSETlCa8IEVG5Uq9ePcyYMQPDhw/HzZs3kZSUhLVr1+L77783GgR9+umnqFevHpycnFCnTh18/vnnePr0KQBACIHQ0FCEhYVB+//CjIwM1KxZExMnTgRgeGvs8uXL6NatG6pUqYLKlSujcePG2LJlS9k3nogkZ2fpChARmWv48OHYtGkToqOjceLECUycOBHNmjUzWt7FxQXffvstfHx8cOLECQwePBguLi745JNPoFKp8N133yEwMBDz58/HiBEj8MEHH6BGjRq6QKigoUOH4smTJ9i3bx8qV66M06dPw9nZuayaS0RliLfGiKhcOnv2LBo2bIjAwEAcPnwYdnam/79u9uzZWLt2LQ4ePKibtmHDBsTExGDkyJFYsGABjhw5goCAAACaK0IdO3bEvXv34O7ujqZNm+Ktt97CpEmTJG8XEcmLt8aIqFxasWIFnJyccPHiRVy7dg0A8MEHH8DZ2Vn30lq3bh3atm0Lb29vODs747PPPsOVK1f0lvePf/wDPXr0wIwZMzB79mxdEFSYjz76CF988QXatm2LSZMm4fjx42XTSCIqcwyEiKjcOXDgAObOnYvNmzcjODgYgwYNghACU6dOxdGjR3UvAEhOTka/fv3QpUsXbN68GUeOHMGECRPw5MkTvWXm5OTg0KFDsLW1xfnz54tc/7vvvos///xTd2uuRYsWWLBgQVk1l4jKEAMhIipXcnJy0L9/f3z44Yfo2LEjli9fjpSUFCxevBheXl6oW7eu7gVogiY/Pz9MmDABLVq0QEBAAC5fvmyw3DFjxsDGxgZbt27F/PnzsXv37iLr4evriw8++AA//fQTxowZg2XLlpVJe4mobDEQIqJyZdy4cRBCYMaMGQCAWrVqYfbs2fjkk09w6dIlg/IBAQG4cuUK1q5di7S0NMyfPx+bNm3SK/PLL79gxYoVWL16NV5//XXExcUhNjYW9+7dK7QOI0eOxPbt23Hx4kUcPnwYe/bsQcOGDSVvKxGVPSZLE1G58Z///AedOnXC3r170a5dO73PwsLC8OzZM/z6669QqVR6n33yySdYsWIFHj9+jIiICLzyyiuYPHky7t+/jzt37iAwMBAjRozAuHHjAABPnz5F69at4e/vj3Xr1hkkSw8fPhxbt27FtWvX4OrqivDwcMydOxcvvPCCbNuCiKTBQIiIiIisFm+NERERkdViIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkJERERktf4/Hu/Z8Fsg2ocAAAAASUVORK5CYII=", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "lr_buondary_points = rectangle.sample_boundary(100, random='Halton', criteria=lambda x, y: np.isclose(x, -Lx / 2) | np.isclose(x, Lx / 2))\n", - "px2, py2 = lr_buondary_points[\"x\"], lr_buondary_points[\"y\"]\n", - "plt.scatter(px2, py2, color='blue')\n", - "plt.title('boundary points for simply supported boundary condition')\n", - "plt.xlabel('X-axis')\n", - "plt.ylabel('Y-axis')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "接下来将控制方程转为实际深度学习模型中需要的约束条件:(简支)边界约束\n", - "> $$\n", - "(w)_{x=-1 \\mid x=+1}=0, \\quad\\left(\\frac{\\partial^2 w}{\\partial x^2}\\right)_{x=-1 \\mid x=+1}=0\n", - "$$" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "constraint_left_right = ppsci.constraint.BoundaryConstraint(\n", - " {\"w\": w, \"ddw_dxx\": w.diff(x, 2)}, # 挠度和 x 轴弯矩表达式\n", - " {\"w\": 0, \"ddw_dxx\": 0}, # 挠度和 x 轴弯矩目标值均为0\n", - " rectangle,\n", - " {\n", - " \"dataset\": \"IterableNamedArrayDataset\",\n", - " \"iters_per_epoch\": 1,\n", - " \"batch_size\": 10000, # 采样一万个点用于训练\n", - " },\n", - " criteria=lambda x, y: np.isclose(x, -Lx / 2) | np.isclose(x, Lx / 2), # 采样点在左右两侧边界上\n", - " loss=ppsci.loss.MSELoss(), # 使用均方根误差损失函数\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4.3 自由边界条件\n", - "\n", - "接下来讲解如何将开头简介中的左右简支边界条件转换为深度学习代码。" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "同样先预览一下简支边界条件所对应的在矩形上下边界上的训练点" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkIAAAHHCAYAAABTMjf2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAABHdElEQVR4nO3dd3hUZf7+8XuSkAQMKWwCIRApAWkGcGGJICy6ZAVBBAUERJpIUYoIgmChqqCioIhSluLuF6Wp6LqAKGURiCIdKQoYikACiCRANCHJ8/uDX2Yd0mZCJpNw3q/rmgty5jlnPs9zytyZOefEZowxAgAAsCAvTxcAAADgKQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhOGXChAmy2Ww6f/68p0spkKpVq6pPnz6eLsOtstZRcZGenq7Ro0crMjJSXl5e6tixo9tf8/XXX1f16tXl7e2thg0buv31nHH33Xfr9ttv93QZBXLs2DHZbDYtWrTI06UUW3fffbfuvvtu+8+ujpnNZtOECRPcUhuc4+PpAgB43tatW7V27VoNHz5cwcHBhbLMBQsW6PXXX9fw4cP15z//WbfeemuhLDc3a9eu1ejRo/Xoo49qwoQJCg0NdevrAc5atWqVtm3bRuAppghCwE3ihRde0JgxYwo079atWzVx4kT16dOn0ILQ+vXrValSJU2fPr1QlufM63l5eWn+/Pny9fUtktcErlelShX99ttvKlWqlH3aqlWrNGvWrByD0G+//SYfH96KPYmvxgAX/f7778rMzPR0Gdn4+PjI39/f02XYnT17ttBClSRlZmbq999/z/P1SpcunW8Iym85cJ8rV654ugS3s9ls8vf3l7e3t1Pt/f39CUIeRhCCS86fP6+HH35YgYGB+tOf/qSnnnoq25tKenq6Jk+erKioKPn5+alq1ap67rnnlJqa6tAut+/Grz+fZ9GiRbLZbNqyZYtGjBihsLAw3XLLLXrwwQd17tw5h3mNMXrppZdUuXJllSlTRvfcc4/279+f7TUuXLigZ555RtHR0QoICFBgYKDuu+8+7dmzx6Hdxo0bZbPZtGTJEr3wwguqVKmSypQpo927d8tms+X4acfWrVtls9n04Ycf5jqOWctdunSpnnvuOYWHh+uWW27RAw88oJMnT2Zrv3z5cjVq1EilS5dWaGioHn30UZ06dcqhTU7nCNlsNg0ZMkQrV67U7bffLj8/P9WrV09r1qxxmG/UqFGSpGrVqslms8lms+nYsWOSpC+//FLNmzdXcHCwAgICVKtWLT333HO59i3rHIkNGzZo//799uVt3LhR0rU3w5EjRyoyMlJ+fn6qVauWpk2bJmNMjrUvXrxY9erVk5+fn0Pd17dduHChrly5Yn+9rHM08lrOqVOn9Nhjj6lChQr2sVmwYEG25aempmr8+PGqUaOG/Pz8FBkZqdGjR2fbpvOyY8cONWvWTKVLl1a1atU0e/bsbG3Onj2rfv36qUKFCvL391eDBg30/vvvO7TJ2nayxjNLTuem9OnTRwEBATp16pQ6duyogIAAhYWF6ZlnnlFGRobD/BcvXlSfPn0UFBSk4OBg9e7dWxcvXsxW4969e9WnTx9Vr15d/v7+Cg8P12OPPaZffvnFoV3W9njgwAE98sgjCgkJUfPmzbVw4ULZbDbt2rUr27JfeeUVeXt7Z9u2r3fq1Cn169dPERER8vPzU7Vq1fTEE08oLS3N3uann35Sly5dVK5cOZUpU0Z33nmn/vOf/+Q4lsuWLdPLL7+sypUry9/fX61atdKRI0eyve7cuXMVFRWl0q
VLq0mTJvr666+ztbl+PfTp00ezZs2SJPu2+cf9NKfj4K5du3TfffcpMDBQAQEBatWqlb755huHNq4cF5E3Yihc8vDDD6tq1aqaMmWKvvnmG7399tv69ddf9c9//tPe5vHHH9f777+vzp07a+TIkfr22281ZcoUHTx4UJ988kmBX3vo0KEKCQnR+PHjdezYMc2YMUNDhgzR0qVL7W3GjRunl156SW3btlXbtm21c+dO3XvvvQ4HSOnaQXLlypXq0qWLqlWrpsTERM2ZM0ctW7bUgQMHFBER4dB+8uTJ8vX11TPPPKPU1FTVrl1bd911lxYvXqynn37aoe3ixYtVtmxZdejQId8+vfzyy7LZbHr22Wd19uxZzZgxQ7Gxsdq9e7dKly4t6doBr2/fvvrLX/6iKVOmKDExUW+99Za2bNmiXbt25fupy+bNm/Xxxx/rySefVNmyZfX222+rU6dOOnHihP70pz/poYce0o8//qgPP/xQ06dPt59bExYWpv379+v+++9X/fr1NWnSJPn5+enIkSPasmVLrq8XFhamf/3rX3r55Zd1+fJlTZkyRZJUp04dGWP0wAMPaMOGDerXr58aNmyoL774QqNGjdKpU6eyBcv169dr2bJlGjJkiEJDQ1W1atUcX/Nf//qX5s6dq23btukf//iHJKlZs2Z5LicxMVF33nmnPSiFhYVp9erV6tevn5KTkzV8+HBJ1z5BeuCBB7R582YNGDBAderU0b59+zR9+nT9+OOPWrlyZZ7jL0m//vqr2rZtq4cffljdu3fXsmXL9MQTT8jX11ePPfaYpGtfkdx99906cuSIhgwZomrVqmn58uXq06ePLl68qKeeeirf18lJRkaGWrdurZiYGE2bNk1fffWV3njjDUVFRemJJ56QdO0XiA4dOmjz5s0aNGiQ6tSpo08++US9e/fOtrwvv/xSP/30k/r27avw8HDt379fc+fO1f79+/XNN99kC+NdunRRzZo19corr8gYo86dO2vw4MFavHix7rjjDoe2ixcv1t13361KlSrl2p/Tp0+rSZMmunjxogYMGKDatWvr1KlTWrFihVJSUuTr66vExEQ1a9ZMKSkpGjZsmP70pz/p/fff1wMPPKAVK1bowQcfdFjm1KlT5eXlpWeeeUZJSUl67bXX1KNHD3377bf2NvPnz9fAgQPVrFkzDR8+XD/99JMeeOABlStXTpGRkbnWO3DgQJ0+fVpffvml/vWvf+W+ov6//fv3q0WLFgoMDNTo0aNVqlQpzZkzR3fffbf++9//KiYmxqG9M8dF5MMAThg/fryRZB544AGH6U8++aSRZPbs2WOMMWb37t1Gknn88ccd2j3zzDNGklm/fr19miQzfvz4bK9VpUoV07t3b/vPCxcuNJJMbGysyczMtE9/+umnjbe3t7l48aIxxpizZ88aX19f065dO4d2zz33nJHksMzff//dZGRkOLxufHy88fPzM5MmTbJP27Bhg5FkqlevblJSUhzaz5kzx0gyBw8etE9LS0szoaGhDq+Vk6zlVqpUySQnJ9unL1u2zEgyb731ln155cuXN7fffrv57bff7O0+//xzI8mMGzfOPi1rHf2RJOPr62uOHDlin7Znzx4jycycOdM+7fXXXzeSTHx8vMP806dPN5LMuXPn8uxPTlq2bGnq1avnMG3lypVGknnppZccpnfu3NnYbDaHOiUZLy8vs3//fqder3fv3uaWW27JNj235fTr189UrFjRnD9/3mF6t27dTFBQkH19/+tf/zJeXl7m66+/dmg3e/ZsI8ls2bIlz7patmxpJJk33njDPi01NdU0bNjQlC9f3qSlpRljjJkxY4aRZP7v//7P3i4tLc00bdrUBAQE2LeTrG1nw4YNDq8THx9vJJmFCxc6jIkkh23aGGPuuOMO06hRI/vPWevltddes09LT083LVq0yLbM6/cDY4z58MMPjSSzadMm+7Ss7bF79+7Z2nfv3t1EREQ47IM7d+7M9lo56dWrl/Hy8jLfffddtuey9vvhw4cbSQ7r7NKlS6ZatWqmatWq9tfNGss6deqY1NRUe9u33nrLSDL79u0zxvxvP2zYsKFDu7lz5xpJpmXLlvZpOa2HwYMHZ9s3s1x/HOzYsaPx9fU1R48etU87ffq0KVu2rPnrX/9qn+bscRH546sxuGTw4MEOPw8dOlTStZMB//jviBEjHNqNHDlSkrJ9NO2KAQMGOPy22aJFC2VkZOj48eOSpK+++kppaWkaOnSoQ7us3+z/yM/PT15e1zb/jIwM/fLLL/avfXbu3Jmtfe/eve2f0GR5+OGH5e/vr8WLF9unffHFFzp//rweffRRp/rUq1cvlS1b1v5z586dVbFiRfs4bt++XWfPntWTTz7pcP5Pu3btVLt2bafGMzY2VlFRUfaf69evr8DAQP3000/5zpv1adOnn35aKOdFrVq1St7e3ho2bJjD9JEjR8oYo9WrVztMb9myperWrXvDr3v9cowx+uijj9S+fXsZY3T+/Hn7o3Xr1kpKSrJvB8uXL1edOnVUu3Zth3Z/+9vfJEkbNmzI9/V9fHw0cOBA+8++vr4aOHCgzp49qx07dki6Njbh4eHq3r27vV2pUqU0bNgwXb58Wf/9738L3P9BgwY5/NyiRQuH9b9q1Sr5+PjYPyGSJG9vb/v+/Ud/3A9+//13nT9/Xnfeeack5bjvXP/a0rXt/vTp0w5jt3jxYpUuXVqdOnXKtR+ZmZlauXKl2rdvr8aNG2d7Pmu/X7VqlZo0aaLmzZvbnwsICNCAAQN07NgxHThwwGG+vn37Opxb1qJFC0myj1HWfjho0CCHdllfJRaWjIwMrV27Vh07dlT16tXt0ytWrKhHHnlEmzdvVnJyssM8+R0XkT+CEFxSs2ZNh5+joqLk5eVlP5/k+PHj8vLyUo0aNRzahYeHKzg4+IZ2zusvvw4JCZF07WuHrNfOqcawsDB72yyZmZmaPn26atasKT8/P4WGhiosLEx79+5VUlJStteuVq1atmnBwcFq3769PvjgA/u0xYsXq1KlSvY3yfxcX6vNZlONGjUcxlOSatWqlW3e2rVrOzWeOV22HhISYh+3vHTt2lV33XWXHn/8cVWoUEHdunXTsmXLChyKjh8/roiICIfwJ1372izr+T/KadwL4vrlnDt3ThcvXtTcuXMVFhbm8Ojbt6+ka+frSNLhw4e1f//+bO1uu+02h3Z5iYiI0C233OIwLWv+P67rmjVr2gN6ltzGxln+/v4KCwtzmHb9+j9+/LgqVqyogIAAh3Y5bXcXLlzQU089pQoVKqh06dIKCwuzj6+z+87f//53VaxY0f5LRGZmpj788EN16NAh27bxR+fOnVNycnK+92U6fvx4jrXnNpYFPbaUKlXKIbDcqHPnziklJSXX2jMzM7OdQ5hf7cgf5wjhhuR2A78bubHf9SdxZsntKgxz3Um2znjllVf04osv6rHHHtPkyZNVrlw5eXl5afjw4Tm+yV//aVCWXr16afny5dq6dauio6P12Wef6cknn8z2ZuZJNzJupUuX1
qZNm7Rhwwb95z//0Zo1a7R06VL97W9/09q1a52+Mqagchv3G11O1jp+9NFHczwPRrr2yVlW2+joaL355ps5tsvr/BB3yG3fcnW/KaiHH35YW7du1ahRo9SwYUMFBAQoMzNTbdq0cXrf8fb21iOPPKJ58+bp3Xff1ZYtW3T69GmnP0ktbIV5bClqJbn24oIgBJccPnzY4Te8I0eOKDMz034Sa5UqVZSZmanDhw/bf/uSpMTERF28eFFVqlSxTwsJCcl2VUpaWprOnDlToNqyln348GGH39LOnTuX7bejFStW6J577tH8+fMdpl+8eNGlG/G1adNGYWFhWrx4sWJiYpSSkqKePXs6Pf/hw4cdfjbG6MiRI/Y34aw+/fDDD9k+Zfrhhx8cxvNG5BVcvby81KpVK7Vq1UpvvvmmXnnlFT3//PPasGGDYmNjXXqdKlWq6KuvvtKlS5ccfvM/dOiQ/fmiEBYWprJlyyojIyPfPkRFRWnPnj1q1apVgQP+6dOndeXKFYdPhX788UdJcth39u7dq8zMTIcgff3YZP3Gf/2+cyOftlapUkXr1q3T5cuXHT4V+uGHHxza/frrr1q3bp0mTpyocePG2adfvx07o1evXnrjjTf073//W6tXr1ZYWJhat26d5zxhYWEKDAzU999/n29/rq9dKvh29sdjyx/3w6tXryo+Pl4NGjTIc35nt5uwsDCVKVMm19q9vLyKPHhbQfH5tRUlQtZloFlmzpwpSbrvvvskSW3btpUkzZgxw6Fd1m/T7dq1s0+LiorSpk2bHNrNnTs3199s8xMbG6tSpUpp5syZDr8NXV+LdO23qOt/Y1q+fHm+l+1ez8fHx34V0KJFixQdHW0PMc745z//qUuXLtl/XrFihc6cOWMfz8aNG6t8+fKaPXu2w6Xaq1ev1sGDBx3G80ZkvUFf/+Z64cKFbG2z/nSFK5eOZ2nbtq0yMjL0zjvvOEyfPn26bDabvd/u5u3trU6dOumjjz7K8U31j5cfP/zwwzp16pTmzZuXrd1vv/3m1L1x0tPTNWfOHPvPaWlpmjNnjsLCwtSoUSNJ18YmISHB4Wqf9PR0zZw5UwEBAWrZsqWka2/K3t7e2fadd999N986ctO2bVulp6frvffes0/LyMiw799Zsj59uH7fyWkfy0/9+vVVv359/eMf/9BHH32kbt265Xs/naw/1fLvf/9b27dvz/Z8Vl1t27bVtm3bFBcXZ3/uypUrmjt3rqpWreryeWeNGzdWWFiYZs+e7XAF6qJFi3K8xcD1ctu/ruft7a17771Xn376qf0rU+naL5IffPCBmjdvrsDAQJdqR/74RAguiY+P1wMPPKA2bdooLi5O//d//6dHHnnE/htRgwYN1Lt3b82dO1cXL15Uy5YttW3bNr3//vvq2LGj7rnnHvuyHn/8cQ0aNEidOnXS3//+d+3Zs0dffPFFgf80Qtb9UaZMmaL7779fbdu21a5du7R69epsy7z//vs1adIk9e3bV82aNdO+ffu0ePHiAn3f36tXL7399tvasGGDXn31VZfmLVeunJo3b66+ffsqMTFRM2bMUI0aNdS/f39J185BePXVV9W3b1+1bNlS3bt3t18+X7Vq1WyX7hdU1pvx888/r27duqlUqVJq3769Jk2apE2bNqldu3aqUqWKzp49q3fffVeVK1d2OBHVWe3bt9c999yj559/XseOHVODBg20du1affrppxo+fLjDSd3uNnXqVG3YsEExMTHq37+/6tatqwsXLmjnzp366quv7CGwZ8+eWrZsmQYNGqQNGzborrvuUkZGhg4dOqRly5bpiy++yPHE3T+KiIjQq6++qmPHjum2227T0qVLtXv3bs2dO9d+B+IBAwZozpw56tOnj3bs2KGqVatqxYoV2rJli2bMmGH/BC0oKEhdunTRzJkzZbPZFBUVpc8//9ypc5Vy0759e911110aM2aMjh07prp16+rjjz/Ods5PYGCg/vrXv+q1117T1atXValSJa1du1bx8fEFet1evXrpmWeekSSnvxZ75ZVXtHbtWrVs2dJ+O4MzZ85o+fLl2rx5s4KDgzVmzBh9+OGHuu+++zRs2DCVK1dO77//vuLj4/XRRx+5/NV1qVKl9NJLL2ngwIH629/+pq5duyo+Pl4LFy506piRtX8NGzZMrVu3lre3t7p165Zj25deesl+764nn3xSPj4+mjNnjlJTU/Xaa6+5VDec5JmL1VDSZF0Ke+DAAdO5c2dTtmxZExISYoYMGeJwWbcxxly9etVMnDjRVKtWzZQqVcpERkaasWPHmt9//92hXUZGhnn22WdNaGioKVOmjGndurU5cuRIrpfPX3+5bE6XEWdkZJiJEyeaihUrmtKlS5u7777bfP/999mW+fvvv5uRI0fa2911110mLi7OtGzZ0uFS2KzXWL58eZ7jU69ePePl5WV+/vlnp8Yza7kffvihGTt2rClfvrwpXbq0adeunTl+/Hi29kuXLjV33HGH8fPzM+XKlTM9evTI9lq5XT4/ePDgbMu7fjyMMWby5MmmUqVKxsvLy34p/bp160yHDh1MRESE8fX1NREREaZ79+7mxx9/zLePOV0+b8y1y5iffvppExERYUqVKmVq1qxpXn/9dYdLgPOqPTd5XT6f23ISExPN4MGDTWRkpClVqpQJDw83rVq1MnPnznVol5aWZl599VVTr1494+fnZ0JCQkyjRo3MxIkTTVJSUp51ZY3D9u3bTdOmTY2/v7+pUqWKeeedd3Ksp2/fviY0NNT4+vqa6OjoHC8nP3funOnUqZMpU6aMCQkJMQMHDjTff/99jpfP5zQmOW0rv/zyi+nZs6cJDAw0QUFBpmfPnmbXrl3Zlvnzzz+bBx980AQHB5ugoCDTpUsXc/r06WyXgWe9Rl63Xjhz5ozx9vY2t912W+4DmIPjx4+bXr16mbCwMOPn52eqV69uBg8e7HBp+9GjR03nzp1NcHCw8ff3N02aNDGff/65w3Jy279zugTeGGPeffddU61aNePn52caN25sNm3alO2YkdO86enpZujQoSYsLMzYbDaHsb9+3Iy5diuB1q1bm4CAAFOmTBlzzz33mK1btzq0ceW4iLzZjOGMKuBG3XHHHSpXrpzWrVvnVPuNGzfqnnvu0fLly9W5c2c3VwcUT+fPn1fFihU1btw4vfjii54uBxbFOULADdq+fbt2796tXr16eboUoERZtGiRMjIyXLrAAChsnCMEFND333+vHTt26I033lDFihXVtWtXT5cElAjr16/XgQMH9PLLL6tjx465/ukUoCjwiRBQQCtWrFDfvn119epVffjhh8XqL78DxdmkSZM0YsQINWzYMNuVaUBR4xwhAABgWXwiBAAALIsgBAAALIuTpfORmZmp06dPq2zZsjf097MAAEDRMcbo0qVLioiIyPMmmgShfJw+fZq/7QIAQAl18uRJVa5cOdfnCUL5yLqt/cmTJ/kbLwAAlBDJycmKjIx0+APPOSEI5SPr67DAwECCEAAAJUx+p7VwsjQAALAsghAAALAsghAA
ALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAs7iztCRkZ0tdfS2fOSBUrSi1aSN7eN942LU16913p6FEpKkp68knJ17dk1+qu/lt9XKnV87W6Y3t1R52uvH5xqJVx9exx0NP7VUEY5CkpKclIMklJSYWzwI8+MqZyZWOk/z0qV742/UbajhpljLe3Y1tv72vTS2qt7uq/1ceVWj1fqzu2V3fU6crrF4daGVfPHgc9vV9dx9n3b4JQPgo1CH30kTE2m+OKl65Ns9kcNxZX2o4alb3dHx8F2bA8Xau7+m/1caVWz9fqju3VHXW6a1zdVSvj6tnjoKf3qxwQhApJoQWh9PTsSfn6jSUy8lo7V9qmpmZP1dc/vL2vtSsptaakuKf/Vh9XavV8re7YXlNSCr9Od42ru2plXD17HHTXMfsGEYQKSaEFoQ0b8l7xWY8NG1xrO326c22nTy85tQ4e7J7+W31cqdXztbpje3V2f3GlTneNq7tqZVwLv//F4Zh9g5x9/+Zk6aJy5kzhtstqe/Soc22dbedKDe6q9fDhwl/m0aNShQruWa6zPD2u1OraMt1Rqzu2V2f3F1fqlNwzru6qlXF1rp27joPuOmYXES6fLyoVKzrfzpW2UVHOtXW2XdZynW3njlpr1iz8ZUZFMa7U6vla3bG9Oru/uFKn5J5xdVetjKtz7dx1HHTXMbuo3PBnTze5Qj9HKKeTyaScvxd2pq07z7nwVK1Z3zcXdv+tPq7U6vla3bG9uvtclsIc1+J0jpBVx7UkHbNvEOcIFRK3XDV2/caS15UCzrR151U4nqrVXf23+rhSq+drdcf26u6rmwpzXIvTVWNWHdeSdMy+AQShQlIk9xGKjHT+3hG5tS2q+7IUZa3u6r/Vx5VaPV+rO7bXorzfzY2Oa3G9j5CVxrUkHbMLyNn3b5sxxhTdF3ElT3JysoKCgpSUlKTAwMDCWWhJuvutp2vlztLUerPWyh2QubO0p8e1JB2zC8DZ92+CUD7cEoQAAIBbOfv+zVVjAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAskpcEJo1a5aqVq0qf39/xcTEaNu2bU7Nt2TJEtlsNnXs2NG9BQIAgBKjRAWhpUuXasSIERo/frx27typBg0aqHXr1jp79mye8x07dkzPPPOMWrRoUUSVAgCAkqBEBaE333xT/fv3V9++fVW3bl3Nnj1bZcqU0YIFC3KdJyMjQz169NDEiRNVvXr1IqwWAAAUdyUmCKWlpWnHjh2KjY21T/Py8lJsbKzi4uJynW/SpEkqX768+vXrVxRlAgCAEsTH0wU46/z588rIyFCFChUcpleoUEGHDh3KcZ7Nmzdr/vz52r17t9Ovk5qaqtTUVPvPycnJBaoXAAAUfyXmEyFXXbp0ST179tS8efMUGhrq9HxTpkxRUFCQ/REZGenGKgEAgCeVmE+EQkND5e3trcTERIfpiYmJCg8Pz9b+6NGjOnbsmNq3b2+flpmZKUny8fHRDz/8oKioqGzzjR07ViNGjLD/nJycTBgCAOAmVWKCkK+vrxo1aqR169bZL4HPzMzUunXrNGTIkGzta9eurX379jlMe+GFF3Tp0iW99dZbuYYbPz8/+fn5FXr9AACg+CkxQUiSRowYod69e6tx48Zq0qSJZsyYoStXrqhv376SpF69eqlSpUqaMmWK/P39dfvttzvMHxwcLEnZpgMAAGsqUUGoa9euOnfunMaNG6eEhAQ1bNhQa9assZ9AfeLECXl53bSnPQEAgEJmM8YYTxdRnCUnJysoKEhJSUkKDAz0dDkAAMAJzr5/8/EJAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwrBIXhGbNmqWqVavK399fMTEx2rZtW65t582bpxYtWigkJEQhISGKjY3Nsz0AALCWEhWEli5dqhEjRmj8+PHauXOnGjRooNatW+vs2bM5tt+4caO6d++uDRs2KC4uTpGRkbr33nt16tSpIq4cAAAURzZjjPF0Ec6KiYnRX/7yF73zzjuSpMzMTEVGRmro0KEaM2ZMvvNnZGQoJCRE77zzjnr16uXUayYnJysoKEhJSUkKDAy8ofoBAEDRcPb9u8R8IpSWlqYdO3YoNjbWPs3Ly0uxsbGKi4tzahkpKSm6evWqypUr564yAQBACeLj6QKcdf78eWVkZKhChQoO0ytUqKBDhw45tYxnn31WERERDmHqeqmpqUpNTbX/nJycXLCCAQBAsVdiPhG6UVOnTtWSJUv0ySefyN/fP9d2U6ZMUVBQkP0RGRlZhFUCAICiVGKCUGhoqLy9vZWYmOgwPTExUeHh4XnOO23aNE2dOlVr165V/fr182w7duxYJSUl2R8nT5684doBAEDxVGKCkK+vrxo1aqR169bZp2VmZmrdunVq2rRprvO99tprmjx5stasWaPGjRvn+zp+fn4KDAx0eAAAgJtTiTlHSJJGjBih3r17q3HjxmrSpIlmzJihK1euqG/fvpKkXr16qVKlSpoyZYok6dVXX9W4ceP0wQcfqGrVqkpISJAkBQQEKCAgwGP9AAAAxUOJCkJdu3bVuXPnNG7cOCUkJKhhw4Zas2aN/QTqEydOyMvrfx9yvffee0pLS1Pnzp0dljN+/HhNmDChKEsHAADFUIm6j5AncB8hAABKnpvuPkIAAACFjSAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyA
EAAAsiyAEAAAsiyAEAAAsy+UgtGbNGm3evNn+86xZs9SwYUM98sgj+vXXXwu1OAAAAHdyOQiNGjVKycnJkqR9+/Zp5MiRatu2reLj4zVixIhCLxAAAMBdfFydIT4+XnXr1pUkffTRR7r//vv1yiuvaOfOnWrbtm2hFwgAAOAuLn8i5Ovrq5SUFEnSV199pXvvvVeSVK5cOfsnRQAAACWBy58INW/eXCNGjNBdd92lbdu2aenSpZKkH3/8UZUrVy70AgEAANzF5U+E3nnnHfn4+GjFihV67733VKlSJUnS6tWr1aZNm0IvEAAAwF1sxhjj6SKKs+TkZAUFBSkpKUmBgYGeLgcAADjB2fdvp74aS05Oti8kv/OACAsAAKCkcCoIhYSE6MyZMypfvryCg4Nls9mytTHGyGazKSMjo9CLBAAAcAengtD69etVrlw5+/9zCkIAAAAlDecI5YNzhAAAKHmcff92+aqxCRMmKDMzM9v0pKQkde/e3dXFAQAAeIzLQWj+/Plq3ry5fvrpJ/u0jRs3Kjo6WkePHi3U4gAAANzJ5SC0d+9eVa5cWQ0bNtS8efM0atQo3XvvverZs6e2bt3qjhoBAADcwuUgFBISomXLlmnIkCEaOHCg3nrrLa1evVovv/yyfHxcvlG1y2bNmqWqVavK399fMTEx2rZtW57tly9frtq1a8vf31/R0dFatWqV22sEAAAlg8tBSJJmzpypt956S927d1f16tU1bNgw7dmzp7Bry2bp0qUaMWKExo8fr507d6pBgwZq3bq1zp49m2P7rVu3qnv37urXr5927dqljh07qmPHjvr+++/dXisAACj+XL5qrE2bNtq+fbtmz56tzp0767ffftOIESO0aNEiTZw4UaNHj3ZXrYqJidFf/vIXvfPOO5KkzMxMRUZGaujQoRozZky29l27dtWVK1f0+eef26fdeeedatiwoWbPnu3Ua3LVGAAAJY/brhrLyMjQ3r171blzZ0lS6dKl9d5772nFihWaPn16wSvOR1pamnbs2KHY2Fj7NC8vL8XGxiouLi7HeeLi4hzaS1Lr1q1zbS9JqampSk5OdngAAICbk8tB6Msvv1RERES26e3atdO+ffsKpaicnD9/XhkZGapQoYLD9AoVKighISHHeRISElxqL0lTpkxRUFCQ/REZGXnjxQMAgGKpQOcI5SY0NLQwF+cRY8eOVVJSkv1x8uRJT5cEAADcxOXLvDIyMjR9+nQtW7ZMJ06cUFpamsPzFy5cKLTi/ig0NFTe3t5KTEx0mJ6YmKjw8PAc5wkPD3epvST5+fnJz8/vxgsGAADFnsufCE2cOFFvvvmmunbtqqSkJI0YMUIPPfSQvLy8NGHCBDeUeI2vr68aNWqkdevW2adlZmZq3bp1atq0aY7zNG3a1KG9dO2rvdzaAwAAa3E5CC1evFjz5s3TyJEj5ePjo+7du+sf//iHxo0bp2+++cYdNdqNGDFC8+bN0/vvv6+DBw/qiSee0JUrV9S3b19JUq9evTR27Fh7+6eeekpr1qzRG2+8oUOHDmnChAnavn27hgwZ4tY6AQBAyeDyV2MJCQmKjo6WJAUEBCgpKUmSdP/99+vFF18s3Oqu07VrV507d07jxo1TQkKCGjZsqDVr1thPiD5x4oS8vP6X7Zo1a6YPPvhAL7zwgp577jnVrFlTK1eu1O233+7WOgEAQMngchCqXLmyzpw5o1tvvVVRUVFau3at/vznP+u7774rknNrhgwZkusnOhs3bsw2rUuXLurSpYubqwIAACWRy1+NPfjgg/bzboYOHaoXX3xRNWvWVK9evfTYY48VeoEAAADu4vKdpa8XFxenuLg41axZU+3bty+suooN7iwNAEDJ4+z79w3/ldSmTZtyFRYAACiRbuiGioGBgfrpp58KqxYAAIAi5XQQOn36dLZpN/itGgAAgEc5HYTq1aunDz74wJ21AAAAFCmng9DLL7+sgQMHqkuXLvY/o/Hoo49yAjEAACixnA5CTz75pPbu3atffvlFdevW1b///W+99957N8UfWgUAANbk0lVj1apV0/r16/XOO+/ooYceUp06deTj47iInTt3FmqBAAAA7uLy5fPHjx/Xxx9/rJCQEHXo0CFbEAIAACgpXEoxWX9sNTY2Vvv371dYWJi76gIAAHA7p4NQmzZttG3bNr3zzjvq1auXO2sCAAAoEk4HoYyMDO3du1eVK1d2Zz0AAABFxukg9OWXX7qzDgAAgCJ3Q39iAwAAoCQjCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsqMUHowoUL6tGjhwIDAxUcHKx+/frp8uXLebYfOnSoatWqpdKlS+vWW2/VsGHDlJSUVIRVAwCA4qzEBKEePXpo//79+vLLL/X5559r06ZNGjBgQK7tT58+rdOnT2vatGn6/vvvtWjRIq1Zs0b9+vUrwqoBAEBxZjPGGE8XkZ+DBw+qbt26+u6779S4cWNJ0po1a9S2bVv9/PPPioiIcGo5y5cv16OPPqorV67Ix8fHqXmSk5MVFBSkpKQkBQYGFrgPAACg6Dj7/l0iPhGKi4tTcHCwPQRJUmxsrLy8vPTtt986vZyswcgrBKWmpio5OdnhAQAAbk4lIgglJCSofPnyDtN8fHxUrlw5JSQkOLWM8+fPa/LkyXl+nSZJU6ZMUVBQkP0RGRlZ4LoBAEDx5tEgNGbMGNlstjwfhw4duuHXSU5OVrt27VS3bl1NmDAhz7Zjx45VUlKS/XHy5Mkbfn0AAFA8OXeijJuMHDlSffr0ybNN9erVFR4errNnzzpMT09P14ULFxQeHp7n/JcuXVKbNm1UtmxZffLJJypVqlSe7f38/OTn5+dU/QAAoGTzaBAKCwtTWFhYvu2aNm2qixcvaseOHWrUqJEkaf369crMzFRMTEyu8yUnJ6t169by8/PTZ599Jn9//0KrHQAAlHwl4hyhOnXqqE2bNurfv7+2bdumLVu2aMiQIerWrZv9irFTp06pdu3a2rZtm6RrIejee+/VlStXNH/+fCUnJyshIUEJCQnKyMjwZHcAAEAx4dFPhFyxePFiDRkyRK1atZKXl5c6deqkt99+2/781atX9cMPPyglJUWStHPnTvsVZTVq1HBYVnx8vKpWrVpktQMAgOKpRNxHyJO4jxAAACXPTXUfIQAAAHcgCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAM
siCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsqMUHowoUL6tGjhwIDAxUcHKx+/frp8uXLTs1rjNF9990nm82mlStXurdQAABQYpSYINSjRw/t379fX375pT7//HNt2rRJAwYMcGreGTNmyGazublCAABQ0vh4ugBnHDx4UGvWrNF3332nxo0bS5Jmzpyptm3batq0aYqIiMh13t27d+uNN97Q9u3bVbFixaIqGQAAlAAl4hOhuLg4BQcH20OQJMXGxsrLy0vffvttrvOlpKTokUce0axZsxQeHu7Ua6Wmpio5OdnhAQAAbk4lIgglJCSofPnyDtN8fHxUrlw5JSQk5Drf008/rWbNmqlDhw5Ov9aUKVMUFBRkf0RGRha4bgAAULx5NAiNGTNGNpstz8ehQ4cKtOzPPvtM69ev14wZM1yab+zYsUpKSrI/Tp48WaDXBwAAxZ9HzxEaOXKk+vTpk2eb6tWrKzw8XGfPnnWYnp6ergsXLuT6ldf69et19OhRBQcHO0zv1KmTWrRooY0bN+Y4n5+fn/z8/JztAgAAKME8GoTCwsIUFhaWb7umTZvq4sWL2rFjhxo1aiTpWtDJzMxUTExMjvOMGTNGjz/+uMO06OhoTZ8+Xe3bt7/x4gEAQIlXIq4aq1Onjtq0aaP+/ftr9uzZunr1qoYMGaJu3brZrxg7deqUWrVqpX/+859q0qSJwsPDc/y06NZbb1W1atWKugsAAKAYKhEnS0vS4sWLVbt2bbVq1Upt27ZV8+bNNXfuXPvzV69e1Q8//KCUlBQPVgkAAEoSmzHGeLqI4iw5OVlBQUFKSkpSYGCgp8sBAABOcPb9u8R8IgQAAFDYCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyfDxdACRlZEhffy2dOSNVrCi1aCF5e+fcNi1Nevdd6ehRKSpKevJJyde3aJbp6nJdaessd9XqynI9Was7xtRdtRaHdXUzjiu1lqzjoDtqvVnH1VMM8pSUlGQkmaSkJPe8wEcfGVO5sjHS/x6VK1+bfr1Ro4zx9nZs6+19bbq7l+nqcl1p6yx31erKcj1ZqzvG1F21Fod1dTOOK7WWrOOgO2q9WcfVDZx9/yYI5cOtQeijj4yx2Rw3EunaNJvNccMaNSp7uz8+sjYsdyzT1eW60tZZ7qrVleV6slZ3jKm7ai0O6+pmHFdqLVnHQXfUerOOq5sQhAqJ24JQenr2VH39hhUZea1damr2VH39w9vbmJSUwl9maqprtbrS1lnuqtWV5Xqy1tTUwh9Td9WakuL5dXUzjiu1lqzjoDtqdcextTiMqxsRhAqJ24LQhg15byRZjw0bjJk+3bm2gwcX/jKnT3etVlfaOstdtbqyXE/W6uwyXRlTd9Xq7DboznV1M44rtZas46A7anXHsbU4jKsbOfv+zcnSnnLmjPPtjh51ru3hw4W/zKNHpQoVnF+us1xp665aXVmus9xRq7PLdGVMXVmuK7U6uw26c13djONKrSXrOOiOWp1V0sa1GODyeU+pWNH5dlFRzrWtWbPwlxkV5VqtrrR1lrtqdWW5znJHrc4u05UxdWW5rtTq7DboznV1M44rtZas46A7anXHsVXy/LgWB279XOom4PZzhHI68Uy6se+bC3OZf/xu3JnlutLWWe6qtbicI5RfrVnnXBTmmLqr1oKcI+Sp7bUkjSu1lqzjoDtqdcextTiMqxtxjlAhKZKrxq7fsArjCoTCXKary3WlrbPcVWtxuWosv1rdMabuqrU4rKubcVyptWQdB91R6806rm5CECokHrmPUGRk4d+T4kaX6epyXWnrLHfVWhzvI5RTre4YU3fVWhzW1c04rtRaso6D7qj1Zh1XN3D2/dtmjDGe/GquuEtOTlZQUJCSkpIUGBjonhfx9F1KubM0d5bmztIlZ1yptWQdB7mztMfuLO3s+zdBKB9FEoQAAEChcvb9m6vGAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZfl4uoDiLuvG28nJyR6uBAAAOCvrfTu/P6BBEMrHpUuXJEmRkZEergQAALjq0qVLCgoKyvV5/tZYPjIzM3X69GmVLVtWNpvNpXmTk5MVGRmpkydP3rR/p8wKfZSs0U/6eHOwQh8la/STPt4YY4wuXbqkiIgIeXnlfiYQnwjlw8vLS5UrV76hZQQGBt60G3EWK/RRskY/6ePNwQp9lKzRT/pYcHl9EpSFk6UBAIBlEYQAAIBlEYTcyM/PT+PHj5efn5+nS3EbK/RRskY/6ePNwQp9lKzRT/pYNDhZGgAAWBafCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCN2Al19+Wc2aNVOZMmUUHBzs1DzGGI0bN04VK1ZU6dKlFRsbq8OHDzu0uXDhgnr06KHAwEAFBwerX79+unz5sht64BxX6zl27JhsNluOj+XLl9vb5fT8kiVLiqJL2RRkzO++++5s9Q8aNMihzYkTJ9SuXTuVKVNG5cuX16hRo5Senu7OruTK1T5euHBBQ4cOVa1atVS6dGndeuutGjZsmJKSkhzaeXo9zpo1S1WrVpW/v79iYmK0bdu2PNsvX75ctWvXlr+/v6Kjo7Vq1SqH553ZR4uaK32cN2+eWrRooZCQEIWEhCg2NjZb+z59+mRbZ23atHF3N/LkSh8XLVqUrX5/f3+HNsVxPUqu9TOnY4zNZlO7du3sbYrTuty0aZPat2+viIgI2Ww2rVy5Mt95Nm7cqD//+c/y8/NTjRo1tGjRomxtXN3HXWZQYOPGjTNvvvmmGTFihAkKCnJqnqlTp5qgoCCzcuVKs2fPHvPAAw+YatWqmd9++83epk2bNqZBgwbmm2++MV9//bWpUaOG6d69u5t6kT9X60lPTzdnzpxxeEycONEEBASYS5cu2dtJMgsXLnRo98dxKEoFGfOWLVua/v37O9SflJRkfz49Pd3cfvvtJjY21uzatcusWrXKhIaGmrFjx7q7OzlytY/79u0zDz30kPnss8/MkSNHzLp160zNmjVNp06dHNp5cj0uWbLE+Pr6mgULFpj9+/eb/v37m+DgY
-      [... base64-encoded PNG image data omitted (scatter plot: 'boundary points for free boundary condition') ...]",
-      "text/plain": [
-       "[figure text representation omitted]"
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "ud_buondary_points = rectangle.sample_boundary(100, random='Halton', criteria=lambda x, y: np.isclose(y, -Ly / 2) | np.isclose(y, Ly / 2))\n", - "px3, py3 = ud_buondary_points[\"x\"], ud_buondary_points[\"y\"]\n", - "plt.scatter(px3, py3, color='red')\n", - "plt.title('boundary points for free boundary condition')\n", - "plt.xlabel('X-axis')\n", - "plt.ylabel('Y-axis')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "接下来将自由边界条件转为实际深度学习模型中需要的约束条件:(自由)边界约束\n", - "> $$\n", - "\\left(\\frac{\\partial^2 w}{\\partial y^2}+\\mu \\frac{\\partial^2 w}{\\partial x^2}\\right)_{y=-0.5 \\mid y=+0.5}=0, \\quad\\left(\\frac{\\partial^3 w}{\\partial y^3}+(2-\\mu) \\frac{\\partial^3 w}{\\partial x^2 \\partial y}\\right)_{y=-0.5 \\mid y=+0.5}=0\n", - "$$" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "constraint_up_down = ppsci.constraint.BoundaryConstraint(\n", - " {\n", - " \"item1\": w.diff(y, 2) + mu * w.diff(x, 2), # 上下边界上需要满足的条件\n", - " \"item2\": w.diff(y, 3) + (2 - mu) * w.diff(x, 2).diff(y), # 上下边界上需要满足的条件\n", - " },\n", - " {\"item1\": 0.0, \"item2\": 0.0}, # 上下边界上需要满足的条件\n", - " rectangle,\n", - " {\n", - " \"dataset\": \"IterableNamedArrayDataset\",\n", - " \"iters_per_epoch\": 1,\n", - " \"batch_size\": 10000, # 采样一万个点用于训练\n", - " },\n", - " criteria=lambda x, y: np.isclose(y, -Ly / 2) | np.isclose(y, Ly / 2), # 采样点在左右两侧边界上\n", - " loss=ppsci.loss.MSELoss(), # 使用均方根误差损失函数\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "综上所述,控制方程、简支边界条件、自由边界条件所用的训练数据点预览如下:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAA6cAAAHHCAYAAABDdy4DAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOy9fXwV5Zn//zk5CBwegh4IWBM0Jc2iCG0X5aHsZg0lFa3aIFAQFdGfoq2CBCrdWllILFarYILgY7arfmmhRgikthUQvjndbFGx+t2KihSQFEHEQCxPSUFOzu+Pw8TJZGbu636YkwSu9776cjmZM3PPPdfc57ru6ymUSCQSYBiGYRiGYRiGYZg2JK2tB8AwDMMwDMMwDMMwbJwyDMMwDMMwDMMwbQ4bpwzDMAzDMAzDMEybw8YpwzAMwzAMwzAM0+awccowDMMwDMMwDMO0OWycMgzDMAzDMAzDMG0OG6cMwzAMwzAMwzBMm8PGKcMwDMMwDMMwDNPmsHHKMAzDMAzDMAzDtDlsnDIMwxhi+fLluPjii3HOOefg3HPPTem1s7Ozceutt6b0mgzDMAzDMCZh45RhziC2bt2KiRMn4qKLLkLXrl2RmZmJ73znO1i6dGlbD+2M58MPP8Stt96KnJwclJeX47nnnmvrIZF56qmn8MILL7T1MBiGYRiGOcsJJRKJRFsPgmEYfTZv3ozRo0fjwgsvxLRp03D++efj448/xhtvvIFdu3Zh586dbT3EM5pnnnkGP/zhD7Fjxw587WtfS/n1T5w4gbS0NJxzzjnS3x08eDD69OmDWCxmfmAMwzAMwzBEOrX1ABiGMcNDDz2EXr164a233moVUvrZZ5+1zaDakEQigX/84x+IRCIpuZ41x6kO57Xo0qVLm1yXYRiGYRjGFBzWyzBnCLt27cKll17qahz17du3+f+vra1FKBRyDeMMhUIoLi5u/ndxcTFCoRD++te/4uabb0avXr2QkZGB//iP/0AikcDHH3+MwsJCpKen4/zzz8fixYtbnC8WiyEUCqGiogIlJSXIzMxEz549MXHiRBw+fBgnTpxAUVER+vbtix49euC2227DiRMnWpzj+eefx7e//W307dsXXbp0waBBg/D000+3Gnt2djauvfZarF+/HpdffjkikQieffZZXHHFFfjGN77hOmcDBw7E2LFjfWY1yVNPPYVLL70UXbp0wQUXXIB77rkHf//731tce8GCBQCAjIyMVvPo5NZbb0WPHj3w0UcfYezYsejevTsuuOACPPjgg3AGsxw/fhw/+tGP0L9/f3Tp0gUDBw7EokWLWh3nzDl94YUXEAqF8Kc//Qlz5sxBRkYGunfvjuuvvx51dXUtvvf+++/jj3/8I0KhEEKhEPLz8wEAX3zxBUpKSpCbm4uuXbuid+/e+Nd//Ve89tprwjljGIZhGIaRhT2nDHOGcNFFF+H111/He++9h8GDBxs99+TJk3HJJZfgkUcewe9//3ssXLgQ0WgUzz77LL797W/jF7/4BX7961/jvvvuw7Bhw/Bv//ZvLb7/8MMPIxKJ4Cc/+Ql27tyJpUuX4pxzzkFaWho+//xzFBcX44033sALL7yAr371q5g/f37zd59++mlceuml+N73vodOnTrhlVdewd13342mpibcc889La6zfft2TJkyBXfddRemT5+OgQMHokePHpg+fXqreXnrrbfw17/+FfPmzfO99+LiYpSUlKCgoAA//OEPsX37djz99NN466238Kc//QnnnHMOysrK8H/+z//BmjVr8PTTT6NHjx74+te/7nveeDyOq666CiNHjsSjjz6KdevWYcGCBTh16hQefPBBAEnv7/e+9z1UV1fj9ttvxze/+U2sX78ec+fOxb59+1BaWip8djNnzsR5552HBQsWoLa2FmVlZZgxYwZeeuklAEBZWRlmzpyJHj164IEHHgAA9OvXr/neH374Ydxxxx0YPnw4jhw5gj//+c9455138J3vfEd4bYZhGIZhGCkSDMOcEWzYsCERDocT4XA48a1vfSvx4x//OLF+/frEyZMnWxy3e/fuBIDE888/3+ocABILFixo/veCBQsSABJ33nln82enTp1KZGVlJUKhUOKRRx5p/vzzzz9PRCKRxLRp05o/q66uTgBIDB48uMU4pkyZkgiFQomrr766xfW/9a1vJS666KIWnzU0NLQa59ixYxMDBgxo8dlFF12UAJBYt25di8///ve/J7p27Zr493//9xaf33vvvYnu3bsnjh071ur8Fp999lmic+fOiSuvvDIRj8ebP1+2bFkCQOK//uu/mj+z5qqurs7zfBbTpk1LAEjMnDmz+bOmpqbENddck+jcuXPzOdauXZsAkFi4cGGL70+cODERCoUSO3fubHH/9rl//vnnEwASBQUFiaampubPZ8+enQiHw4m///3vzZ9deumliSuuuKLVOL/xjW8krrnmGuH9MAzDMAzDmIDDehnmDOE73/kOXn/9dXzve9/DX/7yFzz66KMYO3YsMjMz8dvf/lbr3HfccUfz/x8Oh3H55ZcjkUjg9ttvb/783HPPxcCBA/HRRx+1+v4tt9zSolDPiBEjkEgk8P/9f/9fi+NGjBiBjz/+GKdOnWr+zJ4zevjwYRw8eBBXXHEFPvroIxw+fLjF97/61a+2CtPt1asXCgsLsXLlyuZQ2Hg8jpdeegnjxo1D9+7dPe9748aNOHnyJIqKipCW9uVyOX36dKSnp+P3v/+953cpzJgxo/n/D4VCmDFjBk6ePImNGzcCAP7whz8gHA7j3nvvbfG9H/3oR0gkEnj11VeF17jzzjsRCoWa/52Xl4d4PI6//e1vwu+ee+65eP/997Fjxw7qLTEMwzAMwyjDxinDnEEMGzYMlZWV+Pzzz7Flyxbcf//9OHr0KCZOnIgPPvhA+bwXXnhhi3/36tULXbt2RZ8+fVp9/vnnn5O+DwD9+/dv9XlTU1MLo/NPf/oTCgoK0L17d5x77rnIyMjAT3/6UwBwNU7duOWWW7Bnzx7U1NQASBqdBw4cwNSpUz3vGUCzATdw4MAWn3fu3BkDBgwgGXhepKWlYcCAAS0++6d/+icAybxg6/oXXHABevbs2eK4Sy65pMX4/HDO/XnnnQcArs/JyYMPPoi///3v+Kd/+icMGTIEc+fOxbvvviv8HsMwDMMwjApsnDLMGUjnzp0xbNgw/PznP8fTTz+NL774Ai+//DIAtPCi2YnH457nC4fDpM8AtCrU43es6By7du3CmDFjcPDgQTz++OP4/e9/j9deew2zZ88GADQ1NbX4nldl3rFjx6Jfv3741a9+BQD41a9+hfPPPx8FBQWux59JyDwnJ//2b/+GXbt24b/+678wePBg/Od//ieGDh2K//zP/zQ9TIZhGIZhGDZOGeZM5/LLLwcA7N+/H8CXnjN7tVmA5oVLNa+88gpOnDiB3/72t7jrrrvw3e9+FwUFBdLtYcLhMG688UasWrUKn3/+OdauXYspU6Z4Gm4WF110EYBkoSU7J0+exO7du5v/rkJTU1OrEOi//vWvAJ
IVdK3rf/LJJzh69GiL4z788MMW49PFa8MCAKLRKG677TasXLkSH3/8Mb7+9a/7ViJmGIZhGIZRhY1ThjlDqK6udvWG/eEPfwDwZWhqeno6+vTpg//+7/9ucdxTTz0V/CAlsYxH+30dPnwYzz//vPS5pk6dis8//xx33XUXjh07hptvvln4nYKCAnTu3BlPPPFEizH88pe/xOHDh3HNNddIj8POsmXLmv//RCKBZcuW4ZxzzsGYMWMAAN/97ncRj8dbHAcApaWlCIVCuPrqq7Wub9G9e/dWmxUAcOjQoRb/7tGjB772ta+1avfDMAzDMAxjAm4lwzBnCDNnzkRDQwOuv/56XHzxxTh58iQ2b96Ml156CdnZ2bjtttuaj73jjjvwyCOP4I477sDll1+O//7v/2722rUnrrzySnTu3BnXXXdds1FZXl6Ovn37NnuCqfzzP/8zBg8ejJdffhmXXHIJhg4dKvxORkYG7r//fpSUlOCqq67C9773PWzfvh1PPfUUhg0bRjJwvejatSvWrVuHadOmYcSIEXj11Vfx+9//Hj/96U+RkZEBALjuuuswevRoPPDAA6itrcU3vvENbNiwAVVVVSgqKkJOTo7y9e1cdtllePrpp7Fw4UJ87WtfQ9++ffHtb38bgwYNQn5+Pi677DJEo1H8+c9/xqpVq1oUcmIYhmEYhjEFG6cMc4awaNEivPzyy/jDH/6A5557DidPnsSFF16Iu+++G/PmzcO5557bfOz8+fNRV1eHVatWoaKiAldffTVeffVV9O3bt+1uwIWBAwdi1apVmDdvHu677z6cf/75+OEPf4iMjIxWlX4p3HLLLfjxj38sLIRkp7i4GBkZGVi2bBlmz56NaDSKO++8Ez//+c9bVCCWJRwOY926dfjhD3+IuXPnomfPnliwYEGLHq9paWn47W9/i/nz5+Oll17C888/j+zsbDz22GP40Y9+pHxtJ/Pnz8ff/vY3PProozh69CiuuOIKfPvb38a9996L3/72t9iwYQNOnDiBiy66CAsXLsTcuXONXZthGIZhGMYilKBUxWAYhjkDWLJkCWbPno3a2tpWVWxTya233opVq1bh2LFjbTYGhmEYhmGY9gbnnDIMc1aQSCTwy1/+EldccUWbGqYMwzAMwzCMOxzWyzDMGc3x48fx29/+FtXV1di6dSuqqqraekgMwzAMwzCMC2ycMgxzRlNXV4cbb7wR5557Ln7605/ie9/7XlsPiWEYhmEYhnGBc04ZhmEYhmEYhmGYNodzThmGYRiGYRiGYZg2h41ThmEYhmEYhmEYps3hnFMBTU1N+OSTT9CzZ0+EQqG2Hg7DMAzDMAQSiQSOHj2KCy64AGlpvBfPMAzTEWDjVMAnn3yC/v37t/UwGIZhGIZR4OOPP0ZWVlZbD4NhGIYhwMapgJ49ewJI/rilp6e38WgYhmEYhqFw5MgR9O/fv/l3nGEYhmn/sHEqwArlTU9PZ+OUYRiGYToYnJLDMAzTceAkDIZhGIZhGIZhGKbNYeOUYRiGYRiGYRiGaXPYOGUYhmEYhmEYhmHaHDZOGYZhGIZhGIZhmDaHjVOGYRiGYRiGYRimzWHjlGEYhmEYhmEYhmlz2DhlGIZhGIZhGIZh2hw2ThmGYRiGYRiGYZg2h41ThmEYhmEYhmEYps3p1NYDYBzE40BNDbB/P/CVrwB5eUA4rH/syZPAU08Bu3YBOTnA3XcDnTurDbEpjpo9Nfj07/tw8Yd1+Ho8A2mZmd7XD2KcHeWcMgTx7DvKONt6rG09TplznmnPPohxypy3rZ99W8toEM+JeM5j9Sfx0hVPIfLJLjRekIPJf7wbPaIGnj3DMAzTcUkwvhw+fDgBIHH48OHgL7Z6dSKRlZVIAF/+Lysr+bnOsXPnJhLhcMtjw+Hk57JD/GB1IuvxrMT1k5DYkw7x9YMYZ0c5pwwe1z/18suJ6t3ViRXvrkhU765OnIqfoo81heNsTzIqff22HqfMOQN+9qfip1rLm+y1UzBOISl+733nzY+2ltEgnhPxnC/2m5v4Ai3P+QXCiRf7aT57Gyn9/WYYhmGMwMapgJT9uK1enUiEQi1/0IHkZ6FQyx92mWPnzm19nP1/Ekrg6g9WJ0LFocT1k5CII/k/+7maQmh5fQPjbDr9v1P33RfcvbfRfH45se7Xbwol5/j6SUigOPm/O27tnZxn0VhTOM72JKPS12/rccqckzpWxXFaG0+WrKEYiazHsxKrP1gdzLMPYj5lzmvo2TcBiSdH93CfNz8kZLQpFEo0mZbRIJ4T8Zwv9pvbvLY757IJMGagsnHKMAzT8QglEolE2/pu2zdHjhxBr169cPjwYaSnpwdzkXgcyM4G9u51/3soBGRlAbt3J/9NPTYeB7p1S/7Xi3AYaGgQhqbFm+LIXpKNT/6+F7VlQOYR94TlRCiEUFYWsHNnMvTLwDgTAOIh4Ld/XoHxhT82e+9HjwL/9E8pn89mBM++CcDedOCrRcl/+81981i3bwd69lQapxWyvf/ofnyl51eQd2EewmlhozKaCAEN/Xrjrf95CXmZoxDuoTZWT6hjJchoIisTf6x+AQf+/gkmDb8VaGpCyNQ4T56ky1M4TLsnxWdfua0SEysmIoGWPwchhJDWlMCRZ3uj24FD/teWWZ80ZNQX6pwafO+t9SnyAHDqdKJM6LSUrJq0CuMvGd/6SxIy2nDhBeh64JD/Oy+7PgUhT8RzHnt7O7r27Ykw4q7vUgJAHGH841CDdohvSn6/GYZhGKNwQaT2QE2N9w86kNxT/vjj5HEyxz71lL9SAST//tRT4iHuqcHeI3uR9zegv5dxBCBkXf+pp4yNMwSgUwL45Ac3mr/3uXPbZD6bETzPNAAXHgHy/gbh3DePde5cpXFWbqtE9pJsjH5xNG6svBGjXxyN7CXZqNxWaVRGQwmg+6eHUPyzAjx4w/kpn1NIyGjo470o/lkBXn/gFoT8DFOVccrIE/WeFJ59vCmOWetmtTJMASCBBPL+Bm/D1H5tmfVJUUZFxJctTfl7b61P97xl+/rpuSxaV4R4k8t3ifP0l/l3oZuXYSo5TgDByhPxnH8cPhedPAxT4PR8Io6XrpB79gzDMMyZARdEag/s32/2OABN+/bhk//3R2RRDt61S3zpo8lrf+UYcQCEcyZPvJ987NfqideWOCd27DB/Tupx1nkJkOcdoN+TbZxenrN9R/ZhYsVEvNF9FoZTzikho185BvTZf0R6rMbGQDznV44BOZ8Try0zThl56tePdqzCs7c2nrw4nyp7Es9eZZyeXv3TVG6rxOHKBbjN5PUl3nunjCSQwMdHPkbNnhrkZ+e3Pi+Bd/7nZXyDcqDEOP/65qs4lXYIgygHyzwnooyeW0c7Z+QTiXeJYRiGOWNgz2l74CtfoR9HPHby5iIsOrCWdt6cHPGleyavu78H7ZSUcyZP/BXysTujxGtLnHNfv27Gz0k+zjovgf096HO/s7evf+9LTo9T5DkDgEW7f007p4SM7u8B7DqPdtog5pR6zsDGSTz2L92Pke8pnjNA+trWxpMX5
Hde4tkjN5d23Olx+nr18eXmyl96HDd7fYn33ktGXOeXOE/k+5EY51Ofb8Dd7yyknVfmORHv6e8ZtHM2XiDxLjEMwzBnDJxzKiClOaf79iVDn5zY8oriTXEcyOiG8w/HXXcW7DmKaU1A40NAOAHtPDkr53T/3/did5lEzinhnqxcqUTcJwcpBHS/H9i5DMg6mgwNpZxTlCfW/X5g95NhfOVIXPucANTzIz3mSSbn1Dr2n2YADY+kIa2piTTOWG0Mo18c7TvMtCbg78/0Qo+6I8nQbSdueYeEezIpo81Q3yeBjAY+zpMnkTgtT35y/0+/yMSOoo8QHuD/PjX0i2JIUWdsv3+/1DhFzz+tKSl3pPcOoM29RC5j5a7feebDAkDFxArM3jAbe4/sRadTxOdk5ZxKrE8yOad2qqdVI+/CvJZe38xRwud5vF8U595xCB894f/ON57fG933HpAaZ1NaAPnrVs6pxz1Zvw2cc8owDMP4wZ7T9kA4DCxZkvz/Q46fa+vfZWVAOIyafZsxY2xSUXCaHta/i65KKh+nOgGLv5X8zHMHYs4cxDuFEauNYeXWlYjVxlxzpMJpYSy5agma0kIouqrl9SwSodMKYVlZUvEl3hM6dwbmzHEdp/Xvxd8CTnYGZl3lOAfhnE6c55w5Np4sFenUlCTO2cycOXIFXHyevfN5NqV9ef9+z/5k5xCevaKb9zN3jFPkOQOS156WfxiJREI8TxL35CejCeszg3PaYqw+Mqr6LkmNs3NnfHz7RNdz2mV0d8M+1Ozb7HtPCSQw9d8O4aN/7JceZ96FechKz2o29pwk0kL42fjeAEJIOK6dkHj2LeY+EiG9S/FOYaFX/+4/3N0cliySJ+u8iESU1qfWY0iy+FutDdMQQuif3h91x+tae32X5eCNuVN8r797wb041Un8zu9ecK/0OP3WEpXnhM6dWzx75xrRBCCRSOCN+25Aj4wIVvTzX+9X9JvD/U4ZhmHOUtg4bS+MHw+sWgVkZrb8PCsr+fn4ZMXH/Uf3Y80gYOIkYJ9jI3hvevLzNbZkop9cCTw6Krlj3oJwGJg7F5W3jfQNl2sxxEvGY9WkVdgyMsv1+qGs/i3GSr0nAMCjj6LpvvvQ5BhnPJQc/0+uTCp7fx7ZH00VL5PPiblzWzV/t58TACoHAd+fBOxPdzSJlzinNZ949NEvr9MUFxr9fvO0Nz05LvvzXDsoJHz2CSRw9xXH8PGdk0njtEK2Raw5PU97ezr+4DZPPvfkJaOJtJYPPx4Cnh7dA5W3jSSNrwVU2VMYp9e7ZJ9TKn+aUeh6TqeM7j+633OsiaxM3DmtNyoH+Y8z4TFOa+MJgKuBmkAC3W+YipdLJmF/esu/f9IrDW+U3Ud69q3mnvAuifJhE0igrqGuxWee95+W1vL+Jdcnt7Em0tLw2Cjg/itbXsyaxxsG34DJqya3uod9R/Zh1OeLknPncf1L7nwAWelZvu/8D27tjUvufEA4Tqc8AfD8HaE+p0RaGj649VrE7v7ul2vb+PF4o+y+VmuEtZaN+nwRKrdV4pZPH8XyfnMRh2OcCGN5v7m45VP5d4lhGIY5M+CwXgEpDwuKx5NVD/fvT+bw5OW1UArsIXhpTckKrl85lswLq7kouSPuRqdTyWqSOZ8Do664GZct/KUwXM6rDYJVmOTTv+/DxR/W4evxDKRlZrYaK/We7FT+5SX897/fgJzPkzlcTw5L7vS3GpPEOXHyJHYunINX1z/Z4pxO0pqAty8pxTcT/Vqc07UQy6nTFSp37UqGht59d6vWHLPWzWqhlGalZ2HJVUvcW0u4zFNlnzrMem1Oi3P0T++PCYMm4InNZcJnv2L8CkwZOMF3nMCXIdv7juxz9VA5CTcB4w9m4DejSv2f++l7iv8xhlnPT8L74Xr8t8s4QwghGoniyNFDuPu0jFrPKd5J0JJDBFVObMfF+/XFgP83DR8f+8R1Ps45Bfx0ay/Mz7wJabm5iP/gLtR8+qZnoR4/rPfZ/n66yWj1tOovi+o47inWP47RvypodW7nOcc9sR75/3Rlq+Ms3GQ2HAojnvhyU8W55vzPRUBTWsj9+VDn/uRJTxlduXUlbqy8UTyRLpDvX3ItcY61ctfvWs1b//T+ePzKx5vDjd0IIYSs9CzsnrET4T9tdr2+lUsLAKGmRPPcf3r6na+4YbX7e3F6nH9981U89fkGzzUPSD7TDRfNw5jIINJz2vnWeiw/vhk/H3Kk+ZzW2lY4sLC55Zjb+tR8z7N2I5wWxrH6k3jpiqcQ+WQXGi/IweQ/3m3UY8phvQzDMB0PNk4FBPXjJqo86fc9GUPCDSsHKntJtlhxOq1EmIB6z25Kcv/0/ii7qkzNQAFdyV0xfgWmDJniOxaRkenXMxKQM7Tc5qxmT40wRxRwGDQC7EowVa5MnD+EZEhq70hvHGp0b1ciK4uq7xZ1vMCXz1BpE8IxVr/3mXLvqrLtNZ6aPTWo+rAKZW+WCc9JHaMqlHxoAOjTrQ8ONRxSnkNdnDI3KmsUnvrzU5i9frbwu6L3SGc9pM4f9V0WrW3F+cVYEFtg7Hq6sHHKMAzT8eBWMm2AjkJrheBNrJjYrNhTsZQ0y8ARhct5tkFQQOaex18yHoUDC7UNDDvU0FX7caL2Km5GpqjybQghFK0rQuHAQtL9hNPCrebfyhEUGTR5F+aRDTUrZNv5jPxwy1X1up7X+bPSs3DH0Dt8FVoZWdQ1Fi38xmsZBSry4cTvfbYU/rKrynxlRUW2/caTd2Eepq6ZSjon0PL5tCr8o/neUmV98ZWLMXnVZOU5FCF6j+zvaeW2SuQszdF6j+zorIcya4UIytq25M0lwvMAtDx3hmEY5uyEPacCTO+8mvKqee2m3zD4BizavAiAv8fHpLeFMlZTnkRVZD1UJ0+dRGZpJg42HHQ9n5c3xrSnwguKZw+AtKEWb4pj6ZalSh4fimHopuRXvF9hRBaDkDMvo8SSJ1ORBzreMRPeVztUGXZSNKIIq7at0t4YcKLjxdaNuLCuT32PvGTQj6C9iNT580NmXaDAnlOGYRjGCzZOBZj8cTOt0HopzhQlLVVGlOl71kFGyf3B737QqtCKG875SbXR7/WcASgbairGjo5haEIWqXK2c+ZObN67WduzF8T74xYaSh2rCQPEQifP04mpDSiq4WkipNt5Xapci2TQSarXPlXD3e27fkQjUXze+HmbhVjbYeOUYRim48FhvSnEdCitW8gnQAsDCyI0NBX3rINOqKYXzvC0HfU7SN+jhmL64fWcASB7SbZyaLFsqKluKLOJ0EOqnGWVZrXYdFD17FHDEvcf3U9+f0ShoX5jpcg2FRXZdBZOslAJZXeDGtrqtSaqIJJrALjzlTvRq0sv5GfnC2XQjqlwYyqqocEqnuBZI2ahOFYcWIg1wzAMc2bDxmkKCUKh9UKkpFENkKrtVVo5fDL3nAr8lDQ/ZdQLZ45qcazY93iZHC8Kbs85Vhsjbwh45QjKGDu6GxAm8i6p8uP0hsvkh9qhGnA76ne08qapFtQSjdVUrrZos8CO9bzcDFML
6/nHamMIp4UDW9NMQzE2DzUeQsHyAmSlZ2HiJRPJ51bZNNBFdv5k10NrbXsg7wEM7jvYyEYJwzAMc/bBxmkKCVKhVUFkgADuoaEyCr3JYi0WQRnusp4Pu5FJVeQSSATuOaAaalUfVmHqmqmeckY1dkxsQOh6/lQ90W6ePYp8Uby90UgUxbHilBbUMmHAyRRdy0rPwoRBE1D2RpnwvJNWTUJ9Y32L75pe00wis2G278g+cnXj0rGlmDl8pvQaYDpkWYSOJziIonYMwzDM2QHnnAoIIudUpNDWN9antHiQm9IDwEiuqOliLaaqsbohk2sXQsvejtQcxJL8Esy/Yr7WOEWoFrQB1OTMZP6lat6lqRZL9Y310sVvAHPtcVKVC07B612bPnQ6cqO50q2NnKSyIBogb9zJvkchhJAWSvP0IuvkW4rWvSAMV5n10EThqSDgnFOGYZiOB3tOU4gofNH6t6k2JCq5bhYyoaEmwoephqmuJ9cPqvcto1sGnrn2mRbXonpZcqO5SmOTgRKWaTJHkOJF7NOtD/Yd2YdYbcxXcVbNu9RpsWRRtb0KS95YQpavINrjtKcweKr3SyYM2E6Qa5oTlU0t2fuyhzebzLcUrXv3jboPK99baXzDjroeqnqCGYZhGMaNtLYewNmGpdBmpme2+DwrPQsl+SWenhagpUIronJbJbKXZGP0i6NxY+WNGP3iaGQvyUbltkrSOE0qyX73TDUoKcVJitYVId7knfsmwlJGLUXSjYxuGdg7e2+rMcuEL8eb4ojVxrBy60rEamNaY3bDMtQAtLoX69+UHEGKnImuZ52vrqEON6+5mSyHlkLu3CCxFHK373vJWUa3DNJ9/OrdX0nL1/hLxqN2Vi2qp1VjxfgVqJ5Wjd2zdpM3IZzvTxBh8DpYmwVThkxBfna+bwEtwP35+5GKNU1FlgD1+yoaWaS11tkRrXsJJPDY5sek742CaD0MIYT+6f2bDdOg1zWGYRjm7ICN0zbg6uzxuHZHLS7fdBAF2z7EHybpKbROVJUxO6aVZC8lnqqsyRTdcUJVmkRGXQghPHPtM+jcqXOr71IVubrjdVqbBlT8NgSKRhaRziHjnfO6nhsiOdTZiHCTs72z9wqfTUa3DM+ettZ1veTLzYBTfX8oGyTRSBTxpjhJ+U+VweD1/KORKOn7Qa1puptaMnJtUTiwUGutsyOT92nHxIYdZZPL8gTrboYyDMMwjAXnnAownbMybhxQVdX688JCoKgsdf0eU50rqotM/9BJl05qDv3bUb8Dz739HPYd3dd8DKVaqkpPQFGvyftG3YdFmxe1eT4xNUdQJ0d035F9KFpf5Gnw+clPEHmXomcza8QsUkEban9anffHa6xOVOQ46CJETnmIN8VRsLxA+L2g1jRVWXKT65o9Na2KOlHHoYqJnrO6+cmi9VCnx3HQcM4pwzBMx4NzTlOIl2EKJD9PJPKQlZ+afo9LtyxFv+79fPsFmsoVNYFOpWMnQbXk8MpBzEzPxO3/fDuWbllqLJ+Yils+sYm+ooC/8ZOZnkn2RDrHF0TepagacDQSJRmnVDnUeX+8xurET45N52er5q/Hm+Ip62FrSpb85Lr8unLfTQ7VNdFrfk2Eb+vmJ6u23wpyXWMYhmHOXNhzKsDUzmtjI9Ctm/i4FW9X4qZXvJUfkVKpstPu501R9SICZlsf6FQ6diNIz6/9vt08t36kogorIPYkiuRM5C2ZNXIWqb2ImycyyIq1XjIZVKSA7vsTq41Je+tMRU/43YOMB1ZH1uJNcRTHirGwZqHwOiZkieIFBKD8TN3wm9/CgYVGqlBT3hOV9bo9VZd2gz2nDMMwHQ82TgWY+nGbMQN48knxcffcA3z7HnWFVqWNiEhJVFFagggp1Gnd4UWQSpOXousHNWyUgui5qRhOVIOpT7c+qGuoE47Rbf7bKqScakTJvg86mzQqyr9Jg8FUyKaKrLl9xw9dWQLo7bMAGNl4oxrDlDBvv/EGtV7LpFuYWtdkYOOUYRim48FhvSlixw76cctcwqisXL6VW1f6KkMqbR1E4VduoaF+BNXyRad1hxdBteTwC3fzw1QVVoqyKRu+TDUWrOq8VpEh2XDOtgopF4X+Wvl1skq87PtjRyYs1TKCV3+w2si5TYZsqsgadWPHlCzJts/S3dSizu/uWbtd5bJ/en/cMPgGLNq8qPk7Xvfmh8563d6qSzMMwzAdHzZOU0RuLrBhA+04IPX9Hu2Kl1U0R8UrEHQOkpeSW/F+hfS5ADNKk1fRIZkqm9Q8TwoyyibVcFLxAt805CYseXOJkoFJMRSDwM+ICrrPrhsmc61lz20qf92CKmsyGzsmZanqQ4+CAA5MbWjJ5NL6yeXIrJHK74nuem0qf51hGIZhLDisV0Cqc04bGoBI5Mt/q4bVyYbEWRSNKMKqbauUw3GpIYWlY0tJCi0V2XBmU6GhXp60iZdMJBXYscYCmKlqaTrfkHJOL6qnVaO+sV4rP89k3rIOMvMKmAn5tF/XVK61c6x+4zKdv05F5l02JUuV2yoxoWIC6ZqmUgFMhsSqvifUud44dSPGDBjj+jfd/PUg4bBehmGYjgd7TlNEJJJsF+NVrRdI/t1umOrsajt32g8cP4DZ62cLx+lmUMl4hqheBftYTCi0MuHMpkJD/TxpVMMUMOsN1KlsqnpOJ3ZvSTgtrFT52EInJBYwZ9xS5/WhmodQ/k65sVxrUViq9W8ZwxSgyb5KVIEJLzJ1DZmXNw/F+cXasmStsxT6p/cXegGpMmcyJNbt3ijjoM71pFWTUH5duWd187aIcmAYhmHOTNg4TSFr1/r3OV27tuVnuoaGXWGJN8Wx+PXFvsZbOBRGPNG6YbvIELYrQQeOH/AcrxcmFFqZcGYTShNl4yAtlOY6nxbRSBQVEyuQn51vzBsYRBsWmWPdjB9dA1MGUaVkVUOROgduec/tKdc6Mz0T04dOx4lTJxCrjfka60Hkr1OgGm1jBowx8t7IbL6IjHqZnOQgQ2Kp46DOdX1jfSDttxiGYRjGCRunKWbt2mSI79y5yeJHubnAY4+19JhamDQ0KB4YP0PKyxB2U4K8jFy/c5voh+enxE8fOh250VxjShNl48CaA6+cy/Lryj1D5VShKpsf1H0gNE5kzwm0rbeEEsquaijq5Ca3h1zrGcNmIKN7Bp57+7kWxqyfsW4if11lUyLVeYzUdbZoZJFSeyUvmTNd+MvamKn6sIocASO7AeEnwyqbUNTfQoZhGObsIa2tB3A2EokAy5YB69cn/+v1Y2y6EqJlvGWmZ7b4PCs9C0Uji0jncDarn1gxsZUxIGOYWtgVWgpWS5OVW1ciVhtDvCl5zfGXjEftrFpUT6vGivErUD2tGrWzajH/ivmYMmSKMS+ljELrNt9B5WFZyqal3HqxsGYhRr84GtlLslG5rVL7nL0jvbFx6kbsnrW7zQxTN1l0YingReuKmmWGAnVe/a5rFQ9yyiwVS/m3yzH13c/onoHiWHGrfruWweIlA15rBgWrgrDbe+qHZbQBaDXfQVRrps5h4cBCz7+JIik
Ad5nzW5Nl1ojKbZXIXpKN0S+O9kwpcBuHfa5FyK7RIsaNS9ZhePLJZLHAJ59M/nvcOCOnZxiGYTooXBBJQFsWVAiq36NXdVmZ3oiUIjmyHlSAVvwjiB6qMsSb4li6ZSkph7d6WrVW9WMVvAqUuEEtWiJT9CTVRYx0CjaptEgC3PvsymJCZilrRGZ6JhKJRCvD1H6MaB1xhu5TZL8kv0Qr/1alN6rXmP3k0MQ6q9tbVuedUamk7RxH5bZKTH9lumfvYjsmepZ6pbdYuKW5qMAFkRiGYToeHc5z+uSTTyI7Oxtdu3bFiBEjsGXLFtL3fvOb3yAUCmFcB9qW9fMgAEkFecKgCajZUyPlhXHzwIg8QyGEWhQDoeRpxRNxlI4txYrxK1A6tpQ0NpEXw8tDJvIAmcLyUIiUc/t8uc13EFheqhOnTqA4vxiZPcXeLqonkerhsXtwbqy8keyd1UG2YJOFbEsQvzkoyS+Rvr4JmaV4GacPne5pmAI0j5hdhmcOnylcK3pHeqM4Viz9nto9rdFIFLtm7moRBeHnmbd/98E/PoiLyi4iyaEJT61uCobqGqHaT9k5jvGXjEfFRFqIuG77rcZGf8MUSP69sVHrMgzDMEwHpUMZpy+99BLmzJmDBQsW4J133sE3vvENjB07Fp999pnv92pra3HfffchL6/j9VrzUojDoaTyUvZGmREDQFZBoypj/br3Iyu0okqYMqFzKuGEIqiho0GEHopwGoULYguQQAIl+SWYlzfP97vUcD23kGm7sdBWGweqfSdVlGyvOXgg7wHpsF/VEGO3MfltHORGc0nnoc6jaK3wqyDsd89uGxs5S3NQ31gvNNrc5F8mhFk3vNZ0CgYV1Y0Zt3HkZ+drr9Fe2NfjG+/y3iixM3eu9GUYhmGYM4AOZZw+/vjjmD59Om677TYMGjQIzzzzDLp164b/+q//8vxOPB7HTTfdhJKSEgwYMCCFozWHXSEuGlEEoHVepwkDQEZBk1XGTHgnZNp5mPbeyXgodHNKZQ1rL6Pwk6OfoDhWjGNfHCNdl1pYy83Do5pzp0u8KS5dIVpHyQbc50AU5eCFSh6fm3z4bRwEYTiJvMiHGg95ftftnnU2NkzlG4s2X/yQjTwxhezGjN84gsr1dW4crN28lfS9HTukLsMwDMOcIXSYar0nT57E22+/jfvvv7/5s7S0NBQUFOD111/3/N6DDz6Ivn374vbbb0dNjZlCDrqoVCi0Qm+nrpnq+neZdi9+OU3UlgAqFTV1+uHFm+LY9NEmz7/bCaKdB9VDUTq2FDOHz1T2mMrm01Ja2vz63V+Trq3j1Qmiv6oISnVeJ7pebb/3yEu+KVCNDJF8uM1tUNVvdSsIW/es089ZNqxVpv2WDEFV3jXVK5U6DtM9S13zYaM7gV3i7+bSHP4MwzDMGUaHMU4PHjyIeDyOfv36tfi8X79++PDDD12/8z//8z/45S9/if/93/8lX+fEiRM4ceJE87+PHDmiNF4vnIUgrCqFlAIQqgaAn0LrZYiKFDRVZUylH56KEeJEt52HTBgzpaiM233LtqIAaDJR11CHjG4ZONhwMLDWHEH0V/VDpQgMoNfqhrJx4JRvavEgipGhIh+AOcPJS36da4Wsp1ZnYyNV+cYUTBl2Jnul2qGOw1TPUs+Ng+/8CHjrntP/8I4yeOwxqcsxDMMwZwgdxjiV5ejRo5g6dSrKy8vRp08f8vcefvhhlJTIFzeh4FehsKoq+Xc/A1XFAPBTaCdUTEDvSO8WIXgy1TQpyhhVofVC1QhxQ8d7pxsaKVI4Vb1HVJm4achNWPLmEiNeHTdSmXMnG2JtosetjGFol+94UxyLX1+s7bXU8S4C+oaTSYPJec86GxupzDemoGvYmeyValE0MikXMuOgrtF+G26eGwedTwID1wLbxwFIwM1ALSzkfqcMwzBnKx3GOO3Tpw/C4TAOHGiZX3bgwAGcf/75rY7ftWsXamtrcd111zV/1tTUBADo1KkTtm/fjpycnFbfu//++zFnzpzmfx85cgT9+/fXHr9MhUJTfU8peYDO3DDZ8Fc/ZUy35YuMESLTzkNFodUJjaQonNFIVMl7RO7ReHEh8i7KMxau5ySo0FE3UhVibUF5j+585U706tKrVdEeU15LE2HTqoaTSYPJ7Z51NjZkjUyKHOq2QlINDVbdgPDaeAii7Y4d0fruu85OGQ+srDxtoLbEVBsZhmEYpmPSYYzTzp0747LLLsOmTZua28E0NTVh06ZNmDFjRqvjL774Ymzd2rLwwrx583D06FEsWbLE0+Ds0qULunTpYnz81MqD37ttOx54ZL+RPE+VkDeV8Fc3ZUw1BNGOzPiz0rNwx9A7XPNNnah4TVSNDKrC+fCYh0njcCp8MjIRTgsbCdez35v9XI9f+Tgmr5ocmHfWgrq58LfDfzNyPYocHmo8hILlBa6bLybCPU2FTcsaTjoGU8XECtz9h7tR11DX/LnbPetsbMiEtVLksC17KOtsQOh4bFXumbK+C9fZKeOBk50x7uOP0HAgk1x/gWEYhjmz6VDVeufMmYPy8nK8+OKL2LZtG374wx/i+PHjuO222wAAt9xyS3PBpK5du2Lw4MEt/nfuueeiZ8+eGDx4MDp37pzSsVMrD278827P6rJBtXtxolJB1I6pyq3U8c/Lm0dq56FbMVOl3QRV4bQr8H44FT5ZmfCrtitbIdhZEXnOhjm4b9R9yu04qFA3F8reKDPSvkbmPfKqLqtTCRZov61KvNaKym2VmL1hdgu57tOtDxZfubjVPetUiZWpkiySw7buoSyzAeH2vqr0SlW5Z+r6PiprlHg97tMPq144H+vXA8uWsWHKMAzDdCDPKQBMnjwZdXV1mD9/Pj799FN885vfxLp165qLJO3Zswdpae3T3s7NTRY/EhLdCcDbwyjjhdFVVFWNW1OVW6njHzNgTLMiZrJiphuyHgrqHGZ0y1D2HqUyn9A63strsmjzIlRMrECf7n2MeGfdkPGWqRbAsiPzHvl5E2W8lk6vtKXopyJs2o7JPPdDDYcwedVkAEBG94wW8qEjw37fpeYb6+b0moAqZzvqdyB7Sba2d1f1nqnr++a9mwNfjxmGYZgzj1AikdCvNHMGc+TIEfTq1QuHDx9Genq68nkaG4Fu3fyOOP0Yfto1WTACXyqcu2ftVmoNE2+KI3tJNkmJd6N6WrVS7tTKrStxY+WNwuNWjF+BKUOmeP493hRHv0X9PHsmes2Pm7EVdP6VF7HaGEa/OFp4XPW0atQ31mNixUQAcFXkRN5H1bwxN0PC65qWTHkpp34ya2rM1rgnVEwQHgeoy7F9jCrvkep1vTYLpgyegkWbFwFQkw8VZOQ3PztfKB8AEA6FW/RpdhYGk5UH6zv7juxrrkydmZ4p9d7K3mcQiOQshBCikSjqG+vJ76sfqvcsu77rrsc6mPr9ZhiGYVJHh/KcdmQikWShB/eiSKcVjYFrmw3T5KfeHkbddi9+2L0wKsqiqRDEqu1VnoYpkJwfU+1qLEznnMnmhOp4QFORT2iyn6nOXI+/ZDyKRh
Sh7M0y3+MA/bYhqu+RynVFXun7Rt2Hle+tNFLUivJuB5HnbjdMrXuzR4nIyLCfDMlsKMmG1JravLIjymu3/m3Ku6uaxyy7vptqTcMwDMOcHbBxmkLWrvVpJzNwbbJAhAs6yrVXyJvVQsYv3Kpqe5WS8WCicqtlOPnRO9IbhQMLXf/mZaj5KZYmiji5jYMa2hZviiMaieKRMY8oe4BkUDE0TRXmMTHXhRcXkoxTE3mYXu+RyetSNgt+895vsGvmLmzeu1lL0aduDMgWAlNZq1TDZk2+r6kOqfXCL0RZVPBNtlWW6iYiJaw+Goki3hRvzoVVrWDMMAzDnH20zwTNM5i1a4GGBuCee4DL8w4Bw5YlQ3k9DFNAX7l2K8Zy4L4DWD1ptWfxGgDKxUF0CpxYUCukyhRtciviYxWeMlXEyQ2vQkqZPTNRnF+ME6dO4ME/PoiLyi7C6BdH4+Y1N2P2+tn4yaafoL6xPjAPg4qhacIrTp3rk6dO+hZpspTkoApgObHeo41TNyIaiXoep3pdmVw+Z+EbmYJWskVwZAqBqa5VskXYTL+vFFnqHemN4lhx4AWTvIpn5UZzSd+nvteq7w+lCFV9Yz0Klhe4Fvaz09gIzJgBjB2b/G9jI2noDMMwzBkM55wKCDJnhZJjJJO/pzoGZ+GVmj01mLRqEuob612/Qx2XTq6RqbxV+1j8ciuL84tJbWh0cs7sc72jfgfK3y7H3qPexkiQuYSAWs6ZCZmlXjejW0arNiROD5X1XAEzeZjUkE3T1wXUZV4mPFonZzgVee7U9zmIHFG/Z5pAojnaxI1U5Fqn+p4Bfzl2kzsnfufxiiIy2eeUc04ZhmE6Huw5bUNMeBhNjMHywtQ31iNnaQ4Klhd4GqYA3cuh0z7DZOsMipdlyZtLSNfTCbG25rpLpy5JD4yPYWofm+UBkm33IkLFc2JCZqlz6Gyv4+ahUmnv44WfZ92JyetaqMi8rBdUtTUM4N2GyHkMtbWLG9Q50Akv93qP/J5pSX6JMP+d6vmVkTM7QUQK6MgxJZLAy4vtmd6C5OenW5kzDMMwZyGcc9rG6LYBESHrCZLxdlAURNVcIxN5q0Dy/pduWSpUyP2McTu6IdZ+hrIbltL7UM1DKH+n3Gium2w+oYWuzOqEfrrlJpoouKKSv2i60IuszKsUtDKVM+yHl3w4q/TakW2Fs6Oe1jjaKWsiL7PXM614v4J0vSBzrWXz16lyqSPHVj4pdTMzPzsfjY3ehqlFVVUyxJf7njIMw5x9cFivgFSFBQVRAZIa7kdp/+BGkG0VAP3QSUrYmZ1oJIrPGz8PNMSaGppHwVTYr2r4tVtIOKVQj27oJ2BW9ky3x9FBRuY3fbQJBcsLhOe0z1UqW6Y45ePg8YOYtGoSAL1QaMpGmtszk22bZMfEvJmSM9H7arriuAiZcPRJl07CxFs/xdrlmcLj77kHWLZMb2wc1sswDNPxYM9pG9DYCMydC+zYAeTmAo89BkQiZqsZ+u3QT6iYgKIRRSi8uBB5F+aRig/ZkfVyqKLjoVPxBM8aMQvFsWIpL6Isum1N7KhWOXWi6jmxe8Urt1UiZ2kOSSFWbc1iZ9+RfYjVxoxs5phsj6MLVeYrt1Vi+ivTSee0y5yMd1Z3w8wtamJVml6UCDXywNlmSsXLbMdEJAdVzmK1MYTTwp7z7ve+BlFxXIR0pePN5QDExukOmnOcYRiGOcNgz6kA0zuvqSgCIeMJzUrPwsRLJpJacgDBF+lxQ1ZJlvUE2z0Wbu1zTDaMN+k5tbNx6kZfhTZIVD1Sbh4eZxEkLyjFkqiYLr5lApWWR144vXkU7yyAwLxvOkYv9f0pyS/B/CvmS3/Pz/OpG8lBlbNoJNoiTJY67zKeWQDGInUoRdKse0ogAfx+KfDWDOF52XPKMAxzdsLGqQCTP25+RSAAcwaqjAEk67lSMdSCalrvhez9Ay0VyyDHKxPSKvNsVBVaXXRDFd1Cg3OW5kiH/OpsmgQR6hqUDMlsvPjNvV9oKADl8FcdKHOmupFgagNCpwK56sYUdd5lDHfT+etSlY5PdgZ+/o/mI7xoaNDPOWXjlGEYpuPBxqkAUz9ujY1At27i40z8IFMVMYsQQkgLpXkWKwGSxk/FxArPKp1eiPKfglDiZe7fpFeUipci56R/en/cMfQOUosbJ6nycKeyvYUI1TYopls6BZnzJ7vx4vf83eYEQJvk31LnTFXeTMppW+RaU+Zddt13nh/QWy+8DHfXNWxlJbB9XPPVnZjaqGXjlGEYpuPBrWRSxNy5aseptA+RrYaaQKLZMHVrDxJCCOXXlWPMgDFKFVC92lz8+LUfK7VUEEG9/9KxpeTWNiYRtaywt915IO8B3/YRXni1cDBNENVfveYno1uG7/e82nmIWneYbOkk29pFFuo8RiNRoaHh1hpGp9WMLNbaNnvdbEyomECaM9V2KpTvZXTLaM5l9ntn3NpvUdYwnTY7lHnXqSRuYr3wah2WG81tffCU8cDAta7nMZniwjAMw3Q82HMqwNTO69ixwIYN4uOuvBJYvz75/6t6YFR36ItGFmHVB6uM5FuqVgA2sYNv2hMWFLJtfgA5T6IF1Wup4sUOsvqrczz7juzDzWtuFn7PHpopkw+rE7JpjTdoryN1vjdO3YgxA8ZInz9V+bfUStp+VXcBudxPmfeIss6azLV2huV74TfvJqpgA8lNu37d+xmLZPGV2ZOdgdcWA/Vfw7hRQ7Di2Uyj7WPYc8owDNPxYONUgKkftxkzgCefFB9nFYHQaXsAqBk01dOqm6v36obZ6hT+MaHEyyiwqqHFqcylDUqh9Tu/qY0Q1ZBwJ7KGsIqxmIpiPW6GOvW6QW+8pKLVjEolbbeiTiobCTJGMeC9zprOtY43xaXbArkhyv2UxUQ4eltuFrJxyjAM0/Fg41RAW+Scdu6irvjYlZ4d9TtQ/nY59h6V907oopP/ZKHba5GiwKoaZanuJQgEr9AGvRGiOz+ySm4qe3oC8v0e7e/pc28/h31H9zUf4zdXulVj/QjakFCNqHDbYJEx6N1yRPcd2Yei9UU42HDQ9Zp+92patkzOu1TupwCTvZSDklk/2DhlGIbpeHCf0xQRiSRzaUTVeiMRIFar1nfRTSnJ7JmJkvwSfP6Pz1H2Rlmrc5ns4WlHJ//JQrcnqKh/p2pPQNO9BKlKtrNvZLwprt17Ubf/I+Ddm9OJbq9Fv/6obnIcRD6sH9L9HhXnSrX/L0XOZOdYFtmeyhZuc+vWR9WJ3yZSZnqmp2EK+Pe3NS1bJufda90DgPJ3yqXCfk32UlbtWc0wDMOcXbDnVEBb9DlVyfuieL+A1r0LdavVeim8JvKfgsyVVA3LM51XqOuB1fVImK5iGquNYdKqSZ7hxqZCtilynGrPqXS/RwGyoaF+ci8rZ7r5t16oVBJXlRfRmjhr5CzXDTsnbl7boGQrqHm3n181f91EK6VUtxVjzynDMEzHgz2nKWbt2mSI79y5wI4dQG4u8NhjLdvHUD0w1nFU79fuWbt9PYmyiBReL0+ACIrHjzoGL2SqktoVM
tXveY1d1wOr45GIN8Wx6aNNvue3oHiAwmlhhNPCvnmwfvNDVVxFHnELq0Krn7GYmZ6JeFMcK7eu1H4fRN4v698yXis/WaJ4DgE1OaPOsSwyERU6nlrKmvjrd39NOpfbmCmyRV3D7JiYd7/3iBrl4AbVCyxak01sBDEMwzBnLmyctgGRSLLokReyio+swWRCOaAqvG6KUP/0/rhh8A1YtHlR8/js9wbQFFId4041LM9UOJ+JcFoLFYWWWhjGgmpUyMyPMz9aJu+SYphRjMXGLxpb5O3q5sX6bRao9qzVCTvWkTOq8SuDaG2zoxryGW+KY+mWpcI1sa6hDhndMnCw4aC0gRlk+LPOvFM265zrxYHjBzB7/WzhuSlrgOyaLNqoZRiGYc4+uM9pO0S272LVhz6JrDZM5daJFF7gy355Xr3vHv3Oo569PikeQ5kxuCHrndb9nhPqhsLSLUtJPW7delZ64dWL0w2vvpFeyOZdWv0hF8QWtDBMATO9Qb16pkYjUQDAocZDgVyT3O+RgE7+dir7llKg9PosGlmE6mnV2DlzJ6KRqFSPZ6unLcXYAoCbhtzkOhaKgenXrzioAj9+yPTYta8XM4fPVOod60RmTY43xfGvV9ahW7cEnnwy2WbtySeTRQPHjZO8cYZhGOaMgnNOBbRlzgq12uyEigmk85nKrTOdp6gSwqbb71G1Oqapqpoq1YxNtnWgGqaAXCXNVOddUrHLWd/ufTFt7bRWxrD9mpk9M/HCuBfw2fHPUtPv0WMcuveuWkG4LVoj2dc2lXB91RY19Y312v1tU5lH6TUGnVx4E9V0qfJdkl+Ch2cMxz/eG9t8FSf2Ggw6cM4pwzBMx4PDetsxonBNa6eagoz3S4TJSpVuIWwUZY86hkmrJqH8uvJWipVqWJ6pcD4Vb5hutVtArlqqSlhlkHmXOj147XIWq415GqbWNfce3Ws03BeQC2k1VUVbp4JwkK2R/NY2lXB9P6+dG/aQ3XBaWCvPUzf82YRxq5sLb6KaLnVNXrDhIeC9f5z+l7u3tqoqGfLLIb4MwzBnH2yctnP8FB8ZQ4Oi5FKVJFOhrW5QPSbUc9c31htvyWFCkZMxVCxMtHWgKpDz8uahOL9Y6RpB5F1WfViFqWumGjGeVMLbTWwM+BnuTky12KDkr0cjURTHiqVzt3WNKq+NKZUcWZm10M3wDyK/loKpfskmNgx1izGR1/vXFsPLKLUzd65/bQaGYRjmzITDegW057Agashe0cgilI4t9T1GRkky2TDeOQZROxxrLDKtaky25PD73qisUdi8d7NUUaJUtHWwk8r2Km7zWvF+hXQ4sxcqYceAfHit/XomQoy93rXpQ6cjN5prPDTUL2QzgQR6R3q3yr21H+N2z6aMKieq8ikTJm+yNYsK1ntRtb3Kt/e0jFxT521e3jyMGTAmkNBj8pq8/FVg11XC8115JbB+vd6Y2vPvN8MwDOMOF0TqwFB3qgsHFvr+XaaQBiBfsEmE1dJk+ivTyQWO7GMQISr6IlNMyOt79Y31yFma01zgZ/SLo5G9JNu3sI5XQRUKVrXbWG1MqmCM5UnTLX5CwW1eVbzp4ZD786AUvnJDNAdeyBYP8no+bgWTamfVYv4V86VlkIJf4Z6S/BJPwxRwv2fZ9UIGVQ8gVa5Kx5Zi96zdbWaYWgWbRr842rPHqopcU2V6Yc1C0tqkAuV3AQAQ3Uk6X65a/TCGYRimg8PGaTtDxuAwYWioVr01VanSUtYKlheQ+2M6x2BVXhVhqlqxEx1l3WmoiDzcFs5qt1SFU2ZjQcX4FSFjGFrHxBPe11WpNkupGOsHRY7sRojb81HdEKHifHaFAwu1Kghb96xbJVuEasoAdS2cOXxmYLItQqZKtqxcy8q0iY0EN0QbIQCA7/wIQOL0/9xIfv7YY0aHxjAMw3QQOOe0HSEbKmeiOI9OIQ3dHCWV6ppOw2D8JePRq0uvFsVrvNBpyeGFiX6l9ny3eFMci19fHGiO4IlTJ1CcX4zyt8ux96h7zmxQYZuyeZcTBk3w9DDZkd148MqLpSCSI53+uyaQeXayxiB1vYjVxhBOC0uvC7I9ni1k1sKgZNsP2YJNFjJyLSPTJvLX/cbh9rsAAOXvlCef7cC1wPZxSBqidmM6OT+FhVwMiWEY5myFc04FBJ2zYiL/iNJyxguZVhNThkwRHkdFpqWJHbdcyKByYCm5qEHkcKYqRzCzZybuvOzOVnmOMrm/qlDzLmv21ASaI+tsMXPr2lux76i6HOm29NBF9tnJvjvU9cJqF2QhY/zptDWhtKgJWrbdUM1zVmnHZeW+b/poExbWLDRyDVO0eLYrV582UFsap4WFISNtZADOOWUYhumIsHEqIMgfNzdFyg2qUhxkv1DTCozpno8m+vQ5z0fxrgRl3Hsp2dRqt/bnpVJoypRx5SeXFJkNauPBC105knmfdFrjuKH67GTuWaeYlPNcfuhsuHnJVVtuHMj2NaaOxW+dOnHqhPG1yUTbmxZjPtkZeG0xuhwejPzLsrDml18z6jFl45RhGKbjwWG9bYRMSKuoRx2g3gpBNYzOC6ryIhOuRglRNtHexUImLDOotjpeoXEV71eQvk/NEXSG9un2S7QjMvApMqsauq6qROvIkVXYi4LJ1jgWqs9O5p5VWiBZ15YJI9VJGfCSK5OyLUO8KY4Dxw+Qj6emZIjWqeL8YtL1qGuTqXDoVs92utnq1AzDMEzHho3TNiAV+UdUTOStWgSR62adg2Jg6ubAAvLGnCnj3suYcirJQeUIWgq5iX6JgNm8S1mDUVeJVpEjahSERdmbZa0+081J1Xl21HuWyRl2Imv8me49akq2ZZCVC4C+ESJap8rfLkdWzyxhmDpl49F0HnVb9ZVlGIZh2j9snLYBMg3j7QRR0Acw43WUVV4oHphoJIqKiRVK7V1EeBmDssacCeNexpiSNYZlFXITnmATRaKcUI0nFSWaujHghWxhr3Ao7FqBWLdIjcyz07lnr/XCmWfqRVBVs0UEFeXgBVUurHWjaEQRCi8uJG2oUdapvUf3oiS/BMWxYq2NR5332UQYMMMwDHN2wcZpGyCrnMmG1qqg43VUUV4oRl35deUYM2CM8NqyYxblaVGwP0Md417WmJI1hmUVchOeYN3wSa9nKjKeVORQ18sqEwVhPS9qaxxZz5L17EQbX7/76++0Q4rd1ot4U7zNqmZTMJ3C4IeMXKikHlB/Q3Kjudobj6rvc1tURWYYhmE6PmyctgEyypnsDrfOLrVqqFUqct3cUFF+gsrTUjHuVT0SJnMEnQq5CU+wTvikjkIrK4cmQhVloiCCbI0DJJ9d6ZWl+P6q7/set/j1xa0+UwnPdK4X8aZ4IMafKe+byRQGEVS5SO+SjsVXLpY21mQ2nfKz87XSHVTe57Zup8QwDMN0XNg4bQNkiooEaaiZIhW5bk5UwzeDzNOSNe51PIyFAwvRq0svxGpjAID87HzX8GcVhVx300A1fFJXoZWRQ1Ohx9Rrzsubh+L8YtTsqSEZp6rexT7d+yh9z0TfyyCMPxOebfva
Ujiw0FjhND+ocnHkxBFMXjUZ4bSw1LVVNp3cWnBR1l3Z9zmIsH6GYRjm7IGN0zZApMTJ5h+19S51qnLdLFSVn1TmaVFQNerdFPYX/vIClly1xNXQVzE2dcK8VcInTSi0MnJoqnIr9ZpjBowxWkDLC518ThPVatuqarbX970M29pZtYHmQspuLsgaa7obAUHmubdVVWSGYRjmzICN0zbClBLXHnapqcrLweMHW/UZVPHuqio/qczToqDiYfRT2CdUTEDvSG8cajzUYszW/MoamzrFpWQVZxMKrYwSLduSx4u8C/NazbnXNQFz3kWvTR4T+Zy6BYvaomq2E5MbdiphxTLRMarGmupvSNB57m1RFZlhGIY5c2DjtA2hhmb60R52qSnKyw2Db8CkVZOMKIuqyk8q87QoCq2sR0KksANoZSQ559e0DPh5YGQUZxMKrYwSbapya9X2Kk/DFEg+F9Nh035zXjiwUKkPqR2/CAcquq1CdNY1kxt2qmHFdlmkomKsyW4EpCLPPdVVkRmGYZgzCzZO2wi/0MwgqjZaeXZBhbL5KS+Lr1yMORvmGPPuqio/1PY18aY44k1xZQWbqtDKeiRUWhAF6T2neGCo4ZOmFFqqEm0ivNZS9P3oHemNwoGFruMMKtdatQ+pdc91x+uMRDjooLNZYWrDTtf7asniD373A9Q11AnvRdVYk1mnUpHnnsqqyAzDMMyZR1pbD+Bs5NdvVWLC9SHsfeR3wMpK4B9dAXyp9FRuqySfi6rQ7Kjfgewl2Rj94mjcWHkjRr84GtlLsqWuJWL8JeNRO6sW1dOqsWL8ClRPq8buWbuR0T2DrBBRsJQfy4BzEkII/dP7t1J+LGPQOsaN+sZ6FCwvUJ4bS6F13q/Xs7UU2Mz0zBafZ6VntVJ+VcPgZOeXAsWLW7SuCEBSiZ0yZEqzIhtviiNWG8PKrSsRq40h3hRXfqZueMmh28aAdW7ntQBxeC1ls+BQ4yHPebeMCvvcAHCdH+tzypxbRX+cMtU/vT/mjpqL0On/c7vnGwbfgMmrJpPlNyh0NitMeOGpc209Gy/GXzIee2fvRZ9u3oWqZGRbF9nNTEsOH/zjg7io7CIULC/AwpqFWFizELdW3Yqq7VWtvmvi3WIYhmHOXthzmmKGDYvjz3++HrB+tD/7BvBIA3DBFiTuHCnt5aLsUkcjURTHilOSf+W2i286B0knb8/Ls+ZEdW5UQ+YoXjTdMDiTOV5B9D40WenVLodeMqsbXhtEbp3f/EQjUfKc+8nUyKyRrtd4/MrHMXvD7DbNX7fQ8b6Z8MJT5TtWG0M4Lez73nbu1BnPXvtsc4hvkEXWRMhuZqqukSYLYzEMwzBnF6FEIqGWmHSWcOTIEfTq1QuHDx9Genq61rmGDwfeesuabvuO8unPLtgC3DkSAFA9rZpc+t/y1iXP1LryL6Vgy+5ZuwPLv4rVxjD6xdG+5wbc71l2PP3T+5OUH8srMGnVJNQ31rseIzM3QHD3aRFviiN7SbZyPqHMdUWbECu3rsSNlTcKz7Ni/ApMGTIFgHeYpKWgr5q0CgCUn6kbFJlVDXc3/bxF8zNr5CxSGxr7nHvhvOdRWaPw1J+fwuz1s4XnV5VfWfzWNQCeG0ei94TyXlPlOxqJtlg//NZDnfXKFJS5se6JssaI5jLIVBIKJn+/GYZhmNTAntMUcewY8NZb1r+coYshAAngk+HJEN+u/yC1D7ErQl671HcMvQMLYgs8x5WK/CsZL4iMMqNTFTScFkY4LexpmALyxaSCrlLp5zH2QzbHi2LQBdX7cPes3dqVXu33QZFZ1dxiU7l11kbJ9Fem+87Pr9/9NWlclGdjv+fKbZXIWZpDzmc26YH3e99VvW8mKiJT5du5foi8iSqybdLAo7QxA0BeW0RrpG5hLIZhGObsg43TFDF1qvX/uefUNX++ZgUwZTy5fYhdEXJTfEy0zNCtfklVFqu2V0l7ZnWUH9PFpFJRpdJLYbe847ohsVRZC7r3oa5Cm4oWSyaMILeNADcSSKCuoQ4Z3TJwsOGgsUIzXs/bD1NVVimbIKoGndd7kpmeielDp+PEqROI1cY8zyXTCsaOSLZk1yvVaBU/dDYzveC2MAzDMIwpuCBSiti1i3jg5wNaFMeQKczhVmAl1flXboVcAHHhHwBShYRMYLqYlMmiPn64Ffw5cN8BrJ60mlRYyQtZWZMpelL1YevCKW6YUnJljGEdZApaOfEqnuXHTUNuAiBfaMatyJLf83bDZOEemcJhXoWjRDjfk5L8EiQSCSyILRAWhaMUT/PClGzJFleTwatoWG40V+l83BaGYRiGMQXnnAowlbNy/fXA2rWEAweuweo1iWalVjevrb3lX7l5IAH4Ft+Qzf2kopN/5ZX3Rs2TCyoXS+e8KrJGyaOr3FaJCRUTSGMwlc+okhOrg+y8W7In2xqoelo16hvrpXIXvbxv04dOJ3vJRHmeMojuPYj3nZLvTJ075zrnxYrxKzDp0klK7yNljjJ7ZuKFcS/gs+OfGVtDqGuAfRxBrM2m4JxThmGYjgeH9aaI5cuBnj39jkgqTb9anmihJOnmMba3/Cu3sLZYbYzs5cq7MK9d5F95he9R8uSCCNWz35PXJoVo3lRkTRR2SekFamGynYZKTqyOXMmGa8r2rLWH7IbTwuRQV78wbZnwTZNVVk31IaWiE+LtJt/xpjgKlhcIr+tW8Zb6nlPmaO/RvS3GYWINkQln5rYwDMMwTBCwcZoievQAhg2zF0Wyk1QCLr+8CTcNa6lYmAjL1S3rH1T+lQXVKKr6sApT10xtN/lXXkq0n8GmU1hKFaoxrCprfoaZjBFGUXKpRqRMTqxofoLwcsuEL7sZARRjmBKmTaF0bClmDp9pzAAJunCYE11j2DnX8aZ4IO277HL2Qd0HcjcpODcVmaJr3BaGYRiGCQIO6xVgOiwo2U6m9efDhgFbtrT+3ERYrv1czvYRm/duJindXuGqVPzCNWVDyeyYCjd0M0Aq3q8wGhqaynBG636qPqxC2ZtlrtcCWs6bSVmzoIbXFo0sQunYUt9jZD3OlBBrAL7hnveNug8r31tp3MstI/Oq7UZ03isguJBN6rjm5c3DmAFjtDcDggjxNt2+i1oYS4SpZ+YXCp4bzW2TtjAqcFgvwzBMx4MLIqWYLVuAo0eBceOAIUOS/z161N0wBfwLc1j/vmPoHah4v6JVESK3c1mFReob65GzNEdY5MfCq/BLNBIl3befF0RUSAgAwiF3JchSDGe9OgubPtrkWoyJQlDFpOykqkhP5bbK5gJOboapdS3gywJHAE3WZEP4qHNTOLDQ9+8qxWFExYoKBxb6ehYTSOCxzY8FUpCGIvO9I72xcepG7J61W8kQVvHOOv8dRMgm5d4BYGHNQuG6RCGIKtp+slWSX+JpmAKt33OVwljUc1NwK5blVjCpdlYt5l8xX7owlR+NjcCMGcDYscn/NjZqn5JhGIbp4LDnVEB72Xl128nuHekNAC0UIYpXR7U4CNDaw0jNvxIVuhF5ImQx4d0y7UlMRZEeldYgzmcjKnI
kE+ZqYg51Pc5e49X12Ot6qKjFs/zuwQ/q/ZXkl6D8nXJygSUTyERi6EZHBBERYD+3TsTFpEsnKRXGopybsoYEmf8uorAwjt/+Ng3O9mqFhcTigQTay+83wzAMQ4eNUwFt+ePmF4a7o36Haz6kSJEzHVpqUvHzMoomDJqAsjfKhGNxXhfQD/eVMSBE6FZeFqFaAdZNkfUyhlSUWd05DGreqJsFJq/phFrtWMWAkHk3AQRSPdoPmVBW3c0AkTFcNDKZF5/KirfV06oBQCv02u/cIrnU2aTUZfiYfXjr/17QfMUvSQAIGTNQ2ThlGIbpeLBxKqCtftz8FNLCgYXKBmYQir5JA87NKKrZU6OkwAWZf+U0ICierSA9OIB6jqHoWTfnr26vct0koDxnyhx6EZTHWTcnU+WabvjJjq4BYfLdNIHXhtumjzZhYc1C4fd1NgPcZDAcCiOe+DIFINURF1Qv67y8eRiUMQh9u/fFrWtvxb6jemtIW7TzsVj5TiVuvOz65iu1JmmgNjQAkYjetdg4ZRiG6Xhwtd52iKiia3F+sXL1ySAqZepWA7bjVoVUp1qwTEsKLyNB1C6F6tky0dbHD9nqpvaKtV5QvFuUqsyiOfQjiJxBQF2udK7phl/7H9UWKBYm303n2GSfpd97MihjEOm6OhV87TJoFQqzG6ZA8BVvne85VX7GDBjTLCNLrtZfQ6j577HaGMJpYWMe9XhTHHfcexjuRqlF8m9z5wLLlilfimEYhumgsHHazqAopEveXEI6l5siF5Sir2N8AP7Krkx7AzcoCq3IwPQyIGRbwwRlLAByz4yiyMrkr1I2AmR7gVrItIWhYJe16UOnY0FsgbRc6VyT+m6Y6geq+2460Qnt9ttwo6C7GRBOCyPvwjxMXTPV9e8io5/6HKnvuYps664h8aY4Nn20yfcYi0mrJrXoX63rWa7ZU4OGT2nPcMcOpUswDMMwHRw2TtuAxsbkrvCOHUBuLvDYY8nwpXhTHEu3LBUqpHZlwQ83Rc60om9H1figKLteChkFkUKr2ntU1bNl2liwkPEEihRZv3vzw1RvSjsmPc7UwmL90/vjhsE3YNHmRQDcQ2J1rklR8k1GOai+m05U3pWTp07irt/d5fuelL9djqyeWcJwVZV1yYmq0S/7HCnvuapsq64hsi1rnL81up7l/Uf3A9FDwC7xsbm50qdnGIZhzgA6XCuZJ598EtnZ2ejatStGjBiBLV49WACUl5cjLy8P5513Hs477zwUFBT4Hp8Kxo0DunUDnnwS2LAh+d9u3ZIFIrKXZGP2+tmk80QjUc9WDCGE0D+9v6siF0S7EB1kWoQ42xtsnLoRWT29W1L4zYOFyMAEWrZbsaPTGsatdY0ufs/WomhkEaqnVWPnzJ2IRqKerXdE9+aFiTBXN0RtYSiKspes1TfWo76xHiX5Jc1tM3bP2o1Hv/NoYNektKMJKspBFZV3pXJbJbJKs3Cw4aDneRNIYO/RvZh+2XQAwa9LMka/1WZl9vrZmFAxQfo5Ut5zVdmWXUNMtKyxP+eTp062akEj4is9vwJ850dI5pV6bXwl//bYY8rDZBiGYTowHaog0ksvvYRbbrkFzzzzDEaMGIGysjK8/PLL2L59O/r27dvq+Jtuugn/8i//glGjRqFr1674xS9+gTVr1uD9999HZmamyxVaY7KgwrhxQFWV219OP4KBa4EptN3okvwSFMeKT39bvtCJToEaU5goypHKSrBWcSbLU7HvyD7cvOZm4XetlhGpqoYqerYUD5BsJdsgC6jYUQmPtb6nKmtBXRMAMrplYO/svejcqbPn94MqoCWLbDE12bZGK8avQJdOXQJfl3Ra7Lhh6jmoyhn13KZb1mR0y0BdQ13zv2UqSO999glg+7jTnzqr9QLf+14Tqqr0750LIjEMw3Q8OpRxOmLECAwbNgzLTldJaGpqQv/+/TFz5kz85Cc/EX4/Ho/jvPPOw7Jly3DLLbeQrmnqx62xMekh9eb0Y/hpV6DzSc+j7IpQ1fYqLUUuSGWIgqnKwamoBFs0ogirtq1qcY0+3fr4eoQs3JTcoHsJ+rWCoVR/lalk21aVXy0ocmy6SrXJa/bp1gfPXvus69y1p2q7MlWTVfp3WnMf9LpEMfqjkSjqG+ulwtrdNrBSvaZ6IfM+W/cui2wF6cTK1acN1JbG6bBvf4Itm2ibxyLYOGUYhul4dJic05MnT+Ltt9/G/fff3/xZWloaCgoK8Prrr5PO0dDQgC+++ALRaDSoYXoyd67oiNM/0K8tBq6Z6XFEy/A23dxF3Tw0XSXSVE5dKirBlr1Z1uozkWFqKbnFsWLpfFZd3J6tTI6syfzVIKHmAZrM3zR9zYMNBz1lIcgCWoDcOywTZiwTFu7MJzWVH+uFKM/T+rdsvnXVh1WYumZqSjehqFBlcV7ePORn56NgeYH0NaQrSKffi70Hb0j+5tV/Dd3P/xTlT6RjytC2nSuGYRimbekwxunBgwcRj8fRr1+/Fp/369cPH374Iekc//7v/44LLrgABQXeP7wnTpzAiRMnmv995MgRtQE7IFcerP+a55/cFNKgFTk7dkV2R/0OPPf2c9h3dF+L8ckoYlRl98DxA1i5daWv8hxUJVigdS9EN2SVXFNVQWWQLQQjUuCLRhSh8OLCNvMOyRTnMZW/GcQ1LbxkIagCWrIFfmSKqVW8XyE1llTmuQP+Rv8dQ+/AgtgC6XO6bWAFvQlFRaZljU6LJb8K0s41bdfMXdi8dzP237AfX+nZFXkXTm0XXmaGYRimbelwBZFUeeSRR/Cb3/wGa9asQdeuXT2Pe/jhh9GrV6/m//Xv39/I9cmVB6M7XT8uHVuK3bN2t5mCU7mtEtlLsjH6xdG4sfJGLIgtaGGYArQiL3YsJcireA+QNAxnr5+NGytvxOgXRyN7STb5/BQoBaJEhimQDM20k5WehZL8khYVYJ14FUxyzrWp+5b1HvoValk9aTVKryo1VsxJBqsVxvRXppOL84hkTVQ8K4hrOr/vVTwLMF9AS6VQk0wxNaoxlNEto80MN2eBNasQVm5UvkxsOOT+PERF1VSwijTJFCKSkX9KYTURzrXGbU3LWZqD+sZ6o0XhGIZhmI5PhzFO+/Tpg3A4jAMHDrT4/MCBAzj//PN9v7to0SI88sgj2LBhA77+9a/7Hnv//ffj8OHDzf/7+OOPtccOgFB58HT1wu/8qMWnltIwc/hMhNPCSoqJLtQqj7KKGEUJchqGsgYwBT8jrGhkEekcpWNLlZVcuyKnU91VhIr30EuBb+tNkoLlBb55cU5jT6dKtYlrUgmiFY8TnQrV1MqyFMPcKgalI0u666Gb0a/SL9hvA0u08SCD6saVrPx7PeeMbhmkcdrnMMg1jWEYhjnz6DDGaefOnXHZZZdh06Yvm4c3NTVh06ZN+Na3vuX5vUcffRQ/+9nPsG7dOlx++eXC63Tp0gXp6ekt/meCSAQoLPT6q61ar60YklNpCMqj5odsv0tZRcxLCUqlJ8Iah5sRVjjQ86G1IDM9U1nJtY6jGA2zXp2FTR9tUl
LGVb2HQbS9sUM1MFRaYdiNPZWWHaauqaLUB4VOCySAtmEhMoZCCOGZa59B506dlQ3MoNZDGY+3zAaW7saDrpEnK/9uz3nv7L1Sa4jORgjDMAxzdtJhck4BYM6cOZg2bRouv/xyDB8+HGVlZTh+/Dhuu+02AMAtt9yCzMxMPPzwwwCAX/ziF5g/fz5WrFiB7OxsfPrppwCAHj16oEePHikf/9q1Xu1kQhj27X3Yf+292GtLcbXnmMrkuplEtd+lSBHzzD86uh8Hjh/w7ffql9ekg1veqkyenRPZ71KMhr1H97YoViKT5ysqBAOkPvePmvcou0li4TT2ZPI3TV7z2txrkVma6VlEy0+OTGOiOBQlx5tSzEk27xVIPpeHah5yzQs1sR5SCibZ861r9tSg7I0y4Xl1Nh5kipmJ8tfta60of9ntOcusIbJ57gzDMAzToYzTyZMno66uDvPnz8enn36Kb37zm1i3bl1zkaQ9e/YgLe1LZ/DTTz+NkydPYuLEiS3Os2DBAhQXF6dy6M2sXZtsKzN3brJIUm5uMuQ3EslEvKnWVWnWUUx0Ud3t91PE/BTSKUOmYOXWlcbHplpgSMegk/2uylzLKuM61V9NF2mS2XCR3STxM/aoxbNMXrNzp8549tpnfdvCqG4MyD4XU8WhKPhtBqhsuFVuq8SsV2dh71H352JqPZR5T3Q2sKioGnmitVYFmbkRrmknOwOvLca96wfi34Zav4VKw2IYhmHOEDpUn9O2oD30STPdo5FKvCmOpVuW+noxnYga0lP6bEYjUaP3q+KdoZyD2kuV+l2ZXoR2RHPuhqxBY2IOndf364NpvycAKI4VY2HNQtK5TfUApfb1lLmmjhxRz0fxPIr6fAYtTzLP3zqP19rhhYmetdT7CrofrUyPWcvopPY0VkW71+/KSpc+p8n0l7VrlYfVgvbw+80wDMPIwcapgPbw46aimOjipvSKECk9VIV058ydyFmaY0R5Nqmg2ZWxvt37AgA+O/4ZWRkXKXIio0GE6c0JiyCUXKohXpJfgvJ3yqXkUMfYsyOzWSBzTVMeaJ3nYtKYUjGQZTfcRGuHG5T10OSmi8rGA1UWgpgvqyBV506dCXenhuea1myYAk7jFDBnoLaH32+GYRhGDjZOBbSHH7dUe05lPRQWIkWMeh/z8ubhnPA5KI4VA1BXnlW8MxRMexGd53YzGijMGDYDEwZNMNp3NKg5lPFKUolGoqiYWGGsaBNls4ByzSB61pp4Lia8uKoGsuyGm0pUgWg9DGLTReZZy6wjst5u6nxldMvAM9c+E2j17VZr2snOwM//0TxyLxoa9EN828PvN8MwDCNHh6nWezaj26NRBpkiMFYvT2qLEWpO5cKahVgQW4BoJIpoJNrqmlSlUbcqqRtBt0XwqqhJYdlby4xXcA5iDgGzVWmt6q/l15VjzIAxxgxzSsVZ0TWDqihr4rnotgnSqcQqm/cqk49NWQ+DqiJLrWwtu47ItoKhzlddQ13g7VxarWmvLUbSKPWvhjx3bmBDYhiGYdoxbJy2AY2NwIwZwNixyf82NrY+xt5eoWZPDUqvLAUg36NRFmoRmNKxpaidVYv5V8wntxiRNUjqG+txqPGQlAFsx0RVUjupaovgNBo2Tt2IrJ601haA2f6BVR+2Ki3timwxJ5l2HSJkNixkUWk/YxHkRgZ1vjd9tMlXHlXaBFlrU3GsWNlAlt1wk107ROthUJsuFFTXERlZlJ2voNu52Ne0IZ0mkL6zY0dgw2EYhmHaMR2qWu+ZgLOVzIYNwJNPtsyx8Qr3um/UfVj53krpKqsUrHC01R+sJh3fr3s/aWNYVNXSiVV58z/f+U/psFHAfFVSqkK7dMtSzBw+U2uzwFlRdsnV7lV/vcZhomJp5bZKlL1ZRjpWVhmmtOugMC9vHorziwNtfyPTfsYi6Arb1PleWLMQL/zlBSMh54BaLrqbIS1byZq6dmT1zMKSq8X3GtSmCwWd9ipUWZRZa/2uZzIk3VrT/m0osPV18fG5uUqXYRiGYTo47DlNIe49TpNUVSX/7udtWbR5ER6/8nHlMDwv7KGHy95aRvqOSlimX2iaFzoeDFPh0JaniGq4z14/22hoLSAf7qvr+bGMKwoyc7hy60rEamOIN8V9PUEl+SWka5sM4/VD1sMoY4C4zY0IGc+zKU+619okwmutkPEEUtaOkvwS1BbVknLRf73111pj10E3ooMii/b5Uh2XX0i6isxaPPaY2eMYhmGYMwv2nKaIxkZvw9SiqiqBLd+a6+tt+dGGHyl5Eb2QLX7k17OPssvu1SNPhIoHQ6dHqYWKpwiQ7z9Kwe41Wf3BatJGgqrnR6bHp8oc2gu/uHmCAKD8nfJAe0cGCXXeqz6swtQ1U6WLa/nJthMTnlqZXHQLyjOS8Up7rR2yRZxq9tSgrqFOeFxGtwwt+fJaD1PVZ9aar7t+dxcONhyUup5fD9oJFRPQO9IbhxoPNX8uUxAuEklGCvn9HhYWcr9ThmGYsxWu1ivAVLW/GTOS4btChi0Drpnpe4ipqryy7Rn8qljKVrC1FLdNH20i9a/UuWfVqqSqVYstVCvZUpCp4Jx3YZ50aB61mmrRyCKUji31/HtbtTsJokKuDKo9awG5arGymyelY0uVQs5l78dUL003dJ+tKdn2w289LBxYaLzPrB8nT51EVmmWp0HuvJ5K2x6V5+0VScR9ThmGYc5uOKw3RZCLO9R/TXiIqTwoGe8Y4F0ERqXwixWaVpxfTA69VQ0lU6lKquIpchJkURVqyHLd8TqlarFUr03hwELPv+kWkFItRhRUhVwZKGG34ZC74SFTXMuS7Xl580jjUg05l11z+nTrg1kjZyEaiRovtKNSxMmOCdn2Q7QeVm2vkqq86wdlTezcqTOeufaZ5grTouvJ/i4AagXh1q5Ntou55x7gyiuT/21oMGeYMgzDMB0TNk5TBLm4Q3Sn8BBTeVBUhXPGsBmeBp2uASLKJUsggQmDJuChmodwUdlFygaH6ZxBGYIoqkJpLXHD4BswedVkpWqxJvJ1g2h3snHqRjxf+DxOnDrhqoybrpCruiFCeT7xhPe5ZDY2wmlhjBkwhjQuQG0uqGvOxEsmIqNbBuoa6lD2RlmbbAyICLI1F3U9LBxYqFwF2kJmE0Zmo0d1vVLZjItEgGXLgPXrk//lUF6GYRiGjdMUQSvukMBXJpSSlSadohQAXeGcMGiCp0FnygBxU5wsz1LZG2VYEFuAfUf3tfi7yZYpTkwalEEUVQH8Fc6KiRVY+d7KQDYNqN4dU618rI2FLp264NaqW1GwvMBVGTfd6kfXA+v3fIpGFpHOQZ1DmQJJKnNBMeh6R3pj9bbVrcJHg3xPVTAh217IrIeyGy92VDZhqBEkuutVEJtxDMMwzNkDG6cpwioC4UdhYQjLvpe0YkVKk4nQRRMeBFMGiF1xKhpRBMDfswSY7S3qhKqg9enWJxAPDBU3hXPnzJ3Ye3RvYJsGVO+OycIvFGXcZO9KUx5YL4OAGjJKnUPZStiyXi6RQWe9i6Y2BlQ33qjf05VtL
2TXQ+rGix2dTRhKBIluD2KqzOpurjIMwzBnJmycppC1a70NVKsIBEVpMqU4m/AgmDRAwmlh5F2Yh1XbVpHOCei35PCCarg/9d2nmv/t/Dug7oGxI7ovu8JZ31iPnKU5mL1+NuncMpsGsu2LTLbyoSjj+47sa/V3N0T3bNoD62YQyMyNrsHlh9tceF1P1PrHXr3ViezGgMrGm+z3dGTbC5X1UHY9N7kJ44ZKyy/rWOpmXHvIC2cYhmHaJ9xKJsWsXZtsKzN3brJIUm5uMuTXnmvj115BpDjLtozwas+QlZ5Fas8gavYu2/JDNddTtSWHF9Q2NOMvGY9wWlh5/kTIVEFWqS5M3TRQqZRsopVPvCmOpVuWkpRxSnsQQHzPMsq/agVp6txUba+SqoJtrR1LtywlbVA450Ikb15rU8X7FaT7Fm0M+LUw8WvNpPo9Vdn2QnY9PHnqJO763V1S67mpaBU/vH4XrBYyqu8zoP6sGIZhmLMDbiUjoL2VopdpISKjdDnbM4zKGoXNezeT2jXotPxwQm3zQMFEOwtqG5ogWpfItGFRaQsUVJsbJzqtfGTapPzq+l/hJ5t+ot2i49fv/ho3r7lZeL0V41dgypAppLF54Tc3AJTb8FjyIDMXOm1/TKxLIhkOIYTMnpl4YdwL+Oz4Zy164oq+lypZB+jrYeW2Svzgdz8gbarY5y2o3wA33NY1tw0Taq/ZVs/4H12BNSuAzwcA530EXH8T+vftY+xZtbffb4ZhGEYMG6cC2tuPG9V4WzF+BSZdOknJYJLtWer1HarCYkelP2Q4FPbMTzWhmLZFz0yKsdk70hsvTXwJ+dn5qNlTQ563IHtQeiE7hype4Opp1ahvrNfaKFE1GHRwmxtA3+CS2TSiGIZ+11Mxhp2ovPtZ6VmYPnQ6FsQWCI819bwoiNZDWfm2b4TIzDWAQNYu1TWxxTN+7g3gk+FAi9DhBHDBFlT/qdHIs2pvv98MwzCMGA7r7WBQc5p21O9opWxSwlxVQ678QpFlEIXF2bFCyygtOZZuWYp+3fspjct06B8FSnjzocZDKFhegKz0LEy8ZCL53KZCjmWQmUPZHrP2UMlwWlg5TJ1qMMiGqotwm5tYbYwcWpx3YZ7reycTsq8bymwihFslDHXfkX0kw1T1/KqopmZ4YV/3gwoJl8FNZikGa/MzaDZMXfhkOG6/7hB2bdUaIsMwDNNBYc+pgFTuvFJ+3Cm75tFIFPWN9dLhebreE1N4eXyc9E/vjwmDJqDsjTKp85tS0IJEJrzZqaD6UTq2FDOHz5R+fqn0Hst40LxkWna8MmHRIYQC9zpTn3/RiCKs2rbK1wChzIVMRIZfKLNOBIWK51QG3VQHUzIvK99ea25QIeEqUKNtYrUxjH72auCRhuYRtSY55qNHQ+jRQ29c7DllGIbpeLBxKiBVP24qhW+A1uF6CSSai1a44afspDKXSYTXfEwfOh250dxmZVEmnNUi1WGtKkqurKIeQghpobRAwptFsmlaiZcxzFVCx92gzndGtww8c+0zgcuNjqGmIt8m333V/HXRxpsqKrKvktpARXbjSZRbHERIuAyyufHdv7EBJ967WnjeceOANWv0xsbGKcMwTMeDw3rbAbKhtH7hencMvcM3zM0vPC8VVSCpUMOEZcKALVSqGgNqRqaqkit7X/bwZp1Kmm7j95PN+0bdh5XvrTSqxFND11W9wG5QZbp0bGlKNjQoz98r11pFvilVZvt064N9R/YhVhvzlX17yGfltkrkLM0hyYdfuCoVE7IfdDVZqnxTNkJ0Q8J1NhmtlkPTX5lOrjYcTguj74lv4WPC+XftUh4awzAM04HhPqdtjGpPRa8efbnRXNJ13ZRxkz1LTUBpGK/ak0+2F6BKXz6dfrSq91U0ssi3R64MItlMIIHHNj+m3W/XCbUHqN0w1e1xS5XpzPRMo/10vaD0IKbkWlPlWyRvCSRQ11CHm9fcLNV3VFb+VXq1WpTkl2jLvuket26I5BtIGqZ7Z+9VMoJTsclorYcFywtQ31jveZybHF526bmka+TkKA+PYRiG6cCwcdrG6DRUdzPedAxMitIUjUQRb4qTlLNUKPGAnkJLUdBUlGwTSq7KfRUOLHTdtFBRclV7zuoq8RTDzO4JU9k4cEI1iOuO12lfi4rX889Kz0LRyCLSOWQMEBl5E21A6Mi/c+Nt49SNyOopfjYP5D2gLfs66zF1vRPJdwghPHPtM+jcqTN53HaC3mT0Wg/9sMvh8uXW/+fvGf/yOIZhGOZsgo3TNsb0LjdVyXarNErx1tU31qNgeQHJa5gqJR5ordCWji0lfU+koKkq2aaU3Ggkil0zd2Hj1I2IRqKe57M/V4rHmYKOZ0XWc+fEzzCze8J0vNN2KAbxDYNvwORVk417iv3wipAoHFhI+j7FAHGTt+pp1fjV9b9Cn259XL8jMjB15B9oufE2ZsAYLLmatlmhK/sy67F93h7844O4qOwi8npHlW8VdH4DRKhUGgZaymGPHsCwYcmReDFsGLSLITEMwzAdE845bWNM73LrtnTwymd14pd/ZTJnSybP055/FW+KY/Hri4W9AEUKmmqLDdVNB78c1fLryn37VqrklALec2wifFvHwBXlHYs2DmTzLr1kPzM9E7f/8+1YumWpsWvJ4JZXSMkRpci3n7xlpmfiYMNBz++mMn9dpi2ODjqtupwE0X6Lsh6aaOvjhWw0hZccbtkCDB8OvPVW6+8MG5b8O8MwDHN2wp7TNiaIXW7dXXnLW+PnrfPynJjM2dLxvsqGhroRb4pj00ebhNcCWivZKpsOIi8gAOPeFr85poR5i6DOg1dIpJ8nTNc754bTU1mSX4JEIoGSP5ZI59YFiQn5Fslb1fYq0lhSlb/u5kXeOXMnopGosdQBynrcO9IbxbFioZFGWe9kPL0y62FQnlmZzSaRHG7ZAhw9mqzKO2RI8r9Hj7JhyjAMc7bDrWQEpKIUvV9rGODLUvwqvRt1WnyotJcw1ZJCpj2BH6p9F92+54fzfij9aO3tHGR6zAIw0rqFMscASD1nRfcnGodKRWNTvTn9xuU2P0FcywvRO6wi39ZGwKRVkzwNbqs6b11DnXCMbu+yrPyrEFS7F51WXV7ott9SXQ9Nt3mSaXFkqs2TDtxKhmEYpuPBYb3tAErImooi5hYOKINs/lXNnhqs/mC19rlNhmuqhM7JGCVeYWuyoXWy7R90+8xS53j3rN2ustk/vT9uGHwDFm1e1Pwdv/vzQicEPMjCLyZy63ShvPOy8k3ddLGq82Z0y8DBhoPSocNBhpZa9xFUuxedVl1e6IS366yHur8BTigtjqKRKComViDvwjxs3rsZK7euNGIYMwzDMGcHbJy2E/yUzKD77nlhMv9K5tzUcM1YbQzhtLBQKZdR0GSMEpGSTc2Tq9xWiemvTCeNz1SPWZmQWD/ZHJk1UjkPUHcTwlTepZt3yVRunSoy7zxVvlU8wTcNuQlL3lxiNH9dN0/UdK6x19jdZL7i/Qql8+lsWsi8q5bsmvKUOqFsOpRfV47DJw6T+9syDMMw
jB02TtsRbkqmjCIGmAn3tKAo/9FIFMWxYrLCS1HiqQaYMyzRhPIjY5RQlGyRZ0vWYDDlmZMtWONlAKl4pi1Ui01ZmPDOeXknJ14yUTh+2WtRCcL4UvUEF15ciLyL8pQNTB358ML05pUXbjIv+/6Z2LSgvqtVH1Zh6pqpgRuEok0HAG2ymcowDMOcGbBx2s6hKmIP1TyE8nfKjSomIuXf+reMYQqIlXiqAujMlzOh/FAVwXl581CcX0xSdr0MO1kvrUjJlckvMxkS67WpIhoLWeneXuXpGdTxzvl5J8veLCONjXotGXSNdpVzOrHLWzgtrGVg6oSWuslRW25eUcJaLUxtWlDfVTeZDcog9Np0AIDsJdmBerUZhmGYMxs2Tts5VEXMLQ+qveVfZaZnYvrQ6Thx6gRitTFPBVdGAbRjQvmhKoJjBozRVq5kDQY/JVc2J9lUSKzOWMhK9xtlyLswz1OGVVtyiLyTaaE0xBPe1V+t3DqdXrJumG7DInusm1FlOnfRC7sxuqN+B557+znsO7qv+e9Z6VmYPpQWAh/E5pXfhp0TU5sWlPUwHAq7yqpoTdQpmOQmE7K58wzDMAzjhFvJtHN0wjhlW7d44dbCYfes3ciN5pK+P2PYjOaWHAtiC4RtEPzaZIjQbekRZAN7J1SDIRqJ+irUopYgsnOs4vGxKsDOXjcbEyomkMYi06pGJMMyLTkAmnfSUvbd5ieEEMqvKzeySeGE+s5/UPcBuX2KzDqi225EFWerlAWxBS0MUyApRwtiC9A70ltpbQDMrIdebVpK8ktarJEm5pDyrvptonitiTqturwIYmOFYRiGObtg47Sdo9tr0p5/5dZHkoqb8k9VeDO6Z6A4VuyqaHoZT14KoFffVSdWBWHZezZttPlBnb+KiRW+bUFU+8qa6oVoV3K9wmHdxmLNNcU7brqHKFU5LhpZZLxXpAjqO7+wZiHZoKCcs3ekNzZO3WjMqJLBa4PFieUJtAh688prDXHbsKudVYv5V8wnb5DI4PeuFo0sIp3DLvMqG1oUgqygzTAMw5wdcJ9TAe2hT5qo7x6FaCRqPP+K0sswMz0TiUSilWFqP8av36Ez7CzeFEfB8gLh2EryS7RycFX7o7qN2StUzkQvSBN9ZXVC+1QqwDrHMnvdbFKOp8keojLzFnQFVDe83nk3qL1/qf2UAfP9Mf0Q9fj1wu0dd65zXlBkKag+qlTcngHQuuhdzZ4aqTVApqey7DNPRX9bGdrD7zfDMAwjB+ecdgBM5H2mOv/KUninD53uOz5KRVb75/GmuHIFYdE9O5XBXTN3YfPezSQjU5Qn56bQmqg2ayKMTjWfULUCrHMshRcXkoxTk94WmZzbVOVbAl/K0olTJ1CcX4zyt8ux9yjNmyjKtZZpbZRKo0w299oiN5qL2lm1SptXIllqq/Zd9utTn4Fs/ji14NbSLUvRr3s/qc2JoPvbMgzDMGc+7DkVEMTO67FjwNSpwK5dQE4OsHw50KOH+HteO+l+O9V+mNrF9vMynjh1AjdW3ig8h4xXTORJ7h3pjUONh1y/63XPqgq52/fcrgl4e7Z0vLQmPKeqUK8tGkuQ3hY/D6CMJzEVuMlBZs9M3HnZnfgi/gUW1iwUnoPynClz4nwOQc7Jyq0rSWuEE7d7NSFLMp5FwGz7LkDtGcjIssp8y25O6KxpJmHPKcMwTMeDjVMBpn/chg8H3nqr9efDhgFbtqidUyYE0A2q4eKn1Hr9LSjjyUv5oXqS7ddTVchlQlplw5epSm5bhtHJKrl+YwnCUKRsOAShRKs8S5EMzho5C2VvlAmvrRP6HGS4px+bPtpE8nZSx6ErS9Q1Szd1wA2dZ0CVZZVNJZX3MJWh4V6wccowDNPxYONUgMkfNy/D1MLPQBX90LspJu0h/yrVXrGK9yukPLWqyqBqnlwQHkzThh1VqZRRciljMWkoymw4mFSiVd4Tigz26dYHdQ11wuvryFdbeOErt1Xi3lfv9cxJdyKTX6siS/GmOIpjxSQvtc74vNB9BhRZFq3JXqQ6X9QEbJwyDMN0PDjnNEUcO+ZvmALJvx871jrEl6LwuvV6bA/5V6ZykLyULqeCJlstkpp/5cyJVc2TC6KFAjWXkILJXDc7lLGo9Ct1g9LD1J6fKZNTqhISK3pPKDJY11CHjG4ZONhwkJRXqEKq24CoFNOiyrSKLFFC9EXo9lqmzu2+I/sQq421ujeKLMv0arXDPUoZhmGYVMDGaYqYOpV+3Jo1X/5bRuFVKR4kUmhlFX03dI2nIIuDqCrkqgp6UC0UTBh2ssYVRcktGpmUDZmCKrqGouqGgwg/OSwcWKj8nlBl6aYhN2HJm0sCKzSTyjYg1GJaWelZmD50OnKjueSiZH6bV16oGMpe6Bhx1Lm9+w9348iJI83/lg0n9lqTKXCPUoZhGCZI2DhNEbt2yR9H6WF55yt3oleXXq599Ux4LU0p+qrGk0mDye2eVRVyWQWd6tnSCTHVqSqrugnhpeSabrvjxM9QPHHqBOWWpZRskRwW5xcrvydUWSq8uBB5F+UZ8ZC7IbuxowM18uCFwhcwZsAY32N0qwvLVJ2W8TSqGHF5F+Yhq2eWsEKz3TAF1KoIO9fkA8cPYPb62cLvcY9ShmEYJkjYOE0ROTnA1q204ywoCtyhxkMoWF7gqYzpei1NhvrJGk+qBlPhwEIU5xdjyZtLWvV2dd6zqkIuE9JK3Qhoy76KOpsQOhsPqhWSRYYiBaqSTZHDJW8uIZ3L7T2RbWtjIvTZwrk58PiVj2PyqsmBtwGhriu/2/E7X+PURMsXmRB9mfZdKkZcOC2M6Zf5t99yQzWc2L4mx5viWPz64kA2J9pDcSSGYRimY8DGaYpYvhzo2ZN2nIXMzrufMqYT8pnKUD8nKgaTV2GoWSNm4YG8B4x5l2XytigbAW3dV1FmE0I3hBJQv1+KoVj+djmyemZh31EzSjZFDimFxwD390RWBt3mWrVCsNvmwH2j7sPK91YG4p21oK4XZW+UIe/CPGVZoBhrVNmflzeveeOj/J3ywDzMudFcpe/p5oQG1aO0LTfdGIZhmI5HmuwX1q1bh//5n/9p/veTTz6Jb37zm7jxxhvx+eefGx3cmUSPHslqvH4MG9ayGJKMwWcpEkXrihBvirf6u6XQThkyxTUE2E68KY5YbQwrt65EvCmOzJ6ZzcqJkxBC6J/e30ionxNZr61l8DgNic8bP0dxrBhV26ta3FusNoZ4U7zZu5yZntnie1npWb5God/3SvJLsGL8ClRPq8buWbuFFUJF4dtez9UUVFnbUb8D2UuyMfrF0bix8kaMfnE0spdko3JbJflaOvdLMRT3Ht2L6ZdNB4BWcquiZFPlMBqJKr8nqjIIJOVe9pl4vSv7juzDos2LUHplKaqnVZNlWBbLW+w1X3Z0ZMEy1vygyv6YAWOaiw4tuSrpKTchX6rj8UInJ1RHDt3wk7OJFROl1g2GYRjm7EC6lcyQIUPwi1/8At/97nexdetWDBs2DHPmzEF1dTUuvvhiPP/880GNtU1oyz6nqiX/dVo
9uO1y9470xqHGQ5676UF59WTaKuRdmCdsxxGNRBHpFGmRz2XfwVfxPlnGbqw2BgDIz84XGv869xlUlUxKyx+rNZFsP1gnOvdL7a26YvwKdOnUxUhrGpm+l8WxYgDqLX1kZVClR6/pfqY6ecMTKiYIjwP0ZcGvVVa8KY5+i/rhUOMh1797zYdu6yOveVNd9y1MrBMmwnDbqm+uHW4lwzAM0/GQDuvdvXs3Bg0aBABYvXo1rr32Wvz85z/HO++8g+9+97vGB3imsWVLsl3M1KnJ4kc5OclQXmf7GEC95L/qzrmXomuFLEYj0RYKnE6oH0X5kcnFo3hR3JRPZxipbvXWF/7ygnS4momQWl1EIX3Wv3VDKK37oOB2nEyYeX52vpH8TKocPpD3AAb3HaxVsEi2WrFKWKvJasY6IZvjLxmPohFFKHuzzPc4QF8W/KjaXuVpmALJ+XDzhOqkS4jmTWXdt6+HuuuETnE1C5KcHTyAibd+ioYDmcjNBR57DIhEtC7LMAzDdHCkjdPOnTujoaEBALBx40bccsstAIBoNIojR474fZU5TY8eLdvF+KFS8l8lLIyi6EbOiWDjxI347PhnWoYRVaGVyYFSNchVC4mYzBGVDakNKnfLr3iWqAiMjEGjY1RQClFFI1HEm+KIN8WNKNkyclg4sBC9uvTS8qRTUTUyTRU5M/EOFF5cSDJOVWRBplWWH70jvVE4sND1byr5v9R5c3sXRVEsZVeVoWp7VbvI8RTK2cpKYPs4rD099g0bgCefBAoLgbVrgx8fwzAM0z6Rzjn913/9V8yZMwc/+9nPsGXLFlxzzTUAgL/+9a/IysoyPkAmaTTUzqrFxqkbEY1EPY/Tyf8k5fId2YtwWrhF3qpbDqcfsjlI1BwonTwtam6ahekcUVH+XQgh9I70RnGsdbsS07lblqw58w2pRVoohg/lfr3k2C/fz6K+sR4Fywukc2H98JLDzJ6ZKM4vxolTJ/DgHx/ERWUXoWB5ARbWLMTCmoW4tepWVG2vMjIGJ6pGpgmPI+UdmPXqLGz6aJPv2hCULJhqlQUkK6JT1wZR/q/M2uH2Lh647wBWT1rtuR4CaDc5nr5ydtowdaOqChjn/ieGYRjmLEA653TPnj24++678fHHH+Pee+/F7bffDgCYPXs24vE4nnjiiUAG2lakKmeFGoZlGXeAel6bGyr5W7IhfTo5SKL50c3Tct6bH0HkiPo91wQSzR4TN1KRI2j6nnXl2E32nASRE22fux31O1D+drlvT8og87JVnwklv1gkT9Rr2/FaG4KQBWrup6m8Vft9+OX/RiNRI++R8x0elTUKNXtqMGnVJM/K0anI8XSO0VXOTnYGfv6P5lF50dCgH+LLOacMwzAdD2nP6YUXXojf/e53+Mtf/tJsmAJAaWnpGWeYpgqZapumqylayHpTVKow6lTXFFUbpnjURFDnYN+RfaTj3DxbXp5mUeVfUU4c1fOrUtkV0PNwuaErx5RogiAqHVty2KVTl6Qn28cwdRuDbKSBH6rPxITHUSWMXjc6wgsvbz9lLTSVt0r1iOqsHXbs62F9Yz1yluagYHmBb0sj2QgRXTzl7LXFSBql/uv03LnBjY1hGIZpv5ByTo8cOdK86yjKK+XdSTlU8rZ0CnF4IZO/pVqIxVSumxeeOZM9s9B4qtG10iwg15ewclslZq+fTRqPU6EVeZq9nmvF+xWk6wWZIyiTd0n1zOrKsdXWg6qQm6p07Cf/fmN4qOYhlL9TbiwXUKcvpV9+McXjqBJG77c2mJAFledrKm916ZalpI23uoY60rio8+v1Tvuh02rGDb/33VXO6r9GOu+OHUaHyTAMw3QQSMbpeeedh/3796Nv374499xzEQq13vFMJBIIhUKIx4PrxXim0NiY3BX+61+b8KeGw0hccQ7Q+WSLY0SFekwUenGej6roxmpjSoVYTHkp/PBScqu2V2k3l6cqgm4KLdUwdHuuqcgRpBSFohg0sqHeunLcFpWOKXmKbrgVlFIpoGVHx8h0vit9u/cFAHx2/DPEamO+80MpTOWG32aB6TWNgo6BD9DCy+1kdMvQNoYtZDdJLHT7qNqhvO9OOavYNQRrd4nPnUtLc2cYhmHOMEg5p3/84x/xL//yL+jUqRNisZircWpxxRVXGB1gW2M6Z2XcuGTBh5YkgIFrgSnuimSQ/S2dUPK3Zq+bTaqw6czTksl1A2C8ZYpObpooX9Z5H3ZjQ7ffXypzBEvHlqJf936+c+5l5Kn03NRFpgepKa8lNU+RiolcQF3DW6UljFeuKIUV41dg0qWTAmmLpILK2qDitayeVo36xnqlHFvnM443xVGwvIB8bdM5p6rve2Mj0K2b+Pycc8owDHN2Il0Q6WzD5I+bu2EKwPpx9zBQqYV6TOGn6FZuq8SEigmk87gZ1ZTiJwACa4WgqsRTjaCMbhl45tpnWozTRDEh3aIxKgaVzJxTDfCdM3di897NxgwSiuEejURdQ7pVjWaVYkAUKBsDQaCzqSDrObQwuVlgCpm1QWazCmhtGMoaw27HW3JNvT5gboNId8PN+7cwial2MmycMgzDdDykjdPi4mLMnz8faWktaykdPnwYP/jBD7By5UqjA2xrTP24iXeLTz+Gn3ZtFeIr6zk1Fb7odl6qQtY/vb+nYuKnmAFIufeNMl9U4+5X1/8KN339JqXvijYhdDy/KgaVzJzLGO/2vDsTBkkqKx0DcpWhnaGiVFJlqOkaGdY57KHBt669FfuOpm6zwG0cQRv4Mu+T133JVmhXrUIO0NcJKiY23LwMVJN9Ttk4ZRiG6XhIV+v95S9/iX/913/FRx991PxZLBbDkCFDsGsXIZHkLEVcefB09cLXFts+ke9bqlqNlYJMrp1fnpZXdc3CgYVG+4dSoM4XNU/LWXFU5rui49zmbefMnYhGosLqr6LKrm7IzDk199NZEMZE/8VUVTq2kKkMbY1BllT1pdSpoG1hrxw7ZsAYLLnauxKwJVMq77hfpWNKf1FTVZIBuaJCXlWHRRXIrXGr5JVaRCNRbJy6kVy9mIqJ4nZr1yZDd++5B7jyyuR/GxrMGaYMwzBMx4RUEMnOu+++i7vuugvf/OY3sXjxYvz1r3/FkiVLMHfuXJSUyCthZwvkyoOnKxnKFOqx0KnGSoGqkBSNLBJex634iWqhJVksj0XVh1WuubNu86VT1ZPy3T7d+mDfkX3CQjT2eavcVomcpTmk0Ei/wi9+UOdctcgKtRiTiKArHbtdz6sQ0fSh05EbzW0eAwCUv1MuVTzI1LyICKKCtt/c3DH0DtfCUBZe8uaXEwu4R1tY7/F9o+7DyvdWGg0hpsp76dhSzBw+U/n5qRbfsn4/yq8rx5gBY5Su7Qf1/j+o+8B3TYtEgGXLTI+OYRiG6chIe07PO+88VFRUYMaMGbjrrruwZMkSvPrqq3jooYfQqZO0rSvNk08+iezsbHTt2hUjRozAli1bfI9/+eWXcfHFF6Nr164YMmQI/vCHPwQ+RjfIlQejOwHI9y2l9tnT8RhQFZLCgYVK5w+61QzQ0sPiVd
TJbb50ekOKPG0JJFDXUIeb19xM9nSr9Jn18jBSEM25imfWwjJIYrUxLe+WmycqyArRbp7s2lm1mH/F/BZjUO3Bq+LVlfUQBjU/XtERuVHaQmiXN5Gs3/nKnZ7rXgIJPLb5Man3hAK1x6yOYQrQ1zpnr1/dvtciqO/7wpqFRqN3GIZhmDMfaeMUAJYuXYolS5ZgypQpGDBgAO6991785S9/MT22Vrz00kuYM2cOFixYgHfeeQff+MY3MHbsWHz22Weux2/evBlTpkzB7bffjv/3//4fxo0bh3HjxuG9994LfKxOHntMdEQCQAIvLOsr1UTeQic8j6rQUhUymTBkO0G3mvFSct1wmy+/8FGRIihjGIoUZ52NCKfRUDq2VDgeQDznqgaYnUmrJhkPR6fIbEa3jGavtQmD2I0gNwYsVEL6KUZGNBJFvCneJpsFIllPIOEbtu2F7oadymaVSmgxdb4qJla02ggIMl9Z9n1PVZg6wzAM0/GRLoh01VVX4c9//jOeeeYZTJw4EY2NjZgzZw5eeOEFlJSU4Mc//nFQY8WIESMwbNgwLDsdB9TU1IT+/ftj5syZ+MlPftLq+MmTJ+P48eP43e9+1/zZyJEj8c1vfhPPPPMM6ZqpqdabRKcQhEzRHXsLhx31O/Dc289h39F9zcf4hbzpVo31w0TLFNG5ZUPk3IoUyVb1tB87KmsUNu/djH1H9qFofREONhx0/Z7fvZooRmIfn8k5dwu/dBZBomKqCJZMy5OgCxHZ5eHA8QOYvX628DuU56hbcZcyPybmRlbegqqMbEenVRe1SJlKqx4g2DVRFbsM76jfgfK3y7H3KK3FVqrHygWRGIZhOh7SntN4PI53330XEycmlZlIJIKnn34aq1atQmkpzQujwsmTJ/H222+joODLvm5paWkoKCjA66+/7vqd119/vcXxADB27FjP4wHgxIkTOHLkSIv/mWLt2qQB6oZuhULqDvuO+h0tvCsLYgtaGKaAWmioyHtI8RrohM6KUM3dcptXqrfMzZOVszQH9Y31yEzP9DRMAX9Pt8nwZ9Nz7hbOuXf2XqWQX1Ph6Ca91m7IeMTssjNz+EwjkQi6If3U+THh/ZKVN50Qfio61/AKX3YaprIh+BZBrokqONe0BbEFSCCBkvwSzMub5/tdlTB1hmEY5uxD2jh97bXXcMEFF7T6/JprrsHWrVuNDMqNgwcPIh6Po1+/fi0+79evHz799FPX73z66adSxwPAww8/jF69ejX/r3///vqDtxFUhUJK+GLvSG8Ux4qFRppsaKgojEwm3FAndNYPWQVUN0RZpJBWbfdxodtwG7fp8GfTc+403jt36qwc8iur0HoZinaZ/dX1v0Kfbn08rwfQDWKd6timDA8TFXet+dk4dWOr/EX7eYDgNgvc5E01hF8G6jW8ZMtvs8pELYAg1kSVEGOvNe2To5+gOFaMY18cI13bvqY1NgIzZgBjxyb/29godx8MwzDMmYfRCkZ9+rgrfB2J+++/H3PmzGn+95EjR4wbqJEIsOSJL0Oj3jyg35PPrxqrqIWDG6IqrW7Vdt1QqSDsVXlVZ35klFxdj4RIIQ0hhF+/+2vSudzGrVM52AsTc+4X7uxVwdXqeSmCsrkgCp20ZDZWGyN7rf1k3ER1bL/KtpS+lPGmODZ9tMn3GAvRHFrFm/yeh9/cyIS7U+WNIuvRSBSHGg9JVaG2vkt9T1TDcmU2DvxkzeSaqHIvQaxpzjSXDRuAJ5802+eUYRiG6XhIG6fxeBylpaWoqKjAnj17cPLkyRZ/r68XK5oq9OnTB+FwGAcOHGjx+YEDB3D++ee7fuf888+XOh4AunTpgi5duugP2AdVRUeETgsHL3RC3igKjVerDKrxS0Wk5NqhGgZuxJviWLplqVAhrWuoQ0a3DBxsOChtYIo2IgA1w1pnziky7aZgx5viKFhe4HXaZkSbCzKGoomwaB3ZdqJqeLjNuR+UDRqZuXHmHsrkrgM0eaPI+nPXPQcArrmfNwy+AYs2LwLgniNPeU90NiFMh+Drromq90Jp8yWzpvnVX6iqShqubKAyDMOcnUiH9ZaUlODxxx/H5MmTcfjwYcyZMwfjx49HWloaiouLAxhiks6dO+Oyyy7Dpk1fegmampqwadMmfOtb33L9zre+9a0WxwPJsGSv41OBTv4RBd0WDk50wupMhBuaglJdsmhkEaqnVWPnzJ2IRqLSLU2sEE9KkRsAuGnITa7joSjOQYU/qyAj084QyPzsfO28S5nQyXhTHAeOH2h1nBt+sk+VbWp7HGoes4VM5WmZEPVU5a7LQJF1r3Xv0e88qvWe6IblBl2BXAbVe6ncVolJqyaRrkFZ006eCPsWBgSSBiqH+DIMw5ydSFfrzcnJwRNPPIFrrrkGPXv2xP/+7/82f/bGG29gxYoVQY0VL730EqZNm4Znn30Ww4cPR1lZGSoqKvDhhx+iX79+uOWWW5CZmYmHH34YQLKVzBVXXIFHHnkE11xzDX7zm9/g5z//Od555x0MHjyYdE2T1f5EFWOtneWdM3di897NxkJaAXqFV+dYdCorqlYQNnXPboiqa6p6tb08En5UT6tGfWM9qdqnFzKhlEFAlWk/OdKtAE2V7ZL8EpS/Uy406Chjpsq2M2zZZMVbqmEK0CseU6rDWvdEkXWTFVp1ZF31u7qVsdtTtV2Ve5Fd1yhr2owZyfBdEffcA5wuzK8MV+tlGIbpeEiH9X766acYMmQIAKBHjx44fPgwAODaa6/Ff/zHf5gdnYPJkyejrq4O8+fPx6effopvfvObWLduXXPRoz179iAt7Utn8KhRo7BixQrMmzcPP/3pT5Gbm4u1a9eSDVPTNHtbTnYGXlsM1H8NiO4EvvMjoPPJZm9LVmlWi9YbJhRambBWU1UgZb0wpsOc3fALoVQNefPzSLhhD28Lp4W1csl0Qv1MGLYmcup08y6poZOUsHaq7FNl25m/KZOP6oVM5WnZEPUgc9fzLszTkjcdWXf7LkX+qbJVtb3KMzc/iBB8FWRDjGXWNZk1bccO2nipxzEMwzBnFtLGaVZWFvbv348LL7wQOTk52LBhA4YOHYq33nor8FxNAJgxYwZmzJjh+rdYLNbqs+9///v4/ve/H/CoaOw/uh9YWQlsHwdYYU+7ALx1DzBwLTAlqUA6e0KaUGj9lCQnOjmXdqjFTIpjxVIGoa5R5aWoquYQyhgLbgqp6fxaCqbynk3l1OkUfDEZEkmVfZnNHjuy+ahuUOd8Xt48FOcXS18jiNz1qg+rMHXN1JRsQFGgyj9VtsreKEPehXmu96K7+SKCuh7KhhjLtt+irmm5ucniRyJy1TJRGIZhmA6OdFjvT37yE6Snp+OnP/0pXnrpJdx8883Izs7Gnj17MHv2bDzyyCNBjbVNMBkW9K9X1uFPr1kVje05Oacfgc1AdWIq/MtLKZs+dDpyo7nGQ0P9QjYTSKB3pDcONR5y/a7bPQdVTEonfI8a4gnIheyaxlJiq7ZXoeyNslZ/lw0BBejzNi9vHsYMGBNI2LEodJJK6dhS3H353eSQe
i/ZpuIVCipCN9SUipvRU/F+BVnWRajImwm8IiTcxiMjW/3T+/uuz0GE4Mush7IhxjKh6+XXlZOfYWMj0K2b+LiGhmRlex04rJdhGKbjIW2cOnn99dfx+uuvIzc3F9ddd52pcbUbTP24JX+Qral2K/5y+m8/7Qp0Puny9yRUhdNPEUp1nqJXnifVC2Pds4xSKYtMfuyUIVNafEY1FkrHlmLm8JkpzQm1oFZ2ld0EkTUMg/KWiTZBKBSNLMKqD1ZJbXy4zSu1PY5qrnVb5jHK5q4DQDgURjzhXjAolTmXVm/PSasmeT4frw2xCRUTSNfQ3RCQQWU9lMnvpj7rjVM3YsyAMVJj96vWC5hrJ8PGKcMwTMdD2zg90zH140YtAoFhy4BrZnr+2c04chKUd5GCl+Gr44WxlHjdwjt+6HijZI2FttgcUCnWRFWyZTyIQXrLdDdB3KCM1/k8qe1x3Ao0Ud9TqpFhWtZkNiNkNgaCNupk2+44xzN73WyUvVkm/B5lfTaBTiEyUVE45zUo6xoAaTnzMlBN9jll45RhGKbjoWWcpqen43//938xYMAAk2NqV5j6cRs7lpZng5x1wNSrPf8sUuKC9C6KkDWKZQxCAORd/HBaWFoZ1/VGUY2FVG8cyFR2tSOrZMso/0F6y9yMMQDCZ5sWSjPm3dOpeCt6T529RcvfLsfeo2YrT4ugbkb0T++PCYMmuIaQOwnSqFPZnHGOJ1Wh1FRMVBGmGJOUdQ1o3WOWKmeNjcDcucniR7m5wGOP6Yfy2mHjlGEYpuNB7nP6ySeftPqMna50yMUdojtdPzbd79E0Kv1braIylB6X1CIwk1ZNau69OPrF0chekk3qs+jXB9VU39Gge9y6IVvUxIJSPMUKk1y5dSWikSh2zdyF6mnVmJc3z/d7Qfa3desXKnq2CSQ8DVOV8VKuZ53X7VqAd79JZ2/RBBIoyS9p0dszaFnzk3XnWAoHFpLOGVSfT9lK2l7jkVmrVMZI6YVrR7cQGbWvrmhdA6AkZ9Y9r925EhPvi+EPr8axbJlZw5RhGIbpmJA9p+eddx6efPJJ3Hjjl2GYPXv2xF/+8hf2nBKgFYFIAD+NIHS6rYyF6X6P1dOqtds62NENMaN4HFVy3dzOI4Ia8uaFX1hzkGHJXsgUa5IZh59X7sSpE8r5u37ohqh6PdsJl0wIJGTTVK61dS7ZIj6mZE0mVN95vrbu82myv7NuP143VL3bqfbk+kUkyMpZKqNH2HPKMAzT8SC3knnooYdw1113Yc2aNXj22WcRjUZx880384JPJBJJ5tL4F4EI4ZabVyi1HIg3xbHpo02ksZhu66DT65LaZiFVrTt0WpoA3i0UTPQDlSXeFMeB4wfIx1P7Lor6wRbnF5OuJ+MtM6HQej3bmj01JONU1rvndb2K9ytI36f0m3STb5OyJpp30fd1+nyayJelehgp4zHdEka1rzJAa9Nl9R2lIJprt3UtVhuTljOde2YYhmHODsjG6d13342rr74at99+OwYNGoTy8nI8/fTTQY7tjGPtWkoRCHnjSLbYh5sirqMc6IaYUQxCmT6tTmQNvyD6jprqB0pFViYA+iaIyFAqf7scWT2zsO+oGcXZpELr9mxNKfpeCr7zeqb7TTrl25SsmZp3FaPOlHdNZkOBIv+6m1cWOn2VAT2j34nqXMvKme49MwzDMGcHZOMUAL761a/i//7f/4tly5Zh/PjxuOSSS9CpU8tTvPPOO0YHeKaxdm3LIhADcuL43swa/P3UfsRqvRVaL2SLfXi1ddBRDqgK4I76HZ4KPOWevZRcausOU4afCrIGiQ5UmbCU2qIRRSi8uJCkZFMMpb1H96IkvwTFsWJtxVlHoaV63kwo+jIKvqwxLGsEmJA104aEjFFncjOCEnERjURRMbHCN/fSjsz67CWDJrzbJjy5OnMd9CYLwzAMc3YiZZwCwN/+9jdUVlbivPPOQ2FhYSvjlBETiQDLln2p0D5ToeYdkCn2IVv4RUY5oIbcLogtwBNvPoFDjYeaPzMRmklt3RFU0RUKpsPwvJCRCZVwRKqhlBvNNRICqarQynqDdBR9WQVf1hiWNQJMyJquIUH1Irt9z6RRTJnr8uvKfft0qoYXi/KyKYjeNx1Pru5cG9lk+UdXYM0K4PMBwHkfAdff2KabiAzDMEzbI2VZlpeX40c/+hEKCgrw/vvvIyMjI6hxnfGY8A7IVGLNSs8it3WQVQ7sCqAIu2EKmAnNjDfFAzP8TPWJNBmG5wdVJtK7pGPxlYulQ7hlDKX87HztEEiVEFXVd0tF0VdV8GWMYVkjwISs6YQG64TkBuFd0914ULmXVOZlq6YhqMy1cz18/MrHMXnVZLVNlufeAD4ZDlgVkD/7BvBIAx78wxFM+Yv07TAMwzBnCGTj9KqrrsKWLVuwbNky3HLLLUGO6YzHlHeAqkDOy5uH4vziZOEXgnGq4mEcf8l4FOcXk6qQ2jGRaxSU4aejZLsZtaYLqrhBlYkjJ45g8qrJCKeFpa6rYijp9FmU9RqayOWTUfRVjal4UxzRSBSPjHkEdQ11yOiWgcz0TNd5UJFvXVlTDQ3W3XQLKjdbZeNB9V7aIi9bBdm59loP7xt1H1a+t1Juk+W5108bpq358N10DB8ObNmiclcMwzBMR4dsnMbjcbz77rvIysoKcjxnBaa8A1QFcsyAMQinhQMPLc2NUpu5tsRErlF7qqQpMmpNFFTxQnZjQXZToL3nZ6Y6r03Vs+t1/14tWlTkW0fWVNYKE5tupnKzVcOK7d9XvZdU52WrIjPXfuvhos2LUDGxAn269/GVM2vtmPB/brYZps6+scl/v/UWcOwY0KOHyp0xDMMwHRmycfraa68FOY6zClPegbwL89A70rtVqKxFEOF+fh4v3ZxO3Vyj9lBJk2rU6hZU8UKm5Y6qodae8zNTXRVZ1pgS3b+XF0p1Y4NqkLnJmexaYWJjwMQGmolKvzr3kuq8bFWocz0qaxRylub4rodzNswh9aodf8l4DNuyA2+1MkpbM3UqsGYN/X4YhmGYMwOuZtQGmPIOVG2v8jRMgaTiYDLcT6T0qfYitbCKG+kYlybawOiEapos5qKiZMvk/1qoGGrtNT/TZFVkiizKGFOi+weAxzY/1upvqhsbVPzkTGatMLExoLuBZqrSr869pDovWxXqXG/eu9loNELjZ7T52bWLdBjDMAxzhsHGaRtgwjtgKbp+9I70RuHAwlafB5l/pdKL1LrfuuN1yF6Srd3bUBdVxdRkSKmOkm0Zcz/43Q9Q11AnvA9Vj3eq8jMBoHBgIXp16YVYbQwAkJ+d79r6w1ToOnVjQMaYitXGpPrOWpjIy/aCIme1s2oDyQ/2QnUDzeTmkM69UNvXxJviiDfFhe+RqaJsblDmeuXWlaRzUdbNym2V2IEIgKuFx+bkkC7LMAzDnGGwcdoGmAivpVRlPdR4yNMQclOIvJQgGaXPS9mxwo+97veGwTdg8qrJ2h4PE6gqpqZCSk0o2eMvGY9rc69FZmkmDjYcdD0mFUVX7MjMj10Wd9TvwHNvP4d9
R/c1H/PCX15w3bQwlRMrszFANaZ0QomD6AEpI2eUtcJkTrvKBprJzSGde/GTQYv6xnoULC8Qbr6ZCFEWIZprU5sOze/VtV2A9xpOf+od3rt8OemyDMMwzBkGG6dthG4BH9O5dX5KUDQSlVL6vJSdqu1Vrtd4/MrHMXvDbGPhsLqoKqamlDiqkr10y1L0697PU3Hv3Kkznr322eYQ37YoumKHOj876ne08qA78du00Hm3dEKPRcaUiT67JntABtFD1mTVbPsGGsV7aHJN1N3k8JJBJ35ybCpEGRDPn5/31mSkTwIJoOs/gAu2nC6KlEBLAzX572HDuBgSwzDM2UookUjIJweeRRw5cgS9evXC4cOHkZ6ebvz8qiFbsdoYRr84Wnhc9bRq5RBSSwmbNXIWqQXNivErMGXIFN9jnPc7KmsUnvrzU5i9frbw/JR7MYU1J4C7UeemGMab4sheki1U4kSFQ1ZuXYkbK2+UGq+fN8XNmOif3j8lRVfsUOYnGomivrGeFBIumk+Vd8vke+U2Hr/7p0C9LuXeqXJmf69Fa8WqSasAwKi8Ub2HQTw73Xcn3hRHrDaGSasmob6x3vUYNzm2ZMXLsKWuJV73IOt9VVkPLeJNcSzdsrT1Gu/sc3r67MOGhYy1kQn695thGIYxDxunAtrrj5sJQ4iqOPXp1oeUuyirsLspTX5QjF8ZRAq8imKqo8RZUJVsO6LzqxhqQeS6+c1PAgnf6tNemNy0UDHYZPC6fxFBGCOyxpyMwQTAiOxQjGHrnmTWRJnx6b4HKkazKUNbZv5EqK6Hvmv8P7oCa1YAnw8AzvsI//X8Kdz2re+TxkOhvf5+MwzDMN5wWG8HxURVS4phmEACdQ11yOiWgYMNB7VzyezXd1Oa/DARFmm/vkiBV8l78wrny0zPxPSh03Hi1AnEamO+51GpeiwKf5YtXhRUrptfyO0dQ+/AgtgC6XOaDHU1We3XDa/775/eHzcMvgGLNi8CoB6CLRMKKhuuKSro5AwD1t0wkA2xpq6JXukFXrKtWwFcJdd69Qertc9tunq47HpIWuO7/gOY8uWcf/Ur1cJxMAzDMGc27DkV0N53XnW8ezKGYdGIIix5cwkAOY+gm9cBgDCn0I6M14iCSW+CF6KCPpRCKCoeNkDfk5jq+bHkouL9CulwZsCs59RUaDblOm5Kvk4YqUooKNXTX7mtEtNfme4ZYWHHVISDqvfQbw4BBC7bTqj3UZJfgvJ3yqUqOvvJPvW6G6duRDgtbDRCQiSLTkyv8Rbt/febYRiGaQ0bpwI6wo+bTNiZrNJgUT2tGvWN9VKKs5f3bfrQ6WQPmWml0WQuFwUdQ0827NlixfgVmHTpJKVQROr87Jy5E5v3bjaq0MqGMwel0FINtqBafKQ6D11kEMtuZpnaLNAJsVbZFAtKnkznWlPHSp0/69oWJiIkZN7lIDcGOsLvN8MwDNMSDuttB+gquTJhZ5QWNHbsoX3htDA5rMsvvFAmdJNavZiKyXYTInTD6pxhdAeOHyAVjnKrdktVOKnzk1Wa1SIP2YRCKxPOHGS1YUq13yBbfMi0ebKjWq3WL1zTT4admG5NpBNi7TaHMmHJeRfmGdt4EIUbW/+WMUwBsexT58/pDTfRvksm1N70Gs8wDMN0bNg4bWP8lFzZfEcKMkqDmxJEMYRFRhmV0rGlmDl8plHjw3QLHj9MGMLOdhqLX18s9MAUx4pJOYd2ZHPdnAWyTCi0lP6QFkErtH4Gm8kWHxSohrBpYw6Q38yibBZQN+NM9k0F6O901YdVmLpmqtGNB5O51lTZV8ldB8y076LKYhBrPMMwDNOx4bBeAUGGBfmFfLpVLk11uJVq+weVarN2ggqvA+hjm5c3D2MGjNHaEAii8qtOtVuveVUNH6aeXxa/cPDcaK7REFpZUhUWbhlwVR9WoezNMtfrAOrVaqljkwkNLb+uXLhWyHqcTVS/ttBZl0yFnurkWs8YNgMTBk3AqKxR5JB6ndx1QD1EO1W52yI4rJdhGKbjkdbWAzhboXgXnUaG5Zmp3FapfF1rN91SttzoHemNjVM3Yves3UqKmIp31vnvIMI1Adr9A8DCmoUY/eJoZC/JVp7vICq/Wh6YzPTMFp9npWehJL/Etw2L3VNrYSmvuoap1/n9sFoZrdy6ErHaGOJNcQDJe6ydVYvqadVYMX4FqqdVo3ZWLeZfMR9ThkxBfnZ+m3laZLzhqlRuq0T2kmyMfnG0q2FqXQcAitYVNc+b5XkGzL1XVNmsmFhBMkzdZM1vXfOTd1lDkfLuh0Puc2PN96xXZ2HTR5taySwVy0Ntl2PqHE8YNAH1jfXIWZqD0S+Oxo2VNwrXKK/5i0aipGtaFYTd3lM/gpBFhmEY5uyAPacCgtp5Vd3FN7HjLOONUMmH1alOqeqtlUHGm6DjMQnSe6DjgbE8tarFsajn9yPInE0ROjneqeqDKuPlki1wBNDnwJQM63qcTRWfEkUeyGJCZqlzvPjKxZi8arJScTXn/MWb4ihYXiAcm9saLXPPOtWnTcCeU4ZhmI4HG6cCgvpxoyq5XphoFyJSGlQNCBmFFkAgFU9FyISy6hiRIkO4aGQyryuV1W4t2ZHdIMnoltEq19Tv/F6kolWN37V1jGLVirgUVDcLqNVq7e1jUh1aG+S8yeK19k0YNAFlb5RJncuUzIrmuGJiBWZvmG0snFyngrBs+zCZMGTTsHHKMAzT8WDjVEB785xamOglKFJgdQwIk7lipvBSmjZ9tAkLaxYKv6+qOLspw+FQGPHEl+FxqfTAWAosdYPEnuuWszRHy4uW6lY+dkQyXTGxAn269/FVooP0hquuCRS5bM5h3V7laoCJ3ktdD1jQHmdZ3Na+mj01bRbNAvjPcTQSNW7cpyp/PVUREW6wccowDNPx4Gq9bcCxY8Djs/LQacv7ONVrO3D9jUDXf0idQyZP0QuvCp26LVAAWjsOFVTD+/yUpkEZg0jXVq3ga6/8ahW4sRumQPDVbt3yvGRy3Sw5kTm/GzI5mybbeVByvG9YfYNww0B2jmWQlS9qtVpKlIDovfarXkwhiPxrHdzWPp3qtjLtp7zWML85Xrl1JWksMjKkU0HY7Z5TXcWaYRiGOTNh4zTFDB8OvPUWAIQBDAI+HQQ80gBcsAW4c6Tw+yq9BGUNOlO9QHUVWiequ/Iipak4v5h0fR3FOZwWRt6FeZi6Zqrr30XGAfUZymwKqLTq0Nl0iDfFsemjTZ5/t2O6nQelJQp1wyCojRcZ+aIawjI5rKL3WqafshPTbWFUN6n8vifTysgNimEoWsO85jgo495rja54v4L0feueTWxoMgzDMAzAYb1CTIYFfWmYOjn9CE4bqFY4lZdnRmYHWsWga28heIB6mPHJUyeRWZqJgw0HXc8bQgiZPZOVLPcdDbbtgWrencozpCrvquHXssaBiXY1OiHhqjnefs/eVJEe+/n8QobtUEJqTeawmsBUqL/OJhXle6qyGmSudapbswSVv146thT9uvdLWe4ph/UyDMN0PLiVTIo4dszLMAVgldr/ZDhemVC
NA/cdwOpJq7XbJ6i0bgDaXwgeJSTT3lLDonJbJbJKszwNU+v7e4/uxfTLpgMItu0BNeTO3r5h9vrZmFAxQfoZurWscEO1VQf1/IB8uxpRO4+idUU4eeqkVHsLVVn1aw0jMwcU/NpvWBSNLEL1tGrsnLkT0UjU9/4p3mI3gnqvTbSFUV3TZL7nbGW0cepGZPX0bkETQgj90/v7en1V1zCLVLdmEbXdcd4zdW2bvX42qQUOwzAMc/bCnlMBpnZer78eWLtWfNy4ccCaNcn/X8czQ/GaZKVnoXZWbUqLvqig4nGUbcmxYvwKdOnUJdC2Bzotdtww+RxMewHt56V672RCKZ2VgymeZKpX0o0V41dg0qWTUlJZWlR8iOoBlPUWp+q91gnJVSmmZaIIl67XV2YN88u1TmVrFpl7VinmlYrieOw5ZRiG6XhwzmmK2LVL/ji/gkUi5Y7iNdl7ZC8eqnkI86+Y3+LzIIu+qCDjcQT8vRRefKXnV5CfnW80R9YJJe8uGomiOFYsnSOoWzxIJ5/QDxnvXVZ6Frmdh7Oljajoim4+4Y76Ha0MnKCqkPrlassUnQkih9UEbrJmYk3zypk1kUOvm2dMXcNEudam8/j9MJm/7gbnojIMwzBusHGaInJygK1bacf5QfWaUJWhBbEFGNx3cCvlKqiiL3aoHhTZMGMZg8hZiCUoI806t5/Rb/1b1nAyXTzIJFQ5nJc3D8X5xajZUyPdaxKgKbpeMu1s62PHb8MgyCqkXgacTNEZGYPB5Hsti+k1zXmc6vec6BiG1DWs7M2yVp855Ux3jZLxXFPvWXXzR7bSMcMwDHPmw2G9AkyFBR07BvTsKT7u6FGgRw/3v8kU1JAJs+qf3t8zpC2ocE+ZoiZB9e+0vpvqFgdeoXmi9g0ytGU/WTuyIdm64bf2c1l49be1/l13vA6TV00GYK7fo9t1dd8dnfB2wP3eikYUofDiwpQUp3EjiDXN+fzbQ7EeilyLNklUQ67tcrijfgeee/s57Du6r/nvJjeyVItJBVWEi8N6GYZhOh5snApITbXeJMOGAVu2tP7cKo4zadUk1DfWu37XqbzIVuqUad6ui0rVyiDynzK6ZeCZa59pE+PNzXCpeL9CuqJsUAotdcyi86rkL3s9ayp2RVenQqvMhoGJCssiVKtopzJPkYrOmiabC69iGAYReSDaKKAgu05TjEXTG1n2deLA8QOYvX628DtB/f6wccowDNPx4Gq9KWTLlqQB6oaXYVq5rRLZS7JRsLzAU4kDWlcVtVd3pOAW0mYpkNSKqBRUq1bKVPoUVZoEkobp3tl7lZUx3blxq/SqkiPoZZgC/pVmZbHkcPSLo6WqbapUGfV61hndMkhjteZRp0Jr9bRq7J61G7nRXNI17e+PakVZ6n3JHud1b21lmJpY02Qq1lKqIHv1uDVZTdZvDSsaWUQ6BzVEGaBXyXauuybXtpnDZ0pV/WUYhmEY9pwKCGLn9dgxYOrUZPGjnBxg+XL3UF7ZirNAa6/Jg398sN14fgD10DwL0/07VbyBQc2NbJ9LavEg3ZA5nf6M9nPIeu/cwnFzluaQPGcAtCu0AuphyX7XzeyZiRfGvYDPjn+mVK22vVTRdo5N5r3UWdNUPcFu30tl5IGF21zV7KnRWhfdrqHS49atUrju2ubrMT7ZGWN3v4fEoa8hNxd47DEgElG6jCvsOWUYhul4sHEqoK1+3FSVC7d8u4vKLmqRY2THL6xSxxjxQjU0UQVTLTnsvPz+y5i0alKrz02FxsnkCJpWaN3QacMhyvVUyeujbjroboLY70HGIFRpqSGj/Ou2NAkC6ntkak2zzkUxhv1ksK1DTp3jNLnxoCKHXpiQLTcZ6brqVfzjvbFo7vN9msJCWts1CmycMgzDdDw4rLedIlNxFvAOjwqnhfHE1U8gdPr/nN8BWobCyYTdqoR/qYYmquAXzqgSernq/VWYstrdYPYLSZYds1fo3+pJq1F6VWlzGLAofNlEyJxMGw47bmHAOUtzUN9Y3yKUWRZqeLepCq2yoaQyYZcWMiGkMuHtbpgO1Zd5j0ytaYB7WLzb2PxksF/3fqRxyDxT1flVDVk2MWYRJtY251r8L3/6zNUwBYCqqmS/b4ZhGObshFvJtFNklAuR8iLTFoZqjDxU85BS+Bel16e9tYsIkQfFREsOIKnofn/V933HotIWwW38Jto3mOpbqWLkyfTiVIEyPyY3QWTeH5VNFdl+j6otTUyHo8u8RwCw6aNN5HPryi9FBk1vlOnOr6n2XfGmOA4cP0A6loqJli/WWtzYCNz4mv+xVVVAY6PZEF+GYRimY8BhvQLaKixIthUMRXmhhMLJtGFxQg3/MhWaqKoMms4jdEINSTZlLJjI5/QyboKYK6sYVedOnQl3pwYlTDIzPRMvFNJzPylzptsKJ6gQ0iBC9amy4ZbHKEKnmjA1FH3nzJ3kHGaK4W9qfnVaEKm0cpGpFmwi3WLGDODJJ8XH3XMPsGyZ1qU4rJdhGKYDwp7TdorIwwgA0UgUFRMrfEMkZRUdnXBaqgfIhIdAxzsn6w2UDUekzKFJ76KsJ03GKJb1dFPmqq6hDlmlWYG28RF5lRNIoPGLRhQsL2j+XLQx4OaFl7kuhdUfrAYAo302VSIFKFDfI5nevZQ1TQQ1+uNn//0zTB86HcWxYq3IA9PzS5EzN1SKTQFJuae2TDKRbrFjh9njGIZhmDMLzjltAxobk7vHY8cm/9vY2PoYUQ5SCCGUX1eOMQPG+Bohsu0/KG1Y/KC2L9Fpb6HajsZCNpxPJsSakuOpO343KDl4gHybk6ByLusa6oy36nDilZ8ZjUQBAIcaD7X43FT7EK/rUlj21jJymx4qsdqYUt6wCBOGigV1TaNAlcGFNQuxILYA0Ui0WSYsqDm8gHpetkn81hQnWelZKMkvabHuPpD3QMpavuTSujORj2MYhmHOLNg4TTHjxgHduiXDmjZsSP63Wzf3AhA6xU9Uey1SjBEKmz7aJDSuqAaVhVVspDhWrKUMyhYSklHCKZ6WtlJmU9FjVtZg0S0gJcK5CbJx6kZ07dTV9VhTRa28rpvVk77pY8pQrtxW6Vpd2g3ZIjq6G1l2ZIxBEbIyWN9Yj0ONh1oZbNSxmCq+pQM1uqN0bClqZ9Vi/hXzW6y7pgoyUQpCPfYY7Z6oxzEMwzBnFmycppBx45KFHtzwqlCo4mHU9cz5GSMl+SWe17WzsGahUe+P3Qu8sGYh6TteyqCsIkZRwsOhMComVpAU2qoPPYSAOH5VdIxiqhzKGCx+1zNZVda+CRJOC3u2VbKPaemWpdoGqv26YwaMwZKr3WXOaxyAnqFsbVDVN9aTjpc16kxtZM3LmydlDIqQNZqt0Nv/fOc/MenSSdIhxamsQO7Eek+scHAR/br3E6ZbqFaCpkbqRCLJdjF+FBZyMSSGYZizFS6IJMBUQYXGxqSHVERDg/6PskwRG6tfpluuolu+KgBywRfTvT9lc6lExWVkCgl5FXGyeHniy5h46UThmOJNcXxl8VdQ11CnPX5ZUtVjVvZ5Oa8nyo
nVKRgjU/BLp5KtFyoFa1TkQKaIF7Xwj9e8e71H1DzGIIpAid5Xk2Mx1aNUVq6DkiWV90ulIJTXZi33OWUYhjm74YJIKWLuXPpxuhUKqR63qg+rMHXNVE8jwKswB7Xgi06xFQuZXCoLajsamUJCXkWcZKuK1uypIRmmGd0ylPO7vJTLVHl4rLm663d34WDDQanriQpF3TfqPqx8b6VyhWOZezPV+saOXeZWf7Aay94Sv+wqHnTZIl6ikE3RhoHbewQA5e+UG2sbJYPX+ypCZa5NtHSSrdwtuwHkN9du64WMga5aEGrt2uSm7dy5yeJHubnJUF72mDIMw5zdsOdUgKmd17FjkzmmIq68Eli/XvkyAOTa0DiRaQcjo/iVji3FzOEzpQ1U2Xsx5a31QsdrB9A9d0Uji1A6tlR6fH5KbuHAQiMeHionT51EVmmWpzHuvJ5syx77eQDaM5dt9WJ6TuxQZXvj1I0Ip4WlZI4qZ9FIFOXXlZPy11XapKi2jdJ9z5zn2fTRJlI6gI4XV6Wlk/U9mfmVfU/85tpEOyvZdlOphD2nDMMwHQ/OOU0RqaxQSM2RdIOa62blIM7Lm0ca0+z1s5VyUGU9GX269cGskbMQjUQDKbQjW8TJCdVzVzhQkJTlgqgIVtX2KiNFTwBaTmjnTp3xzLXPNFdiFV1P1ttnIZOf6Zcn6XXuoCqtUgpz9Y70xrS106QqbgN0ORPlSQeZv+5lmKpUGffCel+L84vJRdBU851TVR9A9j3xmmvVonlO2kNBKIZhGObMgY3TFKFSoVBVSaIUKoknvM9FVcjDaWGMGTCGNCZArQIpVcmeeMlEZHTLQF1DHcreKDPejsMUspWCqVCV3MKBhVpFTwA540HGONFRXmWMSJVWL0Eo1qL3NIEEDjUealXAifIeUeVM5MkyUVnarXLx84XP48SpE63WNVMGkxPRpkQCCUwYNAEP1TyEi8ouUjaMZTevVOaXKoszhs3wNJBNtrNqy4JQDMMwzJkHG6cpQrZCoa73wM8oKBpZRDoHRQmSrc4KyFUgpXqXVm9b3Sp81FQ7DpOYatngREbJlTEWnKgYD1SPkgnllaq4W2Oihk4HpVh7vaeZ6ZnoHent+h3Ke2RKzkx5xSyjrUunLri16lYULC9ota4F0f/XjtdcW1EkZW+UYUFsgdJmgCoq80uVxQmDJngayNT1IlYbE26QBrXhxjAMw5ydsHGaQtau9TZQ7RUKTXkPvIwCasgoRQkKOkyS4l2yzut2LUBOodVpYUL9rm7LBjdklVyKseBEx3igeJRM9M2kKu5WLmJGtwz06dbH87hUKNZu7+kvr/slDjUe8vwO1WOpK2cmvWKide2hmocC7/9rn+uiEUUA/KNIrOsCwfTkVZlfE8Ygdb2YtGqScIM0qA03hmEY5uykwxin9fX1uOmmm5Ceno5zzz0Xt99+O44dO+Z7/MyZMzFw4EBEIhFceOGFuPfee3H48OEUjro1a9cm28Xcc0+y+NE99yT/bRmmpr0HbkaBjHJDMbhMhUl6XUvUd1VXibfQ8VbLflclP80PFSVXdhPERIinH7IbHXZkjEj7s7p5zc2eFYVNKtai98j+ntY31mNKJa2Vj8jI0JUzU14xyrq25M0lpDHphlhba+CqbavI37HLtskevCrza8IYpK4Xzh65XmuD6kZIYyMwY0ayYOCMGcl/MwzDMGc3HaZa79VXX439+/fj2WefxRdffIHbbrsNw4YNw4oVK1yPf++997BgwQLceuutGDRoEP72t7/hBz/4Ab7+9a9j1Sq6UpLqan+pqnxIqaIJQKqSY7wpjqVblmL2+tnS46dUjXSr4FnxfoWR3p0mKpKqfNcUsr0WT546iczSTF/DzFmlNpV9Ut2qnt4w+AYs2rwIgFzlV+e5qS04ZNsE+V2T+h7JtghJRQVU1Yq7FjLrAgUT96xa0bxoRBFWbVulVd3Wier8qlYHBuSrVtvxq2AtU2WZ+5wyDMMwbnQI43Tbtm0YNGgQ3nrrLVx++eUAgHXr1uG73/0u9u7diwsuuIB0npdffhk333wzjh8/jk6daC1eU/3j9ut3f42b19wsPE7XAAD8lRsASgaXSkN6HePOhDEvas1gjXnnzJ3YvHdzq16OlO8G0YrECVXJrdxWiR/87gekfqv2eUtlywgvJdeEQu73rPp064PSsaXITM9Ubl9iR0a2ZVqEpFKuAL02KTItp6KRKD5v/DzwVkfUjRYKJjahVOfX+Z6MyhrVao3ymiuv9YKKznvuZZhamDJQ2ThlGIbpeNAstDbm9ddfx7nnnttsmAJAQUEB0tLS8Oabb+L6668nncf6gfIzTE+cOIETJ040//vIkSPqA5ekclsl2btgokDL+EvGo3BgYSsjAEgaXLJN1QH5hvSqDdwtrLA4kTHsF3ZIDVd19uzMSs/C9KHTyaGuQXu4rNA6Ny+dpeTKeubsIZQyc63bp9IKc3W7RzeZpZyb8pzrGuqQmZ5p5FlRQlnvfOVO9OrSC/nZ+dItQlKZx6cy77KyBgCzRsxCcayYtHbooLJ+hkNh1/xUyjolQlWu7e9J5bZK5CzNIXt1vdaLaCTaKpzXjf1H9yu9542N/oYpkPx7Y+OXBQIZhmGYs4cOYZx++umn6Nu3b4vPOnXqhGg0ik8//ZR0joMHD+JnP/sZ7rzzTt/jHn74YZSUlCiPVRWqIkcxtmRwMwJitTGywZV3YV4r5YRiJFnI5DG6GQyyxrAb1Bw2t2rAC2ILSN9NVY8/PyXXz1jywq7EU+e6anuVVDi4LG4yS1GSU92PkWJsHmo8hILlBchKz8LESyaSzts70hvPXfdc4KHiTrw2DNyQlTVrXXsg7wEM7juYtHboINpocY4tgQSp/dbSLUvRr3s/oxsyFLx+P6wcUS+vrtt6EW+Ko2B5gfCaO+p3tPL0U97zuXNp9zR3LrBsGe1YhmEY5syhTY3Tn/zkJ/jFL37he8y2bdu0r3PkyBFcc801GDRoEIqLi32Pvf/++zFnzpwW3+3fv7/2GOw0NiZ/eHfsAHJzgUd+IafIBe0xoSrnVR9WYeqaqZ7KCcUTYMJgkDGG3VD1QqsaeSKC8jrKeOa8NkFEcw24h4OLlGQdqDmdqe7HKGPk7juyD2VvlpGOfWniS1L9hS105UoGWVkDvlzXdLzjVPw2WpxkpWdhwqAJKHujTHhee+SLyQ0ZP3SjT5zrRbwpLoyQiEaiKI4VS7/n8aY43nz37wDc2yTZ2bFDeAjDMAxzBtKmxumPfvQj3Hrrrb7HDBgwAOeffz4+++yzFp+fOnUK9fX1OP/8832/f/ToUVx11VXo2bMn1qxZg3POOcf3+C5duqBLly6k8avgzLXZsAF48sk0YOATwBR/JSajWwaeufaZwJUdqnLupkw7lRORJ8CUweCm0Fr5Vyu3rvRVcGW8KLLIerpliufIIusR9NoECSIcXBUZj5FOCLiKYSdj5FrzkxZK8/TQWeNT8a75yVUQhqCMrLltIul4Ean4bbRMHzodudHc5vmo2VNDMk7tBLkhY0c3+sSJKEJC1L7L6
z1vlsF//DuAGcJx5OYKD2EYhmHOQDpUQaQ///nPuOyyywAAGzZswFVXXeVbEOnIkSMYO3YsunTpgj/84Q/o1q2b9LVNFlTwLgJx+hEMXOtroP7q+l/hpq/fpDUGCpRKjl75V4Bc4RJKAaXM9Ey8UPgCPjv+GVl5ljXydIuDWGNVrSJrH0NQVX+pBY1UN0FSXTApVhvDpFWTPPPj/IpvAfTKqKobBroVUXVkyTl+L7lKIIHekd4t2jGZ2AyhykLp2FLMHD6zTXtgUjYeVJ+lbBEnlU2QoKpoexVpumPoHaR0Bvt73kIGT3YGfv6P00d5t4xqaNDPOeWCSAzDMB2PDtHn9JJLLsFVV12F6dOnY8uWLfjTn/6EGTNm4IYbbmg2TPft24eLL74YW7ZsAZD8Ubryyitx/Phx/PKXv8SRI0fw6aef4tNPP0U8braROgX/IhCnf6C3j0v+cHuQmZ5ptMeeF5Q+epT8K0q/S9G1Ekig8YtGFCwvIPcele3fCXj36cvoliG8BwAoyS+R7vFnx3R/WzdEPRWB5P3unb1XyTBJVU6n1ae0YHmBb+EWNzmU7ceoIksWqn1bi0YWacmSHYpcOfsEU+5NBLV/p2WYpmJd88KtF7TbMSrPMhV9loMKV/fqkZsbpbk0rfe8lQx2PpnciAUAD0O/sJCLITEMw5ytdIiCSADw61//GjNmzMCYMWOQlpaGCRMm4Iknnmj++xdffIHt27ejoaEBAPDOO+/gzTffBAB87Wtfa3Gu3bt3Izs7O2VjByhFIE4rPK8tBq6Z6fhLcve97nidUgEKFfxC3qj5V1QjxK9q5KHGQ57Ks5uyrpN/5RUanLM0RxgK+kDeA3gg7wHl8Eid0Dyqt4VS0OiZa59B507eGyR+pCKnU6UCrFMOqTmNurl81rXcZNuPwoGFWPSdRUZCbWUrAANmQrBlCpXphrKnKpdW5VlaiNZC1YJGAC0tIRqJIt4UR7wprp2/Lvueu8rglPHAysrkhqwDk31OGYZhmI5HhwjrbUtMhQWNHZvMLxWSsw6YenXzPy1F7r5R92HR5kWBhXx64ab41eypMRK+6dejr2/3vpi2dhr2Hd3n+l2vcLkgQktVQkFlkQnNm3TppOZ521G/A8+9/VyLeRIp9jq9Qv1Q6XGrcn5Zw0A1jFhHltxku2ZPjXQYsi66/Tx1Q7BFsqYbyh5kjrYX9md74PgBUvsvE32W/eSCmpZgYm5k33NfGTzZObkhW/81FFz+Vfz2+YFGPaYc1sswDNPx6DCe045Obi7NOO12/n402P6dmZ6J2//5dizdsjSlhWYs3HbOTfQW9VMqpwyZglhtzNMwBbw9iUGElupWA6ZA9Ua4tW9wotI+QuRtoninTLT18UPWC6jbdklVlvxku/y6ct+NDpX58Xs2upWHdUOwVVsbUdY1HW+jExnvq31NjDfFsfj1xSnps+xX0Ijq1TVRpEn2PfeVwc4nmyOFHphWjUhkoNKYGIZhmDOHDpFzeibw2GO04w6sv6U5x6ckvwSJRAIlfyyRzq0LEkpOqp+STcnjUzUMUpl/tXPmTkQjUSN5cpQcvd6R3iiOFQsNNEqOKiXPzkImF042p1MGGUPJhDGsIksi2QZgdH5Ez4aSZ+wHdQ78cka9ZE3GKHO7nqkcbdVcT+vedNbCeFMcmz7aJLwOIJZ/a43aOHUjopGo6zGm8tdl3nNq/rGp3t0MwzBMx4aN0xQRiSRzafwoLAR6dE8qcl06dUFxrNjXe+hE18thR1SgRMUIsRSx6a9MFyqVfbv3JY3TqTwHqQjZlez6xnrkLM1RUmi9zi0qDAXQKwmb2rBQLS7lVkhF18Mss6FgwhiWlSWqwVQ4sNDI/FCejWohH5n3RNW404ly0DFsnWNXLXhlobohY83bwpqFwmsANPkPp4URTgsrb2bKFKaivue6BjzDMAxzdsFhvSlk7VrvdjL2IhB+Sq4fuiF8FtQ8LpnwULdzumEpTtY1ZcPlgg4tte7FVDihHb/wYWr7Bic6GxY6YZdB9KmkFn6pmFiBvAvzSD1u/ZCVpVhtTCo8U2d+ZJ6Nl1xZLWR03hOdd0EnysFE+L6JglcWsqHyMoW9ZMPTZebGHs6skr9Ofc9TkRrBMAzDnBmwcZpi1q5NtpWZOxfYsQMYkBPH92bW4O+n9iNW+2XBoVTm1tmRVTYpyolKhdXPjn+mbGQGqQiZVGjd8FJyK96vUBqvzoaFjHfKktsgK6ZSjMXy68px+MRh5CzNMVIkhypLldsqMf2V6aRzmohwkM1T9JKrqu1Vyu+J7rugk7tuInyfOoex2hjCaWFSNWyKoSaz+aiyoZbK/HUZVHLdGYZhmLMPNk7bgEgEWLbsS2/iMxUtFcOJl0wkn8tkWFQQhpeOFzg/O1/ZyAxKETKt0Lqh077BwsSGBdWIqvqwClPXTG3zFkdlV5UBgHGvtkiWZDdfTEQ4qHgO3eRK5z2ReRfGDBjT6u86UQ4mirJR59BZYVlXtmU2H1U21ChzE41EURwrFsqsc923xq+6pjpl8NgxYOpUYNcuICcHWL4c6NGDfDqGYRjmDIRbyQgIqhS9XwsFGUPORAsQiyDasFDPaeHWNiFVfQyduF234v0KUmuOaCRqVKEVtW+wY6rFjeyzC2IMXrg9GwDaLTlUxkFtb0O5PlXWg3hXZcdCbVMTjURRfl258dZGui2eVOVbV7ap8zYvbx6K84uVZNVvbhJINId0y1CSX4Lyd8qNbUINHw689Vbrz4cNA7ZskT6dK9xKhmEYpuPBntM2gOKhTAulIZ7wLkZh5daJKq3KEEQbFhMVVoPIX3SDkn81fSgtdNNZkEQ3PM7Py+TEVB4XJcczHAq7yqnI06674eAmE7I5nyaQDcEXVbGm9uw04Tn0gzIWqge4vrHeeGsj63tuXvTM9ExMHzodJ06dQKw25nkuiny7oRu+T523MQPGKK/tQeSvu31HdV3zMkyB5OfDh5szUBmGYZiOBXtOBQSx8yqzY+8V7haER4o6rtKxpejXvR9JiZS5V5NeYFkoBZvsXof6xnrpUGUTnjsvo2H60OnIjeYa9yyLPDAUnN47GSNMBqpHasX4FZgyZIrydVSuSfEeekVSAO7vu67n0Im1YVD1YRXK3ixr9XfneWW9+aa91s5xqxT18ZpDKiqeadG8mZwrnegPKrLjPXYM6NlTfN6jR/VDfNlzyjAM0/HgVjJtANWbWDSyKJB+kV5QeiKGQ2HMXj+b3DKCcs7ekd7YOHWjkXYjKni1k3BieUwsZHtHyrZ3cWvr4Na+oXZWLeZfMZ/Us1QWvzYZRSOLSOewy7uJ1h1eBNXj1sS5KiZWeMq2as9Okz1l7e1g3AxTt7HYW4SICLIXs+VF92q/JWp75DaHXn1CnVgVb6ntV6zxUluryJ7b7VrOHrMm5R+Qf7ZTp9LOSz2OYRiGObNgz6mAtvScVk+rTkkVVDuyngSKl0bWw5PKHFOZnEE7bvlXzjxTLyieu6C8ixS88jmdn9XsqZHKexTNta7HyKRHiiqDJq6pmz+q+76oVNO2j8WqVGxK
9lXQlS3nHMab4ihYXiC8rk4epijXNqg1QNbjTZUL6rP9+teBrVvF5xsyBHj3XdKlPWHPKcMwTMeDc07bAJl8sVTnW544dQLF+cUof7sce49+qRSp5hYCcq1dUm2UyeYMWuRGc1E7q1ZJoRV5LoLqo0ohyLzHoCsd61R/VQ0NNdFXVzfXW2eNUK2mbR/L+EvGo1eXXkZkXxXZ1jpOnHMYb4orV7z1e0+dRvCumbuwee/mVrIe5Bogm79OzVGlPtucHJpxmpNDOh3DMAxzhsGeUwFBV+sFzOSL6Y6lVVGRnpm487I7kRvNxYHjBzB7/WzheUT5VyIPj0renS7UnEEnbvdqwosm4wEC9No6OAk671EmP1On0rFs9VdqvrHzfnSuaScVlXd1ry0aS5B5lBTPcBD5xjoVb93ul7rxE3SEgf3+KPnrAIw+W845ZRiGYfxg41RAkD9uOgqtF7IhfhSD5P9v77zDoyrWP/7dBAIJEEIgQCCBUEIMGHrnBoJEmmI0dLgIKkEUMFGkeGkJqBelVwsiQX8UEUJRKQImykWkSQkQMGAixUgLPaFkd35/rLtuOWVO2d1E38/z8Gh255wzM+ed2Xln3vKg6IHLA824a0Hm+MxFBxZxKd689XBXegu90zpo6X9eOXZn6g7ecaDEpFWpaSjvZoE7A+Q4onRzRk4O9N5w41XoXKXgi8k272mi5XlKNn70bouUXCodJ4A+71YqWi+gXzoZUk4JgiBKHqScyuDqHzc9/SuVmsTyKiQr4lZwmexpOdlx9+kRz2mZI7wLMbWbDkaTEckZyXh7z9vcdVJTPyHc4feoxNfNEU/nKLXFFSeYei7+lcwpSiOHy9VFzw03JQqdu09ueSPero5fjX6N+ina+NHzFFhPNwk171ZKFinPKUEQBCEE+Zx6GCX+YlI/9Gp8lHj9tAC4NKci4Jocq2KoCQAD8OcPVZO3UY2y7IiW/ItK+l9MDuXkWImvmyPFIUepBT1k0BElftlSKFVGlOT65KmL2pyljvDkgraVcz18f8XkWki2lUSGVuoPq1fUab39VpW+WzlZPHDAbOI7ZAhw7pzZx/Tzz7Wb8hIEQRAlG1JOSwhSP/RxEXGKFnIWeBfZV+5d0bzwk8NdaUCUBIDhyR+qZEErhlplWQi1Shxvv24+sxnjd41XfRIjpoTxRjrWUzFUey9XBfXRqtipUUZ4NgyS2prnDiWBqbRuuKkJcKRFwddbqbfdsFt3ch1PV1jlUWmgMSGUKve88L5bXlksXx7YuJH78QRBEMQ/AFJOSwByP/TJMcmqIlUqUQhjwmJ0OdkRQ48FGQ+8p2Wzn5yNpLZJik87lZrMKVGWlZw2KlW8omtFI6RCiF2EZiG+OPmF02dKT2KElDC9Ih0rQem9eJUCLaeGaiPvalFGxJQ6XnNctW2WGj8Pih7IXg84y7laqwU9lXrHDTulG296nAJrjV4MaPOhdoViTBAEQfwzIOW0mMPzQ79g/wKuezku5JQqhHqZ7Fna5XgfV5/OAvxK27v/exd1KtWRzd2q1WROiWmpK9I6WPD28kZCiwSuezuiZsGpJnWH2s0JsUW2EpNWHhn0ZG5arcqI2rGtts08G248CMm50pNbtYpUXEQckmOSsWD/Aqfo0rZKvZqNN61m3lrdJLTIsh6KMUEQBPHPxcvTFSCk4fmh5zGHBJwXcpYdeuCvxbcFscW4ZeE3MGogYsJi4O3lDaPJiIzcDKzJXIOM3AwYTc75UG1Jy0pD2IIwdF7ZGYPSBqHzys4IWxAGwBzkpKZ/TbvyIf4huqWR4VXa8gvz0WddH6RlpTl9J7egBYCk7Umy/QDwLyInR09GTmIOJkVPQoh/iNP7smCAAaH+oaqUuPDAcMXXWLBdcKpBjSzyICZraVlpks90RE4GLcqW41i1KFtCcqQnSn2Ghcar0NiWQm2becbPssPLEFLBNXJuixJFyhaLXE3LmGadfwN9A5ESk4KcxBw7OVEr2/GR8chNzEX60HSsjl+N9KHpTvcWQ4ubhFZZdmf8AIIgCOLvBymnxRzeH/BA30BVCznLDr1ahVBq8S9WXmrhA0D1gowHyymGnDJiQUjJVLugFYJ3Edmlbhe7wC+AvkqckrpIoWXBqVUWHeFZZEs9MyUmhUsG9dysUAvvu8vOz1Y0XsXQ0mae8XPxzkUktEgAoL+c26JGkRKTqxuFN5CckYzNZzY7bQDERcSpkm0lGwa2zzSajKhZoabi3wQ9ZNld8QMIgiCIvydk1lvM4f0BT2yTiOSMZFUmsVpM+pSYtioxodOav08MW38uOcTMz/Q8Gbh67yq8Dd4wMuHFnitM/vQwcxVD64LTE9Fe4yLiULFMRWTkZgAAYsJiuE4OLRQHM0Ye09FA30AkZyRrNkUHtLWZd/yEB4a71M8dUK5I8cjViK9GIHFbop3/tsUkNjcxV7fUYbYImeFW9q1srRPvb4Iesuyu+AEEQRDE3xNSTos5vD/0k6In4fGqj6teyCkNxKLGV0vPRbwWnyiLcpfwVYKqCLF6pnrov76/rCIoZvLnCh9BtalebBecngoKZAuvrL2z5x0s+3mZXdnUY6mK/ET1SMOjFbkgOpa/9QpSo2WDRmkgNrWbFTx9rVSR4pGr64XXnT5Xm76FB7FNQltzY9s6Sf0m6LHxpkdAJ4IgCOKfCymnxRzeH3rAvAiZ2WUmrhZcRZBfEGr619Rt8euIGkVTrxNHPYIRxUfGo2KZiqoixLo61YMFb4M31vZeK2vy53hfrblwxSK3Dnh8AGb/OBsAROVw85nNHgsKZAuvrAkFgFKqSCg1qXVV30idqMsF0lJ6uqtlg4bnhD7QNxBGkxFGk1HVZgXv5pVSRUqt2bqrotTybBL6lvbFrj67cOXeFVnlXq+NN73y9hIEQRD/PAyMMe3JFf/G3L59GxUrVsStW7fg7+/vsXoILbaC/IIwuPFgVCpbCR8f/hiX7lyyfudqhWBN5hoMShskW251/GoMjBoIAMjIzUDnlZ1lr0kfmi6Z3sBxgW+LRTE8O+Ysfrz4o+SpieVeckpmTmKO07UWRQ8QVtTkFBs9+sIRuVy4PP1maauYkiv0DEvKEQCCyi9vn+gJb/+KIfXuHeGRI0seV3f0jdC7W3dyneLxKvcMtWMHEB8/jqiZx8Q2YaT6WkqubctqlStA2ZiWQ8s8IiQnADS9V0dcZSnAS3H5/SYIgiD4IeVUhuL042b5od98ejNWZa7C1YKromVdrRCoWRRpXdAqeW6QX5Bd/4gtcrUombwLWiHUKPdSyC3Ik2OSudLE8CycHRec7UPaY8/5Pei3vp+ombTSRa1W5GSNF15FQkqOGBgq+1YWNPe0lHH1gt9VmyFaNmiExo8jSucx3s0rob7m6Vc95Ip3TPOgdh6R2sgCoOm9iuEJRbU4/X4TBEEQfFC0Xg9QWAiMHg1062b+b2Eh33XeXt7IL8zHgv0LJBVTwDmyotJ0L3LIRb0VigapR6RZXrM6x/4RS4OgJUKsp1I9OMITYVNtLlwhbCOI5hfmo96ieoj9PFbSf1drqhml8Mg
aD479ITaO5KL+iimmgLK+URod24Ka8SqH1ujKlvGza8guBPoGCpZRGu1YSyRtnsi4StIPicE79nnmbDXzCE/EdL1TeqmVW4IgCOKfB52cyqD3zuuzzwKbNzt/HhcHbNokfa3cqYAYKTEpTkFf9DD7VXtyouXEUYtZndZTEz3R4xTZcp9FBxbh9R2v61IvNSdnSk6Q9Dw14nlnYrIm54NpwbY/ePwYXWlSq8ZcVeh6QHq8Kh0LWseOnqe6elskiCEoCxVCUFhUKGi+DSg7Ief1mVU6jyg5WQagy5yoVW61QCenBEEQJQ9STmXQ88dNTDG1IKeg6uHvZEGvhYFaRVPINFTOR9RynVazOj19vrTgDrNIWwJ9A3Gj8IZuvmRqNkr06nsl0Zr18K3TssB2p6+13PuTG69aomCrRYlC2a9RP0mFyRXmy2IIydXmM5s1m8QqlTUl88juX3dzBYHTa5zqJbdqIeWUIAii5EHKqQx6/bgVFgJ+fvLlCgoAX1/h73gXcbzotTDQenKidEHMG0xFDJ5FrrtQq9yrObVMiUlBckYyAGULZ6H3u+f8HkUbJXouQvU6iVFykqhlge1OX+tdQ3bB28tb1m9SLNiVJ064eNvGYwGipK8BfU4GHdFiGaJW1niemZaVxp0+Sy8LB3duFghByilBEETJg5RTGfT6cRs9GliyRL7cqFHA4sXC3+l5cmoLz4LWVahdEItFL5bzxQVcZ+asFjVmlEpOLW0XtEKpXqQWzmIbB30i+2D+/vnczwf0UW70PonhWdTrscDWekrOuzFliQpsgVeuPWHu6fhsvaId8/Q1AJeeEKvdsNM7+q5cGikl91eDu8ysxSDllCAIouRByqkMev24desGfPutfLmuXYEdO4S/U2LS6pizTwq1C1qt6HEi5WgaXG9RPY+k9HCnz6qSTQqhdvHWVWrjQMmJLe+pEQ+uOImR6w+9Ftie8LXWO7WRJ/zX1UQ7Lo4pj3jGnSuUOSWbWTybO0rmOjo5JQiCIJRSytMV+KcQHs6nnIaHi38nlTDekRD/EO6gL45mXpaoja7OS6kksqbQwsUSXdMWsf6x/VsqYX3S9iTERcQpCpgkl19Ub6WVN2KxpR6Oyo9QvzkiFwHYAAO8DF4wMvEIqoG+gVjXZ51o5FM18LZdSR/J9YdekZXjI+Od5MHia70mc42kfFii7Sr1tZaTawu8/SU0n+gxX1gi/wqNI7l5TGieEOprW19jNXOAFnhdF/SM4m1Bbp51RCpiulIXDDm5tSjDSqJEEwRBEH9vSDl1E7Nm8Zn1zpol/b3UIi6heQLCA8PtFmLLfl7msgWtVlyhaOi9yAX4cgI69u+l25fQe11vpxMfPU6ZeBem87rNw5jWY1S9P56NA4tiKrQRAADLei1Dl7pdFD9bCt62X753WVbh44VngV3TvyaMJqPsM20V4bSsNNRbVI9roa9kY8oRuU0eQJmyI3R/PeYLMYVy3cl1XNc7zhNCmw4ZuRmaNsSUYJuXWsgEXkipd4Uyxzt/BvoGYlmvZZJ5ncXmOrHNCSm55U0fRhAEQfyzIOXUTfj6mqPxykXrFQuGZIvYIk7oB96VC1pb1Ji1uuKUANB3kSu3IAv0DRQ9hQHgZIqoxykT7wJWrWIK8C9ok9omYf2p9U4Kll5mvI7wnCB6G7ztUuto3RCQW2AzMBQ+KrSLgir3TDULfbGNF0ezfDGk3qnak1kLlvkiIzdDk/+6kEKp5zzhig0xIXgiaQsp9a5Q5nj7b12fdaKbSTyWFGKbE1Ibhq6aJwiCIIiSC/mcyuCePKcMHZ68hu+3B7pkB1loocS7oOXxbVKbfoLHh1ZP01Cl/k9q06XIoUfkWiUBdtRsHCjpK0v0XncF1FIardmVaZPEfCGlnqm3r7XRZNQlPYic3ycPrvBf1ysnMOAeH0g1kbQdn8fjn8w7rt0ZLVouHZK7I6STzylBEETJg5RTGVzx41ZYCDz30llkHL6IBxVPAE+OBXweujQQkasXtGqDi/AqGp5Y5LoqOrIFrUFAeNNHuGLjwNX5CcXqZJHh7PxsLDu8DBfv/NUub4O3qA+s2vpK5eOtWq4qhm4aikt3Lil6pt4Kkp7vSkymeP3XhZ4N6LMxoDV/KOB6uVa7oSW0CSgle9n52fj48Md2sqcm/Zbe0aKVRt0tLATGjQOys83xFmbN4rMe4oWUU4IgiJIHKacyuOLHTc98gmp3o/VYpOmV1oPHBM4Ti1y988o6okf6BDXpI7RuHLg6qqlYXRxlpGaFmhjRYgTCA8Nx+d5lO1NeMZRsCMgp9mqVTFcs9PV8V0IyBYA7Urgjem1kyG3GKI1CDegv12o3tHg2AbXOke6IFq1kfAlbEZndWzZt4rqFLKScEgRBlDxIOZVB7x83PfM0qj0Vs71eyyJNzwWL0WRERm4G+q3vJ2pu7K5FroXicnKqZgPClRsHWlPDKG0Pj5L9oOiBrgqfK5/pKtNSV7wrx/srMad2RA95F/tO6Vzoqr5SuqHFMw6VmAkrNQl352amLWKKqQW9FFRSTgmCIEoepJzKoPePm14LU71OX9Uu0owmI5IzkvH2nrdln8GrELgqAb1Y/eXK8yzIAn0Dcb3wuiK/PHdsQOi9caCXr5jS9vAq2SviVuhipu6OZ7rStFTru5K7vjj6r6udC13hA6k1B7FQHdWYCbsib6heJ86FhYCfn+V6g2i5ggLtJr6knBIEQZQ8KFqvm9EjWqSWyImOKIn8a4HHxMwW3miRSvrG0f9Qif8VwJfrkydy5se9PgYA0UA5WiJuqonoakHPqKQ8fcWDmvbw5sIFoFsKDt50I2qf6cr0GlreFY9iKDRf8Pqvy80DauRDy1yol1zboiTqMU+0WqU5Si1ojTYshF5Rd5976SyA+rLlxo0DFi9WW1uCIAiipELKqZvRIy0C74J90YFFqFaumqzCqWSRpsbEjDcnH2/fZOdny54m6JGyBeBfkAkp+JvPbFa9kNO6AeGqND1qUduezaclbP9suHLvii4KX1pWGhK+SnD5M4tbeg0liqHjfGE0GTVvDCiRDwDWsXb53mW35S3lgScfbVJbczssAY6kcuOqVTJdNa7VbGbakpaVhh0H/MCjnGZna6wsQRAEUSIhs14ZXOFzGjqrAfI2vA7k1wcCz1qj9QJ8Jn1qAvXoGe2WZydfbSAWHjPa/MJ8XfyvlKDFV0vNdbzmgbuG7BLMTVjcou2qMTNOy0pD73W9ue5vuU6LL6HSFCB6PNMT6TWE6qDVP9ld/uspMSlY9vMyVdFw+zXq57a+lpMJXvNlpX7vnoiibUFOlq1ytmYCcHC07P1GjdJ+ckpmvQRBECUPUk5lcE2eUwZ7XxsGRGyCYaB5Ia7XQs4Wd0eiVBtcRC7XolheSSk8kYtTCh6FhHcDItA3EMt6LXNJ+gg9URqhVslGSKh/qN1i3BUBpGwRUgA8pWTq8Vw9/eDVKumujowtpNS6MnUXIB28iddHlicXtNT17oJH2bbK2UMf4N371lo7Y/591NvntFy5cnj06JG2GxIEQRCqKF26NLy9+d
YnZNbrRv6KUCjwg3zmWZRZvxWr1hXILiyU+DVZUOqLKgSvidnk6MlIjklW9Qwpc0e1uRY3n96MIRuHuHVhKgbviQmvWV5+Yb6o+bI7TEd5lSOlZsZKfO0cTWfV+BIq9e3T45la0Rqt24Je/slaTD5dZYZqsbZIzkhW7LutVfEXkgml5u08ZsIWirtJuFV+fB4CEZuAM8/Cooj+hfkecXH65Ts1GAzIz8/HpUvC+YgJgiAI9xAQEIDq1avDYBAPhgfQyaksep2cmiMUSpUwv4atp77HzSL5xZCWtA5qIzm6KgWGEEILw3Un1+l2uuKpXJ2uOjFxRfoInvbwKkdKzYx5T9KS2iZhXrd5mtuix0m1O7C8y81nNmP+T/Odvlcj17zjenL0ZHSp28Ulp8JK5J0XHmsLsbGjl+LviNo5VKw+Cc0TEB4YXiJMwvec32Pf9jVpfyqo9spphyev4X/fBulSv9u3b+P111/HG2+8gerVq8PPz092UUQQBEHoC2MMBQUFuHLlCgICAhAcLL0hTcqpDHopp6NHA0uWcBRstRh4agwAvhQKSqLmWlDrf+VpP0Y15szeBm8YmVHwO3f5Z6nN4arWB9IdqEnfocTM2J0bIUqeJ+bj6w54x7tSuVaqGLrK6kDOpF8pof6h3NYWjr7OeqTpEkKpebstxcE/2bE+iw4swus7Xpcta3GtcJKzhz7AzjnW+As1es/H+XFndGvXjRs38MMPP6B9+/YICtJH4SUIgiDUcf36dVy5cgUNGjSQNPH1cmOd/tFwRx7M/yuKocUsKi0rTbBofGQ8chNzkT40HavjV3OfIFmi3XZe2RmD0gah88rOCFsQJvocCxYTM+CvhZoFx+ikFoVsTeYaZORmwGgSVhCVYDFndny2EJYyYoopYB/J01WkZaUhbEEYYj+PlcwFKVQXi1luoG8g17NckT5CCDnTRABI2p7k9M4t7anpX9Pu8xD/EKcFv9y7NsCAUP9QRNeK1kXWeJ9nUWBcId9SWBQmno0opXItNa6FkJuX1CIlHykxKVz3mNdtHlbHr0b60HTkJOYgPDCc6zrL2FEr27xoiaJtMRMeGDUQMWExLjm95pVpy7zGo5gC5v4VlDOfh8BTY2AY0hOGp17Domfe17VdRqMRPj4+8NXLRpggCIJQjd+fJqRy/v/kc+omwsOBb7/lKBh41vq/PH6itn5NRpMRc/bNkY12q8X/6kHRAyTHJGPZ4WW4eEfYj9FVJnFK/a96N+wtaProiKuUOqUnn0J1iY+MR8UyFXXJI6kXvKmMhNJ38Pol8uYCFUvXo1TWlOQedZV8iyGlMEmhRK7F/JOF0MN/XaoeQvIBAMt+XiZrtTGm9Ri7+ujt62yR7YzcDHh7eSs+xZSLF6A0/ZZeKJFpNfOapX89kULJYDCQKS9BEEQxgHcuJrNeGdztc4r/lLWmlbGF13xRS7RbJf5XNSvUxIgWI5z8nVxpEidVHyH/KycfJxFcYQ6rJPqrXF1caU6txlRQi2miUqQiwAJQJWtSbeZJAeJq+XZEjTk7wCfXjn1hyb25+9fdeHvP27o8Qy/URJ92la+zJaWVBSWbE8UpirZtfZT4wvPOa2Jzk7tMlK9du4ZDhw4hOjoa5cqV0/3+xZVhw4bh5s2b2LRpEwAgJiYGTZs2xfz58zXdV6/7uIOwsDAkJSUhKSnJ01UhCOJP7t+/j5ycHNSpUwdly5YVLUcnp27C19ccgdAcrdeRPxcFEZsEFVOA/xRES7RboRMvsYXL73d+R3JGMtb3W293cqskEqUcYgsY3tM3T55SKI3+KlUXJad6SlB7AqjFNFEpUidpYQvCFMuaXJulZEtv+eZF6ck+r1xL9UXDoIYuqZsWpUTNqZvSsaMkSrYtcpYnWtthQW+lTqlMK5nXpOYmT0S3JvjJyMhA586dcePGDQQEBFg/T0tLQ+nSpT1XMQUcPHhQ0YaEWJsJgnA/pJy6kU2bbNPJOBCxCRioXSEAxBf0606u47qe1/9K6cJFytzTETklgmdxo0Wp07oIVLJo51Ew9TaH403/IITeSr9cXwu964zcDMWyxttmMdnSU755MZqMuHzvMnd53s0Kub5Ijknmep6SeUkPc2g16WqUjB01aboA5ZsTatrhCnNyJTIdXSsau3/dzX1vT6W2IVxHYCBf/IPiAAXAIoiSCwVEcjObNgEFBcCoUUDXrsDIV4wInlkfhoG9BcvbBn4RQyiQhVDwDFf5X1kCr+iVL1Es+IuaQCxKgvDYPl9NwChblCzapepii2MALEvQF6WLP61BX5QExpJDbV8rlTU9At3oJd+8KA06A/DJEk9fLDu8DCEV+AJS8aDnmLad2yzm+3JBfHjHjtLgULaoCUTFG+BIz/6zhVdWN5/ejLAFYVym3oA5MBXP3FRYaI5k362b+b+FhVy3dzvuDoBmMpnw/vvvo379+ihTpgxq1aqFd955x/p9ZmYmnnjiCfj6+qJy5coYMWIE7t69y33/zz//HC1btkSFChVQvXp1DBo0CFeuXAEA5ObmonNnsxtBpUqVYDAYMGzYMABms15bM9kbN27g+eefR6VKleDn54cePXog2yb6Y2pqKgICArBjxw5ERkaifPny6N69O/LyxOUuIyMDBoMB33zzDRo3boyyZcuibdu2OHHihF25DRs2oFGjRihTpgzCwsIwZ84cu+/DwsLszI8NBgM++eQTPPfcc/Dz80N4eDi2bNki2+b169cjKirK2texsbG4d+8ed18TBKEcUk49gK8vsHgxsGMH8MFSbyx+ZhYAdQt9JYt7JRFQAeWLcT3MPV0RLVOJUqfXIpAnsnCgbyB2DdmlSMFUsqAVW1Ap3XQQQo3S74iWvnb1Rosez9QCb3Rei3wltUni3qzg6YuLdy4ioUWC3TMcn8m7AaFlTEspBVJzH++GnRBisu2pqNmujCDMK6vz98/nMue1/IY4BqayxfJuWj5xAX5+DEuWmIMFLllijsvw7LNKWuB69NisVMpbb72FmTNnYsqUKTh16hRWr16NatWqAQDu3buHbt26oVKlSjh48CC+/PJL7Nq1C6NHj+a+/6NHjzBjxgwcO3YMmzZtQm5urlUZCw0NxYYNGwAAZ86cQV5eHhYsWCB4n2HDhuHQoUPYsmUL9u3bB8YYevbsaReJs6CgALNnz8bnn3+OH374AefPn8ebb74pW8dx48Zhzpw5OHjwIIKCgtCrVy/rfQ8fPox+/fphwIAByMzMRHJyMqZMmYLU1FTJe6akpKBfv344fvw4evbsicGDByM/P1+0zXl5eRg4cCBefPFFZGVlISMjA/Hx8aBQLQThWsistxig1mRTqWmmq/yvLOX0MPfUw3RSzEyUJziMXj6FPH29rNcy2ZyZas2LpUwAHxQ9kL0ekF9kqzFNtKC1r5XKmh6nnu7yYVYSnVeN6SRvX4QHhutiSq52TEvJMCAcDOvS7Uvova63U+A3PcyHjSajR6Jmu9KcnMeMWSpXtC28m6mJ2xNx8aOFwJkQwTKbN5sV1D9j+XgULe4Parlz5w4WLFiAxYsXY+jQo
QCAevXq4V//+hcAYPXq1bh//z4+++wzq0/l4sWL0atXL7z33ntWJVaKF1980fr/devWxcKFC9GqVSvcvXsX5cuXt5rvVq1aVdT/Mjs7G1u2bMHevXvRvn17AMCqVasQGhqKTZs2oW/fvgDMivCHH36IevXqAQBGjx6N6dOny9Zx2rRpePLJJwEAK1euREhICDZu3Ih+/fph7ty56NKlC6ZMmQIAaNCgAU6dOoVZs2ZZlWwhhg0bhoEDzUH63n33XSxcuBAHDhxA9+7dBdt87tw5FBUVIT4+HrVr1wYAREVFydadIAht0MlpMUGpyWZxyzUJ6GPuqVWJ0LLLrcfpmi1aTxfVtkXuRDI7ny/pLs8iW23uRaV97XgSBkCRrOlx6qmnObMUvEFn/Mv4Y07XOYoXx0r6Qg9TcjVjWk6GR3w1QnLuc4xIrtV8OCYsBjFhMYrmQyVInRC70pycR6Z5FFNAfl6zvtNrV4Azz1qfIsTmzZ438XV1zlsxsrKy8ODBA3TpIrxxmZWVhSZNmtgF++nQoQNMJhPOnDnD9YzDhw+jV69eqFWrFipUqIBOnToBAM6fP6+onqVKlUKbNm2sn1WuXBkRERHIysqyfubn52dVTAEgODjYakIsRbt27az/HxgYaHffrKwsdOjQwa58hw4dkJ2dDaNR/H00btzY+v/lypWDv7+/ZF2aNGmCLl26ICoqCn379sWyZctw48YN2boTBKGNEqOc5ufnY/DgwfD390dAQABeeuklbh8Lxhh69OgBg8FgDa1eHFGy0NeiSMVFxCE1LhWToydjcvRkUdNSNYtxrQqZFiVCq0muKxaBahf3atviCX9CNSjpazElHYDLNloAYYVBD3NmnjbzcPvBbfRf31+xeaGaTSfHeUmJD57SMS0nwwxMNB2WGHooE67anJDbhHK1ObmUTCe1TeK6x+ToyfybqTvnwKyUSvv0jhvH9WiXofdmJS++vr663s8Ri1mwv78/Vq1ahYMHD2Ljxo0AgIcPhbMFaMExuq/BYPCYWaxQXUwmk2h5b29v7Ny5E9u2bUPDhg2xaNEiREREICcnx9VVJYh/NCXGrHfw4MHIy8vDzp078ejRI7zwwgsYMWIEVq9eLXvt/Pnzi1US7sJC8w9vdjYQHg7MmmX2Q1WCWkVKyFQu9Viq1eTN0Yw0LiJOsWmfFnNPtaaTepjk6uUzq8ak2PEeatvC60+YEpOC5IxkLvNuV8Db19n52UjOSJY0q8tNzJWVNaUm7XKRUdXKNw9KlQyl6Wu0piZSGjVW6ZhWmoaJFz2iKXsianZcRJzLzcnFZHrP+T2Y/9N82eu71O3Cv5maX5+rTtl8Bh4uw90B0CyEh4fD19cXu3fvxvDhw52+j4yMRGpqKu7du2c9Pd27dy+8vLwQEREhe//Tp0/j+vXrmDlzJkJDQwEAhw4dsivj4+MDAJKnkJGRkSgqKsL+/futZr3Xr1/HmTNn0LAhXyoqKX766SfUqlULgDnw0i+//ILIyEjrs/fu3WtXfu/evWjQoAG8vdXNw2JtNhgM6NChAzp06ICpU6eidu3a2LhxI9544w1VzyEIQp4SoZxmZWVh+/btOHjwIFq2bAkAWLRoEXr27InZs2ejRo0aotcePXoUc+bMwaFDhxAcrK8vkBocU8lYAkHExSnzsVGjSMkthN5s/ybWnFgjuOjkUQBsUaKQOSp0c7vORf/1/RUtnPXwy9LqU6hXqgctbXG3P6FaePq6ZoWaWHZ4GZeSziNrvIoFr58ZzzPV+AwrSWeiVuFyl587oFwZ1nux74jW++u1OaFkE0qPPMdqUjZdvXdV0ueUVzG26/PAs8A5yeIAzBu3nsSdAdBsKVu2LCZMmIDx48fDx8cHHTp0wNWrV3Hy5Em89NJLGDx4MKZNm4ahQ4ciOTkZV69exZgxYzBkyBAuf9NatWrBx8cHixYtwsiRI3HixAnMmDHDrkzt2rVhMBjw9ddfo2fPnvD19UX58uXtyoSHhyMuLg4JCQn46KOPUKFCBUycOBE1a9ZEXFyc5n6YPn06KleujGrVqmHSpEmoUqUKnv0zWtbYsWPRqlUrzJgxA/3798e+ffuwePFiLF26VPXzhNp88uRJ7N69G127dkXVqlWxf/9+XL161aokEwThGkqEcrpv3z4EBARYFVMAiI2NhZeXF/bv34/nnntO8LqCggIMGjQIS5YsQfXq1bme9eDBAzx48FfAmNu3b2urvA2iOU5hHwSCZ0GrVJHiMfec9eMsp+9cGfgBEFfoxBRlsYWzHrvcWk6U9AycoaUtShZUMWExLj0BlIKnrxNaJGBaxjTReyhVzIwmIwJ9AzGzy0xcLbiKIL8g1PSvaddmPYNiqd2ssO0bXtQoXEqVLC19o0QZ1nux74gluJEWuVdqDSGEkk0orSe2amQxLSsN/df3l90g4VGM7d7pk2OBg6P+/EPcommW88+RW3FXADQhpkyZglKlSmHq1Kn4/fffERwcjJEjRwIw+3Du2LEDiYmJaNWqFfz8/NC7d2/MnTuX695BQUFITU3Ff/7zHyxcuBDNmzfH7Nmz8cwzz1jL1KxZEykpKZg4cSJeeOEFPP/884KRcFesWIHExEQ8/fTTePjwITp27IitW7c6mc+qYebMmUhMTER2djaaNm2Kr776ynq62bx5c6xbtw5Tp07FjBkzEBwcjOnTp0sGQ5JDqM0TJkzADz/8gPnz5+P27duoXbs25syZgx49emhuH0EQ4hhYCYiJ/e6772LlypVOzv5Vq1ZFSkoKXnnlFcHrXn75ZRiNRnzyyScAzOYZGzdutO6+CZGcnIyUlBSnz2/dugV/f3/VbSgsNIfJl2P14TSM/55vEWFRiAAILu5tFaKM3Ax0XtlZVd0tP8I5iTm6Ki5iCp2l/uv6rEOVclW4FpC87Usfmi67qBRayIX6h4ouAo0mI8IWhIkuNJX2n5a2WOoideoW6BuIdX3WcQUw0rqIl0Oqrx8UPcCgtEGy91gdvxoDowYqfo7QuNJLjuRkmzcg1sivR+JqwVXN9dEDrXK55/weXLp9SXRjwLaslAwbYECgbyCuF1532tiQwjIO53Sdgze+fUOzhYNW1mSuUSzfasajGlmUm9MAcxTftb3Xok8j+U0Up3e6Jk0kKBIDYFBsSSTGtWvXcOjQIURHR9sFEOJFyW8soQ8ZGRno3Lkzbty4IRopmCCIksn9+/eRk5ODOnXqoGzZsqLlPBoQaeLEiTAYDJL/Tp8+rereW7ZswXfffWeXhJmHt956C7du3bL+u3DhgqrnO8Ib3GHQyN+5g+AoCc6ixZTNFYEfeE5y3/j2DUTXinYKECUUjEVNwBsxlAYx0jtwhpa2SAVtsZBfmI/Yz2NlI/+6I7+fVF/rZVanJLiUHifwekX5jI+Mx8XXL6KKXxXRMu4IXmVBSd/YjtHp309H7fm10XllZ/x747/x+o7XMXH3ROQX5gsqVjyBhz7u9TE29NvgNPdV9q0sed2Axweg//r+mvMY64Ea+RYLmicWoEqtLPL4/RqZEVXKicumLU7vdGA8ELFJoKR+
iqkeuCMAGkEQBGGPR816x44dK2uGUbduXVSvXt0p3HdRURHy8/NFzXW/++47nDt3zmnnrXfv3oiOjkZGRobgdWXKlEGZMmV4m8ANd3AHgWARUiZzvKZ5epjK6ekL5orch3r4ZVmwLAItJxXrTq4T7Vu9A2doDVgjZgLoiJTJsZ5mymp83QB9zOqUmqLqoRDzynZGbga8vbwlx61PKR989PRHkqc3rg5eZUFJECu5Uzc5OeI1YxWa+zaf2Sx43dyuc/H6t6/rYrKtB3qZjUrNiYG+garmWVdFLrd7pwPjgYc+KPf9R6jHuiO6WXVVwQFdjasDoBEEQRD2eFQ5DQoKQlBQkGy5du3a4ebNmzh8+DBatGgBwKx8mkwmuxxbtkycONEp0l1UVBTmzZuHXr16aa+8QsLDzcGPZAk8K/ixlH8dj/+TkiArYvAuTnlMz7TkPpRSmPSOpMljCuqKwBlafcwsC6qM3Az0W98P+YX5TmXEFuTFwe8S0K6kG01GLDqwSNHiXA+FgVe2Hd+LWL/oHSFWLTx9E+gbKBhd2REeOeJRCoTmPqHr2oe0x9JDSzUHTdMTrfINyM+JiW0TueriKLOuCgYk+E5nFH9FTw8fY4KPmJgYj6WaIQiieFAifE4BoEePHrh8+TI+/PBDayqZli1bWlPJXLp0CV26dMFnn32G1q1bC96Dx+fUkdu3b6NixYpu8Dn98zX8pyzgI55rjMe/Tgwx/xk5lPhMusq3T4lfJwDNu9xK/LR4fOTU1k2rz6caP8Hi5HdpuY8SH2Cxa6SwHVda/czU+nfL3V+NLOjtMyzVNwwMlX0rK85B6mp/WS2yoBWe/lcj3xYzXrGNJ8D8Tqr4VVHls6xkTivOiqVWn1OCIAhCP3h9TktEtF4AWLVqFUaPHo0uXbrAy8sLvXv3xsKFC63fP3r0CGfOnEFBQYEHaymOr685XYxYtF4AZh8cCcUU0GaeK3YCE+ofigGPD8DsH2cDUG86qMQUVOkJVUZuhqJTDy2LXaUnh7wnIGLmhlKniFp37HlP8S7dvoSM3Azk3cnDqaunNN9bz9NXpWZ1YnIohe24EhsnNf1rIqF5Ah4UPUBGboZoHdRaKcj1i1JZ0Cu1kS1Sp7jDmw+XjK4shitTx2iVBa3P5ul/NfLNo2wzMGvwqWsF1xRZAuhxqksQBEEQaigxJ6eeQq+TUwti6WSeecaIn2Pcs1MttpuvZgff9p5KI9bynlClZaUh4asE0RMCW/Q49VB7cijVfwB0OUVUCm9bgvyCuE5YbJE68eJ97q4hu2T9LpXAE2XUFqlxZTtOsvOz8fHhj3HpziXr91KKnlorBQtaTxP1OrUWQ2gOWXdyHVf0WUf0ODkVqg8A3WRBKa7qfzXKdlKbJCzYbw5GpNQSQMtvQnGATk4JgiCKD3+7k9O/C5s2mU18x40zB0kKD8efQSC8kZbFt1Ptqhx9WgI/qAlwxONLp3Qx5s7AT47lxPoPMC+SPRGIhfcUT4li6gm/S154ooxakDsBsoyTtKw0QT9KqaA+YrId6BvItcliiXirZizynlo/Hf40frz4o6p5RGgOUTr29MoTKXZCmdA8QTdZUIKeVgO895Ui7rE4RNeOVuWzTMGACIIgCHdDyqkH8PUFFi92/pxXWdPbVM8WoUWnqwIcAdKLHyWLMT0TomsJBiLUf0pMkqNrReu6EJQyz1MD7yKetw8dFTU10YBtUWIiyrM416JoCMm20WRE7OexsnUTinjLO855N4pC5oXYbUponUeUmDPrpQxKuRIoMTHWM7iU2kjkWu/riO2c6O3lrVrJ1OpaIPX7IbxRq/pRBEEQxN8AUk6LGVLKmp7pPXhxR8RascWP0sWY3EKX9yRKrxQPFngVps2nN2PIxiG6bzyIbXqoMeXlXcS7yu9SDl45nNdtHsa0HiN7f62KhqNsG01G1RFv5ca5Rb43nNog2SYLju9e6zyiZCNED2WQJ4cnD7yywIsr0rAoLS+k/Lsr4iyvOfxnb8Xbubh8+y2wZAmKVZ5TgiAIwv14eboChDNCidbVJlNXgyUS5OvbX0fvdb25EtZblBHLosgRAwwI9Q9VdLLJuxgL9A2UXVCnZaUhbEEYOq/sjEFpg9B5ZWeELQiza4MFp4TxDu0AlJ348CpM8/fP5+prNcRHxiM3MRfpQ9OxOn410oemY163eVzXTo6ebL3m7JizCPQNxJrMNcjIzRCVN6k+lMNW4VMKrxzyKiOuymFrqYtj3SxjWek4t5XvxQcFzDI40GMesWyE1PSvafd5iH8IUmJSrHKUk5ijeSNN6eaVI0plgRfe8X753mXZcaTmvoC5v13lxy6F4zw7LWOanWIKmOe03s8ZsHmz8AbC5s3m2AyEMMOGDVOUcUAtqampTnniCfUYDAZskth1iYmJQVJSktvqo5SwsDDMnz/f09VwG7m5uTAYDDh69CgAICMjAwaDATdv3pS8riT1k5xMehJSTksISk5wtGC7uJi/f77oswD7RazeCh3Avxhb12edrGLaZ10fRYqf1CJb6aJPTmECAG+DcL/Y9vXDoofIyM1QtKC1e4bDpodj28ToUrcLBkYNRH5hPuotqsel4APifRjoG8j1XEsEYSXt9dTGgpoctmIKnFQqFqFxLibfalA6j1g2smzfkdBGSG5iLqZ2mmq34aYVNSeJjn+7IuIs73h/fcfrXONIyX0r+1bGriG7dFH+lcIrh+xhaeDMs5JlNm82m/wSzixYsACpqamersY/guK8eCfcS/v27ZGXl4eKFSsCEN+8OXjwIEaMGOHm2qkjLy8PPXr04C7vzg0rMustIbjKVMwWJcGH1AY4AvQ3r5UyVdPbZ1CND6hcWgYGBiMTV7w85SNoa76s1qRci9/l6zteV9VeXjnkQW8Tb9s6CsnWupPruK63jHO1QXJ47y+FnMm/K01IjSYjLt+7zFU2JSYFy35eplkWeOExb3Yc7zwm1TzpXT7u9TG61O1ifobOOW6lUCSHO+cAHNYU48YJx2b4p2NZHBOu4+HDh/Dx8fF0Nf5WGI1GGAwGeHmVzDMxHx8fVK9eXbZcUFCQG2qjDzzt8RQlU0r+gbjiBMcWtYtcoQBHjqcmtrv47jav1XriLGRirQap07Kktklc9xDzEVRr8svbvwA0mZQ79mFMWIzsCRCgvL22p3iBvoE4N+acqBzyImeezMDQu2Fv7Dm/R/NJtreXt+JxrtS0NciP74dTrh5qrBH0wjKHvL7jdclyFrPdSdGTJOckVyA23nksJKTkiNeiQ8k8qweK5DC/Plex7GwNFdIZoxHIyADWrDH/16jde0aS9evXIyoqCr6+vqhcuTJiY2Nx7949AM5mvTExMRgzZgySkpJQqVIlVKtWDcuWLcO9e/fwwgsvoEKFCqhfvz62bdtmvcZiovjNN9+gcePGKFu2LNq2bYsTJ04I1ic3NxdeXl44dOiQ3efz589H7dq1YTKZBK9bunQpwsPDUbZsWVSrVg19+vSxfidk/ti0aVMkJydb/zYYDPjggw/Qo0cP+Pr6om7duli/fr1dvQwGA9a
uXYv27dujbNmyePzxx/H999/b3ff7779H69atUaZMGQQHB2PixIkoKiqy68PRo0cjKSkJVapUQbdu3RAWFgYAeO6552AwGKx/A8DmzZvRvHlzlC1bFnXr1kVKSord/bKzs9GxY0eULVsWDRs2xM6dOwX7x5GioiKMHj0aFStWRJUqVTBlyhTYZnu8ceMGnn/+eVSqVAl+fn7o0aMHsm0GSnJyMpo2bWp3z/nz59vV3SI/s2fPRnBwMCpXroxRo0bh0aNH1jJXrlxBr1694Ovrizp16mDVqlVOdZ07dy6ioqJQrlw5hIaG4tVXX8Xdu3et31tO2bZs2YKGDRuiTJky+N///ofSpUvjjz/+sLtXUlISoqPFN3hv3ryJl19+GdWqVbO+46+//tr6/YYNG9CoUSOUKVMGYWFhmDNnjt31YWFhePfdd/Hiiy+iQoUKqFWrFj7++GO7MgcOHECzZs1QtmxZtGzZEkeOHLH73tasNyMjAy+88AJu3boFg8EAg8FglVtHuT5//jzi4uJQvnx5+Pv7o1+/frh8+a+NVcs7+/zzzxEWFoaKFStiwIABuHPnjmh/WPp206ZN1vHVrVs3XLhwwa7cBx98gHr16sHHxwcRERH4/PPP7b63tQywjKW0tDR07twZfn5+aNKkCfbt22dtv1ibpca5Wkg5LQYImcY54gqfTlvU+m9JBTiyLLoBc8Ta13fw+7Ba0Gpe644TZ17EFPe4iDhV93O1j6Clf/U2KVfrj8rrc2lZjNdbVA/5hfku21iwKBrzf5qv2+Jf6TjnldvRrUYjfWg6Lr5+UfM84k7/d0d4TUcdN6/02mSSQ2qDZF63eVwWEnLjiGcD0N0bB4rmz8CzXMXCw1VWRmfS0oCwMKBzZ2DQIPN/w8LMn7uCvLw8DBw4EC+++CKysrKQkZGB+Ph4SKWkX7lyJapUqYIDBw5gzJgxeOWVV9C3b1+0b98eP//8M7p27YohQ4agoKDA7rpx48Zhzpw5OHjwIIKCgtCrVy87JcVCWFgYYmNjsWLFCrvPV6xYgWHDhgmehh06dAivvfYapk+fjjNnzmD79u3o2LGj4v6YMmUKevfujWPHjmHw4MEYMGAAsrKynNoxduxYHDlyBO3atUOvXr1w/brZPeLSpUvo2bMnWrVqhWPHjuGDDz7A8uXL8fbbbzv1oY+PD/bu3YsPP/wQBw8etLYxLy/P+veePXvw/PPPIzExEadOncJHH32E1NRUvPPOOwAAk8mE+Ph4+Pj4YP/+/fjwww8xYcIErrauXLkSpUqVwoEDB7BgwQLMnTsXn3zyifX7YcOG4dChQ9iyZQv27dsHxhh69uwp+M6kSE9Px7lz55Ceno6VK1ciNTXVzlR82LBhuHDhAtLT07F+/XosXboUV65csbuHl5cXFi5ciJMnT2LlypX47rvvMH78eLsyBQUFeO+99/DJJ5/g5MmTaNmyJerWrWunJD169AirVq3Ciy++KFhXk8mEHj16YO/evfi///s/nDp1CjNnzoS3t3kOP3z4MPr164cBAwYgMzMTycnJmDJlipPp+5w5c6xK56uvvopXXnkFZ86cAQDcvXsXTz/9NBo2bIjDhw8jOTkZb775pmj/tW/fHvPnz4e/vz/y8vKQl5cnWN5kMiEuLg75+fn4/vvvsXPnTvz666/o37+/Xblz585h06ZN+Prrr/H111/j+++/x8yZM0Wfb+nbd955B5999hn27t2LmzdvYsCAAdbvN27ciMTERIwdOxYnTpzAyy+/jBdeeAHp6emS9500aRLefPNNHD16FA0aNMDAgQNRVFQk2ma9xrkjZNbrYXij4fKYdGnxn1KqnPGaMwq1zxFXmte6+sRZKUIRM9VGtgWUp6UQMvWT619XKPhqIwgLtdcdUaxt+2jz6c2Yv3++KtNMOZSOc1657d2wt7W/tM4jSjYr9EyNpMS6Q6vZrhqTWKm5fGDUQKzJXMP1bJ5xJBZ511U5VuVQNH8+ORY4OOrPP8Q3p2bN0lYnPUhLA/r0ARz1wkuXzJ+vXw/E63wAn5eXh6KiIsTHx6N27doAgKioKMlrmjRpgsmTJwMA3nrrLcycORNVqlRBQkICAGDq1Kn44IMPcPz4cbRt29Z63bRp0/Dkk08CMCtGISEh2LhxI/r16+f0jOHDh2PkyJGYO3cuypQpg59//hmZmZnYbBty2Ybz58+jXLlyePrpp1GhQgXUrl0bzZo1U9wfffv2xfDhxMN/ZAAAQFZJREFUwwEAM2bMwM6dO7Fo0SIsXbrUWmb06NHo3bs3APNp0fbt27F8+XKMHz8eS5cuRWhoKBYvXgyDwYDHHnsMv//+OyZMmICpU6daFevw8HC8//77Ts8PCAiwM39MSUnBxIkTMXToUABA3bp1MWPGDIwfPx7Tpk3Drl27cPr0aezYsQM1atQAALz77rtcvn2hoaGYN28eDAYDIiIikJmZiXnz5iEhIQHZ2dnYsmUL9u7di/bt2wMAVq1ahdDQUGzatAl9+/bl7tNKlSph8eLF8Pb2xmOPPYannnoKu3fvRkJCAn755Rds27YNBw4cQKtWrQAAy5cvR2RkpN09bIM3hYWF4e2338bIkSPt3sujR4+wdOlSNGnSxPrZSy+9hBUrVmDcuHEAgK+++gr3798XlDkA2LVrFw4cOICsrCw0aNAAgLnPLcydOxddunTBlClTAAANGjTAqVOnMGvWLAwbNsxarmfPnnj11VcBABMmTMC8efOQnp6OiIgIrF69GiaTCcuXL0fZsmXRqFEjXLx4Ea+88opgnXx8fFCxYkUYDAZJ09jdu3cjMzMTOTk5CA0NBQB89tlnaNSoEQ4ePGjtX5PJhNTUVFSoUAEAMGTIEOzevdu64SHEo0ePsHjxYrRp0waAefxGRkbiwIEDaN26NWbPno1hw4ZZ2/zGG2/gp59+wuzZs9G5c2fR+7755pt46qmnAJhlvVGjRjh79iwee+wxwTbrNc4doZNTD6J0h1vPID2OKFlc8C5ilQRqcZV5rd4nzjyn3Eqv1RLZ1gKvj6CYqZ9U/7pKwdcSQZjH51LvUzxvL29E14rG+qz1gt/LPY9XdpSMczXyrWUeMZqM2P3rbtHvbdl8erOupqW81h3zus3TZLarxiSWZy53x0aZuwLn2WI0GWE0GbkDnRl8HgERmyTLxMV5Pt+p0QgkJjorpsBfnyUl6W/i26RJE3Tp0gVRUVHo27cvli1bhhs3bkhe07hxY+v/e3t7o3LlynYKbbVq1QDA6fSrXbt21v8PDAxERESE06mkhWeffRbe3t7YuHEjALNpYefOne1MRm158sknUbt2bdStWxdDhgzBqlWrnE5uebCto+VvxzralilVqhRatmxpLZOVlYV27drBYPhrjuzQoQPu3r2Lixf/GistWrTgqs+xY8cwffp0lC9f3vovISEBeXl5KCgoQFZWFkJDQ62KqVAbxGjbtq1dPdu1a4fs7GwYjUZkZWWhVKlSVmUEACpXriz5zsRo1KiR9eQRAIKDg62yYXmObX889thjToFwdu3ahS5duqBmzZqoUK
EChgwZguvXr9u9Yx8fHzvZBMynsmfPnsVPP/0EwCxH/fr1Q7ly5QTrevToUYSEhFgVU0eysrLQoUMHu886dOhg7TcLtvWwKFi2bbaYt1vgfWdSWGTBopgCQMOGDREQEGD3zsLCwqyKKWD/PsQoVaqUVbkF/npHtnIv1C9ysmLbT8HB5t8iqbroNc4dIeXUQ6hdVMuZdKmFJxKkBd5FrB4+rFrRM3qrFj8uuWvFFIbi4CPoSpNytRGEeX0uLYvxjNwMTZGOLahd/CuVHd5xrla+1cwjlja8vedt0TK26J0aiXduqFaumupTQTXjxGgy4rVtr8nO5e1D2rvUNQNwvxuDRSZiP49FfmE+1zUh/iHYsJEhLk64H4pLntM9e4CLEnshjAEXLpjL6Ym3tzd27tyJbdu2oWHDhli0aBEiIiKQk5Mjek3p0qXt/jYYDHafWRQeMd9QHnx8fPD8889jxYoVePjwIVavXi1qigkAFSpUwM8//4w1a9YgODgYU6dORZMmTaypOLy8vJxMlZWap+qJmHLkyN27d5GSkoKjR49a/2VmZiI7O9tOufEEvH0qJC9KZCM3NxdPP/00GjdujA0bNuDw4cNYsmQJAHMwKQu+vr52yjYAVK1aFb169cKKFStw+fJlbNu2TVKOfHXapdLaZldSnOqmdN6QG+dqIeXUQ2jZ4XaF/xTP6V1S2yTuXJd6+rBqRY8TZy3KHe+1QgpDcfARdEWaIDFc5XPZb30/XU7wlCz+rfmCVfhaA/zjXK18K5lHlKarkQv8k7gtEbt/3e2SPJ+uCAonNU7e2fOOUy5Px2sv3L6AHy/+6PJx5E43Bl6ZEMtxu2kTUFAAjBoFdO1q/m9BQfFQTAEgj1N/5y2nBIPBgA4dOiAlJQVHjhyBj4+P9cRSTyynV4A52M4vv/ziZL5py/Dhw7Fr1y4sXbrUanosRalSpRAbG4v3338fx48fR25uLr777jsA5qimeTadd/v2bUEF3LaOlr8d62hbpqioCIcPH7aWiYyMtPpnWti7dy8qVKiAkJAQyfqXLl3a7vQNAJo3b44zZ86gfv36Tv+8vLwQGRmJCxcu2LXNsQ1i7N+/36ld4eHh8Pb2RmRkJIqKiuzKXL9+HWfOnEHDhg0BmPv0jz/+sGurJU8nL4899pi1Dy2cOXPGTtk4fPgwTCYT5syZg7Zt26JBgwb4/fffuZ8xfPhwfPHFF/j4449Rr149pxM+Wxo3boyLFy/il19+Efw+MjISe/futfts7969aNCggd3psBSRkZE4fvw47t+/b/1M7p35+Pg4yYbQfS9cuGAXqOjUqVO4efOm9Z2ppaioyC5AmeUd2cq9UL9oea5Ym6XGuVrI59RDKF3kuiMlgJgfYKh/qNV/Ky0rDfUW1ZP1kXWVD6tatPitavHjUnqtkC9ZcfAR1DM9ixSu8rl0PNVR6x/K+7zs/GyELQjT5GutBL3SHgmhxAqCNzXSxTsX7dIJ8aQJclVaHwtKxomtv/O0jGlc98+7k4eBUQNVjSO9029pnWd5ZCLQNxDr+qyT3Pjw9S2+6WKCOfV33nK87N+/H7t370bXrl1RtWpV7N+/H1evXpVUGtUyffp0VK5cGdWqVcOkSZNQpUoVu0jAjkRGRqJt27aYMGECXnzxRclTra+//hq//vorOnbsiEqVKmHr1q0wmUyIiIgAADzxxBNITU1Fr169EBAQgKlTpwoqE19++SVatmyJf/3rX1i1ahUOHDiA5cuX25VZsmQJwsPDERkZiXnz5uHGjRvW07hXX30V8+fPx5gxYzB69GicOXMG06ZNwxtvvCGb1iQsLAy7d+9Ghw4dUKZMGVSqVAlTp07F008/jVq1aqFPnz7w8vLCsWPHcOLECbz99tuIjY1FgwYNMHToUMyaNQu3b9/GpEmTJJ9j4fz583jjjTfw8ssv4+eff8aiRYuskWfDw8MRFxeHhIQEfPTRR6hQoQImTpyImjVrIi7OHFQxJiYGV69exfvvv48+ffpg+/bt2LZtG/z9/bmeDwARERHo3r07Xn75ZXzwwQcoVaoUkpKS7N51/fr18ejRIyxatAi9evWyBpHipVu3bvD398fbb7+N6dOnS5bt1KkTOnbsiN69e2Pu3LmoX78+Tp8+DYPBgO7du2Ps2LFo1aoVZsyYgf79+2Pfvn1YvHixne+rHIMGDcKkSZOQkJCAt956C7m5uZg9e7bkNWFhYbh79y52796NJk2awM/PD35+fnZlYmNjERUVhcGDB2P+/PkoKirCq6++ik6dOqFly5bc9ROidOnSGDNmDBYuXIhSpUph9OjRaNu2LVq3bg3AHCSsX79+aNasGWJjY/HVV18hLS0Nu3btUv1MoTZ/9913kuNcLXRy6iGULnLdlRJAytxPyemhK3xYtSJ0UsTjB6jllFsPHzB3RSyW8xF0lUm5I3r6XIqh1h+V52S3sm9lJGckK/a11uLPDLjGogJQZgWhJDWSLTwWCK4+wVdqEmtR0HixzIlKx5G702/pMScC5g0hy4ZbSSQ6GggJAQwiU4vBAISGmsvpib+/P3744Qf07NkTDRo0wOTJkzFnzhyugDpKmTlzJhITE9GiRQv88ccf+Oqrr2Tze7700kt4+PChpCkmYA4klJaWhieeeAKRkZH48MMPsWbNGjRq1AiAOXBTp06d8PTTT+Opp57Cs88+i3r16jndJyUlBWvXrkXjxo3x2WefYc2aNU6nPzNnzsTMmTPRpEkT/O9//8OWLVtQpUoVAEDNmjWxdetWHDhwAE2aNMHIkSPx0ksvWQNISTFnzhzs3LkToaGh1iAv3bp1w9dff41vv/0WrVq1Qtu2bTFv3jxr8CovLy9s3LgRhYWFaN26NYYPHy4Z2MaW559/3nrdqFGjkJiYiBEjRli/X7FiBVq0aIGnn34a7dq1A2MMW7dutZpiRkZGYunSpViyZAmaNGmCAwcOSEadFWPFihWoUaMGOnXqhPj4eIwYMQJVq1a1ft+kSRPMnTsX7733Hh5//HGsWrUK//3vf7nv7+XlhWHDhsFoNOL555+XLb9hwwa0atUKAwcORMOGDTF+/HjrCV7z5s2xbt06rF27Fo8//jimTp2K6dOn2wVDkqN8+fL46quvkJmZiWbNmmHSpEl47733JK9p3749Ro4cif79+yMoKEgwmJbBYMDmzZtRqVIldOzYEbGxsahbty6++OIL7rqJ4efnhwkTJmDQoEHo0KEDypcvb3ffZ599FgsWLMDs2bPRqFEjfPTRR1ixYgViYmJUP1OozXLjXC0GJhWfnMDt27dRsWJF3Lp1S9HukxxGkxFhC8Ikd7gDfQORX5jv9L1lkaFHFFKl9RVbkFh25HMSc6xKn1T7bLE9mXUnvJGS12SuwaC0QbL3Wx2/GgOjBtp9puVaR9SeoGfkZqDzSvHobFLoLWtK2sBb1rJpAoDrdM+R9KHpXJGO5Z5nOTWs7FsZ1wuvK6pDUpskrM9aLyuLnoBXhidHT0ZyTDL2nN+jSt4c5xAxhMat1BzCK0e848QiL0rGVah/qGy7hBCLRC03LpX2k
dR1es+J7uTatWs4dOgQoqOjuX0KbbFE6wXsAyNZFFZXROt1BxkZGejcuTNu3LjhFOhGjhkzZuDLL7/E8ePHXVM5GwwGAzZu3Ch6mpubm4s6dergyJEjTvk9ieLNSy+9hKtXr2LLli2erkqJIzU1FUlJSZr9Oj3B/fv3kZOTgzp16kj6aJNZr4eQM1+0/K3WlFRv8z6lJm887Utqk4S4x+JcZqYshZL0I1r8uHivPXX1FDJyMyT7Qix9hBw8qWq8Dd6CpphaTU9tZTE7PxsfH/7Yzj9PSgHjba+YybFlc0cOpSboUibOw5sP5zbztGX+/vlOn+mZCkcLvDLcpW4Xa0RjNamRpNIiOc5p58acw48Xf+TauOBRtgDlJrFK5EbNia4WdwI1Zt7umhNLEvHxZgU0MdE+OFJICDB/fslUTNVy9+5d5ObmYvHixU45QgmCl1u3biEzMxOrV68mxZQQhcx6PYiU+WJKTIrk6YteUUF5URMFUqp9G/ptwLzu83Q1P+TBkgoj4asE7sAnWqLV8pqdvr3nbZeZbPOY+sn5CKpJP+Eoi9MypjkFjtESvdUWIVPJdX3WcV2rZgEtZpoZHhiu+F5ywYP0SoUDqEuFpFT+taZGcpxrhOa0eovqIb8wX9KEWWkQM6UmsbxykxKTompzQatLgBIzb6XBoHhkIsQ/BEaTUXOEbE8THw/k5gLp6cDq1eb/5uT8sxRTwJxLtEWLFoiJiZE16SUIMeLi4tC1a1eMHDnSmmOXIBwhs14ZXGXWa4vQSee6k+sUm02pNQHjQanJmy3uCugkh9ApihS2bZEy5QSk+1aJ2akrTbbFTP16N+yN+T/Nl71eiYmemCwKYVnInh1zlus0jBce0/kqflUwr9s81PSvqYtcKjH1dLQokEKp6bEQSk4Rha5VKv9Kx5sFoXGndE5T6oYgV28hk1ge14UQ/xDkJuaqkistprNK51w187tS83ZPmKlrNeslCIIg9IPXrJeUUxncoZwKoXSxwLsYU6sA8Cz0efzFXAHPQkyJsmTBcdGn1o9L7FoxXNmXQn3F6yPIqyDJyaIYQX5BuFpw1fq3HotZJRsDejxPqa+1KzYGhNBj40qN/NvKW9VyVTFs0zBcusM3h2hRMLVspjnWW42/s2O/qtmgU9sGNZsQahVhoWeJ+Vx7IlYCKacEQRDFB/I5LeEo9X/iNQELmReiSgFQmuLDXfAsxJSkwrDF0WxPqR+XmJ/c7l934+094j47Ur53WhHy49Q7/YTaHLe2cgno43Mp5h8qhB7PU+prvef8Hi7lVIvvntZUSGp8PS04ytuCHvxziJrULhbUuCFI1VsMnhRLapRFo8kIo8ko6TctNC6V+I3aotaH1HFOrFquKoZuGip4rZ6pkwiCIIi/L+RzWkxR6v/EuxgTUwB4fP60pDTRmiZDCF6fMqXKkpQPKa8fl5SfXMMgviTISgP1qO1jvVN0KK23GHr5XNr6h/7fc/+HKn5VXP48Xl9rLf7MvKj1XVTr6ymFkjlEi4LpzoA9eqXfsmDp99jPYyUVU8B+XPL6jT4seug0T2iRQ9s50dvL28mv3LEeavzXCYIgiH8OdHJajOHZlbegdpGldDdbbRRItb5uYvCeBj0d/jR2/7qb+756nALLnV4kxyRz3UfJO9Xax0pkTQqjyYjL9y5z11sOvU6RLQvojNwMXCu4ptvzxMw1eceJOywS1Ch5ak/geODtGy0Kpt7WAHJmuUInrWpOrHndD4TGJe8mRM15Ne3GgGWe0EMOtZ5YEwRBEAQpp8Wc+AZxiMuriDMnMpBXHvDuFIPoOs4nFtHV22DqEX9UybuNc5WAJa2AIpG362UCon8Dgu8CeeWBPbUlFuQPHwJLlwLnzgH16gGvvgpvHx/nckYjsGcPkJcHBAebM5N7ewsutrxMQP1jF7F+b29U6ZWCjv+eBHgrW3w7LsRKFQGjDgL1buDP9gubMTu3HTDZ2A/YLfoE2g6ZBOVyC9LSRYBx7hx8er0cjpW/J/ierAvnmu2BjAynPnUk7cSXWDS7H/7l0CalioSt0nD5+gW02Lwfdc8yeO04D9R7KN72P9/9gUObMSvn/5BW5ZqoTYbzexKWU8f39MdN8dMY7vdkNML43W4MyBR+97b8cfOSbN/LbQg4KSwPHwJLFznVU2hjwMsE9L5WBW/WGYzWlwOBBkan5xtNRuzJyYDx+wwE3wUiHo+Bd6cYp3JKlTzj/UIce+sFLMhjTu/ISaliEBz3TjjMD97R0bLKP4+CGeZXA9Fph4GcDXZ96qj0G0zMKk9//DnniSpbDvKU1jkYid+96fyen5yL+GtBom23naOE5d5+3jWajHh962vomMNE56fKvpXxRZ8vzKfWDHYy+keAxBixwXFzxnaeWP/sGhybNsLudyQ4UGSDSmDOF5I1obaX9BQzBEEQhAthhCS3bt1iANitW7fc//ANGxgLCWHMnP/b/C8kxPy5LePGMebtbVfukQFsZnswJNv/e64f2Hl/2JU972/+fPXx1bL3Zd7e5s856ln05ZcsZG6I7PNNQm2SYfXx1dZ7zmxvbq9c+6XaHvheINt1bhcrMhYpa7sD6TnpTn2upJ6GZAMzJBvYvgXjuN590ZdfsksVvQXbZLlf6NzQv9rFg5K2C7x72+crbb/YeyqsHiQsIxpkVKyez/X783kSfb/h1AZmSDY4XWt5fxtOyY9Rx3oWGYtYek46+2FuEtfzhw+r7NRP96pVdpYRYxELmRsiWF8nGRk3jpm8vLjmkswPUvjmJ955TABLPzvW3ZBsYO+1BzM61NWxT5X0k9h7Emp/vICMOrbJMkfJyb1l3s38IEV0frJ9dnpOumCfFlYPEpRnnn+GZANb2rk8Mzm03eTlxYrefFPgxUjP+Zb3JdT2IgOE7+kCrl69yrZt28bu3r3rlucRBEEQ4hQWFrJTp06xwsJCyXKknMrgMeV0wwbGDAb7H3/A/JnB8NciaNw45zIAM/35b2Z7sKD3g6yLbiPM/2zLWj7L/CDlr+eL3Nf6z7IAlKinyWC/sBJ7vskA+zZxYFECZ7b/q61i7edp+74FNsoMb9sFsFWaHRUznnqGzg0114Xn3W/YwEwG8TbZ9n16Tjpfxypp+5/v3rFNQs+Xa/97Mu/J5Nh2JXUVkVGhesb/+XzHetr2vUXZk1ro220IqOhTqXe/4dQGaz2F3r3JAKexJKXkWZXpP+spJ6OW92QyCLRFQEa5ZFmCDac2OPX30s7lnd+RyLs3Ccio4PM55lIpGXW8Z3pOOte4tyibvGP5h7lJgn1qMhiYEWYZVqqcitVTjYzuWzDOunkg1na5eVQvSqpyajKZWEJCAqtUqRIDwI4cOeLW59euXZvNmzfPrc9UAgC2ceNGT1fDbaSnpzMA7MaNG4wxxlasWMEqVqwoe11J6aecnByPyDnhfkg51QmPKKdFRc670o6LgNBQxgoKnE9jHBdAXl7swb07rNbsmuy8v8CCynZRGxpifvaDB5L3ZYD5+4ICyXqaDGC/+YN5TTX/k3q+tU1FfCd8RcYiFvZ+DfbIIL6g
MsG8a+/zH+lnmwz469m8bX/wQLBeQienpSZDtp5GLy+WfmYHK3r4gO/dPzCXE7unEX/1ve3pjCRK2i4jo7bP52n/IwNYGZn3ZCcjOsmobT29/5RR0QX6n89PP7uLa8GfnpOua58yg4GZQkNY2Ps1+MeyDUJKXujcULNiKlNPyzsqNfmvsSzXTxYZlZVljjFvOVVefXw1Sz+zw+mET827VyJPvHOJ7T2LCgtYEYfcF929wz2WvaYKnOrb3vPPOdd7Kr9iKjc+lcooCw1lGw6vlmy73T1dSElVTrdu3cpKly7N9u7dy/Ly8tijR4/c+nxSTosXjsppQUEBu3z5svX7adOmsSZNmjhdl5eXx+7fv++mWqqnqKhIsZyLtZko3vAqp+RzWhzZswe4KBFdljHgwgVg3Diz348IBgAwmeDz8Sf4vMoIhN6eJlrWCwAuXDQ/++hRyfsCMH8/bpxkPQ0MqHXb7DsIAKG3Je5nadOePUBMjPSzYQ5Asul6V5RiqeLPB1CKAXN2Sj/bwPDXs3nbvnQpkJTk9JWQn9yog+Z6SNXTYDIhZuspoKkP37tfuhS4eFEktqb5fVr6/vs6nD5eS5fyt71pU8l62j6/6R/y7S/FgA++90Po7QLxgrYyopOM2tazarkghN6+KlrW8nzj9xnSz/2TvDt5uvYpGIPhwkXE7ZaWZ7uxbDOWJAMRzZ8vO5eUYmZZPladcyz/KaOy5TjGvJ3/rkxdAXC9eyXyxDuX2N7T++hRgEPuMWEi11ju+BsQVC4IZf8Ql1HLnPvctSpYX/Uv31LHHMK2yM1PABTJKC5cQPyneyXbbndPgXn0n865c+cQHByM9u3bi5Z5+PAhfGRiIBDClPS+8/X1ha+vr2y56tWru6E22vH29i4xdSXcg5enK0AIkMcZyTA7m6/cuXPo6BPO/+xz53R9fo275kAk3M/npMm98lzl6gtnYxB+Nm/bRcoJpWWpd4Pz+efO8befs5417oI/FYmStnPWM/guf/uH+nXgK+gCGV3afDLWtp/HVZZXloMrBLukT7nlSeB+oqmQOOtZ74aCsczbdgVjXtF9eedHBfLkkrmEs57Bd4E36wzmKvtF+/l2qW0uvn5RNFWMS+YnBb9NJQKj0Rx8as0a83/lNkc0MGzYMIwZMwbnz5+HwWBAWFgYACAmJgajR49GUlISqlSpgm7dugEATpw4gR49eqB8+fKoVq0ahgwZgmvX/tqYMJlM+O9//4s6derA19cXTZo0wfr162XrcefOHQwcOBDlypVDzZo1sWTJErvvz58/j7i4OJQvXx7+/v7o168fLl/+K1L7sGHD8Oyzz9pdk5SUhBibjaiYmBi89tprGD9+PAIDA1G9enUkJyfbXZOdnY2OHTuibNmyaNiwIXbu3OlU1wkTJqBBgwbw8/ND3bp1MWXKFDx69Mj6fXJyMpo2bYpPPvkEderUQdmyZfHZZ5+hcuXKePDggd29nn32WQwZMkS0Xy5evIiBAwciMDAQ5cqVQ8uWLbF//37r9x988AHq1asHHx8fRERE4PPPP7e73mAw4JNPPsFzzz0HPz8/hIeHY8uWLXZltm7digYNGsDX1xedO3dGbm6u3fepqakICAiw/n9KSgqOHTsGg8EAg8GA1NRU67M2bdpkvS4zMxNPPPEEfH19UblyZYwYMQJ37/41oVve2ezZsxEcHIzKlStj1KhRdn3piKVvP/roI4SGhsLPzw/9+vXDrVu3rGVMJhOmT5+OkJAQlClTBk2bNsX27dut3+fm5sJgMODo0aMAgIyMDBgMBuzevRstW7aEn58f2rdvjzNnzki2mTGG5ORk1KpVC2XKlEGNGjXw2muvidadKL6QclocCeaMZBjOqXDWq8d/z+Bgc3kdn59X3hwhk/v5vHDW82yggmfztl2inGMux3OVOJ+v5D1x1jOvvIJUJErazlnPvPL87fdq0ICvoAtktGHjLvCqWVO+IMxRcblzQrqgT7nlyQVj6XpwRYzslaLrPRXVU8l9eedHBfLkkrmEs54je6Wgdcs4rrJeNWvabUL4lPIRzWXskvlJyW9TcSctDQgLAzp3BgYNMv83LMz8uQtYsGCBdSGfl5eHgwcPWr9buXIlfHx8sHfvXnz44Ye4efMmnnjiCTRr1gyHDh3C9u3bcfnyZfTr1896zX//+1989tln+PDDD3Hy5Em8/vrr+Pe//43vv/9esh6zZs1CkyZNcOTIEUycOBGJiYlWxdBkMiEuLg75+fn4/vvvsXPnTvz666/o37+/4vauXLkS5cqVw/79+/H+++9j+vTpds+Jj4+Hj48P9u/fjw8//BATJkxwukeFChWQmpqKU6dOYcGCBVi2bBnmzbPfbDx79iw2bNiAtLQ0HD16FH379oXRaLRTDK9cuYJvvvkGL774omBd7969i06dOuHSpUvYsmULjh07hvHjx8NkMgEANm7ciMTERIwdOxYnTpzAyy+/jBdeeAHp6el290lJSUG/fv1w/Phx9OzZE4MHD0Z+vnnn68KFC4iPj0evXr1w9OhRDB8+HBMnThTtv/79+2Ps2LFo1KgR8vLykJeXJ/ge7t27h27duqFSpUo4ePAgvvzyS+zatQujR4+2K5eeno5z584hPT0dK1euRGpqqlXZFePs2bNYt24dvvrqK2zfvh1HjhzBq6++av1+wYIFmDNnDmbPno3jx4+jW7dueOaZZ5Ats4k1adIkzJkzB4cOHUKpUqWs70WszRs2bMC8efPw0UcfITs7G5s2bUJUVJTkM4hiinusjEsuHvU5FQo6YevXJONzKugrJHdPNf58MvfckLlO1udVqc8pY8wlfmJafU7tXuOffnJrD33GTF5eyny65N6TxZ9PpJwRYBcDvNmGzHW69aeSelr85LyngpWeLBBVVYM8uUJGWVGRojHCFWBI5z7V6nOqdSyZvLxYUWGBbjKqaswr6VMd5cmlc4lMPU0Gm/epZB4XQMjvuM77NfnGp5L5SclvkwvR7HOqQ0AvNcybN4/Vrl3b7rNOnTqxZs2a2X02Y8YM1rVrV7vPLly4wACwM2fOsPv37zM/Pz/2448/2pV56aWX2MCBA0WfX7t2bda9e3e7z/r378969OjBGGPs22+/Zd7e3uz8+fPW70+ePMkAsAMHDjDGGBs6dCiLi4uzu0diYiLr1KmTXZv+9a9/2ZVp1aoVmzBhAmOMsR07drBSpUqxS5cuWb/ftm0bA6R9TmfNmsVatGhh/XvatGmsdOnS7MqVK3blXnnlFWubGGNszpw5rG7dusxkMgne96OPPmIVKlRg169fF/y+ffv2LCEhwe6zvn37sp49e1r/BsAmT55s/fvu3bsMANu2bRtjjLG33nqLNWzY0O4eEyZMYIB4QCQx/0vbfvr4449ZpUqV7MbCN998w7y8vNgff/zBGDO/s9q1a7Mim/mjb9++rH///oLttTzb29ubXbx40frZtm3bmJeXF8vLy2OMMVajRg32zjvv2F3XqlUr9uqrrzLGnAMiWXxsd+3aZVdXAFZfRaE2z5kzhzVo0IA9fPhQtL6EZ+H
1OaWT0+KItzewwLzLDYPD6Yzl7/nzAV9f4I03pO/1xhvmnI+89/T2Npfnua+vL9c94x/vi19f/w233kuBwQAwxwMnx+fzwlHPj2PK46EPkNjd/LfJsYDatnP4q1hMKPu3GALD2LGiPmV29+R9Tz4+ouWYwfxR9WVrEf94X9l6WlHSdol6Wvo4qTtQIyAUawdtgNfYsfL35ZQnV8govL0VjRHH03ELIf4h9nlldepTy9+G+Qsw5+lFSBKRZ9OfRQ3zF+g6lgwADGPHwrusry4yqnrMc9QVgK7yxP7875x2sJtLZOcxneppgOGv96lkHhcgPjIeuYm5dia/2WN/4xufSuYnJb9NxRWjEUhMNKujjlg+S0pyqYmvIy1atLD7+9ixY0hPT0f58uWt/x577DEAZr/Vs2fPoqCgAE8++aRdmc8++wznZEyq27Vr5/R3VlYWACArKwuhoaEIDQ21ft+wYUMEBARYy/DSuHFju7+Dg4Nx5coVu+fUqFFDtF4A8MUXX6BDhw6oXr06ypcvj8mTJ+P8+fN2ZWrXro2goCC7zxISEvDtt9/i0iVzjuDU1FQMGzYMBkfZ/pOjR4+iWbNmCAwUNqHIyspChw727ikdOnRw6hPbNpcrVw7+/v52bW7Tpo1deaE2KyUrKwtNmjRBuXLl7OpmMpms5rIA0KhRI3jbzB+270OMWrVqoaaN5VG7du2s9719+zZ+//13rn5xxLafgv+02pCqS9++fVFYWIi6desiISEBGzduRFFRkeQziOIJKafFlfh4YP16wNHUMCTE/Hn8n4vf9983B/5wXIx4e5s/f/995fdUcl/Oe3p7eePxkVNhWL8Bhpoh8s/nRaKehnHj8PKum0gfmo6+k1fj1AcpMIRwPFtJn+pQT9XvSaScISQUhvUb4N2nj0fq+TA4CHvnJuG199KRk5hjVtR0lidFdVVyTwVlhRb61vbaovO7j4+Mx+DkDXh5WGVc8rcvdr96ZRjWb9B9LOkto5rGvJK66iBPBm9vmN58E21Wmd/za++lg335Jd885mEZFULQ79gV794V86g74Q1MuGeP26pkq1gAZjNTi/mn7T+Ln6bFn/Cbb76x+/7UqVNcfqda8PLyAnNQ7IV8F0uXLm33t8FgsJrJ8rBv3z4MHjwYPXv2xNdff40jR45g0qRJePjwoV05x74DgGbNmqFJkyb47LPPcPjwYZw8eRLDhg0TfRZPECIetLbZlRSnutnWxbJhIFWX0NBQnDlzBkuXLoWvry9effVVdOzYUdJnliieULTe4kx8PBAXZ/7xy8sz+/tERzv/2L//PvD22+bIh+fOmf14Xn1VeFea955K7qvknkrK8iJRT2/gryifUQASJunbdp3q6QRvP7m5P3nqWTY6GtHukicPy6hdFFkpdH738ZHxiFsehz2TM/DL9xkIvmv2hfXrFOPRd+82GVVSVx3kydvHBzG25cIAPPdciZBRblzx7l0xj7oL3uBPSgN66Ujz5s2xYcMGhIWFoVQp5+Vcw4YNUaZMGZw/fx6dOnVSdO+ffvrJ6e/IyEgAQGRkJC5cuIALFy5YT09PnTqFmzdvomHDhgCAoKAgnDhxwu4eR48edVJ+pLA8Jy8vz3py5livH3/8EbVr18akSZOsn/3222/czxg+fDjmz5+PS5cuITY21u402JHGjRvjk08+QX5+vuDpaWRkJPbu3YuhQ4daP9u7d6+1T3iIjIx0CpDk2GZHfHx8YJQ5wY+MjERqairu3btnVdT37t0LLy8vREREcNdPiPPnz+P333+3nnD/9NNP1vv6+/ujRo0a2Lt3r50M7t27F61bt1b9TLE2+/r6olevXujVqxdGjRqFxx57DJmZmWjevLnqZxEewD1WxiUXj/icEgRBEAShCU0+p+np0j6zln/p6XpXW9TnNDEx0e6zS5cusaCgINanTx924MABdvbsWbZ9+3Y2bNgwq9/gpEmTWOXKlVlqaio7e/YsO3z4MFu4cCFLTU0VfX7t2rWZv78/e++999iZM2fY4sWLmbe3N9u+fTtjjDGTycSaNm3KoqOj2eHDh9n+/ftZixYt7PxJt2/fzgwGA1u5ciX75Zdf2NSpU5m/v7+Tz6ljm+Li4tjQoUMZY4wZjUbWsGFD9uSTT7KjR4+yH374gbVo0cLOl3Lz5s2sVKlSbM2aNezs2bNswYIFLDAwkMsnkzHGbt68yfz8/JiPjw9bu3ataJ8wxtiDBw9YgwYNWHR0NPvf//7Hzp07x9avX2/16d24cSMrXbo0W7p0Kfvll1/YnDlzmLe3N0u3kREI+MtWrFiRrVixgjHG2G+//cZ8fHzYm2++yU6fPs1WrVrFqlevLulzumrVKlauXDl25MgRdvXqVWtuU9tn3bt3jwUHB7PevXuzzMxM9t1337G6deta+5oxPj9hR6ZNm8bKlSvHYmNjre+oQYMGbMCAAdYy8+bNY/7+/mzt2rXs9OnTbMKECax06dLsl19+YYyJ+5xa2ssYY0eOHGEAWE5OjmibV6xYwT755BOWmZnJzp07xyZPnsx8fX3ZtWvXROtPuBfyOSUIgiAIglBDdLTZVFnE/xAGAxAaai7nISwnUkajEV27dkVUVBSSkpIQEBAALy/z8m7GjBmYMmUK/vvf/yIyMhLdu3fHN998gzp16kjee+zYsTh06BCaNWuGt99+G3PnzrWmrzEYDNi8eTMqVaqEjh07IjY2FnXr1sUXX3xhvb5bt26YMmUKxo8fj1atWuHOnTt4/vnnFbXPy8sLGzduRGFhIVq3bo3hw4fjnXfesSvzzDPP4PXXX8fo0aPRtGlT/Pjjj5gyZQr3MypWrIjevXujfPnyTqlvHPHx8cG3336LqlWromfPnoiKisLMmTOtPprPPvssFixYgNmzZ6NRo0b46KOPsGLFCrv0OXLUqlULGzZswKZNm9CkSRN8+OGHePfddyWv6d27N7p3747OnTsjKCgIa9ascSrj5+eHHTt2ID8/H61atUKfPn3QpUsXLF68mLtuYtSvXx/x8fHo2bMnunbtisaNG2Pp0qXW71977TW88cYbGDt2LKKiorB9+3Zs2bIF4bxRvQUQanNAQACWLVuGDh06oHHjxti1axe++uorVK5cWXMbCfdiYEzI25+wcPv2bVSsWBG3bt2Cv7+//AUEQRAEQXica9eu4dChQ4iOjhb0OZQlLQ2w+O7bLpUsCqsWv2mi2NClSxc0atQICxcu9HRVShzJycnYtGmTNUcpQUhx//595OTkWPMNi0EnpwRBEARBEI64KqAXUSy4ceMGNm7ciIyMDIwaNcrT1SEI4k8oIBJBEARBEIQQrgroRXicZs2a4caNG3jvvfc0BwUiCEI/yKxXBjLrJQiCIIiSh2azXoIgCEI3yKyXIAiCIAiCIAiCKDGQckoQBEEQBEEQBEF4HFJOCYIgCIL4W8IYA3kvEQRBeB7euZiUU4IgCIIg/nZ4e3vj4cOHKCws9HRVCIIg/vEUFBQAAEqXLi1ZjqL1ymDR8m/fvu3hmhAEQRAEwcu9e/ewZcsW1K9fH15eXvDz84
PBkqOUIAiCcAuMMRQUFODKlSsICAiAt0y0c4rWK8PFixcRGhrq6WoQBEEQBKEQg8GAU6dOkWkvQRCEhwkICED16tVlNwlJOZXBZDLh999/R4UKFXTdcb19+zZCQ0Nx4cKFv22Kmr97G6l9JZ+/exv/7u0D/v5tpPaphzGGO3fuoEaNGmCM4dGjR7renyAIguCjdOnSsiemFsisVwYvLy+EhIS47P7+/v5/ywWHLX/3NlL7Sj5/9zb+3dsH/P3bSO1TR8WKFa3/z7swIgiCIDwHBUQiCIIgCIIgCIIgPA4ppwRBEARBEARBEITHIeXUQ5QpUwbTpk1DmTJlPF0Vl/F3byO1r+Tzd2/j3719wN+/jdQ+giAI4p8EBUQiCIIgCIIgCIIgPA6dnBIEQRAEQRAEQRAeh5RTgiAIgiAIgiAIwuOQckoQBEEQBEEQBEF4HFJOCYIgCIIgCIIgCI9DyqkLeeedd9C+fXv4+fkhICCA6xrGGKZOnYrg4GD4+voiNjYW2dnZdmXy8/MxePBg+Pv7IyAgAC+99BLu3r3rghZIo7Qeubm5MBgMgv++/PJLazmh79euXeuOJtmhpp9jYmKc6j5y5Ei7MufPn8dTTz0FPz8/VK1aFePGjUNRUZErmyKK0jbm5+djzJgxiIiIgK+vL2rVqoXXXnsNt27dsivnqXe4ZMkShIWFoWzZsmjTpg0OHDggWf7LL7/EY489hrJlyyIqKgpbt261+55nPLobJW1ctmwZoqOjUalSJVSqVAmxsbFO5YcNG+b0rrp37+7qZoiipH2pqalOdS9btqxdmeL2DpW0T2g+MRgMeOqpp6xlitP7++GHH9CrVy/UqFEDBoMBmzZtkr0mIyMDzZs3R5kyZVC/fn2kpqY6lVE6rgmCIIgSDCNcxtSpU9ncuXPZG2+8wSpWrMh1zcyZM1nFihXZpk2b2LFjx9gzzzzD6tSpwwoLC61lunfvzpo0acJ++ukntmfPHla/fn02cOBAF7VCHKX1KCoqYnl5eXb/UlJSWPny5dmdO3es5QCwFStW2JWzbb+7UNPPnTp1YgkJCXZ1v3XrlvX7oqIi9vjjj7PY2Fh25MgRtnXrVlalShX21ltvubo5gihtY2ZmJouPj2dbtmxhZ8+eZbt372bh4eGsd+/eduU88Q7Xrl3LfHx82KeffspOnjzJEhISWEBAALt8+bJg+b179zJvb2/2/vvvs1OnTrHJkyez0qVLs8zMTGsZnvHoTpS2cdCgQWzJkiXsyJEjLCsriw0bNoxVrFiRXbx40Vpm6NChrHv37nbvKj8/311NskNp+1asWMH8/f3t6v7HH3/YlSlO71Bp+65fv27XthMnTjBvb2+2YsUKa5ni9P62bt3KJk2axNLS0hgAtnHjRsnyv/76K/Pz82NvvPEGO3XqFFu0aBHz9vZm27dvt5ZR2mcEQRBEyYaUUzewYsUKLuXUZDKx6tWrs1mzZlk/u3nzJitTpgxbs2YNY4yxU6dOMQDs4MGD1jLbtm1jBoOBXbp0Sfe6i6FXPZo2bcpefPFFu894FjWuRm37OnXqxBITE0W/37p1K/Py8rJbQH/wwQfM39+fPXjwQJe686LXO1y3bh3z8fFhjx49sn7miXfYunVrNmrUKOvfRqOR1ahRg/33v/8VLN+vXz/21FNP2X3Wpk0b9vLLLzPG+Maju1HaRkeKiopYhQoV2MqVK62fDR06lMXFxeldVVUobZ/c3Frc3qHW9zdv3jxWoUIFdvfuXetnxen92cIzB4wfP541atTI7rP+/fuzbt26Wf/W2mcEQRBEyYLMeosROTk5+OOPPxAbG2v9rGLFimjTpg327dsHANi3bx8CAgLQsmVLa5nY2Fh4eXlh//79bqurHvU4fPgwjh49ipdeesnpu1GjRqFKlSpo3bo1Pv30UzA3p+PV0r5Vq1ahSpUqePzxx/HWW2+hoKDA7r5RUVGoVq2a9bNu3brh9u3bOHnypP4NkUAvWbp16xb8/f1RqlQpu8/d+Q4fPnyIw4cP240dLy8vxMbGWseOI/v27bMrD5jfhaU8z3h0J2ra6EhBQQEePXqEwMBAu88zMjJQtWpVRERE4JVXXsH169d1rTsPatt39+5d1K5dG6GhoYiLi7MbR8XpHerx/pYvX44BAwagXLlydp8Xh/enBrkxqEefEQRBECWLUvJFCHfxxx9/AICd4mL52/LdH3/8gapVq9p9X6pUKQQGBlrLuAM96rF8+XJERkaiffv2dp9Pnz4dTzzxBPz8/PDtt9/i1Vdfxd27d/Haa6/pVn851LZv0KBBqF27NmrUqIHjx49jwoQJOHPmDNLS0qz3FXq/lu/ciR7v8Nq1a5gxYwZGjBhh97m73+G1a9dgNBoF+/b06dOC14i9C9uxZvlMrIw7UdNGRyZMmIAaNWrYLfa7d++O+Ph41KlTB+fOncN//vMf9OjRA/v27YO3t7eubZBCTfsiIiLw6aefonHjxrh16xZmz56N9u3b4+TJkwgJCSlW71Dr+ztw4ABOnDiB5cuX231eXN6fGsTG4O3bt1FYWIgbN25olnmCIAiiZEHKqUImTpyI9957T7JMVlYWHnvsMTfVSF9426eVwsJCrF69GlOmTHH6zvazZs2a4d69e5g1a5Yuio2r22erpEVFRSE4OBhdunTBuXPnUK9ePdX3VYK73uHt27fx1FNPoWHDhkhOTrb7zpXvkFDHzJkzsXbtWmRkZNgFDRowYID1/6OiotC4cWPUq1cPGRkZ6NKliyeqyk27du3Qrl0769/t27dHZGQkPvroI8yYMcODNdOf5cuXIyoqCq1bt7b7vCS/P4IgCIJwhJRThYwdOxbDhg2TLFO3bl1V965evToA4PLlywgODrZ+fvnyZTRt2tRa5sqVK3bXFRUVIT8/33q9Fnjbp7Ue69evR0FBAZ5//nnZsm3atMGMGTPw4MEDlClTRra8FO5qn4U2bdoAAM6ePYt69eqhevXqTpEmL1++DAC6vD/APW28c+cOunfvjgoVKmDjxo0oXbq0ZHk936EQVapUgbe3t7UvLVy+fFm0LdWrV5cszzMe3YmaNlqYPXs2Zs6ciV27dqFx48aSZevWrYsqVarg7NmzblVutLTPQunSpdGsWTOcPXsWQPF6h1rad+/ePaxduxbTp0+XfY6n3p8axMagv78/fH194e3trVkmCIIgiJIF+ZwqJCgoCI899pjkPx8fH1X3rlOnDqpXr47du3dbP7t9+zb2799vPR1o164dbt68icOHD1vLfPfddzCZTFZFSAu87dNaj+XLl+OZZ55BUFCQbNmjR4+iUqVKuig17mqfbd0BWBfG7dq1Q2Zmpp1SuHPnTvj7+6Nhw4aa2+eONt6+fRtdu3aFj48PtmzZ4pS6Qwg936EQPj4+aNGihd3YMZlM2L17t93Jmi3t2rWzKw+Y34WlPM94dCdq2ggA77//PmbMmIHt27fb+ReLcfHiRVy/ft1OmXMHattni9FoRGZmprXuxekdamnfl19+iQcPHuDf//637HM89f7UIDcG9ZAJgiAIooTh6
YhMf2d+++03duTIEWu6lCNHjrAjR47YpU2JiIhgaWlp1r9nzpzJAgIC2ObNm9nx48dZXFycYCqZZs2asf3797P//e9/LDw83GOpZKTqcfHiRRYREcH2799vd112djYzGAxs27ZtTvfcsmULW7ZsGcvMzGTZ2dls6dKlzM/Pj02dOtXl7XFEafvOnj3Lpk+fzg4dOsRycnLY5s2bWd26dVnHjh2t11hSyXTt2pUdPXqUbd++nQUFBXk0lYySNt66dYu1adOGRUVFsbNnz9qlrygqKmKMee4drl27lpUpU4alpqayU6dOsREjRrCAgABrZOQhQ4awiRMnWsvv3buXlSpVis2ePZtlZWWxadOmCaaSkRuP7kRpG2fOnMl8fHzY+vXr7d6VZQ66c+cOe/PNN9m+fftYTk4O27VrF2vevDkLDw9n9+/fL/btS0lJYTt27GDnzp1jhw8fZgMGDGBly5ZlJ0+etJYpTu9Qafss/Otf/2L9+/d3+ry4vb87d+5Yf+cAsLlz57IjR46w3377jTHG2MSJE9mQIUOs5S2pZMaNG8eysrLYkiVLBFPJSPUZQRAE8feClFMXMnToUAbA6V96erq1DP7MB2nBZDKxKVOmsGrVqrEyZcqwLl26sDNnztjd9/r162zgwIGsfPnyzN/fn73wwgt2Cq+7kKtHTk6OU3sZY+ytt95ioaGhzGg0Ot1z27ZtrGnTpqx8+fKsXLlyrEmTJuzDDz8ULOtqlLbv/PnzrGPHjiwwMJCVKVOG1a9fn40bN84uzyljjOXm5rIePXowX19fVqVKFTZ27Fi7NCzuRGkb09PTBWUaAMvJyWGMefYdLlq0iNWqVYv5+Piw1q1bs59++sn6XadOndjQoUPtyq9bt441aNCA+fj4sEaNGrFvvvnG7nue8ehulLSxdu3agu9q2rRpjDHGCgoKWNeuXVlQUBArXbo0q127NktISPDowl9J+5KSkqxlq1Wrxnr27Ml+/vlnu/sVt3eoVEZPnz7NALBvv/3W6V7F7f2JzQ+WNg0dOpR16tTJ6ZqmTZsyHx8fVrduXbvfQwtSfUYQBEH8vTAw5uYcHQRBEARBEARBEAThAPmcEgRBEARBEARBEB6HlFOCIAiCIAiCIAjC45ByShAEQRAEQRAEQXgcUk4JgiAIgiAIgiAIj0PKKUEQBEEQBEEQBOFxSDklCIIgCIIgCIIgPA4ppwRBEARBEARBEITHIeWUIAhChoyMDBgMBty8edPTVSEIgiAIgvjbQsopQRAlBqPRiPbt2yM+Pt7u81u3biE0NBSTJk1yyXPbt2+PvLw8VKxY0SX3JwiCIAiCIAADY4x5uhIEQRC8/PLLL2jatCmWLVuGwYMHAwCef/55HDt2DAcPHoSPj4+Ha0gQBEEQBEGogU5OCYIoUTRo0AAzZ87EmDFjkJeXh82bN2Pt2rX47LPPRBXTCRMmoEGDBvDz80PdunUxZcoUPHr0CADAGENsbCy6desGy15dfn4+QkJCMHXqVADOZr2//fYbevXqhUqVKqFcuXJo1KgRtm7d6vrGEwRBEARB/I0p5ekKEARBKGXMmDHYuHEjhgwZgszMTEydOhVNmjQRLV+hQgWkpqaiRo0ayMzMREJCAipUqIDx48fDYDBg5cqViIqKwsKFC5GYmIiRI0eiZs2aVuXUkVGjRuHhw4f44YcfUK5cOZw6dQrly5d3VXMJgiAIgiD+EZBZL0EQJZLTp08jMjISUVFR+Pnnn1GqFP9e2+zZs7F27VocOnTI+tmXX36J559/HklJSVi0aBGOHDmC8PBwAOaT086dO+PGjRsICAhA48aN0bt3b0ybNk33dhEEQRAEQfxTIbNegiBKJJ9++in8/PyQk5ODixcvAgBGjhyJ8uXLW/9Z+OKLL9ChQwdUr14d5cuXx+TJk3H+/Hm7+/Xt2xfPPfccZs6cidmzZ1sVUyFee+01vP322+jQoQOmTZuG48ePu6aRBEEQBEEQ/yBIOSUIosTx448/Yt68efj666/RunVrvPTSS2CMYfr06Th69Kj1HwDs27cPgwcPRs+ePfH111/jyJEjmDRpEh4+fGh3z4KCAhw+fBje3t7Izs6WfP7w4cPx66+/Ws2KW7ZsiUWLFrmquQRBEARBEP8ISDklCKJEUVBQgGHDhuGVV15B586dsXz5chw4cAAffvghqlativr161v/AWZFtnbt2pg0aRJatmyJ8PBw/Pbbb073HTt2LLy8vLBt2zYsXLgQ3333nWQ9QkNDMXLkSKSlpWHs2LFYtmyZS9pLEARBEATxT4GUU4IgShRvvfUWGGOYOXMmACAsLAyzZ8/G+PHjkZub61Q+PDwc58+fx9q1a3Hu3DksXLgQGzdutCvzzTff4NNPP8WqVavw5JNPYty4cRg6dChu3LghWIekpCTs2LEDOTk5+Pnnn5Geno7IyEjd20oQBEEQBPFPggIiEQRRYvj+++/RpUsXZGRk4F//+pfdd926dUNRURF27doFg8Fg99348ePx6aef4sGDB3jqqafQtm1bJCcn4+bNm7h69SqioqKQmJiIt956CwDw6NEjtGvXDvXq1cMXX3zhFBBpzJgx2LZtGy5evAh/f390794d8+bNQ+XKld3WFwRBEARBEH83SDklCIIgCIIgCIIgPA6Z9RIEQRAEQRAEQRAeh5RTgiAIgiAIgiAIwuOQckoQBEEQBEEQBEF4HFJOCYIgCIIgCIIgCI9DyilBEARBEARBEAThcUg5JQiCIAiCIAiCIDwOKacEQRAEQRAEQRCExyHllCAIgiAIgiAIgvA4pJwSBEEQBEEQBEEQHoeUU4IgCIIgCIIgCMLjkHJKEARBEARBEARBeBxSTgmCIAiCIAiCIAiP8//O3bdWbNTSsQAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plt.scatter(px1, py1, color='green', label='collation points')\n", - "plt.scatter(px2, py2, color='blue', label='simply supported boundary condition points')\n", - "plt.scatter(px3, py3, color='red', label='free boundary condition points')\n", - "plt.title('Summary of points')\n", - "plt.xlabel('X-axis')\n", - "plt.ylabel('Y-axis')\n", - "plt.legend(bbox_to_anchor=(1, 0), loc=3, borderaxespad=0)\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. 初始化深度学习求解器并开始求解" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[2024/11/17 17:10:45] ppsci INFO: Using paddlepaddle 3.0.0 on device Place(gpu:0)\u001b[0m\n", - "\u001b[36m[2024/11/17 17:10:45] ppsci MESSAGE: Set to_static=False for computational optimization.\u001b[0m\n", - "[2024/11/17 17:11:51] ppsci INFO: [Train][Epoch 1/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 66.07724s, reader_cost: 0.00001s, ips: 605.35, eta: 7:19:24\u001b[0m\n", - "\u001b[36m[2024/11/17 17:11:51] ppsci MESSAGE: Finish saving checkpoint to: ./output_kirchhoff/checkpoints/latest(latest checkpoint will be saved every epoch as expected, but this log will be printed only once for tidy logging)\u001b[0m\n", - "[2024/11/17 17:12:13] ppsci INFO: [Train][Epoch 100/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 0.16517s, reader_cost: 0.00002s, ips: 242176.51, eta: 0:00:49\u001b[0m\n", - "[2024/11/17 17:12:32] ppsci INFO: [Train][Epoch 200/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 0.14143s, reader_cost: 0.00002s, ips: 282823.83, eta: 0:00:28\u001b[0m\n", - "[2024/11/17 17:12:50] ppsci INFO: [Train][Epoch 300/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 0.14079s, reader_cost: 0.00002s, ips: 284106.49, eta: 0:00:14\u001b[0m\n", - "[2024/11/17 17:13:09] ppsci INFO: [Train][Epoch 400/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 0.14086s, reader_cost: 0.00002s, ips: 283973.91, eta: 0:00:00\u001b[0m\n" - ] - } - ], - "source": [ - "# set optimizer\n", - "opt = ppsci.optimizer.LBFGS(max_iter=1000)(model)\n", - "solver = ppsci.solver.Solver(\n", - " model,\n", - " {\n", - " \"pde_contraint\": pde_contraint,\n", - " \"constraint_left_right\": constraint_left_right,\n", - " \"constraint_up_down\": constraint_up_down,\n", - " },\n", - " output_dir=\"./output_kirchhoff\",\n", - " optimizer=opt,\n", - " epochs=400,\n", - " iters_per_epoch=1,\n", - " log_freq=100,\n", - " # pretrained_model_path=\"./output_kirchhoff/checkpoints/latest\"\n", - ")\n", - "solver.train()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 6. 
结果可视化" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "num_cords = 10201\n", - "(10201,) (10201,) (10201, 1)\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfIAAAGHCAYAAABLftCiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAABiiUlEQVR4nO3de1xUZf4H8A8gMFwckIsMKAgmiSSCoY6wlm2SQ7Gb7Jar5oq6JGVqupimroJpRnnLTH+xVt5+q2lu6pq5JGFmm4SK4i11zUC8MCggjKCAwPn94Y+Txxlghttw4PN+vc7L5jnf85znHKb5znnOc56xEARBABEREcmSpbkbQERERI3HRE5ERCRjTOREREQyxkROREQkY0zkREREMsZETkREJGNM5ERERDLGRE5ERCRjTOREREQyxkROrSozMxMhISHo3LkzJkyYgC1btuCxxx4zevuNGzfC19e33hhfX19s3LjR6DonTJgAa2trODo64vTp00ZvR1TL19cXCoWiwfcmUUtgIqdWNXfuXDz11FO4ffs2Nm7ciLFjx+Ls2bPmbhbGjh2L0tJSBAUF1Rv32Wefwd/fH3Z2dggODsaBAwfqjS8sLMSoUaOgVCrh4uKCV199FRUVFZKYVatWwcfHB/b29vjNb36DkydPStafOHEC4eHhsLe3h4+PD1avXi1Zv3r1aqjVatjb29eZSAoLC/Hyyy+ja9eu6Ny5MwIDA3HixAlx/dGjR/HUU0+hS5cucHd3xx//+Efk5ubWe2wPa6idD6upqcG8efPg4eEBR0dHREZG4vLly+L6a9euYcSIEejRowcsLCz0vpxVVFTglVdegb+/Pzp37gwfHx/MnDkTd+/eFWP27duHp59+Gm5ubujSpQuGDBmC7777TlLPxYsX8bvf/Q6urq5wcXHBM888I/lC9+qrr8LR0VGyWFhYYPr06WJMTk4OkpOTTTpfRM2FiZxa1S+//IKQkBBzN6NRDh8+jL/85S94//33UVJSgtdffx2///3v6014tV8QLl++jDNnzuDYsWOYOXOmuH7btm1YtGgRPv/8cxQVFWH48OGIjIzE7du3AQA6nQ6RkZHQaDQoKirC559/joULF+Kf//ynWIeXlxdmz56Nv/3tbwbbUF5ejqeffhqVlZU4deoUdDodvvzyS3h5eQG4n1CjoqIQGhoKrVaLX375BZ06dcLYsWONPjfGtPNhS5cuxWeffYZDhw5Bq9XCx8cHv//971FTUwMAsLS0xPDhw7F161Z0795db/uqqiq4ubnhyy+/RHFxMb777jscOHAAs2fPFmNu3bqFadOm4dKlS7h58yZGjx6N5557DleuXBFjRo8eDUdHR+Tk5CAvLw99+/ZFVFQUan+GIjk5GaWlpeJy+PBhAMC4ceOMPj9ELUogMtKePXsEb29v8fWaNWsEAEJaWpogCIJQXFwsdOrUSfjvf/+rt21VVZXg4OAgWFhYCLa2toKDg4Pwz3/+U9iwYYPQo0cPSdzy5cuFgIAAQalUCo8//rjwzTffiOsfjr99+7bwl7/8RXBxcRG8vLyE999/X+jRo4ewYcMGo49r/Pjxwvjx442K+9Of/iQpGzRokPDWW28ZjM/OzhYACD/99JNYtm/fPsHe3l64e/euIAiCMHToUGH27Nni+urqakGlUgmbNm0SBOH+8Xp6egrV1dVizOzZs4Xf/va3evt7+NzU+vvf/y54eXkJFRUVBttZVFQkABDOnDkjln355ZeCo6OjwXhDTGlnrR49egj/8z//I76+deuWYGNjI3z33XcGY435m3744YdCUFBQvTGurq7CF198Ib5WKpXC3r17xdenT58WAAgFBQUGt3/llVeEQYMG6ZXXdf6JWhqvyMloTz31FPLy8nDhwgUAQGpqKvz9/ZGamgoA+Pbbb9G9e3f4+/vrbWtlZYXS0lL4+PiIVzgvvPCCXtzixYuxZcsW/Otf/8KtW7cwf/58jBgxApcuXTLYpvj4eJw6dQqnTp3Cf//7X5w5cwbXrl2TxPTr1w/vvvtuUw8fJ0+exIABAyRlAwYMQFZWVp3x9vb26NOnjyT+zp07+O9//2uwTktLSzz++ONinSdPnkT//v1haWkpqaOufRpy4MAB9O7dGy+//DLc3Nzg7++PxMREVFVVAQC6dOmC1157DR9//DHu3r2L4uJibNy4EX/84x+N3oep7SwpKcHly5clx+7s7IxevXqZdGwPS0tLQ3BwcJ3rs7KyUFxcLLmF8re//Q2bN29GcXEx7ty5g7///e8YOnQoXF1d9bbX6XTYsmULXnvttUa3kai5MZGT0Tp37ozBgwdj//79qKqqwsGDB7FkyRLs378fALB//34888wzTdrH+++/j2XLluHRRx+FpaUl/vCHP+CJJ57AZ599phdbU1ODzZs3Y9GiRejWrRscHBzw/vvvi12itU6dOoU5c+Y0qV3A/Q9xZ2dnSZmzszN0Op1J8bXrjKnT1H0aUlBQgG+//RbBwcG4du0adu/ejc2bN2PZsmVizMiRI5GSkgJHR0e4uLggOztbsr4hjTk3tTHGbtOQZcuW4YcffsCSJUsMrtdqtXjxxRfxxhtvSL5sajQa5OTkwMXFBZ07d8a+ffuwbt06g3Vs3rwZtra2GDVqVKPaSNQSmMjJJM888wxSU1ORkZEBX19fREdH4+eff0ZBQQFSU1PFRP7OO+9IBgcZIz8/HzqdDn/4wx/g7OwsLocOHdK7ygaAmzdvoqKiAn5+fmJZ586d4ebm1uTjfOyxx8S2v/rqqwAApVKJkpISSVxxcTGUSqXBOuqKr11nTJ2m7tOQzp07o1u3bpg5cyZsbW3x2GOP4bXXXsPu3bsB3B/spdFo8Ne//hV37tzB7du3ERUVhSFDhkgGjtWnMecGQJOPrdby5cuxYsUKHDhwAD4+Pnrrr127hqeeegqRkZFISkqS7O+3v/0tNBoNbt++jTt37mD27Nn4zW9+A61Wq1fPRx99hIkTJ0KhUJjcRqKWwkROJnnmmWdw8OBBfPXVVxg+fDisra3x5JNP4pNPPsEvv/yCYcOGAQDmzZsnGSBkDGdnZygUCqSkpKC4uFhcysrK8NFHH+nFu7u7w9bWFjk5OWJZaWkpCgoKmnycZ8+eFdteOxo5ODgYx44dk8TVPk5nSHBwMMrKynD+/HlJvJ2dHR599FGDddbU1ODEiRNincHBwThx4oQ4AKyhfRry+OOP65VZWFiI/33q1Ck4OjrilVdega2tLRwcHPDGG2/g4sWL+Omnn4zah6ntdHJyQo8ePS
THXlJSgkuXLpk8GHLhwoVYtWoVvvvuO/Tt21dv/S+//IInnngCv//977FmzRrJsV+6dAm3bt3CrFmz4ODgAFtbW7zyyiuorq7GDz/8IKnn4MGDOHfuHCZPnmxS+4hanLlv0pO8VFVVCU5OToKTk5OQmpoqCIIgrF69WnBychIGDBjQ4PYPD1p6eIDQjBkzhPDwcOGnn34SampqhDt37gjfffedcOHCBYPxsbGxwqBBg4Rr164JZWVlQlxcnGBlZdUig91++OEHwc7OTvjqq6+EyspKYf369YK9vb2Qk5NT5zYajUb43e9+JxQVFQnXr18XBg4cKLz22mvi+s8++0xwcXERMjIyhPLycmHRokWCSqUSdDqdIAiCUFJSInTt2lVYtGiRUF5eLmRkZAguLi7C559/LtZx79494e7du8K6desEHx8f4e7du8Ldu3eFmpoaQRAEITc3V3BwcBBWrVol3Lt3T7hw4YLQs2dPYenSpYIgCEJOTo6gUCiETz75RKwrISFB6Ny5s1BSUiIIwv3zXt/HhTHtfFhSUpLQs2dP4cKFC0JpaanwyiuvCEFBQZIBc7XH4uPjI6xbt064e/euUFlZKa6fOXOm4OvrK1y6dMngPs6dOyd069ZNWLBggcH1paWlgpubm7Bw4ULh7t27wr1794R169YZHLQ5cuRIITIyss7j4WA3MhcmcjJZdHS0YGdnJ5SXlwuCIAjnz58XAAhz585tcNuGEnlVVZWwatUq4bHHHhOUSqXQtWtXITIyUhxR/XC8TqcTJkyYIHTp0qXOUeuBgYHCkiVL6myTsYlcEARh69atQq9evQSFQiEEBQWJI/ZrOTg4CP/4xz/E1zdv3hRGjhwpdO7cWXB2dhbi4uLEEeu1Vq5cKXTv3l1QKBRCWFiYkJWVJVl//PhxYfDgwYJCoRC6d+8ufPDBB5L1iYmJAgC9JTs7W4w5dOiQEBoaKtjb2wu+vr7CokWLhKqqKnH9119/LQwePFhwdnYWunTpIjz55JPCoUOHxPULFy4Uhg4dWu+5aaidkZGRwiuvvCK+rq6uFubMmSO4u7sL9vb2wvDhwyVtFgTB4HHV/q1ycnIEAIKNjY3g4OAgWWpNmDBBAKC3/sH3w5EjR4Rhw4YJLi4u4hfS3bt3S9qRl5cnWFtbC3v27Knz+JnIyVwsBOGhkUFEHcykSZOwdetWWFtb4/vvv29wUpiOaMiQIVi5ciUGDRpk7qa0SY888ghu3LgBLy8v8akOotbCRE5ERCRjHOxGREQkY0zkREREMsZETkREJGNM5ERERDLGRE5ERCRjnczdgPaopqYG169fR+fOnSWzSBERyZUgCLh9+za8vLwkP47TWOXl5aisrGzUtjY2Npwm9wFM5C3g+vXr8Pb2NncziIia3ZUrVwz+PrwpysvL4WdvB20jH35WqVTIzs5mMv9/TOQtoHPnzgDuv+Eb8wMQRERtjU6ng7e3t/j51hSVlZXQCsAVB2soTey01AmAt1aLyspKJvL/x0TeAmq705VKJRM5EbUrzXm7UGkBKE2uj3OYPYyD3YiIiGSMiZyIiEjGmMiJiIhkjImciIhIxpjIiYiIZIyJnIiI2r21a9fC19cXCoUCarUaR44cqTd+x44dCAgIgEKhQFBQEPbt2ydZv3PnTgwfPhyurq6wsLBAVlaWZH1RURGmTZuG3r17w87ODj4+Pnj99ddRUlIiicvNzUVUVBTs7e3RtWtXzJo1C1VVVSYdGxM5ERG1a9u3b0d8fDwSExNx/PhxBAcHQ6PR4MaNGwbjDx8+jDFjxiA2NhYnTpxAdHQ0oqOjcebMGTGmrKwMQ4YMwXvvvWewjuvXr+P69etYvnw5zpw5g40bNyIlJQWxsbFiTHV1NaKiolBZWYnDhw9j06ZN2LhxIxISEkw7QIGaXUlJiQBAKCkpMXdTiIiaRXN+rol1OVoLQmcbk5YSR2uT2zFo0CBhypQp4uvq6mrBy8tLSEpKMhj/pz/9SYiKipKUqdVq4ZVXXtGLzc7OFgAIJ06caLAdn3/+uWBjYyPcu3dPEARB2Ldvn2BpaSlotVox5qOPPhKUSqVQUVFhzKEJgiAIvCInIiLZ0el0kqWiosJgXGVlJTIzMxERESGWWVpaIiIiAunp6Qa3SU9Pl8QDgEajqTPeWCUlJVAqlejUqZO4n6CgIHh4eEj2o9PpcPbsWaPr5cxuRERkHt5OgJWJ15PVNcC5Ar3fs0hMTMTChQv1wgsKClBdXS1JlgDg4eGB8+fPG9yFVqs1GK/Vak1r60PtWLx4MeLi4hrcT+06YzGRExGR7Dz8Wxa2trZmbE39dDodoqKiEBgYaPDLRlMxkRMRkewY+1sWbm5usLKyQn5+vqQ8Pz8fKpXK4DYqlcqk+Prcvn0bkZGR6Ny5M3bt2gVra2vJfh4ePV+7X1P2xXvkRETUbtnY2CA0NBRpaWliWU1NDdLS0hAWFmZwm7CwMEk8AKSmptYZXxedTofhw4fDxsYGe/bs0fu1trCwMJw+fVoyej41NRVKpRKBgYFG74dX5ERE1K7Fx8dj/PjxGDBgAAYNGoRVq1ahrKwMEydOBADExMSgW7duSEpKAgBMnz4dQ4cOxYoVKxAVFYVt27bh2LFjWLdunVhnUVERcnNzcf36dQDAhQsXANy/klapVGISv3PnDv7xj3+Ig/IAwN3dHVZWVhg+fDgCAwMxbtw4LF26FFqtFvPnz8eUKVNMulXARE5ERO3aqFGjcPPmTSQkJECr1SIkJAQpKSniwLLc3FxYWv7aQR0eHo6tW7di/vz5mDdvHvz9/bF792707dtXjNmzZ4/4RQAARo8eDeDXQXfHjx9HRkYGAKBXr16S9mRnZ8PX1xdWVlbYu3cvJk+ejLCwMDg4OGD8+PFYtGiRScdnIQgCf9y1mel0Ojg5OYmPGhARyV1zfq6JdfVxg9LEUeu66ho4nSvg5+sDeI+ciIhIxpjIiYiIZIyJnIiISMaYyImIiGSMo9aJiMg8ujsB1lambXOvGjhX0DLtkSlekRMREckYEzkREZGMMZETERHJGBM5ERGRjDGRExERyRgTORERkYwxkRMREckYEzkREZGMMZETERHJGBM5ERGRjMk+ka9duxa+vr5QKBRQq9U4cuRInbFPPfUULCws9JaoqCgxZsKECXrrIyMjW+NQiIiITCbruda3b9+O+Ph4JCcnQ61WY9WqVdBoNLhw4QK6du2qF79z505UVlaKrwsLCxEcHIyRI0dK4iIjI7Fhwwbxta2tbcsdBBFRR+XtBNiYONd6ZXXLtEXGZJ3IV65ciUmTJmHixIkAgOTkZHz11VdYv3495syZoxfv4uIieb1t2zbY29vrJXJbW1uoVCqj21FRUYGKigrxtU6nM+UwiIiIGk22XeuVlZXIzMxERESEWGZpaYmIiAikp6cbVcenn36K0aNHw8HBQVJ+8OBBdO3aFb1798bkyZNRWFhYbz1JSUlwcnISF29vb9MPiIiIqBFkm8gLCgpQXV0NDw8PSbmHhwe0Wm2D2x85cgRnzpzByy+/LCmPjIzE5
s2bkZaWhvfeew/fffcdnn32WVRX192dM3fuXJSUlIjLlStXGndQREREJpJ113pTfPrppwgKCsKgQYMk5aNHjxb/OygoCP369cMjjzyCgwcPYtiwYQbrsrW15X10IiIyC9lekbu5ucHKygr5+fmS8vz8/Abvb5eVlWHbtm2IjY1tcD89e/aEm5sbfv755ya1l4iIqCXINpHb2NggNDQUaWlpYllNTQ3S0tIQFhZW77Y7duxARUUF/vznPze4n6tXr6KwsBCenp5NbjMREVFzk20iB4D4+Hh8/PHH2LRpE86dO4fJkyejrKxMHMUeExODuXPn6m336aefIjo6Gq6urpLy0tJSzJo1Cz/++CNycnKQlpaGESNGoFevXtBoNK1yTERERKaQdSIfNWoUli9fjoSEBISEhCArKwspKSniALjc3Fzk5eVJtrlw4QL+85//GOxWt7KywqlTp/D888/j0UcfRWxsLEJDQ/H999/zHjgRkYyZMnkYcL/nNiAgAAqFAkFBQdi3b59k/c6dOzF8+HC4urrCwsICWVlZenWUl5djypQpcHV1haOjI1544QW928GGJinbtm2bScdmIQiCYNIW1CCdTgcnJyeUlJRAqVSauzlERE3WnJ9rYl1/eRxKEyeE0VVWw2n9cZPasX37dsTExEgmD9uxY0edk4cdPnwYTz75JJKSkvC73/0OW7duxXvvvYfjx4+jb9++AID//d//RXZ2Nry8vDBp0iScOHECISEhknomT56Mr776Chs3boSTkxOmTp0KS0tL/PDDD2KMhYUFNmzYIJlB1NnZGQqFwuhzwkTeApjIiai9kXMiV6vVGDhwINasWQPg/ngqb29vTJs2zeDkYaNGjUJZWRn27t0rlg0ePBghISFITk6WxObk5MDPz08vkZeUlMDd3R1bt27Fiy++CAA4f/48+vTpg/T0dAwePBjA/US+a9cuREdHm3IaJGTdtU5ERB2TTqeTLA/Orvmgxkwelp6eLokHAI1GY/RkYwCQmZmJe/fuSeoJCAiAj4+PXj1TpkyBm5sbBg0ahPXr18PU6+sO+xw5ERGZmZcSUJiYhsqrAEBvBs3ExEQsXLhQL7y+ycPOnz9vcBdarbbRk409WIeNjQ2cnZ3rrWfRokV4+umnYW9vj/379+O1115DaWkpXn/9daP3xURORESyc+XKFUnXulwHJC9YsED87/79+6OsrAzLli0zKZGza52IiGRHqVRKlroSeWMmD1OpVI2abOzhOiorK1FcXGxSPWq1GlevXq3zVoEhTORERNRuNWbysLCwMEk8AKSmpjY42diDQkNDYW1tLannwoULyM3NrbeerKwsdOnSxaQeBnatExFRuxYfH4/x48djwIABGDRoEFatWqU3eVi3bt2QlJQEAJg+fTqGDh2KFStWICoqCtu2bcOxY8ewbt06sc6ioiLk5ubi+vXrAO4naeD+lbhKpYKTkxNiY2MRHx8PFxcXKJVKTJs2DWFhYeKI9S+//BL5+fkYPHgwFAoFUlNT8c477+CNN94w6fiYyImIqF0bNWoUbt68iYSEBGi1WoSEhOhNHmZp+WsHdXh4OLZu3Yr58+dj3rx58Pf3x+7du8VnyAFgz5494hcB4Ncf3Hpw0N37778PS0tLvPDCC6ioqIBGo8H//M//iNtYW1tj7dq1+Otf/wpBENCrVy+sXLkSkyZNMun4+Bx5C+Bz5ETU3rTIc+Tzn4LSxFHruvIqOL19kJ+vD+A9ciIiIhljIiciIpIxJnIiIiIZYyInIiKSMSZyIiIiGePjZ0REZB4qJWBnbdo2d++1TFtkjFfkREREMsZETkREJGNM5ERERDLGRE5ERCRjTOREREQyxkROREQkY0zkREREMsZETkREJGNM5ERERDLGRE5ERCRjTOREREQyxrnWiYjIPDyUgL2NadvcqWyZtsiY7K/I165dC19fXygUCqjVahw5cqTO2I0bN8LCwkKyKBQKSYwgCEhISICnpyfs7OwQERGBixcvtvRhEBERNYqsE/n27dsRHx+PxMREHD9+HMHBwdBoNLhx40ad2yiVSuTl5YnL5cuXJeuXLl2K1atXIzk5GRkZGXBwcIBGo0F5eXlLHw4REZHJZJ3IV65ciUmTJmHixIkIDAxEcnIy7O3tsX79+jq3sbCwgEqlEhcPDw9xnSAIWLVqFebPn48RI0agX79+2Lx5M65fv47du3e3whERERGZRraJvLKyEpmZmYiIiBDLLC0tERERgfT09Dq3Ky0tRY8ePeDt7Y0RI0bg7Nmz4rrs7GxotVpJnU5OTlCr1fXWWVFRAZ1OJ1mIiIhag2wHuxUUFKC6ulpyRQ0AHh4eOH/+vMFtevfujfXr16Nfv34oKSnB8uXLER4ejrNnz6J79+7QarViHQ/XWbvOkKSkJLz11lt65dMGC7CxEu6316dG2n6faunr7g+99rwneV3YVfq6i7t0wIe7i7Tr31Upfe3ucFe6XiF9DQDuncqk+8Ad/ZiqMr0yt/LbemWuZfpl7jr9Mrciw196XAr1YwEA2hLD5QCQV8+6q/WsA4DcBtZLYouNj33Q5UZuZw49nBu3nY8J2/k41b++ez3rPetZpzK8rsi1s8HyAhelXtlNpX5soYN+WYFCv+xmJwe9sluwl8ZU6ccUlttJY8qkrwt10vE8N4ukr2/dlA4ac71hLXntlvfQ66tW0te5D7+WXud1ztFrMrURsr0ib4ywsDDExMQgJCQEQ4cOxc6dO+Hu7o6///3vTap37ty5KCkpEZcrV640U4up1TSUVJpDY5Nja2uNdrbG+SbqIGSbyN3c3GBlZYX8/HxJeX5+PlQqlVF1WFtbo3///vj5558BQNzO1DptbW2hVColCxERUWuQbSK3sbFBaGgo0tLSxLKamhqkpaUhLCzMqDqqq6tx+vRpeHp6AgD8/PygUqkkdep0OmRkZBhdp1w83I3X7tXXTWsqU7qPH9bWr8qb0r6mnJeHNeffSwY63P+PZmDKo8oAsGPHDgQEBEChUCAoKAj79u2TrDfmUeXjx4/jmWeegbOzM1xdXREXF4fS0lJJTG5uLqKiomBvb4+uXbti1qxZqKqqMunYZJvIASA+Ph4ff/wxNm3ahHPnzmHy5MkoKyvDxIkTAQAxMTGYO3euGL9o0SLs378fv/zyC44fP44///nPuHz5Ml5++WUA90e0z5gxA2+//Tb27NmD06dPIyYmBl5eXoiOjjbHIZrVw/f1WoKh+5P1quP+p6y01WTeVttlChPfHya//xqhNf4/ovqZ+qjy4cOHMWbMGMTGxuLEiROIjo5GdHQ0zpw5I8Y09Kjy9evXERERgV69eiEjIwMpKSk4e/YsJkyYINZRXV2NqKgoVFZW4vDhw9i0aRM2btyIhIQEk45P1ol81KhRWL58ORISEhASEoKsrCykpKSIg9Vyc3ORl5cnxt+6dQuTJk1Cnz598Nxzz0Gn0+Hw4cMIDAwUY2bPno1p06YhLi4OAwcORGlpKVJSUvQmjqGWVdfApHrVNwDKGK1537atJc2mtqc5B7k1pBF/50a9
n6jdMPVR5Q8++ACRkZGYNWsW+vTpg8WLF+Pxxx/HmjVrABj3qPLevXthbW2NtWvXonfv3hg4cCCSk5PxxRdfiLdz9+/fj59++gn/+Mc/EBISgmeffRaLFy/G2rVrUVlp/Ax2sk7kADB16lRcvnwZFRUVyMjIgFqtFtcdPHgQGzduFF+///77YqxWq8VXX32F/v37S+qzsLDAokWLoNVqUV5ejm+++QaPPvpoax0OyUVzdCO3lWTeVtpBZIKHH/mtqKgwGNeYR5XT09Ml8QCg0WjEeGMeVa6oqICNjQ0sLX9Ns3Z292+h/Oc//xH3ExQUJHlSSqPRQKfTSR6NbojsEzmR0TrYfVfZ4t+pwyhycUSRa2fTFhdHAIC3tzecnJzEJSkpyeA+6ntUua7HirVabb3xxjyq/PTTT0Or1WLZsmWorKzErVu3MGfOHAAQe4rr2s+D+zAGEzm1GkPP5rY5pnT7toer8ubYf2t2q7cCWbxPCVeuXJE89vvgeKi24LHHHsOmTZuwYsUK2NvbQ6VSwc/PDx4eHpKr9ObARE4mMTTZBTWRuZK5ub9EtEP8/6P1PPzIr62trcG4xjyqrFKp6o039lHll156CVqtFteuXUNhYSEWLlyImzdvomfPnvXu58F9GIOJvIN4eJaolmZoFqxm0x5Grj+stZNqc+2vOR85M0YjZnRrDi36fjagtf9/bc8a86hyWFiYJB4AUlNTxXhTH1X28PCAo6Mjtm/fDoVCgWeeeUbcz+nTpyWj51NTU6FUKiWDsBsi2ylaqe0oUHQ2OE2rWXg61T9Va3PzcW78lK0P6+HcOtO4mutKvI11qzf10TND07NS2xQfH4/x48djwIABGDRoEFatWqX3qHK3bt3E++zTp0/H0KFDsWLFCkRFRWHbtm04duwY1q1bB0D6qLK/vz/8/PywYMECvUeV16xZg/DwcDg6OiI1NRWzZs3Cu+++C2dnZwDA8OHDERgYiHHjxmHp0qXQarWYP38+pkyZUmcPgyFM5GR2BS5Kg3OuF7l2rnvO9cbq7tTwvOs+TqbNvd6cWjqZt/Xu9BYY6MZHz2jUqFG4efMmEhISoNVqERISoveo8oP3rcPDw7F161bMnz8f8+bNg7+/P3bv3o2+ffuKMbNnz0ZZWRni4uJQXFyMIUOG6D2qfOTIESQmJqK0tBQBAQH4+9//jnHjxonrrayssHfvXkyePBlhYWFwcHDA+PHjsWjRIpOOj4mcqKma86oc+DXZNmdCb4kE3trd6kRNMHXqVEydOtXguoMHD+qVjRw5EiNHjqyzvtpHletLups3b26wXT169NCbNc5UvEdOIkO/yERm1FzJt61fhXcw/P+MmhuvyIkMMWf3+oOacnXekgnc1KvxNnZ/nKg94RU5tT8NTeHZEhOOtHQ3cw9n4xOzKbFtSUN/FzONWCdq63hFTvKkcgK0beCKubXJMUHLCCeDITliIqdWdVPZGe66NvKoWnNr7kFvbRUHuVEzKeqiRKWj8Y9ZAUCpjeE51Tsydq0T1YX3dZsHzyNRi2IipzaNzwBTU7SX90+hjj+jTHVjIqc2oamzbLUZ7b3buaWOr5V/8ay13m+3YN8q+6GOjYm8FRT41Ji7CY0m2w8ic4xcp5bT0N+zBbX2POtEpmIipxbTLj4AG3N/t71elTfmuHh/vN0o9BbM3QSqAxO5GRT4VLf4PjrEPTU+O0wA3wctRM49iR0NEzkRkYkM/fIZf4uczIWJnExm6AOLP+n4kPbWvd7ejoeoHWEip47L2AFvvM/bOMaeNw48bBNa45YftQwm8jagoDv/ByLqqArL7czdBJI5JnJqv8z4yBKA9tMdbe7jaOTf0dTJYDjPOskV51qnVmfqfOtFrp3hUthO52cn6sAKOjvibmfTnrAps7BuodbIF6/IOzB26RERyR8TObUZbXqa1sYOeDN3t3RTNbb9rTVAsBHPkLfp9xlRIzCRk7w1dTIQjpg2L55/s+Eg2/aDiZyIDJN7bwJRByH7RL527Vr4+vpCoVBArVbjyJEjdcZ+/PHHeOKJJ9ClSxd06dIFERERevETJkyAhYWFZImMjGzpw6D2jAmRiFqQrBP59u3bER8fj8TERBw/fhzBwcHQaDS4ceOGwfiDBw9izJgx+Pbbb5Geng5vb28MHz4c165dk8RFRkYiLy9PXD777LPWOBwiIiKTyTqRr1y5EpMmTcLEiRMRGBiI5ORk2NvbY/369Qbjt2zZgtdeew0hISEICAjAJ598gpqaGqSlpUnibG1toVKpxKVLly6tcTjUEprzWfKONMNbU3oRmvM8mXsuACIZkG0ir6ysRGZmJiIiIsQyS0tLREREID093ag67ty5g3v37sHFxUVSfvDgQXTt2hW9e/fG5MmTUVhYWG89FRUV0Ol0kqUpCjzvNWl7aoPYvS5L7eKneAmAabdhAWDHjh0ICAiAQqFAUFAQ9u3bJ1kvCAISEhLg6ekJOzs7RERE4OLFi+L6gwcP6t2mrV2OHj0KAMjJyTG4/scffzTp2GSbyAsKClBdXQ0PDw9JuYeHB7RarVF1vPnmm/Dy8pJ8GYiMjMTmzZuRlpaG9957D9999x2effZZVFfXPcIzKSkJTk5O4uLt7d24g2qH+EFIROZm6m3Yw4cPY8yYMYiNjcWJEycQHR2N6OhonDlzRoxZunQpVq9ejeTkZGRkZMDBwQEajQbl5eUAgPDwcMkt2ry8PLz88svw8/PDgAEDJPv75ptvJHGhoaEmHZ9sE3lTvfvuu9i2bRt27doFheLXmYVGjx6N559/HkFBQYiOjsbevXtx9OhRHDx4sM665s6di5KSEnG5cuVKKxwBNRs+AiXVWr0HPO/USky9DfvBBx8gMjISs2bNQp8+fbB48WI8/vjjWLNmDYD7V+OrVq3C/PnzMWLECPTr1w+bN2/G9evXsXv3bgCAjY2N5Batq6sr/vWvf2HixImwsLCQ7M/V1VUSa21t2ux1sk3kbm5usLKyQn5+vqQ8Pz8fKpWq3m2XL1+Od999F/v370e/fv3qje3Zsyfc3Nzw888/1xlja2sLpVIpWYj0sHu91Zg6zzrJz8O3MysqKgzGNeY2bHp6uiQeADQajRifnZ0NrVYriXFycoJara6zzj179qCwsBATJ07UW/f888+ja9euGDJkCPbs2VP/gRsg20RuY2OD0NBQyUC12oFrYWFhdW63dOlSLF68GCkpKXrdG4ZcvXoVhYWF8PT0bJZ2U+O0mQ/mjjTgrTF4fup1s8rB3E1oU4rsHVHo0NmkpcjeEQDg7e0tuaWZlJRkcB+NuQ2r1Wrrja/915Q6P/30U2g0GnTv3l0sc3R0xIoVK7Bjxw589dVXGDJkCKKjo01O5rL+0ZT4+HiMHz8eAwYMwKBBg7Bq1SqUlZWJ33hiYmLQrVs38Q/83nvvISEhAVu3boWvr694wh0dHeHo6IjS0lK89dZbeOGFF6BSqXDp0iXMnj0bvXr1gkajMdtxEpEBTZ3Vj2TtypUrkt5PW1tbM7amflevXsXXX3+Nzz//XFLu5uaG+Ph48fX
AgQNx/fp1LFu2DM8//7zR9cv2ihwARo0aheXLlyMhIQEhISHIyspCSkqK+C0pNzcXeXl5YvxHH32EyspKvPjii/D09BSX5cuXAwCsrKxw6tQpPP/883j00UcRGxuL0NBQfP/99236TdLhyekDva13r7f19hH9v4dvZ9b1Gd2Y27Aqlare+Np/ja1zw4YNcHV1NSo5q9Xqem/lGiLrK3IAmDp1KqZOnWpw3cMD1HJycuqty87ODl9//XUztYyIiMztwduw0dHRAH69DVtX7ggLC0NaWhpmzJghlqWmpoq3bf38/KBSqZCWloaQkBAA9+/ZZ2RkYPLkyZK6BEHAhg0bEBMTY9QgtqysLJNv5co+kRMRNdVNpfFjMAoUbWS8BhnN1Nuw06dPx9ChQ7FixQpERUVh27ZtOHbsGNatWwcAsLCwwIwZM/D222/D398ffn5+WLBgAby8vMQvC7UOHDiA7OxsvPzyy3rt2rRpE2xsbNC/f38AwM6dO7F+/Xp88sknJh0fEzm1f55OQF6JuVvxKx9nILfY3K3Q19a61VtgVjf+hGnHNGrUKNy8eRMJCQnQarUICQnRuw1rafnrnebw8HBs3boV8+fPx7x58+Dv74/du3ejb9++Yszs2bNRVlaGuLg4FBcXY8iQIUhJSZE8zgzcH+QWHh6OgIAAg21bvHgxLl++jE6dOiEgIADbt2/Hiy++aNLxWQiCIJi0BTVIp9PByckJMX2KYWOlRIFPjWR9gY90cpmHf07Q0MxuhV2lZV3cKyWv3V3K9bZxVUrL3B3u6sco9MvcO5VJ94U7+jFVZXplbuW39coAwLXMcLm7znC5W5HhmfFcCg3HAwC0DSTqhhL5VRMSfW4zfClor4nc2FHrxjxD3lAir2dsRF1POdSVyOu6Ijc0oVFdV+Q3O+mPSL8Fe2mMgVHrheV2+nWVScsKdQr9mCJp2a2bNpLXrjek3bhuefrdum5XraSvcx9+/Wtyq6zWYfM5Z5SUlDT5Edvaz8jPrq2EvVL/+OtzR3cXY7rFN0s72gtZD3ajtkXWXY6mTE7CR6wM43khMgsmciJqe93q1Oz4Gw7tFxM5kTkwcRJRM2EiJyJqImPujxO1FCZyIiIiGePjZ0QdHbv5yUwKFY64ozCt5+JupVXDQR0Mr8iJzIUJlIiaARM5ERGRjDGRU6MYGtzTofCZaSmeDyKzYSInIiKSMSZyahAfo2lB5r5Pbu79N5acfrqWqIUxkRNR6zJlOtxGqGuedaL2iomc2ofmuEJr4QRDRNQSmMiJqO1pgZ8wJWqvmpTI7927hytXruDChQsoKipqrjYRGcQuUyIifSYn8tu3b+Ojjz7C0KFDoVQq4evriz59+sDd3R09evTApEmTcPTo0ZZoqyw9/FvkZCZt+QrPXAPO5DrQjYgkTErkK1euhK+vLzZs2ICIiAjs3r0bWVlZ+O9//4v09HQkJiaiqqoKw4cPR2RkJC5evNhS7SYiIiKYONf60aNHcejQITz22GMG1w8aNAh/+ctfkJycjA0bNuD777+Hv79/szSUiIjal4JODlB0Mu3x1vJOFi3UGvkyKZF/9tlnRsXZ2tri1VdfbVSDiIiIyHgctU5ERCRjjf4ZU51Ohw0bNkCr1cLPzw/BwcEICgqCvT1nASMiImotjU7kf/zjH3Hy5EkMHDgQX375JS5cuAAAeOSRRxAcHIzt27c3WyOJiIjIsEYn8vT0dBw8eBADBw4EAFRUVOD06dPIysrCyZMnm62BRNTG8ZfPiMyq0Ym8X79+6NTp181tbW0xYMAADBgwoFkaRkRERA1r9GC3pUuXIiEhARUVFc3ZHiJqDZwMhqjdaHQi9/X1hU6nQ2BgIObNm4c9e/bgypUrzdk2o6xduxa+vr5QKBRQq9U4cuRIvfE7duxAQEAAFAoFgoKCsG/fPsl6QRCQkJAAT09P2NnZISIighPbkGHN2aXMxErUosyVK7766iuo1WrY2dmhS5cuiI6OlqzPzc1FVFQU7O3t0bVrV8yaNQtVVVUmHVujE/kLL7yAnJwc/OY3v8Hhw4cxfvx4+Pr6wt3dHcOHD29stSbZvn074uPjkZiYiOPHjyM4OBgajQY3btwwGH/48GGMGTMGsbGxOHHiBKKjoxEdHY0zZ86IMUuXLsXq1auRnJyMjIwMODg4QKPRoLy8vFWOiYiImpe5csUXX3yBcePGYeLEiTh58iR++OEHvPTSS+L66upqREVFobKyEocPH8amTZuwceNGJCQkmHR8FoIgCCaeEwCAvb090tPTERwcLJbl5OTgxIkTOHXqFBITExtTrUnUajUGDhyINWvWAABqamrg7e2NadOmYc6cOXrxo0aNQllZGfbu3SuWDR48GCEhIUhOToYgCPDy8sLMmTPxxhtvAABKSkrg4eGBjRs3YvTo0Ua1S6fTwcnJCTF9iqHzc9RbX+BTLX3d/aHXnvf0tinsKi3r4l4pee3uov9Fw1UpLXN3uKsfo9Avc+9UplfWBXf046r049zKb+vvo0y/DADcdYbL3Yp0BssBwKXQ8DYAAG1J3esAIK+B9QBw1YiYB+WaGF9vXcXNV1dDmrMHwNSeCWN+LrahufHr+dna+n5cp8BFabD8ptLwNoUO+uUFCv2ym50c9MpuQf9R3JtV+nGF5Xb6cWXSskKdQj+mSFp266aN5LXrDWu9bdzypGVuV62kr3Mffv3rtV5ltQ6bzzmjpKQESqXh82is2s/IhSVboFCaOLOb7g4WOo01qR3myBVVVVXw9fXFW2+9hdjYWIPt+ve//43f/e53uH79Ojw8PAAAycnJePPNN3Hz5k3Y2NgY3O5hjb4iHzhwIMrKpB/kvr6++MMf/tAqSbyyshKZmZmIiIgQyywtLREREYH09HSD26Snp0viAUCj0Yjx2dnZ0Gq1khgnJyeo1eo66wTuj9jX6XSShYiIWs7Dn7l1jdcyV644fvw4rl27BktLS/Tv3x+enp549tlnJVf16enpCAoKEpN47X50Oh3Onj1r9LlodCKfPn06Fi5ciOLi4sZW0SQFBQWorq6WnAAA8PDwgFarNbiNVqutN772X1PqBICkpCQ4OTmJi7e3t8nHQ0TU0RTDDrdgb9JSjPu9Fd7e3pLP3aSkJIP7MFeu+OWXXwAACxcuxPz587F371506dIFTz31lPiz33Xt58F9GKPRj5+9+OKLAAB/f3/84Q9/gFqtRv/+/dG3b1+juwPai7lz5yI+Pl58rdPpmMyJiFrQlStXJF3rtra2ZmyNvpqa+z9h/be//Q0vvPACAGDDhg3o3r07duzYgVdeeaXZ9tXoRJ6dnY2TJ0+KE8C88847yMnJQadOndC7d2+cOnWq2RppiJubG6ysrJCfny8pz8/Ph0qlMriNSqWqN7723/z8fHh6ekpiQkJC6myLra1tm3sTkcy05v1xonZAqVQadY/cXLmitjwwMFBcb2tri549eyI3N1es5+HR87X7ratthjS6a71Hjx54/vnnkZCQgC+++AKXLl1CcXExvvnmm2b9plEXGxsbhIaGIi0tTSyrqalBWl
oawsLCDG4TFhYmiQeA1NRUMd7Pzw8qlUoSo9PpkJGRUWed1IE150C31sYvDtRBmCtXhIaGwtbWVpy+HADu3buHnJwc9OjRQ9zP6dOnJaPnU1NToVQqJV8AGmLSFXlubi58fHzqXN+5c2c88cQTeOKJJwAA165dQ7du3UzZhUni4+Mxfvx4DBgwAIMGDcKqVatQVlaGiRMnAgBiYmLQrVs38d7J9OnTMXToUKxYsQJRUVHYtm0bjh07hnXr1gEALCwsMGPGDLz99tvw9/eHn58fFixYAC8vL71n/4iISB7MkSuUSiVeffVVJCYmwtvbGz169MCyZcsAACNHjgQADB8+HIGBgRg3bhyWLl0KrVaL+fPnY8qUKSb18pqUyAcOHIjo6Gi8/PLL4hzrDyspKcHnn3+ODz74AHFxcXj99ddN2YVJRo0ahZs3byIhIQFarRYhISFISUkRBwvk5ubC0vLXTofw8HBs3boV8+fPx7x58+Dv74/du3ejb9++Yszs2bNRVlaGuLg4FBcXY8iQIUhJSYFCof/4BxHhfs8E51unNsxcuWLZsmXo1KkTxo0bh7t370KtVuPAgQPo0qULAMDKygp79+7F5MmTERYWBgcHB4wfPx6LFi0y6fhMeo68sLAQS5Yswfr166FQKBAaGgovLy8oFArcunULP/30E86ePYvHH38cCxYswHPPPWdSY9oLPkf+0D7a63Pkcn2GvJa5niXnc+TSfXTg58hnlOyErVL/nNSnQleGVU5/bJZ2tBcm3SN3dXXFypUrkZeXhzVr1sDf3x8FBQXitHRjx45FZmYm0tPTO2wSJyIiak2NGrVuZ2cHtVotPoJGRERE5tHoUesBAQFISEjA3bv6XbNERETUOhqdyFNTU/H111+jV69e2LhxYzM2iYiIiIzV6EQeHh6OjIwMJCUlYcGCBQgNDcX333/fnG0jIiKiBjQ6kdeKiYnBhQsXEBUVhWeffRYvvvgisrOzm6NtRM3HmBHr5mKuyVk4KQyZWUGVA26auBQYGPnf0TU5kdcaPnw4Xn75ZezatQuBgYGYPXs2SktLm6t6ovofPaP2pS1/8SJqYxo913pycjKOHj2Ko0eP4ty5c7C0tETfvn3x6quvIjg4GNu2bUNgYCB27tyJAQMGNGebiYiI6P81OpEvWbIEarUaMTExGDx4MEJDQ2Fn9+skBnFxcXjnnXcwYcIEye+vdjRuuZYo8KkxdzPav4YmgzGGKZPBUJvlUni73klhiNqbRifyK1euNBgTGxuLBQsWNHYX1EYYmtWNqNGulhg3u1t9tCX1zu5G1JE02z1yQ7p27YoDBw605C6I5M3cA87MvX8iarIWTeQWFhYYOnRoS+6CzMTQPOtERNT6WjSRE7Vbcv4t8pbA80FkNkzkREQtgGNLqLUwkRMRNRFvNZE5MZETmUtbGWjWVtpBRI3S6MfPiIiImqKoXAFrG7uGAx9wr7y6hVojX7wiJyIikjEmciIiIhljIicJ906NH7TjVs4fNTFaW7sv3dbaQ0RGYyKnNsWtSGeeHZsyzzqfmTasg5wXfmGltoaJnMzCXccPww7NTD9QU9cXxdZ6Pzalx4uoLkzk1P61td+2bqvd2G2tXW3t70bURjGRExERyRgTORERtXtr166Fr68vFAoF1Go1jhw5Um/8jh07EBAQAIVCgaCgIOzbt0+yXhAEJCQkwNPTE3Z2doiIiMDFixcN1lVRUYGQkBBYWFggKytLLM/JyYGFhYXe8uOPP5p0bEzkJH9aGXXBtrXua6IOYPv27YiPj0diYiKOHz+O4OBgaDQa3Lhxw2D84cOHMWbMGMTGxuLEiROIjo5GdHQ0zpw5I8YsXboUq1evRnJyMjIyMuDg4ACNRoPy8nK9+mbPng0vL6862/fNN98gLy9PXEJDQ006PiZyIvqVnL5oyOkLHJnVypUrMWnSJEycOBGBgYFITk6Gvb091q9fbzD+gw8+QGRkJGbNmoU+ffpg8eLFePzxx7FmzRoA96/GV61ahfnz52PEiBHo168fNm/ejOvXr2P37t2Suv79739j//79WL58eZ3tc3V1hUqlEhdra2uTjo+JnGTBpbCNjHLvII9YNVobOT9t5v1CLUan00mWiooKg3GVlZXIzMxERESEWGZpaYmIiAikp6cb3CY9PV0SDwAajUaMz87OhlarlcQ4OTlBrVZL6szPz8ekSZPwv//7v7C3t6/zWJ5//nl07doVQ4YMwZ49exo++IfINpEXFRVh7NixUCqVcHZ2RmxsLEpLS+uNnzZtGnr37g07Ozv4+Pjg9ddfR0mJ9IPH0P2Kbdu2tfThEBF1OAV37HCzzLSl4M79udm9vb3h5OQkLklJSYb3UVCA6upqeHh4SMo9PDyg1WoNbqPVauuNr/23vhhBEDBhwgS8+uqrGDBggMH9ODo6YsWKFdixYwe++uorDBkyBNHR0SYnc9n+aMrYsWORl5eH1NRU3Lt3DxMnTkRcXBy2bt1qMP769eu4fv06li9fjsDAQFy+fBmvvvoqrl+/jn/+85+S2A0bNiAyMlJ87ezs3JKH0q65lsngyqi1nmmWS7d1bjHg49zy+7laAnR3avn9NIFr2W0UOnQ2dzPIgCtXrkCpVIqvbW1tzdgafR9++CFu376NuXPn1hnj5uaG+Ph48fXAgQNx/fp1LFu2DM8//7zR+5JlIj937hxSUlJw9OhR8ZvOhx9+iOeeew7Lly83OKigb9+++OKLL8TXjzzyCJYsWYI///nPqKqqQqdOv54KZ2dnqFQqo9tTUVEh6dbR6cw0OxkRUQehVColibwubm5usLKyQn5+vqQ8Pz+/zs95lUpVb3ztv/n5+fD09JTEhISEAAAOHDiA9PR0vS8YAwYMwNixY7Fp0yaD+1ar1UhNTW3wuB4ky6719PR0ODs7S7orIiIiYGlpiYyMDKPrKSkpgVKplCRxAJgyZQrc3NwwaNAgrF+/HoIg1FtPUlKSpIvH29vbtAMiIqIWYWNjg9DQUKSlpYllNTU1SEtLQ1hYmMFtwsLCJPEAkJqaKsb7+flBpVJJYnQ6HTIyMsSY1atX4+TJk8jKykJWVpb4+Nr27duxZMmSOtublZUl+XJgDFlekWu1WnTt2lVS1qlTJ7i4uNR5z+NhBQUFWLx4MeLi4iTlixYtwtNPPw17e3vs378fr732GkpLS/H666/XWdfcuXMl3SM6nY7JvK1oztnBmjKQSy7d6rWa0r2eWwL4NFOXeV4J4Nm2u9+p7YuPj8f48eMxYMAADBo0CKtWrUJZWRkmTpwIAIiJiUG3bt3E++zTp0/H0KFDsWLFCkRFRWHbtm04duwY1q1bB+D+WKoZM2bg7bffhr+/P/z8/LBgwQJ4eXkhOjoaAODj4yNpg6OjI4D7vcHdu3cHAGzatAk2Njbo378/AGDnzp1Yv349PvnkE5OOr00l8jlz5uC9996rN+bcuXNN3o9Op0NUVBQCAwOxcOFCyboFCxaI/92/f3+UlZVh2bJl9SZyW1vbNnd/hoiI7hs1ahRu3ryJhIQEaLVahISEICUlR
RyslpubC0vLXzuow8PDsXXrVsyfPx/z5s2Dv78/du/ejb59+4oxs2fPRllZGeLi4lBcXIwhQ4YgJSUFCoXCpLYtXrwYly9fRqdOnRAQEIDt27fjxRdfNKmONpXIZ86ciQkTJtQb07NnT6hUKr0H+auqqlBUVNTgve3bt28jMjISnTt3xq5duxp8Xk+tVmPx4sWoqKhgsiYikqmpU6di6tSpBtcdPHhQr2zkyJEYOXJknfVZWFhg0aJFWLRokVH79/X11btNO378eIwfP96o7evTphK5u7s73N3dG4wLCwtDcXExMjMzxRlwDhw4gJqaGqjV6jq30+l00Gg0sLW1xZ49e4z65pSVlYUuXbq0yyTuqrhr7iZ0DHLrVq/VWqPXOzhXxV0UltuZuxkkY7Ic7NanTx9ERkZi0qRJOHLkCH744QdMnToVo0ePFkesX7t2DQEBAeJ8ujqdDsOHD0dZWRk+/fRT6HQ6aLVaaLVaVFdXAwC+/PJLfPLJJzhz5gx+/vlnfPTRR3jnnXcwbdo0sx1rR9Ko3yJv6uxeZvo5Tfp/TT3/jfj7m+0374laSJu6IjfFli1bMHXqVAwbNgyWlpZ44YUXsHr1anH9vXv3cOHCBdy5cwcAcPz4cXFEe69evSR1ZWdnw9fXF9bW1li7di3++te/QhAE9OrVS5zajzq4NjJjmWw054A3gquyHIU60+69Usch20Tu4uJS5+QvgP79iKeeeqrBx8giIyMlE8HImatSf+L+tsJdJ4NJYpqLXLvVa3Wg7nV33W3cVHLyF5IfWXatU8fCebOpMfi+oY5Ctlfk7ZlbnjUKPO+Zuxny15zPkDeG3K/Ga5n7qpzPkrdbRbdtYSWYdsugupSfjQ/jFTkRUTNwryozdxOog2IiJ5MZ+sByK5dhN6axI6Y50K1xjD1vMnxyQJbvd2q3mMiJWkJ76Vav1d6Oh6gdYSInImohXXDH3E2gDoCJnOrVpj+ImjoZDLUPLfw+cC1jNzq1bUzk1GLaxQdgY+6Pt9du6MYcF8cXELU4JnJqE5p92kxzP3pGpmlowFsz/z05TSu1J0zk1Ka12KQeMhwp3SbIrLeBk8JQR8BETtScZJboiEj+mMjbALerVuZuAhnC+7vNQ0bnsUP9DgC1G0zk1Kra9QdlR7ka7yjH2ULcO3EGOGpenGu9FbjlWqLAp+aB11Yo8Kk2Y4sMk9UHTEd/9OxysX5ZD+fWbkXboS0BVO1nPnZ3h7u4WWZn7ma0uMJbClhUmjbXulDGudYfxkRO7U9DI5zlOtDNUPKua70ck/rVEqB7PcmYP55CZBC71okMMfW+bkt2N18ubjiJN8c2pjD1eGV0n5xIbnhF3kG4O9w1dxPIVM2RiGvrkOMVOhEZhVfk1GbJ5hnglrgab+6r6Za4OpfJoLe63kdNnRSGv4BGbQUTOTVZUz/QWnWWLTncH2+pLvGW7GpvLm3072PsdMPt+TfJXa9YmLsJVAcmcjKJsR9Usp5n3Zz3c1s62ZozmTfHeeXUu0R6mMhJfup79Ky1P+ibs3u5tZJsc+6nLXWvd/RHEpuZWy7Tg1zwL0XUFrT2lbIcutnNpF1PWtSBrV27Fr6+vlAoFFCr1Thy5Ei98Tt27EBAQAAUCgWCgoKwb98+yXpBEJCQkABPT0/Y2dkhIiICFy9elMQ8//zz8PHxgUKhgKenJ8aNG4fr169LYk6dOoUnnngCCoUC3t7eWLp0qcnHxkROrUYWH5CmdP8219WouZJqc+3XlPPQAR9D64I7Dca4Kpr/qZLCrqZNnOKW236nit6+fTvi4+ORmJiI48ePIzg4GBqNBjdu3DAYf/jwYYwZMwaxsbE4ceIEoqOjER0djTNnzogxS5cuxerVq5GcnIyMjAw4ODhAo9GgvLxcjPntb3+Lzz//HBcuXMAXX3yBS5cu4cUXXxTX63Q6DB8+HD169EBmZiaWLVuGhQsXYt26dSYdHxM5dRxtcSCVua+Mzb1/Q9ri34lkbeXKlZg0aRImTpyIwMBAJCcnw97eHuvXrzcY/8EHHyAyMhKzZs1Cnz59sHjxYjz++ONYs2YNgPtX46tWrcL8+fMxYsQI9OvXD5s3b8b169exe/dusZ6//vWvGDx4MHr06IHw8HDMmTMHP/74I+7du/8la8uWLaisrMT69evx2GOPYfTo0Xj99dexcuVKk46PibyDMuYKwJgriZYim0fPmqKtJNG20o4W1CHeTx2MTqeTLBUVFQbjKisrkZmZiYiICLHM0tISERERSE9PN7hNenq6JB4ANBqNGJ+dnQ2tViuJcXJyglqtrrPOoqIibNmyBeHh4bC2thb38+STT8LGxkaynwsXLuDWrVtGnIX/Px6jI4nautYc6NbUbvW2ljyb2p7WHPTWjH/nVn30kfTcKrDBrZsmLgX3k563tzecnJzEJSkpyeA+CgoKUF1dDQ8PD0m5h4cHtFqtwW20Wm298bX/GlPnm2++CQcHB7i6uiI3Nxf/+te/GtzPg/swBhM5mZXJH6QtOTK5A96/NYuWPM8deOR6F/dKczehVV25cgUlJSXiMnfuXHM3yaBZs2bhxIkT2L9/P6ysrBATEwNBEJp1H7JN5EVFRRg7diyUSiWcnZ0RGxuL0tLSerd56qmnYGFhIVleffVVSUxubi6ioqJgb2+Prl27YtasWaiqqmrJQ6HW0Jz3Xdvb1XittnRVzvvk1AClUilZbG1tDca5ubnBysoK+fn5kvL8/HyoVCqD26hUqnrja/81pk43Nzc8+uijeOaZZ7Bt2zbs27cPP/74Y737eXAfxpBtIh87dizOnj2L1NRU7N27F4cOHUJcXFyD202aNAl5eXni8uBQ/+rqakRFRaGyshKHDx/Gpk2bsHHjRiQkJLTkoVBH0laTeK223j4zMjTJEadpbftsbGwQGhqKtLQ0saympgZpaWkICwszuE1YWJgkHgBSU1PFeD8/P6hUKkmMTqdDRkZGnXXW7heAeD8/LCwMhw4dEge/1e6nd+/e6NKli9HHKMtEfu7cOaSkpOCTTz6BWq3GkCFD8OGHH2Lbtm16z+g9zN7eHiqVSlyUSqW4bv/+/fjpp5/wj3/8AyEhIXj22WexePFirF27FpWVHavbyliGPshkOatba3SryyVJtkY7m3q+W3g8REs/KuneqelTubq7lDccRACA+Ph4fPzxx9i0aRPOnTuHyZMno6ysDBMnTgQAxMTESLrmp0+fjpSUFKxYsQLnz5/HwoULcezYMUydOhUAYGFhgRkzZuDtt9/Gnj17cPr0acTExMDLywvR0dEAgIyMDKxZswZZWVm4fPkyDhw4gDFjxuCRRx4Rk/1LL70EGxsbxMbG4uzZs9i+fTs++OADxMfHm3R8skzk6enpcHZ2xoABA8SyiIgIWFpaIiMjo95tt2zZAjc3N/Tt2xdz587FnTu/jsxOT09HUFCQZPCBRqOBTqfD2bNn66yzoqJCbwQlSbX4M+StNdCtLc1k1ha14fPDkesd16hRo7B8+XIkJCQgJCQEWVlZSElJET/rc3NzkZeXJ8aHh4dj69at
WLduHYKDg/HPf/4Tu3fvRt++fcWY2bNnY9q0aYiLi8PAgQNRWlqKlJQUKBQKAPcvGnfu3Ilhw4ahd+/eiI2NRb9+/fDdd9+JtwGcnJywf/9+ZGdnIzQ0FDNnzkRCQoJRvcsPkuXPmGq1WnTt2lVS1qlTJ7i4uNQ70u+ll15Cjx494OXlhVOnTuHNN9/EhQsXsHPnTrHexowgTEpKwltvvdXYw6GOQC5X47UuF/OnTxvJvaoMNzs5mLsZ9JCpU6eKV9QPO3jwoF7ZyJEjMXLkyDrrs7CwwKJFi7Bo0SKD64OCgnDgwIEG29WvXz98//33DcbVp00l8jlz5uC9996rN+bcuXONrv/BbzlBQUHw9PTEsGHDcOnSJTzyyCONrnfu3LmSrhCdTgdvb298+KMFlMraXwx6eNakhmZRsmt0e1rUw+8YRwMxhso8DJQRtTIXI8sA4NGWbAjQ8P9LD7828/9DOh3wvpN520CGtalEPnPmTEyYMKHemJ49e0KlUulNrVdVVYWioiKTRvqp1WoAwM8//4xHHnkEKpVKb/5dY0YQ2tra1jlikoiIqCW1qUTu7u4Od3f3BuPCwsJQXFyMzMxMhIaGAgAOHDiAmpoaMTkbIysrCwDg6ekp1rtkyRLcuHFD7LpPTU2FUqlEYGCgiUdDRETU8mQ52K1Pnz6IjIzEpEmTcOTIEfzwww+YOnUqRo8eDS8vLwDAtWvXEBAQIF5hX7p0CYsXL0ZmZiZycnKwZ88exMTE4Mknn0S/fv0AAMOHD0dgYCDGjRuHkydP4uuvv8b8+fMxZcoUXnETEVGbJMtEDtwffR4QEIBhw4bhueeew5AhQyS/GHPv3j1cuHBBHJVuY2ODb775BsOHD0dAQABmzpyJF154AV9++aW4jZWVFfbu3QsrKyuEhYXhz3/+M2JiYuoczEBERGRuFkJzzxVH0Ol0cHJyQklJieQ5dSIiuWrOz7XaulzWXIGlnWl11dzVoWiqNz9fHyDbK3IiIiJiIiciIpI1JnIiIiIZYyInIiKSMSZyIiIiGWMiJyIikjEmciIiIhljIiciIpIxJnIiIiIZYyInIiKSsTb162dERNRxuGqtYaWwNmmb6nJrFLVQe+SKV+REREQyxkROREQkY0zkREREMsZETkREJGNM5ERERDLGRE5ERCRjTOREREQyxkRORETt3tq1a+Hr6wuFQgG1Wo0jR47UG79jxw4EBARAoVAgKCgI+/btk6wXBAEJCQnw9PSEnZ0dIiIicPHiRUnMkiVLEB4eDnt7ezg7Oxvcj4WFhd6ybds2k46NiZyIiNq17du3Iz4+HomJiTh+/DiCg4Oh0Whw48YNg/GHDx/GmDFjEBsbixMnTiA6OhrR0dE4c+aMGLN06VKsXr0aycnJyMjIgIODAzQaDcrLy8WYyspKjBw5EpMnT663fRs2bEBeXp64REdHm3R8FoIgCCZtQQ3S6XRwcnJCSUkJlEqluZtDRNRkzfm5VluX/3wtrBSm1VVdrsPFt1UmtUOtVmPgwIFYs2YNAKCmpgbe3t6YNm0a5syZoxc/atQolJWVYe/evWLZ4MGDERISguTkZAiCAC8vL8ycORNvvPEGAKCkpAQeHh7YuHEjRo8eLalv48aNmDFjBoqLi/X2ZWFhgV27dpmcvB/EK3IiIpIdnU4nWSoqKgzGVVZWIjMzExEREWKZpaUlIiIikJ6ebnCb9PR0STwAaDQaMT47OxtarVYS4+TkBLVaXWed9ZkyZQrc3NwwaNAgrF+/HqZeX3OudSIiMgvX61boZGNl0jZVlVa4CMDb21tSnpiYiIULF+rFFxQUoLq6Gh4eHpJyDw8PnD9/3uA+tFqtwXitViuury2rK8ZYixYtwtNPPw17e3vs378fr732GkpLS/H6668bXQcTORERyc6VK1ckXeu2trZmbE3jLViwQPzv/v37o6ysDMuWLTMpkbNrnYiIZEepVEqWuhK5m5sbrKyskJ+fLynPz8+HSqUyuI1Kpao3vvZfU+o0llqtxtWrV+u8VWAIEzkREbVbNjY2CA0NRVpamlhWU1ODtLQ0hIWFGdwmLCxMEg8AqampYryfnx9UKpUkRqfTISMjo846jZWVlYUuXbqY1MPArnUiImrX4uPjMX78eAwYMACDBg3CqlWrUFZWhokTJwIAYmJi0K1bNyQlJQEApk+fjqFDh2LFihWIiorCtm3bcOzYMaxbtw7A/ZHmM2bMwNtvvw1/f3/4+flhwYIF8PLykow+z83NRVFREXJzc1FdXY2srCwAQK9eveDo6Igvv/wS+fn5GDx4MBQKBVJTU/HOO++II+GNxURORETt2qhRo3Dz5k0kJCRAq9UiJCQEKSkp4mC13NxcWFr+2kEdHh6OrVu3Yv78+Zg3bx78/f2xe/du9O3bV4yZPXs2ysrKEBcXh+LiYgwZMgQpKSlQKBRiTEJCAjZt2iS+7t+/PwDg22+/xVNPPQVra2usXbsWf/3rXyEIAnr16oWVK1di0qRJJh2fbJ8jLyoqwrRp0/Dll1/C0tISL7zwAj744AM4OjoajM/JyYGfn5/BdZ9//jlGjhwJ4P43rYd99tlnes8F1ofPkRNRe9MSz5EP/stNdLIxra6qSh1+XO/Oz9cHyPaKfOzYscjLy0Nqairu3buHiRMnIi4uDlu3bjUY7+3tjby8PEnZunXrsGzZMjz77LOS8g0bNiAyMlJ8XdfUekREROYmy0R+7tw5pKSk4OjRoxgwYAAA4MMPP8Rzzz2H5cuXw8vLS28bKysrvdGEu3btwp/+9Ce9q3hnZ+cmjzwkIiJqDbIctZ6eng5nZ2cxiQNAREQELC0tkZGRYVQdmZmZyMrKQmxsrN46U2fZqaio0JtliIiIqDXI8opcq9Wia9eukrJOnTrBxcXF6Fl1Pv30U/Tp0wfh4eGS8sbMspOUlIS33nrL9AMhIiJqojZ1RT5nzhyDP+n24FLXlHqmuHv3LrZu3WrwanzBggX4zW9+g/79++PNN9/E7NmzsWzZsnrrmzt3LkpKSsTlypUrTW4jERGRMdrUFfnMmTMxYcKEemN69uwJlUql9/NzVVVVKCoqMure9j//+U/cuXMHMTExDcaq1WosXrwYFRUVdT6gb2trK9vpAYmIzMX1ihWsrU2ba/3ePdPiO4I2lcjd3d3h7u7eYFxYWBiKi4uRmZmJ0NBQAMCBAwdQU1MDtVrd4Paffvopnn/+eaP21ZhZdoiIiFpLm0rkxurTpw8iIyMxadIkJCcn4969e5g6dSpGjx4tjli/du0ahg0bhs2bN2PQoEHitj///DMOHTqEffv26dXbXLPsEBERtRZZJnIA2LJlC6ZOnYphw4aJE8KsXr1aXH/v3j1cuHABd+7ckWy3fv16dO/eHcOHD9ers7lm2SEiImotsp3ZrS3jzG5E1N60xMxuUc8UwdratLru3dPhq1QXfr4+oE2NWiciIiLTMJETERHJGBM5ERGRjDGRExERyRgTORERkYwxkRMREckYEzkREZGMyXZCGCIikjfXq5awsTLterKymtefD+MZISIikjE
mciIiIhljIiciIpIxJnIiIiIZYyInIiKSMSZyIiIiGWMiJyIikjEmciIiavfWrl0LX19fKBQKqNVqHDlypN74HTt2ICAgAAqFAkFBQdi3b59kvSAISEhIgKenJ+zs7BAREYGLFy9KYoqKijB27FgolUo4OzsjNjYWpaWlkphTp07hiSeegEKhgLe3N5YuXWrysTGRExFRu7Z9+3bEx8cjMTERx48fR3BwMDQaDW7cuGEw/vDhwxgzZgxiY2Nx4sQJREdHIzo6GmfOnBFjli5ditWrVyM5ORkZGRlwcHCARqNBeXm5GDN27FicPXsWqamp2Lt3Lw4dOoS4uDhxvU6nw/Dhw9GjRw9kZmZi2bJlWLhwIdatW2fS8VkIgiCYeE6oATqdDk5OTigpKYFSqTR3c4iImqw5P9dq64rpUwwbK9PqqqzWYfM5Z5PaoVarMXDgQKxZswYAUFNTA29vb0ybNg1z5szRix81ahTKysqwd+9esWzw4MEICQlBcnIyBEGAl5cXZs6ciTfeeAMAUFJSAg8PD2zcuBGjR4/GuXPnEBgYiKNHj2LAgAEAgJSUFDz33HO4evUqvLy88NFHH+Fvf/sbtFotbGxsAABz5szB7t27cf78eaPPCa/IiYhIdnQ6nWSpqKgwGFdZWYnMzExERESIZZaWloiIiEB6errBbdLT0yXxAKDRaMT47OxsaLVaSYyTkxPUarUYk56eDmdnZzGJA0BERAQsLS2RkZEhxjz55JNiEq/dz4ULF3Dr1i2jzwXnWiciIrNwvWIBWwsLk7apEO7He3t7S8oTExOxcOFCvfiCggJUV1fDw8NDUu7h4VHnVa9WqzUYr9VqxfW1ZfXFdO3aVbK+U6dOcHFxkcT4+fnp1VG7rkuXLgbb9zAmciIikp0rV65IutZtbW3N2BrzYtc6ERHJjlKplCx1JXI3NzdYWVkhPz9fUp6fnw+VSmVwG5VKVW987b8NxTw8mK6qqgpFRUWSGEN1PLgPYzCRExFRu2VjY4PQ0FCkpaWJZTU1NUhLS0NYWJjBbcLCwiTxAJCamirG+/n5QaVSSWJ0Oh0yMjLEmLCwMBQXFyMzM1OMOXDgAGpqaqBWq8WYQ4cO4d69e5L99O7d2+hudYCJnIiI2rn4+Hh8/PHH2LRpE86dO4fJkyejrKwMEydOBADExMRg7ty5Yvz06dORkpKCFStW4Pz581i4cCGOHTuGqVOnAgAsLCwwY8YMvP3229izZw9Onz6NmJgYeHl5ITo6GgDQp08fREZGYtKkSThy5Ah++OEHTJ06FaNHj4aXlxcA4KWXXoKNjQ1iY2Nx9uxZbN++HR988AHi4+NNOj7eIycionZt1KhRuHnzJhISEqDVahESEoKUlBRxYFlubi4sLX+9rg0PD8fWrVsxf/58zJs3D/7+/ti9ezf69u0rxsyePRtlZWWIi4tDcXExhgwZgpSUFCgUCjFmy5YtmDp1KoYNGwZLS0u88MILWL16tbjeyckJ+/fvx5QpUxAaGgo3NzckJCRInjU3Bp8jbwF8jpyI2puWeI78r44lsLUwra4KQYf3S/n5+iB2rRMREckYEzkREZGMMZETERHJGBM5ERGRjHHUeguoHT+o0+nM3BIiouZR+3nWnOOjKwTTPyMbs017x0TeAgoLCwHozwVMRCR3hYWFcHJyalIdNjY2UKlU+B9t4z4jVSqV5IdGOjo+ftYCiouL0aVLF+Tm5jb5Dd9R6HQ6eHt7682fTPXjeTMdz1njlJSUwMfHB7du3YKzs3OT6ysvL0dlZWWjtrWxsZE8r93R8Yq8BdROLODk5MQPChPVzptMpuF5Mx3PWeM8OHFKUygUCibjZsLBbkRERDLGRE5ERCRjTOQtwNbWFomJiR3693FNxXPWODxvpuM5axyet7aLg92IiIhkjFfkREREMsZETkREJGNM5ERERDLGRE5ERCRjTOTNYMmSJQgPD4e9vb3RMx4JgoCEhAR4enrCzs4OERERuHjxYss2tI0pKirC2LFjoVQq4ezsjNjYWJSWlta7zVNPPQULCwvJ8uqrr7ZSi1vf2rVr4evrC4VCAbVajSNHjtQbv2PHDgQEBEChUCAoKAj79u1rpZa2Laact40bN+q9pzraRCWHDh3C73//e3h5ecHCwgK7d+9ucJuDBw/i8ccfh62tLXr16oWNGze2eDvJMCbyZlBZWYmRI0di8uTJRm+zdOlSrF69GsnJycjIyICDgwM0Gg3Ky8tbsKVty9ixY3H27FmkpqZi7969OHToEOLi4hrcbtKkScjLyxOXpUuXtkJrW9/27dsRHx+PxMREHD9+HMHBwdBoNLhx44bB+MOHD2PMmDGIjY3FiRMnEB0djejoaJw5c6aVW25epp434P4sbw++py5fvtyKLTa/srIyBAcHY+3atUbFZ2dnIyoqCr/97W+RlZWFGTNm4OWXX8bXX3/dwi0lgwRqNhs2bBCcnJwajKupqRFUKpWwbNkysay4uFiwtbUVPvvssxZsYdvx008/CQCEo0ePimX//ve/BQsLC+HatWt1bjd06FBh+vTprdBC8xs0aJAwZcoU8XV1dbXg5eUlJCUlGYz/05/+JERFRUnK1Gq18Morr7RoO9saU8+bsf/fdhQAhF27dtUbM3v2bOGxxx6TlI0aNUrQaDQt2DKqC6/IzSA7OxtarRYRERFimZOTE9RqNdLT083YstaTnp4OZ2dnDBgwQCyLiIiApaUlMjIy6t12y5YtcHNzQ9++fTF37lzcuXOnpZvb6iorK5GZmSl5j1haWiIiIqLO90h6erokHgA0Gk2HeU8BjTtvAFBaWooePXrA29sbI0aMwNmzZ1ujubLF91rbwh9NMQOtVgsA8PDwkJR7eHiI69o7rVaLrl27Sso6deoEFxeXes/BSy+9hB49esDLywunTp3Cm2++iQsXLmDnzp0t3eRWVVBQgOrqaoPvkfPnzxvcRqvVduj3FNC489a7d2+sX78e/fr1Q0lJCZYvX47w8HCcPXsW3bt3b41my05d7zWdToe7d+/Czs7OTC3rmHhFXoc5c+boDYB5eKnrg6Eja+nzFhcXB41Gg6CgIIwdOxabN2/Grl27cOnSpWY8CupIwsLCEBMTg5CQEAwdOhQ7d+6Eu7s7/v73v5u7aURG4RV5HWbOnIkJEybUG9OzZ89G1a1SqQAA+fn58PT0FMvz8/MREhLSqDrbCmPPm0ql0ht8VFVVhaKiIvH8GEOtVgMAfv75ZzzyyCMmt7etcnNzg5WVFfLz8yXl+fn5dZ4flUplUnx71Jjz9jBra2v0798fP//8c0s0sV2o672mVCp5NW4GTOR1cHd3h7u7e4vU7efnB5VKhbS0NDFx63Q6ZGRkmDTyvS0y9ryFhYWhuLgYmZmZCA0NBQAcOHAANTU1YnI2RlZWFgBIvhC1BzY2NggNDUVaWhqio6MBADU1NUhLS8PUqVMNbhMWFoa0tDTMmDFDLEtNTUVYWFgrtLhtaMx5e1h1dTVOnz6N5557rgVbKm9hYWF6jzZ2tPdam2Lu0XbtweXLl4UTJ04Ib7
31luDo6CicOHFCOHHihHD79m0xpnfv3sLOnTvF1++++67g7Ows/Otf/xJOnToljBgxQvDz8xPu3r1rjkMwi8jISKF///5CRkaG8J///Efw9/cXxowZI66/evWq0Lt3byEjI0MQBEH4+eefhUWLFgnHjh0TsrOzhX/9619Cz549hSeffNJch9Citm3bJtja2gobN24UfvrpJyEuLk5wdnYWtFqtIAiCMG7cOGHOnDli/A8//CB06tRJWL58uXDu3DkhMTFRsLa2Fk6fPm2uQzALU8/bW2+9JXz99dfCpUuXhMzMTGH06NGCQqEQzp49a65DaHW3b98WP7cACCtXrhROnDghXL58WRAEQZgzZ44wbtw4Mf6XX34R7O3thVmzZgnnzp0T1q5dK1hZWQkpKSnmOoQOjYm8GYwfP14AoLd8++23YgwAYcOGDeLrmpoaYcGCBYKHh4dga2srDBs2TLhw4ULrN96MCgsLhTFjxgiOjo6CUqkUJk6cKPnyk52dLTmPubm5wpNPPim4uLgItra2Qq9evYRZs2YJJSUlZjqClvfhhx8KPj4+go2NjTBo0CDhxx9/FNcNHTpUGD9+vCT+888/Fx599FHBxsZGeOyxx4SvvvqqlVvcNphy3mbMmCHGenh4CM8995xw/PhxM7TafL799luDn2G152n8+PHC0KFD9bYJCQkRbGxshJ49e0o+36h18WdMiYiIZIyj1omIiGSMiZyIiEjGmMiJiIhkjImciIhIxpjIiYiIZIyJnIiISMaYyImIiGSMiZyIiEjGmMiJiIhkjImciIhIxpjIiWSisLAQXbt2RU5OTpPqGT16NFasWNE8jSIis+Nc60QyER8fj9u3b+Pjjz9uUj1nzpzBk08+iezsbDg5OTVT64jIXHhFTiQDd+7cwaefforY2Ngm19W3b1888sgj+Mc//tEMLSMic2MiJ2oDPvvsM9jZ2SEvL08smzhxIvr164eSkhLs27cPtra2GDx4sLg+JycHFhYW+OKLL/Dkk0/Czs4OAwcORG5uLr7//nsMHjwY9vb2GDZsGIqLiyX7+/3vf49t27a11uERUQtiIidqA0aPHo1HH30U77zzDgAgMTER33zzDf7973/DyckJ33//PUJDQyXbnDx5EgDw0Ucf4Z133sHhw4eRn5+PP//5z3j33XexZs0afPvttzh58iQ2bNgg2XbQoEE4cuQIKioqWucAiajFdDJ3A4gIsLCwwJIlS/Diiy9CpVLhww8/xPfff49u3boBAC5fvgwvLy/JNllZWXBxccH27dvh6uoKABg6dCj+85//4OzZs7C3twcADBw4EFqtVrKtl5cXKisrodVq0aNHj1Y4QiJqKbwiJ2ojfve73yEwMBCLFi3Crl278Nhjj4nr7t69C4VCIYk/efIk/vCHP4hJHAByc3MxatQoMYnXlvn5+Um2tbOzA3D/3jsRyRsTOVEbkZKSgvPnz6O6uhoeHh6SdW5ubrh165akLCsrC2q1WlJ28uRJyX308vJyXLhwAcHBwZK4oqIiAIC7u3tzHgIRmQETOVEbcPz4cfzpT3/Cp59+imHDhmHBggWS9f3798dPP/0kvtbpdMjJyUH//v3FsuzsbJSUlEjKTp8+DUEQEBQUJKnvzJkz6N69O9zc3FroiIiotTCRE5lZTk4OoqKiMG/ePIwZMwaLFi3CF198gePHj4sxGo0GZ8+eFa/KT548CSsrK/Tt21eMqb1n/uA976ysLDzyyCNwdHSU7PP777/H8OHDW/jIiKg1MJETmVFRUREiIyMxYsQIzJkzBwCgVqvx7LPPYt68eWJcUFAQHn/8cXz++ecA7ify3r17S+6bnzx5UnI1Xlv2cLd6eXk5du/ejUmTJrXUYRFRK+LMbkQy8dVXX2HWrFk4c+YMLC0b/x38o48+wq5du7B///5mbB0RmQsfPyOSiaioKFy8eBHXrl2Dt7d3o+uxtrbGhx9+2IwtIyJz4hU5ERGRjPEeORERkYwxkRMREckYEzkREZGMMZETERHJGBM5ERGRjDGRExERyRgTORERkYwxkRMREckYEzkREZGM/R+bijESVhvIpgAAAABJRU5ErkJggg==", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# plot result\n", - "num_cord0 = 101\n", - "num_cord1 = 101\n", - "num_cords = num_cord0 * num_cord1\n", - "print(f\"num_cords = {num_cords}\")\n", - "x, y = np.meshgrid(\n", - " np.linspace(\n", - " start=-Lx / 2, stop=Lx / 2, num=num_cord0, endpoint=True, dtype=\"float32\"\n", - " ),\n", - " np.linspace(\n", - " start=-Ly / 2, stop=Ly / 2, num=num_cord1, endpoint=True, dtype=\"float32\"\n", - " ),\n", - ")\n", - "x = x.ravel()\n", - "y = y.ravel()\n", - "# predict solution of w(x, y) on the 2D grid\n", - "w_pred = solver.predict({\"x\": x[:, None], \"y\": y[:, None]}, return_numpy=True)[\"w\"]\n", - "fig = plt.figure(100, figsize=(5, 4))\n", - "y_min = w_pred.min(axis=(0,))[0]\n", - "y_max = w_pred.max(axis=(0,))[0]\n", - "ax1 = plt.subplot(1, 1, 1)\n", - "plt.tricontourf(x, y, w_pred[:, 0], levels=30, cmap=\"rainbow\")\n", - "print(x.shape, y.shape, w_pred.shape)\n", - "cb1 = plt.colorbar()\n", - "plt.axis(\"equal\")\n", - "plt.xlabel(\"$x (m)$\")\n", - "plt.ylabel(\"$y (m)$\")\n", - "plt.title(f\"w-field: [{y_min:.6f}, {y_max:.6f}]\", fontsize=9.5)\n", - "plt.show()\n", - "# plt.savefig(\"./result.jpg\")\n", - "# print(\"saved matplotlib to: ./result.jpg\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 7.有限元计算结果比较\n", - "\n", - "通过比较,可以发现PINN方法计算结果和有限元方法计算结果基本一致。这里有限元计算所用薄板的几何参数、材料参数、载荷及边界条件和第2部分所描述薄板是一样的,有限元计算所用软件为SIPESC2022。PINN方法计算的最大挠度是12.2mm,有限元方法计算的最大挠度是12.2mm,两者的计算结果相差很小。\n", - "\"FEM_result\"\n" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "conda_py310", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.18" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 简介\n", + "\n", + "本项目来源于共创计划开发性课题CAE领域的基于飞桨+DeepXDE/PaddleScience的复杂结构受力分析。深度学习擅长数据驱动,而工程结构有各种控制方程,PINN(Physics-informed Neural Network)方法利用控制方程加速深度学习神经网络收敛,甚至在无训练数据的情况下实现无监督学习。\n", + "\n", + "板是工程结构中常见构件,板控制方程存在高阶微分,这个问题的解决可以为后续解决复杂结构问题打下良好基础。从标准教科书中可以获得薄板小挠度理论的基本方程以及相关的边界条件表达式,教科书可参考《钱伟长,叶开沅,弹性力学,科学出版社,1956》。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "薄板小挠度理论的基本方程为:\n", + "$$\n", + "\\frac{\\partial^4 w}{\\partial x^4}+2 \\frac{\\partial^4 w}{\\partial x^2 \\partial y^2}+\\frac{\\partial^4 w}{\\partial y^4}=\\frac{q}{D}\n", + "$$\n", + "\n", + "其中 $w(x,y)$ 表示薄板的挠度,即薄板在垂直载荷作用下的变形或偏移量,$x,y$ 表示薄板在平面内的坐标,$D$ 为薄板的弯曲刚度,$q$ 是作用在薄板上的面载荷,表示每单位面积上的外部载荷。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "在本问题中,矩形薄板 $x$ 方向长 $2m$,$y$ 方向宽 $1m$,板厚 $10mm$,$x$ 方向左右两边处于简支状态(可以转动但不能位移),$y$ 方向上下两边自由(没有任何约束,可以自由移动和转动)。\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "左右两边 $(x=-1 \\mid x=+1)$ 为简支边界条件,因此挠度 $w$ 和弯矩 $M_x$ 都为 $0$ :\n", + "\n", + "$$\n", + "(w)_{x=-1 \\mid x=+1}=0, \\quad\\left(M_x\\right)_{x=-1 \\mid x=+1}=0\n", + "$$\n", + "\n", + "\n", + "由于 $M_x=-D\\left(\\frac{\\partial^2 w}{\\partial x^2}+\\mu \\frac{\\partial^2 w}{\\partial y^2}\\right)$, 且 $\\frac{\\partial^2 w}{\\partial y^2}=0$, 所以简支边界条件可化简为:\n", + "\n", + "$$\n", + "(w)_{x=-1 \\mid x=+1}=0, \\quad\\left(\\frac{\\partial^2 w}{\\partial x^2}\\right)_{x=-1 \\mid x=+1}=0\n", + "$$\n", + "\n", + "\n", + "上下两边 $(y=-0.5 
\\mid y=+0.5)$ 为自由边界条件, 弯矩、扭矩、横向剪切力都为 $0$ :\n", + "\n", + "$$\n", + "\\left(M_y\\right)_{\\mathrm{y}=-0.5 \\mid \\mathrm{y}=+0.5}=0, \\quad\\left(M_{x y}\\right)_{\\mathrm{y}=-0.5 \\mid \\mathrm{y}=+0.5}=0, \\quad\\left(Q_y\\right)_{\\mathrm{y}=-0.5 \\mid \\mathrm{y}=+0.5}=0\n", + "$$\n", + "\n", + "\n", + "由于 $M_y=-D\\left(\\frac{\\partial^2 w}{\\partial y^2}+\\mu \\frac{\\partial^2 w}{\\partial x^2}\\right), \\quad M_{x y}=-D(1-\\mu) \\frac{\\partial^2 w}{\\partial x \\partial y}, \\quad Q_y=-D \\frac{\\partial}{\\partial y}\\left(\\frac{\\partial^2 w}{\\partial x^2}+\\frac{\\partial^2 w}{\\partial y^2}\\right)$ ,且扭矩可以变换为等效剪力, 扭矩和横向剪力合并为 $\\left(Q_y+\\frac{\\partial M_{x y}}{\\partial x}\\right)_{\\mathrm{y}=-0.5 \\mid \\mathrm{y}=+0.5}=0$, 所以自由边界条件用挠度表示为\n", + "\n", + "$$\n", + "\\left(\\frac{\\partial^2 w}{\\partial y^2}+\\mu \\frac{\\partial^2 w}{\\partial x^2}\\right)_{y=-0.5 \\mid y=+0.5}=0, \\quad\\left(\\frac{\\partial^3 w}{\\partial y^3}+(2-\\mu) \\frac{\\partial^3 w}{\\partial x^2 \\partial y}\\right)_{y=-0.5 \\mid y=+0.5}=0\n", + "$$\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. 设置计算域" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "from matplotlib import pyplot as plt\n", + "\n", + "import ppsci\n", + "import sympy as sp\n", + "import numpy as np\n", + "\n", + "# 设置薄板计算域长、宽参数\n", + "Lx = 2.0 # 薄板x方向长度(m)\n", + "Ly = 1.0 # 薄板y方向宽度(m)\n", + "\n", + "# 设置方程参数\n", + "E = 210000.0e6 # 弹性模量(Pa)\n", + "mu = 0.28 # 薄板泊松比(无量纲)\n", + "h = 0.01 # 薄板厚度(m)\n", + "D = E * (h**3) / (12 * (1 - mu**2)) # 薄板弯曲刚度(kN*m^2)\n", + "q = 1000.0 # 均布载荷(N/m^2)\n", + "\n", + "rectangle = ppsci.geometry.Rectangle([-Lx / 2, -Ly / 2], [Lx / 2, Ly / 2]) # 创建薄板几何形状" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 编写方程中的表达式" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/latex": [ + "$\\displaystyle \\frac{\\partial^{4}}{\\partial x^{4}} w{\\left(x,y \\right)} + \\frac{\\partial^{4}}{\\partial y^{4}} w{\\left(x,y \\right)} + 2 \\frac{\\partial^{4}}{\\partial y^{2}\\partial x^{2}} w{\\left(x,y \\right)} - 0.0526628571428571$" + ], + "text/plain": [ + "Derivative(w(x, y), (x, 4)) + Derivative(w(x, y), (y, 4)) + 2*Derivative(w(x, y), (x, 2), (y, 2)) - 0.0526628571428571" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# 使用sympy库计算符号公式\n", + "x, y = sp.symbols(\"x y\") # 定义符号变量x, y\n", + "w = sp.Function(\"w\")(x, y) # 定义函数 w(x,y)\n", + "left = w.diff(x, 4) + 2 * w.diff(x, 2).diff(y, 2) + w.diff(y, 4) # 定义薄板弯曲的双调和方程的左侧部分\n", + "right = q / D # 方程右侧的载荷项,表示均布载荷 q 除以板的弯曲刚度 D。这是薄板在载荷下的响应。\n", + "res = left - right # 定义方程残差\n", + "res # 可视化显示方程残差" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. 
初始化神经网络模型" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W1117 17:10:41.976225 7764 gpu_resources.cc:119] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 12.0, Runtime API Version: 11.8\n", + "W1117 17:10:41.977502 7764 gpu_resources.cc:164] device: 0, cuDNN Version: 8.7.\n" + ] + }, + { + "data": { + "text/plain": [ + "MLP(\n", + " (linears): LayerList(\n", + " (0): Linear(in_features=2, out_features=50, dtype=float32)\n", + " (1): Linear(in_features=50, out_features=50, dtype=float32)\n", + " (2): Linear(in_features=50, out_features=50, dtype=float32)\n", + " (3): Linear(in_features=50, out_features=50, dtype=float32)\n", + " )\n", + " (acts): LayerList(\n", + " (0): Tanh()\n", + " (1): Tanh()\n", + " (2): Tanh()\n", + " (3): Tanh()\n", + " )\n", + " (last_fc): Linear(in_features=50, out_features=1, dtype=float32)\n", + ")" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = ppsci.arch.MLP([\"x\", \"y\"], [\"w\"], 4, 50)\n", + "model # 可视化显示模型结构" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 初始化控制方程和边界条件\n", + "\n", + "接下来讲解如何将开头简介中的控制方程和边界条件转换为深度学习代码。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.1 控制方程\n", + "\n", + "控制方程表示在矩形薄板区域内部,挠度和弯矩所满足的微分方程。因此可以在矩形内部采样足够多的配点(collation points)用于模型训练,如下所示:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkIAAAHHCAYAAABTMjf2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAADfYUlEQVR4nOy9fXxV1ZU+/iShQIIEvCSgJihCGQTBWrWCtBnjkIpWnShhAqgIrca2VkpAOqOVgcRi1YoSRFs1teL4gkaITduxaqGkkxYFX6r1BfiCklpQIBAlFFKQm/v7w99Jzz05L2vtvfa5N3Cefvx8ys099+yzz9prr71enpWRSCQSiBAhQoQIESJEOAaRmeoBRIgQIUKECBEipAqRIRQhQoQIESJEOGYRGUIRIkSIECFChGMWkSEUIUKECBEiRDhmERlCESJEiBAhQoRjFpEhFCFChAgRIkQ4ZhEZQhEiRIgQIUKEYxaRIRQhQoQIESJEOGYRGUIRIkSIECFChGMWkSEUIUI3wPLly5GRkYHm5uZUD0UbGRkZqKqqUrp2yJAhmDlzpuh4UoV0fKd33303hg4diqysLJx55pmpHk6ECKEgMoQiRPj/YW1Mr732GvvagwcPoqqqCo2NjfIDCxnPP/+8sqESIRy89957qKqqEjWiXnrpJfznf/4nvvrVr+LRRx/Fj3/8Y7HfdsPMmTORkZHR+V9ubi6+9KUv4Z577sGhQ4c6v1dVVZX0vZycHJx88sm47LLL8OijjyZ91+u37f/17t3b6HNF6H7okeoBRIhwNODgwYOorq4GABQXF4v//vTp0zF16lT06tVL/LedeP755/HAAw8YM4ba29vRo4ea6tm8eTMyM4+O85vOO33vvfdQXV2N4uJiDBkyRGQ8v//975GZmYlHHnkEPXv2FPnNIPTq1Qs///nPAQCffvopVq1ahXnz5uHVV1/F008/nfTdn/3sZzjuuONw6NAh7NixAy+++CK+9a1voaamBr/5zW8wePBgz9+2Iysry9wDReiWiAyhCBHSGAcOHECfPn2QlZUlqsAPHjyInJwc7d85cuQIOjo6WBunzok8DEMwLEi/U13s3r0b2dnZYkZQIpHAP/7xD2RnZ3t+p0ePHrj66qs7/33DDTdg7NixeOaZZ3DvvffipJNO6vzb5MmTkZeX1/nvBQsW4Mknn8Q111yD//iP/8Arr7zi+9sRInjh6DhaRYhgCDNnzsRxxx2HHTt24PLLL8dxxx2H/Px8zJs3D/F4HADQ3NyM/Px8AEB1dXWnC97uUdm0aRMmT56MWCyG3r1745xzzsGvfvWrpHtZobk//OEPuOGGGzBw4EAUFhYm/c0ZCvnpT3+K008/Hb169cJJJ52E733ve/j000+TvlNcXIzRo0fj9ddfx7/+678iJycHP/zhDz2f94EHHgCApHCC9ZwZGRlYvHgxampqMGzYMPTq1QvvvfceDh8+jAULFuDss89Gv3790KdPHxQVFWHt2rVd7uGcGyv0sXXrVsycORP9+/dHv3798M1vfhMHDx5MutaZI2TNy5/+9CfMnTsX+fn56NOnD6644gq0tLQkXdvR0YGqqiqcdNJJyMnJwQUXXID33nuPlHdkf/YlS5bglFNOQXZ2Ns4//3y88847Xb7/+9//HkVFRejTpw/69++P0tJSbNy4Mek7bu90yJAhuPTSS/HHP/4R5557Lnr37o2hQ4fif/7nf5Ku+4//+A8AwAUXXND5jqyw7GuvvYaJEyciLy8P2dnZOPXUU/Gtb33L9/kyMjLw6KOP4sCBA52/t3z5cgCfG7s/+tGPOt/3kCFD8MMf/rBLSMoa+4svvohzzjkH2dnZeOihh3zv60RmZmanR5US9rvqqqtw3XXXYf369fjd737HuleECBYij1CECAGIx+OYOHEixo4di8WLF2P16tW4
5557MGzYMHz3u99Ffn4+fvazn+G73/0urrjiCkyaNAkAcMYZZwAA3n33XXz1q19FQUEBbr75ZvTp0wd1dXW4/PLLsWrVKlxxxRVJ97vhhhuQn5+PBQsW4MCBA57jqqqqQnV1NUpKSvDd734Xmzdvxs9+9jO8+uqr+NOf/oQvfOELnd/du3cvLr74YkydOhVXX301Bg0a5Pqb3/72t/HRRx/hd7/7HR5//HHX7zz66KP4xz/+geuvvx69evVCLBZDW1sbfv7zn2PatGmoqKjA/v378cgjj2DixInYsGEDKfG2vLwcp556Ku644w688cYb+PnPf46BAwfirrvuCrx21qxZOP7447Fw4UI0NzejpqYGN954I5555pnO79xyyy34yU9+gssuuwwTJ07EW2+9hYkTJ+If//hH4O9b+J//+R/s378f3/ve9/CPf/wDS5cuxb/927/h7bff7pzT1atX4+KLL8bQoUNRVVWF9vZ2LFu2DF/96lfxxhtvBIaytm7dismTJ+Paa6/FjBkz8Itf/AIzZ87E2WefjdNPPx3/+q//iu9///u477778MMf/hAjR44EAIwcORK7d+/GhRdeiPz8fNx8883o378/mpubUV9f73vPxx9/HA8//DA2bNjQGU4aP348AOC6667DY489hsmTJ+Omm27C+vXrcccdd2Djxo147rnnkn5n8+bNmDZtGr797W+joqICI0aMIM+thffffx8AMGDAANL3p0+fjocffhgvvfQSvv71ryf9bc+ePV2+37NnT+Tm5rLHFeEoRiJChAiJRCKRePTRRxMAEq+++mrnZzNmzEgASNx2221J3/3yl7+cOPvsszv/3dLSkgCQWLhwYZffnTBhQmLMmDGJf/zjH52fdXR0JMaPH58YPnx4l/t/7WtfSxw5csR1bNu2bUskEonE7t27Ez179kxceOGFiXg83vm9+++/PwEg8Ytf/KLzs/PPPz8BIPHggw+S5uF73/tewk01bNu2LQEgkZubm9i9e3fS344cOZI4dOhQ0meffPJJYtCgQYlvfetbSZ8752nhwoUJAF2+d8UVVyQGDBiQ9Nkpp5ySmDFjRue/rXkpKSlJdHR0dH4+Z86cRFZWVuLTTz9NJBKJxM6dOxM9evRIXH755Um/V1VVlQCQ9JtusJ49Ozs7sX379s7P169fnwCQmDNnTudnZ555ZmLgwIGJvXv3dn721ltvJTIzMxPXXHNNl7Fb79R6PgCJ//u//+v8bPfu3YlevXolbrrpps7Pnn322QSAxNq1a5PG+dxzz3WRYSpmzJiR6NOnT9Jnb775ZgJA4rrrrkv6fN68eQkAid///vddxv7CCy+w7tfS0pJoaWlJbN26NfHjH/84kZGRkTjjjDM6v2fJR0tLi+vvfPLJJwkAiSuuuCLptwG4/jdx4kTS+CIcO4hCYxEiEPCd73wn6d9FRUX44IMPAq9rbW3F73//e5SXl2P//v3Ys2cP9uzZg71792LixInYsmULduzYkXRNRUVFYO7I6tWrcfjwYVRWViYlD1dUVCA3Nxf/+7//m/T9Xr164Zvf/GbgeCkoKyvrDAVayMrK6swt6ejoQGtrK44cOYJzzjkHb7zxBul33eZ47969aGtrC7z2+uuv7wzhWdfG43H89a9/BQCsWbMGR44cwQ033JB03axZs0hjs3D55ZejoKCg89/nnnsuxo4di+effx4A8PHHH+PNN9/EzJkzEYvFOr93xhln4Otf/3rn9/wwatQoFBUVdf47Pz8fI0aMIMlb//79AQC/+c1v8Nlnn1EfyxPWeOfOnZv0+U033QQAXeTs1FNPxcSJE8m/f+DAAeTn5yM/Px9f/OIX8cMf/hDnnXdeF0+TH4477jgAwP79+5M+7927N373u991+e/OO+8k/3aEYwNRaCxChAD07t27y8Z//PHH45NPPgm8duvWrUgkEvjv//5v/Pd//7frd3bv3p20uZ566qmBv2tt8M7QQ8+ePTF06NDOv1soKCgQS4L1Gt9jjz2Ge+65B5s2bUrahCnPAwAnn3xy0r+PP/54AMAnn3wSGMrwuxb453x98YtfTPpeLBbr/C4Fw4cP7/LZv/zLv6Curi7pPm4hoZEjR+LFF1/sTID3gvNZALq8nX/++SgrK0N1dTWWLFmC4uJiXH755bjyyiuVEs3/+te/IjMzs8u8nXDCCejfv38XOaO+awu9e/fGr3/9awCfG+unnnpqZ14cFX//+98BAH379k36PCsrCyUlJazfinBsIjKEIkQIgE5lT0dHBwBg3rx5nidl5ybjV2WjCsnfdPutJ554AjNnzsTll1+OH/zgBxg4cCCysrJwxx13dOZ8BMFrnhOJhNFr0w06z5KRkYGVK1filVdewa9//evOEvN77rkHr7zySqf3hAu7t80PXDmTMFasZHXnOooQgYrIEIoQQQBeG8XQoUMBAF/4whdET6ennHIKgM+TU617AMDhw4exbds2rXtRNz07Vq5ciaFDh6K+vj7p+oULFyqPQxLWfG3dujXJa7F3716Sp8XCli1bunz2//7f/+tMgLa/Fyc2bdqEvLw8X28QFUHvaNy4cRg3bhxuv/12PPXUU7jqqqvw9NNP47rrrmPd55RTTkFHRwe2bNnSmZQNALt27cKnn37a+byphJXUzwnJRYhgR5QjFCGCACxOHmfp+sCBA1FcXIyHHnoIH3/8cZfrnCXeVJSUlKBnz5647777kjwFjzzyCPbt24dLLrlE6XcBdG7Uzmfxg+XFsI9l/fr1ePnll5XHIYkJEyagR48e+NnPfpb0+f3338/6nV/+8pdJOV0bNmzA+vXrcfHFFwMATjzxRJx55pl47LHHkubvnXfewUsvvYRvfOMb6g9hg9c7+uSTT7p4jqyKPTcG5iBY462pqUn6/N577wUALTmTwFNPPYWf//znOO+88zBhwoSUjiVC90XkEYoQQQDZ2dkYNWoUnnnmGfzLv/wLYrEYRo8ejdGjR+OBBx7A1772NYwZMwYVFRUYOnQodu3ahZdffhnbt2/HW2+9xb5ffn4+brnlFlRXV+Oiiy7Cv//7v2Pz5s346U9/iq985StaRHJnn302AOD73/8+Jk6ciKysLEydOtX3mksvvRT19fW44oorcMkll2Dbtm148MEHMWrUqM4cjlRi0KBBmD17Nu655x78+7//Oy666CK89dZb+O1vf4u8vDyyF+yLX/wivva1r+G73/0uDh06hJqaGgwYMAD/+Z//2fmdu+++GxdffDHOO+88XHvttZ3l8/369RNj6z7zzDORlZWFu+66C/v27UOvXr3wb//2b3jqqafw05/+FFdccQWGDRuG/fv3o7a2Frm5uUpG2Je+9CXMmDEDDz/8MD799FOcf/752LBhAx577DFcfvnluOCCC0Seh4KVK1fiuOOOw+HDhzuZpf/0pz/hS1/6Ep599tku3z9y5AieeOIJ19+64oorRDxzEY4ORIZQhAhC+PnPf45Zs2Zhzpw5OHz4MBYuXIjRo0dj1KhReO2111BdXY3ly5dj7969GDhwIL785S9jwYI
FyverqqpCfn4+7r//fsyZMwexWAzXX389fvzjHydxCHExadIkzJo1C08//TSeeOIJJBKJQENo5syZ2LlzJx566CG8+OKLGDVqFJ544gk8++yzadN/7a677kJOTg5qa2uxevVqnHfeeXjppZfwta99jcx2fc011yAzMxM1NTXYvXs3zj33XNx///048cQTO79TUlKCF154AQsXLsSCBQvwhS98Aeeffz7uuusudjKxF0444QQ8+OCDuOOOO3DttdciHo9j7dq1nYbK008/jV27dqFfv34499xz8eSTTyrf++c//zmGDh2K5cuX47nnnsMJJ5yAW265JfSw53e/+10AnydY5+Xl4cwzz8QvfvELz0TwQ4cOYfr06a6/tW3btsgQitCJjER3zCaMECFCBAF8+umnOP7447Fo0SLceuutnt9rbm7Gqaeeirvvvhvz5s0LcYQRIkQwjShHKEKECMcE2tvbu3xm5b6YaJQbIUKE7oEoNBYhQoRjAs888wyWL1+Ob3zjGzjuuOPwxz/+EStWrMCFF16Ir371q6keXoQIEVKEyBCKECHCMYEzzjgDPXr0wE9+8hO0tbV1JlAvWrQo1UOLECFCChHlCEWIECFChAgRjllEOUIRIkSIECFChGMWkSEUIUKECBEiRDhmEeUIBaCjowMfffQR+vbtq9R6IEKECBEiRIgQPhKJBPbv34+TTjoJmZnefp/IEArARx99hMGDB6d6GBEiRIgQIUIEBfztb39DYWGh598jQygAffv2BfD5RObm5qZ4NBEiRIgQIUIECtra2jB48ODOfdwLkSEUACsclpubGxlCESJEiBAhQjdDUFpLlCwdIUKECBEiRDhmERlCESJEiBAhQoRjFpEhFCFChAgRIkQ4ZhEZQhEiRIgQIUKEYxaRIRQhQoQIESJEOGYRGUIRIkSIECFChGMWkSEUIUKECBEiRDhmERlCESJEiBAhQoRjFpEhFCFChAgRIkQ4ZhExS0dgI94RR9OHTfh4/8c4se+JKDq5CFmZWakeVoQIEQQQre8IxxoiQygCC/Ub6zH7hdnY3ra987PC3EIsvWgpJo2clMKRHZ2INqUIYSJa3+owvVYjXWAOGYlEIpHqQaQz2tra0K9fP+zbt++Y7zVWv7Eek+smI4FkkcnA531cVpavFFOW0aJPzaYUzXsyjqX5CHN9H20wvVYjA1UN1P07MoQCEBlCnyPeEceQpUOSFqIdGchAYW4hts3epr1RRIs+NZtSGPPenQyLY8kQDXN9m0Qq5s/0Wo0MVHVEhpAQ0tkQCnPRNzY34oLHLgj83toZa1E8pFj5PtGiT82mFMa8dycD92g1RL0Q1vrWQZC+S5XhanKtHi0GaqpA3b+jqrFuivqN9RiydAgueOwCXFl/JS547AIMWToE9RvrEe+Io7G5ESveXoHG5kbEO+La9/t4/8ei33NDvCOO2S/M7rL5AOj8rPKFSu3nMTE/kmj6sMlT8QGfz8Xf2v6Gpg+bRJ4ljHm3DAvnc+1o24HJdZNRv7Fe+belEZYc2pHq+QljfevAT99Zf0/F/HHWajr8frrrvlQhSpbuhvA6re5o24GyujIMyB6Ave17Oz+XOBWd2PdE0e+5gbPoVU+l3cErQd1sGjY1YPpz07WfhTrvyzYsw6xzZ7FPnkGGRQYyUPlCJUpHlKbFqTYMObRDen5UPMVhrG9V+Om7yXWTUTe5DnNempMS+TJtQEr+/rEU6uUi8gh1M1BOq3YjCAg+FVFOCUUnF6Ewt7AzNOBEBjIwOHcwik4u4j5SJ0wrlVSfuqmgbjY162tEnoU6n3NenJN0CqfC9KlZGmF7RyTnJ8hz4oUw1rcKKPruhudvSJl8cQxIFW+MlIGaCt2nKoupQGQIdTMEKU03+LnzqcKalZmFpRctBYAuytL6d81FNVrWvslTaSrCHapu6KBNCQCyMtznWeVZOPNp0tD6eP/HaeG6D9s7ImV46Wx2nPUd5juiGIktB1tIv2UirEc1IFsOtKTMQD0WQ71cRIZQN4PqYnY7FXGFddLISVhZvhIFuQVJnxfmFookj5o8leqculUUv85piLIpxRPeY+CegCmGl/23ATOG1pbWLWlxggzbOyJheElsdl7ru6BvAaqKq3DoyCHc9ofbcErNKaG9I0njxURYj7JWp46eiikrpxg3UL1gyiPrpRdTYXjpIjKEuhl0F7OlWFSFddLISWie3Yy1M9biqUlPYe2Mtdg2e1ugEUQxJkx6nVRP3SoGjcRpyM/orBxXqfQsXvCbdzdIG1oZyMCA7AGoaqwSOUHqeiy4cqh7PwnDS2qzc67v6uJqAMDCxoW4sv5KLGxciB37dyRdY/KUT9V3eTl5KQvr+a3Vusl1WPHOCiMGKvUAaiLU66cXu1soHIgMoZRAR3FyTu9usBSLjrBmZWaheEgxpo2ZhuIhxYGGCceYMOV1Ujl1qxg0kqchL6OzdEQp+1ko93Kbdz+4KU432Q4yLKx5kZgzqbwEqhxK3E/iACC52Vnru1ePXp8bp/v9Q/HOdyQZOqMaiT/9xk87/+38O6Aftg+C21rdOmsrtu/fbsRApR5AAflQb5BebNjcQPqdVFUguiHiEQqANI+QROa+JYiA++bhBiffxIq3V+DK+isDr3tq0lOYNmYa6R5+Y+XysUhXG1h8HDvadrjOmXN+VPk7wuBj4TwLANY8xjviWLZhGea8OIf9DEGy7fb3wbmDcd1Z12Fh40L2/Zwwwf3jJ4fS9/Oan5qLagJ/R1ruguTfC9XF1ah9o1a0MslL3znnWWf+KODoJLex+EFXz/qBq/v8fqexuRHlK8vR2t7q+p0MZCAvJ4+UtxUGJ1VEqCgESUNIUnG6LTSrbN5+yvb6/TA37HQhA6MqVABY88EalDxeEvibzvkJ28AEvJ8FgJLRraI4qbLttpnUvVunPWdhy5qp+znnZ3zheKzbvi5w85Xa7CxQ9QMFEiSUVCPHVLk25wDrtRb8YNoo4Og+r+s5hl1+Tj72HNwjIos6iAgV0wzSCWRurtJd83ZhVfkqUlgpjITQdIsVc8Id5SvLSb/pdO9uad1Cuk431yvoWQCEUkEE8GTbLawq4bqnylpjc6NI2MaUbNvnp7W9FcOWDSOF3aTz6yTDFhIJstTQEDdsT4FfKKisrgxzXpjTKUt+a8ENYdES6KQceD2/H64acxWA1IUquYg8QgGQ8giFSWFPPRVRTwmqp6ywvCNcqIQ7vGB/X5RrTXgmnM8CQMRbQT2F68q2hDeDKmux7FiSW181bMOR7fLTy9nrR9V7LBUekvQI2bFk4hIM6jMorcn17OCECAtzC1FxVgUpzAukpm0QV5erhkjXzliL1vZWo6FKCqj7d8QsHRK4XCo67l3rVBQE65Tg5vK1hFUnpyld2Wq95odzmrM2Z8vwoF6bQEL0NOT2LI3NjWRvRdHJRZ6yNmnkJJSOKA2URd1EXcubMblusmdYN2jOqDLkzG2wPGTczYhLCcBZPzpM09R3FgTLY+xlnNrhfGd+sOeepRujuxs4vG072naQjSAgWc9SEdbeYI
HLW2fXi1mZWSKyGAYiQygkmFScOvBTnEHU9kGbR5AydRoTVJjKA+AuevvmTL22urjauOKXbNFBUZwSBq+XUV6QW4CKsypw6MghNDY3er5rzsZth5thQZEvimzHsmOoaqxirx/dFh/czc4NfsapE4W5heSEdztUjdAwwQkRcuRuycQl7HY1QYdSE3qR8/xuhxYJWQwDUWgsAFKhMYr733LbSyRT6y4KqWRQ3SQ9t98z1S+HE16pvaw26X7pFAbUCWuoyppUoq5dbre0bsHDrz+cxFvj965Vqint8HLnByXFOu9nGQ7Onn92+M1JOsmS13qrOKsCw2PDu4RjuYZomImzKjpROkSo+rxBodJ54+dhxTsrxPUi5/nDDntRECVLpxm6G5eKJEGbFC+Qadp2qmejbnJdl3Gb7jnEQdgtOiiJuteddR3q3q0LfF4nhw2HvM9L1mLZMdJzNGxuEGNary6u9jSCAP/1k04hZbck5ebZzVhw/oKkhGQuKacFlaTyMJnedXjbpBKFg0KlCSRw97q7jehFyvMPyB6A1dNXk3mN0hGRRygAYfAIpSOXivSpNJUeKuq9dTwb1GvvufAezH1prvHQZ5C3ggJu4r4XpQOQ3AiYkiOj4410vu94R5xEhZCXk4c9B/ew7ylNCSBdCh9mB3BumbUFqh5R8Qjr6kQVT6Mbn5Kqx0TXw6vrceN49dOt23zEIyQEaUMIkFec9t+V4jYJs8qNAs547AnAUuEVitIMunbe+HlYvG6xSOiTAi+ju2xUGWpeqQm8nrI5+fHebGnd4mrcBz2vKXJAP8NCmgRO9xmCNt/KcZ/nNAVtNCZDyV6wy8SuA7uUyDndoGLQSIb4KQaeDpmpF6iHUj/o6mlKNWIqZC0IkSEkBBOGkBsklD/1N1ZPX42szCzfBSp9KtUFVRlUjq3Eyo0rfRWWyRJkr2vvvfBezHlpTujkkm5Gd9OHTSKGhp/iKx1RqrwBmciRCTJSZ4+djZr1NWL3lFg/bvOblZGV1HCXYtSHZXi7QZLVOGymdy8jv2Fzg+tBwtS8SuQpSeSThcmyLoUoR6ibQYLgkJrhX76yPDBWbrIBqgqo+RA162sCT21BeTA6fX1M9xziwo1gTkLWgvK1bm+6Xfl5TeTIBOWqlZ4m279NYv3YZalybCUAJBlBgHceiDSBq2pem5QeUc1ZlGy2PGzZMLS2t2LJxCVYVb4KhbmFSdfo9kP0mmPd/pIAXW793rMXWWV37DbvRFQ+nyZIRy4VCs+QF6RjxZTyaOdp2Q8mS5Dt19ZvrMewZcPIOROSjL5e70BX1ig8N0vXLyWN0e15TdEu+FFFxDvi5HtSZVtn/ViwDNfpz013/bsXr5BuCb4duiEP3XmId8Sx5oM1gfcBusqTTrPlINoDSY6coDmmUhk4wVkrqu9ZUtZShSg0FoCwQmMWdMIyQW5oP0gkG/s9g0SsWCIB2AmTJcip7DlEeQcqssZpykoBN0fGpKvdVP823UOBSnhHKrwoGfJQmQdu8rVTnrihucNHDqNgSYFS0rwqqHPstV6njp6KxesWA1BfKzrvOZ3oHpyIcoSEELYhBKg3XgRkuFR0k+pMxop1E4CdMJXszaWml1SwnHdgsqN2LDuGT9o/Ec2RMc1V4ndPACnJg1Bp57HmgzVY1LQo8Bo/+afm5WydtZWsnzjgHCT85InTvf47v/lOqJ3TublPXutV4gCtmr+YboU1dkSGkBBSYQjZoVou6rzG2WPJC6q9kYDwOoDrJABLj0ViPJIbqal3oOLdqi6uRlVjFQD1k2oqynFN9m9TAXWjcSvZ9gJlvNT75ufkJxkPEt5fzkGCWtHpZyhw5VvKuyFpRKiuFU6RzYShE1zvm06FNXZEvcaOAqi2uHCLX1O5VHRafHA6gAdVrfnBLX+H2xsJkEn29jJUJ4+cTP4NlZ5DXjARr1fpqF2YW4hbi27F6IGjtXNkwj5FmuzfpgKddh5uoMo/NV/N6UGRaJ3BaXdDkaeg/DCOfANyZJa6ffrscJNbinHEKbJxMupb99XNb001IkMoTaHTeBHouigoyaCqvZGsxbbqvVWkZytfWS7SAdwObm8kCcPDz1CllGIDaj2H/CCpWC1wNiWn4pNOKuVCyqMk2b+Ni6CNxo+V3g1U+Vfd7Cn6KQjU+Z5fNB9VxVWke3gZ1Vz5piQfU+XOJIs4NZrAKbLx2gckCgNSiSg0FoBUhcZMxF1N9EZSZZJ1/jYgExqi9kaSCIcFhUoyMzI9q9hMuYupcjO/aD4mDJ1AmgsOoVuq+w3p9CrzQ9j929ygy0pvvXNqzqFO8YUF1byQMPNOOPKdgQwSEzU1ncFUWImbJ0h9z1xW94hZ+ihBqgwhU5n4ki0+VPJGvCDZNiCMxcjZGL3cxSaSa7mbF8UwoD6rtHeLC4pRrjr3lHn1o28wmR/HYaXv1aMXK+dQt/hCNecwzLwTTi7Ug5c+SGrHwUmol6yQtHiAnF5351icc8fV5alIfFZBRKjYzWHKZepFFjg8Npx0veWyVomr+4FDKBjUQNGL+EsSVNd95bhKkYazVHCbX1IaM1IJGC0jyHRTWTd4ETw6oUrwRiEG9OOwsufH6cyNm2xTdcCW1i3spsVeRJT5Ofnke6o0Ow2T0JVCWJifk4/tc7YHcqepEAtKNaa29GLJ4yW+hTFuutYaA7VBsSTfWTog8ggFIFUeIcoJNJYdQ93kOpHNnuuK5oYKOFVrYfGaqILDpePsfRaWu5gTsqSWslNLkFPR24pDVWBB5VSrS9/gXAeSFVZ+3pOC3AIkEomkMKHzO5yQx/jC8Ri2bFhgzmFre6vWWtUtC+dQQ+h6ZXTDeTqebBXvvJuuXfPBGlJRTeQRSjEeeOABDBkyBL1798bYsWOxYcMG0nVPP/00MjIycPnll5sdoBAoJ/vW9laUPF5COmEFgdt2gXoiuPErN2LtjLWom1xH+r7f6TYdqNytU1eQEWSfrzA8VEAyPX4sO4b3Z72PtTPWYn7RfN/rKN44yqk1qO2Grox6gZPsaofKqdbLo1o6gtaiw4vVXWduKN6TirMqPI0gIFgGnDLcs0dP33v6JXAHrVUvOQ5qd2O/7rY/3IZTak4he6IkvDK6hQqqekLVO++ma4uHFGu33+mO6FZVY8888wzmzp2LBx98EGPHjkVNTQ0mTpyIzZs3Y+DAgZ7XNTc3Y968eSgq6l4vzysT3wmJclVuCSTVHV82qgzFQ4pZLQy8wCkNN+GFoZ66dFz3qqdCP0/MqPxRpHsHKXLVEmSJKiKdcXtBtSpKl77BDpW5cZORoKqdQ0cOkcbDmUu/ewblHHrROPjJcZCnWFdP6lY4mqwA8wP3IOCna02VwqdbErUT3coQuvfee1FRUYFvfvObAIAHH3wQ//u//4tf/OIXuPnmm12vicfjuOqqq1BdXY2mpiZ8+umnIY5YH9bi9EuAk9poOCWQ3H5QEv2tqP2GTJQwc05dqiWjqmGlIL6pquIq0v0pClq1BNlUv6F4Rxy7DuxiXaPaq8wPHPoGJyT7fnlt5I3NjaSxqOQcut2z7
l2aB9hueKnyplEPKCq0IxxI98ijGg8c45Wia6VL4VMRLuei2xhChw8fxuuvv45bbrml87PMzEyUlJTg5Zdf9rzutttuw8CBA3HttdeiqSk4EffQoUM4dOifp6e2tja9gQvAapRJTYBTIdWyQD0VqRg2qguMW6LvxuGj6zWjnrpUK6dUNwGKJ6b29VoU9i3Ejv2yTUztMMFfFAQV6gbdRFu/teQl39T8uKC5ocqI20YusUn7NfF13pPrHVH1KHLDQiYbgEp6UzjGA8d4pRozUvxfqnotbHQbQ2jPnj2Ix+MYNGhQ0ueDBg3Cpk2bXK/54x//iEceeQRvvvkm+T533HEHqqurdYZqBKobTdCC8lJuFCWhYthwFxg3CdCrhFnXa0ad/0F9Bvl6taTDShRPzPb92zvbXZhifg07LKBK3aBD8EbZnHRY3XXy4yieDp1Nmnuq5xpeqh7FMPPDKJDwpnCNB0pYVqWwhroPmNBrYaPbGEJc7N+/H9OnT0dtbS3y8vLI191yyy2YO3du57/b2towePBgE0NkQWWjCVpQ88bPw4p3Vmi5LIMMGx1Di3Pas5Q7pYRZ5TSou9H7bSSx7JhyWImq0IfHhhtlfpUOC/iBG6aUINP0W0tldWWoHFuJ0tNKXeU77Pw4L9nW8chyT/Vcw0v1oBd2fhgFOt4UFeOBMte1l9W69glz3ps7ZlN6LWx0G0MoLy8PWVlZ2LUrOR9g165dOOGEE7p8//3330dzczMuu+yyzs86OjoAAD169MDmzZsxbNiwLtf16tULvXr1Eh69PrgbDaXC6u51d3f5m4rL0suw0Y0Nc/sNUUuYVZSnzkYftJHMHjebNAa3cXMMtOIhxcbaXYTZb4gqF4u/vhiV4ypFWMSD1lLN+hrUrK9xlW+JuZEKPXI3aZ1TPcfwUj1ocA0a6RYZXlDNNVI1eHU9UarNvU3ptbDRbQyhnj174uyzz8aaNWs6S+A7OjqwZs0a3HjjjV2+f9ppp+Htt99O+mz+/PnYv38/li5dmhZeHguURcdVpqouYymXpURsmNtvqOnDJpIhpHIaVN3MKBvJk395kjQGt3GrJK1LcZg4rykdURpKvyGqXPz4jz/Gqcefqn1fzlrykm/djUoy9MjZpHU8UfGOOGLZMdw54U60HGxBfk4+CnILXGVL9aBhotlyKpN7dQxeVU+Uiq42rdfCRrcxhABg7ty5mDFjBs455xyce+65qKmpwYEDBzqryK655hoUFBTgjjvuQO/evTF69Oik6/v37w8AXT5PJTiLjqNMdaxsXZelVGyYukAmDJ2ArMws4+EZlc2MspFYm8Seg3vY45bwNqieBr2uaZ7dbLRUVqJJJAecteQn3zohkzBDj3aYyE208kec86Aix5xqPYrRmerkXo7Bq5N2YEFVV5vWa2GjWxlCU6ZMQUtLCxYsWICdO3fizDPPxAsvvNCZQP3hhx8iM7P7cESqLDqqMpWwslWNKalS6qKTi0iNYKVK9CngbmbUObxqzFVYun6p0rh1vA0qMii5Wah4oricPbreTe5a8pNvnQTUsEKPdoSdm6gix37yz8kPS4fkXqrBu+fAni5s6ipeK1VdHYZeCxNRi40AmGqxQelertNUUKpztAoxoVTD2PqN9SirK/P9jVXlq1w3alVafkCW/ItDu9/a3hrquFVkUFJudUIQYTaJVF1L3IbIFvzmBYCWjHDBbX6q2u7E3spCxWtmsUpbfEnFQ4rZLO5hdrz3Q1C7j3nj52HxusVd3odKmyFVXR2mXtMBdf/uVh6howmmCeh0CN4s5dZyoEXp1CGRz2CdzvwwIHuAa2sDnRCEdH4AN6TRr1c/ZWXOdYuryKCU3Op6lSwvQMWvK0Q4evygupZUvLKUeZEKPaZzbiJHjt3W7PK3lrPXLCcMaJIp2c/Ddc+F92DuS3PFvFaqupqj17Iys4wVaUih+8SRjjKEQUDn1T9ncO5g/GD8D5Dx///PDuvfU0dPxZSVU5T6RnH7lrmBolD3tu8l90ayTqt+nb9N9MqyNhLAvWdcAgmUjSrD7U2345SaU1DyeAkWNS3CoqZFmNkwEw2bG9j3pEJFBiXkVqpn3KSRk0R62FHgtZbcoNqPiTovALR711k98yi9uDh9uKRyEznPIbVmqTKypXULee5U4dXTLr9PPvkgQoGqrqbqtaYPmxDviLvq43RCZAilCGER0HktqJ98/Seeyq1uch1WvLNCeaPyWyRhlwtbCFL8Jhu6em0kWRmfP3/NKzVY2LiwS1NM081KVWRQQm6pXiUr1OFluAIItUmkfS1Vjqv0vB+glvvA8bY5EWTk26FiPHjpER2WYy9Q17T0mqUYBQOyB6CqsUrE8Ap6Z27Gg7Re1NHVFL1mwkg0gcgQShEkvCZUeFnjbspt66yt2L5/u/apQ7ebs6ShSFH8OpsQBUmb6NhKAPAlf7TuCagbYEFQkUEJuaUq6fKV5YGnbgmj2w7q5rRk4hKsKl+FwtzCpL9zupU7oVOhRfVQ6BgPlFN9kHxQQF37jc2Noms2SJas+ZEwvDjvzA4TB2gdXU3Ra6YPdBKIDKEUQVqBe4Fz6mhtb8WwZcMw58U5pN+mkLdRTpFukDIUqYp/R9uOLn93g26uSdHJRVi5cSX5Grsy55z6qePhyqCE3HLK3+3wUqi6RrcF7uakI99u0KnQonooTBv8QSETP3AOf/Ub61G+spz0u25r1mst+clSdXG1ZwUrQJ87nXAeRy9y9IWOLAfpNdMHOglEydIphHSXXyc4ib8qvZukyduc1+mWC8c74li2YRlJ8bccbCGNS9f1r5pM2rCpAdOfmy5O8qbaL05Hbrnl7xZMcfQA6snbqvLtBhX2+O//9vusxNkwcxPdKoWmjp6KxesWd47R/mwA7fDH1VXONRukF71kqe5dWj6aTn6cVM+4hs0N7KIPHVk2XfxjGlH5fABMlc/bYaICwUtZuJVYcktedUv7OVAthed2Jn/iiidw85qbyWXCqqCWq1KgUi7rBQlm6fGF47Fu+zpWM13APdQQBMkSZtNUFhwElU7b3/Vtf7gNCxsXBv6mfa7CLBH3kikdeguOrnJ7bxy96ITE3FF/Y8nEJRjUZ5DnOvKbQwDKz6gKThl++enloVWQUffvyBAKQBiGkDQoymJA9gA8M/kZFA8pRtOHTaTFCZhdTF7gbtIq3i2L74K6CamCqgjtyMrI8swnCnOT9oMUO3UsO0Yqh5dUqOnCH2OBYihQOLYs2DlgOLxAAIxtWKqHP876yUAG68AXtJa4nEpuUDkIea0jtzkEkBKjnvpeqourUftGbWjtSyIeoWMY1NLzksdLUJhbiMkjJ5N/WyVsF2YDQ05ncqAr34XpXlnc3kgJJHyTqu0VVlZVSdg8HaphJbcQRLwjjpLHSwLvaZUwSyjUMMJFHASF+SgcW3bYQ0MmQyscuK1pip6gvoNYdgy1l9UmjVU3fCMRrlcJrXutI7c55CSQq5DleoES1o1lx1DVWMXWE2Eg8ggFoDt6hDinDg5B3JKJSzDr3FmsxRJ2A0PuiRHo6ukxSZYG
...[remaining base64-encoded PNG data for the interior training points scatter plot omitted]",
+      "text/plain": [
+       "
      "
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "interior_points = rectangle.sample_interior(1000, random='Halton')\n",
+    "px1, py1 = interior_points[\"x\"], interior_points[\"y\"]\n",
+    "plt.scatter(px1, py1, color='green')\n",
+    "plt.title('Interior training points for PDE')\n",
+    "plt.xlabel('X-axis')\n",
+    "plt.ylabel('Y-axis')\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, convert the governing equation into the constraint required by the deep learning model: the interior constraint\n",
+    "> $$\n",
+    "\\frac{\\partial^4 w}{\\partial x^4}+2 \\frac{\\partial^4 w}{\\partial x^2 \\partial y^2}+\\frac{\\partial^4 w}{\\partial y^4}=\\frac{q}{D}\n",
+    "$$"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[33m[2024/11/17 17:10:44] ppsci WARNING: Logger has already been automatically initialized as `log_file` is set to None by default, information will only be printed to terminal without writting to any file.\u001b[0m\n"
+     ]
+    }
+   ],
+   "source": [
+    "pde_contraint = ppsci.constraint.InteriorConstraint(\n",
+    "    {\"kirchhoff_res\": res}, # residual expression\n",
+    "    {\"kirchhoff_res\": 0.0}, # target value of the residual\n",
+    "    rectangle, # constraint region: the rectangular thin plate\n",
+    "    {\n",
+    "        \"dataset\": \"IterableNamedArrayDataset\",\n",
+    "        \"iters_per_epoch\": 1,\n",
+    "        \"batch_size\": 20000, # sample 20,000 collocation points for training\n",
+    "    },\n",
+    "    random=\"Halton\",\n",
+    "    loss=ppsci.loss.MSELoss(), # use the mean squared error (MSE) loss function\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 4.2 Simply supported boundary conditions\n",
+    "\n",
+    "Next, we explain how to convert the simply supported boundary conditions on the left and right edges, introduced at the beginning, into deep learning code."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As before, first preview the training points on the left and right edges of the rectangle that correspond to the simply supported boundary conditions."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAkIAAAHHCAYAAABTMjf2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAABPcElEQVR4nO3dd1gU1/4/8PcCsoA0DSCiBBWxoyQqxhY0EiGigok3tgCWmGKJLSSWxJpcNRo1CtafmsRobDdCrrHGcr1RbsDejRK7YgkKItjg/P7Y725cloVdGGYH9v16nn1WZs/MnDMzO/tx5nPmqIQQAkRERERWyMbSFSAiIiKyFAZCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkJERERktRgIERERkdViIERERERWi4EQERERWS0GQhKZPHkyVCoV7t69a+mqlEitWrXQv39/S1ejTGn3kVI8e/YMn3zyCXx9fWFjY4OoqChLVwkqlQqTJ0+2yLr79++PWrVqWWTdZMiUY2Hv3r1QqVTYuHGjPJWSGI+5omn37969e3XTzNlmSjvnGsNAiMgEBw4cwOTJk3H//n3JlrlixQrMmjULPXv2xHfffYdRo0ZJtmyyDmvWrMG8efMsXQ2yYjk5OZg8ebJesFTeMBAiq/HZZ58hNze3RPMeOHAAU6ZMkTQQ2r17N2rUqIG5c+ciOjoaISEhki27pHJzc/HZZ59ZuhpkIgZCJLdly5bh3Llzur9zcnIwZcqUQgOh0pxz5cRAiCT36NEj5OfnW7oaBuzs7ODg4GDpaujcvn0b7u7uki0vPz8fjx49KtUyHBwcYGdnJ1GNqKw8fPjQ0lWgAqT4/pUHlSpVglqtNqms0s65xjAQktjdu3fx9ttvw9XVFS+88AJGjBhh8OV49uwZpk2bBn9/f6jVatSqVQvjx4/H48eP9coZu0dfMJ/n22+/hUqlwv79+zF69Gh4enqicuXK6NGjB+7cuaM3rxACX3zxBWrWrAknJyd07NgRp06dMlhHRkYGPv74YwQGBsLZ2Rmurq544403cOzYMb1y2nvIa9euxWeffYYaNWrAyckJR48ehUqlwty5cw2WfeDAAahUKvz4449Gt6N2uevWrcP48ePh7e2NypUro3v37rh69apB+Q0bNqB58+ZwdHSEh4cH3nnnHVy/fl2vTGH3q1UqFYYNG4bExEQ0adIEarUajRs3xrZt2/Tmi4uLAwDUrl0bKpUKKpUKly5dAgDs3LkT7dq1g7u7O5ydnVG/fn2MHz/eaNsuXboElUqFPXv24NSpU7rlaf9H9fDhQ4wZMwa+vr5Qq9WoX78+Zs+eDSFEoXVfvXo1GjduDLVarVfvgg4ePIiwsDB4eHjA0dERtWvXxsCBAw2W+fwxp91mf/zxB9555x24ubnB09MTn3/+OYQQuHr1KiIjI+Hq6gpvb298/fXXesszdz9qCSFQq1YtREZGGnz26NEjuLm54f333zc6P1D8ftF+b7T7sWCdn/8fbocOHdCkSRMcOnQIbdq00W2/xYsXl6q9phy3/fv3h7OzM9LS0tClSxe4uLigX79+6NChA3755RdcvnxZdww9n7vx+PFjTJo0CXXr1oVarYavry8++eQTg/PM48ePMWrUKHh6esLFxQXdu3fHtWvXity2BeXl5UnW3g4dOqBDhw4G8xbMTdF+j2bPno2lS5fqzqctW7ZEamqqwfza77iDgwOaNGmCTZs2FdqW2bNno02bNnjhhRfg6OiI5s2bF5oDVdj3b+vWraU+bgHghx9+QHBwMJycnFClShW8+uqr2LFjh16ZhQsX6tbr4+ODoUOHGlyx1h63p0+fRseOHeHk5IQaNWrgq6++MljntWvXEBUVhcqVK8PLywujRo0yOFYA/f1w6dIleHp6AgCmTJmiOw6155DCzrmm/v7VqlULXbt2xW+//Ybg4GA4ODigTp06+P7774vdfmYTJIlJkyYJACIwMFB069ZNxMfHi3feeUcAENHR0XplY2NjBQDRs2dPkZCQIGJiYgQAERUVpVcOgJg0aZLBuvz8/ERsbKzu75UrVwoA4qWXXhKvvfaaWLBggRgzZoywtbUVb7/9tt68n332mQAgunTpIuLj48XAgQOFj4+P8PDw0Ftmamqq8Pf3F2PHjhVLliwRU6dOFTVq1BBubm7i+vXrunJ79uwRAESjRo1EUFCQmDNnjpg+fbp4+PChaNu2rWjevLlB/YcMGSJcXFzEw4cPjW5P7XIDAwNF06ZNxZw5c8TYsWOFg4ODqFevnsjJyTFof8uWLcXcuXPF2LFjhaOjo6hVq5a4d++ewT4quI2bNWsmqlevLqZNmybmzZsn6tSpI5ycnMTdu3eFEEIcO3ZM9OnTRwAQc+fOFatWrRKrVq0S2dnZ4uTJk8Le3l60aNFCfPPNN2Lx4sXi448/Fq+++qrRtmVnZ4tVq1aJBg0aiJo1a+qWl56eLvLz88Vrr70mVCqVePfdd0V8fLzo1q2bACBGjhxpUPeGDRsKT09PMWXKFJGQkCCOHDlS6Dpv3bolqlSpIurVqydmzZolli1bJiZMmCAaNmxosMznjzntNgsKChJ9+vQRCxcuFBEREQKAmDNnjqhfv7748MMPxcKFC0Xbtm0FAPGf//ynRPsxNjZW+Pn56f6eMGGCqFSpkvjrr7/06rh+/XoBQOzbt8/oNjZlv2iPm4sXL+rNq63znj17dNNCQkKEj4+P8PLyEsOGDRPz588X7dq1EwDE8uXLS9ReU4/b2NhYoVarhb+/v4iNjRWLFy8W33//vdixY4cICgoSHh4eumNo06ZNQggh8vLyROfOnYWTk5MYOXKkWLJkiRg2bJiws7MTkZGReu3Vnqf69u0r4uPjxZtvvimaNm1q9PxT2LaSsr0hISEiJCTEYF0Fj4+LFy/qznt169YVM2fOFF999ZXw8PAQNWvWFE+ePNGV3b59u7CxsRFNmjQRc+bMERMmTBBubm6icePGessUQoiaNWuKIUOGiPj4eDFnzhwRHBwsAIjNmzfrlTP2/SvNcSuEEJMnTxYARJs2bcSsWbPEN998I/r27Ss+/fRTXRnt9zI0NFQsWLBADBs2TNja2oqWLVvqtVt73Pr6+ooRI0aIhQsXitdee00AEFu2bNGVy8nJEfXq1RMODg7ik08+EfPmzRPNmzfXHQfPfxee3w/Z2dli0aJFAoDo0aOH7jg8duyYXj0L7kdTfv/8/PxE/fr1RbVq1cT48eNFfHy8ePnll4VKpRInT54schuai4GQRLQ7vHv37nrThwwZIgDoDoyjR48KAOLdd9/VK/fxxx8LAGL37t26aeYGQqGhoSI/P183fdSoUcLW1lbcv39fCCHE7du3hb29vYiIiNArN378eAFAb5mPHj0SeXl5euu9ePGiUKvVYurUqbpp2hNhnTp19E56QgixZMkSAUCcOXNGN+3JkycGQVdhtMutUaOGyMrK0k3Xnky++eYb3fK8vLxEkyZNRG5urq7c5s2bBQAxceJE3TRjgZC9vb24cOGCbtqxY8cEALFgwQLdtF
mzZhX6ozl37lwBQNy5c6fI9hQmJCRENG7cWG9aYmKiACC++OILvek9e/YUKpVKr54AhI2NjTh16lSx69q0aZMAIFJTU4ssZywQeu+993TTnj17JmrWrClUKpWYMWOGbvq9e/eEo6Oj3r41dT8KYfhDd+7cOQFALFq0SK+O3bt3F7Vq1dI7hgsyZb+YGwgBEF9//bVu2uPHj0VQUJDw8vLS/fiUxXGr/eEYO3asQRsiIiIMfsiFEGLVqlXCxsZG/Pe//9WbvnjxYgFA7N+/Xwjx9/loyJAheuX69u1rViAkZXvNDYReeOEFkZGRoZuelJQkAIh///vfumlBQUGievXqunOhEELs2LFDADDYfgXPY0+ePBFNmjQRr732mt50Y9+/0hy358+fFzY2NqJHjx4G51/tfNrzeOfOnfXKxMfHCwBixYoVumna4/b777/XTXv8+LHw9vYWb731lm7avHnzBACxfv163bSHDx+KunXrFhkICSHEnTt3jB4rBc+55vz++fn5GQSOt2/fFmq1WowZM8ZgXaXBW2MSGzp0qN7fw4cPBwBs2bJF73306NF65caMGQMA+OWXX0q87vfee0/vMmT79u2Rl5eHy5cvAwB+/fVXPHnyBMOHD9crN3LkSINlqdVq2NhoDo+8vDz89ddfutsLhw8fNigfGxsLR0dHvWlvv/02HBwcsHr1at207du34+7du3jnnXdMalNMTAxcXFx0f/fs2RPVq1fXbceDBw/i9u3bGDJkiN696IiICDRo0MCk7RkaGgp/f3/d302bNoWrqyv+/PPPYufV5vgkJSVJkhe1ZcsW2Nra4qOPPtKbPmbMGAghsHXrVr3pISEhaNSokcn13Lx5M54+fWp2vd59913dv21tbdGiRQsIITBo0CC9ddSvX7/Q7VbcfixMvXr10KpVK73jJyMjA1u3bkW/fv2K7JYr9X4BNPkOz9/WsLe3x/vvv4/bt2/j0KFDemXL4rj98MMPTa7rhg0b0LBhQzRo0AB3797VvV577TUAwJ49ewD8fT4qeLwVdk4oihzfU2N69eqFKlWq6P5u3749AOiOw5s3b+Lo0aOIjY2Fm5ubrtzrr79e6Hfn+fPYvXv3kJmZifbt2xd63ivs+1ea4zYxMRH5+fmYOHGi7vyrpZ1Pex4fOXKkXpnBgwfD1dXVYFs6OzvrnW/t7e0RHBys9z3dsmULqlevjp49e+qmOTk54b333jNa15Iw9/evUaNGuv0JAJ6enkbPMaXBQEhiAQEBen/7+/vDxsZGl4dw+fJl2NjYoG7dunrlvL294e7urgtaSuLFF1/U+1t7crh3755u3YXV0dPTU+9EAmgS/+bOnYuAgACo1Wp4eHjA09MTx48fR2ZmpsG6a9eubTDN3d0d3bp1w5o1a3TTVq9ejRo1auhOyMUpWFeVSoW6devqbU8AqF+/vsG8DRo0MGl7FtxugGbbabdbUXr16oW2bdvi3XffRbVq1dC7d2+sX7++xD++ly9fho+Pj96PCgA0bNhQ9/nzCtvuhQkJCcFbb72FKVOmwMPDA5GRkVi5cmWhOQCFKbiN3Nzc4ODgAA8PD4PphW234vajMTExMdi/f7+u3Rs2bMDTp08RHR1d5HxS7xcA8PHxQeXKlfWm1atXDwAM2iH1cWtnZ4eaNWuaXNfz58/j1KlT8PT01Htp63v79m1dPWxsbPT+I2CsXkWR43tqTEnPe8bqs3nzZrzyyitwcHBA1apV4enpiUWLFpl83gNKftympaXBxsamyP/cGNuW9vb2qFOnjsG2rFmzpkHwVfD8dvnyZdStW9egnLnHQXHM/f0rzbnZHAyEypix6L80D5nKy8srdLqtrW2h00WBJFtT/POf/8To0aPx6quv4ocffsD27duxc+dONG7cuNAfk4JXg7RiYmLw559/4sCBA3jw4AF+/vln9OnTx+B/O5ZUmu3m6OiIffv24ddff0V0dDSOHz+OXr164fXXXze6n6RkbLsXpH3oXXJyMoYNG4br169j4MCBaN68ObKzs4udv7BtJOXxZkzv3r1RqVIl3f+uf/jhB7Ro0aLYE7Qp+8XYd1CO/Wau56/QmiI/Px+BgYHYuXNnoa8hQ4aUYW1Lx9z9IuVx+N///hfdu3eHg4MDFi5ciC1btmDnzp3o27dvocsz9v0r6XFbFuT4nprL1N8/uequnF+jCuL8+fN6f1+4cAH5+fm6LHs/Pz/k5+cblLt16xbu378PPz8/3bQqVaoY9AJ48uQJbt68WaK6aZddcN137twxiLA3btyIjh07Yvny5ejduzc6d+6M0NBQs5+jEx4eDk9PT6xevRqbNm1CTk5Osf8rel7BugohcOHCBb3tCUDvuRZa586d09uepVHUF9fGxgadOnXCnDlzcPr0aXz55ZfYvXu37vaDOfz8/HDjxg08ePBAb/rZs2d1n5fGK6+8gi+//BIHDx7E6tWrcerUKaxdu7ZUyzRFcfvRmKpVqyIiIgKrV6/G5cuXsX//fpOPn+L2i/bKQcFj2tjViRs3bhh0W//jjz8AwKAdch23xo5Lf39/ZGRkoFOnTggNDTV4aX+QteejtLQ0gzqYQ8r2FnbeA4zvl+IYO+8VVp9//etfcHBwwPbt2zFw4EC88cYbCA0NNXudJT1u/f39kZ+fj9OnTxstY2xbPnnyBBcvXizROcLPzw9paWkGAYYpx4E5/6k35/dPTgyEJJaQkKD394IFCwAAb7zxBgCgS5cuAGDwELQ5c+YA0Nwz1/L398e+ffv0yi1durTE/2MNDQ1FpUqVsGDBAr0DvrAHstna2hp8KTZs2GDQ1bU4dnZ26NOnD9avX49vv/0WgYGBaNq0qcnzf//993pBwcaNG3Hz5k3d9mzRogW8vLywePFivds8W7duxZkzZ/S2Z2lob4kUPEFnZGQYlA0KCgIAk287Pa9Lly7Iy8tDfHy83vS5c+dCpVLp2m2ue/fuGezP0tTTXMXtx6JER0fj9OnTiIuLg62tLXr37l3sPKbsF+3toOe/Y3l5eVi6dGmhy3z27BmWLFmi+/vJkydYsmQJPD090bx5c72ych23lStXLvSWzdtvv43r169j2bJlBp/l5ubqAjptfebPn69XxtyHNErZXn9/f5w9e1bv0R/Hjh3D/v37zaqTVvXq1REUFITvvvtOb1vt3LnTIOCwtbWFSqXSO8deunQJiYmJZq+3JMdtVFQUbGxsMHXqVIMr79rvb2hoKOzt7TF//ny97/Ty5cuRmZlZonNely5dcOPGDb3HBOTk5Bj9LjzPyckJgOG50dh6ANN+/+TEJ6dJ7OLFi+jevTvCw8ORnJyMH374AX379kWzZs0AAM2aNUNsbCyWLl2K+/fvIyQkBCkpKfjuu+8QFRWFjh076pb17rvv4oMPPsBbb72F119/HceOHcP27dsN8jJM5enpiY8//hjTp09H165d0aVLFxw5cgRbt241WGbXrl0xdepUDBgwAG3atMGJEyewevVq1KlTx+z1xsTEYP78+dizZw9mzpxp1rxVq1ZFu3btMGDAANy6dQvz5s1D3
bp1MXjwYACah3vNnDkTAwYMQEhICPr06YNbt27hm2++Qa1atSQbtkL7QzdhwgTdZe9u3bph6tSp2LdvHyIiIuDn54fbt29j4cKFqFmzJtq1a2f2erp164aOHTtiwoQJuHTpEpo1a4YdO3YgKSkJI0eONMjlMNV3332HhQsXokePHvD398eDBw+wbNkyuLq66k5OZam4/ViUiIgIvPDCC9iwYQPeeOMNeHl5FTuPKfulcePGeOWVVzBu3DhkZGSgatWqWLt2LZ49e1boMn18fDBz5kxcunQJ9erVw7p163D06FEsXboUlSpVMqu9Uh23zZs3x7p16zB69Gi0bNkSzs7O6NatG6Kjo7F+/Xp88MEH2LNnD9q2bYu8vDycPXsW69evx/bt29GiRQsEBQWhT58+WLhwITIzM9GmTRvs2rULFy5cMGn9ZdHegQMHYs6cOQgLC8OgQYNw+/ZtLF68GI0bN0ZWVpZZ9dKaPn06IiIi0K5dOwwcOBAZGRlYsGABGjdurHdrOCIiAnPmzEF4eDj69u2L27dvIyEhAXXr1sXx48fNWmdJjtu6detiwoQJmDZtGtq3b48333wTarUaqamp8PHxwfTp0+Hp6Ylx48ZhypQpCA8PR/fu3XHu3DksXLgQLVu2NLkjyvMGDx6M+Ph4xMTE4NChQ6hevTpWrVqlC3KK4ujoiEaNGmHdunWoV68eqlatiiZNmqBJkyYGZc35/ZOVpH3QrJi2m+Dp06dFz549hYuLi6hSpYoYNmyYXndRIYR4+vSpmDJliqhdu7aoVKmS8PX1FePGjROPHj3SK5eXlyc+/fRT4eHhIZycnERYWJi4cOGC0e7zBbtGF9YNOC8vT0yZMkVUr15dODo6ig4dOoiTJ08aLPPRo0dizJgxunJt27YVycnJBl1btevYsGFDkduncePGwsbGRly7ds2k7ald7o8//ijGjRsnvLy8hKOjo4iIiBCXL182KL9u3Trx0ksvCbVaLapWrSr69etnsC5j3eeHDh1qsLyC20MIIaZNmyZq1KghbGxsdN2ud+3aJSIjI4WPj4+wt7cXPj4+ok+fPuKPP/4oto2FdZ8XQogHDx6IUaNGCR8fH1GpUiUREBAgZs2aZdDt1ljdC3P48GHRp08f8eKLLwq1Wi28vLxE165dxcGDBw2WWVj3+YLd0GNjY0XlypWLbZM5+7Fgt9znaR9DsWbNGpPaa+p+SUtLE6GhoUKtVuueV7Jz585Cu883btxYHDx4ULRu3Vo4ODgIPz8/ER8fr7e8sjhujW1rITTPcenbt69wd3c36Ar+5MkTMXPmTNG4cWOhVqtFlSpVRPPmzcWUKVNEZmamrlxubq746KOPxAsvvCAqV64sunXrJq5evWpW93kp2yuEED/88IOoU6eOsLe3F0FBQWL79u1Gu8/PmjXLYP7C6v6vf/1LNGzYUKjVatGoUSPx008/FXrMLV++XAQEBAi1Wi0aNGggVq5cada543nmHrdaK1as0G2nKlWqiJCQELFz5069MvHx8aJBgwaiUqVKolq1auLDDz/Uex6TEMbPMYW1+/Lly6J79+7CyclJeHh4iBEjRoht27YV231eCCEOHDggmjdvLuzt7fW2fWHbzdTfPz8/PxEREWFQd2OPVygNlRAWzJgiq/HSSy+hatWq2LVrl0nl9+7di44dO2LDhg16XTqpfJFqP44aNQrLly9Henq6Sf9LlVqHDh1w9+5dnDx5sshyPG7peZY+bsk0zBGiMnfw4EEcPXoUMTExlq4KlUOPHj3CDz/8gLfeeos/JlRu8LgtP5gjRGXm5MmTOHToEL7++mtUr14dvXr1snSVqBy5ffs2fv31V2zcuBF//fUXRowYYekqERWLx235w0CIyszGjRsxdepU1K9fHz/++GO5GIWYlOP06dPo168fvLy8MH/+fF2vLyIl43Fb/jBHiIiIiKwWc4SIiIjIajEQIiIiIqvFHKFi5Ofn48aNG3BxcSnV+GBEREQkHyEEHjx4AB8fnyLH6mMgVIwbN27A19fX0tUgIiKiErh69Spq1qxp9HMGQsVwcXEBoNmQrq6uFq4NERERmSIrKwu+vr6633FjGAgVQ3s7zNXVlYEQERFROVNcWguTpYmIiMhqMRAiIiIiq8VAiIiIiKwWAyEiIiKyWgyEiIiIyGoxECIiIiKrxUCIiIiIrBYDISIiIrJaDISIiIjIajEQIiIiItllZwM9egBNm2res7MtUw8OsUFERESyCg4GUlP//vvECcDFBWjZEkhJkbcuvCJEREREsikYBD0vNVXzuZwYCBEREZEssrONB0Faqany3iZjIERERESyiI6WtpwUGAgRERGRLNLSpC0nBQZCREREJAt/f2nLSYGBEBEREcli1Sppy0mBgRARERHJwtlZ00W+KC1basrJhYEQERERySYlxXgwZInnCPGBikRERCSrlBRNF/noaE1itL+/5naYnFeCtBgIERERkeycnYFNmyxdC94aIyIiIivGQIiIiIisFgMhIiIisloMhIiIiMhqMRAiIiIiq8VAiIiIiKwWAyEiIiKyWgyEiIiIyGoxECIiIiKrxUCIiIiIrBYDIQvIzQWGDQPCwjTvubmWrhEREZF1YiAks6gowMkJSEgAduzQvDs5aaYTERGRvBgIySgqCkhKKvyzpCQGQ0RERHJjICST3FzjQZBWUhJvkxEREcmJgZBM4uKkLUdERESlx0BIJufPS1uOiIiISo+BkEwCAqQtR0RERKWnEkIIS1dCybKysuDm5obMzEy4urqWeDm5uZreYcXJyQEcHUu8GiIiIoLpv9+8IiQTR0cgMrLoMpGRDIKIiIjkxEBIRomJxoOhyEjN50RERCQfO0tXwNokJmpuk8XFaRKjAwKAWbN4JYiIiMgSGAhZgKMjEB9v6VoQERERb40RERGR7JQy7iYDISIiIpKVksbdLHeBUEJCAmrVqgUHBwe0atUKKSkpJs23du1aqFQqRHFALyIiIotR2rib5SoQWrduHUaPHo1Jkybh8OHDaNasGcLCwnD79u0i57t06RI+/vhjtG/fXqaaEhERUUFKHHezXAVCc+bMweDBgzFgwAA0atQIixcvhpOTE1asWGF0nry8PPTr1w9TpkxBnTp1ZKwtERERPU+J426Wm0DoyZMnOHToEEJDQ3XTbGxsEBoaiuTkZKPzTZ06FV5eXhg0aJAc1TSJUhLEiIiI5KTEcTfLTff5u3fvIi8vD9WqVdObXq1aNZw9e7bQeX777TcsX74cR48eNXk9jx8/xuPHj3V/Z2Vllai+xhS8N6pNEuMDFYmIqKILCND87plSTi7l5oqQuR48eIDo6GgsW7YMHh4eJs83ffp0uLm56V6+vr6S1UlpCWJERERymjVL2nJSKDeBkIeHB2xtbXHr1i296bdu3YK3t7dB+bS0NFy6dAndunWDnZ0d7Ozs8P333+Pnn3+GnZ0d0tLSCl3PuHHjkJmZqXtdvXpVkvorMUGMiIhITkocd7PcBEL29vZo3rw5du3apZuWn5+PXbt2oXXr1gblGzRogBMn
TuDo0aO6V/fu3dGxY0ccPXrU6JUetVoNV1dXvZcUlJggRkREJDeljbtZbnKEAGD06NGIjY1FixYtEBwcjHnz5uHhw4cYMGAAACAmJgY1atTA9OnT4eDggCZNmujN7+7uDgAG0+WgxAQxIiIiS1DSuJvlKhDq1asX7ty5g4kTJyI9PR1BQUHYtm2bLoH6ypUrsLFR5kUuJSaIERERWYpSxt1UCSGEpSuhZFlZWXBzc0NmZmapbpPl5moeH16cnByORE9ERFRapv5+K/PySQWkxAQxIiIia8dASEZKSxAjIiKyduUqR6giUFKCGBERkbVjIGQBSkkQIyIishSlXBTgrTEiIiKSVVSUpgNRQsLfQ005OVlmhAUGQkRERCQbpQ03xUCIiIiIZKHE4aYYCBEREZEslDjcFAMhIiIikoUSh5tiIGQBubnAsGFAWJjmnSPOExGRNTB1GCk5h5viEBvFkGqIDS1jSWJ8oCIREVV0cg43xSE2FEhpmfJERERyUuJwUwyEZKLETHkiIiK5KW24KQZCMlFipjwREZElJCZqbn8NHQp07qx5z8mxTIoIh9iQiRIz5YmIiCxFKcNN8YqQTJSYKU9ERGTt2GusGFL1GpMzU56IiMjasdeYwigxU56IiMjaMRCSkdIy5YmIiKwdk6VllpiouU0WF6dJjA4IAGbN4pUgIiIiS2AgZAFKyZQnIiKyFKVcFOCtMSIiIpJVVJSmA1FCArBjh+bdyckyIywwECIiIiLZKG24KQZCREREJAslDjfFQIiIiIhkocThphgIERERkSyUONwUAyEiIiKShRKHm+IQG8WQaogNIiIiayfncFMcYoOIiIgURYnDTTEQIiIiItkobbgpPlmaiIiIZKWk4aYYCBEREZHslDLcFG+NERERkdViIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkIWkJ0N9OgBNG2qec/OtnSNiIiIrBMHXZVZcDCQmvr33ydOAC4uQMuWQEqK5epFRERkjXhFSEYFg6DnpaZqPiciIiL5MBCSSXa28SBIKzWVt8mIiIjkxEBIJtHR0pYjIiKi0mMgJJO0NGnLERERUekxEJKJv7+05YiIiKj0GAjJZNUqacsRERFR6TEQkomzs6aLfFFattSUIyIiInkwEJJRSorxYIjPESIiIpIfH6gos5QUTRf56GhNYrS/v+Z2GK8EERERyY+BkAU4OwObNlm6FkRERJaTmwvExQHnzwMBAcCsWYCjo/z14K0xIiIiklVUFODkBCQkADt2aN6dnDTT5cZAiIiIiGQTFQUkJRX+WVKS/MEQAyEiIiKSRW6u8SBIKylJU04uDISIiIhIFnFx0paTAgMhC8jNBYYNA8LCNO9yRr5ERESWcv68tOWkwEBIZkpKECMiIpJTQIC05aRQ7gKhhIQE1KpVCw4ODmjVqhVSingK4bJly9C+fXtUqVIFVapUQWhoaJHly5rSEsSIiIjkNGuWtOWkUK4CoXXr1mH06NGYNGkSDh8+jGbNmiEsLAy3b98utPzevXvRp08f7NmzB8nJyfD19UXnzp1x/fp1mWuuzAQxIiIiOTk6ApGRRZeJjJT3eUIqIYSQb3Wl06pVK7Rs2RLx8fEAgPz8fPj6+mL48OEYO3ZssfPn5eWhSpUqiI+PR0xMjEnrzMrKgpubGzIzM+Hq6lriug8bprkNVpyhQ4H/ax4REVGFZOwOSWQkkJgozTpM/f0uN0+WfvLkCQ4dOoRx48bpptnY2CA0NBTJyckmLSMnJwdPnz5F1apVjZZ5/PgxHj9+rPs7Kyur5JV+jhITxIiIiCwhMVE5T5YuN4HQ3bt3kZeXh2rVqulNr1atGs6ePWvSMj799FP4+PggNDTUaJnp06djypQppaprYQICNMnRppQjIiKq6BwdlXEHpFzlCJXGjBkzsHbtWmzatAkODg5Gy40bNw6ZmZm619WrVyVZvxITxIiIiKxduQmEPDw8YGtri1u3bulNv3XrFry9vYucd/bs2ZgxYwZ27NiBpk2bFllWrVbD1dVV7yUFJSaIERERWbtyEwjZ29ujefPm2LVrl25afn4+du3ahdatWxud76uvvsK0adOwbds2tGjRQo6qGpWYaDwYkjJBjIiIiExTbnKEAGD06NGIjY1FixYtEBwcjHnz5uHhw4cYMGAAACAmJgY1atTA9OnTAQAzZ87ExIkTsWbNGtSqVQvp6ekAAGdnZzg7O1ukDUpKECMiIrJ25SoQ6tWrF+7cuYOJEyciPT0dQUFB2LZtmy6B+sqVK7Cx+fsi16JFi/DkyRP07NlTbzmTJk3C5MmT5ay6HqUkiBEREVm7cvUcIUuQ6jlCRERE9Leyvjti6u93uckRIiIioopBSeNuMhAiIiIi2Sht3E0GQkRERCQLJY67yUCIiIiIZBEXJ205KTAQsoDcXM0grGFhmneOOE9ERNZAieNuMhCSmZISxIiIiORk6niaco67ye7zxZCy+3xRCWIAny5NREQVW26u5j//xcnJKX1XenafVxglJogRERHJSYnjbjIQkokSE8SIiIjkprRxN8vVEBvlmRITxIiIiCxBSeNuMhCSSUCAJjnalHJEREQVnVLG3WSydDGkSpaWM0GMiIjI2jFZWmGUmCBGRERk7RgIyUhpCWJERETWjjlCMlNSghgREZG1YyBkAUpJECMiIrKU7GwgOhpISwP8/YFVqwBnZ/nrwUCIiIiIZBUcDKSm/v33iROAiwvQsiWQkiJvXZgjRERERLIpGAQ9LzVV87mcGAgRERGRLLKzjQdBWqmpmnJyYSBEREREsoiOlracFBgIERERkSzS0qQtJwUGQkRERCQLf39py0mBgRARERHJYtUqactJgYEQERERycLZWdNFvigtW8r7PCEGQkRERCSblBTjwZAlniPEByoSERGRrFJS+GRpIiIismLOzsCmTZauBW+NERERkRVjIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCREREZLUYCBEREZHVMjsQ2rZtG3777Tfd3wkJCQgKCkLfvn1x7949SStHREREVJbMDoTi4uKQlZUFADhx4gTGjBmDLl264OLFixg9erTkFSQiIiIqK2aPNXbx4kU0atQIAPCvf/0LXbt2xT//+U8cPnwYXbp0kbyCFZFSBpojIiKydmYHQvb29sjJyQEA/Prrr4iJiQEAVK1aVXeliIwLDgZSU//++8QJwMUFaNlSMxovERERycfsQKhdu3YYPXo02rZti5SUFKxbtw4A8Mcff6BmzZqSV7AiKRgEPS81VfM5gyEiIiL5mJ0jFB8fDzs7O2zcuBGLFi1CjRo1AABbt25FeHi45BWsKLKzjQdBWqmpmnJEREQkD5UQQli6EkqWlZUFNzc3ZGZmwtXVtcTL6dEDSEwsvlxUFLBpU4lXQ0RERDD999ukW2NZWVm6hRSXB1SaYKEiS0uTthwRERGVnkmBUJU
qVXDz5k14eXnB3d0dKpXKoIwQAiqVCnl5eZJXsiLw99ckRptSjoiIiORhUiC0e/duVK1aVffvwgIhKtqqVZreYaaUIyIiInmYFAiFhITo/t2hQ4eyqkuF5uys6SJfVMJ0y5Z8nhAREZGczO41NnnyZOTn5xtMz8zMRJ8+fSSpVEWVkqIJdgrD5wgRERHJz+xAaPny5WjXrh3+/PNP3bS9e/ciMDAQacz0LVZKCvDggaZ3WGCg5v3BAwZBRERElmB2IHT8+HHUrFkTQUFBWLZsGeLi4tC5c2dER0fjwIEDZVHHCsfZWdNF/vhxzTtvhxERkbXJzQWGDQPCwjTvubmWqUeJnyM0fvx4zJgxA3Z2dti6dSs6deokdd0UQarnCBEREZFGVBSQlGQ4PTLStGfumcLU32+zrwgBwIIFC/DNN9+gT58+qFOnDj766CMcO3asxJUlIiIi62AsCAI006Oi5KxNCQKh8PBwTJkyBd999x1Wr16NI0eO4NVXX8Urr7yCr776qizqSERERBVAbq7xIEgrKUne22RmB0J5eXk4fvw4evbsCQBwdHTEokWLsHHjRsydO1fyChIREVHFEBcnbTkpmD36/M6dOwudHhERgROmPDqZiIiIrNL589KWk0KJcoSM8fDwkHJxFZZSMuWJiIjkFBAgbTkpmN1rLC8vD3PnzsX69etx5coVPHnyRO/zjIwMSStoaVL3GpMjU56IiEiJcnMBJ6fiy+XkAI6OpVtXmfUamzJlCubMmYNevXohMzMTo0ePxptvvgkbGxtMnjy5NHWu8JSWKU9ERCQnR0fNf/yLEhlZ+iDIHGZfEfL398f8+fMREREBFxcXHD16VDftf//7H9asWVNWdbUIqa4IyRkFExERKVm5fo5Qeno6AgMDAQDOzs7IzMwEAHTt2hW//PJLCatb8SkxU56IiMgSEhM1//EfOhTo3FnznpNjmRQRs3uN1axZEzdv3sSLL74If39/7NixAy+//DJSU1OhVqvLoo4VghIz5YmIiCzF0RGIj7d0LUpwRahHjx7YtWsXAGD48OH4/PPPERAQgJiYGAwcOFDyChaUkJCAWrVqwcHBAa1atUJKMaOVbtiwAQ0aNICDgwMCAwOxZcuWMq9jYZSYKU9ERGTtSjzWmFZycjKSk5MREBCAbt26SVWvQq1btw4xMTFYvHgxWrVqhXnz5mHDhg04d+4cvLy8DMofOHAAr776KqZPn46uXbtizZo1mDlzJg4fPowmTZqYtE7mCBEREZU/pv5+lzoQklOrVq3QsmVLxP/ftbT8/Hz4+vpi+PDhGDt2rEH5Xr164eHDh9i8ebNu2iuvvIKgoCAsXrzYpHVK2X2+qF5jALvQExERSaVMB13VcnV1xZ9//lmaRZjsyZMnOHToEEJDQ3XTbGxsEBoaiuTk5ELnSU5O1isPAGFhYUbLA8Djx4+RlZWl95JKYqLxboMMgoiIiORnciB048YNg2lyXky6e/cu8vLyUK1aNb3p1apVQ3p6eqHzpKenm1UeAKZPnw43Nzfdy9fXt/SVf46SMuWJiIisncmBUOPGjSvcM4IKM27cOGRmZupeV69elXwd2kz57ds178wJIiIia6OU4aZMDoS+/PJLvP/++/jHP/6hG0bjnXfekWTYCVN4eHjA1tYWt27d0pt+69YteHt7FzqPt7e3WeUBQK1Ww9XVVe9FRERE0omK0nQgSkgAduzQvDs5WWaEBZMDoSFDhuD48eP466+/0KhRI/z73//GokWLZBto1d7eHs2bN9d13Qc0ydK7du1C69atC52ndevWeuUBYOfOnUbLExERUdlS2nBTZj1QsXbt2ti9ezfi4+Px5ptvomHDhrCz01/E4cOHJa3g80aPHo3Y2Fi0aNECwcHBmDdvHh4+fIgBAwYAAGJiYlCjRg1Mnz4dADBixAiEhITg66+/RkREBNauXYuDBw9i6dKlZVZHIiIiKlxubtG9pwHN57m58qWNmP1k6cuXL+Onn35ClSpVEBkZaRAIlaVevXrhzp07mDhxItLT0xEUFIRt27bpEqKvXLkCG5u/L3K1adMGa9aswWeffYbx48cjICAAiYmJJj9DiIiIiKRjznBTcj112qznCC1btgxjxoxBaGgolixZAk9Pz7KsmyJI+RwhrdxczU4+f17zJOlZs5gwTUREFV9YmCYnqDidO2s6FJWGqb/fJl/OCQ8PR0pKCuLj4xETE1O62lmxgvdGtUlifI4QERFVdAEBpgVCcg43ZXKydF5eHo4fP84gqBSUliBGREQkp1mzpC0nBZMDoZ07d6JmzZplWZcKzZwEMSIioorI0dH4CAtakZHypouUaogNMp05CWJEREQVldKGm5Kvy5eVO39e2nJERETlVWKicjoOMRCSiRITxIiIiCxFO9yUpZnVfd4aSdV9PjdX8/jw4uTksCs9ERFRaZn6+80cIZkoMUGMiIjI2jEQkpHSEsSIiIisHXOEZKakBDEiIiJrx0DIApSSIEZERGTteGuMiIiIZJedDfToATRtqnnPzrZMPXhFiIiIiGQVHAykpv7994kTgIsL0LIlkJIib114RYiIiIhkUzAIel5qquZzOTEQIiIiIllkZxsPgrRSU+W9TcZAiIiIiGQRHS1tOSkwECIiIiJZpKVJW04KDISIiIhIFv7+0paTAgMhIiIiksWqVdKWkwIDISIiIpKFs7Omi3xRWrbUlJMLAyEiIiKSTUqK8WDIEs8R4gMViYiISFYpKZou8tHRmsRof3/N7TA5rwRpMRAiIiIi2Tk7A5s2WboWvDVGREREVoyBEBEREVktBkJERERktRgIERERkdViIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCFpCdDfToATRtqnnPzrZ0jYiIiKwTxxqTWXAwkJr6998nTgAuLpYZcZeIiMja8YqQjAoGQc9LTdV8TkRERPJhICST7GzjQZBWaipvkxEREcmJgZBMoqOlLUdERESlx0BIJmlp0pYjIiKi0mMgJBN/f2nLERERUekxEJLJqlXSliMiIqLSYyAkE2dnTRf5orRsqSlHRERE8mAgJKOUFOPBEJ8jREREJD8+UFFmKSmaLvLR0ZrEaH9/ze0wXgkiIiKSHwMhC3B2BjZtsnQtiIiILCc3F4iLA86fBwICgFmzAEdH+evBW2NEREQkq6gowMkJSEgAduzQvDs5aabLjYEQERERySYqCkhKKvyzpCT5gyEGQkRERCSL3FzjQZBWUpKmnFwYCBEREZEs4uKkLScFBkJEREQki/PnpS0nBQZCFpCbCwwbBoSFad7lvARIRERkKQEB0paTgkoIIeRbXfmTlZUFNzc3ZGZmwtXVtdTLM5YkFhkJJCaWevFERESKlZur6R1WnJyc0nelN/X3m1eEZKS0THkiIiI5OTpq/uNflMhIeZ8nxEBIJkrMlCciIpJbYqLxYMgSd0cYCMlEiZnyRERElpCYqLn9NXQo0Lmz5j0nxzIpIhxiQyZKzJQnIiKyFEdHID7e0rXgFSHZKDFTno
iIyNqx11gxpOo1JmemPBERkbVjrzGFUWKmPBERkbVjICQjpWXKExERWTsmS8ssMVFzmywuTpMYHRAAzJrFK0FERESWUG6uCGVkZKBfv35wdXWFu7s7Bg0ahOzs7CLLDx8+HPXr14ejoyNefPFFfPTRR8jMzJSx1oXTZspv3655ZxBERETWRinDTZWbQKhfv344deoUdu7cic2bN2Pfvn147733jJa/ceMGbty4gdmzZ+PkyZP49ttvsW3bNgwaNEjGWhMREVFBUVGaDkQJCcCOHZp3JyfLjLBQLnqNnTlzBo0aNUJqaipatGgBANi2bRu6dOmCa9euwcfHx6TlbNiwAe+88w4ePnwIOzvT7gpKPdYYERGRNStquClAupzZCtVrLDk5Ge7u7rogCABCQ0NhY2OD33//3eTlaDdGUUHQ48ePkZWVpfciIiKi0lPicFPlIhBKT0+Hl5eX3jQ7OztUrVoV6enpJi3j7t27mDZtWpG30wBg+vTpcHNz0718fX1LXG8iIiL6mxKHm7JoIDR27FioVKoiX2fPni31erKyshAREYFGjRph8uTJRZYdN24cMjMzda+rV6+Wev1ERESkzOGmLNp9fsyYMejfv3+RZerUqQNvb2/cvn1bb/qzZ8+QkZEBb2/vIud/8OABwsPD4eLigk2bNqFSpUpFller1VCr1SbVv6TYfZ6IiKxRQIAmOdqUcnIpV8nSBw8eRPPmzQEAO3bsQHh4eJHJ0llZWQgLC4NarcaWLVvgZMoYF4UsQ8pkaWNJYnygIhERVXRyDjdVoZKlGzZsiPDwcAwePBgpKSnYv38/hg0bht69e+uCoOvXr6NBgwZISUkBoNkAnTt3xsOHD7F8+XJkZWUhPT0d6enpyMvLs0g7isqUT0qyTLdBIiIiuShxuKlyEQgBwOrVq9GgQQN06tQJXbp0Qbt27bB06VLd50+fPsW5c+eQk5MDADh8+DB+//13nDhxAnXr1kX16tV1L0vk/SgxU56IiEhuShtuqlzcGrMkqW6NDRumeWBUcYYO1TxtmoiIqCIr63xZU3+/OdaYTJSYKU9ERGQp2uGmLK3c3Bor70zNgJczU56IiMja8dZYMaS6NSZnpjwREZG1q1C9xioCJWbKExERWTsGQjJSWqY8ERGRtWOytMwSE/lkaSIiIqVgIGQBSsmUJyIispTsbCA6GkhLA/z9gVWrAGdn+evBQIiIiIhkFRwMpKb+/feJE4CLC9CyJfB/A0TIhjlCREREJJuCQdDzUlM1n8uJgRARERHJIjvbeBCklZqqKScXBkJEREQki+hoactJgYEQERERySItTdpyUmAgRERERLLw95e2nBQYCBEREZEsVq2StpwUGAgRERGRLJydNV3ki9KypbzPE2IgRERERLJJSTEeDFniOUJ8oCIRERHJKiWFT5YmIiIiK+bsDGzaZOla8NYYERERWTEGQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkJERERktRgIWUBuLjBsGBAWpnnPzbV0jYiIiKwTAyGZRUUBTk5AQgKwY4fm3clJM52IiIjkxUBIRlFRQFJS4Z8lJTEYIiIikhsDIZnk5hoPgrSSknibjIiISE4MhGQSFydtOSIiIio9BkIyOX9e2nJERERUegyEZBIQIG05IiIiKj2VEEJYuhJKlpWVBTc3N2RmZsLV1bXEy8nN1fQOK05ODuDoWOLVEBEREUz//eYVIZk4OgKRkUWXiYxkEERERCQnBkIySkw0HgxFRmo+JyIiIvnYWboC1iYxUXObLC5OkxgdEADMmsUrQURERJbAQMgCHB2B+HhL14KIiMhylHJRgLfGiIiISFZKGm6KgRARERHJRmnDTTEQIiIiIlkocbgpBkJEREQkCyUON8VAiIiIiGShxOGmGAhZQG4uMGwYEBameeeI80REZA2UONwUh9gohlRDbGgZSxLjAxWJiKiik3O4KQ6xoUBKy5QnIiKSkxKHm2IgJBMlZsoTERHJTWnDTTEQkokSM+WJiIgsITFRc/tr6FCgc2fNe06OZVJEOMSGTJSYKU9ERGQpShluileEZKLETHkiIiJrx15jxZCq15icmfJERETWjr3GFEaJmfJERETWjoGQjJSWKU9ERGTtmCwts8REzW2yuDhNYnRAADBrFq8EERERWQIDIQtQSqY8ERGRpSjlogBvjREREZGsoqI0HYgSEoAdOzTvTk6WGWGBgRARERHJRmnDTTEQIiIiIlkocbipchMIZWRkoF+/fnB1dYW7uzsGDRqE7Oxsk+YVQuCNN96ASqVCIrtmERERWYQSh5sqN4FQv379cOrUKezcuRObN2/Gvn378N5775k077x586BSqcq4hkRERFQUJQ43VS4CoTNnzmDbtm34f//v/6FVq1Zo164dFixYgLVr1+LGjRtFznv06FF8/fXXWLFihUy1LV5uLjBsGBAWpnnniPNERGQNlDjcVLkIhJKTk+Hu7o4WLVropoWGhsLGxga///670flycnLQt29fJCQkwNvb26R1PX78GFlZWXovKSkpU56IiEhOs2ZJW04K5SIQSk9Ph5eXl940Ozs7VK1aFenp6UbnGzVqFNq0aYPI4sa2eM706dPh5uame/n6+pa43gUpLVOeiIhITkocbsqigdDYsWOhUqmKfJ09e7ZEy/7555+xe/duzJs3z6z5xo0bh8zMTN3r6tWrJVp/QUrMlCciIpKb0oabsuiTpceMGYP+/fsXWaZOnTrw9vbG7du39aY/e/YMGRkZRm957d69G2lpaXB3d9eb/tZbb6F9+/bYu3dvofOp1Wqo1WpTm2AyczLl+dRpIiKqyJQ03JRFAyFPT094enoWW65169a4f/8+Dh06hObNmwPQBDr5+flo1apVofOMHTsW7777rt60wMBAzJ07F926dSt95c2kxEx5IiIiS1HKcFPlIkeoYcOGCA8Px+DBg5GSkoL9+/dj2LBh6N27N3x8fAAA169fR4MGDZCSkgIA8Pb2RpMmTfReAPDiiy+idu3asrdBiZnyRERE1q5cBEIAsHr1ajRo0ACdOnVCly5d0K5dOyxdulT3+dOnT3Hu3Dnk5ORYsJbGKTFTnoiIyNqVm9Hnq1atijVr1hj9vFatWhBCFLmM4j4vS9pM+aISpuXOlCciIrJ25eaKUEWgtEx5IiIia1durghVFErKlCciIrJ2DIQsQCmZ8kRERJailIsCvDVGREREslLScFMMhIiIiEg2ShtuioEQERERyUKJw00xECIiIiJZmDPclFwYCBEREZEslDjcFAMhIiIikoUSh5tSCUs+brkcyMrKgpubGzIzM+Hq6mrp6hAREZVbubma3mHFyckpfVd6U3+/eUWIiIiIZKEdbqoocg83xUCIiIiIZKO04ab4ZGkiIiKSlZKGm2IgRERERLJTynBTvDVGREREVouBEBEREVktBkJERERktRgIERERkdViIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERER
FaLT5YuhhACgGYUWyIiIioftL/b2t9xYxgIFePBgwcAAF9fXwvXhIiIiMz14MEDuLm5Gf1cJYoLlaxcfn4+bty4ARcXF6hUKsmWm5WVBV9fX1y9ehWurq6SLVdJKnob2b7yr6K3saK3D6j4bWT7Sk4IgQcPHsDHxwc2NsYzgXhFqBg2NjaoWbNmmS3f1dW1Qh7cz6vobWT7yr+K3saK3j6g4reR7SuZoq4EaTFZmoiIiKwWAyEiIiKyWgyELEStVmPSpElQq9WWrkqZqehtZPvKv4rexorePqDit5HtK3tMliYiIiKrxStCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkJl6Msvv0SbNm3g5OQEd3d3k+YRQmDixImoXr06HB0dERoaivPnz+uVycjIQL9+/eDq6gp3d3cMGjQI2dnZZdCCoplbj0uXLkGlUhX62rBhg65cYZ+vXbtWjibpKcl27tChg0HdP/jgA70yV65cQUREBJycnODl5YW4uDg8e/asLJtilLltzMjIwPDhw1G/fn04OjrixRdfxEcffYTMzEy9cpbahwkJCahVqxYcHBzQqlUrpKSkFFl+w4YNaNCgARwcHBAYGIgtW7bofW7K91Fu5rRx2bJlaN++PapUqYIqVaogNDTUoHz//v0N9lV4eHhZN8Moc9r37bffGtTdwcFBr4zS9qE57SvsfKJSqRAREaEro6T9t2/fPnTr1g0+Pj5QqVRITEwsdp69e/fi5ZdfhlqtRt26dfHtt98alDH3e202QWVm4sSJYs6cOWL06NHCzc3NpHlmzJgh3NzcRGJiojh27Jjo3r27qF27tsjNzdWVCQ8PF82aNRP/+9//xH//+19Rt25d0adPnzJqhXHm1uPZs2fi5s2beq8pU6YIZ2dn8eDBA105AGLlypV65Z5vv1xKsp1DQkLE4MGD9eqemZmp+/zZs2eiSZMmIjQ0VBw5ckRs2bJFeHh4iHHjxpV1cwplbhtPnDgh3nzzTfHzzz+LCxcuiF27domAgADx1ltv6ZWzxD5cu3atsLe3FytWrBCnTp0SgwcPFu7u7uLWrVuFlt+/f7+wtbUVX331lTh9+rT47LPPRKVKlcSJEyd0ZUz5PsrJ3Db27dtXJCQkiCNHjogzZ86I/v37Czc3N3Ht2jVdmdjYWBEeHq63rzIyMuRqkh5z27dy5Urh6uqqV/f09HS9Mkrah+a276+//tJr28mTJ4Wtra1YuXKlroyS9t+WLVvEhAkTxE8//SQAiE2bNhVZ/s8//xROTk5i9OjR4vTp02LBggXC1tZWbNu2TVfG3G1WEgyEZLBy5UqTAqH8/Hzh7e0tZs2apZt2//59oVarxY8//iiEEOL06dMCgEhNTdWV2bp1q1CpVOL69euS190YqeoRFBQkBg4cqDfNlC9QWStp+0JCQsSIESOMfr5lyxZhY2Ojd7JetGiRcHV1FY8fP5ak7qaSah+uX79e2Nvbi6dPn+qmWWIfBgcHi6FDh+r+zsvLEz4+PmL69OmFln/77bdFRESE3rRWrVqJ999/Xwhh2vdRbua2saBnz54JFxcX8d133+mmxcbGisjISKmrWiLmtq+4c6vS9mFp99/cuXOFi4uLyM7O1k1T0v57ninngE8++UQ0btxYb1qvXr1EWFiY7u/SbjNT8NaYgly8eBHp6ekIDQ3VTXNzc0OrVq2QnJwMAEhOToa7uztatGihKxMaGgobGxv8/vvvstVVinocOnQIR48exaBBgww+Gzp0KDw8PBAcHIwVK1ZAyPy4q9K0b/Xq1fDw8ECTJk0wbtw45OTk6C03MDAQ1apV000LCwtDVlYWTp06JX1DiiDVsZSZmQlXV1fY2ekPXSjnPnzy5AkOHTqk992xsbFBaGio7rtTUHJysl55QLMvtOVN+T7KqSRtLCgnJwdPnz5F1apV9abv3bsXXl5eqF+/Pj788EP89ddfktbdFCVtX3Z2Nvz8/ODr64vIyEi975GS9qEU+2/58uXo3bs3KleurDddCfuvJIr7DkqxzUzBQVcVJD09HQD0fiS1f2s/S09Ph5eXl97ndnZ2qFq1qq6MHKSox/Lly9GwYUO0adNGb/rUqVPx2muvwcnJCTt27MCQIUOQnZ2Njz76SLL6F6ek7evbty/8/Pzg4+OD48eP49NPP8W5c+fw008/6ZZb2P7VfiYnKfbh3bt3MW3aNLz33nt60+Xeh3fv3kVeXl6h2/bs2bOFzmNsXzz/XdNOM1ZGTiVpY0GffvopfHx89H5YwsPD8eabb6J27dpIS0vD+PHj8cYbbyA5ORm2traStqEoJWlf/fr1sWLFCjRt2hSZmZmYPXs22rRpg1OnTqFmzZqK2oel3X8pKSk4efIkli9frjddKfuvJIx9B7OyspCbm4t79+6V+pg3BQMhM40dOxYzZ84sssyZM2fQoEEDmWokLVPbV1q5ublYs2YNPv/8c4PPnp/20ksv4eHDh5g1a5YkP6Jl3b7nA4LAwEBUr14dnTp1QlpaGvz9/Uu8XHPItQ+zsrIQERGBRo0aYfLkyXqfleU+pJKZMWMG1q5di7179+olFPfu3Vv378DAQDRt2hT+/v7Yu3cvOnXqZImqmqx169Zo3bq17u82bdqgYcOGWLJkCaZNm2bBmklv+fLlCAwMRHBwsN708rz/lIKBkJnGjBmD/v37F1mmTp06JVq2t7c3AODWrVuoXr26bvqtW7cQFBSkK3P79m29+Z49e4aMjAzd/KVhavtKW4+NGzciJycHMTExxZZt1aoVpk2bhsePH5d6PBq52qfVqlUrAMCFCxfg7+8Pb29vgx4Pt27dAgBJ9h8gTxsfPHiA8PBwuLi4YNOmTahUqVKR5aXch4Xx8PCAra2tbltq3bp1y2hbvL29iyxvyvdRTiVpo9bs2bMxY8YM/Prrr2jatGmRZevUqQMPDw9cuHBB1h/S0rRPq1KlSnjppZdw4cIFAMrah6Vp38OHD7F27VpMnTq12PVYav+VhLHvoKurKxwdHWFra1vqY8IkkmUbkVHmJkvPnj1bNy0zM7PQZOmDBw/qymzfvt1iydIlrUdISIhBTyNjvvjiC1GlSpUS17UkpNrOv/32mwAgjh07JoT4O1n6+R4PS5YsEa6uruLRo0fSNcAEJW1jZmameOWVV0RISIh4+PChSeuSYx8GBweLYcOG6f7Oy8sTNWrUKDJZumvXrnrTWrdubZAsXdT3UW7mtlEIIWbOnClcXV1FcnKySeu4evWqUKlUIikpqdT1NVdJ2ve8Z8+eifr164tRo0YJIZS3D0vavpUrVwq1Wi3u3r1b7Dosuf+eBxOTpZs0aaI3rU+fPgbJ0qU5Jkyqq2RLIgOXL18WR44c0XURP3LkiDhy5IheV/H69euLn376Sff3jBkzhLu7u0hKShLHjx8XkZGRhXaff+mll8Tvv/8ufvvtNxEQEGCx7vNF1ePatWuifv364vfff9eb7/z580KlUomtW7caLPPnn38Wy5YtEydOnBDnz58XCxcuFE5OTmLixIll3p6CzG3fhQsXxNSpU8XB
gwfFxYsXRVJSkqhTp4549dVXdfNou8937txZHD16VGzbtk14enpatPu8OW3MzMwUrVq1EoGBgeLChQt6XXafPXsmhLDcPly7dq1Qq9Xi22+/FadPnxbvvfeecHd31/XQi46OFmPHjtWV379/v7CzsxOzZ88WZ86cEZMmTSq0+3xx30c5mdvGGTNmCHt7e7Fx40a9faU9Bz148EB8/PHHIjk5WVy8eFH8+uuv4uWXXxYBAQGyB+Ylad+UKVPE9u3bRVpamjh06JDo3bu3cHBwEKdOndKVUdI+NLd9Wu3atRO9evUymK60/ffgwQPd7xwAMWfOHHHkyBFx+fJlIYQQY8eOFdHR0bry2u7zcXFx4syZMyIhIaHQ7vNFbTMpMBAqQ7GxsQKAwWvPnj26Mvi/561o5efni88//1xUq1ZNqNVq0alTJ3Hu3Dm95f7111+iT58+wtnZWbi6uooBAwboBVdyKa4eFy9eNGivEEKMGzdO+Pr6iry8PINlbt26VQQFBQlnZ2dRuXJl0axZM7F48eJCy5Y1c9t35coV8eqrr4qqVasKtVot6tatK+Li4vSeIySEEJcuXRJvvPGGcHR0FB4eHmLMmDF6Xc/lZG4b9+zZU+gxDUBcvHhRCGHZfbhgwQLx4osvCnt7exEcHCz+97//6T4LCQkRsbGxeuXXr18v6tWrJ+zt7UXjxo3FL7/8ove5Kd9HuZnTRj8/v0L31aRJk4QQQuTk5IjOnTsLT09PUalSJeHn5ycGDx4s6Y+Mucxp38iRI3Vlq1WrJrp06SIOHz6stzyl7UNzj9GzZ88KAGLHjh0Gy1La/jN2ftC2KTY2VoSEhBjMExQUJOzt7UWdOnX0fg+1itpmUlAJIXO/ZCIiIiKF4HOEiIiIyGoxECIiIiKrxUCIiIiIrBYDISIiIrJaDISIiIjIajEQIiIiIqvFQIiIiIisFgMhIqJi7N27FyqVCvfv37d0VYhIYgyEiKjcyMvLQ5s2bfDmm2/qTc/MzISvry8mTJhQJutt06YNbt68CTc3tzJZPhFZDp8sTUTlyh9//IGgoCAsW7YM/fr1AwDExMTg2LFjSE1Nhb29vYVrSETlCa8IEVG5Uq9ePcyYMQPDhw/HzZs3kZSUhLVr1+L77783GgR9+umnqFevHpycnFCnTh18/vnnePr0KQBACIHQ0FCEhYVB+//CjIwM1KxZExMnTgRgeGvs8uXL6NatG6pUqYLKlSujcePG2LJlS9k3nogkZ2fpChARmWv48OHYtGkToqOjceLECUycOBHNmjUzWt7FxQXffvstfHx8cOLECQwePBguLi745JNPoFKp8N133yEwMBDz58/HiBEj8MEHH6BGjRq6QKigoUOH4smTJ9i3bx8qV66M06dPw9nZuayaS0RliLfGiKhcOnv2LBo2bIjAwEAcPnwYdnam/79u9uzZWLt2LQ4ePKibtmHDBsTExGDkyJFYsGABjhw5goCAAACaK0IdO3bEvXv34O7ujqZNm+Ktt97CpEmTJG8XEcmLt8aIqFxasWIFnJyccPHiRVy7dg0A8MEHH8DZ2Vn30lq3bh3atm0Lb29vODs747PPPsOVK1f0lvePf/wDPXr0wIwZMzB79mxdEFSYjz76CF988QXatm2LSZMm4fjx42XTSCIqcwyEiKjcOXDgAObOnYvNmzcjODgYgwYNghACU6dOxdGjR3UvAEhOTka/fv3QpUsXbN68GUeOHMGECRPw5MkTvWXm5OTg0KFDsLW1xfnz54tc/7vvvos///xTd2uuRYsWWLBgQVk1l4jKEAMhIipXcnJy0L9/f3z44Yfo2LEjli9fjpSUFCxevBheXl6oW7eu7gVogiY/Pz9MmDABLVq0QEBAAC5fvmyw3DFjxsDGxgZbt27F/PnzsXv37iLr4evriw8++AA//fQTxowZg2XLlpVJe4mobDEQIqJyZdy4cRBCYMaMGQCAWrVqYfbs2fjkk09w6dIlg/IBAQG4cuUK1q5di7S0NMyfPx+bNm3SK/PLL79gxYoVWL16NV5//XXExcUhNjYW9+7dK7QOI0eOxPbt23Hx4kUcPnwYe/bsQcOGDSVvKxGVPSZLE1G58Z///AedOnXC3r170a5dO73PwsLC8OzZM/z6669QqVR6n33yySdYsWIFHj9+jIiICLzyyiuYPHky7t+/jzt37iAwMBAjRozAuHHjAABPnz5F69at4e/vj3Xr1hkkSw8fPhxbt27FtWvX4OrqivDwcMydOxcvvPCCbNuCiKTBQIiIiIisFm+NERERkdViIERERERWi4EQERERWS0GQkRERGS1GAgRERGR1WIgRERERFaLgRARERFZLQZCREREZLUYCBEREZHVYiBEREREVouBEBEREVktBkJERERktf4/Hu/Z8Fsg2ocAAAAASUVORK5CYII=", + "text/plain": [ + "
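+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "The expressions handed to the constraints (`res` above, and `w`, `w.diff(x, 2)` in the boundary constraint below) are symbolic objects built earlier in this notebook. As a minimal, hypothetical sketch of that setup with SymPy (symbol names are assumptions for illustration, not necessarily the notebook's exact code):\n",
+     "\n",
+     "```python\n",
+     "import sympy as sp\n",
+     "\n",
+     "x, y = sp.symbols(\"x y\")\n",
+     "w = sp.Function(\"w\")(x, y)\n",
+     "q, D = sp.symbols(\"q D\", positive=True)  # distributed load and bending stiffness, kept symbolic\n",
+     "\n",
+     "# interior residual used by the InteriorConstraint above: w_xxxx + 2*w_xxyy + w_yyyy - q/D\n",
+     "res = w.diff(x, 4) + 2 * w.diff(x, 2).diff(y, 2) + w.diff(y, 4) - q / D\n",
+     "\n",
+     "# simply supported edge: zero deflection and zero curvature in x\n",
+     "ddw_dxx = w.diff(x, 2)\n",
+     "```\n",
+     "\n",
+     "Driving `w` and `w.diff(x, 2)` to zero on the left and right edges is exactly what the boundary constraint below asks of the network."
+    ]
+   },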
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "lr_buondary_points = rectangle.sample_boundary(100, random='Halton', criteria=lambda x, y: np.isclose(x, -Lx / 2) | np.isclose(x, Lx / 2))\n", + "px2, py2 = lr_buondary_points[\"x\"], lr_buondary_points[\"y\"]\n", + "plt.scatter(px2, py2, color='blue')\n", + "plt.title('boundary points for simply supported boundary condition')\n", + "plt.xlabel('X-axis')\n", + "plt.ylabel('Y-axis')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "接下来将控制方程转为实际深度学习模型中需要的约束条件:(简支)边界约束\n", + "> $$\n", + "(w)_{x=-1 \\mid x=+1}=0, \\quad\\left(\\frac{\\partial^2 w}{\\partial x^2}\\right)_{x=-1 \\mid x=+1}=0\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "constraint_left_right = ppsci.constraint.BoundaryConstraint(\n", + " {\"w\": w, \"ddw_dxx\": w.diff(x, 2)}, # 挠度和 x 轴弯矩表达式\n", + " {\"w\": 0, \"ddw_dxx\": 0}, # 挠度和 x 轴弯矩目标值均为0\n", + " rectangle,\n", + " {\n", + " \"dataset\": \"IterableNamedArrayDataset\",\n", + " \"iters_per_epoch\": 1,\n", + " \"batch_size\": 10000, # 采样一万个点用于训练\n", + " },\n", + " criteria=lambda x, y: np.isclose(x, -Lx / 2) | np.isclose(x, Lx / 2), # 采样点在左右两侧边界上\n", + " loss=ppsci.loss.MSELoss(), # 使用均方根误差损失函数\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.3 自由边界条件\n", + "\n", + "接下来讲解如何将开头简介中的左右简支边界条件转换为深度学习代码。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "同样先预览一下简支边界条件所对应的在矩形上下边界上的训练点" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkIAAAHHCAYAAABTMjf2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAABHdElEQVR4nO3dd3hUZf7+8XuSkAQMKWwCIRApAWkGcGGJICy6ZAVBBAUERJpIUYoIgmChqqCioIhSluLuF6Wp6LqAKGURiCIdKQoYikACiCRANCHJ8/uDX2Yd0mZCJpNw3q/rmgty5jlnPs9zytyZOefEZowxAgAAsCAvTxcAAADgKQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhAABgWQQhOGXChAmy2Ww6f/68p0spkKpVq6pPnz6eLsOtstZRcZGenq7Ro0crMjJSXl5e6tixo9tf8/XXX1f16tXl7e2thg0buv31nHH33Xfr9ttv93QZBXLs2DHZbDYtWrTI06UUW3fffbfuvvtu+8+ujpnNZtOECRPcUhuc4+PpAgB43tatW7V27VoNHz5cwcHBhbLMBQsW6PXXX9fw4cP15z//WbfeemuhLDc3a9eu1ejRo/Xoo49qwoQJCg0NdevrAc5atWqVtm3bRuAppghCwE3ihRde0JgxYwo079atWzVx4kT16dOn0ILQ+vXrValSJU2fPr1QlufM63l5eWn+/Pny9fUtktcErlelShX99ttvKlWqlH3aqlWrNGvWrByD0G+//SYfH96KPYmvxgAX/f7778rMzPR0Gdn4+PjI39/f02XYnT17ttBClSRlZmbq999/z/P1SpcunW8Iym85cJ8rV654ugS3s9ls8vf3l7e3t1Pt/f39CUIeRhCCS86fP6+HH35YgYGB+tOf/qSnnnoq25tKenq6Jk+erKioKPn5+alq1ap67rnnlJqa6tAut+/Grz+fZ9GiRbLZbNqyZYtGjBihsLAw3XLLLXrwwQd17tw5h3mNMXrppZdUuXJllSlTRvfcc4/279+f7TUuXLigZ555RtHR0QoICFBgYKDuu+8+7dmzx6Hdxo0bZbPZtGTJEr3wwguqVKmSypQpo927d8tms+X4acfWrVtls9n04Ycf5jqOWctdunSpnnvuOYWHh+uWW27RAw88oJMnT2Zrv3z5cjVq1EilS5dWaGioHn30UZ06dcqhTU7nCNlsNg0ZMkQrV67U7bffLj8/P9WrV09r1qxxmG/UqFGSpGrVqslms8lms+nYsWOSpC+//FLNmzdXcHCwAgICVKtWLT333HO59i3rHIkNGzZo//799uVt3LhR0rU3w5EjRyoyMlJ+fn6qVauWpk2bJmNMjrUvXrxY9erVk5+fn0Pd17dduHChrly5Yn+9rHM08lrOqVOn9Nhjj6lChQr2sVmwYEG25aempmr8+PGqUaOG/Pz8FBkZqdGjR2fbpvOyY8cONWvWTKVLl1a1atU0e/bsbG3Onj2rfv36qUKFCvL391eDBg30/vvvO7TJ2nayxjNLTuem9OnTRwEBATp16pQ6duyogIAAhYWF6ZlnnlFGRobD/BcvXlSfPn0UFBSk4OBg9e7dWxcvXsxW4969e9WnTx9Vr15d/v7+Cg8P12OPPaZffvnFoV3W9njgwAE98sgjCgkJUfPmzbVw4ULZbDbt2rUr27JfeeUVeXt7Z9u2r3fq1Cn169dPERER8vPzU7Vq1fTEE08oLS3N3uann35Sly5dVK5cOZUpU0Z33nmn/vOf/+Q4lsuWLdPLL7+sypUry9/fX61atdKRI0eyve7cuXMVFRWl0q
VLq0mTJvr666+ztbl+PfTp00ezZs2SJPu2+cf9NKfj4K5du3TfffcpMDBQAQEBatWqlb755huHNq4cF5E3Yihc8vDDD6tq1aqaMmWKvvnmG7399tv69ddf9c9//tPe5vHHH9f777+vzp07a+TIkfr22281ZcoUHTx4UJ988kmBX3vo0KEKCQnR+PHjdezYMc2YMUNDhgzR0qVL7W3GjRunl156SW3btlXbtm21c+dO3XvvvQ4HSOnaQXLlypXq0qWLqlWrpsTERM2ZM0ctW7bUgQMHFBER4dB+8uTJ8vX11TPPPKPU1FTVrl1bd911lxYvXqynn37aoe3ixYtVtmxZdejQId8+vfzyy7LZbHr22Wd19uxZzZgxQ7Gxsdq9e7dKly4t6doBr2/fvvrLX/6iKVOmKDExUW+99Za2bNmiXbt25fupy+bNm/Xxxx/rySefVNmyZfX222+rU6dOOnHihP70pz/poYce0o8//qgPP/xQ06dPt59bExYWpv379+v+++9X/fr1NWnSJPn5+enIkSPasmVLrq8XFhamf/3rX3r55Zd1+fJlTZkyRZJUp04dGWP0wAMPaMOGDerXr58aNmyoL774QqNGjdKpU6eyBcv169dr2bJlGjJkiEJDQ1W1atUcX/Nf//qX5s6dq23btukf//iHJKlZs2Z5LicxMVF33nmnPSiFhYVp9erV6tevn5KTkzV8+HBJ1z5BeuCBB7R582YNGDBAderU0b59+zR9+nT9+OOPWrlyZZ7jL0m//vqr2rZtq4cffljdu3fXsmXL9MQTT8jX11ePPfaYpGtfkdx99906cuSIhgwZomrVqmn58uXq06ePLl68qKeeeirf18lJRkaGWrdurZiYGE2bNk1fffWV3njjDUVFRemJJ56QdO0XiA4dOmjz5s0aNGiQ6tSpo08++US9e/fOtrwvv/xSP/30k/r27avw8HDt379fc+fO1f79+/XNN99kC+NdunRRzZo19corr8gYo86dO2vw4MFavHix7rjjDoe2ixcv1t13361KlSrl2p/Tp0+rSZMmunjxogYMGKDatWvr1KlTWrFihVJSUuTr66vExEQ1a9ZMKSkpGjZsmP70pz/p/fff1wMPPKAVK1bowQcfdFjm1KlT5eXlpWeeeUZJSUl67bXX1KNHD3377bf2NvPnz9fAgQPVrFkzDR8+XD/99JMeeOABlStXTpGRkbnWO3DgQJ0+fVpffvml/vWvf+W+ov6//fv3q0WLFgoMDNTo0aNVqlQpzZkzR3fffbf++9//KiYmxqG9M8dF5MMAThg/fryRZB544AGH6U8++aSRZPbs2WOMMWb37t1Gknn88ccd2j3zzDNGklm/fr19miQzfvz4bK9VpUoV07t3b/vPCxcuNJJMbGysyczMtE9/+umnjbe3t7l48aIxxpizZ88aX19f065dO4d2zz33nJHksMzff//dZGRkOLxufHy88fPzM5MmTbJP27Bhg5FkqlevblJSUhzaz5kzx0gyBw8etE9LS0szoaGhDq+Vk6zlVqpUySQnJ9unL1u2zEgyb731ln155cuXN7fffrv57bff7O0+//xzI8mMGzfOPi1rHf2RJOPr62uOHDlin7Znzx4jycycOdM+7fXXXzeSTHx8vMP806dPN5LMuXPn8uxPTlq2bGnq1avnMG3lypVGknnppZccpnfu3NnYbDaHOiUZLy8vs3//fqder3fv3uaWW27JNj235fTr189UrFjRnD9/3mF6t27dTFBQkH19/+tf/zJeXl7m66+/dmg3e/ZsI8ls2bIlz7patmxpJJk33njDPi01NdU0bNjQlC9f3qSlpRljjJkxY4aRZP7v//7P3i4tLc00bdrUBAQE2LeTrG1nw4YNDq8THx9vJJmFCxc6jIkkh23aGGPuuOMO06hRI/vPWevltddes09LT083LVq0yLbM6/cDY4z58MMPjSSzadMm+7Ss7bF79+7Z2nfv3t1EREQ47IM7d+7M9lo56dWrl/Hy8jLfffddtuey9vvhw4cbSQ7r7NKlS6ZatWqmatWq9tfNGss6deqY1NRUe9u33nrLSDL79u0zxvxvP2zYsKFDu7lz5xpJpmXLlvZpOa2HwYMHZ9s3s1x/HOzYsaPx9fU1R48etU87ffq0KVu2rPnrX/9qn+bscRH546sxuGTw4MEOPw8dOlTStZMB//jviBEjHNqNHDlSkrJ9NO2KAQMGOPy22aJFC2VkZOj48eOSpK+++kppaWkaOnSoQ7us3+z/yM/PT15e1zb/jIwM/fLLL/avfXbu3Jmtfe/eve2f0GR5+OGH5e/vr8WLF9unffHFFzp//rweffRRp/rUq1cvlS1b1v5z586dVbFiRfs4bt++XWfPntWTTz7pcP5Pu3btVLt2bafGMzY2VlFRUfaf69evr8DAQP3000/5zpv1adOnn35aKOdFrVq1St7e3ho2bJjD9JEjR8oYo9WrVztMb9myperWrXvDr3v9cowx+uijj9S+fXsZY3T+/Hn7o3Xr1kpKSrJvB8uXL1edOnVUu3Zth3Z/+9vfJEkbNmzI9/V9fHw0cOBA+8++vr4aOHCgzp49qx07dki6Njbh4eHq3r27vV2pUqU0bNgwXb58Wf/9738L3P9BgwY5/NyiRQuH9b9q1Sr5+PjYPyGSJG9vb/v+/Ud/3A9+//13nT9/Xnfeeack5bjvXP/a0rXt/vTp0w5jt3jxYpUuXVqdOnXKtR+ZmZlauXKl2rdvr8aNG2d7Pmu/X7VqlZo0aaLmzZvbnwsICNCAAQN07NgxHThwwGG+vn37Opxb1qJFC0myj1HWfjho0CCHdllfJRaWjIwMrV27Vh07dlT16tXt0ytWrKhHHnlEmzdvVnJyssM8+R0XkT+CEFxSs2ZNh5+joqLk5eVlP5/k+PHj8vLyUo0aNRzahYeHKzg4+IZ2zusvvw4JCZF07WuHrNfOqcawsDB72yyZmZmaPn26atasKT8/P4WGhiosLEx79+5VUlJStteuVq1atmnBwcFq3769PvjgA/u0xYsXq1KlSvY3yfxcX6vNZlONGjUcxlOSatWqlW3e2rVrOzWeOV22HhISYh+3vHTt2lV33XWXHn/8cVWoUEHdunXTsmXLChyKjh8/roiICIfwJ1372izr+T/KadwL4vrlnDt3ThcvXtTcuXMVFhbm8Ojbt6+ka+frSNLhw4e1f//+bO1uu+02h3Z5iYiI0C233OIwLWv+P67rmjVr2gN6ltzGxln+/v4KCwtzmHb9+j9+/LgqVqyogIAAh3Y5bXcXLlzQU089pQoVKqh06dIKCwuzj6+z+87f//53VaxY0f5LRGZmpj788EN16NAh27bxR+fOnVNycnK+92U6fvx4jrXnNpYFPbaUKlXKIbDcqHPnziklJSXX2jMzM7OdQ5hf7cgf5wjhhuR2A78bubHf9SdxZsntKgxz3Um2znjllVf04osv6rHHHtPkyZNVrlw5eXl5afjw4Tm+yV//aVCWXr16afny5dq6dauio6P12Wef6cknn8z2ZuZJNzJupUuX1
qZNm7Rhwwb95z//0Zo1a7R06VL97W9/09q1a52+Mqagchv3G11O1jp+9NFHczwPRrr2yVlW2+joaL355ps5tsvr/BB3yG3fcnW/KaiHH35YW7du1ahRo9SwYUMFBAQoMzNTbdq0cXrf8fb21iOPPKJ58+bp3Xff1ZYtW3T69GmnP0ktbIV5bClqJbn24oIgBJccPnzY4Te8I0eOKDMz034Sa5UqVZSZmanDhw/bf/uSpMTERF28eFFVqlSxTwsJCcl2VUpaWprOnDlToNqyln348GGH39LOnTuX7bejFStW6J577tH8+fMdpl+8eNGlG/G1adNGYWFhWrx4sWJiYpSSkqKePXs6Pf/hw4cdfjbG6MiRI/Y34aw+/fDDD9k+Zfrhhx8cxvNG5BVcvby81KpVK7Vq1UpvvvmmXnnlFT3//PPasGGDYmNjXXqdKlWq6KuvvtKlS5ccfvM/dOiQ/fmiEBYWprJlyyojIyPfPkRFRWnPnj1q1apVgQP+6dOndeXKFYdPhX788UdJcth39u7dq8zMTIcgff3YZP3Gf/2+cyOftlapUkXr1q3T5cuXHT4V+uGHHxza/frrr1q3bp0mTpyocePG2adfvx07o1evXnrjjTf073//W6tXr1ZYWJhat26d5zxhYWEKDAzU999/n29/rq9dKvh29sdjyx/3w6tXryo+Pl4NGjTIc35nt5uwsDCVKVMm19q9vLyKPHhbQfH5tRUlQtZloFlmzpwpSbrvvvskSW3btpUkzZgxw6Fd1m/T7dq1s0+LiorSpk2bHNrNnTs3199s8xMbG6tSpUpp5syZDr8NXV+LdO23qOt/Y1q+fHm+l+1ez8fHx34V0KJFixQdHW0PMc745z//qUuXLtl/XrFihc6cOWMfz8aNG6t8+fKaPXu2w6Xaq1ev1sGDBx3G80ZkvUFf/+Z64cKFbG2z/nSFK5eOZ2nbtq0yMjL0zjvvOEyfPn26bDabvd/u5u3trU6dOumjjz7K8U31j5cfP/zwwzp16pTmzZuXrd1vv/3m1L1x0tPTNWfOHPvPaWlpmjNnjsLCwtSoUSNJ18YmISHB4Wqf9PR0zZw5UwEBAWrZsqWka2/K3t7e2fadd999N986ctO2bVulp6frvffes0/LyMiw799Zsj59uH7fyWkfy0/9+vVVv359/eMf/9BHH32kbt265Xs/naw/1fLvf/9b27dvz/Z8Vl1t27bVtm3bFBcXZ3/uypUrmjt3rqpWreryeWeNGzdWWFiYZs+e7XAF6qJFi3K8xcD1ctu/ruft7a17771Xn376qf0rU+naL5IffPCBmjdvrsDAQJdqR/74RAguiY+P1wMPPKA2bdooLi5O//d//6dHHnnE/htRgwYN1Lt3b82dO1cXL15Uy5YttW3bNr3//vvq2LGj7rnnHvuyHn/8cQ0aNEidOnXS3//+d+3Zs0dffPFFgf80Qtb9UaZMmaL7779fbdu21a5du7R69epsy7z//vs1adIk9e3bV82aNdO+ffu0ePHiAn3f36tXL7399tvasGGDXn31VZfmLVeunJo3b66+ffsqMTFRM2bMUI0aNdS/f39J185BePXVV9W3b1+1bNlS3bt3t18+X7Vq1WyX7hdU1pvx888/r27duqlUqVJq3769Jk2apE2bNqldu3aqUqWKzp49q3fffVeVK1d2OBHVWe3bt9c999yj559/XseOHVODBg20du1affrppxo+fLjDSd3uNnXqVG3YsEExMTHq37+/6tatqwsXLmjnzp366quv7CGwZ8+eWrZsmQYNGqQNGzborrvuUkZGhg4dOqRly5bpiy++yPHE3T+KiIjQq6++qmPHjum2227T0qVLtXv3bs2dO9d+B+IBAwZozpw56tOnj3bs2KGqVatqxYoV2rJli2bMmGH/BC0oKEhdunTRzJkzZbPZFBUVpc8//9ypc5Vy0759e911110aM2aMjh07prp16+rjjz/Ods5PYGCg/vrXv+q1117T1atXValSJa1du1bx8fEFet1evXrpmWeekSSnvxZ75ZVXtHbtWrVs2dJ+O4MzZ85o+fLl2rx5s4KDgzVmzBh9+OGHuu+++zRs2DCVK1dO77//vuLj4/XRRx+5/NV1qVKl9NJLL2ngwIH629/+pq5duyo+Pl4LFy506piRtX8NGzZMrVu3lre3t7p165Zj25deesl+764nn3xSPj4+mjNnjlJTU/Xaa6+5VDec5JmL1VDSZF0Ke+DAAdO5c2dTtmxZExISYoYMGeJwWbcxxly9etVMnDjRVKtWzZQqVcpERkaasWPHmt9//92hXUZGhnn22WdNaGioKVOmjGndurU5cuRIrpfPX3+5bE6XEWdkZJiJEyeaihUrmtKlS5u7777bfP/999mW+fvvv5uRI0fa2911110mLi7OtGzZ0uFS2KzXWL58eZ7jU69ePePl5WV+/vlnp8Yza7kffvihGTt2rClfvrwpXbq0adeunTl+/Hi29kuXLjV33HGH8fPzM+XKlTM9evTI9lq5XT4/ePDgbMu7fjyMMWby5MmmUqVKxsvLy34p/bp160yHDh1MRESE8fX1NREREaZ79+7mxx9/zLePOV0+b8y1y5iffvppExERYUqVKmVq1qxpXn/9dYdLgPOqPTd5XT6f23ISExPN4MGDTWRkpClVqpQJDw83rVq1MnPnznVol5aWZl599VVTr1494+fnZ0JCQkyjRo3MxIkTTVJSUp51ZY3D9u3bTdOmTY2/v7+pUqWKeeedd3Ksp2/fviY0NNT4+vqa6OjoHC8nP3funOnUqZMpU6aMCQkJMQMHDjTff/99jpfP5zQmOW0rv/zyi+nZs6cJDAw0QUFBpmfPnmbXrl3Zlvnzzz+bBx980AQHB5ugoCDTpUsXc/r06WyXgWe9Rl63Xjhz5ozx9vY2t912W+4DmIPjx4+bXr16mbCwMOPn52eqV69uBg8e7HBp+9GjR03nzp1NcHCw8ff3N02aNDGff/65w3Jy279zugTeGGPeffddU61aNePn52caN25sNm3alO2YkdO86enpZujQoSYsLMzYbDaHsb9+3Iy5diuB1q1bm4CAAFOmTBlzzz33mK1btzq0ceW4iLzZjOGMKuBG3XHHHSpXrpzWrVvnVPuNGzfqnnvu0fLly9W5c2c3VwcUT+fPn1fFihU1btw4vfjii54uBxbFOULADdq+fbt2796tXr16eboUoERZtGiRMjIyXLrAAChsnCMEFND333+vHTt26I033lDFihXVtWtXT5cElAjr16/XgQMH9PLLL6tjx465/ukUoCjwiRBQQCtWrFDfvn119epVffjhh8XqL78DxdmkSZM0YsQINWzYMNuVaUBR4xwhAABgWXwiBAAALIsgBAAALIuTpfORmZmp06dPq2zZsjf097MAAEDRMcbo0qVLioiIyPMmmgShfJw+fZq/7QIAQAl18uRJVa5cOdfnCUL5yLqt/cmTJ/kbLwAAlBDJycmKjIx0+APPOSEI5SPr67DAwECCEAAAJUx+p7VwsjQAALAsghAAALAsghAA
ALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAs7iztCRkZ0tdfS2fOSBUrSi1aSN7eN942LU16913p6FEpKkp68knJ17dk1+qu/lt9XKnV87W6Y3t1R52uvH5xqJVx9exx0NP7VUEY5CkpKclIMklJSYWzwI8+MqZyZWOk/z0qV742/UbajhpljLe3Y1tv72vTS2qt7uq/1ceVWj1fqzu2V3fU6crrF4daGVfPHgc9vV9dx9n3b4JQPgo1CH30kTE2m+OKl65Ns9kcNxZX2o4alb3dHx8F2bA8Xau7+m/1caVWz9fqju3VHXW6a1zdVSvj6tnjoKf3qxwQhApJoQWh9PTsSfn6jSUy8lo7V9qmpmZP1dc/vL2vtSsptaakuKf/Vh9XavV8re7YXlNSCr9Od42ru2plXD17HHTXMfsGEYQKSaEFoQ0b8l7xWY8NG1xrO326c22nTy85tQ4e7J7+W31cqdXztbpje3V2f3GlTneNq7tqZVwLv//F4Zh9g5x9/+Zk6aJy5kzhtstqe/Soc22dbedKDe6q9fDhwl/m0aNShQruWa6zPD2u1OraMt1Rqzu2V2f3F1fqlNwzru6qlXF1rp27joPuOmYXES6fLyoVKzrfzpW2UVHOtXW2XdZynW3njlpr1iz8ZUZFMa7U6vla3bG9Oru/uFKn5J5xdVetjKtz7dx1HHTXMbuo3PBnTze5Qj9HKKeTyaScvxd2pq07z7nwVK1Z3zcXdv+tPq7U6vla3bG9uvtclsIc1+J0jpBVx7UkHbNvEOcIFRK3XDV2/caS15UCzrR151U4nqrVXf23+rhSq+drdcf26u6rmwpzXIvTVWNWHdeSdMy+AQShQlIk9xGKjHT+3hG5tS2q+7IUZa3u6r/Vx5VaPV+rO7bXorzfzY2Oa3G9j5CVxrUkHbMLyNn3b5sxxhTdF3ElT3JysoKCgpSUlKTAwMDCWWhJuvutp2vlztLUerPWyh2QubO0p8e1JB2zC8DZ92+CUD7cEoQAAIBbOfv+zVVjAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAsghCAADAskpcEJo1a5aqVq0qf39/xcTEaNu2bU7Nt2TJEtlsNnXs2NG9BQIAgBKjRAWhpUuXasSIERo/frx27typBg0aqHXr1jp79mye8x07dkzPPPOMWrRoUUSVAgCAkqBEBaE333xT/fv3V9++fVW3bl3Nnj1bZcqU0YIFC3KdJyMjQz169NDEiRNVvXr1IqwWAAAUdyUmCKWlpWnHjh2KjY21T/Py8lJsbKzi4uJynW/SpEkqX768+vXrVxRlAgCAEsTH0wU46/z588rIyFCFChUcpleoUEGHDh3KcZ7Nmzdr/vz52r17t9Ovk5qaqtTUVPvPycnJBaoXAAAUfyXmEyFXXbp0ST179tS8efMUGhrq9HxTpkxRUFCQ/REZGenGKgEAgCeVmE+EQkND5e3trcTERIfpiYmJCg8Pz9b+6NGjOnbsmNq3b2+flpmZKUny8fHRDz/8oKioqGzzjR07ViNGjLD/nJycTBgCAOAmVWKCkK+vrxo1aqR169bZL4HPzMzUunXrNGTIkGzta9eurX379jlMe+GFF3Tp0iW99dZbuYYbPz8/+fn5FXr9AACg+CkxQUiSRowYod69e6tx48Zq0qSJZsyYoStXrqhv376SpF69eqlSpUqaMmWK/P39dfvttzvMHxwcLEnZpgMAAGsqUUGoa9euOnfunMaNG6eEhAQ1bNhQa9assZ9AfeLECXl53bSnPQEAgEJmM8YYTxdRnCUnJysoKEhJSUkKDAz0dDkAAMAJzr5/8/EJAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwLIIQAACwrBIXhGbNmqWqVavK399fMTEx2rZtW65t582bpxYtWigkJEQhISGKjY3Nsz0AALCWEhWEli5dqhEjRmj8+PHauXOnGjRooNatW+vs2bM5tt+4caO6d++uDRs2KC4uTpGRkbr33nt16tSpIq4cAAAURzZjjPF0Ec6KiYnRX/7yF73zzjuSpMzMTEVGRmro0KEaM2ZMvvNnZGQoJCRE77zzjnr16uXUayYnJysoKEhJSUkKDAy8ofoBAEDRcPb9u8R8IpSWlqYdO3YoNjbWPs3Ly0uxsbGKi4tzahkpKSm6evWqypUr564yAQBACeLj6QKcdf78eWVkZKhChQoO0ytUqKBDhw45tYxnn31WERERDmHqeqmpqUpNTbX/nJycXLCCAQBAsVdiPhG6UVOnTtWSJUv0ySefyN/fP9d2U6ZMUVBQkP0RGRlZhFUCAICiVGKCUGhoqLy9vZWYmOgwPTExUeHh4XnOO23aNE2dOlVr165V/fr182w7duxYJSUl2R8nT5684doBAEDxVGKCkK+vrxo1aqR169bZp2VmZmrdunVq2rRprvO99tprmjx5stasWaPGjRvn+zp+fn4KDAx0eAAAgJtTiTlHSJJGjBih3r17q3HjxmrSpIlmzJihK1euqG/fvpKkXr16qVKlSpoyZYok6dVXX9W4ceP0wQcfqGrVqkpISJAkBQQEKCAgwGP9AAAAxUOJCkJdu3bVuXPnNG7cOCUkJKhhw4Zas2aN/QTqEydOyMvrfx9yvffee0pLS1Pnzp0dljN+/HhNmDChKEsHAADFUIm6j5AncB8hAABKnpvuPkIAAACFjSAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyAEAAAsiyA
EAAAsiyAEAAAsiyAEAAAsy+UgtGbNGm3evNn+86xZs9SwYUM98sgj+vXXXwu1OAAAAHdyOQiNGjVKycnJkqR9+/Zp5MiRatu2reLj4zVixIhCLxAAAMBdfFydIT4+XnXr1pUkffTRR7r//vv1yiuvaOfOnWrbtm2hFwgAAOAuLn8i5Ovrq5SUFEnSV199pXvvvVeSVK5cOfsnRQAAACWBy58INW/eXCNGjNBdd92lbdu2aenSpZKkH3/8UZUrVy70AgEAANzF5U+E3nnnHfn4+GjFihV67733VKlSJUnS6tWr1aZNm0IvEAAAwF1sxhjj6SKKs+TkZAUFBSkpKUmBgYGeLgcAADjB2fdvp74aS05Oti8kv/OACAsAAKCkcCoIhYSE6MyZMypfvryCg4Nls9mytTHGyGazKSMjo9CLBAAAcAengtD69etVrlw5+/9zCkIAAAAlDecI5YNzhAAAKHmcff92+aqxCRMmKDMzM9v0pKQkde/e3dXFAQAAeIzLQWj+/Plq3ry5fvrpJ/u0jRs3Kjo6WkePHi3U4gAAANzJ5SC0d+9eVa5cWQ0bNtS8efM0atQo3XvvverZs6e2bt3qjhoBAADcwuUgFBISomXLlmnIkCEaOHCg3nrrLa1evVovv/yyfHxcvlG1y2bNmqWqVavK399fMTEx2rZtW57tly9frtq1a8vf31/R0dFatWqV22sEAAAlg8tBSJJmzpypt956S927d1f16tU1bNgw7dmzp7Bry2bp0qUaMWKExo8fr507d6pBgwZq3bq1zp49m2P7rVu3qnv37urXr5927dqljh07qmPHjvr+++/dXisAACj+XL5qrE2bNtq+fbtmz56tzp0767ffftOIESO0aNEiTZw4UaNHj3ZXrYqJidFf/vIXvfPOO5KkzMxMRUZGaujQoRozZky29l27dtWVK1f0+eef26fdeeedatiwoWbPnu3Ua3LVGAAAJY/brhrLyMjQ3r171blzZ0lS6dKl9d5772nFihWaPn16wSvOR1pamnbs2KHY2Fj7NC8vL8XGxiouLi7HeeLi4hzaS1Lr1q1zbS9JqampSk5OdngAAICbk8tB6Msvv1RERES26e3atdO+ffsKpaicnD9/XhkZGapQoYLD9AoVKighISHHeRISElxqL0lTpkxRUFCQ/REZGXnjxQMAgGKpQOcI5SY0NLQwF+cRY8eOVVJSkv1x8uRJT5cEAADcxOXLvDIyMjR9+nQtW7ZMJ06cUFpamsPzFy5cKLTi/ig0NFTe3t5KTEx0mJ6YmKjw8PAc5wkPD3epvST5+fnJz8/vxgsGAADFnsufCE2cOFFvvvmmunbtqqSkJI0YMUIPPfSQvLy8NGHCBDeUeI2vr68aNWqkdevW2adlZmZq3bp1atq0aY7zNG3a1KG9dO2rvdzaAwAAa3E5CC1evFjz5s3TyJEj5ePjo+7du+sf//iHxo0bp2+++cYdNdqNGDFC8+bN0/vvv6+DBw/qiSee0JUrV9S3b19JUq9evTR27Fh7+6eeekpr1qzRG2+8oUOHDmnChAnavn27hgwZ4tY6AQBAyeDyV2MJCQmKjo6WJAUEBCgpKUmSdP/99+vFF18s3Oqu07VrV507d07jxo1TQkKCGjZsqDVr1thPiD5x4oS8vP6X7Zo1a6YPPvhAL7zwgp577jnVrFlTK1eu1O233+7WOgEAQMngchCqXLmyzpw5o1tvvVVRUVFau3at/vznP+u7774rknNrhgwZkusnOhs3bsw2rUuXLurSpYubqwIAACWRy1+NPfjgg/bzboYOHaoXX3xRNWvWVK9evfTYY48VeoEAAADu4vKdpa8XFxenuLg41axZU+3bty+suooN7iwNAEDJ4+z79w3/ldSmTZtyFRYAACiRbuiGioGBgfrpp58KqxYAAIAi5XQQOn36dLZpN/itGgAAgEc5HYTq1aunDz74wJ21AAAAFCmng9DLL7+sgQMHqkuXLvY/o/Hoo49yAjEAACixnA5CTz75pPbu3atffvlFdevW1b///W+99957N8UfWgUAANbk0lVj1apV0/r16/XOO+/ooYceUp06deTj47iInTt3FmqBAAAA7uLy5fPHjx/Xxx9/rJCQEHXo0CFbEAIAACgpXEoxWX9sNTY2Vvv371dYWJi76gIAAHA7p4NQmzZttG3bNr3zzjvq1auXO2sCAAAoEk4HoYyMDO3du1eVK1d2Zz0AAABFxukg9OWXX7qzDgAAgCJ3Q39iAwAAoCQjCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsqMUHowoUL6tGjhwIDAxUcHKx+/frp8uXLebYfOnSoatWqpdKlS+vWW2/VsGHDlJSUVIRVAwCA4qzEBKEePXpo//79+vLLL/X5559r06ZNGjBgQK7tT58+rdOnT2vatGn6/vvvtWjRIq1Zs0b9+vUrwqoBAEBxZjPGGE8XkZ+DBw+qbt26+u6779S4cWNJ0po1a9S2bVv9/PPPioiIcGo5y5cv16OPPqorV67Ix8fHqXmSk5MVFBSkpKQkBQYGFrgPAACg6Dj7/l0iPhGKi4tTcHCwPQRJUmxsrLy8vPTtt986vZyswcgrBKWmpio5OdnhAQAAbk4lIgglJCSofPnyDtN8fHxUrlw5JSQkOLWM8+fPa/LkyXl+nSZJU6ZMUVBQkP0RGRlZ4LoBAEDx5tEgNGbMGNlstjwfhw4duuHXSU5OVrt27VS3bl1NmDAhz7Zjx45VUlKS/XHy5Mkbfn0AAFA8OXeijJuMHDlSffr0ybNN9erVFR4errNnzzpMT09P14ULFxQeHp7n/JcuXVKbNm1UtmxZffLJJypVqlSe7f38/OTn5+dU/QAAoGTzaBAKCwtTWFhYvu2aNm2qixcvaseOHWrUqJEkaf369crMzFRMTEyu8yUnJ6t169by8/PTZ599Jn9//0KrHQAAlHwl4hyhOnXqqE2bNurfv7+2bdumLVu2aMiQIerWrZv9irFTp06pdu3a2rZtm6RrIejee+/VlStXNH/+fCUnJyshIUEJCQnKyMjwZHcAAEAx4dFPhFyxePFiDRkyRK1atZKXl5c6deqkt99+2/781atX9cMPPyglJUWStHPnTvsVZTVq1HBYVnx8vKpWrVpktQMAgOKpRNxHyJO4jxAAACXPTXUfIQAAAHcgCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAM
siCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsqMUHowoUL6tGjhwIDAxUcHKx+/frp8uXLTs1rjNF9990nm82mlStXurdQAABQYpSYINSjRw/t379fX375pT7//HNt2rRJAwYMcGreGTNmyGazublCAABQ0vh4ugBnHDx4UGvWrNF3332nxo0bS5Jmzpyptm3batq0aYqIiMh13t27d+uNN97Q9u3bVbFixaIqGQAAlAAl4hOhuLg4BQcH20OQJMXGxsrLy0vffvttrvOlpKTokUce0axZsxQeHu7Ua6Wmpio5OdnhAQAAbk4lIgglJCSofPnyDtN8fHxUrlw5JSQk5Drf008/rWbNmqlDhw5Ov9aUKVMUFBRkf0RGRha4bgAAULx5NAiNGTNGNpstz8ehQ4cKtOzPPvtM69ev14wZM1yab+zYsUpKSrI/Tp48WaDXBwAAxZ9HzxEaOXKk+vTpk2eb6tWrKzw8XGfPnnWYnp6ergsXLuT6ldf69et19OhRBQcHO0zv1KmTWrRooY0bN+Y4n5+fn/z8/JztAgAAKME8GoTCwsIUFhaWb7umTZvq4sWL2rFjhxo1aiTpWtDJzMxUTExMjvOMGTNGjz/+uMO06OhoTZ8+Xe3bt7/x4gEAQIlXIq4aq1Onjtq0aaP+/ftr9uzZunr1qoYMGaJu3brZrxg7deqUWrVqpX/+859q0qSJwsPDc/y06NZbb1W1atWKugsAAKAYKhEnS0vS4sWLVbt2bbVq1Upt27ZV8+bNNXfuXPvzV69e1Q8//KCUlBQPVgkAAEoSmzHGeLqI4iw5OVlBQUFKSkpSYGCgp8sBAABOcPb9u8R8IgQAAFDYCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyCEIAAMCyfDxdACRlZEhffy2dOSNVrCi1aCF5e+fcNi1Nevdd6ehRKSpKevJJyde3aJbp6nJdaessd9XqynI9Was7xtRdtRaHdXUzjiu1lqzjoDtqvVnH1VMM8pSUlGQkmaSkJPe8wEcfGVO5sjHS/x6VK1+bfr1Ro4zx9nZs6+19bbq7l+nqcl1p6yx31erKcj1ZqzvG1F21Fod1dTOOK7WWrOOgO2q9WcfVDZx9/yYI5cOtQeijj4yx2Rw3EunaNJvNccMaNSp7uz8+sjYsdyzT1eW60tZZ7qrVleV6slZ3jKm7ai0O6+pmHFdqLVnHQXfUerOOq5sQhAqJ24JQenr2VH39hhUZea1damr2VH39w9vbmJSUwl9maqprtbrS1lnuqtWV5Xqy1tTUwh9Td9WakuL5dXUzjiu1lqzjoDtqdcextTiMqxsRhAqJ24LQhg15byRZjw0bjJk+3bm2gwcX/jKnT3etVlfaOstdtbqyXE/W6uwyXRlTd9Xq7DboznV1M44rtZas46A7anXHsbU4jKsbOfv+zcnSnnLmjPPtjh51ru3hw4W/zKNHpQoVnF+us1xp665aXVmus9xRq7PLdGVMXVmuK7U6uw26c13djONKrSXrOOiOWp1V0sa1GODyeU+pWNH5dlFRzrWtWbPwlxkV5VqtrrR1lrtqdWW5znJHrc4u05UxdWW5rtTq7DboznV1M44rtZas46A7anXHsVXy/LgWB279XOom4PZzhHI68Uy6se+bC3OZf/xu3JnlutLWWe6qtbicI5RfrVnnXBTmmLqr1oKcI+Sp7bUkjSu1lqzjoDtqdcextTiMqxtxjlAhKZKrxq7fsArjCoTCXKary3WlrbPcVWtxuWosv1rdMabuqrU4rKubcVyptWQdB91R6806rm5CECokHrmPUGRk4d+T4kaX6epyXWnrLHfVWhzvI5RTre4YU3fVWhzW1c04rtRaso6D7qj1Zh1XN3D2/dtmjDGe/GquuEtOTlZQUJCSkpIUGBjonhfx9F1KubM0d5bmztIlZ1yptWQdB7mztMfuLO3s+zdBKB9FEoQAAEChcvb9m6vGAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZRGEAACAZfl4uoDiLuvG28nJyR6uBAAAOCvrfTu/P6BBEMrHpUuXJEmRkZEergQAALjq0qVLCgoKyvV5/tZYPjIzM3X69GmVLVtWNpvNpXmTk5MVGRmpkydP3rR/p8wKfZSs0U/6eHOwQh8la/STPt4YY4wuXbqkiIgIeXnlfiYQnwjlw8vLS5UrV76hZQQGBt60G3EWK/RRskY/6ePNwQp9lKzRT/pYcHl9EpSFk6UBAIBlEYQAAIBlEYTcyM/PT+PHj5efn5+nS3EbK/RRskY/6ePNwQp9lKzRT/pYNDhZGgAAWBafCAEAAMsiCAEAAMsiCAEAAMsiCAEAAMsiCN2Al19+Wc2aNVOZMmUUHBzs1DzGGI0bN04VK1ZU6dKlFRsbq8OHDzu0uXDhgnr06KHAwEAFBwerX79+unz5sht64BxX6zl27JhsNluOj+XLl9vb5fT8kiVLiqJL2RRkzO++++5s9Q8aNMihzYkTJ9SuXTuVKVNG5cuX16hRo5Senu7OruTK1T5euHBBQ4cOVa1atVS6dGndeuutGjZsmJKSkhzaeXo9zpo1S1WrVpW/v79iYmK0bdu2PNsvX75ctWvXlr+/v6Kjo7Vq1SqH553ZR4uaK32cN2+eWrRooZCQEIWEhCg2NjZb+z59+mRbZ23atHF3N/LkSh8XLVqUrX5/f3+HNsVxPUqu9TOnY4zNZlO7du3sbYrTuty0aZPat2+viIgI2Ww2rVy5Mt95Nm7cqD//+c/y8/NTjRo1tGjRomxtXN3HXWZQYOPGjTNvvvmmGTFihAkKCnJqnqlTp5qgoCCzcuVKs2fPHvPAAw+YatWqmd9++83epk2bNqZBgwbmm2++MV9//bWpUaOG6d69u5t6kT9X60lPTzdnzpxxeEycONEEBASYS5cu2dtJMgsXLnRo98dxKEoFGfOWLVua/v37O9SflJRkfz49Pd3cfvvtJjY21uzatcusWrXKhIaGmrFjx7q7OzlytY/79u0zDz30kPnss8/MkSNHzLp160zNmjVNp06dHNp5cj0uWbLE+Pr6mgULFpj9+/eb/v37m+DgY
JOYmJhj+y1bthhvb2/z2muvmQMHDpgXXnjBlCpVyuzbt8/expl9tCi52sdHHnnEzJo1y+zatcscPHjQ9OnTxwQFBZmff/7Z3qZ3796mTZs2DuvswoULRdWlbFzt48KFC01gYKBD/QkJCQ5titt6NMb1fv7yyy8Offz++++Nt7e3Wbhwob1NcVqXq1atMs8//7z5+OOPjSTzySef5Nn+p59+MmXKlDEjRowwBw4cMDNnzjTe3t5mzZo19jaujllBEIQKwcKFC50KQpmZmSY8PNy8/vrr9mkXL140fn5+5sMPPzTGGHPgwAEjyXz33Xf2NqtXrzY2m82cOnWq0GvPT2HV07BhQ/PYY485THNmRykKBe1jy5YtzVNPPZXr86tWrTJeXl4OB+j33nvPBAYGmtTU1EKp3VmFtR6XLVtmfH19zdWrV+3TPLkemzRpYgYPHmz/OSMjw0RERJgpU6bk2P7hhx827dq1c5gWExNjBg4caIxxbh8taq728Xrp6emmbNmy5v3337dP6927t+nQoUNhl1pgrvYxv2NucVyPxtz4upw+fbopW7asuXz5sn1acVuXWZw5LowePdrUq1fPYVrXrl1N69at7T/f6Jg5g6/GilB8fLwSEhIUGxtrnxYUFKSYmBjFxcVJkuLi4hQcHKzGjRvb28TGxsrLy0vffvttkddcGPXs2LFDu3fvVr9+/bI9N3jwYIWGhqpJkyZasGCBjAdua3UjfVy8eLFCQ0N1++23a+zYsUpJSXFYbnR0tCpUqGCf1rp1ayUnJ2v//v2F35E8FNZ2lZSUpMDAQPn4OP6ZQk+sx7S0NO3YscNhf/Ly8lJsbKx9f7peXFycQ3vp2jrJau/MPlqUCtLH66WkpOjq1asqV66cw/SNGzeqfPnyqlWrlp544gn98ssvhVq7swrax8uXL6tKlSqKjIxUhw4dHPap4rYepcJZl/Pnz1e3bt10yy23OEwvLuvSVfntj4UxZs7gj64WoYSEBElyeGPM+jnruYSEBJUvX97heR8fH5UrV87epigVRj3z589XnTp11KxZM4fpkyZN0t/+9jeVKVNGa9eu1ZNPPqnLly9r2LBhhVa/Mwrax0ceeURVqlRRRESE9u7dq2effVY//PCDPv74Y/tyc1rXWc8VpcJYj+fPn9fkyZM1YMAAh+meWo/nz59XRkZGjmN86NChHOfJbZ38cf/LmpZbm6JUkD5e79lnn1VERITDm0mbNm300EMPqVq1ajp69Kiee+453XfffYqLi5O3t3eh9iE/BeljrVq1tGDBAtWvX19JSUmaNm2amjVrpv3796ty5crFbj1KN74ut23bpu+//17z5893mF6c1qWrctsfk5OT9dtvv+nXX3+94e3fGQSh64wZM0avvvpqnm0OHjyo2rVrF1FF7uFsP2/Ub7/9pg8++EAvvvhituf+OO2OO+7QlStX9PrrrxfaG6i7+/jHQBAdHa2KFSuqVatWOnr0qKKiogq8XFcU1XpMTk5Wu3btVLduXU2YMMHhOXevRxTc1KlTtWTJEm3cuNHhZOJu3brZ/x8dHa369esrKipKGzduVKtWrTxRqkuaNm2qpk2b2n9u1qyZ6tSpozlz5mjy5MkerMx95s+fr+joaDVp0sRheklfl8UBQeg6I0eOVJ8+ffJsU7169QItOzw8XJKUmJioihUr2qcnJiaqYcOG9jZnz551mC89PV0XLlywz18YnO3njdazYsUKpaSkqFevXvm2jYmJ0eTJk5Wamloof3emqPqYJSYmRpJ05MgRRUVFKTw8PNvVDYmJiZJUaOuyKPp46dIltWnTRmXLltUnn3yiUqVK5dm+sNdjbkJDQ+Xt7W0f0yyJiYm59ik8PDzP9s7so0WpIH3MMm3aNE2dOlVfffWV6tevn2fb6tWrKzQ0VEeOHCnyN88b6WOWUqVK6Y477tCRI0ckFb/1KN1YP69cuaIlS5Zo0qRJ+b6OJ9elq3LbHwMDA1W6dGl5e3vf8LbhlEI728jCXD1Zetq0afZpSUlJOZ4svX37dnubL774wuMnSxe0npYtW2a7yig3L730kgkJCSlwrQVVWGO+efNmI8ns2bPHGPO/k6X/eHXDnDlzTGBgoPn9998LrwNOKGgfk5KSzJ133mlatmxprly54tRrFeV6bNKkiRkyZIj954yMDFOpUqU8T5a+//77HaY1bdo028nSee2jRc3VPhpjzKuvvmoCAwNNXFycU69x8uRJY7PZzKeffnrD9RZEQfr4R+np6aZWrVrm6aefNsYUz/VoTMH7uXDhQuPn52fOnz+f72t4el1mkZMnS99+++0O07p3757tZOkb2TacqrXQlmRBx48fN7t27bJfGr5r1y6za9cuh0vEa9WqZT7++GP7z1OnTjXBwcHm008/NXv37jUdOnTI8fL5O+64w3z77bdm8+bNpmbNmh6/fD6ven7++WdTq1Yt8+233zrMd/jwYWOz2czq1auzLfOzzz4z8+bNM/v27TOHDx827777rilTpowZN26c2/uTE1f7eOTIETNp0iSzfft2Ex8fbz799FNTvXp189e//tU+T9bl8/fee6/ZvXu3WbNmjQkLC/Po5fOu9DEpKcnExMSY6Ohoc+TIEYfLc9PT040xnl+PS5YsMX5+fmbRokXmwIEDZsCAASY4ONh+pV7Pnj3NmDFj7O23bNlifHx8zLRp08zBgwfN+PHjc7x8Pr99tCi52sepU6caX19fs2LFCod1lnVcunTpknnmmWdMXFyciY+PN1999ZX585//bGrWrFnkAb2gfZw4caL54osvzNGjR82OHTtMt27djL+/v9m/f7+9TXFbj8a43s8szZs3N127ds02vbity0uXLtnfByWZN9980+zatcscP37cGGPMmDFjTM+ePe3tsy6fHzVqlDl48KCZNWtWjpfP5zVmhYEgdAN69+5tJGV7bNiwwd5G//8eK1kyMzPNiy++aCpUqGD8/PxMq1atzA8//OCw3F9++cV0797dBAQEmMDAQNO3b1+HcFXU8qsnPj4+W7+NMWbs2LEmMjLSZGRkZFvm6tWrTcOGDU1AQIC55ZZbTIMGDczs2bNzbFsUXO3jiRMnzF//+ldTrlw54+fnZ2rUqGFGjRrlcB8hY4w5duyYue+++0zp0qVNaGioGTlypMOl50XJ1T5u2LAhx+1bkomPjzfGFI/1OHPmTHPrrbcaX19f06RJE/PNN9/Yn2vZsqXp3bu3Q/tly5aZ2267zfj6+pp69eqZ//znPw7PO7OPFjVX+lilSpUc19n48eONMcakpKSYe++914SFhZlSpUqZKlWqmP79+xfqG0tBuNLH4cOH29tWqFDBtG3b1uzcudNhecVxPRrj+vZ66NAhI8msXbs227KK27rM7ZiR1afevXubli1bZpunYcOGxtfX11SvXt3h/TJLXmNWGGzGeOB6ZQAAgGKA+wgBAADLIggBAADLIggBAADLIggBAADLIggBAADLIggBAADLIggBAADLIggBQD42btwom82mixcveroUAIWMIASgxMjIyFCzZs300EMP
OUxPSkpSZGSknn/+ebe8brNmzXTmzBkFBQW5ZfkAPIc7SwMoUX788Uc1bNhQ8+bNU48ePSRJvXr10p49e/Tdd9/J19fXwxUCKEn4RAhAiXLbbbdp6tSpGjp0qM6cOaNPP/1US5Ys0T//+c9cQ9Czzz6r2267TWXKlFH16tX14osv6urVq5IkY4xiY2PVunVrZf1eeOHCBVWuXFnjxo2TlP2rsePHj6t9+/YKCQnRLbfconr16mnVqlXu7zyAQufj6QIAwFVDhw7VJ598op49e2rfvn0aN26cGjRokGv7smXLatGiRYqIiNC+ffvUv39/lS1bVqNHj5bNZtP777+v6Ohovf3223rqqac0aNAgVapUyR6Erjd48GClpaVp06ZNuuWWW3TgwAEFBAS4q7sA3IivxgCUSIcOHVKdOnUUHR2tnTt3ysfH+d/rpk2bpiVLlmj79u32acuXL1evXr00fPhwzZw5U7t27VLNmjUlXftE6J577tGvv/6q4OBg1a9fX506ddL48eMLvV8AihZfjQEokRYsWKAyZcooPj5eP//8syRp0KBBCggIsD+yLF26VHfddZfCw8MVEBCgF154QSdOnHBYXpcuXfTggw9q6tSpmjZtmj0E5WTYsGF66aWXdNddd2n8+PHau3evezoJwO0IQgBKnK1bt2r69On6/PPP1aRJE/Xr10/GGE2aNEm7d++2PyQpLi5OPXr0UNu2bfX5559r165dev7555WWluawzJSUFO3YsUPe3t46fPhwnq//+OOP66effrJ/Nde4cWPNnDnTXd0F4EYEIQAlSkpKivr06aMnnnhC99xzj+bPn69t27Zp9uzZKl++vGrUqGF/SNdCU5UqVfT888+rcePGqlmzpo4fP55tuSNHjpSXl5dWr16tt99+W+vXr8+zjsjISA0aNEgff/yxRo4cqXnz5rmlvwDciyAEoEQZO3asjDGaOnWqJKlq1aqaNm2aRo8erWPHjmVrX7NmTZ04cUJLlizR0aNH9fbbb+uTTz5xaPOf//xHCxYs0OLFi/X3v/9do0aNUu/evfXrr7/mWMPw4cP1xRdfKD4+Xjt37tSGDRtUp06dQu8rAPfjZGkAJcZ///tftWrVShs3blTz5s0dnmvdurXS09P11VdfyWazOTw3evRoLViwQKmpqWrXrp3uvPNOTZgwQRcvXtS5c+cUHR2tp556SmPHjpUkXb16VU2bNlVUVJSWLl2a7WTpoUOHavXq1fr5558VGBioNm3aaPr06frTn/5UZGMBoHAQhAAAgGXx1RgAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALAsghAAALCs/wfJV3AB4gEqdgAAAABJRU5ErkJggg==", + "text/plain": [ + "
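+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "All of the sampling calls above pass `random='Halton'`: instead of pseudo-random uniform sampling, the points are drawn from a Halton low-discrepancy sequence, which covers the geometry more evenly at small sample sizes and is a common choice for PINN collocation points. A standalone sketch of the same idea with SciPy (this is not PaddleScience's internal implementation; the bounds assume the plate spans x in [-1, 1] and y in [-0.5, 0.5], as in the equations above):\n",
+     "\n",
+     "```python\n",
+     "from scipy.stats import qmc\n",
+     "\n",
+     "sampler = qmc.Halton(d=2, scramble=False)\n",
+     "unit_points = sampler.random(1000)  # 1000 points in the unit square\n",
+     "points = qmc.scale(unit_points, [-1.0, -0.5], [1.0, 0.5])  # map onto the plate rectangle\n",
+     "print(points.min(axis=0), points.max(axis=0))\n",
+     "```"
+    ]
+   },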
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "ud_buondary_points = rectangle.sample_boundary(100, random='Halton', criteria=lambda x, y: np.isclose(y, -Ly / 2) | np.isclose(y, Ly / 2))\n", + "px3, py3 = ud_buondary_points[\"x\"], ud_buondary_points[\"y\"]\n", + "plt.scatter(px3, py3, color='red')\n", + "plt.title('boundary points for free boundary condition')\n", + "plt.xlabel('X-axis')\n", + "plt.ylabel('Y-axis')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "接下来将自由边界条件转为实际深度学习模型中需要的约束条件:(自由)边界约束\n", + "> $$\n", + "\\left(\\frac{\\partial^2 w}{\\partial y^2}+\\mu \\frac{\\partial^2 w}{\\partial x^2}\\right)_{y=-0.5 \\mid y=+0.5}=0, \\quad\\left(\\frac{\\partial^3 w}{\\partial y^3}+(2-\\mu) \\frac{\\partial^3 w}{\\partial x^2 \\partial y}\\right)_{y=-0.5 \\mid y=+0.5}=0\n", + "$$" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "constraint_up_down = ppsci.constraint.BoundaryConstraint(\n", + " {\n", + " \"item1\": w.diff(y, 2) + mu * w.diff(x, 2), # 上下边界上需要满足的条件\n", + " \"item2\": w.diff(y, 3) + (2 - mu) * w.diff(x, 2).diff(y), # 上下边界上需要满足的条件\n", + " },\n", + " {\"item1\": 0.0, \"item2\": 0.0}, # 上下边界上需要满足的条件\n", + " rectangle,\n", + " {\n", + " \"dataset\": \"IterableNamedArrayDataset\",\n", + " \"iters_per_epoch\": 1,\n", + " \"batch_size\": 10000, # 采样一万个点用于训练\n", + " },\n", + " criteria=lambda x, y: np.isclose(y, -Ly / 2) | np.isclose(y, Ly / 2), # 采样点在左右两侧边界上\n", + " loss=ppsci.loss.MSELoss(), # 使用均方根误差损失函数\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "综上所述,控制方程、简支边界条件、自由边界条件所用的训练数据点预览如下:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
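+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "With the interior constraint and the two boundary constraints defined, the remaining step is to hand them to a PaddleScience solver together with a network and an optimizer. The snippet below is only a rough, hypothetical sketch of that wiring; every concrete value (the MLP size, learning rate, epoch count, output directory and the dict keys) is an assumption for illustration, not this notebook's actual configuration:\n",
+     "\n",
+     "```python\n",
+     "# hypothetical wiring sketch; refer to the rest of the notebook for the real setup\n",
+     "model = ppsci.arch.MLP((\"x\", \"y\"), (\"w\",), 5, 20)  # small MLP mapping (x, y) -> deflection w\n",
+     "optimizer = ppsci.optimizer.Adam(1e-3)(model)\n",
+     "\n",
+     "constraint = {\n",
+     "    \"EQ\": pde_contraint,  # dict keys here are illustrative labels\n",
+     "    \"BC_left_right\": constraint_left_right,\n",
+     "    \"BC_up_down\": constraint_up_down,\n",
+     "}\n",
+     "\n",
+     "solver = ppsci.solver.Solver(\n",
+     "    model,\n",
+     "    constraint,\n",
+     "    output_dir=\"./output_kirchhoff\",\n",
+     "    optimizer=optimizer,\n",
+     "    epochs=1000,\n",
+     "    iters_per_epoch=1,\n",
+     ")\n",
+     "solver.train()\n",
+     "```"
+    ]
+   },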
"iVBORw0KGgoAAAANSUhEUgAAA6cAAAHHCAYAAABDdy4DAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOy9fXwV5Zn//zk5CBwegh4IWBM0Jc2iCG0X5aHsZg0lFa3aIFAQFdGfoq2CBCrdWllILFarYILgY7arfmmhRgikthUQvjndbFGx+t2KihSQFEHEQCxPSUFOzu+Pw8TJZGbu636YkwSu9776cjmZM3PPPdfc57ru6ymUSCQSYBiGYRiGYRiGYZg2JK2tB8AwDMMwDMMwDMMwbJwyDMMwDMMwDMMwbQ4bpwzDMAzDMAzDMEybw8YpwzAMwzAMwzAM0+awccowDMMwDMMwDMO0OWycMgzDMAzDMAzDMG0OG6cMwzAMwzAMwzBMm8PGKcMwDMMwDMMwDNPmsHHKMAzDMAzDMAzDtDlsnDIMwxhi+fLluPjii3HOOefg3HPPTem1s7Ozceutt6b0mgzDMAzDMCZh45RhziC2bt2KiRMn4qKLLkLXrl2RmZmJ73znO1i6dGlbD+2M58MPP8Stt96KnJwclJeX47nnnmvrIZF56qmn8MILL7T1MBiGYRiGOcsJJRKJRFsPgmEYfTZv3ozRo0fjwgsvxLRp03D++efj448/xhtvvIFdu3Zh586dbT3EM5pnnnkGP/zhD7Fjxw587WtfS/n1T5w4gbS0NJxzzjnS3x08eDD69OmDWCxmfmAMwzAMwzBEOrX1ABiGMcNDDz2EXr164a233moVUvrZZ5+1zaDakEQigX/84x+IRCIpuZ41x6kO57Xo0qVLm1yXYRiGYRjGFBzWyzBnCLt27cKll17qahz17du3+f+vra1FKBRyDeMMhUIoLi5u/ndxcTFCoRD++te/4uabb0avXr2QkZGB//iP/0AikcDHH3+MwsJCpKen4/zzz8fixYtbnC8WiyEUCqGiogIlJSXIzMxEz549MXHiRBw+fBgnTpxAUVER+vbtix49euC2227DiRMnWpzj+eefx7e//W307dsXXbp0waBBg/D000+3Gnt2djauvfZarF+/HpdffjkikQieffZZXHHFFfjGN77hOmcDBw7E2LFjfWY1yVNPPYVLL70UXbp0wQUXXIB77rkHf//731tce8GCBQCAjIyMVvPo5NZbb0WPHj3w0UcfYezYsejevTsuuOACPPjgg3AGsxw/fhw/+tGP0L9/f3Tp0gUDBw7EokWLWh3nzDl94YUXEAqF8Kc//Qlz5sxBRkYGunfvjuuvvx51dXUtvvf+++/jj3/8I0KhEEKhEPLz8wEAX3zxBUpKSpCbm4uuXbuid+/e+Nd//Ve89tprwjljGIZhGIaRhT2nDHOGcNFFF+H111/He++9h8GDBxs99+TJk3HJJZfgkUcewe9//3ssXLgQ0WgUzz77LL797W/jF7/4BX7961/jvvvuw7Bhw/Bv//ZvLb7/8MMPIxKJ4Cc/+Ql27tyJpUuX4pxzzkFaWho+//xzFBcX44033sALL7yAr371q5g/f37zd59++mlceuml+N73vodOnTrhlVdewd13342mpibcc889La6zfft2TJkyBXfddRemT5+OgQMHokePHpg+fXqreXnrrbfw17/+FfPmzfO99+LiYpSUlKCgoAA//OEPsX37djz99NN466238Kc//QnnnHMOysrK8H/+z//BmjVr8PTTT6NHjx74+te/7nveeDyOq666CiNHjsSjjz6KdevWYcGCBTh16hQefPBBAEnv7/e+9z1UV1fj9ttvxze/+U2sX78ec+fOxb59+1BaWip8djNnzsR5552HBQsWoLa2FmVlZZgxYwZeeuklAEBZWRlmzpyJHj164IEHHgAA9OvXr/neH374Ydxxxx0YPnw4jhw5gj//+c9455138J3vfEd4bYZhGIZhGCkSDMOcEWzYsCERDocT4XA48a1vfSvx4x//OLF+/frEyZMnWxy3e/fuBIDE888/3+ocABILFixo/veCBQsSABJ33nln82enTp1KZGVlJUKhUOKRRx5p/vzzzz9PRCKRxLRp05o/q66uTgBIDB48uMU4pkyZkgiFQomrr766xfW/9a1vJS666KIWnzU0NLQa59ixYxMDBgxo8dlFF12UAJBYt25di8///ve/J7p27Zr493//9xaf33vvvYnu3bsnjh071ur8Fp999lmic+fOiSuvvDIRj8ebP1+2bFkCQOK//uu/mj+z5qqurs7zfBbTpk1LAEjMnDmz+bOmpqbENddck+jcuXPzOdauXZsAkFi4cGGL70+cODERCoUSO3fubHH/9rl//vnnEwASBQUFiaampubPZ8+enQiHw4m///3vzZ9deumliSuuuKLVOL/xjW8krrnmGuH9MAzDMAzDmIDDehnmDOE73/kOXn/9dXzve9/DX/7yFzz66KMYO3YsMjMz8dvf/lbr3HfccUfz/x8Oh3H55ZcjkUjg9ttvb/783HPPxcCBA/HRRx+1+v4tt9zSolDPiBEjkEgk8P/9f/9fi+NGjBiBjz/+GKdOnWr+zJ4zevjwYRw8eBBXXHEFPvroIxw+fLjF97/61a+2CtPt1asXCgsLsXLlyuZQ2Hg8jpdeegnjxo1D9+7dPe9748aNOHnyJIqKipCW9uVyOX36dKSnp+P3v/+953cpzJgxo/n/D4VCmDFjBk6ePImNGzcCAP7whz8gHA7j3nvvbfG9H/3oR0gkEnj11VeF17jzzjsRCoWa/52Xl4d4PI6//e1vwu+ee+65eP/997Fjxw7qLTEMwzAMwyjDxinDnEEMGzYMlZWV+Pzzz7Flyxbcf//9OHr0KCZOnIgPPvhA+bwXXnhhi3/36tULXbt2RZ8+fVp9/vnnn5O+DwD9+/dv9XlTU1MLo/NPf/oTCgoK0L17d5x77rnIyMjAT3/6UwBwNU7duOWWW7Bnzx7U1NQASBqdBw4cwNSpUz3vGUCzATdw4MAWn3fu3BkDBgwgGXhepKWlYcCAAS0++6d/+icAybxg6/oXXHABevbs2eK4Sy65pMX4/HDO/XnnnQcArs/JyYMPPoi///3v+Kd/+icMGTIEc+fOxbvvviv8HsMwDMMwjApsnDLMGUjnzp0xbNgw/PznP8fTTz+NL774Ai+//DIAtPCi2YnH457nC4fDpM8AtCrU43es6By7du3CmDFjcPDgQTz++OP4/e9/j9deew2zZ88GADQ1NbX4nldl3rFjx6Jfv3741a9+BQD41a9+hfPPPx8FBQWux59JyDwnJ//2b/+GXbt24b/+678wePBg/Od//ieGDh2K//zP/zQ9TIZhGIZhGDZOGeZM5/LLLwcA7N+/H8CXnjN7tVmA5oVLNa+88gpOnDiB3/72t7jrrrvw3e9+FwUFBdLtYcLhMG688UasWrUKn3/+OdauXYspU6Z4Gm4WF110EYBkoSU7J0+exO7du5v/rkJTU1OrEOi//vWvAJ
IVdK3rf/LJJzh69GiL4z788MMW49PFa8MCAKLRKG677TasXLkSH3/8Mb7+9a/7ViJmGIZhGIZRhY1ThjlDqK6udvWG/eEPfwDwZWhqeno6+vTpg//+7/9ucdxTTz0V/CAlsYxH+30dPnwYzz//vPS5pk6dis8//xx33XUXjh07hptvvln4nYKCAnTu3BlPPPFEizH88pe/xOHDh3HNNddIj8POsmXLmv//RCKBZcuW4ZxzzsGYMWMAAN/97ncRj8dbHAcApaWlCIVCuPrqq7Wub9G9e/dWmxUAcOjQoRb/7tGjB772ta+1avfDMAzDMAxjAm4lwzBnCDNnzkRDQwOuv/56XHzxxTh58iQ2b96Ml156CdnZ2bjtttuaj73jjjvwyCOP4I477sDll1+O//7v/2722rUnrrzySnTu3BnXXXdds1FZXl6Ovn37NnuCqfzzP/8zBg8ejJdffhmXXHIJhg4dKvxORkYG7r//fpSUlOCqq67C9773PWzfvh1PPfUUhg0bRjJwvejatSvWrVuHadOmYcSIEXj11Vfx+9//Hj/96U+RkZEBALjuuuswevRoPPDAA6itrcU3vvENbNiwAVVVVSgqKkJOTo7y9e1cdtllePrpp7Fw4UJ87WtfQ9++ffHtb38bgwYNQn5+Pi677DJEo1H8+c9/xqpVq1oUcmIYhmEYhjEFG6cMc4awaNEivPzyy/jDH/6A5557DidPnsSFF16Iu+++G/PmzcO5557bfOz8+fNRV1eHVatWoaKiAldffTVeffVV9O3bt+1uwIWBAwdi1apVmDdvHu677z6cf/75+OEPf4iMjIxWlX4p3HLLLfjxj38sLIRkp7i4GBkZGVi2bBlmz56NaDSKO++8Ez//+c9bVCCWJRwOY926dfjhD3+IuXPnomfPnliwYEGLHq9paWn47W9/i/nz5+Oll17C888/j+zsbDz22GP40Y9+pHxtJ/Pnz8ff/vY3PProozh69CiuuOIKfPvb38a9996L3/72t9iwYQNOnDiBiy66CAsXLsTcuXONXZthGIZhGMYilKBUxWAYhjkDWLJkCWbPno3a2tpWVWxTya233opVq1bh2LFjbTYGhmEYhmGY9gbnnDIMc1aQSCTwy1/+EldccUWbGqYMwzAMwzCMOxzWyzDMGc3x48fx29/+FtXV1di6dSuqqqraekgMwzAMwzCMC2ycMgxzRlNXV4cbb7wR5557Ln7605/ie9/7XlsPiWEYhmEYhnGBc04ZhmEYhmEYhmGYNodzThmGYRiGYRiGYZg2h41ThmEYhmEYhmEYps3hnFMBTU1N+OSTT9CzZ0+EQqG2Hg7DMAzDMAQSiQSOHj2KCy64AGlpvBfPMAzTEWDjVMAnn3yC/v37t/UwGIZhGIZR4OOPP0ZWVlZbD4NhGIYhwMapgJ49ewJI/rilp6e38WgYhmEYhqFw5MgR9O/fv/l3nGEYhmn/sHEqwArlTU9PZ+OUYRiGYToYnJLDMAzTceAkDIZhGIZhGIZhGKbNYeOUYRiGYRiGYRiGaXPYOGUYhmEYhmEYhmHaHDZOGYZhGIZhGIZhmDaHjVOGYRiGYRiGYRimzWHjlGEYhmEYhmEYhmlz2DhlGIZhGIZhGIZh2hw2ThmGYRiGYRiGYZg2h41ThmEYhmEYhmEYps3p1NYDYBzE40BNDbB/P/CVrwB5eUA4rH/syZPAU08Bu3YBOTnA3XcDnTurDbEpjpo9Nfj07/tw8Yd1+Ho8A2mZmd7XD2KcHeWcMgTx7DvKONt6rG09TplznmnPPohxypy3rZ99W8toEM+JeM5j9Sfx0hVPIfLJLjRekIPJf7wbPaIGnj3DMAzTcUkwvhw+fDgBIHH48OHgL7Z6dSKRlZVIAF/+Lysr+bnOsXPnJhLhcMtjw+Hk57JD/GB1IuvxrMT1k5DYkw7x9YMYZ0c5pwwe1z/18suJ6t3ViRXvrkhU765OnIqfoo81heNsTzIqff22HqfMOQN+9qfip1rLm+y1UzBOISl+733nzY+2ltEgnhPxnC/2m5v4Ai3P+QXCiRf7aT57Gyn9/WYYhmGMwMapgJT9uK1enUiEQi1/0IHkZ6FQyx92mWPnzm19nP1/Ekrg6g9WJ0LFocT1k5CII/k/+7maQmh5fQPjbDr9v1P33RfcvbfRfH45se7Xbwol5/j6SUigOPm/O27tnZxn0VhTOM72JKPS12/rccqckzpWxXFaG0+WrKEYiazHsxKrP1gdzLMPYj5lzmvo2TcBiSdH93CfNz8kZLQpFEo0mZbRIJ4T8Zwv9pvbvLY757IJMGagsnHKMAzT8QglEolE2/pu2zdHjhxBr169cPjwYaSnpwdzkXgcyM4G9u51/3soBGRlAbt3J/9NPTYeB7p1S/7Xi3AYaGgQhqbFm+LIXpKNT/6+F7VlQOYR94TlRCiEUFYWsHNnMvTLwDgTAOIh4Ld/XoHxhT82e+9HjwL/9E8pn89mBM++CcDedOCrRcl/+81981i3bwd69lQapxWyvf/ofnyl51eQd2EewmlhozKaCAEN/Xrjrf95CXmZoxDuoTZWT6hjJchoIisTf6x+AQf+/gkmDb8VaGpCyNQ4T56ky1M4TLsnxWdfua0SEysmIoGWPwchhJDWlMCRZ3uj24FD/teWWZ80ZNQX6pwafO+t9SnyAHDqdKJM6LSUrJq0CuMvGd/6SxIy2nDhBeh64JD/Oy+7PgUhT8RzHnt7O7r27Ykw4q7vUgJAHGH841CDdohvSn6/GYZhGKNwQaT2QE2N9w86kNxT/vjj5HEyxz71lL9SAST//tRT4iHuqcHeI3uR9zegv5dxBCBkXf+pp4yNMwSgUwL45Ac3mr/3uXPbZD6bETzPNAAXHgHy/gbh3DePde5cpXFWbqtE9pJsjH5xNG6svBGjXxyN7CXZqNxWaVRGQwmg+6eHUPyzAjx4w/kpn1NIyGjo470o/lkBXn/gFoT8DFOVccrIE/WeFJ59vCmOWetmtTJMASCBBPL+Bm/D1H5tmfVJUUZFxJctTfl7b61P97xl+/rpuSxaV4R4k8t3ifP0l/l3oZuXYSo5TgDByhPxnH8cPhedPAxT4PR8Io6XrpB79gzDMMyZARdEag/s32/2OABN+/bhk//3R2RRDt61S3zpo8lrf+UYcQCEcyZPvJ987NfqideWOCd27DB/Tupx1nkJkOcdoN+TbZxenrN9R/ZhYsVEvNF9FoZTzikho185BvTZf0R6rMbGQDznV44BOZ8Try0zThl56tePdqzCs7c2nrw4nyp7Es9eZZyeXv3TVG6rxOHKBbjN5PUl3nunjCSQwMdHPkbNnhrkZ+e3Pi+Bd/7nZXyDcqDEOP/65qs4lXYIgygHyzwnooyeW0c7Z+QTiXeJYRiGOWNgz2l74CtfoR9HPHby5iIsOrCWdt6cHPGleyavu78H7ZSUcyZP/BXysTujxGtLnHNfv27Gz0k+zjovgf096HO/s7evf+9LTo9T5DkDgEW7f007p4SM7u8B7DqPdtog5pR6zsDGSTz2L92Pke8pnjNA+trWxpMX5
Hde4tkjN5d23Olx+nr18eXmyl96HDd7fYn33ktGXOeXOE/k+5EY51Ofb8Dd7yyknVfmORHv6e8ZtHM2XiDxLjEMwzBnDJxzKiClOaf79iVDn5zY8oriTXEcyOiG8w/HXXcW7DmKaU1A40NAOAHtPDkr53T/3/did5lEzinhnqxcqUTcJwcpBHS/H9i5DMg6mgwNpZxTlCfW/X5g95NhfOVIXPucANTzIz3mSSbn1Dr2n2YADY+kIa2piTTOWG0Mo18c7TvMtCbg78/0Qo+6I8nQbSdueYeEezIpo81Q3yeBjAY+zpMnkTgtT35y/0+/yMSOoo8QHuD/PjX0i2JIUWdsv3+/1DhFzz+tKSl3pPcOoM29RC5j5a7feebDAkDFxArM3jAbe4/sRadTxOdk5ZxKrE8yOad2qqdVI+/CvJZe38xRwud5vF8U595xCB894f/ON57fG933HpAaZ1NaAPnrVs6pxz1Zvw2cc8owDMP4wZ7T9kA4DCxZkvz/Q46fa+vfZWVAOIyafZsxY2xSUXCaHta/i65KKh+nOgGLv5X8zHMHYs4cxDuFEauNYeXWlYjVxlxzpMJpYSy5agma0kIouqrl9SwSodMKYVlZUvEl3hM6dwbmzHEdp/Xvxd8CTnYGZl3lOAfhnE6c55w5Np4sFenUlCTO2cycOXIFXHyevfN5NqV9ef9+z/5k5xCevaKb9zN3jFPkOQOS156WfxiJREI8TxL35CejCeszg3PaYqw+Mqr6LkmNs3NnfHz7RNdz2mV0d8M+1Ozb7HtPCSQw9d8O4aN/7JceZ96FechKz2o29pwk0kL42fjeAEJIOK6dkHj2LeY+EiG9S/FOYaFX/+4/3N0cliySJ+u8iESU1qfWY0iy+FutDdMQQuif3h91x+tae32X5eCNuVN8r797wb041Un8zu9ecK/0OP3WEpXnhM6dWzx75xrRBCCRSOCN+25Aj4wIVvTzX+9X9JvD/U4ZhmHOUtg4bS+MHw+sWgVkZrb8PCsr+fn4ZMXH/Uf3Y80gYOIkYJ9jI3hvevLzNbZkop9cCTw6Krlj3oJwGJg7F5W3jfQNl2sxxEvGY9WkVdgyMsv1+qGs/i3GSr0nAMCjj6LpvvvQ5BhnPJQc/0+uTCp7fx7ZH00VL5PPiblzWzV/t58TACoHAd+fBOxPdzSJlzinNZ949NEvr9MUFxr9fvO0Nz05LvvzXDsoJHz2CSRw9xXH8PGdk0njtEK2Raw5PU97ezr+4DZPPvfkJaOJtJYPPx4Cnh7dA5W3jSSNrwVU2VMYp9e7ZJ9TKn+aUeh6TqeM7j+633OsiaxM3DmtNyoH+Y8z4TFOa+MJgKuBmkAC3W+YipdLJmF/esu/f9IrDW+U3Ud69q3mnvAuifJhE0igrqGuxWee95+W1vL+Jdcnt7Em0tLw2Cjg/itbXsyaxxsG34DJqya3uod9R/Zh1OeLknPncf1L7nwAWelZvu/8D27tjUvufEA4Tqc8AfD8HaE+p0RaGj649VrE7v7ul2vb+PF4o+y+VmuEtZaN+nwRKrdV4pZPH8XyfnMRh2OcCGN5v7m45VP5d4lhGIY5M+CwXgEpDwuKx5NVD/fvT+bw5OW1UArsIXhpTckKrl85lswLq7kouSPuRqdTyWqSOZ8Do664GZct/KUwXM6rDYJVmOTTv+/DxR/W4evxDKRlZrYaK/We7FT+5SX897/fgJzPkzlcTw5L7vS3GpPEOXHyJHYunINX1z/Z4pxO0pqAty8pxTcT/Vqc07UQy6nTFSp37UqGht59d6vWHLPWzWqhlGalZ2HJVUvcW0u4zFNlnzrMem1Oi3P0T++PCYMm4InNZcJnv2L8CkwZOMF3nMCXIdv7juxz9VA5CTcB4w9m4DejSv2f++l7iv8xhlnPT8L74Xr8t8s4QwghGoniyNFDuPu0jFrPKd5J0JJDBFVObMfF+/XFgP83DR8f+8R1Ps45Bfx0ay/Mz7wJabm5iP/gLtR8+qZnoR4/rPfZ/n66yWj1tOovi+o47inWP47RvypodW7nOcc9sR75/3Rlq+Ms3GQ2HAojnvhyU8W55vzPRUBTWsj9+VDn/uRJTxlduXUlbqy8UTyRLpDvX3ItcY61ctfvWs1b//T+ePzKx5vDjd0IIYSs9CzsnrET4T9tdr2+lUsLAKGmRPPcf3r6na+4YbX7e3F6nH9981U89fkGzzUPSD7TDRfNw5jIINJz2vnWeiw/vhk/H3Kk+ZzW2lY4sLC55Zjb+tR8z7N2I5wWxrH6k3jpiqcQ+WQXGi/IweQ/3m3UY8phvQzDMB0PNk4FBPXjJqo86fc9GUPCDSsHKntJtlhxOq1EmIB6z25Kcv/0/ii7qkzNQAFdyV0xfgWmDJniOxaRkenXMxKQM7Tc5qxmT40wRxRwGDQC7EowVa5MnD+EZEhq70hvHGp0b1ciK4uq7xZ1vMCXz1BpE8IxVr/3mXLvqrLtNZ6aPTWo+rAKZW+WCc9JHaMqlHxoAOjTrQ8ONRxSnkNdnDI3KmsUnvrzU5i9frbwu6L3SGc9pM4f9V0WrW3F+cVYEFtg7Hq6sHHKMAzT8eBWMm2AjkJrheBNrJjYrNhTsZQ0y8ARhct5tkFQQOaex18yHoUDC7UNDDvU0FX7caL2Km5GpqjybQghFK0rQuHAQtL9hNPCrebfyhEUGTR5F+aRDTUrZNv5jPxwy1X1up7X+bPSs3DH0Dt8FVoZWdQ1Fi38xmsZBSry4cTvfbYU/rKrynxlRUW2/caTd2Eepq6ZSjon0PL5tCr8o/neUmV98ZWLMXnVZOU5FCF6j+zvaeW2SuQszdF6j+zorIcya4UIytq25M0lwvMAtDx3hmEY5uyEPacCTO+8mvKqee2m3zD4BizavAiAv8fHpLeFMlZTnkRVZD1UJ0+dRGZpJg42HHQ9n5c3xrSnwguKZw+AtKEWb4pj6ZalSh4fimHopuRXvF9hRBaDkDMvo8SSJ1ORBzreMRPeVztUGXZSNKIIq7at0t4YcKLjxdaNuLCuT32PvGTQj6C9iNT580NmXaDAnlOGYRjGCzZOBZj8cTOt0HopzhQlLVVGlOl71kFGyf3B737QqtCKG875SbXR7/WcASgbairGjo5haEIWqXK2c+ZObN67WduzF8T74xYaSh2rCQPEQifP04mpDSiq4WkipNt5Xapci2TQSarXPlXD3e27fkQjUXze+HmbhVjbYeOUYRim48FhvSnEdCitW8gnQAsDCyI0NBX3rINOqKYXzvC0HfU7SN+jhmL64fWcASB7SbZyaLFsqKluKLOJ0EOqnGWVZrXYdFD17FHDEvcf3U9+f0ShoX5jpcg2FRXZdBZOslAJZXeDGtrqtSaqIJJrALjzlTvRq0sv5GfnC2XQjqlwYyqqocEqnuBZI2ahOFYcWIg1wzAMc2bDxmkKCUKh9UKkpFENkKrtVVo5fDL3nAr8lDQ/ZdQLZ45qcazY93iZHC8Kbs85Vhsjbwh45QjKGDu6GxAm8i6p8uP0hsvkh9qhGnA76ne08qapFtQSjdVUrrZos8CO9bzcDFML
6/nHamMIp4UDW9NMQzE2DzUeQsHyAmSlZ2HiJRPJ51bZNNBFdv5k10NrbXsg7wEM7jvYyEYJwzAMc/bBxmkKCVKhVUFkgADuoaEyCr3JYi0WQRnusp4Pu5FJVeQSSATuOaAaalUfVmHqmqmeckY1dkxsQOh6/lQ90W6ePYp8Uby90UgUxbHilBbUMmHAyRRdy0rPwoRBE1D2RpnwvJNWTUJ9Y32L75pe00wis2G278g+cnXj0rGlmDl8pvQaYDpkWYSOJziIonYMwzDM2QHnnAoIIudUpNDWN9antHiQm9IDwEiuqOliLaaqsbohk2sXQsvejtQcxJL8Esy/Yr7WOEWoFrQB1OTMZP6lat6lqRZL9Y310sVvAHPtcVKVC07B612bPnQ6cqO50q2NnKSyIBogb9zJvkchhJAWSvP0IuvkW4rWvSAMV5n10EThqSDgnFOGYZiOB3tOU4gofNH6t6k2JCq5bhYyoaEmwoephqmuJ9cPqvcto1sGnrn2mRbXonpZcqO5SmOTgRKWaTJHkOJF7NOtD/Yd2YdYbcxXcVbNu9RpsWRRtb0KS95YQpavINrjtKcweKr3SyYM2E6Qa5oTlU0t2fuyhzebzLcUrXv3jboPK99baXzDjroeqnqCGYZhGMaNtLYewNmGpdBmpme2+DwrPQsl+SWenhagpUIronJbJbKXZGP0i6NxY+WNGP3iaGQvyUbltkrSOE0qyX73TDUoKcVJitYVId7knfsmwlJGLUXSjYxuGdg7e2+rMcuEL8eb4ojVxrBy60rEamNaY3bDMtQAtLoX69+UHEGKnImuZ52vrqEON6+5mSyHlkLu3CCxFHK373vJWUa3DNJ9/OrdX0nL1/hLxqN2Vi2qp1VjxfgVqJ5Wjd2zdpM3IZzvTxBh8DpYmwVThkxBfna+bwEtwP35+5GKNU1FlgD1+yoaWaS11tkRrXsJJPDY5sek742CaD0MIYT+6f2bDdOg1zWGYRjm7ICN0zbg6uzxuHZHLS7fdBAF2z7EHybpKbROVJUxO6aVZC8lnqqsyRTdcUJVmkRGXQghPHPtM+jcqXOr71IVubrjdVqbBlT8NgSKRhaRziHjnfO6nhsiOdTZiHCTs72z9wqfTUa3DM+ettZ1veTLzYBTfX8oGyTRSBTxpjhJ+U+VweD1/KORKOn7Qa1puptaMnJtUTiwUGutsyOT92nHxIYdZZPL8gTrboYyDMMwjAXnnAownbMybhxQVdX688JCoKgsdf0eU50rqotM/9BJl05qDv3bUb8Dz739HPYd3dd8DKVaqkpPQFGvyftG3YdFmxe1eT4xNUdQJ0d035F9KFpf5Gnw+clPEHmXomcza8QsUkEban9anffHa6xOVOQ46CJETnmIN8VRsLxA+L2g1jRVWXKT65o9Na2KOlHHoYqJnrO6+cmi9VCnx3HQcM4pwzBMx4NzTlOIl2EKJD9PJPKQlZ+afo9LtyxFv+79fPsFmsoVNYFOpWMnQbXk8MpBzEzPxO3/fDuWbllqLJ+Yils+sYm+ooC/8ZOZnkn2RDrHF0TepagacDQSJRmnVDnUeX+8xurET45N52er5q/Hm+Ip62FrSpb85Lr8unLfTQ7VNdFrfk2Eb+vmJ6u23wpyXWMYhmHOXNhzKsDUzmtjI9Ctm/i4FW9X4qZXvJUfkVKpstPu501R9SICZlsf6FQ6diNIz6/9vt08t36kogorIPYkiuRM5C2ZNXIWqb2ImycyyIq1XjIZVKSA7vsTq41Je+tMRU/43YOMB1ZH1uJNcRTHirGwZqHwOiZkieIFBKD8TN3wm9/CgYVGqlBT3hOV9bo9VZd2gz2nDMMwHQ82TgWY+nGbMQN48knxcffcA3z7HnWFVqWNiEhJVFFagggp1Gnd4UWQSpOXousHNWyUgui5qRhOVIOpT7c+qGuoE47Rbf7bKqScakTJvg86mzQqyr9Jg8FUyKaKrLl9xw9dWQLo7bMAGNl4oxrDlDBvv/EGtV7LpFuYWtdkYOOUYRim48FhvSlixw76cctcwqisXL6VW1f6KkMqbR1E4VduoaF+BNXyRad1hxdBteTwC3fzw1QVVoqyKRu+TDUWrOq8VpEh2XDOtgopF4X+Wvl1skq87PtjRyYs1TKCV3+w2si5TYZsqsgadWPHlCzJts/S3dSizu/uWbtd5bJ/en/cMPgGLNq8qPk7Xvfmh8563d6qSzMMwzAdHzZOU0RuLrBhA+04IPX9Hu2Kl1U0R8UrEHQOkpeSW/F+hfS5ADNKk1fRIZkqm9Q8TwoyyibVcFLxAt805CYseXOJkoFJMRSDwM+ICrrPrhsmc61lz20qf92CKmsyGzsmZanqQ4+CAA5MbWjJ5NL6yeXIrJHK74nuem0qf51hGIZhLDisV0Cqc04bGoBI5Mt/q4bVyYbEWRSNKMKqbauUw3GpIYWlY0tJCi0V2XBmU6GhXp60iZdMJBXYscYCmKlqaTrfkHJOL6qnVaO+sV4rP89k3rIOMvMKmAn5tF/XVK61c6x+4zKdv05F5l02JUuV2yoxoWIC6ZqmUgFMhsSqvifUud44dSPGDBjj+jfd/PUg4bBehmGYjgd7TlNEJJJsF+NVrRdI/t1umOrsajt32g8cP4DZ62cLx+lmUMl4hqheBftYTCi0MuHMpkJD/TxpVMMUMOsN1KlsqnpOJ3ZvSTgtrFT52EInJBYwZ9xS5/WhmodQ/k65sVxrUViq9W8ZwxSgyb5KVIEJLzJ1DZmXNw/F+cXasmStsxT6p/cXegGpMmcyJNbt3ijjoM71pFWTUH5duWd187aIcmAYhmHOTNg4TSFr1/r3OV27tuVnuoaGXWGJN8Wx+PXFvsZbOBRGPNG6YbvIELYrQQeOH/AcrxcmFFqZcGYTShNl4yAtlOY6nxbRSBQVEyuQn51vzBsYRBsWmWPdjB9dA1MGUaVkVUOROgduec/tKdc6Mz0T04dOx4lTJxCrjfka60Hkr1OgGm1jBowx8t7IbL6IjHqZnOQgQ2Kp46DOdX1jfSDttxiGYRjGCRunKWbt2mSI79y5yeJHubnAY4+19JhamDQ0KB4YP0PKyxB2U4K8jFy/c5voh+enxE8fOh250VxjShNl48CaA6+cy/Lryj1D5VShKpsf1H0gNE5kzwm0rbeEEsquaijq5Ca3h1zrGcNmIKN7Bp57+7kWxqyfsW4if11lUyLVeYzUdbZoZJFSeyUvmTNd+MvamKn6sIocASO7AeEnwyqbUNTfQoZhGObsIa2tB3A2EokAy5YB69cn/+v1Y2y6EqJlvGWmZ7b4PCs9C0Uji0jncDarn1gxsZUxIGOYWtgVWgpWS5OVW1ciVhtDvCl5zfGXjEftrFpUT6vGivErUD2tGrWzajH/ivmYMmSKMS+ljELrNt9B5WFZyqal3HqxsGYhRr84GtlLslG5rVL7nL0jvbFx6kbsnrW7zQxTN1l0YingReuKmmWGAnVe/a5rFQ9yyiwVS/m3yzH13c/onoHiWHGrfruWweIlA15rBgWrgrDbe+qHZbQBaDXfQVRrps5h4cBCz7+JIik
Ad5nzW5Nl1ojKbZXIXpKN0S+O9kwpcBuHfa5FyK7RIsaNS9ZhePLJZLHAJ59M/nvcOCOnZxiGYTooXBBJQFsWVAiq36NXdVmZ3oiUIjmyHlSAVvwjiB6qMsSb4li6ZSkph7d6WrVW9WMVvAqUuEEtWiJT9CTVRYx0CjaptEgC3PvsymJCZilrRGZ6JhKJRCvD1H6MaB1xhu5TZL8kv0Qr/1alN6rXmP3k0MQ6q9tbVuedUamk7RxH5bZKTH9lumfvYjsmepZ6pbdYuKW5qMAFkRiGYToeHc5z+uSTTyI7Oxtdu3bFiBEjsGXLFtL3fvOb3yAUCmFcB9qW9fMgAEkFecKgCajZUyPlhXHzwIg8QyGEWhQDoeRpxRNxlI4txYrxK1A6tpQ0NpEXw8tDJvIAmcLyUIiUc/t8uc13EFheqhOnTqA4vxiZPcXeLqonkerhsXtwbqy8keyd1UG2YJOFbEsQvzkoyS+Rvr4JmaV4GacPne5pmAI0j5hdhmcOnylcK3pHeqM4Viz9nto9rdFIFLtm7moRBeHnmbd/98E/PoiLyi4iyaEJT61uCobqGqHaT9k5jvGXjEfFRFqIuG77rcZGf8MUSP69sVHrMgzDMEwHpUMZpy+99BLmzJmDBQsW4J133sE3vvENjB07Fp999pnv92pra3HfffchL6/j9VrzUojDoaTyUvZGmREDQFZBoypj/br3Iyu0okqYMqFzKuGEIqiho0GEHopwGoULYguQQAIl+SWYlzfP97vUcD23kGm7sdBWGweqfSdVlGyvOXgg7wHpsF/VEGO3MfltHORGc0nnoc6jaK3wqyDsd89uGxs5S3NQ31gvNNrc5F8mhFk3vNZ0CgYV1Y0Zt3HkZ+drr9Fe2NfjG+/y3iixM3eu9GUYhmGYM4AOZZw+/vjjmD59Om677TYMGjQIzzzzDLp164b/+q//8vxOPB7HTTfdhJKSEgwYMCCFozWHXSEuGlEEoHVepwkDQEZBk1XGTHgnZNp5mPbeyXgodHNKZQ1rL6Pwk6OfoDhWjGNfHCNdl1pYy83Do5pzp0u8KS5dIVpHyQbc50AU5eCFSh6fm3z4bRwEYTiJvMiHGg95ftftnnU2NkzlG4s2X/yQjTwxhezGjN84gsr1dW4crN28lfS9HTukLsMwDMOcIXSYar0nT57E22+/jfvvv7/5s7S0NBQUFOD111/3/N6DDz6Ivn374vbbb0dNjZlCDrqoVCi0Qm+nrpnq+neZdi9+OU3UlgAqFTV1+uHFm+LY9NEmz7/bCaKdB9VDUTq2FDOHz1T2mMrm01Ja2vz63V+Trq3j1Qmiv6oISnVeJ7pebb/3yEu+KVCNDJF8uM1tUNVvdSsIW/es089ZNqxVpv2WDEFV3jXVK5U6DtM9S13zYaM7gV3i7+bSHP4MwzDMGUaHMU4PHjyIeDyOfv36tfi8X79++PDDD12/8z//8z/45S9/if/93/8lX+fEiRM4ceJE87+PHDmiNF4vnIUgrCqFlAIQqgaAn0LrZYiKFDRVZUylH56KEeJEt52HTBgzpaiM233LtqIAaDJR11CHjG4ZONhwMLDWHEH0V/VDpQgMoNfqhrJx4JRvavEgipGhIh+AOcPJS36da4Wsp1ZnYyNV+cYUTBl2Jnul2qGOw1TPUs+Ng+/8CHjrntP/8I4yeOwxqcsxDMMwZwgdxjiV5ejRo5g6dSrKy8vRp08f8vcefvhhlJTIFzeh4FehsKoq+Xc/A1XFAPBTaCdUTEDvSO8WIXgy1TQpyhhVofVC1QhxQ8d7pxsaKVI4Vb1HVJm4achNWPLmEiNeHTdSmXMnG2JtosetjGFol+94UxyLX1+s7bXU8S4C+oaTSYPJec86GxupzDemoGvYmeyValE0MikXMuOgrtF+G26eGwedTwID1wLbxwFIwM1ALSzkfqcMwzBnKx3GOO3Tpw/C4TAOHGiZX3bgwAGcf/75rY7ftWsXamtrcd111zV/1tTUBADo1KkTtm/fjpycnFbfu//++zFnzpzmfx85cgT9+/fXHr9MhUJTfU8peYDO3DDZ8Fc/ZUy35YuMESLTzkNFodUJjaQonNFIVMl7RO7ReHEh8i7KMxau5ySo0FE3UhVibUF5j+585U706tKrVdEeU15LE2HTqoaTSYPJ7Z51NjZkjUyKHOq2QlINDVbdgPDaeAii7Y4d0fruu85OGQ+srDxtoLbEVBsZhmEYpmPSYYzTzp0747LLLsOmTZua28E0NTVh06ZNmDFjRqvjL774Ymzd2rLwwrx583D06FEsWbLE0+Ds0qULunTpYnz81MqD37ttOx54ZL+RPE+VkDeV8Fc3ZUw1BNGOzPiz0rNwx9A7XPNNnah4TVSNDKrC+fCYh0njcCp8MjIRTgsbCdez35v9XI9f+Tgmr5ocmHfWgrq58LfDfzNyPYocHmo8hILlBa6bLybCPU2FTcsaTjoGU8XECtz9h7tR11DX/LnbPetsbMiEtVLksC17KOtsQOh4bFXumbK+C9fZKeOBk50x7uOP0HAgk1x/gWEYhjmz6VDVeufMmYPy8nK8+OKL2LZtG374wx/i+PHjuO222wAAt9xyS3PBpK5du2Lw4MEt/nfuueeiZ8+eGDx4MDp37pzSsVMrD278827P6rJBtXtxolJB1I6pyq3U8c/Lm0dq56FbMVOl3QRV4bQr8H44FT5ZmfCrtitbIdhZEXnOhjm4b9R9yu04qFA3F8reKDPSvkbmPfKqLqtTCRZov61KvNaKym2VmL1hdgu57tOtDxZfubjVPetUiZWpkiySw7buoSyzAeH2vqr0SlW5Z+r6PiprlHg97tMPq144H+vXA8uWsWHKMAzDdCDPKQBMnjwZdXV1mD9/Pj799FN885vfxLp165qLJO3Zswdpae3T3s7NTRY/EhLdCcDbwyjjhdFVVFWNW1OVW6njHzNgTLMiZrJiphuyHgrqHGZ0y1D2HqUyn9A63strsmjzIlRMrECf7n2MeGfdkPGWqRbAsiPzHvl5E2W8lk6vtKXopyJs2o7JPPdDDYcwedVkAEBG94wW8qEjw37fpeYb6+b0moAqZzvqdyB7Sba2d1f1nqnr++a9mwNfjxmGYZgzj1AikdCvNHMGc+TIEfTq1QuHDx9Genq68nkaG4Fu3fyOOP0Yfto1WTACXyqcu2ftVmoNE2+KI3tJNkmJd6N6WrVS7tTKrStxY+WNwuNWjF+BKUOmeP493hRHv0X9PHsmes2Pm7EVdP6VF7HaGEa/OFp4XPW0atQ31mNixUQAcFXkRN5H1bwxN0PC65qWTHkpp34ya2rM1rgnVEwQHgeoy7F9jCrvkep1vTYLpgyegkWbFwFQkw8VZOQ3PztfKB8AEA6FW/RpdhYGk5UH6zv7juxrrkydmZ4p9d7K3mcQiOQshBCikSjqG+vJ76sfqvcsu77rrsc6mPr9ZhiGYVJHh/KcdmQikWShB/eiSKcVjYFrmw3T5KfeHkbddi9+2L0wKsqiqRDEqu1VnoYpkJwfU+1qLEznnMnmhOp4QFORT2iyn6nOXI+/ZDyKRh
Sh7M0y3+MA/bYhqu+RynVFXun7Rt2Hle+tNFLUivJuB5HnbjdMrXuzR4nIyLCfDMlsKMmG1JravLIjymu3/m3Ku6uaxyy7vptqTcMwDMOcHbBxmkLWrvVpJzNwbbJAhAs6yrVXyJvVQsYv3Kpqe5WS8WCicqtlOPnRO9IbhQMLXf/mZaj5KZYmiji5jYMa2hZviiMaieKRMY8oe4BkUDE0TRXmMTHXhRcXkoxTE3mYXu+RyetSNgt+895vsGvmLmzeu1lL0aduDMgWAlNZq1TDZk2+r6kOqfXCL0RZVPBNtlWW6iYiJaw+Goki3hRvzoVVrWDMMAzDnH20zwTNM5i1a4GGBuCee4DL8w4Bw5YlQ3k9DFNAX7l2K8Zy4L4DWD1ptWfxGgDKxUF0CpxYUCukyhRtciviYxWeMlXEyQ2vQkqZPTNRnF+ME6dO4ME/PoiLyi7C6BdH4+Y1N2P2+tn4yaafoL6xPjAPg4qhacIrTp3rk6dO+hZpspTkoApgObHeo41TNyIaiXoep3pdmVw+Z+EbmYJWskVwZAqBqa5VskXYTL+vFFnqHemN4lhx4AWTvIpn5UZzSd+nvteq7w+lCFV9Yz0Klhe4Fvaz09gIzJgBjB2b/G9jI2noDMMwzBkM55wKCDJnhZJjJJO/pzoGZ+GVmj01mLRqEuob612/Qx2XTq6RqbxV+1j8ciuL84tJbWh0cs7sc72jfgfK3y7H3qPexkiQuYSAWs6ZCZmlXjejW0arNiROD5X1XAEzeZjUkE3T1wXUZV4mPFonZzgVee7U9zmIHFG/Z5pAojnaxI1U5Fqn+p4Bfzl2kzsnfufxiiIy2eeUc04ZhmE6Huw5bUNMeBhNjMHywtQ31iNnaQ4Klhd4GqYA3cuh0z7DZOsMipdlyZtLSNfTCbG25rpLpy5JD4yPYWofm+UBkm33IkLFc2JCZqlz6Gyv4+ahUmnv44WfZ92JyetaqMi8rBdUtTUM4N2GyHkMtbWLG9Q50Akv93qP/J5pSX6JMP+d6vmVkTM7QUQK6MgxJZLAy4vtmd6C5OenW5kzDMMwZyGcc9rG6LYBESHrCZLxdlAURNVcIxN5q0Dy/pduWSpUyP2McTu6IdZ+hrIbltL7UM1DKH+n3Gium2w+oYWuzOqEfrrlJpoouKKSv2i60IuszKsUtDKVM+yHl3w4q/TakW2Fs6Oe1jjaKWsiL7PXM614v4J0vSBzrWXz16lyqSPHVj4pdTMzPzsfjY3ehqlFVVUyxJf7njIMw5x9cFivgFSFBQVRAZIa7kdp/+BGkG0VAP3QSUrYmZ1oJIrPGz8PNMSaGppHwVTYr2r4tVtIOKVQj27oJ2BW9ky3x9FBRuY3fbQJBcsLhOe0z1UqW6Y45ePg8YOYtGoSAL1QaMpGmtszk22bZMfEvJmSM9H7arriuAiZcPRJl07CxFs/xdrlmcLj77kHWLZMb2wc1sswDNPxYM9pG9DYCMydC+zYAeTmAo89BkQiZqsZ+u3QT6iYgKIRRSi8uBB5F+aRig/ZkfVyqKLjoVPxBM8aMQvFsWIpL6Isum1N7KhWOXWi6jmxe8Urt1UiZ2kOSSFWbc1iZ9+RfYjVxoxs5phsj6MLVeYrt1Vi+ivTSee0y5yMd1Z3w8wtamJVml6UCDXywNlmSsXLbMdEJAdVzmK1MYTTwp7z7ve+BlFxXIR0pePN5QDExukOmnOcYRiGOcNgz6kA0zuvqSgCIeMJzUrPwsRLJpJacgDBF+lxQ1ZJlvUE2z0Wbu1zTDaMN+k5tbNx6kZfhTZIVD1Sbh4eZxEkLyjFkqiYLr5lApWWR144vXkU7yyAwLxvOkYv9f0pyS/B/CvmS3/Pz/OpG8lBlbNoJNoiTJY67zKeWQDGInUoRdKse0ogAfx+KfDWDOF52XPKMAxzdsLGqQCTP25+RSAAcwaqjAEk67lSMdSCalrvhez9Ay0VyyDHKxPSKvNsVBVaXXRDFd1Cg3OW5kiH/OpsmgQR6hqUDMlsvPjNvV9oKADl8FcdKHOmupFgagNCpwK56sYUdd5lDHfT+etSlY5PdgZ+/o/mI7xoaNDPOWXjlGEYpuPBxqkAUz9ujY1At27i40z8IFMVMYsQQkgLpXkWKwGSxk/FxArPKp1eiPKfglDiZe7fpFeUipci56R/en/cMfQOUosbJ6nycKeyvYUI1TYopls6BZnzJ7vx4vf83eYEQJvk31LnTFXeTMppW+RaU+Zddt13nh/QWy+8DHfXNWxlJbB9XPPVnZjaqGXjlGEYpuPBrWRSxNy5aseptA+RrYaaQKLZMHVrDxJCCOXXlWPMgDFKFVC92lz8+LUfK7VUEEG9/9KxpeTWNiYRtaywt915IO8B3/YRXni1cDBNENVfveYno1uG7/e82nmIWneYbOkk29pFFuo8RiNRoaHh1hpGp9WMLNbaNnvdbEyomECaM9V2KpTvZXTLaM5l9ntn3NpvUdYwnTY7lHnXqSRuYr3wah2WG81tffCU8cDAta7nMZniwjAMw3Q82HMqwNTO69ixwIYN4uOuvBJYvz75/6t6YFR36ItGFmHVB6uM5FuqVgA2sYNv2hMWFLJtfgA5T6IF1Wup4sUOsvqrczz7juzDzWtuFn7PHpopkw+rE7JpjTdoryN1vjdO3YgxA8ZInz9V+bfUStp+VXcBudxPmfeIss6azLV2huV74TfvJqpgA8lNu37d+xmLZPGV2ZOdgdcWA/Vfw7hRQ7Di2Uyj7WPYc8owDNPxYONUgKkftxkzgCefFB9nFYHQaXsAqBk01dOqm6v36obZ6hT+MaHEyyiwqqHFqcylDUqh9Tu/qY0Q1ZBwJ7KGsIqxmIpiPW6GOvW6QW+8pKLVjEolbbeiTiobCTJGMeC9zprOtY43xaXbArkhyv2UxUQ4eltuFrJxyjAM0/Fg41RAW+Scdu6irvjYlZ4d9TtQ/nY59h6V907oopP/ZKHba5GiwKoaZanuJQgEr9AGvRGiOz+ySm4qe3oC8v0e7e/pc28/h31H9zUf4zdXulVj/QjakFCNqHDbYJEx6N1yRPcd2Yei9UU42HDQ9Zp+92patkzOu1TupwCTvZSDklk/2DhlGIbpeHCf0xQRiSRzaUTVeiMRIFar1nfRTSnJ7JmJkvwSfP6Pz1H2Rlmrc5ns4WlHJ//JQrcnqKh/p2pPQNO9BKlKtrNvZLwprt17Ubf/I+Ddm9OJbq9Fv/6obnIcRD6sH9L9HhXnSrX/L0XOZOdYFtmeyhZuc+vWR9WJ3yZSZnqmp2EK+Pe3NS1bJufda90DgPJ3yqXCfk32UlbtWc0wDMOcXbDnVEBb9DlVyfuieL+A1r0LdavVeim8JvKfgsyVVA3LM51XqOuB1fVImK5iGquNYdKqSZ7hxqZCtilynGrPqXS/RwGyoaF+ci8rZ7r5t16oVBJXlRfRmjhr5CzXDTsnbl7boGQrqHm3n181f91EK6VUtxVjzynDMEzHgz2nKWbt2mSI79y5wI4dQG4u8NhjLdvHUD0w1nFU79fuWbt9PYmyiBReL0+ACIrHjzoGL2SqktoVM
tXveY1d1wOr45GIN8Wx6aNNvue3oHiAwmlhhNPCvnmwfvNDVVxFHnELq0Krn7GYmZ6JeFMcK7eu1H4fRN4v698yXis/WaJ4DgE1OaPOsSwyERU6nlrKmvjrd39NOpfbmCmyRV3D7JiYd7/3iBrl4AbVCyxak01sBDEMwzBnLmyctgGRSLLokReyio+swWRCOaAqvG6KUP/0/rhh8A1YtHlR8/js9wbQFFId4041LM9UOJ+JcFoLFYWWWhjGgmpUyMyPMz9aJu+SYphRjMXGLxpb5O3q5sX6bRao9qzVCTvWkTOq8SuDaG2zoxryGW+KY+mWpcI1sa6hDhndMnCw4aC0gRlk+LPOvFM265zrxYHjBzB7/WzhuSlrgOyaLNqoZRiGYc4+uM9pO0S272LVhz6JrDZM5daJFF7gy355Xr3vHv3Oo569PikeQ5kxuCHrndb9nhPqhsLSLUtJPW7delZ64dWL0w2vvpFeyOZdWv0hF8QWtDBMATO9Qb16pkYjUQDAocZDgVyT3O+RgE7+dir7llKg9PosGlmE6mnV2DlzJ6KRqFSPZ6unLcXYAoCbhtzkOhaKgenXrzioAj9+yPTYta8XM4fPVOod60RmTY43xfGvV9ahW7cEnnwy2WbtySeTRQPHjZO8cYZhGOaMgnNOBbRlzgq12uyEigmk85nKrTOdp6gSwqbb71G1Oqapqpoq1YxNtnWgGqaAXCXNVOddUrHLWd/ufTFt7bRWxrD9mpk9M/HCuBfw2fHPUtPv0WMcuveuWkG4LVoj2dc2lXB91RY19Y312v1tU5lH6TUGnVx4E9V0qfJdkl+Ch2cMxz/eG9t8FSf2Ggw6cM4pwzBMx4PDetsxonBNa6eagoz3S4TJSpVuIWwUZY86hkmrJqH8uvJWipVqWJ6pcD4Vb5hutVtArlqqSlhlkHmXOj147XIWq415GqbWNfce3Ws03BeQC2k1VUVbp4JwkK2R/NY2lXB9P6+dG/aQ3XBaWCvPUzf82YRxq5sLb6KaLnVNXrDhIeC9f5z+l7u3tqoqGfLLIb4MwzBnH2yctnP8FB8ZQ4Oi5FKVJFOhrW5QPSbUc9c31htvyWFCkZMxVCxMtHWgKpDz8uahOL9Y6RpB5F1WfViFqWumGjGeVMLbTWwM+BnuTky12KDkr0cjURTHiqVzt3WNKq+NKZUcWZm10M3wDyK/loKpfskmNgx1izGR1/vXFsPLKLUzd65/bQaGYRjmzITDegW057Agashe0cgilI4t9T1GRkky2TDeOQZROxxrLDKtaky25PD73qisUdi8d7NUUaJUtHWwk8r2Km7zWvF+hXQ4sxcqYceAfHit/XomQoy93rXpQ6cjN5prPDTUL2QzgQR6R3q3yr21H+N2z6aMKieq8ikTJm+yNYsK1ntRtb3Kt/e0jFxT521e3jyMGTAmkNBj8pq8/FVg11XC8115JbB+vd6Y2vPvN8MwDOMOF0TqwFB3qgsHFvr+XaaQBiBfsEmE1dJk+ivTyQWO7GMQISr6IlNMyOt79Y31yFma01zgZ/SLo5G9JNu3sI5XQRUKVrXbWG1MqmCM5UnTLX5CwW1eVbzp4ZD786AUvnJDNAdeyBYP8no+bgWTamfVYv4V86VlkIJf4Z6S/BJPwxRwv2fZ9UIGVQ8gVa5Kx5Zi96zdbWaYWgWbRr842rPHqopcU2V6Yc1C0tqkAuV3AQAQ3Uk6X65a/TCGYRimg8PGaTtDxuAwYWioVr01VanSUtYKlheQ+2M6x2BVXhVhqlqxEx1l3WmoiDzcFs5qt1SFU2ZjQcX4FSFjGFrHxBPe11WpNkupGOsHRY7sRojb81HdEKHifHaFAwu1Kghb96xbJVuEasoAdS2cOXxmYLItQqZKtqxcy8q0iY0EN0QbIQCA7/wIQOL0/9xIfv7YY0aHxjAMw3QQOOe0HSEbKmeiOI9OIQ3dHCWV6ppOw2D8JePRq0uvFsVrvNBpyeGFiX6l9ny3eFMci19fHGiO4IlTJ1CcX4zyt8ux96h7zmxQYZuyeZcTBk3w9DDZkd148MqLpSCSI53+uyaQeXayxiB1vYjVxhBOC0uvC7I9ni1k1sKgZNsP2YJNFjJyLSPTJvLX/cbh9rsAAOXvlCef7cC1wPZxSBqidmM6OT+FhVwMiWEY5myFc04FBJ2zYiL/iNJyxguZVhNThkwRHkdFpqWJHbdcyKByYCm5qEHkcKYqRzCzZybuvOzOVnmOMrm/qlDzLmv21ASaI+tsMXPr2lux76i6HOm29NBF9tnJvjvU9cJqF2QhY/zptDWhtKgJWrbdUM1zVmnHZeW+b/poExbWLDRyDVO0eLYrV582UFsap4WFISNtZADOOWUYhumIsHEqIMgfNzdFyg2qUhxkv1DTCozpno8m+vQ5z0fxrgRl3Hsp2dRqt/bnpVJoypRx5SeXFJkNauPBC105knmfdFrjuKH67GTuWaeYlPNcfuhsuHnJVVtuHMj2NaaOxW+dOnHqhPG1yUTbmxZjPtkZeG0xuhwejPzLsrDml18z6jFl45RhGKbjwWG9bYRMSKuoRx2g3gpBNYzOC6ryIhOuRglRNtHexUImLDOotjpeoXEV71eQvk/NEXSG9un2S7QjMvApMqsauq6qROvIkVXYi4LJ1jgWqs9O5p5VWiBZ15YJI9VJGfCSK5OyLUO8KY4Dxw+Qj6emZIjWqeL8YtL1qGuTqXDoVs92utnq1AzDMEzHho3TNiAV+UdUTOStWgSR62adg2Jg6ubAAvLGnCnj3suYcirJQeUIWgq5iX6JgNm8S1mDUVeJVpEjahSERdmbZa0+081J1Xl21HuWyRl2Imv8me49akq2ZZCVC4C+ESJap8rfLkdWzyxhmDpl49F0HnVb9ZVlGIZh2j9snLYBMg3j7QRR0Acw43WUVV4oHphoJIqKiRVK7V1EeBmDssacCeNexpiSNYZlFXITnmATRaKcUI0nFSWaujHghWxhr3Ao7FqBWLdIjcyz07lnr/XCmWfqRVBVs0UEFeXgBVUurHWjaEQRCi8uJG2oUdapvUf3oiS/BMWxYq2NR5332UQYMMMwDHN2wcZpGyCrnMmG1qqg43VUUV4oRl35deUYM2CM8NqyYxblaVGwP0Md417WmJI1hmUVchOeYN3wSa9nKjKeVORQ18sqEwVhPS9qaxxZz5L17EQbX7/76++0Q4rd1ot4U7zNqmZTMJ3C4IeMXKikHlB/Q3Kjudobj6rvc1tURWYYhmE6PmyctgEyypnsDrfOLrVqqFUqct3cUFF+gsrTUjHuVT0SJnMEnQq5CU+wTvikjkIrK4cmQhVloiCCbI0DJJ9d6ZWl+P6q7/set/j1xa0+UwnPdK4X8aZ4IMafKe+byRQGEVS5SO+SjsVXLpY21mQ2nfKz87XSHVTe57Zup8QwDMN0XNg4bQNkiooEaaiZIhW5bk5UwzeDzNOSNe51PIyFAwvRq0svxGpjAID87HzX8GcVhVx300A1fFJXoZWRQ1Ohx9Rrzsubh+L8YtTsqSEZp6rexT7d+yh9z0TfyyCMPxOebfva
Ujiw0FjhND+ocnHkxBFMXjUZ4bSw1LVVNp3cWnBR1l3Z9zmIsH6GYRjm7IGN0zZApMTJ5h+19S51qnLdLFSVn1TmaVFQNerdFPYX/vIClly1xNXQVzE2dcK8VcInTSi0MnJoqnIr9ZpjBowxWkDLC518ThPVatuqarbX970M29pZtYHmQspuLsgaa7obAUHmubdVVWSGYRjmzICN0zbClBLXHnapqcrLweMHW/UZVPHuqio/qczToqDiYfRT2CdUTEDvSG8cajzUYszW/MoamzrFpWQVZxMKrYwSLduSx4u8C/NazbnXNQFz3kWvTR4T+Zy6BYvaomq2E5MbdiphxTLRMarGmupvSNB57m1RFZlhGIY5c2DjtA2hhmb60R52qSnKyw2Db8CkVZOMKIuqyk8q87QoCq2sR0KksANoZSQ559e0DPh5YGQUZxMKrYwSbapya9X2Kk/DFEg+F9Nh035zXjiwUKkPqR2/CAcquq1CdNY1kxt2qmHFdlmkomKsyW4EpCLPPdVVkRmGYZgzCzZO2wi/0MwgqjZaeXZBhbL5KS+Lr1yMORvmGPPuqio/1PY18aY44k1xZQWbqtDKeiRUWhAF6T2neGCo4ZOmFFqqEm0ivNZS9P3oHemNwoGFruMMKtdatQ+pdc91x+uMRDjooLNZYWrDTtf7asniD373A9Q11AnvRdVYk1mnUpHnnsqqyAzDMMyZR1pbD+Bs5NdvVWLC9SHsfeR3wMpK4B9dAXyp9FRuqySfi6rQ7Kjfgewl2Rj94mjcWHkjRr84GtlLsqWuJWL8JeNRO6sW1dOqsWL8ClRPq8buWbuR0T2DrBBRsJQfy4BzEkII/dP7t1J+LGPQOsaN+sZ6FCwvUJ4bS6F13q/Xs7UU2Mz0zBafZ6VntVJ+VcPgZOeXAsWLW7SuCEBSiZ0yZEqzIhtviiNWG8PKrSsRq40h3hRXfqZueMmh28aAdW7ntQBxeC1ls+BQ4yHPebeMCvvcAHCdH+tzypxbRX+cMtU/vT/mjpqL0On/c7vnGwbfgMmrJpPlNyh0NitMeOGpc209Gy/GXzIee2fvRZ9u3oWqZGRbF9nNTEsOH/zjg7io7CIULC/AwpqFWFizELdW3Yqq7VWtvmvi3WIYhmHOXthzmmKGDYvjz3++HrB+tD/7BvBIA3DBFiTuHCnt5aLsUkcjURTHilOSf+W2i286B0knb8/Ls+ZEdW5UQ+YoXjTdMDiTOV5B9D40WenVLodeMqsbXhtEbp3f/EQjUfKc+8nUyKyRrtd4/MrHMXvD7DbNX7fQ8b6Z8MJT5TtWG0M4Lez73nbu1BnPXvtsc4hvkEXWRMhuZqqukSYLYzEMwzBnF6FEIqGWmHSWcOTIEfTq1QuHDx9Genq61rmGDwfeesuabvuO8unPLtgC3DkSAFA9rZpc+t/y1iXP1LryL6Vgy+5ZuwPLv4rVxjD6xdG+5wbc71l2PP3T+5OUH8srMGnVJNQ31rseIzM3QHD3aRFviiN7SbZyPqHMdUWbECu3rsSNlTcKz7Ni/ApMGTIFgHeYpKWgr5q0CgCUn6kbFJlVDXc3/bxF8zNr5CxSGxr7nHvhvOdRWaPw1J+fwuz1s4XnV5VfWfzWNQCeG0ei94TyXlPlOxqJtlg//NZDnfXKFJS5se6JssaI5jLIVBIKJn+/GYZhmNTAntMUcewY8NZb1r+coYshAAngk+HJEN+u/yC1D7ErQl671HcMvQMLYgs8x5WK/CsZL4iMMqNTFTScFkY4LexpmALyxaSCrlLp5zH2QzbHi2LQBdX7cPes3dqVXu33QZFZ1dxiU7l11kbJ9Fem+87Pr9/9NWlclGdjv+fKbZXIWZpDzmc26YH3e99VvW8mKiJT5du5foi8iSqybdLAo7QxA0BeW0RrpG5hLIZhGObsg43TFDF1qvX/uefUNX++ZgUwZTy5fYhdEXJTfEy0zNCtfklVFqu2V0l7ZnWUH9PFpFJRpdJLYbe847ohsVRZC7r3oa5Cm4oWSyaMILeNADcSSKCuoQ4Z3TJwsOGgsUIzXs/bD1NVVimbIKoGndd7kpmeielDp+PEqROI1cY8zyXTCsaOSLZk1yvVaBU/dDYzveC2MAzDMIwpuCBSiti1i3jg5wNaFMeQKczhVmAl1flXboVcAHHhHwBShYRMYLqYlMmiPn64Ffw5cN8BrJ60mlRYyQtZWZMpelL1YevCKW6YUnJljGEdZApaOfEqnuXHTUNuAiBfaMatyJLf83bDZOEemcJhXoWjRDjfk5L8EiQSCSyILRAWhaMUT/PClGzJFleTwatoWG40V+l83BaGYRiGMQXnnAowlbNy/fXA2rWEAweuweo1iWalVjevrb3lX7l5IAH4Ft+Qzf2kopN/5ZX3Rs2TCyoXS+e8KrJGyaOr3FaJCRUTSGMwlc+okhOrg+y8W7In2xqoelo16hvrpXIXvbxv04dOJ3vJRHmeMojuPYj3nZLvTJ075zrnxYrxKzDp0klK7yNljjJ7ZuKFcS/gs+OfGVtDqGuAfRxBrM2m4JxThmGYjgeH9aaI5cuBnj39jkgqTb9anmihJOnmMba3/Cu3sLZYbYzs5cq7MK9d5F95he9R8uSCCNWz35PXJoVo3lRkTRR2SekFamGynYZKTqyOXMmGa8r2rLWH7IbTwuRQV78wbZnwTZNVVk31IaWiE+LtJt/xpjgKlhcIr+tW8Zb6nlPmaO/RvS3GYWINkQln5rYwDMMwTBCwcZoievQAhg2zF0Wyk1QCLr+8CTcNa6lYmAjL1S3rH1T+lQXVKKr6sApT10xtN/lXXkq0n8GmU1hKFaoxrCprfoaZjBFGUXKpRqRMTqxofoLwcsuEL7sZARRjmBKmTaF0bClmDp9pzAAJunCYE11j2DnX8aZ4IO277HL2Qd0HcjcpODcVmaJr3BaGYRiGCQIO6xVgOiwo2U6m9efDhgFbtrT+3ERYrv1czvYRm/duJindXuGqVPzCNWVDyeyYCjd0M0Aq3q8wGhqaynBG636qPqxC2ZtlrtcCWs6bSVmzoIbXFo0sQunYUt9jZD3OlBBrAL7hnveNug8r31tp3MstI/Oq7UZ03isguJBN6rjm5c3DmAFjtDcDggjxNt2+i1oYS4SpZ+YXCp4bzW2TtjAqcFgvwzBMx4MLIqWYLVuAo0eBceOAIUOS/z161N0wBfwLc1j/vmPoHah4v6JVESK3c1mFReob65GzNEdY5MfCq/BLNBIl3befF0RUSAgAwiF3JchSDGe9OgubPtrkWoyJQlDFpOykqkhP5bbK5gJOboapdS3gywJHAE3WZEP4qHNTOLDQ9+8qxWFExYoKBxb6ehYTSOCxzY8FUpCGIvO9I72xcepG7J61W8kQVvHOOv8dRMgm5d4BYGHNQuG6RCGIKtp+slWSX+JpmAKt33OVwljUc1NwK5blVjCpdlYt5l8xX7owlR+NjcCMGcDYscn/NjZqn5JhGIbp4LDnVEB72Xl128nuHekNAC0UIYpXR7U4CNDaw0jNvxIVuhF5ImQx4d0y7UlMRZEeldYgzmcjKnI
kE+ZqYg51Pc5e49X12Ot6qKjFs/zuwQ/q/ZXkl6D8nXJygSUTyERi6EZHBBERYD+3TsTFpEsnKRXGopybsoYEmf8uorAwjt/+Ng3O9mqFhcTigQTay+83wzAMQ4eNUwFt+ePmF4a7o36Haz6kSJEzHVpqUvHzMoomDJqAsjfKhGNxXhfQD/eVMSBE6FZeFqFaAdZNkfUyhlSUWd05DGreqJsFJq/phFrtWMWAkHk3AQRSPdoPmVBW3c0AkTFcNDKZF5/KirfV06oBQCv02u/cIrnU2aTUZfiYfXjr/17QfMUvSQAIGTNQ2ThlGIbpeLBxKqCtftz8FNLCgYXKBmYQir5JA87NKKrZU6OkwAWZf+U0ICierSA9OIB6jqHoWTfnr26vct0koDxnyhx6EZTHWTcnU+WabvjJjq4BYfLdNIHXhtumjzZhYc1C4fd1NgPcZDAcCiOe+DIFINURF1Qv67y8eRiUMQh9u/fFrWtvxb6jemtIW7TzsVj5TiVuvOz65iu1JmmgNjQAkYjetdg4ZRiG6Xhwtd52iKiia3F+sXL1ySAqZepWA7bjVoVUp1qwTEsKLyNB1C6F6tky0dbHD9nqpvaKtV5QvFuUqsyiOfQjiJxBQF2udK7phl/7H9UWKBYm303n2GSfpd97MihjEOm6OhV87TJoFQqzG6ZA8BVvne85VX7GDBjTLCNLrtZfQ6j577HaGMJpYWMe9XhTHHfcexjuRqlF8m9z5wLLlilfimEYhumgsHHazqAopEveXEI6l5siF5Sir2N8AP7Krkx7AzcoCq3IwPQyIGRbwwRlLAByz4yiyMrkr1I2AmR7gVrItIWhYJe16UOnY0FsgbRc6VyT+m6Y6geq+2460Qnt9ttwo6C7GRBOCyPvwjxMXTPV9e8io5/6HKnvuYps664h8aY4Nn20yfcYi0mrJrXoX63rWa7ZU4OGT2nPcMcOpUswDMMwHRw2TtuAxsbkrvCOHUBuLvDYY8nwpXhTHEu3LBUqpHZlwQ83Rc60om9H1figKLteChkFkUKr2ntU1bNl2liwkPEEihRZv3vzw1RvSjsmPc7UwmL90/vjhsE3YNHmRQDcQ2J1rklR8k1GOai+m05U3pWTp07irt/d5fuelL9djqyeWcJwVZV1yYmq0S/7HCnvuapsq64hsi1rnL81up7l/Uf3A9FDwC7xsbm50qdnGIZhzgA6XCuZJ598EtnZ2ejatStGjBiBLV49WACUl5cjLy8P5513Hs477zwUFBT4Hp8Kxo0DunUDnnwS2LAh+d9u3ZIFIrKXZGP2+tmk80QjUc9WDCGE0D+9v6siF0S7EB1kWoQ42xtsnLoRWT29W1L4zYOFyMAEWrZbsaPTGsatdY0ufs/WomhkEaqnVWPnzJ2IRqKerXdE9+aFiTBXN0RtYSiKspes1TfWo76xHiX5Jc1tM3bP2o1Hv/NoYNektKMJKspBFZV3pXJbJbJKs3Cw4aDneRNIYO/RvZh+2XQAwa9LMka/1WZl9vrZmFAxQfo5Ut5zVdmWXUNMtKyxP+eTp062akEj4is9vwJ850dI5pV6bXwl//bYY8rDZBiGYTowHaog0ksvvYRbbrkFzzzzDEaMGIGysjK8/PLL2L59O/r27dvq+Jtuugn/8i//glGjRqFr1674xS9+gTVr1uD9999HZmamyxVaY7KgwrhxQFWV219OP4KBa4EptN3okvwSFMeKT39bvtCJToEaU5goypHKSrBWcSbLU7HvyD7cvOZm4XetlhGpqoYqerYUD5BsJdsgC6jYUQmPtb6nKmtBXRMAMrplYO/svejcqbPn94MqoCWLbDE12bZGK8avQJdOXQJfl3Ra7Lhh6jmoyhn13KZb1mR0y0BdQ13zv2UqSO999glg+7jTnzqr9QLf+14Tqqr0750LIjEMw3Q8OpRxOmLECAwbNgzLTldJaGpqQv/+/TFz5kz85Cc/EX4/Ho/jvPPOw7Jly3DLLbeQrmnqx62xMekh9eb0Y/hpV6DzSc+j7IpQ1fYqLUUuSGWIgqnKwamoBFs0ogirtq1qcY0+3fr4eoQs3JTcoHsJ+rWCoVR/lalk21aVXy0ocmy6SrXJa/bp1gfPXvus69y1p2q7MlWTVfp3WnMf9LpEMfqjkSjqG+ulwtrdNrBSvaZ6IfM+W/cui2wF6cTK1acN1JbG6bBvf4Itm2ibxyLYOGUYhul4dJic05MnT+Ltt9/G/fff3/xZWloaCgoK8Prrr5PO0dDQgC+++ALRaDSoYXoyd67oiNM/0K8tBq6Z6XFEy/A23dxF3Tw0XSXSVE5dKirBlr1Z1uozkWFqKbnFsWLpfFZd3J6tTI6syfzVIKHmAZrM3zR9zYMNBz1lIcgCWoDcOywTZiwTFu7MJzWVH+uFKM/T+rdsvnXVh1WYumZqSjehqFBlcV7ePORn56NgeYH0NaQrSKffi70Hb0j+5tV/Dd3P/xTlT6RjytC2nSuGYRimbekwxunBgwcRj8fRr1+/Fp/369cPH374Iekc//7v/44LLrgABQXeP7wnTpzAiRMnmv995MgRtQE7IFcerP+a55/cFNKgFTk7dkV2R/0OPPf2c9h3dF+L8ckoYlRl98DxA1i5daWv8hxUJVigdS9EN2SVXFNVQWWQLQQjUuCLRhSh8OLCNvMOyRTnMZW/GcQ1LbxkIagCWrIFfmSKqVW8XyE1llTmuQP+Rv8dQ+/AgtgC6XO6bWAFvQlFRaZljU6LJb8K0s41bdfMXdi8dzP237AfX+nZFXkXTm0XXmaGYRimbelwBZFUeeSRR/Cb3/wGa9asQdeuXT2Pe/jhh9GrV6/m//Xv39/I9cmVB6M7XT8uHVuK3bN2t5mCU7mtEtlLsjH6xdG4sfJGLIgtaGGYArQiL3YsJcireA+QNAxnr5+NGytvxOgXRyN7STb5/BQoBaJEhimQDM20k5WehZL8khYVYJ14FUxyzrWp+5b1HvoValk9aTVKryo1VsxJBqsVxvRXppOL84hkTVQ8K4hrOr/vVTwLMF9AS6VQk0wxNaoxlNEto80MN2eBNasQVm5UvkxsOOT+PERF1VSwijTJFCKSkX9KYTURzrXGbU3LWZqD+sZ6o0XhGIZhmI5PhzFO+/Tpg3A4jAMHDrT4/MCBAzj//PN9v7to0SI88sgj2LBhA77+9a/7Hnv//ffj8OHDzf/7+OOPtccOgFB58HT1wu/8qMWnltIwc/hMhNPCSoqJLtQqj7KKGEUJchqGsgYwBT8jrGhkEekcpWNLlZVcuyKnU91VhIr30EuBb+tNkoLlBb55cU5jT6dKtYlrUgmiFY8TnQrV1MqyFMPcKgalI0u666Gb0a/SL9hvA0u08SCD6saVrPx7PeeMbhmkcdrnMMg1jWEYhjnz6DDGaefOnXHZZZdh06Yvm4c3NTVh06ZN+Na3vuX5vUcffRQ/+9nPsG7dOlx++eXC63Tp0gXp6ekt/meCSAQoLPT6q61ar60YklNpCMqj5odsv0tZRcxLCUqlJ8Iah5sRVjjQ86G1IDM9U1nJtY6jGA2zXp2FTR9tUl
LGVb2HQbS9sUM1MFRaYdiNPZWWHaauqaLUB4VOCySAtmEhMoZCCOGZa59B506dlQ3MoNZDGY+3zAaW7saDrpEnK/9uz3nv7L1Sa4jORgjDMAxzdtJhck4BYM6cOZg2bRouv/xyDB8+HGVlZTh+/Dhuu+02AMAtt9yCzMxMPPzwwwCAX/ziF5g/fz5WrFiB7OxsfPrppwCAHj16oEePHikf/9q1Xu1kQhj27X3Yf+292GtLcbXnmMrkuplEtd+lSBHzzD86uh8Hjh/w7ffql9ekg1veqkyenRPZ71KMhr1H97YoViKT5ysqBAOkPvePmvcou0li4TT2ZPI3TV7z2txrkVma6VlEy0+OTGOiOBQlx5tSzEk27xVIPpeHah5yzQs1sR5SCibZ861r9tSg7I0y4Xl1Nh5kipmJ8tfta60of9ntOcusIbJ57gzDMAzToYzTyZMno66uDvPnz8enn36Kb37zm1i3bl1zkaQ9e/YgLe1LZ/DTTz+NkydPYuLEiS3Os2DBAhQXF6dy6M2sXZtsKzN3brJIUm5uMuQ3EslEvKnWVWnWUUx0Ud3t91PE/BTSKUOmYOXWlcbHplpgSMegk/2uylzLKuM61V9NF2mS2XCR3STxM/aoxbNMXrNzp8549tpnfdvCqG4MyD4XU8WhKPhtBqhsuFVuq8SsV2dh71H352JqPZR5T3Q2sKioGnmitVYFmbkRrmknOwOvLca96wfi34Zav4VKw2IYhmHOEDpUn9O2oD30STPdo5FKvCmOpVuW+noxnYga0lP6bEYjUaP3q+KdoZyD2kuV+l2ZXoR2RHPuhqxBY2IOndf364NpvycAKI4VY2HNQtK5TfUApfb1lLmmjhxRz0fxPIr6fAYtTzLP3zqP19rhhYmetdT7CrofrUyPWcvopPY0VkW71+/KSpc+p8n0l7VrlYfVgvbw+80wDMPIwcapgPbw46aimOjipvSKECk9VIV058ydyFmaY0R5Nqmg2ZWxvt37AgA+O/4ZWRkXKXIio0GE6c0JiyCUXKohXpJfgvJ3yqXkUMfYsyOzWSBzTVMeaJ3nYtKYUjGQZTfcRGuHG5T10OSmi8rGA1UWgpgvqyBV506dCXenhuea1myYAk7jFDBnoLaH32+GYRhGDjZOBbSHH7dUe05lPRQWIkWMeh/z8ubhnPA5KI4VA1BXnlW8MxRMexGd53YzGijMGDYDEwZNMNp3NKg5lPFKUolGoqiYWGGsaBNls4ByzSB61pp4Lia8uKoGsuyGm0pUgWg9DGLTReZZy6wjst5u6nxldMvAM9c+E2j17VZr2snOwM//0TxyLxoa9EN828PvN8MwDCNHh6nWezaj26NRBpkiMFYvT2qLEWpO5cKahVgQW4BoJIpoJNrqmlSlUbcqqRtBt0XwqqhJYdlby4xXcA5iDgGzVWmt6q/l15VjzIAxxgxzSsVZ0TWDqihr4rnotgnSqcQqm/cqk49NWQ+DqiJLrWwtu47ItoKhzlddQ13g7VxarWmvLUbSKPWvhjx3bmBDYhiGYdoxbJy2AY2NwIwZwNixyf82NrY+xt5eoWZPDUqvLAUg36NRFmoRmNKxpaidVYv5V8wntxiRNUjqG+txqPGQlAFsx0RVUjupaovgNBo2Tt2IrJ601haA2f6BVR+2Ki3timwxJ5l2HSJkNixkUWk/YxHkRgZ1vjd9tMlXHlXaBFlrU3GsWNlAlt1wk107ROthUJsuFFTXERlZlJ2voNu52Ne0IZ0mkL6zY0dgw2EYhmHaMR2qWu+ZgLOVzIYNwJNPtsyx8Qr3um/UfVj53krpKqsUrHC01R+sJh3fr3s/aWNYVNXSiVV58z/f+U/psFHAfFVSqkK7dMtSzBw+U2uzwFlRdsnV7lV/vcZhomJp5bZKlL1ZRjpWVhmmtOugMC9vHorziwNtfyPTfsYi6Arb1PleWLMQL/zlBSMh54BaLrqbIS1byZq6dmT1zMKSq8X3GtSmCwWd9ipUWZRZa/2uZzIk3VrT/m0osPV18fG5uUqXYRiGYTo47DlNIe49TpNUVSX/7udtWbR5ER6/8nHlMDwv7KGHy95aRvqOSlimX2iaFzoeDFPh0JaniGq4z14/22hoLSAf7qvr+bGMKwoyc7hy60rEamOIN8V9PUEl+SWka5sM4/VD1sMoY4C4zY0IGc+zKU+619okwmutkPEEUtaOkvwS1BbVknLRf73111pj10E3ooMii/b5Uh2XX0i6isxaPPaY2eMYhmGYMwv2nKaIxkZvw9SiqiqBLd+a6+tt+dGGHyl5Eb2QLX7k17OPssvu1SNPhIoHQ6dHqYWKpwiQ7z9Kwe41Wf3BatJGgqrnR6bHp8oc2gu/uHmCAKD8nfJAe0cGCXXeqz6swtQ1U6WLa/nJthMTnlqZXHQLyjOS8Up7rR2yRZxq9tSgrqFOeFxGtwwt+fJaD1PVZ9aar7t+dxcONhyUup5fD9oJFRPQO9IbhxoPNX8uUxAuEklGCvn9HhYWcr9ThmGYsxWu1ivAVLW/GTOS4btChi0Drpnpe4ipqryy7Rn8qljKVrC1FLdNH20i9a/UuWfVqqSqVYstVCvZUpCp4Jx3YZ50aB61mmrRyCKUji31/HtbtTsJokKuDKo9awG5arGymyelY0uVQs5l78dUL003dJ+tKdn2w289LBxYaLzPrB8nT51EVmmWp0HuvJ5K2x6V5+0VScR9ThmGYc5uOKw3RZCLO9R/TXiIqTwoGe8Y4F0ERqXwixWaVpxfTA69VQ0lU6lKquIpchJkURVqyHLd8TqlarFUr03hwELPv+kWkFItRhRUhVwZKGG34ZC74SFTXMuS7Xl580jjUg05l11z+nTrg1kjZyEaiRovtKNSxMmOCdn2Q7QeVm2vkqq86wdlTezcqTOeufaZ5grTouvJ/i4AagXh1q5Ntou55x7gyiuT/21oMGeYMgzDMB0TNk5TBLm4Q3Sn8BBTeVBUhXPGsBmeBp2uASLKJUsggQmDJuChmodwUdlFygaH6ZxBGYIoqkJpLXHD4BswedVkpWqxJvJ1g2h3snHqRjxf+DxOnDrhqoybrpCruiFCeT7xhPe5ZDY2wmlhjBkwhjQuQG0uqGvOxEsmIqNbBuoa6lD2RlmbbAyICLI1F3U9LBxYqFwF2kJmE0Zmo0d1vVLZjItEgGXLgPXrk//lUF6GYRiGjdMUQSvukMBXJpSSlSadohQAXeGcMGiCp0FnygBxU5wsz1LZG2VYEFuAfUf3tfi7yZYpTkwalEEUVQH8Fc6KiRVY+d7KQDYNqN4dU618rI2FLp264NaqW1GwvMBVGTfd6kfXA+v3fIpGFpHOQZ1DmQJJKnNBMeh6R3pj9bbVrcJHg3xPVTAh217IrIeyGy92VDZhqBEkuutVEJtxDMMwzNkDG6cpwioC4UdhYQjLvpe0YkVKk4nQRRMeBFMGiF1xKhpRBMDfswSY7S3qhKqg9enWJxAPDBU3hXPnzJ3Ye3RvYJsGVO+OycIvFGXcZO9KUx5YL4OAGjJKnUPZStiyXi6RQWe9i6Y2BlQ33qjf05VtL
2TXQ+rGix2dTRhKBIluD2KqzOpurjIMwzBnJmycppC1a70NVKsIBEVpMqU4m/AgmDRAwmlh5F2Yh1XbVpHOCei35PCCarg/9d2nmv/t/Dug7oGxI7ovu8JZ31iPnKU5mL1+NuncMpsGsu2LTLbyoSjj+47sa/V3N0T3bNoD62YQyMyNrsHlh9tceF1P1PrHXr3ViezGgMrGm+z3dGTbC5X1UHY9N7kJ44ZKyy/rWOpmXHvIC2cYhmHaJ9xKJsWsXZtsKzN3brJIUm5uMuTXnmvj115BpDjLtozwas+QlZ5Fas8gavYu2/JDNddTtSWHF9Q2NOMvGY9wWlh5/kTIVEFWqS5M3TRQqZRsopVPvCmOpVuWkpRxSnsQQHzPMsq/agVp6txUba+SqoJtrR1LtywlbVA450Ikb15rU8X7FaT7Fm0M+LUw8WvNpPo9Vdn2QnY9PHnqJO763V1S67mpaBU/vH4XrBYyqu8zoP6sGIZhmLMDbiUjoL2VopdpISKjdDnbM4zKGoXNezeT2jXotPxwQm3zQMFEOwtqG5ogWpfItGFRaQsUVJsbJzqtfGTapPzq+l/hJ5t+ot2i49fv/ho3r7lZeL0V41dgypAppLF54Tc3AJTb8FjyIDMXOm1/TKxLIhkOIYTMnpl4YdwL+Oz4Zy164oq+lypZB+jrYeW2Svzgdz8gbarY5y2o3wA33NY1tw0Taq/ZVs/4H12BNSuAzwcA530EXH8T+vftY+xZtbffb4ZhGEYMG6cC2tuPG9V4WzF+BSZdOknJYJLtWer1HarCYkelP2Q4FPbMTzWhmLZFz0yKsdk70hsvTXwJ+dn5qNlTQ563IHtQeiE7hype4Opp1ahvrNfaKFE1GHRwmxtA3+CS2TSiGIZ+11Mxhp2ovPtZ6VmYPnQ6FsQWCI819bwoiNZDWfm2b4TIzDWAQNYu1TWxxTN+7g3gk+FAi9DhBHDBFlT/qdHIs2pvv98MwzCMGA7r7WBQc5p21O9opWxSwlxVQ678QpFlEIXF2bFCyygtOZZuWYp+3fspjct06B8FSnjzocZDKFhegKz0LEy8ZCL53KZCjmWQmUPZHrP2UMlwWlg5TJ1qMMiGqotwm5tYbYwcWpx3YZ7reycTsq8bymwihFslDHXfkX0kw1T1/KqopmZ4YV/3gwoJl8FNZikGa/MzaDZMXfhkOG6/7hB2bdUaIsMwDNNBYc+pgFTuvFJ+3Cm75tFIFPWN9dLhebreE1N4eXyc9E/vjwmDJqDsjTKp85tS0IJEJrzZqaD6UTq2FDOHz5R+fqn0Hst40LxkWna8MmHRIYQC9zpTn3/RiCKs2rbK1wChzIVMRIZfKLNOBIWK51QG3VQHUzIvK99ea25QIeEqUKNtYrUxjH72auCRhuYRtSY55qNHQ+jRQ29c7DllGIbpeLBxKiBVP24qhW+A1uF6CSSai1a44afspDKXSYTXfEwfOh250dxmZVEmnNUi1WGtKkqurKIeQghpobRAwptFsmlaiZcxzFVCx92gzndGtww8c+0zgcuNjqGmIt8m333V/HXRxpsqKrKvktpARXbjSZRbHERIuAyyufHdv7EBJ967WnjeceOANWv0xsbGKcMwTMeDw3rbAbKhtH7hencMvcM3zM0vPC8VVSCpUMOEZcKALVSqGgNqRqaqkit7X/bwZp1Kmm7j95PN+0bdh5XvrTSqxFND11W9wG5QZbp0bGlKNjQoz98r11pFvilVZvt064N9R/YhVhvzlX17yGfltkrkLM0hyYdfuCoVE7IfdDVZqnxTNkJ0Q8J1NhmtlkPTX5lOrjYcTguj74lv4WPC+XftUh4awzAM04HhPqdtjGpPRa8efbnRXNJ13ZRxkz1LTUBpGK/ak0+2F6BKXz6dfrSq91U0ssi3R64MItlMIIHHNj+m3W/XCbUHqN0w1e1xS5XpzPRMo/10vaD0IKbkWlPlWyRvCSRQ11CHm9fcLNV3VFb+VXq1WpTkl2jLvuket26I5BtIGqZ7Z+9VMoJTsclorYcFywtQ31jveZybHF526bmka+TkKA+PYRiG6cCwcdrG6DRUdzPedAxMitIUjUQRb4qTlLNUKPGAnkJLUdBUlGwTSq7KfRUOLHTdtFBRclV7zuoq8RTDzO4JU9k4cEI1iOuO12lfi4rX889Kz0LRyCLSOWQMEBl5E21A6Mi/c+Nt49SNyOopfjYP5D2gLfs66zF1vRPJdwghPHPtM+jcqTN53HaC3mT0Wg/9sMvh8uXW/+fvGf/yOIZhGOZsgo3TNsb0LjdVyXarNErx1tU31qNgeQHJa5gqJR5ordCWji0lfU+koKkq2aaU3Ggkil0zd2Hj1I2IRqKe57M/V4rHmYKOZ0XWc+fEzzCze8J0vNN2KAbxDYNvwORVk417iv3wipAoHFhI+j7FAHGTt+pp1fjV9b9Cn259XL8jMjB15B9oufE2ZsAYLLmatlmhK/sy67F93h7844O4qOwi8npHlW8VdH4DRKhUGgZaymGPHsCwYcmReDFsGLSLITEMwzAdE845bWNM73LrtnTwymd14pd/ZTJnSybP055/FW+KY/Hri4W9AEUKmmqLDdVNB78c1fLryn37VqrklALec2wifFvHwBXlHYs2DmTzLr1kPzM9E7f/8+1YumWpsWvJ4JZXSMkRpci3n7xlpmfiYMNBz++mMn9dpi2ODjqtupwE0X6Lsh6aaOvjhWw0hZccbtkCDB8OvPVW6+8MG5b8O8MwDHN2wp7TNiaIXW7dXXnLW+PnrfPynJjM2dLxvsqGhroRb4pj00ebhNcCWivZKpsOIi8gAOPeFr85poR5i6DOg1dIpJ8nTNc754bTU1mSX4JEIoGSP5ZI59YFiQn5Fslb1fYq0lhSlb/u5kXeOXMnopGosdQBynrcO9IbxbFioZFGWe9kPL0y62FQnlmZzSaRHG7ZAhw9mqzKO2RI8r9Hj7JhyjAMc7bDrWQEpKIUvV9rGODLUvwqvRt1WnyotJcw1ZJCpj2BH6p9F92+54fzfij9aO3tHGR6zAIw0rqFMscASD1nRfcnGodKRWNTvTn9xuU2P0FcywvRO6wi39ZGwKRVkzwNbqs6b11DnXCMbu+yrPyrEFS7F51WXV7ott9SXQ9Nt3mSaXFkqs2TDtxKhmEYpuPBYb3tAErImooi5hYOKINs/lXNnhqs/mC19rlNhmuqhM7JGCVeYWuyoXWy7R90+8xS53j3rN2ustk/vT9uGHwDFm1e1Pwdv/vzQicEPMjCLyZy63ShvPOy8k3ddLGq82Z0y8DBhoPSocNBhpZa9xFUuxedVl1e6IS366yHur8BTigtjqKRKComViDvwjxs3rsZK7euNGIYMwzDMGcHbJy2E/yUzKD77nlhMv9K5tzUcM1YbQzhtLBQKZdR0GSMEpGSTc2Tq9xWiemvTCeNz1SPWZmQWD/ZHJk1UjkPUHcTwlTepZt3yVRunSoy7zxVvlU8wTcNuQlL3lxiNH9dN0/UdK6x19jdZL7i/Qql8+lsWsi8q5bsmvKUOqFsOpRfV47DJw6T+9syDMMw
jB02TtsRbkqmjCIGmAn3tKAo/9FIFMWxYrLCS1HiqQaYMyzRhPIjY5RQlGyRZ0vWYDDlmZMtWONlAKl4pi1Ui01ZmPDOeXknJ14yUTh+2WtRCcL4UvUEF15ciLyL8pQNTB358ML05pUXbjIv+/6Z2LSgvqtVH1Zh6pqpgRuEok0HAG2ymcowDMOcGbBx2s6hKmIP1TyE8nfKjSomIuXf+reMYQqIlXiqAujMlzOh/FAVwXl581CcX0xSdr0MO1kvrUjJlckvMxkS67WpIhoLWeneXuXpGdTxzvl5J8veLCONjXotGXSNdpVzOrHLWzgtrGVg6oSWuslRW25eUcJaLUxtWlDfVTeZDcog9Np0AIDsJdmBerUZhmGYMxs2Tts5VEXMLQ+qveVfZaZnYvrQ6Thx6gRitTFPBVdGAbRjQvmhKoJjBozRVq5kDQY/JVc2J9lUSKzOWMhK9xtlyLswz1OGVVtyiLyTaaE0xBPe1V+t3DqdXrJumG7DInusm1FlOnfRC7sxuqN+B557+znsO7qv+e9Z6VmYPpQWAh/E5pXfhp0TU5sWlPUwHAq7yqpoTdQpmOQmE7K58wzDMAzjhFvJtHN0wjhlW7d44dbCYfes3ciN5pK+P2PYjOaWHAtiC4RtEPzaZIjQbekRZAN7J1SDIRqJ+irUopYgsnOs4vGxKsDOXjcbEyomkMYi06pGJMMyLTkAmnfSUvbd5ieEEMqvKzeySeGE+s5/UPcBuX2KzDqi225EFWerlAWxBS0MUyApRwtiC9A70ltpbQDMrIdebVpK8ktarJEm5pDyrvptonitiTqturwIYmOFYRiGObtg47Sdo9tr0p5/5dZHkoqb8k9VeDO6Z6A4VuyqaHoZT14KoFffVSdWBWHZezZttPlBnb+KiRW+bUFU+8qa6oVoV3K9wmHdxmLNNcU7brqHKFU5LhpZZLxXpAjqO7+wZiHZoKCcs3ekNzZO3WjMqJLBa4PFieUJtAh688prDXHbsKudVYv5V8wnb5DI4PeuFo0sIp3DLvMqG1oUgqygzTAMw5wdcJ9TAe2hT5qo7x6FaCRqPP+K0sswMz0TiUSilWFqP8av36Ez7CzeFEfB8gLh2EryS7RycFX7o7qN2StUzkQvSBN9ZXVC+1QqwDrHMnvdbFKOp8keojLzFnQFVDe83nk3qL1/qf2UAfP9Mf0Q9fj1wu0dd65zXlBkKag+qlTcngHQuuhdzZ4aqTVApqey7DNPRX9bGdrD7zfDMAwjB+ecdgBM5H2mOv/KUninD53uOz5KRVb75/GmuHIFYdE9O5XBXTN3YfPezSQjU5Qn56bQmqg2ayKMTjWfULUCrHMshRcXkoxTk94WmZzbVOVbAl/K0olTJ1CcX4zyt8ux9yjNmyjKtZZpbZRKo0w299oiN5qL2lm1SptXIllqq/Zd9utTn4Fs/ji14NbSLUvRr3s/qc2JoPvbMgzDMGc+7DkVEMTO67FjwNSpwK5dQE4OsHw50KOH+HteO+l+O9V+mNrF9vMynjh1AjdW3ig8h4xXTORJ7h3pjUONh1y/63XPqgq52/fcrgl4e7Z0vLQmPKeqUK8tGkuQ3hY/D6CMJzEVuMlBZs9M3HnZnfgi/gUW1iwUnoPynClz4nwOQc7Jyq0rSWuEE7d7NSFLMp5FwGz7LkDtGcjIssp8y25O6KxpJmHPKcMwTMeDjVMBpn/chg8H3nqr9efDhgFbtqidUyYE0A2q4eKn1Hr9LSjjyUv5oXqS7ddTVchlQlplw5epSm5bhtHJKrl+YwnCUKRsOAShRKs8S5EMzho5C2VvlAmvrRP6HGS4px+bPtpE8nZSx6ErS9Q1Szd1wA2dZ0CVZZVNJZX3MJWh4V6wccowDNPxYONUgMkfNy/D1MLPQBX90LspJu0h/yrVXrGK9yukPLWqyqBqnlwQHkzThh1VqZRRciljMWkoymw4mFSiVd4Tigz26dYHdQ11wuvryFdbeOErt1Xi3lfv9cxJdyKTX6siS/GmOIpjxSQvtc74vNB9BhRZFq3JXqQ6X9QEbJwyDMN0PDjnNEUcO+ZvmALJvx871jrEl6LwuvV6bA/5V6ZykLyULqeCJlstkpp/5cyJVc2TC6KFAjWXkILJXDc7lLGo9Ct1g9LD1J6fKZNTqhISK3pPKDJY11CHjG4ZONhwkJRXqEKq24CoFNOiyrSKLFFC9EXo9lqmzu2+I/sQq421ujeKLMv0arXDPUoZhmGYVMDGaYqYOpV+3Jo1X/5bRuFVKR4kUmhlFX03dI2nIIuDqCrkqgp6UC0UTBh2ssYVRcktGpmUDZmCKrqGouqGgwg/OSwcWKj8nlBl6aYhN2HJm0sCKzSTyjYg1GJaWelZmD50OnKjueSiZH6bV16oGMpe6Bhx1Lm9+w9348iJI83/lg0n9lqTKXCPUoZhGCZI2DhNEbt2yR9H6WF55yt3oleXXq599Ux4LU0p+qrGk0mDye2eVRVyWQWd6tnSCTHVqSqrugnhpeSabrvjxM9QPHHqBOWWpZRskRwW5xcrvydUWSq8uBB5F+UZ8ZC7IbuxowM18uCFwhcwZsAY32N0qwvLVJ2W8TSqGHF5F+Yhq2eWsEKz3TAF1KoIO9fkA8cPYPb62cLvcY9ShmEYJkjYOE0ROTnA1q204ywoCtyhxkMoWF7gqYzpei1NhvrJGk+qBlPhwEIU5xdjyZtLWvV2dd6zqkIuE9JK3Qhoy76KOpsQOhsPqhWSRYYiBaqSTZHDJW8uIZ3L7T2RbWtjIvTZwrk58PiVj2PyqsmBtwGhriu/2/E7X+PURMsXmRB9mfZdKkZcOC2M6Zf5t99yQzWc2L4mx5viWPz64kA2J9pDcSSGYRimY8DGaYpYvhzo2ZN2nIXMzrufMqYT8pnKUD8nKgaTV2GoWSNm4YG8B4x5l2XytigbAW3dV1FmE0I3hBJQv1+KoVj+djmyemZh31EzSjZFDimFxwD390RWBt3mWrVCsNvmwH2j7sPK91YG4p21oK4XZW+UIe/CPGVZoBhrVNmflzeveeOj/J3ywDzMudFcpe/p5oQG1aO0LTfdGIZhmI5HmuwX1q1bh//5n/9p/veTTz6Jb37zm7jxxhvx+eefGx3cmUSPHslqvH4MG9ayGJKMwWcpEkXrihBvirf6u6XQThkyxTUE2E68KY5YbQwrt65EvCmOzJ6ZzcqJkxBC6J/e30ionxNZr61l8DgNic8bP0dxrBhV26ta3FusNoZ4U7zZu5yZntnie1npWb5God/3SvJLsGL8ClRPq8buWbuFFUJF4dtez9UUVFnbUb8D2UuyMfrF0bix8kaMfnE0spdko3JbJflaOvdLMRT3Ht2L6ZdNB4BWcquiZFPlMBqJKr8nqjIIJOVe9pl4vSv7juzDos2LUHplKaqnVZNlWBbLW+w1X3Z0ZMEy1vygyv6YAWOaiw4tuSrpKTchX6rj8UInJ1RHDt3wk7OJFROl1g2GYRjm7EC6lcyQIUPwi1/8At/97nexdetWDBs2DHPmzEF1dTUuvvhiPP/880GNtU1oyz6nqiX/dVo
9uO1y9470xqHGQ5676UF59WTaKuRdmCdsxxGNRBHpFGmRz2XfwVfxPlnGbqw2BgDIz84XGv869xlUlUxKyx+rNZFsP1gnOvdL7a26YvwKdOnUxUhrGpm+l8WxYgDqLX1kZVClR6/pfqY6ecMTKiYIjwP0ZcGvVVa8KY5+i/rhUOMh1797zYdu6yOveVNd9y1MrBMmwnDbqm+uHW4lwzAM0/GQDuvdvXs3Bg0aBABYvXo1rr32Wvz85z/HO++8g+9+97vGB3imsWVLsl3M1KnJ4kc5OclQXmf7GEC95L/qzrmXomuFLEYj0RYKnE6oH0X5kcnFo3hR3JRPZxipbvXWF/7ygnS4momQWl1EIX3Wv3VDKK37oOB2nEyYeX52vpH8TKocPpD3AAb3HaxVsEi2WrFKWKvJasY6IZvjLxmPohFFKHuzzPc4QF8W/KjaXuVpmALJ+XDzhOqkS4jmTWXdt6+HuuuETnE1C5KcHTyAibd+ioYDmcjNBR57DIhEtC7LMAzDdHCkjdPOnTujoaEBALBx40bccsstAIBoNIojR474fZU5TY8eLdvF+KFS8l8lLIyi6EbOiWDjxI347PhnWoYRVaGVyYFSNchVC4mYzBGVDakNKnfLr3iWqAiMjEGjY1RQClFFI1HEm+KIN8WNKNkyclg4sBC9uvTS8qRTUTUyTRU5M/EOFF5cSDJOVWRBplWWH70jvVE4sND1byr5v9R5c3sXRVEsZVeVoWp7VbvI8RTK2cpKYPs4rD099g0bgCefBAoLgbVrgx8fwzAM0z6Rzjn913/9V8yZMwc/+9nPsGXLFlxzzTUAgL/+9a/IysoyPkAmaTTUzqrFxqkbEY1EPY/Tyf8k5fId2YtwWrhF3qpbDqcfsjlI1BwonTwtam6ahekcUVH+XQgh9I70RnGsdbsS07lblqw58w2pRVoohg/lfr3k2C/fz6K+sR4Fywukc2H98JLDzJ6ZKM4vxolTJ/DgHx/ERWUXoWB5ARbWLMTCmoW4tepWVG2vMjIGJ6pGpgmPI+UdmPXqLGz6aJPv2hCULJhqlQUkK6JT1wZR/q/M2uH2Lh647wBWT1rtuR4CaDc5nr5ydtowdaOqChjn/ieGYRjmLEA653TPnj24++678fHHH+Pee+/F7bffDgCYPXs24vE4nnjiiUAG2lakKmeFGoZlGXeAel6bGyr5W7IhfTo5SKL50c3Tct6bH0HkiPo91wQSzR4TN1KRI2j6nnXl2E32nASRE22fux31O1D+drlvT8og87JVnwklv1gkT9Rr2/FaG4KQBWrup6m8Vft9+OX/RiNRI++R8x0elTUKNXtqMGnVJM/K0anI8XSO0VXOTnYGfv6P5lF50dCgH+LLOacMwzAdD2nP6YUXXojf/e53+Mtf/tJsmAJAaWnpGWeYpgqZapumqylayHpTVKow6lTXFFUbpnjURFDnYN+RfaTj3DxbXp5mUeVfUU4c1fOrUtkV0PNwuaErx5RogiAqHVty2KVTl6Qn28cwdRuDbKSBH6rPxITHUSWMXjc6wgsvbz9lLTSVt0r1iOqsHXbs62F9Yz1yluagYHmBb0sj2QgRXTzl7LXFSBql/uv03LnBjY1hGIZpv5ByTo8cOdK86yjKK+XdSTlU8rZ0CnF4IZO/pVqIxVSumxeeOZM9s9B4qtG10iwg15ewclslZq+fTRqPU6EVeZq9nmvF+xWk6wWZIyiTd0n1zOrKsdXWg6qQm6p07Cf/fmN4qOYhlL9TbiwXUKcvpV9+McXjqBJG77c2mJAFledrKm916ZalpI23uoY60rio8+v1Tvuh02rGDb/33VXO6r9GOu+OHUaHyTAMw3QQSMbpeeedh/3796Nv374499xzEQq13vFMJBIIhUKIx4PrxXim0NiY3BX+61+b8KeGw0hccQ7Q+WSLY0SFekwUenGej6roxmpjSoVYTHkp/PBScqu2V2k3l6cqgm4KLdUwdHuuqcgRpBSFohg0sqHeunLcFpWOKXmKbrgVlFIpoGVHx8h0vit9u/cFAHx2/DPEamO+80MpTOWG32aB6TWNgo6BD9DCy+1kdMvQNoYtZDdJLHT7qNqhvO9OOavYNQRrd4nPnUtLc2cYhmHOMEg5p3/84x/xL//yL+jUqRNisZircWpxxRVXGB1gW2M6Z2XcuGTBh5YkgIFrgSnuimSQ/S2dUPK3Zq+bTaqw6czTksl1A2C8ZYpObpooX9Z5H3ZjQ7ffXypzBEvHlqJf936+c+5l5Kn03NRFpgepKa8lNU+RiolcQF3DW6UljFeuKIUV41dg0qWTAmmLpILK2qDitayeVo36xnqlHFvnM443xVGwvIB8bdM5p6rve2Mj0K2b+Pycc8owDHN2Il0Q6WzD5I+bu2EKwPpx9zBQqYV6TOGn6FZuq8SEigmk87gZ1ZTiJwACa4WgqsRTjaCMbhl45tpnWozTRDEh3aIxKgaVzJxTDfCdM3di897NxgwSiuEejURdQ7pVjWaVYkAUKBsDQaCzqSDrObQwuVlgCpm1QWazCmhtGMoaw27HW3JNvT5gboNId8PN+7cwial2MmycMgzDdDykjdPi4mLMnz8faWktaykdPnwYP/jBD7By5UqjA2xrTP24iXeLTz+Gn3ZtFeIr6zk1Fb7odl6qQtY/vb+nYuKnmAFIufeNMl9U4+5X1/8KN339JqXvijYhdDy/KgaVzJzLGO/2vDsTBkkqKx0DcpWhnaGiVFJlqOkaGdY57KHBt669FfuOpm6zwG0cQRv4Mu+T133JVmhXrUIO0NcJKiY23LwMVJN9Ttk4ZRiG6XhIV+v95S9/iX/913/FRx991PxZLBbDkCFDsGsXIZHkLEVcefB09cLXFts+ke9bqlqNlYJMrp1fnpZXdc3CgYVG+4dSoM4XNU/LWXFU5rui49zmbefMnYhGosLqr6LKrm7IzDk199NZEMZE/8VUVTq2kKkMbY1BllT1pdSpoG1hrxw7ZsAYLLnauxKwJVMq77hfpWNKf1FTVZIBuaJCXlWHRRXIrXGr5JVaRCNRbJy6kVy9mIqJ4nZr1yZDd++5B7jyyuR/GxrMGaYMwzBMx4RUEMnOu+++i7vuugvf/OY3sXjxYvz1r3/FkiVLMHfuXJSUyCthZwvkyoOnKxnKFOqx0KnGSoGqkBSNLBJex634iWqhJVksj0XVh1WuubNu86VT1ZPy3T7d+mDfkX3CQjT2eavcVomcpTmk0Ei/wi9+UOdctcgKtRiTiKArHbtdz6sQ0fSh05EbzW0eAwCUv1MuVTzI1LyICKKCtt/c3DH0DtfCUBZe8uaXEwu4R1tY7/F9o+7DyvdWGg0hpsp76dhSzBw+U/n5qRbfsn4/yq8rx5gBY5Su7Qf1/j+o+8B3TYtEgGXLTI+OYRiG6chIe07PO+88VFRUYMaMGbjrrruwZMkSvPrqq3jooYfQqZO0rSvNk08+iezsbHTt2hUjRozAli1bfI9/+eWXcfHFF6Nr164YMmQI/vCHPwQ+RjfIlQejOwHI9y2l9tnT8RhQFZLCgYVK5w+61QzQ0sPiVd
TJbb50ekOKPG0JJFDXUIeb19xM9nSr9Jn18jBSEM25imfWwjJIYrUxLe+WmycqyArRbp7s2lm1mH/F/BZjUO3Bq+LVlfUQBjU/XtERuVHaQmiXN5Gs3/nKnZ7rXgIJPLb5Man3hAK1x6yOYQrQ1zpnr1/dvtciqO/7wpqFRqN3GIZhmDMfaeMUAJYuXYolS5ZgypQpGDBgAO6991785S9/MT22Vrz00kuYM2cOFixYgHfeeQff+MY3MHbsWHz22Weux2/evBlTpkzB7bffjv/3//4fxo0bh3HjxuG9994LfKxOHntMdEQCQAIvLOsr1UTeQic8j6rQUhUymTBkO0G3mvFSct1wmy+/8FGRIihjGIoUZ52NCKfRUDq2VDgeQDznqgaYnUmrJhkPR6fIbEa3jGavtQmD2I0gNwYsVEL6KUZGNBJFvCneJpsFIllPIOEbtu2F7oadymaVSmgxdb4qJla02ggIMl9Z9n1PVZg6wzAM0/GRLoh01VVX4c9//jOeeeYZTJw4EY2NjZgzZw5eeOEFlJSU4Mc//nFQY8WIESMwbNgwLDsdB9TU1IT+/ftj5syZ+MlPftLq+MmTJ+P48eP43e9+1/zZyJEj8c1vfhPPPPMM6ZqpqdabRKcQhEzRHXsLhx31O/Dc289h39F9zcf4hbzpVo31w0TLFNG5ZUPk3IoUyVb1tB87KmsUNu/djH1H9qFofREONhx0/Z7fvZooRmIfn8k5dwu/dBZBomKqCJZMy5OgCxHZ5eHA8QOYvX628DuU56hbcZcyPybmRlbegqqMbEenVRe1SJlKqx4g2DVRFbsM76jfgfK3y7H3KK3FVqrHygWRGIZhOh7SntN4PI53330XEycmlZlIJIKnn34aq1atQmkpzQujwsmTJ/H222+joODLvm5paWkoKCjA66+/7vqd119/vcXxADB27FjP4wHgxIkTOHLkSIv/mWLt2qQB6oZuhULqDvuO+h0tvCsLYgtaGKaAWmioyHtI8RrohM6KUM3dcptXqrfMzZOVszQH9Y31yEzP9DRMAX9Pt8nwZ9Nz7hbOuXf2XqWQX1Ph6Ca91m7IeMTssjNz+EwjkQi6If3U+THh/ZKVN50Qfio61/AKX3YaprIh+BZBrokqONe0BbEFSCCBkvwSzMub5/tdlTB1hmEY5uxD2jh97bXXcMEFF7T6/JprrsHWrVuNDMqNgwcPIh6Po1+/fi0+79evHz799FPX73z66adSxwPAww8/jF69ejX/r3///vqDtxFUhUJK+GLvSG8Ux4qFRppsaKgojEwm3FAndNYPWQVUN0RZpJBWbfdxodtwG7fp8GfTc+403jt36qwc8iur0HoZinaZ/dX1v0Kfbn08rwfQDWKd6timDA8TFXet+dk4dWOr/EX7eYDgNgvc5E01hF8G6jW8ZMtvs8pELYAg1kSVEGOvNe2To5+gOFaMY18cI13bvqY1NgIzZgBjxyb/29godx8MwzDMmYfRCkZ9+rgrfB2J+++/H3PmzGn+95EjR4wbqJEIsOSJL0Oj3jyg35PPrxqrqIWDG6IqrW7Vdt1QqSDsVXlVZ35klFxdj4RIIQ0hhF+/+2vSudzGrVM52AsTc+4X7uxVwdXqeSmCsrkgCp20ZDZWGyN7rf1k3ER1bL/KtpS+lPGmODZ9tMn3GAvRHFrFm/yeh9/cyIS7U+WNIuvRSBSHGg9JVaG2vkt9T1TDcmU2DvxkzeSaqHIvQaxpzjSXDRuAJ5802+eUYRiG6XhIG6fxeBylpaWoqKjAnj17cPLkyRZ/r68XK5oq9OnTB+FwGAcOHGjx+YEDB3D++ee7fuf888+XOh4AunTpgi5duugP2AdVRUeETgsHL3RC3igKjVerDKrxS0Wk5NqhGgZuxJviWLplqVAhrWuoQ0a3DBxsOChtYIo2IgA1w1pnziky7aZgx5viKFhe4HXaZkSbCzKGoomwaB3ZdqJqeLjNuR+UDRqZuXHmHsrkrgM0eaPI+nPXPQcArrmfNwy+AYs2LwLgniNPeU90NiFMh+Drromq90Jp8yWzpvnVX6iqShqubKAyDMOcnUiH9ZaUlODxxx/H5MmTcfjwYcyZMwfjx49HWloaiouLAxhiks6dO+Oyyy7Dpk1fegmampqwadMmfOtb33L9zre+9a0WxwPJsGSv41OBTv4RBd0WDk50wupMhBuaglJdsmhkEaqnVWPnzJ2IRqLSLU2sEE9KkRsAuGnITa7joSjOQYU/qyAj084QyPzsfO28S5nQyXhTHAeOH2h1nBt+sk+VbWp7HGoes4VM5WmZEPVU5a7LQJF1r3Xv0e88qvWe6IblBl2BXAbVe6ncVolJqyaRrkFZ006eCPsWBgSSBiqH+DIMw5ydSFfrzcnJwRNPPIFrrrkGPXv2xP/+7/82f/bGG29gxYoVQY0VL730EqZNm4Znn30Ww4cPR1lZGSoqKvDhhx+iX79+uOWWW5CZmYmHH34YQLKVzBVXXIFHHnkE11xzDX7zm9/g5z//Od555x0MHjyYdE2T1f5EFWOtneWdM3di897NxkJaAXqFV+dYdCorqlYQNnXPboiqa6p6tb08En5UT6tGfWM9qdqnFzKhlEFAlWk/OdKtAE2V7ZL8EpS/Uy406Chjpsq2M2zZZMVbqmEK0CseU6rDWvdEkXWTFVp1ZF31u7qVsdtTtV2Ve5Fd1yhr2owZyfBdEffcA5wuzK8MV+tlGIbpeEiH9X766acYMmQIAKBHjx44fPgwAODaa6/Ff/zHf5gdnYPJkyejrq4O8+fPx6effopvfvObWLduXXPRoz179iAt7Utn8KhRo7BixQrMmzcPP/3pT5Gbm4u1a9eSDVPTNHtbTnYGXlsM1H8NiO4EvvMjoPPJZm9LVmlWi9YbJhRambBWU1UgZb0wpsOc3fALoVQNefPzSLhhD28Lp4W1csl0Qv1MGLYmcup08y6poZOUsHaq7FNl25m/KZOP6oVM5WnZEPUgc9fzLszTkjcdWXf7LkX+qbJVtb3KMzc/iBB8FWRDjGXWNZk1bccO2nipxzEMwzBnFtLGaVZWFvbv348LL7wQOTk52LBhA4YOHYq33nor8FxNAJgxYwZmzJjh+rdYLNbqs+9///v4/ve/H/CoaOw/uh9YWQlsHwdYYU+7ALx1DzBwLTAlqUA6e0KaUGj9lCQnOjmXdqjFTIpjxVIGoa5R5aWoquYQyhgLbgqp6fxaCqbynk3l1OkUfDEZEkmVfZnNHjuy+ahuUOd8Xt48FOcXS18jiNz1qg+rMHXN1JRsQFGgyj9VtsreKEPehXmu96K7+SKCuh7KhhjLtt+irmm5ucniRyJy1TJRGIZhmA6OdFjvT37yE6Snp+OnP/0pXnrpJdx8883Izs7Gnj17MHv2bDzyyCNBjbVNMBkW9K9X1uFPr1kVje05Oacfgc1AdWIq/MtLKZs+dDpyo7nGQ0P9QjYTSKB3pDcONR5y/a7bPQdVTEonfI8a4gnIheyaxlJiq7ZXoeyNslZ/lw0BBejzNi9vHsYMGBNI2LEodJJK6dhS3H353eSQe
i/ZpuIVCipCN9SUipvRU/F+BVnWRajImwm8IiTcxiMjW/3T+/uuz0GE4Mush7IhxjKh6+XXlZOfYWMj0K2b+LiGhmRlex04rJdhGKbjIW2cOnn99dfx+uuvIzc3F9ddd52pcbUbTP24JX+Qral2K/5y+m8/7Qp0Puny9yRUhdNPEUp1nqJXnifVC2Pds4xSKYtMfuyUIVNafEY1FkrHlmLm8JkpzQm1oFZ2ld0EkTUMg/KWiTZBKBSNLMKqD1ZJbXy4zSu1PY5qrnVb5jHK5q4DQDgURjzhXjAolTmXVm/PSasmeT4frw2xCRUTSNfQ3RCQQWU9lMnvpj7rjVM3YsyAMVJj96vWC5hrJ8PGKcMwTMdD2zg90zH140YtAoFhy4BrZnr+2c04chKUd5GCl+Gr44WxlHjdwjt+6HijZI2FttgcUCnWRFWyZTyIQXrLdDdB3KCM1/k8qe1x3Ao0Ud9TqpFhWtZkNiNkNgaCNupk2+44xzN73WyUvVkm/B5lfTaBTiEyUVE45zUo6xoAaTnzMlBN9jll45RhGKbjoWWcpqen43//938xYMAAk2NqV5j6cRs7lpZng5x1wNSrPf8sUuKC9C6KkDWKZQxCAORd/HBaWFoZ1/VGUY2FVG8cyFR2tSOrZMso/0F6y9yMMQDCZ5sWSjPm3dOpeCt6T529RcvfLsfeo2YrT4ugbkb0T++PCYMmuIaQOwnSqFPZnHGOJ1Wh1FRMVBGmGJOUdQ1o3WOWKmeNjcDcucniR7m5wGOP6Yfy2mHjlGEYpuNB7nP6ySeftPqMna50yMUdojtdPzbd79E0Kv1braIylB6X1CIwk1ZNau69OPrF0chekk3qs+jXB9VU39Gge9y6IVvUxIJSPMUKk1y5dSWikSh2zdyF6mnVmJc3z/d7Qfa3desXKnq2CSQ8DVOV8VKuZ53X7VqAd79JZ2/RBBIoyS9p0dszaFnzk3XnWAoHFpLOGVSfT9lK2l7jkVmrVMZI6YVrR7cQGbWvrmhdA6AkZ9Y9r925EhPvi+EPr8axbJlZw5RhGIbpmJA9p+eddx6efPJJ3Hjjl2GYPXv2xF/+8hf2nBKgFYFIAD+NIHS6rYyF6X6P1dOqtds62NENMaN4HFVy3dzOI4Ia8uaFX1hzkGHJXsgUa5IZh59X7sSpE8r5u37ohqh6PdsJl0wIJGTTVK61dS7ZIj6mZE0mVN95vrbu82myv7NuP143VL3bqfbk+kUkyMpZKqNH2HPKMAzT8SC3knnooYdw1113Yc2aNXj22WcRjUZx880384JPJBJJ5tL4F4EI4ZabVyi1HIg3xbHpo02ksZhu66DT65LaZiFVrTt0WpoA3i0UTPQDlSXeFMeB4wfIx1P7Lor6wRbnF5OuJ+MtM6HQej3bmj01JONU1rvndb2K9ytI36f0m3STb5OyJpp30fd1+nyayJelehgp4zHdEka1rzJAa9Nl9R2lIJprt3UtVhuTljOde2YYhmHODsjG6d13342rr74at99+OwYNGoTy8nI8/fTTQY7tjGPtWkoRCHnjSLbYh5sirqMc6IaYUQxCmT6tTmQNvyD6jprqB0pFViYA+iaIyFAqf7scWT2zsO+oGcXZpELr9mxNKfpeCr7zeqb7TTrl25SsmZp3FaPOlHdNZkOBIv+6m1cWOn2VAT2j34nqXMvKme49MwzDMGcHZOMUAL761a/i//7f/4tly5Zh/PjxuOSSS9CpU8tTvPPOO0YHeKaxdm3LIhADcuL43swa/P3UfsRqvRVaL2SLfXi1ddBRDqgK4I76HZ4KPOWevZRcausOU4afCrIGiQ5UmbCU2qIRRSi8uJCkZFMMpb1H96IkvwTFsWJtxVlHoaV63kwo+jIKvqwxLGsEmJA104aEjFFncjOCEnERjURRMbHCN/fSjsz67CWDJrzbJjy5OnMd9CYLwzAMc3YiZZwCwN/+9jdUVlbivPPOQ2FhYSvjlBETiQDLln2p0D5ToeYdkCn2IVv4RUY5oIbcLogtwBNvPoFDjYeaPzMRmklt3RFU0RUKpsPwvJCRCZVwRKqhlBvNNRICqarQynqDdBR9WQVf1hiWNQJMyJquIUH1Irt9z6RRTJnr8uvKfft0qoYXi/KyKYjeNx1Pru5cG9lk+UdXYM0K4PMBwHkfAdff2KabiAzDMEzbI2VZlpeX40c/+hEKCgrw/vvvIyMjI6hxnfGY8A7IVGLNSs8it3WQVQ7sCqAIu2EKmAnNjDfFAzP8TPWJNBmG5wdVJtK7pGPxlYulQ7hlDKX87HztEEiVEFXVd0tF0VdV8GWMYVkjwISs6YQG64TkBuFd0914ULmXVOZlq6YhqMy1cz18/MrHMXnVZLVNlufeAD4ZDlgVkD/7BvBIAx78wxFM+Yv07TAMwzBnCGTj9KqrrsKWLVuwbNky3HLLLUGO6YzHlHeAqkDOy5uH4vziZOEXgnGq4mEcf8l4FOcXk6qQ2jGRaxSU4aejZLsZtaYLqrhBlYkjJ45g8qrJCKeFpa6rYijp9FmU9RqayOWTUfRVjal4UxzRSBSPjHkEdQ11yOiWgcz0TNd5UJFvXVlTDQ3W3XQLKjdbZeNB9V7aIi9bBdm59loP7xt1H1a+t1Juk+W5108bpq358N10DB8ObNmiclcMwzBMR4dsnMbjcbz77rvIysoKcjxnBaa8A1QFcsyAMQinhQMPLc2NUpu5tsRErlF7qqQpMmpNFFTxQnZjQXZToL3nZ6Y6r03Vs+t1/14tWlTkW0fWVNYKE5tupnKzVcOK7d9XvZdU52WrIjPXfuvhos2LUDGxAn269/GVM2vtmPB/brYZps6+scl/v/UWcOwY0KOHyp0xDMMwHRmycfraa68FOY6zClPegbwL89A70rtVqKxFEOF+fh4v3ZxO3Vyj9lBJk2rU6hZU8UKm5Y6qodae8zNTXRVZ1pgS3b+XF0p1Y4NqkLnJmexaYWJjwMQGmolKvzr3kuq8bFWocz0qaxRylub4rodzNswh9aodf8l4DNuyA2+1MkpbM3UqsGYN/X4YhmGYMwOuZtQGmPIOVG2v8jRMgaTiYDLcT6T0qfYitbCKG+kYlybawOiEapos5qKiZMvk/1qoGGrtNT/TZFVkiizKGFOi+weAxzY/1upvqhsbVPzkTGatMLExoLuBZqrSr869pDovWxXqXG/eu9loNELjZ7T52bWLdBjDMAxzhsHGaRtgwjtgKbp+9I70RuHAwlafB5l/pdKL1LrfuuN1yF6Srd3bUBdVxdRkSKmOkm0Zcz/43Q9Q11AnvA9Vj3eq8jMBoHBgIXp16YVYbQwAkJ+d79r6w1ToOnVjQMaYitXGpPrOWpjIy/aCIme1s2oDyQ/2QnUDzeTmkM69UNvXxJviiDfFhe+RqaJsblDmeuXWlaRzUdbNym2V2IEIgKuFx+bkkC7LMAzDnGGwcdoGmAivpVRlPdR4yNMQclOIvJQgGaXPS9mxwo+97veGwTdg8qrJ2h4PE6gqpqZCSk0o2eMvGY9rc69FZmkmDjYcdD0mFUVX7MjMj10Wd9TvwHNvP4d9
R/c1H/PCX15w3bQwlRMrszFANaZ0QomD6AEpI2eUtcJkTrvKBprJzSGde/GTQYv6xnoULC8Qbr6ZCFEWIZprU5sOze/VtV2A9xpOf+od3rt8OemyDMMwzBkGG6dthG4BH9O5dX5KUDQSlVL6vJSdqu1Vrtd4/MrHMXvDbGPhsLqoKqamlDiqkr10y1L0697PU3Hv3Kkznr322eYQ37YoumKHOj876ne08qA78du00Hm3dEKPRcaUiT67JntABtFD1mTVbPsGGsV7aHJN1N3k8JJBJ35ybCpEGRDPn5/31mSkTwIJoOs/gAu2nC6KlEBLAzX572HDuBgSwzDM2UookUjIJweeRRw5cgS9evXC4cOHkZ6ebvz8qiFbsdoYRr84Wnhc9bRq5RBSSwmbNXIWqQXNivErMGXIFN9jnPc7KmsUnvrzU5i9frbw/JR7MYU1J4C7UeemGMab4sheki1U4kSFQ1ZuXYkbK2+UGq+fN8XNmOif3j8lRVfsUOYnGomivrGeFBIumk+Vd8vke+U2Hr/7p0C9LuXeqXJmf69Fa8WqSasAwKi8Ub2HQTw73Xcn3hRHrDaGSasmob6x3vUYNzm2ZMXLsKWuJV73IOt9VVkPLeJNcSzdsrT1Gu/sc3r67MOGhYy1kQn695thGIYxDxunAtrrj5sJQ4iqOPXp1oeUuyirsLspTX5QjF8ZRAq8imKqo8RZUJVsO6LzqxhqQeS6+c1PAgnf6tNemNy0UDHYZPC6fxFBGCOyxpyMwQTAiOxQjGHrnmTWRJnx6b4HKkazKUNbZv5EqK6Hvmv8P7oCa1YAnw8AzvsI//X8Kdz2re+TxkOhvf5+MwzDMN5wWG8HxURVS4phmEACdQ11yOiWgYMNB7VzyezXd1Oa/DARFmm/vkiBV8l78wrny0zPxPSh03Hi1AnEamO+51GpeiwKf5YtXhRUrptfyO0dQ+/AgtgC6XOaDHU1We3XDa/775/eHzcMvgGLNi8CoB6CLRMKKhuuKSro5AwD1t0wkA2xpq6JXukFXrKtWwFcJdd69Qertc9tunq47HpIWuO7/gOY8uWcf/Ur1cJxMAzDMGc27DkV0N53XnW8ezKGYdGIIix5cwkAOY+gm9cBgDCn0I6M14iCSW+CF6KCPpRCKCoeNkDfk5jq+bHkouL9CulwZsCs59RUaDblOm5Kvk4YqUooKNXTX7mtEtNfme4ZYWHHVISDqvfQbw4BBC7bTqj3UZJfgvJ3yqUqOvvJPvW6G6duRDgtbDRCQiSLTkyv8Rbt/febYRiGaQ0bpwI6wo+bTNiZrNJgUT2tGvWN9VKKs5f3bfrQ6WQPmWml0WQuFwUdQ0827NlixfgVmHTpJKVQROr87Jy5E5v3bjaq0MqGMwel0FINtqBafKQ6D11kEMtuZpnaLNAJsVbZFAtKnkznWlPHSp0/69oWJiIkZN7lIDcGOsLvN8MwDNMSDuttB+gquTJhZ5QWNHbsoX3htDA5rMsvvFAmdJNavZiKyXYTInTD6pxhdAeOHyAVjnKrdktVOKnzk1Wa1SIP2YRCKxPOHGS1YUq13yBbfMi0ebKjWq3WL1zTT4admG5NpBNi7TaHMmHJeRfmGdt4EIUbW/+WMUwBsexT58/pDTfRvksm1N70Gs8wDMN0bNg4bWP8lFzZfEcKMkqDmxJEMYRFRhmV0rGlmDl8plHjw3QLHj9MGMLOdhqLX18s9MAUx4pJOYd2ZHPdnAWyTCi0lP6QFkErtH4Gm8kWHxSohrBpYw6Q38yibBZQN+NM9k0F6O901YdVmLpmqtGNB5O51lTZV8ldB8y076LKYhBrPMMwDNOx4bBeAUGGBfmFfLpVLk11uJVq+weVarN2ggqvA+hjm5c3D2MGjNHaEAii8qtOtVuveVUNH6aeXxa/cPDcaK7REFpZUhUWbhlwVR9WoezNMtfrAOrVaqljkwkNLb+uXLhWyHqcTVS/ttBZl0yFnurkWs8YNgMTBk3AqKxR5JB6ndx1QD1EO1W52yI4rJdhGKbjkdbWAzhboXgXnUaG5Zmp3FapfF1rN91SttzoHemNjVM3Yves3UqKmIp31vnvIMI1Adr9A8DCmoUY/eJoZC/JVp7vICq/Wh6YzPTMFp9npWehJL/Etw2L3VNrYSmvuoap1/n9sFoZrdy6ErHaGOJNcQDJe6ydVYvqadVYMX4FqqdVo3ZWLeZfMR9ThkxBfnZ+m3laZLzhqlRuq0T2kmyMfnG0q2FqXQcAitYVNc+b5XkGzL1XVNmsmFhBMkzdZM1vXfOTd1lDkfLuh0Puc2PN96xXZ2HTR5taySwVy0Ntl2PqHE8YNAH1jfXIWZqD0S+Oxo2VNwrXKK/5i0aipGtaFYTd3lM/gpBFhmEY5uyAPacCgtp5Vd3FN7HjLOONUMmH1alOqeqtlUHGm6DjMQnSe6DjgbE8tarFsajn9yPInE0ROjneqeqDKuPlki1wBNDnwJQM63qcTRWfEkUeyGJCZqlzvPjKxZi8arJScTXn/MWb4ihYXiAcm9saLXPPOtWnTcCeU4ZhmI4HG6cCgvpxoyq5XphoFyJSGlQNCBmFFkAgFU9FyISy6hiRIkO4aGQyryuV1W4t2ZHdIMnoltEq19Tv/F6kolWN37V1jGLVirgUVDcLqNVq7e1jUh1aG+S8yeK19k0YNAFlb5RJncuUzIrmuGJiBWZvmG0snFyngrBs+zCZMGTTsHHKMAzT8WDjVEB785xamOglKFJgdQwIk7lipvBSmjZ9tAkLaxYKv6+qOLspw+FQGPHEl+FxqfTAWAosdYPEnuuWszRHy4uW6lY+dkQyXTGxAn269/FVooP0hquuCRS5bM5h3V7laoCJ3ktdD1jQHmdZ3Na+mj01bRbNAvjPcTQSNW7cpyp/PVUREW6wccowDNPx4Gq9bcCxY8Djs/LQacv7ONVrO3D9jUDXf0idQyZP0QuvCp26LVAAWjsOFVTD+/yUpkEZg0jXVq3ga6/8ahW4sRumQPDVbt3yvGRy3Sw5kTm/GzI5mybbeVByvG9YfYNww0B2jmWQlS9qtVpKlIDovfarXkwhiPxrHdzWPp3qtjLtp7zWML85Xrl1JWksMjKkU0HY7Z5TXcWaYRiGOTNh4zTFDB8OvPUWAIQBDAI+HQQ80gBcsAW4c6Tw+yq9BGUNOlO9QHUVWiequ/Iipak4v5h0fR3FOZwWRt6FeZi6Zqrr30XGAfUZymwKqLTq0Nl0iDfFsemjTZ5/t2O6nQelJQp1wyCojRcZ+aIawjI5rKL3WqafshPTbWFUN6n8vifTysgNimEoWsO85jgo495rja54v4L0feueTWxoMgzDMAzAYb1CTIYFfWmYOjn9CE4bqFY4lZdnRmYHWsWga28heIB6mPHJUyeRWZqJgw0HXc8bQgiZPZOVLPcdDbbtgWrencozpCrvquHXssaBiXY1OiHhqjnefs/eVJEe+/n8QobtUEJqTeawmsBUqL/OJhXle6qyGmSudapbswSVv146thT9uvdLWe4ph/UyDMN0PLiVTIo4dszLMAVgldr/ZDhemVC
NA/cdwOpJq7XbJ6i0bgDaXwgeJSTT3lLDonJbJbJKszwNU+v7e4/uxfTLpgMItu0BNeTO3r5h9vrZmFAxQfoZurWscEO1VQf1/IB8uxpRO4+idUU4eeqkVHsLVVn1aw0jMwcU/NpvWBSNLEL1tGrsnLkT0UjU9/4p3mI3gnqvTbSFUV3TZL7nbGW0cepGZPX0bkETQgj90/v7en1V1zCLVLdmEbXdcd4zdW2bvX42qQUOwzAMc/bCnlMBpnZer78eWLtWfNy4ccCaNcn/X8czQ/GaZKVnoXZWbUqLvqig4nGUbcmxYvwKdOnUJdC2Bzotdtww+RxMewHt56V672RCKZ2VgymeZKpX0o0V41dg0qWTUlJZWlR8iOoBlPUWp+q91gnJVSmmZaIIl67XV2YN88u1TmVrFpl7VinmlYrieOw5ZRiG6XhwzmmK2LVL/ji/gkUi5Y7iNdl7ZC8eqnkI86+Y3+LzIIu+qCDjcQT8vRRefKXnV5CfnW80R9YJJe8uGomiOFYsnSOoWzxIJ5/QDxnvXVZ6Frmdh7Oljajoim4+4Y76Ha0MnKCqkPrlassUnQkih9UEbrJmYk3zypk1kUOvm2dMXcNEudam8/j9MJm/7gbnojIMwzBusHGaInJygK1bacf5QfWaUJWhBbEFGNx3cCvlKqiiL3aoHhTZMGMZg8hZiCUoI806t5/Rb/1b1nAyXTzIJFQ5nJc3D8X5xajZUyPdaxKgKbpeMu1s62PHb8MgyCqkXgacTNEZGYPB5Hsti+k1zXmc6vec6BiG1DWs7M2yVp855Ux3jZLxXFPvWXXzR7bSMcMwDHPmw2G9AkyFBR07BvTsKT7u6FGgRw/3v8kU1JAJs+qf3t8zpC2ocE+ZoiZB9e+0vpvqFgdeoXmi9g0ytGU/WTuyIdm64bf2c1l49be1/l13vA6TV00GYK7fo9t1dd8dnfB2wP3eikYUofDiwpQUp3EjiDXN+fzbQ7EeilyLNklUQ67tcrijfgeee/s57Du6r/nvJjeyVItJBVWEi8N6GYZhOh5snApITbXeJMOGAVu2tP7cKo4zadUk1DfWu37XqbzIVuqUad6ui0rVyiDynzK6ZeCZa59pE+PNzXCpeL9CuqJsUAotdcyi86rkL3s9ayp2RVenQqvMhoGJCssiVKtopzJPkYrOmiabC69iGAYReSDaKKAgu05TjEXTG1n2deLA8QOYvX628DtB/f6wccowDNPx4Gq9KWTLlqQB6oaXYVq5rRLZS7JRsLzAU4kDWlcVtVd3pOAW0mYpkNSKqBRUq1bKVPoUVZoEkobp3tl7lZUx3blxq/SqkiPoZZgC/pVmZbHkcPSLo6WqbapUGfV61hndMkhjteZRp0Jr9bRq7J61G7nRXNI17e+PakVZ6n3JHud1b21lmJpY02Qq1lKqIHv1uDVZTdZvDSsaWUQ6BzVEGaBXyXauuybXtpnDZ0pV/WUYhmEY9pwKCGLn9dgxYOrUZPGjnBxg+XL3UF7ZirNAa6/Jg398sN14fgD10DwL0/07VbyBQc2NbJ9LavEg3ZA5nf6M9nPIeu/cwnFzluaQPGcAtCu0AuphyX7XzeyZiRfGvYDPjn+mVK22vVTRdo5N5r3UWdNUPcFu30tl5IGF21zV7KnRWhfdrqHS49atUrju2ubrMT7ZGWN3v4fEoa8hNxd47DEgElG6jCvsOWUYhul4sHEqoK1+3FSVC7d8u4vKLmqRY2THL6xSxxjxQjU0UQVTLTnsvPz+y5i0alKrz02FxsnkCJpWaN3QacMhyvVUyeujbjroboLY70HGIFRpqSGj/Ou2NAkC6ntkak2zzkUxhv1ksK1DTp3jNLnxoCKHXpiQLTcZ6brqVfzjvbFo7vN9msJCWts1CmycMgzDdDw4rLedIlNxFvAOjwqnhfHE1U8gdPr/nN8BWobCyYTdqoR/qYYmquAXzqgSernq/VWYstrdYPYLSZYds1fo3+pJq1F6VWlzGLAofNlEyJxMGw47bmHAOUtzUN9Y3yKUWRZqeLepCq2yoaQyYZcWMiGkMuHtbpgO1Zd5j0ytaYB7WLzb2PxksF/3fqRxyDxT1flVDVk2MWYRJtY251r8L3/6zNUwBYCqqmS/b4ZhGObshFvJtFNklAuR8iLTFoZqjDxU85BS+Bel16e9tYsIkQfFREsOIKnofn/V933HotIWwW38Jto3mOpbqWLkyfTiVIEyPyY3QWTeH5VNFdl+j6otTUyHo8u8RwCw6aNN5HPryi9FBk1vlOnOr6n2XfGmOA4cP0A6loqJli/WWtzYCNz4mv+xVVVAY6PZEF+GYRimY8BhvQLaKixIthUMRXmhhMLJtGFxQg3/MhWaqKoMms4jdEINSTZlLJjI5/QyboKYK6sYVedOnQl3pwYlTDIzPRMvFNJzPylzptsKJ6gQ0iBC9amy4ZbHKEKnmjA1FH3nzJ3kHGaK4W9qfnVaEKm0cpGpFmwi3WLGDODJJ8XH3XMPsGyZ1qU4rJdhGKYDwp7TdorIwwgA0UgUFRMrfEMkZRUdnXBaqgfIhIdAxzsn6w2UDUekzKFJ76KsJ03GKJb1dFPmqq6hDlmlWYG28RF5lRNIoPGLRhQsL2j+XLQx4OaFl7kuhdUfrAYAo302VSIFKFDfI5nevZQ1TQQ1+uNn//0zTB86HcWxYq3IA9PzS5EzN1SKTQFJuae2TDKRbrFjh9njGIZhmDMLzjltAxobk7vHY8cm/9vY2PoYUQ5SCCGUX1eOMQPG+Bohsu0/KG1Y/KC2L9Fpb6HajsZCNpxPJsSakuOpO343KDl4gHybk6ByLusa6oy36nDilZ8ZjUQBAIcaD7X43FT7EK/rUlj21jJymx4qsdqYUt6wCBOGigV1TaNAlcGFNQuxILYA0Ui0WSYsqDm8gHpetkn81hQnWelZKMkvabHuPpD3QMpavuTSujORj2MYhmHOLNg4TTHjxgHduiXDmjZsSP63Wzf3AhA6xU9Uey1SjBEKmz7aJDSuqAaVhVVspDhWrKUMyhYSklHCKZ6WtlJmU9FjVtZg0S0gJcK5CbJx6kZ07dTV9VhTRa28rpvVk77pY8pQrtxW6Vpd2g3ZIjq6G1l2ZIxBEbIyWN9Yj0ONh1oZbNSxmCq+pQM1uqN0bClqZ9Vi/hXzW6y7pgoyUQpCPfYY7Z6oxzEMwzBnFmycppBx45KFHtzwqlCo4mHU9cz5GSMl+SWe17WzsGahUe+P3Qu8sGYh6TteyqCsIkZRwsOhMComVpAU2qoPPYSAOH5VdIxiqhzKGCx+1zNZVda+CRJOC3u2VbKPaemWpdoGqv26YwaMwZKr3WXOaxyAnqFsbVDVN9aTjpc16kxtZM3LmydlDIqQNZqt0Nv/fOc/MenSSdIhxamsQO7Eek+scHAR/br3E6ZbqFaCpkbqRCLJdjF+FBZyMSSGYZizFS6IJMBUQYXGxqSHVERDg/6PskwRG6tfpluuolu+KgBywRfTvT9lc6lExWVkCgl5FXGyeHniy5h46UThmOJNcXxl8VdQ11CnPX5ZUtVjVvZ5Oa8nyo
nVKRgjU/BLp5KtFyoFa1TkQKaIF7Xwj9e8e71H1DzGIIpAid5Xk2Mx1aNUVq6DkiWV90ulIJTXZi33OWUYhjm74YJIKWLuXPpxuhUKqR63qg+rMHXNVE8jwKswB7Xgi06xFQuZXCoLajsamUJCXkWcZKuK1uypIRmmGd0ylPO7vJTLVHl4rLm663d34WDDQanriQpF3TfqPqx8b6VyhWOZezPV+saOXeZWf7Aay94Sv+wqHnTZIl6ikE3RhoHbewQA5e+UG2sbJYPX+ypCZa5NtHSSrdwtuwHkN9du64WMga5aEGrt2uSm7dy5yeJHubnJUF72mDIMw5zdsOdUgKmd17FjkzmmIq68Eli/XvkyAOTa0DiRaQcjo/iVji3FzOEzpQ1U2Xsx5a31QsdrB9A9d0Uji1A6tlR6fH5KbuHAQiMeHionT51EVmmWpzHuvJ5syx77eQDaM5dt9WJ6TuxQZXvj1I0Ip4WlZI4qZ9FIFOXXlZPy11XapKi2jdJ9z5zn2fTRJlI6gI4XV6Wlk/U9mfmVfU/85tpEOyvZdlOphD2nDMMwHQ/OOU0RqaxQSM2RdIOa62blIM7Lm0ca0+z1s5VyUGU9GX269cGskbMQjUQDKbQjW8TJCdVzVzhQkJTlgqgIVtX2KiNFTwBaTmjnTp3xzLXPNFdiFV1P1ttnIZOf6Zcn6XXuoCqtUgpz9Y70xrS106QqbgN0ORPlSQeZv+5lmKpUGffCel+L84vJRdBU851TVR9A9j3xmmvVonlO2kNBKIZhGObMgY3TFKFSoVBVSaIUKoknvM9FVcjDaWGMGTCGNCZArQIpVcmeeMlEZHTLQF1DHcreKDPejsMUspWCqVCV3MKBhVpFTwA540HGONFRXmWMSJVWL0Eo1qL3NIEEDjUealXAifIeUeVM5MkyUVnarXLx84XP48SpE63WNVMGkxPRpkQCCUwYNAEP1TyEi8ouUjaMZTevVOaXKoszhs3wNJBNtrNqy4JQDMMwzJkHG6cpQrZCoa73wM8oKBpZRDoHRQmSrc4KyFUgpXqXVm9b3Sp81FQ7DpOYatngREbJlTEWnKgYD1SPkgnllaq4W2Oihk4HpVh7vaeZ6ZnoHent+h3Ke2RKzkx5xSyjrUunLri16lYULC9ota4F0f/XjtdcW1EkZW+UYUFsgdJmgCoq80uVxQmDJngayNT1IlYbE26QBrXhxjAMw5ydsHGaQtau9TZQ7RUKTXkPvIwCasgoRQkKOkyS4l2yzut2LUBOodVpYUL9rm7LBjdklVyKseBEx3igeJRM9M2kKu5WLmJGtwz06dbH87hUKNZu7+kvr/slDjUe8vwO1WOpK2cmvWKide2hmocC7/9rn+uiEUUA/KNIrOsCwfTkVZlfE8Ygdb2YtGqScIM0qA03hmEY5uykwxin9fX1uOmmm5Ceno5zzz0Xt99+O44dO+Z7/MyZMzFw4EBEIhFceOGFuPfee3H48OEUjro1a9cm28Xcc0+y+NE99yT/bRmmpr0HbkaBjHJDMbhMhUl6XUvUd1VXibfQ8VbLflclP80PFSVXdhPERIinH7IbHXZkjEj7s7p5zc2eFYVNKtai98j+ntY31mNKJa2Vj8jI0JUzU14xyrq25M0lpDHphlhba+CqbavI37HLtskevCrza8IYpK4Xzh65XmuD6kZIYyMwY0ayYOCMGcl/MwzDMGc3HaZa79VXX439+/fj2WefxRdffIHbbrsNw4YNw4oVK1yPf++997BgwQLceuutGDRoEP72t7/hBz/4Ab7+9a9j1Sq6UpLqan+pqnxIqaIJQKqSY7wpjqVblmL2+tnS46dUjXSr4FnxfoWR3p0mKpKqfNcUsr0WT546iczSTF/DzFmlNpV9Ut2qnt4w+AYs2rwIgFzlV+e5qS04ZNsE+V2T+h7JtghJRQVU1Yq7FjLrAgUT96xa0bxoRBFWbVulVd3Wier8qlYHBuSrVtvxq2AtU2WZ+5wyDMMwbnQI43Tbtm0YNGgQ3nrrLVx++eUAgHXr1uG73/0u9u7diwsuuIB0npdffhk333wzjh8/jk6daC1eU/3j9ut3f42b19wsPE7XAAD8lRsASgaXSkN6HePOhDEvas1gjXnnzJ3YvHdzq16OlO8G0YrECVXJrdxWiR/87gekfqv2eUtlywgvJdeEQu73rPp064PSsaXITM9Ubl9iR0a2ZVqEpFKuAL02KTItp6KRKD5v/DzwVkfUjRYKJjahVOfX+Z6MyhrVao3ymiuv9YKKznvuZZhamDJQ2ThlGIbpeNAstDbm9ddfx7nnnttsmAJAQUEB0tLS8Oabb+L6668nncf6gfIzTE+cOIETJ040//vIkSPqA5ekclsl2btgokDL+EvGo3BgYSsjAEgaXLJN1QH5hvSqDdwtrLA4kTHsF3ZIDVd19uzMSs/C9KHTyaGuQXu4rNA6Ny+dpeTKeubsIZQyc63bp9IKc3W7RzeZpZyb8pzrGuqQmZ5p5FlRQlnvfOVO9OrSC/nZ+dItQlKZx6cy77KyBgCzRsxCcayYtHbooLJ+hkNh1/xUyjolQlWu7e9J5bZK5CzNIXt1vdaLaCTaKpzXjf1H9yu9542N/oYpkPx7Y+OXBQIZhmGYs4cOYZx++umn6Nu3b4vPOnXqhGg0ik8//ZR0joMHD+JnP/sZ7rzzTt/jHn74YZSUlCiPVRWqIkcxtmRwMwJitTGywZV3YV4r5YRiJFnI5DG6GQyyxrAb1Bw2t2rAC2ILSN9NVY8/PyXXz1jywq7EU+e6anuVVDi4LG4yS1GSU92PkWJsHmo8hILlBchKz8LESyaSzts70hvPXfdc4KHiTrw2DNyQlTVrXXsg7wEM7juYtHboINpocY4tgQSp/dbSLUvRr3s/oxsyFLx+P6wcUS+vrtt6EW+Ko2B5gfCaO+p3tPL0U97zuXNp9zR3LrBsGe1YhmEY5syhTY3Tn/zkJ/jFL37he8y2bdu0r3PkyBFcc801GDRoEIqLi32Pvf/++zFnzpwW3+3fv7/2GOw0NiZ/eHfsAHJzgUd+IafIBe0xoSrnVR9WYeqaqZ7KCcUTYMJgkDGG3VD1QqsaeSKC8jrKeOa8NkFEcw24h4OLlGQdqDmdqe7HKGPk7juyD2VvlpGOfWniS1L9hS105UoGWVkDvlzXdLzjVPw2WpxkpWdhwqAJKHujTHhee+SLyQ0ZP3SjT5zrRbwpLoyQiEaiKI4VS7/n8aY43nz37wDc2yTZ2bFDeAjDMAxzBtKmxumPfvQj3Hrrrb7HDBgwAOeffz4+++yzFp+fOnUK9fX1OP/8832/f/ToUVx11VXo2bMn1qxZg3POOcf3+C5duqBLly6k8avgzLXZsAF48sk0YOATwBR/JSajWwaeufaZwJUdqnLupkw7lRORJ8CUweCm0Fr5Vyu3rvRVcGW8KLLIerpliufIIusR9NoECSIcXBUZj5FOCLiKYSdj5FrzkxZK8/TQWeNT8a75yVUQhqCMrLltIul4Ean4bbRMHzodudHc5vmo2VNDMk7tBLkhY0c3+sSJKEJC1L7L6
z1vlsF//DuAGcJx5OYKD2EYhmHOQDpUQaQ///nPuOyyywAAGzZswFVXXeVbEOnIkSMYO3YsunTpgj/84Q/o1q2b9LVNFlTwLgJx+hEMXOtroP7q+l/hpq/fpDUGCpRKjl75V4Bc4RJKAaXM9Ey8UPgCPjv+GVl5ljXydIuDWGNVrSJrH0NQVX+pBY1UN0FSXTApVhvDpFWTPPPj/IpvAfTKqKobBroVUXVkyTl+L7lKIIHekd4t2jGZ2AyhykLp2FLMHD6zTXtgUjYeVJ+lbBEnlU2QoKpoexVpumPoHaR0Bvt73kIGT3YGfv6P00d5t4xqaNDPOeWCSAzDMB2PDtHn9JJLLsFVV12F6dOnY8uWLfjTn/6EGTNm4IYbbmg2TPft24eLL74YW7ZsAZD8Ubryyitx/Phx/PKXv8SRI0fw6aef4tNPP0U8braROgX/IhCnf6C3j0v+cHuQmZ5ptMeeF5Q+epT8K0q/S9G1Ekig8YtGFCwvIPcele3fCXj36cvoliG8BwAoyS+R7vFnx3R/WzdEPRWB5P3unb1XyTBJVU6n1ae0YHmBb+EWNzmU7ceoIksWqn1bi0YWacmSHYpcOfsEU+5NBLV/p2WYpmJd88KtF7TbMSrPMhV9loMKV/fqkZsbpbk0rfe8lQx2PpnciAUAD0O/sJCLITEMw5ytdIiCSADw61//GjNmzMCYMWOQlpaGCRMm4Iknnmj++xdffIHt27ejoaEBAPDOO+/gzTffBAB87Wtfa3Gu3bt3Izs7O2VjByhFIE4rPK8tBq6Z6fhLcve97nidUgEKFfxC3qj5V1QjxK9q5KHGQ57Ks5uyrpN/5RUanLM0RxgK+kDeA3gg7wHl8Eid0Dyqt4VS0OiZa59B507eGyR+pCKnU6UCrFMOqTmNurl81rXcZNuPwoGFWPSdRUZCbWUrAANmQrBlCpXphrKnKpdW5VlaiNZC1YJGAC0tIRqJIt4UR7wprp2/Lvueu8rglPHAysrkhqwDk31OGYZhmI5HhwjrbUtMhQWNHZvMLxWSsw6YenXzPy1F7r5R92HR5kWBhXx64ab41eypMRK+6dejr2/3vpi2dhr2Hd3n+l2vcLkgQktVQkFlkQnNm3TppOZ521G/A8+9/VyLeRIp9jq9Qv1Q6XGrcn5Zw0A1jFhHltxku2ZPjXQYsi66/Tx1Q7BFsqYbyh5kjrYX9md74PgBUvsvE32W/eSCmpZgYm5k33NfGTzZObkhW/81FFz+Vfz2+YFGPaYc1sswDNPx6DCe045Obi7NOO12/n402P6dmZ6J2//5dizdsjSlhWYs3HbOTfQW9VMqpwyZglhtzNMwBbw9iUGElupWA6ZA9Ua4tW9wotI+QuRtoninTLT18UPWC6jbdklVlvxku/y6ct+NDpX58Xs2upWHdUOwVVsbUdY1HW+jExnvq31NjDfFsfj1xSnps+xX0Ijq1TVRpEn2PfeVwc4nmyOFHphWjUhkoNKYGIZhmDOHDpFzeibw2GO04w6sv6U5x6ckvwSJRAIlfyyRzq0LEkpOqp+STcnjUzUMUpl/tXPmTkQjUSN5cpQcvd6R3iiOFQsNNEqOKiXPzkImF042p1MGGUPJhDGsIksi2QZgdH5Ez4aSZ+wHdQ78cka9ZE3GKHO7nqkcbdVcT+vedNbCeFMcmz7aJLwOIJZ/a43aOHUjopGo6zGm8tdl3nNq/rGp3t0MwzBMx4aN0xQRiSRzafwoLAR6dE8qcl06dUFxrNjXe+hE18thR1SgRMUIsRSx6a9MFyqVfbv3JY3TqTwHqQjZlez6xnrkLM1RUmi9zi0qDAXQKwmb2rBQLS7lVkhF18Mss6FgwhiWlSWqwVQ4sNDI/FCejWohH5n3RNW404ly0DFsnWNXLXhlobohY83bwpqFwmsANPkPp4URTgsrb2bKFKaivue6BjzDMAxzdsFhvSlk7VrvdjL2IhB+Sq4fuiF8FtQ8LpnwULdzumEpTtY1ZcPlgg4tte7FVDihHb/wYWr7Bic6GxY6YZdB9KmkFn6pmFiBvAvzSD1u/ZCVpVhtTCo8U2d+ZJ6Nl1xZLWR03hOdd0EnysFE+L6JglcWsqHyMoW9ZMPTZebGHs6skr9Ofc9TkRrBMAzDnBmwcZpi1q5NtpWZOxfYsQMYkBPH92bW4O+n9iNW+2XBoVTm1tmRVTYpyolKhdXPjn+mbGQGqQiZVGjd8FJyK96vUBqvzoaFjHfKktsgK6ZSjMXy68px+MRh5CzNMVIkhypLldsqMf2V6aRzmohwkM1T9JKrqu1Vyu+J7rugk7tuInyfOoex2hjCaWFSNWyKoSaz+aiyoZbK/HUZVHLdGYZhmLMPNk7bgEgEWLbsS2/iMxUtFcOJl0wkn8tkWFQQhpeOFzg/O1/ZyAxKETKt0Lqh077BwsSGBdWIqvqwClPXTG3zFkdlV5UBgHGvtkiWZDdfTEQ4qHgO3eRK5z2ReRfGDBjT6u86UQ4mirJR59BZYVlXtmU2H1U21ChzE41EURwrFsqsc923xq+6pjpl8NgxYOpUYNcuICcHWL4c6NGDfDqGYRjmDIRbyQgIqhS9XwsFGUPORAsQiyDasFDPaeHWNiFVfQyduF234v0KUmuOaCRqVKEVtW+wY6rFjeyzC2IMXrg9GwDaLTlUxkFtb0O5PlXWg3hXZcdCbVMTjURRfl258dZGui2eVOVbV7ap8zYvbx6K84uVZNVvbhJINId0y1CSX4Lyd8qNbUINHw689Vbrz4cNA7ZskT6dK9xKhmEYpuPBntM2gOKhTAulIZ7wLkZh5daJKq3KEEQbFhMVVoPIX3SDkn81fSgtdNNZkEQ3PM7Py+TEVB4XJcczHAq7yqnI06674eAmE7I5nyaQDcEXVbGm9uw04Tn0gzIWqge4vrHeeGsj63tuXvTM9ExMHzodJ06dQKw25nkuiny7oRu+T523MQPGKK/tQeSvu31HdV3zMkyB5OfDh5szUBmGYZiOBXtOBQSx8yqzY+8V7haER4o6rtKxpejXvR9JiZS5V5NeYFkoBZvsXof6xnrpUGUTnjsvo2H60OnIjeYa9yyLPDAUnN47GSNMBqpHasX4FZgyZIrydVSuSfEeekVSAO7vu67n0Im1YVD1YRXK3ixr9XfneWW9+aa91s5xqxT18ZpDKiqeadG8mZwrnegPKrLjPXYM6NlTfN6jR/VDfNlzyjAM0/HgVjJtANWbWDSyKJB+kV5QeiKGQ2HMXj+b3DKCcs7ekd7YOHWjkXYjKni1k3BieUwsZHtHyrZ3cWvr4Na+oXZWLeZfMZ/Us1QWvzYZRSOLSOewy7uJ1h1eBNXj1sS5KiZWeMq2as9Okz1l7e1g3AxTt7HYW4SICLIXs+VF92q/JWp75DaHXn1CnVgVb6ntV6zxUluryJ7b7VrOHrMm5R+Qf7ZTp9LOSz2OYRiGObNgz6mAtvScVk+rTkkVVDuyngSKl0bWw5PKHFOZnEE7bvlXzjxTLyieu6C8ixS88jmdn9XsqZHKexTNta7HyKRHiiqDJq6pmz+q+76oVNO2j8WqVGxK
9lXQlS3nHMab4ihYXiC8rk4epijXNqg1QNbjTZUL6rP9+teBrVvF5xsyBHj3XdKlPWHPKcMwTMeDc07bAJl8sVTnW544dQLF+cUof7sce49+qRSp5hYCcq1dUm2UyeYMWuRGc1E7q1ZJoRV5LoLqo0ohyLzHoCsd61R/VQ0NNdFXVzfXW2eNUK2mbR/L+EvGo1eXXkZkXxXZ1jpOnHMYb4orV7z1e0+dRvCumbuwee/mVrIe5Bogm79OzVGlPtucHJpxmpNDOh3DMAxzhsGeUwFBV+sFzOSL6Y6lVVGRnpm487I7kRvNxYHjBzB7/WzheUT5VyIPj0renS7UnEEnbvdqwosm4wEC9No6OAk671EmP1On0rFs9VdqvrHzfnSuaScVlXd1ry0aS5B5lBTPcBD5xjoVb93ul7rxE3SEgf3+KPnrAIw+W845ZRiGYfxg41RAkD9uOgqtF7IhfhSD5P9v77zDoyrWP/7dBAIJEEIgQCCBUEIMGHrnBoJEmmI0dLgIKkEUMFGkeGkJqBelVwsiQX8UEUJRKQImykWkSQkQMGAixUgLPaFkd35/rLtuOWVO2d1E38/z8Gh255wzM+ed2Xln3vKg6IHLA824a0Hm+MxFBxZxKd689XBXegu90zpo6X9eOXZn6g7ecaDEpFWpaSjvZoE7A+Q4onRzRk4O9N5w41XoXKXgi8k272mi5XlKNn70bouUXCodJ4A+71YqWi+gXzoZUk4JgiBKHqScyuDqHzc9/SuVmsTyKiQr4lZwmexpOdlx9+kRz2mZI7wLMbWbDkaTEckZyXh7z9vcdVJTPyHc4feoxNfNEU/nKLXFFSeYei7+lcwpSiOHy9VFzw03JQqdu09ueSPero5fjX6N+ina+NHzFFhPNwk171ZKFinPKUEQBCEE+Zx6GCX+YlI/9Gp8lHj9tAC4NKci4Jocq2KoCQAD8OcPVZO3UY2y7IiW/ItK+l9MDuXkWImvmyPFIUepBT1k0BElftlSKFVGlOT65KmL2pyljvDkgraVcz18f8XkWki2lUSGVuoPq1fUab39VpW+WzlZPHDAbOI7ZAhw7pzZx/Tzz7Wb8hIEQRAlG1JOSwhSP/RxEXGKFnIWeBfZV+5d0bzwk8NdaUCUBIDhyR+qZEErhlplWQi1Shxvv24+sxnjd41XfRIjpoTxRjrWUzFUey9XBfXRqtipUUZ4NgyS2prnDiWBqbRuuKkJcKRFwddbqbfdsFt3ch1PV1jlUWmgMSGUKve88L5bXlksXx7YuJH78QRBEMQ/AFJOSwByP/TJMcmqIlUqUQhjwmJ0OdkRQ48FGQ+8p2Wzn5yNpLZJik87lZrMKVGWlZw2KlW8omtFI6RCiF2EZiG+OPmF02dKT2KElDC9Ih0rQem9eJUCLaeGaiPvalFGxJQ6XnNctW2WGj8Pih7IXg84y7laqwU9lXrHDTulG296nAJrjV4MaPOhdoViTBAEQfwzIOW0mMPzQ79g/wKuezku5JQqhHqZ7Fna5XgfV5/OAvxK27v/exd1KtWRzd2q1WROiWmpK9I6WPD28kZCiwSuezuiZsGpJnWH2s0JsUW2EpNWHhn0ZG5arcqI2rGtts08G248CMm50pNbtYpUXEQckmOSsWD/Aqfo0rZKvZqNN61m3lrdJLTIsh6KMUEQBPHPxcvTFSCk4fmh5zGHBJwXcpYdeuCvxbcFscW4ZeE3MGogYsJi4O3lDaPJiIzcDKzJXIOM3AwYTc75UG1Jy0pD2IIwdF7ZGYPSBqHzys4IWxAGwBzkpKZ/TbvyIf4huqWR4VXa8gvz0WddH6RlpTl9J7egBYCk7Umy/QDwLyInR09GTmIOJkVPQoh/iNP7smCAAaH+oaqUuPDAcMXXWLBdcKpBjSzyICZraVlpks90RE4GLcqW41i1KFtCcqQnSn2Ghcar0NiWQm2becbPssPLEFLBNXJuixJFyhaLXE3LmGadfwN9A5ESk4KcxBw7OVEr2/GR8chNzEX60HSsjl+N9KHpTvcWQ4ubhFZZdmf8AIIgCOLvBymnxRzeH/BA30BVCznLDr1ahVBq8S9WXmrhA0D1gowHyymGnDJiQUjJVLugFYJ3Edmlbhe7wC+AvkqckrpIoWXBqVUWHeFZZEs9MyUmhUsG9dysUAvvu8vOz1Y0XsXQ0mae8XPxzkUktEgAoL+c26JGkRKTqxuFN5CckYzNZzY7bQDERcSpkm0lGwa2zzSajKhZoabi3wQ9ZNld8QMIgiCIvydk1lvM4f0BT2yTiOSMZFUmsVpM+pSYtioxodOav08MW38uOcTMz/Q8Gbh67yq8Dd4wMuHFnitM/vQwcxVD64LTE9Fe4yLiULFMRWTkZgAAYsJiuE4OLRQHM0Ye09FA30AkZyRrNkUHtLWZd/yEB4a71M8dUK5I8cjViK9GIHFbop3/tsUkNjcxV7fUYbYImeFW9q1srRPvb4Iesuyu+AEEQRDE3xNSTos5vD/0k6In4fGqj6teyCkNxKLGV0vPRbwWnyiLcpfwVYKqCLF6pnrov76/rCIoZvLnCh9BtalebBecngoKZAuvrL2z5x0s+3mZXdnUY6mK/ET1SMOjFbkgOpa/9QpSo2WDRmkgNrWbFTx9rVSR4pGr64XXnT5Xm76FB7FNQltzY9s6Sf0m6LHxpkdAJ4IgCOKfCymnxRzeH3rAvAiZ2WUmrhZcRZBfEGr619Rt8euIGkVTrxNHPYIRxUfGo2KZiqoixLo61YMFb4M31vZeK2vy53hfrblwxSK3Dnh8AGb/OBsAROVw85nNHgsKZAuvrAkFgFKqSCg1qXVV30idqMsF0lJ6uqtlg4bnhD7QNxBGkxFGk1HVZgXv5pVSRUqt2bqrotTybBL6lvbFrj67cOXeFVnlXq+NN73y9hIEQRD/PAyMMe3JFf/G3L59GxUrVsStW7fg7+/vsXoILbaC/IIwuPFgVCpbCR8f/hiX7lyyfudqhWBN5hoMShskW251/GoMjBoIAMjIzUDnlZ1lr0kfmi6Z3sBxgW+LRTE8O+Ysfrz4o+SpieVeckpmTmKO07UWRQ8QVtTkFBs9+sIRuVy4PP1maauYkiv0DEvKEQCCyi9vn+gJb/+KIfXuHeGRI0seV3f0jdC7W3dyneLxKvcMtWMHEB8/jqiZx8Q2YaT6WkqubctqlStA2ZiWQ8s8IiQnADS9V0dcZSnAS3H5/SYIgiD4IeVUhuL042b5od98ejNWZa7C1YKromVdrRCoWRRpXdAqeW6QX5Bd/4gtcrUombwLWiHUKPdSyC3Ik2OSudLE8CycHRec7UPaY8/5Pei3vp+ombTSRa1W5GSNF15FQkqOGBgq+1YWNPe0lHH1gt9VmyFaNmiExo8jSucx3s0rob7m6Vc95Ip3TPOgdh6R2sgCoOm9iuEJRbU4/X4TBEEQfFC0Xg9QWAiMHg1062b+b2Eh33XeXt7IL8zHgv0LJBVTwDmyotJ0L3LIRb0VigapR6RZXrM6x/4RS4OgJUKsp1I9OMITYVNtLlwhbCOI5hfmo96ieoj9PFbSf1drqhml8Mg
aD479ITaO5KL+iimmgLK+URod24Ka8SqH1ujKlvGza8guBPoGCpZRGu1YSyRtnsi4StIPicE79nnmbDXzCE/EdL1TeqmVW4IgCOKfB52cyqD3zuuzzwKbNzt/HhcHbNokfa3cqYAYKTEpTkFf9DD7VXtyouXEUYtZndZTEz3R4xTZcp9FBxbh9R2v61IvNSdnSk6Q9Dw14nlnYrIm54NpwbY/ePwYXWlSq8ZcVeh6QHq8Kh0LWseOnqe6elskiCEoCxVCUFhUKGi+DSg7Ief1mVU6jyg5WQagy5yoVW61QCenBEEQJQ9STmXQ88dNTDG1IKeg6uHvZEGvhYFaRVPINFTOR9RynVazOj19vrTgDrNIWwJ9A3Gj8IZuvmRqNkr06nsl0Zr18K3TssB2p6+13PuTG69aomCrRYlC2a9RP0mFyRXmy2IIydXmM5s1m8QqlTUl88juX3dzBYHTa5zqJbdqIeWUIAii5EHKqQx6/bgVFgJ+fvLlCgoAX1/h73gXcbzotTDQenKidEHMG0xFDJ5FrrtQq9yrObVMiUlBckYyAGULZ6H3u+f8HkUbJXouQvU6iVFykqhlge1OX+tdQ3bB28tb1m9SLNiVJ064eNvGYwGipK8BfU4GHdFiGaJW1niemZaVxp0+Sy8LB3duFghByilBEETJg5RTGfT6cRs9GliyRL7cqFHA4sXC3+l5cmoLz4LWVahdEItFL5bzxQVcZ+asFjVmlEpOLW0XtEKpXqQWzmIbB30i+2D+/vnczwf0UW70PonhWdTrscDWekrOuzFliQpsgVeuPWHu6fhsvaId8/Q1AJeeEKvdsNM7+q5cGikl91eDu8ysxSDllCAIouRByqkMev24desGfPutfLmuXYEdO4S/U2LS6pizTwq1C1qt6HEi5WgaXG9RPY+k9HCnz6qSTQqhdvHWVWrjQMmJLe+pEQ+uOImR6w+9Ftie8LXWO7WRJ/zX1UQ7Lo4pj3jGnSuUOSWbWTybO0rmOjo5JQiCIJRSytMV+KcQHs6nnIaHi38nlTDekRD/EO6gL45mXpaoja7OS6kksqbQwsUSXdMWsf6x/VsqYX3S9iTERcQpCpgkl19Ub6WVN2KxpR6Oyo9QvzkiFwHYAAO8DF4wMvEIqoG+gVjXZ51o5FM18LZdSR/J9YdekZXjI+Od5MHia70mc42kfFii7Sr1tZaTawu8/SU0n+gxX1gi/wqNI7l5TGieEOprW19jNXOAFnhdF/SM4m1Bbp51RCpiulIXDDm5tSjDSqJEEwRBEH9vSDl1E7Nm8Zn1zpol/b3UIi6heQLCA8PtFmLLfl7msgWtVlyhaOi9yAX4cgI69u+l25fQe11vpxMfPU6ZeBem87rNw5jWY1S9P56NA4tiKrQRAADLei1Dl7pdFD9bCt62X753WVbh44VngV3TvyaMJqPsM20V4bSsNNRbVI9roa9kY8oRuU0eQJmyI3R/PeYLMYVy3cl1XNc7zhNCmw4ZuRmaNsSUYJuXWsgEXkipd4Uyxzt/BvoGYlmvZZJ5ncXmOrHNCSm55U0fRhAEQfyzIOXUTfj6mqPxykXrFQuGZIvYIk7oB96VC1pb1Ji1uuKUANB3kSu3IAv0DRQ9hQHgZIqoxykT7wJWrWIK8C9ok9omYf2p9U4Kll5mvI7wnCB6G7ztUuto3RCQW2AzMBQ+KrSLgir3TDULfbGNF0ezfDGk3qnak1kLlvkiIzdDk/+6kEKp5zzhig0xIXgiaQsp9a5Q5nj7b12fdaKbSTyWFGKbE1Ibhq6aJwiCIIiSC/mcyuCePKcMHZ68hu+3B7pkB1loocS7oOXxbVKbfoLHh1ZP01Cl/k9q06XIoUfkWiUBdtRsHCjpK0v0XncF1FIardmVaZPEfCGlnqm3r7XRZNQlPYic3ycPrvBf1ysnMOAeH0g1kbQdn8fjn8w7rt0ZLVouHZK7I6STzylBEETJg5RTGVzx41ZYCDz30llkHL6IBxVPAE+OBXweujQQkasXtGqDi/AqGp5Y5LoqOrIFrUFAeNNHuGLjwNX5CcXqZJHh7PxsLDu8DBfv/NUub4O3qA+s2vpK5eOtWq4qhm4aikt3Lil6pt4Kkp7vSkymeP3XhZ4N6LMxoDV/KOB6uVa7oSW0CSgle9n52fj48Md2sqcm/Zbe0aKVRt0tLATGjQOys83xFmbN4rMe4oWUU4IgiJIHKacyuOLHTc98gmp3o/VYpOmV1oPHBM4Ti1y988o6okf6BDXpI7RuHLg6qqlYXRxlpGaFmhjRYgTCA8Nx+d5lO1NeMZRsCMgp9mqVTFcs9PV8V0IyBYA7Urgjem1kyG3GKI1CDegv12o3tHg2AbXOke6IFq1kfAlbEZndWzZt4rqFLKScEgRBlDxIOZVB7x83PfM0qj0Vs71eyyJNzwWL0WRERm4G+q3vJ2pu7K5FroXicnKqZgPClRsHWlPDKG0Pj5L9oOiBrgqfK5/pKtNSV7wrx/srMad2RA95F/tO6Vzoqr5SuqHFMw6VmAkrNQl352amLWKKqQW9FFRSTgmCIEoepJzKoPePm14LU71OX9Uu0owmI5IzkvH2nrdln8GrELgqAb1Y/eXK8yzIAn0Dcb3wuiK/PHdsQOi9caCXr5jS9vAq2SviVuhipu6OZ7rStFTru5K7vjj6r6udC13hA6k1B7FQHdWYCbsib6heJ86FhYCfn+V6g2i5ggLtJr6knBIEQZQ8KFqvm9EjWqSWyImOKIn8a4HHxMwW3miRSvrG0f9Qif8VwJfrkydy5se9PgYA0UA5WiJuqonoakHPqKQ8fcWDmvbw5sIFoFsKDt50I2qf6cr0GlreFY9iKDRf8Pqvy80DauRDy1yol1zboiTqMU+0WqU5Si1ojTYshF5Rd5976SyA+rLlxo0DFi9WW1uCIAiipELKqZvRIy0C74J90YFFqFaumqzCqWSRpsbEjDcnH2/fZOdny54m6JGyBeBfkAkp+JvPbFa9kNO6AeGqND1qUduezaclbP9suHLvii4KX1pWGhK+SnD5M4tbeg0liqHjfGE0GTVvDCiRDwDWsXb53mW35S3lgScfbVJbczssAY6kcuOqVTJdNa7VbGbakpaVhh0H/MCjnGZna6wsQRAEUSIhs14ZXOFzGjqrAfI2vA7k1wcCz1qj9QJ8Jn1qAvXoGe2WZydfbSAWHjPa/MJ8XfyvlKDFV0vNdbzmgbuG7BLMTVjcou2qMTNOy0pD73W9ue5vuU6LL6HSFCB6PNMT6TWE6qDVP9ld/uspMSlY9vMyVdFw+zXq57a+lpMJXvNlpX7vnoiibUFOlq1ytmYCcHC07P1GjdJ+ckpmvQRBECUPUk5lcE2eUwZ7XxsGRGyCYaB5Ia7XQs4Wd0eiVBtcRC7XolheSSk8kYtTCh6FhHcDItA3EMt6LXNJ+gg9URqhVslGSKh/qN1i3BUBpGwRUgA8pWTq8Vw9/eDVKumujowtpNS6MnUXIB28iddHlicXtNT17oJH2bbK2UMf4N371lo7Y/591NvntFy5cnj06JG2GxIEQRCqKF26NLy9+d
YnZNbrRv6KUCjwg3zmWZRZvxWr1hXILiyU+DVZUOqLKgSvidnk6MlIjklW9Qwpc0e1uRY3n96MIRuHuHVhKgbviQmvWV5+Yb6o+bI7TEd5lSOlZsZKfO0cTWfV+BIq9e3T45la0Rqt24Je/slaTD5dZYZqsbZIzkhW7LutVfEXkgml5u08ZsIWirtJuFV+fB4CEZuAM8/Cooj+hfkecXH65Ts1GAzIz8/HpUvC+YgJgiAI9xAQEIDq1avDYBAPhgfQyaksep2cmiMUSpUwv4atp77HzSL5xZCWtA5qIzm6KgWGEEILw3Un1+l2uuKpXJ2uOjFxRfoInvbwKkdKzYx5T9KS2iZhXrd5mtuix0m1O7C8y81nNmP+T/Odvlcj17zjenL0ZHSp28Ulp8JK5J0XHmsLsbGjl+LviNo5VKw+Cc0TEB4YXiJMwvec32Pf9jVpfyqo9spphyev4X/fBulSv9u3b+P111/HG2+8gerVq8PPz092UUQQBEHoC2MMBQUFuHLlCgICAhAcLL0hTcqpDHopp6NHA0uWcBRstRh4agwAvhQKSqLmWlDrf+VpP0Y15szeBm8YmVHwO3f5Z6nN4arWB9IdqEnfocTM2J0bIUqeJ+bj6w54x7tSuVaqGLrK6kDOpF8pof6h3NYWjr7OeqTpEkKpebstxcE/2bE+iw4swus7Xpcta3GtcJKzhz7AzjnW+As1es/H+XFndGvXjRs38MMPP6B9+/YICtJH4SUIgiDUcf36dVy5cgUNGjSQNPH1cmOd/tFwRx7M/yuKocUsKi0rTbBofGQ8chNzkT40HavjV3OfIFmi3XZe2RmD0gah88rOCFsQJvocCxYTM+CvhZoFx+ikFoVsTeYaZORmwGgSVhCVYDFndny2EJYyYoopYB/J01WkZaUhbEEYYj+PlcwFKVQXi1luoG8g17NckT5CCDnTRABI2p7k9M4t7anpX9Pu8xD/EKcFv9y7NsCAUP9QRNeK1kXWeJ9nUWBcId9SWBQmno0opXItNa6FkJuX1CIlHykxKVz3mNdtHlbHr0b60HTkJOYgPDCc6zrL2FEr27xoiaJtMRMeGDUQMWExLjm95pVpy7zGo5gC5v4VlDOfh8BTY2AY0hOGp17Domfe17VdRqMRPj4+8NXLRpggCIJQjd+fJqRy/v/kc+omwsOBb7/lKBh41vq/PH6itn5NRpMRc/bNkY12q8X/6kHRAyTHJGPZ4WW4eEfYj9FVJnFK/a96N+wtaProiKuUOqUnn0J1iY+MR8UyFXXJI6kXvKmMhNJ38Pol8uYCFUvXo1TWlOQedZV8iyGlMEmhRK7F/JOF0MN/XaoeQvIBAMt+XiZrtTGm9Ri7+ujt62yR7YzcDHh7eSs+xZSLF6A0/ZZeKJFpNfOapX89kULJYDCQKS9BEEQxgHcuJrNeGdztc4r/lLWmlbGF13xRS7RbJf5XNSvUxIgWI5z8nVxpEidVHyH/KycfJxFcYQ6rJPqrXF1caU6txlRQi2miUqQiwAJQJWtSbeZJAeJq+XZEjTk7wCfXjn1hyb25+9fdeHvP27o8Qy/URJ92la+zJaWVBSWbE8UpirZtfZT4wvPOa2Jzk7tMlK9du4ZDhw4hOjoa5cqV0/3+xZVhw4bh5s2b2LRpEwAgJiYGTZs2xfz58zXdV6/7uIOwsDAkJSUhKSnJ01UhCOJP7t+/j5ycHNSpUwdly5YVLUcnp27C19ccgdAcrdeRPxcFEZsEFVOA/xRES7RboRMvsYXL73d+R3JGMtb3W293cqskEqUcYgsY3tM3T55SKI3+KlUXJad6SlB7AqjFNFEpUidpYQvCFMuaXJulZEtv+eZF6ck+r1xL9UXDoIYuqZsWpUTNqZvSsaMkSrYtcpYnWtthQW+lTqlMK5nXpOYmT0S3JvjJyMhA586dcePGDQQEBFg/T0tLQ+nSpT1XMQUcPHhQ0YaEWJsJgnA/pJy6kU2bbNPJOBCxCRioXSEAxBf0606u47qe1/9K6cJFytzTETklgmdxo0Wp07oIVLJo51Ew9TaH403/IITeSr9cXwu964zcDMWyxttmMdnSU755MZqMuHzvMnd53s0Kub5Ijknmep6SeUkPc2g16WqUjB01aboA5ZsTatrhCnNyJTIdXSsau3/dzX1vT6W2IVxHYCBf/IPiAAXAIoiSCwVEcjObNgEFBcCoUUDXrsDIV4wInlkfhoG9BcvbBn4RQyiQhVDwDFf5X1kCr+iVL1Es+IuaQCxKgvDYPl9NwChblCzapepii2MALEvQF6WLP61BX5QExpJDbV8rlTU9At3oJd+8KA06A/DJEk9fLDu8DCEV+AJS8aDnmLad2yzm+3JBfHjHjtLgULaoCUTFG+BIz/6zhVdWN5/ejLAFYVym3oA5MBXP3FRYaI5k362b+b+FhVy3dzvuDoBmMpnw/vvvo379+ihTpgxq1aqFd955x/p9ZmYmnnjiCfj6+qJy5coYMWIE7t69y33/zz//HC1btkSFChVQvXp1DBo0CFeuXAEA5ObmonNnsxtBpUqVYDAYMGzYMABms15bM9kbN27g+eefR6VKleDn54cePXog2yb6Y2pqKgICArBjxw5ERkaifPny6N69O/LyxOUuIyMDBoMB33zzDRo3boyyZcuibdu2OHHihF25DRs2oFGjRihTpgzCwsIwZ84cu+/DwsLszI8NBgM++eQTPPfcc/Dz80N4eDi2bNki2+b169cjKirK2texsbG4d+8ed18TBKEcUk49gK8vsHgxsGMH8MFSbyx+ZhYAdQt9JYt7JRFQAeWLcT3MPV0RLVOJUqfXIpAnsnCgbyB2DdmlSMFUsqAVW1Ap3XQQQo3S74iWvnb1Rosez9QCb3Rei3wltUni3qzg6YuLdy4ioUWC3TMcn8m7AaFlTEspBVJzH++GnRBisu2pqNmujCDMK6vz98/nMue1/IY4BqayxfJuWj5xAX5+DEuWmIMFLllijsvw7LNKWuB69NisVMpbb72FmTNnYsqUKTh16hRWr16NatWqAQDu3buHbt26oVKlSjh48CC+/PJL7Nq1C6NHj+a+/6NHjzBjxgwcO3YMmzZtQm5urlUZCw0NxYYNGwAAZ86cQV5eHhYsWCB4n2HDhuHQoUPYsmUL9u3bB8YYevbsaReJs6CgALNnz8bnn3+OH374AefPn8ebb74pW8dx48Zhzpw5OHjwIIKCgtCrVy/rfQ8fPox+/fphwIAByMzMRHJyMqZMmYLU1FTJe6akpKBfv344fvw4evbsicGDByM/P1+0zXl5eRg4cCBefPFFZGVlISMjA/Hx8aBQLQThWsistxig1mRTqWmmq/yvLOX0MPfUw3RSzEyUJziMXj6FPH29rNcy2ZyZas2LpUwAHxQ9kL0ekF9kqzFNtKC1r5XKmh6nnu7yYVYSnVeN6SRvX4QHhutiSq52TEvJMCAcDOvS7Uvova63U+A3PcyHjSajR6Jmu9KcnMeMWSpXtC28m6mJ2xNx8aOFwJkQwTKbN5sV1D9j+XgULe4Parlz5w4WLFiAxYsXY+jQo
QCAevXq4V//+hcAYPXq1bh//z4+++wzq0/l4sWL0atXL7z33ntWJVaKF1980fr/devWxcKFC9GqVSvcvXsX5cuXt5rvVq1aVdT/Mjs7G1u2bMHevXvRvn17AMCqVasQGhqKTZs2oW/fvgDMivCHH36IevXqAQBGjx6N6dOny9Zx2rRpePLJJwEAK1euREhICDZu3Ih+/fph7ty56NKlC6ZMmQIAaNCgAU6dOoVZs2ZZlWwhhg0bhoEDzUH63n33XSxcuBAHDhxA9+7dBdt87tw5FBUVIT4+HrVr1wYAREVFydadIAht0MlpMUGpyWZxyzUJ6GPuqVWJ0LLLrcfpmi1aTxfVtkXuRDI7ny/pLs8iW23uRaV97XgSBkCRrOlx6qmnObMUvEFn/Mv4Y07XOYoXx0r6Qg9TcjVjWk6GR3w1QnLuc4xIrtV8OCYsBjFhMYrmQyVInRC70pycR6Z5FFNAfl6zvtNrV4Azz1qfIsTmzZ438XV1zlsxsrKy8ODBA3TpIrxxmZWVhSZNmtgF++nQoQNMJhPOnDnD9YzDhw+jV69eqFWrFipUqIBOnToBAM6fP6+onqVKlUKbNm2sn1WuXBkRERHIysqyfubn52dVTAEgODjYakIsRbt27az/HxgYaHffrKwsdOjQwa58hw4dkJ2dDaNR/H00btzY+v/lypWDv7+/ZF2aNGmCLl26ICoqCn379sWyZctw48YN2boTBKGNEqOc5ufnY/DgwfD390dAQABeeuklbh8Lxhh69OgBg8FgDa1eHFGy0NeiSMVFxCE1LhWToydjcvRkUdNSNYtxrQqZFiVCq0muKxaBahf3atviCX9CNSjpazElHYDLNloAYYVBD3NmnjbzcPvBbfRf31+xeaGaTSfHeUmJD57SMS0nwwxMNB2WGHooE67anJDbhHK1ObmUTCe1TeK6x+ToyfybqTvnwKyUSvv0jhvH9WiXofdmJS++vr663s8Ri1mwv78/Vq1ahYMHD2Ljxo0AgIcPhbMFaMExuq/BYPCYWaxQXUwmk2h5b29v7Ny5E9u2bUPDhg2xaNEiREREICcnx9VVJYh/NCXGrHfw4MHIy8vDzp078ejRI7zwwgsYMWIEVq9eLXvt/Pnzi1US7sJC8w9vdjYQHg7MmmX2Q1WCWkVKyFQu9Viq1eTN0Yw0LiJOsWmfFnNPtaaTepjk6uUzq8ak2PEeatvC60+YEpOC5IxkLvNuV8Db19n52UjOSJY0q8tNzJWVNaUm7XKRUdXKNw9KlQyl6Wu0piZSGjVW6ZhWmoaJFz2iKXsianZcRJzLzcnFZHrP+T2Y/9N82eu71O3Cv5maX5+rTtl8Bh4uw90B0CyEh4fD19cXu3fvxvDhw52+j4yMRGpqKu7du2c9Pd27dy+8vLwQEREhe//Tp0/j+vXrmDlzJkJDQwEAhw4dsivj4+MDAJKnkJGRkSgqKsL+/futZr3Xr1/HmTNn0LAhXyoqKX766SfUqlULgDnw0i+//ILIyEjrs/fu3WtXfu/evWjQoAG8vdXNw2JtNhgM6NChAzp06ICpU6eidu3a2LhxI9544w1VzyEIQp4SoZxmZWVh+/btOHjwIFq2bAkAWLRoEXr27InZs2ejRo0aotcePXoUc+bMwaFDhxAcrK8vkBocU8lYAkHExSnzsVGjSMkthN5s/ybWnFgjuOjkUQBsUaKQOSp0c7vORf/1/RUtnPXwy9LqU6hXqgctbXG3P6FaePq6ZoWaWHZ4GZeSziNrvIoFr58ZzzPV+AwrSWeiVuFyl587oFwZ1nux74jW++u1OaFkE0qPPMdqUjZdvXdV0ueUVzG26/PAs8A5yeIAzBu3nsSdAdBsKVu2LCZMmIDx48fDx8cHHTp0wNWrV3Hy5Em89NJLGDx4MKZNm4ahQ4ciOTkZV69exZgxYzBkyBAuf9NatWrBx8cHixYtwsiRI3HixAnMmDHDrkzt2rVhMBjw9ddfo2fPnvD19UX58uXtyoSHhyMuLg4JCQn46KOPUKFCBUycOBE1a9ZEXFyc5n6YPn06KleujGrVqmHSpEmoUqUKnv0zWtbYsWPRqlUrzJgxA/3798e+ffuwePFiLF26VPXzhNp88uRJ7N69G127dkXVqlWxf/9+XL161aokEwThGkqEcrpv3z4EBARYFVMAiI2NhZeXF/bv34/nnntO8LqCggIMGjQIS5YsQfXq1bme9eDBAzx48FfAmNu3b2urvA2iOU5hHwSCZ0GrVJHiMfec9eMsp+9cGfgBEFfoxBRlsYWzHrvcWk6U9AycoaUtShZUMWExLj0BlIKnrxNaJGBaxjTReyhVzIwmIwJ9AzGzy0xcLbiKIL8g1PSvaddmPYNiqd2ssO0bXtQoXEqVLC19o0QZ1nux74gluJEWuVdqDSGEkk0orSe2amQxLSsN/df3l90g4VGM7d7pk2OBg6P+/EPcommW88+RW3FXADQhpkyZglKlSmHq1Kn4/fffERwcjJEjRwIw+3Du2LEDiYmJaNWqFfz8/NC7d2/MnTuX695BQUFITU3Ff/7zHyxcuBDNmzfH7Nmz8cwzz1jL1KxZEykpKZg4cSJeeOEFPP/884KRcFesWIHExEQ8/fTTePjwITp27IitW7c6mc+qYebMmUhMTER2djaaNm2Kr776ynq62bx5c6xbtw5Tp07FjBkzEBwcjOnTp0sGQ5JDqM0TJkzADz/8gPnz5+P27duoXbs25syZgx49emhuH0EQ4hhYCYiJ/e6772LlypVOzv5Vq1ZFSkoKXnnlFcHrXn75ZRiNRnzyyScAzOYZGzdutO6+CZGcnIyUlBSnz2/dugV/f3/VbSgsNIfJl2P14TSM/55vEWFRiAAILu5tFaKM3Ax0XtlZVd0tP8I5iTm6Ki5iCp2l/uv6rEOVclW4FpC87Usfmi67qBRayIX6h4ouAo0mI8IWhIkuNJX2n5a2WOoideoW6BuIdX3WcQUw0rqIl0Oqrx8UPcCgtEGy91gdvxoDowYqfo7QuNJLjuRkmzcg1sivR+JqwVXN9dEDrXK55/weXLp9SXRjwLaslAwbYECgbyCuF1532tiQwjIO53Sdgze+fUOzhYNW1mSuUSzfasajGlmUm9MAcxTftb3Xok8j+U0Up3e6Jk0kKBIDYFBsSSTGtWvXcOjQIURHR9sFEOJFyW8soQ8ZGRno3Lkzbty4IRopmCCIksn9+/eRk5ODOnXqoGzZsqLlPBoQaeLEiTAYDJL/Tp8+rereW7ZswXfffWeXhJmHt956C7du3bL+u3DhgqrnO8Ib3GHQyN+5g+AoCc6ixZTNFYEfeE5y3/j2DUTXinYKECUUjEVNwBsxlAYx0jtwhpa2SAVtsZBfmI/Yz2NlI/+6I7+fVF/rZVanJLiUHifwekX5jI+Mx8XXL6KKXxXRMu4IXmVBSd/YjtHp309H7fm10XllZ/x747/x+o7XMXH3ROQX5gsqVjyBhz7u9TE29NvgNPdV9q0sed2Axweg//r+mvMY64Ea+RYLmicWoEqtLPL4/RqZEVXKicumLU7vdGA8ELFJoKR+
iqkeuCMAGkEQBGGPR816x44dK2uGUbduXVSvXt0p3HdRURHy8/NFzXW/++47nDt3zmnnrXfv3oiOjkZGRobgdWXKlEGZMmV4m8ANd3AHgWARUiZzvKZ5epjK6ekL5orch3r4ZVmwLAItJxXrTq4T7Vu9A2doDVgjZgLoiJTJsZ5mymp83QB9zOqUmqLqoRDzynZGbga8vbwlx61PKR989PRHkqc3rg5eZUFJECu5Uzc5OeI1YxWa+zaf2Sx43dyuc/H6t6/rYrKtB3qZjUrNiYG+garmWVdFLrd7pwPjgYc+KPf9R6jHuiO6WXVVwQFdjasDoBEEQRD2eFQ5DQoKQlBQkGy5du3a4ebNmzh8+DBatGgBwKx8mkwmuxxbtkycONEp0l1UVBTmzZuHXr16aa+8QsLDzcGPZAk8K/ixlH8dj/+TkiArYvAuTnlMz7TkPpRSmPSOpMljCuqKwBlafcwsC6qM3Az0W98P+YX5TmXEFuTFwe8S0K6kG01GLDqwSNHiXA+FgVe2Hd+LWL/oHSFWLTx9E+gbKBhd2REeOeJRCoTmPqHr2oe0x9JDSzUHTdMTrfINyM+JiW0TueriKLOuCgYk+E5nFH9FTw8fY4KPmJgYj6WaIQiieFAifE4BoEePHrh8+TI+/PBDayqZli1bWlPJXLp0CV26dMFnn32G1q1bC96Dx+fUkdu3b6NixYpu8Dn98zX8pyzgI55rjMe/Tgwx/xk5lPhMusq3T4lfJwDNu9xK/LR4fOTU1k2rz6caP8Hi5HdpuY8SH2Cxa6SwHVda/czU+nfL3V+NLOjtMyzVNwwMlX0rK85B6mp/WS2yoBWe/lcj3xYzXrGNJ8D8Tqr4VVHls6xkTivOiqVWn1OCIAhCP3h9TktEtF4AWLVqFUaPHo0uXbrAy8sLvXv3xsKFC63fP3r0CGfOnEFBQYEHaymOr685XYxYtF4AZh8cCcUU0GaeK3YCE+ofigGPD8DsH2cDUG86qMQUVOkJVUZuhqJTDy2LXaUnh7wnIGLmhlKniFp37HlP8S7dvoSM3Azk3cnDqaunNN9bz9NXpWZ1YnIohe24EhsnNf1rIqF5Ah4UPUBGboZoHdRaKcj1i1JZ0Cu1kS1Sp7jDmw+XjK4shitTx2iVBa3P5ul/NfLNo2wzMGvwqWsF1xRZAuhxqksQBEEQaigxJ6eeQq+TUwti6WSeecaIn2Pcs1MttpuvZgff9p5KI9bynlClZaUh4asE0RMCW/Q49VB7cijVfwB0OUVUCm9bgvyCuE5YbJE68eJ97q4hu2T9LpXAE2XUFqlxZTtOsvOz8fHhj3HpziXr91KKnlorBQtaTxP1OrUWQ2gOWXdyHVf0WUf0ODkVqg8A3WRBKa7qfzXKdlKbJCzYbw5GpNQSQMtvQnGATk4JgiCKD3+7k9O/C5s2mU18x40zB0kKD8efQSC8kZbFt1Ptqhx9WgI/qAlwxONLp3Qx5s7AT47lxPoPMC+SPRGIhfcUT4li6gm/S154ooxakDsBsoyTtKw0QT9KqaA+YrId6BvItcliiXirZizynlo/Hf40frz4o6p5RGgOUTr29MoTKXZCmdA8QTdZUIKeVgO895Ui7rE4RNeOVuWzTMGACIIgCHdDyqkH8PUFFi92/pxXWdPbVM8WoUWnqwIcAdKLHyWLMT0TomsJBiLUf0pMkqNrReu6EJQyz1MD7yKetw8dFTU10YBtUWIiyrM416JoCMm20WRE7OexsnUTinjLO855N4pC5oXYbUponUeUmDPrpQxKuRIoMTHWM7iU2kjkWu/riO2c6O3lrVrJ1OpaIPX7IbxRq/pRBEEQxN8AUk6LGVLKmp7pPXhxR8RascWP0sWY3EKX9yRKrxQPFngVps2nN2PIxiG6bzyIbXqoMeXlXcS7yu9SDl45nNdtHsa0HiN7f62KhqNsG01G1RFv5ca5Rb43nNog2SYLju9e6zyiZCNED2WQJ4cnD7yywIsr0rAoLS+k/Lsr4iyvOfxnb8Xbubh8+y2wZAmKVZ5TgiAIwv14eboChDNCidbVJlNXgyUS5OvbX0fvdb25EtZblBHLosgRAwwI9Q9VdLLJuxgL9A2UXVCnZaUhbEEYOq/sjEFpg9B5ZWeELQiza4MFp4TxDu0AlJ348CpM8/fP5+prNcRHxiM3MRfpQ9OxOn410oemY163eVzXTo6ebL3m7JizCPQNxJrMNcjIzRCVN6k+lMNW4VMKrxzyKiOuymFrqYtj3SxjWek4t5XvxQcFzDI40GMesWyE1PSvafd5iH8IUmJSrHKUk5ijeSNN6eaVI0plgRfe8X753mXZcaTmvoC5v13lxy6F4zw7LWOanWIKmOe03s8ZsHmz8AbC5s3m2AyEMMOGDVOUcUAtqampTnniCfUYDAZskth1iYmJQVJSktvqo5SwsDDMnz/f09VwG7m5uTAYDDh69CgAICMjAwaDATdv3pS8riT1k5xMehJSTksISk5wtGC7uJi/f77oswD7RazeCh3Avxhb12edrGLaZ10fRYqf1CJb6aJPTmECAG+DcL/Y9vXDoofIyM1QtKC1e4bDpodj28ToUrcLBkYNRH5hPuotqsel4APifRjoG8j1XEsEYSXt9dTGgpoctmIKnFQqFqFxLibfalA6j1g2smzfkdBGSG5iLqZ2mmq34aYVNSeJjn+7IuIs73h/fcfrXONIyX0r+1bGriG7dFH+lcIrh+xhaeDMs5JlNm82m/wSzixYsACpqamersY/guK8eCfcS/v27ZGXl4eKFSsCEN+8OXjwIEaMGOHm2qkjLy8PPXr04C7vzg0rMustIbjKVMwWJcGH1AY4AvQ3r5UyVdPbZ1CND6hcWgYGBiMTV7w85SNoa76s1qRci9/l6zteV9VeXjnkQW8Tb9s6CsnWupPruK63jHO1QXJ47y+FnMm/K01IjSYjLt+7zFU2JSYFy35eplkWeOExb3Yc7zwm1TzpXT7u9TG61O1ifobOOW6lUCSHO+cAHNYU48YJx2b4p2NZHBOu4+HDh/Dx8fF0Nf5WGI1GGAwGeHmVzDMxHx8fVK9eXbZcUFCQG2qjDzzt8RQlU0r+gbjiBMcWtYtcoQBHjqcmtrv47jav1XriLGRirQap07Kktklc9xDzEVRr8svbvwA0mZQ79mFMWIzsCRCgvL22p3iBvoE4N+acqBzyImeezMDQu2Fv7Dm/R/NJtreXt+JxrtS0NciP74dTrh5qrBH0wjKHvL7jdclyFrPdSdGTJOckVyA23nksJKTkiNeiQ8k8qweK5DC/Plex7GwNFdIZoxHIyADWrDH/16jde0aS9evXIyoqCr6+vqhcuTJiY2Nx7949AM5mvTExMRgzZgySkpJQqVIlVKtWDcuWLcO9e/fwwgsvoEKFCqhfvz62bdtmvcZiovjNN9+gcePGKFu2LNq2bYsTJ04I1ic3NxdeXl44dOiQ3efz589H7dq1YTKZBK9bunQpwsPDUbZsWVSrVg19+vSxfidk/ti0aVMkJydb/zYYDPjggw/Qo0cP+Pr6om7duli/fr1dvQwGA9a
uXYv27dujbNmyePzxx/H999/b3ff7779H69atUaZMGQQHB2PixIkoKiqy68PRo0cjKSkJVapUQbdu3RAWFgYAeO6552AwGKx/A8DmzZvRvHlzlC1bFnXr1kVKSord/bKzs9GxY0eULVsWDRs2xM6dOwX7x5GioiKMHj0aFStWRJUqVTBlyhTYZnu8ceMGnn/+eVSqVAl+fn7o0aMHsm0GSnJyMpo2bWp3z/nz59vV3SI/s2fPRnBwMCpXroxRo0bh0aNH1jJXrlxBr1694Ovrizp16mDVqlVOdZ07dy6ioqJQrlw5hIaG4tVXX8Xdu3et31tO2bZs2YKGDRuiTJky+N///ofSpUvjjz/+sLtXUlISoqPFN3hv3ryJl19+GdWqVbO+46+//tr6/YYNG9CoUSOUKVMGYWFhmDNnjt31YWFhePfdd/Hiiy+iQoUKqFWrFj7++GO7MgcOHECzZs1QtmxZtGzZEkeOHLH73tasNyMjAy+88AJu3boFg8EAg8FglVtHuT5//jzi4uJQvnx5+Pv7o1+/frh8+a+NVcs7+/zzzxEWFoaKFStiwIABuHPnjmh/WPp206ZN1vHVrVs3XLhwwa7cBx98gHr16sHHxwcRERH4/PPP7b63tQywjKW0tDR07twZfn5+aNKkCfbt22dtv1ibpca5Wkg5LQYImcY54gqfTlvU+m9JBTiyLLoBc8Ta13fw+7Ba0Gpe644TZ17EFPe4iDhV93O1j6Clf/U2KVfrj8rrc2lZjNdbVA/5hfku21iwKBrzf5qv2+Jf6TjnldvRrUYjfWg6Lr5+UfM84k7/d0d4TUcdN6/02mSSQ2qDZF63eVwWEnLjiGcD0N0bB4rmz8CzXMXCw1VWRmfS0oCwMKBzZ2DQIPN/w8LMn7uCvLw8DBw4EC+++CKysrKQkZGB+Ph4SKWkX7lyJapUqYIDBw5gzJgxeOWVV9C3b1+0b98eP//8M7p27YohQ4agoKDA7rpx48Zhzpw5OHjwIIKCgtCrVy87JcVCWFgYYmNjsWLFCrvPV6xYgWHDhgmehh06dAivvfYapk+fjjNnzmD79u3o2LGj4v6YMmUKevfujWPHjmHw4MEYMGAAsrKynNoxduxYHDlyBO3atUOvXr1w/brZPeLSpUvo2bMnWrVqhWPHjuGDDz7A8uXL8fbbbzv1oY+PD/bu3YsPP/wQBw8etLYxLy/P+veePXvw/PPPIzExEadOncJHH32E1NRUvPPOOwAAk8mE+Ph4+Pj4YP/+/fjwww8xYcIErrauXLkSpUqVwoEDB7BgwQLMnTsXn3zyifX7YcOG4dChQ9iyZQv27dsHxhh69uwp+M6kSE9Px7lz55Ceno6VK1ciNTXVzlR82LBhuHDhAtLT07F+/XosXboUV65csbuHl5cXFi5ciJMnT2LlypX47rvvMH78eLsyBQUFeO+99/DJJ5/g5MmTaNmyJerWrWunJD169AirVq3Ciy++KFhXk8mEHj16YO/evfi///s/nDp1CjNnzoS3t3kOP3z4MPr164cBAwYgMzMTycnJmDJlipPp+5w5c6xK56uvvopXXnkFZ86cAQDcvXsXTz/9NBo2bIjDhw8jOTkZb775pmj/tW/fHvPnz4e/vz/y8vKQl5cnWN5kMiEuLg75+fn4/vvvsXPnTvz666/o37+/Xblz585h06ZN+Prrr/H111/j+++/x8yZM0Wfb+nbd955B5999hn27t2LmzdvYsCAAdbvN27ciMTERIwdOxYnTpzAyy+/jBdeeAHp6emS9500aRLefPNNHD16FA0aNMDAgQNRVFQk2ma9xrkjZNbrYXij4fKYdGnxn1KqnPGaMwq1zxFXmte6+sRZKUIRM9VGtgWUp6UQMvWT619XKPhqIwgLtdcdUaxt+2jz6c2Yv3++KtNMOZSOc1657d2wt7W/tM4jSjYr9EyNpMS6Q6vZrhqTWKm5fGDUQKzJXMP1bJ5xJBZ511U5VuVQNH8+ORY4OOrPP8Q3p2bN0lYnPUhLA/r0ARz1wkuXzJ+vXw/E63wAn5eXh6KiIsTHx6N27doAgKioKMlrmjRpgsmTJwMA3nrrLcycORNVqlRBQkICAGDq1Kn44IMPcPz4cbRt29Z63bRp0/Dkk08CMCtGISEh2LhxI/r16+f0jOHDh2PkyJGYO3cuypQpg59//hmZmZnYbBty2Ybz58+jXLlyePrpp1GhQgXUrl0bzZo1U9wfffv2xfDhxMN/ZAAAQFZJREFUwwEAM2bMwM6dO7Fo0SIsXbrUWmb06NHo3bs3APNp0fbt27F8+XKMHz8eS5cuRWhoKBYvXgyDwYDHHnsMv//+OyZMmICpU6daFevw8HC8//77Ts8PCAiwM39MSUnBxIkTMXToUABA3bp1MWPGDIwfPx7Tpk3Drl27cPr0aezYsQM1atQAALz77rtcvn2hoaGYN28eDAYDIiIikJmZiXnz5iEhIQHZ2dnYsmUL9u7di/bt2wMAVq1ahdDQUGzatAl9+/bl7tNKlSph8eLF8Pb2xmOPPYannnoKu3fvRkJCAn755Rds27YNBw4cQKtWrQAAy5cvR2RkpN09bIM3hYWF4e2338bIkSPt3sujR4+wdOlSNGnSxPrZSy+9hBUrVmDcuHEAgK+++gr3798XlDkA2LVrFw4cOICsrCw0aNAAgLnPLcydOxddunTBlClTAAANGjTAqVOnMGvWLAwbNsxarmfPnnj11VcBABMmTMC8efOQnp6OiIgIrF69GiaTCcuXL0fZsmXRqFEjXLx4Ea+88opgnXx8fFCxYkUYDAZJ09jdu3cjMzMTOTk5CA0NBQB89tlnaNSoEQ4ePGjtX5PJhNTUVFSoUAEAMGTIEOzevdu64SHEo0ePsHjxYrRp0waAefxGRkbiwIEDaN26NWbPno1hw4ZZ2/zGG2/gp59+wuzZs9G5c2fR+7755pt46qmnAJhlvVGjRjh79iwee+wxwTbrNc4doZNTD6J0h1vPID2OKFlc8C5ilQRqcZV5rd4nzjyn3Eqv1RLZ1gKvj6CYqZ9U/7pKwdcSQZjH51LvUzxvL29E14rG+qz1gt/LPY9XdpSMczXyrWUeMZqM2P3rbtHvbdl8erOupqW81h3zus3TZLarxiSWZy53x0aZuwLn2WI0GWE0GbkDnRl8HgERmyTLxMV5Pt+p0QgkJjorpsBfnyUl6W/i26RJE3Tp0gVRUVHo27cvli1bhhs3bkhe07hxY+v/e3t7o3LlynYKbbVq1QDA6fSrXbt21v8PDAxERESE06mkhWeffRbe3t7YuHEjALNpYefOne1MRm158sknUbt2bdStWxdDhgzBqlWrnE5uebCto+VvxzralilVqhRatmxpLZOVlYV27drBYPhrjuzQoQPu3r2Lixf/GistWrTgqs+xY8cwffp0lC9f3vovISEBeXl5KCgoQFZWFkJDQ62KqVAbxGjbtq1dPdu1a4fs7GwYjUZkZWWhVKlSVmUEACpXriz5zsRo1KiR9eQRAIKDg62yYXmObX889thjToFwdu3ahS5duqBmzZqoUK
EChgwZguvXr9u9Yx8fHzvZBMynsmfPnsVPP/0EwCxH/fr1Q7ly5QTrevToUYSEhFgVU0eysrLQoUMHu886dOhg7TcLtvWwKFi2bbaYt1vgfWdSWGTBopgCQMOGDREQEGD3zsLCwqyKKWD/PsQoVaqUVbkF/npHtnIv1C9ysmLbT8HB5t8iqbroNc4dIeXUQ6hdVMuZdKmFJxKkBd5FrB4+rFrRM3qrFj8uuWvFFIbi4CPoSpNytRGEeX0uLYvxjNwMTZGOLahd/CuVHd5xrla+1cwjlja8vedt0TK26J0aiXduqFaumupTQTXjxGgy4rVtr8nO5e1D2rvUNQNwvxuDRSZiP49FfmE+1zUh/iHYsJEhLk64H4pLntM9e4CLEnshjAEXLpjL6Ym3tzd27tyJbdu2oWHDhli0aBEiIiKQk5Mjek3p0qXt/jYYDHafWRQeMd9QHnx8fPD8889jxYoVePjwIVavXi1qigkAFSpUwM8//4w1a9YgODgYU6dORZMmTaypOLy8vJxMlZWap+qJmHLkyN27d5GSkoKjR49a/2VmZiI7O9tOufEEvH0qJC9KZCM3NxdPP/00GjdujA0bNuDw4cNYsmQJAHMwKQu+vr52yjYAVK1aFb169cKKFStw+fJlbNu2TVKOfHXapdLaZldSnOqmdN6QG+dqIeXUQ2jZ4XaF/xTP6V1S2yTuXJd6+rBqRY8TZy3KHe+1QgpDcfARdEWaIDFc5XPZb30/XU7wlCz+rfmCVfhaA/zjXK18K5lHlKarkQv8k7gtEbt/3e2SPJ+uCAonNU7e2fOOUy5Px2sv3L6AHy/+6PJx5E43Bl6ZEMtxu2kTUFAAjBoFdO1q/m9BQfFQTAEgj1N/5y2nBIPBgA4dOiAlJQVHjhyBj4+P9cRSTyynV4A52M4vv/ziZL5py/Dhw7Fr1y4sXbrUanosRalSpRAbG4v3338fx48fR25uLr777jsA5qimeTadd/v2bUEF3LaOlr8d62hbpqioCIcPH7aWiYyMtPpnWti7dy8qVKiAkJAQyfqXLl3a7vQNAJo3b44zZ86gfv36Tv+8vLwQGRmJCxcu2LXNsQ1i7N+/36ld4eHh8Pb2RmRkJIqKiuzKXL9+HWfOnEHDhg0BmPv0jz/+sGurJU8nL4899pi1Dy2cOXPGTtk4fPgwTCYT5syZg7Zt26JBgwb4/fffuZ8xfPhwfPHFF/j4449Rr149pxM+Wxo3boyLFy/il19+Efw+MjISe/futfts7969aNCggd3psBSRkZE4fvw47t+/b/1M7p35+Pg4yYbQfS9cuGAXqOjUqVO4efOm9Z2ppaioyC5AmeUd2cq9UL9oea5Ym6XGuVrI59RDKF3kuiMlgJgfYKh/qNV/Ky0rDfUW1ZP1kXWVD6tatPitavHjUnqtkC9ZcfAR1DM9ixSu8rl0PNVR6x/K+7zs/GyELQjT5GutBL3SHgmhxAqCNzXSxTsX7dIJ8aQJclVaHwtKxomtv/O0jGlc98+7k4eBUQNVjSO9029pnWd5ZCLQNxDr+qyT3Pjw9S2+6WKCOfV33nK87N+/H7t370bXrl1RtWpV7N+/H1evXpVUGtUyffp0VK5cGdWqVcOkSZNQpUoVu0jAjkRGRqJt27aYMGECXnzxRclTra+//hq//vorOnbsiEqVKmHr1q0wmUyIiIgAADzxxBNITU1Fr169EBAQgKlTpwoqE19++SVatmyJf/3rX1i1ahUOHDiA5cuX25VZsmQJwsPDERkZiXnz5uHGjRvW07hXX30V8+fPx5gxYzB69GicOXMG06ZNwxtvvCGb1iQsLAy7d+9Ghw4dUKZMGVSqVAlTp07F008/jVq1aqFPnz7w8vLCsWPHcOLECbz99tuIjY1FgwYNMHToUMyaNQu3b9/GpEmTJJ9j4fz583jjjTfw8ssv4+eff8aiRYuskWfDw8MRFxeHhIQEfPTRR6hQoQImTpyImjVrIi7OHFQxJiYGV69exfvvv48+ffpg+/bt2LZtG/z9/bmeDwARERHo3r07Xn75ZXzwwQcoVaoUkpKS7N51/fr18ejRIyxatAi9evWyBpHipVu3bvD398fbb7+N6dOnS5bt1KkTOnbsiN69e2Pu3LmoX78+Tp8+DYPBgO7du2Ps2LFo1aoVZsyYgf79+2Pfvn1YvHixne+rHIMGDcKkSZOQkJCAt956C7m5uZg9e7bkNWFhYbh79y52796NJk2awM/PD35+fnZlYmNjERUVhcGDB2P+/PkoKirCq6++ik6dOqFly5bc9ROidOnSGDNmDBYuXIhSpUph9OjRaNu2LVq3bg3AHCSsX79+aNasGWJjY/HVV18hLS0Nu3btUv1MoTZ/9913kuNcLXRy6iGULnLdlRJAytxPyemhK3xYtSJ0UsTjB6jllFsPHzB3RSyW8xF0lUm5I3r6XIqh1h+V52S3sm9lJGckK/a11uLPDLjGogJQZgWhJDWSLTwWCK4+wVdqEmtR0HixzIlKx5G702/pMScC5g0hy4ZbSSQ6GggJAQwiU4vBAISGmsvpib+/P3744Qf07NkTDRo0wOTJkzFnzhyugDpKmTlzJhITE9GiRQv88ccf+Oqrr2Tze7700kt4+PChpCkmYA4klJaWhieeeAKRkZH48MMPsWbNGjRq1AiAOXBTp06d8PTTT+Opp57Cs88+i3r16jndJyUlBWvXrkXjxo3x2WefYc2aNU6nPzNnzsTMmTPRpEkT/O9//8OWLVtQpUoVAEDNmjWxdetWHDhwAE2aNMHIkSPx0ksvWQNISTFnzhzs3LkToaGh1iAv3bp1w9dff41vv/0WrVq1Qtu2bTFv3jxr8CovLy9s3LgRhYWFaN26NYYPHy4Z2MaW559/3nrdqFGjkJiYiBEjRli/X7FiBVq0aIGnn34a7dq1A2MMW7dutZpiRkZGYunSpViyZAmaNGmCAwcOSEadFWPFihWoUaMGOnXqhPj4eIwYMQJVq1a1ft+kSRPMnTsX7733Hh5//HGsWrUK//3vf7nv7+XlhWHDhsFoNOL555+XLb9hwwa0atUKAwcORMOGDTF+/HjrCV7z5s2xbt06rF27Fo8//jimTp2K6dOn2wVDkqN8+fL46quvkJmZiWbNmmHSpEl47733JK9p3749Ro4cif79+yMoKEgwmJbBYMDmzZtRqVIldOzYEbGxsahbty6++OIL7rqJ4efnhwkTJmDQoEHo0KEDypcvb3ffZ599FgsWLMDs2bPRqFEjfPTRR1ixYgViYmJUP1OozXLjXC0GJhWfnMDt27dRsWJF3Lp1S9HukxxGkxFhC8Ikd7gDfQORX5jv9L1lkaFHFFKl9RVbkFh25HMSc6xKn1T7bLE9mXUnvJGS12SuwaC0QbL3Wx2/GgOjBtp9puVaR9SeoGfkZqDzSvHobFLoLWtK2sBb1rJpAoDrdM+R9KHpXJGO5Z5nOTWs7FsZ1wuvK6pDUpskrM9aLyuLnoBXhidHT0ZyTDL2nN+jSt4c5xAxhMat1BzCK0e848QiL0rGVah/qGy7hBCLRC03LpX2k
dR1es+J7uTatWs4dOgQoqOjuX0KbbFE6wXsAyNZFFZXROt1BxkZGejcuTNu3LjhFOhGjhkzZuDLL7/E8ePHXVM5GwwGAzZu3Ch6mpubm4s6dergyJEjTvk9ieLNSy+9hKtXr2LLli2erkqJIzU1FUlJSZr9Oj3B/fv3kZOTgzp16kj6aJNZr4eQM1+0/K3WlFRv8z6lJm887Utqk4S4x+JcZqYshZL0I1r8uHivPXX1FDJyMyT7Qix9hBw8qWq8Dd6CpphaTU9tZTE7PxsfH/7Yzj9PSgHjba+YybFlc0cOpSboUibOw5sP5zbztGX+/vlOn+mZCkcLvDLcpW4Xa0RjNamRpNIiOc5p58acw48Xf+TauOBRtgDlJrFK5EbNia4WdwI1Zt7umhNLEvHxZgU0MdE+OFJICDB/fslUTNVy9+5d5ObmYvHixU45QgmCl1u3biEzMxOrV68mxZQQhcx6PYiU+WJKTIrk6YteUUF5URMFUqp9G/ptwLzu83Q1P+TBkgoj4asE7sAnWqLV8pqdvr3nbZeZbPOY+sn5CKpJP+Eoi9MypjkFjtESvdUWIVPJdX3WcV2rZgEtZpoZHhiu+F5ywYP0SoUDqEuFpFT+taZGcpxrhOa0eovqIb8wX9KEWWkQM6UmsbxykxKTompzQatLgBIzb6XBoHhkIsQ/BEaTUXOEbE8THw/k5gLp6cDq1eb/5uT8sxRTwJxLtEWLFoiJiZE16SUIMeLi4tC1a1eMHDnSmmOXIBwhs14ZXGXWa4vQSee6k+sUm02pNQHjQanJmy3uCugkh9ApihS2bZEy5QSk+1aJ2akrTbbFTP16N+yN+T/Nl71eiYmemCwKYVnInh1zlus0jBce0/kqflUwr9s81PSvqYtcKjH1dLQokEKp6bEQSk4Rha5VKv9Kx5sFoXGndE5T6oYgV28hk1ge14UQ/xDkJuaqkistprNK51w187tS83ZPmKlrNeslCIIg9IPXrJeUUxncoZwKoXSxwLsYU6sA8Cz0efzFXAHPQkyJsmTBcdGn1o9L7FoxXNmXQn3F6yPIqyDJyaIYQX5BuFpw1fq3HotZJRsDejxPqa+1KzYGhNBj40qN/NvKW9VyVTFs0zBcusM3h2hRMLVspjnWW42/s2O/qtmgU9sGNZsQahVhoWeJ+Vx7IlYCKacEQRDFB/I5LeEo9X/iNQELmReiSgFQmuLDXfAsxJSkwrDF0WxPqR+XmJ/c7l934+094j47Ur53WhHy49Q7/YTaHLe2cgno43Mp5h8qhB7PU+prvef8Hi7lVIvvntZUSGp8PS04ytuCHvxziJrULhbUuCFI1VsMnhRLapRFo8kIo8ko6TctNC6V+I3aotaH1HFOrFquKoZuGip4rZ6pkwiCIIi/L+RzWkxR6v/EuxgTUwB4fP60pDTRmiZDCF6fMqXKkpQPKa8fl5SfXMMgviTISgP1qO1jvVN0KK23GHr5XNr6h/7fc/+HKn5VXP48Xl9rLf7MvKj1XVTr6ymFkjlEi4LpzoA9eqXfsmDp99jPYyUVU8B+XPL6jT4seug0T2iRQ9s50dvL28mv3LEeavzXCYIgiH8OdHJajOHZlbegdpGldDdbbRRItb5uYvCeBj0d/jR2/7qb+756nALLnV4kxyRz3UfJO9Xax0pkTQqjyYjL9y5z11sOvU6RLQvojNwMXCu4ptvzxMw1eceJOywS1Ch5ak/geODtGy0Kpt7WAHJmuUInrWpOrHndD4TGJe8mRM15Ne3GgGWe0EMOtZ5YEwRBEAQpp8Wc+AZxiMuriDMnMpBXHvDuFIPoOs4nFtHV22DqEX9UybuNc5WAJa2AIpG362UCon8Dgu8CeeWBPbUlFuQPHwJLlwLnzgH16gGvvgpvHx/nckYjsGcPkJcHBAebM5N7ewsutrxMQP1jF7F+b29U6ZWCjv+eBHgrW3w7LsRKFQGjDgL1buDP9gubMTu3HTDZ2A/YLfoE2g6ZBOVyC9LSRYBx7hx8er0cjpW/J/ierAvnmu2BjAynPnUk7cSXWDS7H/7l0CalioSt0nD5+gW02Lwfdc8yeO04D9R7KN72P9/9gUObMSvn/5BW5ZqoTYbzexKWU8f39MdN8dMY7vdkNML43W4MyBR+97b8cfOSbN/LbQg4KSwPHwJLFznVU2hjwMsE9L5WBW/WGYzWlwOBBkan5xtNRuzJyYDx+wwE3wUiHo+Bd6cYp3JKlTzj/UIce+sFLMhjTu/ISaliEBz3TjjMD97R0bLKP4+CGeZXA9Fph4GcDXZ96qj0G0zMKk9//DnniSpbDvKU1jkYid+96fyen5yL+GtBom23naOE5d5+3jWajHh962vomMNE56fKvpXxRZ8vzKfWDHYy+keAxBixwXFzxnaeWP/sGhybNsLudyQ4UGSDSmDOF5I1obaX9BQzBEEQhAthhCS3bt1iANitW7fc//ANGxgLCWHMnP/b/C8kxPy5LePGMebtbVfukQFsZnswJNv/e64f2Hl/2JU972/+fPXx1bL3Zd7e5s856ln05ZcsZG6I7PNNQm2SYfXx1dZ7zmxvbq9c+6XaHvheINt1bhcrMhYpa7sD6TnpTn2upJ6GZAMzJBvYvgXjuN590ZdfsksVvQXbZLlf6NzQv9rFg5K2C7x72+crbb/YeyqsHiQsIxpkVKyez/X783kSfb/h1AZmSDY4XWt5fxtOyY9Rx3oWGYtYek46+2FuEtfzhw+r7NRP96pVdpYRYxELmRsiWF8nGRk3jpm8vLjmkswPUvjmJ955TABLPzvW3ZBsYO+1BzM61NWxT5X0k9h7Emp/vICMOrbJMkfJyb1l3s38IEV0frJ9dnpOumCfFlYPEpRnnn+GZANb2rk8Mzm03eTlxYrefFPgxUjP+Zb3JdT2IgOE7+kCrl69yrZt28bu3r3rlucRBEEQ4hQWFrJTp06xwsJCyXKknMrgMeV0wwbGDAb7H3/A/JnB8NciaNw45zIAM/35b2Z7sKD3g6yLbiPM/2zLWj7L/CDlr+eL3Nf6z7IAlKinyWC/sBJ7vskA+zZxYFECZ7b/q61i7edp+74FNsoMb9sFsFWaHRUznnqGzg0114Xn3W/YwEwG8TbZ9n16Tjpfxypp+5/v3rFNQs+Xa/97Mu/J5Nh2JXUVkVGhesb/+XzHetr2vUXZk1ro220IqOhTqXe/4dQGaz2F3r3JAKexJKXkWZXpP+spJ6OW92QyCLRFQEa5ZFmCDac2OPX30s7lnd+RyLs3Ccio4PM55lIpGXW8Z3pOOte4tyibvGP5h7lJgn1qMhiYEWYZVqqcitVTjYzuWzDOunkg1na5eVQvSqpyajKZWEJCAqtUqRIDwI4cOeLW59euXZvNmzfPrc9UAgC2ceNGT1fDbaSnpzMA7MaNG4wxxlasWMEqVqwoe11J6aecnByPyDnhfkg51QmPKKdFRc670o6LgNBQxgoKnE9jHBdAXl7swb07rNbsmuy8v8CCynZRGxpifvaDB5L3ZYD5+4ICyXqaDGC/+YN5TTX/k3q+tU1FfCd8RcYiFvZ+DfbIIL6g
MsG8a+/zH+lnmwz469m8bX/wQLBeQienpSZDtp5GLy+WfmYHK3r4gO/dPzCXE7unEX/1ve3pjCRK2i4jo7bP52n/IwNYGZn3ZCcjOsmobT29/5RR0QX6n89PP7uLa8GfnpOua58yg4GZQkNY2Ps1+MeyDUJKXujcULNiKlNPyzsqNfmvsSzXTxYZlZVljjFvOVVefXw1Sz+zw+mET827VyJPvHOJ7T2LCgtYEYfcF929wz2WvaYKnOrb3vPPOdd7Kr9iKjc+lcooCw1lGw6vlmy73T1dSElVTrdu3cpKly7N9u7dy/Ly8tijR4/c+nxSTosXjsppQUEBu3z5svX7adOmsSZNmjhdl5eXx+7fv++mWqqnqKhIsZyLtZko3vAqp+RzWhzZswe4KBFdljHgwgVg3Diz348IBgAwmeDz8Sf4vMoIhN6eJlrWCwAuXDQ/++hRyfsCMH8/bpxkPQ0MqHXb7DsIAKG3Je5nadOePUBMjPSzYQ5Asul6V5RiqeLPB1CKAXN2Sj/bwPDXs3nbvnQpkJTk9JWQn9yog+Z6SNXTYDIhZuspoKkP37tfuhS4eFEktqb5fVr6/vs6nD5eS5fyt71pU8l62j6/6R/y7S/FgA++90Po7QLxgrYyopOM2tazarkghN6+KlrW8nzj9xnSz/2TvDt5uvYpGIPhwkXE7ZaWZ7uxbDOWJAMRzZ8vO5eUYmZZPladcyz/KaOy5TjGvJ3/rkxdAXC9eyXyxDuX2N7T++hRgEPuMWEi11ju+BsQVC4IZf8Ql1HLnPvctSpYX/Uv31LHHMK2yM1PABTJKC5cQPyneyXbbndPgXn0n865c+cQHByM9u3bi5Z5+PAhfGRiIBDClPS+8/X1ha+vr2y56tWru6E22vH29i4xdSXcg5enK0AIkMcZyTA7m6/cuXPo6BPO/+xz53R9fo275kAk3M/npMm98lzl6gtnYxB+Nm/bRcoJpWWpd4Pz+efO8befs5417oI/FYmStnPWM/guf/uH+nXgK+gCGV3afDLWtp/HVZZXloMrBLukT7nlSeB+oqmQOOtZ74aCsczbdgVjXtF9eedHBfLkkrmEs57Bd4E36wzmKvtF+/l2qW0uvn5RNFWMS+YnBb9NJQKj0Rx8as0a83/lNkc0MGzYMIwZMwbnz5+HwWBAWFgYACAmJgajR49GUlISqlSpgm7dugEATpw4gR49eqB8+fKoVq0ahgwZgmvX/tqYMJlM+O9//4s6derA19cXTZo0wfr162XrcefOHQwcOBDlypVDzZo1sWTJErvvz58/j7i4OJQvXx7+/v7o168fLl/+K1L7sGHD8Oyzz9pdk5SUhBibjaiYmBi89tprGD9+PAIDA1G9enUkJyfbXZOdnY2OHTuibNmyaNiwIXbu3OlU1wkTJqBBgwbw8/ND3bp1MWXKFDx69Mj6fXJyMpo2bYpPPvkEderUQdmyZfHZZ5+hcuXKePDggd29nn32WQwZMkS0Xy5evIiBAwciMDAQ5cqVQ8uWLbF//37r9x988AHq1asHHx8fRERE4PPPP7e73mAw4JNPPsFzzz0HPz8/hIeHY8uWLXZltm7digYNGsDX1xedO3dGbm6u3fepqakICAiw/n9KSgqOHTsGg8EAg8GA1NRU67M2bdpkvS4zMxNPPPEEfH19UblyZYwYMQJ37/41oVve2ezZsxEcHIzKlStj1KhRdn3piKVvP/roI4SGhsLPzw/9+vXDrVu3rGVMJhOmT5+OkJAQlClTBk2bNsX27dut3+fm5sJgMODo0aMAgIyMDBgMBuzevRstW7aEn58f2rdvjzNnzki2mTGG5ORk1KpVC2XKlEGNGjXw2muvidadKL6QclocCeaMZBjOqXDWq8d/z+Bgc3kdn59X3hwhk/v5vHDW82yggmfztl2inGMux3OVOJ+v5D1x1jOvvIJUJErazlnPvPL87fdq0ICvoAtktGHjLvCqWVO+IMxRcblzQrqgT7nlyQVj6XpwRYzslaLrPRXVU8l9eedHBfLkkrmEs54je6Wgdcs4rrJeNWvabUL4lPIRzWXskvlJyW9TcSctDQgLAzp3BgYNMv83LMz8uQtYsGCBdSGfl5eHgwcPWr9buXIlfHx8sHfvXnz44Ye4efMmnnjiCTRr1gyHDh3C9u3bcfnyZfTr1896zX//+1989tln+PDDD3Hy5Em8/vrr+Pe//43vv/9esh6zZs1CkyZNcOTIEUycOBGJiYlWxdBkMiEuLg75+fn4/vvvsXPnTvz666/o37+/4vauXLkS5cqVw/79+/H+++9j+vTpds+Jj4+Hj48P9u/fjw8//BATJkxwukeFChWQmpqKU6dOYcGCBVi2bBnmzbPfbDx79iw2bNiAtLQ0HD16FH379oXRaLRTDK9cuYJvvvkGL774omBd7969i06dOuHSpUvYsmULjh07hvHjx8NkMgEANm7ciMTERIwdOxYnTpzAyy+/jBdeeAHp6el290lJSUG/fv1w/Phx9OzZE4MHD0Z+vnnn68KFC4iPj0evXr1w9OhRDB8+HBMnThTtv/79+2Ps2LFo1KgR8vLykJeXJ/ge7t27h27duqFSpUo4ePAgvvzyS+zatQujR4+2K5eeno5z584hPT0dK1euRGpqqlXZFePs2bNYt24dvvrqK2zfvh1HjhzBq6++av1+wYIFmDNnDmbPno3jx4+jW7dueOaZZ5Ats4k1adIkzJkzB4cOHUKpUqWs70WszRs2bMC8efPw0UcfITs7G5s2bUJUVJTkM4hiinusjEsuHvU5FQo6YevXJONzKugrJHdPNf58MvfckLlO1udVqc8pY8wlfmJafU7tXuOffnJrD33GTF5eyny65N6TxZ9PpJwRYBcDvNmGzHW69aeSelr85LyngpWeLBBVVYM8uUJGWVGRojHCFWBI5z7V6nOqdSyZvLxYUWGBbjKqaswr6VMd5cmlc4lMPU0Gm/epZB4XQMjvuM77NfnGp5L5SclvkwvR7HOqQ0AvNcybN4/Vrl3b7rNOnTqxZs2a2X02Y8YM1rVrV7vPLly4wACwM2fOsPv37zM/Pz/2448/2pV56aWX2MCBA0WfX7t2bda9e3e7z/r378969OjBGGPs22+/Zd7e3uz8+fPW70+ePMkAsAMHDjDGGBs6dCiLi4uzu0diYiLr1KmTXZv+9a9/2ZVp1aoVmzBhAmOMsR07drBSpUqxS5cuWb/ftm0bA6R9TmfNmsVatGhh/XvatGmsdOnS7MqVK3blXnnlFWubGGNszpw5rG7dusxkMgne96OPPmIVKlRg169fF/y+ffv2LCEhwe6zvn37sp49e1r/BsAmT55s/fvu3bsMANu2bRtjjLG33nqLNWzY0O4eEyZMYIB4QCQx/0vbfvr4449ZpUqV7MbCN998w7y8vNgff/zBGDO/s9q1a7Mim/mjb9++rH///oLttTzb29ubXbx40frZtm3bmJeXF8vLy2OMMVajRg32zjvv2F3XqlUr9uqrrzLGnAMiWXxsd+3aZVdXAFZfRaE2z5kzhzVo0IA9fPhQtL6EZ+H
1OaWT0+KItzewwLzLDYPD6Yzl7/nzAV9f4I03pO/1xhvmnI+89/T2Npfnua+vL9c94x/vi19f/w233kuBwQAwxwMnx+fzwlHPj2PK46EPkNjd/LfJsYDatnP4q1hMKPu3GALD2LGiPmV29+R9Tz4+ouWYwfxR9WVrEf94X9l6WlHSdol6Wvo4qTtQIyAUawdtgNfYsfL35ZQnV8govL0VjRHH03ELIf4h9nlldepTy9+G+Qsw5+lFSBKRZ9OfRQ3zF+g6lgwADGPHwrusry4yqnrMc9QVgK7yxP7875x2sJtLZOcxneppgOGv96lkHhcgPjIeuYm5dia/2WN/4xufSuYnJb9NxRWjEUhMNKujjlg+S0pyqYmvIy1atLD7+9ixY0hPT0f58uWt/x577DEAZr/Vs2fPoqCgAE8++aRdmc8++wznZEyq27Vr5/R3VlYWACArKwuhoaEIDQ21ft+wYUMEBARYy/DSuHFju7+Dg4Nx5coVu+fUqFFDtF4A8MUXX6BDhw6oXr06ypcvj8mTJ+P8+fN2ZWrXro2goCC7zxISEvDtt9/i0iVzjuDU1FQMGzYMBkfZ/pOjR4+iWbNmCAwUNqHIyspChw727ikdOnRw6hPbNpcrVw7+/v52bW7Tpo1deaE2KyUrKwtNmjRBuXLl7OpmMpms5rIA0KhRI3jbzB+270OMWrVqoaaN5VG7du2s9719+zZ+//13rn5xxLafgv+02pCqS9++fVFYWIi6desiISEBGzduRFFRkeQziOIJKafFlfh4YP16wNHUMCTE/Hn8n4vf9983B/5wXIx4e5s/f/995fdUcl/Oe3p7eePxkVNhWL8Bhpoh8s/nRaKehnHj8PKum0gfmo6+k1fj1AcpMIRwPFtJn+pQT9XvSaScISQUhvUb4N2nj0fq+TA4CHvnJuG199KRk5hjVtR0lidFdVVyTwVlhRb61vbaovO7j4+Mx+DkDXh5WGVc8rcvdr96ZRjWb9B9LOkto5rGvJK66iBPBm9vmN58E21Wmd/za++lg335Jd885mEZFULQ79gV794V86g74Q1MuGeP26pkq1gAZjNTi/mn7T+Ln6bFn/Cbb76x+/7UqVNcfqda8PLyAnNQ7IV8F0uXLm33t8FgsJrJ8rBv3z4MHjwYPXv2xNdff40jR45g0qRJePjwoV05x74DgGbNmqFJkyb47LPPcPjwYZw8eRLDhg0TfRZPECIetLbZlRSnutnWxbJhIFWX0NBQnDlzBkuXLoWvry9effVVdOzYUdJnliieULTe4kx8PBAXZ/7xy8sz+/tERzv/2L//PvD22+bIh+fOmf14Xn1VeFea955K7qvknkrK8iJRT2/gryifUQASJunbdp3q6QRvP7m5P3nqWTY6GtHukicPy6hdFFkpdH738ZHxiFsehz2TM/DL9xkIvmv2hfXrFOPRd+82GVVSVx3kydvHBzG25cIAPPdciZBRblzx7l0xj7oL3uBPSgN66Ujz5s2xYcMGhIWFoVQp5+Vcw4YNUaZMGZw/fx6dOnVSdO+ffvrJ6e/IyEgAQGRkJC5cuIALFy5YT09PnTqFmzdvomHDhgCAoKAgnDhxwu4eR48edVJ+pLA8Jy8vz3py5livH3/8EbVr18akSZOsn/3222/czxg+fDjmz5+PS5cuITY21u402JHGjRvjk08+QX5+vuDpaWRkJPbu3YuhQ4daP9u7d6+1T3iIjIx0CpDk2GZHfHx8YJQ5wY+MjERqairu3btnVdT37t0LLy8vREREcNdPiPPnz+P333+3nnD/9NNP1vv6+/ujRo0a2Lt3r50M7t27F61bt1b9TLE2+/r6olevXujVqxdGjRqFxx57DJmZmWjevLnqZxEewD1WxiUXj/icEgRBEAShCU0+p+np0j6zln/p6XpXW9TnNDEx0e6zS5cusaCgINanTx924MABdvbsWbZ9+3Y2bNgwq9/gpEmTWOXKlVlqaio7e/YsO3z4MFu4cCFLTU0VfX7t2rWZv78/e++999iZM2fY4sWLmbe3N9u+fTtjjDGTycSaNm3KoqOj2eHDh9n+/ftZixYt7PxJt2/fzgwGA1u5ciX75Zdf2NSpU5m/v7+Tz6ljm+Li4tjQoUMZY4wZjUbWsGFD9uSTT7KjR4+yH374gbVo0cLOl3Lz5s2sVKlSbM2aNezs2bNswYIFLDAwkMsnkzHGbt68yfz8/JiPjw9bu3ataJ8wxtiDBw9YgwYNWHR0NPvf//7Hzp07x9avX2/16d24cSMrXbo0W7p0Kfvll1/YnDlzmLe3N0u3kREI+MtWrFiRrVixgjHG2G+//cZ8fHzYm2++yU6fPs1WrVrFqlevLulzumrVKlauXDl25MgRdvXqVWtuU9tn3bt3jwUHB7PevXuzzMxM9t1337G6deta+5oxPj9hR6ZNm8bKlSvHYmNjre+oQYMGbMCAAdYy8+bNY/7+/mzt2rXs9OnTbMKECax06dLsl19+YYyJ+5xa2ssYY0eOHGEAWE5OjmibV6xYwT755BOWmZnJzp07xyZPnsx8fX3ZtWvXROtPuBfyOSUIgiAIglBDdLTZVFnE/xAGAxAaai7nISwnUkajEV27dkVUVBSSkpIQEBAALy/z8m7GjBmYMmUK/vvf/yIyMhLdu3fHN998gzp16kjee+zYsTh06BCaNWuGt99+G3PnzrWmrzEYDNi8eTMqVaqEjh07IjY2FnXr1sUXX3xhvb5bt26YMmUKxo8fj1atWuHOnTt4/vnnFbXPy8sLGzduRGFhIVq3bo3hw4fjnXfesSvzzDPP4PXXX8fo0aPRtGlT/Pjjj5gyZQr3MypWrIjevXujfPnyTqlvHPHx8cG3336LqlWromfPnoiKisLMmTOtPprPPvssFixYgNmzZ6NRo0b46KOPsGLFCrv0OXLUqlULGzZswKZNm9CkSRN8+OGHePfddyWv6d27N7p3747OnTsjKCgIa9ascSrj5+eHHTt2ID8/H61atUKfPn3QpUsXLF68mLtuYtSvXx/x8fHo2bMnunbtisaNG2Pp0qXW71977TW88cYbGDt2LKKiorB9+3Zs2bIF4bxRvQUQanNAQACWLVuGDh06oHHjxti1axe++uorVK5cWXMbCfdiYEzI25+wcPv2bVSsWBG3bt2Cv7+//AUEQRAEQXica9eu4dChQ4iOjhb0OZQlLQ2w+O7bLpUsCqsWv2mi2NClSxc0atQICxcu9HRVShzJycnYtGmTNUcpQUhx//595OTkWPMNi0EnpwRBEARBEI64KqAXUSy4ceMGNm7ciIyMDIwaNcrT1SEI4k8oIBJBEARBEIQQrgroRXicZs2a4caNG3jvvfc0BwUiCEI/yKxXBjLrJQiCIIiSh2azXoIgCEI3yKyXIAiCIAiCIAiCKDGQckoQBEEQBEEQBEF4HFJOCYIgCIL4W8IYA3kvEQRBeB7euZiUU4IgCIIg/nZ4e3vj4cOHKCws9HRVCIIg/vEUFBQAAEqXLi1ZjqL1ymDR8m/fvu3hmhAEQRAEwcu9e/ewZcsW1K9fH15eXvDz84
PBkqOUIAiCcAuMMRQUFODKlSsICAiAt0y0c4rWK8PFixcRGhrq6WoQBEEQBKEQg8GAU6dOkWkvQRCEhwkICED16tVlNwlJOZXBZDLh999/R4UKFXTdcb19+zZCQ0Nx4cKFv22Kmr97G6l9JZ+/exv/7u0D/v5tpPaphzGGO3fuoEaNGmCM4dGjR7renyAIguCjdOnSsiemFsisVwYvLy+EhIS47P7+/v5/ywWHLX/3NlL7Sj5/9zb+3dsH/P3bSO1TR8WKFa3/z7swIgiCIDwHBUQiCIIgCIIgCIIgPA4ppwRBEARBEARBEITHIeXUQ5QpUwbTpk1DmTJlPF0Vl/F3byO1r+Tzd2/j3719wN+/jdQ+giAI4p8EBUQiCIIgCIIgCIIgPA6dnBIEQRAEQRAEQRAeh5RTgiAIgiAIgiAIwuOQckoQBEEQBEEQBEF4HFJOCYIgCIIgCIIgCI9DyqkLeeedd9C+fXv4+fkhICCA6xrGGKZOnYrg4GD4+voiNjYW2dnZdmXy8/MxePBg+Pv7IyAgAC+99BLu3r3rghZIo7Qeubm5MBgMgv++/PJLazmh79euXeuOJtmhpp9jYmKc6j5y5Ei7MufPn8dTTz0FPz8/VK1aFePGjUNRUZErmyKK0jbm5+djzJgxiIiIgK+vL2rVqoXXXnsNt27dsivnqXe4ZMkShIWFoWzZsmjTpg0OHDggWf7LL7/EY489hrJlyyIqKgpbt261+55nPLobJW1ctmwZoqOjUalSJVSqVAmxsbFO5YcNG+b0rrp37+7qZoiipH2pqalOdS9btqxdmeL2DpW0T2g+MRgMeOqpp6xlitP7++GHH9CrVy/UqFEDBoMBmzZtkr0mIyMDzZs3R5kyZVC/fn2kpqY6lVE6rgmCIIgSDCNcxtSpU9ncuXPZG2+8wSpWrMh1zcyZM1nFihXZpk2b2LFjx9gzzzzD6tSpwwoLC61lunfvzpo0acJ++ukntmfPHla/fn02cOBAF7VCHKX1KCoqYnl5eXb/UlJSWPny5dmdO3es5QCwFStW2JWzbb+7UNPPnTp1YgkJCXZ1v3XrlvX7oqIi9vjjj7PY2Fh25MgRtnXrVlalShX21ltvubo5gihtY2ZmJouPj2dbtmxhZ8+eZbt372bh4eGsd+/eduU88Q7Xrl3LfHx82KeffspOnjzJEhISWEBAALt8+bJg+b179zJvb2/2/vvvs1OnTrHJkyez0qVLs8zMTGsZnvHoTpS2cdCgQWzJkiXsyJEjLCsriw0bNoxVrFiRXbx40Vpm6NChrHv37nbvKj8/311NskNp+1asWMH8/f3t6v7HH3/YlSlO71Bp+65fv27XthMnTjBvb2+2YsUKa5ni9P62bt3KJk2axNLS0hgAtnHjRsnyv/76K/Pz82NvvPEGO3XqFFu0aBHz9vZm27dvt5ZR2mcEQRBEyYaUUzewYsUKLuXUZDKx6tWrs1mzZlk/u3nzJitTpgxbs2YNY4yxU6dOMQDs4MGD1jLbtm1jBoOBXbp0Sfe6i6FXPZo2bcpefPFFu894FjWuRm37OnXqxBITE0W/37p1K/Py8rJbQH/wwQfM39+fPXjwQJe686LXO1y3bh3z8fFhjx49sn7miXfYunVrNmrUKOvfRqOR1ahRg/33v/8VLN+vXz/21FNP2X3Wpk0b9vLLLzPG+Maju1HaRkeKiopYhQoV2MqVK62fDR06lMXFxeldVVUobZ/c3Frc3qHW9zdv3jxWoUIFdvfuXetnxen92cIzB4wfP541atTI7rP+/fuzbt26Wf/W2mcEQRBEyYLMeosROTk5+OOPPxAbG2v9rGLFimjTpg327dsHANi3bx8CAgLQsmVLa5nY2Fh4eXlh//79bqurHvU4fPgwjh49ipdeesnpu1GjRqFKlSpo3bo1Pv30UzA3p+PV0r5Vq1ahSpUqePzxx/HWW2+hoKDA7r5RUVGoVq2a9bNu3brh9u3bOHnypP4NkUAvWbp16xb8/f1RqlQpu8/d+Q4fPnyIw4cP240dLy8vxMbGWseOI/v27bMrD5jfhaU8z3h0J2ra6EhBQQEePXqEwMBAu88zMjJQtWpVRERE4JVXXsH169d1rTsPatt39+5d1K5dG6GhoYiLi7MbR8XpHerx/pYvX44BAwagXLlydp8Xh/enBrkxqEefEQRBECWLUvJFCHfxxx9/AICd4mL52/LdH3/8gapVq9p9X6pUKQQGBlrLuAM96rF8+XJERkaiffv2dp9Pnz4dTzzxBPz8/PDtt9/i1Vdfxd27d/Haa6/pVn851LZv0KBBqF27NmrUqIHjx49jwoQJOHPmDNLS0qz3FXq/lu/ciR7v8Nq1a5gxYwZGjBhh97m73+G1a9dgNBoF+/b06dOC14i9C9uxZvlMrIw7UdNGRyZMmIAaNWrYLfa7d++O+Ph41KlTB+fOncN//vMf9OjRA/v27YO3t7eubZBCTfsiIiLw6aefonHjxrh16xZmz56N9u3b4+TJkwgJCSlW71Dr+ztw4ABOnDiB5cuX231eXN6fGsTG4O3bt1FYWIgbN25olnmCIAiiZEHKqUImTpyI9957T7JMVlYWHnvsMTfVSF9426eVwsJCrF69GlOmTHH6zvazZs2a4d69e5g1a5Yuio2r22erpEVFRSE4OBhdunTBuXPnUK9ePdX3VYK73uHt27fx1FNPoWHDhkhOTrb7zpXvkFDHzJkzsXbtWmRkZNgFDRowYID1/6OiotC4cWPUq1cPGRkZ6NKliyeqyk27du3Qrl0769/t27dHZGQkPvroI8yYMcODNdOf5cuXIyoqCq1bt7b7vCS/P4IgCIJwhJRThYwdOxbDhg2TLFO3bl1V965evToA4PLlywgODrZ+fvnyZTRt2tRa5sqVK3bXFRUVIT8/33q9Fnjbp7Ue69evR0FBAZ5//nnZsm3atMGMGTPw4MEDlClTRra8FO5qn4U2bdoAAM6ePYt69eqhevXqTpEmL1++DAC6vD/APW28c+cOunfvjgoVKmDjxo0oXbq0ZHk936EQVapUgbe3t7UvLVy+fFm0LdWrV5cszzMe3YmaNlqYPXs2Zs6ciV27dqFx48aSZevWrYsqVarg7NmzblVutLTPQunSpdGsWTOcPXsWQPF6h1rad+/ePaxduxbTp0+XfY6n3p8axMagv78/fH194e3trVkmCIIgiJIF+ZwqJCgoCI899pjkPx8fH1X3rlOnDqpXr47du3dbP7t9+zb2799vPR1o164dbt68icOHD1vLfPfddzCZTFZFSAu87dNaj+XLl+OZZ55BUFCQbNmjR4+iUqVKuig17mqfbd0BWBfG7dq1Q2Zmpp1SuHPnTvj7+6Nhw4aa2+eONt6+fRtdu3aFj48PtmzZ4pS6Qwg936EQPj4+aNGihd3YMZlM2L17t93Jmi3t2rWzKw+Y34WlPM94dCdq2ggA77//PmbMmIHt27fb+ReLcfHiRVy/ft1OmXMHattni9FoRGZmprXuxekdamnfl19+iQcPHuDf//637HM89f7UIDcG9ZAJgiAIooTh6
YhMf2d+++03duTIEWu6lCNHjrAjR47YpU2JiIhgaWlp1r9nzpzJAgIC2ObNm9nx48dZXFycYCqZZs2asf3797P//e9/LDw83GOpZKTqcfHiRRYREcH2799vd112djYzGAxs27ZtTvfcsmULW7ZsGcvMzGTZ2dls6dKlzM/Pj02dOtXl7XFEafvOnj3Lpk+fzg4dOsRycnLY5s2bWd26dVnHjh2t11hSyXTt2pUdPXqUbd++nQUFBXk0lYySNt66dYu1adOGRUVFsbNnz9qlrygqKmKMee4drl27lpUpU4alpqayU6dOsREjRrCAgABrZOQhQ4awiRMnWsvv3buXlSpVis2ePZtlZWWxadOmCaaSkRuP7kRpG2fOnMl8fHzY+vXr7d6VZQ66c+cOe/PNN9m+fftYTk4O27VrF2vevDkLDw9n9+/fL/btS0lJYTt27GDnzp1jhw8fZgMGDGBly5ZlJ0+etJYpTu9Qafss/Otf/2L9+/d3+ry4vb87d+5Yf+cAsLlz57IjR46w3377jTHG2MSJE9mQIUOs5S2pZMaNG8eysrLYkiVLBFPJSPUZQRAE8feClFMXMnToUAbA6V96erq1DP7MB2nBZDKxKVOmsGrVqrEyZcqwLl26sDNnztjd9/r162zgwIGsfPnyzN/fn73wwgt2Cq+7kKtHTk6OU3sZY+ytt95ioaGhzGg0Ot1z27ZtrGnTpqx8+fKsXLlyrEmTJuzDDz8ULOtqlLbv/PnzrGPHjiwwMJCVKVOG1a9fn40bN84uzyljjOXm5rIePXowX19fVqVKFTZ27Fi7NCzuRGkb09PTBWUaAMvJyWGMefYdLlq0iNWqVYv5+Piw1q1bs59++sn6XadOndjQoUPtyq9bt441aNCA+fj4sEaNGrFvvvnG7nue8ehulLSxdu3agu9q2rRpjDHGCgoKWNeuXVlQUBArXbo0q127NktISPDowl9J+5KSkqxlq1Wrxnr27Ml+/vlnu/sVt3eoVEZPnz7NALBvv/3W6V7F7f2JzQ+WNg0dOpR16tTJ6ZqmTZsyHx8fVrduXbvfQwtSfUYQBEH8vTAw5uYcHQRBEARBEARBEAThAPmcEgRBEARBEARBEB6HlFOCIAiCIAiCIAjC45ByShAEQRAEQRAEQXgcUk4JgiAIgiAIgiAIj0PKKUEQBEEQBEEQBOFxSDklCIIgCIIgCIIgPA4ppwRBEARBEARBEITHIeWUIAhChoyMDBgMBty8edPTVSEIgiAIgvjbQsopQRAlBqPRiPbt2yM+Pt7u81u3biE0NBSTJk1yyXPbt2+PvLw8VKxY0SX3JwiCIAiCIAADY4x5uhIEQRC8/PLLL2jatCmWLVuGwYMHAwCef/55HDt2DAcPHoSPj4+Ha0gQBEEQBEGogU5OCYIoUTRo0AAzZ87EmDFjkJeXh82bN2Pt2rX47LPPRBXTCRMmoEGDBvDz80PdunUxZcoUPHr0CADAGENsbCy6desGy15dfn4+QkJCMHXqVADOZr2//fYbevXqhUqVKqFcuXJo1KgRtm7d6vrGEwRBEARB/I0p5ekKEARBKGXMmDHYuHEjhgwZgszMTEydOhVNmjQRLV+hQgWkpqaiRo0ayMzMREJCAipUqIDx48fDYDBg5cqViIqKwsKFC5GYmIiRI0eiZs2aVuXUkVGjRuHhw4f44YcfUK5cOZw6dQrly5d3VXMJgiAIgiD+EZBZL0EQJZLTp08jMjISUVFR+Pnnn1GqFP9e2+zZs7F27VocOnTI+tmXX36J559/HklJSVi0aBGOHDmC8PBwAOaT086dO+PGjRsICAhA48aN0bt3b0ybNk33dhEEQRAEQfxTIbNegiBKJJ9++in8/PyQk5ODixcvAgBGjhyJ8uXLW/9Z+OKLL9ChQwdUr14d5cuXx+TJk3H+/Hm7+/Xt2xfPPfccZs6cidmzZ1sVUyFee+01vP322+jQoQOmTZuG48ePu6aRBEEQBEEQ/yBIOSUIosTx448/Yt68efj666/RunVrvPTSS2CMYfr06Th69Kj1HwDs27cPgwcPRs+ePfH111/jyJEjmDRpEh4+fGh3z4KCAhw+fBje3t7Izs6WfP7w4cPx66+/Ws2KW7ZsiUWLFrmquQRBEARBEP8ISDklCKJEUVBQgGHDhuGVV15B586dsXz5chw4cAAffvghqlativr161v/AWZFtnbt2pg0aRJatmyJ8PBw/Pbbb073HTt2LLy8vLBt2zYsXLgQ3333nWQ9QkNDMXLkSKSlpWHs2LFYtmyZS9pLEARBEATxT4GUU4IgShRvvfUWGGOYOXMmACAsLAyzZ8/G+PHjkZub61Q+PDwc58+fx9q1a3Hu3DksXLgQGzdutCvzzTff4NNPP8WqVavw5JNPYty4cRg6dChu3LghWIekpCTs2LEDOTk5+Pnnn5Geno7IyEjd20oQBEEQBPFPggIiEQRRYvj+++/RpUsXZGRk4F//+pfdd926dUNRURF27doFg8Fg99348ePx6aef4sGDB3jqqafQtm1bJCcn4+bNm7h69SqioqKQmJiIt956CwDw6NEjtGvXDvXq1cMXX3zhFBBpzJgx2LZtGy5evAh/f390794d8+bNQ+XKld3WFwRBEARBEH83SDklCIIgCIIgCIIgPA6Z9RIEQRAEQRAEQRAeh5RTgiAIgiAIgiAIwuOQckoQBEEQBEEQBEF4HFJOCYIgCIIgCIIgCI9DyilBEARBEARBEAThcUg5JQiCIAiCIAiCIDwOKacEQRAEQRAEQRCExyHllCAIgiAIgiAIgvA4pJwSBEEQBEEQBEEQHoeUU4IgCIIgCIIgCMLjkHJKEARBEARBEARBeBxSTgmCIAiCIAiCIAiP8//O3bdWbNTSsQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plt.scatter(px1, py1, color='green', label='collation points')\n", + "plt.scatter(px2, py2, color='blue', label='simply supported boundary condition points')\n", + "plt.scatter(px3, py3, color='red', label='free boundary condition points')\n", + "plt.title('Summary of points')\n", + "plt.xlabel('X-axis')\n", + "plt.ylabel('Y-axis')\n", + "plt.legend(bbox_to_anchor=(1, 0), loc=3, borderaxespad=0)\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. 初始化深度学习求解器并开始求解" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2024/11/17 17:10:45] ppsci INFO: Using paddlepaddle 3.0.0 on device Place(gpu:0)\u001b[0m\n", + "\u001b[36m[2024/11/17 17:10:45] ppsci MESSAGE: Set to_static=False for computational optimization.\u001b[0m\n", + "[2024/11/17 17:11:51] ppsci INFO: [Train][Epoch 1/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 66.07724s, reader_cost: 0.00001s, ips: 605.35, eta: 7:19:24\u001b[0m\n", + "\u001b[36m[2024/11/17 17:11:51] ppsci MESSAGE: Finish saving checkpoint to: ./output_kirchhoff/checkpoints/latest(latest checkpoint will be saved every epoch as expected, but this log will be printed only once for tidy logging)\u001b[0m\n", + "[2024/11/17 17:12:13] ppsci INFO: [Train][Epoch 100/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 0.16517s, reader_cost: 0.00002s, ips: 242176.51, eta: 0:00:49\u001b[0m\n", + "[2024/11/17 17:12:32] ppsci INFO: [Train][Epoch 200/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 0.14143s, reader_cost: 0.00002s, ips: 282823.83, eta: 0:00:28\u001b[0m\n", + "[2024/11/17 17:12:50] ppsci INFO: [Train][Epoch 300/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 0.14079s, reader_cost: 0.00002s, ips: 284106.49, eta: 0:00:14\u001b[0m\n", + "[2024/11/17 17:13:09] ppsci INFO: [Train][Epoch 400/400][Iter 1/1] lr: 1.00000, loss: 0.00000, pde_contraint: 0.00000, constraint_left_right: 0.00000, constraint_up_down: 0.00000, batch_cost: 0.14086s, reader_cost: 0.00002s, ips: 283973.91, eta: 0:00:00\u001b[0m\n" + ] + } + ], + "source": [ + "# set optimizer\n", + "opt = ppsci.optimizer.LBFGS(max_iter=1000)(model)\n", + "solver = ppsci.solver.Solver(\n", + " model,\n", + " {\n", + " \"pde_contraint\": pde_contraint,\n", + " \"constraint_left_right\": constraint_left_right,\n", + " \"constraint_up_down\": constraint_up_down,\n", + " },\n", + " output_dir=\"./output_kirchhoff\",\n", + " optimizer=opt,\n", + " epochs=400,\n", + " iters_per_epoch=1,\n", + " log_freq=100,\n", + " # pretrained_model_path=\"./output_kirchhoff/checkpoints/latest\"\n", + ")\n", + "solver.train()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. 
结果可视化" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "num_cords = 10201\n", + "(10201,) (10201,) (10201, 1)\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfIAAAGHCAYAAABLftCiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAABiiUlEQVR4nO3de1xUZf4H8A8gMFwckIsMKAgmiSSCoY6wlm2SQ7Gb7Jar5oq6JGVqupimroJpRnnLTH+xVt5+q2lu6pq5JGFmm4SK4i11zUC8MCggjKCAwPn94Y+Txxlghttw4PN+vc7L5jnf85znHKb5znnOc56xEARBABEREcmSpbkbQERERI3HRE5ERCRjTOREREQyxkROREQkY0zkREREMsZETkREJGNM5ERERDLGRE5ERCRjTOREREQyxkROrSozMxMhISHo3LkzJkyYgC1btuCxxx4zevuNGzfC19e33hhfX19s3LjR6DonTJgAa2trODo64vTp00ZvR1TL19cXCoWiwfcmUUtgIqdWNXfuXDz11FO4ffs2Nm7ciLFjx+Ls2bPmbhbGjh2L0tJSBAUF1Rv32Wefwd/fH3Z2dggODsaBAwfqjS8sLMSoUaOgVCrh4uKCV199FRUVFZKYVatWwcfHB/b29vjNb36DkydPStafOHEC4eHhsLe3h4+PD1avXi1Zv3r1aqjVatjb29eZSAoLC/Hyyy+ja9eu6Ny5MwIDA3HixAlx/dGjR/HUU0+hS5cucHd3xx//+Efk5ubWe2wPa6idD6upqcG8efPg4eEBR0dHREZG4vLly+L6a9euYcSIEejRowcsLCz0vpxVVFTglVdegb+/Pzp37gwfHx/MnDkTd+/eFWP27duHp59+Gm5ubujSpQuGDBmC7777TlLPxYsX8bvf/Q6urq5wcXHBM888I/lC9+qrr8LR0VGyWFhYYPr06WJMTk4OkpOTTTpfRM2FiZxa1S+//IKQkBBzN6NRDh8+jL/85S94//33UVJSgtdffx2///3v6014tV8QLl++jDNnzuDYsWOYOXOmuH7btm1YtGgRPv/8cxQVFWH48OGIjIzE7du3AQA6nQ6RkZHQaDQoKirC559/joULF+Kf//ynWIeXlxdmz56Nv/3tbwbbUF5ejqeffhqVlZU4deoUdDodvvzyS3h5eQG4n1CjoqIQGhoKrVaLX375BZ06dcLYsWONPjfGtPNhS5cuxWeffYZDhw5Bq9XCx8cHv//971FTUwMAsLS0xPDhw7F161Z0795db/uqqiq4ubnhyy+/RHFxMb777jscOHAAs2fPFmNu3bqFadOm4dKlS7h58yZGjx6N5557DleuXBFjRo8eDUdHR+Tk5CAvLw99+/ZFVFQUan+GIjk5GaWlpeJy+PBhAMC4ceOMPj9ELUogMtKePXsEb29v8fWaNWsEAEJaWpogCIJQXFwsdOrUSfjvf/+rt21VVZXg4OAgWFhYCLa2toKDg4Pwz3/+U9iwYYPQo0cPSdzy5cuFgIAAQalUCo8//rjwzTffiOsfjr99+7bwl7/8RXBxcRG8vLyE999/X+jRo4ewYcMGo49r/Pjxwvjx442K+9Of/iQpGzRokPDWW28ZjM/OzhYACD/99JNYtm/fPsHe3l64e/euIAiCMHToUGH27Nni+urqakGlUgmbNm0SBOH+8Xp6egrV1dVizOzZs4Xf/va3evt7+NzU+vvf/y54eXkJFRUVBttZVFQkABDOnDkjln355ZeCo6OjwXhDTGlnrR49egj/8z//I76+deuWYGNjI3z33XcGY435m3744YdCUFBQvTGurq7CF198Ib5WKpXC3r17xdenT58WAAgFBQUGt3/llVeEQYMG6ZXXdf6JWhqvyMloTz31FPLy8nDhwgUAQGpqKvz9/ZGamgoA+Pbbb9G9e3f4+/vrbWtlZYXS0lL4+PiIVzgvvPCCXtzixYuxZcsW/Otf/8KtW7cwf/58jBgxApcuXTLYpvj4eJw6dQqnTp3Cf//7X5w5cwbXrl2TxPTr1w/vvvtuUw8fJ0+exIABAyRlAwYMQFZWVp3x9vb26NOnjyT+zp07+O9//2uwTktLSzz++ONinSdPnkT//v1haWkpqaOufRpy4MAB9O7dGy+//DLc3Nzg7++PxMREVFVVAQC6dOmC1157DR9//DHu3r2L4uJibNy4EX/84x+N3oep7SwpKcHly5clx+7s7IxevXqZdGwPS0tLQ3BwcJ3rs7KyUFxcLLmF8re//Q2bN29GcXEx7ty5g7///e8YOnQoXF1d9bbX6XTYsmULXnvttUa3kai5MZGT0Tp37ozBgwdj//79qKqqwsGDB7FkyRLs378fALB//34888wzTdrH+++/j2XLluHRRx+FpaUl/vCHP+CJJ57AZ599phdbU1ODzZs3Y9GiRejWrRscHBzw/vvvi12itU6dOoU5c+Y0qV3A/Q9xZ2dnSZmzszN0Op1J8bXrjKnT1H0aUlBQgG+//RbBwcG4du0adu/ejc2bN2PZsmVizMiRI5GSkgJHR0e4uLggOztbsr4hjTk3tTHGbtOQZcuW4YcffsCSJUsMrtdqtXjxxRfxxhtvSL5sajQa5OTkwMXFBZ07d8a+ffuwbt06g3Vs3rwZtra2GDVqVKPaSNQSmMjJJM888wxSU1ORkZEBX19fREdH4+eff0ZBQQFSU1PFRP7OO+9IBgcZIz8/HzqdDn/4wx/g7OwsLocOHdK7ygaAmzdvoqKiAn5+fmJZ586d4ebm1uTjfOyxx8S2v/rqqwAApVKJkpISSVxxcTGUSqXBOuqKr11nTJ2m7tOQzp07o1u3bpg5cyZsbW3x2GOP4bXXXsPu3bsB3B/spdFo8Ne//hV37tzB7du3ERUVhSFDhkgGjtWnMecGQJOPrdby5cuxYsUKHDhwAD4+Pnrrr127hqeeegqRkZFISkqS7O+3v/0tNBoNbt++jTt37mD27Nn4zW9+A61Wq1fPRx99hIkTJ0KhUJjcRqKWwkROJnnmmWdw8OBBfPXVVxg+fDisra3x5JNP4pNPPsEvv/yCYcOGAQDmzZsnGSBkDGdnZygUCqSkpKC4uFhcysrK8NFHH+nFu7u7w9bWFjk5OWJZaWkpCgoKmnycZ8+eFdteOxo5ODgYx44dk8TVPk5nSHBwMMrKynD+/HlJvJ2dHR599FGDddbU1ODEiRNincHBwThx4oQ4AKyhfRry+OOP65VZWFiI/33q1Ck4OjrilVdega2tLRwcHPDGG2/g4sWL+Omnn4zah6ntdHJyQo8ePS
...(remainder of the base64-encoded PNG image data omitted; the rendered output is the w-field contour plot produced by the plotting code in this cell)...", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# plot result\n", + "num_cord0 = 101\n", + "num_cord1 = 101\n", + "num_cords = num_cord0 * num_cord1\n", + "print(f\"num_cords = {num_cords}\")\n", + "x, y = np.meshgrid(\n", + "    np.linspace(\n", + "        start=-Lx / 2, stop=Lx / 2, num=num_cord0, endpoint=True, dtype=\"float32\"\n", + "    ),\n", + "    np.linspace(\n", + "        start=-Ly / 2, stop=Ly / 2, num=num_cord1, endpoint=True, dtype=\"float32\"\n", + "    ),\n", + ")\n", + "x = x.ravel()\n", + "y = y.ravel()\n", + "# predict solution of w(x, y) on the 2D grid\n", + "w_pred = solver.predict({\"x\": x[:, None], \"y\": y[:, None]}, return_numpy=True)[\"w\"]\n", + "fig = plt.figure(100, figsize=(5, 4))\n", + "y_min = w_pred.min(axis=(0,))[0]\n", + "y_max = w_pred.max(axis=(0,))[0]\n", + "ax1 = plt.subplot(1, 1, 1)\n", + "plt.tricontourf(x, y, w_pred[:, 0], levels=30, cmap=\"rainbow\")\n", + "print(x.shape, y.shape, w_pred.shape)\n", + "cb1 = plt.colorbar()\n", + "plt.axis(\"equal\")\n", + "plt.xlabel(\"$x (m)$\")\n", + "plt.ylabel(\"$y (m)$\")\n", + "plt.title(f\"w-field: [{y_min:.6f}, {y_max:.6f}]\", fontsize=9.5)\n", + "plt.show()\n", + "# plt.savefig(\"./result.jpg\")\n", + "# print(\"saved matplotlib to: ./result.jpg\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 7. Comparison with Finite Element Results\n", + "\n", + "The comparison shows that the results of the PINN method and the finite element method are essentially consistent. The FEM calculation uses the same plate geometry, material parameters, load, and boundary conditions as the thin plate described in Section 2, and the FEM software used is SIPESC2022. The maximum deflection computed by the PINN method is 12.2 mm, the maximum deflection computed by the FEM is 12.2 mm, and the difference between the two results is very small.\n", + "\"FEM_result\"\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "conda_py310", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/rossler/conf/enn.yaml b/examples/rossler/conf/enn.yaml index b5b7ec2e9a..a541644cd6 100644 --- a/examples/rossler/conf/enn.yaml +++ b/examples/rossler/conf/enn.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -52,3 +53,58 @@ TRAIN: EVAL: batch_size: 8 pretrained_model_path: null +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_rossler_enn + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 6 +output_dir: ${hydra:run.dir} +TRAIN_BLOCK_SIZE: 16 +VALID_BLOCK_SIZE: 32 +TRAIN_FILE_PATH: ./datasets/rossler_training.hdf5 +VALID_FILE_PATH: ./datasets/rossler_valid.hdf5 + +# model settings +MODEL: + input_keys: ["states"] + output_keys: ["pred_states", "recover_states"] + +# training settings +TRAIN: + epochs: 300 + batch_size: 256 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.001 + gamma: 0.995 + by_epoch: true + optimizer: + weight_decay: 1e-8 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 8 + pretrained_model_path: null +>>>>>>> Stashed changes diff --git
a/examples/rossler/conf/transformer.yaml b/examples/rossler/conf/transformer.yaml index a5828a0403..b865e39036 100644 --- a/examples/rossler/conf/transformer.yaml +++ b/examples/rossler/conf/transformer.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -91,3 +92,88 @@ INFER: max_batch_size: 64 num_cpu_threads: 4 batch_size: 16 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_rossler_transformer/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - mode + - output_dir + - log_freq + - EMBEDDING_MODEL_PATH + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 +TRAIN_BLOCK_SIZE: 32 +VALID_BLOCK_SIZE: 256 +TRAIN_FILE_PATH: ./datasets/rossler_training.hdf5 +VALID_FILE_PATH: ./datasets/rossler_valid.hdf5 + +# set working condition +EMBEDDING_MODEL_PATH: ./outputs_rossler_enn/checkpoints/latest +VIS_DATA_NUMS: 16 + +# model settings +MODEL: + input_keys: ["embeds"] + output_keys: ["pred_embeds"] + num_layers: 4 + num_ctx: 64 + embed_size: 32 + num_heads: 4 + +# training settings +TRAIN: + epochs: 200 + batch_size: 64 + lr_scheduler: + epochs: ${TRAIN.epochs} + learning_rate: 0.001 + T_0: 14 + T_mult: 2 + eta_min: 1.0e-9 + optimizer: + weight_decay: 1.0e-8 + eval_during_train: true + eval_freq: 50 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + batch_size: 16 + pretrained_model_path: null + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/rossler/rossler_transformer_pretrained.pdparams + export_path: ./inference/rossler_transformer + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: false + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 64 + num_cpu_threads: 4 + batch_size: 16 +>>>>>>> Stashed changes diff --git a/examples/rossler/train_enn.py b/examples/rossler/train_enn.py index c26dcb89ec..55d8d079c5 100644 --- a/examples/rossler/train_enn.py +++ b/examples/rossler/train_enn.py @@ -1,261 +1,261 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Two-stage training -# 1. Train a embedding model by running train_enn.py. -# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. - -# This file is for step1: training a embedding model. -# This file is based on PaddleScience/ppsci API. 
-from os import path as osp - -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import logger - - -def get_mean_std(data: np.ndarray): - mean = np.asarray( - [np.mean(data[:, :, 0]), np.mean(data[:, :, 1]), np.min(data[:, :, 2])] - ).reshape(1, 3) - std = np.asarray( - [ - np.std(data[:, :, 0]), - np.std(data[:, :, 1]), - np.max(data[:, :, 2]) - np.min(data[:, :, 2]), - ] - ).reshape(1, 3) - return mean, std - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e3 * cfg.TRAIN_BLOCK_SIZE) - regularization_key = "k_matrix" - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "RosslerDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 16, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELossWithL2Decay( - regularization_dict={regularization_key: 1e-1 * (cfg.TRAIN_BLOCK_SIZE - 1)} - ), - { - key: lambda out, k=key: out[k] - for key in cfg.MODEL.output_keys + (regularization_key,) - }, - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(sup_constraint.data_loader) - - # manually init model - data_mean, data_std = get_mean_std(sup_constraint.data_loader.dataset.data) - model = ppsci.arch.RosslerEmbedding( - cfg.MODEL.input_keys, - cfg.MODEL.output_keys + (regularization_key,), - data_mean, - data_std, - ) - - # init optimizer and lr scheduler - clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) - lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( - iters_per_epoch=ITERS_PER_EPOCH, - decay_steps=ITERS_PER_EPOCH, - **cfg.TRAIN.lr_scheduler, - )() - optimizer = ppsci.optimizer.Adam( - lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer - )(model) - - # manually build validator - weights = (1.0 * (cfg.VALID_BLOCK_SIZE - 1), 1.0e4 * cfg.VALID_BLOCK_SIZE) - eval_dataloader_cfg = { - "dataset": { - "name": "RosslerDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 32, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=True, - validator=validator, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # set random seed for 
reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e3 * cfg.TRAIN_BLOCK_SIZE) - regularization_key = "k_matrix" - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "RosslerDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 16, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELossWithL2Decay( - regularization_dict={regularization_key: 1e-1 * (cfg.TRAIN_BLOCK_SIZE - 1)} - ), - { - key: lambda out, k=key: out[k] - for key in cfg.MODEL.output_keys + (regularization_key,) - }, - name="Sup", - ) - - # manually init model - data_mean, data_std = get_mean_std(sup_constraint.data_loader.dataset.data) - model = ppsci.arch.RosslerEmbedding( - cfg.MODEL.input_keys, - cfg.MODEL.output_keys + (regularization_key,), - data_mean, - data_std, - ) - - # manually build validator - weights = (1.0 * (cfg.VALID_BLOCK_SIZE - 1), 1.0e4 * cfg.VALID_BLOCK_SIZE) - eval_dataloader_cfg = { - "dataset": { - "name": "RosslerDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 32, - "weight_dict": { - key: value for key, value in zip(cfg.MODEL.output_keys, weights) - }, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="enn.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Two-stage training +# 1. Train a embedding model by running train_enn.py. +# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. + +# This file is for step1: training a embedding model. 
+# This file is based on PaddleScience/ppsci API. +from os import path as osp + +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + + +def get_mean_std(data: np.ndarray): + mean = np.asarray( + [np.mean(data[:, :, 0]), np.mean(data[:, :, 1]), np.min(data[:, :, 2])] + ).reshape(1, 3) + std = np.asarray( + [ + np.std(data[:, :, 0]), + np.std(data[:, :, 1]), + np.max(data[:, :, 2]) - np.min(data[:, :, 2]), + ] + ).reshape(1, 3) + return mean, std + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e3 * cfg.TRAIN_BLOCK_SIZE) + regularization_key = "k_matrix" + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "RosslerDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 16, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELossWithL2Decay( + regularization_dict={regularization_key: 1e-1 * (cfg.TRAIN_BLOCK_SIZE - 1)} + ), + { + key: lambda out, k=key: out[k] + for key in cfg.MODEL.output_keys + (regularization_key,) + }, + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(sup_constraint.data_loader) + + # manually init model + data_mean, data_std = get_mean_std(sup_constraint.data_loader.dataset.data) + model = ppsci.arch.RosslerEmbedding( + cfg.MODEL.input_keys, + cfg.MODEL.output_keys + (regularization_key,), + data_mean, + data_std, + ) + + # init optimizer and lr scheduler + clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + iters_per_epoch=ITERS_PER_EPOCH, + decay_steps=ITERS_PER_EPOCH, + **cfg.TRAIN.lr_scheduler, + )() + optimizer = ppsci.optimizer.Adam( + lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer + )(model) + + # manually build validator + weights = (1.0 * (cfg.VALID_BLOCK_SIZE - 1), 1.0e4 * cfg.VALID_BLOCK_SIZE) + eval_dataloader_cfg = { + "dataset": { + "name": "RosslerDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 32, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=True, + validator=validator, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + + +def 
evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + weights = (1.0 * (cfg.TRAIN_BLOCK_SIZE - 1), 1.0e3 * cfg.TRAIN_BLOCK_SIZE) + regularization_key = "k_matrix" + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "RosslerDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 16, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELossWithL2Decay( + regularization_dict={regularization_key: 1e-1 * (cfg.TRAIN_BLOCK_SIZE - 1)} + ), + { + key: lambda out, k=key: out[k] + for key in cfg.MODEL.output_keys + (regularization_key,) + }, + name="Sup", + ) + + # manually init model + data_mean, data_std = get_mean_std(sup_constraint.data_loader.dataset.data) + model = ppsci.arch.RosslerEmbedding( + cfg.MODEL.input_keys, + cfg.MODEL.output_keys + (regularization_key,), + data_mean, + data_std, + ) + + # manually build validator + weights = (1.0 * (cfg.VALID_BLOCK_SIZE - 1), 1.0e4 * cfg.VALID_BLOCK_SIZE) + eval_dataloader_cfg = { + "dataset": { + "name": "RosslerDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 32, + "weight_dict": { + key: value for key, value in zip(cfg.MODEL.output_keys, weights) + }, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + + +@hydra.main(version_base=None, config_path="./conf", config_name="enn.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/rossler/train_transformer.py b/examples/rossler/train_transformer.py index a58b8b8d28..3cae5f44b7 100644 --- a/examples/rossler/train_transformer.py +++ b/examples/rossler/train_transformer.py @@ -1,335 +1,335 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# Two-stage training -# 1. Train a embedding model by running train_enn.py. -# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. - -# This file is for step2: training a transformer model, based on frozen pretrained embedding model. -# This file is based on PaddleScience/ppsci API. -from os import path as osp -from typing import Dict - -import hydra -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.arch import base -from ppsci.utils import logger -from ppsci.utils import save_load - - -def build_embedding_model(embedding_model_path: str) -> ppsci.arch.RosslerEmbedding: - input_keys = ("states",) - output_keys = ("pred_states", "recover_states") - regularization_key = "k_matrix" - model = ppsci.arch.RosslerEmbedding(input_keys, output_keys + (regularization_key,)) - save_load.load_pretrain(model, embedding_model_path) - return model - - -class OutputTransform(object): - def __init__(self, model: base.Arch): - self.model = model - self.model.eval() - - def __call__(self, x: Dict[str, paddle.Tensor]): - pred_embeds = x["pred_embeds"] - pred_states = self.model.decoder(pred_embeds) - - return pred_states - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - output_transform = OutputTransform(embedding_model) - - # manually build constraint(s) - train_dataloader_cfg = { - "dataset": { - "name": "RosslerDataset", - "file_path": cfg.TRAIN_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.TRAIN_BLOCK_SIZE, - "stride": 16, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": True, - "shuffle": True, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 4, - } - - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - ppsci.loss.MSELoss(), - name="Sup", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set iters_per_epoch by dataloader length - ITERS_PER_EPOCH = len(constraint["Sup"].data_loader) - - # manually init model - model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) - - # init optimizer and lr scheduler - clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) - lr_scheduler = ppsci.optimizer.lr_scheduler.CosineWarmRestarts( - iters_per_epoch=ITERS_PER_EPOCH, **cfg.TRAIN.lr_scheduler - )() - optimizer = ppsci.optimizer.Adam( - lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer - )(model) - - # manually build validator - eval_dataloader_cfg = { - "dataset": { - "name": "RosslerDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - # set visualizer(optional) - states = 
mse_validator.data_loader.dataset.data - embedding_data = mse_validator.data_loader.dataset.embedding_data - vis_data = { - "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1, :], - "states": states[: cfg.VIS_DATA_NUMS, 1:, :], - } - - visualizer = { - "visualize_states": ppsci.visualize.VisualizerScatter3D( - vis_data, - { - "pred_states": lambda d: output_transform(d), - "states": lambda d: d["states"], - }, - num_timestamps=1, - prefix="result_states", - ) - } - - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - lr_scheduler, - cfg.TRAIN.epochs, - ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - eval_freq=cfg.TRAIN.eval_freq, - validator=validator, - visualizer=visualizer, - ) - # train model - solver.train() - # evaluate after finished training - solver.eval() - # visualize prediction after finished training - solver.visualize() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - output_transform = OutputTransform(embedding_model) - - # manually init model - model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) - - # manually build validator - eval_dataloader_cfg = { - "dataset": { - "name": "RosslerDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - "embedding_model": embedding_model, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": cfg.EVAL.batch_size, - "num_workers": 4, - } - - mse_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss(), - metric={"MSE": ppsci.metric.MSE()}, - name="MSE_Validator", - ) - validator = {mse_validator.name: mse_validator} - - # set visualizer(optional) - states = mse_validator.data_loader.dataset.data - embedding_data = mse_validator.data_loader.dataset.embedding_data - vis_datas = { - "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1, :], - "states": states[: cfg.VIS_DATA_NUMS, 1:, :], - } - - visualizer = { - "visulzie_states": ppsci.visualize.VisualizerScatter3D( - vis_datas, - { - "pred_states": lambda d: output_transform(d), - "states": lambda d: d["states"], - }, - num_timestamps=1, - prefix="result_states", - ) - } - - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - validator=validator, - visualizer=visualizer, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - solver.eval() - # visualize prediction for pretrained model(optional) - solver.visualize() - - -def export(cfg: DictConfig): - # set model - embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) - model_cfg = { - **cfg.MODEL, - "embedding_model": embedding_model, - "input_keys": ["states"], - "output_keys": ["pred_states"], - } - model = ppsci.arch.PhysformerGPT2(**model_cfg) - - # initialize solver - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - # export model - from paddle.static import InputSpec - - input_spec = [ - { - key: InputSpec([None, 255, 3], "float32", name=key) - for key in model.input_keys - }, - ] - - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - predictor = 
pinn_predictor.PINNPredictor(cfg) - - dataset_cfg = { - "name": "RosslerDataset", - "file_path": cfg.VALID_FILE_PATH, - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.output_keys, - "block_size": cfg.VALID_BLOCK_SIZE, - "stride": 1024, - } - - dataset = ppsci.data.dataset.build_dataset(dataset_cfg) - - input_dict = { - "states": dataset.data[: cfg.VIS_DATA_NUMS, :-1, :], - } - - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output_keys = ["pred_states"] - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(output_keys, output_dict.keys()) - } - - input_dict = { - "states": dataset.data[: cfg.VIS_DATA_NUMS, 1:, :], - } - - data_dict = {**input_dict, **output_dict} - for i in range(cfg.VIS_DATA_NUMS): - ppsci.visualize.save_plot_from_3d_dict( - f"./rossler_transformer_pred_{i}", - {key: value[i] for key, value in data_dict.items()}, - ("states", "pred_states"), - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="transformer.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Two-stage training +# 1. Train a embedding model by running train_enn.py. +# 2. Load pretrained embedding model and freeze it, then train a transformer model by running train_transformer.py. + +# This file is for step2: training a transformer model, based on frozen pretrained embedding model. +# This file is based on PaddleScience/ppsci API. 
+from os import path as osp +from typing import Dict + +import hydra +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.arch import base +from ppsci.utils import logger +from ppsci.utils import save_load + + +def build_embedding_model(embedding_model_path: str) -> ppsci.arch.RosslerEmbedding: + input_keys = ("states",) + output_keys = ("pred_states", "recover_states") + regularization_key = "k_matrix" + model = ppsci.arch.RosslerEmbedding(input_keys, output_keys + (regularization_key,)) + save_load.load_pretrain(model, embedding_model_path) + return model + + +class OutputTransform(object): + def __init__(self, model: base.Arch): + self.model = model + self.model.eval() + + def __call__(self, x: Dict[str, paddle.Tensor]): + pred_embeds = x["pred_embeds"] + pred_states = self.model.decoder(pred_embeds) + + return pred_states + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + output_transform = OutputTransform(embedding_model) + + # manually build constraint(s) + train_dataloader_cfg = { + "dataset": { + "name": "RosslerDataset", + "file_path": cfg.TRAIN_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.TRAIN_BLOCK_SIZE, + "stride": 16, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 4, + } + + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.MSELoss(), + name="Sup", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set iters_per_epoch by dataloader length + ITERS_PER_EPOCH = len(constraint["Sup"].data_loader) + + # manually init model + model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) + + # init optimizer and lr scheduler + clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=0.1) + lr_scheduler = ppsci.optimizer.lr_scheduler.CosineWarmRestarts( + iters_per_epoch=ITERS_PER_EPOCH, **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam( + lr_scheduler, grad_clip=clip, **cfg.TRAIN.optimizer + )(model) + + # manually build validator + eval_dataloader_cfg = { + "dataset": { + "name": "RosslerDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + # set visualizer(optional) + states = mse_validator.data_loader.dataset.data + embedding_data = mse_validator.data_loader.dataset.embedding_data + vis_data = { + "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1, :], + "states": states[: cfg.VIS_DATA_NUMS, 1:, :], + } + + visualizer = { + "visualize_states": ppsci.visualize.VisualizerScatter3D( + vis_data, + { + "pred_states": lambda d: output_transform(d), + "states": lambda d: d["states"], + }, + num_timestamps=1, + prefix="result_states", + ) + } + + solver 
= ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + lr_scheduler, + cfg.TRAIN.epochs, + ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + eval_freq=cfg.TRAIN.eval_freq, + validator=validator, + visualizer=visualizer, + ) + # train model + solver.train() + # evaluate after finished training + solver.eval() + # visualize prediction after finished training + solver.visualize() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + output_transform = OutputTransform(embedding_model) + + # manually init model + model = ppsci.arch.PhysformerGPT2(**cfg.MODEL) + + # manually build validator + eval_dataloader_cfg = { + "dataset": { + "name": "RosslerDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + "embedding_model": embedding_model, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": cfg.EVAL.batch_size, + "num_workers": 4, + } + + mse_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss(), + metric={"MSE": ppsci.metric.MSE()}, + name="MSE_Validator", + ) + validator = {mse_validator.name: mse_validator} + + # set visualizer(optional) + states = mse_validator.data_loader.dataset.data + embedding_data = mse_validator.data_loader.dataset.embedding_data + vis_datas = { + "embeds": embedding_data[: cfg.VIS_DATA_NUMS, :-1, :], + "states": states[: cfg.VIS_DATA_NUMS, 1:, :], + } + + visualizer = { + "visulzie_states": ppsci.visualize.VisualizerScatter3D( + vis_datas, + { + "pred_states": lambda d: output_transform(d), + "states": lambda d: d["states"], + }, + num_timestamps=1, + prefix="result_states", + ) + } + + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + validator=validator, + visualizer=visualizer, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + solver.eval() + # visualize prediction for pretrained model(optional) + solver.visualize() + + +def export(cfg: DictConfig): + # set model + embedding_model = build_embedding_model(cfg.EMBEDDING_MODEL_PATH) + model_cfg = { + **cfg.MODEL, + "embedding_model": embedding_model, + "input_keys": ["states"], + "output_keys": ["pred_states"], + } + model = ppsci.arch.PhysformerGPT2(**model_cfg) + + # initialize solver + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + # export model + from paddle.static import InputSpec + + input_spec = [ + { + key: InputSpec([None, 255, 3], "float32", name=key) + for key in model.input_keys + }, + ] + + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + dataset_cfg = { + "name": "RosslerDataset", + "file_path": cfg.VALID_FILE_PATH, + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.output_keys, + "block_size": cfg.VALID_BLOCK_SIZE, + "stride": 1024, + } + + dataset = ppsci.data.dataset.build_dataset(dataset_cfg) + + input_dict = { + "states": dataset.data[: cfg.VIS_DATA_NUMS, :-1, :], + } + + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to 
cfg.INFER.output_keys + output_keys = ["pred_states"] + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(output_keys, output_dict.keys()) + } + + input_dict = { + "states": dataset.data[: cfg.VIS_DATA_NUMS, 1:, :], + } + + data_dict = {**input_dict, **output_dict} + for i in range(cfg.VIS_DATA_NUMS): + ppsci.visualize.save_plot_from_3d_dict( + f"./rossler_transformer_pred_{i}", + {key: value[i] for key, value in data_dict.items()}, + ("states", "pred_states"), + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="transformer.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml index 0f9a948571..80074c68d5 100644 --- a/examples/shock_wave/conf/shock_wave_Ma0.728.yaml +++ b/examples/shock_wave/conf/shock_wave_Ma0.728.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -88,3 +89,96 @@ INFER: max_batch_size: 256 num_cpu_threads: 4 batch_size: 256 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_shock_wave/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - INFER.export_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +MA: 0.728 + +# set hyper-parameters +Lt: 0.4 +Lx: 1.5 +Ly: 2.0 +rx: 1.0 +ry: 1.0 +rd: 0.25 +N_INTERIOR: 100000 +N_BOUNDARY: 10000 +RHO1: 2.112 +P1: 3.001 +GAMMA: 1.4 +V1: 0.0 + +# visualize prediction +Nd: 600 +T: 0.4 + +# model settings +MODEL: + input_keys: ["t", "x", "y"] + output_keys: ["u", "v", "p", "rho"] + num_layers: 9 + hidden_size: 90 + activation: "tanh" + +# training settings +TRAIN: + epochs: 100 + iters_per_epoch: 1 + save_freq: 50 + eval_during_train: false + eval_freq: 20 + learning_rate: 1e-1 + max_iter: 100 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/shockwave/shock_wave_Ma0728_pretrained.pdparams + export_path: ./inference/shock_wave_Ma0.728 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 256 + num_cpu_threads: 4 + batch_size: 256 +>>>>>>> Stashed changes diff --git a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml index a1812a8237..35556146bb 100644 --- a/examples/shock_wave/conf/shock_wave_Ma2.0.yaml +++ 
b/examples/shock_wave/conf/shock_wave_Ma2.0.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -88,3 +89,96 @@ INFER: max_batch_size: 256 num_cpu_threads: 4 batch_size: 256 +======= +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_shock_wave/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - INFER.export_path + - mode + - output_dir + - log_freq + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 42 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# set working condition +MA: 2.0 + +# set hyper-parameters +Lt: 0.4 +Lx: 1.5 +Ly: 2.0 +rx: 1.0 +ry: 1.0 +rd: 0.25 +N_INTERIOR: 100000 +N_BOUNDARY: 10000 +RHO1: 2.112 +P1: 3.001 +GAMMA: 1.4 +V1: 0.0 + +# visualize prediction +Nd: 600 +T: 0.4 + +# model settings +MODEL: + input_keys: ["t", "x", "y"] + output_keys: ["u", "v", "p", "rho"] + num_layers: 9 + hidden_size: 90 + activation: "tanh" + +# training settings +TRAIN: + epochs: 100 + iters_per_epoch: 1 + save_freq: 50 + eval_during_train: false + eval_freq: 20 + learning_rate: 1e-1 + max_iter: 100 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/shockwave/shock_wave_Ma2_pretrained.pdparams + export_path: ./inference/shock_wave_Ma2.0 + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 256 + num_cpu_threads: 4 + batch_size: 256 +>>>>>>> Stashed changes diff --git a/examples/shock_wave/lhs.py b/examples/shock_wave/lhs.py index 9efcbbd1bd..f63392dbc3 100644 --- a/examples/shock_wave/lhs.py +++ b/examples/shock_wave/lhs.py @@ -1,113 +1,113 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import paddle - -dtype = paddle.get_default_dtype() - - -def _partition(n_sample: int, bounds: np.ndarray) -> np.ndarray: - """为各变量的变量区间按样本数量进行划分,返回划分后的各变量区间矩阵 - - Args: - n_sample (int): Number of samples. - bounds (np.ndarray): Lower and upper bound of each variable with shape [m, 2]. - - Returns: - np.ndarray: Partition range array with shape [m, n, 2]. - """ - tmp = np.arange(n_sample, dtype=dtype) # [0,1,...,n-1]. 
- coefficient_lower = np.stack([1 - tmp / n_sample, tmp / n_sample], axis=1,).astype( - dtype - ) # [n, 2] - coefficient_upper = np.stack( - [1 - (tmp + 1) / n_sample, (tmp + 1) / n_sample], - axis=1, - ).astype( - dtype - ) # [n, 2] - partition_lower = coefficient_lower @ bounds.T - partition_upper = coefficient_upper @ bounds.T - - partition_range = np.dstack((partition_lower.T, partition_upper.T)).astype(dtype) - return partition_range - - -def _representative(partition_range: np.ndarray) -> np.ndarray: - """Compute single representative factor. - - Args: - partition_range (np.ndarray): Partition range array with shape [m, n, 2]. - - Returns: - np.ndarray: Matrix of random representative factor with shape [n, m]. - """ - nvar = partition_range.shape[0] - nsample = partition_range.shape[1] - - coefficient_random = np.zeros((nvar, nsample, 2), dtype) - coefficient_random[:, :, 1] = np.random.random((nvar, nsample)) - coefficient_random[:, :, 0] = 1 - coefficient_random[:, :, 1] - - inv_map_arr = partition_range * coefficient_random - - representative_random = inv_map_arr.sum(axis=2).T - return representative_random - - -def _shuffle(array: np.ndarray) -> np.ndarray: - """Shuffle samples for each variable. - - Args: - array (np.ndarray): Array to be shuffled wit shape [n, m]. - - Returns: - np.ndarray: Shuffled array. - """ - for i in range(array.shape[1]): - np.random.shuffle(array[:, i]) - return array - - -def _parameter_array(n_sample: int, bounds: np.ndarray) -> np.ndarray: - """Compute parameters matrix for given number of samples. - - Args: - n_sample (int): Number of samples. - bounds (np.ndarray): Lower and upper bound of each variable with shape [m, 2]. - - Returns: - np.ndarray: Parameters matrix. - """ - arr = _partition(n_sample, bounds) # [m, n, 2] - parameters_matrix = _shuffle(_representative(arr)) - return parameters_matrix - - -class LHS: - """Latin hypercube sampling. - - Args: - n_sample (int): Number of samples. - bounds (np.ndarray): Lower and upper bounds of each variable with shape [m, 2]. - """ - - def __init__(self, n_sample: int, bounds: np.ndarray): - self.nsample = n_sample - self.bounds = bounds - self.parameter_array = _parameter_array(n_sample, bounds) - - def get_sample(self): - return self.parameter_array +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import paddle + +dtype = paddle.get_default_dtype() + + +def _partition(n_sample: int, bounds: np.ndarray) -> np.ndarray: + """为各变量的变量区间按样本数量进行划分,返回划分后的各变量区间矩阵 + + Args: + n_sample (int): Number of samples. + bounds (np.ndarray): Lower and upper bound of each variable with shape [m, 2]. + + Returns: + np.ndarray: Partition range array with shape [m, n, 2]. + """ + tmp = np.arange(n_sample, dtype=dtype) # [0,1,...,n-1]. 
+ coefficient_lower = np.stack([1 - tmp / n_sample, tmp / n_sample], axis=1,).astype( + dtype + ) # [n, 2] + coefficient_upper = np.stack( + [1 - (tmp + 1) / n_sample, (tmp + 1) / n_sample], + axis=1, + ).astype( + dtype + ) # [n, 2] + partition_lower = coefficient_lower @ bounds.T + partition_upper = coefficient_upper @ bounds.T + + partition_range = np.dstack((partition_lower.T, partition_upper.T)).astype(dtype) + return partition_range + + +def _representative(partition_range: np.ndarray) -> np.ndarray: + """Compute single representative factor. + + Args: + partition_range (np.ndarray): Partition range array with shape [m, n, 2]. + + Returns: + np.ndarray: Matrix of random representative factor with shape [n, m]. + """ + nvar = partition_range.shape[0] + nsample = partition_range.shape[1] + + coefficient_random = np.zeros((nvar, nsample, 2), dtype) + coefficient_random[:, :, 1] = np.random.random((nvar, nsample)) + coefficient_random[:, :, 0] = 1 - coefficient_random[:, :, 1] + + inv_map_arr = partition_range * coefficient_random + + representative_random = inv_map_arr.sum(axis=2).T + return representative_random + + +def _shuffle(array: np.ndarray) -> np.ndarray: + """Shuffle samples for each variable. + + Args: + array (np.ndarray): Array to be shuffled wit shape [n, m]. + + Returns: + np.ndarray: Shuffled array. + """ + for i in range(array.shape[1]): + np.random.shuffle(array[:, i]) + return array + + +def _parameter_array(n_sample: int, bounds: np.ndarray) -> np.ndarray: + """Compute parameters matrix for given number of samples. + + Args: + n_sample (int): Number of samples. + bounds (np.ndarray): Lower and upper bound of each variable with shape [m, 2]. + + Returns: + np.ndarray: Parameters matrix. + """ + arr = _partition(n_sample, bounds) # [m, n, 2] + parameters_matrix = _shuffle(_representative(arr)) + return parameters_matrix + + +class LHS: + """Latin hypercube sampling. + + Args: + n_sample (int): Number of samples. + bounds (np.ndarray): Lower and upper bounds of each variable with shape [m, 2]. + """ + + def __init__(self, n_sample: int, bounds: np.ndarray): + self.nsample = n_sample + self.bounds = bounds + self.parameter_array = _parameter_array(n_sample, bounds) + + def get_sample(self): + return self.parameter_array diff --git a/examples/shock_wave/shock_wave.py b/examples/shock_wave/shock_wave.py index ec7225b739..28e9ca0bd0 100644 --- a/examples/shock_wave/shock_wave.py +++ b/examples/shock_wave/shock_wave.py @@ -1,646 +1,646 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from os import path as osp - -import hydra -import lhs -import numpy as np -import paddle -from matplotlib import pyplot as plt -from omegaconf import DictConfig - -import ppsci -from ppsci import equation -from ppsci.autodiff import jacobian -from ppsci.utils import logger -from ppsci.utils import misc - - -class Euler2D(equation.PDE): - def __init__(self): - super().__init__() - # HACK: solver will be added here for tracking run-time epoch to - # compute loss factor `relu` dynamically. - self.solver: ppsci.solver.Solver = None - - def continuity_compute_func(out): - relu = max( - 0.0, - (self.solver.global_step // self.solver.iters_per_epoch + 1) - / self.solver.epochs - - 0.05, - ) - t, x, y = out["t"], out["x"], out["y"] - u, v, rho = out["u"], out["v"], out["rho"] - rho__t = jacobian(rho, t) - rho_u = rho * u - rho_v = rho * v - rho_u__x = jacobian(rho_u, x) - rho_v__y = jacobian(rho_v, y) - - u__x = jacobian(u, x) - v__y = jacobian(v, y) - delta_u = u__x + v__y - nab = paddle.abs(delta_u) - delta_u - lam = (0.1 * nab) * relu + 1 - continuity = (rho__t + rho_u__x + rho_v__y) / lam - return continuity - - self.add_equation("continuity", continuity_compute_func) - - def x_momentum_compute_func(out): - relu = max( - 0.0, - (self.solver.global_step // self.solver.iters_per_epoch + 1) - / self.solver.epochs - - 0.05, - ) - t, x, y = out["t"], out["x"], out["y"] - u, v, p, rho = out["u"], out["v"], out["p"], out["rho"] - rho_u = rho * u - rho_u__t = jacobian(rho_u, t) - - u1 = rho * u**2 + p - u2 = rho * u * v - u1__x = jacobian(u1, x) - u2__y = jacobian(u2, y) - - u__x = jacobian(u, x) - v__y = jacobian(v, y) - delta_u = u__x + v__y - nab = paddle.abs(delta_u) - delta_u - lam = (0.1 * nab) * relu + 1 - x_momentum = (rho_u__t + u1__x + u2__y) / lam - return x_momentum - - self.add_equation("x_momentum", x_momentum_compute_func) - - def y_momentum_compute_func(out): - relu = max( - 0.0, - (self.solver.global_step // self.solver.iters_per_epoch + 1) - / self.solver.epochs - - 0.05, - ) - t, x, y = out["t"], out["x"], out["y"] - u, v, p, rho = out["u"], out["v"], out["p"], out["rho"] - rho_v = rho * v - rho_v__t = jacobian(rho_v, t) - - u2 = rho * u * v - u3 = rho * v**2 + p - u2__x = jacobian(u2, x) - u3__y = jacobian(u3, y) - - u__x = jacobian(u, x) - v__y = jacobian(v, y) - delta_u = u__x + v__y - nab = paddle.abs(delta_u) - delta_u - lam = (0.1 * nab) * relu + 1 - y_momentum = (rho_v__t + u2__x + u3__y) / lam - return y_momentum - - self.add_equation("y_momentum", y_momentum_compute_func) - - def energy_compute_func(out): - relu = max( - 0.0, - (self.solver.global_step // self.solver.iters_per_epoch + 1) - / self.solver.epochs - - 0.05, - ) - t, x, y = out["t"], out["x"], out["y"] - u, v, p, rho = out["u"], out["v"], out["p"], out["rho"] - e1 = (rho * 0.5 * (u**2 + v**2) + 3.5 * p) * u - e2 = (rho * 0.5 * (u**2 + v**2) + 3.5 * p) * v - e = rho * 0.5 * (u**2 + v**2) + p / 0.4 - - e1__x = jacobian(e1, x) - e2__y = jacobian(e2, y) - e__t = jacobian(e, t) - - u__x = jacobian(u, x) - v__y = jacobian(v, y) - delta_u = u__x + v__y - nab = paddle.abs(delta_u) - delta_u - lam = (0.1 * nab) * relu + 1 - energy = (e__t + e1__x + e2__y) / lam - return energy - - self.add_equation("energy", energy_compute_func) - - -class BC_EQ(equation.PDE): - def __init__(self): - super().__init__() - # HACK: solver will be added here for tracking run-time epoch to - # compute loss factor `relu` dynamically. 
- self.solver: ppsci.solver.Solver = None - - def item1_compute_func(out): - relu = max( - 0.0, - (self.solver.global_step // self.solver.iters_per_epoch + 1) - / self.solver.epochs - - 0.05, - ) - x, y = out["x"], out["y"] - u, v = out["u"], out["v"] - sin, cos = out["sin"], out["cos"] - u__x = jacobian(u, x) - v__y = jacobian(v, y) - delta_u = u__x + v__y - - lam = 0.1 * (paddle.abs(delta_u) - delta_u) * relu + 1 - item1 = (u * cos + v * sin) / lam - - return item1 - - self.add_equation("item1", item1_compute_func) - - def item2_compute_func(out): - relu = max( - 0.0, - (self.solver.global_step // self.solver.iters_per_epoch + 1) - / self.solver.epochs - - 0.05, - ) - x, y = out["x"], out["y"] - u, v, p = out["u"], out["v"], out["p"] - sin, cos = out["sin"], out["cos"] - p__x = jacobian(p, x) - p__y = jacobian(p, y) - u__x = jacobian(u, x) - v__y = jacobian(v, y) - delta_u = u__x + v__y - - lam = 0.1 * (paddle.abs(delta_u) - delta_u) * relu + 1 - item2 = (p__x * cos + p__y * sin) / lam - - return item2 - - self.add_equation("item2", item2_compute_func) - - def item3_compute_func(out): - relu = max( - 0.0, - (self.solver.global_step // self.solver.iters_per_epoch + 1) - / self.solver.epochs - - 0.05, - ) - x, y = out["x"], out["y"] - u, v, rho = out["u"], out["v"], out["rho"] - sin, cos = out["sin"], out["cos"] - u__x = jacobian(u, x) - v__y = jacobian(v, y) - rho__x = jacobian(rho, x) - rho__y = jacobian(rho, y) - delta_u = u__x + v__y - - lam = 0.1 * (paddle.abs(delta_u) - delta_u) * relu + 1 - item3 = (rho__x * cos + rho__y * sin) / lam - - return item3 - - self.add_equation("item3", item3_compute_func) - - -dtype = paddle.get_default_dtype() - - -def generate_bc_down_circle_points(t: float, xc: float, yc: float, r: float, n: int): - rand_arr1 = np.random.randn(n, 1).astype(dtype) - theta = 2 * np.pi * rand_arr1 - cos = np.cos(np.pi / 2 + theta) - sin = np.sin(np.pi / 2 + theta) - - rand_arr2 = np.random.randn(n, 1).astype(dtype) - x = np.concatenate([rand_arr2 * t, xc + cos * r, yc + sin * r], axis=1) - - return x, sin, cos - - -def generate_bc_left_points( - x: np.ndarray, Ma: float, rho1: float, p1: float, v1: float, gamma: float -): - u1: float = np.sqrt(gamma * p1 / rho1) * Ma - u_init = np.full((x.shape[0], 1), u1, dtype) - v_init = np.full((x.shape[0], 1), v1, dtype) - p_init = np.full((x.shape[0], 1), p1, dtype) - rho_init = np.full((x.shape[0], 1), rho1, dtype) - - return u_init, v_init, p_init, rho_init - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # set equation - equation = {"Euler2D": Euler2D(), "BC_EQ": BC_EQ()} - - # Latin HyperCube Sampling - # generate PDE data - xlimits = np.array([[0.0, 0.0, 0.0], [cfg.Lt, cfg.Lx, cfg.Ly]]).T - doe_lhs = lhs.LHS(cfg.N_INTERIOR, xlimits) - x_int_train = doe_lhs.get_sample() - x_int_train = x_int_train[ - ~( - (x_int_train[:, 1] - cfg.rx) ** 2 + (x_int_train[:, 2] - cfg.ry) ** 2 - < cfg.rd**2 - ) - ] - x_int_train_dict = misc.convert_to_dict(x_int_train, cfg.MODEL.input_keys) - - y_int_train = np.zeros([len(x_int_train), len(cfg.MODEL.output_keys)], dtype) - y_int_train_dict = misc.convert_to_dict( - y_int_train, tuple(equation["Euler2D"].equations.keys()) - ) - - # generate BC data(left, right side) - xlimits = np.array([[0.0, 0.0, 0.0], [cfg.Lt, 0.0, cfg.Ly]]).T - doe_lhs = lhs.LHS(cfg.N_BOUNDARY, xlimits) 
- x_bcL_train = doe_lhs.get_sample() - x_bcL_train_dict = misc.convert_to_dict(x_bcL_train, cfg.MODEL.input_keys) - - u_bcL_train, v_bcL_train, p_bcL_train, rho_bcL_train = generate_bc_left_points( - x_bcL_train, cfg.MA, cfg.RHO1, cfg.P1, cfg.V1, cfg.GAMMA - ) - y_bcL_train = np.concatenate( - [ - u_bcL_train, - v_bcL_train, - p_bcL_train, - rho_bcL_train, - ], - axis=1, - ) - y_bcL_train_dict = misc.convert_to_dict( - y_bcL_train, - tuple(model.output_keys), - ) - - x_bcI_train, sin_bcI_train, cos_bcI_train = generate_bc_down_circle_points( - cfg.Lt, cfg.rx, cfg.ry, cfg.rd, cfg.N_BOUNDARY - ) - x_bcI_train_dict = misc.convert_to_dict( - np.concatenate([x_bcI_train, sin_bcI_train, cos_bcI_train], axis=1), - cfg.MODEL.input_keys + ["sin", "cos"], - ) - y_bcI_train_dict = misc.convert_to_dict( - np.zeros((len(x_bcI_train), 3), dtype), - ("item1", "item2", "item3"), - ) - - # generate IC data - xlimits = np.array([[0.0, 0.0, 0.0], [0.0, cfg.Lx, cfg.Ly]]).T - doe_lhs = lhs.LHS(cfg.N_BOUNDARY, xlimits) - x_ic_train = doe_lhs.get_sample() - x_ic_train = x_ic_train[ - ~( - (x_ic_train[:, 1] - cfg.rx) ** 2 + (x_ic_train[:, 2] - cfg.ry) ** 2 - < cfg.rd**2 - ) - ] - x_ic_train_dict = misc.convert_to_dict(x_ic_train, cfg.MODEL.input_keys) - U1 = np.sqrt(cfg.GAMMA * cfg.P1 / cfg.RHO1) * cfg.MA - y_ic_train = np.concatenate( - [ - np.full([len(x_ic_train), 1], U1, dtype), - np.full([len(x_ic_train), 1], 0, dtype), - np.full([len(x_ic_train), 1], cfg.P1, dtype), - np.full([len(x_ic_train), 1], cfg.RHO1, dtype), - ], - axis=1, - ) - y_ic_train_dict = misc.convert_to_dict( - y_ic_train, - model.output_keys, - ) - - # set constraints - pde_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": x_int_train_dict, - "label": y_int_train_dict, - }, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean"), - output_expr=equation["Euler2D"].equations, - name="PDE", - ) - ic_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": x_ic_train_dict, - "label": y_ic_train_dict, - }, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean", weight=10), - name="IC", - ) - bcI_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": x_bcI_train_dict, - "label": y_bcI_train_dict, - }, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean", weight=10), - output_expr=equation["BC_EQ"].equations, - name="BCI", - ) - bcL_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "IterableNamedArrayDataset", - "input": x_bcL_train_dict, - "label": y_bcL_train_dict, - }, - "iters_per_epoch": cfg.TRAIN.iters_per_epoch, - }, - ppsci.loss.MSELoss("mean", weight=10), - name="BCL", - ) - constraint = { - pde_constraint.name: pde_constraint, - ic_constraint.name: ic_constraint, - bcI_constraint.name: bcI_constraint, - bcL_constraint.name: bcL_constraint, - } - - # set optimizer - optimizer = ppsci.optimizer.LBFGS( - cfg.TRAIN.learning_rate, max_iter=cfg.TRAIN.max_iter - )(model) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - cfg.output_dir, - optimizer, - None, - cfg.TRAIN.epochs, - cfg.TRAIN.iters_per_epoch, - save_freq=cfg.TRAIN.save_freq, - log_freq=cfg.log_freq, - seed=cfg.seed, - equation=equation, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - checkpoint_path=cfg.TRAIN.checkpoint_path, - 
eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - ) - # HACK: Given entire solver to euaqtion object for tracking run-time epoch - # to compute factor `relu` dynamically. - equation["Euler2D"].solver = solver - equation["BC_EQ"].solver = solver - - # train model - solver.train() - - -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - # set model - model = ppsci.arch.MLP(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - output_dir=cfg.output_dir, - seed=cfg.seed, - eval_with_no_grad=cfg.EVAL.eval_with_no_grad, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - ) - - # visualize prediction - t = np.linspace(cfg.T, cfg.T, 1, dtype=dtype) - x = np.linspace(0.0, cfg.Lx, cfg.Nd, dtype=dtype) - y = np.linspace(0.0, cfg.Ly, cfg.Nd, dtype=dtype) - _, x_grid, y_grid = np.meshgrid(t, x, y) - - x_test = misc.cartesian_product(t, x, y) - x_test_dict = misc.convert_to_dict( - x_test, - cfg.MODEL.input_keys, - ) - - output_dict = solver.predict(x_test_dict, return_numpy=True) - u, v, p, rho = ( - output_dict["u"], - output_dict["v"], - output_dict["p"], - output_dict["rho"], - ) - - zero_mask = ( - (x_test[:, 1] - cfg.rx) ** 2 + (x_test[:, 2] - cfg.ry) ** 2 - ) < cfg.rd**2 - u[zero_mask] = 0 - v[zero_mask] = 0 - p[zero_mask] = 0 - rho[zero_mask] = 0 - - u = u.reshape(cfg.Nd, cfg.Nd) - v = v.reshape(cfg.Nd, cfg.Nd) - p = p.reshape(cfg.Nd, cfg.Nd) - rho = rho.reshape(cfg.Nd, cfg.Nd) - - fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(15, 15)) - - plt.subplot(2, 2, 1) - plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], u * 241.315, 60) - plt.title("U m/s") - plt.xlabel("x") - plt.ylabel("y") - axe = plt.gca() - axe.set_aspect(1) - plt.colorbar() - - plt.subplot(2, 2, 2) - plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], v * 241.315, 60) - plt.title("V m/s") - plt.xlabel("x") - plt.ylabel("y") - axe = plt.gca() - axe.set_aspect(1) - plt.colorbar() - - plt.subplot(2, 2, 3) - plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], p * 33775, 60) - plt.title("P Pa") - plt.xlabel("x") - plt.ylabel("y") - axe = plt.gca() - axe.set_aspect(1) - plt.colorbar() - - plt.subplot(2, 2, 4) - plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], rho * 0.58, 60) - plt.title("Rho kg/m^3") - plt.xlabel("x") - plt.ylabel("y") - axe = plt.gca() - axe.set_aspect(1) - plt.colorbar() - - plt.savefig(osp.join(cfg.output_dir, f"shock_wave(Ma_{cfg.MA:.3f}).png")) - - -def export(cfg: DictConfig): - from paddle.static import InputSpec - - # set models - model = ppsci.arch.MLP(**cfg.MODEL) - solver = ppsci.solver.Solver( - model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - - # export models - input_spec = [ - {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - from deploy.python_infer import pinn_predictor - - # set model predictor - predictor = pinn_predictor.PINNPredictor(cfg) - - # visualize prediction - t = np.linspace(cfg.T, cfg.T, 1, dtype=dtype) - x = np.linspace(0.0, cfg.Lx, cfg.Nd, dtype=dtype) - y = np.linspace(0.0, cfg.Ly, cfg.Nd, dtype=dtype) - _, x_grid, y_grid = np.meshgrid(t, x, y) - - x_test = misc.cartesian_product(t, x, y) - x_test_dict = misc.convert_to_dict( - x_test, - cfg.MODEL.input_keys, - ) - output_dict = predictor.predict( - x_test_dict, - cfg.INFER.batch_size, - ) - - # 
mapping data to cfg.MODEL.output_keys - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) - } - - u, v, p, rho = ( - output_dict["u"], - output_dict["v"], - output_dict["p"], - output_dict["rho"], - ) - - zero_mask = ( - (x_test[:, 1] - cfg.rx) ** 2 + (x_test[:, 2] - cfg.ry) ** 2 - ) < cfg.rd**2 - u[zero_mask] = 0 - v[zero_mask] = 0 - p[zero_mask] = 0 - rho[zero_mask] = 0 - - u = u.reshape(cfg.Nd, cfg.Nd) - v = v.reshape(cfg.Nd, cfg.Nd) - p = p.reshape(cfg.Nd, cfg.Nd) - rho = rho.reshape(cfg.Nd, cfg.Nd) - - fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(15, 15)) - - plt.subplot(2, 2, 1) - plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], u * 241.315, 60) - plt.title("U m/s") - plt.xlabel("x") - plt.ylabel("y") - axe = plt.gca() - axe.set_aspect(1) - plt.colorbar() - - plt.subplot(2, 2, 2) - plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], v * 241.315, 60) - plt.title("V m/s") - plt.xlabel("x") - plt.ylabel("y") - axe = plt.gca() - axe.set_aspect(1) - plt.colorbar() - - plt.subplot(2, 2, 3) - plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], p * 33775, 60) - plt.title("P Pa") - plt.xlabel("x") - plt.ylabel("y") - axe = plt.gca() - axe.set_aspect(1) - plt.colorbar() - - plt.subplot(2, 2, 4) - plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], rho * 0.58, 60) - plt.title("Rho kg/m^3") - plt.xlabel("x") - plt.ylabel("y") - axe = plt.gca() - axe.set_aspect(1) - plt.colorbar() - - plt.savefig(osp.join(cfg.output_dir, f"shock_wave(Ma_{cfg.MA:.3f}).png")) - - -@hydra.main( - version_base=None, config_path="./conf", config_name="shock_wave_Ma2.0.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp + +import hydra +import lhs +import numpy as np +import paddle +from matplotlib import pyplot as plt +from omegaconf import DictConfig + +import ppsci +from ppsci import equation +from ppsci.autodiff import jacobian +from ppsci.utils import logger +from ppsci.utils import misc + + +class Euler2D(equation.PDE): + def __init__(self): + super().__init__() + # HACK: solver will be added here for tracking run-time epoch to + # compute loss factor `relu` dynamically. 
+ self.solver: ppsci.solver.Solver = None + + def continuity_compute_func(out): + relu = max( + 0.0, + (self.solver.global_step // self.solver.iters_per_epoch + 1) + / self.solver.epochs + - 0.05, + ) + t, x, y = out["t"], out["x"], out["y"] + u, v, rho = out["u"], out["v"], out["rho"] + rho__t = jacobian(rho, t) + rho_u = rho * u + rho_v = rho * v + rho_u__x = jacobian(rho_u, x) + rho_v__y = jacobian(rho_v, y) + + u__x = jacobian(u, x) + v__y = jacobian(v, y) + delta_u = u__x + v__y + nab = paddle.abs(delta_u) - delta_u + lam = (0.1 * nab) * relu + 1 + continuity = (rho__t + rho_u__x + rho_v__y) / lam + return continuity + + self.add_equation("continuity", continuity_compute_func) + + def x_momentum_compute_func(out): + relu = max( + 0.0, + (self.solver.global_step // self.solver.iters_per_epoch + 1) + / self.solver.epochs + - 0.05, + ) + t, x, y = out["t"], out["x"], out["y"] + u, v, p, rho = out["u"], out["v"], out["p"], out["rho"] + rho_u = rho * u + rho_u__t = jacobian(rho_u, t) + + u1 = rho * u**2 + p + u2 = rho * u * v + u1__x = jacobian(u1, x) + u2__y = jacobian(u2, y) + + u__x = jacobian(u, x) + v__y = jacobian(v, y) + delta_u = u__x + v__y + nab = paddle.abs(delta_u) - delta_u + lam = (0.1 * nab) * relu + 1 + x_momentum = (rho_u__t + u1__x + u2__y) / lam + return x_momentum + + self.add_equation("x_momentum", x_momentum_compute_func) + + def y_momentum_compute_func(out): + relu = max( + 0.0, + (self.solver.global_step // self.solver.iters_per_epoch + 1) + / self.solver.epochs + - 0.05, + ) + t, x, y = out["t"], out["x"], out["y"] + u, v, p, rho = out["u"], out["v"], out["p"], out["rho"] + rho_v = rho * v + rho_v__t = jacobian(rho_v, t) + + u2 = rho * u * v + u3 = rho * v**2 + p + u2__x = jacobian(u2, x) + u3__y = jacobian(u3, y) + + u__x = jacobian(u, x) + v__y = jacobian(v, y) + delta_u = u__x + v__y + nab = paddle.abs(delta_u) - delta_u + lam = (0.1 * nab) * relu + 1 + y_momentum = (rho_v__t + u2__x + u3__y) / lam + return y_momentum + + self.add_equation("y_momentum", y_momentum_compute_func) + + def energy_compute_func(out): + relu = max( + 0.0, + (self.solver.global_step // self.solver.iters_per_epoch + 1) + / self.solver.epochs + - 0.05, + ) + t, x, y = out["t"], out["x"], out["y"] + u, v, p, rho = out["u"], out["v"], out["p"], out["rho"] + e1 = (rho * 0.5 * (u**2 + v**2) + 3.5 * p) * u + e2 = (rho * 0.5 * (u**2 + v**2) + 3.5 * p) * v + e = rho * 0.5 * (u**2 + v**2) + p / 0.4 + + e1__x = jacobian(e1, x) + e2__y = jacobian(e2, y) + e__t = jacobian(e, t) + + u__x = jacobian(u, x) + v__y = jacobian(v, y) + delta_u = u__x + v__y + nab = paddle.abs(delta_u) - delta_u + lam = (0.1 * nab) * relu + 1 + energy = (e__t + e1__x + e2__y) / lam + return energy + + self.add_equation("energy", energy_compute_func) + + +class BC_EQ(equation.PDE): + def __init__(self): + super().__init__() + # HACK: solver will be added here for tracking run-time epoch to + # compute loss factor `relu` dynamically. 
+ self.solver: ppsci.solver.Solver = None + + def item1_compute_func(out): + relu = max( + 0.0, + (self.solver.global_step // self.solver.iters_per_epoch + 1) + / self.solver.epochs + - 0.05, + ) + x, y = out["x"], out["y"] + u, v = out["u"], out["v"] + sin, cos = out["sin"], out["cos"] + u__x = jacobian(u, x) + v__y = jacobian(v, y) + delta_u = u__x + v__y + + lam = 0.1 * (paddle.abs(delta_u) - delta_u) * relu + 1 + item1 = (u * cos + v * sin) / lam + + return item1 + + self.add_equation("item1", item1_compute_func) + + def item2_compute_func(out): + relu = max( + 0.0, + (self.solver.global_step // self.solver.iters_per_epoch + 1) + / self.solver.epochs + - 0.05, + ) + x, y = out["x"], out["y"] + u, v, p = out["u"], out["v"], out["p"] + sin, cos = out["sin"], out["cos"] + p__x = jacobian(p, x) + p__y = jacobian(p, y) + u__x = jacobian(u, x) + v__y = jacobian(v, y) + delta_u = u__x + v__y + + lam = 0.1 * (paddle.abs(delta_u) - delta_u) * relu + 1 + item2 = (p__x * cos + p__y * sin) / lam + + return item2 + + self.add_equation("item2", item2_compute_func) + + def item3_compute_func(out): + relu = max( + 0.0, + (self.solver.global_step // self.solver.iters_per_epoch + 1) + / self.solver.epochs + - 0.05, + ) + x, y = out["x"], out["y"] + u, v, rho = out["u"], out["v"], out["rho"] + sin, cos = out["sin"], out["cos"] + u__x = jacobian(u, x) + v__y = jacobian(v, y) + rho__x = jacobian(rho, x) + rho__y = jacobian(rho, y) + delta_u = u__x + v__y + + lam = 0.1 * (paddle.abs(delta_u) - delta_u) * relu + 1 + item3 = (rho__x * cos + rho__y * sin) / lam + + return item3 + + self.add_equation("item3", item3_compute_func) + + +dtype = paddle.get_default_dtype() + + +def generate_bc_down_circle_points(t: float, xc: float, yc: float, r: float, n: int): + rand_arr1 = np.random.randn(n, 1).astype(dtype) + theta = 2 * np.pi * rand_arr1 + cos = np.cos(np.pi / 2 + theta) + sin = np.sin(np.pi / 2 + theta) + + rand_arr2 = np.random.randn(n, 1).astype(dtype) + x = np.concatenate([rand_arr2 * t, xc + cos * r, yc + sin * r], axis=1) + + return x, sin, cos + + +def generate_bc_left_points( + x: np.ndarray, Ma: float, rho1: float, p1: float, v1: float, gamma: float +): + u1: float = np.sqrt(gamma * p1 / rho1) * Ma + u_init = np.full((x.shape[0], 1), u1, dtype) + v_init = np.full((x.shape[0], 1), v1, dtype) + p_init = np.full((x.shape[0], 1), p1, dtype) + rho_init = np.full((x.shape[0], 1), rho1, dtype) + + return u_init, v_init, p_init, rho_init + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # set equation + equation = {"Euler2D": Euler2D(), "BC_EQ": BC_EQ()} + + # Latin HyperCube Sampling + # generate PDE data + xlimits = np.array([[0.0, 0.0, 0.0], [cfg.Lt, cfg.Lx, cfg.Ly]]).T + doe_lhs = lhs.LHS(cfg.N_INTERIOR, xlimits) + x_int_train = doe_lhs.get_sample() + x_int_train = x_int_train[ + ~( + (x_int_train[:, 1] - cfg.rx) ** 2 + (x_int_train[:, 2] - cfg.ry) ** 2 + < cfg.rd**2 + ) + ] + x_int_train_dict = misc.convert_to_dict(x_int_train, cfg.MODEL.input_keys) + + y_int_train = np.zeros([len(x_int_train), len(cfg.MODEL.output_keys)], dtype) + y_int_train_dict = misc.convert_to_dict( + y_int_train, tuple(equation["Euler2D"].equations.keys()) + ) + + # generate BC data(left, right side) + xlimits = np.array([[0.0, 0.0, 0.0], [cfg.Lt, 0.0, cfg.Ly]]).T + doe_lhs = lhs.LHS(cfg.N_BOUNDARY, xlimits) 
+ x_bcL_train = doe_lhs.get_sample() + x_bcL_train_dict = misc.convert_to_dict(x_bcL_train, cfg.MODEL.input_keys) + + u_bcL_train, v_bcL_train, p_bcL_train, rho_bcL_train = generate_bc_left_points( + x_bcL_train, cfg.MA, cfg.RHO1, cfg.P1, cfg.V1, cfg.GAMMA + ) + y_bcL_train = np.concatenate( + [ + u_bcL_train, + v_bcL_train, + p_bcL_train, + rho_bcL_train, + ], + axis=1, + ) + y_bcL_train_dict = misc.convert_to_dict( + y_bcL_train, + tuple(model.output_keys), + ) + + x_bcI_train, sin_bcI_train, cos_bcI_train = generate_bc_down_circle_points( + cfg.Lt, cfg.rx, cfg.ry, cfg.rd, cfg.N_BOUNDARY + ) + x_bcI_train_dict = misc.convert_to_dict( + np.concatenate([x_bcI_train, sin_bcI_train, cos_bcI_train], axis=1), + cfg.MODEL.input_keys + ["sin", "cos"], + ) + y_bcI_train_dict = misc.convert_to_dict( + np.zeros((len(x_bcI_train), 3), dtype), + ("item1", "item2", "item3"), + ) + + # generate IC data + xlimits = np.array([[0.0, 0.0, 0.0], [0.0, cfg.Lx, cfg.Ly]]).T + doe_lhs = lhs.LHS(cfg.N_BOUNDARY, xlimits) + x_ic_train = doe_lhs.get_sample() + x_ic_train = x_ic_train[ + ~( + (x_ic_train[:, 1] - cfg.rx) ** 2 + (x_ic_train[:, 2] - cfg.ry) ** 2 + < cfg.rd**2 + ) + ] + x_ic_train_dict = misc.convert_to_dict(x_ic_train, cfg.MODEL.input_keys) + U1 = np.sqrt(cfg.GAMMA * cfg.P1 / cfg.RHO1) * cfg.MA + y_ic_train = np.concatenate( + [ + np.full([len(x_ic_train), 1], U1, dtype), + np.full([len(x_ic_train), 1], 0, dtype), + np.full([len(x_ic_train), 1], cfg.P1, dtype), + np.full([len(x_ic_train), 1], cfg.RHO1, dtype), + ], + axis=1, + ) + y_ic_train_dict = misc.convert_to_dict( + y_ic_train, + model.output_keys, + ) + + # set constraints + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": x_int_train_dict, + "label": y_int_train_dict, + }, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean"), + output_expr=equation["Euler2D"].equations, + name="PDE", + ) + ic_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": x_ic_train_dict, + "label": y_ic_train_dict, + }, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean", weight=10), + name="IC", + ) + bcI_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": x_bcI_train_dict, + "label": y_bcI_train_dict, + }, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean", weight=10), + output_expr=equation["BC_EQ"].equations, + name="BCI", + ) + bcL_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "IterableNamedArrayDataset", + "input": x_bcL_train_dict, + "label": y_bcL_train_dict, + }, + "iters_per_epoch": cfg.TRAIN.iters_per_epoch, + }, + ppsci.loss.MSELoss("mean", weight=10), + name="BCL", + ) + constraint = { + pde_constraint.name: pde_constraint, + ic_constraint.name: ic_constraint, + bcI_constraint.name: bcI_constraint, + bcL_constraint.name: bcL_constraint, + } + + # set optimizer + optimizer = ppsci.optimizer.LBFGS( + cfg.TRAIN.learning_rate, max_iter=cfg.TRAIN.max_iter + )(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + cfg.output_dir, + optimizer, + None, + cfg.TRAIN.epochs, + cfg.TRAIN.iters_per_epoch, + save_freq=cfg.TRAIN.save_freq, + log_freq=cfg.log_freq, + seed=cfg.seed, + equation=equation, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + checkpoint_path=cfg.TRAIN.checkpoint_path, + 
eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + ) + # HACK: Given entire solver to euaqtion object for tracking run-time epoch + # to compute factor `relu` dynamically. + equation["Euler2D"].solver = solver + equation["BC_EQ"].solver = solver + + # train model + solver.train() + + +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + # set model + model = ppsci.arch.MLP(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + output_dir=cfg.output_dir, + seed=cfg.seed, + eval_with_no_grad=cfg.EVAL.eval_with_no_grad, + pretrained_model_path=cfg.EVAL.pretrained_model_path, + ) + + # visualize prediction + t = np.linspace(cfg.T, cfg.T, 1, dtype=dtype) + x = np.linspace(0.0, cfg.Lx, cfg.Nd, dtype=dtype) + y = np.linspace(0.0, cfg.Ly, cfg.Nd, dtype=dtype) + _, x_grid, y_grid = np.meshgrid(t, x, y) + + x_test = misc.cartesian_product(t, x, y) + x_test_dict = misc.convert_to_dict( + x_test, + cfg.MODEL.input_keys, + ) + + output_dict = solver.predict(x_test_dict, return_numpy=True) + u, v, p, rho = ( + output_dict["u"], + output_dict["v"], + output_dict["p"], + output_dict["rho"], + ) + + zero_mask = ( + (x_test[:, 1] - cfg.rx) ** 2 + (x_test[:, 2] - cfg.ry) ** 2 + ) < cfg.rd**2 + u[zero_mask] = 0 + v[zero_mask] = 0 + p[zero_mask] = 0 + rho[zero_mask] = 0 + + u = u.reshape(cfg.Nd, cfg.Nd) + v = v.reshape(cfg.Nd, cfg.Nd) + p = p.reshape(cfg.Nd, cfg.Nd) + rho = rho.reshape(cfg.Nd, cfg.Nd) + + fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(15, 15)) + + plt.subplot(2, 2, 1) + plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], u * 241.315, 60) + plt.title("U m/s") + plt.xlabel("x") + plt.ylabel("y") + axe = plt.gca() + axe.set_aspect(1) + plt.colorbar() + + plt.subplot(2, 2, 2) + plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], v * 241.315, 60) + plt.title("V m/s") + plt.xlabel("x") + plt.ylabel("y") + axe = plt.gca() + axe.set_aspect(1) + plt.colorbar() + + plt.subplot(2, 2, 3) + plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], p * 33775, 60) + plt.title("P Pa") + plt.xlabel("x") + plt.ylabel("y") + axe = plt.gca() + axe.set_aspect(1) + plt.colorbar() + + plt.subplot(2, 2, 4) + plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], rho * 0.58, 60) + plt.title("Rho kg/m^3") + plt.xlabel("x") + plt.ylabel("y") + axe = plt.gca() + axe.set_aspect(1) + plt.colorbar() + + plt.savefig(osp.join(cfg.output_dir, f"shock_wave(Ma_{cfg.MA:.3f}).png")) + + +def export(cfg: DictConfig): + from paddle.static import InputSpec + + # set models + model = ppsci.arch.MLP(**cfg.MODEL) + solver = ppsci.solver.Solver( + model, + pretrained_model_path=cfg.INFER.pretrained_model_path, + ) + + # export models + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + # set model predictor + predictor = pinn_predictor.PINNPredictor(cfg) + + # visualize prediction + t = np.linspace(cfg.T, cfg.T, 1, dtype=dtype) + x = np.linspace(0.0, cfg.Lx, cfg.Nd, dtype=dtype) + y = np.linspace(0.0, cfg.Ly, cfg.Nd, dtype=dtype) + _, x_grid, y_grid = np.meshgrid(t, x, y) + + x_test = misc.cartesian_product(t, x, y) + x_test_dict = misc.convert_to_dict( + x_test, + cfg.MODEL.input_keys, + ) + output_dict = predictor.predict( + x_test_dict, + cfg.INFER.batch_size, + ) + + # 
mapping data to cfg.MODEL.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + + u, v, p, rho = ( + output_dict["u"], + output_dict["v"], + output_dict["p"], + output_dict["rho"], + ) + + zero_mask = ( + (x_test[:, 1] - cfg.rx) ** 2 + (x_test[:, 2] - cfg.ry) ** 2 + ) < cfg.rd**2 + u[zero_mask] = 0 + v[zero_mask] = 0 + p[zero_mask] = 0 + rho[zero_mask] = 0 + + u = u.reshape(cfg.Nd, cfg.Nd) + v = v.reshape(cfg.Nd, cfg.Nd) + p = p.reshape(cfg.Nd, cfg.Nd) + rho = rho.reshape(cfg.Nd, cfg.Nd) + + fig, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(15, 15)) + + plt.subplot(2, 2, 1) + plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], u * 241.315, 60) + plt.title("U m/s") + plt.xlabel("x") + plt.ylabel("y") + axe = plt.gca() + axe.set_aspect(1) + plt.colorbar() + + plt.subplot(2, 2, 2) + plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], v * 241.315, 60) + plt.title("V m/s") + plt.xlabel("x") + plt.ylabel("y") + axe = plt.gca() + axe.set_aspect(1) + plt.colorbar() + + plt.subplot(2, 2, 3) + plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], p * 33775, 60) + plt.title("P Pa") + plt.xlabel("x") + plt.ylabel("y") + axe = plt.gca() + axe.set_aspect(1) + plt.colorbar() + + plt.subplot(2, 2, 4) + plt.contourf(x_grid[:, 0, :], y_grid[:, 0, :], rho * 0.58, 60) + plt.title("Rho kg/m^3") + plt.xlabel("x") + plt.ylabel("y") + axe = plt.gca() + axe.set_aspect(1) + plt.colorbar() + + plt.savefig(osp.join(cfg.output_dir, f"shock_wave(Ma_{cfg.MA:.3f}).png")) + + +@hydra.main( + version_base=None, config_path="./conf", config_name="shock_wave_Ma2.0.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() diff --git a/examples/spinn/conf/helmholtz3d.yaml b/examples/spinn/conf/helmholtz3d.yaml index e7c8f909dc..d78a378651 100644 --- a/examples/spinn/conf/helmholtz3d.yaml +++ b/examples/spinn/conf/helmholtz3d.yaml @@ -1,88 +1,88 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_spinn_helmholtz3d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working directory unchanged - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 111 -output_dir: ${hydra:run.dir} -log_freq: 100 - -# set working condition -K: 1.0 -a1: 4 -a2: 4 -a3: 3 - -# model settings -MODEL: - input_keys: ["x", "y", "z"] - output_keys: ["u"] - num_layers: 4 - hidden_size: 64 - r: 32 - activation: "tanh" - -# training settings -TRAIN: - epochs: 50 - iters_per_epoch: 1000 - save_freq: 10 - eval_during_train: false - eval_freq: 5 - lr_scheduler: - epochs: ${TRAIN.epochs} - iters_per_epoch: ${TRAIN.iters_per_epoch} - learning_rate: 1.0e-3 - gamma: 0.9 - decay_steps: 1000 - by_epoch: false - nc: 64 - 
pretrained_model_path: null - checkpoint_path: null - -# evaluation settings -EVAL: - pretrained_model_path: null - eval_with_no_grad: true - batch_size: 1024 - nc: 100 - -# inference settings -INFER: - pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/spinn/spinn_helmholtz3d_pretrained.pdparams - export_path: ./inference/spinn_helmholtz3d - pdmodel_path: ${INFER.export_path}.pdmodel - pdiparams_path: ${INFER.export_path}.pdiparams - onnx_path: ${INFER.export_path}.onnx - device: gpu - engine: native - precision: fp32 - ir_optim: true - min_subgraph_size: 5 - gpu_mem: 2000 - gpu_id: 0 - max_batch_size: 1024 - num_cpu_threads: 10 - batch_size: 1024 +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_spinn_helmholtz3d/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 111 +output_dir: ${hydra:run.dir} +log_freq: 100 + +# set working condition +K: 1.0 +a1: 4 +a2: 4 +a3: 3 + +# model settings +MODEL: + input_keys: ["x", "y", "z"] + output_keys: ["u"] + num_layers: 4 + hidden_size: 64 + r: 32 + activation: "tanh" + +# training settings +TRAIN: + epochs: 50 + iters_per_epoch: 1000 + save_freq: 10 + eval_during_train: false + eval_freq: 5 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1.0e-3 + gamma: 0.9 + decay_steps: 1000 + by_epoch: false + nc: 64 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + pretrained_model_path: null + eval_with_no_grad: true + batch_size: 1024 + nc: 100 + +# inference settings +INFER: + pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/spinn/spinn_helmholtz3d_pretrained.pdparams + export_path: ./inference/spinn_helmholtz3d + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: true + min_subgraph_size: 5 + gpu_mem: 2000 + gpu_id: 0 + max_batch_size: 1024 + num_cpu_threads: 10 + batch_size: 1024 diff --git a/examples/spinn/helmholtz3d.py b/examples/spinn/helmholtz3d.py index f54fb8b9b0..2275da76b0 100644 --- a/examples/spinn/helmholtz3d.py +++ b/examples/spinn/helmholtz3d.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream """ Reference: https://github.com/stnamjef/SPINN/blob/main/helmholtz3d.py """ @@ -320,3 +321,328 @@ def main(cfg: DictConfig): if __name__ == "__main__": main() +======= +""" +Reference: https://github.com/stnamjef/SPINN/blob/main/helmholtz3d.py +""" + +from os import path as osp + +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import logger + +dtype = paddle.get_default_dtype() + + +def save_result(filename, x, y, z, u_pred, u_ref): + xm, ym, zm = np.meshgrid(x, y, z, indexing="ij") + xm = xm.reshape(-1, 1) + ym = ym.reshape(-1, 1) + zm = zm.reshape(-1, 1) + u_pred = u_pred.reshape(-1, 1) + 
u_ref = u_ref.reshape(-1, 1) + ppsci.visualize.save_vtu_from_dict( + filename, + { + "x": xm, + "y": ym, + "z": zm, + "u_pred": u_pred, + "u_ref": u_ref, + }, + ("x", "y", "z"), + ("u_pred", "u_ref"), + ) + + +def _helmholtz3d_exact_u(a1, a2, a3, x, y, z): + return np.sin(a1 * np.pi * x) * np.sin(a2 * np.pi * y) * np.sin(a3 * np.pi * z) + + +def _helmholtz3d_source_term(a1, a2, a3, x, y, z, lda=1.0): + u_gt = _helmholtz3d_exact_u(a1, a2, a3, x, y, z)[..., None] + uxx = -((a1 * np.pi) ** 2) * u_gt + uyy = -((a2 * np.pi) ** 2) * u_gt + uzz = -((a3 * np.pi) ** 2) * u_gt + return uxx + uyy + uzz + lda * u_gt + + +def generate_train_helmholtz3d(a1, a2, a3, nc): + xc = np.random.uniform(-1.0, 1.0, [nc, 1]).astype(dtype) + yc = np.random.uniform(-1.0, 1.0, [nc, 1]).astype(dtype) + zc = np.random.uniform(-1.0, 1.0, [nc, 1]).astype(dtype) + # source term + xcm, ycm, zcm = np.meshgrid(xc, yc, zc, indexing="ij") + uc = _helmholtz3d_source_term(a1, a2, a3, xcm, ycm, zcm).astype(dtype) + # boundary (hard-coded) + xb = [ + np.asarray([[1.0]], dtype=dtype), + np.asarray([[-1.0]], dtype=dtype), + xc, + xc, + xc, + xc, + ] + yb = [ + yc, + yc, + np.asarray([[1.0]], dtype=dtype), + np.asarray([[-1.0]], dtype=dtype), + yc, + yc, + ] + zb = [ + zc, + zc, + zc, + zc, + np.asarray([[1.0]], dtype=dtype), + np.asarray([[-1.0]], dtype=dtype), + ] + return xc, yc, zc, uc, xb, yb, zb + + +def generate_test_helmholtz3d(a1, a2, a3, nc_test): + x = np.linspace(-1.0, 1.0, nc_test, dtype=dtype) + y = np.linspace(-1.0, 1.0, nc_test, dtype=dtype) + z = np.linspace(-1.0, 1.0, nc_test, dtype=dtype) + xm, ym, zm = np.meshgrid(x, y, z, indexing="ij") + u_gt = _helmholtz3d_exact_u(a1, a2, a3, xm, ym, zm).astype(dtype)[..., None] + x = x.reshape(-1, 1) + y = y.reshape(-1, 1) + z = z.reshape(-1, 1) + return x, y, z, u_gt + + +def train(cfg: DictConfig): + # set model + model = ppsci.arch.SPINN(**cfg.MODEL) + + # set equation + equation = {"Helmholtz": ppsci.equation.Helmholtz(3, 1.0)} + equation["Helmholtz"].model = model # set model to equation for hvp + + # set constraint + class InteriorDataGenerator: + def __init__(self): + self.iter = 0 + self._gen() + + def _gen(self): + global xb, yb, zb + xc, yc, zc, uc, xb, yb, zb = generate_train_helmholtz3d( + cfg.a1, + cfg.a2, + cfg.a3, + cfg.TRAIN.nc, + ) + self.xc = xc + self.yc = yc + self.zc = zc + self.uc = uc + + def __call__(self): + self.iter += 1 + + if self.iter % 100 == 0: + self._gen() + + return { + "x": self.xc, + "y": self.yc, + "z": self.zc, + "uc": self.uc, + } + + def gen_label_batch(input_batch): + return {"helmholtz": input_batch["uc"]} + + pde_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": InteriorDataGenerator(), + "label": gen_label_batch, + }, + }, + output_expr=equation["Helmholtz"].equations, + loss=ppsci.loss.MSELoss("mean"), + name="PDE", + ) + # wrap constraints together + constraint = { + pde_constraint.name: pde_constraint, + } + + class BCDataGenerator: + def __init__(self, idx: int): + self.idx = idx + + def __call__(self): + global xb, yb, zb + tmp = { + "x": xb[self.idx], + "y": yb[self.idx], + "z": zb[self.idx], + } + return tmp + + def gen_bc_label(data_dict): + nx = len(data_dict["x"]) + ny = len(data_dict["y"]) + nz = len(data_dict["z"]) + return {"u": np.zeros([nx, ny, nz, 1])} + + for i in range(6): + bc_constraint_i = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "ContinuousNamedArrayDataset", + "input": BCDataGenerator(i), + "label": 
gen_bc_label, + }, + }, + output_expr={"u": lambda out: out["u"]}, + loss=ppsci.loss.MSELoss("mean"), + name=f"BC{i}", + ) + constraint[bc_constraint_i.name] = bc_constraint_i + + # set optimizer + lr_scheduler = ppsci.optimizer.lr_scheduler.ExponentialDecay( + **cfg.TRAIN.lr_scheduler + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler)(model) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + equation=equation, + cfg=cfg, + ) + # train model + solver.train() + + # evaluate after training + x, y, z, u_gt = generate_test_helmholtz3d(cfg.a1, cfg.a2, cfg.a3, cfg.EVAL.nc) + u_pred = solver.predict( + { + "x": x, + "y": y, + "z": z, + }, + batch_size=None, + return_numpy=True, + )["u"].reshape(-1) + u_gt = u_gt.reshape(-1) + l2_err = np.linalg.norm(u_pred - u_gt, ord=2) / np.linalg.norm(u_gt, ord=2) + rmse = np.sqrt(np.mean((u_pred - u_gt) ** 2)) + logger.message(f"l2_err = {l2_err:.4f}, rmse = {rmse:.4f}") + + save_result( + osp.join(cfg.output_dir, "helmholtz3d_result.vtu"), x, y, z, u_pred, u_gt + ) + + +def evaluate(cfg: DictConfig): + # set model + model = ppsci.arch.SPINN(**cfg.MODEL) + + solver = ppsci.solver.Solver( + model, + cfg=cfg, + ) + + # evaluate + x, y, z, u_gt = generate_test_helmholtz3d(cfg.a1, cfg.a2, cfg.a3, cfg.EVAL.nc) + u_pred = solver.predict( + { + "x": x, + "y": y, + "z": z, + }, + batch_size=None, + return_numpy=True, + )["u"].reshape(-1) + u_gt = u_gt.reshape(-1) + l2_err = np.linalg.norm(u_pred - u_gt, ord=2) / np.linalg.norm(u_gt, ord=2) + rmse = np.sqrt(np.mean((u_pred - u_gt) ** 2)) + logger.message(f"l2_err = {l2_err:.4f}, rmse = {rmse:.4f}") + + save_result( + osp.join(cfg.output_dir, "helmholtz3d_result.vtu"), x, y, z, u_pred, u_gt + ) + + +def export(cfg: DictConfig): + # set model + model = ppsci.arch.SPINN(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver(model, cfg=cfg) + # export model + from paddle.static import InputSpec + + input_spec = [ + {key: InputSpec([None, 1], "float32", name=key) for key in model.input_keys}, + ] + solver.export(input_spec, cfg.INFER.export_path, with_onnx=False) + + +def inference(cfg: DictConfig): + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + # evaluate + x, y, z, u_gt = generate_test_helmholtz3d(cfg.a1, cfg.a2, cfg.a3, cfg.EVAL.nc) + output_dict = predictor.predict( + { + "x": x, + "y": y, + "z": z, + }, + batch_size=None, + ) + # mapping data to cfg.INFER.output_keys + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip(cfg.MODEL.output_keys, output_dict.keys()) + } + u_pred = output_dict["u"].reshape(-1) + u_gt = u_gt.reshape(-1) + l2_err = np.linalg.norm(u_pred - u_gt, ord=2) / np.linalg.norm(u_gt, ord=2) + rmse = np.sqrt(np.mean((u_pred - u_gt) ** 2)) + logger.message(f"l2_err = {l2_err:.4f}, rmse = {rmse:.4f}") + + save_result( + osp.join(cfg.output_dir, "helmholtz3d_result.vtu"), x, y, z, u_pred, u_gt + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="helmholtz3d.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + elif cfg.mode == "export": + export(cfg) + elif cfg.mode == "infer": + inference(cfg) + else: + raise ValueError( + f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" + ) + + +if __name__ == "__main__": + main() +>>>>>>> Stashed changes diff --git a/examples/tempoGAN/conf/tempogan.yaml b/examples/tempoGAN/conf/tempogan.yaml 
index 46a6d9b6fa..1014e2b529 100644
--- a/examples/tempoGAN/conf/tempogan.yaml
+++ b/examples/tempoGAN/conf/tempogan.yaml
@@ -1,3 +1,4 @@
+<<<<<<< Updated upstream
 defaults:
   - ppsci_default
   - TRAIN: train_default
@@ -110,3 +111,118 @@ INFER:
   max_batch_size: 16
   num_cpu_threads: 4
   batch_size: 1
+=======
+hydra:
+  run:
+    # dynamic output directory according to running time and override name
+    dir: outputs_tempoGAN/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname}
+  job:
+    name: ${mode} # name of logfile
+    chdir: false # keep current working directory unchanged
+    config:
+      override_dirname:
+        exclude_keys:
+          - TRAIN.checkpoint_path
+          - TRAIN.pretrained_model_path
+          - EVAL.pretrained_model_path
+          - INFER.pretrained_model_path
+          - INFER.export_path
+          - mode
+          - output_dir
+          - log_freq
+  sweep:
+    # output directory for multirun
+    dir: ${hydra.run.dir}
+    subdir: ./
+
+# general settings
+mode: train # running mode: train/eval
+seed: 42
+output_dir: ${hydra:run.dir}
+log_freq: 20
+DATASET_PATH: ./datasets/tempoGAN/2d_train.mat
+DATASET_PATH_VALID: ./datasets/tempoGAN/2d_valid.mat
+
+# set working condition
+USE_AMP: true
+USE_SPATIALDISC: true
+USE_TEMPODISC: true
+WEIGHT_GEN: [5.0, 0.0, 1.0] # lambda_l1, lambda_l2, lambda_t
+WEIGHT_GEN_LAYER: [-1.0e-5, -1.0e-5, -1.0e-5, -1.0e-5, -1.0e-5]
+WEIGHT_DISC: 1.0
+TILE_RATIO: 1
+
+# model settings
+MODEL:
+  gen_net:
+    input_keys: ["input_gen"] # 'NCHW'
+    output_keys: ["output_gen"]
+    in_channel: 1
+    out_channels_tuple: [[2, 8, 8], [128, 128, 128], [32, 8, 8], [2, 1, 1]]
+    kernel_sizes_tuple: [[[5, 5], [5, 5], [1, 1]], [[5, 5], [5, 5], [1, 1]], [[5, 5], [5, 5], [1, 1]], [[5, 5], [5, 5], [1, 1]]]
+    strides_tuple: [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]
+    use_bns_tuple: [[true, true, true], [true, true, true], [true, true, true], [false, false, false]]
+    acts_tuple: [['relu', null, null], ['relu', null, null], ['relu', null, null], ['relu', null, null]]
+  disc_net:
+    input_keys: ['input_disc_from_target', 'input_disc_from_gen'] # 'NCHW'
+    output_keys: ['out0_layer0', 'out0_layer1', 'out0_layer2', 'out0_layer3', 'out_disc_from_target', 'out1_layer0', 'out1_layer1', 'out1_layer2', 'out1_layer3', 'out_disc_from_gen']
+    in_channel: 2
+    out_channels: [32, 64, 128, 256]
+    fc_channel: 1048576
+    kernel_sizes: [[4, 4], [4, 4], [4, 4], [4, 4]]
+    strides: [2, 2, 2, 1]
+    use_bns: [false, true, true, true]
+    acts: ['leaky_relu', 'leaky_relu', 'leaky_relu', 'leaky_relu', null]
+  tempo_net:
+    input_keys: ['input_tempo_disc_from_target', 'input_tempo_disc_from_gen'] # 'NCHW'
+    output_keys: ['out0_tempo_layer0', 'out0_tempo_layer1', 'out0_tempo_layer2', 'out0_tempo_layer3', 'out_disc_tempo_from_target', 'out1_tempo_layer0', 'out1_tempo_layer1', 'out1_tempo_layer2', 'out1_tempo_layer3', 'out_disc_tempo_from_gen']
+    in_channel: 3
+    out_channels: [32, 64, 128, 256]
+    fc_channel: 1048576
+    kernel_sizes: [[4, 4], [4, 4], [4, 4], [4, 4]]
+    strides: [2, 2, 2, 1]
+    use_bns: [false, true, true, true]
+    acts: ['leaky_relu', 'leaky_relu', 'leaky_relu', 'leaky_relu', null]
+
+# training settings
+TRAIN:
+  epochs: 40000
+  epochs_gen: 1
+  epochs_disc: 1
+  epochs_disc_tempo: 1
+  iters_per_epoch: 2
+  batch_size:
+    sup_constraint: 8
+  lr_scheduler:
+    epochs: ${TRAIN.epochs}
+    iters_per_epoch: ${TRAIN.iters_per_epoch}
+    learning_rate: 2.0e-4
+    gamma: 0.05
+    by_epoch: true
+  eval_during_train: false
+  amp_level: O2
+  pretrained_model_path: null
+  checkpoint_path: null
+
+# evaluation settings
+EVAL:
+  pretrained_model_path: null
+  save_outs: true
+
+INFER:
+  pretrained_model_path: https://paddle-org.bj.bcebos.com/paddlescience/models/tempoGAN/tempogan_pretrained.pdparams
+  export_path: ./inference/tempoGAN
+  pdmodel_path: ${INFER.export_path}.pdmodel
+  pdiparams_path: ${INFER.export_path}.pdiparams
+  device: gpu
+  engine: native
+  precision: fp32
+  onnx_path: ${INFER.export_path}.onnx
+  ir_optim: true
+  min_subgraph_size: 10
+  gpu_mem: 4000
+  gpu_id: 0
+  max_batch_size: 16
+  num_cpu_threads: 4
+  batch_size: 1
+>>>>>>> Stashed changes
diff --git a/examples/tempoGAN/functions.py b/examples/tempoGAN/functions.py
index dcc3f60a15..460def4089 100644
--- a/examples/tempoGAN/functions.py
+++ b/examples/tempoGAN/functions.py
@@ -1,487 +1,487 @@
-# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-from typing import Dict
-from typing import List
-from typing import Tuple
-
-import numpy as np
-import paddle
-import paddle.nn.functional as F
-from matplotlib import image as Img
-from PIL import Image
-from skimage.metrics import mean_squared_error
-from skimage.metrics import peak_signal_noise_ratio
-from skimage.metrics import structural_similarity
-
-import ppsci
-from ppsci.utils import logger
-
-
-# train
-def interpolate(
-    data: paddle.Tensor, ratio: int, mode: str = "nearest"
-) -> paddle.Tensor:
-    """Interpolate twice.
-
-    Args:
-        data (paddle.Tensor): The data to be interpolated.
-        ratio (int): Ratio of one interpolation.
-        mode (str, optional): Interpolation method. Defaults to "nearest".
-
-    Returns:
-        paddle.Tensor: Data interpolated.
-    """
-    for _ in range(2):
-        data = F.interpolate(
-            data,
-            [data.shape[-2] * ratio, data.shape[-1] * ratio],
-            mode=mode,
-        )
-    return data
-
-
-def reshape_input(input_dict: Dict[str, paddle.Tensor]) -> Dict[str, paddle.Tensor]:
-    """Reshape input data for temporally Discriminator. Reshape data from N, C, W, H to N * C, 1, H, W.
-    Which will merge N dimension and C dimension to 1 dimension but still keep 4 dimensions
-    to ensure the data can be used for training.
-
-    Args:
-        input_dict (Dict[str, paddle.Tensor]): input data dict.
-
-    Returns:
-        Dict[str, paddle.Tensor]: reshaped data dict.
-    """
-    out_dict = {}
-    for key in input_dict:
-        input = input_dict[key]
-        N, C, H, W = input.shape
-        out_dict[key] = paddle.reshape(input, [N * C, 1, H, W])
-    return out_dict
-
-
-def dereshape_input(
-    input_dict: Dict[str, paddle.Tensor], C: int
-) -> Dict[str, paddle.Tensor]:
-    """Dereshape input data for temporally Discriminator. Deeshape data from 1, N * C, H, W to N, C, W, H.
-
-    Args:
-        input_dict (Dict[str, paddle.Tensor]): input data dict.
-        C (int): Channel of dereshape.
-
-    Returns:
-        Dict[str, paddle.Tensor]: dereshaped data dict.
-    """
-    for key in input_dict:
-        input = input_dict[key]
-        _, N, H, W = input.shape
-        if N < C:
-            logger.warning(
-                f"batch_size is smaller than {C}! Tempo needs at least {C} frames, input will be copied."
- ) - input_dict[key] = paddle.concat([input[:1]] * C, axis=1) - else: - N_new = int(N // C) - input_dict[key] = paddle.reshape(input[: N_new * C], [-1, C, H, W]) - return input_dict - - -# predict -def split_data(data: np.ndarray, tile_ratio: int) -> np.ndarray: - """Split a numpy image to tiles equally. - - Args: - data (np.ndarray): The image to be Split. - tile_ratio (int): How many tiles of one dim. - Number of result tiles is tile_ratio * tile_ratio for a 2d image. - - Returns: - np.ndarray: Tiles in [N,C,H,W] shape. - """ - _, _, h, w = data.shape - tile_h, tile_w = h // tile_ratio, w // tile_ratio - tiles = [] - for i in range(tile_ratio): - for j in range(tile_ratio): - tiles.append( - data[ - :1, - :, - i * tile_h : i * tile_h + tile_h, - j * tile_w : j * tile_w + tile_w, - ], - ) - return np.concatenate(tiles, axis=0) - - -def concat_data(data: np.ndarray, tile_ratio: int) -> np.ndarray: - """Concat numpy tiles to a image equally. - - Args: - data (np.ndarray): The tiles to be upsplited. - tile_ratio (int): How many tiles of one dim. - Number of input tiles is tile_ratio * tile_ratio for 2d result. - - Returns: - np.ndarray: Image in [H,W] shape. - """ - _, _, tile_h, tile_w = data.shape - h, w = tile_h * tile_ratio, tile_w * tile_ratio - data_whole = np.ones([h, w], dtype=paddle.get_default_dtype()) - tile_idx = 0 - for i in range(tile_ratio): - for j in range(tile_ratio): - data_whole[ - i * tile_h : i * tile_h + tile_h, - j * tile_w : j * tile_w + tile_w, - ] = data[tile_idx][0] - tile_idx += 1 - return data_whole - - -def predict_and_save_plot( - output_dir: str, - epoch_id: int, - solver_gen: ppsci.solver.Solver, - dataset_valid: np.ndarray, - tile_ratio: int = 1, -): - """Predicting and plotting. - - Args: - output_dir (str): Output dir path. - epoch_id (int): Which epoch it is. - solver_gen (ppsci.solver.Solver): Solver for predicting. - dataset_valid (np.ndarray): Valid dataset. - tile_ratio (int, optional): How many tiles of one dim. Defaults to 1. 
- """ - dir_pred = "predict/" - os.makedirs(os.path.join(output_dir, dir_pred), exist_ok=True) - - start_idx = 190 - density_low = dataset_valid["density_low"][start_idx : start_idx + 3] - density_high = dataset_valid["density_high"][start_idx : start_idx + 3] - - # tile - density_low = ( - split_data(density_low, tile_ratio) if tile_ratio != 1 else density_low - ) - density_high = ( - split_data(density_high, tile_ratio) if tile_ratio != 1 else density_high - ) - - pred_dict = solver_gen.predict( - { - "density_low": density_low, - "density_high": density_high, - }, - {"density_high": lambda out: out["output_gen"]}, - batch_size=tile_ratio * tile_ratio if tile_ratio != 1 else 3, - no_grad=False, - ) - if epoch_id == 1: - # plot interpolated input image - input_img = np.expand_dims(dataset_valid["density_low"][start_idx], axis=0) - input_img = paddle.to_tensor(input_img, dtype=paddle.get_default_dtype()) - input_img = F.interpolate( - input_img, - [input_img.shape[-2] * 4, input_img.shape[-1] * 4], - mode="nearest", - ).numpy() - Img.imsave( - os.path.join(output_dir, dir_pred, "input.png"), - np.squeeze(input_img), - vmin=0.0, - vmax=1.0, - cmap="gray", - ) - # plot target image - Img.imsave( - os.path.join(output_dir, dir_pred, "target.png"), - np.squeeze(dataset_valid["density_high"][start_idx]), - vmin=0.0, - vmax=1.0, - cmap="gray", - ) - # plot pred image - pred_img = ( - concat_data(pred_dict["density_high"].numpy(), tile_ratio) - if tile_ratio != 1 - else np.squeeze(pred_dict["density_high"][0].numpy()) - ) - Img.imsave( - os.path.join(output_dir, dir_pred, f"pred_epoch_{str(epoch_id)}.png"), - pred_img, - vmin=0.0, - vmax=1.0, - cmap="gray", - ) - - -# evaluation -def evaluate_img( - img_target: np.ndarray, img_pred: np.ndarray -) -> Tuple[float, float, float]: - """Evaluate two images. - - Args: - img_target (np.ndarray): Target image. - img_pred (np.ndarray): Image generated by prediction. - - Returns: - Tuple[float, float, float]: MSE, PSNR, SSIM. - """ - eval_mse = mean_squared_error(img_target, img_pred) - eval_psnr = peak_signal_noise_ratio(img_target, img_pred) - eval_ssim = structural_similarity(img_target, img_pred, data_range=1.0) - return eval_mse, eval_psnr, eval_ssim - - -def get_image_array(img_path): - return np.array(Image.open(img_path).convert("L")) - - -class GenFuncs: - """All functions used for Generator, including functions of transform and loss. - - Args: - weight_gen (List[float]): Weights of L1 loss. - weight_gen_layer (List[float], optional): Weights of layers loss. Defaults to None. - """ - - def __init__( - self, weight_gen: List[float], weight_gen_layer: List[float] = None - ) -> None: - self.weight_gen = weight_gen - self.weight_gen_layer = weight_gen_layer - - def transform_in(self, _in): - ratio = 2 - input_dict = reshape_input(_in) - density_low = input_dict["density_low"] - density_low_inp = interpolate(density_low, ratio, "nearest") - return {"input_gen": density_low_inp} - - def loss_func_gen(self, output_dict: Dict, *args) -> paddle.Tensor: - """Calculate loss of generator when use spatial discriminator. - The loss consists of l1 loss, l2 loss and layer loss when use spatial discriminator. - Notice that all item of loss is optional because weight of them might be 0. - - Args: - output_dict (Dict): output dict of model. - - Returns: - paddle.Tensor: Loss of generator. 
- """ - # l1 loss - loss_l1 = F.l1_loss( - output_dict["output_gen"], output_dict["density_high"], "mean" - ) - losses = loss_l1 * self.weight_gen[0] - - # l2 loss - loss_l2 = F.mse_loss( - output_dict["output_gen"], output_dict["density_high"], "mean" - ) - losses += loss_l2 * self.weight_gen[1] - - if self.weight_gen_layer is not None: - # disc(generator_out) loss - out_disc_from_gen = output_dict["out_disc_from_gen"][-1] - label_ones = paddle.ones_like(out_disc_from_gen) - loss_gen = F.binary_cross_entropy_with_logits( - out_disc_from_gen, label_ones, reduction="mean" - ) - losses += loss_gen - - # layer loss - key_list = list(output_dict.keys()) - # ["out0_layer0","out0_layer1","out0_layer2","out0_layer3","out_disc_from_target", - # "out1_layer0","out1_layer1","out1_layer2","out1_layer3","out_disc_from_gen"] - loss_layer = 0 - for i in range(1, len(self.weight_gen_layer)): - # i = 0,1,2,3 - loss_layer += ( - self.weight_gen_layer[i] - * F.mse_loss( - output_dict[key_list[i]], - output_dict[key_list[5 + i]], - reduction="sum", - ) - / 2 - ) - losses += loss_layer * self.weight_gen_layer[0] - - return {"output_gen": losses} - - def loss_func_gen_tempo(self, output_dict: Dict, *args) -> paddle.Tensor: - """Calculate loss of generator when use temporal discriminator. - The loss is cross entropy loss when use temporal discriminator. - - Args: - output_dict (Dict): output dict of model. - - Returns: - paddle.Tensor: Loss of generator. - """ - out_disc_tempo_from_gen = output_dict["out_disc_tempo_from_gen"][-1] - label_t_ones = paddle.ones_like(out_disc_tempo_from_gen) - - loss_gen_t = F.binary_cross_entropy_with_logits( - out_disc_tempo_from_gen, label_t_ones, reduction="mean" - ) - losses = loss_gen_t * self.weight_gen[2] - return {"out_disc_tempo_from_gen": losses} - - -class DiscFuncs: - """All functions used for Discriminator and temporally Discriminator, including functions of transform and loss. - - Args: - weight_disc (float): Weight of loss generated by the discriminator to judge the true target. 
- """ - - def __init__(self, weight_disc: float) -> None: - self.weight_disc = weight_disc - self.model_gen = None - - def transform_in(self, _in): - ratio = 2 - input_dict = reshape_input(_in) - density_low = input_dict["density_low"] - density_high_from_target = input_dict["density_high"] - - density_low_inp = interpolate(density_low, ratio, "nearest") - - density_high_from_gen = self.model_gen(input_dict)["output_gen"] - density_high_from_gen.stop_gradient = True - - density_input_from_target = paddle.concat( - [density_low_inp, density_high_from_target], axis=1 - ) - density_input_from_gen = paddle.concat( - [density_low_inp, density_high_from_gen], axis=1 - ) - return { - "input_disc_from_target": density_input_from_target, - "input_disc_from_gen": density_input_from_gen, - } - - def transform_in_tempo(self, _in): - density_high_from_target = _in["density_high"] - - input_dict = reshape_input(_in) - density_high_from_gen = self.model_gen(input_dict)["output_gen"] - density_high_from_gen.stop_gradient = True - - input_trans = { - "input_tempo_disc_from_target": density_high_from_target, - "input_tempo_disc_from_gen": density_high_from_gen, - } - - return dereshape_input(input_trans, 3) - - def loss_func(self, output_dict, *args): - out_disc_from_target = output_dict["out_disc_from_target"] - out_disc_from_gen = output_dict["out_disc_from_gen"] - - label_ones = paddle.ones_like(out_disc_from_target) - label_zeros = paddle.zeros_like(out_disc_from_gen) - - loss_disc_from_target = F.binary_cross_entropy_with_logits( - out_disc_from_target, label_ones, reduction="mean" - ) - loss_disc_from_gen = F.binary_cross_entropy_with_logits( - out_disc_from_gen, label_zeros, reduction="mean" - ) - losses = loss_disc_from_target * self.weight_disc + loss_disc_from_gen - return {"CE_loss": losses} - - def loss_func_tempo(self, output_dict, *args): - out_disc_tempo_from_target = output_dict["out_disc_tempo_from_target"] - out_disc_tempo_from_gen = output_dict["out_disc_tempo_from_gen"] - - label_ones = paddle.ones_like(out_disc_tempo_from_target) - label_zeros = paddle.zeros_like(out_disc_tempo_from_gen) - - loss_disc_tempo_from_target = F.binary_cross_entropy_with_logits( - out_disc_tempo_from_target, label_ones, reduction="mean" - ) - loss_disc_tempo_from_gen = F.binary_cross_entropy_with_logits( - out_disc_tempo_from_gen, label_zeros, reduction="mean" - ) - losses = ( - loss_disc_tempo_from_target * self.weight_disc + loss_disc_tempo_from_gen - ) - return {"CE_tempo_loss": losses} - - -class DataFuncs: - """All functions used for data transform. - - Args: - tile_ratio (int, optional): How many tiles of one dim. Defaults to 1. - density_min (float, optional): Minimize density of one tile. Defaults to 0.02. - max_turn (int, optional): Maximize turn of taking a tile from one image. Defaults to 20. 
- """ - - def __init__( - self, tile_ratio: int = 1, density_min: float = 0.02, max_turn: int = 20 - ) -> None: - self.tile_ratio = tile_ratio - self.density_min = density_min - self.max_turn = max_turn - - def transform( - self, - input_item: Dict[str, np.ndarray], - label_item: Dict[str, np.ndarray], - weight_item: Dict[str, np.ndarray], - ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Dict[str, np.ndarray]]: - if self.tile_ratio == 1: - return input_item, label_item, weight_item - for _ in range(self.max_turn): - rand_ratio = np.random.rand() - density_low = self.cut_data(input_item["density_low"], rand_ratio) - density_high = self.cut_data(input_item["density_high"], rand_ratio) - if self.is_valid_tile(density_low): - break - - input_item["density_low"] = density_low - input_item["density_high"] = density_high - return input_item, label_item, weight_item - - def cut_data(self, data: np.ndarray, rand_ratio: float) -> paddle.Tensor: - # data: C,H,W - _, H, W = data.shape - if H % self.tile_ratio != 0 or W % self.tile_ratio != 0: - exit( - f"ERROR: input images cannot be divided into {self.tile_ratio} parts evenly!" - ) - tile_shape = [H // self.tile_ratio, W // self.tile_ratio] - rand_shape = np.floor(rand_ratio * (np.array([H, W]) - np.array(tile_shape))) - start = [int(rand_shape[0]), int(rand_shape[1])] - end = [int(rand_shape[0] + tile_shape[0]), int(rand_shape[1] + tile_shape[1])] - data = paddle.slice( - paddle.to_tensor(data), axes=[-2, -1], starts=start, ends=end - ) - - return data - - def is_valid_tile(self, tile: paddle.Tensor): - img_density = tile[0].sum() - return img_density >= ( - self.density_min * tile.shape[0] * tile.shape[1] * tile.shape[2] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import Dict +from typing import List +from typing import Tuple + +import numpy as np +import paddle +import paddle.nn.functional as F +from matplotlib import image as Img +from PIL import Image +from skimage.metrics import mean_squared_error +from skimage.metrics import peak_signal_noise_ratio +from skimage.metrics import structural_similarity + +import ppsci +from ppsci.utils import logger + + +# train +def interpolate( + data: paddle.Tensor, ratio: int, mode: str = "nearest" +) -> paddle.Tensor: + """Interpolate twice. + + Args: + data (paddle.Tensor): The data to be interpolated. + ratio (int): Ratio of one interpolation. + mode (str, optional): Interpolation method. Defaults to "nearest". + + Returns: + paddle.Tensor: Data interpolated. + """ + for _ in range(2): + data = F.interpolate( + data, + [data.shape[-2] * ratio, data.shape[-1] * ratio], + mode=mode, + ) + return data + + +def reshape_input(input_dict: Dict[str, paddle.Tensor]) -> Dict[str, paddle.Tensor]: + """Reshape input data for temporally Discriminator. Reshape data from N, C, W, H to N * C, 1, H, W. 
+ Which will merge N dimension and C dimension to 1 dimension but still keep 4 dimensions + to ensure the data can be used for training. + + Args: + input_dict (Dict[str, paddle.Tensor]): input data dict. + + Returns: + Dict[str, paddle.Tensor]: reshaped data dict. + """ + out_dict = {} + for key in input_dict: + input = input_dict[key] + N, C, H, W = input.shape + out_dict[key] = paddle.reshape(input, [N * C, 1, H, W]) + return out_dict + + +def dereshape_input( + input_dict: Dict[str, paddle.Tensor], C: int +) -> Dict[str, paddle.Tensor]: + """Dereshape input data for temporally Discriminator. Deeshape data from 1, N * C, H, W to N, C, W, H. + + Args: + input_dict (Dict[str, paddle.Tensor]): input data dict. + C (int): Channel of dereshape. + + Returns: + Dict[str, paddle.Tensor]: dereshaped data dict. + """ + for key in input_dict: + input = input_dict[key] + _, N, H, W = input.shape + if N < C: + logger.warning( + f"batch_size is smaller than {C}! Tempo needs at least {C} frames, input will be copied." + ) + input_dict[key] = paddle.concat([input[:1]] * C, axis=1) + else: + N_new = int(N // C) + input_dict[key] = paddle.reshape(input[: N_new * C], [-1, C, H, W]) + return input_dict + + +# predict +def split_data(data: np.ndarray, tile_ratio: int) -> np.ndarray: + """Split a numpy image to tiles equally. + + Args: + data (np.ndarray): The image to be Split. + tile_ratio (int): How many tiles of one dim. + Number of result tiles is tile_ratio * tile_ratio for a 2d image. + + Returns: + np.ndarray: Tiles in [N,C,H,W] shape. + """ + _, _, h, w = data.shape + tile_h, tile_w = h // tile_ratio, w // tile_ratio + tiles = [] + for i in range(tile_ratio): + for j in range(tile_ratio): + tiles.append( + data[ + :1, + :, + i * tile_h : i * tile_h + tile_h, + j * tile_w : j * tile_w + tile_w, + ], + ) + return np.concatenate(tiles, axis=0) + + +def concat_data(data: np.ndarray, tile_ratio: int) -> np.ndarray: + """Concat numpy tiles to a image equally. + + Args: + data (np.ndarray): The tiles to be upsplited. + tile_ratio (int): How many tiles of one dim. + Number of input tiles is tile_ratio * tile_ratio for 2d result. + + Returns: + np.ndarray: Image in [H,W] shape. + """ + _, _, tile_h, tile_w = data.shape + h, w = tile_h * tile_ratio, tile_w * tile_ratio + data_whole = np.ones([h, w], dtype=paddle.get_default_dtype()) + tile_idx = 0 + for i in range(tile_ratio): + for j in range(tile_ratio): + data_whole[ + i * tile_h : i * tile_h + tile_h, + j * tile_w : j * tile_w + tile_w, + ] = data[tile_idx][0] + tile_idx += 1 + return data_whole + + +def predict_and_save_plot( + output_dir: str, + epoch_id: int, + solver_gen: ppsci.solver.Solver, + dataset_valid: np.ndarray, + tile_ratio: int = 1, +): + """Predicting and plotting. + + Args: + output_dir (str): Output dir path. + epoch_id (int): Which epoch it is. + solver_gen (ppsci.solver.Solver): Solver for predicting. + dataset_valid (np.ndarray): Valid dataset. + tile_ratio (int, optional): How many tiles of one dim. Defaults to 1. 
+ """ + dir_pred = "predict/" + os.makedirs(os.path.join(output_dir, dir_pred), exist_ok=True) + + start_idx = 190 + density_low = dataset_valid["density_low"][start_idx : start_idx + 3] + density_high = dataset_valid["density_high"][start_idx : start_idx + 3] + + # tile + density_low = ( + split_data(density_low, tile_ratio) if tile_ratio != 1 else density_low + ) + density_high = ( + split_data(density_high, tile_ratio) if tile_ratio != 1 else density_high + ) + + pred_dict = solver_gen.predict( + { + "density_low": density_low, + "density_high": density_high, + }, + {"density_high": lambda out: out["output_gen"]}, + batch_size=tile_ratio * tile_ratio if tile_ratio != 1 else 3, + no_grad=False, + ) + if epoch_id == 1: + # plot interpolated input image + input_img = np.expand_dims(dataset_valid["density_low"][start_idx], axis=0) + input_img = paddle.to_tensor(input_img, dtype=paddle.get_default_dtype()) + input_img = F.interpolate( + input_img, + [input_img.shape[-2] * 4, input_img.shape[-1] * 4], + mode="nearest", + ).numpy() + Img.imsave( + os.path.join(output_dir, dir_pred, "input.png"), + np.squeeze(input_img), + vmin=0.0, + vmax=1.0, + cmap="gray", + ) + # plot target image + Img.imsave( + os.path.join(output_dir, dir_pred, "target.png"), + np.squeeze(dataset_valid["density_high"][start_idx]), + vmin=0.0, + vmax=1.0, + cmap="gray", + ) + # plot pred image + pred_img = ( + concat_data(pred_dict["density_high"].numpy(), tile_ratio) + if tile_ratio != 1 + else np.squeeze(pred_dict["density_high"][0].numpy()) + ) + Img.imsave( + os.path.join(output_dir, dir_pred, f"pred_epoch_{str(epoch_id)}.png"), + pred_img, + vmin=0.0, + vmax=1.0, + cmap="gray", + ) + + +# evaluation +def evaluate_img( + img_target: np.ndarray, img_pred: np.ndarray +) -> Tuple[float, float, float]: + """Evaluate two images. + + Args: + img_target (np.ndarray): Target image. + img_pred (np.ndarray): Image generated by prediction. + + Returns: + Tuple[float, float, float]: MSE, PSNR, SSIM. + """ + eval_mse = mean_squared_error(img_target, img_pred) + eval_psnr = peak_signal_noise_ratio(img_target, img_pred) + eval_ssim = structural_similarity(img_target, img_pred, data_range=1.0) + return eval_mse, eval_psnr, eval_ssim + + +def get_image_array(img_path): + return np.array(Image.open(img_path).convert("L")) + + +class GenFuncs: + """All functions used for Generator, including functions of transform and loss. + + Args: + weight_gen (List[float]): Weights of L1 loss. + weight_gen_layer (List[float], optional): Weights of layers loss. Defaults to None. + """ + + def __init__( + self, weight_gen: List[float], weight_gen_layer: List[float] = None + ) -> None: + self.weight_gen = weight_gen + self.weight_gen_layer = weight_gen_layer + + def transform_in(self, _in): + ratio = 2 + input_dict = reshape_input(_in) + density_low = input_dict["density_low"] + density_low_inp = interpolate(density_low, ratio, "nearest") + return {"input_gen": density_low_inp} + + def loss_func_gen(self, output_dict: Dict, *args) -> paddle.Tensor: + """Calculate loss of generator when use spatial discriminator. + The loss consists of l1 loss, l2 loss and layer loss when use spatial discriminator. + Notice that all item of loss is optional because weight of them might be 0. + + Args: + output_dict (Dict): output dict of model. + + Returns: + paddle.Tensor: Loss of generator. 
+ """ + # l1 loss + loss_l1 = F.l1_loss( + output_dict["output_gen"], output_dict["density_high"], "mean" + ) + losses = loss_l1 * self.weight_gen[0] + + # l2 loss + loss_l2 = F.mse_loss( + output_dict["output_gen"], output_dict["density_high"], "mean" + ) + losses += loss_l2 * self.weight_gen[1] + + if self.weight_gen_layer is not None: + # disc(generator_out) loss + out_disc_from_gen = output_dict["out_disc_from_gen"][-1] + label_ones = paddle.ones_like(out_disc_from_gen) + loss_gen = F.binary_cross_entropy_with_logits( + out_disc_from_gen, label_ones, reduction="mean" + ) + losses += loss_gen + + # layer loss + key_list = list(output_dict.keys()) + # ["out0_layer0","out0_layer1","out0_layer2","out0_layer3","out_disc_from_target", + # "out1_layer0","out1_layer1","out1_layer2","out1_layer3","out_disc_from_gen"] + loss_layer = 0 + for i in range(1, len(self.weight_gen_layer)): + # i = 0,1,2,3 + loss_layer += ( + self.weight_gen_layer[i] + * F.mse_loss( + output_dict[key_list[i]], + output_dict[key_list[5 + i]], + reduction="sum", + ) + / 2 + ) + losses += loss_layer * self.weight_gen_layer[0] + + return {"output_gen": losses} + + def loss_func_gen_tempo(self, output_dict: Dict, *args) -> paddle.Tensor: + """Calculate loss of generator when use temporal discriminator. + The loss is cross entropy loss when use temporal discriminator. + + Args: + output_dict (Dict): output dict of model. + + Returns: + paddle.Tensor: Loss of generator. + """ + out_disc_tempo_from_gen = output_dict["out_disc_tempo_from_gen"][-1] + label_t_ones = paddle.ones_like(out_disc_tempo_from_gen) + + loss_gen_t = F.binary_cross_entropy_with_logits( + out_disc_tempo_from_gen, label_t_ones, reduction="mean" + ) + losses = loss_gen_t * self.weight_gen[2] + return {"out_disc_tempo_from_gen": losses} + + +class DiscFuncs: + """All functions used for Discriminator and temporally Discriminator, including functions of transform and loss. + + Args: + weight_disc (float): Weight of loss generated by the discriminator to judge the true target. 
+ """ + + def __init__(self, weight_disc: float) -> None: + self.weight_disc = weight_disc + self.model_gen = None + + def transform_in(self, _in): + ratio = 2 + input_dict = reshape_input(_in) + density_low = input_dict["density_low"] + density_high_from_target = input_dict["density_high"] + + density_low_inp = interpolate(density_low, ratio, "nearest") + + density_high_from_gen = self.model_gen(input_dict)["output_gen"] + density_high_from_gen.stop_gradient = True + + density_input_from_target = paddle.concat( + [density_low_inp, density_high_from_target], axis=1 + ) + density_input_from_gen = paddle.concat( + [density_low_inp, density_high_from_gen], axis=1 + ) + return { + "input_disc_from_target": density_input_from_target, + "input_disc_from_gen": density_input_from_gen, + } + + def transform_in_tempo(self, _in): + density_high_from_target = _in["density_high"] + + input_dict = reshape_input(_in) + density_high_from_gen = self.model_gen(input_dict)["output_gen"] + density_high_from_gen.stop_gradient = True + + input_trans = { + "input_tempo_disc_from_target": density_high_from_target, + "input_tempo_disc_from_gen": density_high_from_gen, + } + + return dereshape_input(input_trans, 3) + + def loss_func(self, output_dict, *args): + out_disc_from_target = output_dict["out_disc_from_target"] + out_disc_from_gen = output_dict["out_disc_from_gen"] + + label_ones = paddle.ones_like(out_disc_from_target) + label_zeros = paddle.zeros_like(out_disc_from_gen) + + loss_disc_from_target = F.binary_cross_entropy_with_logits( + out_disc_from_target, label_ones, reduction="mean" + ) + loss_disc_from_gen = F.binary_cross_entropy_with_logits( + out_disc_from_gen, label_zeros, reduction="mean" + ) + losses = loss_disc_from_target * self.weight_disc + loss_disc_from_gen + return {"CE_loss": losses} + + def loss_func_tempo(self, output_dict, *args): + out_disc_tempo_from_target = output_dict["out_disc_tempo_from_target"] + out_disc_tempo_from_gen = output_dict["out_disc_tempo_from_gen"] + + label_ones = paddle.ones_like(out_disc_tempo_from_target) + label_zeros = paddle.zeros_like(out_disc_tempo_from_gen) + + loss_disc_tempo_from_target = F.binary_cross_entropy_with_logits( + out_disc_tempo_from_target, label_ones, reduction="mean" + ) + loss_disc_tempo_from_gen = F.binary_cross_entropy_with_logits( + out_disc_tempo_from_gen, label_zeros, reduction="mean" + ) + losses = ( + loss_disc_tempo_from_target * self.weight_disc + loss_disc_tempo_from_gen + ) + return {"CE_tempo_loss": losses} + + +class DataFuncs: + """All functions used for data transform. + + Args: + tile_ratio (int, optional): How many tiles of one dim. Defaults to 1. + density_min (float, optional): Minimize density of one tile. Defaults to 0.02. + max_turn (int, optional): Maximize turn of taking a tile from one image. Defaults to 20. 
+ """ + + def __init__( + self, tile_ratio: int = 1, density_min: float = 0.02, max_turn: int = 20 + ) -> None: + self.tile_ratio = tile_ratio + self.density_min = density_min + self.max_turn = max_turn + + def transform( + self, + input_item: Dict[str, np.ndarray], + label_item: Dict[str, np.ndarray], + weight_item: Dict[str, np.ndarray], + ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Dict[str, np.ndarray]]: + if self.tile_ratio == 1: + return input_item, label_item, weight_item + for _ in range(self.max_turn): + rand_ratio = np.random.rand() + density_low = self.cut_data(input_item["density_low"], rand_ratio) + density_high = self.cut_data(input_item["density_high"], rand_ratio) + if self.is_valid_tile(density_low): + break + + input_item["density_low"] = density_low + input_item["density_high"] = density_high + return input_item, label_item, weight_item + + def cut_data(self, data: np.ndarray, rand_ratio: float) -> paddle.Tensor: + # data: C,H,W + _, H, W = data.shape + if H % self.tile_ratio != 0 or W % self.tile_ratio != 0: + exit( + f"ERROR: input images cannot be divided into {self.tile_ratio} parts evenly!" + ) + tile_shape = [H // self.tile_ratio, W // self.tile_ratio] + rand_shape = np.floor(rand_ratio * (np.array([H, W]) - np.array(tile_shape))) + start = [int(rand_shape[0]), int(rand_shape[1])] + end = [int(rand_shape[0] + tile_shape[0]), int(rand_shape[1] + tile_shape[1])] + data = paddle.slice( + paddle.to_tensor(data), axes=[-2, -1], starts=start, ends=end + ) + + return data + + def is_valid_tile(self, tile: paddle.Tensor): + img_density = tile[0].sum() + return img_density >= ( + self.density_min * tile.shape[0] * tile.shape[1] * tile.shape[2] + ) diff --git a/examples/tempoGAN/tempoGAN.py b/examples/tempoGAN/tempoGAN.py index 99db835406..e25c78dd31 100644 --- a/examples/tempoGAN/tempoGAN.py +++ b/examples/tempoGAN/tempoGAN.py @@ -1,483 +1,483 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -from os import path as osp - -import functions as func_module -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig - -import ppsci -from ppsci.utils import checker -from ppsci.utils import logger -from ppsci.utils import save_load - -if not checker.dynamic_import_to_globals("hdf5storage"): - raise ImportError( - "Could not import hdf5storage python package. " - "Please install it with `pip install hdf5storage`." - ) -import hdf5storage - - -def train(cfg: DictConfig): - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") - - gen_funcs = func_module.GenFuncs( - cfg.WEIGHT_GEN, (cfg.WEIGHT_GEN_LAYER if cfg.USE_SPATIALDISC else None) - ) - disc_funcs = func_module.DiscFuncs(cfg.WEIGHT_DISC) - data_funcs = func_module.DataFuncs(cfg.TILE_RATIO) - - # load dataset - logger.message( - "Attention! 
Start loading datasets, this will take tens of seconds to several minutes, please wait patiently." - ) - dataset_train = hdf5storage.loadmat(cfg.DATASET_PATH) - logger.message("Finish loading training dataset.") - dataset_valid = hdf5storage.loadmat(cfg.DATASET_PATH_VALID) - logger.message("Finish loading validation dataset.") - - # define Generator model - model_gen = ppsci.arch.Generator(**cfg.MODEL.gen_net) - model_gen.register_input_transform(gen_funcs.transform_in) - disc_funcs.model_gen = model_gen - - model_tuple = (model_gen,) - # define Discriminators - if cfg.USE_SPATIALDISC: - model_disc = ppsci.arch.Discriminator(**cfg.MODEL.disc_net) - model_disc.register_input_transform(disc_funcs.transform_in) - model_tuple += (model_disc,) - - # define temporal Discriminators - if cfg.USE_TEMPODISC: - model_disc_tempo = ppsci.arch.Discriminator(**cfg.MODEL.tempo_net) - model_disc_tempo.register_input_transform(disc_funcs.transform_in_tempo) - model_tuple += (model_disc_tempo,) - - # define model_list - model_list = ppsci.arch.ModelList(model_tuple) - - # initialize Adam optimizer - lr_scheduler_gen = ppsci.optimizer.lr_scheduler.Step( - step_size=cfg.TRAIN.epochs // 2, **cfg.TRAIN.lr_scheduler - )() - optimizer_gen = ppsci.optimizer.Adam(lr_scheduler_gen)(model_gen) - if cfg.USE_SPATIALDISC: - lr_scheduler_disc = ppsci.optimizer.lr_scheduler.Step( - step_size=cfg.TRAIN.epochs // 2, **cfg.TRAIN.lr_scheduler - )() - optimizer_disc = ppsci.optimizer.Adam(lr_scheduler_disc)(model_disc) - if cfg.USE_TEMPODISC: - lr_scheduler_disc_tempo = ppsci.optimizer.lr_scheduler.Step( - step_size=cfg.TRAIN.epochs // 2, **cfg.TRAIN.lr_scheduler - )() - optimizer_disc_tempo = ppsci.optimizer.Adam(lr_scheduler_disc_tempo)( - (model_disc_tempo,) - ) - - # Generator - # manually build constraint(s) - sup_constraint_gen = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": { - "density_low": dataset_train["density_low"], - "density_high": dataset_train["density_high"], - }, - "transforms": ( - { - "FunctionalTransform": { - "transform_func": data_funcs.transform, - }, - }, - ), - }, - "batch_size": cfg.TRAIN.batch_size.sup_constraint, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.FunctionalLoss(gen_funcs.loss_func_gen), - { - "output_gen": lambda out: out["output_gen"], - "density_high": lambda out: out["density_high"], - }, - name="sup_constraint_gen", - ) - constraint_gen = {sup_constraint_gen.name: sup_constraint_gen} - if cfg.USE_TEMPODISC: - sup_constraint_gen_tempo = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": { - "density_low": dataset_train["density_low_tempo"], - "density_high": dataset_train["density_high_tempo"], - }, - "transforms": ( - { - "FunctionalTransform": { - "transform_func": data_funcs.transform, - }, - }, - ), - }, - "batch_size": int(cfg.TRAIN.batch_size.sup_constraint // 3), - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.FunctionalLoss(gen_funcs.loss_func_gen_tempo), - { - "output_gen": lambda out: out["output_gen"], - "density_high": lambda out: out["density_high"], - }, - name="sup_constraint_gen_tempo", - ) - constraint_gen[sup_constraint_gen_tempo.name] = sup_constraint_gen_tempo - - # Discriminators - # manually build constraint(s) - if cfg.USE_SPATIALDISC: - sup_constraint_disc = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": 
"NamedArrayDataset", - "input": { - "density_low": dataset_train["density_low"], - "density_high": dataset_train["density_high"], - }, - "label": { - "out_disc_from_target": np.ones( - (np.shape(dataset_train["density_high"])[0], 1), - dtype=paddle.get_default_dtype(), - ), - "out_disc_from_gen": np.ones( - (np.shape(dataset_train["density_high"])[0], 1), - dtype=paddle.get_default_dtype(), - ), - }, - "transforms": ( - { - "FunctionalTransform": { - "transform_func": data_funcs.transform, - }, - }, - ), - }, - "batch_size": cfg.TRAIN.batch_size.sup_constraint, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.FunctionalLoss(disc_funcs.loss_func), - name="sup_constraint_disc", - ) - constraint_disc = {sup_constraint_disc.name: sup_constraint_disc} - - # temporal Discriminators - # manually build constraint(s) - if cfg.USE_TEMPODISC: - sup_constraint_disc_tempo = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": { - "density_low": dataset_train["density_low_tempo"], - "density_high": dataset_train["density_high_tempo"], - }, - "label": { - "out_disc_tempo_from_target": np.ones( - (np.shape(dataset_train["density_high_tempo"])[0], 1), - dtype=paddle.get_default_dtype(), - ), - "out_disc_tempo_from_gen": np.ones( - (np.shape(dataset_train["density_high_tempo"])[0], 1), - dtype=paddle.get_default_dtype(), - ), - }, - "transforms": ( - { - "FunctionalTransform": { - "transform_func": data_funcs.transform, - }, - }, - ), - }, - "batch_size": int(cfg.TRAIN.batch_size.sup_constraint // 3), - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - }, - ppsci.loss.FunctionalLoss(disc_funcs.loss_func_tempo), - name="sup_constraint_disc_tempo", - ) - constraint_disc_tempo = { - sup_constraint_disc_tempo.name: sup_constraint_disc_tempo - } - - # initialize solver - solver_gen = ppsci.solver.Solver( - model_list, - constraint_gen, - cfg.output_dir, - optimizer_gen, - lr_scheduler_gen, - cfg.TRAIN.epochs_gen, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, - ) - if cfg.USE_SPATIALDISC: - solver_disc = ppsci.solver.Solver( - model_list, - constraint_disc, - cfg.output_dir, - optimizer_disc, - lr_scheduler_disc, - cfg.TRAIN.epochs_disc, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, - ) - if cfg.USE_TEMPODISC: - solver_disc_tempo = ppsci.solver.Solver( - model_list, - constraint_disc_tempo, - cfg.output_dir, - optimizer_disc_tempo, - lr_scheduler_disc_tempo, - cfg.TRAIN.epochs_disc_tempo, - cfg.TRAIN.iters_per_epoch, - eval_during_train=cfg.TRAIN.eval_during_train, - use_amp=cfg.USE_AMP, - amp_level=cfg.TRAIN.amp_level, - ) - - PRED_INTERVAL = 200 - for i in range(1, cfg.TRAIN.epochs + 1): - logger.message(f"\nEpoch: {i}\n") - # plotting during training - if i == 1 or i % PRED_INTERVAL == 0 or i == cfg.TRAIN.epochs: - func_module.predict_and_save_plot( - cfg.output_dir, i, solver_gen, dataset_valid, cfg.TILE_RATIO - ) - - disc_funcs.model_gen = model_gen - # train disc, input: (x,y,G(x)) - if cfg.USE_SPATIALDISC: - solver_disc.train() - - # train disc tempo, input: (y_3,G(x)_3) - if cfg.USE_TEMPODISC: - solver_disc_tempo.train() - - # train gen, input: (x,) - solver_gen.train() - - ############### evaluation for training ############### - img_target = ( - func_module.get_image_array( - 
os.path.join(cfg.output_dir, "predict", "target.png") - ) - / 255.0 - ) - img_pred = ( - func_module.get_image_array( - os.path.join( - cfg.output_dir, "predict", f"pred_epoch_{cfg.TRAIN.epochs}.png" - ) - ) - / 255.0 - ) - eval_mse, eval_psnr, eval_ssim = func_module.evaluate_img(img_target, img_pred) - logger.message(f"MSE: {eval_mse}, PSNR: {eval_psnr}, SSIM: {eval_ssim}") - - -def evaluate(cfg: DictConfig): - if cfg.EVAL.save_outs: - from matplotlib import image as Img - - os.makedirs(osp.join(cfg.output_dir, "eval_outs"), exist_ok=True) - - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") - - gen_funcs = func_module.GenFuncs(cfg.WEIGHT_GEN, None) - - # load dataset - dataset_valid = hdf5storage.loadmat(cfg.DATASET_PATH_VALID) - - # define Generator model - model_gen = ppsci.arch.Generator(**cfg.MODEL.gen_net) - model_gen.register_input_transform(gen_funcs.transform_in) - - # define model_list - model_list = ppsci.arch.ModelList((model_gen,)) - - # load pretrained model - save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "NamedArrayDataset", - "input": { - "density_low": dataset_valid["density_low"], - }, - "label": {"density_high": dataset_valid["density_high"]}, - }, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": False, - }, - "batch_size": 1, - } - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.MSELoss("mean"), - {"density_high": lambda out: out["output_gen"]}, - metric={"metric": ppsci.metric.L2Rel()}, - name="sup_validator_gen", - ) - - # customized evalution - def scale(data): - smax = np.max(data) - smin = np.min(data) - return (data - smin) / (smax - smin) - - eval_mse_list = [] - eval_psnr_list = [] - eval_ssim_list = [] - for i, (input, label, _) in enumerate(sup_validator.data_loader): - output_dict = model_list({"density_low": input["density_low"]}) - output_arr = scale(np.squeeze(output_dict["output_gen"].numpy())) - target_arr = scale(np.squeeze(label["density_high"].numpy())) - - eval_mse, eval_psnr, eval_ssim = func_module.evaluate_img( - target_arr, output_arr - ) - eval_mse_list.append(eval_mse) - eval_psnr_list.append(eval_psnr) - eval_ssim_list.append(eval_ssim) - - if cfg.EVAL.save_outs: - Img.imsave( - osp.join(cfg.output_dir, "eval_outs", f"out_{i}.png"), - output_arr, - vmin=0.0, - vmax=1.0, - cmap="gray", - ) - logger.message( - f"MSE: {np.mean(eval_mse_list)}, PSNR: {np.mean(eval_psnr_list)}, SSIM: {np.mean(eval_ssim_list)}" - ) - - -def export(cfg: DictConfig): - from paddle.static import InputSpec - - # set models - gen_funcs = func_module.GenFuncs(cfg.WEIGHT_GEN, None) - model_gen = ppsci.arch.Generator(**cfg.MODEL.gen_net) - model_gen.register_input_transform(gen_funcs.transform_in) - - # define model_list - model_list = ppsci.arch.ModelList((model_gen,)) - - # load pretrained model - solver = ppsci.solver.Solver( - model=model_list, pretrained_model_path=cfg.INFER.pretrained_model_path - ) - - # export models - input_spec = [ - {"density_low": InputSpec([None, 1, 128, 128], "float32", name="density_low")}, - ] - solver.export(input_spec, cfg.INFER.export_path, skip_prune_program=True) - - -def inference(cfg: DictConfig): - from matplotlib import image as Img - - from deploy.python_infer import pinn_predictor - - # set model predictor - predictor = pinn_predictor.PINNPredictor(cfg) - - # load dataset - 
dataset_infer = { - "density_low": hdf5storage.loadmat(cfg.DATASET_PATH_VALID)["density_low"] - } - - output_dict = predictor.predict(dataset_infer, cfg.INFER.batch_size) - - # mapping data to cfg.INFER.output_keys - output = [output_dict[key] for key in output_dict] - - def scale(data): - smax = np.max(data) - smin = np.min(data) - return (data - smin) / (smax - smin) - - for i, img in enumerate(output[0]): - img = scale(np.squeeze(img)) - Img.imsave( - osp.join(cfg.output_dir, f"out_{i}.png"), - img, - vmin=0.0, - vmax=1.0, - cmap="gray", - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="tempogan.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from os import path as osp + +import functions as func_module +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig + +import ppsci +from ppsci.utils import checker +from ppsci.utils import logger +from ppsci.utils import save_load + +if not checker.dynamic_import_to_globals("hdf5storage"): + raise ImportError( + "Could not import hdf5storage python package. " + "Please install it with `pip install hdf5storage`." + ) +import hdf5storage + + +def train(cfg: DictConfig): + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "train.log"), "info") + + gen_funcs = func_module.GenFuncs( + cfg.WEIGHT_GEN, (cfg.WEIGHT_GEN_LAYER if cfg.USE_SPATIALDISC else None) + ) + disc_funcs = func_module.DiscFuncs(cfg.WEIGHT_DISC) + data_funcs = func_module.DataFuncs(cfg.TILE_RATIO) + + # load dataset + logger.message( + "Attention! Start loading datasets, this will take tens of seconds to several minutes, please wait patiently." 
+ ) + dataset_train = hdf5storage.loadmat(cfg.DATASET_PATH) + logger.message("Finish loading training dataset.") + dataset_valid = hdf5storage.loadmat(cfg.DATASET_PATH_VALID) + logger.message("Finish loading validation dataset.") + + # define Generator model + model_gen = ppsci.arch.Generator(**cfg.MODEL.gen_net) + model_gen.register_input_transform(gen_funcs.transform_in) + disc_funcs.model_gen = model_gen + + model_tuple = (model_gen,) + # define Discriminators + if cfg.USE_SPATIALDISC: + model_disc = ppsci.arch.Discriminator(**cfg.MODEL.disc_net) + model_disc.register_input_transform(disc_funcs.transform_in) + model_tuple += (model_disc,) + + # define temporal Discriminators + if cfg.USE_TEMPODISC: + model_disc_tempo = ppsci.arch.Discriminator(**cfg.MODEL.tempo_net) + model_disc_tempo.register_input_transform(disc_funcs.transform_in_tempo) + model_tuple += (model_disc_tempo,) + + # define model_list + model_list = ppsci.arch.ModelList(model_tuple) + + # initialize Adam optimizer + lr_scheduler_gen = ppsci.optimizer.lr_scheduler.Step( + step_size=cfg.TRAIN.epochs // 2, **cfg.TRAIN.lr_scheduler + )() + optimizer_gen = ppsci.optimizer.Adam(lr_scheduler_gen)(model_gen) + if cfg.USE_SPATIALDISC: + lr_scheduler_disc = ppsci.optimizer.lr_scheduler.Step( + step_size=cfg.TRAIN.epochs // 2, **cfg.TRAIN.lr_scheduler + )() + optimizer_disc = ppsci.optimizer.Adam(lr_scheduler_disc)(model_disc) + if cfg.USE_TEMPODISC: + lr_scheduler_disc_tempo = ppsci.optimizer.lr_scheduler.Step( + step_size=cfg.TRAIN.epochs // 2, **cfg.TRAIN.lr_scheduler + )() + optimizer_disc_tempo = ppsci.optimizer.Adam(lr_scheduler_disc_tempo)( + (model_disc_tempo,) + ) + + # Generator + # manually build constraint(s) + sup_constraint_gen = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "density_low": dataset_train["density_low"], + "density_high": dataset_train["density_high"], + }, + "transforms": ( + { + "FunctionalTransform": { + "transform_func": data_funcs.transform, + }, + }, + ), + }, + "batch_size": cfg.TRAIN.batch_size.sup_constraint, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.FunctionalLoss(gen_funcs.loss_func_gen), + { + "output_gen": lambda out: out["output_gen"], + "density_high": lambda out: out["density_high"], + }, + name="sup_constraint_gen", + ) + constraint_gen = {sup_constraint_gen.name: sup_constraint_gen} + if cfg.USE_TEMPODISC: + sup_constraint_gen_tempo = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "density_low": dataset_train["density_low_tempo"], + "density_high": dataset_train["density_high_tempo"], + }, + "transforms": ( + { + "FunctionalTransform": { + "transform_func": data_funcs.transform, + }, + }, + ), + }, + "batch_size": int(cfg.TRAIN.batch_size.sup_constraint // 3), + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.FunctionalLoss(gen_funcs.loss_func_gen_tempo), + { + "output_gen": lambda out: out["output_gen"], + "density_high": lambda out: out["density_high"], + }, + name="sup_constraint_gen_tempo", + ) + constraint_gen[sup_constraint_gen_tempo.name] = sup_constraint_gen_tempo + + # Discriminators + # manually build constraint(s) + if cfg.USE_SPATIALDISC: + sup_constraint_disc = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "density_low": dataset_train["density_low"], + "density_high": 
dataset_train["density_high"], + }, + "label": { + "out_disc_from_target": np.ones( + (np.shape(dataset_train["density_high"])[0], 1), + dtype=paddle.get_default_dtype(), + ), + "out_disc_from_gen": np.ones( + (np.shape(dataset_train["density_high"])[0], 1), + dtype=paddle.get_default_dtype(), + ), + }, + "transforms": ( + { + "FunctionalTransform": { + "transform_func": data_funcs.transform, + }, + }, + ), + }, + "batch_size": cfg.TRAIN.batch_size.sup_constraint, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.FunctionalLoss(disc_funcs.loss_func), + name="sup_constraint_disc", + ) + constraint_disc = {sup_constraint_disc.name: sup_constraint_disc} + + # temporal Discriminators + # manually build constraint(s) + if cfg.USE_TEMPODISC: + sup_constraint_disc_tempo = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "density_low": dataset_train["density_low_tempo"], + "density_high": dataset_train["density_high_tempo"], + }, + "label": { + "out_disc_tempo_from_target": np.ones( + (np.shape(dataset_train["density_high_tempo"])[0], 1), + dtype=paddle.get_default_dtype(), + ), + "out_disc_tempo_from_gen": np.ones( + (np.shape(dataset_train["density_high_tempo"])[0], 1), + dtype=paddle.get_default_dtype(), + ), + }, + "transforms": ( + { + "FunctionalTransform": { + "transform_func": data_funcs.transform, + }, + }, + ), + }, + "batch_size": int(cfg.TRAIN.batch_size.sup_constraint // 3), + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + }, + ppsci.loss.FunctionalLoss(disc_funcs.loss_func_tempo), + name="sup_constraint_disc_tempo", + ) + constraint_disc_tempo = { + sup_constraint_disc_tempo.name: sup_constraint_disc_tempo + } + + # initialize solver + solver_gen = ppsci.solver.Solver( + model_list, + constraint_gen, + cfg.output_dir, + optimizer_gen, + lr_scheduler_gen, + cfg.TRAIN.epochs_gen, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + use_amp=cfg.USE_AMP, + amp_level=cfg.TRAIN.amp_level, + ) + if cfg.USE_SPATIALDISC: + solver_disc = ppsci.solver.Solver( + model_list, + constraint_disc, + cfg.output_dir, + optimizer_disc, + lr_scheduler_disc, + cfg.TRAIN.epochs_disc, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + use_amp=cfg.USE_AMP, + amp_level=cfg.TRAIN.amp_level, + ) + if cfg.USE_TEMPODISC: + solver_disc_tempo = ppsci.solver.Solver( + model_list, + constraint_disc_tempo, + cfg.output_dir, + optimizer_disc_tempo, + lr_scheduler_disc_tempo, + cfg.TRAIN.epochs_disc_tempo, + cfg.TRAIN.iters_per_epoch, + eval_during_train=cfg.TRAIN.eval_during_train, + use_amp=cfg.USE_AMP, + amp_level=cfg.TRAIN.amp_level, + ) + + PRED_INTERVAL = 200 + for i in range(1, cfg.TRAIN.epochs + 1): + logger.message(f"\nEpoch: {i}\n") + # plotting during training + if i == 1 or i % PRED_INTERVAL == 0 or i == cfg.TRAIN.epochs: + func_module.predict_and_save_plot( + cfg.output_dir, i, solver_gen, dataset_valid, cfg.TILE_RATIO + ) + + disc_funcs.model_gen = model_gen + # train disc, input: (x,y,G(x)) + if cfg.USE_SPATIALDISC: + solver_disc.train() + + # train disc tempo, input: (y_3,G(x)_3) + if cfg.USE_TEMPODISC: + solver_disc_tempo.train() + + # train gen, input: (x,) + solver_gen.train() + + ############### evaluation for training ############### + img_target = ( + func_module.get_image_array( + os.path.join(cfg.output_dir, "predict", "target.png") + ) + / 255.0 + ) + img_pred = ( + 
func_module.get_image_array( + os.path.join( + cfg.output_dir, "predict", f"pred_epoch_{cfg.TRAIN.epochs}.png" + ) + ) + / 255.0 + ) + eval_mse, eval_psnr, eval_ssim = func_module.evaluate_img(img_target, img_pred) + logger.message(f"MSE: {eval_mse}, PSNR: {eval_psnr}, SSIM: {eval_ssim}") + + +def evaluate(cfg: DictConfig): + if cfg.EVAL.save_outs: + from matplotlib import image as Img + + os.makedirs(osp.join(cfg.output_dir, "eval_outs"), exist_ok=True) + + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, "eval.log"), "info") + + gen_funcs = func_module.GenFuncs(cfg.WEIGHT_GEN, None) + + # load dataset + dataset_valid = hdf5storage.loadmat(cfg.DATASET_PATH_VALID) + + # define Generator model + model_gen = ppsci.arch.Generator(**cfg.MODEL.gen_net) + model_gen.register_input_transform(gen_funcs.transform_in) + + # define model_list + model_list = ppsci.arch.ModelList((model_gen,)) + + # load pretrained model + save_load.load_pretrain(model_list, cfg.EVAL.pretrained_model_path) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "density_low": dataset_valid["density_low"], + }, + "label": {"density_high": dataset_valid["density_high"]}, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": False, + }, + "batch_size": 1, + } + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.MSELoss("mean"), + {"density_high": lambda out: out["output_gen"]}, + metric={"metric": ppsci.metric.L2Rel()}, + name="sup_validator_gen", + ) + + # customized evalution + def scale(data): + smax = np.max(data) + smin = np.min(data) + return (data - smin) / (smax - smin) + + eval_mse_list = [] + eval_psnr_list = [] + eval_ssim_list = [] + for i, (input, label, _) in enumerate(sup_validator.data_loader): + output_dict = model_list({"density_low": input["density_low"]}) + output_arr = scale(np.squeeze(output_dict["output_gen"].numpy())) + target_arr = scale(np.squeeze(label["density_high"].numpy())) + + eval_mse, eval_psnr, eval_ssim = func_module.evaluate_img( + target_arr, output_arr + ) + eval_mse_list.append(eval_mse) + eval_psnr_list.append(eval_psnr) + eval_ssim_list.append(eval_ssim) + + if cfg.EVAL.save_outs: + Img.imsave( + osp.join(cfg.output_dir, "eval_outs", f"out_{i}.png"), + output_arr, + vmin=0.0, + vmax=1.0, + cmap="gray", + ) + logger.message( + f"MSE: {np.mean(eval_mse_list)}, PSNR: {np.mean(eval_psnr_list)}, SSIM: {np.mean(eval_ssim_list)}" + ) + + +def export(cfg: DictConfig): + from paddle.static import InputSpec + + # set models + gen_funcs = func_module.GenFuncs(cfg.WEIGHT_GEN, None) + model_gen = ppsci.arch.Generator(**cfg.MODEL.gen_net) + model_gen.register_input_transform(gen_funcs.transform_in) + + # define model_list + model_list = ppsci.arch.ModelList((model_gen,)) + + # load pretrained model + solver = ppsci.solver.Solver( + model=model_list, pretrained_model_path=cfg.INFER.pretrained_model_path + ) + + # export models + input_spec = [ + {"density_low": InputSpec([None, 1, 128, 128], "float32", name="density_low")}, + ] + solver.export(input_spec, cfg.INFER.export_path, skip_prune_program=True) + + +def inference(cfg: DictConfig): + from matplotlib import image as Img + + from deploy.python_infer import pinn_predictor + + # set model predictor + predictor = pinn_predictor.PINNPredictor(cfg) + + # load dataset + dataset_infer = { + "density_low": hdf5storage.loadmat(cfg.DATASET_PATH_VALID)["density_low"] + 
+    }
+
+    output_dict = predictor.predict(dataset_infer, cfg.INFER.batch_size)
+
+    # mapping data to cfg.INFER.output_keys
+    output = [output_dict[key] for key in output_dict]
+
+    def scale(data):
+        smax = np.max(data)
+        smin = np.min(data)
+        return (data - smin) / (smax - smin)
+
+    for i, img in enumerate(output[0]):
+        img = scale(np.squeeze(img))
+        Img.imsave(
+            osp.join(cfg.output_dir, f"out_{i}.png"),
+            img,
+            vmin=0.0,
+            vmax=1.0,
+            cmap="gray",
+        )
+
+
+@hydra.main(version_base=None, config_path="./conf", config_name="tempogan.yaml")
+def main(cfg: DictConfig):
+    if cfg.mode == "train":
+        train(cfg)
+    elif cfg.mode == "eval":
+        evaluate(cfg)
+    elif cfg.mode == "export":
+        export(cfg)
+    elif cfg.mode == "infer":
+        inference(cfg)
+    else:
+        raise ValueError(
+            f"cfg.mode should be in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'"
+        )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/tgcn/run.py b/examples/tgcn/run.py
index ab2d6c7e3d..a78898141a 100644
--- a/examples/tgcn/run.py
+++ b/examples/tgcn/run.py
@@ -1,182 +1,182 @@
-import hydra
-from omegaconf import DictConfig
-
-import ppsci
-from ppsci.arch.tgcn import TGCN
-from ppsci.data.dataset.pems_dataset import get_edge_index
-
-
-def train(cfg: DictConfig):
-    # set train dataloader config
-    train_dataloader_cfg = {
-        "dataset": {
-            "name": "PEMSDataset",
-            "file_path": cfg.data_path,
-            "split": "train",
-            "input_keys": cfg.MODEL.input_keys,
-            "label_keys": cfg.MODEL.label_keys,
-            "norm_input": cfg.norm_input,
-            "norm_label": cfg.norm_label,
-            "input_len": cfg.input_len,
-            "label_len": cfg.label_len,
-        },
-        "sampler": {
-            "name": "BatchSampler",
-            "drop_last": True,
-            "shuffle": True,
-        },
-        "batch_size": cfg.TRAIN.batch_size,
-    }
-
-    # set constraint
-    sup_constraint = ppsci.constraint.SupervisedConstraint(
-        train_dataloader_cfg, ppsci.loss.L1Loss(), name="train"
-    )
-    constraint =
{sup_constraint.name: sup_constraint} - - # set eval dataloader config - eval_dataloader_cfg = { - "dataset": { - "name": "PEMSDataset", - "file_path": cfg.data_path, - "split": "val", - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.label_keys, - "norm_input": cfg.norm_input, - "norm_label": cfg.norm_label, - "input_len": cfg.input_len, - "label_len": cfg.label_len, - }, - "sampler": { - "name": "BatchSampler", - }, - "batch_size": cfg.EVAL.batch_size, - } - - # set validator - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - ppsci.loss.L1Loss(), - metric={"MAE": ppsci.metric.MAE(), "RMSE": ppsci.metric.RMSE()}, - name="val", - ) - validator = {sup_validator.name: sup_validator} - - # get adj - _, _, adj = get_edge_index(cfg.data_path, reduce=cfg.reduce) - # set model - model = TGCN( - input_keys=cfg.MODEL.input_keys, - output_keys=cfg.MODEL.label_keys, - adj=adj, - in_dim=cfg.input_dim, - emb_dim=cfg.emb_dim, - hidden=cfg.hidden, - gc_layer=cfg.gc_layer, - tc_layer=cfg.tc_layer, - k_s=cfg.tc_kernel_size, - dropout=cfg.dropout, - alpha=cfg.leakyrelu_alpha, - input_len=cfg.input_len, - label_len=cfg.label_len, - ) - # init optimizer - optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) - # set iters_per_epoch by dataloader length - iters_per_epoch = len(sup_constraint.data_loader) - - # initialize solver - solver = ppsci.solver.Solver( - model=model, - constraint=constraint, - output_dir=cfg.output_dir, - optimizer=optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=iters_per_epoch, - log_freq=cfg.log_freq, - eval_during_train=True, - device=cfg.device, - validator=validator, - pretrained_model_path=cfg.TRAIN.pretrained_model_path, - eval_with_no_grad=True, - ) - # train model - solver.train() - - -def eval(cfg: DictConfig): - # set eval dataloader config - test_dataloader_cfg = { - "dataset": { - "name": "PEMSDataset", - "file_path": cfg.data_path, - "split": "test", - "input_keys": cfg.MODEL.input_keys, - "label_keys": cfg.MODEL.label_keys, - "norm_input": cfg.norm_input, - "norm_label": cfg.norm_label, - "input_len": cfg.input_len, - "label_len": cfg.label_len, - }, - "sampler": { - "name": "BatchSampler", - }, - "batch_size": cfg.EVAL.batch_size, - } - - # set validator - sup_validator = ppsci.validate.SupervisedValidator( - test_dataloader_cfg, - ppsci.loss.L1Loss(), - metric={"MAE": ppsci.metric.MAE(), "RMSE": ppsci.metric.RMSE()}, - name="test", - ) - validator = {sup_validator.name: sup_validator} - - # get adj - _, _, adj = get_edge_index(cfg.data_path, reduce=cfg.reduce) - # set model - model = TGCN( - input_keys=cfg.MODEL.input_keys, - output_keys=cfg.MODEL.label_keys, - adj=adj, - in_dim=cfg.input_dim, - emb_dim=cfg.emb_dim, - hidden=cfg.hidden, - gc_layer=cfg.gc_layer, - tc_layer=cfg.tc_layer, - k_s=cfg.tc_kernel_size, - dropout=cfg.dropout, - alpha=cfg.leakyrelu_alpha, - input_len=cfg.input_len, - label_len=cfg.label_len, - ) - - # initialize solver - solver = ppsci.solver.Solver( - model=model, - output_dir=cfg.output_dir, - log_freq=cfg.log_freq, - device=cfg.device, - validator=validator, - pretrained_model_path=cfg.EVAL.pretrained_model_path, - eval_with_no_grad=True, - ) - # evaluate - solver.eval() - - -@hydra.main(version_base=None, config_path="./conf", config_name="run.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - eval(cfg) - else: - raise ValueError( - "cfg.mode should in [train, eval], but got {}".format(cfg.mode) - ) - - -if __name__ 
== "__main__": - main() +import hydra +from omegaconf import DictConfig + +import ppsci +from ppsci.arch.tgcn import TGCN +from ppsci.data.dataset.pems_dataset import get_edge_index + + +def train(cfg: DictConfig): + # set train dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "PEMSDataset", + "file_path": cfg.data_path, + "split": "train", + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.label_keys, + "norm_input": cfg.norm_input, + "norm_label": cfg.norm_label, + "input_len": cfg.input_len, + "label_len": cfg.label_len, + }, + "sampler": { + "name": "BatchSampler", + "drop_last": True, + "shuffle": True, + }, + "batch_size": cfg.TRAIN.batch_size, + } + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, ppsci.loss.L1Loss(), name="train" + ) + constraint = {sup_constraint.name: sup_constraint} + + # set eval dataloader config + eval_dataloader_cfg = { + "dataset": { + "name": "PEMSDataset", + "file_path": cfg.data_path, + "split": "val", + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.label_keys, + "norm_input": cfg.norm_input, + "norm_label": cfg.norm_label, + "input_len": cfg.input_len, + "label_len": cfg.label_len, + }, + "sampler": { + "name": "BatchSampler", + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + ppsci.loss.L1Loss(), + metric={"MAE": ppsci.metric.MAE(), "RMSE": ppsci.metric.RMSE()}, + name="val", + ) + validator = {sup_validator.name: sup_validator} + + # get adj + _, _, adj = get_edge_index(cfg.data_path, reduce=cfg.reduce) + # set model + model = TGCN( + input_keys=cfg.MODEL.input_keys, + output_keys=cfg.MODEL.label_keys, + adj=adj, + in_dim=cfg.input_dim, + emb_dim=cfg.emb_dim, + hidden=cfg.hidden, + gc_layer=cfg.gc_layer, + tc_layer=cfg.tc_layer, + k_s=cfg.tc_kernel_size, + dropout=cfg.dropout, + alpha=cfg.leakyrelu_alpha, + input_len=cfg.input_len, + label_len=cfg.label_len, + ) + # init optimizer + optimizer = ppsci.optimizer.Adam(learning_rate=cfg.TRAIN.learning_rate)(model) + # set iters_per_epoch by dataloader length + iters_per_epoch = len(sup_constraint.data_loader) + + # initialize solver + solver = ppsci.solver.Solver( + model=model, + constraint=constraint, + output_dir=cfg.output_dir, + optimizer=optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=iters_per_epoch, + log_freq=cfg.log_freq, + eval_during_train=True, + device=cfg.device, + validator=validator, + pretrained_model_path=cfg.TRAIN.pretrained_model_path, + eval_with_no_grad=True, + ) + # train model + solver.train() + + +def eval(cfg: DictConfig): + # set eval dataloader config + test_dataloader_cfg = { + "dataset": { + "name": "PEMSDataset", + "file_path": cfg.data_path, + "split": "test", + "input_keys": cfg.MODEL.input_keys, + "label_keys": cfg.MODEL.label_keys, + "norm_input": cfg.norm_input, + "norm_label": cfg.norm_label, + "input_len": cfg.input_len, + "label_len": cfg.label_len, + }, + "sampler": { + "name": "BatchSampler", + }, + "batch_size": cfg.EVAL.batch_size, + } + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + test_dataloader_cfg, + ppsci.loss.L1Loss(), + metric={"MAE": ppsci.metric.MAE(), "RMSE": ppsci.metric.RMSE()}, + name="test", + ) + validator = {sup_validator.name: sup_validator} + + # get adj + _, _, adj = get_edge_index(cfg.data_path, reduce=cfg.reduce) + # set model + model = TGCN( + input_keys=cfg.MODEL.input_keys, + output_keys=cfg.MODEL.label_keys, 
+        adj=adj,
+        in_dim=cfg.input_dim,
+        emb_dim=cfg.emb_dim,
+        hidden=cfg.hidden,
+        gc_layer=cfg.gc_layer,
+        tc_layer=cfg.tc_layer,
+        k_s=cfg.tc_kernel_size,
+        dropout=cfg.dropout,
+        alpha=cfg.leakyrelu_alpha,
+        input_len=cfg.input_len,
+        label_len=cfg.label_len,
+    )
+
+    # initialize solver
+    solver = ppsci.solver.Solver(
+        model=model,
+        output_dir=cfg.output_dir,
+        log_freq=cfg.log_freq,
+        device=cfg.device,
+        validator=validator,
+        pretrained_model_path=cfg.EVAL.pretrained_model_path,
+        eval_with_no_grad=True,
+    )
+    # evaluate
+    solver.eval()
+
+
+@hydra.main(version_base=None, config_path="./conf", config_name="run.yaml")
+def main(cfg: DictConfig):
+    if cfg.mode == "train":
+        train(cfg)
+    elif cfg.mode == "eval":
+        eval(cfg)
+    else:
+        raise ValueError(
+            "cfg.mode should be in [train, eval], but got {}".format(cfg.mode)
+        )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/topopt/functions.py b/examples/topopt/functions.py
index d4157f085b..a0048db1fd 100644
--- a/examples/topopt/functions.py
+++ b/examples/topopt/functions.py
@@ -1,134 +1,134 @@
-# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Callable
-from typing import Dict
-from typing import Tuple
-from typing import Union
-
-import numpy as np
-
-
-def uniform_sampler() -> Callable[[], int]:
-    """Generate uniform sampling function from 1 to 99
-
-    Returns:
-        sampler (Callable[[], int]): uniform sampling from 1 to 99
-    """
-    return lambda: np.random.randint(1, 99)
-
-
-def poisson_sampler(lam: int) -> Callable[[], int]:
-    """Generate poisson sampling function with parameter lam with range 1 to 99
-
-    Args:
-        lam (int): poisson rate parameter
-
-    Returns:
-        sampler (Callable[[], int]): poisson sampling function with parameter lam with range 1 to 99
-    """
-
-    def func():
-        iter_ = max(np.random.poisson(lam), 1)
-        iter_ = min(iter_, 99)
-        return iter_
-
-    return func
-
-
-def generate_sampler(sampler_type: str = "Fixed", num: int = 0) -> Callable[[], int]:
-    """Generate sampler for the number of initial iteration steps
-
-    Args:
-        sampler_type (str): "Poisson" for poisson sampler; "Uniform" for uniform sampler; "Fixed" for choosing a fixed number of initial iteration steps.
-        num (int): If `sampler_type` == "Poisson", `num` specifies the poisson rate parameter; If `sampler_type` == "Fixed", `num` specifies the fixed number of initial iteration steps.
- - Returns: - sampler (Callable[[], int]): sampler for the number of initial iteration steps - """ - if sampler_type == "Poisson": - return poisson_sampler(num) - elif sampler_type == "Uniform": - return uniform_sampler() - else: - return lambda: num - - -def generate_train_test( - data_iters: np.ndarray, - data_targets: np.ndarray, - train_test_ratio: float, - n_sample: int, -) -> Union[ - Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] -]: - """Generate training and testing set - - Args: - data_iters (np.ndarray): data with 100 channels corresponding to the results of 100 steps of SIMP algorithm - data_targets (np.ndarray): final optimization solution given by SIMP algorithm - train_test_ratio (float): split ratio of training and testing sets, if `train_test_ratio` = 1 then only return training data - n_sample (int): number of total samples in training and testing sets to be sampled from the h5 dataset - - Returns: - Union[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]: if `train_test_ratio` = 1, return (train_inputs, train_labels), else return (train_inputs, train_labels, test_inputs, test_labels) - """ - n_obj = len(data_iters) - idx = np.arange(n_obj) - np.random.shuffle(idx) - train_idx = idx[: int(train_test_ratio * n_sample)] - if train_test_ratio == 1.0: - return data_iters[train_idx], data_targets[train_idx] - - test_idx = idx[int(train_test_ratio * n_sample) :] - train_iters = data_iters[train_idx] - train_targets = data_targets[train_idx] - test_iters = data_iters[test_idx] - test_targets = data_targets[test_idx] - return train_iters, train_targets, test_iters, test_targets - - -def augmentation( - input_dict: Dict[str, np.ndarray], - label_dict: Dict[str, np.ndarray], - weight_dict: Dict[str, np.ndarray] = None, -) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Dict[str, np.ndarray]]: - """Apply random transformation from D4 symmetry group - - Args: - input_dict (Dict[str, np.ndarray]): input dict of np.ndarray size `(batch_size, any, height, width)` - label_dict (Dict[str, np.ndarray]): label dict of np.ndarray size `(batch_size, 1, height, width)` - weight_dict (Dict[str, np.ndarray]): weight dict if any - """ - inputs = input_dict["input"] - labels = label_dict["output"] - assert len(inputs.shape) == 3 - assert len(labels.shape) == 3 - - # random horizontal flip - if np.random.random() > 0.5: - inputs = np.flip(inputs, axis=2) - labels = np.flip(labels, axis=2) - # random vertical flip - if np.random.random() > 0.5: - inputs = np.flip(inputs, axis=1) - labels = np.flip(labels, axis=1) - # random 90* rotation - if np.random.random() > 0.5: - new_perm = list(range(len(inputs.shape))) - new_perm[-2], new_perm[-1] = new_perm[-1], new_perm[-2] - inputs = np.transpose(inputs, new_perm) - labels = np.transpose(labels, new_perm) - - return {"input": inputs}, {"output": labels}, weight_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Callable +from typing import Dict +from typing import Tuple +from typing import Union + +import numpy as np + + +def uniform_sampler() -> Callable[[], int]: + """Generate uniform sampling function from 1 to 99 + + Returns: + sampler (Callable[[], int]): uniform sampling from 1 to 99 + """ + return lambda: np.random.randint(1, 99) + + +def poisson_sampler(lam: int) -> Callable[[], int]: + """Generate poisson sampling function with parameter lam with range 1 to 99 + + Args: + lam (int): poisson rate parameter + + Returns: + sampler (Callable[[], int]): poisson sampling function with parameter lam with range 1 to 99 + """ + + def func(): + iter_ = max(np.random.poisson(lam), 1) + iter_ = min(iter_, 99) + return iter_ + + return func + + +def generate_sampler(sampler_type: str = "Fixed", num: int = 0) -> Callable[[], int]: + """Generate sampler for the number of initial iteration steps + + Args: + sampler_type (str): "Poisson" for poisson sampler; "Uniform" for uniform sampler; "Fixed" for choosing a fixed number of initial iteration steps. + num (int): If `sampler_type` == "Poisson", `num` specifies the poisson rate parameter; If `sampler_type` == "Fixed", `num` specifies the fixed number of initial iteration steps. + + Returns: + sampler (Callable[[], int]): sampler for the number of initial iteration steps + """ + if sampler_type == "Poisson": + return poisson_sampler(num) + elif sampler_type == "Uniform": + return uniform_sampler() + else: + return lambda: num + + +def generate_train_test( + data_iters: np.ndarray, + data_targets: np.ndarray, + train_test_ratio: float, + n_sample: int, +) -> Union[ + Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] +]: + """Generate training and testing set + + Args: + data_iters (np.ndarray): data with 100 channels corresponding to the results of 100 steps of SIMP algorithm + data_targets (np.ndarray): final optimization solution given by SIMP algorithm + train_test_ratio (float): split ratio of training and testing sets, if `train_test_ratio` = 1 then only return training data + n_sample (int): number of total samples in training and testing sets to be sampled from the h5 dataset + + Returns: + Union[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]]: if `train_test_ratio` = 1, return (train_inputs, train_labels), else return (train_inputs, train_labels, test_inputs, test_labels) + """ + n_obj = len(data_iters) + idx = np.arange(n_obj) + np.random.shuffle(idx) + train_idx = idx[: int(train_test_ratio * n_sample)] + if train_test_ratio == 1.0: + return data_iters[train_idx], data_targets[train_idx] + + test_idx = idx[int(train_test_ratio * n_sample) :] + train_iters = data_iters[train_idx] + train_targets = data_targets[train_idx] + test_iters = data_iters[test_idx] + test_targets = data_targets[test_idx] + return train_iters, train_targets, test_iters, test_targets + + +def augmentation( + input_dict: Dict[str, np.ndarray], + label_dict: Dict[str, np.ndarray], + weight_dict: Dict[str, np.ndarray] = None, +) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Dict[str, np.ndarray]]: + """Apply random transformation from D4 symmetry group + + Args: + input_dict (Dict[str, np.ndarray]): input dict of np.ndarray size `(batch_size, any, height, width)` + label_dict (Dict[str, np.ndarray]): label dict of np.ndarray size `(batch_size, 1, height, width)` + weight_dict (Dict[str, np.ndarray]): weight dict if any + """ + inputs = input_dict["input"] + labels = 
label_dict["output"] + assert len(inputs.shape) == 3 + assert len(labels.shape) == 3 + + # random horizontal flip + if np.random.random() > 0.5: + inputs = np.flip(inputs, axis=2) + labels = np.flip(labels, axis=2) + # random vertical flip + if np.random.random() > 0.5: + inputs = np.flip(inputs, axis=1) + labels = np.flip(labels, axis=1) + # random 90* rotation + if np.random.random() > 0.5: + new_perm = list(range(len(inputs.shape))) + new_perm[-2], new_perm[-1] = new_perm[-1], new_perm[-2] + inputs = np.transpose(inputs, new_perm) + labels = np.transpose(labels, new_perm) + + return {"input": inputs}, {"output": labels}, weight_dict diff --git a/examples/topopt/topopt.py b/examples/topopt/topopt.py index 3e855510a6..19ee8ac925 100644 --- a/examples/topopt/topopt.py +++ b/examples/topopt/topopt.py @@ -1,449 +1,449 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp -from typing import Dict - -import functions as func_module -import h5py -import hydra -import numpy as np -import paddle -from omegaconf import DictConfig -from paddle import nn -from topoptmodel import TopOptNN - -import ppsci -from ppsci.utils import logger - - -def train(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # 4 training cases parameters - LEARNING_RATE = cfg.TRAIN.learning_rate / (1 + cfg.TRAIN.epochs // 15) - ITERS_PER_EPOCH = int(cfg.n_samples * cfg.train_test_ratio / cfg.TRAIN.batch_size) - - # read h5 data - h5data = h5py.File(cfg.DATA_PATH, "r") - data_iters = np.array(h5data["iters"]) - data_targets = np.array(h5data["targets"]) - - # generate training dataset - inputs_train, labels_train = func_module.generate_train_test( - data_iters, data_targets, cfg.train_test_ratio, cfg.n_samples - ) - - # set constraints - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"input": inputs_train}, - "label": {"output": labels_train}, - "transforms": ( - { - "FunctionalTransform": { - "transform_func": func_module.augmentation, - }, - }, - ), - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - }, - ppsci.loss.FunctionalLoss(loss_wrapper(cfg)), - name="sup_constraint", - ) - constraint = {sup_constraint.name: sup_constraint} - - # train models for 4 cases - for sampler_key, num in cfg.CASE_PARAM: - - # initialize SIMP iteration stop time sampler - SIMP_stop_point_sampler = func_module.generate_sampler(sampler_key, num) - - # initialize logger for training - sampler_name = sampler_key + str(num) if num else sampler_key - OUTPUT_DIR = osp.join( - cfg.output_dir, f"{sampler_name}_vol_coeff{cfg.vol_coeff}" - ) - logger.init_logger("ppsci", osp.join(OUTPUT_DIR, "train.log"), "info") - - # set model - model = TopOptNN(**cfg.MODEL, 
channel_sampler=SIMP_stop_point_sampler) - - # set optimizer - optimizer = ppsci.optimizer.Adam(learning_rate=LEARNING_RATE, epsilon=1.0e-7)( - model - ) - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - OUTPUT_DIR, - optimizer, - epochs=cfg.TRAIN.epochs, - iters_per_epoch=ITERS_PER_EPOCH, - eval_during_train=cfg.TRAIN.eval_during_train, - seed=cfg.seed, - ) - - # train model - solver.train() - - -# evaluate 4 models -def evaluate(cfg: DictConfig): - # set random seed for reproducibility - ppsci.utils.misc.set_random_seed(cfg.seed) - # initialize logger - logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") - - # fixed iteration stop times for evaluation - iterations_stop_times = range(5, 85, 5) - model = TopOptNN(**cfg.MODEL) - - # evaluation for 4 cases - acc_results_summary = {} - iou_results_summary = {} - - # read h5 data - h5data = h5py.File(cfg.DATA_PATH, "r") - data_iters = np.array(h5data["iters"]) - data_targets = np.array(h5data["targets"]) - - for case_name, model_path in cfg.EVAL.pretrained_model_path_dict.items(): - acc_results, iou_results = evaluate_model( - cfg, model, model_path, data_iters, data_targets, iterations_stop_times - ) - - acc_results_summary[case_name] = acc_results - iou_results_summary[case_name] = iou_results - - # calculate thresholding results - th_acc_results = [] - th_iou_results = [] - for stop_iter in iterations_stop_times: - SIMP_stop_point_sampler = func_module.generate_sampler("Fixed", stop_iter) - - current_acc_results = [] - current_iou_results = [] - - # only calculate for NUM_VAL_STEP times of iteration - for _ in range(cfg.EVAL.num_val_step): - input_full_channel, label = func_module.generate_train_test( - data_iters, data_targets, 1.0, cfg.EVAL.batch_size - ) - # thresholding - SIMP_initial_iter_time = SIMP_stop_point_sampler() # channel k - input_channel_k = paddle.to_tensor( - input_full_channel, dtype=paddle.get_default_dtype() - )[:, SIMP_initial_iter_time, :, :] - input_channel_k_minus_1 = paddle.to_tensor( - input_full_channel, dtype=paddle.get_default_dtype() - )[:, SIMP_initial_iter_time - 1, :, :] - input = paddle.stack( - (input_channel_k, input_channel_k - input_channel_k_minus_1), axis=1 - ) - out = paddle.cast( - paddle.to_tensor(input)[:, 0:1, :, :] > 0.5, - dtype=paddle.get_default_dtype(), - ) - th_result = val_metric( - {"output": out}, - {"output": paddle.to_tensor(label, dtype=paddle.get_default_dtype())}, - ) - acc_results, iou_results = th_result["Binary_Acc"], th_result["IoU"] - current_acc_results.append(acc_results) - current_iou_results.append(iou_results) - - th_acc_results.append(np.mean(current_acc_results)) - th_iou_results.append(np.mean(current_iou_results)) - - acc_results_summary["thresholding"] = th_acc_results - iou_results_summary["thresholding"] = th_iou_results - - ppsci.utils.misc.plot_curve( - acc_results_summary, - xlabel="iteration", - ylabel="accuracy", - output_dir=cfg.output_dir, - ) - ppsci.utils.misc.plot_curve( - iou_results_summary, xlabel="iteration", ylabel="iou", output_dir=cfg.output_dir - ) - - -def evaluate_model( - cfg, model, pretrained_model_path, data_iters, data_targets, iterations_stop_times -): - # load model parameters - solver = ppsci.solver.Solver( - model, - epochs=1, - iters_per_epoch=cfg.EVAL.num_val_step, - eval_with_no_grad=True, - pretrained_model_path=pretrained_model_path, - ) - - acc_results = [] - iou_results = [] - - # evaluation for different fixed iteration stop times - for stop_iter in 
iterations_stop_times: - # only evaluate for NUM_VAL_STEP times of iteration - inputs_eval, labels_eval = func_module.generate_train_test( - data_iters, data_targets, 1.0, cfg.EVAL.batch_size * cfg.EVAL.num_val_step - ) - - sup_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": {"input": inputs_eval}, - "label": {"output": labels_eval}, - "transforms": ( - { - "FunctionalTransform": { - "transform_func": func_module.augmentation, - }, - }, - ), - }, - "batch_size": cfg.EVAL.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "num_workers": 0, - }, - ppsci.loss.FunctionalLoss(loss_wrapper(cfg)), - {"output": lambda out: out["output"]}, - {"metric": ppsci.metric.FunctionalMetric(val_metric)}, - name="sup_validator", - ) - validator = {sup_validator.name: sup_validator} - solver.validator = validator - - # modify the channel_sampler in model - SIMP_stop_point_sampler = func_module.generate_sampler("Fixed", stop_iter) - solver.model.channel_sampler = SIMP_stop_point_sampler - - _, eval_result = solver.eval() - - current_acc_results = eval_result["metric"]["Binary_Acc"] - current_iou_results = eval_result["metric"]["IoU"] - - acc_results.append(current_acc_results) - iou_results.append(current_iou_results) - - return acc_results, iou_results - - -# define loss wrapper -def loss_wrapper(cfg: DictConfig): - def loss_expr(output_dict, label_dict, weight_dict=None): - label_true = label_dict["output"].reshape((-1, 1)) - label_pred = output_dict["output"].reshape((-1, 1)) - conf_loss = paddle.mean( - nn.functional.log_loss(label_pred, label_true, epsilon=1.0e-7) - ) - vol_loss = paddle.square(paddle.mean(label_true - label_pred)) - return {"output": conf_loss + cfg.vol_coeff * vol_loss} - - return loss_expr - - -# define metric -def val_metric(output_dict, label_dict, weight_dict=None): - label_pred = output_dict["output"] - label_true = label_dict["output"] - accurates = paddle.equal(paddle.round(label_true), paddle.round(label_pred)) - acc = paddle.mean(paddle.cast(accurates, dtype=paddle.get_default_dtype())) - true_negative = paddle.sum( - paddle.multiply( - paddle.equal(paddle.round(label_pred), 0.0), - paddle.equal(paddle.round(label_true), 0.0), - ), - dtype=paddle.get_default_dtype(), - ) - true_positive = paddle.sum( - paddle.multiply( - paddle.equal(paddle.round(label_pred), 1.0), - paddle.equal(paddle.round(label_true), 1.0), - ), - dtype=paddle.get_default_dtype(), - ) - false_negative = paddle.sum( - paddle.multiply( - paddle.equal(paddle.round(label_pred), 1.0), - paddle.equal(paddle.round(label_true), 0.0), - ), - dtype=paddle.get_default_dtype(), - ) - false_positive = paddle.sum( - paddle.multiply( - paddle.equal(paddle.round(label_pred), 0.0), - paddle.equal(paddle.round(label_true), 1.0), - ), - dtype=paddle.get_default_dtype(), - ) - n_negative = paddle.add(false_negative, true_negative) - n_positive = paddle.add(true_positive, false_positive) - iou = 0.5 * paddle.add( - paddle.divide(true_negative, paddle.add(n_negative, false_positive)), - paddle.divide(true_positive, paddle.add(n_positive, false_negative)), - ) - return {"Binary_Acc": acc, "IoU": iou} - - -# export model -def export(cfg: DictConfig): - # set model - model = TopOptNN(**cfg.MODEL) - - # initialize solver - solver = ppsci.solver.Solver( - model, - eval_with_no_grad=True, - pretrained_model_path=cfg.INFER.pretrained_model_path_dict[ - cfg.INFER.pretrained_model_name - ], - ) - - # export model - from 
paddle.static import InputSpec - - input_spec = [{"input": InputSpec([None, 2, 40, 40], "float32", name="input")}] - - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - # read h5 data - h5data = h5py.File(cfg.DATA_PATH, "r") - data_iters = np.array(h5data["iters"]) - data_targets = np.array(h5data["targets"]) - idx = np.random.choice(len(data_iters), cfg.INFER.img_num, False) - data_iters = data_iters[idx] - data_targets = data_targets[idx] - - sampler = func_module.generate_sampler(cfg.INFER.sampler_key, cfg.INFER.sampler_num) - data_iters = channel_sampling(sampler, data_iters) - - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - input_dict = {"input": data_iters} - output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) - - # mapping data to output_key - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip({"output"}, output_dict.keys()) - } - - save_topopt_img( - input_dict, - output_dict, - data_targets, - cfg.INFER.save_res_path, - cfg.INFER.res_img_figsize, - cfg.INFER.save_npy, - ) - - -# used for inference -def channel_sampling(sampler, input): - SIMP_initial_iter_time = sampler() - input_channel_k = input[:, SIMP_initial_iter_time, :, :] - input_channel_k_minus_1 = input[:, SIMP_initial_iter_time - 1, :, :] - input = np.stack( - (input_channel_k, input_channel_k - input_channel_k_minus_1), axis=1 - ) - return input - - -# used for inference -def save_topopt_img( - input_dict: Dict[str, np.ndarray], - output_dict: Dict[str, np.ndarray], - ground_truth: np.ndarray, - save_dir: str, - figsize: tuple = None, - save_npy: bool = False, -): - - input = input_dict["input"] - output = output_dict["output"] - import os - - import matplotlib.pyplot as plt - - os.makedirs(save_dir, exist_ok=True) - for i in range(len(input)): - plt.figure(figsize=figsize) - plt.subplot(1, 4, 1) - plt.axis("off") - plt.imshow(input[i][0], cmap="gray") - plt.title("Input Image") - plt.subplot(1, 4, 2) - plt.axis("off") - plt.imshow(input[i][1], cmap="gray") - plt.title("Input Gradient") - plt.subplot(1, 4, 3) - plt.axis("off") - plt.imshow(np.round(output[i][0]), cmap="gray") - plt.title("Prediction") - plt.subplot(1, 4, 4) - plt.axis("off") - plt.imshow(np.round(ground_truth[i][0]), cmap="gray") - plt.title("Ground Truth") - plt.show() - plt.savefig(osp.join(save_dir, f"Prediction_{i}.png")) - plt.close() - if save_npy: - with open(osp(save_dir, f"Prediction_{i}.npy"), "wb") as f: - np.save(f, output[i]) - - -@hydra.main(version_base=None, config_path="./conf", config_name="topopt.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError( - f"cfg.mode should in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'" - ) - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path as osp +from typing import Dict + +import functions as func_module +import h5py +import hydra +import numpy as np +import paddle +from omegaconf import DictConfig +from paddle import nn +from topoptmodel import TopOptNN + +import ppsci +from ppsci.utils import logger + + +def train(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # 4 training cases parameters + LEARNING_RATE = cfg.TRAIN.learning_rate / (1 + cfg.TRAIN.epochs // 15) + ITERS_PER_EPOCH = int(cfg.n_samples * cfg.train_test_ratio / cfg.TRAIN.batch_size) + + # read h5 data + h5data = h5py.File(cfg.DATA_PATH, "r") + data_iters = np.array(h5data["iters"]) + data_targets = np.array(h5data["targets"]) + + # generate training dataset + inputs_train, labels_train = func_module.generate_train_test( + data_iters, data_targets, cfg.train_test_ratio, cfg.n_samples + ) + + # set constraints + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": inputs_train}, + "label": {"output": labels_train}, + "transforms": ( + { + "FunctionalTransform": { + "transform_func": func_module.augmentation, + }, + }, + ), + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + }, + ppsci.loss.FunctionalLoss(loss_wrapper(cfg)), + name="sup_constraint", + ) + constraint = {sup_constraint.name: sup_constraint} + + # train models for 4 cases + for sampler_key, num in cfg.CASE_PARAM: + + # initialize SIMP iteration stop time sampler + SIMP_stop_point_sampler = func_module.generate_sampler(sampler_key, num) + + # initialize logger for training + sampler_name = sampler_key + str(num) if num else sampler_key + OUTPUT_DIR = osp.join( + cfg.output_dir, f"{sampler_name}_vol_coeff{cfg.vol_coeff}" + ) + logger.init_logger("ppsci", osp.join(OUTPUT_DIR, "train.log"), "info") + + # set model + model = TopOptNN(**cfg.MODEL, channel_sampler=SIMP_stop_point_sampler) + + # set optimizer + optimizer = ppsci.optimizer.Adam(learning_rate=LEARNING_RATE, epsilon=1.0e-7)( + model + ) + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + OUTPUT_DIR, + optimizer, + epochs=cfg.TRAIN.epochs, + iters_per_epoch=ITERS_PER_EPOCH, + eval_during_train=cfg.TRAIN.eval_during_train, + seed=cfg.seed, + ) + + # train model + solver.train() + + +# evaluate 4 models +def evaluate(cfg: DictConfig): + # set random seed for reproducibility + ppsci.utils.misc.set_random_seed(cfg.seed) + # initialize logger + logger.init_logger("ppsci", osp.join(cfg.output_dir, f"{cfg.mode}.log"), "info") + + # fixed iteration stop times for evaluation + iterations_stop_times = range(5, 85, 5) + model = TopOptNN(**cfg.MODEL) + + # evaluation for 4 cases + acc_results_summary = {} + iou_results_summary = {} + + # read h5 data + h5data = h5py.File(cfg.DATA_PATH, "r") + data_iters = np.array(h5data["iters"]) + data_targets = np.array(h5data["targets"]) + + for case_name, model_path in cfg.EVAL.pretrained_model_path_dict.items(): + acc_results, iou_results = evaluate_model( + cfg, model, model_path, data_iters, data_targets, iterations_stop_times + ) + + acc_results_summary[case_name] = acc_results + iou_results_summary[case_name] = iou_results + + # calculate 
thresholding results + th_acc_results = [] + th_iou_results = [] + for stop_iter in iterations_stop_times: + SIMP_stop_point_sampler = func_module.generate_sampler("Fixed", stop_iter) + + current_acc_results = [] + current_iou_results = [] + + # only calculate for NUM_VAL_STEP times of iteration + for _ in range(cfg.EVAL.num_val_step): + input_full_channel, label = func_module.generate_train_test( + data_iters, data_targets, 1.0, cfg.EVAL.batch_size + ) + # thresholding + SIMP_initial_iter_time = SIMP_stop_point_sampler() # channel k + input_channel_k = paddle.to_tensor( + input_full_channel, dtype=paddle.get_default_dtype() + )[:, SIMP_initial_iter_time, :, :] + input_channel_k_minus_1 = paddle.to_tensor( + input_full_channel, dtype=paddle.get_default_dtype() + )[:, SIMP_initial_iter_time - 1, :, :] + input = paddle.stack( + (input_channel_k, input_channel_k - input_channel_k_minus_1), axis=1 + ) + out = paddle.cast( + paddle.to_tensor(input)[:, 0:1, :, :] > 0.5, + dtype=paddle.get_default_dtype(), + ) + th_result = val_metric( + {"output": out}, + {"output": paddle.to_tensor(label, dtype=paddle.get_default_dtype())}, + ) + acc_results, iou_results = th_result["Binary_Acc"], th_result["IoU"] + current_acc_results.append(acc_results) + current_iou_results.append(iou_results) + + th_acc_results.append(np.mean(current_acc_results)) + th_iou_results.append(np.mean(current_iou_results)) + + acc_results_summary["thresholding"] = th_acc_results + iou_results_summary["thresholding"] = th_iou_results + + ppsci.utils.misc.plot_curve( + acc_results_summary, + xlabel="iteration", + ylabel="accuracy", + output_dir=cfg.output_dir, + ) + ppsci.utils.misc.plot_curve( + iou_results_summary, xlabel="iteration", ylabel="iou", output_dir=cfg.output_dir + ) + + +def evaluate_model( + cfg, model, pretrained_model_path, data_iters, data_targets, iterations_stop_times +): + # load model parameters + solver = ppsci.solver.Solver( + model, + epochs=1, + iters_per_epoch=cfg.EVAL.num_val_step, + eval_with_no_grad=True, + pretrained_model_path=pretrained_model_path, + ) + + acc_results = [] + iou_results = [] + + # evaluation for different fixed iteration stop times + for stop_iter in iterations_stop_times: + # only evaluate for NUM_VAL_STEP times of iteration + inputs_eval, labels_eval = func_module.generate_train_test( + data_iters, data_targets, 1.0, cfg.EVAL.batch_size * cfg.EVAL.num_val_step + ) + + sup_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": {"input": inputs_eval}, + "label": {"output": labels_eval}, + "transforms": ( + { + "FunctionalTransform": { + "transform_func": func_module.augmentation, + }, + }, + ), + }, + "batch_size": cfg.EVAL.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "num_workers": 0, + }, + ppsci.loss.FunctionalLoss(loss_wrapper(cfg)), + {"output": lambda out: out["output"]}, + {"metric": ppsci.metric.FunctionalMetric(val_metric)}, + name="sup_validator", + ) + validator = {sup_validator.name: sup_validator} + solver.validator = validator + + # modify the channel_sampler in model + SIMP_stop_point_sampler = func_module.generate_sampler("Fixed", stop_iter) + solver.model.channel_sampler = SIMP_stop_point_sampler + + _, eval_result = solver.eval() + + current_acc_results = eval_result["metric"]["Binary_Acc"] + current_iou_results = eval_result["metric"]["IoU"] + + acc_results.append(current_acc_results) + iou_results.append(current_iou_results) + + return acc_results, 
iou_results + + +# define loss wrapper +def loss_wrapper(cfg: DictConfig): + def loss_expr(output_dict, label_dict, weight_dict=None): + label_true = label_dict["output"].reshape((-1, 1)) + label_pred = output_dict["output"].reshape((-1, 1)) + conf_loss = paddle.mean( + nn.functional.log_loss(label_pred, label_true, epsilon=1.0e-7) + ) + vol_loss = paddle.square(paddle.mean(label_true - label_pred)) + return {"output": conf_loss + cfg.vol_coeff * vol_loss} + + return loss_expr + + +# define metric +def val_metric(output_dict, label_dict, weight_dict=None): + label_pred = output_dict["output"] + label_true = label_dict["output"] + accurates = paddle.equal(paddle.round(label_true), paddle.round(label_pred)) + acc = paddle.mean(paddle.cast(accurates, dtype=paddle.get_default_dtype())) + true_negative = paddle.sum( + paddle.multiply( + paddle.equal(paddle.round(label_pred), 0.0), + paddle.equal(paddle.round(label_true), 0.0), + ), + dtype=paddle.get_default_dtype(), + ) + true_positive = paddle.sum( + paddle.multiply( + paddle.equal(paddle.round(label_pred), 1.0), + paddle.equal(paddle.round(label_true), 1.0), + ), + dtype=paddle.get_default_dtype(), + ) + false_negative = paddle.sum( + paddle.multiply( + paddle.equal(paddle.round(label_pred), 1.0), + paddle.equal(paddle.round(label_true), 0.0), + ), + dtype=paddle.get_default_dtype(), + ) + false_positive = paddle.sum( + paddle.multiply( + paddle.equal(paddle.round(label_pred), 0.0), + paddle.equal(paddle.round(label_true), 1.0), + ), + dtype=paddle.get_default_dtype(), + ) + n_negative = paddle.add(false_negative, true_negative) + n_positive = paddle.add(true_positive, false_positive) + iou = 0.5 * paddle.add( + paddle.divide(true_negative, paddle.add(n_negative, false_positive)), + paddle.divide(true_positive, paddle.add(n_positive, false_negative)), + ) + return {"Binary_Acc": acc, "IoU": iou} + + +# export model +def export(cfg: DictConfig): + # set model + model = TopOptNN(**cfg.MODEL) + + # initialize solver + solver = ppsci.solver.Solver( + model, + eval_with_no_grad=True, + pretrained_model_path=cfg.INFER.pretrained_model_path_dict[ + cfg.INFER.pretrained_model_name + ], + ) + + # export model + from paddle.static import InputSpec + + input_spec = [{"input": InputSpec([None, 2, 40, 40], "float32", name="input")}] + + solver.export(input_spec, cfg.INFER.export_path) + + +def inference(cfg: DictConfig): + # read h5 data + h5data = h5py.File(cfg.DATA_PATH, "r") + data_iters = np.array(h5data["iters"]) + data_targets = np.array(h5data["targets"]) + idx = np.random.choice(len(data_iters), cfg.INFER.img_num, False) + data_iters = data_iters[idx] + data_targets = data_targets[idx] + + sampler = func_module.generate_sampler(cfg.INFER.sampler_key, cfg.INFER.sampler_num) + data_iters = channel_sampling(sampler, data_iters) + + from deploy.python_infer import pinn_predictor + + predictor = pinn_predictor.PINNPredictor(cfg) + + input_dict = {"input": data_iters} + output_dict = predictor.predict(input_dict, cfg.INFER.batch_size) + + # mapping data to output_key + output_dict = { + store_key: output_dict[infer_key] + for store_key, infer_key in zip({"output"}, output_dict.keys()) + } + + save_topopt_img( + input_dict, + output_dict, + data_targets, + cfg.INFER.save_res_path, + cfg.INFER.res_img_figsize, + cfg.INFER.save_npy, + ) + + +# used for inference +def channel_sampling(sampler, input): + SIMP_initial_iter_time = sampler() + input_channel_k = input[:, SIMP_initial_iter_time, :, :] + input_channel_k_minus_1 = input[:, SIMP_initial_iter_time - 
1, :, :]
+    input = np.stack(
+        (input_channel_k, input_channel_k - input_channel_k_minus_1), axis=1
+    )
+    return input
+
+
+# used for inference
+def save_topopt_img(
+    input_dict: Dict[str, np.ndarray],
+    output_dict: Dict[str, np.ndarray],
+    ground_truth: np.ndarray,
+    save_dir: str,
+    figsize: tuple = None,
+    save_npy: bool = False,
+):
+
+    input = input_dict["input"]
+    output = output_dict["output"]
+    import os
+
+    import matplotlib.pyplot as plt
+
+    os.makedirs(save_dir, exist_ok=True)
+    for i in range(len(input)):
+        plt.figure(figsize=figsize)
+        plt.subplot(1, 4, 1)
+        plt.axis("off")
+        plt.imshow(input[i][0], cmap="gray")
+        plt.title("Input Image")
+        plt.subplot(1, 4, 2)
+        plt.axis("off")
+        plt.imshow(input[i][1], cmap="gray")
+        plt.title("Input Gradient")
+        plt.subplot(1, 4, 3)
+        plt.axis("off")
+        plt.imshow(np.round(output[i][0]), cmap="gray")
+        plt.title("Prediction")
+        plt.subplot(1, 4, 4)
+        plt.axis("off")
+        plt.imshow(np.round(ground_truth[i][0]), cmap="gray")
+        plt.title("Ground Truth")
+        plt.show()
+        plt.savefig(osp.join(save_dir, f"Prediction_{i}.png"))
+        plt.close()
+        if save_npy:
+            with open(osp.join(save_dir, f"Prediction_{i}.npy"), "wb") as f:
+                np.save(f, output[i])
+
+
+@hydra.main(version_base=None, config_path="./conf", config_name="topopt.yaml")
+def main(cfg: DictConfig):
+    if cfg.mode == "train":
+        train(cfg)
+    elif cfg.mode == "eval":
+        evaluate(cfg)
+    elif cfg.mode == "export":
+        export(cfg)
+    elif cfg.mode == "infer":
+        inference(cfg)
+    else:
+        raise ValueError(
+            f"cfg.mode should be in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'"
+        )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/topopt/topoptmodel.py b/examples/topopt/topoptmodel.py
index 07dc82a534..a466f08548 100644
--- a/examples/topopt/topoptmodel.py
+++ b/examples/topopt/topoptmodel.py
@@ -1,162 +1,162 @@
-# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-from paddle import nn
-
-import ppsci
-
-
-# NCHW data format
-class TopOptNN(ppsci.arch.UNetEx):
-    """Neural network for Topology Optimization, inherited from `ppsci.arch.UNetEx`
-
-    [Sosnovik, I., & Oseledets, I. (2019). Neural networks for topology optimization. Russian Journal of Numerical Analysis and Mathematical Modelling, 34(4), 215-223.](https://arxiv.org/pdf/1709.09578)
-
-    Args:
-        input_key (str): Name of function data for input.
-        output_key (str): Name of function data for output.
-        in_channel (int): Number of channels of input.
-        out_channel (int): Number of channels of output.
-        kernel_size (int, optional): Size of kernel of convolution layer. Defaults to 3.
-        filters (Tuple[int, ...], optional): Number of filters. Defaults to (16, 32, 64).
-        layers (int, optional): Number of encoders or decoders. Defaults to 3.
-        channel_sampler (callable, optional): The sampling function for the initial iteration time
-            (corresponding to the channel number of the input) of SIMP algorithm.
The default value - is None, when it is None, input for the forward method should be sampled and prepared - with the shape of [batch, 2, height, width] before passing to forward method. - weight_norm (bool, optional): Whether use weight normalization layer. Defaults to True. - batch_norm (bool, optional): Whether add batch normalization layer. Defaults to True. - activation (Type[nn.Layer], optional): Name of activation function. Defaults to nn.ReLU. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.ppsci.arch.TopOptNN("input", "output", 2, 1, 3, (16, 32, 64), 2, lambda: 1, Flase, False) - """ - - def __init__( - self, - input_key="input", - output_key="output", - in_channel=2, - out_channel=1, - kernel_size=3, - filters=(16, 32, 64), - layers=2, - channel_sampler=None, - weight_norm=False, - batch_norm=False, - activation=nn.ReLU, - ): - super().__init__( - input_key=input_key, - output_key=output_key, - in_channel=in_channel, - out_channel=out_channel, - kernel_size=kernel_size, - filters=filters, - layers=layers, - weight_norm=weight_norm, - batch_norm=batch_norm, - activation=activation, - ) - self.in_channel = in_channel - self.out_channel = out_channel - self.filters = filters - self.channel_sampler = channel_sampler - self.activation = activation - - # Modify Layers - self.encoder[1] = nn.Sequential( - nn.MaxPool2D(self.in_channel, padding="SAME"), - self.encoder[1][0], - nn.Dropout2D(0.1), - self.encoder[1][1], - ) - self.encoder[2] = nn.Sequential( - nn.MaxPool2D(2, padding="SAME"), self.encoder[2] - ) - # Conv2D used in reference code in decoder - self.decoders[0] = nn.Sequential( - nn.Conv2D( - self.filters[-1], self.filters[-1], kernel_size=3, padding="SAME" - ), - self.activation(), - nn.Conv2D( - self.filters[-1], self.filters[-1], kernel_size=3, padding="SAME" - ), - self.activation(), - ) - self.decoders[1] = nn.Sequential( - nn.Conv2D( - sum(self.filters[-2:]), self.filters[-2], kernel_size=3, padding="SAME" - ), - self.activation(), - nn.Dropout2D(0.1), - nn.Conv2D( - self.filters[-2], self.filters[-2], kernel_size=3, padding="SAME" - ), - self.activation(), - ) - self.decoders[2] = nn.Sequential( - nn.Conv2D( - sum(self.filters[:-1]), self.filters[-3], kernel_size=3, padding="SAME" - ), - self.activation(), - nn.Conv2D( - self.filters[-3], self.filters[-3], kernel_size=3, padding="SAME" - ), - self.activation(), - ) - self.output = nn.Sequential( - nn.Conv2D( - self.filters[-3], self.out_channel, kernel_size=3, padding="SAME" - ), - nn.Sigmoid(), - ) - - def forward(self, x): - if self.channel_sampler is not None: - SIMP_initial_iter_time = self.channel_sampler() # channel k - input_channel_k = x[self.input_keys[0]][:, SIMP_initial_iter_time, :, :] - input_channel_k_minus_1 = x[self.input_keys[0]][ - :, SIMP_initial_iter_time - 1, :, : - ] - x = paddle.stack( - (input_channel_k, input_channel_k - input_channel_k_minus_1), axis=1 - ) - else: - x = x[self.input_keys[0]] - # encode - upsampling_size = [] - skip_connection = [] - n_encoder = len(self.encoder) - for i in range(n_encoder): - x = self.encoder[i](x) - if i is not (n_encoder - 1): - upsampling_size.append(x.shape[-2:]) - skip_connection.append(x) - - # decode - n_decoder = len(self.decoders) - for i in range(n_decoder): - x = self.decoders[i](x) - if i is not (n_decoder - 1): - up_size = upsampling_size.pop() - x = nn.UpsamplingNearest2D(up_size)(x) - skip_output = skip_connection.pop() - x = paddle.concat((skip_output, x), axis=1) - - out = self.output(x) - return {self.output_keys[0]: out} +# Copyright 
(c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +from paddle import nn + +import ppsci + + +# NCHW data format +class TopOptNN(ppsci.arch.UNetEx): + """Neural network for Topology Optimization, inherited from `ppsci.arch.UNetEx` + + [Sosnovik, I., & Oseledets, I. (2019). Neural networks for topology optimization. Russian Journal of Numerical Analysis and Mathematical Modelling, 34(4), 215-223.](https://arxiv.org/pdf/1709.09578) + + Args: + input_key (str): Name of function data for input. + output_key (str): Name of function data for output. + in_channel (int): Number of channels of input. + out_channel (int): Number of channels of output. + kernel_size (int, optional): Size of kernel of convolution layer. Defaults to 3. + filters (Tuple[int, ...], optional): Number of filters. Defaults to (16, 32, 64). + layers (int, optional): Number of encoders or decoders. Defaults to 3. + channel_sampler (callable, optional): The sampling function for the initial iteration time + (corresponding to the channel number of the input) of SIMP algorithm. The default value + is None, when it is None, input for the forward method should be sampled and prepared + with the shape of [batch, 2, height, width] before passing to forward method. + weight_norm (bool, optional): Whether use weight normalization layer. Defaults to True. + batch_norm (bool, optional): Whether add batch normalization layer. Defaults to True. + activation (Type[nn.Layer], optional): Name of activation function. Defaults to nn.ReLU. 
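The `Examples` entry that follows in this docstring looks mistyped (`ppsci.arch.ppsci.arch.TopOptNN`, `Flase`). Based on the constructor signature shown here, a usage sketch would look roughly like the one below; the lambda sampler is a placeholder that always picks SIMP iteration k = 1, and the import assumes the script is run from `examples/topopt/` like the rest of this example. Note also that the `forward` loops compare indices with `i is not (n_encoder - 1)`, which only works because CPython caches small integers; `!=` expresses the intended value comparison.

```python
from paddle import nn

from topoptmodel import TopOptNN  # assumed import path when run from examples/topopt/

# channel_sampler returns the SIMP iteration index k; the model then builds the
# two-channel input (x_k, x_k - x_{k-1}) inside forward().
model = TopOptNN(
    input_key="input",
    output_key="output",
    in_channel=2,
    out_channel=1,
    kernel_size=3,
    filters=(16, 32, 64),
    layers=2,
    channel_sampler=lambda: 1,   # placeholder: always use iteration k = 1
    weight_norm=False,
    batch_norm=False,
    activation=nn.ReLU,
)
```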
+ + Examples: + >>> import ppsci + >>> model = ppsci.arch.ppsci.arch.TopOptNN("input", "output", 2, 1, 3, (16, 32, 64), 2, lambda: 1, Flase, False) + """ + + def __init__( + self, + input_key="input", + output_key="output", + in_channel=2, + out_channel=1, + kernel_size=3, + filters=(16, 32, 64), + layers=2, + channel_sampler=None, + weight_norm=False, + batch_norm=False, + activation=nn.ReLU, + ): + super().__init__( + input_key=input_key, + output_key=output_key, + in_channel=in_channel, + out_channel=out_channel, + kernel_size=kernel_size, + filters=filters, + layers=layers, + weight_norm=weight_norm, + batch_norm=batch_norm, + activation=activation, + ) + self.in_channel = in_channel + self.out_channel = out_channel + self.filters = filters + self.channel_sampler = channel_sampler + self.activation = activation + + # Modify Layers + self.encoder[1] = nn.Sequential( + nn.MaxPool2D(self.in_channel, padding="SAME"), + self.encoder[1][0], + nn.Dropout2D(0.1), + self.encoder[1][1], + ) + self.encoder[2] = nn.Sequential( + nn.MaxPool2D(2, padding="SAME"), self.encoder[2] + ) + # Conv2D used in reference code in decoder + self.decoders[0] = nn.Sequential( + nn.Conv2D( + self.filters[-1], self.filters[-1], kernel_size=3, padding="SAME" + ), + self.activation(), + nn.Conv2D( + self.filters[-1], self.filters[-1], kernel_size=3, padding="SAME" + ), + self.activation(), + ) + self.decoders[1] = nn.Sequential( + nn.Conv2D( + sum(self.filters[-2:]), self.filters[-2], kernel_size=3, padding="SAME" + ), + self.activation(), + nn.Dropout2D(0.1), + nn.Conv2D( + self.filters[-2], self.filters[-2], kernel_size=3, padding="SAME" + ), + self.activation(), + ) + self.decoders[2] = nn.Sequential( + nn.Conv2D( + sum(self.filters[:-1]), self.filters[-3], kernel_size=3, padding="SAME" + ), + self.activation(), + nn.Conv2D( + self.filters[-3], self.filters[-3], kernel_size=3, padding="SAME" + ), + self.activation(), + ) + self.output = nn.Sequential( + nn.Conv2D( + self.filters[-3], self.out_channel, kernel_size=3, padding="SAME" + ), + nn.Sigmoid(), + ) + + def forward(self, x): + if self.channel_sampler is not None: + SIMP_initial_iter_time = self.channel_sampler() # channel k + input_channel_k = x[self.input_keys[0]][:, SIMP_initial_iter_time, :, :] + input_channel_k_minus_1 = x[self.input_keys[0]][ + :, SIMP_initial_iter_time - 1, :, : + ] + x = paddle.stack( + (input_channel_k, input_channel_k - input_channel_k_minus_1), axis=1 + ) + else: + x = x[self.input_keys[0]] + # encode + upsampling_size = [] + skip_connection = [] + n_encoder = len(self.encoder) + for i in range(n_encoder): + x = self.encoder[i](x) + if i is not (n_encoder - 1): + upsampling_size.append(x.shape[-2:]) + skip_connection.append(x) + + # decode + n_decoder = len(self.decoders) + for i in range(n_decoder): + x = self.decoders[i](x) + if i is not (n_decoder - 1): + up_size = upsampling_size.pop() + x = nn.UpsamplingNearest2D(up_size)(x) + skip_output = skip_connection.pop() + x = paddle.concat((skip_output, x), axis=1) + + out = self.output(x) + return {self.output_keys[0]: out} diff --git a/examples/transformer4sr/conf/transformer4sr.yaml b/examples/transformer4sr/conf/transformer4sr.yaml index 770468a80b..787e7a0fa3 100644 --- a/examples/transformer4sr/conf/transformer4sr.yaml +++ b/examples/transformer4sr/conf/transformer4sr.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -126,3 +127,114 @@ INFER: max_batch_size: 128 num_cpu_threads: 4 batch_size: 8 +======= +defaults: + - 
ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_transformer4sr/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 2024 +output_dir: ${hydra:run.dir} +log_freq: 20 + +DATA_GENERATE: + # output path + data_path: "./data_generated/" + # filters + num_nodes: [2,15] # number of nodes + num_nested_max: 6 # multiple levels of nesting + num_consts: [1,1] # number of constants(C) + num_vars: [1,6] # number of variables(x1,x2,...) + seq_length_max: 30 + order_of_mag_limit: 1.0e+9 # magnitude of value + # others + num_init_trials: 100000 # number of initial trials + num_sampling_per_eq: 25 # number of times to evaluate constants for each unique equation + sampling_times: 50 # the number of observations + var_type: 'normal' # variable representation, 'normal' is (y, x1, x2, ...), 'log' is log(abs(y, x1, x2, ...)), or 'both' + num_zfill: 8 +DATA: + data_path: "./data_generated/" # ${DATA_GENERATE.data_path} + data_path_srsd: ["./srsd-feynman_easy/"] + ratio: [0.8,0.1,0.1] + sampling_times: ${DATA_GENERATE.sampling_times} + seq_length_max: 30 # ${DATA_GENERATE.seq_length_max} + response_variable: ["y", "x1", "x2", "x3", "x4", "x5", "x6"] # maximum number of variables is len(response_variable)=7 + vocab_library: ["add","mul","sin","cos","log","exp","neg","inv","sqrt","sq","cb","C","x1","x2","x3","x4","x5","x6"] # vocab_size=len(vocab_library)+2(because add and mul are binary operators) +# model settings +MODEL: + input_keys: ["input", "target_seq"] + output_keys: ["output"] + d_model: 256 # the innermost dimension of model + heads: 4 + num_layers_enc: 4 + num_layers_dec: 8 + act: "relu" + dropout: 0.25 + +# training settings +TRAIN: + epochs: 1000 + iters_per_epoch: -1 + save_freq: 20 + eval_during_train: true + eval_freq: 20 + lr_warmup: 4000 + lr_scheduler: + epochs: ${TRAIN.epochs} + iters_per_epoch: ${TRAIN.iters_per_epoch} + learning_rate: 1 + by_epoch: false + adam: + beta1: 0.9 + beta2: 0.98 + epsilon: 1.0e-9 + batch_size: 512 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + num_repeat: 50 + pretrained_model_path: null + +# inference settings +INFER: + pretrained_model_path: "https://paddle-org.bj.bcebos.com/paddlescience/models/transformer4sr/transformer4sr_pretrained.pdparams" + export_path: ./inference/transformer4sr + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + device: gpu + engine: native + precision: fp32 + onnx_path: ${INFER.export_path}.onnx + ir_optim: true + min_subgraph_size: 10 + gpu_mem: 4000 + gpu_id: 0 + max_batch_size: 128 + num_cpu_threads: 4 + batch_size: 8 +>>>>>>> Stashed changes diff --git a/examples/transformer4sr/functions_data.py b/examples/transformer4sr/functions_data.py index 7c1ee07a36..5a9e435e1b 100644 --- a/examples/transformer4sr/functions_data.py +++ b/examples/transformer4sr/functions_data.py @@ -1,270 +1,270 @@ -# Copyright (c) 2024 PaddlePaddle Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Reference: https://github.com/omron-sinicx/transformer4sr -""" - -import json -import os -from typing import Tuple - -import numpy as np -import sympy -from tqdm import tqdm -from utils import from_sympy_to_seq - - -class DataFuncs: - """Functions of dataset generated by this example. - - Args: - data_path (str): Path of dataset. - vocab_library (Tuple): Library of vocabulary, like ("add","mul","sin","C","x1","x2"). - seq_length_max (int): Max length of sequence. - ratio (Tuple): The ratio of dividing training, validation, and test datasets. - shuffle (bool, optional): Whether to shuffle. Defaults to True. - """ - - def __init__( - self, - data_path: str, - vocab_library: Tuple, - seq_length_max: int, - ratio: Tuple, - shuffle: bool = True, - ) -> None: - self.data_path = data_path - self.vocab_library = vocab_library - self.seq_length_max = seq_length_max - self.ratio = ratio - self.load_data() - self.get_vocab_target() - self.split_dataset_idx(shuffle) - - for mode in ["train", "val", "test"]: - self.init_data(mode) - - def init_data(self, mode="test"): - idx = getattr(self, f"idx_{mode}") - setattr(self, f"values_{mode}", self.data_values[idx]) - setattr(self, f"targets_{mode}", self.data_targets[idx]) - - def load_data(self): - gt_path = os.path.join(self.data_path, "ground_truth") - gt_files = sorted(os.listdir(gt_path)) - value_path = os.path.join(self.data_path, "values") - value_files = sorted(os.listdir(value_path)) - - data_values = [] - data_tokens = [] - for i in tqdm(range(len(gt_files)), desc="Loading data"): - try: - tokens = self.get_token_from_file(os.path.join(gt_path, gt_files[i])) - assert len(tokens) <= self.seq_length_max - data_tokens.append(tokens) - data_values.append(np.load(os.path.join(value_path, value_files[i]))) - except Exception: - continue - data_values = np.expand_dims(np.array(data_values), axis=-1) - self.data_tokens = data_tokens - self.data_values = data_values - - def get_token_from_file(self, file_path): - with open(file_path) as f: - lines = [] - for token in f.readlines(): - assert token[-1] == "\n" - if token[0] == "C": - lines.append("C") - else: - lines.append(token[:-1]) - return lines - - def get_vocab_target(self): - data_targets = [] - for tokens in self.data_tokens: - sample_target = [1] - for token in tokens: - sample_target.append(self.vocab_library.index(token) + 2) - sample_target.extend([0] * (self.seq_length_max + 1 - len(sample_target))) - data_targets.append(sample_target) - self.data_targets = np.array(data_targets) - - def split_dataset_idx(self, shuffle=True): - num_total = self.data_values.shape[0] - idx = np.arange(num_total) - if shuffle: - np.random.shuffle(idx) - num_train, num_val, _ = ( - int(num_total * self.ratio[0]), - int(num_total * self.ratio[1]), - int(num_total * self.ratio[2]), - ) - self.idx_train = idx[:num_train] - self.idx_val = idx[num_train : num_train + num_val] - self.idx_test = idx[num_train + num_val :] - - -class SRSDDataFuncs: - 
"""Functions of SRSD dataset. - - Args: - data_path_lst (Tuple): Paths of srsd datasets. - sampling_times (int): Sampling times. - response_variable (Tuple): Response variable, like ("y","x1","x2"). - vocab_library (Tuple): Library of vocabulary, like ("add","mul","sin","C","x1","x2"). - seq_length_max (int): Max length of sequence. - shuffle (bool, optional): Whether to obtain data randomly. If set to true, pseudo-random seeds will be ignored. Defaults to True. - """ - - def __init__( - self, - data_path_lst: Tuple, - sampling_times: int, - response_variable: Tuple, - vocab_library: Tuple, - seq_length_max: int, - shuffle: bool = True, - ) -> None: - self.data_path_lst = data_path_lst - self.st = sampling_times - self.rvar = response_variable - self.vlab = vocab_library - self.seq_length_max = seq_length_max - self.C = sympy.symbols("C", real=True, positive=True) - self.shuffle = shuffle - - for mode in ["train", "val", "test"]: - self.init_data(mode) - - def init_data(self, mode="test"): - mode_lst = [[], [], []] - keys_lst = [] - for data_path in self.data_path_lst: - self.load_supp_info(data_path) - tokens, values, targets = self.load_data(data_path, mode) - mode_lst[0].extend(tokens) - mode_lst[1].append(values) - mode_lst[2].append(targets) - keys_lst.extend(self.keys) - setattr(self, f"tokens_{mode}", mode_lst[0]) - setattr(self, f"values_{mode}", np.concatenate(mode_lst[1], axis=0)) - setattr(self, f"targets_{mode}", np.concatenate(mode_lst[2], axis=0)) - setattr(self, f"keys_{mode}", keys_lst) - - def load_supp_info(self, data_path): - supp_info_path = os.path.join(data_path, "supp_info.json") - with open(supp_info_path, "rb") as f: - feynman_dict = json.load(f) - self.keys = list(feynman_dict.keys()) - self.keys.sort() - self.feynman_dict = feynman_dict - - def load_data(self, data_path, mode="test"): - data_values = [] - data_tokens = [] - data_targets = [] - remaining_keys = [] - for i in tqdm( - range(len(self.keys)), - desc=f"Loading {mode} data from {data_path}", - leave=False, - ): - key = self.keys[i] - try: - data_path_txt = os.path.join(data_path, mode, f"{key}.txt") - values = self.load_srsd_dataset_input(data_path_txt, key) - assert not np.isnan(values).any(), "nan in values. deprecate it." - sample_target_tokens, sample_target = self.load_srsd_dataset_target(key) - assert ( - len(sample_target) <= self.seq_length_max - ), f"{key}'s eq is too complex. deprecate it." - - data_values.append(values) - data_tokens.append(sample_target_tokens) - sample_target.extend( - [0] * (self.seq_length_max + 1 - len(sample_target)) - ) - data_targets.append(np.array(sample_target)) - except Exception: - continue - remaining_keys.append(key) - self.keys = remaining_keys - - return ( - data_tokens, - np.expand_dims(np.array(data_values), axis=-1), - np.array(data_targets), - ) - - def trans_data(self, data, signs=1): - return np.power(10.0, np.log10(data) - np.mean(np.log10(data))) * signs - - def load_srsd_dataset_input(self, data_path, key): - # Load SRSD dataset - data = np.genfromtxt(data_path) - - # Filter valid expressions with respect to the ST (positive inputs) - mask = np.all(data[:, :-1] > 0.0, axis=1) # y is in the last column here - valid_data = data[mask] - N, Vars = valid_data.shape[0], valid_data.shape[1] - assert ( - N >= self.st - ), f"sample quantity {N} of dataset should not be samaller than sampling_times {self.st}." 
- - # Create normalized dataset (input of ST) - if self.shuffle: - # temporarily avoid non-randomness caused by setting random seed - rng = np.random.default_rng() - idx_rows = rng.choice(N, self.st, replace=False) - else: - idx_rows = np.random.choice(N, self.st, replace=False) - data_values = np.zeros((self.st, len(self.rvar))) - for k in range(Vars - 1): # y will be done separately at the end - data_values[:, k + 1] = ( - valid_data[idx_rows, k] - if self.feynman_dict[key]["si-derived_units"][k + 1] == "$rad$" - else self.trans_data(valid_data[idx_rows, k]) - ) - if data_values.mean() > 1.0e30: - print("data_values", data_values.mean()) - - signs = np.where( - valid_data[idx_rows, -1] < 0.0, -1.0, 1.0 - ) # maybe some negative values for y - data = np.abs(valid_data[idx_rows, -1]) - data_values[:, 0] = self.trans_data(data, signs) - assert ( - np.abs(data_values).mean() < 1.0e10 - ), "value of data is bigger than 1.0e10. deprecate it." - return data_values - - def load_srsd_dataset_target(self, key): - expr_sympy = ( - sympy.sympify(self.feynman_dict[key]["sympy_eq_srepr"]) * self.C - ) # shifted due to rescaling - expr_sympy = expr_sympy.evalf() - expr_srepr = sympy.srepr(expr_sympy) - for i in range(9, 0, -1): # i = 9, 8, ..., 2, 1 - expr_srepr = expr_srepr.replace( - f"Symbol('x{i-1}', real=True)", f"Symbol('x{i}', real=True)" - ) - expr_sympy = sympy.sympify(expr_srepr) - - target_seq_tokens = from_sympy_to_seq(expr_sympy) - target_seq = [] - for token in target_seq_tokens: - target_seq.append(self.vlab.index(token) + 2) - return target_seq_tokens, target_seq +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/omron-sinicx/transformer4sr +""" + +import json +import os +from typing import Tuple + +import numpy as np +import sympy +from tqdm import tqdm +from utils import from_sympy_to_seq + + +class DataFuncs: + """Functions of dataset generated by this example. + + Args: + data_path (str): Path of dataset. + vocab_library (Tuple): Library of vocabulary, like ("add","mul","sin","C","x1","x2"). + seq_length_max (int): Max length of sequence. + ratio (Tuple): The ratio of dividing training, validation, and test datasets. + shuffle (bool, optional): Whether to shuffle. Defaults to True. 
+ """ + + def __init__( + self, + data_path: str, + vocab_library: Tuple, + seq_length_max: int, + ratio: Tuple, + shuffle: bool = True, + ) -> None: + self.data_path = data_path + self.vocab_library = vocab_library + self.seq_length_max = seq_length_max + self.ratio = ratio + self.load_data() + self.get_vocab_target() + self.split_dataset_idx(shuffle) + + for mode in ["train", "val", "test"]: + self.init_data(mode) + + def init_data(self, mode="test"): + idx = getattr(self, f"idx_{mode}") + setattr(self, f"values_{mode}", self.data_values[idx]) + setattr(self, f"targets_{mode}", self.data_targets[idx]) + + def load_data(self): + gt_path = os.path.join(self.data_path, "ground_truth") + gt_files = sorted(os.listdir(gt_path)) + value_path = os.path.join(self.data_path, "values") + value_files = sorted(os.listdir(value_path)) + + data_values = [] + data_tokens = [] + for i in tqdm(range(len(gt_files)), desc="Loading data"): + try: + tokens = self.get_token_from_file(os.path.join(gt_path, gt_files[i])) + assert len(tokens) <= self.seq_length_max + data_tokens.append(tokens) + data_values.append(np.load(os.path.join(value_path, value_files[i]))) + except Exception: + continue + data_values = np.expand_dims(np.array(data_values), axis=-1) + self.data_tokens = data_tokens + self.data_values = data_values + + def get_token_from_file(self, file_path): + with open(file_path) as f: + lines = [] + for token in f.readlines(): + assert token[-1] == "\n" + if token[0] == "C": + lines.append("C") + else: + lines.append(token[:-1]) + return lines + + def get_vocab_target(self): + data_targets = [] + for tokens in self.data_tokens: + sample_target = [1] + for token in tokens: + sample_target.append(self.vocab_library.index(token) + 2) + sample_target.extend([0] * (self.seq_length_max + 1 - len(sample_target))) + data_targets.append(sample_target) + self.data_targets = np.array(data_targets) + + def split_dataset_idx(self, shuffle=True): + num_total = self.data_values.shape[0] + idx = np.arange(num_total) + if shuffle: + np.random.shuffle(idx) + num_train, num_val, _ = ( + int(num_total * self.ratio[0]), + int(num_total * self.ratio[1]), + int(num_total * self.ratio[2]), + ) + self.idx_train = idx[:num_train] + self.idx_val = idx[num_train : num_train + num_val] + self.idx_test = idx[num_train + num_val :] + + +class SRSDDataFuncs: + """Functions of SRSD dataset. + + Args: + data_path_lst (Tuple): Paths of srsd datasets. + sampling_times (int): Sampling times. + response_variable (Tuple): Response variable, like ("y","x1","x2"). + vocab_library (Tuple): Library of vocabulary, like ("add","mul","sin","C","x1","x2"). + seq_length_max (int): Max length of sequence. + shuffle (bool, optional): Whether to obtain data randomly. If set to true, pseudo-random seeds will be ignored. Defaults to True. 
+ """ + + def __init__( + self, + data_path_lst: Tuple, + sampling_times: int, + response_variable: Tuple, + vocab_library: Tuple, + seq_length_max: int, + shuffle: bool = True, + ) -> None: + self.data_path_lst = data_path_lst + self.st = sampling_times + self.rvar = response_variable + self.vlab = vocab_library + self.seq_length_max = seq_length_max + self.C = sympy.symbols("C", real=True, positive=True) + self.shuffle = shuffle + + for mode in ["train", "val", "test"]: + self.init_data(mode) + + def init_data(self, mode="test"): + mode_lst = [[], [], []] + keys_lst = [] + for data_path in self.data_path_lst: + self.load_supp_info(data_path) + tokens, values, targets = self.load_data(data_path, mode) + mode_lst[0].extend(tokens) + mode_lst[1].append(values) + mode_lst[2].append(targets) + keys_lst.extend(self.keys) + setattr(self, f"tokens_{mode}", mode_lst[0]) + setattr(self, f"values_{mode}", np.concatenate(mode_lst[1], axis=0)) + setattr(self, f"targets_{mode}", np.concatenate(mode_lst[2], axis=0)) + setattr(self, f"keys_{mode}", keys_lst) + + def load_supp_info(self, data_path): + supp_info_path = os.path.join(data_path, "supp_info.json") + with open(supp_info_path, "rb") as f: + feynman_dict = json.load(f) + self.keys = list(feynman_dict.keys()) + self.keys.sort() + self.feynman_dict = feynman_dict + + def load_data(self, data_path, mode="test"): + data_values = [] + data_tokens = [] + data_targets = [] + remaining_keys = [] + for i in tqdm( + range(len(self.keys)), + desc=f"Loading {mode} data from {data_path}", + leave=False, + ): + key = self.keys[i] + try: + data_path_txt = os.path.join(data_path, mode, f"{key}.txt") + values = self.load_srsd_dataset_input(data_path_txt, key) + assert not np.isnan(values).any(), "nan in values. deprecate it." + sample_target_tokens, sample_target = self.load_srsd_dataset_target(key) + assert ( + len(sample_target) <= self.seq_length_max + ), f"{key}'s eq is too complex. deprecate it." + + data_values.append(values) + data_tokens.append(sample_target_tokens) + sample_target.extend( + [0] * (self.seq_length_max + 1 - len(sample_target)) + ) + data_targets.append(np.array(sample_target)) + except Exception: + continue + remaining_keys.append(key) + self.keys = remaining_keys + + return ( + data_tokens, + np.expand_dims(np.array(data_values), axis=-1), + np.array(data_targets), + ) + + def trans_data(self, data, signs=1): + return np.power(10.0, np.log10(data) - np.mean(np.log10(data))) * signs + + def load_srsd_dataset_input(self, data_path, key): + # Load SRSD dataset + data = np.genfromtxt(data_path) + + # Filter valid expressions with respect to the ST (positive inputs) + mask = np.all(data[:, :-1] > 0.0, axis=1) # y is in the last column here + valid_data = data[mask] + N, Vars = valid_data.shape[0], valid_data.shape[1] + assert ( + N >= self.st + ), f"sample quantity {N} of dataset should not be samaller than sampling_times {self.st}." 
+ + # Create normalized dataset (input of ST) + if self.shuffle: + # temporarily avoid non-randomness caused by setting random seed + rng = np.random.default_rng() + idx_rows = rng.choice(N, self.st, replace=False) + else: + idx_rows = np.random.choice(N, self.st, replace=False) + data_values = np.zeros((self.st, len(self.rvar))) + for k in range(Vars - 1): # y will be done separately at the end + data_values[:, k + 1] = ( + valid_data[idx_rows, k] + if self.feynman_dict[key]["si-derived_units"][k + 1] == "$rad$" + else self.trans_data(valid_data[idx_rows, k]) + ) + if data_values.mean() > 1.0e30: + print("data_values", data_values.mean()) + + signs = np.where( + valid_data[idx_rows, -1] < 0.0, -1.0, 1.0 + ) # maybe some negative values for y + data = np.abs(valid_data[idx_rows, -1]) + data_values[:, 0] = self.trans_data(data, signs) + assert ( + np.abs(data_values).mean() < 1.0e10 + ), "value of data is bigger than 1.0e10. deprecate it." + return data_values + + def load_srsd_dataset_target(self, key): + expr_sympy = ( + sympy.sympify(self.feynman_dict[key]["sympy_eq_srepr"]) * self.C + ) # shifted due to rescaling + expr_sympy = expr_sympy.evalf() + expr_srepr = sympy.srepr(expr_sympy) + for i in range(9, 0, -1): # i = 9, 8, ..., 2, 1 + expr_srepr = expr_srepr.replace( + f"Symbol('x{i-1}', real=True)", f"Symbol('x{i}', real=True)" + ) + expr_sympy = sympy.sympify(expr_srepr) + + target_seq_tokens = from_sympy_to_seq(expr_sympy) + target_seq = [] + for token in target_seq_tokens: + target_seq.append(self.vlab.index(token) + 2) + return target_seq_tokens, target_seq diff --git a/examples/transformer4sr/functions_loss_metric.py b/examples/transformer4sr/functions_loss_metric.py index 7208e4a729..66701a036d 100644 --- a/examples/transformer4sr/functions_loss_metric.py +++ b/examples/transformer4sr/functions_loss_metric.py @@ -1,67 +1,67 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Reference: https://github.com/omron-sinicx/transformer4sr -""" - -from typing import Dict - -import numpy as np -import paddle -from utils import compute_norm_zss_dist - - -def cross_entropy_loss_func(output_dict, label_dict, *args): - custom_loss = paddle.nn.CrossEntropyLoss(ignore_index=0, label_smoothing=0.0) - loss = custom_loss(output_dict["output"], label_dict["output"]) - return {"ce_loss": loss} - - -def compute_inaccuracy( - output_dict: Dict[str, paddle.Tensor], - label_dict: Dict[str, paddle.Tensor], - *args, -) -> Dict[str, paddle.Tensor]: - """Calculate the ratio of incorrectly matched tokens to the total number.""" - preds = output_dict["output"] - labels = label_dict["output"] - padding_not_mask = labels != 0 - correct_bool = paddle.equal(paddle.argmax(preds, axis=-1), labels) - correct_bool = paddle.logical_and( - correct_bool, - padding_not_mask, - ) - inacc = 1 - paddle.sum(correct_bool) / paddle.sum(padding_not_mask) - return {"inaccuracy_mean": inacc} - - -def compute_zss( - output_dict: Dict[str, paddle.Tensor], - label_dict: Dict[str, paddle.Tensor], - *args, -) -> Dict[str, paddle.Tensor]: - """Calculate zss distance, which is a kind of normalized tree-based edit distance. Refer to https://arxiv.org/abs/2206.10540.""" - num_samples = output_dict["output"].shape[-1] - preds = output_dict["output"].reshape([-1, num_samples]) - labels = label_dict["output"].reshape([-1, num_samples]) - zss_dist = [] - for i in range(labels.shape[0]): - zss_dist.append(compute_norm_zss_dist(preds[i][0], labels[i])) - zss_dist_mean = np.nanmean(zss_dist) - return { - "zss_distance": paddle.to_tensor( - zss_dist_mean, dtype=paddle.get_default_dtype() - ) - } +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/omron-sinicx/transformer4sr +""" + +from typing import Dict + +import numpy as np +import paddle +from utils import compute_norm_zss_dist + + +def cross_entropy_loss_func(output_dict, label_dict, *args): + custom_loss = paddle.nn.CrossEntropyLoss(ignore_index=0, label_smoothing=0.0) + loss = custom_loss(output_dict["output"], label_dict["output"]) + return {"ce_loss": loss} + + +def compute_inaccuracy( + output_dict: Dict[str, paddle.Tensor], + label_dict: Dict[str, paddle.Tensor], + *args, +) -> Dict[str, paddle.Tensor]: + """Calculate the ratio of incorrectly matched tokens to the total number.""" + preds = output_dict["output"] + labels = label_dict["output"] + padding_not_mask = labels != 0 + correct_bool = paddle.equal(paddle.argmax(preds, axis=-1), labels) + correct_bool = paddle.logical_and( + correct_bool, + padding_not_mask, + ) + inacc = 1 - paddle.sum(correct_bool) / paddle.sum(padding_not_mask) + return {"inaccuracy_mean": inacc} + + +def compute_zss( + output_dict: Dict[str, paddle.Tensor], + label_dict: Dict[str, paddle.Tensor], + *args, +) -> Dict[str, paddle.Tensor]: + """Calculate zss distance, which is a kind of normalized tree-based edit distance. 
Refer to https://arxiv.org/abs/2206.10540.""" + num_samples = output_dict["output"].shape[-1] + preds = output_dict["output"].reshape([-1, num_samples]) + labels = label_dict["output"].reshape([-1, num_samples]) + zss_dist = [] + for i in range(labels.shape[0]): + zss_dist.append(compute_norm_zss_dist(preds[i][0], labels[i])) + zss_dist_mean = np.nanmean(zss_dist) + return { + "zss_distance": paddle.to_tensor( + zss_dist_mean, dtype=paddle.get_default_dtype() + ) + } diff --git a/examples/transformer4sr/functions_vis.py b/examples/transformer4sr/functions_vis.py index 55b82559d1..767cbf961c 100644 --- a/examples/transformer4sr/functions_vis.py +++ b/examples/transformer4sr/functions_vis.py @@ -1,73 +1,73 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Reference: https://github.com/omron-sinicx/transformer4sr -""" - - -import numpy as np -import paddle -from utils import is_tree_complete -from utils import simplify_output - - -class VisualizeFuncs: - """Visualizing results as equations.""" - - def __init__(self, model): - self.model = model - self.complete_func = is_tree_complete - - def visualize_valid_data(self, data_targets, data_values, num_valid): - """Visulizing for given data and target.""" - for idx in range(min(num_valid, data_values.shape[0])): - target_seq = paddle.to_tensor( - data_targets[idx : idx + 1, :-1], dtype=paddle.get_default_dtype() - ) - sympy_target = simplify_output(target_seq[0], "sympy") - - test_input = paddle.to_tensor( - data_values[idx : idx + 1], dtype=paddle.get_default_dtype() - ) - res = self.model.decode_process(test_input, self.complete_func) - sympy_pred = simplify_output(res[0], "sympy") - print("target", sympy_target, "pred:", sympy_pred) - - def visualize_demo(self): - """Visulizing for a demo of equation '25*x1 + x2*log(x1)'.""" - import sympy - - C, y, x1, x2, x3, x4, x5, x6 = sympy.symbols( - "C, y, x1, x2, x3, x4, x5, x6", real=True, positive=True - ) - y = 25 * x1 + x2 * sympy.log(x1) - print("The ground truth is:", y) - - x1_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50)) - x2_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50)) - f = sympy.lambdify([x1, x2], y) - y_values = f(x1_values, x2_values) - dataset = np.zeros((50, 7)) - dataset[:, 0] = y_values - dataset[:, 1] = x1_values - dataset[:, 2] = x2_values - encoder_input = ( - paddle.to_tensor(data=dataset, dtype=paddle.get_default_dtype()) - .unsqueeze(axis=0) - .unsqueeze(axis=-1) - ) - res = self.model.decode_process(encoder_input, self.complete_func) - sympy_pred = simplify_output(res[0], "sympy") - print("The prediction is:", sympy_pred) +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
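The loss and metrics in functions_loss_metric.py above all treat token index 0 as padding: `cross_entropy_loss_func` passes `ignore_index=0` to `CrossEntropyLoss`, and `compute_inaccuracy` masks padded positions before computing the token error rate. A NumPy analogue of the masked inaccuracy (shapes are illustrative):

```python
import numpy as np

def token_inaccuracy(logits, labels):
    """Fraction of mispredicted tokens, ignoring padded positions (label == 0).

    logits: (batch, seq_len, vocab_size), labels: (batch, seq_len) integer ids.
    """
    preds = np.argmax(logits, axis=-1)
    not_pad = labels != 0
    correct = (preds == labels) & not_pad
    return 1.0 - correct.sum() / not_pad.sum()

rng = np.random.default_rng(0)
labels = np.array([[1, 4, 7, 2, 0, 0]])          # trailing zeros are padding
logits = rng.standard_normal((1, 6, 20))
print(token_inaccuracy(logits, labels))
```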
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/omron-sinicx/transformer4sr +""" + + +import numpy as np +import paddle +from utils import is_tree_complete +from utils import simplify_output + + +class VisualizeFuncs: + """Visualizing results as equations.""" + + def __init__(self, model): + self.model = model + self.complete_func = is_tree_complete + + def visualize_valid_data(self, data_targets, data_values, num_valid): + """Visulizing for given data and target.""" + for idx in range(min(num_valid, data_values.shape[0])): + target_seq = paddle.to_tensor( + data_targets[idx : idx + 1, :-1], dtype=paddle.get_default_dtype() + ) + sympy_target = simplify_output(target_seq[0], "sympy") + + test_input = paddle.to_tensor( + data_values[idx : idx + 1], dtype=paddle.get_default_dtype() + ) + res = self.model.decode_process(test_input, self.complete_func) + sympy_pred = simplify_output(res[0], "sympy") + print("target", sympy_target, "pred:", sympy_pred) + + def visualize_demo(self): + """Visulizing for a demo of equation '25*x1 + x2*log(x1)'.""" + import sympy + + C, y, x1, x2, x3, x4, x5, x6 = sympy.symbols( + "C, y, x1, x2, x3, x4, x5, x6", real=True, positive=True + ) + y = 25 * x1 + x2 * sympy.log(x1) + print("The ground truth is:", y) + + x1_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50)) + x2_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50)) + f = sympy.lambdify([x1, x2], y) + y_values = f(x1_values, x2_values) + dataset = np.zeros((50, 7)) + dataset[:, 0] = y_values + dataset[:, 1] = x1_values + dataset[:, 2] = x2_values + encoder_input = ( + paddle.to_tensor(data=dataset, dtype=paddle.get_default_dtype()) + .unsqueeze(axis=0) + .unsqueeze(axis=-1) + ) + res = self.model.decode_process(encoder_input, self.complete_func) + sympy_pred = simplify_output(res[0], "sympy") + print("The prediction is:", sympy_pred) diff --git a/examples/transformer4sr/generate_datasets.py b/examples/transformer4sr/generate_datasets.py index 28a99c7a9e..75b01675c5 100644 --- a/examples/transformer4sr/generate_datasets.py +++ b/examples/transformer4sr/generate_datasets.py @@ -1,231 +1,231 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
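`VisualizeFuncs.visualize_demo` above prepares its encoder input by sampling `x1`, `x2` log-uniformly in [0.1, 10], evaluating the demo expression with `sympy.lambdify`, and packing `(y, x1, x2)` into a `(1, 50, 7, 1)` array (unused variable columns stay zero). The same data preparation in isolation, without the model call:

```python
import numpy as np
import sympy

x1, x2 = sympy.symbols("x1 x2", real=True, positive=True)
expr = 25 * x1 + x2 * sympy.log(x1)

# 50 observations, log-uniform in [0.1, 10], as in visualize_demo
x1_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50))
x2_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50))
y_values = sympy.lambdify([x1, x2], expr)(x1_values, x2_values)

dataset = np.zeros((50, 7))          # columns: (y, x1, ..., x6)
dataset[:, 0] = y_values
dataset[:, 1] = x1_values
dataset[:, 2] = x2_values
encoder_input = dataset[np.newaxis, :, :, np.newaxis]
print(encoder_input.shape)           # (1, 50, 7, 1)
```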
- -""" -Reference: https://github.com/omron-sinicx/transformer4sr -""" - - -import concurrent.futures -import os -import warnings -from functools import partial - -import hydra -import numpy as np -import sympy -from omegaconf import DictConfig -from tqdm import tqdm -from utils import MY_VOCAB -from utils import count_var_num -from utils import expr_tree_depth -from utils import from_seq_to_sympy -from utils import from_sympy_to_seq -from utils import gen_expr -from utils import gen_samples -from utils import reassign_variables - -import ppsci # noqa - -warnings.filterwarnings("ignore") - - -def fliter_nodes(expr, num_nodes): - if num_nodes[0] <= len(expr) <= num_nodes[1]: - return expr - else: - return None - - -def fliter_nested(expr, num_nested_max): - try: - expr_sympy = from_seq_to_sympy(expr) - expr_sympy = sympy.factor(expr_sympy) - expr_sympy = sympy.simplify(expr_sympy) - assert "zoo" not in str(expr_sympy) - assert expr_tree_depth(expr_sympy) <= num_nested_max - expr_sympy = reassign_variables(expr_sympy) - expr_sympy = sympy.factor(expr_sympy) - expr_sympy = sympy.simplify(expr_sympy) - return expr_sympy - except Exception: - return None - - -def fliter_consts_vars_len(expr, num_consts, num_vars, seq_length_max): - try: - cnt_const = expr.count("C") - assert "abort" not in expr - assert num_consts[0] <= cnt_const <= num_consts[1] - assert f"x{num_vars[0]}" in expr - assert f"x{num_vars[1] + 1}" not in expr - assert len(expr) <= seq_length_max - return expr - except Exception: - return None - - -def save_dataset(dataset, ground_truth, value_path, gt_path): - np.save(value_path, dataset) - with open( - gt_path, - "w", - ) as f: - for token in ground_truth: - f.write(f"{token}\n") - - -def generate_data(cfg: DictConfig): - # init trees - exprs_init = [] - num_init_trials = cfg.DATA_GENERATE.num_init_trials - for i in tqdm(range(num_init_trials), desc="Initial expression trees"): - exprs_init.append(gen_expr(MY_VOCAB)) - - # fliter nodes - num_nodes = cfg.DATA_GENERATE.num_nodes - exprs_filter_nodes = [] - for expr in tqdm(exprs_init, desc="Check nodes number"): - expr = fliter_nodes(expr, num_nodes) - if expr is not None: - exprs_filter_nodes.append(expr) - - # fliter nested - num_nested_max = cfg.DATA_GENERATE.num_nested_max - partial_fliter_nested = partial(fliter_nested, num_nested_max=num_nested_max) - exprs_fliter_nested = [] - with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: - future_to_expr = { - executor.submit(partial_fliter_nested, expr): expr - for expr in exprs_filter_nodes - } - progress = tqdm( - concurrent.futures.as_completed(future_to_expr), - total=len(exprs_filter_nodes), - desc=f"Check invalid abd very nested (>{num_nested_max}) expressions", - ) - for future in progress: - expr = future_to_expr[future] - try: - expr_sympy = future.result() - if expr_sympy is not None: - exprs_fliter_nested.append(expr_sympy) - except Exception: - continue - - # fliter consts/vars/seq_length - num_consts = cfg.DATA_GENERATE.num_consts - num_vars = cfg.DATA_GENERATE.num_vars - seq_length_max = cfg.DATA_GENERATE.seq_length_max - exprs_cvl = [] - for i in tqdm(range(len(exprs_fliter_nested)), desc="Check consts and vars."): - expr_seq = from_sympy_to_seq(exprs_fliter_nested[i]) - expr_seq = fliter_consts_vars_len( - expr_seq, num_consts, num_vars, seq_length_max - ) - if expr_seq is not None: - exprs_cvl.append(expr_seq) - - unique_expr_tuples = {tuple(expr) for expr in exprs_cvl} - expr_uniq_seq = [list(expr) for expr in unique_expr_tuples] - - # generate 
datasets - num_sampling_per_eq = cfg.DATA_GENERATE.num_sampling_per_eq - sampling_times = cfg.DATA_GENERATE.sampling_times - order_of_mag_limit = cfg.DATA_GENERATE.order_of_mag_limit - var_type = cfg.DATA_GENERATE.var_type - num_zfill = cfg.DATA_GENERATE.num_zfill - out_dir = cfg.DATA_GENERATE.data_path - gt_dir = os.path.join(out_dir, "ground_truth") - value_dir = os.path.join(out_dir, "values") - if not os.path.exists(gt_dir): - os.makedirs(gt_dir) - if not os.path.exists(value_dir): - os.makedirs(value_dir) - - count_datasets = 0 - for uniq_seq in tqdm(expr_uniq_seq, desc="Generate datasets"): - try: - for _ in tqdm( - range(num_sampling_per_eq), desc="Generate samples", leave=False - ): - seq_deformed = [] - ground_truth = [] - for token in uniq_seq: - if token == "C": - const_val = np.round( - np.random.uniform(low=-100.0, high=100.0), decimals=2 - ) - seq_deformed.append(str(const_val)) - ground_truth.append(f"C={str(const_val)}") - else: - seq_deformed.append(token) - ground_truth.append(token) - - cur_sympy_expr = from_seq_to_sympy(seq_deformed) - np_y, np_x = gen_samples(cur_sympy_expr, num_samples=1000) - assert np.nanmax(np.abs(np_y)) <= order_of_mag_limit - mask = np.logical_not(np.isnan(np_y)) - num_temp_obs = np.sum(mask) - assert num_temp_obs >= sampling_times - - idx = np.random.choice(num_temp_obs, size=sampling_times, replace=False) - num_var = count_var_num(sampling_times) - x_values = np_x[mask][idx, :num_var] - y_values = np_y[mask][idx] - if var_type == "both": - dataset = np.zeros((sampling_times, 14)) - else: - dataset = np.zeros((sampling_times, 7)) - - if var_type == "normal": - dataset[:, 0] = y_values - dataset[:, 1 : num_var + 1] = x_values - elif var_type == "log": - dataset[:, 0] = np.log(np.abs(y_values) + 1e-10) - dataset[:, 1 : num_var + 1] = np.log(np.abs(x_values) + 1e-10) - elif var_type == "both": - dataset[:, 0] = y_values - dataset[:, 1] = np.log(np.abs(y_values) + 1e-10) - dataset[:, 2 : 2 * num_var + 1 : 2] = x_values - dataset[:, 3 : 2 * num_var + 2 : 2] = np.log( - np.abs(x_values) + 1e-10 - ) - else: - print("VARIABLE_TYPE should be one of 'normal', 'log', or 'both'") - - # save - file_name = str(count_datasets).zfill(num_zfill) - value_path = os.path.join(value_dir, f"data_{file_name}.npy") - gt_path = os.path.join(gt_dir, f"data_{file_name}.npy") - save_dataset(dataset, ground_truth, value_path, gt_path) - count_datasets += 1 - except Exception as e: - print(e) - continue - print(f"=> Number of unique expressions = {len(expr_uniq_seq)}") - print(f"=> Number of datasets created = {count_datasets}") - print("Finish!") - - -@hydra.main(version_base=None, config_path="./conf", config_name="transformer4sr.yaml") -def main(cfg: DictConfig): - C, x1, x2, x3, x4, x5, x6 = sympy.symbols( - "C, x1, x2, x3, x4, x5, x6", real=True, positive=True - ) - generate_data(cfg) - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
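`generate_data` above filters the randomly generated expression trees in three passes, by node count (`fliter_nodes`), by validity and nesting depth after SymPy simplification (`fliter_nested`), and by constant/variable counts and sequence length (`fliter_consts_vars_len`, all spelled "fliter" in the source), then deduplicates the survivors by hashing each token sequence as a tuple. A compact sketch of that orchestration pattern; the predicate below is a stand-in for the real checks, which depend on SymPy and the helpers in `utils.py`:

```python
def keep_by_size(seq, lo=2, hi=15):
    # stand-in for fliter_nodes: keep the sequence or return None, as above
    return seq if lo <= len(seq) <= hi else None

def dedup(seqs):
    # same idiom as generate_data: hash token sequences as tuples
    return [list(t) for t in {tuple(s) for s in seqs}]

candidates = [["add", "x1", "C"], ["mul", "x1", "x2"], ["add", "x1", "C"]]
survivors = [s for s in (keep_by_size(c) for c in candidates) if s is not None]
print(dedup(survivors))   # the duplicate expression collapses to one entry
```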
+ +""" +Reference: https://github.com/omron-sinicx/transformer4sr +""" + + +import concurrent.futures +import os +import warnings +from functools import partial + +import hydra +import numpy as np +import sympy +from omegaconf import DictConfig +from tqdm import tqdm +from utils import MY_VOCAB +from utils import count_var_num +from utils import expr_tree_depth +from utils import from_seq_to_sympy +from utils import from_sympy_to_seq +from utils import gen_expr +from utils import gen_samples +from utils import reassign_variables + +import ppsci # noqa + +warnings.filterwarnings("ignore") + + +def fliter_nodes(expr, num_nodes): + if num_nodes[0] <= len(expr) <= num_nodes[1]: + return expr + else: + return None + + +def fliter_nested(expr, num_nested_max): + try: + expr_sympy = from_seq_to_sympy(expr) + expr_sympy = sympy.factor(expr_sympy) + expr_sympy = sympy.simplify(expr_sympy) + assert "zoo" not in str(expr_sympy) + assert expr_tree_depth(expr_sympy) <= num_nested_max + expr_sympy = reassign_variables(expr_sympy) + expr_sympy = sympy.factor(expr_sympy) + expr_sympy = sympy.simplify(expr_sympy) + return expr_sympy + except Exception: + return None + + +def fliter_consts_vars_len(expr, num_consts, num_vars, seq_length_max): + try: + cnt_const = expr.count("C") + assert "abort" not in expr + assert num_consts[0] <= cnt_const <= num_consts[1] + assert f"x{num_vars[0]}" in expr + assert f"x{num_vars[1] + 1}" not in expr + assert len(expr) <= seq_length_max + return expr + except Exception: + return None + + +def save_dataset(dataset, ground_truth, value_path, gt_path): + np.save(value_path, dataset) + with open( + gt_path, + "w", + ) as f: + for token in ground_truth: + f.write(f"{token}\n") + + +def generate_data(cfg: DictConfig): + # init trees + exprs_init = [] + num_init_trials = cfg.DATA_GENERATE.num_init_trials + for i in tqdm(range(num_init_trials), desc="Initial expression trees"): + exprs_init.append(gen_expr(MY_VOCAB)) + + # fliter nodes + num_nodes = cfg.DATA_GENERATE.num_nodes + exprs_filter_nodes = [] + for expr in tqdm(exprs_init, desc="Check nodes number"): + expr = fliter_nodes(expr, num_nodes) + if expr is not None: + exprs_filter_nodes.append(expr) + + # fliter nested + num_nested_max = cfg.DATA_GENERATE.num_nested_max + partial_fliter_nested = partial(fliter_nested, num_nested_max=num_nested_max) + exprs_fliter_nested = [] + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: + future_to_expr = { + executor.submit(partial_fliter_nested, expr): expr + for expr in exprs_filter_nodes + } + progress = tqdm( + concurrent.futures.as_completed(future_to_expr), + total=len(exprs_filter_nodes), + desc=f"Check invalid abd very nested (>{num_nested_max}) expressions", + ) + for future in progress: + expr = future_to_expr[future] + try: + expr_sympy = future.result() + if expr_sympy is not None: + exprs_fliter_nested.append(expr_sympy) + except Exception: + continue + + # fliter consts/vars/seq_length + num_consts = cfg.DATA_GENERATE.num_consts + num_vars = cfg.DATA_GENERATE.num_vars + seq_length_max = cfg.DATA_GENERATE.seq_length_max + exprs_cvl = [] + for i in tqdm(range(len(exprs_fliter_nested)), desc="Check consts and vars."): + expr_seq = from_sympy_to_seq(exprs_fliter_nested[i]) + expr_seq = fliter_consts_vars_len( + expr_seq, num_consts, num_vars, seq_length_max + ) + if expr_seq is not None: + exprs_cvl.append(expr_seq) + + unique_expr_tuples = {tuple(expr) for expr in exprs_cvl} + expr_uniq_seq = [list(expr) for expr in unique_expr_tuples] + + # generate 
datasets + num_sampling_per_eq = cfg.DATA_GENERATE.num_sampling_per_eq + sampling_times = cfg.DATA_GENERATE.sampling_times + order_of_mag_limit = cfg.DATA_GENERATE.order_of_mag_limit + var_type = cfg.DATA_GENERATE.var_type + num_zfill = cfg.DATA_GENERATE.num_zfill + out_dir = cfg.DATA_GENERATE.data_path + gt_dir = os.path.join(out_dir, "ground_truth") + value_dir = os.path.join(out_dir, "values") + if not os.path.exists(gt_dir): + os.makedirs(gt_dir) + if not os.path.exists(value_dir): + os.makedirs(value_dir) + + count_datasets = 0 + for uniq_seq in tqdm(expr_uniq_seq, desc="Generate datasets"): + try: + for _ in tqdm( + range(num_sampling_per_eq), desc="Generate samples", leave=False + ): + seq_deformed = [] + ground_truth = [] + for token in uniq_seq: + if token == "C": + const_val = np.round( + np.random.uniform(low=-100.0, high=100.0), decimals=2 + ) + seq_deformed.append(str(const_val)) + ground_truth.append(f"C={str(const_val)}") + else: + seq_deformed.append(token) + ground_truth.append(token) + + cur_sympy_expr = from_seq_to_sympy(seq_deformed) + np_y, np_x = gen_samples(cur_sympy_expr, num_samples=1000) + assert np.nanmax(np.abs(np_y)) <= order_of_mag_limit + mask = np.logical_not(np.isnan(np_y)) + num_temp_obs = np.sum(mask) + assert num_temp_obs >= sampling_times + + idx = np.random.choice(num_temp_obs, size=sampling_times, replace=False) + num_var = count_var_num(sampling_times) + x_values = np_x[mask][idx, :num_var] + y_values = np_y[mask][idx] + if var_type == "both": + dataset = np.zeros((sampling_times, 14)) + else: + dataset = np.zeros((sampling_times, 7)) + + if var_type == "normal": + dataset[:, 0] = y_values + dataset[:, 1 : num_var + 1] = x_values + elif var_type == "log": + dataset[:, 0] = np.log(np.abs(y_values) + 1e-10) + dataset[:, 1 : num_var + 1] = np.log(np.abs(x_values) + 1e-10) + elif var_type == "both": + dataset[:, 0] = y_values + dataset[:, 1] = np.log(np.abs(y_values) + 1e-10) + dataset[:, 2 : 2 * num_var + 1 : 2] = x_values + dataset[:, 3 : 2 * num_var + 2 : 2] = np.log( + np.abs(x_values) + 1e-10 + ) + else: + print("VARIABLE_TYPE should be one of 'normal', 'log', or 'both'") + + # save + file_name = str(count_datasets).zfill(num_zfill) + value_path = os.path.join(value_dir, f"data_{file_name}.npy") + gt_path = os.path.join(gt_dir, f"data_{file_name}.npy") + save_dataset(dataset, ground_truth, value_path, gt_path) + count_datasets += 1 + except Exception as e: + print(e) + continue + print(f"=> Number of unique expressions = {len(expr_uniq_seq)}") + print(f"=> Number of datasets created = {count_datasets}") + print("Finish!") + + +@hydra.main(version_base=None, config_path="./conf", config_name="transformer4sr.yaml") +def main(cfg: DictConfig): + C, x1, x2, x3, x4, x5, x6 = sympy.symbols( + "C, x1, x2, x3, x4, x5, x6", real=True, positive=True + ) + generate_data(cfg) + + +if __name__ == "__main__": + main() diff --git a/examples/transformer4sr/transformer4sr.py b/examples/transformer4sr/transformer4sr.py index 352db5ceab..89e1fdffed 100644 --- a/examples/transformer4sr/transformer4sr.py +++ b/examples/transformer4sr/transformer4sr.py @@ -1,317 +1,317 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
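Depending on `DATA_GENERATE.var_type`, the sampled observations above are stored as raw values (`normal`), as `log(|.| + 1e-10)` (`log`), or as interleaved raw/log columns (`both`, giving 14 columns instead of 7). A small sketch of the three layouts for one response column and `num_var` input variables, mirroring the branches in `generate_data`:

```python
import numpy as np

def build_dataset(y, x, var_type="normal"):
    """y: (n,), x: (n, num_var); mirrors the var_type branches in generate_data."""
    n, num_var = x.shape
    if var_type == "both":
        out = np.zeros((n, 14))
        out[:, 0] = y
        out[:, 1] = np.log(np.abs(y) + 1e-10)
        out[:, 2 : 2 * num_var + 1 : 2] = x                          # raw x in even columns
        out[:, 3 : 2 * num_var + 2 : 2] = np.log(np.abs(x) + 1e-10)  # log x in odd columns
    else:
        out = np.zeros((n, 7))
        if var_type == "normal":
            out[:, 0] = y
            out[:, 1 : num_var + 1] = x
        elif var_type == "log":
            out[:, 0] = np.log(np.abs(y) + 1e-10)
            out[:, 1 : num_var + 1] = np.log(np.abs(x) + 1e-10)
        else:
            raise ValueError("var_type should be one of 'normal', 'log', or 'both'")
    return out

rng = np.random.default_rng(0)
print(build_dataset(rng.random(5), rng.random((5, 3)), "both").shape)  # (5, 14)
```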
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Reference: https://github.com/omron-sinicx/transformer4sr -""" - -import hydra -import numpy as np -import paddle -from functions_data import DataFuncs -from functions_data import SRSDDataFuncs -from functions_loss_metric import compute_inaccuracy -from functions_loss_metric import cross_entropy_loss_func -from functions_vis import VisualizeFuncs -from omegaconf import DictConfig -from tqdm import tqdm -from utils import compute_norm_zss_dist -from utils import is_tree_complete -from utils import simplify_output - -import ppsci - - -def train(cfg: DictConfig): - # data - data_funcs = DataFuncs( - cfg.DATA.data_path, - cfg.DATA.vocab_library, - cfg.DATA.seq_length_max, - cfg.DATA.ratio, - shuffle=True, - ) - - # set model - num_var_max = len(cfg.DATA.response_variable) - vocab_size = len(cfg.DATA.vocab_library) + 2 - model = ppsci.arch.Transformer( - **cfg.MODEL, - num_var_max=num_var_max, - vocab_size=vocab_size, - seq_length=data_funcs.seq_length_max, - ) - - # set optimizer - def lr_lambda(step, d_model=cfg.MODEL.d_model, warmup=cfg.TRAIN.lr_warmup): - if step == 0: - step = 1 - lr = d_model ** (-0.5) * min(step ** (-0.5), step * warmup ** (-1.5)) - return lr - - lr_scheduler = ppsci.optimizer.lr_scheduler.LambdaDecay( - **cfg.TRAIN.lr_scheduler, - lr_lambda=lr_lambda, - )() - optimizer = ppsci.optimizer.Adam(lr_scheduler, **cfg.TRAIN.adam)(model) - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - { - "dataset": { - "name": "NamedArrayDataset", - "input": { - "input": data_funcs.values_train.astype(paddle.get_default_dtype()), - "target_seq": data_funcs.targets_train[:, :-1], - }, - "label": {"output": data_funcs.targets_train[:, 1:]}, - }, - "batch_size": cfg.TRAIN.batch_size, - "sampler": { - "name": "BatchSampler", - "drop_last": False, - "shuffle": True, - }, - "num_workers": 1, - }, - ppsci.loss.FunctionalLoss(cross_entropy_loss_func), - name="sup_constraint", - ) - - # wrap constraints together - constraint = {sup_constraint.name: sup_constraint} - - # set validator - sup_validator = ppsci.validate.SupervisedValidator( - { - "dataset": { - "name": "NamedArrayDataset", - "input": { - "input": data_funcs.values_val.astype(paddle.get_default_dtype()), - "target_seq": data_funcs.targets_val[:, :-1], - }, - "label": {"output": data_funcs.targets_val[:, 1:]}, - }, - "batch_size": cfg.TRAIN.batch_size, - "num_workers": 1, - }, - ppsci.loss.FunctionalLoss(cross_entropy_loss_func), - metric={"metric": ppsci.metric.FunctionalMetric(compute_inaccuracy)}, - name="sup_validator", - ) - - # wrap validator together - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - model, - constraint, - optimizer=optimizer, - validator=validator, - cfg=cfg, - ) - - # train model - solver.train() - - # evaluate after finished training - solver.eval() - - -def evaluate(cfg: DictConfig): - # data - data_funcs = SRSDDataFuncs( - cfg.DATA.data_path_srsd, - cfg.DATA.sampling_times, - cfg.DATA.response_variable, - cfg.DATA.vocab_library, - cfg.DATA.seq_length_max, - shuffle=True, - ) - - # set model - num_var_max = 
len(cfg.DATA.response_variable) - vocab_size = len(cfg.DATA.vocab_library) + 2 - model = ppsci.arch.Transformer( - **cfg.MODEL, - num_var_max=num_var_max, - vocab_size=vocab_size, - seq_length=data_funcs.seq_length_max, - ) - ppsci.utils.save_load.load_pretrain(model, path=cfg.EVAL.pretrained_model_path) - model.eval() - - # evaluate - num_repeat = cfg.EVAL.num_repeat if isinstance(data_funcs, SRSDDataFuncs) else 1 - num_samples = data_funcs.values_test.shape[0] - zss_dist = np.zeros((num_repeat, num_samples)) - for i in tqdm(range(num_repeat), desc="Evaluating"): - encoder_input = paddle.to_tensor( - data_funcs.values_test, dtype=paddle.get_default_dtype() - ) - preds = model.decode_process(encoder_input, is_tree_complete) - labels = paddle.to_tensor(data_funcs.targets_test) - - for j in range(num_samples): - try: - pred_simplify = simplify_output(preds[j], "tensor") - zss_dist[i][j] = compute_norm_zss_dist(pred_simplify[0], labels[j]) - except Exception: - zss_dist[i][j] = np.nan - - if i != num_repeat - 1: - # reload data to increase randomness - data_funcs.init_data("test") - - zss_dist_mean = np.nanmean(zss_dist, axis=0) - zss_dist_std = np.nanstd(zss_dist, axis=0) - zss_dist_min = np.nanmin(zss_dist, axis=0) - zss_dist_max = np.nanmax(zss_dist, axis=0) - - try: - keys = data_funcs.keys_test - assert len(keys) == num_samples - except Exception: - keys = [f"sample_{i}" for i in range(num_samples)] - - print( - f"zss_distance and accuracy in {num_repeat} attempts of {num_samples} samples with format: name => mean +- std | min ~ max" - ) - for i in range(num_samples): - key = keys[i] - print( - f"{key} => {zss_dist_mean[i]:.3f} +- {zss_dist_std[i]:.3f} | {zss_dist_min[i]:.3f} ~ {zss_dist_max[i]:.3f}" - ) - - print("-----------") - print( - f"=> Mean ZSS distance: {np.nanmean(zss_dist):.3f} +- {np.nanstd(zss_dist):.3f}" - ) - print(f"=> Hit rate: {np.sum(np.any(zss_dist==0, axis=0))}/{zss_dist.shape[1]}") - - # visualize prediction - visualizer = VisualizeFuncs(model) - visualizer.visualize_valid_data(data_funcs.targets_test, data_funcs.values_test, 10) - visualizer.visualize_demo() - - -def export(cfg: DictConfig): - def temporary_complete_func(seq_indices): - ".utils.is_tree_complete is not work in static gragh now." 
- arity = 1 - for n in seq_indices: - n = n.item() - if n == 0 or n == 1: - continue - print("Predict padding or , which is bad...") - if n == 2 or n == 3: - arity = arity + 2 - 1 - elif n in range(4, 13): - arity = arity + 1 - 1 - elif n in range(13, 20): - arity = arity + 0 - 1 - if arity == 0: - return True - else: - return False - - class WarppedModel(ppsci.arch.Transformer): - def __init__(self, *args, complete_func, **kwargs): - super().__init__(*args, **kwargs) - self.complete_func = complete_func - - def forward(self, x): - return {"output": self.decode_process(x["input"], self.complete_func)} - - # set model - num_var_max = len(cfg.DATA.response_variable) - vocab_size = len(cfg.DATA.vocab_library) + 2 - warpped_model = WarppedModel( - **cfg.MODEL, - num_var_max=num_var_max, - vocab_size=vocab_size, - seq_length_max=cfg.DATA.seq_length_max, - complete_func=temporary_complete_func, - ) - warpped_model.eval() - - # initialize solver - solver = ppsci.solver.Solver( - warpped_model, - pretrained_model_path=cfg.INFER.pretrained_model_path, - ) - - # export model - from paddle.static import InputSpec - - input_spec = [ - { - "input": InputSpec( - [None, cfg.DATA.sampling_times, len(cfg.DATA.response_variable), 1], - "float32", - name="input", - ) - } - ] - solver.export(input_spec, cfg.INFER.export_path) - - -def inference(cfg: DictConfig): - import sympy - - from deploy.python_infer import pinn_predictor - - predictor = pinn_predictor.PINNPredictor(cfg) - - C, y, x1, x2, x3, x4, x5, x6 = sympy.symbols( - "C, y, x1, x2, x3, x4, x5, x6", real=True, positive=True - ) - y = 25 * x1 + x2 * sympy.log(x1) - print("The ground truth is:", y) - - x1_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50)) - x2_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50)) - f = sympy.lambdify([x1, x2], y) - y_values = f(x1_values, x2_values) - dataset = np.zeros((50, 7)) - dataset[:, 0] = y_values - dataset[:, 1] = x1_values - dataset[:, 2] = x2_values - encoder_input = dataset[np.newaxis, :, :, np.newaxis].astype(np.float32) - output_dict = predictor.predict({"input": encoder_input}, cfg.INFER.batch_size) - output_dict = { - store_key: output_dict[infer_key] - for store_key, infer_key in zip(("output",), output_dict.keys()) - } - sympy_pred = simplify_output(output_dict["output"][0], "sympy") - print("The prediction is:", sympy_pred) - - -@hydra.main(version_base=None, config_path="./conf", config_name="transformer4sr.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - elif cfg.mode == "export": - export(cfg) - elif cfg.mode == "infer": - inference(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Reference: https://github.com/omron-sinicx/transformer4sr +""" + +import hydra +import numpy as np +import paddle +from functions_data import DataFuncs +from functions_data import SRSDDataFuncs +from functions_loss_metric import compute_inaccuracy +from functions_loss_metric import cross_entropy_loss_func +from functions_vis import VisualizeFuncs +from omegaconf import DictConfig +from tqdm import tqdm +from utils import compute_norm_zss_dist +from utils import is_tree_complete +from utils import simplify_output + +import ppsci + + +def train(cfg: DictConfig): + # data + data_funcs = DataFuncs( + cfg.DATA.data_path, + cfg.DATA.vocab_library, + cfg.DATA.seq_length_max, + cfg.DATA.ratio, + shuffle=True, + ) + + # set model + num_var_max = len(cfg.DATA.response_variable) + vocab_size = len(cfg.DATA.vocab_library) + 2 + model = ppsci.arch.Transformer( + **cfg.MODEL, + num_var_max=num_var_max, + vocab_size=vocab_size, + seq_length=data_funcs.seq_length_max, + ) + + # set optimizer + def lr_lambda(step, d_model=cfg.MODEL.d_model, warmup=cfg.TRAIN.lr_warmup): + if step == 0: + step = 1 + lr = d_model ** (-0.5) * min(step ** (-0.5), step * warmup ** (-1.5)) + return lr + + lr_scheduler = ppsci.optimizer.lr_scheduler.LambdaDecay( + **cfg.TRAIN.lr_scheduler, + lr_lambda=lr_lambda, + )() + optimizer = ppsci.optimizer.Adam(lr_scheduler, **cfg.TRAIN.adam)(model) + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "input": data_funcs.values_train.astype(paddle.get_default_dtype()), + "target_seq": data_funcs.targets_train[:, :-1], + }, + "label": {"output": data_funcs.targets_train[:, 1:]}, + }, + "batch_size": cfg.TRAIN.batch_size, + "sampler": { + "name": "BatchSampler", + "drop_last": False, + "shuffle": True, + }, + "num_workers": 1, + }, + ppsci.loss.FunctionalLoss(cross_entropy_loss_func), + name="sup_constraint", + ) + + # wrap constraints together + constraint = {sup_constraint.name: sup_constraint} + + # set validator + sup_validator = ppsci.validate.SupervisedValidator( + { + "dataset": { + "name": "NamedArrayDataset", + "input": { + "input": data_funcs.values_val.astype(paddle.get_default_dtype()), + "target_seq": data_funcs.targets_val[:, :-1], + }, + "label": {"output": data_funcs.targets_val[:, 1:]}, + }, + "batch_size": cfg.TRAIN.batch_size, + "num_workers": 1, + }, + ppsci.loss.FunctionalLoss(cross_entropy_loss_func), + metric={"metric": ppsci.metric.FunctionalMetric(compute_inaccuracy)}, + name="sup_validator", + ) + + # wrap validator together + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + model, + constraint, + optimizer=optimizer, + validator=validator, + cfg=cfg, + ) + + # train model + solver.train() + + # evaluate after finished training + solver.eval() + + +def evaluate(cfg: DictConfig): + # data + data_funcs = SRSDDataFuncs( + cfg.DATA.data_path_srsd, + cfg.DATA.sampling_times, + cfg.DATA.response_variable, + cfg.DATA.vocab_library, + cfg.DATA.seq_length_max, + shuffle=True, + ) + + # set model + num_var_max = len(cfg.DATA.response_variable) + vocab_size = len(cfg.DATA.vocab_library) + 2 + model = ppsci.arch.Transformer( + **cfg.MODEL, + num_var_max=num_var_max, + vocab_size=vocab_size, + seq_length=data_funcs.seq_length_max, + ) + ppsci.utils.save_load.load_pretrain(model, path=cfg.EVAL.pretrained_model_path) + model.eval() + + # evaluate + num_repeat = cfg.EVAL.num_repeat if isinstance(data_funcs, SRSDDataFuncs) else 1 
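+    # each repetition re-samples the SRSD test observations (via init_data below),
+    # so the reported ZSS statistics also reflect sampling randomness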
+    num_samples = data_funcs.values_test.shape[0]
+    zss_dist = np.zeros((num_repeat, num_samples))
+    for i in tqdm(range(num_repeat), desc="Evaluating"):
+        encoder_input = paddle.to_tensor(
+            data_funcs.values_test, dtype=paddle.get_default_dtype()
+        )
+        preds = model.decode_process(encoder_input, is_tree_complete)
+        labels = paddle.to_tensor(data_funcs.targets_test)
+
+        for j in range(num_samples):
+            try:
+                pred_simplify = simplify_output(preds[j], "tensor")
+                zss_dist[i][j] = compute_norm_zss_dist(pred_simplify[0], labels[j])
+            except Exception:
+                zss_dist[i][j] = np.nan
+
+        if i != num_repeat - 1:
+            # reload data to increase randomness
+            data_funcs.init_data("test")
+
+    zss_dist_mean = np.nanmean(zss_dist, axis=0)
+    zss_dist_std = np.nanstd(zss_dist, axis=0)
+    zss_dist_min = np.nanmin(zss_dist, axis=0)
+    zss_dist_max = np.nanmax(zss_dist, axis=0)
+
+    try:
+        keys = data_funcs.keys_test
+        assert len(keys) == num_samples
+    except Exception:
+        keys = [f"sample_{i}" for i in range(num_samples)]
+
+    print(
+        f"ZSS distance over {num_repeat} attempts for {num_samples} samples, format: name => mean +- std | min ~ max"
+    )
+    for i in range(num_samples):
+        key = keys[i]
+        print(
+            f"{key} => {zss_dist_mean[i]:.3f} +- {zss_dist_std[i]:.3f} | {zss_dist_min[i]:.3f} ~ {zss_dist_max[i]:.3f}"
+        )
+
+    print("-----------")
+    print(
+        f"=> Mean ZSS distance: {np.nanmean(zss_dist):.3f} +- {np.nanstd(zss_dist):.3f}"
+    )
+    print(f"=> Hit rate: {np.sum(np.any(zss_dist==0, axis=0))}/{zss_dist.shape[1]}")
+
+    # visualize prediction
+    visualizer = VisualizeFuncs(model)
+    visualizer.visualize_valid_data(data_funcs.targets_test, data_funcs.values_test, 10)
+    visualizer.visualize_demo()
+
+
+def export(cfg: DictConfig):
+    def temporary_complete_func(seq_indices):
+        "utils.is_tree_complete does not work in static graph mode yet."
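+        # indices 0 and 1 are special (padding/start) tokens and are skipped;
+        # 2-3 are treated as binary operators, 4-12 as unary operators and
+        # 13-19 as leaves, mirroring the vocabulary layout of utils.is_tree_complete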
+        arity = 1
+        for n in seq_indices:
+            n = n.item()
+            if n == 0 or n == 1:
+                continue
+                print("Predict padding or , which is bad...")
+            if n == 2 or n == 3:
+                arity = arity + 2 - 1
+            elif n in range(4, 13):
+                arity = arity + 1 - 1
+            elif n in range(13, 20):
+                arity = arity + 0 - 1
+        if arity == 0:
+            return True
+        else:
+            return False
+
+    class WarppedModel(ppsci.arch.Transformer):
+        def __init__(self, *args, complete_func, **kwargs):
+            super().__init__(*args, **kwargs)
+            self.complete_func = complete_func
+
+        def forward(self, x):
+            return {"output": self.decode_process(x["input"], self.complete_func)}
+
+    # set model
+    num_var_max = len(cfg.DATA.response_variable)
+    vocab_size = len(cfg.DATA.vocab_library) + 2
+    warpped_model = WarppedModel(
+        **cfg.MODEL,
+        num_var_max=num_var_max,
+        vocab_size=vocab_size,
+        seq_length_max=cfg.DATA.seq_length_max,
+        complete_func=temporary_complete_func,
+    )
+    warpped_model.eval()
+
+    # initialize solver
+    solver = ppsci.solver.Solver(
+        warpped_model,
+        pretrained_model_path=cfg.INFER.pretrained_model_path,
+    )
+
+    # export model
+    from paddle.static import InputSpec
+
+    input_spec = [
+        {
+            "input": InputSpec(
+                [None, cfg.DATA.sampling_times, len(cfg.DATA.response_variable), 1],
+                "float32",
+                name="input",
+            )
+        }
+    ]
+    solver.export(input_spec, cfg.INFER.export_path)
+
+
+def inference(cfg: DictConfig):
+    import sympy
+
+    from deploy.python_infer import pinn_predictor
+
+    predictor = pinn_predictor.PINNPredictor(cfg)
+
+    C, y, x1, x2, x3, x4, x5, x6 = sympy.symbols(
+        "C, y, x1, x2, x3, x4, x5, x6", real=True, positive=True
+    )
+    y = 25 * x1 + x2 * sympy.log(x1)
+    print("The ground truth is:", y)
+
+    x1_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50))
+    x2_values = np.power(10.0, np.random.uniform(-1.0, 1.0, size=50))
+    f = sympy.lambdify([x1, x2], y)
+    y_values = f(x1_values, x2_values)
+    dataset = np.zeros((50, 7))
+    dataset[:, 0] = y_values
+    dataset[:, 1] = x1_values
+    dataset[:, 2] = x2_values
+    encoder_input = dataset[np.newaxis, :, :, np.newaxis].astype(np.float32)
+    output_dict = predictor.predict({"input": encoder_input}, cfg.INFER.batch_size)
+    output_dict = {
+        store_key: output_dict[infer_key]
+        for store_key, infer_key in zip(("output",), output_dict.keys())
+    }
+    sympy_pred = simplify_output(output_dict["output"][0], "sympy")
+    print("The prediction is:", sympy_pred)
+
+
+@hydra.main(version_base=None, config_path="./conf", config_name="transformer4sr.yaml")
+def main(cfg: DictConfig):
+    if cfg.mode == "train":
+        train(cfg)
+    elif cfg.mode == "eval":
+        evaluate(cfg)
+    elif cfg.mode == "export":
+        export(cfg)
+    elif cfg.mode == "infer":
+        inference(cfg)
+    else:
+        raise ValueError(
+            f"cfg.mode should be in ['train', 'eval', 'export', 'infer'], but got '{cfg.mode}'"
+        )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/transformer4sr/utils.py b/examples/transformer4sr/utils.py
index 40f5b96aec..c1027f3c1c 100644
--- a/examples/transformer4sr/utils.py
+++ b/examples/transformer4sr/utils.py
@@ -1,477 +1,477 @@
-# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Reference: https://github.com/omron-sinicx/transformer4sr -""" - -from typing import List - -import numpy as np -import paddle -import sympy -import yaml -import zss -from typing_extensions import Literal - -with open("./conf/transformer4sr.yaml", "r") as file: - cfg = yaml.safe_load(file) -vocab_library = cfg["DATA"]["vocab_library"] - -C, x1, x2, x3, x4, x5, x6 = sympy.symbols( - "C, x1, x2, x3, x4, x5, x6", real=True, positive=True -) -MY_VOCAB = np.array( - [ - ["add", 4, 2], # binary operators - ["sub", 3, 2], - ["mul", 6, 2], - ["sin", 1, 1], # unary operators - ["cos", 1, 1], - ["log", 2, 1], - ["exp", 2, 1], - ["neg", 0, 1], - ["inv", 3, 1], - ["sq", 2, 1], - ["cb", 0, 1], - ["sqrt", 2, 1], - ["cbrt", 0, 1], - ["C", 8, 0], # leaves - ["x1", 8, 0], - ["x2", 8, 0], - ["x3", 4, 0], - ["x4", 4, 0], - ["x5", 2, 0], - ["x6", 2, 0], - ] -) - - -def from_seq_to_sympy(expr): - """ - Recursive function! - Convert the initial sequence of tokens into SymPy expression. - """ - cur_token = expr[0] - try: - return float(cur_token) - except ValueError: - cur_idx = np.where(MY_VOCAB[:, 0] == cur_token)[0][0] - cur_arity = int(MY_VOCAB[cur_idx, 2]) - if cur_arity == 0: - if cur_token == "C": - return C - elif cur_token == "x1": - return x1 - elif cur_token == "x2": - return x2 - elif cur_token == "x3": - return x3 - elif cur_token == "x4": - return x4 - elif cur_token == "x5": - return x5 - elif cur_token == "x6": - return x6 - elif cur_arity == 1: - if cur_token == "sin": - return sympy.sin(from_seq_to_sympy(expr[1:])) - elif cur_token == "cos": - return sympy.cos(from_seq_to_sympy(expr[1:])) - elif cur_token == "log": - return sympy.log(from_seq_to_sympy(expr[1:])) - elif cur_token == "exp": - return sympy.exp(from_seq_to_sympy(expr[1:])) - elif cur_token == "neg": - return -from_seq_to_sympy(expr[1:]) - elif cur_token == "inv": - return 1 / from_seq_to_sympy(expr[1:]) - elif cur_token == "sq": - return from_seq_to_sympy(expr[1:]) ** 2 - elif cur_token == "cb": - return from_seq_to_sympy(expr[1:]) ** 3 - elif cur_token == "sqrt": - return sympy.sqrt(from_seq_to_sympy(expr[1:])) - elif cur_token == "cbrt": - return sympy.cbrt(from_seq_to_sympy(expr[1:])) - elif cur_arity == 2: - arity_count = 1 - idx_split = 1 - for temp_token in expr[1:]: - try: - float(temp_token) - arity_count += -1 - except ValueError: - temp_idx = np.where(MY_VOCAB[:, 0] == temp_token)[0][0] - arity_count += int(MY_VOCAB[temp_idx, 2]) - 1 - idx_split += 1 - if arity_count == 0: - break - left_list = expr[1:idx_split] - right_list = expr[idx_split:] - if cur_token == "add": - return from_seq_to_sympy(left_list) + from_seq_to_sympy(right_list) - elif cur_token == "sub": - return from_seq_to_sympy(left_list) - from_seq_to_sympy(right_list) - elif cur_token == "mul": - return from_seq_to_sympy(left_list) * from_seq_to_sympy(right_list) - - -def from_sympy_to_seq(sympy_expr): - """ - Recursive function! - Convert a SymPy expression into a standardized sequence of tokens. - This function calls from_sympy_power_to_seq, - from_sympy_mul_to_seq, and from_sympy_addition_to sequence. 
- """ - if len(sympy_expr.args) == 0: # leaf - return [str(sympy_expr)] - elif len(sympy_expr.args) == 1: # unary operator - return [str(sympy_expr.func)] + from_sympy_to_seq(sympy_expr.args[0]) - elif len(sympy_expr.args) >= 2: # binary operator - if sympy_expr.func == sympy.core.power.Pow: - power_seq = from_sympy_power_to_seq(sympy_expr.args[1]) - return power_seq + from_sympy_to_seq(sympy_expr.args[0]) - elif sympy_expr.func == sympy.core.mul.Mul: - return from_sympy_mul_to_seq(sympy_expr) - elif sympy_expr.func == sympy.core.add.Add: - return from_sympy_add_to_seq(sympy_expr) - - -def from_sympy_power_to_seq(exponent): - """ - C.f. from_sympy_to_seq function. - Standardize the sequence of tokens for power functions. - """ - if exponent == (-4): - return ["inv", "sq", "sq"] - elif exponent == (-3): - return ["inv", "cb"] - elif exponent == (-2): - return ["inv", "sq"] - elif exponent == (-3 / 2): - return ["inv", "cb", "sqrt"] - elif exponent == (-1): - return ["inv"] - elif exponent == (-1 / 2): - return ["inv", "sqrt"] - elif exponent == (-1 / 3): - return ["inv", "cbrt"] - elif exponent == (-1 / 4): - return ["inv", "sqrt", "sqrt"] - elif exponent == (1 / 4): - return ["sqrt", "sqrt"] - elif exponent == (1 / 3): - return ["cbrt"] - elif exponent == (1 / 2): - return ["sqrt"] - elif exponent == (3 / 2): - return ["cb", "sqrt"] - elif exponent == (2): - return ["sq"] - elif exponent == (3): - return ["cb"] - elif exponent == (4): - return ["sq", "sq"] - else: - return ["abort"] - - -def from_sympy_mul_to_seq(sympy_mul_expr): - """ - C.f. from_sympy_to_seq function. - Standardize the sequence of tokens for multiplications. - """ - tokens = ["x1", "x2", "x3", "x4", "x5", "x6"] - num_factors = 0 - num_constants = 0 - is_neg = False - for n in range(len(sympy_mul_expr.args)): - cur_fact = sympy_mul_expr.args[n] - if cur_fact == (-1): - is_neg = True - if any(t in str(cur_fact) for t in tokens): - num_factors += 1 - else: - num_constants += 1 - seq = [] - if is_neg: - seq.append("neg") - for _ in range(num_factors - 1): - seq.append("mul") - if num_constants > 0: - seq.append("mul") - seq.append("C") - for n in range(len(sympy_mul_expr.args)): - cur_fact = sympy_mul_expr.args[n] - if any(t in str(cur_fact) for t in tokens): - seq = seq + from_sympy_to_seq(cur_fact) - return seq - - -def from_sympy_add_to_seq(sympy_add_expr): - """ - C.f. from_sympy_to_seq function. - Standardize the sequence of tokens for additions. 
- """ - tokens = ["x1", "x2", "x3", "x4", "x5", "x6"] - num_terms = 0 - num_constants = 0 - for n in range(len(sympy_add_expr.args)): - cur_term = sympy_add_expr.args[n] - if any(t in str(cur_term) for t in tokens): - num_terms += 1 - else: - num_constants += 1 - seq = [] - for _ in range(num_terms - 1): - seq.append("add") - if num_constants > 0: - seq.append("add") - seq.append("C") - for n in range(len(sympy_add_expr.args)): - cur_term = sympy_add_expr.args[n] - if any(t in str(cur_term) for t in tokens): - seq = seq + from_sympy_to_seq(cur_term) - return seq - - -def from_seq_to_tokens(seq_int: paddle.Tensor) -> List: - """Convert the sequence of model results into sequence of tokens.""" - seq_tokens = [] - for n in range(len(seq_int)): - if seq_int[n] >= 2: - seq_tokens.append(vocab_library[seq_int[n] - 2]) - return seq_tokens - - -def from_tokens_to_seq(seq_tokens: List) -> paddle.Tensor: - """Convert the sequence of tokens into sequence of model results.""" - seq_int = [] - for token in seq_tokens: - seq_int.append(vocab_library.index(token) + 2) - return paddle.to_tensor(seq_int, dtype=paddle.int64).unsqueeze(0) - - -def from_seq_to_zss_tree(seq_tokens: List): - """ - Convert the sequence into zss tree. Refer to https://arxiv.org/abs/2206.10540. - Note: also works with sequences that do not correspond to complete equation trees! - """ - cur_token = seq_tokens[0] - if cur_token in ["add", "mul"]: - split_idx = find_split_idx(seq_tokens) - if split_idx is None: - tree = zss.Node(cur_token) - if len(seq_tokens[1:]) > 0: - left_kid = from_seq_to_zss_tree(seq_tokens[1:]) - tree.addkid(left_kid) - else: - tree = zss.Node(cur_token) - left_kid = from_seq_to_zss_tree(seq_tokens[1 : split_idx + 1]) - tree.addkid(left_kid) - if len(seq_tokens[split_idx + 1 :]) > 0: - right_kid = from_seq_to_zss_tree(seq_tokens[split_idx + 1 :]) - tree.addkid(right_kid) - return tree - elif cur_token in ["sin", "cos", "log", "exp", "neg", "inv", "sqrt", "sq", "cb"]: - tree = zss.Node(cur_token) - if len(seq_tokens[1:]) > 0: - kid = from_seq_to_zss_tree(seq_tokens[1:]) - tree.addkid(kid) - return tree - elif cur_token in ["C", "x1", "x2", "x3", "x4", "x5", "x6"]: - leaf = zss.Node(cur_token) - return leaf - - -def find_split_idx(seq_tokens): - """ - Helper function for from_seq_to_zss_tree. - Locates the split index for binary nodes. - """ - split_idx = 0 - arity = 1 - while arity > 0 and split_idx + 1 < len(seq_tokens): - split_idx += 1 - if seq_tokens[split_idx] in ["add", "mul"]: - arity += 1 - elif seq_tokens[split_idx] in ["C", "x1", "x2", "x3", "x4", "x5", "x6"]: - arity += -1 - if split_idx + 1 == len(seq_tokens): - split_idx = None - return split_idx - - -def simplify_output( - out_tensor: paddle.Tensor, - mode: Literal["sympy", "token", "tensor"], -) -> paddle.Tensor: - """Convert the model output results into the corresponding form according to the 'mode' and simplify it.""" - out_tokens = from_seq_to_tokens(out_tensor) - out_sympy = from_seq_to_sympy(out_tokens) - out_sympy_reassign = reassign_variables(out_sympy) - - out_sympy_simplify = sympy.simplify(sympy.factor(out_sympy_reassign)) - if mode == "sympy": - return out_sympy_simplify - - out_re_tokens = from_sympy_to_seq(out_sympy_simplify) - if mode == "token": - return out_re_tokens - - out_re_tensor = from_tokens_to_seq(out_re_tokens) - return out_re_tensor - - -def reassign_variables(sympy_expr): - """ - Counts the number of variables in the SymPy expression and assign firte variables first. 
- Example: log(x3)+x5 becomes log(x1)+x2 - """ - tokens = ["x1", "x2", "x3", "x4", "x5", "x6"] - sympy_str = str(sympy_expr) - exist = [] - for t in tokens: - exist.append(t in sympy_str) - for idx_new, idx_old in enumerate(np.where(exist)[0]): - sympy_str = sympy_str.replace(f"x{idx_old+1}", f"x{idx_new+1}") - sympy_expr = sympy.sympify(sympy_str) - return sympy_expr - - -def is_tree_complete(seq_indices): - """Check whether a given sequence of tokens defines a complete symbolic expression.""" - arity = 1 - for n in seq_indices: - n = n.item() - if n == 0 or n == 1: - continue - print("Predict padding or , which is bad...") - cur_token = vocab_library[n - 2] - if cur_token in ["add", "mul"]: - arity = arity + 2 - 1 - elif cur_token in [ - "sin", - "cos", - "log", - "exp", - "neg", - "inv", - "sqrt", - "sq", - "cb", - ]: - arity = arity + 1 - 1 - elif cur_token in ["C", "x1", "x2", "x3", "x4", "x5", "x6"]: - arity = arity + 0 - 1 - if arity == 0: - return True - else: - return False - - -def compute_norm_zss_dist(pred: paddle.Tensor, label: paddle.Tensor) -> float: - """Computes ZSS tree edit distance, normalized by the length of the ground label and is between [0, 1]. - - Args: - pred (paddle.Tensor): Best sequence as predicted by the model. Typically the result of 'Beam Search' with k=1. - label (paddle.Tensor): Ground_truth (feed to the decoder, shifted right). - - Returns: - float: ZSS distance. - """ - label = from_seq_to_tokens(label) - pred = from_seq_to_tokens(pred) - tree_truth = from_seq_to_zss_tree(label) - tree_pred = from_seq_to_zss_tree(pred) - dist = zss.simple_distance(tree_truth, tree_pred) - norm_dist = dist / float(len(label)) - norm_zss_dist = min(1.0, norm_dist) - return norm_zss_dist - - -def count_var_num(sympy_expr): - """ - Assumes that the variables are properly numbered, i.e. 'reassign_variables' has been applied. - Returns the number of variables in the SymPy expression. - """ - num_var = 0 - while f"x{num_var+1}" in str(sympy_expr): - num_var += 1 - return num_var - - -def expr_tree_depth(sympy_expr): - """ - Recursive function! - Count the maximum depth for a given SymPy expression. - """ - if len(sympy_expr.args) == 0: - return 1 - elif len(sympy_expr.args) == 1: - return 1 + expr_tree_depth(sympy_expr.args[0]) - else: - max_depth = 0 - for a in sympy_expr.args: - temp_depth = expr_tree_depth(a) - if temp_depth > max_depth: - max_depth = temp_depth - return 1 + max_depth - - -def gen_expr(vocab): - """ - Recursive function! - Generate one expression using the tokens and their respective probabiities provided by 'vocab'. 
- """ - weights = vocab[:, 1].astype("float32") - probs = weights / np.sum(weights) - N = len(vocab) - expr = [] - rand_idx = np.random.choice(N, p=probs) - cur_token = vocab[rand_idx, 0] - cur_arity = int(vocab[rand_idx, 2]) - expr.append(cur_token) - if cur_arity == 0: - return expr - else: - if cur_token in ["sin", "cos"]: - idx1 = np.where(vocab[:, 0] == "sin")[0][0] - idx2 = np.where(vocab[:, 0] == "cos")[0][0] - new_vocab = np.delete(vocab, [idx1, idx2], axis=0) - elif cur_token in ["log", "exp"]: - idx1 = np.where(vocab[:, 0] == "log")[0][0] - idx2 = np.where(vocab[:, 0] == "exp")[0][0] - new_vocab = np.delete(vocab, [idx1, idx2], axis=0) - else: - new_vocab = vocab - if cur_arity == 1: - child = gen_expr(new_vocab) - return expr + child - elif cur_arity == 2: - child1 = gen_expr(new_vocab) - child2 = gen_expr(new_vocab) - return expr + child1 + child2 - - -def gen_samples(sympy_expr, num_samples=200): - """ - Sample from SymPy expression. - Variables are first sampled using log-uniform distributions. - """ - np_x = np.power(10.0, np.random.uniform(low=-1.0, high=1.0, size=(num_samples, 6))) - f = sympy.lambdify([x1, x2, x3, x4, x5, x6], sympy_expr) - np_y = f(np_x[:, 0], np_x[:, 1], np_x[:, 2], np_x[:, 3], np_x[:, 4], np_x[:, 5]) - return np_y, np_x +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/omron-sinicx/transformer4sr +""" + +from typing import List + +import numpy as np +import paddle +import sympy +import yaml +import zss +from typing_extensions import Literal + +with open("./conf/transformer4sr.yaml", "r") as file: + cfg = yaml.safe_load(file) +vocab_library = cfg["DATA"]["vocab_library"] + +C, x1, x2, x3, x4, x5, x6 = sympy.symbols( + "C, x1, x2, x3, x4, x5, x6", real=True, positive=True +) +MY_VOCAB = np.array( + [ + ["add", 4, 2], # binary operators + ["sub", 3, 2], + ["mul", 6, 2], + ["sin", 1, 1], # unary operators + ["cos", 1, 1], + ["log", 2, 1], + ["exp", 2, 1], + ["neg", 0, 1], + ["inv", 3, 1], + ["sq", 2, 1], + ["cb", 0, 1], + ["sqrt", 2, 1], + ["cbrt", 0, 1], + ["C", 8, 0], # leaves + ["x1", 8, 0], + ["x2", 8, 0], + ["x3", 4, 0], + ["x4", 4, 0], + ["x5", 2, 0], + ["x6", 2, 0], + ] +) + + +def from_seq_to_sympy(expr): + """ + Recursive function! + Convert the initial sequence of tokens into SymPy expression. 
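+    Example: from_seq_to_sympy(["add", "x1", "C"]) returns the expression x1 + C.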
+ """ + cur_token = expr[0] + try: + return float(cur_token) + except ValueError: + cur_idx = np.where(MY_VOCAB[:, 0] == cur_token)[0][0] + cur_arity = int(MY_VOCAB[cur_idx, 2]) + if cur_arity == 0: + if cur_token == "C": + return C + elif cur_token == "x1": + return x1 + elif cur_token == "x2": + return x2 + elif cur_token == "x3": + return x3 + elif cur_token == "x4": + return x4 + elif cur_token == "x5": + return x5 + elif cur_token == "x6": + return x6 + elif cur_arity == 1: + if cur_token == "sin": + return sympy.sin(from_seq_to_sympy(expr[1:])) + elif cur_token == "cos": + return sympy.cos(from_seq_to_sympy(expr[1:])) + elif cur_token == "log": + return sympy.log(from_seq_to_sympy(expr[1:])) + elif cur_token == "exp": + return sympy.exp(from_seq_to_sympy(expr[1:])) + elif cur_token == "neg": + return -from_seq_to_sympy(expr[1:]) + elif cur_token == "inv": + return 1 / from_seq_to_sympy(expr[1:]) + elif cur_token == "sq": + return from_seq_to_sympy(expr[1:]) ** 2 + elif cur_token == "cb": + return from_seq_to_sympy(expr[1:]) ** 3 + elif cur_token == "sqrt": + return sympy.sqrt(from_seq_to_sympy(expr[1:])) + elif cur_token == "cbrt": + return sympy.cbrt(from_seq_to_sympy(expr[1:])) + elif cur_arity == 2: + arity_count = 1 + idx_split = 1 + for temp_token in expr[1:]: + try: + float(temp_token) + arity_count += -1 + except ValueError: + temp_idx = np.where(MY_VOCAB[:, 0] == temp_token)[0][0] + arity_count += int(MY_VOCAB[temp_idx, 2]) - 1 + idx_split += 1 + if arity_count == 0: + break + left_list = expr[1:idx_split] + right_list = expr[idx_split:] + if cur_token == "add": + return from_seq_to_sympy(left_list) + from_seq_to_sympy(right_list) + elif cur_token == "sub": + return from_seq_to_sympy(left_list) - from_seq_to_sympy(right_list) + elif cur_token == "mul": + return from_seq_to_sympy(left_list) * from_seq_to_sympy(right_list) + + +def from_sympy_to_seq(sympy_expr): + """ + Recursive function! + Convert a SymPy expression into a standardized sequence of tokens. + This function calls from_sympy_power_to_seq, + from_sympy_mul_to_seq, and from_sympy_addition_to sequence. + """ + if len(sympy_expr.args) == 0: # leaf + return [str(sympy_expr)] + elif len(sympy_expr.args) == 1: # unary operator + return [str(sympy_expr.func)] + from_sympy_to_seq(sympy_expr.args[0]) + elif len(sympy_expr.args) >= 2: # binary operator + if sympy_expr.func == sympy.core.power.Pow: + power_seq = from_sympy_power_to_seq(sympy_expr.args[1]) + return power_seq + from_sympy_to_seq(sympy_expr.args[0]) + elif sympy_expr.func == sympy.core.mul.Mul: + return from_sympy_mul_to_seq(sympy_expr) + elif sympy_expr.func == sympy.core.add.Add: + return from_sympy_add_to_seq(sympy_expr) + + +def from_sympy_power_to_seq(exponent): + """ + C.f. from_sympy_to_seq function. + Standardize the sequence of tokens for power functions. 
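+    Example: an exponent of -2 maps to ["inv", "sq"], i.e. 1/(.)**2;
+    exponents outside the supported set return ["abort"].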
+ """ + if exponent == (-4): + return ["inv", "sq", "sq"] + elif exponent == (-3): + return ["inv", "cb"] + elif exponent == (-2): + return ["inv", "sq"] + elif exponent == (-3 / 2): + return ["inv", "cb", "sqrt"] + elif exponent == (-1): + return ["inv"] + elif exponent == (-1 / 2): + return ["inv", "sqrt"] + elif exponent == (-1 / 3): + return ["inv", "cbrt"] + elif exponent == (-1 / 4): + return ["inv", "sqrt", "sqrt"] + elif exponent == (1 / 4): + return ["sqrt", "sqrt"] + elif exponent == (1 / 3): + return ["cbrt"] + elif exponent == (1 / 2): + return ["sqrt"] + elif exponent == (3 / 2): + return ["cb", "sqrt"] + elif exponent == (2): + return ["sq"] + elif exponent == (3): + return ["cb"] + elif exponent == (4): + return ["sq", "sq"] + else: + return ["abort"] + + +def from_sympy_mul_to_seq(sympy_mul_expr): + """ + C.f. from_sympy_to_seq function. + Standardize the sequence of tokens for multiplications. + """ + tokens = ["x1", "x2", "x3", "x4", "x5", "x6"] + num_factors = 0 + num_constants = 0 + is_neg = False + for n in range(len(sympy_mul_expr.args)): + cur_fact = sympy_mul_expr.args[n] + if cur_fact == (-1): + is_neg = True + if any(t in str(cur_fact) for t in tokens): + num_factors += 1 + else: + num_constants += 1 + seq = [] + if is_neg: + seq.append("neg") + for _ in range(num_factors - 1): + seq.append("mul") + if num_constants > 0: + seq.append("mul") + seq.append("C") + for n in range(len(sympy_mul_expr.args)): + cur_fact = sympy_mul_expr.args[n] + if any(t in str(cur_fact) for t in tokens): + seq = seq + from_sympy_to_seq(cur_fact) + return seq + + +def from_sympy_add_to_seq(sympy_add_expr): + """ + C.f. from_sympy_to_seq function. + Standardize the sequence of tokens for additions. + """ + tokens = ["x1", "x2", "x3", "x4", "x5", "x6"] + num_terms = 0 + num_constants = 0 + for n in range(len(sympy_add_expr.args)): + cur_term = sympy_add_expr.args[n] + if any(t in str(cur_term) for t in tokens): + num_terms += 1 + else: + num_constants += 1 + seq = [] + for _ in range(num_terms - 1): + seq.append("add") + if num_constants > 0: + seq.append("add") + seq.append("C") + for n in range(len(sympy_add_expr.args)): + cur_term = sympy_add_expr.args[n] + if any(t in str(cur_term) for t in tokens): + seq = seq + from_sympy_to_seq(cur_term) + return seq + + +def from_seq_to_tokens(seq_int: paddle.Tensor) -> List: + """Convert the sequence of model results into sequence of tokens.""" + seq_tokens = [] + for n in range(len(seq_int)): + if seq_int[n] >= 2: + seq_tokens.append(vocab_library[seq_int[n] - 2]) + return seq_tokens + + +def from_tokens_to_seq(seq_tokens: List) -> paddle.Tensor: + """Convert the sequence of tokens into sequence of model results.""" + seq_int = [] + for token in seq_tokens: + seq_int.append(vocab_library.index(token) + 2) + return paddle.to_tensor(seq_int, dtype=paddle.int64).unsqueeze(0) + + +def from_seq_to_zss_tree(seq_tokens: List): + """ + Convert the sequence into zss tree. Refer to https://arxiv.org/abs/2206.10540. + Note: also works with sequences that do not correspond to complete equation trees! 
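+    Example: ["add", "x1", "C"] becomes an "add" node with children "x1" and "C".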
+ """ + cur_token = seq_tokens[0] + if cur_token in ["add", "mul"]: + split_idx = find_split_idx(seq_tokens) + if split_idx is None: + tree = zss.Node(cur_token) + if len(seq_tokens[1:]) > 0: + left_kid = from_seq_to_zss_tree(seq_tokens[1:]) + tree.addkid(left_kid) + else: + tree = zss.Node(cur_token) + left_kid = from_seq_to_zss_tree(seq_tokens[1 : split_idx + 1]) + tree.addkid(left_kid) + if len(seq_tokens[split_idx + 1 :]) > 0: + right_kid = from_seq_to_zss_tree(seq_tokens[split_idx + 1 :]) + tree.addkid(right_kid) + return tree + elif cur_token in ["sin", "cos", "log", "exp", "neg", "inv", "sqrt", "sq", "cb"]: + tree = zss.Node(cur_token) + if len(seq_tokens[1:]) > 0: + kid = from_seq_to_zss_tree(seq_tokens[1:]) + tree.addkid(kid) + return tree + elif cur_token in ["C", "x1", "x2", "x3", "x4", "x5", "x6"]: + leaf = zss.Node(cur_token) + return leaf + + +def find_split_idx(seq_tokens): + """ + Helper function for from_seq_to_zss_tree. + Locates the split index for binary nodes. + """ + split_idx = 0 + arity = 1 + while arity > 0 and split_idx + 1 < len(seq_tokens): + split_idx += 1 + if seq_tokens[split_idx] in ["add", "mul"]: + arity += 1 + elif seq_tokens[split_idx] in ["C", "x1", "x2", "x3", "x4", "x5", "x6"]: + arity += -1 + if split_idx + 1 == len(seq_tokens): + split_idx = None + return split_idx + + +def simplify_output( + out_tensor: paddle.Tensor, + mode: Literal["sympy", "token", "tensor"], +) -> paddle.Tensor: + """Convert the model output results into the corresponding form according to the 'mode' and simplify it.""" + out_tokens = from_seq_to_tokens(out_tensor) + out_sympy = from_seq_to_sympy(out_tokens) + out_sympy_reassign = reassign_variables(out_sympy) + + out_sympy_simplify = sympy.simplify(sympy.factor(out_sympy_reassign)) + if mode == "sympy": + return out_sympy_simplify + + out_re_tokens = from_sympy_to_seq(out_sympy_simplify) + if mode == "token": + return out_re_tokens + + out_re_tensor = from_tokens_to_seq(out_re_tokens) + return out_re_tensor + + +def reassign_variables(sympy_expr): + """ + Counts the number of variables in the SymPy expression and assign firte variables first. + Example: log(x3)+x5 becomes log(x1)+x2 + """ + tokens = ["x1", "x2", "x3", "x4", "x5", "x6"] + sympy_str = str(sympy_expr) + exist = [] + for t in tokens: + exist.append(t in sympy_str) + for idx_new, idx_old in enumerate(np.where(exist)[0]): + sympy_str = sympy_str.replace(f"x{idx_old+1}", f"x{idx_new+1}") + sympy_expr = sympy.sympify(sympy_str) + return sympy_expr + + +def is_tree_complete(seq_indices): + """Check whether a given sequence of tokens defines a complete symbolic expression.""" + arity = 1 + for n in seq_indices: + n = n.item() + if n == 0 or n == 1: + continue + print("Predict padding or , which is bad...") + cur_token = vocab_library[n - 2] + if cur_token in ["add", "mul"]: + arity = arity + 2 - 1 + elif cur_token in [ + "sin", + "cos", + "log", + "exp", + "neg", + "inv", + "sqrt", + "sq", + "cb", + ]: + arity = arity + 1 - 1 + elif cur_token in ["C", "x1", "x2", "x3", "x4", "x5", "x6"]: + arity = arity + 0 - 1 + if arity == 0: + return True + else: + return False + + +def compute_norm_zss_dist(pred: paddle.Tensor, label: paddle.Tensor) -> float: + """Computes ZSS tree edit distance, normalized by the length of the ground label and is between [0, 1]. + + Args: + pred (paddle.Tensor): Best sequence as predicted by the model. Typically the result of 'Beam Search' with k=1. + label (paddle.Tensor): Ground_truth (feed to the decoder, shifted right). 
+ + Returns: + float: ZSS distance. + """ + label = from_seq_to_tokens(label) + pred = from_seq_to_tokens(pred) + tree_truth = from_seq_to_zss_tree(label) + tree_pred = from_seq_to_zss_tree(pred) + dist = zss.simple_distance(tree_truth, tree_pred) + norm_dist = dist / float(len(label)) + norm_zss_dist = min(1.0, norm_dist) + return norm_zss_dist + + +def count_var_num(sympy_expr): + """ + Assumes that the variables are properly numbered, i.e. 'reassign_variables' has been applied. + Returns the number of variables in the SymPy expression. + """ + num_var = 0 + while f"x{num_var+1}" in str(sympy_expr): + num_var += 1 + return num_var + + +def expr_tree_depth(sympy_expr): + """ + Recursive function! + Count the maximum depth for a given SymPy expression. + """ + if len(sympy_expr.args) == 0: + return 1 + elif len(sympy_expr.args) == 1: + return 1 + expr_tree_depth(sympy_expr.args[0]) + else: + max_depth = 0 + for a in sympy_expr.args: + temp_depth = expr_tree_depth(a) + if temp_depth > max_depth: + max_depth = temp_depth + return 1 + max_depth + + +def gen_expr(vocab): + """ + Recursive function! + Generate one expression using the tokens and their respective probabiities provided by 'vocab'. + """ + weights = vocab[:, 1].astype("float32") + probs = weights / np.sum(weights) + N = len(vocab) + expr = [] + rand_idx = np.random.choice(N, p=probs) + cur_token = vocab[rand_idx, 0] + cur_arity = int(vocab[rand_idx, 2]) + expr.append(cur_token) + if cur_arity == 0: + return expr + else: + if cur_token in ["sin", "cos"]: + idx1 = np.where(vocab[:, 0] == "sin")[0][0] + idx2 = np.where(vocab[:, 0] == "cos")[0][0] + new_vocab = np.delete(vocab, [idx1, idx2], axis=0) + elif cur_token in ["log", "exp"]: + idx1 = np.where(vocab[:, 0] == "log")[0][0] + idx2 = np.where(vocab[:, 0] == "exp")[0][0] + new_vocab = np.delete(vocab, [idx1, idx2], axis=0) + else: + new_vocab = vocab + if cur_arity == 1: + child = gen_expr(new_vocab) + return expr + child + elif cur_arity == 2: + child1 = gen_expr(new_vocab) + child2 = gen_expr(new_vocab) + return expr + child1 + child2 + + +def gen_samples(sympy_expr, num_samples=200): + """ + Sample from SymPy expression. + Variables are first sampled using log-uniform distributions. 
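+    Inputs are drawn from 10**U(-1, 1); np_y may contain NaN where the
+    expression is undefined, so callers are expected to mask those entries out.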
+ """ + np_x = np.power(10.0, np.random.uniform(low=-1.0, high=1.0, size=(num_samples, 6))) + f = sympy.lambdify([x1, x2, x3, x4, x5, x6], sympy_expr) + np_y = f(np_x[:, 0], np_x[:, 1], np_x[:, 2], np_x[:, 3], np_x[:, 4], np_x[:, 5]) + return np_y, np_x diff --git a/examples/velocityGAN/dataset_config.json b/examples/velocityGAN/dataset_config.json index af2792a6fa..a1bad1fc57 100644 --- a/examples/velocityGAN/dataset_config.json +++ b/examples/velocityGAN/dataset_config.json @@ -1,189 +1,189 @@ -{ - "curvefault-a": { - "data_max": 52.32, - "data_min": -26.48, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "curvefault-b": { - "data_max": 50.98, - "data_min": -24.93, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "curvevel-a": { - "data_max": 55.1, - "data_min": -27.11, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "curvevel-b": { - "data_max": 57.03, - "data_min": -29.04, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "flatfault-a": { - "data_max": 50.86, - "data_min": -26.1, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "flatfault-b": { - "data_max": 50.28, - "data_min": -24.86, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "flatvel-a": { - "data_max": 52.77, - "data_min": -26.95, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "flatvel-b": { - "data_max": 56.05, - "data_min": -27.17, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "flatvel-tutorial": { - "data_max": 52.77, - "data_min": -26.95, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 120, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "style-a": { - "data_max": 48.93, - "data_min": -24.96, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - }, - "style-b": { - "data_max": 46.01, - "data_min": -23.76, - "dt": 0.001, - "dx": 10, - "f": 15, - "file_size": 500, - "gz": 10, - "label_max": 4500, - "label_min": 1500, - "n_grid": 70, - "nbc": 120, - "ng": 70, - "ns": 5, - "nt": 1000, - "sz": 10 - } -} +{ + "curvefault-a": { + "data_max": 52.32, + "data_min": -26.48, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, 
+ "nt": 1000, + "sz": 10 + }, + "curvefault-b": { + "data_max": 50.98, + "data_min": -24.93, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "curvevel-a": { + "data_max": 55.1, + "data_min": -27.11, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "curvevel-b": { + "data_max": 57.03, + "data_min": -29.04, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "flatfault-a": { + "data_max": 50.86, + "data_min": -26.1, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "flatfault-b": { + "data_max": 50.28, + "data_min": -24.86, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "flatvel-a": { + "data_max": 52.77, + "data_min": -26.95, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "flatvel-b": { + "data_max": 56.05, + "data_min": -27.17, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "flatvel-tutorial": { + "data_max": 52.77, + "data_min": -26.95, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 120, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "style-a": { + "data_max": 48.93, + "data_min": -24.96, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + }, + "style-b": { + "data_max": 46.01, + "data_min": -23.76, + "dt": 0.001, + "dx": 10, + "f": 15, + "file_size": 500, + "gz": 10, + "label_max": 4500, + "label_min": 1500, + "n_grid": 70, + "nbc": 120, + "ng": 70, + "ns": 5, + "nt": 1000, + "sz": 10 + } +} diff --git a/examples/xpinn/conf/xpinn.yaml b/examples/xpinn/conf/xpinn.yaml index ab5d79df7b..76711c11a7 100644 --- a/examples/xpinn/conf/xpinn.yaml +++ b/examples/xpinn/conf/xpinn.yaml @@ -1,123 +1,123 @@ -defaults: - - ppsci_default - - TRAIN: train_default - - TRAIN/ema: ema_default - - TRAIN/swa: swa_default - - EVAL: eval_default - - INFER: infer_default - - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default - - _self_ - -hydra: - run: - # dynamic output directory according to running time and override name - dir: outputs_xpinn/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} - job: - name: ${mode} # name of logfile - chdir: false # keep current working direcotry unchaned - callbacks: - init_callback: - _target_: ppsci.utils.callbacks.InitCallback - sweep: - # output directory for multirun - dir: ${hydra.run.dir} - subdir: ./ - -# general settings -mode: train # running mode: train/eval -seed: 134 -output_dir: 
${hydra:run.dir} -log_freq: 20 - -# model settings -MODEL: - num_boundary_points: 200 # Boundary points from subdomain 1 - num_residual1_points: 5000 # Residual points in three subdomain 1 - num_residual2_points: 1800 # Residual points in three subdomain 2 - num_residual3_points: 1200 # Residual points in three subdomain 3 - num_interface1: 100 # Interface points along the two interfaces - num_interface2: 100 - layers1: [2, 30, 30, 1] - layers2: [2, 20, 20, 20, 20, 1] - layers3: [2, 25, 25, 25, 1] - -# set training data file -DATA_FILE: "./data/XPINN_2D_PoissonEqn.mat" - -# training settings -TRAIN: - input_keys: - [ - "residual1_x", - "residual1_y", - "residual2_x", - "residual2_y", - "residual3_x", - "residual3_y", - "interface1_x", - "interface1_y", - "interface2_x", - "interface2_y", - "boundary_x", - "boundary_y", - ] - label_keys: ["boundary_u_exact"] - alias_dict: - { - "residual1_x": "x_f1", - "residual1_y": "y_f1", - "residual2_x": "x_f2", - "residual2_y": "y_f2", - "residual3_x": "x_f3", - "residual3_y": "y_f3", - "interface1_x": "xi1", - "interface1_y": "yi1", - "interface2_x": "xi2", - "interface2_y": "yi2", - "boundary_x": "xb", - "boundary_y": "yb", - "boundary_u_exact": "ub", - "residual_u_exact": "u_exact", - "residual2_u_exact": "u_exact2", - "residual3_u_exact": "u_exact3", - } - epochs: 501 - iters_per_epoch: 1 - save_freq: 50 - eval_during_train: true - eval_freq: 50 - learning_rate: 0.0008 - pretrained_model_path: null - checkpoint_path: null - -# evaluation settings -EVAL: - label_keys: - [ - "boundary_u_exact", - "residual_u_exact", - "residual2_u_exact", - "residual3_u_exact", - ] - alias_dict: - { - "residual1_x": "x_f1", - "residual1_y": "y_f1", - "residual2_x": "x_f2", - "residual2_y": "y_f2", - "residual3_x": "x_f3", - "residual3_y": "y_f3", - "interface1_x": "xi1", - "interface1_y": "yi1", - "interface2_x": "xi2", - "interface2_y": "yi2", - "boundary_x": "xb", - "boundary_y": "yb", - "boundary_u_exact": "ub", - "residual_u_exact": "u_exact", - "residual2_u_exact": "u_exact2", - "residual3_u_exact": "u_exact3", - } - batch_size: 1 - pretrained_model_path: null - eval_with_no_grad: false +defaults: + - ppsci_default + - TRAIN: train_default + - TRAIN/ema: ema_default + - TRAIN/swa: swa_default + - EVAL: eval_default + - INFER: infer_default + - hydra/job/config/override_dirname/exclude_keys: exclude_keys_default + - _self_ + +hydra: + run: + # dynamic output directory according to running time and override name + dir: outputs_xpinn/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + job: + name: ${mode} # name of logfile + chdir: false # keep current working direcotry unchaned + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: train # running mode: train/eval +seed: 134 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# model settings +MODEL: + num_boundary_points: 200 # Boundary points from subdomain 1 + num_residual1_points: 5000 # Residual points in three subdomain 1 + num_residual2_points: 1800 # Residual points in three subdomain 2 + num_residual3_points: 1200 # Residual points in three subdomain 3 + num_interface1: 100 # Interface points along the two interfaces + num_interface2: 100 + layers1: [2, 30, 30, 1] + layers2: [2, 20, 20, 20, 20, 1] + layers3: [2, 25, 25, 25, 1] + +# set training data file +DATA_FILE: "./data/XPINN_2D_PoissonEqn.mat" + +# training settings +TRAIN: + input_keys: + [ + "residual1_x", + 
"residual1_y", + "residual2_x", + "residual2_y", + "residual3_x", + "residual3_y", + "interface1_x", + "interface1_y", + "interface2_x", + "interface2_y", + "boundary_x", + "boundary_y", + ] + label_keys: ["boundary_u_exact"] + alias_dict: + { + "residual1_x": "x_f1", + "residual1_y": "y_f1", + "residual2_x": "x_f2", + "residual2_y": "y_f2", + "residual3_x": "x_f3", + "residual3_y": "y_f3", + "interface1_x": "xi1", + "interface1_y": "yi1", + "interface2_x": "xi2", + "interface2_y": "yi2", + "boundary_x": "xb", + "boundary_y": "yb", + "boundary_u_exact": "ub", + "residual_u_exact": "u_exact", + "residual2_u_exact": "u_exact2", + "residual3_u_exact": "u_exact3", + } + epochs: 501 + iters_per_epoch: 1 + save_freq: 50 + eval_during_train: true + eval_freq: 50 + learning_rate: 0.0008 + pretrained_model_path: null + checkpoint_path: null + +# evaluation settings +EVAL: + label_keys: + [ + "boundary_u_exact", + "residual_u_exact", + "residual2_u_exact", + "residual3_u_exact", + ] + alias_dict: + { + "residual1_x": "x_f1", + "residual1_y": "y_f1", + "residual2_x": "x_f2", + "residual2_y": "y_f2", + "residual3_x": "x_f3", + "residual3_y": "y_f3", + "interface1_x": "xi1", + "interface1_y": "yi1", + "interface2_x": "xi2", + "interface2_y": "yi2", + "boundary_x": "xb", + "boundary_y": "yb", + "boundary_u_exact": "ub", + "residual_u_exact": "u_exact", + "residual2_u_exact": "u_exact2", + "residual3_u_exact": "u_exact3", + } + batch_size: 1 + pretrained_model_path: null + eval_with_no_grad: false diff --git a/examples/xpinn/model.py b/examples/xpinn/model.py index 530e2d25a4..4624e6d64c 100644 --- a/examples/xpinn/model.py +++ b/examples/xpinn/model.py @@ -1,179 +1,179 @@ -from typing import Dict -from typing import List -from typing import Tuple - -import numpy as np -import paddle -from paddle import nn - -import ppsci -from ppsci.arch import base - - -class Model(base.Arch): - def __init__(self, layer_list: Tuple[List[int], List[int], List[int]]): - super().__init__() - # Initialize NNs - self.weights1, self.biases1, self.amplitudes1 = self.initialize_nn( - layer_list[0], "layers1" - ) - self.weights2, self.biases2, self.amplitudes2 = self.initialize_nn( - layer_list[1], "layers2" - ) - self.weights3, self.biases3, self.amplitudes3 = self.initialize_nn( - layer_list[2], "layers3" - ) - - def forward(self, input: Dict[str, paddle.Tensor]): - residual1_u = self.net_subdomain1(input["residual1_x"], input["residual1_y"]) - residual2_u = self.net_subdomain2(input["residual2_x"], input["residual2_y"]) - residual3_u = self.net_subdomain3(input["residual3_x"], input["residual3_y"]) - interface1_u_sub1 = self.net_subdomain1( - input["interface1_x"], input["interface1_y"] - ) - interface1_u_sub2 = self.net_subdomain2( - input["interface1_x"], input["interface1_y"] - ) - interface2_u_sub1 = self.net_subdomain1( - input["interface2_x"], input["interface2_y"] - ) - interface2_u_sub3 = self.net_subdomain3( - input["interface2_x"], input["interface2_y"] - ) - boundary_u = self.net_subdomain1(input["boundary_x"], input["boundary_y"]) - - return { - "residual1_x": input["residual1_x"], - "residual1_y": input["residual1_y"], - "residual2_x": input["residual2_x"], - "residual2_y": input["residual2_y"], - "residual3_x": input["residual3_x"], - "residual3_y": input["residual3_y"], - "interface1_x": input["interface1_x"], - "interface1_y": input["interface1_y"], - "interface2_x": input["interface2_x"], - "interface2_y": input["interface2_y"], - "residual1_u": residual1_u, - "residual2_u": residual2_u, - "residual3_u": 
residual3_u, - "interface1_u_sub1": interface1_u_sub1, - "interface1_u_sub2": interface1_u_sub2, - "interface2_u_sub1": interface2_u_sub1, - "interface2_u_sub3": interface2_u_sub3, - "boundary_u": boundary_u, - } - - def initialize_nn(self, layers: List[int], name_prefix: str): - # The weight used in neural_net - weights = [] - # The bias used in neural_net - biases = [] - # The amplitude used in neural_net - amplitudes = [] - num_layers = len(layers) - for l in range(0, num_layers - 1): - weight = self.create_parameter( - shape=[layers[l], layers[l + 1]], - dtype="float32", - default_initializer=self.w_init((layers[l], layers[l + 1])), - ) - bias = self.create_parameter( - shape=[1, layers[l + 1]], - dtype="float32", - is_bias=True, - default_initializer=nn.initializer.Constant(0.0), - ) - amplitude = self.create_parameter( - shape=[1], - dtype="float32", - is_bias=True, - default_initializer=nn.initializer.Constant(0.05), - ) - - self.add_parameter(name_prefix + "_w_" + str(l), weight) - self.add_parameter(name_prefix + "_b_" + str(l), bias) - self.add_parameter(name_prefix + "_a_" + str(l), amplitude) - weights.append(weight) - biases.append(bias) - amplitudes.append(amplitude) - return weights, biases, amplitudes - - def w_init(self, size: Tuple[int, int]): - in_dim = size[0] - out_dim = size[1] - xavier_stddev = np.sqrt(2 / (in_dim + out_dim)) - param = paddle.empty(size, "float64") - param = ppsci.utils.initializer.trunc_normal_(param, 0.0, xavier_stddev) - return nn.initializer.Assign(param) - - def neural_net_tanh( - self, - x: List[paddle.Tensor], - weights: List[paddle.Tensor], - biases: List[paddle.Tensor], - amplitudes: List[paddle.Tensor], - ): - num_layers = len(weights) + 1 - - h = x - for l in range(0, num_layers - 2): - w = weights[l] - b = biases[l] - h = paddle.tanh(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) - w = weights[-1] - b = biases[-1] - y = paddle.add(paddle.matmul(h, w), b) - return y - - def neural_net_sin( - self, - x: List[paddle.Tensor], - weights: List[paddle.Tensor], - biases: List[paddle.Tensor], - amplitudes: List[paddle.Tensor], - ): - num_layers = len(weights) + 1 - - h = x - for l in range(0, num_layers - 2): - w = weights[l] - b = biases[l] - h = paddle.sin(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) - w = weights[-1] - b = biases[-1] - y = paddle.add(paddle.matmul(h, w), b) - return y - - def neural_net_cos( - self, - x: List[paddle.Tensor], - weights: List[paddle.Tensor], - biases: List[paddle.Tensor], - amplitudes: List[paddle.Tensor], - ): - num_layers = len(weights) + 1 - - h = x - for l in range(0, num_layers - 2): - w = weights[l] - b = biases[l] - h = paddle.cos(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) - w = weights[-1] - b = biases[-1] - y = paddle.add(paddle.matmul(h, w), b) - return y - - def net_subdomain1(self, x: paddle.Tensor, y: paddle.Tensor): - return self.neural_net_tanh( - paddle.concat([x, y], 1), self.weights1, self.biases1, self.amplitudes1 - ) - - def net_subdomain2(self, x: paddle.Tensor, y: paddle.Tensor): - return self.neural_net_sin( - paddle.concat([x, y], 1), self.weights2, self.biases2, self.amplitudes2 - ) - - def net_subdomain3(self, x, y): - return self.neural_net_cos( - paddle.concat([x, y], 1), self.weights3, self.biases3, self.amplitudes3 - ) +from typing import Dict +from typing import List +from typing import Tuple + +import numpy as np +import paddle +from paddle import nn + +import ppsci +from ppsci.arch import base + + +class Model(base.Arch): + def __init__(self, 
layer_list: Tuple[List[int], List[int], List[int]]): + super().__init__() + # Initialize NNs + self.weights1, self.biases1, self.amplitudes1 = self.initialize_nn( + layer_list[0], "layers1" + ) + self.weights2, self.biases2, self.amplitudes2 = self.initialize_nn( + layer_list[1], "layers2" + ) + self.weights3, self.biases3, self.amplitudes3 = self.initialize_nn( + layer_list[2], "layers3" + ) + + def forward(self, input: Dict[str, paddle.Tensor]): + residual1_u = self.net_subdomain1(input["residual1_x"], input["residual1_y"]) + residual2_u = self.net_subdomain2(input["residual2_x"], input["residual2_y"]) + residual3_u = self.net_subdomain3(input["residual3_x"], input["residual3_y"]) + interface1_u_sub1 = self.net_subdomain1( + input["interface1_x"], input["interface1_y"] + ) + interface1_u_sub2 = self.net_subdomain2( + input["interface1_x"], input["interface1_y"] + ) + interface2_u_sub1 = self.net_subdomain1( + input["interface2_x"], input["interface2_y"] + ) + interface2_u_sub3 = self.net_subdomain3( + input["interface2_x"], input["interface2_y"] + ) + boundary_u = self.net_subdomain1(input["boundary_x"], input["boundary_y"]) + + return { + "residual1_x": input["residual1_x"], + "residual1_y": input["residual1_y"], + "residual2_x": input["residual2_x"], + "residual2_y": input["residual2_y"], + "residual3_x": input["residual3_x"], + "residual3_y": input["residual3_y"], + "interface1_x": input["interface1_x"], + "interface1_y": input["interface1_y"], + "interface2_x": input["interface2_x"], + "interface2_y": input["interface2_y"], + "residual1_u": residual1_u, + "residual2_u": residual2_u, + "residual3_u": residual3_u, + "interface1_u_sub1": interface1_u_sub1, + "interface1_u_sub2": interface1_u_sub2, + "interface2_u_sub1": interface2_u_sub1, + "interface2_u_sub3": interface2_u_sub3, + "boundary_u": boundary_u, + } + + def initialize_nn(self, layers: List[int], name_prefix: str): + # The weight used in neural_net + weights = [] + # The bias used in neural_net + biases = [] + # The amplitude used in neural_net + amplitudes = [] + num_layers = len(layers) + for l in range(0, num_layers - 1): + weight = self.create_parameter( + shape=[layers[l], layers[l + 1]], + dtype="float32", + default_initializer=self.w_init((layers[l], layers[l + 1])), + ) + bias = self.create_parameter( + shape=[1, layers[l + 1]], + dtype="float32", + is_bias=True, + default_initializer=nn.initializer.Constant(0.0), + ) + amplitude = self.create_parameter( + shape=[1], + dtype="float32", + is_bias=True, + default_initializer=nn.initializer.Constant(0.05), + ) + + self.add_parameter(name_prefix + "_w_" + str(l), weight) + self.add_parameter(name_prefix + "_b_" + str(l), bias) + self.add_parameter(name_prefix + "_a_" + str(l), amplitude) + weights.append(weight) + biases.append(bias) + amplitudes.append(amplitude) + return weights, biases, amplitudes + + def w_init(self, size: Tuple[int, int]): + in_dim = size[0] + out_dim = size[1] + xavier_stddev = np.sqrt(2 / (in_dim + out_dim)) + param = paddle.empty(size, "float64") + param = ppsci.utils.initializer.trunc_normal_(param, 0.0, xavier_stddev) + return nn.initializer.Assign(param) + + def neural_net_tanh( + self, + x: List[paddle.Tensor], + weights: List[paddle.Tensor], + biases: List[paddle.Tensor], + amplitudes: List[paddle.Tensor], + ): + num_layers = len(weights) + 1 + + h = x + for l in range(0, num_layers - 2): + w = weights[l] + b = biases[l] + h = paddle.tanh(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) + w = weights[-1] + b = biases[-1] + y = 
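# --- editor's note: numpy stand-in, not part of the original diff --------------------
# w_init above draws each weight matrix from a truncated normal whose standard
# deviation follows the Glorot/Xavier rule sqrt(2 / (fan_in + fan_out)).  A
# numpy-only equivalent, handy for eyeballing the scale of the initial weights
# (the +/- 2 sigma truncation here is illustrative; the cutoff defaults of
# ppsci.utils.initializer.trunc_normal_ may differ):
import numpy as np


def xavier_trunc_normal(fan_in: int, fan_out: int, rng=None) -> np.ndarray:
    rng = np.random.default_rng() if rng is None else rng
    std = np.sqrt(2.0 / (fan_in + fan_out))
    w = rng.normal(0.0, std, size=(fan_in, fan_out))
    # re-sample entries outside +/- 2 std until all values fall inside the band
    mask = np.abs(w) > 2.0 * std
    while mask.any():
        w[mask] = rng.normal(0.0, std, size=mask.sum())
        mask = np.abs(w) > 2.0 * std
    return w


# usage sketch: w0 = xavier_trunc_normal(2, 20)  # first hidden layer of sub-net 1
# --- end editor's note -----------------------------------------------------------------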
paddle.add(paddle.matmul(h, w), b) + return y + + def neural_net_sin( + self, + x: List[paddle.Tensor], + weights: List[paddle.Tensor], + biases: List[paddle.Tensor], + amplitudes: List[paddle.Tensor], + ): + num_layers = len(weights) + 1 + + h = x + for l in range(0, num_layers - 2): + w = weights[l] + b = biases[l] + h = paddle.sin(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) + w = weights[-1] + b = biases[-1] + y = paddle.add(paddle.matmul(h, w), b) + return y + + def neural_net_cos( + self, + x: List[paddle.Tensor], + weights: List[paddle.Tensor], + biases: List[paddle.Tensor], + amplitudes: List[paddle.Tensor], + ): + num_layers = len(weights) + 1 + + h = x + for l in range(0, num_layers - 2): + w = weights[l] + b = biases[l] + h = paddle.cos(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) + w = weights[-1] + b = biases[-1] + y = paddle.add(paddle.matmul(h, w), b) + return y + + def net_subdomain1(self, x: paddle.Tensor, y: paddle.Tensor): + return self.neural_net_tanh( + paddle.concat([x, y], 1), self.weights1, self.biases1, self.amplitudes1 + ) + + def net_subdomain2(self, x: paddle.Tensor, y: paddle.Tensor): + return self.neural_net_sin( + paddle.concat([x, y], 1), self.weights2, self.biases2, self.amplitudes2 + ) + + def net_subdomain3(self, x, y): + return self.neural_net_cos( + paddle.concat([x, y], 1), self.weights3, self.biases3, self.amplitudes3 + ) diff --git a/examples/xpinn/plotting.py b/examples/xpinn/plotting.py index 7d02e4e758..700fbfdbe6 100644 --- a/examples/xpinn/plotting.py +++ b/examples/xpinn/plotting.py @@ -1,202 +1,202 @@ -import os - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np -import paddle -from matplotlib import gridspec -from matplotlib import patches -from matplotlib import tri - - -def figsize(scale: float, nplots: float = 1): - fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth - inches_per_pt = 1.0 / 72.27 # Convert pt to inch - golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this) - fig_width = fig_width_pt * inches_per_pt * scale # width in inches - fig_height = nplots * fig_width * golden_mean # height in inches - fig_size = [fig_width, fig_height] - return fig_size - - -def newfig(width: float, nplots: float = 1): - fig = plt.figure(figsize=figsize(width, nplots)) - ax = fig.add_subplot(111) - return fig, ax - - -def savefig(filename: str, crop: bool = True): - if crop: - plt.savefig(f"{filename}.pdf", bbox_inches="tight", pad_inches=0) - plt.savefig(f"{filename}.eps", bbox_inches="tight", pad_inches=0) - else: - plt.savefig(f"{filename}.pdf") - plt.savefig(f"{filename}.eps") - - -def log_image( - residual1_x: paddle.Tensor, - residual1_y: paddle.Tensor, - residual2_x: paddle.Tensor, - residual2_y: paddle.Tensor, - residual3_x: paddle.Tensor, - residual3_y: paddle.Tensor, - interface1_x: paddle.Tensor, - interface1_y: paddle.Tensor, - interface2_x: paddle.Tensor, - interface2_y: paddle.Tensor, - boundary_x: paddle.Tensor, - boundary_y: paddle.Tensor, - residual_u_pred: paddle.Tensor, - residual_u_exact: paddle.Tensor, -): - save_path = "./result" - os.makedirs(save_path, exist_ok=True) - - interface1_x = interface1_x.numpy() - interface1_y = interface1_y.numpy() - interface2_x = interface2_x.numpy() - interface2_y = interface2_y.numpy() - x_tot = np.concatenate([residual1_x, residual2_x, residual3_x]) - y_tot = np.concatenate([residual1_y, residual2_y, residual3_y]) - - aa1 = np.array([[np.squeeze(boundary_x[-1]), np.squeeze(boundary_y[-1])]]) - aa2 = 
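# --- editor's note: usage sketch; the layer sizes are placeholders -------------------
# The Model defined in examples/xpinn/model.py above consumes a dict of (n, 1) column
# tensors and returns the echoed inputs plus one prediction per sub-domain.  A quick
# smoke test; [2, 20, 20, 1] is an assumed layer list, the real sizes come from
# conf/xpinn.yaml (MODEL.layers1/2/3):
import paddle

from model import Model  # the class shown above


def col(n: int) -> paddle.Tensor:
    return paddle.rand([n, 1])


net = Model(([2, 20, 20, 1], [2, 20, 20, 1], [2, 20, 20, 1]))
out = net(
    {
        "residual1_x": col(16), "residual1_y": col(16),
        "residual2_x": col(16), "residual2_y": col(16),
        "residual3_x": col(16), "residual3_y": col(16),
        "interface1_x": col(8), "interface1_y": col(8),
        "interface2_x": col(8), "interface2_y": col(8),
        "boundary_x": col(8), "boundary_y": col(8),
    }
)
print(out["residual1_u"].shape)  # [16, 1]
# --- end editor's note -----------------------------------------------------------------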
np.array( - [ - [1.8, np.squeeze(boundary_y[-1])], - [+1.8, -1.7], - [-1.6, -1.7], - [-1.6, 1.55], - [1.8, 1.55], - [1.8, np.squeeze(boundary_y[-1])], - ] - ) - x_domain1 = np.squeeze(boundary_x.flatten()[:, None]) - y_domain1 = np.squeeze(boundary_y.flatten()[:, None]) - aa3 = np.array([x_domain1, y_domain1]).T - xx = np.vstack((aa3, aa2, aa1)) - triang_total = tri.Triangulation(x_tot.flatten(), y_tot.flatten()) - - fig, ax = newfig(1.0, 1.1) - gridspec.GridSpec(1, 1) - ax = plt.subplot2grid((1, 1), (0, 0)) - tcf = ax.tricontourf(triang_total, np.squeeze(residual_u_exact), 100, cmap="jet") - ax.add_patch( - patches.Polygon(xx, closed=True, fill=True, facecolor="w", edgecolor="w") - ) - tcbar = fig.colorbar(tcf) - tcbar.ax.tick_params(labelsize=28) - ax.set_xlabel("$x$", fontsize=32) - ax.set_ylabel("$y$", fontsize=32) - ax.set_title("$u$ (Exact)", fontsize=34) - ax.tick_params(axis="x", labelsize=28) - ax.tick_params(axis="y", labelsize=28) - plt.plot( - interface1_x, - interface1_y, - "w-", - markersize=2, - label="Interface Pts", - ) - plt.plot( - interface2_x, - interface2_y, - "w-", - markersize=2, - label="Interface Pts", - ) - fig.set_size_inches(w=12, h=9) - savefig(os.path.join(save_path, "XPINN_PoissonEq_ExSol")) - plt.show() - - fig, ax = newfig(1.0, 1.1) - gridspec.GridSpec(1, 1) - ax = plt.subplot2grid((1, 1), (0, 0)) - tcf = ax.tricontourf(triang_total, residual_u_pred.flatten(), 100, cmap="jet") - ax.add_patch( - patches.Polygon(xx, closed=True, fill=True, facecolor="w", edgecolor="w") - ) - tcbar = fig.colorbar(tcf) - tcbar.ax.tick_params(labelsize=28) - ax.set_xlabel("$x$", fontsize=32) - ax.set_ylabel("$y$", fontsize=32) - ax.set_title("$u$ (Predicted)", fontsize=34) - ax.tick_params(axis="x", labelsize=28) - ax.tick_params(axis="y", labelsize=28) - plt.plot( - interface1_x, - interface1_y, - "w-", - markersize=2, - label="Interface Pts", - ) - plt.plot( - interface2_x, - interface2_y, - "w-", - markersize=2, - label="Interface Pts", - ) - fig.set_size_inches(w=12, h=9) - savefig(os.path.join(save_path, "XPINN_PoissonEq_Sol")) - plt.show() - - fig, ax = newfig(1.0, 1.1) - gridspec.GridSpec(1, 1) - ax = plt.subplot2grid((1, 1), (0, 0)) - tcf = ax.tricontourf( - triang_total, - paddle.abs(residual_u_exact.flatten() - residual_u_pred.flatten()), - 100, - cmap="jet", - ) - ax.add_patch( - patches.Polygon(xx, closed=True, fill=True, facecolor="w", edgecolor="w") - ) - tcbar = fig.colorbar(tcf) - tcbar.ax.tick_params(labelsize=28) - ax.set_xlabel("$x$", fontsize=32) - ax.set_ylabel("$y$", fontsize=32) - ax.set_title("Point-wise Error", fontsize=34) - ax.tick_params(axis="x", labelsize=28) - ax.tick_params(axis="y", labelsize=28) - plt.plot( - interface1_x, - interface1_y, - "w-", - markersize=2, - label="Interface Pts", - ) - plt.plot( - interface2_x, - interface2_y, - "w-", - markersize=2, - label="Interface Pts", - ) - fig.set_size_inches(w=12, h=9) - savefig(os.path.join(save_path, "XPINN_PoissonEq_Err")) - plt.show() - - -PGF_WITH_LATEX = { # setup matplotlib to use latex for output - "pgf.texsystem": "pdflatex", # change this if using xetex or latex - # "text.usetex": True, # use LaTeX to write all text - # "font.family": "serif", - # "font.serif": [], # blank entries should cause plots to inherit fonts from the document - # "font.sans-serif": [], - # "font.monospace": [], - "axes.labelsize": 10, # LaTeX default is 10pt font. 
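# --- editor's note: minimal illustration, not part of the original diff ---------------
# log_image above triangulates the scattered collocation points with
# tri.Triangulation, draws the field with tricontourf, then adds a white Polygon
# patch covering the region outside the physical domain so no spurious contours show
# there.  The core pattern, reduced to a toy example:
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import patches, tri

rng = np.random.default_rng(0)
x = rng.uniform(-1.0, 1.0, 500)
y = rng.uniform(-1.0, 1.0, 500)

fig, ax = plt.subplots()
ax.tricontourf(tri.Triangulation(x, y), x**2 + y**2, 100, cmap="jet")
# white patch over the upper-right quadrant, standing in for the complement
# of the physical domain
mask_poly = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
ax.add_patch(patches.Polygon(mask_poly, closed=True, facecolor="w", edgecolor="w"))
# --- end editor's note -------------------------------------------------------------------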
- "font.size": 10, - "legend.fontsize": 8, # Make the legend/label fonts a little smaller - "xtick.labelsize": 8, - "ytick.labelsize": 8, - "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth - "pgf.preamble": "\n".join( - [ - r"\usepackage[utf8x]{inputenc}", # use utf8 fonts because your computer can handle it. - r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble - ] - ), -} -mpl.rcParams.update(PGF_WITH_LATEX) +import os + +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import paddle +from matplotlib import gridspec +from matplotlib import patches +from matplotlib import tri + + +def figsize(scale: float, nplots: float = 1): + fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth + inches_per_pt = 1.0 / 72.27 # Convert pt to inch + golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this) + fig_width = fig_width_pt * inches_per_pt * scale # width in inches + fig_height = nplots * fig_width * golden_mean # height in inches + fig_size = [fig_width, fig_height] + return fig_size + + +def newfig(width: float, nplots: float = 1): + fig = plt.figure(figsize=figsize(width, nplots)) + ax = fig.add_subplot(111) + return fig, ax + + +def savefig(filename: str, crop: bool = True): + if crop: + plt.savefig(f"{filename}.pdf", bbox_inches="tight", pad_inches=0) + plt.savefig(f"{filename}.eps", bbox_inches="tight", pad_inches=0) + else: + plt.savefig(f"{filename}.pdf") + plt.savefig(f"{filename}.eps") + + +def log_image( + residual1_x: paddle.Tensor, + residual1_y: paddle.Tensor, + residual2_x: paddle.Tensor, + residual2_y: paddle.Tensor, + residual3_x: paddle.Tensor, + residual3_y: paddle.Tensor, + interface1_x: paddle.Tensor, + interface1_y: paddle.Tensor, + interface2_x: paddle.Tensor, + interface2_y: paddle.Tensor, + boundary_x: paddle.Tensor, + boundary_y: paddle.Tensor, + residual_u_pred: paddle.Tensor, + residual_u_exact: paddle.Tensor, +): + save_path = "./result" + os.makedirs(save_path, exist_ok=True) + + interface1_x = interface1_x.numpy() + interface1_y = interface1_y.numpy() + interface2_x = interface2_x.numpy() + interface2_y = interface2_y.numpy() + x_tot = np.concatenate([residual1_x, residual2_x, residual3_x]) + y_tot = np.concatenate([residual1_y, residual2_y, residual3_y]) + + aa1 = np.array([[np.squeeze(boundary_x[-1]), np.squeeze(boundary_y[-1])]]) + aa2 = np.array( + [ + [1.8, np.squeeze(boundary_y[-1])], + [+1.8, -1.7], + [-1.6, -1.7], + [-1.6, 1.55], + [1.8, 1.55], + [1.8, np.squeeze(boundary_y[-1])], + ] + ) + x_domain1 = np.squeeze(boundary_x.flatten()[:, None]) + y_domain1 = np.squeeze(boundary_y.flatten()[:, None]) + aa3 = np.array([x_domain1, y_domain1]).T + xx = np.vstack((aa3, aa2, aa1)) + triang_total = tri.Triangulation(x_tot.flatten(), y_tot.flatten()) + + fig, ax = newfig(1.0, 1.1) + gridspec.GridSpec(1, 1) + ax = plt.subplot2grid((1, 1), (0, 0)) + tcf = ax.tricontourf(triang_total, np.squeeze(residual_u_exact), 100, cmap="jet") + ax.add_patch( + patches.Polygon(xx, closed=True, fill=True, facecolor="w", edgecolor="w") + ) + tcbar = fig.colorbar(tcf) + tcbar.ax.tick_params(labelsize=28) + ax.set_xlabel("$x$", fontsize=32) + ax.set_ylabel("$y$", fontsize=32) + ax.set_title("$u$ (Exact)", fontsize=34) + ax.tick_params(axis="x", labelsize=28) + ax.tick_params(axis="y", labelsize=28) + plt.plot( + interface1_x, + interface1_y, + "w-", + markersize=2, + label="Interface Pts", + ) + plt.plot( + interface2_x, + interface2_y, + "w-", + markersize=2, + label="Interface 
Pts", + ) + fig.set_size_inches(w=12, h=9) + savefig(os.path.join(save_path, "XPINN_PoissonEq_ExSol")) + plt.show() + + fig, ax = newfig(1.0, 1.1) + gridspec.GridSpec(1, 1) + ax = plt.subplot2grid((1, 1), (0, 0)) + tcf = ax.tricontourf(triang_total, residual_u_pred.flatten(), 100, cmap="jet") + ax.add_patch( + patches.Polygon(xx, closed=True, fill=True, facecolor="w", edgecolor="w") + ) + tcbar = fig.colorbar(tcf) + tcbar.ax.tick_params(labelsize=28) + ax.set_xlabel("$x$", fontsize=32) + ax.set_ylabel("$y$", fontsize=32) + ax.set_title("$u$ (Predicted)", fontsize=34) + ax.tick_params(axis="x", labelsize=28) + ax.tick_params(axis="y", labelsize=28) + plt.plot( + interface1_x, + interface1_y, + "w-", + markersize=2, + label="Interface Pts", + ) + plt.plot( + interface2_x, + interface2_y, + "w-", + markersize=2, + label="Interface Pts", + ) + fig.set_size_inches(w=12, h=9) + savefig(os.path.join(save_path, "XPINN_PoissonEq_Sol")) + plt.show() + + fig, ax = newfig(1.0, 1.1) + gridspec.GridSpec(1, 1) + ax = plt.subplot2grid((1, 1), (0, 0)) + tcf = ax.tricontourf( + triang_total, + paddle.abs(residual_u_exact.flatten() - residual_u_pred.flatten()), + 100, + cmap="jet", + ) + ax.add_patch( + patches.Polygon(xx, closed=True, fill=True, facecolor="w", edgecolor="w") + ) + tcbar = fig.colorbar(tcf) + tcbar.ax.tick_params(labelsize=28) + ax.set_xlabel("$x$", fontsize=32) + ax.set_ylabel("$y$", fontsize=32) + ax.set_title("Point-wise Error", fontsize=34) + ax.tick_params(axis="x", labelsize=28) + ax.tick_params(axis="y", labelsize=28) + plt.plot( + interface1_x, + interface1_y, + "w-", + markersize=2, + label="Interface Pts", + ) + plt.plot( + interface2_x, + interface2_y, + "w-", + markersize=2, + label="Interface Pts", + ) + fig.set_size_inches(w=12, h=9) + savefig(os.path.join(save_path, "XPINN_PoissonEq_Err")) + plt.show() + + +PGF_WITH_LATEX = { # setup matplotlib to use latex for output + "pgf.texsystem": "pdflatex", # change this if using xetex or latex + # "text.usetex": True, # use LaTeX to write all text + # "font.family": "serif", + # "font.serif": [], # blank entries should cause plots to inherit fonts from the document + # "font.sans-serif": [], + # "font.monospace": [], + "axes.labelsize": 10, # LaTeX default is 10pt font. + "font.size": 10, + "legend.fontsize": 8, # Make the legend/label fonts a little smaller + "xtick.labelsize": 8, + "ytick.labelsize": 8, + "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth + "pgf.preamble": "\n".join( + [ + r"\usepackage[utf8x]{inputenc}", # use utf8 fonts because your computer can handle it. + r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble + ] + ), +} +mpl.rcParams.update(PGF_WITH_LATEX) diff --git a/examples/xpinn/xpinn.py b/examples/xpinn/xpinn.py index 4788dd18f6..5539f7fb69 100644 --- a/examples/xpinn/xpinn.py +++ b/examples/xpinn/xpinn.py @@ -1,459 +1,459 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import Callable -from typing import Dict -from typing import List -from typing import Tuple - -import hydra -import model -import numpy as np -import paddle -import plotting -from omegaconf import DictConfig - -import ppsci - -# For the use of the second derivative: paddle.cos -paddle.framework.core.set_prim_eager_enabled(True) - - -def _xpinn_loss( - training_pres: List[List[paddle.Tensor]] = None, - training_exacts: List[paddle.Tensor] = None, - training_weight: float = 1, - residual_inputs: List[List[paddle.Tensor]] = None, - residual_pres: List[paddle.Tensor] = None, - residual_weight: float = 1, - interface_inputs: List[List[paddle.Tensor]] = None, - interface_pres: List[paddle.Tensor] = None, - interface_weight: float = 1, - interface_neigh_pres: List[List[paddle.Tensor]] = None, - interface_neigh_weight: float = 1, - residual_func: Callable = lambda x, y: x - y, -) -> float: - """XPINNs loss function for subdomain - - `loss = W_u_q * MSE_u_q + W_F_q * MSE_F_q + W_I_q * MSE_avg_q + W_I_F_q * MSE_R` - - `W_u_q * MSE_u_q` is data mismatch item. - `W_F_q * MSE_F_q` is residual item. - `W_I_q * MSE_avg_q` is interface item. - `W_I_F_q * MSE_R` is interface residual item. - - Args: - training_pres (List[List[paddle.Tensor]], optional): the prediction result for training points input. Defaults to None. - training_exacts (List[paddle.Tensor], optional): the exact result for training points input. Defaults to None. - training_weight (float, optional): the weight of data mismatch item. Defaults to 1. - residual_inputs (List[List[paddle.Tensor]], optional): residual points input. Defaults to None. - residual_pres (List[paddle.Tensor], optional): the prediction result for residual points input. Defaults to None. - residual_weight (float, optional): the weight of residual item. Defaults to 1. - interface_inputs (List[List[paddle.Tensor]], optional): the prediction result for interface points input. Defaults to None. - interface_pres (List[paddle.Tensor], optional): the prediction result for interface points input. Defaults to None. - interface_weight (float, optional): the weight of iinterface item. Defaults to 1. - interface_neigh_pres (List[List[paddle.Tensor]], optional): the prediction result of neighbouring subdomain model for interface points input. Defaults to None. - interface_neigh_weight (float, optional): the weight of interface residual term. Defaults to 1. - residual_func (Callable, optional): residual calculation function. Defaults to lambda x,y : x - y. 
- """ - - def _get_grad(outputs: paddle.Tensor, inputs: paddle.Tensor) -> paddle.Tensor: - grad = paddle.grad(outputs, inputs, retain_graph=True, create_graph=True) - return grad[0] - - def _get_second_derivatives( - outputs_list: List[paddle.Tensor], - inputs_list: List[List[paddle.Tensor]], - ) -> Tuple[List[List[paddle.Tensor]], List[List[paddle.Tensor]]]: - d1_list = [ - [_get_grad(_out, _in) for _in in _ins] - for _out, _ins in zip(outputs_list, inputs_list) - ] - d2_list = [ - [_get_grad(_d1, _in) for _d1, _in in zip(d1s_, _ins)] - for d1s_, _ins in zip(d1_list, inputs_list) - ] - return d2_list - - residual_u_d2_list = _get_second_derivatives(residual_pres, residual_inputs) - interface_u_d2_list = _get_second_derivatives(interface_pres, interface_inputs) - interface_neigh_u_d2_list = _get_second_derivatives( - interface_neigh_pres, interface_inputs - ) - - MSE_u_q = 0 - - if training_pres is not None: - for _pre, _exact in zip(training_pres, training_exacts): - MSE_u_q += training_weight * paddle.mean(paddle.square(_pre - _exact)) - - MSE_F_q = 0 - - if residual_inputs is not None: - for _ins, _d2 in zip(residual_inputs, residual_u_d2_list): - MSE_F_q += residual_weight * paddle.mean( - paddle.square(residual_func(_d2, _ins)) - ) - - MSE_avg_q = 0 - MSE_R = 0 - - if interface_inputs is not None: - for _ins, _pre, _n_pres in zip( - interface_inputs, interface_pres, interface_neigh_pres - ): - pre_list = [_pre] + _n_pres - pre_avg = paddle.add_n(pre_list) / len(pre_list) - MSE_avg_q += interface_weight * paddle.mean(paddle.square(_pre - pre_avg)) - - for _ins, _d2, _n_d2 in zip( - interface_inputs, interface_u_d2_list, interface_neigh_u_d2_list - ): - MSE_R += interface_neigh_weight * paddle.mean( - paddle.square(residual_func(_d2, _ins) - residual_func(_n_d2, _ins)) - ) - - return MSE_u_q + MSE_F_q + MSE_avg_q + MSE_R - - -def loss_fun( - output_dict: Dict[str, paddle.Tensor], - label_dict: Dict[str, paddle.Tensor], - *args, -) -> float: - def residual_func(output_der: paddle.Tensor, input: paddle.Tensor) -> paddle.Tensor: - return paddle.add_n(output_der) - paddle.add_n( - [paddle.exp(_in) for _in in input] - ) - - # subdomain 1 - loss1 = _xpinn_loss( - training_pres=[output_dict["boundary_u"]], - training_exacts=[label_dict["boundary_u_exact"]], - training_weight=20, - residual_inputs=[[output_dict["residual1_x"], output_dict["residual1_y"]]], - residual_pres=[output_dict["residual1_u"]], - residual_weight=1, - interface_inputs=[ - [output_dict["interface1_x"], output_dict["interface1_y"]], - [output_dict["interface2_x"], output_dict["interface2_y"]], - ], - interface_pres=[ - output_dict["interface1_u_sub1"], - output_dict["interface2_u_sub1"], - ], - interface_weight=20, - interface_neigh_pres=[ - [output_dict["interface1_u_sub2"]], - [output_dict["interface2_u_sub3"]], - ], - interface_neigh_weight=1, - residual_func=residual_func, - ) - - # subdomain 2 - loss2 = _xpinn_loss( - residual_inputs=[[output_dict["residual2_x"], output_dict["residual2_y"]]], - residual_pres=[output_dict["residual2_u"]], - residual_weight=1, - interface_inputs=[[output_dict["interface1_x"], output_dict["interface1_y"]]], - interface_pres=[output_dict["interface1_u_sub1"]], - interface_weight=20, - interface_neigh_pres=[[output_dict["interface1_u_sub2"]]], - interface_neigh_weight=1, - residual_func=residual_func, - ) - - # subdomain 3 - loss3 = _xpinn_loss( - residual_inputs=[[output_dict["residual3_x"], output_dict["residual3_y"]]], - residual_pres=[output_dict["residual3_u"]], - residual_weight=1, - 
interface_inputs=[[output_dict["interface2_x"], output_dict["interface2_y"]]], - interface_pres=[output_dict["interface2_u_sub1"]], - interface_weight=20, - interface_neigh_pres=[[output_dict["interface2_u_sub3"]]], - interface_neigh_weight=1, - residual_func=residual_func, - ) - - return {"residuals": loss1 + loss2 + loss3} - - -def eval_l2_rel_func( - output_dict: Dict[str, paddle.Tensor], - label_dict: Dict[str, paddle.Tensor], - *args, -) -> Dict[str, paddle.Tensor]: - u_pred = paddle.concat( - [ - output_dict["residual1_u"], - output_dict["residual2_u"], - output_dict["residual3_u"], - ] - ) - - # the shape of label_dict["residual_u_exact"] is [22387, 1], and be cut into [18211, 1] `_eval_by_dataset`(ppsci/solver/eval.py). - u_exact = paddle.concat( - [ - label_dict["residual_u_exact"], - label_dict["residual2_u_exact"], - label_dict["residual3_u_exact"], - ] - ) - - error_total = paddle.linalg.norm( - u_exact.flatten() - u_pred.flatten(), 2 - ) / paddle.linalg.norm(u_exact.flatten(), 2) - return {"l2_error": error_total} - - -def train(cfg: DictConfig): - # set training dataset transformation - def train_dataset_transform_func( - _input: Dict[str, np.ndarray], - _label: Dict[str, np.ndarray], - weight_: Dict[str, np.ndarray], - ) -> Dict[str, np.ndarray]: - # Randomly select the residual points from sub-domains - id_x1 = np.random.choice( - _input["residual1_x"].shape[0], - cfg.MODEL.num_residual1_points, - replace=False, - ) - _input["residual1_x"] = _input["residual1_x"][id_x1, :] - _input["residual1_y"] = _input["residual1_y"][id_x1, :] - - id_x2 = np.random.choice( - _input["residual2_x"].shape[0], - cfg.MODEL.num_residual2_points, - replace=False, - ) - _input["residual2_x"] = _input["residual2_x"][id_x2, :] - _input["residual2_y"] = _input["residual2_y"][id_x2, :] - - id_x3 = np.random.choice( - _input["residual3_x"].shape[0], - cfg.MODEL.num_residual3_points, - replace=False, - ) - _input["residual3_x"] = _input["residual3_x"][id_x3, :] - _input["residual3_y"] = _input["residual3_y"][id_x3, :] - - # Randomly select boundary points - id_x4 = np.random.choice( - _input["boundary_x"].shape[0], cfg.MODEL.num_boundary_points, replace=False - ) - _input["boundary_x"] = _input["boundary_x"][id_x4, :] - _input["boundary_y"] = _input["boundary_y"][id_x4, :] - _label["boundary_u_exact"] = _label["boundary_u_exact"][id_x4, :] - - # Randomly select the interface points along two interfaces - id_xi1 = np.random.choice( - _input["interface1_x"].shape[0], cfg.MODEL.num_interface1, replace=False - ) - _input["interface1_x"] = _input["interface1_x"][id_xi1, :] - _input["interface1_y"] = _input["interface1_y"][id_xi1, :] - - id_xi2 = np.random.choice( - _input["interface2_x"].shape[0], cfg.MODEL.num_interface2, replace=False - ) - _input["interface2_x"] = _input["interface2_x"][id_xi2, :] - _input["interface2_y"] = _input["interface2_y"][id_xi2, :] - - return _input, _label, weight_ - - # set dataloader config - train_dataloader_cfg = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATA_FILE, - "input_keys": cfg.TRAIN.input_keys, - "label_keys": cfg.TRAIN.label_keys, - "alias_dict": cfg.TRAIN.alias_dict, - "transforms": ( - { - "FunctionalTransform": { - "transform_func": train_dataset_transform_func, - }, - }, - ), - } - } - - layer_list = ( - cfg.MODEL.layers1, - cfg.MODEL.layers2, - cfg.MODEL.layers3, - ) - - # set model - custom_model = model.Model(layer_list) - - # set constraint - sup_constraint = ppsci.constraint.SupervisedConstraint( - train_dataloader_cfg, - 
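# --- editor's note: sketch, not part of the original diff -----------------------------------
# eval_l2_rel_func above reports the global relative L2 error
#     || u_exact - u_pred ||_2 / || u_exact ||_2
# over the concatenated residual points of the three sub-domains.  A numpy helper with the
# same meaning, useful for offline checks against saved predictions:
import numpy as np


def l2_rel(u_pred: np.ndarray, u_exact: np.ndarray) -> float:
    diff = u_exact.ravel() - u_pred.ravel()
    return float(np.linalg.norm(diff) / np.linalg.norm(u_exact.ravel()))
# --- end editor's note -------------------------------------------------------------------------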
ppsci.loss.FunctionalLoss(loss_fun), - {"residual1_u": lambda out: out["residual1_u"]}, - name="sup_constraint", - ) - constraint = {sup_constraint.name: sup_constraint} - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATA_FILE, - "input_keys": cfg.TRAIN.input_keys, - "label_keys": cfg.EVAL.label_keys, - "alias_dict": cfg.EVAL.alias_dict, - } - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(loss_fun), - output_expr={ - "residual1_u": lambda out: out["residual1_u"], - "residual2_u": lambda out: out["residual2_u"], - "residual3_u": lambda out: out["residual3_u"], - }, - metric={"L2Rel": ppsci.metric.FunctionalMetric(eval_l2_rel_func)}, - name="sup_validator", - ) - validator = {sup_validator.name: sup_validator} - - # set optimizer - optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(custom_model) - - # initialize solver - solver = ppsci.solver.Solver( - custom_model, - constraint, - optimizer=optimizer, - validator=validator, - cfg=cfg, - ) - - solver.train() - solver.eval() - - # visualize prediction - with solver.no_grad_context_manager(True): - for index, (_input, _label, _) in enumerate(sup_validator.data_loader): - u_exact = _label["residual_u_exact"] - output_ = custom_model(_input) - u_pred = paddle.concat( - [output_["residual1_u"], output_["residual2_u"], output_["residual3_u"]] - ) - - plotting.log_image( - residual1_x=_input["residual1_x"], - residual1_y=_input["residual1_y"], - residual2_x=_input["residual2_x"], - residual2_y=_input["residual2_y"], - residual3_x=_input["residual3_x"], - residual3_y=_input["residual3_y"], - interface1_x=_input["interface1_x"], - interface1_y=_input["interface1_y"], - interface2_x=_input["interface2_x"], - interface2_y=_input["interface2_y"], - boundary_x=_input["boundary_x"], - boundary_y=_input["boundary_y"], - residual_u_pred=u_pred, - residual_u_exact=u_exact, - ) - - -def evaluate(cfg: DictConfig): - layer_list = ( - cfg.MODEL.layers1, - cfg.MODEL.layers2, - cfg.MODEL.layers3, - ) - - custom_model = model.Model(layer_list) - - # set validator - eval_dataloader_cfg = { - "dataset": { - "name": "IterableMatDataset", - "file_path": cfg.DATA_FILE, - "input_keys": cfg.TRAIN.input_keys, - "label_keys": cfg.EVAL.label_keys, - "alias_dict": cfg.EVAL.alias_dict, - } - } - - sup_validator = ppsci.validate.SupervisedValidator( - eval_dataloader_cfg, - loss=ppsci.loss.FunctionalLoss(loss_fun), - output_expr={ - "residual1_u": lambda out: out["residual1_u"], - "residual2_u": lambda out: out["residual2_u"], - "residual3_u": lambda out: out["residual3_u"], - }, - metric={"L2Rel": ppsci.metric.FunctionalMetric(eval_l2_rel_func)}, - name="sup_validator", - ) - validator = {sup_validator.name: sup_validator} - - # initialize solver - solver = ppsci.solver.Solver( - custom_model, - validator=validator, - cfg=cfg, - ) - - solver.eval() - - # visualize prediction - with solver.no_grad_context_manager(True): - for index, (_input, _label, _) in enumerate(sup_validator.data_loader): - u_exact = _label["residual_u_exact"] - _output = custom_model(_input) - u_pred = paddle.concat( - [_output["residual1_u"], _output["residual2_u"], _output["residual3_u"]] - ) - - plotting.log_image( - residual1_x=_input["residual1_x"], - residual1_y=_input["residual1_y"], - residual2_x=_input["residual2_x"], - residual2_y=_input["residual2_y"], - residual3_x=_input["residual3_x"], - residual3_y=_input["residual3_y"], - 
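# --- editor's note: sketch of the sub-sampling idiom, not part of the original diff ----------
# train_dataset_transform_func in train() above redraws a random subset of collocation points
# each time it runs.  The core idiom is: sample one index set and apply it to every paired
# column so x/y (and any labels) stay aligned.  Variable names in the usage line are the raw
# .mat keys from the config and are illustrative only.
import numpy as np


def subsample(columns, n, rng=None):
    """Return the same `n` random rows from each (m, 1) array in `columns`."""
    rng = np.random.default_rng() if rng is None else rng
    idx = rng.choice(columns[0].shape[0], n, replace=False)
    return [c[idx, :] for c in columns]


# usage sketch: x_f1_sub, y_f1_sub = subsample([x_f1, y_f1], cfg.MODEL.num_residual1_points)
# --- end editor's note --------------------------------------------------------------------------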
interface1_x=_input["interface1_x"], - interface1_y=_input["interface1_y"], - interface2_x=_input["interface2_x"], - interface2_y=_input["interface2_y"], - boundary_x=_input["boundary_x"], - boundary_y=_input["boundary_y"], - residual_u_pred=u_pred, - residual_u_exact=u_exact, - ) - - -@hydra.main(version_base=None, config_path="./conf", config_name="xpinn.yaml") -def main(cfg: DictConfig): - if cfg.mode == "train": - train(cfg) - elif cfg.mode == "eval": - evaluate(cfg) - else: - raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable +from typing import Dict +from typing import List +from typing import Tuple + +import hydra +import model +import numpy as np +import paddle +import plotting +from omegaconf import DictConfig + +import ppsci + +# For the use of the second derivative: paddle.cos +paddle.framework.core.set_prim_eager_enabled(True) + + +def _xpinn_loss( + training_pres: List[List[paddle.Tensor]] = None, + training_exacts: List[paddle.Tensor] = None, + training_weight: float = 1, + residual_inputs: List[List[paddle.Tensor]] = None, + residual_pres: List[paddle.Tensor] = None, + residual_weight: float = 1, + interface_inputs: List[List[paddle.Tensor]] = None, + interface_pres: List[paddle.Tensor] = None, + interface_weight: float = 1, + interface_neigh_pres: List[List[paddle.Tensor]] = None, + interface_neigh_weight: float = 1, + residual_func: Callable = lambda x, y: x - y, +) -> float: + """XPINNs loss function for subdomain + + `loss = W_u_q * MSE_u_q + W_F_q * MSE_F_q + W_I_q * MSE_avg_q + W_I_F_q * MSE_R` + + `W_u_q * MSE_u_q` is data mismatch item. + `W_F_q * MSE_F_q` is residual item. + `W_I_q * MSE_avg_q` is interface item. + `W_I_F_q * MSE_R` is interface residual item. + + Args: + training_pres (List[List[paddle.Tensor]], optional): the prediction result for training points input. Defaults to None. + training_exacts (List[paddle.Tensor], optional): the exact result for training points input. Defaults to None. + training_weight (float, optional): the weight of data mismatch item. Defaults to 1. + residual_inputs (List[List[paddle.Tensor]], optional): residual points input. Defaults to None. + residual_pres (List[paddle.Tensor], optional): the prediction result for residual points input. Defaults to None. + residual_weight (float, optional): the weight of residual item. Defaults to 1. + interface_inputs (List[List[paddle.Tensor]], optional): the prediction result for interface points input. Defaults to None. + interface_pres (List[paddle.Tensor], optional): the prediction result for interface points input. Defaults to None. + interface_weight (float, optional): the weight of iinterface item. Defaults to 1. + interface_neigh_pres (List[List[paddle.Tensor]], optional): the prediction result of neighbouring subdomain model for interface points input. Defaults to None. 
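# --- editor's note ------------------------------------------------------------------------------
# Because this script is wrapped in @hydra.main (see main() above), every key of
# conf/xpinn.yaml can be overridden from the command line, e.g.
#   python xpinn.py mode=train
#   python xpinn.py mode=eval EVAL.pretrained_model_path=/path/to/model.pdparams
# (the checkpoint path is a placeholder; a prior training run must have produced it).
# --- end editor's note --------------------------------------------------------------------------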
+ interface_neigh_weight (float, optional): the weight of interface residual term. Defaults to 1. + residual_func (Callable, optional): residual calculation function. Defaults to lambda x,y : x - y. + """ + + def _get_grad(outputs: paddle.Tensor, inputs: paddle.Tensor) -> paddle.Tensor: + grad = paddle.grad(outputs, inputs, retain_graph=True, create_graph=True) + return grad[0] + + def _get_second_derivatives( + outputs_list: List[paddle.Tensor], + inputs_list: List[List[paddle.Tensor]], + ) -> Tuple[List[List[paddle.Tensor]], List[List[paddle.Tensor]]]: + d1_list = [ + [_get_grad(_out, _in) for _in in _ins] + for _out, _ins in zip(outputs_list, inputs_list) + ] + d2_list = [ + [_get_grad(_d1, _in) for _d1, _in in zip(d1s_, _ins)] + for d1s_, _ins in zip(d1_list, inputs_list) + ] + return d2_list + + residual_u_d2_list = _get_second_derivatives(residual_pres, residual_inputs) + interface_u_d2_list = _get_second_derivatives(interface_pres, interface_inputs) + interface_neigh_u_d2_list = _get_second_derivatives( + interface_neigh_pres, interface_inputs + ) + + MSE_u_q = 0 + + if training_pres is not None: + for _pre, _exact in zip(training_pres, training_exacts): + MSE_u_q += training_weight * paddle.mean(paddle.square(_pre - _exact)) + + MSE_F_q = 0 + + if residual_inputs is not None: + for _ins, _d2 in zip(residual_inputs, residual_u_d2_list): + MSE_F_q += residual_weight * paddle.mean( + paddle.square(residual_func(_d2, _ins)) + ) + + MSE_avg_q = 0 + MSE_R = 0 + + if interface_inputs is not None: + for _ins, _pre, _n_pres in zip( + interface_inputs, interface_pres, interface_neigh_pres + ): + pre_list = [_pre] + _n_pres + pre_avg = paddle.add_n(pre_list) / len(pre_list) + MSE_avg_q += interface_weight * paddle.mean(paddle.square(_pre - pre_avg)) + + for _ins, _d2, _n_d2 in zip( + interface_inputs, interface_u_d2_list, interface_neigh_u_d2_list + ): + MSE_R += interface_neigh_weight * paddle.mean( + paddle.square(residual_func(_d2, _ins) - residual_func(_n_d2, _ins)) + ) + + return MSE_u_q + MSE_F_q + MSE_avg_q + MSE_R + + +def loss_fun( + output_dict: Dict[str, paddle.Tensor], + label_dict: Dict[str, paddle.Tensor], + *args, +) -> float: + def residual_func(output_der: paddle.Tensor, input: paddle.Tensor) -> paddle.Tensor: + return paddle.add_n(output_der) - paddle.add_n( + [paddle.exp(_in) for _in in input] + ) + + # subdomain 1 + loss1 = _xpinn_loss( + training_pres=[output_dict["boundary_u"]], + training_exacts=[label_dict["boundary_u_exact"]], + training_weight=20, + residual_inputs=[[output_dict["residual1_x"], output_dict["residual1_y"]]], + residual_pres=[output_dict["residual1_u"]], + residual_weight=1, + interface_inputs=[ + [output_dict["interface1_x"], output_dict["interface1_y"]], + [output_dict["interface2_x"], output_dict["interface2_y"]], + ], + interface_pres=[ + output_dict["interface1_u_sub1"], + output_dict["interface2_u_sub1"], + ], + interface_weight=20, + interface_neigh_pres=[ + [output_dict["interface1_u_sub2"]], + [output_dict["interface2_u_sub3"]], + ], + interface_neigh_weight=1, + residual_func=residual_func, + ) + + # subdomain 2 + loss2 = _xpinn_loss( + residual_inputs=[[output_dict["residual2_x"], output_dict["residual2_y"]]], + residual_pres=[output_dict["residual2_u"]], + residual_weight=1, + interface_inputs=[[output_dict["interface1_x"], output_dict["interface1_y"]]], + interface_pres=[output_dict["interface1_u_sub1"]], + interface_weight=20, + interface_neigh_pres=[[output_dict["interface1_u_sub2"]]], + interface_neigh_weight=1, + 
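# --- editor's note: illustration of the MSE_avg term, not part of the original diff ------------
# The interface term above pulls each sub-network towards the point-wise average of all
# sub-networks sharing that interface, enforcing continuity of u across sub-domains:
import paddle

u_sub1 = paddle.rand([8, 1])  # sub-net 1 prediction on the interface points
u_sub2 = paddle.rand([8, 1])  # neighbouring sub-net 2 prediction on the same points
u_avg = paddle.add_n([u_sub1, u_sub2]) / 2
mse_avg = paddle.mean(paddle.square(u_sub1 - u_avg))
# --- end editor's note ----------------------------------------------------------------------------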
residual_func=residual_func, + ) + + # subdomain 3 + loss3 = _xpinn_loss( + residual_inputs=[[output_dict["residual3_x"], output_dict["residual3_y"]]], + residual_pres=[output_dict["residual3_u"]], + residual_weight=1, + interface_inputs=[[output_dict["interface2_x"], output_dict["interface2_y"]]], + interface_pres=[output_dict["interface2_u_sub1"]], + interface_weight=20, + interface_neigh_pres=[[output_dict["interface2_u_sub3"]]], + interface_neigh_weight=1, + residual_func=residual_func, + ) + + return {"residuals": loss1 + loss2 + loss3} + + +def eval_l2_rel_func( + output_dict: Dict[str, paddle.Tensor], + label_dict: Dict[str, paddle.Tensor], + *args, +) -> Dict[str, paddle.Tensor]: + u_pred = paddle.concat( + [ + output_dict["residual1_u"], + output_dict["residual2_u"], + output_dict["residual3_u"], + ] + ) + + # the shape of label_dict["residual_u_exact"] is [22387, 1], and be cut into [18211, 1] `_eval_by_dataset`(ppsci/solver/eval.py). + u_exact = paddle.concat( + [ + label_dict["residual_u_exact"], + label_dict["residual2_u_exact"], + label_dict["residual3_u_exact"], + ] + ) + + error_total = paddle.linalg.norm( + u_exact.flatten() - u_pred.flatten(), 2 + ) / paddle.linalg.norm(u_exact.flatten(), 2) + return {"l2_error": error_total} + + +def train(cfg: DictConfig): + # set training dataset transformation + def train_dataset_transform_func( + _input: Dict[str, np.ndarray], + _label: Dict[str, np.ndarray], + weight_: Dict[str, np.ndarray], + ) -> Dict[str, np.ndarray]: + # Randomly select the residual points from sub-domains + id_x1 = np.random.choice( + _input["residual1_x"].shape[0], + cfg.MODEL.num_residual1_points, + replace=False, + ) + _input["residual1_x"] = _input["residual1_x"][id_x1, :] + _input["residual1_y"] = _input["residual1_y"][id_x1, :] + + id_x2 = np.random.choice( + _input["residual2_x"].shape[0], + cfg.MODEL.num_residual2_points, + replace=False, + ) + _input["residual2_x"] = _input["residual2_x"][id_x2, :] + _input["residual2_y"] = _input["residual2_y"][id_x2, :] + + id_x3 = np.random.choice( + _input["residual3_x"].shape[0], + cfg.MODEL.num_residual3_points, + replace=False, + ) + _input["residual3_x"] = _input["residual3_x"][id_x3, :] + _input["residual3_y"] = _input["residual3_y"][id_x3, :] + + # Randomly select boundary points + id_x4 = np.random.choice( + _input["boundary_x"].shape[0], cfg.MODEL.num_boundary_points, replace=False + ) + _input["boundary_x"] = _input["boundary_x"][id_x4, :] + _input["boundary_y"] = _input["boundary_y"][id_x4, :] + _label["boundary_u_exact"] = _label["boundary_u_exact"][id_x4, :] + + # Randomly select the interface points along two interfaces + id_xi1 = np.random.choice( + _input["interface1_x"].shape[0], cfg.MODEL.num_interface1, replace=False + ) + _input["interface1_x"] = _input["interface1_x"][id_xi1, :] + _input["interface1_y"] = _input["interface1_y"][id_xi1, :] + + id_xi2 = np.random.choice( + _input["interface2_x"].shape[0], cfg.MODEL.num_interface2, replace=False + ) + _input["interface2_x"] = _input["interface2_x"][id_xi2, :] + _input["interface2_y"] = _input["interface2_y"][id_xi2, :] + + return _input, _label, weight_ + + # set dataloader config + train_dataloader_cfg = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATA_FILE, + "input_keys": cfg.TRAIN.input_keys, + "label_keys": cfg.TRAIN.label_keys, + "alias_dict": cfg.TRAIN.alias_dict, + "transforms": ( + { + "FunctionalTransform": { + "transform_func": train_dataset_transform_func, + }, + }, + ), + } + } + + layer_list = ( + 
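# --- editor's note --------------------------------------------------------------------------------
# Note on train_dataset_transform_func above: it returns the 3-tuple (_input, _label, weight_)
# that the FunctionalTransform pipeline passes through, even though its return annotation reads
# Dict[str, np.ndarray]; the annotation is best treated as approximate.
# --- end editor's note ------------------------------------------------------------------------------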
cfg.MODEL.layers1, + cfg.MODEL.layers2, + cfg.MODEL.layers3, + ) + + # set model + custom_model = model.Model(layer_list) + + # set constraint + sup_constraint = ppsci.constraint.SupervisedConstraint( + train_dataloader_cfg, + ppsci.loss.FunctionalLoss(loss_fun), + {"residual1_u": lambda out: out["residual1_u"]}, + name="sup_constraint", + ) + constraint = {sup_constraint.name: sup_constraint} + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATA_FILE, + "input_keys": cfg.TRAIN.input_keys, + "label_keys": cfg.EVAL.label_keys, + "alias_dict": cfg.EVAL.alias_dict, + } + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(loss_fun), + output_expr={ + "residual1_u": lambda out: out["residual1_u"], + "residual2_u": lambda out: out["residual2_u"], + "residual3_u": lambda out: out["residual3_u"], + }, + metric={"L2Rel": ppsci.metric.FunctionalMetric(eval_l2_rel_func)}, + name="sup_validator", + ) + validator = {sup_validator.name: sup_validator} + + # set optimizer + optimizer = ppsci.optimizer.Adam(cfg.TRAIN.learning_rate)(custom_model) + + # initialize solver + solver = ppsci.solver.Solver( + custom_model, + constraint, + optimizer=optimizer, + validator=validator, + cfg=cfg, + ) + + solver.train() + solver.eval() + + # visualize prediction + with solver.no_grad_context_manager(True): + for index, (_input, _label, _) in enumerate(sup_validator.data_loader): + u_exact = _label["residual_u_exact"] + output_ = custom_model(_input) + u_pred = paddle.concat( + [output_["residual1_u"], output_["residual2_u"], output_["residual3_u"]] + ) + + plotting.log_image( + residual1_x=_input["residual1_x"], + residual1_y=_input["residual1_y"], + residual2_x=_input["residual2_x"], + residual2_y=_input["residual2_y"], + residual3_x=_input["residual3_x"], + residual3_y=_input["residual3_y"], + interface1_x=_input["interface1_x"], + interface1_y=_input["interface1_y"], + interface2_x=_input["interface2_x"], + interface2_y=_input["interface2_y"], + boundary_x=_input["boundary_x"], + boundary_y=_input["boundary_y"], + residual_u_pred=u_pred, + residual_u_exact=u_exact, + ) + + +def evaluate(cfg: DictConfig): + layer_list = ( + cfg.MODEL.layers1, + cfg.MODEL.layers2, + cfg.MODEL.layers3, + ) + + custom_model = model.Model(layer_list) + + # set validator + eval_dataloader_cfg = { + "dataset": { + "name": "IterableMatDataset", + "file_path": cfg.DATA_FILE, + "input_keys": cfg.TRAIN.input_keys, + "label_keys": cfg.EVAL.label_keys, + "alias_dict": cfg.EVAL.alias_dict, + } + } + + sup_validator = ppsci.validate.SupervisedValidator( + eval_dataloader_cfg, + loss=ppsci.loss.FunctionalLoss(loss_fun), + output_expr={ + "residual1_u": lambda out: out["residual1_u"], + "residual2_u": lambda out: out["residual2_u"], + "residual3_u": lambda out: out["residual3_u"], + }, + metric={"L2Rel": ppsci.metric.FunctionalMetric(eval_l2_rel_func)}, + name="sup_validator", + ) + validator = {sup_validator.name: sup_validator} + + # initialize solver + solver = ppsci.solver.Solver( + custom_model, + validator=validator, + cfg=cfg, + ) + + solver.eval() + + # visualize prediction + with solver.no_grad_context_manager(True): + for index, (_input, _label, _) in enumerate(sup_validator.data_loader): + u_exact = _label["residual_u_exact"] + _output = custom_model(_input) + u_pred = paddle.concat( + [_output["residual1_u"], _output["residual2_u"], _output["residual3_u"]] + ) + + plotting.log_image( + 
residual1_x=_input["residual1_x"], + residual1_y=_input["residual1_y"], + residual2_x=_input["residual2_x"], + residual2_y=_input["residual2_y"], + residual3_x=_input["residual3_x"], + residual3_y=_input["residual3_y"], + interface1_x=_input["interface1_x"], + interface1_y=_input["interface1_y"], + interface2_x=_input["interface2_x"], + interface2_y=_input["interface2_y"], + boundary_x=_input["boundary_x"], + boundary_y=_input["boundary_y"], + residual_u_pred=u_pred, + residual_u_exact=u_exact, + ) + + +@hydra.main(version_base=None, config_path="./conf", config_name="xpinn.yaml") +def main(cfg: DictConfig): + if cfg.mode == "train": + train(cfg) + elif cfg.mode == "eval": + evaluate(cfg) + else: + raise ValueError(f"cfg.mode should in ['train', 'eval'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/yinglong/conf/yinglong_eastern.yaml b/examples/yinglong/conf/yinglong_eastern.yaml index 83b1b98d69..8da24af0dd 100644 --- a/examples/yinglong/conf/yinglong_eastern.yaml +++ b/examples/yinglong/conf/yinglong_eastern.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -64,3 +65,61 @@ INFER: ] geo_file: ./eastern_valid_data/geo.h5 num_timestamps: 48 +======= +hydra: + run: + # dynamic output directory according to running time and override name + # dir: outputs_yinglong/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + dir: ./outputs_yinglong_eastern + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: infer # running mode: infer +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# inference settings +INFER: + pretrained_model_path: null + export_path: inference/yinglong_eastern + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: false + min_subgraph_size: 30 + gpu_mem: 100 + gpu_id: 3 + max_batch_size: 1 + num_cpu_threads: 10 + batch_size: 1 + mean_path: ./eastern_valid_data/stat/mean_crop.npy + std_path: ./eastern_valid_data/stat/std_crop.npy + input_file: [./eastern_valid_data/valid/2022/01/01.h5, ./eastern_valid_data/valid/2022/01/02.h5, ./eastern_valid_data/valid/2022/01/03.h5] + init_time: 2022/01/01/00 + nwp_file: [./eastern_valid_data/hrrr_nwp_h5/2022/01/01.h5, ./eastern_valid_data/hrrr_nwp_h5/2022/01/02.h5, ./eastern_valid_data/hrrr_nwp_h5/2022/01/03.h5] + geo_file: ./eastern_valid_data/geo.h5 + num_timestamps: 48 +>>>>>>> Stashed changes diff --git a/examples/yinglong/conf/yinglong_western.yaml b/examples/yinglong/conf/yinglong_western.yaml index 1415a3d732..6173a2fa4f 100644 --- a/examples/yinglong/conf/yinglong_western.yaml +++ b/examples/yinglong/conf/yinglong_western.yaml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream defaults: - ppsci_default - TRAIN: train_default @@ -64,3 +65,61 @@ INFER: ] geo_file: ./western_valid_data/geo.h5 num_timestamps: 48 +======= +hydra: + run: + # dynamic output directory according to running time and override name + # dir: 
outputs_yinglong/${now:%Y-%m-%d}/${now:%H-%M-%S}/${hydra.job.override_dirname} + dir: ./outputs_yinglong_western + job: + name: ${mode} # name of logfile + chdir: false # keep current working directory unchanged + config: + override_dirname: + exclude_keys: + - TRAIN.checkpoint_path + - TRAIN.pretrained_model_path + - EVAL.pretrained_model_path + - INFER.pretrained_model_path + - mode + - output_dir + - log_freq + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback + sweep: + # output directory for multirun + dir: ${hydra.run.dir} + subdir: ./ + +# general settings +mode: infer # running mode: infer +seed: 2023 +output_dir: ${hydra:run.dir} +log_freq: 20 + +# inference settings +INFER: + pretrained_model_path: null + export_path: inference/yinglong_western + pdmodel_path: ${INFER.export_path}.pdmodel + pdiparams_path: ${INFER.export_path}.pdiparams + onnx_path: ${INFER.export_path}.onnx + device: gpu + engine: native + precision: fp32 + ir_optim: false + min_subgraph_size: 30 + gpu_mem: 100 + gpu_id: 3 + max_batch_size: 1 + num_cpu_threads: 10 + batch_size: 1 + mean_path: ./western_valid_data/stat/mean_crop.npy + std_path: ./western_valid_data/stat/std_crop.npy + input_file: [./western_valid_data/valid/2022/01/01.h5, ./western_valid_data/valid/2022/01/02.h5, ./western_valid_data/valid/2022/01/03.h5] + init_time: 2022/01/01/00 + nwp_file: [./western_valid_data/hrrr_nwp_h5/2022/01/01.h5, ./western_valid_data/hrrr_nwp_h5/2022/01/02.h5, ./western_valid_data/hrrr_nwp_h5/2022/01/03.h5] + geo_file: ./western_valid_data/geo.h5 + num_timestamps: 48 +>>>>>>> Stashed changes diff --git a/examples/yinglong/plot.py b/examples/yinglong/plot.py index b65fbbe1c8..1c88450605 100644 --- a/examples/yinglong/plot.py +++ b/examples/yinglong/plot.py @@ -1,224 +1,224 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import os -from typing import Dict -from typing import Tuple -from typing import Union - -import imageio -import matplotlib -import numpy as np -import paddle -from matplotlib import colormaps as cm -from matplotlib import pyplot as plt -from matplotlib.legend_handler import HandlerBase -from matplotlib.patches import Rectangle - - -class HandlerColormap(HandlerBase): - """Class for creating colormap legend rectangles. - - Args: - cmap (matplotlib.cm): Matplotlib colormap. - num_stripes (int, optional): Number of contour levels (strips) in rectangle. Defaults to 8. 
- """ - - def __init__(self, cmap: matplotlib.cm, num_stripes: int = 8, **kw): - HandlerBase.__init__(self, **kw) - self.cmap = cmap - self.num_stripes = num_stripes - - def create_artists( - self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans - ): - stripes = [] - for i in range(self.num_stripes): - s = Rectangle( - [xdescent + i * width / self.num_stripes, ydescent], - width / self.num_stripes, - height, - fc=self.cmap((2 * i + 1) / (2 * self.num_stripes)), - transform=trans, - ) - stripes.append(s) - return stripes - - -def _save_plot_weather_from_array( - filename: str, - pred: np.ndarray, - target: np.ndarray, - pred_key: str, - target_key: str, - xticks: Tuple[float, ...], - xticklabels: Tuple[str, ...], - yticks: Tuple[float, ...], - yticklabels: Tuple[str, ...], - vmin: float, - vmax: float, - colorbar_label: str = "", - log_norm: bool = False, -): - """Plot weather result as file from array data. - - Args: - filename (str): Output file name. - pred (np.ndarray): The predict data. - target (np.ndarray): The target data. - pred_key (str): The key of predict data. - target_key (str): The key of target data. - xticks (Tuple[float, ...]): The list of xtick locations. - xticklabels (Tuple[str, ...]): The x-axis' tick labels. - yticks (Tuple[float, ...]): The list of ytick locations. - yticklabels (Tuple[str, ...]): The y-axis' tick labels. - vmin (float): Minimal value that the colormap covers. - vmax (float): Maximal value that the colormap covers. - colorbar_label (str, optional): The color-bar label. Defaults to "". - log_norm (bool, optional): Whether use log norm. Defaults to False. - """ - - def plot_weather( - ax, - data, - title_text, - xticks, - xticklabels, - yticks, - yticklabels, - vmin, - vmax, - log_norm, - cmap=cm.get_cmap("turbo"), - ): - ax.title.set_text(title_text) - ax.set_yticks(yticks) - ax.set_yticklabels(yticklabels) - ax.set_xticks(xticks) - ax.set_xticklabels(xticklabels) - if not log_norm: - map_ = ax.imshow( - data, - interpolation="nearest", - cmap=cmap, - aspect="auto", - vmin=vmin, - vmax=vmax, - ) - else: - norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax, clip=True) - map_ = ax.imshow( - data, interpolation="nearest", cmap=cmap, aspect="auto", norm=norm - ) - plt.colorbar(mappable=map_, cax=None, ax=None, shrink=0.5, label=colorbar_label) - - os.makedirs(os.path.dirname(filename), exist_ok=True) - fig = plt.figure(facecolor="w", figsize=(10, 4)) - ax = fig.add_subplot(1, 2, 1) - plot_weather( - ax, - pred, - pred_key, - xticks, - xticklabels, - yticks, - yticklabels, - vmin, - vmax, - log_norm, - ) - bx = fig.add_subplot(1, 2, 2) - plot_weather( - bx, - target, - target_key, - xticks, - xticklabels, - yticks, - yticklabels, - vmin, - vmax, - log_norm, - ) - fig.savefig(filename, dpi=300) - plt.close() - - -def save_plot_weather_from_dict( - foldername: str, - data_dict: Dict[str, Union[np.ndarray, paddle.Tensor]], - visu_keys: Tuple[str, ...], - xticks: Tuple[float, ...], - xticklabels: Tuple[str, ...], - yticks: Tuple[float, ...], - yticklabels: Tuple[str, ...], - vmin: float, - vmax: float, - colorbar_label: str = "", - log_norm: bool = False, - num_timestamps: int = 1, -): - """Plot weather result as file from dict data. - - Args: - foldername (str): Output folder name. - data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Data in dict. - visu_keys (Tuple[str, ...]): Keys for visualizing data. such as ("output_6h", "target_6h"). - xticks (Tuple[float, ...]): The list of xtick locations. 
- xticklabels (Tuple[str, ...]): The x-axis' tick labels. - yticks (Tuple[float, ...]): The list of ytick locations, - yticklabels (Tuple[str, ...]): The y-axis' tick labels. - vmin (float): Minimal value that the colormap covers. - vmax (float): Maximal value that the colormap covers. - colorbar_label (str, optional): The colorbar label. Defaults to "". - log_norm (bool, optional): Whether use log norm. Defaults to False. - num_timestamps (int): Number of timestamp in data_dict. Defaults to 1. - """ - os.makedirs(foldername, exist_ok=True) - - visu_data = [data_dict[k] for k in visu_keys] - if isinstance(visu_data[0], paddle.Tensor): - visu_data = [x.numpy() for x in visu_data] - - frames = [] - for t in range(num_timestamps): - pred_key, target_key = visu_keys[2 * t], visu_keys[2 * t + 1] - pred_data = visu_data[2 * t] - target_data = visu_data[2 * t + 1] - filename_t = os.path.join(foldername, f"{t}.png") - _save_plot_weather_from_array( - filename_t, - pred_data, - target_data, - pred_key, - target_key, - xticks, - xticklabels, - yticks, - yticklabels, - vmin=vmin, - vmax=vmax, - colorbar_label=colorbar_label, - log_norm=log_norm, - ) - frames.append(imageio.imread(filename_t)) - filename = os.path.join(foldername, "result.gif") - imageio.mimsave( - filename, - frames, - "GIF", - duration=1, - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +from typing import Dict +from typing import Tuple +from typing import Union + +import imageio +import matplotlib +import numpy as np +import paddle +from matplotlib import colormaps as cm +from matplotlib import pyplot as plt +from matplotlib.legend_handler import HandlerBase +from matplotlib.patches import Rectangle + + +class HandlerColormap(HandlerBase): + """Class for creating colormap legend rectangles. + + Args: + cmap (matplotlib.cm): Matplotlib colormap. + num_stripes (int, optional): Number of contour levels (strips) in rectangle. Defaults to 8. + """ + + def __init__(self, cmap: matplotlib.cm, num_stripes: int = 8, **kw): + HandlerBase.__init__(self, **kw) + self.cmap = cmap + self.num_stripes = num_stripes + + def create_artists( + self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans + ): + stripes = [] + for i in range(self.num_stripes): + s = Rectangle( + [xdescent + i * width / self.num_stripes, ydescent], + width / self.num_stripes, + height, + fc=self.cmap((2 * i + 1) / (2 * self.num_stripes)), + transform=trans, + ) + stripes.append(s) + return stripes + + +def _save_plot_weather_from_array( + filename: str, + pred: np.ndarray, + target: np.ndarray, + pred_key: str, + target_key: str, + xticks: Tuple[float, ...], + xticklabels: Tuple[str, ...], + yticks: Tuple[float, ...], + yticklabels: Tuple[str, ...], + vmin: float, + vmax: float, + colorbar_label: str = "", + log_norm: bool = False, +): + """Plot weather result as file from array data. + + Args: + filename (str): Output file name. 
+ pred (np.ndarray): The predict data. + target (np.ndarray): The target data. + pred_key (str): The key of predict data. + target_key (str): The key of target data. + xticks (Tuple[float, ...]): The list of xtick locations. + xticklabels (Tuple[str, ...]): The x-axis' tick labels. + yticks (Tuple[float, ...]): The list of ytick locations. + yticklabels (Tuple[str, ...]): The y-axis' tick labels. + vmin (float): Minimal value that the colormap covers. + vmax (float): Maximal value that the colormap covers. + colorbar_label (str, optional): The color-bar label. Defaults to "". + log_norm (bool, optional): Whether use log norm. Defaults to False. + """ + + def plot_weather( + ax, + data, + title_text, + xticks, + xticklabels, + yticks, + yticklabels, + vmin, + vmax, + log_norm, + cmap=cm.get_cmap("turbo"), + ): + ax.title.set_text(title_text) + ax.set_yticks(yticks) + ax.set_yticklabels(yticklabels) + ax.set_xticks(xticks) + ax.set_xticklabels(xticklabels) + if not log_norm: + map_ = ax.imshow( + data, + interpolation="nearest", + cmap=cmap, + aspect="auto", + vmin=vmin, + vmax=vmax, + ) + else: + norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax, clip=True) + map_ = ax.imshow( + data, interpolation="nearest", cmap=cmap, aspect="auto", norm=norm + ) + plt.colorbar(mappable=map_, cax=None, ax=None, shrink=0.5, label=colorbar_label) + + os.makedirs(os.path.dirname(filename), exist_ok=True) + fig = plt.figure(facecolor="w", figsize=(10, 4)) + ax = fig.add_subplot(1, 2, 1) + plot_weather( + ax, + pred, + pred_key, + xticks, + xticklabels, + yticks, + yticklabels, + vmin, + vmax, + log_norm, + ) + bx = fig.add_subplot(1, 2, 2) + plot_weather( + bx, + target, + target_key, + xticks, + xticklabels, + yticks, + yticklabels, + vmin, + vmax, + log_norm, + ) + fig.savefig(filename, dpi=300) + plt.close() + + +def save_plot_weather_from_dict( + foldername: str, + data_dict: Dict[str, Union[np.ndarray, paddle.Tensor]], + visu_keys: Tuple[str, ...], + xticks: Tuple[float, ...], + xticklabels: Tuple[str, ...], + yticks: Tuple[float, ...], + yticklabels: Tuple[str, ...], + vmin: float, + vmax: float, + colorbar_label: str = "", + log_norm: bool = False, + num_timestamps: int = 1, +): + """Plot weather result as file from dict data. + + Args: + foldername (str): Output folder name. + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Data in dict. + visu_keys (Tuple[str, ...]): Keys for visualizing data. such as ("output_6h", "target_6h"). + xticks (Tuple[float, ...]): The list of xtick locations. + xticklabels (Tuple[str, ...]): The x-axis' tick labels. + yticks (Tuple[float, ...]): The list of ytick locations, + yticklabels (Tuple[str, ...]): The y-axis' tick labels. + vmin (float): Minimal value that the colormap covers. + vmax (float): Maximal value that the colormap covers. + colorbar_label (str, optional): The colorbar label. Defaults to "". + log_norm (bool, optional): Whether use log norm. Defaults to False. + num_timestamps (int): Number of timestamp in data_dict. Defaults to 1. 
+ """ + os.makedirs(foldername, exist_ok=True) + + visu_data = [data_dict[k] for k in visu_keys] + if isinstance(visu_data[0], paddle.Tensor): + visu_data = [x.numpy() for x in visu_data] + + frames = [] + for t in range(num_timestamps): + pred_key, target_key = visu_keys[2 * t], visu_keys[2 * t + 1] + pred_data = visu_data[2 * t] + target_data = visu_data[2 * t + 1] + filename_t = os.path.join(foldername, f"{t}.png") + _save_plot_weather_from_array( + filename_t, + pred_data, + target_data, + pred_key, + target_key, + xticks, + xticklabels, + yticks, + yticklabels, + vmin=vmin, + vmax=vmax, + colorbar_label=colorbar_label, + log_norm=log_norm, + ) + frames.append(imageio.imread(filename_t)) + filename = os.path.join(foldername, "result.gif") + imageio.mimsave( + filename, + frames, + "GIF", + duration=1, + ) diff --git a/examples/yinglong/predict.py b/examples/yinglong/predict.py index bfe3e61989..9c0d8adfdd 100644 --- a/examples/yinglong/predict.py +++ b/examples/yinglong/predict.py @@ -1,138 +1,138 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from os import path as osp - -import h5py -import hydra -import numpy as np -import paddle -import pandas as pd -from omegaconf import DictConfig -from omegaconf import ListConfig -from packaging import version - -from examples.yinglong.plot import save_plot_weather_from_dict -from examples.yinglong.predictor import YingLongPredictor -from ppsci.utils import logger - - -def read_h5py(filename): - """Read hdf5 file.""" - if isinstance(filename, str): - data = np.array(h5py.File(filename, "r")["fields"]) - elif isinstance(filename, ListConfig): - data = [h5py.File(x, "r")["fields"] for x in filename] - data = np.concatenate(data, axis=0) - else: - raise TypeError( - f"The type of INFER.input_file must be str or list, but got {type(filename)}" - ) - return data - - -def inference(cfg: DictConfig): - # log paddlepaddle's version - if version.Version(paddle.__version__) != version.Version("0.0.0"): - paddle_version = paddle.__version__ - if version.Version(paddle.__version__) < version.Version("2.6.0"): - logger.warning( - f"Detected paddlepaddle version is '{paddle_version}', " - "currently it is recommended to use release 2.6 or develop version." - ) - else: - paddle_version = f"develop({paddle.version.commit[:7]})" - - logger.info(f"Using paddlepaddle {paddle_version}") - - num_timestamps = cfg.INFER.num_timestamps - # create predictor - predictor = YingLongPredictor(cfg) - - # load data - # HRRR Crop use 24 atmospheric variable,their index in the dataset is from 0 to 23. - # The variable name is 'z50', 'z500', 'z850', 'z1000', 't50', 't500', 't850', 'z1000', - # 's50', 's500', 's850', 's1000', 'u50', 'u500', 'u850', 'u1000', 'v50', 'v500', - # 'v850', 'v1000', 'mslp', 'u10', 'v10', 't2m'. 
- input_data = read_h5py(cfg.INFER.input_file) - nwp_data = read_h5py(cfg.INFER.nwp_file) - geo_data = read_h5py(cfg.INFER.geo_file) - - # input_data.shape: (1, 24, 440, 408) - input_data_0 = input_data[0:1] - # nwp_data.shape: # (num_timestamps, 24, 440, 408) - nwp_data = nwp_data[0:num_timestamps] - # ground_truth.shape: (num_timestamps, 24, 440, 408) - ground_truth = input_data[1 : num_timestamps + 1] - - # create time stamps - cur_time = pd.to_datetime(cfg.INFER.init_time, format="%Y/%m/%d/%H") - time_stamps = [[cur_time]] - for _ in range(num_timestamps): - cur_time += pd.Timedelta(hours=1) - time_stamps.append([cur_time]) - - # run predictor - pred_data = predictor.predict(input_data_0, time_stamps, nwp_data, geo_data) - pred_data = pred_data.squeeze(axis=1) # (num_timestamps, 24, 440, 408) - - # save predict data - save_path = osp.join(cfg.output_dir, "result.npy") - np.save(save_path, pred_data) - logger.info(f"Save output to {save_path}") - - # plot wind data - u10_idx, v10_idx = 21, 22 - pred_wind = (pred_data[:, u10_idx] ** 2 + pred_data[:, v10_idx] ** 2) ** 0.5 - ground_truth_wind = ( - ground_truth[:, u10_idx] ** 2 + ground_truth[:, v10_idx] ** 2 - ) ** 0.5 - data_dict = {} - visu_keys = [] - for i in range(num_timestamps): - visu_key = f"Init time: {cfg.INFER.init_time}h\n Ground truth: {i+1}h" - visu_keys.append(visu_key) - data_dict[visu_key] = ground_truth_wind[i] - visu_key = f"Init time: {cfg.INFER.init_time}h\n YingLong-12 Layers: {i+1}h" - visu_keys.append(visu_key) - data_dict[visu_key] = pred_wind[i] - - save_plot_weather_from_dict( - foldername=cfg.output_dir, - data_dict=data_dict, - visu_keys=visu_keys, - xticks=np.linspace(0, 407, 7), - xticklabels=[str(i) for i in range(0, 409, 68)], - yticks=np.linspace(0, 439, 9), - yticklabels=[str(i) for i in range(0, 441, 55)], - vmin=0, - vmax=15, - colorbar_label="m/s", - num_timestamps=48, - ) - logger.info(f"Save plot to {cfg.output_dir}") - - -@hydra.main( - version_base=None, config_path="./conf", config_name="yinglong_eastern.yaml" -) -def main(cfg: DictConfig): - if cfg.mode == "infer": - inference(cfg) - else: - raise ValueError(f"cfg.mode should in ['infer'], but got '{cfg.mode}'") - - -if __name__ == "__main__": - main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from os import path as osp + +import h5py +import hydra +import numpy as np +import paddle +import pandas as pd +from omegaconf import DictConfig +from omegaconf import ListConfig +from packaging import version + +from examples.yinglong.plot import save_plot_weather_from_dict +from examples.yinglong.predictor import YingLongPredictor +from ppsci.utils import logger + + +def read_h5py(filename): + """Read hdf5 file.""" + if isinstance(filename, str): + data = np.array(h5py.File(filename, "r")["fields"]) + elif isinstance(filename, ListConfig): + data = [h5py.File(x, "r")["fields"] for x in filename] + data = np.concatenate(data, axis=0) + else: + raise TypeError( + f"The type of INFER.input_file must be str or list, but got {type(filename)}" + ) + return data + + +def inference(cfg: DictConfig): + # log paddlepaddle's version + if version.Version(paddle.__version__) != version.Version("0.0.0"): + paddle_version = paddle.__version__ + if version.Version(paddle.__version__) < version.Version("2.6.0"): + logger.warning( + f"Detected paddlepaddle version is '{paddle_version}', " + "currently it is recommended to use release 2.6 or develop version." + ) + else: + paddle_version = f"develop({paddle.version.commit[:7]})" + + logger.info(f"Using paddlepaddle {paddle_version}") + + num_timestamps = cfg.INFER.num_timestamps + # create predictor + predictor = YingLongPredictor(cfg) + + # load data + # HRRR Crop use 24 atmospheric variable,their index in the dataset is from 0 to 23. + # The variable name is 'z50', 'z500', 'z850', 'z1000', 't50', 't500', 't850', 'z1000', + # 's50', 's500', 's850', 's1000', 'u50', 'u500', 'u850', 'u1000', 'v50', 'v500', + # 'v850', 'v1000', 'mslp', 'u10', 'v10', 't2m'. + input_data = read_h5py(cfg.INFER.input_file) + nwp_data = read_h5py(cfg.INFER.nwp_file) + geo_data = read_h5py(cfg.INFER.geo_file) + + # input_data.shape: (1, 24, 440, 408) + input_data_0 = input_data[0:1] + # nwp_data.shape: # (num_timestamps, 24, 440, 408) + nwp_data = nwp_data[0:num_timestamps] + # ground_truth.shape: (num_timestamps, 24, 440, 408) + ground_truth = input_data[1 : num_timestamps + 1] + + # create time stamps + cur_time = pd.to_datetime(cfg.INFER.init_time, format="%Y/%m/%d/%H") + time_stamps = [[cur_time]] + for _ in range(num_timestamps): + cur_time += pd.Timedelta(hours=1) + time_stamps.append([cur_time]) + + # run predictor + pred_data = predictor.predict(input_data_0, time_stamps, nwp_data, geo_data) + pred_data = pred_data.squeeze(axis=1) # (num_timestamps, 24, 440, 408) + + # save predict data + save_path = osp.join(cfg.output_dir, "result.npy") + np.save(save_path, pred_data) + logger.info(f"Save output to {save_path}") + + # plot wind data + u10_idx, v10_idx = 21, 22 + pred_wind = (pred_data[:, u10_idx] ** 2 + pred_data[:, v10_idx] ** 2) ** 0.5 + ground_truth_wind = ( + ground_truth[:, u10_idx] ** 2 + ground_truth[:, v10_idx] ** 2 + ) ** 0.5 + data_dict = {} + visu_keys = [] + for i in range(num_timestamps): + visu_key = f"Init time: {cfg.INFER.init_time}h\n Ground truth: {i+1}h" + visu_keys.append(visu_key) + data_dict[visu_key] = ground_truth_wind[i] + visu_key = f"Init time: {cfg.INFER.init_time}h\n YingLong-12 Layers: {i+1}h" + visu_keys.append(visu_key) + data_dict[visu_key] = pred_wind[i] + + save_plot_weather_from_dict( + foldername=cfg.output_dir, + data_dict=data_dict, + visu_keys=visu_keys, + xticks=np.linspace(0, 407, 7), + xticklabels=[str(i) for i in range(0, 409, 68)], + yticks=np.linspace(0, 439, 9), + yticklabels=[str(i) for i in range(0, 441, 55)], + vmin=0, 
+ vmax=15, + colorbar_label="m/s", + num_timestamps=48, + ) + logger.info(f"Save plot to {cfg.output_dir}") + + +@hydra.main( + version_base=None, config_path="./conf", config_name="yinglong_eastern.yaml" +) +def main(cfg: DictConfig): + if cfg.mode == "infer": + inference(cfg) + else: + raise ValueError(f"cfg.mode should in ['infer'], but got '{cfg.mode}'") + + +if __name__ == "__main__": + main() diff --git a/examples/yinglong/predictor.py b/examples/yinglong/predictor.py index 04ca1fb615..0cbc9786ac 100644 --- a/examples/yinglong/predictor.py +++ b/examples/yinglong/predictor.py @@ -1,165 +1,165 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from typing import List -from typing import Tuple - -import numpy as np -import pandas as pd -from omegaconf import DictConfig - -from deploy.python_infer import base -from examples.yinglong.timefeatures import time_features -from ppsci.utils import logger - - -class YingLongPredictor(base.Predictor): - """General predictor for YingLong model. - - Args: - cfg (DictConfig): Running configuration. - """ - - def __init__( - self, - cfg: DictConfig, - ): - super().__init__( - cfg.INFER.pdmodel_path, - cfg.INFER.pdiparams_path, - device=cfg.INFER.device, - engine=cfg.INFER.engine, - precision=cfg.INFER.precision, - onnx_path=cfg.INFER.onnx_path, - ir_optim=cfg.INFER.ir_optim, - min_subgraph_size=cfg.INFER.min_subgraph_size, - gpu_mem=cfg.INFER.gpu_mem, - gpu_id=cfg.INFER.gpu_id, - max_batch_size=cfg.INFER.max_batch_size, - num_cpu_threads=cfg.INFER.num_cpu_threads, - ) - self.log_freq = cfg.log_freq - - # get input names and data handles - self.input_names = self.predictor.get_input_names() - self.input_data_handle = self.predictor.get_input_handle(self.input_names[0]) - self.time_stamps_handle = self.predictor.get_input_handle(self.input_names[1]) - self.nwp_data_handle = self.predictor.get_input_handle(self.input_names[2]) - self.geo_data_handle = self.predictor.get_input_handle(self.input_names[3]) - - # get output names and data handles - self.output_names = self.predictor.get_output_names() - self.output_handle = self.predictor.get_output_handle(self.output_names[0]) - - # load mean and std data - self.mean = np.load(cfg.INFER.mean_path).reshape(-1, 1, 1).astype("float32") - self.std = np.load(cfg.INFER.std_path).reshape(-1, 1, 1).astype("float32") - - def _preprocess_data( - self, - input_data: np.ndarray, - time_stamps: List[List[pd.Timestamp]], - nwp_data: np.ndarray, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - # normalize data - input_data = (input_data - self.mean) / self.std - nwp_data = (nwp_data - self.mean) / self.std - - # process time stamps - for i in range(len(time_stamps)): - time_stamps[i] = pd.DataFrame({"date": time_stamps[i]}) - time_stamps[i] = time_features(time_stamps[i], timeenc=1, freq="h").astype( - np.float32 - ) - time_stamps = np.asarray(time_stamps) - return input_data, time_stamps, nwp_data - - def _postprocess_data(self, data: np.ndarray): - # 
denormalize data - data = data * self.std + self.mean - return data - - def predict( - self, - input_data: np.ndarray, - time_stamps: List[List[pd.Timestamp]], - nwp_data: np.ndarray, - geo_data: np.ndarray, - batch_size: int = 1, - ) -> np.ndarray: - """Predicts the output of the yinglong model for the given input. - - Args: - input_data (np.ndarray): Input data of shape (N, T, H, W). - time_stamps (List[List[pd.Timestamp]]): Timestamps data. - nwp_data (np.ndarray): NWP data. - geo_data (np.ndarray): Geographic data. - batch_size (int, optional): Batch size, now only support 1. Defaults to 1. - - Returns: - np.ndarray: Prediction. - """ - if batch_size != 1: - raise ValueError( - f"YingLongPredictor only support batch_size=1, but got {batch_size}" - ) - - # prepare input handle(s) - input_handles = { - self.input_names[0]: self.input_data_handle, - self.input_names[1]: self.time_stamps_handle, - self.input_names[2]: self.nwp_data_handle, - self.input_names[3]: self.geo_data_handle, - } - # prepare output handle(s) - output_handles = {self.output_names[0]: self.output_handle} - - num_samples = len(input_data) - if num_samples != 1: - raise ValueError( - f"YingLongPredictor only support num_samples=1, but got {num_samples}" - ) - - batch_num = 1 - - # inference by batch - for batch_id in range(1, batch_num + 1): - if batch_id % self.log_freq == 0 or batch_id == batch_num: - logger.info(f"Predicting batch {batch_id}/{batch_num}") - - # preprocess data - input_data, time_stamps, nwp_data = self._preprocess_data( - input_data, time_stamps, nwp_data - ) - # prepare batch input dict - batch_input_dict = { - self.input_names[0]: input_data, - self.input_names[1]: time_stamps, - self.input_names[2]: nwp_data, - self.input_names[3]: geo_data, - } - - # send batch input data to input handle(s) - for name, handle in input_handles.items(): - handle.copy_from_cpu(batch_input_dict[name]) - - # run predictor - self.predictor.run() - - # receive batch output data from output handle(s) - pred = output_handles[self.output_names[0]].copy_to_cpu() - pred = self._postprocess_data(pred) - - return pred +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import List +from typing import Tuple + +import numpy as np +import pandas as pd +from omegaconf import DictConfig + +from deploy.python_infer import base +from examples.yinglong.timefeatures import time_features +from ppsci.utils import logger + + +class YingLongPredictor(base.Predictor): + """General predictor for YingLong model. + + Args: + cfg (DictConfig): Running configuration. 
+ """ + + def __init__( + self, + cfg: DictConfig, + ): + super().__init__( + cfg.INFER.pdmodel_path, + cfg.INFER.pdiparams_path, + device=cfg.INFER.device, + engine=cfg.INFER.engine, + precision=cfg.INFER.precision, + onnx_path=cfg.INFER.onnx_path, + ir_optim=cfg.INFER.ir_optim, + min_subgraph_size=cfg.INFER.min_subgraph_size, + gpu_mem=cfg.INFER.gpu_mem, + gpu_id=cfg.INFER.gpu_id, + max_batch_size=cfg.INFER.max_batch_size, + num_cpu_threads=cfg.INFER.num_cpu_threads, + ) + self.log_freq = cfg.log_freq + + # get input names and data handles + self.input_names = self.predictor.get_input_names() + self.input_data_handle = self.predictor.get_input_handle(self.input_names[0]) + self.time_stamps_handle = self.predictor.get_input_handle(self.input_names[1]) + self.nwp_data_handle = self.predictor.get_input_handle(self.input_names[2]) + self.geo_data_handle = self.predictor.get_input_handle(self.input_names[3]) + + # get output names and data handles + self.output_names = self.predictor.get_output_names() + self.output_handle = self.predictor.get_output_handle(self.output_names[0]) + + # load mean and std data + self.mean = np.load(cfg.INFER.mean_path).reshape(-1, 1, 1).astype("float32") + self.std = np.load(cfg.INFER.std_path).reshape(-1, 1, 1).astype("float32") + + def _preprocess_data( + self, + input_data: np.ndarray, + time_stamps: List[List[pd.Timestamp]], + nwp_data: np.ndarray, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + # normalize data + input_data = (input_data - self.mean) / self.std + nwp_data = (nwp_data - self.mean) / self.std + + # process time stamps + for i in range(len(time_stamps)): + time_stamps[i] = pd.DataFrame({"date": time_stamps[i]}) + time_stamps[i] = time_features(time_stamps[i], timeenc=1, freq="h").astype( + np.float32 + ) + time_stamps = np.asarray(time_stamps) + return input_data, time_stamps, nwp_data + + def _postprocess_data(self, data: np.ndarray): + # denormalize data + data = data * self.std + self.mean + return data + + def predict( + self, + input_data: np.ndarray, + time_stamps: List[List[pd.Timestamp]], + nwp_data: np.ndarray, + geo_data: np.ndarray, + batch_size: int = 1, + ) -> np.ndarray: + """Predicts the output of the yinglong model for the given input. + + Args: + input_data (np.ndarray): Input data of shape (N, T, H, W). + time_stamps (List[List[pd.Timestamp]]): Timestamps data. + nwp_data (np.ndarray): NWP data. + geo_data (np.ndarray): Geographic data. + batch_size (int, optional): Batch size, now only support 1. Defaults to 1. + + Returns: + np.ndarray: Prediction. 
+ """ + if batch_size != 1: + raise ValueError( + f"YingLongPredictor only support batch_size=1, but got {batch_size}" + ) + + # prepare input handle(s) + input_handles = { + self.input_names[0]: self.input_data_handle, + self.input_names[1]: self.time_stamps_handle, + self.input_names[2]: self.nwp_data_handle, + self.input_names[3]: self.geo_data_handle, + } + # prepare output handle(s) + output_handles = {self.output_names[0]: self.output_handle} + + num_samples = len(input_data) + if num_samples != 1: + raise ValueError( + f"YingLongPredictor only support num_samples=1, but got {num_samples}" + ) + + batch_num = 1 + + # inference by batch + for batch_id in range(1, batch_num + 1): + if batch_id % self.log_freq == 0 or batch_id == batch_num: + logger.info(f"Predicting batch {batch_id}/{batch_num}") + + # preprocess data + input_data, time_stamps, nwp_data = self._preprocess_data( + input_data, time_stamps, nwp_data + ) + # prepare batch input dict + batch_input_dict = { + self.input_names[0]: input_data, + self.input_names[1]: time_stamps, + self.input_names[2]: nwp_data, + self.input_names[3]: geo_data, + } + + # send batch input data to input handle(s) + for name, handle in input_handles.items(): + handle.copy_from_cpu(batch_input_dict[name]) + + # run predictor + self.predictor.run() + + # receive batch output data from output handle(s) + pred = output_handles[self.output_names[0]].copy_to_cpu() + pred = self._postprocess_data(pred) + + return pred diff --git a/examples/yinglong/timefeatures.py b/examples/yinglong/timefeatures.py index 6f0fef6dae..39ab458efb 100644 --- a/examples/yinglong/timefeatures.py +++ b/examples/yinglong/timefeatures.py @@ -1,177 +1,177 @@ -# This code is reference from https://github.com/zhouhaoyi/Informer2020 -from typing import List - -import numpy as np -import pandas as pd -from pandas.tseries import offsets -from pandas.tseries.frequencies import to_offset - - -class TimeFeature: - def __init__(self): - pass - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - pass - - def __repr__(self): - return self.__class__.__name__ + "()" - - -class SecondOfMinute(TimeFeature): - """Minute of hour encoded as value between [-0.5, 0.5]""" - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - return index.second / 59.0 - 0.5 - - -class MinuteOfHour(TimeFeature): - """Minute of hour encoded as value between [-0.5, 0.5]""" - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - return index.minute / 59.0 - 0.5 - - -class HourOfDay(TimeFeature): - """Hour of day encoded as value between [-0.5, 0.5]""" - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - return index.hour / 23.0 - 0.5 - - -class DayOfWeek(TimeFeature): - """Hour of day encoded as value between [-0.5, 0.5]""" - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - return index.dayofweek / 6.0 - 0.5 - - -class DayOfMonth(TimeFeature): - """Day of month encoded as value between [-0.5, 0.5]""" - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - return (index.day - 1) / 30.0 - 0.5 - - -class DayOfYear(TimeFeature): - """Day of year encoded as value between [-0.5, 0.5]""" - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - return (index.dayofyear - 1) / 365.0 - 0.5 - - -class MonthOfYear(TimeFeature): - """Month of year encoded as value between [-0.5, 0.5]""" - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - return (index.month - 1) / 11.0 - 0.5 - - -class WeekOfYear(TimeFeature): - """Week of year encoded as value 
between [-0.5, 0.5]""" - - def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: - return (index.week - 1) / 52.0 - 0.5 - - -def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: - """ - Returns a list of time features that will be appropriate for the given frequency string. - Parameters - ---------- - freq_str - Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc. - """ - - features_by_offsets = { - offsets.YearEnd: [], - offsets.QuarterEnd: [MonthOfYear], - offsets.MonthEnd: [MonthOfYear], - offsets.Week: [DayOfMonth, WeekOfYear], - offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], - offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear], - offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear], - offsets.Minute: [ - MinuteOfHour, - HourOfDay, - DayOfWeek, - DayOfMonth, - DayOfYear, - ], - offsets.Second: [ - SecondOfMinute, - MinuteOfHour, - HourOfDay, - DayOfWeek, - DayOfMonth, - DayOfYear, - ], - } - - offset = to_offset(freq_str) - - for offset_type, feature_classes in features_by_offsets.items(): - if isinstance(offset, offset_type): - return [cls() for cls in feature_classes] - - supported_freq_msg = f""" - Unsupported frequency {freq_str} - The following frequencies are supported: - Y - yearly - alias: A - M - monthly - W - weekly - D - daily - B - business days - H - hourly - T - minutely - alias: min - S - secondly - """ - raise RuntimeError(supported_freq_msg) - - -def time_features(dates, timeenc=1, freq="h"): - """ - > `time_features` takes in a `dates` dataframe with a 'dates' column and extracts the date down to `freq` where freq can be any of the following if `timeenc` is 0: - > * m - [month] - > * w - [month] - > * d - [month, day, weekday] - > * b - [month, day, weekday] - > * h - [month, day, weekday, hour] - > * t - [month, day, weekday, hour, *minute] - > - > If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]): - > * Q - [month] - > * M - [month] - > * W - [Day of month, week of year] - > * D - [Day of week, day of month, day of year] - > * B - [Day of week, day of month, day of year] - > * H - [Hour of day, day of week, day of month, day of year] - > * T - [Minute of hour*, hour of day, day of week, day of month, day of year] - > * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year] - - *minute returns a number from 0-3 corresponding to the 15 minute period it falls into. 
- """ - if timeenc == 0: - dates["month"] = dates.date.apply(lambda row: row.month, 1) - dates["day"] = dates.date.apply(lambda row: row.day, 1) - dates["weekday"] = dates.date.apply(lambda row: row.weekday(), 1) - dates["hour"] = dates.date.apply(lambda row: row.hour, 1) - dates["minute"] = dates.date.apply(lambda row: row.minute, 1) - dates["minute"] = dates.minute.map(lambda x: x // 15) - freq_map = { - "y": [], - "m": ["month"], - "w": ["month"], - "d": ["month", "day", "weekday"], - "b": ["month", "day", "weekday"], - "h": ["month", "day", "weekday", "hour"], - "t": ["month", "day", "weekday", "hour", "minute"], - } - return dates[freq_map[freq.lower()]].values - if timeenc == 1: - dates = pd.to_datetime(dates.date.values) - return np.vstack( - [feat(dates) for feat in time_features_from_frequency_str(freq)] - ).transpose(1, 0) +# This code is reference from https://github.com/zhouhaoyi/Informer2020 +from typing import List + +import numpy as np +import pandas as pd +from pandas.tseries import offsets +from pandas.tseries.frequencies import to_offset + + +class TimeFeature: + def __init__(self): + pass + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + pass + + def __repr__(self): + return self.__class__.__name__ + "()" + + +class SecondOfMinute(TimeFeature): + """Minute of hour encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return index.second / 59.0 - 0.5 + + +class MinuteOfHour(TimeFeature): + """Minute of hour encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return index.minute / 59.0 - 0.5 + + +class HourOfDay(TimeFeature): + """Hour of day encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return index.hour / 23.0 - 0.5 + + +class DayOfWeek(TimeFeature): + """Hour of day encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return index.dayofweek / 6.0 - 0.5 + + +class DayOfMonth(TimeFeature): + """Day of month encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return (index.day - 1) / 30.0 - 0.5 + + +class DayOfYear(TimeFeature): + """Day of year encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return (index.dayofyear - 1) / 365.0 - 0.5 + + +class MonthOfYear(TimeFeature): + """Month of year encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return (index.month - 1) / 11.0 - 0.5 + + +class WeekOfYear(TimeFeature): + """Week of year encoded as value between [-0.5, 0.5]""" + + def __call__(self, index: pd.DatetimeIndex) -> np.ndarray: + return (index.week - 1) / 52.0 - 0.5 + + +def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]: + """ + Returns a list of time features that will be appropriate for the given frequency string. + Parameters + ---------- + freq_str + Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc. 
+ """ + + features_by_offsets = { + offsets.YearEnd: [], + offsets.QuarterEnd: [MonthOfYear], + offsets.MonthEnd: [MonthOfYear], + offsets.Week: [DayOfMonth, WeekOfYear], + offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear], + offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear], + offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear], + offsets.Minute: [ + MinuteOfHour, + HourOfDay, + DayOfWeek, + DayOfMonth, + DayOfYear, + ], + offsets.Second: [ + SecondOfMinute, + MinuteOfHour, + HourOfDay, + DayOfWeek, + DayOfMonth, + DayOfYear, + ], + } + + offset = to_offset(freq_str) + + for offset_type, feature_classes in features_by_offsets.items(): + if isinstance(offset, offset_type): + return [cls() for cls in feature_classes] + + supported_freq_msg = f""" + Unsupported frequency {freq_str} + The following frequencies are supported: + Y - yearly + alias: A + M - monthly + W - weekly + D - daily + B - business days + H - hourly + T - minutely + alias: min + S - secondly + """ + raise RuntimeError(supported_freq_msg) + + +def time_features(dates, timeenc=1, freq="h"): + """ + > `time_features` takes in a `dates` dataframe with a 'dates' column and extracts the date down to `freq` where freq can be any of the following if `timeenc` is 0: + > * m - [month] + > * w - [month] + > * d - [month, day, weekday] + > * b - [month, day, weekday] + > * h - [month, day, weekday, hour] + > * t - [month, day, weekday, hour, *minute] + > + > If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]): + > * Q - [month] + > * M - [month] + > * W - [Day of month, week of year] + > * D - [Day of week, day of month, day of year] + > * B - [Day of week, day of month, day of year] + > * H - [Hour of day, day of week, day of month, day of year] + > * T - [Minute of hour*, hour of day, day of week, day of month, day of year] + > * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year] + + *minute returns a number from 0-3 corresponding to the 15 minute period it falls into. + """ + if timeenc == 0: + dates["month"] = dates.date.apply(lambda row: row.month, 1) + dates["day"] = dates.date.apply(lambda row: row.day, 1) + dates["weekday"] = dates.date.apply(lambda row: row.weekday(), 1) + dates["hour"] = dates.date.apply(lambda row: row.hour, 1) + dates["minute"] = dates.date.apply(lambda row: row.minute, 1) + dates["minute"] = dates.minute.map(lambda x: x // 15) + freq_map = { + "y": [], + "m": ["month"], + "w": ["month"], + "d": ["month", "day", "weekday"], + "b": ["month", "day", "weekday"], + "h": ["month", "day", "weekday", "hour"], + "t": ["month", "day", "weekday", "hour", "minute"], + } + return dates[freq_map[freq.lower()]].values + if timeenc == 1: + dates = pd.to_datetime(dates.date.values) + return np.vstack( + [feat(dates) for feat in time_features_from_frequency_str(freq)] + ).transpose(1, 0) diff --git a/jointContribution/CFDGCN/coarse.cfg b/jointContribution/CFDGCN/coarse.cfg index d73d67ff97..16d51fcb02 100644 --- a/jointContribution/CFDGCN/coarse.cfg +++ b/jointContribution/CFDGCN/coarse.cfg @@ -1,357 +1,357 @@ -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% % -% SU2 configuration file % -% Case description: Transonic inviscid flow around a NACA0012 airfoil % -% Author: Thomas D. 
Economon % -% Institution: Stanford University % -% Date: 2014.06.11 % -% File Version 6.2.0 "Falcon" % -% % -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -% ------------- DIRECT, ADJOINT, AND LINEARIZED PROBLEM DEFINITION ------------% -% -% Physical governing equations (EULER, NAVIER_STOKES, -% WAVE_EQUATION, HEAT_EQUATION, FEM_ELASTICITY, -% POISSON_EQUATION) -PHYSICAL_PROBLEM= EULER -% -% Mathematical problem (DIRECT, CONTINUOUS_ADJOINT) -MATH_PROBLEM= DIRECT -% -% Restart solution (NO, YES) -RESTART_SOL= NO - -% ----------- COMPRESSIBLE AND INCOMPRESSIBLE FREE-STREAM DEFINITION ----------% -% -% Mach number (non-dimensional, based on the free-stream values) -MACH_NUMBER= 0.8 -% -% Angle of attack (degrees) -AOA= 1.25 -% -% Free-stream pressure (101325.0 N/m^2 by default, only Euler flows) -FREESTREAM_PRESSURE= 101325.0 -% -% Free-stream temperature (273.15 K by default) -FREESTREAM_TEMPERATURE= 273.15 - -% -------------- COMPRESSIBLE AND INCOMPRESSIBLE FLUID CONSTANTS --------------% -% -% Ratio of specific heats (1.4 (air), only for compressible flows) -GAMMA_VALUE= 1.4 -% -% Specific gas constant (287.87 J/kg*K (air), only for compressible flows) -GAS_CONSTANT= 287.87 - -% ---------------------- REFERENCE VALUE DEFINITION ---------------------------% -% -% Reference origin for moment computation -REF_ORIGIN_MOMENT_X = 0.25 -REF_ORIGIN_MOMENT_Y = 0.00 -REF_ORIGIN_MOMENT_Z = 0.00 -% -% Reference length for pitching, rolling, and yawing non-dimensional moment -REF_LENGTH= 1.0 -% -% Reference area for force coefficients (0 implies automatic calculation) -REF_AREA= 1.0 -% -% Flow non-dimensionalization (DIMENSIONAL, FREESTREAM_PRESS_EQ_ONE, -% FREESTREAM_VEL_EQ_MACH, FREESTREAM_VEL_EQ_ONE) -REF_DIMENSIONALIZATION= DIMENSIONAL - -% ----------------------- BOUNDARY CONDITION DEFINITION -----------------------% -% -% Marker of the Euler boundary (NONE = no marker) -MARKER_EULER= ( airfoil ) -% -% Marker of the far field (NONE = no marker) -MARKER_FAR= ( farfield ) - -% ------------------------ SURFACES IDENTIFICATION ----------------------------% -% -% Marker(s) of the surface in the surface flow solution file -MARKER_PLOTTING = ( airfoil ) -% -% Marker(s) of the surface where the non-dimensional coefficients are evaluated. -MARKER_MONITORING = ( airfoil ) -% -% Marker(s) of the surface where obj. func. 
(design problem) will be evaluated -MARKER_DESIGNING = ( airfoil ) - -% ------------- COMMON PARAMETERS TO DEFINE THE NUMERICAL METHOD --------------% -% -% Numerical method for spatial gradients (GREEN_GAUSS, WEIGHTED_LEAST_SQUARES) -NUM_METHOD_GRAD= WEIGHTED_LEAST_SQUARES -% -% Objective function in optimization problem (DRAG, LIFT, SIDEFORCE, MOMENT_X, -% MOMENT_Y, MOMENT_Z, EFFICIENCY, -% EQUIVALENT_AREA, NEARFIELD_PRESSURE, -% FORCE_X, FORCE_Y, FORCE_Z, THRUST, -% TORQUE, FREE_SURFACE, TOTAL_HEATFLUX, -% MAXIMUM_HEATFLUX, INVERSE_DESIGN_PRESSURE, -% INVERSE_DESIGN_HEATFLUX) -% OBJECTIVE_FUNCTION= DRAG -% -% Courant-Friedrichs-Lewy condition of the finest grid -%CFL_NUMBER= 4.0 -CFL_NUMBER= 1.0 -%CFL_NUMBER=0.1 -% -% Number of total iterations -EXT_ITER=200 -ITER= 200 - -% ------------------------ LINEAR SOLVER DEFINITION ---------------------------% -% -% Linear solver for implicit formulations (BCGSTAB, FGMRES) -LINEAR_SOLVER= FGMRES -% -% Preconditioner of the Krylov linear solver (JACOBI, LINELET, LU_SGS) -LINEAR_SOLVER_PREC= LU_SGS -% -% Minimum error of the linear solver for implicit formulations -LINEAR_SOLVER_ERROR= 1E-6 -% -% Max number of iterations of the linear solver for the implicit formulation -LINEAR_SOLVER_ITER= 5 - -% -------------------------- MULTIGRID PARAMETERS -----------------------------% -% -% Multi-Grid Levels (0 = no multi-grid) -MGLEVEL= 2 -% -% Multi-grid cycle (V_CYCLE, W_CYCLE, FULLMG_CYCLE) -MGCYCLE= W_CYCLE -% -% Multi-Grid PreSmoothing Level -MG_PRE_SMOOTH= ( 1, 2, 3, 3 ) -% -% Multi-Grid PostSmoothing Level -MG_POST_SMOOTH= ( 0, 0, 0, 0 ) -% -% Jacobi implicit smoothing of the correction -MG_CORRECTION_SMOOTH= ( 0, 0, 0, 0 ) -% -% Damping factor for the residual restriction -MG_DAMP_RESTRICTION= 1.0 -% -% Damping factor for the correction prolongation -MG_DAMP_PROLONGATION= 1.0 - -% -------------------- FLOW NUMERICAL METHOD DEFINITION -----------------------% -% -% Convective numerical method (JST, LAX-FRIEDRICH, CUSP, ROE, AUSM, HLLC, -% TURKEL_PREC, MSW) -CONV_NUM_METHOD_FLOW= JST -% -% Monotonic Upwind Scheme for Conservation Laws (TVD) in the flow equations. -% Required for 2nd order upwind schemes (NO, YES) -MUSCL_FLOW= YES -% -% Slope limiter (NONE, VENKATAKRISHNAN, VENKATAKRISHNAN_WANG, -% BARTH_JESPERSEN, VAN_ALBADA_EDGE) -SLOPE_LIMITER_FLOW= VENKATAKRISHNAN -% -% 2nd and 4th order artificial dissipation coefficients -JST_SENSOR_COEFF= ( 0.5, 0.02 ) -% -% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT, EULER_EXPLICIT) -TIME_DISCRE_FLOW= EULER_IMPLICIT - -% ---------------- ADJOINT-FLOW NUMERICAL METHOD DEFINITION -------------------% -% -% Convective numerical method (JST, LAX-FRIEDRICH, ROE) -CONV_NUM_METHOD_ADJFLOW= JST -% -% Monotonic Upwind Scheme for Conservation Laws (TVD) in the adjoint flow equations. 
-% Required for 2nd order upwind schemes (NO, YES) -MUSCL_ADJFLOW= YES -% -% Slope limiter (NONE, VENKATAKRISHNAN, BARTH_JESPERSEN, VAN_ALBADA_EDGE, -% SHARP_EDGES, WALL_DISTANCE) -SLOPE_LIMITER_ADJFLOW= NONE -% -% Reduction factor of the CFL coefficient in the adjoint problem -CFL_REDUCTION_ADJFLOW= 0.5 -% -% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT) -TIME_DISCRE_ADJFLOW= EULER_IMPLICIT - -% ----------------------- DESIGN VARIABLE PARAMETERS --------------------------% -% -% Kind of deformation (NO_DEFORMATION, TRANSLATION, ROTATION, SCALE, -% FFD_SETTING, FFD_NACELLE -% FFD_CONTROL_POINT, FFD_CAMBER, FFD_THICKNESS, FFD_TWIST -% FFD_CONTROL_POINT_2D, FFD_CAMBER_2D, FFD_THICKNESS_2D, FFD_TWIST_2D, -% HICKS_HENNE, SURFACE_BUMP) -DV_KIND= HICKS_HENNE -% -% Marker of the surface in which we are going apply the shape deformation -DV_MARKER= ( airfoil ) -% -% Parameters of the shape deformation -% - NO_DEFORMATION ( 1.0 ) -% - TRANSLATION ( x_Disp, y_Disp, z_Disp ), as a unit vector -% - ROTATION ( x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) -% - SCALE ( 1.0 ) -% - ANGLE_OF_ATTACK ( 1.0 ) -% - FFD_SETTING ( 1.0 ) -% - FFD_CONTROL_POINT ( FFD_BoxTag, i_Ind, j_Ind, k_Ind, x_Disp, y_Disp, z_Disp ) -% - FFD_NACELLE ( FFD_BoxTag, rho_Ind, theta_Ind, phi_Ind, rho_Disp, phi_Disp ) -% - FFD_GULL ( FFD_BoxTag, j_Ind ) -% - FFD_ANGLE_OF_ATTACK ( FFD_BoxTag, 1.0 ) -% - FFD_CAMBER ( FFD_BoxTag, i_Ind, j_Ind ) -% - FFD_THICKNESS ( FFD_BoxTag, i_Ind, j_Ind ) -% - FFD_TWIST ( FFD_BoxTag, j_Ind, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) -% - FFD_CONTROL_POINT_2D ( FFD_BoxTag, i_Ind, j_Ind, x_Disp, y_Disp ) -% - FFD_CAMBER_2D ( FFD_BoxTag, i_Ind ) -% - FFD_THICKNESS_2D ( FFD_BoxTag, i_Ind ) -% - FFD_TWIST_2D ( FFD_BoxTag, x_Orig, y_Orig ) -% - HICKS_HENNE ( Lower Surface (0)/Upper Surface (1)/Only one Surface (2), x_Loc ) -% - SURFACE_BUMP ( x_Start, x_End, x_Loc ) -DV_PARAM= ( 1, 0.5 ) -% -% Value of the shape deformation -DV_VALUE= 0.01 - -% ------------------------ GRID DEFORMATION PARAMETERS ------------------------% -% -% Number of smoothing iterations for FEA mesh deformation -DEFORM_LINEAR_ITER= 500 -% -% Number of nonlinear deformation iterations (surface deformation increments) -DEFORM_NONLINEAR_ITER= 1 -% -% Minimum residual criteria for the linear solver convergence of grid deformation -DEFORM_LINEAR_SOLVER_ERROR= 1E-14 -% -% Print the residuals during mesh deformation to the console (YES, NO) -DEFORM_CONSOLE_OUTPUT= YES -% -% Type of element stiffness imposed for FEA mesh deformation (INVERSE_VOLUME, -% WALL_DISTANCE, CONSTANT_STIFFNESS) -DEFORM_STIFFNESS_TYPE= INVERSE_VOLUME -% -% Visualize the surface deformation (NO, YES) -VISUALIZE_SURFACE_DEF= NO -% -% Visualize the volume deformation (NO, YES) -VISUALIZE_VOLUME_DEF= NO - -% --------------------------- CONVERGENCE PARAMETERS --------------------------% -% Convergence criteria (CAUCHY, RESIDUAL) -% -CONV_CRITERIA= RESIDUAL -% -% Residual reduction (order of magnitude with respect to the initial value) -RESIDUAL_REDUCTION= 6 -% -% Min value of the residual (log10 of the residual) -RESIDUAL_MINVAL= -8 -% -% Start Cauchy criteria at iteration number -STARTCONV_ITER= 10 -% -% Number of elements to apply the criteria -CAUCHY_ELEMS= 100 -% -% Epsilon to control the series convergence -CAUCHY_EPS= 1E-6 -% -% Function to apply the criteria (LIFT, DRAG, SENS_GEOMETRY, SENS_MACH, -% DELTA_LIFT, DELTA_DRAG) -CAUCHY_FUNC_FLOW= DRAG - -% ------------------------- INPUT/OUTPUT INFORMATION --------------------------% -% Mesh input file 
-%MESH_FILENAME=mesh_NACA0012_xcoarse.su2 -MESH_FILENAME=passed_as_flag_to_train.py.su2 -% -% Mesh input file format (SU2, CGNS, NETCDF_ASCII) -MESH_FORMAT= SU2 -% -% Mesh output file -MESH_OUT_FILENAME= mesh_out.su2 -% -% Restart flow input file -SOLUTION_FLOW_FILENAME= solution_flow.dat -% -% Restart adjoint input file -SOLUTION_ADJ_FILENAME= solution_adj.dat -% -% Output file format (TECPLOT, PARAVIEW, TECPLOT_BINARY) -%OUTPUT_FORMAT= TECPLOT_BINARY -% -% Output file convergence history (w/o extension) -CONV_FILENAME= history -% -% Output file restart flow -RESTART_FLOW_FILENAME= restart_flow.dat -% -% Output file restart adjoint -RESTART_ADJ_FILENAME= restart_adj.dat -% -% Output file flow (w/o extension) variables -VOLUME_FLOW_FILENAME= flow -% -% Output file adjoint (w/o extension) variables -VOLUME_ADJ_FILENAME= adjoint -% -% Output Objective function gradient (using continuous adjoint) -GRAD_OBJFUNC_FILENAME= of_grad.dat -% -% Output file surface flow coefficient (w/o extension) -SURFACE_FLOW_FILENAME= surface_flow -% -% Output file surface adjoint coefficient (w/o extension) -SURFACE_ADJ_FILENAME= surface_adjoint -% -% Writing solution file frequency -WRT_SOL_FREQ= 1000 -% -% Writing convergence history frequency -WRT_CON_FREQ= 1000 - -% --------------------- OPTIMAL SHAPE DESIGN DEFINITION -----------------------% -% Available flow based objective functions or constraint functions -% DRAG, LIFT, SIDEFORCE, EFFICIENCY, -% FORCE_X, FORCE_Y, FORCE_Z, -% MOMENT_X, MOMENT_Y, MOMENT_Z, -% THRUST, TORQUE, FIGURE_OF_MERIT, -% EQUIVALENT_AREA, NEARFIELD_PRESSURE, -% TOTAL_HEATFLUX, MAXIMUM_HEATFLUX, -% INVERSE_DESIGN_PRESSURE, INVERSE_DESIGN_HEATFLUX, -% -% Available geometrical based objective functions or constraint functions -% AIRFOIL_AREA, AIRFOIL_THICKNESS, AIRFOIL_CHORD, AIRFOIL_TOC, AIRFOIL_AOA, -% WING_VOLUME, WING_MIN_THICKNESS, WING_MAX_THICKNESS, WING_MAX_CHORD, WING_MIN_TOC, WING_MAX_TWIST, WING_MAX_CURVATURE, WING_MAX_DIHEDRAL -% STATION#_WIDTH, STATION#_AREA, STATION#_THICKNESS, STATION#_CHORD, STATION#_TOC, -% STATION#_TWIST (where # is the index of the station defined in GEO_LOCATION_STATIONS) -% -% Available design variables -% HICKS_HENNE ( 1, Scale | Mark. List | Lower(0)/Upper(1) side, x_Loc ) -% NACA_4DIGITS ( 4, Scale | Mark. List | 1st digit, 2nd digit, 3rd and 4th digit ) -% TRANSLATION ( 5, Scale | Mark. List | x_Disp, y_Disp, z_Disp ) -% ROTATION ( 6, Scale | Mark. List | x_Axis, y_Axis, z_Axis, x_Turn, y_Turn, z_Turn ) -% FFD_CONTROL_POINT_2D ( 15, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, x_Mov, y_Mov ) -% FFD_CAMBER_2D ( 16, Scale | Mark. List | FFD_Box_ID, i_Ind ) -% FFD_THICKNESS_2D ( 17, Scale | Mark. 
List | FFD_Box_ID, i_Ind ) -% -% Optimization objective function with scaling factor -% ex= Objective * Scale -% OPT_OBJECTIVE= DRAG * 0.001 -% -% Optimization constraint functions with scaling factors, separated by semicolons -% ex= (Objective = Value ) * Scale, use '>','<','=' -% OPT_CONSTRAINT= ( LIFT > 0.328188 ) * 0.001; ( MOMENT_Z > 0.034068 ) * 0.001; ( AIRFOIL_THICKNESS > 0.11 ) * 0.001 -% -% Optimization design variables, separated by semicolons -% DEFINITION_DV= ( 1, 1.0 | airfoil | 0, 0.05 ); ( 1, 1.0 | airfoil | 0, 0.10 ); ( 1, 1.0 | airfoil | 0, 0.15 ); ( 1, 1.0 | airfoil | 0, 0.20 ); ( 1, 1.0 | airfoil | 0, 0.25 ); ( 1, 1.0 | airfoil | 0, 0.30 ); ( 1, 1.0 | airfoil | 0, 0.35 ); ( 1, 1.0 | airfoil | 0, 0.40 ); ( 1, 1.0 | airfoil | 0, 0.45 ); ( 1, 1.0 | airfoil | 0, 0.50 ); ( 1, 1.0 | airfoil | 0, 0.55 ); ( 1, 1.0 | airfoil | 0, 0.60 ); ( 1, 1.0 | airfoil | 0, 0.65 ); ( 1, 1.0 | airfoil | 0, 0.70 ); ( 1, 1.0 | airfoil | 0, 0.75 ); ( 1, 1.0 | airfoil | 0, 0.80 ); ( 1, 1.0 | airfoil | 0, 0.85 ); ( 1, 1.0 | airfoil | 0, 0.90 ); ( 1, 1.0 | airfoil | 0, 0.95 ); ( 1, 1.0 | airfoil | 1, 0.05 ); ( 1, 1.0 | airfoil | 1, 0.10 ); ( 1, 1.0 | airfoil | 1, 0.15 ); ( 1, 1.0 | airfoil | 1, 0.20 ); ( 1, 1.0 | airfoil | 1, 0.25 ); ( 1, 1.0 | airfoil | 1, 0.30 ); ( 1, 1.0 | airfoil | 1, 0.35 ); ( 1, 1.0 | airfoil | 1, 0.40 ); ( 1, 1.0 | airfoil | 1, 0.45 ); ( 1, 1.0 | airfoil | 1, 0.50 ); ( 1, 1.0 | airfoil | 1, 0.55 ); ( 1, 1.0 | airfoil | 1, 0.60 ); ( 1, 1.0 | airfoil | 1, 0.65 ); ( 1, 1.0 | airfoil | 1, 0.70 ); ( 1, 1.0 | airfoil | 1, 0.75 ); ( 1, 1.0 | airfoil | 1, 0.80 ); ( 1, 1.0 | airfoil | 1, 0.85 ); ( 1, 1.0 | airfoil | 1, 0.90 ); ( 1, 1.0 | airfoil | 1, 0.95 ) - - - - -DIFF_INPUTS= COORDS_X, COORDS_Y, AOA, MACH -DIFF_OUTPUTS= VEL_X, VEL_Y, PRESSURE +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% SU2 configuration file % +% Case description: Transonic inviscid flow around a NACA0012 airfoil % +% Author: Thomas D. 
Economon % +% Institution: Stanford University % +% Date: 2014.06.11 % +% File Version 6.2.0 "Falcon" % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ------------- DIRECT, ADJOINT, AND LINEARIZED PROBLEM DEFINITION ------------% +% +% Physical governing equations (EULER, NAVIER_STOKES, +% WAVE_EQUATION, HEAT_EQUATION, FEM_ELASTICITY, +% POISSON_EQUATION) +PHYSICAL_PROBLEM= EULER +% +% Mathematical problem (DIRECT, CONTINUOUS_ADJOINT) +MATH_PROBLEM= DIRECT +% +% Restart solution (NO, YES) +RESTART_SOL= NO + +% ----------- COMPRESSIBLE AND INCOMPRESSIBLE FREE-STREAM DEFINITION ----------% +% +% Mach number (non-dimensional, based on the free-stream values) +MACH_NUMBER= 0.8 +% +% Angle of attack (degrees) +AOA= 1.25 +% +% Free-stream pressure (101325.0 N/m^2 by default, only Euler flows) +FREESTREAM_PRESSURE= 101325.0 +% +% Free-stream temperature (273.15 K by default) +FREESTREAM_TEMPERATURE= 273.15 + +% -------------- COMPRESSIBLE AND INCOMPRESSIBLE FLUID CONSTANTS --------------% +% +% Ratio of specific heats (1.4 (air), only for compressible flows) +GAMMA_VALUE= 1.4 +% +% Specific gas constant (287.87 J/kg*K (air), only for compressible flows) +GAS_CONSTANT= 287.87 + +% ---------------------- REFERENCE VALUE DEFINITION ---------------------------% +% +% Reference origin for moment computation +REF_ORIGIN_MOMENT_X = 0.25 +REF_ORIGIN_MOMENT_Y = 0.00 +REF_ORIGIN_MOMENT_Z = 0.00 +% +% Reference length for pitching, rolling, and yawing non-dimensional moment +REF_LENGTH= 1.0 +% +% Reference area for force coefficients (0 implies automatic calculation) +REF_AREA= 1.0 +% +% Flow non-dimensionalization (DIMENSIONAL, FREESTREAM_PRESS_EQ_ONE, +% FREESTREAM_VEL_EQ_MACH, FREESTREAM_VEL_EQ_ONE) +REF_DIMENSIONALIZATION= DIMENSIONAL + +% ----------------------- BOUNDARY CONDITION DEFINITION -----------------------% +% +% Marker of the Euler boundary (NONE = no marker) +MARKER_EULER= ( airfoil ) +% +% Marker of the far field (NONE = no marker) +MARKER_FAR= ( farfield ) + +% ------------------------ SURFACES IDENTIFICATION ----------------------------% +% +% Marker(s) of the surface in the surface flow solution file +MARKER_PLOTTING = ( airfoil ) +% +% Marker(s) of the surface where the non-dimensional coefficients are evaluated. +MARKER_MONITORING = ( airfoil ) +% +% Marker(s) of the surface where obj. func. 
(design problem) will be evaluated +MARKER_DESIGNING = ( airfoil ) + +% ------------- COMMON PARAMETERS TO DEFINE THE NUMERICAL METHOD --------------% +% +% Numerical method for spatial gradients (GREEN_GAUSS, WEIGHTED_LEAST_SQUARES) +NUM_METHOD_GRAD= WEIGHTED_LEAST_SQUARES +% +% Objective function in optimization problem (DRAG, LIFT, SIDEFORCE, MOMENT_X, +% MOMENT_Y, MOMENT_Z, EFFICIENCY, +% EQUIVALENT_AREA, NEARFIELD_PRESSURE, +% FORCE_X, FORCE_Y, FORCE_Z, THRUST, +% TORQUE, FREE_SURFACE, TOTAL_HEATFLUX, +% MAXIMUM_HEATFLUX, INVERSE_DESIGN_PRESSURE, +% INVERSE_DESIGN_HEATFLUX) +% OBJECTIVE_FUNCTION= DRAG +% +% Courant-Friedrichs-Lewy condition of the finest grid +%CFL_NUMBER= 4.0 +CFL_NUMBER= 1.0 +%CFL_NUMBER=0.1 +% +% Number of total iterations +EXT_ITER=200 +ITER= 200 + +% ------------------------ LINEAR SOLVER DEFINITION ---------------------------% +% +% Linear solver for implicit formulations (BCGSTAB, FGMRES) +LINEAR_SOLVER= FGMRES +% +% Preconditioner of the Krylov linear solver (JACOBI, LINELET, LU_SGS) +LINEAR_SOLVER_PREC= LU_SGS +% +% Minimum error of the linear solver for implicit formulations +LINEAR_SOLVER_ERROR= 1E-6 +% +% Max number of iterations of the linear solver for the implicit formulation +LINEAR_SOLVER_ITER= 5 + +% -------------------------- MULTIGRID PARAMETERS -----------------------------% +% +% Multi-Grid Levels (0 = no multi-grid) +MGLEVEL= 2 +% +% Multi-grid cycle (V_CYCLE, W_CYCLE, FULLMG_CYCLE) +MGCYCLE= W_CYCLE +% +% Multi-Grid PreSmoothing Level +MG_PRE_SMOOTH= ( 1, 2, 3, 3 ) +% +% Multi-Grid PostSmoothing Level +MG_POST_SMOOTH= ( 0, 0, 0, 0 ) +% +% Jacobi implicit smoothing of the correction +MG_CORRECTION_SMOOTH= ( 0, 0, 0, 0 ) +% +% Damping factor for the residual restriction +MG_DAMP_RESTRICTION= 1.0 +% +% Damping factor for the correction prolongation +MG_DAMP_PROLONGATION= 1.0 + +% -------------------- FLOW NUMERICAL METHOD DEFINITION -----------------------% +% +% Convective numerical method (JST, LAX-FRIEDRICH, CUSP, ROE, AUSM, HLLC, +% TURKEL_PREC, MSW) +CONV_NUM_METHOD_FLOW= JST +% +% Monotonic Upwind Scheme for Conservation Laws (TVD) in the flow equations. +% Required for 2nd order upwind schemes (NO, YES) +MUSCL_FLOW= YES +% +% Slope limiter (NONE, VENKATAKRISHNAN, VENKATAKRISHNAN_WANG, +% BARTH_JESPERSEN, VAN_ALBADA_EDGE) +SLOPE_LIMITER_FLOW= VENKATAKRISHNAN +% +% 2nd and 4th order artificial dissipation coefficients +JST_SENSOR_COEFF= ( 0.5, 0.02 ) +% +% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT, EULER_EXPLICIT) +TIME_DISCRE_FLOW= EULER_IMPLICIT + +% ---------------- ADJOINT-FLOW NUMERICAL METHOD DEFINITION -------------------% +% +% Convective numerical method (JST, LAX-FRIEDRICH, ROE) +CONV_NUM_METHOD_ADJFLOW= JST +% +% Monotonic Upwind Scheme for Conservation Laws (TVD) in the adjoint flow equations. 
+% Required for 2nd order upwind schemes (NO, YES) +MUSCL_ADJFLOW= YES +% +% Slope limiter (NONE, VENKATAKRISHNAN, BARTH_JESPERSEN, VAN_ALBADA_EDGE, +% SHARP_EDGES, WALL_DISTANCE) +SLOPE_LIMITER_ADJFLOW= NONE +% +% Reduction factor of the CFL coefficient in the adjoint problem +CFL_REDUCTION_ADJFLOW= 0.5 +% +% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT) +TIME_DISCRE_ADJFLOW= EULER_IMPLICIT + +% ----------------------- DESIGN VARIABLE PARAMETERS --------------------------% +% +% Kind of deformation (NO_DEFORMATION, TRANSLATION, ROTATION, SCALE, +% FFD_SETTING, FFD_NACELLE +% FFD_CONTROL_POINT, FFD_CAMBER, FFD_THICKNESS, FFD_TWIST +% FFD_CONTROL_POINT_2D, FFD_CAMBER_2D, FFD_THICKNESS_2D, FFD_TWIST_2D, +% HICKS_HENNE, SURFACE_BUMP) +DV_KIND= HICKS_HENNE +% +% Marker of the surface in which we are going apply the shape deformation +DV_MARKER= ( airfoil ) +% +% Parameters of the shape deformation +% - NO_DEFORMATION ( 1.0 ) +% - TRANSLATION ( x_Disp, y_Disp, z_Disp ), as a unit vector +% - ROTATION ( x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) +% - SCALE ( 1.0 ) +% - ANGLE_OF_ATTACK ( 1.0 ) +% - FFD_SETTING ( 1.0 ) +% - FFD_CONTROL_POINT ( FFD_BoxTag, i_Ind, j_Ind, k_Ind, x_Disp, y_Disp, z_Disp ) +% - FFD_NACELLE ( FFD_BoxTag, rho_Ind, theta_Ind, phi_Ind, rho_Disp, phi_Disp ) +% - FFD_GULL ( FFD_BoxTag, j_Ind ) +% - FFD_ANGLE_OF_ATTACK ( FFD_BoxTag, 1.0 ) +% - FFD_CAMBER ( FFD_BoxTag, i_Ind, j_Ind ) +% - FFD_THICKNESS ( FFD_BoxTag, i_Ind, j_Ind ) +% - FFD_TWIST ( FFD_BoxTag, j_Ind, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) +% - FFD_CONTROL_POINT_2D ( FFD_BoxTag, i_Ind, j_Ind, x_Disp, y_Disp ) +% - FFD_CAMBER_2D ( FFD_BoxTag, i_Ind ) +% - FFD_THICKNESS_2D ( FFD_BoxTag, i_Ind ) +% - FFD_TWIST_2D ( FFD_BoxTag, x_Orig, y_Orig ) +% - HICKS_HENNE ( Lower Surface (0)/Upper Surface (1)/Only one Surface (2), x_Loc ) +% - SURFACE_BUMP ( x_Start, x_End, x_Loc ) +DV_PARAM= ( 1, 0.5 ) +% +% Value of the shape deformation +DV_VALUE= 0.01 + +% ------------------------ GRID DEFORMATION PARAMETERS ------------------------% +% +% Number of smoothing iterations for FEA mesh deformation +DEFORM_LINEAR_ITER= 500 +% +% Number of nonlinear deformation iterations (surface deformation increments) +DEFORM_NONLINEAR_ITER= 1 +% +% Minimum residual criteria for the linear solver convergence of grid deformation +DEFORM_LINEAR_SOLVER_ERROR= 1E-14 +% +% Print the residuals during mesh deformation to the console (YES, NO) +DEFORM_CONSOLE_OUTPUT= YES +% +% Type of element stiffness imposed for FEA mesh deformation (INVERSE_VOLUME, +% WALL_DISTANCE, CONSTANT_STIFFNESS) +DEFORM_STIFFNESS_TYPE= INVERSE_VOLUME +% +% Visualize the surface deformation (NO, YES) +VISUALIZE_SURFACE_DEF= NO +% +% Visualize the volume deformation (NO, YES) +VISUALIZE_VOLUME_DEF= NO + +% --------------------------- CONVERGENCE PARAMETERS --------------------------% +% Convergence criteria (CAUCHY, RESIDUAL) +% +CONV_CRITERIA= RESIDUAL +% +% Residual reduction (order of magnitude with respect to the initial value) +RESIDUAL_REDUCTION= 6 +% +% Min value of the residual (log10 of the residual) +RESIDUAL_MINVAL= -8 +% +% Start Cauchy criteria at iteration number +STARTCONV_ITER= 10 +% +% Number of elements to apply the criteria +CAUCHY_ELEMS= 100 +% +% Epsilon to control the series convergence +CAUCHY_EPS= 1E-6 +% +% Function to apply the criteria (LIFT, DRAG, SENS_GEOMETRY, SENS_MACH, +% DELTA_LIFT, DELTA_DRAG) +CAUCHY_FUNC_FLOW= DRAG + +% ------------------------- INPUT/OUTPUT INFORMATION --------------------------% +% Mesh input file 
+%MESH_FILENAME=mesh_NACA0012_xcoarse.su2 +MESH_FILENAME=passed_as_flag_to_train.py.su2 +% +% Mesh input file format (SU2, CGNS, NETCDF_ASCII) +MESH_FORMAT= SU2 +% +% Mesh output file +MESH_OUT_FILENAME= mesh_out.su2 +% +% Restart flow input file +SOLUTION_FLOW_FILENAME= solution_flow.dat +% +% Restart adjoint input file +SOLUTION_ADJ_FILENAME= solution_adj.dat +% +% Output file format (TECPLOT, PARAVIEW, TECPLOT_BINARY) +%OUTPUT_FORMAT= TECPLOT_BINARY +% +% Output file convergence history (w/o extension) +CONV_FILENAME= history +% +% Output file restart flow +RESTART_FLOW_FILENAME= restart_flow.dat +% +% Output file restart adjoint +RESTART_ADJ_FILENAME= restart_adj.dat +% +% Output file flow (w/o extension) variables +VOLUME_FLOW_FILENAME= flow +% +% Output file adjoint (w/o extension) variables +VOLUME_ADJ_FILENAME= adjoint +% +% Output Objective function gradient (using continuous adjoint) +GRAD_OBJFUNC_FILENAME= of_grad.dat +% +% Output file surface flow coefficient (w/o extension) +SURFACE_FLOW_FILENAME= surface_flow +% +% Output file surface adjoint coefficient (w/o extension) +SURFACE_ADJ_FILENAME= surface_adjoint +% +% Writing solution file frequency +WRT_SOL_FREQ= 1000 +% +% Writing convergence history frequency +WRT_CON_FREQ= 1000 + +% --------------------- OPTIMAL SHAPE DESIGN DEFINITION -----------------------% +% Available flow based objective functions or constraint functions +% DRAG, LIFT, SIDEFORCE, EFFICIENCY, +% FORCE_X, FORCE_Y, FORCE_Z, +% MOMENT_X, MOMENT_Y, MOMENT_Z, +% THRUST, TORQUE, FIGURE_OF_MERIT, +% EQUIVALENT_AREA, NEARFIELD_PRESSURE, +% TOTAL_HEATFLUX, MAXIMUM_HEATFLUX, +% INVERSE_DESIGN_PRESSURE, INVERSE_DESIGN_HEATFLUX, +% +% Available geometrical based objective functions or constraint functions +% AIRFOIL_AREA, AIRFOIL_THICKNESS, AIRFOIL_CHORD, AIRFOIL_TOC, AIRFOIL_AOA, +% WING_VOLUME, WING_MIN_THICKNESS, WING_MAX_THICKNESS, WING_MAX_CHORD, WING_MIN_TOC, WING_MAX_TWIST, WING_MAX_CURVATURE, WING_MAX_DIHEDRAL +% STATION#_WIDTH, STATION#_AREA, STATION#_THICKNESS, STATION#_CHORD, STATION#_TOC, +% STATION#_TWIST (where # is the index of the station defined in GEO_LOCATION_STATIONS) +% +% Available design variables +% HICKS_HENNE ( 1, Scale | Mark. List | Lower(0)/Upper(1) side, x_Loc ) +% NACA_4DIGITS ( 4, Scale | Mark. List | 1st digit, 2nd digit, 3rd and 4th digit ) +% TRANSLATION ( 5, Scale | Mark. List | x_Disp, y_Disp, z_Disp ) +% ROTATION ( 6, Scale | Mark. List | x_Axis, y_Axis, z_Axis, x_Turn, y_Turn, z_Turn ) +% FFD_CONTROL_POINT_2D ( 15, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, x_Mov, y_Mov ) +% FFD_CAMBER_2D ( 16, Scale | Mark. List | FFD_Box_ID, i_Ind ) +% FFD_THICKNESS_2D ( 17, Scale | Mark. 
List | FFD_Box_ID, i_Ind ) +% +% Optimization objective function with scaling factor +% ex= Objective * Scale +% OPT_OBJECTIVE= DRAG * 0.001 +% +% Optimization constraint functions with scaling factors, separated by semicolons +% ex= (Objective = Value ) * Scale, use '>','<','=' +% OPT_CONSTRAINT= ( LIFT > 0.328188 ) * 0.001; ( MOMENT_Z > 0.034068 ) * 0.001; ( AIRFOIL_THICKNESS > 0.11 ) * 0.001 +% +% Optimization design variables, separated by semicolons +% DEFINITION_DV= ( 1, 1.0 | airfoil | 0, 0.05 ); ( 1, 1.0 | airfoil | 0, 0.10 ); ( 1, 1.0 | airfoil | 0, 0.15 ); ( 1, 1.0 | airfoil | 0, 0.20 ); ( 1, 1.0 | airfoil | 0, 0.25 ); ( 1, 1.0 | airfoil | 0, 0.30 ); ( 1, 1.0 | airfoil | 0, 0.35 ); ( 1, 1.0 | airfoil | 0, 0.40 ); ( 1, 1.0 | airfoil | 0, 0.45 ); ( 1, 1.0 | airfoil | 0, 0.50 ); ( 1, 1.0 | airfoil | 0, 0.55 ); ( 1, 1.0 | airfoil | 0, 0.60 ); ( 1, 1.0 | airfoil | 0, 0.65 ); ( 1, 1.0 | airfoil | 0, 0.70 ); ( 1, 1.0 | airfoil | 0, 0.75 ); ( 1, 1.0 | airfoil | 0, 0.80 ); ( 1, 1.0 | airfoil | 0, 0.85 ); ( 1, 1.0 | airfoil | 0, 0.90 ); ( 1, 1.0 | airfoil | 0, 0.95 ); ( 1, 1.0 | airfoil | 1, 0.05 ); ( 1, 1.0 | airfoil | 1, 0.10 ); ( 1, 1.0 | airfoil | 1, 0.15 ); ( 1, 1.0 | airfoil | 1, 0.20 ); ( 1, 1.0 | airfoil | 1, 0.25 ); ( 1, 1.0 | airfoil | 1, 0.30 ); ( 1, 1.0 | airfoil | 1, 0.35 ); ( 1, 1.0 | airfoil | 1, 0.40 ); ( 1, 1.0 | airfoil | 1, 0.45 ); ( 1, 1.0 | airfoil | 1, 0.50 ); ( 1, 1.0 | airfoil | 1, 0.55 ); ( 1, 1.0 | airfoil | 1, 0.60 ); ( 1, 1.0 | airfoil | 1, 0.65 ); ( 1, 1.0 | airfoil | 1, 0.70 ); ( 1, 1.0 | airfoil | 1, 0.75 ); ( 1, 1.0 | airfoil | 1, 0.80 ); ( 1, 1.0 | airfoil | 1, 0.85 ); ( 1, 1.0 | airfoil | 1, 0.90 ); ( 1, 1.0 | airfoil | 1, 0.95 ) + + + + +DIFF_INPUTS= COORDS_X, COORDS_Y, AOA, MACH +DIFF_OUTPUTS= VEL_X, VEL_Y, PRESSURE diff --git a/jointContribution/CFDGCN/common.py b/jointContribution/CFDGCN/common.py index e4a2476b69..3bdec5134d 100644 --- a/jointContribution/CFDGCN/common.py +++ b/jointContribution/CFDGCN/common.py @@ -1,178 +1,178 @@ -import math -import warnings -from typing import BinaryIO -from typing import List -from typing import Optional -from typing import Text -from typing import Tuple -from typing import Union - -import matplotlib.pyplot as plt -import paddle -import PIL - -plt.switch_backend("agg") - - -def pad_sequence( - sequences: List[paddle.Tensor], - batch_first: bool = False, - padding_value: float = 0.0, -) -> paddle.Tensor: - r"""Pad a list of variable length Tensors with ``padding_value`` - ``pad_sequence`` stacks a list of Tensors along a new dimension, - and pads them to equal length. For example, if the input is list of - sequences with size ``L x *`` and if batch_first is False, and ``T x B x *`` - otherwise. - `B` is batch size. It is equal to the number of elements in ``sequences``. - `T` is length of the longest sequence. - `L` is length of the sequence. - `*` is any number of trailing dimensions, including none. - Example: - >>> a = paddle.ones(25, 300) - >>> b = paddle.ones(22, 300) - >>> c = paddle.ones(15, 300) - >>> pad_sequence([a, b, c]).shape - paddle.Tensor([25, 3, 300]) - Note: - This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` - where `T` is the length of the longest sequence. This function assumes - trailing dimensions and type of all the Tensors in sequences are same. - Args: - sequences (list[Tensor]): list of variable length sequences. 
- batch_first (bool, optional): output will be in ``B x T x *`` if True, or in - ``T x B x *`` otherwise - padding_value (float, optional): value for padded elements. Default: 0. - Returns: - Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``. - Tensor of size ``B x T x *`` otherwise - """ - - # assuming trailing dimensions and type of all the Tensors - # in sequences are same and fetching those from sequences[0] - max_size = paddle.shape(sequences[0]) - trailing_dims = ( - tuple(max_size[1:].numpy().tolist()) if sequences[0].ndim >= 2 else () - ) - max_len = max([s.shape[0] for s in sequences]) - if batch_first: - out_dims = (len(sequences), max_len) + trailing_dims - else: - out_dims = (max_len, len(sequences)) + trailing_dims - out_tensor = paddle.full(out_dims, padding_value, sequences[0].dtype) - for i, tensor in enumerate(sequences): - length = tensor.shape[0] - # use index notation to prevent duplicate references to the tensor - if batch_first: - if length != 0: - out_tensor[i, :length] = tensor - else: - out_tensor[i, length] = tensor - else: - if length != 0: - out_tensor[:length, i] = tensor - else: - out_tensor[length, i] = tensor - - return out_tensor - - -@paddle.no_grad() -def make_grid( - tensor: Union[paddle.Tensor, List[paddle.Tensor]], - nrow: int = 8, - padding: int = 2, - normalize: bool = False, - value_range: Optional[Tuple[int, int]] = None, - scale_each: bool = False, - pad_value: int = 0, - **kwargs, -) -> paddle.Tensor: - if not ( - isinstance(tensor, paddle.Tensor) - or ( - isinstance(tensor, list) - and all(isinstance(t, paddle.Tensor) for t in tensor) - ) - ): - raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}") - - if "range" in kwargs: - warning = "range will be deprecated, please use value_range instead." - warnings.warn(warning) - value_range = kwargs["range"] - - # if list of tensors, convert to a 4D mini-batch Tensor - if isinstance(tensor, list): - tensor = paddle.stack(tensor, axis=0) - - if tensor.dim() == 2: # single image H x W - tensor = tensor.unsqueeze(0) - if tensor.dim() == 3: # single image - if tensor.shape[0] == 1: # if single-channel, convert to 3-channel - tensor = paddle.concat((tensor, tensor, tensor), 0) - tensor = tensor.unsqueeze(0) - if tensor.dim() == 4 and tensor.shape[1] == 1: # single-channel images - tensor = paddle.concat((tensor, tensor, tensor), 1) - - if normalize is True: - if value_range is not None: - if not isinstance(value_range, tuple): - raise ValueError( - "value_range has to be a tuple (min, max) if specified. 
min and max are numbers" - ) - - def norm_input_pic(img, low, high): - img.clip(min=low, max=high) - img = img - low - img = img / max(high - low, 1e-5) - - def norm_range(t, value_range): - if value_range is not None: - norm_input_pic(t, value_range[0], value_range[1]) - else: - norm_input_pic(t, float(t.min()), float(t.max())) - - if scale_each is True: - for t in tensor: # loop over mini-batch dimension - norm_range(t, value_range) - else: - norm_range(tensor, value_range) - - if tensor.shape[0] == 1: - return tensor.squeeze(0) - - # make the mini-batch of images into a grid - nmaps = tensor.shape[0] - xmaps = min(nrow, nmaps) - ymaps = int(math.ceil(float(nmaps) / xmaps)) - height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding) - num_channels = tensor.shape[1] - grid = paddle.full( - (num_channels, height * ymaps + padding, width * xmaps + padding), pad_value - ) - k = 0 - for y in range(ymaps): - for x in range(xmaps): - if k >= nmaps: - break - grid[ - :, - y * height + padding : (y + 1) * height, - x * width + padding : (x + 1) * width, - ] = tensor[k] - k = k + 1 - return grid - - -@paddle.no_grad() -def save_image( - tensor: Union[paddle.Tensor, List[paddle.Tensor]], - fp: Union[Text, str, BinaryIO], - format: Optional[str] = None, - **kwargs, -) -> None: - grid = make_grid(tensor, **kwargs) - ndarr = paddle.clip(grid * 255 + 0.5, 0, 255).cast("uint8").numpy() - im = PIL.Image.fromarray(ndarr) - im.save(fp, format=format) +import math +import warnings +from typing import BinaryIO +from typing import List +from typing import Optional +from typing import Text +from typing import Tuple +from typing import Union + +import matplotlib.pyplot as plt +import paddle +import PIL + +plt.switch_backend("agg") + + +def pad_sequence( + sequences: List[paddle.Tensor], + batch_first: bool = False, + padding_value: float = 0.0, +) -> paddle.Tensor: + r"""Pad a list of variable length Tensors with ``padding_value`` + ``pad_sequence`` stacks a list of Tensors along a new dimension, + and pads them to equal length. For example, if the input is list of + sequences with size ``L x *`` and if batch_first is False, and ``T x B x *`` + otherwise. + `B` is batch size. It is equal to the number of elements in ``sequences``. + `T` is length of the longest sequence. + `L` is length of the sequence. + `*` is any number of trailing dimensions, including none. + Example: + >>> a = paddle.ones(25, 300) + >>> b = paddle.ones(22, 300) + >>> c = paddle.ones(15, 300) + >>> pad_sequence([a, b, c]).shape + paddle.Tensor([25, 3, 300]) + Note: + This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` + where `T` is the length of the longest sequence. This function assumes + trailing dimensions and type of all the Tensors in sequences are same. + Args: + sequences (list[Tensor]): list of variable length sequences. + batch_first (bool, optional): output will be in ``B x T x *`` if True, or in + ``T x B x *`` otherwise + padding_value (float, optional): value for padded elements. Default: 0. + Returns: + Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``. 
+ Tensor of size ``B x T x *`` otherwise + """ + + # assuming trailing dimensions and type of all the Tensors + # in sequences are same and fetching those from sequences[0] + max_size = paddle.shape(sequences[0]) + trailing_dims = ( + tuple(max_size[1:].numpy().tolist()) if sequences[0].ndim >= 2 else () + ) + max_len = max([s.shape[0] for s in sequences]) + if batch_first: + out_dims = (len(sequences), max_len) + trailing_dims + else: + out_dims = (max_len, len(sequences)) + trailing_dims + out_tensor = paddle.full(out_dims, padding_value, sequences[0].dtype) + for i, tensor in enumerate(sequences): + length = tensor.shape[0] + # use index notation to prevent duplicate references to the tensor + if batch_first: + if length != 0: + out_tensor[i, :length] = tensor + else: + out_tensor[i, length] = tensor + else: + if length != 0: + out_tensor[:length, i] = tensor + else: + out_tensor[length, i] = tensor + + return out_tensor + + +@paddle.no_grad() +def make_grid( + tensor: Union[paddle.Tensor, List[paddle.Tensor]], + nrow: int = 8, + padding: int = 2, + normalize: bool = False, + value_range: Optional[Tuple[int, int]] = None, + scale_each: bool = False, + pad_value: int = 0, + **kwargs, +) -> paddle.Tensor: + if not ( + isinstance(tensor, paddle.Tensor) + or ( + isinstance(tensor, list) + and all(isinstance(t, paddle.Tensor) for t in tensor) + ) + ): + raise TypeError(f"tensor or list of tensors expected, got {type(tensor)}") + + if "range" in kwargs: + warning = "range will be deprecated, please use value_range instead." + warnings.warn(warning) + value_range = kwargs["range"] + + # if list of tensors, convert to a 4D mini-batch Tensor + if isinstance(tensor, list): + tensor = paddle.stack(tensor, axis=0) + + if tensor.dim() == 2: # single image H x W + tensor = tensor.unsqueeze(0) + if tensor.dim() == 3: # single image + if tensor.shape[0] == 1: # if single-channel, convert to 3-channel + tensor = paddle.concat((tensor, tensor, tensor), 0) + tensor = tensor.unsqueeze(0) + if tensor.dim() == 4 and tensor.shape[1] == 1: # single-channel images + tensor = paddle.concat((tensor, tensor, tensor), 1) + + if normalize is True: + if value_range is not None: + if not isinstance(value_range, tuple): + raise ValueError( + "value_range has to be a tuple (min, max) if specified. 
min and max are numbers" + ) + + def norm_input_pic(img, low, high): + img.clip(min=low, max=high) + img = img - low + img = img / max(high - low, 1e-5) + + def norm_range(t, value_range): + if value_range is not None: + norm_input_pic(t, value_range[0], value_range[1]) + else: + norm_input_pic(t, float(t.min()), float(t.max())) + + if scale_each is True: + for t in tensor: # loop over mini-batch dimension + norm_range(t, value_range) + else: + norm_range(tensor, value_range) + + if tensor.shape[0] == 1: + return tensor.squeeze(0) + + # make the mini-batch of images into a grid + nmaps = tensor.shape[0] + xmaps = min(nrow, nmaps) + ymaps = int(math.ceil(float(nmaps) / xmaps)) + height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding) + num_channels = tensor.shape[1] + grid = paddle.full( + (num_channels, height * ymaps + padding, width * xmaps + padding), pad_value + ) + k = 0 + for y in range(ymaps): + for x in range(xmaps): + if k >= nmaps: + break + grid[ + :, + y * height + padding : (y + 1) * height, + x * width + padding : (x + 1) * width, + ] = tensor[k] + k = k + 1 + return grid + + +@paddle.no_grad() +def save_image( + tensor: Union[paddle.Tensor, List[paddle.Tensor]], + fp: Union[Text, str, BinaryIO], + format: Optional[str] = None, + **kwargs, +) -> None: + grid = make_grid(tensor, **kwargs) + ndarr = paddle.clip(grid * 255 + 0.5, 0, 255).cast("uint8").numpy() + im = PIL.Image.fromarray(ndarr) + im.save(fp, format=format) diff --git a/jointContribution/CFDGCN/data.py b/jointContribution/CFDGCN/data.py index 7f43b4cddd..b7e2eee7e9 100644 --- a/jointContribution/CFDGCN/data.py +++ b/jointContribution/CFDGCN/data.py @@ -1,110 +1,110 @@ -import os -import pickle - -import mesh_utils -import numpy as np -import paddle -import pgl -import pgl.utils.data.dataloader as pgl_dataloader - - -class MeshAirfoilDataset(pgl_dataloader.Dataset): - def __init__(self, root, mode="train"): - super().__init__() - - self.mode = mode - self.data_dir = os.path.join(root, f"outputs_${mode}") - self.file_list = os.listdir(self.data_dir) - self.len = len(self.file_list) - - self.mesh_graph = mesh_utils.get_mesh_graph(os.path.join(root, "mesh_fine.su2")) - - # either [maxes, mins] or [means, stds] from data for normalization - with open(os.path.join(root, "train_max_min.pkl"), "rb") as f: - self.normalization_factors = pickle.load(f) - - self.nodes = self.mesh_graph[0] - self.edges = paddle.to_tensor(self.mesh_graph[1]).transpose([1, 0]) - self.elems_list = self.mesh_graph[2] - self.marker_dict = self.mesh_graph[3] - self.node_markers = np.full([self.nodes.shape[0], 1], fill_value=-1) - for i, (marker_tag, marker_elems) in enumerate(self.marker_dict.items()): - for elem in marker_elems: - self.node_markers[elem[0]] = i - self.node_markers[elem[1]] = i - - self.graphs = [] - - for idx in range(self.len): - with open(self.data_dir / self.file_list[idx], "rb") as f: - fields = pickle.load(f) - fields = paddle.to_tensor(self.preprocess(fields)) - - aoa, reynolds, mach = self.get_params_from_name(self.file_list[idx]) - aoa = paddle.to_tensor(aoa) - mach_or_reynolds = paddle.to_tensor(mach if reynolds is None else reynolds) - - norm_aoa = paddle.to_tensor(aoa / 10) - norm_mach_or_reynolds = paddle.to_tensor( - mach_or_reynolds - if reynolds is None - else (mach_or_reynolds - 1.5e6) / 1.5e6 - ) - - # add physics parameters to graph - nodes = np.concatenate( - [ - self.nodes, - np.repeat(a=norm_aoa, repeats=self.nodes.shape[0])[:, np.newaxis], - np.repeat(a=norm_mach_or_reynolds, 
repeats=self.nodes.shape[0])[ - :, np.newaxis - ], - self.node_markers, - ], - axis=-1, - ).astype(np.float32) - nodes = paddle.to_tensor(nodes) - - graph = pgl.Graph( - num_nodes=nodes.shape[0], edges=self.edges, node_feat={"feature": nodes} - ) - - graph.y = fields - graph.aoa = paddle.to_tensor(aoa) - graph.norm_aoa = paddle.to_tensor(norm_aoa) - graph.mach_or_reynolds = paddle.to_tensor(mach_or_reynolds) - graph.norm_mach_or_reynolds = paddle.to_tensor(norm_mach_or_reynolds) - - self.graphs.append(graph) - - def preprocess(self, tensor_list, stack_output=True): - # data_means, data_stds = self.normalization_factors - data_max, data_min = self.normalization_factors - normalized_tensors = [] - for i in range(len(tensor_list)): - normalized = (tensor_list[i] - data_min[i]) / ( - data_max[i] - data_min[i] - ) * 2 - 1 - normalized_tensors.append(normalized) - if stack_output: - normalized_tensors = np.stack(normalized_tensors, axis=1) - return normalized_tensors - - @staticmethod - def get_params_from_name(filename): - s = filename.rsplit(".", 1)[0].split("_") - aoa = np.array(s[s.index("aoa") + 1])[np.newaxis].astype(np.float32) - reynolds = s[s.index("re") + 1] - reynolds = ( - np.array(reynolds)[np.newaxis].astype(np.float32) - if reynolds != "None" - else None - ) - mach = np.array(s[s.index("mach") + 1])[np.newaxis].astype(np.float32) - return aoa, reynolds, mach - - def __len__(self): - return self.len - - def __getitem__(self, idx): - return self.graphs[idx] +import os +import pickle + +import mesh_utils +import numpy as np +import paddle +import pgl +import pgl.utils.data.dataloader as pgl_dataloader + + +class MeshAirfoilDataset(pgl_dataloader.Dataset): + def __init__(self, root, mode="train"): + super().__init__() + + self.mode = mode + self.data_dir = os.path.join(root, f"outputs_${mode}") + self.file_list = os.listdir(self.data_dir) + self.len = len(self.file_list) + + self.mesh_graph = mesh_utils.get_mesh_graph(os.path.join(root, "mesh_fine.su2")) + + # either [maxes, mins] or [means, stds] from data for normalization + with open(os.path.join(root, "train_max_min.pkl"), "rb") as f: + self.normalization_factors = pickle.load(f) + + self.nodes = self.mesh_graph[0] + self.edges = paddle.to_tensor(self.mesh_graph[1]).transpose([1, 0]) + self.elems_list = self.mesh_graph[2] + self.marker_dict = self.mesh_graph[3] + self.node_markers = np.full([self.nodes.shape[0], 1], fill_value=-1) + for i, (marker_tag, marker_elems) in enumerate(self.marker_dict.items()): + for elem in marker_elems: + self.node_markers[elem[0]] = i + self.node_markers[elem[1]] = i + + self.graphs = [] + + for idx in range(self.len): + with open(self.data_dir / self.file_list[idx], "rb") as f: + fields = pickle.load(f) + fields = paddle.to_tensor(self.preprocess(fields)) + + aoa, reynolds, mach = self.get_params_from_name(self.file_list[idx]) + aoa = paddle.to_tensor(aoa) + mach_or_reynolds = paddle.to_tensor(mach if reynolds is None else reynolds) + + norm_aoa = paddle.to_tensor(aoa / 10) + norm_mach_or_reynolds = paddle.to_tensor( + mach_or_reynolds + if reynolds is None + else (mach_or_reynolds - 1.5e6) / 1.5e6 + ) + + # add physics parameters to graph + nodes = np.concatenate( + [ + self.nodes, + np.repeat(a=norm_aoa, repeats=self.nodes.shape[0])[:, np.newaxis], + np.repeat(a=norm_mach_or_reynolds, repeats=self.nodes.shape[0])[ + :, np.newaxis + ], + self.node_markers, + ], + axis=-1, + ).astype(np.float32) + nodes = paddle.to_tensor(nodes) + + graph = pgl.Graph( + num_nodes=nodes.shape[0], edges=self.edges, 
node_feat={"feature": nodes} + ) + + graph.y = fields + graph.aoa = paddle.to_tensor(aoa) + graph.norm_aoa = paddle.to_tensor(norm_aoa) + graph.mach_or_reynolds = paddle.to_tensor(mach_or_reynolds) + graph.norm_mach_or_reynolds = paddle.to_tensor(norm_mach_or_reynolds) + + self.graphs.append(graph) + + def preprocess(self, tensor_list, stack_output=True): + # data_means, data_stds = self.normalization_factors + data_max, data_min = self.normalization_factors + normalized_tensors = [] + for i in range(len(tensor_list)): + normalized = (tensor_list[i] - data_min[i]) / ( + data_max[i] - data_min[i] + ) * 2 - 1 + normalized_tensors.append(normalized) + if stack_output: + normalized_tensors = np.stack(normalized_tensors, axis=1) + return normalized_tensors + + @staticmethod + def get_params_from_name(filename): + s = filename.rsplit(".", 1)[0].split("_") + aoa = np.array(s[s.index("aoa") + 1])[np.newaxis].astype(np.float32) + reynolds = s[s.index("re") + 1] + reynolds = ( + np.array(reynolds)[np.newaxis].astype(np.float32) + if reynolds != "None" + else None + ) + mach = np.array(s[s.index("mach") + 1])[np.newaxis].astype(np.float32) + return aoa, reynolds, mach + + def __len__(self): + return self.len + + def __getitem__(self, idx): + return self.graphs[idx] diff --git a/jointContribution/CFDGCN/fine.cfg b/jointContribution/CFDGCN/fine.cfg index adaf460d8a..da05a61de2 100644 --- a/jointContribution/CFDGCN/fine.cfg +++ b/jointContribution/CFDGCN/fine.cfg @@ -1,357 +1,357 @@ -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% % -% SU2 configuration file % -% Case description: Transonic inviscid flow around a NACA0012 airfoil % -% Author: Thomas D. Economon % -% Institution: Stanford University % -% Date: 2014.06.11 % -% File Version 6.2.0 "Falcon" % -% % -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -% ------------- DIRECT, ADJOINT, AND LINEARIZED PROBLEM DEFINITION ------------% -% -% Physical governing equations (EULER, NAVIER_STOKES, -% WAVE_EQUATION, HEAT_EQUATION, FEM_ELASTICITY, -% POISSON_EQUATION) -PHYSICAL_PROBLEM= EULER -% -% Mathematical problem (DIRECT, CONTINUOUS_ADJOINT) -MATH_PROBLEM= DIRECT -% -% Restart solution (NO, YES) -RESTART_SOL= NO - -% ----------- COMPRESSIBLE AND INCOMPRESSIBLE FREE-STREAM DEFINITION ----------% -% -% Mach number (non-dimensional, based on the free-stream values) -MACH_NUMBER= 0.8 -% -% Angle of attack (degrees) -AOA= 1.25 -% -% Free-stream pressure (101325.0 N/m^2 by default, only Euler flows) -FREESTREAM_PRESSURE= 101325.0 -% -% Free-stream temperature (273.15 K by default) -FREESTREAM_TEMPERATURE= 273.15 - -% -------------- COMPRESSIBLE AND INCOMPRESSIBLE FLUID CONSTANTS --------------% -% -% Ratio of specific heats (1.4 (air), only for compressible flows) -GAMMA_VALUE= 1.4 -% -% Specific gas constant (287.87 J/kg*K (air), only for compressible flows) -GAS_CONSTANT= 287.87 - -% ---------------------- REFERENCE VALUE DEFINITION ---------------------------% -% -% Reference origin for moment computation -REF_ORIGIN_MOMENT_X = 0.25 -REF_ORIGIN_MOMENT_Y = 0.00 -REF_ORIGIN_MOMENT_Z = 0.00 -% -% Reference length for pitching, rolling, and yawing non-dimensional moment -REF_LENGTH= 1.0 -% -% Reference area for force coefficients (0 implies automatic calculation) -REF_AREA= 1.0 -% -% Flow non-dimensionalization (DIMENSIONAL, FREESTREAM_PRESS_EQ_ONE, -% FREESTREAM_VEL_EQ_MACH, FREESTREAM_VEL_EQ_ONE) -REF_DIMENSIONALIZATION= DIMENSIONAL - -% ----------------------- BOUNDARY CONDITION 
DEFINITION -----------------------% -% -% Marker of the Euler boundary (NONE = no marker) -MARKER_EULER= ( airfoil ) -% -% Marker of the far field (NONE = no marker) -MARKER_FAR= ( farfield ) - -% ------------------------ SURFACES IDENTIFICATION ----------------------------% -% -% Marker(s) of the surface in the surface flow solution file -MARKER_PLOTTING = ( airfoil ) -% -% Marker(s) of the surface where the non-dimensional coefficients are evaluated. -MARKER_MONITORING = ( airfoil ) -% -% Marker(s) of the surface where obj. func. (design problem) will be evaluated -MARKER_DESIGNING = ( airfoil ) - -% ------------- COMMON PARAMETERS TO DEFINE THE NUMERICAL METHOD --------------% -% -% Numerical method for spatial gradients (GREEN_GAUSS, WEIGHTED_LEAST_SQUARES) -NUM_METHOD_GRAD= WEIGHTED_LEAST_SQUARES -% -% Objective function in optimization problem (DRAG, LIFT, SIDEFORCE, MOMENT_X, -% MOMENT_Y, MOMENT_Z, EFFICIENCY, -% EQUIVALENT_AREA, NEARFIELD_PRESSURE, -% FORCE_X, FORCE_Y, FORCE_Z, THRUST, -% TORQUE, FREE_SURFACE, TOTAL_HEATFLUX, -% MAXIMUM_HEATFLUX, INVERSE_DESIGN_PRESSURE, -% INVERSE_DESIGN_HEATFLUX) -% OBJECTIVE_FUNCTION= DRAG -% -% Courant-Friedrichs-Lewy condition of the finest grid -%CFL_NUMBER= 4.0 -CFL_NUMBER= 1.0 -%CFL_NUMBER=0.1 -% -% Number of total iterations -EXT_ITER=1000 -ITER= 1000 - -% ------------------------ LINEAR SOLVER DEFINITION ---------------------------% -% -% Linear solver for implicit formulations (BCGSTAB, FGMRES) -LINEAR_SOLVER= FGMRES -% -% Preconditioner of the Krylov linear solver (JACOBI, LINELET, LU_SGS) -LINEAR_SOLVER_PREC= LU_SGS -% -% Minimum error of the linear solver for implicit formulations -LINEAR_SOLVER_ERROR= 1E-6 -% -% Max number of iterations of the linear solver for the implicit formulation -LINEAR_SOLVER_ITER= 5 - -% -------------------------- MULTIGRID PARAMETERS -----------------------------% -% -% Multi-Grid Levels (0 = no multi-grid) -MGLEVEL= 2 -% -% Multi-grid cycle (V_CYCLE, W_CYCLE, FULLMG_CYCLE) -MGCYCLE= W_CYCLE -% -% Multi-Grid PreSmoothing Level -MG_PRE_SMOOTH= ( 1, 2, 3, 3 ) -% -% Multi-Grid PostSmoothing Level -MG_POST_SMOOTH= ( 0, 0, 0, 0 ) -% -% Jacobi implicit smoothing of the correction -MG_CORRECTION_SMOOTH= ( 0, 0, 0, 0 ) -% -% Damping factor for the residual restriction -MG_DAMP_RESTRICTION= 1.0 -% -% Damping factor for the correction prolongation -MG_DAMP_PROLONGATION= 1.0 - -% -------------------- FLOW NUMERICAL METHOD DEFINITION -----------------------% -% -% Convective numerical method (JST, LAX-FRIEDRICH, CUSP, ROE, AUSM, HLLC, -% TURKEL_PREC, MSW) -CONV_NUM_METHOD_FLOW= JST -% -% Monotonic Upwind Scheme for Conservation Laws (TVD) in the flow equations. -% Required for 2nd order upwind schemes (NO, YES) -MUSCL_FLOW= YES -% -% Slope limiter (NONE, VENKATAKRISHNAN, VENKATAKRISHNAN_WANG, -% BARTH_JESPERSEN, VAN_ALBADA_EDGE) -SLOPE_LIMITER_FLOW= VENKATAKRISHNAN -% -% 2nd and 4th order artificial dissipation coefficients -JST_SENSOR_COEFF= ( 0.5, 0.02 ) -% -% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT, EULER_EXPLICIT) -TIME_DISCRE_FLOW= EULER_IMPLICIT - -% ---------------- ADJOINT-FLOW NUMERICAL METHOD DEFINITION -------------------% -% -% Convective numerical method (JST, LAX-FRIEDRICH, ROE) -CONV_NUM_METHOD_ADJFLOW= JST -% -% Monotonic Upwind Scheme for Conservation Laws (TVD) in the adjoint flow equations. 
-% Required for 2nd order upwind schemes (NO, YES) -MUSCL_ADJFLOW= YES -% -% Slope limiter (NONE, VENKATAKRISHNAN, BARTH_JESPERSEN, VAN_ALBADA_EDGE, -% SHARP_EDGES, WALL_DISTANCE) -SLOPE_LIMITER_ADJFLOW= NONE -% -% Reduction factor of the CFL coefficient in the adjoint problem -CFL_REDUCTION_ADJFLOW= 0.5 -% -% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT) -TIME_DISCRE_ADJFLOW= EULER_IMPLICIT - -% ----------------------- DESIGN VARIABLE PARAMETERS --------------------------% -% -% Kind of deformation (NO_DEFORMATION, TRANSLATION, ROTATION, SCALE, -% FFD_SETTING, FFD_NACELLE -% FFD_CONTROL_POINT, FFD_CAMBER, FFD_THICKNESS, FFD_TWIST -% FFD_CONTROL_POINT_2D, FFD_CAMBER_2D, FFD_THICKNESS_2D, FFD_TWIST_2D, -% HICKS_HENNE, SURFACE_BUMP) -DV_KIND= HICKS_HENNE -% -% Marker of the surface in which we are going apply the shape deformation -DV_MARKER= ( airfoil ) -% -% Parameters of the shape deformation -% - NO_DEFORMATION ( 1.0 ) -% - TRANSLATION ( x_Disp, y_Disp, z_Disp ), as a unit vector -% - ROTATION ( x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) -% - SCALE ( 1.0 ) -% - ANGLE_OF_ATTACK ( 1.0 ) -% - FFD_SETTING ( 1.0 ) -% - FFD_CONTROL_POINT ( FFD_BoxTag, i_Ind, j_Ind, k_Ind, x_Disp, y_Disp, z_Disp ) -% - FFD_NACELLE ( FFD_BoxTag, rho_Ind, theta_Ind, phi_Ind, rho_Disp, phi_Disp ) -% - FFD_GULL ( FFD_BoxTag, j_Ind ) -% - FFD_ANGLE_OF_ATTACK ( FFD_BoxTag, 1.0 ) -% - FFD_CAMBER ( FFD_BoxTag, i_Ind, j_Ind ) -% - FFD_THICKNESS ( FFD_BoxTag, i_Ind, j_Ind ) -% - FFD_TWIST ( FFD_BoxTag, j_Ind, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) -% - FFD_CONTROL_POINT_2D ( FFD_BoxTag, i_Ind, j_Ind, x_Disp, y_Disp ) -% - FFD_CAMBER_2D ( FFD_BoxTag, i_Ind ) -% - FFD_THICKNESS_2D ( FFD_BoxTag, i_Ind ) -% - FFD_TWIST_2D ( FFD_BoxTag, x_Orig, y_Orig ) -% - HICKS_HENNE ( Lower Surface (0)/Upper Surface (1)/Only one Surface (2), x_Loc ) -% - SURFACE_BUMP ( x_Start, x_End, x_Loc ) -DV_PARAM= ( 1, 0.5 ) -% -% Value of the shape deformation -DV_VALUE= 0.01 - -% ------------------------ GRID DEFORMATION PARAMETERS ------------------------% -% -% Number of smoothing iterations for FEA mesh deformation -DEFORM_LINEAR_ITER= 500 -% -% Number of nonlinear deformation iterations (surface deformation increments) -DEFORM_NONLINEAR_ITER= 1 -% -% Minimum residual criteria for the linear solver convergence of grid deformation -DEFORM_LINEAR_SOLVER_ERROR= 1E-14 -% -% Print the residuals during mesh deformation to the console (YES, NO) -DEFORM_CONSOLE_OUTPUT= YES -% -% Type of element stiffness imposed for FEA mesh deformation (INVERSE_VOLUME, -% WALL_DISTANCE, CONSTANT_STIFFNESS) -DEFORM_STIFFNESS_TYPE= INVERSE_VOLUME -% -% Visualize the surface deformation (NO, YES) -VISUALIZE_SURFACE_DEF= NO -% -% Visualize the volume deformation (NO, YES) -VISUALIZE_VOLUME_DEF= NO - -% --------------------------- CONVERGENCE PARAMETERS --------------------------% -% Convergence criteria (CAUCHY, RESIDUAL) -% -CONV_CRITERIA= RESIDUAL -% -% Residual reduction (order of magnitude with respect to the initial value) -RESIDUAL_REDUCTION= 10 -% -% Min value of the residual (log10 of the residual) -RESIDUAL_MINVAL= -10 -% -% Start Cauchy criteria at iteration number -STARTCONV_ITER= 10 -% -% Number of elements to apply the criteria -CAUCHY_ELEMS= 100 -% -% Epsilon to control the series convergence -CAUCHY_EPS= 1E-6 -% -% Function to apply the criteria (LIFT, DRAG, SENS_GEOMETRY, SENS_MACH, -% DELTA_LIFT, DELTA_DRAG) -CAUCHY_FUNC_FLOW= DRAG - -% ------------------------- INPUT/OUTPUT INFORMATION --------------------------% -% Mesh input file 
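Note on the convergence block above: with CONV_CRITERIA= RESIDUAL the run stops once the monitored residual has dropped RESIDUAL_REDUCTION orders of magnitude below its initial value, or below RESIDUAL_MINVAL (both on a log10 scale). This file asks for 10 orders / -10, whereas the configuration earlier in this diff settles for 6 / -8. A minimal sketch of that stopping rule, assuming the plain reading of the option comments (SU2's internal bookkeeping may differ in detail):

import math

def residual_converged(residual_history, reduction=10.0, minval=-10.0):
    """True once log10(residual) has fallen `reduction` orders below its
    initial value, or under `minval`, per the RESIDUAL criterion comments."""
    if not residual_history:
        return False
    log_res = [math.log10(r) for r in residual_history]
    return (log_res[0] - log_res[-1] >= reduction) or (log_res[-1] <= minval)

# e.g. residual_converged([1e-1, 1e-6, 1e-12]) -> True (drop of 11 orders)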
-%MESH_FILENAME=mesh_NACA0012_xcoarse.su2 -MESH_FILENAME=passed_as_flag_to_train.py.su2 -% -% Mesh input file format (SU2, CGNS, NETCDF_ASCII) -MESH_FORMAT= SU2 -% -% Mesh output file -MESH_OUT_FILENAME= mesh_out.su2 -% -% Restart flow input file -SOLUTION_FLOW_FILENAME= solution_flow.dat -% -% Restart adjoint input file -SOLUTION_ADJ_FILENAME= solution_adj.dat -% -% Output file format (TECPLOT, PARAVIEW, TECPLOT_BINARY) -%OUTPUT_FORMAT= TECPLOT_BINARY -% -% Output file convergence history (w/o extension) -CONV_FILENAME= history -% -% Output file restart flow -RESTART_FLOW_FILENAME= restart_flow.dat -% -% Output file restart adjoint -RESTART_ADJ_FILENAME= restart_adj.dat -% -% Output file flow (w/o extension) variables -VOLUME_FLOW_FILENAME= flow -% -% Output file adjoint (w/o extension) variables -VOLUME_ADJ_FILENAME= adjoint -% -% Output Objective function gradient (using continuous adjoint) -GRAD_OBJFUNC_FILENAME= of_grad.dat -% -% Output file surface flow coefficient (w/o extension) -SURFACE_FLOW_FILENAME= surface_flow -% -% Output file surface adjoint coefficient (w/o extension) -SURFACE_ADJ_FILENAME= surface_adjoint -% -% Writing solution file frequency -WRT_SOL_FREQ= 1000 -% -% Writing convergence history frequency -WRT_CON_FREQ= 1000 - -% --------------------- OPTIMAL SHAPE DESIGN DEFINITION -----------------------% -% Available flow based objective functions or constraint functions -% DRAG, LIFT, SIDEFORCE, EFFICIENCY, -% FORCE_X, FORCE_Y, FORCE_Z, -% MOMENT_X, MOMENT_Y, MOMENT_Z, -% THRUST, TORQUE, FIGURE_OF_MERIT, -% EQUIVALENT_AREA, NEARFIELD_PRESSURE, -% TOTAL_HEATFLUX, MAXIMUM_HEATFLUX, -% INVERSE_DESIGN_PRESSURE, INVERSE_DESIGN_HEATFLUX, -% -% Available geometrical based objective functions or constraint functions -% AIRFOIL_AREA, AIRFOIL_THICKNESS, AIRFOIL_CHORD, AIRFOIL_TOC, AIRFOIL_AOA, -% WING_VOLUME, WING_MIN_THICKNESS, WING_MAX_THICKNESS, WING_MAX_CHORD, WING_MIN_TOC, WING_MAX_TWIST, WING_MAX_CURVATURE, WING_MAX_DIHEDRAL -% STATION#_WIDTH, STATION#_AREA, STATION#_THICKNESS, STATION#_CHORD, STATION#_TOC, -% STATION#_TWIST (where # is the index of the station defined in GEO_LOCATION_STATIONS) -% -% Available design variables -% HICKS_HENNE ( 1, Scale | Mark. List | Lower(0)/Upper(1) side, x_Loc ) -% NACA_4DIGITS ( 4, Scale | Mark. List | 1st digit, 2nd digit, 3rd and 4th digit ) -% TRANSLATION ( 5, Scale | Mark. List | x_Disp, y_Disp, z_Disp ) -% ROTATION ( 6, Scale | Mark. List | x_Axis, y_Axis, z_Axis, x_Turn, y_Turn, z_Turn ) -% FFD_CONTROL_POINT_2D ( 15, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, x_Mov, y_Mov ) -% FFD_CAMBER_2D ( 16, Scale | Mark. List | FFD_Box_ID, i_Ind ) -% FFD_THICKNESS_2D ( 17, Scale | Mark. 
List | FFD_Box_ID, i_Ind ) -% -% Optimization objective function with scaling factor -% ex= Objective * Scale -% OPT_OBJECTIVE= DRAG * 0.001 -% -% Optimization constraint functions with scaling factors, separated by semicolons -% ex= (Objective = Value ) * Scale, use '>','<','=' -% OPT_CONSTRAINT= ( LIFT > 0.328188 ) * 0.001; ( MOMENT_Z > 0.034068 ) * 0.001; ( AIRFOIL_THICKNESS > 0.11 ) * 0.001 -% -% Optimization design variables, separated by semicolons -% DEFINITION_DV= ( 1, 1.0 | airfoil | 0, 0.05 ); ( 1, 1.0 | airfoil | 0, 0.10 ); ( 1, 1.0 | airfoil | 0, 0.15 ); ( 1, 1.0 | airfoil | 0, 0.20 ); ( 1, 1.0 | airfoil | 0, 0.25 ); ( 1, 1.0 | airfoil | 0, 0.30 ); ( 1, 1.0 | airfoil | 0, 0.35 ); ( 1, 1.0 | airfoil | 0, 0.40 ); ( 1, 1.0 | airfoil | 0, 0.45 ); ( 1, 1.0 | airfoil | 0, 0.50 ); ( 1, 1.0 | airfoil | 0, 0.55 ); ( 1, 1.0 | airfoil | 0, 0.60 ); ( 1, 1.0 | airfoil | 0, 0.65 ); ( 1, 1.0 | airfoil | 0, 0.70 ); ( 1, 1.0 | airfoil | 0, 0.75 ); ( 1, 1.0 | airfoil | 0, 0.80 ); ( 1, 1.0 | airfoil | 0, 0.85 ); ( 1, 1.0 | airfoil | 0, 0.90 ); ( 1, 1.0 | airfoil | 0, 0.95 ); ( 1, 1.0 | airfoil | 1, 0.05 ); ( 1, 1.0 | airfoil | 1, 0.10 ); ( 1, 1.0 | airfoil | 1, 0.15 ); ( 1, 1.0 | airfoil | 1, 0.20 ); ( 1, 1.0 | airfoil | 1, 0.25 ); ( 1, 1.0 | airfoil | 1, 0.30 ); ( 1, 1.0 | airfoil | 1, 0.35 ); ( 1, 1.0 | airfoil | 1, 0.40 ); ( 1, 1.0 | airfoil | 1, 0.45 ); ( 1, 1.0 | airfoil | 1, 0.50 ); ( 1, 1.0 | airfoil | 1, 0.55 ); ( 1, 1.0 | airfoil | 1, 0.60 ); ( 1, 1.0 | airfoil | 1, 0.65 ); ( 1, 1.0 | airfoil | 1, 0.70 ); ( 1, 1.0 | airfoil | 1, 0.75 ); ( 1, 1.0 | airfoil | 1, 0.80 ); ( 1, 1.0 | airfoil | 1, 0.85 ); ( 1, 1.0 | airfoil | 1, 0.90 ); ( 1, 1.0 | airfoil | 1, 0.95 ) - - - - -DIFF_INPUTS= COORDS_X, COORDS_Y, AOA, MACH -DIFF_OUTPUTS= VEL_X, VEL_Y, PRESSURE +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% % +% SU2 configuration file % +% Case description: Transonic inviscid flow around a NACA0012 airfoil % +% Author: Thomas D. 
Economon % +% Institution: Stanford University % +% Date: 2014.06.11 % +% File Version 6.2.0 "Falcon" % +% % +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% ------------- DIRECT, ADJOINT, AND LINEARIZED PROBLEM DEFINITION ------------% +% +% Physical governing equations (EULER, NAVIER_STOKES, +% WAVE_EQUATION, HEAT_EQUATION, FEM_ELASTICITY, +% POISSON_EQUATION) +PHYSICAL_PROBLEM= EULER +% +% Mathematical problem (DIRECT, CONTINUOUS_ADJOINT) +MATH_PROBLEM= DIRECT +% +% Restart solution (NO, YES) +RESTART_SOL= NO + +% ----------- COMPRESSIBLE AND INCOMPRESSIBLE FREE-STREAM DEFINITION ----------% +% +% Mach number (non-dimensional, based on the free-stream values) +MACH_NUMBER= 0.8 +% +% Angle of attack (degrees) +AOA= 1.25 +% +% Free-stream pressure (101325.0 N/m^2 by default, only Euler flows) +FREESTREAM_PRESSURE= 101325.0 +% +% Free-stream temperature (273.15 K by default) +FREESTREAM_TEMPERATURE= 273.15 + +% -------------- COMPRESSIBLE AND INCOMPRESSIBLE FLUID CONSTANTS --------------% +% +% Ratio of specific heats (1.4 (air), only for compressible flows) +GAMMA_VALUE= 1.4 +% +% Specific gas constant (287.87 J/kg*K (air), only for compressible flows) +GAS_CONSTANT= 287.87 + +% ---------------------- REFERENCE VALUE DEFINITION ---------------------------% +% +% Reference origin for moment computation +REF_ORIGIN_MOMENT_X = 0.25 +REF_ORIGIN_MOMENT_Y = 0.00 +REF_ORIGIN_MOMENT_Z = 0.00 +% +% Reference length for pitching, rolling, and yawing non-dimensional moment +REF_LENGTH= 1.0 +% +% Reference area for force coefficients (0 implies automatic calculation) +REF_AREA= 1.0 +% +% Flow non-dimensionalization (DIMENSIONAL, FREESTREAM_PRESS_EQ_ONE, +% FREESTREAM_VEL_EQ_MACH, FREESTREAM_VEL_EQ_ONE) +REF_DIMENSIONALIZATION= DIMENSIONAL + +% ----------------------- BOUNDARY CONDITION DEFINITION -----------------------% +% +% Marker of the Euler boundary (NONE = no marker) +MARKER_EULER= ( airfoil ) +% +% Marker of the far field (NONE = no marker) +MARKER_FAR= ( farfield ) + +% ------------------------ SURFACES IDENTIFICATION ----------------------------% +% +% Marker(s) of the surface in the surface flow solution file +MARKER_PLOTTING = ( airfoil ) +% +% Marker(s) of the surface where the non-dimensional coefficients are evaluated. +MARKER_MONITORING = ( airfoil ) +% +% Marker(s) of the surface where obj. func. 
(design problem) will be evaluated +MARKER_DESIGNING = ( airfoil ) + +% ------------- COMMON PARAMETERS TO DEFINE THE NUMERICAL METHOD --------------% +% +% Numerical method for spatial gradients (GREEN_GAUSS, WEIGHTED_LEAST_SQUARES) +NUM_METHOD_GRAD= WEIGHTED_LEAST_SQUARES +% +% Objective function in optimization problem (DRAG, LIFT, SIDEFORCE, MOMENT_X, +% MOMENT_Y, MOMENT_Z, EFFICIENCY, +% EQUIVALENT_AREA, NEARFIELD_PRESSURE, +% FORCE_X, FORCE_Y, FORCE_Z, THRUST, +% TORQUE, FREE_SURFACE, TOTAL_HEATFLUX, +% MAXIMUM_HEATFLUX, INVERSE_DESIGN_PRESSURE, +% INVERSE_DESIGN_HEATFLUX) +% OBJECTIVE_FUNCTION= DRAG +% +% Courant-Friedrichs-Lewy condition of the finest grid +%CFL_NUMBER= 4.0 +CFL_NUMBER= 1.0 +%CFL_NUMBER=0.1 +% +% Number of total iterations +EXT_ITER=1000 +ITER= 1000 + +% ------------------------ LINEAR SOLVER DEFINITION ---------------------------% +% +% Linear solver for implicit formulations (BCGSTAB, FGMRES) +LINEAR_SOLVER= FGMRES +% +% Preconditioner of the Krylov linear solver (JACOBI, LINELET, LU_SGS) +LINEAR_SOLVER_PREC= LU_SGS +% +% Minimum error of the linear solver for implicit formulations +LINEAR_SOLVER_ERROR= 1E-6 +% +% Max number of iterations of the linear solver for the implicit formulation +LINEAR_SOLVER_ITER= 5 + +% -------------------------- MULTIGRID PARAMETERS -----------------------------% +% +% Multi-Grid Levels (0 = no multi-grid) +MGLEVEL= 2 +% +% Multi-grid cycle (V_CYCLE, W_CYCLE, FULLMG_CYCLE) +MGCYCLE= W_CYCLE +% +% Multi-Grid PreSmoothing Level +MG_PRE_SMOOTH= ( 1, 2, 3, 3 ) +% +% Multi-Grid PostSmoothing Level +MG_POST_SMOOTH= ( 0, 0, 0, 0 ) +% +% Jacobi implicit smoothing of the correction +MG_CORRECTION_SMOOTH= ( 0, 0, 0, 0 ) +% +% Damping factor for the residual restriction +MG_DAMP_RESTRICTION= 1.0 +% +% Damping factor for the correction prolongation +MG_DAMP_PROLONGATION= 1.0 + +% -------------------- FLOW NUMERICAL METHOD DEFINITION -----------------------% +% +% Convective numerical method (JST, LAX-FRIEDRICH, CUSP, ROE, AUSM, HLLC, +% TURKEL_PREC, MSW) +CONV_NUM_METHOD_FLOW= JST +% +% Monotonic Upwind Scheme for Conservation Laws (TVD) in the flow equations. +% Required for 2nd order upwind schemes (NO, YES) +MUSCL_FLOW= YES +% +% Slope limiter (NONE, VENKATAKRISHNAN, VENKATAKRISHNAN_WANG, +% BARTH_JESPERSEN, VAN_ALBADA_EDGE) +SLOPE_LIMITER_FLOW= VENKATAKRISHNAN +% +% 2nd and 4th order artificial dissipation coefficients +JST_SENSOR_COEFF= ( 0.5, 0.02 ) +% +% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT, EULER_EXPLICIT) +TIME_DISCRE_FLOW= EULER_IMPLICIT + +% ---------------- ADJOINT-FLOW NUMERICAL METHOD DEFINITION -------------------% +% +% Convective numerical method (JST, LAX-FRIEDRICH, ROE) +CONV_NUM_METHOD_ADJFLOW= JST +% +% Monotonic Upwind Scheme for Conservation Laws (TVD) in the adjoint flow equations. 
+% Required for 2nd order upwind schemes (NO, YES) +MUSCL_ADJFLOW= YES +% +% Slope limiter (NONE, VENKATAKRISHNAN, BARTH_JESPERSEN, VAN_ALBADA_EDGE, +% SHARP_EDGES, WALL_DISTANCE) +SLOPE_LIMITER_ADJFLOW= NONE +% +% Reduction factor of the CFL coefficient in the adjoint problem +CFL_REDUCTION_ADJFLOW= 0.5 +% +% Time discretization (RUNGE-KUTTA_EXPLICIT, EULER_IMPLICIT) +TIME_DISCRE_ADJFLOW= EULER_IMPLICIT + +% ----------------------- DESIGN VARIABLE PARAMETERS --------------------------% +% +% Kind of deformation (NO_DEFORMATION, TRANSLATION, ROTATION, SCALE, +% FFD_SETTING, FFD_NACELLE +% FFD_CONTROL_POINT, FFD_CAMBER, FFD_THICKNESS, FFD_TWIST +% FFD_CONTROL_POINT_2D, FFD_CAMBER_2D, FFD_THICKNESS_2D, FFD_TWIST_2D, +% HICKS_HENNE, SURFACE_BUMP) +DV_KIND= HICKS_HENNE +% +% Marker of the surface in which we are going apply the shape deformation +DV_MARKER= ( airfoil ) +% +% Parameters of the shape deformation +% - NO_DEFORMATION ( 1.0 ) +% - TRANSLATION ( x_Disp, y_Disp, z_Disp ), as a unit vector +% - ROTATION ( x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) +% - SCALE ( 1.0 ) +% - ANGLE_OF_ATTACK ( 1.0 ) +% - FFD_SETTING ( 1.0 ) +% - FFD_CONTROL_POINT ( FFD_BoxTag, i_Ind, j_Ind, k_Ind, x_Disp, y_Disp, z_Disp ) +% - FFD_NACELLE ( FFD_BoxTag, rho_Ind, theta_Ind, phi_Ind, rho_Disp, phi_Disp ) +% - FFD_GULL ( FFD_BoxTag, j_Ind ) +% - FFD_ANGLE_OF_ATTACK ( FFD_BoxTag, 1.0 ) +% - FFD_CAMBER ( FFD_BoxTag, i_Ind, j_Ind ) +% - FFD_THICKNESS ( FFD_BoxTag, i_Ind, j_Ind ) +% - FFD_TWIST ( FFD_BoxTag, j_Ind, x_Orig, y_Orig, z_Orig, x_End, y_End, z_End ) +% - FFD_CONTROL_POINT_2D ( FFD_BoxTag, i_Ind, j_Ind, x_Disp, y_Disp ) +% - FFD_CAMBER_2D ( FFD_BoxTag, i_Ind ) +% - FFD_THICKNESS_2D ( FFD_BoxTag, i_Ind ) +% - FFD_TWIST_2D ( FFD_BoxTag, x_Orig, y_Orig ) +% - HICKS_HENNE ( Lower Surface (0)/Upper Surface (1)/Only one Surface (2), x_Loc ) +% - SURFACE_BUMP ( x_Start, x_End, x_Loc ) +DV_PARAM= ( 1, 0.5 ) +% +% Value of the shape deformation +DV_VALUE= 0.01 + +% ------------------------ GRID DEFORMATION PARAMETERS ------------------------% +% +% Number of smoothing iterations for FEA mesh deformation +DEFORM_LINEAR_ITER= 500 +% +% Number of nonlinear deformation iterations (surface deformation increments) +DEFORM_NONLINEAR_ITER= 1 +% +% Minimum residual criteria for the linear solver convergence of grid deformation +DEFORM_LINEAR_SOLVER_ERROR= 1E-14 +% +% Print the residuals during mesh deformation to the console (YES, NO) +DEFORM_CONSOLE_OUTPUT= YES +% +% Type of element stiffness imposed for FEA mesh deformation (INVERSE_VOLUME, +% WALL_DISTANCE, CONSTANT_STIFFNESS) +DEFORM_STIFFNESS_TYPE= INVERSE_VOLUME +% +% Visualize the surface deformation (NO, YES) +VISUALIZE_SURFACE_DEF= NO +% +% Visualize the volume deformation (NO, YES) +VISUALIZE_VOLUME_DEF= NO + +% --------------------------- CONVERGENCE PARAMETERS --------------------------% +% Convergence criteria (CAUCHY, RESIDUAL) +% +CONV_CRITERIA= RESIDUAL +% +% Residual reduction (order of magnitude with respect to the initial value) +RESIDUAL_REDUCTION= 10 +% +% Min value of the residual (log10 of the residual) +RESIDUAL_MINVAL= -10 +% +% Start Cauchy criteria at iteration number +STARTCONV_ITER= 10 +% +% Number of elements to apply the criteria +CAUCHY_ELEMS= 100 +% +% Epsilon to control the series convergence +CAUCHY_EPS= 1E-6 +% +% Function to apply the criteria (LIFT, DRAG, SENS_GEOMETRY, SENS_MACH, +% DELTA_LIFT, DELTA_DRAG) +CAUCHY_FUNC_FLOW= DRAG + +% ------------------------- INPUT/OUTPUT INFORMATION --------------------------% +% Mesh input file 
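Note on the I/O block that follows: MESH_FILENAME= passed_as_flag_to_train.py.su2 is a placeholder rather than a real file; the mesh actually used is supplied at run time (compare the --coarse-mesh and --su2-config flags in main.py later in this diff). Below is a hypothetical helper sketching one way to materialise a runnable config from such a template; the function name, the string substitution, and the commented call are illustrative only and do not exist in the repository.

from pathlib import Path

def write_config_with_mesh(template_path, mesh_path, out_path,
                           placeholder="passed_as_flag_to_train.py.su2"):
    """Hypothetical helper: copy an SU2 config template, pointing
    MESH_FILENAME at a concrete mesh file instead of the placeholder."""
    text = Path(template_path).read_text()
    Path(out_path).write_text(text.replace(placeholder, str(mesh_path)))

# write_config_with_mesh("fine.cfg", "meshes/mesh_NACA0012_xcoarse.su2", "run.cfg")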
+%MESH_FILENAME=mesh_NACA0012_xcoarse.su2 +MESH_FILENAME=passed_as_flag_to_train.py.su2 +% +% Mesh input file format (SU2, CGNS, NETCDF_ASCII) +MESH_FORMAT= SU2 +% +% Mesh output file +MESH_OUT_FILENAME= mesh_out.su2 +% +% Restart flow input file +SOLUTION_FLOW_FILENAME= solution_flow.dat +% +% Restart adjoint input file +SOLUTION_ADJ_FILENAME= solution_adj.dat +% +% Output file format (TECPLOT, PARAVIEW, TECPLOT_BINARY) +%OUTPUT_FORMAT= TECPLOT_BINARY +% +% Output file convergence history (w/o extension) +CONV_FILENAME= history +% +% Output file restart flow +RESTART_FLOW_FILENAME= restart_flow.dat +% +% Output file restart adjoint +RESTART_ADJ_FILENAME= restart_adj.dat +% +% Output file flow (w/o extension) variables +VOLUME_FLOW_FILENAME= flow +% +% Output file adjoint (w/o extension) variables +VOLUME_ADJ_FILENAME= adjoint +% +% Output Objective function gradient (using continuous adjoint) +GRAD_OBJFUNC_FILENAME= of_grad.dat +% +% Output file surface flow coefficient (w/o extension) +SURFACE_FLOW_FILENAME= surface_flow +% +% Output file surface adjoint coefficient (w/o extension) +SURFACE_ADJ_FILENAME= surface_adjoint +% +% Writing solution file frequency +WRT_SOL_FREQ= 1000 +% +% Writing convergence history frequency +WRT_CON_FREQ= 1000 + +% --------------------- OPTIMAL SHAPE DESIGN DEFINITION -----------------------% +% Available flow based objective functions or constraint functions +% DRAG, LIFT, SIDEFORCE, EFFICIENCY, +% FORCE_X, FORCE_Y, FORCE_Z, +% MOMENT_X, MOMENT_Y, MOMENT_Z, +% THRUST, TORQUE, FIGURE_OF_MERIT, +% EQUIVALENT_AREA, NEARFIELD_PRESSURE, +% TOTAL_HEATFLUX, MAXIMUM_HEATFLUX, +% INVERSE_DESIGN_PRESSURE, INVERSE_DESIGN_HEATFLUX, +% +% Available geometrical based objective functions or constraint functions +% AIRFOIL_AREA, AIRFOIL_THICKNESS, AIRFOIL_CHORD, AIRFOIL_TOC, AIRFOIL_AOA, +% WING_VOLUME, WING_MIN_THICKNESS, WING_MAX_THICKNESS, WING_MAX_CHORD, WING_MIN_TOC, WING_MAX_TWIST, WING_MAX_CURVATURE, WING_MAX_DIHEDRAL +% STATION#_WIDTH, STATION#_AREA, STATION#_THICKNESS, STATION#_CHORD, STATION#_TOC, +% STATION#_TWIST (where # is the index of the station defined in GEO_LOCATION_STATIONS) +% +% Available design variables +% HICKS_HENNE ( 1, Scale | Mark. List | Lower(0)/Upper(1) side, x_Loc ) +% NACA_4DIGITS ( 4, Scale | Mark. List | 1st digit, 2nd digit, 3rd and 4th digit ) +% TRANSLATION ( 5, Scale | Mark. List | x_Disp, y_Disp, z_Disp ) +% ROTATION ( 6, Scale | Mark. List | x_Axis, y_Axis, z_Axis, x_Turn, y_Turn, z_Turn ) +% FFD_CONTROL_POINT_2D ( 15, Scale | Mark. List | FFD_Box_ID, i_Ind, j_Ind, x_Mov, y_Mov ) +% FFD_CAMBER_2D ( 16, Scale | Mark. List | FFD_Box_ID, i_Ind ) +% FFD_THICKNESS_2D ( 17, Scale | Mark. 
List | FFD_Box_ID, i_Ind ) +% +% Optimization objective function with scaling factor +% ex= Objective * Scale +% OPT_OBJECTIVE= DRAG * 0.001 +% +% Optimization constraint functions with scaling factors, separated by semicolons +% ex= (Objective = Value ) * Scale, use '>','<','=' +% OPT_CONSTRAINT= ( LIFT > 0.328188 ) * 0.001; ( MOMENT_Z > 0.034068 ) * 0.001; ( AIRFOIL_THICKNESS > 0.11 ) * 0.001 +% +% Optimization design variables, separated by semicolons +% DEFINITION_DV= ( 1, 1.0 | airfoil | 0, 0.05 ); ( 1, 1.0 | airfoil | 0, 0.10 ); ( 1, 1.0 | airfoil | 0, 0.15 ); ( 1, 1.0 | airfoil | 0, 0.20 ); ( 1, 1.0 | airfoil | 0, 0.25 ); ( 1, 1.0 | airfoil | 0, 0.30 ); ( 1, 1.0 | airfoil | 0, 0.35 ); ( 1, 1.0 | airfoil | 0, 0.40 ); ( 1, 1.0 | airfoil | 0, 0.45 ); ( 1, 1.0 | airfoil | 0, 0.50 ); ( 1, 1.0 | airfoil | 0, 0.55 ); ( 1, 1.0 | airfoil | 0, 0.60 ); ( 1, 1.0 | airfoil | 0, 0.65 ); ( 1, 1.0 | airfoil | 0, 0.70 ); ( 1, 1.0 | airfoil | 0, 0.75 ); ( 1, 1.0 | airfoil | 0, 0.80 ); ( 1, 1.0 | airfoil | 0, 0.85 ); ( 1, 1.0 | airfoil | 0, 0.90 ); ( 1, 1.0 | airfoil | 0, 0.95 ); ( 1, 1.0 | airfoil | 1, 0.05 ); ( 1, 1.0 | airfoil | 1, 0.10 ); ( 1, 1.0 | airfoil | 1, 0.15 ); ( 1, 1.0 | airfoil | 1, 0.20 ); ( 1, 1.0 | airfoil | 1, 0.25 ); ( 1, 1.0 | airfoil | 1, 0.30 ); ( 1, 1.0 | airfoil | 1, 0.35 ); ( 1, 1.0 | airfoil | 1, 0.40 ); ( 1, 1.0 | airfoil | 1, 0.45 ); ( 1, 1.0 | airfoil | 1, 0.50 ); ( 1, 1.0 | airfoil | 1, 0.55 ); ( 1, 1.0 | airfoil | 1, 0.60 ); ( 1, 1.0 | airfoil | 1, 0.65 ); ( 1, 1.0 | airfoil | 1, 0.70 ); ( 1, 1.0 | airfoil | 1, 0.75 ); ( 1, 1.0 | airfoil | 1, 0.80 ); ( 1, 1.0 | airfoil | 1, 0.85 ); ( 1, 1.0 | airfoil | 1, 0.90 ); ( 1, 1.0 | airfoil | 1, 0.95 ) + + + + +DIFF_INPUTS= COORDS_X, COORDS_Y, AOA, MACH +DIFF_OUTPUTS= VEL_X, VEL_Y, PRESSURE diff --git a/jointContribution/CFDGCN/main.py b/jointContribution/CFDGCN/main.py index 7f60485208..e32f025c03 100644 --- a/jointContribution/CFDGCN/main.py +++ b/jointContribution/CFDGCN/main.py @@ -1,442 +1,442 @@ -import argparse -import os -import sys - -import data -import mesh_utils -import models -import numpy as np -import paddle -import pgl.utils.data.dataloader as pgl_dataloader -import PIL -import su2paddle.su2_function_mpi as su2_function_mpi - -import ppsci -from ppsci.utils import logger - -os.environ["SU2_RUN"] = "/root/autodl-tmp/SU2_bin" -sys.path.append("/root/autodl-tmp/SU2_bin") - - -# GCN -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--exp-name", - "-e", - default="gcn_interp", - help="Experiment name, defaults to model name.", - ) - parser.add_argument("--su2-config", "-sc", default="coarse.cfg") - parser.add_argument( - "--data-dir", - "-d", - default="data/NACA0012_interpolate", - help="Directory with dataset.", - ) - parser.add_argument( - "--coarse-mesh", - default="meshes/mesh_NACA0012_xcoarse.su2", - help="Path to coarse mesh (required for CFD-GCN).", - ) - parser.add_argument( - "--version", - type=int, - default=None, - help="If specified log version doesnt exist, create it." - " If it exists, continue from where it stopped.", - ) - parser.add_argument( - "--load-model", "-lm", default="", help="Load previously trained model." 
- ) - - parser.add_argument("--model", "-m", default="gcn", help="Which model to use.") - parser.add_argument( - "--max-epochs", - "-me", - type=int, - default=1000, - help="Max number of epochs to train for.", - ) - parser.add_argument("--optim", default="adam", help="Optimizer.") - parser.add_argument("--batch-size", "-bs", type=int, default=4) - parser.add_argument("--learning-rate", "-lr", dest="lr", type=float, default=5e-4) - parser.add_argument("--num-layers", "-nl", type=int, default=3) - parser.add_argument("--num-end-convs", type=int, default=3) - parser.add_argument("--hidden-size", "-hs", type=int, default=512) - parser.add_argument( - "--freeze-mesh", action="store_true", help="Do not do any learning on the mesh." - ) - - parser.add_argument( - "--eval", action="store_true", help="Skips training, does only eval." - ) - parser.add_argument("--profile", action="store_true", help="Run profiler.") - parser.add_argument("--seed", type=int, default=0, help="Random seed") - parser.add_argument( - "--gpus", type=int, default=1, help="Number of gpus to use, 0 for none." - ) - parser.add_argument( - "--dataloader-workers", - "-dw", - type=int, - default=2, - help="Number of Paddle Dataloader workers to use.", - ) - parser.add_argument( - "--train-val-split", - "-tvs", - type=float, - default=0.9, - help="Percentage of training set to use for training.", - ) - parser.add_argument( - "--val-check-interval", - "-vci", - type=int, - default=None, - help="Run validation every N batches, " "defaults to once every epoch.", - ) - parser.add_argument( - "--early-stop-patience", - "-esp", - type=int, - default=0, - help="Patience before early stopping. " "Does not early stop by default.", - ) - parser.add_argument( - "--train-pct", - type=float, - default=1.0, - help="Run on a reduced percentage of the training set," - " defaults to running with full data.", - ) - parser.add_argument( - "--verbose", - type=int, - default=1, - choices=[0, 1], - help="Verbosity level. Defaults to 1, 0 for quiet.", - ) - parser.add_argument( - "--debug", - action="store_true", - help="Run in debug mode. Doesnt write logs. 
Runs " - "a single iteration of training and validation.", - ) - parser.add_argument( - "--no-log", - action="store_true", - default=False, - help="Dont save any logs or checkpoints.", - ) - - args = parser.parse_args() - args.nodename = os.uname().nodename - if args.exp_name == "": - args.exp_name = args.model - if args.val_check_interval is None: - args.val_check_interval = 1.0 - args.distributed_backend = "dp" - - return args - - -def collate_fn(batch_data): - return batch_data - - -class Runner: - def __init__(self, hparams): - self.hparams = hparams - self.step = None # count test step because apparently Trainer doesnt - self.criterion = paddle.nn.MSELoss() - self.data = data.MeshAirfoilDataset(hparams.data_dir, mode="train") - self.val_data = data.MeshAirfoilDataset(hparams.data_dir, mode="test") - self.test_data = data.MeshAirfoilDataset(hparams.data_dir, mode="test") - - in_channels = self.data[0].node_feat["feature"].shape[-1] - out_channels = self.data[0].y.shape[-1] - hidden_channels = hparams.hidden_size - - if hparams.model == "cfd_gcn": - self.model = models.CFDGCN( - hparams.su2_config, - self.hparams.coarse_mesh, - fine_marker_dict=self.data.marker_dict, - hidden_channels=hidden_channels, - num_convs=self.hparams.num_layers, - num_end_convs=self.hparams.num_end_convs, - out_channels=out_channels, - process_sim=self.data.preprocess, - freeze_mesh=self.hparams.freeze_mesh, - ) - elif hparams.model == "gcn": - self.model = models.MeshGCN( - in_channels, - hidden_channels, - out_channels, - fine_marker_dict=self.data.marker_dict, - num_layers=hparams.num_layers, - ) - else: - raise NotImplementedError - - # config optimizer - self.parameters = self.model.parameters() - if self.hparams.optim.lower() == "adam": - self.optimizer = paddle.optimizer.Adam( - parameters=self.parameters, learning_rate=self.hparams.lr - ) - elif self.hparams.optim.lower() == "rmsprop": - self.optimizer = paddle.optimizer.RMSProp( - parameters=self.parameters, learning_rate=self.hparams.lr - ) - elif self.hparams.optim.lower() == "sgd": - self.optimizer = paddle.optimizer.SGD( - parameters=self.parameters, learning_rate=self.hparams.lr - ) - else: - self.optimizer = paddle.optimizer.SGD( - parameters=self.parameters, learning_rate=self.hparams.lr - ) - logger.warning( - f"Please confirm optimizer type: {self.hparams.optim}, we set SGD as default optimizer." 
- ) - - # config dataloader - self.train_loader = self.train_dataloader() - self.val_loader = self.val_dataloader() - self.test_loader = self.test_dataloader() - - # config criterion - self.criterion = paddle.nn.loss.MSELoss() - - self.sum_loss = 0.0 - self.global_step = 0 - - def on_epoch_start(self): - logger.info("------") - self.sum_loss = 0.0 - - def on_epoch_end(self): - avg_loss = self.sum_loss / max(len(self.train_loader), 1) - logger.info(f"train_loss:{avg_loss}, step:{self.global_step}") - - def common_step(self, graphs): - loss = 0.0 - pred_fields = self.model(graphs) - for idx, pred_field in enumerate(pred_fields): - true_field = graphs[idx].y - mse_loss = self.criterion(pred_field, true_field) - loss += mse_loss - - loss = loss / len(graphs) - self.global_step += 1 - - return loss, pred_fields - - def training_step(self, batch, batch_idx): - loss, pred = self.common_step(batch) - self.sum_loss += loss.item() - - logger.info(f"batch_train_loss:{loss.item()}") - - if batch_idx == 0 and not self.hparams.no_log: - self.log_images( - batch[0].node_feat["feature"][:, :2], - pred[0], - batch[0].y, - self.data.elems_list, - "train", - ) - - loss.backward() - self.optimizer.step() - self.optimizer.clear_grad() - - def validation_step(self, batch, batch_idx): - loss, pred = self.common_step(batch) - - if batch_idx == 0 and not self.hparams.no_log: - self.log_images( - batch[0].node_feat["feature"][:, :2], - pred[0], - batch[0].y, - self.data.elems_list, - "val", - ) - - return loss.item() - - def test_step(self, batch, batch_idx): - loss, pred = self.common_step(batch) - self.step = 0 if self.step is None else self.step - self.step += 1 - - if not self.hparams.no_log: - for i in range(len(pred)): - self.log_images( - batch[i].node_feat["feature"][:, :2], - pred[i], - batch[i].y, - self.data.elems_list, - "test", - i, - batch_idx, - ) - - return loss.item() - - def train_dataloader(self): - train_loader = pgl_dataloader.Dataloader( - self.data, - batch_size=self.hparams.batch_size, - shuffle=( - self.hparams.train_pct == 1.0 - ), # don't shuffle if using reduced set - num_workers=1, - collate_fn=collate_fn, - ) - if self.hparams.verbose: - logger.info( - f"Train data: {len(self.data)} examples, " - f"{len(train_loader)} batches." - ) - return train_loader - - def val_dataloader(self): - # use test data here to get full training curve for test set - val_loader = pgl_dataloader.Dataloader( - self.val_data, - batch_size=self.hparams.batch_size, - shuffle=False, - num_workers=1, - collate_fn=collate_fn, - ) - if self.hparams.verbose: - logger.info( - f"Val data: {len(self.val_data)} examples, " - f"{len(val_loader)} batches." - ) - return val_loader - - def test_dataloader(self): - test_loader = pgl_dataloader.Dataloader( - self.test_data, - batch_size=self.hparams.batch_size, - shuffle=False, - num_workers=1, - collate_fn=collate_fn, - ) - if self.hparams.verbose: - logger.info( - f"Test data: {len(self.test_data)} examples, " - f"{len(test_loader)} batches." 
- ) - return test_loader - - def log_images(self, nodes, pred, true, elems_list, mode, log_idx=0, epoch_idx=0): - for field in range(pred.shape[1]): - true_img = mesh_utils.plot_field( - nodes, elems_list, true[:, field], title="true" - ) - min_max = (true[:, field].min().item(), true[:, field].max().item()) - pred_img = mesh_utils.plot_field( - nodes, elems_list, pred[:, field], title="pred", clim=min_max - ) - os.makedirs(f"{self.hparams.model}-fig", exist_ok=True) - img_true_name = f"{self.hparams.model}-fig/{mode}_true_f{field}_idx{log_idx}_{epoch_idx}.png" - img_pred_name = f"{self.hparams.model}-fig/{mode}_pred_f{field}_idx{log_idx}_{epoch_idx}.png" - im = PIL.Image.fromarray(true_img) - im.save(img_true_name) - im = PIL.Image.fromarray(pred_img) - im.save(img_pred_name) - - @staticmethod - def get_cross_prods(meshes, store_elems): - cross_prods = [ - mesh_utils.is_counter_clock_wise(mesh[e, :2], ret_val=True) - for mesh, elems in zip(meshes, store_elems) - for e in elems - ] - return cross_prods - - -if __name__ == "__main__": - paddle.set_device("gpu") - su2_function_mpi.activate_su2_mpi(remove_temp_files=True) - - args = parse_args() - logger.info(args) - ppsci.utils.misc.set_random_seed(args.seed) - - trainer = Runner(args) - - # test for special epoch - # epoch = 4 - # trainer.model.set_state_dict( - # paddle.load( - # os.path.join(f"params_{trainer.hparams.model}", f"model{epoch}.pdparams") - # ) - # ) - # trainer.optimizer.set_state_dict( - # paddle.load( - # os.path.join(f"params_{trainer.hparams.model}", f"opt{epoch}.pdopt") - # ) - # ) - # total_test_loss = [] - # for i, x in enumerate(trainer.test_loader): - # test_loss = trainer.test_step(x, i) - # total_test_loss.append(test_loss) - # mean_test_loss = np.stack(total_test_loss).mean() - # logger.info(f"test_loss (mean):{mean_test_loss}") - - # load model from special epoch - # epoch = 254 - # trainer.model.set_state_dict( - # paddle.load( - # os.path.join(f"params_{trainer.hparams.model}", f"model{epoch}.pdparams") - # ) - # ) - # trainer.optimizer.set_state_dict( - # paddle.load( - # os.path.join(f"params_{trainer.hparams.model}", f"opt{epoch}.pdopt") - # ) - # ) - - for epoch in range(args.max_epochs): - logger.info(f"epoch:{epoch}") - trainer.on_epoch_start() - - # for train - for i, graphs in enumerate(trainer.train_loader()): - trainer.training_step(graphs, i) - - trainer.on_epoch_end() - - # for val - total_val_loss = [] - for i, x in enumerate(trainer.val_loader): - val_loss = trainer.validation_step(x, i) - total_val_loss.append(val_loss) - mean_val_loss = np.stack(total_val_loss).mean() - logger.info(f"val_loss (mean):{mean_val_loss}") - - # for test - total_test_loss = [] - for i, x in enumerate(trainer.test_loader): - test_loss = trainer.test_step(x, i) - total_test_loss.append(test_loss) - mean_test_loss = np.stack(total_test_loss).mean() - logger.info(f"test_loss (mean):{mean_test_loss}") - - os.makedirs(f"params_{trainer.hparams.model}", exist_ok=True) - paddle.save( - trainer.model.state_dict(), - os.path.join(f"params_{trainer.hparams.model}", f"model{epoch}.pdparams"), - ) - paddle.save( - trainer.optimizer.state_dict(), - os.path.join(f"params_{trainer.hparams.model}", f"opt{epoch}.pdopt"), - ) +import argparse +import os +import sys + +import data +import mesh_utils +import models +import numpy as np +import paddle +import pgl.utils.data.dataloader as pgl_dataloader +import PIL +import su2paddle.su2_function_mpi as su2_function_mpi + +import ppsci +from ppsci.utils import logger + +os.environ["SU2_RUN"] 
= "/root/autodl-tmp/SU2_bin" +sys.path.append("/root/autodl-tmp/SU2_bin") + + +# GCN +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--exp-name", + "-e", + default="gcn_interp", + help="Experiment name, defaults to model name.", + ) + parser.add_argument("--su2-config", "-sc", default="coarse.cfg") + parser.add_argument( + "--data-dir", + "-d", + default="data/NACA0012_interpolate", + help="Directory with dataset.", + ) + parser.add_argument( + "--coarse-mesh", + default="meshes/mesh_NACA0012_xcoarse.su2", + help="Path to coarse mesh (required for CFD-GCN).", + ) + parser.add_argument( + "--version", + type=int, + default=None, + help="If specified log version doesnt exist, create it." + " If it exists, continue from where it stopped.", + ) + parser.add_argument( + "--load-model", "-lm", default="", help="Load previously trained model." + ) + + parser.add_argument("--model", "-m", default="gcn", help="Which model to use.") + parser.add_argument( + "--max-epochs", + "-me", + type=int, + default=1000, + help="Max number of epochs to train for.", + ) + parser.add_argument("--optim", default="adam", help="Optimizer.") + parser.add_argument("--batch-size", "-bs", type=int, default=4) + parser.add_argument("--learning-rate", "-lr", dest="lr", type=float, default=5e-4) + parser.add_argument("--num-layers", "-nl", type=int, default=3) + parser.add_argument("--num-end-convs", type=int, default=3) + parser.add_argument("--hidden-size", "-hs", type=int, default=512) + parser.add_argument( + "--freeze-mesh", action="store_true", help="Do not do any learning on the mesh." + ) + + parser.add_argument( + "--eval", action="store_true", help="Skips training, does only eval." + ) + parser.add_argument("--profile", action="store_true", help="Run profiler.") + parser.add_argument("--seed", type=int, default=0, help="Random seed") + parser.add_argument( + "--gpus", type=int, default=1, help="Number of gpus to use, 0 for none." + ) + parser.add_argument( + "--dataloader-workers", + "-dw", + type=int, + default=2, + help="Number of Paddle Dataloader workers to use.", + ) + parser.add_argument( + "--train-val-split", + "-tvs", + type=float, + default=0.9, + help="Percentage of training set to use for training.", + ) + parser.add_argument( + "--val-check-interval", + "-vci", + type=int, + default=None, + help="Run validation every N batches, " "defaults to once every epoch.", + ) + parser.add_argument( + "--early-stop-patience", + "-esp", + type=int, + default=0, + help="Patience before early stopping. " "Does not early stop by default.", + ) + parser.add_argument( + "--train-pct", + type=float, + default=1.0, + help="Run on a reduced percentage of the training set," + " defaults to running with full data.", + ) + parser.add_argument( + "--verbose", + type=int, + default=1, + choices=[0, 1], + help="Verbosity level. Defaults to 1, 0 for quiet.", + ) + parser.add_argument( + "--debug", + action="store_true", + help="Run in debug mode. Doesnt write logs. 
Runs " + "a single iteration of training and validation.", + ) + parser.add_argument( + "--no-log", + action="store_true", + default=False, + help="Dont save any logs or checkpoints.", + ) + + args = parser.parse_args() + args.nodename = os.uname().nodename + if args.exp_name == "": + args.exp_name = args.model + if args.val_check_interval is None: + args.val_check_interval = 1.0 + args.distributed_backend = "dp" + + return args + + +def collate_fn(batch_data): + return batch_data + + +class Runner: + def __init__(self, hparams): + self.hparams = hparams + self.step = None # count test step because apparently Trainer doesnt + self.criterion = paddle.nn.MSELoss() + self.data = data.MeshAirfoilDataset(hparams.data_dir, mode="train") + self.val_data = data.MeshAirfoilDataset(hparams.data_dir, mode="test") + self.test_data = data.MeshAirfoilDataset(hparams.data_dir, mode="test") + + in_channels = self.data[0].node_feat["feature"].shape[-1] + out_channels = self.data[0].y.shape[-1] + hidden_channels = hparams.hidden_size + + if hparams.model == "cfd_gcn": + self.model = models.CFDGCN( + hparams.su2_config, + self.hparams.coarse_mesh, + fine_marker_dict=self.data.marker_dict, + hidden_channels=hidden_channels, + num_convs=self.hparams.num_layers, + num_end_convs=self.hparams.num_end_convs, + out_channels=out_channels, + process_sim=self.data.preprocess, + freeze_mesh=self.hparams.freeze_mesh, + ) + elif hparams.model == "gcn": + self.model = models.MeshGCN( + in_channels, + hidden_channels, + out_channels, + fine_marker_dict=self.data.marker_dict, + num_layers=hparams.num_layers, + ) + else: + raise NotImplementedError + + # config optimizer + self.parameters = self.model.parameters() + if self.hparams.optim.lower() == "adam": + self.optimizer = paddle.optimizer.Adam( + parameters=self.parameters, learning_rate=self.hparams.lr + ) + elif self.hparams.optim.lower() == "rmsprop": + self.optimizer = paddle.optimizer.RMSProp( + parameters=self.parameters, learning_rate=self.hparams.lr + ) + elif self.hparams.optim.lower() == "sgd": + self.optimizer = paddle.optimizer.SGD( + parameters=self.parameters, learning_rate=self.hparams.lr + ) + else: + self.optimizer = paddle.optimizer.SGD( + parameters=self.parameters, learning_rate=self.hparams.lr + ) + logger.warning( + f"Please confirm optimizer type: {self.hparams.optim}, we set SGD as default optimizer." 
+ ) + + # config dataloader + self.train_loader = self.train_dataloader() + self.val_loader = self.val_dataloader() + self.test_loader = self.test_dataloader() + + # config criterion + self.criterion = paddle.nn.loss.MSELoss() + + self.sum_loss = 0.0 + self.global_step = 0 + + def on_epoch_start(self): + logger.info("------") + self.sum_loss = 0.0 + + def on_epoch_end(self): + avg_loss = self.sum_loss / max(len(self.train_loader), 1) + logger.info(f"train_loss:{avg_loss}, step:{self.global_step}") + + def common_step(self, graphs): + loss = 0.0 + pred_fields = self.model(graphs) + for idx, pred_field in enumerate(pred_fields): + true_field = graphs[idx].y + mse_loss = self.criterion(pred_field, true_field) + loss += mse_loss + + loss = loss / len(graphs) + self.global_step += 1 + + return loss, pred_fields + + def training_step(self, batch, batch_idx): + loss, pred = self.common_step(batch) + self.sum_loss += loss.item() + + logger.info(f"batch_train_loss:{loss.item()}") + + if batch_idx == 0 and not self.hparams.no_log: + self.log_images( + batch[0].node_feat["feature"][:, :2], + pred[0], + batch[0].y, + self.data.elems_list, + "train", + ) + + loss.backward() + self.optimizer.step() + self.optimizer.clear_grad() + + def validation_step(self, batch, batch_idx): + loss, pred = self.common_step(batch) + + if batch_idx == 0 and not self.hparams.no_log: + self.log_images( + batch[0].node_feat["feature"][:, :2], + pred[0], + batch[0].y, + self.data.elems_list, + "val", + ) + + return loss.item() + + def test_step(self, batch, batch_idx): + loss, pred = self.common_step(batch) + self.step = 0 if self.step is None else self.step + self.step += 1 + + if not self.hparams.no_log: + for i in range(len(pred)): + self.log_images( + batch[i].node_feat["feature"][:, :2], + pred[i], + batch[i].y, + self.data.elems_list, + "test", + i, + batch_idx, + ) + + return loss.item() + + def train_dataloader(self): + train_loader = pgl_dataloader.Dataloader( + self.data, + batch_size=self.hparams.batch_size, + shuffle=( + self.hparams.train_pct == 1.0 + ), # don't shuffle if using reduced set + num_workers=1, + collate_fn=collate_fn, + ) + if self.hparams.verbose: + logger.info( + f"Train data: {len(self.data)} examples, " + f"{len(train_loader)} batches." + ) + return train_loader + + def val_dataloader(self): + # use test data here to get full training curve for test set + val_loader = pgl_dataloader.Dataloader( + self.val_data, + batch_size=self.hparams.batch_size, + shuffle=False, + num_workers=1, + collate_fn=collate_fn, + ) + if self.hparams.verbose: + logger.info( + f"Val data: {len(self.val_data)} examples, " + f"{len(val_loader)} batches." + ) + return val_loader + + def test_dataloader(self): + test_loader = pgl_dataloader.Dataloader( + self.test_data, + batch_size=self.hparams.batch_size, + shuffle=False, + num_workers=1, + collate_fn=collate_fn, + ) + if self.hparams.verbose: + logger.info( + f"Test data: {len(self.test_data)} examples, " + f"{len(test_loader)} batches." 
+ ) + return test_loader + + def log_images(self, nodes, pred, true, elems_list, mode, log_idx=0, epoch_idx=0): + for field in range(pred.shape[1]): + true_img = mesh_utils.plot_field( + nodes, elems_list, true[:, field], title="true" + ) + min_max = (true[:, field].min().item(), true[:, field].max().item()) + pred_img = mesh_utils.plot_field( + nodes, elems_list, pred[:, field], title="pred", clim=min_max + ) + os.makedirs(f"{self.hparams.model}-fig", exist_ok=True) + img_true_name = f"{self.hparams.model}-fig/{mode}_true_f{field}_idx{log_idx}_{epoch_idx}.png" + img_pred_name = f"{self.hparams.model}-fig/{mode}_pred_f{field}_idx{log_idx}_{epoch_idx}.png" + im = PIL.Image.fromarray(true_img) + im.save(img_true_name) + im = PIL.Image.fromarray(pred_img) + im.save(img_pred_name) + + @staticmethod + def get_cross_prods(meshes, store_elems): + cross_prods = [ + mesh_utils.is_counter_clock_wise(mesh[e, :2], ret_val=True) + for mesh, elems in zip(meshes, store_elems) + for e in elems + ] + return cross_prods + + +if __name__ == "__main__": + paddle.set_device("gpu") + su2_function_mpi.activate_su2_mpi(remove_temp_files=True) + + args = parse_args() + logger.info(args) + ppsci.utils.misc.set_random_seed(args.seed) + + trainer = Runner(args) + + # test for special epoch + # epoch = 4 + # trainer.model.set_state_dict( + # paddle.load( + # os.path.join(f"params_{trainer.hparams.model}", f"model{epoch}.pdparams") + # ) + # ) + # trainer.optimizer.set_state_dict( + # paddle.load( + # os.path.join(f"params_{trainer.hparams.model}", f"opt{epoch}.pdopt") + # ) + # ) + # total_test_loss = [] + # for i, x in enumerate(trainer.test_loader): + # test_loss = trainer.test_step(x, i) + # total_test_loss.append(test_loss) + # mean_test_loss = np.stack(total_test_loss).mean() + # logger.info(f"test_loss (mean):{mean_test_loss}") + + # load model from special epoch + # epoch = 254 + # trainer.model.set_state_dict( + # paddle.load( + # os.path.join(f"params_{trainer.hparams.model}", f"model{epoch}.pdparams") + # ) + # ) + # trainer.optimizer.set_state_dict( + # paddle.load( + # os.path.join(f"params_{trainer.hparams.model}", f"opt{epoch}.pdopt") + # ) + # ) + + for epoch in range(args.max_epochs): + logger.info(f"epoch:{epoch}") + trainer.on_epoch_start() + + # for train + for i, graphs in enumerate(trainer.train_loader()): + trainer.training_step(graphs, i) + + trainer.on_epoch_end() + + # for val + total_val_loss = [] + for i, x in enumerate(trainer.val_loader): + val_loss = trainer.validation_step(x, i) + total_val_loss.append(val_loss) + mean_val_loss = np.stack(total_val_loss).mean() + logger.info(f"val_loss (mean):{mean_val_loss}") + + # for test + total_test_loss = [] + for i, x in enumerate(trainer.test_loader): + test_loss = trainer.test_step(x, i) + total_test_loss.append(test_loss) + mean_test_loss = np.stack(total_test_loss).mean() + logger.info(f"test_loss (mean):{mean_test_loss}") + + os.makedirs(f"params_{trainer.hparams.model}", exist_ok=True) + paddle.save( + trainer.model.state_dict(), + os.path.join(f"params_{trainer.hparams.model}", f"model{epoch}.pdparams"), + ) + paddle.save( + trainer.optimizer.state_dict(), + os.path.join(f"params_{trainer.hparams.model}", f"opt{epoch}.pdopt"), + ) diff --git a/jointContribution/CFDGCN/mesh_utils.py b/jointContribution/CFDGCN/mesh_utils.py index 19f3a863ba..5fd3b0f592 100644 --- a/jointContribution/CFDGCN/mesh_utils.py +++ b/jointContribution/CFDGCN/mesh_utils.py @@ -1,516 +1,516 @@ -import pickle -import time -from os import PathLike -from typing import 
Dict -from typing import List -from typing import Sequence -from typing import Tuple -from typing import Union - -import matplotlib.collections -import matplotlib.pyplot as plt -import numpy as np -import paddle -import scipy - -from ppsci.utils import logger - -UnionTensor = Union[paddle.Tensor, np.ndarray] - - -SU2_SHAPE_IDS = { - "line": 3, - "triangle": 5, - "quad": 9, -} - - -def get_mesh_graph( - mesh_filename: Union[str, PathLike], dtype: np.dtype = np.float32 -) -> Tuple[np.ndarray, np.ndarray, List[List[List[int]]], Dict[str, List[List[int]]]]: - def get_rhs(s: str) -> str: - return s.split("=")[-1] - - marker_dict = {} - with open(mesh_filename) as f: - for line in f: - if line.startswith("NPOIN"): - num_points = int(get_rhs(line)) - mesh_points = [ - [float(p) for p in f.readline().split()[:2]] - for _ in range(num_points) - ] - nodes = np.array(mesh_points, dtype=dtype) - - if line.startswith("NMARK"): - num_markers = int(get_rhs(line)) - for _ in range(num_markers): - line = f.readline() - assert line.startswith("MARKER_TAG") - marker_tag = get_rhs(line).strip() - num_elems = int(get_rhs(f.readline())) - marker_elems = [ - [int(e) for e in f.readline().split()[-2:]] - for _ in range(num_elems) - ] - marker_dict[marker_tag] = marker_elems - - if line.startswith("NELEM"): - edges = [] - triangles = [] - quads = [] - num_edges = int(get_rhs(line)) - for _ in range(num_edges): - elem = [int(p) for p in f.readline().split()] - if elem[0] == SU2_SHAPE_IDS["triangle"]: - n = 3 - triangles.append(elem[1 : 1 + n]) - elif elem[0] == SU2_SHAPE_IDS["quad"]: - n = 4 - quads.append(elem[1 : 1 + n]) - else: - raise NotImplementedError - elem = elem[1 : 1 + n] - edges += [[elem[i], elem[(i + 1) % n]] for i in range(n)] - edges = np.array(edges, dtype=np.compat.long).transpose() - elems = [triangles, quads] - - return nodes, edges, elems, marker_dict - - -def write_graph_mesh( - output_filename: Union[str, PathLike], - points: UnionTensor, - elems_list: Sequence[Sequence[Sequence[int]]], - marker_dict: Dict[str, Sequence[Sequence[int]]], - dims: int = 2, -) -> None: - def seq2str(s: Sequence[int]) -> str: - return " ".join(str(x) for x in s) - - with open(output_filename, "w") as f: - f.write(f"NDIME={dims}\n") - - num_points = points.shape[0] - f.write(f"NPOIN={num_points}\n") - for i, p in enumerate(points): - f.write(f"{seq2str(p.tolist())} {i}\n") - f.write("\n") - - num_elems = sum([len(elems) for elems in elems_list]) - f.write(f"NELEM={num_elems}\n") - for elems in elems_list: - for e in elems: - if len(e) != 3 and len(e) != 4: - raise ValueError( - f"Meshes only support triangles and quadrilaterals, " - f"passed element had {len(e)} vertices." 
- ) - elem_id = ( - SU2_SHAPE_IDS["triangle"] if len(e) == 3 else SU2_SHAPE_IDS["quad"] - ) - f.write(f"{elem_id} {seq2str(e)}\n") - f.write("\n") - - num_markers = len(marker_dict) - f.write(f"NMARK={num_markers}\n") - for marker_tag in marker_dict: - f.write(f"MARKER_TAG={marker_tag}\n") - marker_elems = marker_dict[marker_tag] - f.write(f"MARKER_ELEMS={len(marker_elems)}\n") - for m in marker_elems: - f.write(f'{SU2_SHAPE_IDS["line"]} {seq2str(m)}\n') - f.write("\n") - - -def generate_mesh( - mesh_type="regular", - airfoil_nodes=None, - farfield_nodes=None, - num_x=21, - num_y=21, - min_x=-20, - max_x=20, - min_y=-20, - max_y=20, -): - if mesh_type == "regular": - num_nodes = num_x * num_y - inds = np.arange(num_nodes).reshape(num_x, num_y) - x_pos = np.linspace(min_x, max_x, num_x) - y_pos = np.linspace(max_y, min_y, num_y) - grid = np.stack(np.meshgrid(x_pos, y_pos)) - nodes = grid.transpose().reshape(num_nodes, 2) - elif mesh_type == "random": - num_nodes = num_x * num_y - x_pos = np.random.uniform(min_x, max_x, num_nodes) - y_pos = np.random.uniform(min_y, max_y, num_nodes) - grid = np.stack([x_pos, y_pos], axis=1) - nodes = grid.transpose().reshape(num_nodes, 2) - elif mesh_type == "normal": - num_nodes = num_x * num_y - # set distance between min and max to be equal to 4 std devs, to have ~95% of points inside - x_pos = np.random.normal(scale=(max_x - min_x) / 4, size=num_nodes) - y_pos = np.random.normal(scale=(max_y - min_y) / 4, size=num_nodes) - grid = np.stack([x_pos, y_pos], axis=1) - nodes = grid.transpose().reshape(num_nodes, 2) - else: - raise NotImplementedError - - if airfoil_nodes is not None: - # remove nodes that are repeated - non_repeated_inds = [] - airfoil_list = ( - airfoil_nodes.tolist() - ) # have to convert to list to check containment - for i, n in enumerate(nodes): - if n.tolist() in airfoil_list: - logger.info(f"Removed node {i}: {n} because its already in airfoil.") - else: - non_repeated_inds.append(i) - nodes = nodes[non_repeated_inds] - - # add airfoil nodes and remove nodes that are inside the airfoil - nodes_with_airfoil = paddle.to_tensor( - np.concatenate([nodes, airfoil_nodes], axis=0) - ) - airfoil_inds = np.arange(nodes.shape[0], nodes_with_airfoil.shape[0]) - airfoil_signed_dists = signed_dist_graph( - nodes_with_airfoil, airfoil_inds, with_sign=True - ).numpy() - is_inside_airfoil = airfoil_signed_dists < 0 - nodes_outside_airfoil = nodes_with_airfoil[~is_inside_airfoil] - - # adjust indices to account for removed nodes - num_nodes_removed = is_inside_airfoil.sum() - airfoil_inds = airfoil_inds - num_nodes_removed - nodes = nodes_outside_airfoil.numpy() - - if farfield_nodes is not None: - # remove nodes that are repeated - num_nodes_removed = 0 - non_repeated_inds = [] - farfield_list = ( - farfield_nodes.tolist() - ) # have to convert to list to check containment - for i, n in enumerate(nodes): - if n.tolist() in farfield_list: - logger.info(f"Removed node {i}: {n} because its already in farfield.") - num_nodes_removed += 1 - else: - non_repeated_inds.append(i) - if airfoil_nodes is not None: - airfoil_inds -= num_nodes_removed - nodes = nodes[non_repeated_inds] - - # add airfoil nodes and remove nodes that are inside the airfoil - nodes_with_farfield = paddle.to_tensor( - np.concatenate([nodes, farfield_nodes], axis=0) - ) - farfield_inds = np.arange(nodes.shape[0], nodes_with_farfield.shape[0]) - farfield_signed_dists = signed_dist_graph( - nodes_with_farfield, farfield_inds, with_sign=True - ).numpy() - is_outside_farfield = 
farfield_signed_dists > 0 - nodes_inside_farfield = nodes_with_farfield[~is_outside_farfield] - - # adjust indices to account for removed nodes - num_nodes_removed = is_outside_farfield.sum() - airfoil_inds = airfoil_inds - num_nodes_removed - farfield_inds = farfield_inds - num_nodes_removed - nodes = nodes_inside_farfield.numpy() - - elems = delauney(nodes).tolist() - if airfoil_nodes is not None: - # keep only elems that are outside airfoil - elems = [e for e in elems if len([i for i in e if i in airfoil_inds]) < 3] - - marker_dict = {} - if airfoil_nodes is not None: - num_airfoil = airfoil_nodes.shape[0] - marker_dict["airfoil"] = [ - [airfoil_inds[i], airfoil_inds[(i + 1) % num_airfoil]] - for i in range(num_airfoil) - ] - - if farfield_nodes is not None: - num_farfield = farfield_nodes.shape[0] - marker_dict["farfield"] = [ - [farfield_inds[i], farfield_inds[(i + 1) % num_farfield]] - for i in range(num_farfield) - ] - else: - marker_dict["farfield"] = [] - marker_dict["farfield"] += [ - [inds[0, j], inds[0, j + 1]] for j in range(num_x - 1) - ] - marker_dict["farfield"] += [ - [inds[-1, j], inds[-1, j + 1]] for j in range(num_x - 1) - ] - marker_dict["farfield"] += [ - [inds[i, 0], inds[i + 1, 0]] for i in range(num_y - 1) - ] - marker_dict["farfield"] += [ - [inds[i, -1], inds[i + 1, -1]] for i in range(num_y - 1) - ] - - # write_graph_mesh(output_filename, nodes, [elems], marker_dict) - return nodes, elems, marker_dict - - -def delauney(x): - """Adapted from torch_geometric.transforms.delaunay.Delaunay.""" - pos = x[:, :2] - if pos.shape[0] > 3: - tri = scipy.spatial.Delaunay(pos, qhull_options="QJ") - face = tri.simplices - elif pos.size(0) == 3: - face = np.array([[0, 1, 2]]) - else: - raise ValueError( - f"Not enough points to contruct Delaunay triangulation, got {pos.shape[0]} " - + "but expected at least 3" - ) - - elems = face.astype(np.compat.long) - return elems - - -def get_dists(edge_index, pos, norm=True, max=None): - """Adapted from torch_geometric.transforms.Distance""" - (row, col), pos = edge_index, pos - dist = paddle.norm(pos[col] - pos[row], p=2, axis=-1).view(-1, 1) - if norm and dist.numel() > 0: - max_value = dist.max() if max is None else max - dist = dist / max_value - return dist - - -def is_counter_clock_wise(points, ret_val=False): - """From: https://stackoverflow.com/questions/1165647#1180256""" - n = points.shape[0] - a = paddle.argmin(points[:, 1]) - b = (a - 1) % n - c = (a + 1) % n - - ab = points[a] - points[b] - ac = points[a] - points[c] - cross = ab[0] * ac[1] - ab[1] * ac[0] - - if not ret_val: - return cross <= 0 - else: - return cross - - -def is_clock_wise(points, triangles, ret_val=False): - tri_pts = points[triangles] - a = tri_pts[:, 0] - tri_pts[:, 1] - b = tri_pts[:, 1] - tri_pts[:, 2] - cross = b[:, 0] * a[:, 1] - b[:, 1] * a[:, 0] - - if not ret_val: - return cross > 0 - else: - return cross - - -def quad2tri(elems): - new_elems = [] - new_edges = [] - for e in elems: - if len(e) <= 3: - new_elems.append(e) - else: - new_elems.append([e[0], e[1], e[2]]) - new_elems.append([e[0], e[2], e[3]]) - new_edges.append(paddle.to_tensor(([[e[0]], [e[2]]]), dtype=paddle.int64)) - new_edges = ( - paddle.concat(new_edges, axis=1) - if new_edges - else paddle.to_tensor([], dtype=paddle.int64) - ) - return new_elems, new_edges - - -def left_orthogonal(v): - return paddle.stack([-v[..., 1], v[..., 0]], axis=-1) - - -def signed_dist_graph(nodes, marker_inds, with_sign=False): - # assumes shape is convex - # approximate signed distance by distance to 
closest point on surface - signed_dists = paddle.zeros([nodes.shape[0]], dtype=paddle.float32) - marker_nodes = nodes[marker_inds] - if type(marker_inds) is paddle.Tensor: - marker_inds = marker_inds.tolist() - marker_inds = set(marker_inds) - - if with_sign: - marker_surfaces = marker_nodes[:-1] - marker_nodes[1:] - last_surface = marker_nodes[-1] - marker_nodes[0] - marker_surfaces = paddle.concat([marker_surfaces, last_surface.unsqueeze(0)]) - normals = left_orthogonal(marker_surfaces) / marker_surfaces.norm( - dim=1 - ).unsqueeze(1) - - for i, x in enumerate(nodes): - if i not in marker_inds: - vecs = marker_nodes - x - dists = paddle.linalg.norm(vecs, axis=1) - min_dist = dists.min() - - if with_sign: - # if sign is requested, check if inside marker shape - # dot product with normals to find if inside shape - surface_dists = (vecs * normals).sum(dim=1) - if (surface_dists < 0).unique().shape[0] == 1: - # if all point in same direction it is inside - min_dist *= -1 - - signed_dists[i] = min_dist - return signed_dists - - -def plot_field( - nodes, - elems_list, - field, - contour=False, - clim=None, - zoom=True, - get_array=True, - out_file=None, - show=False, - title="", -): - elems_list = sum(elems_list, []) - tris, _ = quad2tri(elems_list) - tris = np.array(tris) - x, y = nodes[:, :2].t().detach().cpu().numpy() - field = field.detach().cpu().numpy() - fig = plt.figure() - if contour: - plt.tricontourf(x, y, tris, field) - else: - plt.tripcolor(x, y, tris, field) - if clim: - plt.clim(*clim) - plt.colorbar() - if zoom: - plt.xlim(left=-0.5, right=1.5) - plt.ylim(bottom=-1, top=1) - if title: - plt.title(title) - - if out_file is not None: - plt.savefig(out_file) - plt.close() - - if show: - raise NotImplementedError - - if get_array: - fig.canvas.draw() - a = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - a = a.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return a - - -def write_tecplot(graph, fields, elems_list, filename="flow.dat"): - x = graph.x - edge_index = graph.edge_index - num_nodes = x.shape[0] - num_edges = edge_index.shape[1] - with open(filename, "w") as f: - f.write('TITLE = "Visualization of the volumetric solution"\n') - f.write( - 'VARIABLES = "x","y","Density","Momentum_x","Momentum_y",' - '"Energy","Pressure","Temperature","Mach","Cp"\n' - ) - f.write( - f"ZONE NODES = {num_nodes}, ELEMENTS = {num_edges}, " - f"DATAPACKING = POINT, ZONETYPE = FEQUADRILATERAL\n" - ) - for node, field in zip(x, fields): - f.write( - f"{node[0].item()}\t{node[1].item()}\t0.0\t" - f"{field[0].item()}\t{field[1].item()}\t0.0\t" - f"{field[2].item()}\t0.0\t0.0\t0.0\n" - ) - elems_list = sum(elems_list, []) - for elem in elems_list: - f.write("\t".join(str(x + 1) for x in elem)) - if len(elem) == 3: - # repeat last vertex if triangle - f.write(f"\t{elem[-1]+1}") - f.write("\n") - - -def visualize_mesh( - nodes, elements, xlims=None, ylims=None, marker=".", plot_inds=False -): - """Modified from: https://stackoverflow.com/questions/52202014""" - - x = nodes[:, 0] - y = nodes[:, 1] - - # https://stackoverflow.com/questions/49640311/ - def plot_elems(x, y, elems, ax=None, **kwargs): - if not ax: - ax = plt.gca() - xy = np.c_[x, y] - verts = xy[elems] - pc = matplotlib.collections.PolyCollection(verts, **kwargs) - ax.add_collection(pc) - ax.autoscale() - - plt.figure() - plt.gca().set_aspect("equal") - - plot_elems(x, y, np.asarray(elements), ax=None, color="crimson", facecolor="None") - plt.plot(x, y, marker=marker, ls="", color="crimson") - - if 
plot_inds: - for i, pos in enumerate(nodes): - plt.annotate(i, (pos[0], pos[1])) - - plt.xlabel("X Axis") - plt.ylabel("Y Axis") - - if xlims: - plt.xlim(left=xlims[0], right=xlims[1]) - if ylims: - plt.ylim(top=ylims[1], bottom=ylims[0]) - - plt.show() - - -if __name__ == "__main__": - mesh = "mesh_NACA0012_fine.su2" - start = time.time() - x, edge_index, _, marker_dict = get_mesh_graph(f"meshes/{mesh}") - - x = paddle.to_tensor(x, dtype=paddle.float32) - edge_index = paddle.to_tensor(edge_index) - - triangulation = scipy.spatial.Delaunay(x) - airfoil_markers = set(marker_dict["airfoil"][0]) - elems = triangulation.simplices - keep_inds = [ - i - for i in range(elems.shape[1]) - if not ( - elems[0, i].item() in airfoil_markers - and elems[1, i].item() in airfoil_markers - and elems[2, i].item() in airfoil_markers - ) - ] - elems = elems[:, keep_inds] - - write_graph_mesh("test_mesh.su2", x, [elems], marker_dict) - logger.info(f"Took: {time.time() - start}") - - with open(f"meshes/graph_{mesh}.pkl", "wb") as f: - pickle.dump([x, edge_index], f) +import pickle +import time +from os import PathLike +from typing import Dict +from typing import List +from typing import Sequence +from typing import Tuple +from typing import Union + +import matplotlib.collections +import matplotlib.pyplot as plt +import numpy as np +import paddle +import scipy + +from ppsci.utils import logger + +UnionTensor = Union[paddle.Tensor, np.ndarray] + + +SU2_SHAPE_IDS = { + "line": 3, + "triangle": 5, + "quad": 9, +} + + +def get_mesh_graph( + mesh_filename: Union[str, PathLike], dtype: np.dtype = np.float32 +) -> Tuple[np.ndarray, np.ndarray, List[List[List[int]]], Dict[str, List[List[int]]]]: + def get_rhs(s: str) -> str: + return s.split("=")[-1] + + marker_dict = {} + with open(mesh_filename) as f: + for line in f: + if line.startswith("NPOIN"): + num_points = int(get_rhs(line)) + mesh_points = [ + [float(p) for p in f.readline().split()[:2]] + for _ in range(num_points) + ] + nodes = np.array(mesh_points, dtype=dtype) + + if line.startswith("NMARK"): + num_markers = int(get_rhs(line)) + for _ in range(num_markers): + line = f.readline() + assert line.startswith("MARKER_TAG") + marker_tag = get_rhs(line).strip() + num_elems = int(get_rhs(f.readline())) + marker_elems = [ + [int(e) for e in f.readline().split()[-2:]] + for _ in range(num_elems) + ] + marker_dict[marker_tag] = marker_elems + + if line.startswith("NELEM"): + edges = [] + triangles = [] + quads = [] + num_edges = int(get_rhs(line)) + for _ in range(num_edges): + elem = [int(p) for p in f.readline().split()] + if elem[0] == SU2_SHAPE_IDS["triangle"]: + n = 3 + triangles.append(elem[1 : 1 + n]) + elif elem[0] == SU2_SHAPE_IDS["quad"]: + n = 4 + quads.append(elem[1 : 1 + n]) + else: + raise NotImplementedError + elem = elem[1 : 1 + n] + edges += [[elem[i], elem[(i + 1) % n]] for i in range(n)] + edges = np.array(edges, dtype=np.compat.long).transpose() + elems = [triangles, quads] + + return nodes, edges, elems, marker_dict + + +def write_graph_mesh( + output_filename: Union[str, PathLike], + points: UnionTensor, + elems_list: Sequence[Sequence[Sequence[int]]], + marker_dict: Dict[str, Sequence[Sequence[int]]], + dims: int = 2, +) -> None: + def seq2str(s: Sequence[int]) -> str: + return " ".join(str(x) for x in s) + + with open(output_filename, "w") as f: + f.write(f"NDIME={dims}\n") + + num_points = points.shape[0] + f.write(f"NPOIN={num_points}\n") + for i, p in enumerate(points): + f.write(f"{seq2str(p.tolist())} {i}\n") + f.write("\n") + + num_elems 
= sum([len(elems) for elems in elems_list]) + f.write(f"NELEM={num_elems}\n") + for elems in elems_list: + for e in elems: + if len(e) != 3 and len(e) != 4: + raise ValueError( + f"Meshes only support triangles and quadrilaterals, " + f"passed element had {len(e)} vertices." + ) + elem_id = ( + SU2_SHAPE_IDS["triangle"] if len(e) == 3 else SU2_SHAPE_IDS["quad"] + ) + f.write(f"{elem_id} {seq2str(e)}\n") + f.write("\n") + + num_markers = len(marker_dict) + f.write(f"NMARK={num_markers}\n") + for marker_tag in marker_dict: + f.write(f"MARKER_TAG={marker_tag}\n") + marker_elems = marker_dict[marker_tag] + f.write(f"MARKER_ELEMS={len(marker_elems)}\n") + for m in marker_elems: + f.write(f'{SU2_SHAPE_IDS["line"]} {seq2str(m)}\n') + f.write("\n") + + +def generate_mesh( + mesh_type="regular", + airfoil_nodes=None, + farfield_nodes=None, + num_x=21, + num_y=21, + min_x=-20, + max_x=20, + min_y=-20, + max_y=20, +): + if mesh_type == "regular": + num_nodes = num_x * num_y + inds = np.arange(num_nodes).reshape(num_x, num_y) + x_pos = np.linspace(min_x, max_x, num_x) + y_pos = np.linspace(max_y, min_y, num_y) + grid = np.stack(np.meshgrid(x_pos, y_pos)) + nodes = grid.transpose().reshape(num_nodes, 2) + elif mesh_type == "random": + num_nodes = num_x * num_y + x_pos = np.random.uniform(min_x, max_x, num_nodes) + y_pos = np.random.uniform(min_y, max_y, num_nodes) + grid = np.stack([x_pos, y_pos], axis=1) + nodes = grid.transpose().reshape(num_nodes, 2) + elif mesh_type == "normal": + num_nodes = num_x * num_y + # set distance between min and max to be equal to 4 std devs, to have ~95% of points inside + x_pos = np.random.normal(scale=(max_x - min_x) / 4, size=num_nodes) + y_pos = np.random.normal(scale=(max_y - min_y) / 4, size=num_nodes) + grid = np.stack([x_pos, y_pos], axis=1) + nodes = grid.transpose().reshape(num_nodes, 2) + else: + raise NotImplementedError + + if airfoil_nodes is not None: + # remove nodes that are repeated + non_repeated_inds = [] + airfoil_list = ( + airfoil_nodes.tolist() + ) # have to convert to list to check containment + for i, n in enumerate(nodes): + if n.tolist() in airfoil_list: + logger.info(f"Removed node {i}: {n} because its already in airfoil.") + else: + non_repeated_inds.append(i) + nodes = nodes[non_repeated_inds] + + # add airfoil nodes and remove nodes that are inside the airfoil + nodes_with_airfoil = paddle.to_tensor( + np.concatenate([nodes, airfoil_nodes], axis=0) + ) + airfoil_inds = np.arange(nodes.shape[0], nodes_with_airfoil.shape[0]) + airfoil_signed_dists = signed_dist_graph( + nodes_with_airfoil, airfoil_inds, with_sign=True + ).numpy() + is_inside_airfoil = airfoil_signed_dists < 0 + nodes_outside_airfoil = nodes_with_airfoil[~is_inside_airfoil] + + # adjust indices to account for removed nodes + num_nodes_removed = is_inside_airfoil.sum() + airfoil_inds = airfoil_inds - num_nodes_removed + nodes = nodes_outside_airfoil.numpy() + + if farfield_nodes is not None: + # remove nodes that are repeated + num_nodes_removed = 0 + non_repeated_inds = [] + farfield_list = ( + farfield_nodes.tolist() + ) # have to convert to list to check containment + for i, n in enumerate(nodes): + if n.tolist() in farfield_list: + logger.info(f"Removed node {i}: {n} because its already in farfield.") + num_nodes_removed += 1 + else: + non_repeated_inds.append(i) + if airfoil_nodes is not None: + airfoil_inds -= num_nodes_removed + nodes = nodes[non_repeated_inds] + + # add airfoil nodes and remove nodes that are inside the airfoil + nodes_with_farfield = paddle.to_tensor( 
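+ # NOTE (added descriptive comment, not part of the original patch): despite the comment
+ # above, this block handles the farfield boundary, not the airfoil; farfield nodes are
+ # appended so signed distances can identify and drop nodes lying outside the farfield.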
+ np.concatenate([nodes, farfield_nodes], axis=0) + ) + farfield_inds = np.arange(nodes.shape[0], nodes_with_farfield.shape[0]) + farfield_signed_dists = signed_dist_graph( + nodes_with_farfield, farfield_inds, with_sign=True + ).numpy() + is_outside_farfield = farfield_signed_dists > 0 + nodes_inside_farfield = nodes_with_farfield[~is_outside_farfield] + + # adjust indices to account for removed nodes + num_nodes_removed = is_outside_farfield.sum() + airfoil_inds = airfoil_inds - num_nodes_removed + farfield_inds = farfield_inds - num_nodes_removed + nodes = nodes_inside_farfield.numpy() + + elems = delauney(nodes).tolist() + if airfoil_nodes is not None: + # keep only elems that are outside airfoil + elems = [e for e in elems if len([i for i in e if i in airfoil_inds]) < 3] + + marker_dict = {} + if airfoil_nodes is not None: + num_airfoil = airfoil_nodes.shape[0] + marker_dict["airfoil"] = [ + [airfoil_inds[i], airfoil_inds[(i + 1) % num_airfoil]] + for i in range(num_airfoil) + ] + + if farfield_nodes is not None: + num_farfield = farfield_nodes.shape[0] + marker_dict["farfield"] = [ + [farfield_inds[i], farfield_inds[(i + 1) % num_farfield]] + for i in range(num_farfield) + ] + else: + marker_dict["farfield"] = [] + marker_dict["farfield"] += [ + [inds[0, j], inds[0, j + 1]] for j in range(num_x - 1) + ] + marker_dict["farfield"] += [ + [inds[-1, j], inds[-1, j + 1]] for j in range(num_x - 1) + ] + marker_dict["farfield"] += [ + [inds[i, 0], inds[i + 1, 0]] for i in range(num_y - 1) + ] + marker_dict["farfield"] += [ + [inds[i, -1], inds[i + 1, -1]] for i in range(num_y - 1) + ] + + # write_graph_mesh(output_filename, nodes, [elems], marker_dict) + return nodes, elems, marker_dict + + +def delauney(x): + """Adapted from torch_geometric.transforms.delaunay.Delaunay.""" + pos = x[:, :2] + if pos.shape[0] > 3: + tri = scipy.spatial.Delaunay(pos, qhull_options="QJ") + face = tri.simplices + elif pos.size(0) == 3: + face = np.array([[0, 1, 2]]) + else: + raise ValueError( + f"Not enough points to contruct Delaunay triangulation, got {pos.shape[0]} " + + "but expected at least 3" + ) + + elems = face.astype(np.compat.long) + return elems + + +def get_dists(edge_index, pos, norm=True, max=None): + """Adapted from torch_geometric.transforms.Distance""" + (row, col), pos = edge_index, pos + dist = paddle.norm(pos[col] - pos[row], p=2, axis=-1).view(-1, 1) + if norm and dist.numel() > 0: + max_value = dist.max() if max is None else max + dist = dist / max_value + return dist + + +def is_counter_clock_wise(points, ret_val=False): + """From: https://stackoverflow.com/questions/1165647#1180256""" + n = points.shape[0] + a = paddle.argmin(points[:, 1]) + b = (a - 1) % n + c = (a + 1) % n + + ab = points[a] - points[b] + ac = points[a] - points[c] + cross = ab[0] * ac[1] - ab[1] * ac[0] + + if not ret_val: + return cross <= 0 + else: + return cross + + +def is_clock_wise(points, triangles, ret_val=False): + tri_pts = points[triangles] + a = tri_pts[:, 0] - tri_pts[:, 1] + b = tri_pts[:, 1] - tri_pts[:, 2] + cross = b[:, 0] * a[:, 1] - b[:, 1] * a[:, 0] + + if not ret_val: + return cross > 0 + else: + return cross + + +def quad2tri(elems): + new_elems = [] + new_edges = [] + for e in elems: + if len(e) <= 3: + new_elems.append(e) + else: + new_elems.append([e[0], e[1], e[2]]) + new_elems.append([e[0], e[2], e[3]]) + new_edges.append(paddle.to_tensor(([[e[0]], [e[2]]]), dtype=paddle.int64)) + new_edges = ( + paddle.concat(new_edges, axis=1) + if new_edges + else paddle.to_tensor([], 
dtype=paddle.int64) + ) + return new_elems, new_edges + + +def left_orthogonal(v): + return paddle.stack([-v[..., 1], v[..., 0]], axis=-1) + + +def signed_dist_graph(nodes, marker_inds, with_sign=False): + # assumes shape is convex + # approximate signed distance by distance to closest point on surface + signed_dists = paddle.zeros([nodes.shape[0]], dtype=paddle.float32) + marker_nodes = nodes[marker_inds] + if type(marker_inds) is paddle.Tensor: + marker_inds = marker_inds.tolist() + marker_inds = set(marker_inds) + + if with_sign: + marker_surfaces = marker_nodes[:-1] - marker_nodes[1:] + last_surface = marker_nodes[-1] - marker_nodes[0] + marker_surfaces = paddle.concat([marker_surfaces, last_surface.unsqueeze(0)]) + normals = left_orthogonal(marker_surfaces) / marker_surfaces.norm( + dim=1 + ).unsqueeze(1) + + for i, x in enumerate(nodes): + if i not in marker_inds: + vecs = marker_nodes - x + dists = paddle.linalg.norm(vecs, axis=1) + min_dist = dists.min() + + if with_sign: + # if sign is requested, check if inside marker shape + # dot product with normals to find if inside shape + surface_dists = (vecs * normals).sum(dim=1) + if (surface_dists < 0).unique().shape[0] == 1: + # if all point in same direction it is inside + min_dist *= -1 + + signed_dists[i] = min_dist + return signed_dists + + +def plot_field( + nodes, + elems_list, + field, + contour=False, + clim=None, + zoom=True, + get_array=True, + out_file=None, + show=False, + title="", +): + elems_list = sum(elems_list, []) + tris, _ = quad2tri(elems_list) + tris = np.array(tris) + x, y = nodes[:, :2].t().detach().cpu().numpy() + field = field.detach().cpu().numpy() + fig = plt.figure() + if contour: + plt.tricontourf(x, y, tris, field) + else: + plt.tripcolor(x, y, tris, field) + if clim: + plt.clim(*clim) + plt.colorbar() + if zoom: + plt.xlim(left=-0.5, right=1.5) + plt.ylim(bottom=-1, top=1) + if title: + plt.title(title) + + if out_file is not None: + plt.savefig(out_file) + plt.close() + + if show: + raise NotImplementedError + + if get_array: + fig.canvas.draw() + a = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") + a = a.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + plt.close() + return a + + +def write_tecplot(graph, fields, elems_list, filename="flow.dat"): + x = graph.x + edge_index = graph.edge_index + num_nodes = x.shape[0] + num_edges = edge_index.shape[1] + with open(filename, "w") as f: + f.write('TITLE = "Visualization of the volumetric solution"\n') + f.write( + 'VARIABLES = "x","y","Density","Momentum_x","Momentum_y",' + '"Energy","Pressure","Temperature","Mach","Cp"\n' + ) + f.write( + f"ZONE NODES = {num_nodes}, ELEMENTS = {num_edges}, " + f"DATAPACKING = POINT, ZONETYPE = FEQUADRILATERAL\n" + ) + for node, field in zip(x, fields): + f.write( + f"{node[0].item()}\t{node[1].item()}\t0.0\t" + f"{field[0].item()}\t{field[1].item()}\t0.0\t" + f"{field[2].item()}\t0.0\t0.0\t0.0\n" + ) + elems_list = sum(elems_list, []) + for elem in elems_list: + f.write("\t".join(str(x + 1) for x in elem)) + if len(elem) == 3: + # repeat last vertex if triangle + f.write(f"\t{elem[-1]+1}") + f.write("\n") + + +def visualize_mesh( + nodes, elements, xlims=None, ylims=None, marker=".", plot_inds=False +): + """Modified from: https://stackoverflow.com/questions/52202014""" + + x = nodes[:, 0] + y = nodes[:, 1] + + # https://stackoverflow.com/questions/49640311/ + def plot_elems(x, y, elems, ax=None, **kwargs): + if not ax: + ax = plt.gca() + xy = np.c_[x, y] + verts = xy[elems] + pc = 
matplotlib.collections.PolyCollection(verts, **kwargs) + ax.add_collection(pc) + ax.autoscale() + + plt.figure() + plt.gca().set_aspect("equal") + + plot_elems(x, y, np.asarray(elements), ax=None, color="crimson", facecolor="None") + plt.plot(x, y, marker=marker, ls="", color="crimson") + + if plot_inds: + for i, pos in enumerate(nodes): + plt.annotate(i, (pos[0], pos[1])) + + plt.xlabel("X Axis") + plt.ylabel("Y Axis") + + if xlims: + plt.xlim(left=xlims[0], right=xlims[1]) + if ylims: + plt.ylim(top=ylims[1], bottom=ylims[0]) + + plt.show() + + +if __name__ == "__main__": + mesh = "mesh_NACA0012_fine.su2" + start = time.time() + x, edge_index, _, marker_dict = get_mesh_graph(f"meshes/{mesh}") + + x = paddle.to_tensor(x, dtype=paddle.float32) + edge_index = paddle.to_tensor(edge_index) + + triangulation = scipy.spatial.Delaunay(x) + airfoil_markers = set(marker_dict["airfoil"][0]) + elems = triangulation.simplices + keep_inds = [ + i + for i in range(elems.shape[1]) + if not ( + elems[0, i].item() in airfoil_markers + and elems[1, i].item() in airfoil_markers + and elems[2, i].item() in airfoil_markers + ) + ] + elems = elems[:, keep_inds] + + write_graph_mesh("test_mesh.su2", x, [elems], marker_dict) + logger.info(f"Took: {time.time() - start}") + + with open(f"meshes/graph_{mesh}.pkl", "wb") as f: + pickle.dump([x, edge_index], f) diff --git a/jointContribution/CFDGCN/models.py b/jointContribution/CFDGCN/models.py index fda758f8e4..65b16406df 100644 --- a/jointContribution/CFDGCN/models.py +++ b/jointContribution/CFDGCN/models.py @@ -1,239 +1,239 @@ -import os - -import mesh_utils -import paddle -import paddle.nn.functional as F -import pgl -import su2paddle - -from ppsci.utils import logger - - -class MeshGCN(paddle.nn.Layer): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - num_layers=6, - fine_marker_dict=None, - ): - super().__init__() - self.fine_marker_dict = paddle.unique( - paddle.to_tensor(fine_marker_dict["airfoil"]) - ) - self.sdf = None - in_channels += 1 # account for sdf - - channels = [in_channels] - channels += [hidden_channels] * (num_layers - 1) - channels += [out_channels] - - self.convs = paddle.nn.LayerList() - for i in range(num_layers): - self.convs.append(pgl.nn.GCNConv(channels[i], channels[i + 1])) - - def forward(self, graphs): - pred_fields = [] - for graph in graphs: - x = graph.node_feat["feature"] - - if self.sdf is None: - with paddle.no_grad(): - self.sdf = mesh_utils.signed_dist_graph( - x[:, :2], self.fine_marker_dict - ).unsqueeze(1) - x = paddle.concat([x, self.sdf], axis=-1) - - for i, conv in enumerate(self.convs[:-1]): - x = conv(graph, x) - x = F.relu(x) - - pred_field = self.convs[-1](graph, x) - pred_fields.append(pred_field) - return pred_fields - - -class CFDGCN(paddle.nn.Layer): - def __init__( - self, - config_file, - coarse_mesh, - fine_marker_dict, - process_sim=lambda x, y: x, - freeze_mesh=False, - num_convs=6, - num_end_convs=3, - hidden_channels=512, - out_channels=3, - ): - super().__init__() - meshes_temp_dir = "temp_meshes" - os.makedirs(meshes_temp_dir, exist_ok=True) - self.mesh_file = os.path.join(meshes_temp_dir, f"{str(os.getpid())}_mesh.su2") - - if not coarse_mesh: - raise ValueError("Need to provide a coarse mesh for CFD-GCN.") - nodes, edges, self.elems, self.marker_dict = mesh_utils.get_mesh_graph( - coarse_mesh - ) - if not freeze_mesh: - self.nodes = paddle.to_tensor(nodes, stop_gradient=False) - else: - self.nodes = paddle.to_tensor(nodes, stop_gradient=True) - - self.elems, new_edges = 
mesh_utils.quad2tri(sum(self.elems, [])) - self.elems = [self.elems] - self.edges = paddle.to_tensor(edges) - logger.info(self.edges.dtype, new_edges.dtype) - self.edges = paddle.concat([self.edges, new_edges], axis=1) - self.marker_inds = paddle.to_tensor(sum(self.marker_dict.values(), [])).unique() - assert ( - mesh_utils.is_clock_wise(self.nodes, paddle.to_tensor(self.elems[0])) - .nonzero() - .shape[0] - == 0 - ), "Mesh has flipped elems" - - self.process_sim = process_sim - self.su2 = su2paddle.SU2Module(config_file, mesh_file=self.mesh_file) - logger.info( - f'Mesh filename: {self.mesh_file.format(batch_index="*")}', flush=True - ) - - self.fine_marker_dict = paddle.to_tensor(fine_marker_dict["airfoil"]).unique() - self.sdf = None - - self.num_convs = num_end_convs - self.convs = [] - if self.num_convs > 0: - self.convs = paddle.nn.LayerList() - in_channels = out_channels + hidden_channels - for i in range(self.num_convs - 1): - self.convs.append(pgl.nn.GCNConv(in_channels, hidden_channels)) - in_channels = hidden_channels - self.convs.append(pgl.nn.GCNConv(in_channels, out_channels)) - - self.num_pre_convs = num_convs - num_end_convs - self.pre_convs = [] - if self.num_pre_convs > 0: - in_channels = 5 + 1 # one extra channel for sdf - self.pre_convs = paddle.nn.LayerList() - for i in range(self.num_pre_convs - 1): - self.pre_convs.append(pgl.nn.GCNConv(in_channels, hidden_channels)) - in_channels = hidden_channels - self.pre_convs.append(pgl.nn.GCNConv(in_channels, hidden_channels)) - - self.sim_info = {} # store output of coarse simulation for logging / debugging - - def forward(self, graphs): - batch_size = len(graphs) - nodes_list = [] - aoa_list = [] - mach_or_reynolds_list = [] - fine_x_list = [] - for graph in graphs: - x = graph.node_feat["feature"] - - if self.sdf is None: - with paddle.no_grad(): - self.sdf = mesh_utils.signed_dist_graph( - x[:, :2], self.fine_marker_dict - ).unsqueeze(1) - fine_x = paddle.concat([x, self.sdf], axis=1) - - for i, conv in enumerate(self.pre_convs): - fine_x = F.relu(conv(graph, fine_x)) - fine_x_list.append(fine_x) - - nodes = self.get_nodes() # [353,2] - self.write_mesh_file( - nodes, self.elems, self.marker_dict, filename=self.mesh_file - ) - - nodes_list.append(nodes) - aoa_list.append(graph.aoa) - mach_or_reynolds_list.append(graph.mach_or_reynolds) - - # paddle stack for [batch,nodes],[batch,nodes],[batch,1],[batch,1] for su2 - # su2 can apply each item of one batch with mpi - nodes_input = paddle.stack(nodes_list, axis=0) - aoa_input = paddle.stack(aoa_list, axis=0) - mach_or_reynolds_input = paddle.stack(mach_or_reynolds_list, axis=0) - - batch_y = self.su2( - nodes_input[..., 0], - nodes_input[..., 1], - aoa_input[..., None], - mach_or_reynolds_input[..., None], - ) - batch_y = self.process_sim( - batch_y, False - ) # [8,353] * 3, a list with three items - - pred_fields = [] - for idx in range(batch_size): - graph = graphs[idx] - coarse_y = paddle.stack([y[idx].flatten() for y in batch_y], axis=1).astype( - "float32" - ) # features [353,3] - nodes = self.get_nodes() # [353,2] - x = graph.node_feat[ - "feature" - ] # [6684,5] the two-first columns are the node locations - fine_y = self.upsample( - features=coarse_y, coarse_nodes=nodes[:, :2], fine_nodes=x[:, :2] - ) - fine_y = paddle.concat([fine_y, fine_x_list[idx]], axis=1) - - for i, conv in enumerate(self.convs[:-1]): - fine_y = F.relu(conv(graph, fine_y)) - fine_y = self.convs[-1](graph, fine_y) - pred_fields.append(fine_y) - - return pred_fields - - def upsample(self, features, 
coarse_nodes, fine_nodes): - """ - Args: - features (tensor): [353, 3] - coarse_nodes (tensor): [353, 2] - fine_nodes (tensor): [6684, 2] - - Returns: - tensor: upsample result [6684, 3] - """ - coarse_nodes_input = paddle.repeat_interleave( - coarse_nodes.unsqueeze(0), fine_nodes.shape[0], 0 - ) # [6684,352,2] - fine_nodes_input = paddle.repeat_interleave( - fine_nodes.unsqueeze(1), coarse_nodes.shape[0], 1 - ) # [6684,352,2] - - dist_w = 1.0 / ( - paddle.norm(x=coarse_nodes_input - fine_nodes_input, p=2, axis=-1) + 1e-9 - ) # [6684,352] - knn_value, knn_index = paddle.topk( - dist_w, k=3, largest=True - ) # [6684,3],[6684,3] - - weight = knn_value.unsqueeze(-2) - features_input = features[knn_index] - - output = paddle.bmm(weight, features_input).squeeze(-2) / paddle.sum( - knn_value, axis=-1, keepdim=True - ) - return output - - def get_nodes(self): - # return torch.cat([self.marker_nodes, self.not_marker_nodes]) - return self.nodes - - @staticmethod - def write_mesh_file(x, elems, marker_dict, filename="mesh.su2"): - mesh_utils.write_graph_mesh(filename, x[:, :2], elems, marker_dict) - - @staticmethod - def contiguous_elems_list(elems, inds): - # Hack to easily have compatibility with MeshEdgePool - return elems +import os + +import mesh_utils +import paddle +import paddle.nn.functional as F +import pgl +import su2paddle + +from ppsci.utils import logger + + +class MeshGCN(paddle.nn.Layer): + def __init__( + self, + in_channels, + hidden_channels, + out_channels, + num_layers=6, + fine_marker_dict=None, + ): + super().__init__() + self.fine_marker_dict = paddle.unique( + paddle.to_tensor(fine_marker_dict["airfoil"]) + ) + self.sdf = None + in_channels += 1 # account for sdf + + channels = [in_channels] + channels += [hidden_channels] * (num_layers - 1) + channels += [out_channels] + + self.convs = paddle.nn.LayerList() + for i in range(num_layers): + self.convs.append(pgl.nn.GCNConv(channels[i], channels[i + 1])) + + def forward(self, graphs): + pred_fields = [] + for graph in graphs: + x = graph.node_feat["feature"] + + if self.sdf is None: + with paddle.no_grad(): + self.sdf = mesh_utils.signed_dist_graph( + x[:, :2], self.fine_marker_dict + ).unsqueeze(1) + x = paddle.concat([x, self.sdf], axis=-1) + + for i, conv in enumerate(self.convs[:-1]): + x = conv(graph, x) + x = F.relu(x) + + pred_field = self.convs[-1](graph, x) + pred_fields.append(pred_field) + return pred_fields + + +class CFDGCN(paddle.nn.Layer): + def __init__( + self, + config_file, + coarse_mesh, + fine_marker_dict, + process_sim=lambda x, y: x, + freeze_mesh=False, + num_convs=6, + num_end_convs=3, + hidden_channels=512, + out_channels=3, + ): + super().__init__() + meshes_temp_dir = "temp_meshes" + os.makedirs(meshes_temp_dir, exist_ok=True) + self.mesh_file = os.path.join(meshes_temp_dir, f"{str(os.getpid())}_mesh.su2") + + if not coarse_mesh: + raise ValueError("Need to provide a coarse mesh for CFD-GCN.") + nodes, edges, self.elems, self.marker_dict = mesh_utils.get_mesh_graph( + coarse_mesh + ) + if not freeze_mesh: + self.nodes = paddle.to_tensor(nodes, stop_gradient=False) + else: + self.nodes = paddle.to_tensor(nodes, stop_gradient=True) + + self.elems, new_edges = mesh_utils.quad2tri(sum(self.elems, [])) + self.elems = [self.elems] + self.edges = paddle.to_tensor(edges) + logger.info(self.edges.dtype, new_edges.dtype) + self.edges = paddle.concat([self.edges, new_edges], axis=1) + self.marker_inds = paddle.to_tensor(sum(self.marker_dict.values(), [])).unique() + assert ( + 
mesh_utils.is_clock_wise(self.nodes, paddle.to_tensor(self.elems[0])) + .nonzero() + .shape[0] + == 0 + ), "Mesh has flipped elems" + + self.process_sim = process_sim + self.su2 = su2paddle.SU2Module(config_file, mesh_file=self.mesh_file) + logger.info( + f'Mesh filename: {self.mesh_file.format(batch_index="*")}', flush=True + ) + + self.fine_marker_dict = paddle.to_tensor(fine_marker_dict["airfoil"]).unique() + self.sdf = None + + self.num_convs = num_end_convs + self.convs = [] + if self.num_convs > 0: + self.convs = paddle.nn.LayerList() + in_channels = out_channels + hidden_channels + for i in range(self.num_convs - 1): + self.convs.append(pgl.nn.GCNConv(in_channels, hidden_channels)) + in_channels = hidden_channels + self.convs.append(pgl.nn.GCNConv(in_channels, out_channels)) + + self.num_pre_convs = num_convs - num_end_convs + self.pre_convs = [] + if self.num_pre_convs > 0: + in_channels = 5 + 1 # one extra channel for sdf + self.pre_convs = paddle.nn.LayerList() + for i in range(self.num_pre_convs - 1): + self.pre_convs.append(pgl.nn.GCNConv(in_channels, hidden_channels)) + in_channels = hidden_channels + self.pre_convs.append(pgl.nn.GCNConv(in_channels, hidden_channels)) + + self.sim_info = {} # store output of coarse simulation for logging / debugging + + def forward(self, graphs): + batch_size = len(graphs) + nodes_list = [] + aoa_list = [] + mach_or_reynolds_list = [] + fine_x_list = [] + for graph in graphs: + x = graph.node_feat["feature"] + + if self.sdf is None: + with paddle.no_grad(): + self.sdf = mesh_utils.signed_dist_graph( + x[:, :2], self.fine_marker_dict + ).unsqueeze(1) + fine_x = paddle.concat([x, self.sdf], axis=1) + + for i, conv in enumerate(self.pre_convs): + fine_x = F.relu(conv(graph, fine_x)) + fine_x_list.append(fine_x) + + nodes = self.get_nodes() # [353,2] + self.write_mesh_file( + nodes, self.elems, self.marker_dict, filename=self.mesh_file + ) + + nodes_list.append(nodes) + aoa_list.append(graph.aoa) + mach_or_reynolds_list.append(graph.mach_or_reynolds) + + # paddle stack for [batch,nodes],[batch,nodes],[batch,1],[batch,1] for su2 + # su2 can apply each item of one batch with mpi + nodes_input = paddle.stack(nodes_list, axis=0) + aoa_input = paddle.stack(aoa_list, axis=0) + mach_or_reynolds_input = paddle.stack(mach_or_reynolds_list, axis=0) + + batch_y = self.su2( + nodes_input[..., 0], + nodes_input[..., 1], + aoa_input[..., None], + mach_or_reynolds_input[..., None], + ) + batch_y = self.process_sim( + batch_y, False + ) # [8,353] * 3, a list with three items + + pred_fields = [] + for idx in range(batch_size): + graph = graphs[idx] + coarse_y = paddle.stack([y[idx].flatten() for y in batch_y], axis=1).astype( + "float32" + ) # features [353,3] + nodes = self.get_nodes() # [353,2] + x = graph.node_feat[ + "feature" + ] # [6684,5] the two-first columns are the node locations + fine_y = self.upsample( + features=coarse_y, coarse_nodes=nodes[:, :2], fine_nodes=x[:, :2] + ) + fine_y = paddle.concat([fine_y, fine_x_list[idx]], axis=1) + + for i, conv in enumerate(self.convs[:-1]): + fine_y = F.relu(conv(graph, fine_y)) + fine_y = self.convs[-1](graph, fine_y) + pred_fields.append(fine_y) + + return pred_fields + + def upsample(self, features, coarse_nodes, fine_nodes): + """ + Args: + features (tensor): [353, 3] + coarse_nodes (tensor): [353, 2] + fine_nodes (tensor): [6684, 2] + + Returns: + tensor: upsample result [6684, 3] + """ + coarse_nodes_input = paddle.repeat_interleave( + coarse_nodes.unsqueeze(0), fine_nodes.shape[0], 0 + ) # [6684,352,2] + 
fine_nodes_input = paddle.repeat_interleave( + fine_nodes.unsqueeze(1), coarse_nodes.shape[0], 1 + ) # [6684,352,2] + + dist_w = 1.0 / ( + paddle.norm(x=coarse_nodes_input - fine_nodes_input, p=2, axis=-1) + 1e-9 + ) # [6684,352] + knn_value, knn_index = paddle.topk( + dist_w, k=3, largest=True + ) # [6684,3],[6684,3] + + weight = knn_value.unsqueeze(-2) + features_input = features[knn_index] + + output = paddle.bmm(weight, features_input).squeeze(-2) / paddle.sum( + knn_value, axis=-1, keepdim=True + ) + return output + + def get_nodes(self): + # return torch.cat([self.marker_nodes, self.not_marker_nodes]) + return self.nodes + + @staticmethod + def write_mesh_file(x, elems, marker_dict, filename="mesh.su2"): + mesh_utils.write_graph_mesh(filename, x[:, :2], elems, marker_dict) + + @staticmethod + def contiguous_elems_list(elems, inds): + # Hack to easily have compatibility with MeshEdgePool + return elems diff --git a/jointContribution/CFDGCN/run.sh b/jointContribution/CFDGCN/run.sh index 365da49540..9436f1952d 100644 --- a/jointContribution/CFDGCN/run.sh +++ b/jointContribution/CFDGCN/run.sh @@ -1,13 +1,13 @@ -export BATCH_SIZE=16 - -# Prediction experiments -# for CFD-GCN -mpirun -np $((BATCH_SIZE+1)) --oversubscribe python main.py --batch-size $BATCH_SIZE --gpus 1 -dw 1 --su2-config config/coarse.cfg --model cfd_gcn --hidden-size 512 --num-layers 6 --num-end-convs 3 --optim adam -lr 5e-4 --data-dir data/NACA0012_interpolate --coarse-mesh meshes/mesh_NACA0012_xcoarse.su2 -e cfd_gcn_interp > /dev/null -# for GCN -mpirun -np $((BATCH_SIZE+1)) --oversubscribe python main.py --batch-size $BATCH_SIZE --gpus 1 -dw 1 --model gcn --hidden-size 512 --num-layers 6 --optim adam -lr 5e-4 --data-dir data/NACA0012_interpolate/ -e gcn_interp - -# Generalization experiments -# for CFD-GCN -mpirun -np $((BATCH_SIZE+1)) --oversubscribe python main.py --batch-size $BATCH_SIZE --gpus 1 -dw 1 --su2-config config/coarse.cfg --model cfd_gcn --hidden-size 512 --num-layers 6 --num-end-convs 3 --optim adam -lr 5e-4 --data-dir data/NACA0012_machsplit_noshock --coarse-mesh meshes/mesh_NACA0012_xcoarse.su2 -e cfd_gcn_gen > /dev/null -# for GCN -mpirun -np $((BATCH_SIZE+1)) --oversubscribe python main.py --batch-size $BATCH_SIZE --gpus 1 -dw 1 --model gcn --hidden-size 512 --num-layers 6 --optim adam -lr 5e-4 --data-dir data/NACA0012_machsplit_noshock/ -e gcn_gen +export BATCH_SIZE=16 + +# Prediction experiments +# for CFD-GCN +mpirun -np $((BATCH_SIZE+1)) --oversubscribe python main.py --batch-size $BATCH_SIZE --gpus 1 -dw 1 --su2-config config/coarse.cfg --model cfd_gcn --hidden-size 512 --num-layers 6 --num-end-convs 3 --optim adam -lr 5e-4 --data-dir data/NACA0012_interpolate --coarse-mesh meshes/mesh_NACA0012_xcoarse.su2 -e cfd_gcn_interp > /dev/null +# for GCN +mpirun -np $((BATCH_SIZE+1)) --oversubscribe python main.py --batch-size $BATCH_SIZE --gpus 1 -dw 1 --model gcn --hidden-size 512 --num-layers 6 --optim adam -lr 5e-4 --data-dir data/NACA0012_interpolate/ -e gcn_interp + +# Generalization experiments +# for CFD-GCN +mpirun -np $((BATCH_SIZE+1)) --oversubscribe python main.py --batch-size $BATCH_SIZE --gpus 1 -dw 1 --su2-config config/coarse.cfg --model cfd_gcn --hidden-size 512 --num-layers 6 --num-end-convs 3 --optim adam -lr 5e-4 --data-dir data/NACA0012_machsplit_noshock --coarse-mesh meshes/mesh_NACA0012_xcoarse.su2 -e cfd_gcn_gen > /dev/null +# for GCN +mpirun -np $((BATCH_SIZE+1)) --oversubscribe python main.py --batch-size $BATCH_SIZE --gpus 1 -dw 1 --model gcn --hidden-size 512 --num-layers 6 
--optim adam -lr 5e-4 --data-dir data/NACA0012_machsplit_noshock/ -e gcn_gen diff --git a/jointContribution/CFDGCN/su2paddle/su2_function.py b/jointContribution/CFDGCN/su2paddle/su2_function.py index 7a3885ab81..edbbd67928 100644 --- a/jointContribution/CFDGCN/su2paddle/su2_function.py +++ b/jointContribution/CFDGCN/su2paddle/su2_function.py @@ -1,167 +1,167 @@ -import math -from typing import Tuple - -import common -import mpi4py -import paddle -import pysu2 -import su2_function_mpi - -_global_max_ppe = -1 - - -class SU2Module(paddle.nn.Layer): - def __init__( - self, config_file: str, mesh_file: str, dims: int = 2, num_zones: int = 1 - ) -> None: - """Initialize the SU2 configurations for the provided config file. - - Args: - config_file (str): The SU2 configuration file name. - mesh_file (str): Optional parameter, if not set defaults to the mesh filename set in the config file. - Can be used to run a batch with different meshes for each sample. - Passing in mesh_file with batch_index parameter in string format (e.g., 'b{batch_index}_mesh.su2') - causes each element in batch to get assigned to the correct mesh file (0 indexed). - If running multiple processes in parallel, take care to name each mesh file uniquely to avoid conflicts - (e.g., unique = str(os.getpid()); mesh_file = 'b{batch_index}_' + unique + '_mesh.su2'). - dims (int, optional): Number of dimensions for the problem (2D or 3D). Defaults to 2. - num_zones (int, optional): Number of zones in the simulation (only 1 supported currently). Defaults to 1. - """ - super().__init__() - if num_zones != 1: - raise ValueError("Only supports 1 zone for now.") - if mpi4py.MPI.COMM_WORLD.Get_rank() != 0: - raise ValueError("Only rank 0 can run SU2Function, not rank 0 in comm") - if _global_max_ppe <= 0: - raise ValueError( - "Before running SU2Function, a (single) call to activate_su2_mpi is needed." - ) - - self.num_zones = num_zones - self.dims = dims - self.mesh_file = mesh_file - - self.forward_config = config_file - self.forward_driver = None - - def forward(self, *inputs: paddle.Tensor) -> Tuple[paddle.Tensor, ...]: - return SU2Function.apply( - *inputs, - self.forward_config, - self.mesh_file, - self.num_zones, - self.dims, - self.set_forward_driver, - ) - - def get_forward_driver(self): - if self.forward_driver is None: - raise AttributeError("Forward driver is only set after running forward()") - return self.forward_driver - - def set_forward_driver(self, f): - if self.forward_driver is not None: - self.forward_driver.Postprocessing() - self.forward_driver = f - - def __del__(self): - """Close existing drivers and MPI communicators.""" - if hasattr(self, "forward_driver") and self.forward_driver is not None: - self.forward_driver.Postprocessing() - - -class SU2Function(paddle.autograd.PyLayer): - num_params = 5 - - @staticmethod - def forward(ctx, *inputs): - su2_function_mpi.non_busy_post(mpi4py.MPI.COMM_WORLD) - x = inputs[: -SU2Function.num_params] - forward_config, mesh_file, num_zones, dims, set_forward_driver_hook = inputs[ - -SU2Function.num_params : - ] - - if x[0].dim() < 2: - raise TypeError( - "Input is expected to have first dimension for batch, " - "e.g. x[0, :] is first item in batch." - ) - batch_size = x[0].shape[0] - max_ppe = _global_max_ppe - workers = mpi4py.MPI.COMM_WORLD.Get_size() - 1 - if 0 <= workers < batch_size: - raise TypeError( - "Batch size is larger than number of workers, not enough processes to run batch." 
- ) - - mpi4py.MPI.COMM_WORLD.bcast(su2_function_mpi.RunCode.RUN_FORWARD, root=0) - procs_per_example = min(max_ppe, math.ceil(workers / batch_size)) - - x = tuple((i.numpy() for i in x)) - - mpi4py.MPI.COMM_WORLD.bcast( - [num_zones, dims, forward_config, mesh_file, procs_per_example, x], root=0 - ) - - # instantiate forward_driver while workers work - worker_forward_config = mpi4py.MPI.COMM_WORLD.recv(source=1) - forward_driver = pysu2.CSinglezoneDriver( - worker_forward_config, num_zones, dims, mpi4py.MPI.COMM_SELF - ) - num_diff_inputs = forward_driver.GetnDiff_Inputs() - num_diff_outputs = forward_driver.GetnDiff_Outputs() - - if not (num_diff_inputs > 0 and num_diff_outputs > 0): - raise ValueError( - "Need to define at least one differentiable input and output. " - "To run without differentiation, use the SU2Numpy class." - ) - - if len(x) != num_diff_inputs: - raise TypeError( - f"{len(x)} inputs were provided, but the config file " - f"({forward_config}) defines {num_diff_inputs} diff inputs." - ) - set_forward_driver_hook(forward_driver) - ctx.num_diff_inputs = num_diff_inputs - - outputs = [] - su2_function_mpi.non_busy_wait(mpi4py.MPI.COMM_WORLD) - for i in range(batch_size): - output = mpi4py.MPI.COMM_WORLD.recv(source=1 + i * procs_per_example) - outputs.append(output) - outputs = tuple( - common.pad_sequence( - [paddle.to_tensor(o[i], dtype=paddle.float32) for o in outputs], - batch_first=True, - ) - for i in range(num_diff_outputs) - ) - return outputs - - @staticmethod - def backward(ctx, *grad_outputs): - su2_function_mpi.non_busy_post(mpi4py.MPI.COMM_WORLD) - max_ppe = _global_max_ppe - workers = mpi4py.MPI.COMM_WORLD.Get_size() - 1 - mpi4py.MPI.COMM_WORLD.bcast(su2_function_mpi.RunCode.RUN_ADJOINT, root=0) - grad_outputs = tuple([i.numpy() for i in grad_outputs]) - mpi4py.MPI.COMM_WORLD.bcast(grad_outputs, root=0) - batch_size = grad_outputs[0].shape[0] - procs_per_example = min(max_ppe, math.ceil(workers / batch_size)) - su2_function_mpi.non_busy_wait(mpi4py.MPI.COMM_WORLD) - grads = [] - for i in range(batch_size): - grad = mpi4py.MPI.COMM_WORLD.recv(source=1 + i * procs_per_example) - grads.append(grad) - print("grads", len(grads), flush=True) - grads = tuple( - common.pad_sequence( - [paddle.to_tensor(g[i], dtype=paddle.float32) for g in grads], - batch_first=True, - ) - for i in range(ctx.num_diff_inputs) - ) - return tuple( - [grads[0], grads[1], None, None] - ) # + (None,) * SU2Function.num_params +import math +from typing import Tuple + +import common +import mpi4py +import paddle +import pysu2 +import su2_function_mpi + +_global_max_ppe = -1 + + +class SU2Module(paddle.nn.Layer): + def __init__( + self, config_file: str, mesh_file: str, dims: int = 2, num_zones: int = 1 + ) -> None: + """Initialize the SU2 configurations for the provided config file. + + Args: + config_file (str): The SU2 configuration file name. + mesh_file (str): Optional parameter, if not set defaults to the mesh filename set in the config file. + Can be used to run a batch with different meshes for each sample. + Passing in mesh_file with batch_index parameter in string format (e.g., 'b{batch_index}_mesh.su2') + causes each element in batch to get assigned to the correct mesh file (0 indexed). + If running multiple processes in parallel, take care to name each mesh file uniquely to avoid conflicts + (e.g., unique = str(os.getpid()); mesh_file = 'b{batch_index}_' + unique + '_mesh.su2'). + dims (int, optional): Number of dimensions for the problem (2D or 3D). Defaults to 2. 
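# A minimal usage sketch of the mesh_file convention described above; the config path
# "config/coarse.cfg" is taken from run.sh and is only illustrative, and it assumes
# activate_su2_mpi(...) has already been called on every rank.
import os

unique = str(os.getpid())  # keep parallel runs from overwriting each other's meshes
mesh_file = "b{batch_index}_" + unique + "_mesh.su2"  # one mesh per batch element, 0-indexed
su2 = SU2Module("config/coarse.cfg", mesh_file=mesh_file, dims=2, num_zones=1)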
+ num_zones (int, optional): Number of zones in the simulation (only 1 supported currently). Defaults to 1. + """ + super().__init__() + if num_zones != 1: + raise ValueError("Only supports 1 zone for now.") + if mpi4py.MPI.COMM_WORLD.Get_rank() != 0: + raise ValueError("Only rank 0 can run SU2Function, not rank 0 in comm") + if _global_max_ppe <= 0: + raise ValueError( + "Before running SU2Function, a (single) call to activate_su2_mpi is needed." + ) + + self.num_zones = num_zones + self.dims = dims + self.mesh_file = mesh_file + + self.forward_config = config_file + self.forward_driver = None + + def forward(self, *inputs: paddle.Tensor) -> Tuple[paddle.Tensor, ...]: + return SU2Function.apply( + *inputs, + self.forward_config, + self.mesh_file, + self.num_zones, + self.dims, + self.set_forward_driver, + ) + + def get_forward_driver(self): + if self.forward_driver is None: + raise AttributeError("Forward driver is only set after running forward()") + return self.forward_driver + + def set_forward_driver(self, f): + if self.forward_driver is not None: + self.forward_driver.Postprocessing() + self.forward_driver = f + + def __del__(self): + """Close existing drivers and MPI communicators.""" + if hasattr(self, "forward_driver") and self.forward_driver is not None: + self.forward_driver.Postprocessing() + + +class SU2Function(paddle.autograd.PyLayer): + num_params = 5 + + @staticmethod + def forward(ctx, *inputs): + su2_function_mpi.non_busy_post(mpi4py.MPI.COMM_WORLD) + x = inputs[: -SU2Function.num_params] + forward_config, mesh_file, num_zones, dims, set_forward_driver_hook = inputs[ + -SU2Function.num_params : + ] + + if x[0].dim() < 2: + raise TypeError( + "Input is expected to have first dimension for batch, " + "e.g. x[0, :] is first item in batch." + ) + batch_size = x[0].shape[0] + max_ppe = _global_max_ppe + workers = mpi4py.MPI.COMM_WORLD.Get_size() - 1 + if 0 <= workers < batch_size: + raise TypeError( + "Batch size is larger than number of workers, not enough processes to run batch." + ) + + mpi4py.MPI.COMM_WORLD.bcast(su2_function_mpi.RunCode.RUN_FORWARD, root=0) + procs_per_example = min(max_ppe, math.ceil(workers / batch_size)) + + x = tuple((i.numpy() for i in x)) + + mpi4py.MPI.COMM_WORLD.bcast( + [num_zones, dims, forward_config, mesh_file, procs_per_example, x], root=0 + ) + + # instantiate forward_driver while workers work + worker_forward_config = mpi4py.MPI.COMM_WORLD.recv(source=1) + forward_driver = pysu2.CSinglezoneDriver( + worker_forward_config, num_zones, dims, mpi4py.MPI.COMM_SELF + ) + num_diff_inputs = forward_driver.GetnDiff_Inputs() + num_diff_outputs = forward_driver.GetnDiff_Outputs() + + if not (num_diff_inputs > 0 and num_diff_outputs > 0): + raise ValueError( + "Need to define at least one differentiable input and output. " + "To run without differentiation, use the SU2Numpy class." + ) + + if len(x) != num_diff_inputs: + raise TypeError( + f"{len(x)} inputs were provided, but the config file " + f"({forward_config}) defines {num_diff_inputs} diff inputs." 
+ ) + set_forward_driver_hook(forward_driver) + ctx.num_diff_inputs = num_diff_inputs + + outputs = [] + su2_function_mpi.non_busy_wait(mpi4py.MPI.COMM_WORLD) + for i in range(batch_size): + output = mpi4py.MPI.COMM_WORLD.recv(source=1 + i * procs_per_example) + outputs.append(output) + outputs = tuple( + common.pad_sequence( + [paddle.to_tensor(o[i], dtype=paddle.float32) for o in outputs], + batch_first=True, + ) + for i in range(num_diff_outputs) + ) + return outputs + + @staticmethod + def backward(ctx, *grad_outputs): + su2_function_mpi.non_busy_post(mpi4py.MPI.COMM_WORLD) + max_ppe = _global_max_ppe + workers = mpi4py.MPI.COMM_WORLD.Get_size() - 1 + mpi4py.MPI.COMM_WORLD.bcast(su2_function_mpi.RunCode.RUN_ADJOINT, root=0) + grad_outputs = tuple([i.numpy() for i in grad_outputs]) + mpi4py.MPI.COMM_WORLD.bcast(grad_outputs, root=0) + batch_size = grad_outputs[0].shape[0] + procs_per_example = min(max_ppe, math.ceil(workers / batch_size)) + su2_function_mpi.non_busy_wait(mpi4py.MPI.COMM_WORLD) + grads = [] + for i in range(batch_size): + grad = mpi4py.MPI.COMM_WORLD.recv(source=1 + i * procs_per_example) + grads.append(grad) + print("grads", len(grads), flush=True) + grads = tuple( + common.pad_sequence( + [paddle.to_tensor(g[i], dtype=paddle.float32) for g in grads], + batch_first=True, + ) + for i in range(ctx.num_diff_inputs) + ) + return tuple( + [grads[0], grads[1], None, None] + ) # + (None,) * SU2Function.num_params diff --git a/jointContribution/CFDGCN/su2paddle/su2_function_mpi.py b/jointContribution/CFDGCN/su2paddle/su2_function_mpi.py index 8f7e3585cb..cb2f88cbc1 100644 --- a/jointContribution/CFDGCN/su2paddle/su2_function_mpi.py +++ b/jointContribution/CFDGCN/su2paddle/su2_function_mpi.py @@ -1,356 +1,356 @@ -import atexit -import os -import shutil -import time -import warnings -from enum import IntEnum -from typing import Dict -from typing import Sequence -from typing import Tuple -from typing import TypeVar -from typing import Union - -import mpi4py -import numpy as np -import paddle -import pysu2 -import pysu2ad -import SU2 -import su2paddle.su2_function - -warnings.filterwarnings("ignore", category=DeprecationWarning) - -GenTensor = TypeVar("GenTensor", paddle.Tensor, np.ndarray) - -_non_busy_wait_max_time = 0.1 - - -class RunCode(IntEnum): - """Run codes for communication with worker processes.""" - - STOP = -1 - RUN_FORWARD = 0 - RUN_ADJOINT = 1 - - -def run_forward( - comm: mpi4py.MPI.Intracomm, - forward_driver: pysu2.CSinglezoneDriver, - inputs: Sequence[GenTensor], -) -> Tuple[GenTensor, ...]: - """Runs a simulation with the provided driver, using the inputs to set the values - defined in DIFF_INPUTS in the config file. - - Args: - comm (mpi4py.MPI.Intracomm): The communicator for the processes running the simulation. - forward_driver (pysu2.CSinglezoneDriver): The driver for the simulation, created using the same comm - as passed into this function. - inputs (Sequence[GenTensor]): The inputs used to set the DIFF_INPUTS as defined in the configuration file. - - Returns: - Tuple[GenTensor, ...]: The outputs of the simulation, as defined in DIFF_OUTPUTS in the config file. 
- """ - rank = comm.Get_rank() - for i, x in enumerate(inputs): - forward_driver.SetDiff_Inputs_Vars(x.flatten().tolist(), i) - forward_driver.ApplyDiff_Inputs_Vars() - - forward_driver.StartSolver() - comm.Barrier() - - # are we using numpy or torch - is_numpy = len(inputs) == 0 or type(inputs[0]) is np.ndarray - if is_numpy: - array_func = np.array - cat_func = np.concatenate - else: - import paddle - - array_func = paddle.to_tensor(inputs[0]) - cat_func = paddle.concat - - num_diff_outputs = forward_driver.GetnDiff_Outputs() - outputs = [ - array_func(forward_driver.GetDiff_Outputs_Vars(i)) - for i in range(num_diff_outputs) - ] - - for i in range(num_diff_outputs): - if outputs[i].shape[0] > 1: - # if dealing with full-grid, reorder according to GlobalIndex - if comm.Get_size() > 1: - # gather outputs in rank 0 if more than one rank - outputs[i] = comm.gather(outputs[i], root=0) - global_inds = comm.gather(forward_driver.GetAllGlobalIndices(), root=0) - if rank == 0: - outputs[i] = cat_func(outputs[i]) - global_inds = list(sum(global_inds, tuple())) # join tuples - else: - global_inds = list(forward_driver.GetAllGlobalIndices()) - - if rank == 0: - # TODO Make the list integers on the C side - global_inds = np.array(global_inds, dtype=np.long) - assert outputs[i].shape[0] == len( - global_inds - ), "Only full grid outputs supported by now (besides scalars)." - # order by global_inds - outputs[i][global_inds] = ( - outputs[i].copy() if is_numpy else outputs[i].clone() - ) - else: - outputs[i] = None - return tuple(outputs) - - -def run_adjoint( - comm: mpi4py.MPI.Intracomm, - adjoint_driver: pysu2ad.CDiscAdjSinglezoneDriver, - inputs: Sequence[GenTensor], - grad_outputs: Sequence[GenTensor], -) -> Tuple[GenTensor, ...]: - """Runs a simulation with the provided driver, using the inputs to set the values - defined in DIFF_INPUTS in the config file. - - Args: - comm (mpi4py.MPI.Intracomm): The communicator for the processes running the simulation. - adjoint_driver (pysu2ad.CDiscAdjSinglezoneDriver): The driver for the adjoint computation, - created using the same comm as passed into this function. - inputs (Sequence[GenTensor]): The same inputs used to set the DIFF_INPUTS in the forward pass. - grad_outputs (Sequence[GenTensor]): Gradients of a scalar loss with respect to the forward outputs, - see SU2Function's backward() method. - - Returns: - Tuple[GenTensor, ...]: The gradients of the loss with respect to the inputs. 
- """ - - rank = comm.Get_rank() - for i, x in enumerate(inputs): - adjoint_driver.SetDiff_Inputs_Vars(x.flatten().tolist(), i) - adjoint_driver.ApplyDiff_Inputs_Vars() - for i, g in enumerate(grad_outputs): - adjoint_driver.SetBackprop_Derivs(g.flatten().tolist(), i) - - adjoint_driver.StartSolver() - - # are we using numpy or torch - is_numpy = len(inputs) == 0 or type(inputs[0]) is np.ndarray - if is_numpy: - array_func = np.array - cat_func = np.concatenate - else: - array_func = paddle.to_tensor(inputs[0]) - cat_func = paddle.concat - - num_diff_inputs = adjoint_driver.GetnDiff_Inputs() - grads = [ - array_func(adjoint_driver.GetTotal_Sens_Diff_Inputs(i)) - for i in range(num_diff_inputs) - ] - for i in range(num_diff_inputs): - if grads[i].shape[0] > 1: - # if dealing with full-grid, reorder according to GlobalIndex - if comm.Get_size() > 1: - # gather outputs in rank 0 if more than one rank - grads[i] = comm.gather(grads[i], root=0) - global_inds = comm.gather(adjoint_driver.GetAllGlobalIndices(), root=0) - if rank == 0: - grads[i] = cat_func(grads[i]) - global_inds = list(sum(global_inds, tuple())) # join tuples - else: - global_inds = list(adjoint_driver.GetAllGlobalIndices()) - - if rank == 0: - global_inds = np.array(global_inds, dtype=np.long) - assert grads[i].shape[0] == len( - global_inds - ), "Only full grid outputs supported by now (besides scalars)." - # order by global_inds - grads[i][global_inds] = ( - grads[i].copy() if is_numpy else grads[i].clone() - ) - else: - grads[i] = None - return tuple(grads) - - -def modify_config( - config: SU2.io.Config, - new_params: Dict[str, str], - outfile: Union[str, os.PathLike, None] = None, -) -> SU2.io.Config: - """Modify a config, saving the modifications to outfile if provided.""" - temp_config = config.copy() - for k, v in new_params.items(): - temp_config[k] = v - if outfile is not None: - temp_config.write(outfile) - return temp_config - - -def activate_su2_mpi( - remove_temp_files: bool = True, - max_procs_per_example: int = 1, - non_busy_wait_max_time: float = 0.1, -) -> None: - assert ( - mpi4py.MPI.COMM_WORLD.Get_size() > 1 - ), 'Need at least 1 master and 1 worker process, run with "mpirun -np ...' - - if mpi4py.MPI.COMM_WORLD.Get_rank() != 0: - global _non_busy_wait_max_time - _non_busy_wait_max_time = non_busy_wait_max_time - main(remove_temp_files=remove_temp_files) - exit(0) - - # Only rank 0 from here on - def stop(): - non_busy_post(mpi4py.MPI.COMM_WORLD) - mpi4py.MPI.COMM_WORLD.bcast(RunCode.STOP, root=0) - - atexit.register(stop) - su2paddle.su2_function._global_max_ppe = max_procs_per_example - - -def non_busy_wait(comm: mpi4py.MPI.Intracomm) -> None: - b = comm.Ibarrier() - start = time.time() - while not b.Get_status(): - time.sleep(min((time.time() - start) / 2, _non_busy_wait_max_time)) - - -def non_busy_post(comm: mpi4py.MPI.Intracomm) -> None: - comm.Ibarrier() - - -def main(remove_temp_files: bool = True) -> None: - """Runs a loop for the worker processes. - Can be signaled to run either a forward simulation or an adjoint computation - using RunCodes. 
- """ - local_comm = mpi4py.MPI.COMM_WORLD.Create_group( - mpi4py.MPI.Group.Excl(mpi4py.MPI.COMM_WORLD.Get_group(), [0]) - ) - local_rank = local_comm.Get_rank() - local_size = local_comm.Get_size() - ppid = str(os.getppid()) - - x = inputs = batch_comm = batch_index = batch_rank = forward_config = None - num_zones = dims = batch_solution_filename = batch_restart_filename = None - batch_size = procs_per_example = 1 - while True: - non_busy_wait(mpi4py.MPI.COMM_WORLD) - run_type = mpi4py.MPI.COMM_WORLD.bcast(None, root=0) - if run_type == RunCode.STOP: - # remove temporary files - if local_rank == 0 and remove_temp_files: - os.system(f"rm b*_{ppid}_* 2> /dev/null") - break - - if run_type == RunCode.RUN_FORWARD: - if procs_per_example != 1 and procs_per_example != local_size: - # disconnect batch_comm from previous run, if it was created - batch_comm.Disconnect() - ( - num_zones, - dims, - forward_config, - mesh_file, - procs_per_example, - inputs, - ) = mpi4py.MPI.COMM_WORLD.bcast(None, root=0) - batch_size = inputs[0].shape[0] - batch_index = local_rank // procs_per_example - if procs_per_example == 1: - batch_comm = mpi4py.MPI.COMM_SELF - elif procs_per_example == local_size: - batch_comm = local_comm - else: - batch_comm = local_comm.Split(batch_index, local_rank) - if local_rank >= batch_size * procs_per_example: - # these procs wont be used - non_busy_post(mpi4py.MPI.COMM_WORLD) - continue - batch_rank = batch_comm.Get_rank() - x = [z[batch_index] for z in inputs] - - batch_forward_config = f"b{batch_index}_{ppid}_{forward_config}" - if batch_rank == 0: - old_config = SU2.io.Config(forward_config) - restart_filename = old_config["RESTART_FLOW_FILENAME"] - batch_restart_filename = f"b{batch_index}_{ppid}_{restart_filename}" - mesh_file = ( - mesh_file.format(batch_index=batch_index) - if mesh_file - else old_config["MESH_FILENAME"] - ) - new_config = { - "RESTART_FLOW_FILENAME": batch_restart_filename, - "MESH_FILENAME": mesh_file, - } - shutil.copy(forward_config, batch_forward_config) - modify_config(old_config, new_config, outfile=batch_forward_config) - if local_rank == 0: - mpi4py.MPI.COMM_WORLD.send(batch_forward_config, dest=0) - batch_comm.Barrier() - - forward_driver = pysu2.CSinglezoneDriver( - batch_forward_config, num_zones, dims, batch_comm - ) - # TODO SetRestart_FlowFileName is not necessary anymore, remove from C++ - # forward_driver.SetRestart_FlowFileName(batch_restart_filename) - outputs = run_forward(batch_comm, forward_driver, x) - output_lengths = [o.shape[0] for o in outputs] - non_busy_post(mpi4py.MPI.COMM_WORLD) - if batch_rank == 0: - mpi4py.MPI.COMM_WORLD.send(outputs, dest=0) - # TODO Way to get results in-memory, without writing to file? - batch_solution_filename = batch_restart_filename.replace( - "restart", "solution" - ) - shutil.move(batch_restart_filename, batch_solution_filename) - forward_driver.Postprocessing() - - elif run_type == RunCode.RUN_ADJOINT: - # assert inputs is not None, 'Run forward simulation before running the adjoint.' 
- inputs = None - grad_outputs = mpi4py.MPI.COMM_WORLD.bcast(None, root=0) - if local_rank >= batch_size * procs_per_example: - # these procs wont be used - non_busy_post(mpi4py.MPI.COMM_WORLD) - continue - dl = [ - z[batch_index, : output_lengths[i]] for i, z in enumerate(grad_outputs) - ] - - batch_adjoint_config = ( - f"b{batch_index}_{str(os.getppid())}_adjoint_{forward_config}" - ) - if batch_rank == 0: - old_config = SU2.io.Config(forward_config) - mesh_file = ( - mesh_file.format(batch_index=batch_index) - if mesh_file - else old_config["MESH_FILENAME"] - ) - new_config = { - "MATH_PROBLEM": "DISCRETE_ADJOINT", - "SOLUTION_FLOW_FILENAME": batch_solution_filename, - "RESTART_ADJ_FILENAME": batch_restart_filename.replace( - "flow", "adj" - ), - "MESH_FILENAME": mesh_file, - } - shutil.copy(forward_config, batch_adjoint_config) - modify_config(old_config, new_config, outfile=batch_adjoint_config) - batch_comm.Barrier() - adjoint_driver = pysu2ad.CDiscAdjSinglezoneDriver( - batch_adjoint_config, num_zones, dims, batch_comm - ) - grads = run_adjoint(batch_comm, adjoint_driver, x, dl) - non_busy_post(mpi4py.MPI.COMM_WORLD) - if batch_rank == 0: - mpi4py.MPI.COMM_WORLD.send(grads, dest=0) - adjoint_driver.Postprocessing() - else: - raise NotImplementedError +import atexit +import os +import shutil +import time +import warnings +from enum import IntEnum +from typing import Dict +from typing import Sequence +from typing import Tuple +from typing import TypeVar +from typing import Union + +import mpi4py +import numpy as np +import paddle +import pysu2 +import pysu2ad +import SU2 +import su2paddle.su2_function + +warnings.filterwarnings("ignore", category=DeprecationWarning) + +GenTensor = TypeVar("GenTensor", paddle.Tensor, np.ndarray) + +_non_busy_wait_max_time = 0.1 + + +class RunCode(IntEnum): + """Run codes for communication with worker processes.""" + + STOP = -1 + RUN_FORWARD = 0 + RUN_ADJOINT = 1 + + +def run_forward( + comm: mpi4py.MPI.Intracomm, + forward_driver: pysu2.CSinglezoneDriver, + inputs: Sequence[GenTensor], +) -> Tuple[GenTensor, ...]: + """Runs a simulation with the provided driver, using the inputs to set the values + defined in DIFF_INPUTS in the config file. + + Args: + comm (mpi4py.MPI.Intracomm): The communicator for the processes running the simulation. + forward_driver (pysu2.CSinglezoneDriver): The driver for the simulation, created using the same comm + as passed into this function. + inputs (Sequence[GenTensor]): The inputs used to set the DIFF_INPUTS as defined in the configuration file. + + Returns: + Tuple[GenTensor, ...]: The outputs of the simulation, as defined in DIFF_OUTPUTS in the config file. 
+ """ + rank = comm.Get_rank() + for i, x in enumerate(inputs): + forward_driver.SetDiff_Inputs_Vars(x.flatten().tolist(), i) + forward_driver.ApplyDiff_Inputs_Vars() + + forward_driver.StartSolver() + comm.Barrier() + + # are we using numpy or torch + is_numpy = len(inputs) == 0 or type(inputs[0]) is np.ndarray + if is_numpy: + array_func = np.array + cat_func = np.concatenate + else: + import paddle + + array_func = paddle.to_tensor(inputs[0]) + cat_func = paddle.concat + + num_diff_outputs = forward_driver.GetnDiff_Outputs() + outputs = [ + array_func(forward_driver.GetDiff_Outputs_Vars(i)) + for i in range(num_diff_outputs) + ] + + for i in range(num_diff_outputs): + if outputs[i].shape[0] > 1: + # if dealing with full-grid, reorder according to GlobalIndex + if comm.Get_size() > 1: + # gather outputs in rank 0 if more than one rank + outputs[i] = comm.gather(outputs[i], root=0) + global_inds = comm.gather(forward_driver.GetAllGlobalIndices(), root=0) + if rank == 0: + outputs[i] = cat_func(outputs[i]) + global_inds = list(sum(global_inds, tuple())) # join tuples + else: + global_inds = list(forward_driver.GetAllGlobalIndices()) + + if rank == 0: + # TODO Make the list integers on the C side + global_inds = np.array(global_inds, dtype=np.long) + assert outputs[i].shape[0] == len( + global_inds + ), "Only full grid outputs supported by now (besides scalars)." + # order by global_inds + outputs[i][global_inds] = ( + outputs[i].copy() if is_numpy else outputs[i].clone() + ) + else: + outputs[i] = None + return tuple(outputs) + + +def run_adjoint( + comm: mpi4py.MPI.Intracomm, + adjoint_driver: pysu2ad.CDiscAdjSinglezoneDriver, + inputs: Sequence[GenTensor], + grad_outputs: Sequence[GenTensor], +) -> Tuple[GenTensor, ...]: + """Runs a simulation with the provided driver, using the inputs to set the values + defined in DIFF_INPUTS in the config file. + + Args: + comm (mpi4py.MPI.Intracomm): The communicator for the processes running the simulation. + adjoint_driver (pysu2ad.CDiscAdjSinglezoneDriver): The driver for the adjoint computation, + created using the same comm as passed into this function. + inputs (Sequence[GenTensor]): The same inputs used to set the DIFF_INPUTS in the forward pass. + grad_outputs (Sequence[GenTensor]): Gradients of a scalar loss with respect to the forward outputs, + see SU2Function's backward() method. + + Returns: + Tuple[GenTensor, ...]: The gradients of the loss with respect to the inputs. 
+ """ + + rank = comm.Get_rank() + for i, x in enumerate(inputs): + adjoint_driver.SetDiff_Inputs_Vars(x.flatten().tolist(), i) + adjoint_driver.ApplyDiff_Inputs_Vars() + for i, g in enumerate(grad_outputs): + adjoint_driver.SetBackprop_Derivs(g.flatten().tolist(), i) + + adjoint_driver.StartSolver() + + # are we using numpy or torch + is_numpy = len(inputs) == 0 or type(inputs[0]) is np.ndarray + if is_numpy: + array_func = np.array + cat_func = np.concatenate + else: + array_func = paddle.to_tensor(inputs[0]) + cat_func = paddle.concat + + num_diff_inputs = adjoint_driver.GetnDiff_Inputs() + grads = [ + array_func(adjoint_driver.GetTotal_Sens_Diff_Inputs(i)) + for i in range(num_diff_inputs) + ] + for i in range(num_diff_inputs): + if grads[i].shape[0] > 1: + # if dealing with full-grid, reorder according to GlobalIndex + if comm.Get_size() > 1: + # gather outputs in rank 0 if more than one rank + grads[i] = comm.gather(grads[i], root=0) + global_inds = comm.gather(adjoint_driver.GetAllGlobalIndices(), root=0) + if rank == 0: + grads[i] = cat_func(grads[i]) + global_inds = list(sum(global_inds, tuple())) # join tuples + else: + global_inds = list(adjoint_driver.GetAllGlobalIndices()) + + if rank == 0: + global_inds = np.array(global_inds, dtype=np.long) + assert grads[i].shape[0] == len( + global_inds + ), "Only full grid outputs supported by now (besides scalars)." + # order by global_inds + grads[i][global_inds] = ( + grads[i].copy() if is_numpy else grads[i].clone() + ) + else: + grads[i] = None + return tuple(grads) + + +def modify_config( + config: SU2.io.Config, + new_params: Dict[str, str], + outfile: Union[str, os.PathLike, None] = None, +) -> SU2.io.Config: + """Modify a config, saving the modifications to outfile if provided.""" + temp_config = config.copy() + for k, v in new_params.items(): + temp_config[k] = v + if outfile is not None: + temp_config.write(outfile) + return temp_config + + +def activate_su2_mpi( + remove_temp_files: bool = True, + max_procs_per_example: int = 1, + non_busy_wait_max_time: float = 0.1, +) -> None: + assert ( + mpi4py.MPI.COMM_WORLD.Get_size() > 1 + ), 'Need at least 1 master and 1 worker process, run with "mpirun -np ...' + + if mpi4py.MPI.COMM_WORLD.Get_rank() != 0: + global _non_busy_wait_max_time + _non_busy_wait_max_time = non_busy_wait_max_time + main(remove_temp_files=remove_temp_files) + exit(0) + + # Only rank 0 from here on + def stop(): + non_busy_post(mpi4py.MPI.COMM_WORLD) + mpi4py.MPI.COMM_WORLD.bcast(RunCode.STOP, root=0) + + atexit.register(stop) + su2paddle.su2_function._global_max_ppe = max_procs_per_example + + +def non_busy_wait(comm: mpi4py.MPI.Intracomm) -> None: + b = comm.Ibarrier() + start = time.time() + while not b.Get_status(): + time.sleep(min((time.time() - start) / 2, _non_busy_wait_max_time)) + + +def non_busy_post(comm: mpi4py.MPI.Intracomm) -> None: + comm.Ibarrier() + + +def main(remove_temp_files: bool = True) -> None: + """Runs a loop for the worker processes. + Can be signaled to run either a forward simulation or an adjoint computation + using RunCodes. 
+ """ + local_comm = mpi4py.MPI.COMM_WORLD.Create_group( + mpi4py.MPI.Group.Excl(mpi4py.MPI.COMM_WORLD.Get_group(), [0]) + ) + local_rank = local_comm.Get_rank() + local_size = local_comm.Get_size() + ppid = str(os.getppid()) + + x = inputs = batch_comm = batch_index = batch_rank = forward_config = None + num_zones = dims = batch_solution_filename = batch_restart_filename = None + batch_size = procs_per_example = 1 + while True: + non_busy_wait(mpi4py.MPI.COMM_WORLD) + run_type = mpi4py.MPI.COMM_WORLD.bcast(None, root=0) + if run_type == RunCode.STOP: + # remove temporary files + if local_rank == 0 and remove_temp_files: + os.system(f"rm b*_{ppid}_* 2> /dev/null") + break + + if run_type == RunCode.RUN_FORWARD: + if procs_per_example != 1 and procs_per_example != local_size: + # disconnect batch_comm from previous run, if it was created + batch_comm.Disconnect() + ( + num_zones, + dims, + forward_config, + mesh_file, + procs_per_example, + inputs, + ) = mpi4py.MPI.COMM_WORLD.bcast(None, root=0) + batch_size = inputs[0].shape[0] + batch_index = local_rank // procs_per_example + if procs_per_example == 1: + batch_comm = mpi4py.MPI.COMM_SELF + elif procs_per_example == local_size: + batch_comm = local_comm + else: + batch_comm = local_comm.Split(batch_index, local_rank) + if local_rank >= batch_size * procs_per_example: + # these procs wont be used + non_busy_post(mpi4py.MPI.COMM_WORLD) + continue + batch_rank = batch_comm.Get_rank() + x = [z[batch_index] for z in inputs] + + batch_forward_config = f"b{batch_index}_{ppid}_{forward_config}" + if batch_rank == 0: + old_config = SU2.io.Config(forward_config) + restart_filename = old_config["RESTART_FLOW_FILENAME"] + batch_restart_filename = f"b{batch_index}_{ppid}_{restart_filename}" + mesh_file = ( + mesh_file.format(batch_index=batch_index) + if mesh_file + else old_config["MESH_FILENAME"] + ) + new_config = { + "RESTART_FLOW_FILENAME": batch_restart_filename, + "MESH_FILENAME": mesh_file, + } + shutil.copy(forward_config, batch_forward_config) + modify_config(old_config, new_config, outfile=batch_forward_config) + if local_rank == 0: + mpi4py.MPI.COMM_WORLD.send(batch_forward_config, dest=0) + batch_comm.Barrier() + + forward_driver = pysu2.CSinglezoneDriver( + batch_forward_config, num_zones, dims, batch_comm + ) + # TODO SetRestart_FlowFileName is not necessary anymore, remove from C++ + # forward_driver.SetRestart_FlowFileName(batch_restart_filename) + outputs = run_forward(batch_comm, forward_driver, x) + output_lengths = [o.shape[0] for o in outputs] + non_busy_post(mpi4py.MPI.COMM_WORLD) + if batch_rank == 0: + mpi4py.MPI.COMM_WORLD.send(outputs, dest=0) + # TODO Way to get results in-memory, without writing to file? + batch_solution_filename = batch_restart_filename.replace( + "restart", "solution" + ) + shutil.move(batch_restart_filename, batch_solution_filename) + forward_driver.Postprocessing() + + elif run_type == RunCode.RUN_ADJOINT: + # assert inputs is not None, 'Run forward simulation before running the adjoint.' 
+ inputs = None + grad_outputs = mpi4py.MPI.COMM_WORLD.bcast(None, root=0) + if local_rank >= batch_size * procs_per_example: + # these procs wont be used + non_busy_post(mpi4py.MPI.COMM_WORLD) + continue + dl = [ + z[batch_index, : output_lengths[i]] for i, z in enumerate(grad_outputs) + ] + + batch_adjoint_config = ( + f"b{batch_index}_{str(os.getppid())}_adjoint_{forward_config}" + ) + if batch_rank == 0: + old_config = SU2.io.Config(forward_config) + mesh_file = ( + mesh_file.format(batch_index=batch_index) + if mesh_file + else old_config["MESH_FILENAME"] + ) + new_config = { + "MATH_PROBLEM": "DISCRETE_ADJOINT", + "SOLUTION_FLOW_FILENAME": batch_solution_filename, + "RESTART_ADJ_FILENAME": batch_restart_filename.replace( + "flow", "adj" + ), + "MESH_FILENAME": mesh_file, + } + shutil.copy(forward_config, batch_adjoint_config) + modify_config(old_config, new_config, outfile=batch_adjoint_config) + batch_comm.Barrier() + adjoint_driver = pysu2ad.CDiscAdjSinglezoneDriver( + batch_adjoint_config, num_zones, dims, batch_comm + ) + grads = run_adjoint(batch_comm, adjoint_driver, x, dl) + non_busy_post(mpi4py.MPI.COMM_WORLD) + if batch_rank == 0: + mpi4py.MPI.COMM_WORLD.send(grads, dest=0) + adjoint_driver.Postprocessing() + else: + raise NotImplementedError diff --git a/jointContribution/CFDGCN/su2paddle/su2_numpy.py b/jointContribution/CFDGCN/su2paddle/su2_numpy.py index 9baaa77c38..873609fdb3 100644 --- a/jointContribution/CFDGCN/su2paddle/su2_numpy.py +++ b/jointContribution/CFDGCN/su2paddle/su2_numpy.py @@ -1,151 +1,151 @@ -import math - -import mpi4py -import numpy as np -import pysu2 -import su2_function_mpi - - -class SU2Numpy: - """Class that uses the SU2 in-memory python wrapper - to provide differentiable physics simulations. - - Usage example for scalar output case: - - # define differentiable inputs and outputs in the config - # with DIFF_INPUTS and DIFF_OUTPUTS fields - su2 = SU2Numpy('config.cfg') - inputs = np.array([1.0]) - outputs = su2(inputs) - # if output is a scalar, we can get the gradient of the output - # with respect to the inputs by simply doing - doutput_dinputs = loss.backward() - """ - - def __init__(self, config_file, dims=2, num_zones=1): - """Initialize the SU2 configurations for the provided config file. - - Args: - config_file (_type_): The SU2 configuration file name. - dims (int, optional): Number of dimensions for the problem (2D or 3D). Defaults to 2. - num_zones (int, optional): Number of zones in the simulation (only 1 supported currently).. Defaults to 1. - """ - if num_zones != 1: - raise ValueError("Only supports 1 zone for now.") - if mpi4py.MPI.COMM_WORLD.Get_rank() != 0: - raise ValueError("Only rank 0 can run SU2Function, not rank 0 in comm") - - self.comm = mpi4py.MPI.COMM_WORLD - self.workers = self.comm.Get_size() - 1 - if self.workers <= 0: - raise ValueError("Need at least 1 master and 1 worker process.") - self.num_zones = num_zones - self.dims = dims - self.outputs_shape = None - self.batch_size = -1 - - self.forward_config = config_file - self.forward_driver = pysu2.CSinglezoneDriver( - self.forward_config, self.num_zones, self.dims, mpi4py.MPI.COMM_SELF - ) - self.num_diff_inputs = self.forward_driver.GetnDiff_Inputs() - self.num_diff_outputs = self.forward_driver.GetnDiff_Outputs() - - def __call__(self, *inputs): - return self.forward(*inputs) - - def forward(self, *inputs): - """Runs a batch of SU2 simulations. - - Args: - inputs : The differentiable inputs for the batch of simulations. 
- Number of inputs depends on the number of DIFF_INPUTS set in the configuration file. - Each input is of shape BATCH_SIZE x SHAPE, where SHAPE is the shape of the given input. - For example, a batch of 10 scalars would have input shape 10 x 1, - a batch of 10 vectors of length N would have input shape 10 x N. - - Returns: - tuple: A tuple of tensors with the batch of differentiable outputs. - Number of outputs depends on the number of DIFF_OUTPUTS set in the configuration file. - As for the inputs, each output is of shape BATCH_SIZE x SHAPE, - where SHAPE is the shape of the given output. - Outputs are always either scalars or vectors. - """ - if len(inputs) != self.num_diff_inputs: - raise TypeError( - f"{len(inputs)} inputs were provided, but the config file ({self.forward_config}) defines {self.num_diff_inputs} diff inputs." - ) - if self.num_diff_inputs > 0 and inputs[0].ndim < 2: - raise TypeError( - "Input is expected to have first dimension for batch, " - "e.g. x[0, :] is first item in batch." - ) - self.batch_size = inputs[0].shape[0] if self.num_diff_inputs > 0 else 1 - if 0 <= self.workers < self.batch_size: - raise TypeError( - "Batch size is larger than number of workers, not enough processes to run batch." - ) - procs_per_example = math.ceil(self.workers / self.batch_size) - - self.comm.bcast(su2_function_mpi.RunCode.RUN_FORWARD, root=0) - self.comm.bcast( - [self.num_zones, self.dims, self.forward_config, inputs], root=0 - ) - outputs = [] - for i in range(self.batch_size): - output = self.comm.recv(source=1 + i * procs_per_example) - outputs.append(output) - outputs = tuple( - np.concatenate([np.expand_dims(o[i], axis=0) for o in outputs]) - for i in range(self.num_diff_outputs) - ) - self.outputs_shape = [o.shape for o in outputs] - return outputs - - def backward(self, *grad_outputs): - """Gives the gradient of some scalar loss with respect to the inputs of the previous - forward call when provided the gradients of this loss with respect to the outputs of - the forward call. - - Args: - grad_outputs: Gradients of a scalar loss with respect to the forward outputs. - For example, if the loss is the sum of the outputs, the grad_outputs should be a all ones. - This defaults to 1.0 when the output of the forward call is just a scalar (or batch of scalars). - Raises: - TypeError: _description_ - - Returns: - tuple: The gradients of the loss with respect to the forward inputs. 
- """ - - if ( - len(grad_outputs) == 0 - and len(self.outputs_shape) == 1 - and self.outputs_shape[0][1] == 1 - ): - # if no grad_outputs was provided and just one output scalar (or batch of scalars) - # was used, then use a default grad outputs of 1.0 - grad_outputs = [np.ones(self.outputs_shape[0])] - elif self.num_diff_outputs != len(grad_outputs): - raise TypeError( - "To run backward() you need to provide the gradients of a scalar loss " - "with respect to the outputs of the forward pass" - ) - - procs_per_example = math.ceil(self.workers / self.batch_size) - self.comm.bcast(su2_function_mpi.RunCode.RUN_ADJOINT, root=0) - self.comm.bcast(grad_outputs, root=0) - grads = [] - for i in range(self.batch_size): - grad = self.comm.recv(source=1 + i * procs_per_example) - grads.append(grad) - grads = tuple( - np.concatenate([np.expand_dims(g[i], axis=0) for g in grads]) - for i in range(self.num_diff_inputs) - ) - return grads - - def __del__(self): - """Close existing drivers and MPI communicators.""" - if self.forward_driver is not None: - self.forward_driver.Postprocessing() +import math + +import mpi4py +import numpy as np +import pysu2 +import su2_function_mpi + + +class SU2Numpy: + """Class that uses the SU2 in-memory python wrapper + to provide differentiable physics simulations. + + Usage example for scalar output case: + + # define differentiable inputs and outputs in the config + # with DIFF_INPUTS and DIFF_OUTPUTS fields + su2 = SU2Numpy('config.cfg') + inputs = np.array([1.0]) + outputs = su2(inputs) + # if output is a scalar, we can get the gradient of the output + # with respect to the inputs by simply doing + doutput_dinputs = loss.backward() + """ + + def __init__(self, config_file, dims=2, num_zones=1): + """Initialize the SU2 configurations for the provided config file. + + Args: + config_file (_type_): The SU2 configuration file name. + dims (int, optional): Number of dimensions for the problem (2D or 3D). Defaults to 2. + num_zones (int, optional): Number of zones in the simulation (only 1 supported currently).. Defaults to 1. + """ + if num_zones != 1: + raise ValueError("Only supports 1 zone for now.") + if mpi4py.MPI.COMM_WORLD.Get_rank() != 0: + raise ValueError("Only rank 0 can run SU2Function, not rank 0 in comm") + + self.comm = mpi4py.MPI.COMM_WORLD + self.workers = self.comm.Get_size() - 1 + if self.workers <= 0: + raise ValueError("Need at least 1 master and 1 worker process.") + self.num_zones = num_zones + self.dims = dims + self.outputs_shape = None + self.batch_size = -1 + + self.forward_config = config_file + self.forward_driver = pysu2.CSinglezoneDriver( + self.forward_config, self.num_zones, self.dims, mpi4py.MPI.COMM_SELF + ) + self.num_diff_inputs = self.forward_driver.GetnDiff_Inputs() + self.num_diff_outputs = self.forward_driver.GetnDiff_Outputs() + + def __call__(self, *inputs): + return self.forward(*inputs) + + def forward(self, *inputs): + """Runs a batch of SU2 simulations. + + Args: + inputs : The differentiable inputs for the batch of simulations. + Number of inputs depends on the number of DIFF_INPUTS set in the configuration file. + Each input is of shape BATCH_SIZE x SHAPE, where SHAPE is the shape of the given input. + For example, a batch of 10 scalars would have input shape 10 x 1, + a batch of 10 vectors of length N would have input shape 10 x N. + + Returns: + tuple: A tuple of tensors with the batch of differentiable outputs. + Number of outputs depends on the number of DIFF_OUTPUTS set in the configuration file. 
+ As for the inputs, each output is of shape BATCH_SIZE x SHAPE, + where SHAPE is the shape of the given output. + Outputs are always either scalars or vectors. + """ + if len(inputs) != self.num_diff_inputs: + raise TypeError( + f"{len(inputs)} inputs were provided, but the config file ({self.forward_config}) defines {self.num_diff_inputs} diff inputs." + ) + if self.num_diff_inputs > 0 and inputs[0].ndim < 2: + raise TypeError( + "Input is expected to have first dimension for batch, " + "e.g. x[0, :] is first item in batch." + ) + self.batch_size = inputs[0].shape[0] if self.num_diff_inputs > 0 else 1 + if 0 <= self.workers < self.batch_size: + raise TypeError( + "Batch size is larger than number of workers, not enough processes to run batch." + ) + procs_per_example = math.ceil(self.workers / self.batch_size) + + self.comm.bcast(su2_function_mpi.RunCode.RUN_FORWARD, root=0) + self.comm.bcast( + [self.num_zones, self.dims, self.forward_config, inputs], root=0 + ) + outputs = [] + for i in range(self.batch_size): + output = self.comm.recv(source=1 + i * procs_per_example) + outputs.append(output) + outputs = tuple( + np.concatenate([np.expand_dims(o[i], axis=0) for o in outputs]) + for i in range(self.num_diff_outputs) + ) + self.outputs_shape = [o.shape for o in outputs] + return outputs + + def backward(self, *grad_outputs): + """Gives the gradient of some scalar loss with respect to the inputs of the previous + forward call when provided the gradients of this loss with respect to the outputs of + the forward call. + + Args: + grad_outputs: Gradients of a scalar loss with respect to the forward outputs. + For example, if the loss is the sum of the outputs, the grad_outputs should be a all ones. + This defaults to 1.0 when the output of the forward call is just a scalar (or batch of scalars). + Raises: + TypeError: _description_ + + Returns: + tuple: The gradients of the loss with respect to the forward inputs. 
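# A minimal batched sketch following the docstrings above. It assumes the script is
# launched under mpirun with at least batch_size worker ranks serving SU2 requests, and
# that 'config.cfg' defines exactly one scalar DIFF_INPUT and one scalar DIFF_OUTPUT.
import numpy as np

su2 = SU2Numpy("config.cfg")
inputs = np.array([[1.0], [2.0]])   # batch of 2 scalars -> shape (2, 1)
(outputs,) = su2(inputs)            # tuple with one array of shape (2, 1)
(grads,) = su2.backward()           # default grad_outputs of ones for scalar outputs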
+ """ + + if ( + len(grad_outputs) == 0 + and len(self.outputs_shape) == 1 + and self.outputs_shape[0][1] == 1 + ): + # if no grad_outputs was provided and just one output scalar (or batch of scalars) + # was used, then use a default grad outputs of 1.0 + grad_outputs = [np.ones(self.outputs_shape[0])] + elif self.num_diff_outputs != len(grad_outputs): + raise TypeError( + "To run backward() you need to provide the gradients of a scalar loss " + "with respect to the outputs of the forward pass" + ) + + procs_per_example = math.ceil(self.workers / self.batch_size) + self.comm.bcast(su2_function_mpi.RunCode.RUN_ADJOINT, root=0) + self.comm.bcast(grad_outputs, root=0) + grads = [] + for i in range(self.batch_size): + grad = self.comm.recv(source=1 + i * procs_per_example) + grads.append(grad) + grads = tuple( + np.concatenate([np.expand_dims(g[i], axis=0) for g in grads]) + for i in range(self.num_diff_inputs) + ) + return grads + + def __del__(self): + """Close existing drivers and MPI communicators.""" + if self.forward_driver is not None: + self.forward_driver.Postprocessing() diff --git a/jointContribution/CHGNet/chgnet/__init__.py b/jointContribution/CHGNet/chgnet/__init__.py index 06b06c674f..480688868b 100644 --- a/jointContribution/CHGNet/chgnet/__init__.py +++ b/jointContribution/CHGNet/chgnet/__init__.py @@ -1,14 +1,14 @@ -from __future__ import annotations - -import os -from importlib.metadata import PackageNotFoundError -from importlib.metadata import version -from typing import Literal - -try: - __version__ = version(__name__) -except PackageNotFoundError: - __version__ = "unknown" -TrainTask = Literal["ef", "efs", "efsm"] -PredTask = Literal["e", "ef", "em", "efs", "efsm"] -ROOT = os.path.dirname(os.path.dirname(__file__)) +from __future__ import annotations + +import os +from importlib.metadata import PackageNotFoundError +from importlib.metadata import version +from typing import Literal + +try: + __version__ = version(__name__) +except PackageNotFoundError: + __version__ = "unknown" +TrainTask = Literal["ef", "efs", "efsm"] +PredTask = Literal["e", "ef", "em", "efs", "efsm"] +ROOT = os.path.dirname(os.path.dirname(__file__)) diff --git a/jointContribution/CHGNet/chgnet/data/dataset.py b/jointContribution/CHGNet/chgnet/data/dataset.py index 183cfb30fd..5ae6bcf18f 100644 --- a/jointContribution/CHGNet/chgnet/data/dataset.py +++ b/jointContribution/CHGNet/chgnet/data/dataset.py @@ -1,847 +1,847 @@ -from __future__ import annotations - -import functools -import os -import random -import warnings -from typing import TYPE_CHECKING - -import numpy as np -import paddle -from chgnet import utils -from chgnet.graph import CrystalGraph -from chgnet.graph import CrystalGraphConverter -from pymatgen.core.structure import Structure - -if TYPE_CHECKING: - from collections.abc import Sequence - - from chgnet import TrainTask - from typing_extensions import Self -warnings.filterwarnings("ignore") -DTYPE = "float32" - - -class StructureData(paddle.io.Dataset): - """A simple paddle Dataset of structures.""" - - def __init__( - self, - structures: list[Structure], - energies: list[float], - forces: list[Sequence[Sequence[float]]], - *, - stresses: (list[Sequence[Sequence[float]]] | None) = None, - magmoms: (list[Sequence[Sequence[float]]] | None) = None, - structure_ids: (list | None) = None, - graph_converter: (CrystalGraphConverter | None) = None, - shuffle: bool = True, - ) -> None: - """Initialize the dataset. - - Args: - structures (list[dict]): pymatgen Structure objects. 
- energies (list[float]): [data_size, 1] - forces (list[list[float]]): [data_size, n_atoms, 3] - stresses (list[list[float]], optional): [data_size, 3, 3] - Default = None - magmoms (list[list[float]], optional): [data_size, n_atoms, 1] - Default = None - structure_ids (list, optional): a list of ids to track the structures - Default = None - graph_converter (CrystalGraphConverter, optional): Converts the structures - to graphs. If None, it will be set to CHGNet 0.3.0 converter - with AtomGraph cutoff = 6A. - shuffle (bool): whether to shuffle the sequence of dataset - Default = True - - Raises: - RuntimeError: if the length of structures and labels (energies, forces, - stresses, magmoms) are not equal. - """ - for idx, struct in enumerate(structures): - if not isinstance(struct, Structure): - raise TypeError(f"{idx} is not a pymatgen Structure object: {struct}") - for name in "energies forces stresses magmoms structure_ids".split(): - labels = locals()[name] - if labels is not None and len(labels) != len(structures): - raise RuntimeError( - f"Inconsistent number of structures and labels: len(structures)={len(structures)!r}, len({name})={len(labels)}" - ) - self.structures = structures - self.energies = energies - self.forces = forces - self.stresses = stresses - self.magmoms = magmoms - self.structure_ids = structure_ids - self.keys = np.arange(len(structures)) - if shuffle: - random.shuffle(self.keys) - print(f"{type(self).__name__} imported {len(structures):,} structures") - self.graph_converter = graph_converter or CrystalGraphConverter( - atom_graph_cutoff=6, bond_graph_cutoff=3 - ) - self.failed_idx: list[int] = [] - self.failed_graph_id: dict[str, str] = {} - - @classmethod - def from_vasp( - cls, - file_root: str, - *, - check_electronic_convergence: bool = True, - save_path: (str | None) = None, - graph_converter: (CrystalGraphConverter | None) = None, - shuffle: bool = True, - ) -> Self: - """Parse VASP output files into structures and labels and feed into the dataset. - - Args: - file_root (str): the directory of the VASP calculation outputs - check_electronic_convergence (bool): if set to True, this function will - raise Exception to VASP calculation that did not achieve - electronic convergence. - Default = True - save_path (str): path to save the parsed VASP labels - Default = None - graph_converter (CrystalGraphConverter, optional): Converts the structures - to graphs. If None, it will be set to CHGNet 0.3.0 converter - with AtomGraph cutoff = 6A. - shuffle (bool): whether to shuffle the sequence of dataset - Default = True - """ - result_dict = utils.parse_vasp_dir( - base_dir=file_root, - check_electronic_convergence=check_electronic_convergence, - save_path=save_path, - ) - return cls( - structures=result_dict["structure"], - energies=result_dict["energy_per_atom"], - forces=result_dict["force"], - stresses=None - if result_dict["stress"] in [None, []] - else result_dict["stress"], - magmoms=None - if result_dict["magmom"] in [None, []] - else result_dict["magmom"], - structure_ids=np.arange(len(result_dict["structure"])), - graph_converter=graph_converter, - shuffle=shuffle, - ) - - def __len__(self) -> int: - """Get the number of structures in this dataset.""" - return len(self.keys) - - @functools.cache - def __getitem__(self, idx: int) -> tuple[CrystalGraph, dict]: - """Get one graph for a structure in this dataset. 
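# A minimal construction sketch for StructureData; the NaCl structure and the label
# values are illustrative only.
from pymatgen.core import Lattice, Structure

nacl = Structure(Lattice.cubic(4.2), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
dataset = StructureData(
    structures=[nacl],
    energies=[-3.21],                                 # one energy_per_atom per structure
    forces=[[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]],      # [data_size, n_atoms, 3]
    stresses=None,                                    # optional [data_size, 3, 3]
    magmoms=None,                                     # optional [data_size, n_atoms, 1]
)
graph, targets = dataset[0]   # CrystalGraph plus {"e": ..., "f": ...}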
- - Args: - idx (int): Index of the structure - - Returns: - crystal_graph (CrystalGraph): graph of the crystal structure - targets (dict): list of targets. i.e. energy, force, stress - """ - if idx not in self.failed_idx: - graph_id = self.keys[idx] - try: - struct = self.structures[graph_id] - if self.structure_ids is not None: - mp_id = self.structure_ids[graph_id] - else: - mp_id = graph_id - crystal_graph = self.graph_converter( - struct, graph_id=graph_id, mp_id=mp_id - ) - targets = { - "e": paddle.to_tensor(data=self.energies[graph_id], dtype=DTYPE), - "f": paddle.to_tensor(data=self.forces[graph_id], dtype=DTYPE), - } - if self.stresses is not None: - targets["s"] = ( - paddle.to_tensor(data=self.stresses[graph_id], dtype=DTYPE) - * -0.1 - ) - if self.magmoms is not None: - mag = self.magmoms[graph_id] - if mag is None: - targets["m"] = None - else: - targets["m"] = paddle.abs( - x=paddle.to_tensor(data=mag, dtype=DTYPE) - ) - return crystal_graph, targets - except Exception: - struct = self.structures[graph_id] - self.failed_graph_id[graph_id] = struct.composition.formula - self.failed_idx.append(idx) - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - else: - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - - -class CIFData(paddle.io.Dataset): - """A dataset from CIFs.""" - - def __init__( - self, - cif_path: str, - *, - labels: (str | dict) = "labels.json", - targets: TrainTask = "efsm", - graph_converter: (CrystalGraphConverter | None) = None, - energy_key: str = "energy_per_atom", - force_key: str = "force", - stress_key: str = "stress", - magmom_key: str = "magmom", - shuffle: bool = True, - ) -> None: - """Initialize the dataset from a directory containing CIFs. - - Args: - cif_path (str): path that contain all the graphs, labels.json - labels (str, dict): the path or dictionary of labels - targets ("ef" | "efs" | "efm" | "efsm"): The training targets. - Default = "efsm" - graph_converter (CrystalGraphConverter, optional): Converts the structures - to graphs. If None, it will be set to CHGNet 0.3.0 converter - with AtomGraph cutoff = 6A. - energy_key (str, optional): the key of energy in the labels. - Default = "energy_per_atom" - force_key (str, optional): the key of force in the labels. - Default = "force" - stress_key (str, optional): the key of stress in the labels. - Default = "stress" - magmom_key (str, optional): the key of magmom in the labels. - Default = "magmom" - shuffle (bool): whether to shuffle the sequence of dataset - Default = True - """ - self.data_dir = cif_path - self.data = utils.read_json(os.path.join(cif_path, labels)) - self.cif_ids = list(self.data) - if shuffle: - random.shuffle(self.cif_ids) - print(f"{cif_path}: {len(self.cif_ids):,} structures imported") - self.graph_converter = graph_converter or CrystalGraphConverter( - atom_graph_cutoff=6, bond_graph_cutoff=3 - ) - self.energy_key = energy_key - self.force_key = force_key - self.stress_key = stress_key - self.magmom_key = magmom_key - self.targets = targets - self.failed_idx: list[int] = [] - self.failed_graph_id: dict[str, str] = {} - - def __len__(self) -> int: - """Get the number of structures in this dataset.""" - return len(self.cif_ids) - - @functools.cache - def __getitem__(self, idx: int) -> tuple[CrystalGraph, dict[str, paddle.Tensor]]: - """Get one item in the dataset. - - Returns: - tuple[CrystalGraph, dict[str, Tensor]]: graph of the crystal structure - and dict of targets i.e. 
energy, force, stress - """ - if idx not in self.failed_idx: - try: - graph_id = self.cif_ids[idx] - mp_id = self.data[graph_id].get("material_id", graph_id) - structure = Structure.from_file( - os.path.join(self.data_dir, f"{graph_id}.cif") - ) - crystal_graph = self.graph_converter( - structure, graph_id=graph_id, mp_id=mp_id - ) - targets = {} - for key in self.targets: - if key == "e": - energy = self.data[graph_id][self.energy_key] - targets["e"] = paddle.to_tensor(data=energy, dtype=DTYPE) - elif key == "f": - force = self.data[graph_id][self.force_key] - targets["f"] = paddle.to_tensor(data=force, dtype=DTYPE) - elif key == "s": - stress = self.data[graph_id][self.stress_key] - targets["s"] = paddle.to_tensor(data=stress, dtype=DTYPE) * -0.1 - elif key == "m": - mag = self.data[graph_id][self.magmom_key] - targets["m"] = paddle.abs( - x=paddle.to_tensor(data=mag, dtype=DTYPE) - ) - return crystal_graph, targets - except Exception: - try: - graph_id = self.cif_ids[idx] - except IndexError: - print(idx, len(self.cif_ids)) - structure = Structure.from_file( - os.path.join(self.data_dir, f"{graph_id}.cif") - ) - self.failed_graph_id[graph_id] = structure.composition.formula - self.failed_idx.append(idx) - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - else: - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - - -class GraphData(paddle.io.Dataset): - """A dataset of graphs. This is compatible with the graph.pt documents made by - make_graphs.py. We recommend you to use the dataset to avoid graph conversion steps. - """ - - def __init__( - self, - graph_path: str, - *, - labels: (str | dict) = "labels.json", - targets: TrainTask = "efsm", - exclude: (str | list | None) = None, - energy_key: str = "energy_per_atom", - force_key: str = "force", - stress_key: str = "stress", - magmom_key: str = "magmom", - shuffle: bool = True, - ) -> None: - """Initialize the dataset from a directory containing saved crystal graphs. - - Args: - graph_path (str): path that contain all the graphs, labels.json - labels (str, dict): the path or dictionary of labels. - Default = "labels.json" - targets ("ef" | "efs" | "efm" | "efsm"): The training targets. - Default = "efsm" - exclude (str, list | None): the path or list of excluded graphs. - Default = None - energy_key (str, optional): the key of energy in the labels. - Default = "energy_per_atom" - force_key (str, optional): the key of force in the labels. - Default = "force" - stress_key (str, optional): the key of stress in the labels. - Default = "stress" - magmom_key (str, optional): the key of magmom in the labels. 
- Default = "magmom" - shuffle (bool): whether to shuffle the sequence of dataset - Default = True - """ - self.graph_path = graph_path - if isinstance(labels, str): - labels = os.path.join(graph_path, labels) - print(f"Importing: {labels}") - self.labels = utils.read_json(labels) - elif isinstance(labels, dict): - self.labels = labels - if isinstance(exclude, str): - self.excluded_graph = utils.read_json(exclude) - elif isinstance(exclude, list): - self.excluded_graph = exclude - else: - self.excluded_graph = [] - self.keys = [ - (mp_id, graph_id) for mp_id, dic in self.labels.items() for graph_id in dic - ] - if shuffle: - random.shuffle(self.keys) - print(f"{len(self.labels)} mp_ids, {len(self)} frames imported") - if self.excluded_graph is not None: - print(f"{len(self.excluded_graph)} graphs are pre-excluded") - self.energy_key = energy_key - self.force_key = force_key - self.stress_key = stress_key - self.magmom_key = magmom_key - self.targets = targets - self.failed_idx: list[int] = [] - self.failed_graph_id: list[str] = [] - - def __len__(self) -> int: - """Get the number of graphs in this dataset.""" - return len(self.keys) - - def __getitem__(self, idx) -> tuple[CrystalGraph, dict[str, paddle.Tensor]]: - """Get one item in the dataset. - - Returns: - crystal_graph (CrystalGraph): graph of the crystal structure - targets (dict): dictionary of targets. i.e. energy, force, stress, magmom - """ - if idx not in self.failed_idx: - mp_id, graph_id = self.keys[idx] - if [mp_id, graph_id] in self.excluded_graph: - self.failed_graph_id.append(graph_id) - self.failed_idx.append(idx) - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - try: - graph_path = os.path.join(self.graph_path, f"{graph_id}.pt") - crystal_graph = CrystalGraph.from_file(graph_path) - targets = {} - for key in self.targets: - if key == "e": - energy = self.labels[mp_id][graph_id][self.energy_key] - targets["e"] = paddle.to_tensor(data=energy, dtype=DTYPE) - elif key == "f": - force = self.labels[mp_id][graph_id][self.force_key] - targets["f"] = paddle.to_tensor(data=force, dtype=DTYPE) - elif key == "s": - stress = self.labels[mp_id][graph_id][self.stress_key] - targets["s"] = paddle.to_tensor(data=stress, dtype=DTYPE) * -0.1 - elif key == "m": - mag = self.labels[mp_id][graph_id][self.magmom_key] - if mag is None: - targets["m"] = None - else: - targets["m"] = paddle.abs( - x=paddle.to_tensor(data=mag, dtype=DTYPE) - ) - return crystal_graph, targets - except Exception: - self.failed_graph_id.append(graph_id) - self.failed_idx.append(idx) - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - else: - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - - def get_train_val_test_loader( - self, - train_ratio: float = 0.8, - val_ratio: float = 0.1, - *, - train_key: (list[str] | None) = None, - val_key: (list[str] | None) = None, - test_key: (list[str] | None) = None, - batch_size=32, - num_workers=0, - pin_memory=True, - ) -> tuple[paddle.io.DataLoader, paddle.io.DataLoader, paddle.io.DataLoader]: - """Partition the GraphData using materials id, - randomly select the train_keys, val_keys, test_keys by train val test ratio, - or use pre-defined train_keys, val_keys, and test_keys to create train, val, - test loaders. 
- - Args: - train_ratio (float): The ratio of the dataset to use for training - Default = 0.8 - val_ratio (float): The ratio of the dataset to use for validation - Default: 0.1 - train_key (List(str), optional): a list of mp_ids for train set - val_key (List(str), optional): a list of mp_ids for val set - test_key (List(str), optional): a list of mp_ids for test set - batch_size (int): batch size - Default = 32 - num_workers (int): The number of worker processes for loading the data - see Dataloader documentation for more info - Default = 0 - pin_memory (bool): Whether to pin the memory of the data loaders - Default: True - - Returns: - train_loader, val_loader, test_loader - """ - train_labels, val_labels, test_labels = {}, {}, {} - if train_key is None: - mp_ids = list(self.labels) - random.shuffle(mp_ids) - n_train = int(train_ratio * len(mp_ids)) - n_val = int(val_ratio * len(mp_ids)) - train_key = mp_ids[:n_train] - val_key = mp_ids[n_train : n_train + n_val] - test_key = mp_ids[n_train + n_val :] - for mp_id in train_key: - if mp_id in self.labels: - train_labels[mp_id] = self.labels.pop(mp_id) - train_dataset = GraphData( - graph_path=self.graph_path, - labels=train_labels, - targets=self.targets, - exclude=self.excluded_graph, - energy_key=self.energy_key, - ) - train_loader = paddle.io.DataLoader( - dataset=train_dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - shuffle=True, - num_workers=num_workers, - ) - for mp_id in val_key: - if mp_id in self.labels: - val_labels[mp_id] = self.labels.pop(mp_id) - val_dataset = GraphData( - graph_path=self.graph_path, - labels=val_labels, - targets=self.targets, - exclude=self.excluded_graph, - energy_key=self.energy_key, - ) - val_loader = paddle.io.DataLoader( - dataset=val_dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - shuffle=True, - num_workers=num_workers, - ) - if test_key is not None: - for mp_id in test_key: - if mp_id in self.labels: - test_labels[mp_id] = self.labels.pop(mp_id) - test_dataset = GraphData( - graph_path=self.graph_path, - labels=test_labels, - targets=self.targets, - exclude=self.excluded_graph, - energy_key=self.energy_key, - ) - test_loader = paddle.io.DataLoader( - dataset=test_dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - num_workers=num_workers, - ) - else: - test_loader = None - return train_loader, val_loader, test_loader - - -class StructureJsonData(paddle.io.Dataset): - """Read structure and targets from a JSON file. - This class is used to load the MPtrj dataset. - """ - - def __init__( - self, - data: (str | dict), - graph_converter: CrystalGraphConverter, - *, - targets: TrainTask = "efsm", - energy_key: str = "energy_per_atom", - force_key: str = "force", - stress_key: str = "stress", - magmom_key: str = "magmom", - shuffle: bool = True, - ) -> None: - """Initialize the dataset by reading JSON files. - - Args: - data (str | dict): file path or dir name that contain all the JSONs - graph_converter (CrystalGraphConverter): Converts pymatgen.core.Structure - to CrystalGraph object. - targets ("ef" | "efs" | "efm" | "efsm"): The training targets. - Default = "efsm" - energy_key (str, optional): the key of energy in the labels. - Default = "energy_per_atom" - force_key (str, optional): the key of force in the labels. - Default = "force" - stress_key (str, optional): the key of stress in the labels. - Default = "stress" - magmom_key (str, optional): the key of magmom in the labels. 
- Default = "magmom" - shuffle (bool): whether to shuffle the sequence of dataset - Default = True - """ - if isinstance(data, str): - self.data = {} - if os.path.isdir(data): - for json_path in os.listdir(data): - if json_path.endswith(".json"): - print(f"Importing: {json_path}") - self.data.update(utils.read_json(os.path.join(data, json_path))) - else: - print(f"Importing: {data}") - self.data.update(utils.read_json(data)) - elif isinstance(data, dict): - self.data = data - else: - raise TypeError(f"data must be JSON path or dictionary, got {type(data)}") - self.keys = [ - (mp_id, graph_id) for mp_id, dct in self.data.items() for graph_id in dct - ] - if shuffle: - random.shuffle(self.keys) - print(f"{len(self.data)} MP IDs, {len(self)} structures imported") - self.graph_converter = graph_converter - self.energy_key = energy_key - self.force_key = force_key - self.stress_key = stress_key - self.magmom_key = magmom_key - self.targets = targets - self.failed_idx: list[int] = [] - self.failed_graph_id: dict[str, str] = {} - - def __len__(self) -> int: - """Get the number of structures with targets in the dataset.""" - return len(self.keys) - - @functools.cache - def __getitem__(self, idx: int) -> tuple[CrystalGraph, dict[str, paddle.Tensor]]: - """Get one item in the dataset. - - Returns: - crystal_graph (CrystalGraph): graph of the crystal structure - targets (dict): dictionary of targets. i.e. energy, force, stress, magmom - """ - if idx not in self.failed_idx: - mp_id, graph_id = self.keys[idx] - try: - struct = Structure.from_dict(self.data[mp_id][graph_id]["structure"]) - crystal_graph = self.graph_converter( - struct, graph_id=graph_id, mp_id=mp_id - ) - targets = {} - for key in self.targets: - if key == "e": - energy = self.data[mp_id][graph_id][self.energy_key] - targets["e"] = paddle.to_tensor(data=energy, dtype=DTYPE) - elif key == "f": - force = self.data[mp_id][graph_id][self.force_key] - targets["f"] = paddle.to_tensor(data=force, dtype=DTYPE) - elif key == "s": - stress = self.data[mp_id][graph_id][self.stress_key] - targets["s"] = paddle.to_tensor(data=stress, dtype=DTYPE) * -0.1 - elif key == "m": - mag = self.data[mp_id][graph_id][self.magmom_key] - if mag is None: - targets["m"] = None - else: - targets["m"] = paddle.abs( - x=paddle.to_tensor(data=mag, dtype=DTYPE) - ) - return crystal_graph, targets - except Exception: - structure = Structure.from_dict(self.data[mp_id][graph_id]["structure"]) - self.failed_graph_id[graph_id] = structure.composition.formula - self.failed_idx.append(idx) - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - else: - idx = random.randint(0, len(self) - 1) - return self.__getitem__(idx) - - def get_train_val_test_loader( - self, - train_ratio: float = 0.8, - val_ratio: float = 0.1, - *, - train_key: (list[str] | None) = None, - val_key: (list[str] | None) = None, - test_key: (list[str] | None) = None, - batch_size=32, - num_workers=0, - pin_memory=True, - ) -> tuple[paddle.io.DataLoader, paddle.io.DataLoader, paddle.io.DataLoader]: - """Partition the Dataset using materials id, - randomly select the train_keys, val_keys, test_keys by train val test ratio, - or use pre-defined train_keys, val_keys, and test_keys to create train, val, - test loaders. 
- - Args: - train_ratio (float): The ratio of the dataset to use for training - Default = 0.8 - val_ratio (float): The ratio of the dataset to use for validation - Default: 0.1 - train_key (List(str), optional): a list of mp_ids for train set - val_key (List(str), optional): a list of mp_ids for val set - test_key (List(str), optional): a list of mp_ids for test set - batch_size (int): batch size - Default = 32 - num_workers (int): The number of worker processes for loading the data - see Dataloader documentation for more info - Default = 0 - pin_memory (bool): Whether to pin the memory of the data loaders - Default: True - - Returns: - train_loader, val_loader, test_loader - """ - train_data, val_data, test_data = {}, {}, {} - if train_key is None: - mp_ids = list(self.data) - random.shuffle(mp_ids) - n_train = int(train_ratio * len(mp_ids)) - n_val = int(val_ratio * len(mp_ids)) - train_key = mp_ids[:n_train] - val_key = mp_ids[n_train : n_train + n_val] - test_key = mp_ids[n_train + n_val :] - for mp_id in train_key: - train_data[mp_id] = self.data.pop(mp_id) - train_dataset = StructureJsonData( - data=train_data, - graph_converter=self.graph_converter, - targets=self.targets, - energy_key=self.energy_key, - ) - train_loader = paddle.io.DataLoader( - dataset=train_dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - shuffle=True, - num_workers=num_workers, - ) - for mp_id in val_key: - val_data[mp_id] = self.data.pop(mp_id) - val_dataset = StructureJsonData( - data=val_data, - graph_converter=self.graph_converter, - targets=self.targets, - energy_key=self.energy_key, - ) - val_loader = paddle.io.DataLoader( - dataset=val_dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - shuffle=True, - num_workers=num_workers, - ) - if test_key is not None: - for mp_id in test_key: - test_data[mp_id] = self.data.pop(mp_id) - test_dataset = StructureJsonData( - data=test_data, - graph_converter=self.graph_converter, - targets=self.targets, - energy_key=self.energy_key, - ) - test_loader = paddle.io.DataLoader( - dataset=test_dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - num_workers=num_workers, - ) - else: - test_loader = None - return train_loader, val_loader, test_loader - - -def collate_graphs( - batch_data: list, -) -> tuple[list[CrystalGraph], dict[str, paddle.Tensor]]: - """Collate of list of (graph, target) into batch data. - - Args: - batch_data (list): list of (graph, target(dict)) - - Returns: - graphs (List): a list of graphs - targets (Dict): dictionary of targets, where key and values are: - e (Tensor): energies of the structures [batch_size] - f (Tensor): forces of the structures [n_batch_atoms, 3] - s (Tensor): stresses of the structures [3*batch_size, 3] - m (Tensor): magmom of the structures [n_batch_atoms] - """ - graphs = [graph for graph, _ in batch_data] - all_targets = {key: [] for key in batch_data[0][1]} - all_targets["e"] = paddle.to_tensor( - data=[targets["e"] for _, targets in batch_data], dtype=DTYPE - ) - for _, targets in batch_data: - for target, value in targets.items(): - if target != "e": - all_targets[target].append(value) - return graphs, all_targets - - -def get_train_val_test_loader( - dataset: paddle.io.Dataset, - *, - batch_size: int = 64, - train_ratio: float = 0.8, - val_ratio: float = 0.1, - return_test: bool = True, - num_workers: int = 0, - pin_memory: bool = True, -) -> tuple[paddle.io.DataLoader, paddle.io.DataLoader, paddle.io.DataLoader]: - """Randomly partition a dataset into train, val, test loaders. 
- - Args: - dataset (Dataset): The dataset to partition. - batch_size (int): The batch size for the data loaders - Default = 64 - train_ratio (float): The ratio of the dataset to use for training - Default = 0.8 - val_ratio (float): The ratio of the dataset to use for validation - Default: 0.1 - return_test (bool): Whether to return a test data loader - Default = True - num_workers (int): The number of worker processes for loading the data - see Dataloader documentation for more info - Default = 0 - pin_memory (bool): Whether to pin the memory of the data loaders - Default: True - - Returns: - train_loader, val_loader and optionally test_loader - """ - total_size = len(dataset) - indices = list(range(total_size)) - random.shuffle(indices) - train_size = int(train_ratio * total_size) - val_size = int(val_ratio * total_size) - train_loader = paddle.io.DataLoader( - dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - sampler=paddle.io.SubsetRandomSampler(indices=indices[0:train_size]), - num_workers=num_workers, - pin_memory=pin_memory, - ) - val_loader = paddle.io.DataLoader( - dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - sampler=paddle.io.SubsetRandomSampler( - indices=indices[train_size : train_size + val_size] - ), - num_workers=num_workers, - pin_memory=pin_memory, - ) - if return_test: - test_loader = paddle.io.DataLoader( - dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - sampler=paddle.io.SubsetRandomSampler( - indices=indices[train_size + val_size :] - ), - num_workers=num_workers, - pin_memory=pin_memory, - ) - return train_loader, val_loader, test_loader - return train_loader, val_loader - - -def get_loader( - dataset, *, batch_size: int = 64, num_workers: int = 0, pin_memory: bool = True -) -> paddle.io.DataLoader: - """Get a dataloader from a dataset. - - Args: - dataset (Dataset): The dataset to partition. - batch_size (int): The batch size for the data loaders - Default = 64 - num_workers (int): The number of worker processes for loading the data - see Dataloader documentation for more info - Default = 0 - pin_memory (bool): Whether to pin the memory of the data loaders - Default: True - - Returns: - data_loader - """ - return paddle.io.DataLoader( - dataset=dataset, - batch_size=batch_size, - collate_fn=collate_graphs, - shuffle=True, - num_workers=num_workers, - ) +from __future__ import annotations + +import functools +import os +import random +import warnings +from typing import TYPE_CHECKING + +import numpy as np +import paddle +from chgnet import utils +from chgnet.graph import CrystalGraph +from chgnet.graph import CrystalGraphConverter +from pymatgen.core.structure import Structure + +if TYPE_CHECKING: + from collections.abc import Sequence + + from chgnet import TrainTask + from typing_extensions import Self +warnings.filterwarnings("ignore") +DTYPE = "float32" + + +class StructureData(paddle.io.Dataset): + """A simple paddle Dataset of structures.""" + + def __init__( + self, + structures: list[Structure], + energies: list[float], + forces: list[Sequence[Sequence[float]]], + *, + stresses: (list[Sequence[Sequence[float]]] | None) = None, + magmoms: (list[Sequence[Sequence[float]]] | None) = None, + structure_ids: (list | None) = None, + graph_converter: (CrystalGraphConverter | None) = None, + shuffle: bool = True, + ) -> None: + """Initialize the dataset. + + Args: + structures (list[dict]): pymatgen Structure objects. 
+ energies (list[float]): [data_size, 1] + forces (list[list[float]]): [data_size, n_atoms, 3] + stresses (list[list[float]], optional): [data_size, 3, 3] + Default = None + magmoms (list[list[float]], optional): [data_size, n_atoms, 1] + Default = None + structure_ids (list, optional): a list of ids to track the structures + Default = None + graph_converter (CrystalGraphConverter, optional): Converts the structures + to graphs. If None, it will be set to CHGNet 0.3.0 converter + with AtomGraph cutoff = 6A. + shuffle (bool): whether to shuffle the sequence of dataset + Default = True + + Raises: + RuntimeError: if the length of structures and labels (energies, forces, + stresses, magmoms) are not equal. + """ + for idx, struct in enumerate(structures): + if not isinstance(struct, Structure): + raise TypeError(f"{idx} is not a pymatgen Structure object: {struct}") + for name in "energies forces stresses magmoms structure_ids".split(): + labels = locals()[name] + if labels is not None and len(labels) != len(structures): + raise RuntimeError( + f"Inconsistent number of structures and labels: len(structures)={len(structures)!r}, len({name})={len(labels)}" + ) + self.structures = structures + self.energies = energies + self.forces = forces + self.stresses = stresses + self.magmoms = magmoms + self.structure_ids = structure_ids + self.keys = np.arange(len(structures)) + if shuffle: + random.shuffle(self.keys) + print(f"{type(self).__name__} imported {len(structures):,} structures") + self.graph_converter = graph_converter or CrystalGraphConverter( + atom_graph_cutoff=6, bond_graph_cutoff=3 + ) + self.failed_idx: list[int] = [] + self.failed_graph_id: dict[str, str] = {} + + @classmethod + def from_vasp( + cls, + file_root: str, + *, + check_electronic_convergence: bool = True, + save_path: (str | None) = None, + graph_converter: (CrystalGraphConverter | None) = None, + shuffle: bool = True, + ) -> Self: + """Parse VASP output files into structures and labels and feed into the dataset. + + Args: + file_root (str): the directory of the VASP calculation outputs + check_electronic_convergence (bool): if set to True, this function will + raise Exception to VASP calculation that did not achieve + electronic convergence. + Default = True + save_path (str): path to save the parsed VASP labels + Default = None + graph_converter (CrystalGraphConverter, optional): Converts the structures + to graphs. If None, it will be set to CHGNet 0.3.0 converter + with AtomGraph cutoff = 6A. + shuffle (bool): whether to shuffle the sequence of dataset + Default = True + """ + result_dict = utils.parse_vasp_dir( + base_dir=file_root, + check_electronic_convergence=check_electronic_convergence, + save_path=save_path, + ) + return cls( + structures=result_dict["structure"], + energies=result_dict["energy_per_atom"], + forces=result_dict["force"], + stresses=None + if result_dict["stress"] in [None, []] + else result_dict["stress"], + magmoms=None + if result_dict["magmom"] in [None, []] + else result_dict["magmom"], + structure_ids=np.arange(len(result_dict["structure"])), + graph_converter=graph_converter, + shuffle=shuffle, + ) + + def __len__(self) -> int: + """Get the number of structures in this dataset.""" + return len(self.keys) + + @functools.cache + def __getitem__(self, idx: int) -> tuple[CrystalGraph, dict]: + """Get one graph for a structure in this dataset. 
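# Illustrative usage sketch (not applied by this patch): how a StructureData set
# might be built from in-memory labels and queried for one (CrystalGraph, targets)
# pair. The toy NaCl structures and zero-valued labels are placeholders, and the
# import path chgnet.data.dataset is an assumption.
from pymatgen.core import Lattice, Structure

from chgnet.data.dataset import StructureData  # assumed module path

structures = [
    Structure(Lattice.cubic(4.0), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
    for _ in range(10)
]
energies = [-3.5 for _ in range(10)]  # energy per atom, placeholder values
forces = [[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] for _ in range(10)]  # [n_atoms, 3]

dataset = StructureData(structures=structures, energies=energies, forces=forces)
graph, targets = dataset[0]  # CrystalGraph plus {"e": Tensor, "f": Tensor}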
+ + Args: + idx (int): Index of the structure + + Returns: + crystal_graph (CrystalGraph): graph of the crystal structure + targets (dict): list of targets. i.e. energy, force, stress + """ + if idx not in self.failed_idx: + graph_id = self.keys[idx] + try: + struct = self.structures[graph_id] + if self.structure_ids is not None: + mp_id = self.structure_ids[graph_id] + else: + mp_id = graph_id + crystal_graph = self.graph_converter( + struct, graph_id=graph_id, mp_id=mp_id + ) + targets = { + "e": paddle.to_tensor(data=self.energies[graph_id], dtype=DTYPE), + "f": paddle.to_tensor(data=self.forces[graph_id], dtype=DTYPE), + } + if self.stresses is not None: + targets["s"] = ( + paddle.to_tensor(data=self.stresses[graph_id], dtype=DTYPE) + * -0.1 + ) + if self.magmoms is not None: + mag = self.magmoms[graph_id] + if mag is None: + targets["m"] = None + else: + targets["m"] = paddle.abs( + x=paddle.to_tensor(data=mag, dtype=DTYPE) + ) + return crystal_graph, targets + except Exception: + struct = self.structures[graph_id] + self.failed_graph_id[graph_id] = struct.composition.formula + self.failed_idx.append(idx) + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + else: + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + + +class CIFData(paddle.io.Dataset): + """A dataset from CIFs.""" + + def __init__( + self, + cif_path: str, + *, + labels: (str | dict) = "labels.json", + targets: TrainTask = "efsm", + graph_converter: (CrystalGraphConverter | None) = None, + energy_key: str = "energy_per_atom", + force_key: str = "force", + stress_key: str = "stress", + magmom_key: str = "magmom", + shuffle: bool = True, + ) -> None: + """Initialize the dataset from a directory containing CIFs. + + Args: + cif_path (str): path that contain all the graphs, labels.json + labels (str, dict): the path or dictionary of labels + targets ("ef" | "efs" | "efm" | "efsm"): The training targets. + Default = "efsm" + graph_converter (CrystalGraphConverter, optional): Converts the structures + to graphs. If None, it will be set to CHGNet 0.3.0 converter + with AtomGraph cutoff = 6A. + energy_key (str, optional): the key of energy in the labels. + Default = "energy_per_atom" + force_key (str, optional): the key of force in the labels. + Default = "force" + stress_key (str, optional): the key of stress in the labels. + Default = "stress" + magmom_key (str, optional): the key of magmom in the labels. + Default = "magmom" + shuffle (bool): whether to shuffle the sequence of dataset + Default = True + """ + self.data_dir = cif_path + self.data = utils.read_json(os.path.join(cif_path, labels)) + self.cif_ids = list(self.data) + if shuffle: + random.shuffle(self.cif_ids) + print(f"{cif_path}: {len(self.cif_ids):,} structures imported") + self.graph_converter = graph_converter or CrystalGraphConverter( + atom_graph_cutoff=6, bond_graph_cutoff=3 + ) + self.energy_key = energy_key + self.force_key = force_key + self.stress_key = stress_key + self.magmom_key = magmom_key + self.targets = targets + self.failed_idx: list[int] = [] + self.failed_graph_id: dict[str, str] = {} + + def __len__(self) -> int: + """Get the number of structures in this dataset.""" + return len(self.cif_ids) + + @functools.cache + def __getitem__(self, idx: int) -> tuple[CrystalGraph, dict[str, paddle.Tensor]]: + """Get one item in the dataset. + + Returns: + tuple[CrystalGraph, dict[str, Tensor]]: graph of the crystal structure + and dict of targets i.e. 
energy, force, stress + """ + if idx not in self.failed_idx: + try: + graph_id = self.cif_ids[idx] + mp_id = self.data[graph_id].get("material_id", graph_id) + structure = Structure.from_file( + os.path.join(self.data_dir, f"{graph_id}.cif") + ) + crystal_graph = self.graph_converter( + structure, graph_id=graph_id, mp_id=mp_id + ) + targets = {} + for key in self.targets: + if key == "e": + energy = self.data[graph_id][self.energy_key] + targets["e"] = paddle.to_tensor(data=energy, dtype=DTYPE) + elif key == "f": + force = self.data[graph_id][self.force_key] + targets["f"] = paddle.to_tensor(data=force, dtype=DTYPE) + elif key == "s": + stress = self.data[graph_id][self.stress_key] + targets["s"] = paddle.to_tensor(data=stress, dtype=DTYPE) * -0.1 + elif key == "m": + mag = self.data[graph_id][self.magmom_key] + targets["m"] = paddle.abs( + x=paddle.to_tensor(data=mag, dtype=DTYPE) + ) + return crystal_graph, targets + except Exception: + try: + graph_id = self.cif_ids[idx] + except IndexError: + print(idx, len(self.cif_ids)) + structure = Structure.from_file( + os.path.join(self.data_dir, f"{graph_id}.cif") + ) + self.failed_graph_id[graph_id] = structure.composition.formula + self.failed_idx.append(idx) + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + else: + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + + +class GraphData(paddle.io.Dataset): + """A dataset of graphs. This is compatible with the graph.pt documents made by + make_graphs.py. We recommend you to use the dataset to avoid graph conversion steps. + """ + + def __init__( + self, + graph_path: str, + *, + labels: (str | dict) = "labels.json", + targets: TrainTask = "efsm", + exclude: (str | list | None) = None, + energy_key: str = "energy_per_atom", + force_key: str = "force", + stress_key: str = "stress", + magmom_key: str = "magmom", + shuffle: bool = True, + ) -> None: + """Initialize the dataset from a directory containing saved crystal graphs. + + Args: + graph_path (str): path that contain all the graphs, labels.json + labels (str, dict): the path or dictionary of labels. + Default = "labels.json" + targets ("ef" | "efs" | "efm" | "efsm"): The training targets. + Default = "efsm" + exclude (str, list | None): the path or list of excluded graphs. + Default = None + energy_key (str, optional): the key of energy in the labels. + Default = "energy_per_atom" + force_key (str, optional): the key of force in the labels. + Default = "force" + stress_key (str, optional): the key of stress in the labels. + Default = "stress" + magmom_key (str, optional): the key of magmom in the labels. 
+ Default = "magmom" + shuffle (bool): whether to shuffle the sequence of dataset + Default = True + """ + self.graph_path = graph_path + if isinstance(labels, str): + labels = os.path.join(graph_path, labels) + print(f"Importing: {labels}") + self.labels = utils.read_json(labels) + elif isinstance(labels, dict): + self.labels = labels + if isinstance(exclude, str): + self.excluded_graph = utils.read_json(exclude) + elif isinstance(exclude, list): + self.excluded_graph = exclude + else: + self.excluded_graph = [] + self.keys = [ + (mp_id, graph_id) for mp_id, dic in self.labels.items() for graph_id in dic + ] + if shuffle: + random.shuffle(self.keys) + print(f"{len(self.labels)} mp_ids, {len(self)} frames imported") + if self.excluded_graph is not None: + print(f"{len(self.excluded_graph)} graphs are pre-excluded") + self.energy_key = energy_key + self.force_key = force_key + self.stress_key = stress_key + self.magmom_key = magmom_key + self.targets = targets + self.failed_idx: list[int] = [] + self.failed_graph_id: list[str] = [] + + def __len__(self) -> int: + """Get the number of graphs in this dataset.""" + return len(self.keys) + + def __getitem__(self, idx) -> tuple[CrystalGraph, dict[str, paddle.Tensor]]: + """Get one item in the dataset. + + Returns: + crystal_graph (CrystalGraph): graph of the crystal structure + targets (dict): dictionary of targets. i.e. energy, force, stress, magmom + """ + if idx not in self.failed_idx: + mp_id, graph_id = self.keys[idx] + if [mp_id, graph_id] in self.excluded_graph: + self.failed_graph_id.append(graph_id) + self.failed_idx.append(idx) + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + try: + graph_path = os.path.join(self.graph_path, f"{graph_id}.pt") + crystal_graph = CrystalGraph.from_file(graph_path) + targets = {} + for key in self.targets: + if key == "e": + energy = self.labels[mp_id][graph_id][self.energy_key] + targets["e"] = paddle.to_tensor(data=energy, dtype=DTYPE) + elif key == "f": + force = self.labels[mp_id][graph_id][self.force_key] + targets["f"] = paddle.to_tensor(data=force, dtype=DTYPE) + elif key == "s": + stress = self.labels[mp_id][graph_id][self.stress_key] + targets["s"] = paddle.to_tensor(data=stress, dtype=DTYPE) * -0.1 + elif key == "m": + mag = self.labels[mp_id][graph_id][self.magmom_key] + if mag is None: + targets["m"] = None + else: + targets["m"] = paddle.abs( + x=paddle.to_tensor(data=mag, dtype=DTYPE) + ) + return crystal_graph, targets + except Exception: + self.failed_graph_id.append(graph_id) + self.failed_idx.append(idx) + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + else: + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + + def get_train_val_test_loader( + self, + train_ratio: float = 0.8, + val_ratio: float = 0.1, + *, + train_key: (list[str] | None) = None, + val_key: (list[str] | None) = None, + test_key: (list[str] | None) = None, + batch_size=32, + num_workers=0, + pin_memory=True, + ) -> tuple[paddle.io.DataLoader, paddle.io.DataLoader, paddle.io.DataLoader]: + """Partition the GraphData using materials id, + randomly select the train_keys, val_keys, test_keys by train val test ratio, + or use pre-defined train_keys, val_keys, and test_keys to create train, val, + test loaders. 
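# Illustrative usage sketch (not applied by this patch): loading graphs
# pre-computed by make_graphs.py and splitting them by mp_id. "./graphs" and its
# labels.json are placeholder paths; chgnet.data.dataset is an assumed import path.
from chgnet.data.dataset import GraphData  # assumed module path

graph_data = GraphData(graph_path="./graphs", labels="labels.json", targets="efsm")
train_loader, val_loader, test_loader = graph_data.get_train_val_test_loader(
    train_ratio=0.8, val_ratio=0.1, batch_size=32
)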
+ + Args: + train_ratio (float): The ratio of the dataset to use for training + Default = 0.8 + val_ratio (float): The ratio of the dataset to use for validation + Default: 0.1 + train_key (List(str), optional): a list of mp_ids for train set + val_key (List(str), optional): a list of mp_ids for val set + test_key (List(str), optional): a list of mp_ids for test set + batch_size (int): batch size + Default = 32 + num_workers (int): The number of worker processes for loading the data + see Dataloader documentation for more info + Default = 0 + pin_memory (bool): Whether to pin the memory of the data loaders + Default: True + + Returns: + train_loader, val_loader, test_loader + """ + train_labels, val_labels, test_labels = {}, {}, {} + if train_key is None: + mp_ids = list(self.labels) + random.shuffle(mp_ids) + n_train = int(train_ratio * len(mp_ids)) + n_val = int(val_ratio * len(mp_ids)) + train_key = mp_ids[:n_train] + val_key = mp_ids[n_train : n_train + n_val] + test_key = mp_ids[n_train + n_val :] + for mp_id in train_key: + if mp_id in self.labels: + train_labels[mp_id] = self.labels.pop(mp_id) + train_dataset = GraphData( + graph_path=self.graph_path, + labels=train_labels, + targets=self.targets, + exclude=self.excluded_graph, + energy_key=self.energy_key, + ) + train_loader = paddle.io.DataLoader( + dataset=train_dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + shuffle=True, + num_workers=num_workers, + ) + for mp_id in val_key: + if mp_id in self.labels: + val_labels[mp_id] = self.labels.pop(mp_id) + val_dataset = GraphData( + graph_path=self.graph_path, + labels=val_labels, + targets=self.targets, + exclude=self.excluded_graph, + energy_key=self.energy_key, + ) + val_loader = paddle.io.DataLoader( + dataset=val_dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + shuffle=True, + num_workers=num_workers, + ) + if test_key is not None: + for mp_id in test_key: + if mp_id in self.labels: + test_labels[mp_id] = self.labels.pop(mp_id) + test_dataset = GraphData( + graph_path=self.graph_path, + labels=test_labels, + targets=self.targets, + exclude=self.excluded_graph, + energy_key=self.energy_key, + ) + test_loader = paddle.io.DataLoader( + dataset=test_dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + num_workers=num_workers, + ) + else: + test_loader = None + return train_loader, val_loader, test_loader + + +class StructureJsonData(paddle.io.Dataset): + """Read structure and targets from a JSON file. + This class is used to load the MPtrj dataset. + """ + + def __init__( + self, + data: (str | dict), + graph_converter: CrystalGraphConverter, + *, + targets: TrainTask = "efsm", + energy_key: str = "energy_per_atom", + force_key: str = "force", + stress_key: str = "stress", + magmom_key: str = "magmom", + shuffle: bool = True, + ) -> None: + """Initialize the dataset by reading JSON files. + + Args: + data (str | dict): file path or dir name that contain all the JSONs + graph_converter (CrystalGraphConverter): Converts pymatgen.core.Structure + to CrystalGraph object. + targets ("ef" | "efs" | "efm" | "efsm"): The training targets. + Default = "efsm" + energy_key (str, optional): the key of energy in the labels. + Default = "energy_per_atom" + force_key (str, optional): the key of force in the labels. + Default = "force" + stress_key (str, optional): the key of stress in the labels. + Default = "stress" + magmom_key (str, optional): the key of magmom in the labels. 
+ Default = "magmom" + shuffle (bool): whether to shuffle the sequence of dataset + Default = True + """ + if isinstance(data, str): + self.data = {} + if os.path.isdir(data): + for json_path in os.listdir(data): + if json_path.endswith(".json"): + print(f"Importing: {json_path}") + self.data.update(utils.read_json(os.path.join(data, json_path))) + else: + print(f"Importing: {data}") + self.data.update(utils.read_json(data)) + elif isinstance(data, dict): + self.data = data + else: + raise TypeError(f"data must be JSON path or dictionary, got {type(data)}") + self.keys = [ + (mp_id, graph_id) for mp_id, dct in self.data.items() for graph_id in dct + ] + if shuffle: + random.shuffle(self.keys) + print(f"{len(self.data)} MP IDs, {len(self)} structures imported") + self.graph_converter = graph_converter + self.energy_key = energy_key + self.force_key = force_key + self.stress_key = stress_key + self.magmom_key = magmom_key + self.targets = targets + self.failed_idx: list[int] = [] + self.failed_graph_id: dict[str, str] = {} + + def __len__(self) -> int: + """Get the number of structures with targets in the dataset.""" + return len(self.keys) + + @functools.cache + def __getitem__(self, idx: int) -> tuple[CrystalGraph, dict[str, paddle.Tensor]]: + """Get one item in the dataset. + + Returns: + crystal_graph (CrystalGraph): graph of the crystal structure + targets (dict): dictionary of targets. i.e. energy, force, stress, magmom + """ + if idx not in self.failed_idx: + mp_id, graph_id = self.keys[idx] + try: + struct = Structure.from_dict(self.data[mp_id][graph_id]["structure"]) + crystal_graph = self.graph_converter( + struct, graph_id=graph_id, mp_id=mp_id + ) + targets = {} + for key in self.targets: + if key == "e": + energy = self.data[mp_id][graph_id][self.energy_key] + targets["e"] = paddle.to_tensor(data=energy, dtype=DTYPE) + elif key == "f": + force = self.data[mp_id][graph_id][self.force_key] + targets["f"] = paddle.to_tensor(data=force, dtype=DTYPE) + elif key == "s": + stress = self.data[mp_id][graph_id][self.stress_key] + targets["s"] = paddle.to_tensor(data=stress, dtype=DTYPE) * -0.1 + elif key == "m": + mag = self.data[mp_id][graph_id][self.magmom_key] + if mag is None: + targets["m"] = None + else: + targets["m"] = paddle.abs( + x=paddle.to_tensor(data=mag, dtype=DTYPE) + ) + return crystal_graph, targets + except Exception: + structure = Structure.from_dict(self.data[mp_id][graph_id]["structure"]) + self.failed_graph_id[graph_id] = structure.composition.formula + self.failed_idx.append(idx) + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + else: + idx = random.randint(0, len(self) - 1) + return self.__getitem__(idx) + + def get_train_val_test_loader( + self, + train_ratio: float = 0.8, + val_ratio: float = 0.1, + *, + train_key: (list[str] | None) = None, + val_key: (list[str] | None) = None, + test_key: (list[str] | None) = None, + batch_size=32, + num_workers=0, + pin_memory=True, + ) -> tuple[paddle.io.DataLoader, paddle.io.DataLoader, paddle.io.DataLoader]: + """Partition the Dataset using materials id, + randomly select the train_keys, val_keys, test_keys by train val test ratio, + or use pre-defined train_keys, val_keys, and test_keys to create train, val, + test loaders. 
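# Illustrative usage sketch (not applied by this patch): reading an MPtrj-style
# JSON file and partitioning it by mp_id. "mptrj.json" is a placeholder path;
# chgnet.data.dataset is an assumed import path.
from chgnet.data.dataset import StructureJsonData  # assumed module path
from chgnet.graph import CrystalGraphConverter

converter = CrystalGraphConverter(atom_graph_cutoff=6, bond_graph_cutoff=3)
json_data = StructureJsonData(data="mptrj.json", graph_converter=converter)
train_loader, val_loader, test_loader = json_data.get_train_val_test_loader(
    train_ratio=0.8, val_ratio=0.1, batch_size=32
)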
+ + Args: + train_ratio (float): The ratio of the dataset to use for training + Default = 0.8 + val_ratio (float): The ratio of the dataset to use for validation + Default: 0.1 + train_key (List(str), optional): a list of mp_ids for train set + val_key (List(str), optional): a list of mp_ids for val set + test_key (List(str), optional): a list of mp_ids for test set + batch_size (int): batch size + Default = 32 + num_workers (int): The number of worker processes for loading the data + see Dataloader documentation for more info + Default = 0 + pin_memory (bool): Whether to pin the memory of the data loaders + Default: True + + Returns: + train_loader, val_loader, test_loader + """ + train_data, val_data, test_data = {}, {}, {} + if train_key is None: + mp_ids = list(self.data) + random.shuffle(mp_ids) + n_train = int(train_ratio * len(mp_ids)) + n_val = int(val_ratio * len(mp_ids)) + train_key = mp_ids[:n_train] + val_key = mp_ids[n_train : n_train + n_val] + test_key = mp_ids[n_train + n_val :] + for mp_id in train_key: + train_data[mp_id] = self.data.pop(mp_id) + train_dataset = StructureJsonData( + data=train_data, + graph_converter=self.graph_converter, + targets=self.targets, + energy_key=self.energy_key, + ) + train_loader = paddle.io.DataLoader( + dataset=train_dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + shuffle=True, + num_workers=num_workers, + ) + for mp_id in val_key: + val_data[mp_id] = self.data.pop(mp_id) + val_dataset = StructureJsonData( + data=val_data, + graph_converter=self.graph_converter, + targets=self.targets, + energy_key=self.energy_key, + ) + val_loader = paddle.io.DataLoader( + dataset=val_dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + shuffle=True, + num_workers=num_workers, + ) + if test_key is not None: + for mp_id in test_key: + test_data[mp_id] = self.data.pop(mp_id) + test_dataset = StructureJsonData( + data=test_data, + graph_converter=self.graph_converter, + targets=self.targets, + energy_key=self.energy_key, + ) + test_loader = paddle.io.DataLoader( + dataset=test_dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + num_workers=num_workers, + ) + else: + test_loader = None + return train_loader, val_loader, test_loader + + +def collate_graphs( + batch_data: list, +) -> tuple[list[CrystalGraph], dict[str, paddle.Tensor]]: + """Collate of list of (graph, target) into batch data. + + Args: + batch_data (list): list of (graph, target(dict)) + + Returns: + graphs (List): a list of graphs + targets (Dict): dictionary of targets, where key and values are: + e (Tensor): energies of the structures [batch_size] + f (Tensor): forces of the structures [n_batch_atoms, 3] + s (Tensor): stresses of the structures [3*batch_size, 3] + m (Tensor): magmom of the structures [n_batch_atoms] + """ + graphs = [graph for graph, _ in batch_data] + all_targets = {key: [] for key in batch_data[0][1]} + all_targets["e"] = paddle.to_tensor( + data=[targets["e"] for _, targets in batch_data], dtype=DTYPE + ) + for _, targets in batch_data: + for target, value in targets.items(): + if target != "e": + all_targets[target].append(value) + return graphs, all_targets + + +def get_train_val_test_loader( + dataset: paddle.io.Dataset, + *, + batch_size: int = 64, + train_ratio: float = 0.8, + val_ratio: float = 0.1, + return_test: bool = True, + num_workers: int = 0, + pin_memory: bool = True, +) -> tuple[paddle.io.DataLoader, paddle.io.DataLoader, paddle.io.DataLoader]: + """Randomly partition a dataset into train, val, test loaders. 
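# Illustrative usage sketch (not applied by this patch): random partition of a
# dataset with the module-level helpers. "./cif_dir" is a placeholder directory
# holding *.cif files plus labels.json; chgnet.data.dataset is an assumed
# import path.
from chgnet.data.dataset import (  # assumed module path
    CIFData,
    get_loader,
    get_train_val_test_loader,
)

cif_data = CIFData(cif_path="./cif_dir", targets="ef")
train_loader, val_loader, test_loader = get_train_val_test_loader(
    cif_data, batch_size=16, train_ratio=0.8, val_ratio=0.1
)
full_loader = get_loader(cif_data, batch_size=16)  # one shuffled loader instead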
+ + Args: + dataset (Dataset): The dataset to partition. + batch_size (int): The batch size for the data loaders + Default = 64 + train_ratio (float): The ratio of the dataset to use for training + Default = 0.8 + val_ratio (float): The ratio of the dataset to use for validation + Default: 0.1 + return_test (bool): Whether to return a test data loader + Default = True + num_workers (int): The number of worker processes for loading the data + see Dataloader documentation for more info + Default = 0 + pin_memory (bool): Whether to pin the memory of the data loaders + Default: True + + Returns: + train_loader, val_loader and optionally test_loader + """ + total_size = len(dataset) + indices = list(range(total_size)) + random.shuffle(indices) + train_size = int(train_ratio * total_size) + val_size = int(val_ratio * total_size) + train_loader = paddle.io.DataLoader( + dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + sampler=paddle.io.SubsetRandomSampler(indices=indices[0:train_size]), + num_workers=num_workers, + pin_memory=pin_memory, + ) + val_loader = paddle.io.DataLoader( + dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + sampler=paddle.io.SubsetRandomSampler( + indices=indices[train_size : train_size + val_size] + ), + num_workers=num_workers, + pin_memory=pin_memory, + ) + if return_test: + test_loader = paddle.io.DataLoader( + dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + sampler=paddle.io.SubsetRandomSampler( + indices=indices[train_size + val_size :] + ), + num_workers=num_workers, + pin_memory=pin_memory, + ) + return train_loader, val_loader, test_loader + return train_loader, val_loader + + +def get_loader( + dataset, *, batch_size: int = 64, num_workers: int = 0, pin_memory: bool = True +) -> paddle.io.DataLoader: + """Get a dataloader from a dataset. + + Args: + dataset (Dataset): The dataset to partition. 
+ batch_size (int): The batch size for the data loaders + Default = 64 + num_workers (int): The number of worker processes for loading the data + see Dataloader documentation for more info + Default = 0 + pin_memory (bool): Whether to pin the memory of the data loaders + Default: True + + Returns: + data_loader + """ + return paddle.io.DataLoader( + dataset=dataset, + batch_size=batch_size, + collate_fn=collate_graphs, + shuffle=True, + num_workers=num_workers, + ) diff --git a/jointContribution/CHGNet/chgnet/graph/__init__.py b/jointContribution/CHGNet/chgnet/graph/__init__.py index 951be595c6..909ac80f8d 100644 --- a/jointContribution/CHGNet/chgnet/graph/__init__.py +++ b/jointContribution/CHGNet/chgnet/graph/__init__.py @@ -1,4 +1,4 @@ -from __future__ import annotations - -from chgnet.graph.converter import CrystalGraphConverter # noqa -from chgnet.graph.crystalgraph import CrystalGraph # noqa +from __future__ import annotations + +from chgnet.graph.converter import CrystalGraphConverter # noqa +from chgnet.graph.crystalgraph import CrystalGraph # noqa diff --git a/jointContribution/CHGNet/chgnet/graph/converter.py b/jointContribution/CHGNet/chgnet/graph/converter.py index 0beea60b38..654720f0d0 100644 --- a/jointContribution/CHGNet/chgnet/graph/converter.py +++ b/jointContribution/CHGNet/chgnet/graph/converter.py @@ -1,264 +1,264 @@ -from __future__ import annotations - -import gc -import sys -import warnings -from typing import TYPE_CHECKING - -import numpy as np -import paddle -from chgnet.graph.crystalgraph import CrystalGraph -from chgnet.graph.graph import Graph -from chgnet.graph.graph import Node - -if TYPE_CHECKING: - from typing import Literal - - from pymatgen.core import Structure - from typing_extensions import Self -# try: -# from chgnet.graph.cygraph import make_graph -# except (ImportError, AttributeError): -# make_graph = None -make_graph = None -DTYPE = "float32" - - -class CrystalGraphConverter(paddle.nn.Layer): - """Convert a pymatgen.core.Structure to a CrystalGraph - The CrystalGraph dataclass stores essential field to make sure that - gradients like force and stress can be calculated through back-propagation later. - """ - - make_graph = None - - def __init__( - self, - *, - atom_graph_cutoff: float = 6, - bond_graph_cutoff: float = 3, - algorithm: Literal["legacy", "fast"] = "fast", - on_isolated_atoms: Literal["ignore", "warn", "error"] = "error", - verbose: bool = False, - ) -> None: - """Initialize the Crystal Graph Converter. - - Args: - atom_graph_cutoff (float): cutoff radius to search for neighboring atom in - atom_graph. Default = 5. - bond_graph_cutoff (float): bond length threshold to include bond in - bond_graph. Default = 3. - algorithm ('legacy' | 'fast'): algorithm to use for converting graphs. - 'legacy': python implementation of graph creation - 'fast': C implementation of graph creation, this is faster, - but will need the cygraph.c file correctly compiled from pip install - Default = 'fast' - on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures - with isolated atoms. - Default = 'error' - verbose (bool): whether to print the CrystalGraphConverter - initialization message. Default = False. 
- """ - super().__init__() - self.atom_graph_cutoff = atom_graph_cutoff - self.bond_graph_cutoff = ( - atom_graph_cutoff if bond_graph_cutoff is None else bond_graph_cutoff - ) - self.on_isolated_atoms = on_isolated_atoms - self.create_graph = self._create_graph_legacy - self.algorithm = "legacy" - if algorithm == "fast": - if make_graph is not None: - self.create_graph = self._create_graph_fast - self.algorithm = "fast" - else: - warnings.warn( - "`fast` algorithm is not available, using `legacy`", - UserWarning, - stacklevel=1, - ) - elif algorithm != "legacy": - warnings.warn( - f"Unknown algorithm={algorithm!r}, using `legacy`", - UserWarning, - stacklevel=1, - ) - if verbose: - print(self) - - def __repr__(self) -> str: - """String representation of the CrystalGraphConverter.""" - atom_graph_cutoff = self.atom_graph_cutoff - bond_graph_cutoff = self.bond_graph_cutoff - algorithm = self.algorithm - cls_name = type(self).__name__ - return f"{cls_name}(algorithm={algorithm!r}, atom_graph_cutoff={atom_graph_cutoff!r}, bond_graph_cutoff={bond_graph_cutoff!r})" - - def forward(self, structure: Structure, graph_id=None, mp_id=None) -> CrystalGraph: - """Convert a structure, return a CrystalGraph. - - Args: - structure (pymatgen.core.Structure): structure to convert - graph_id (str): an id to keep track of this crystal graph - Default = None - mp_id (str): Materials Project id of this structure - Default = None - - Return: - CrystalGraph that is ready to use by CHGNet - """ - n_atoms = len(structure) - data = [site.specie.Z for site in structure] - atomic_number = paddle.to_tensor(data, dtype="int32", stop_gradient=not False) - atom_frac_coord = paddle.to_tensor( - data=structure.frac_coords, dtype=DTYPE, stop_gradient=not True - ) - lattice = paddle.to_tensor( - data=structure.lattice.matrix, dtype=DTYPE, stop_gradient=not True - ) - center_index, neighbor_index, image, distance = structure.get_neighbor_list( - r=self.atom_graph_cutoff, sites=structure.sites, numerical_tol=1e-08 - ) - graph = self.create_graph( - n_atoms, center_index, neighbor_index, image, distance - ) - atom_graph, directed2undirected = graph.adjacency_list() - atom_graph = paddle.to_tensor(data=atom_graph, dtype="int32") - directed2undirected = paddle.to_tensor(data=directed2undirected, dtype="int32") - try: - bond_graph, undirected2directed = graph.line_graph_adjacency_list( - cutoff=self.bond_graph_cutoff - ) - except Exception as exc: - structure.to(filename="bond_graph_error.cif") - raise RuntimeError( - f"Failed creating bond graph for {graph_id}, check bond_graph_error.cif" - ) from exc - bond_graph = paddle.to_tensor(data=bond_graph, dtype="int32") - undirected2directed = paddle.to_tensor(data=undirected2directed, dtype="int32") - n_isolated_atoms = len({*range(n_atoms)} - {*center_index}) - if n_isolated_atoms: - atom_graph_cutoff = self.atom_graph_cutoff - msg = f"Structure graph_id={graph_id!r} has {n_isolated_atoms} isolated atom(s) with atom_graph_cutoff={atom_graph_cutoff!r}. 
CHGNet calculation will likely go wrong" - if self.on_isolated_atoms == "error": - raise ValueError(msg) - elif self.on_isolated_atoms == "warn": - print(msg, file=sys.stderr) - return CrystalGraph( - atomic_number=atomic_number, - atom_frac_coord=atom_frac_coord, - atom_graph=atom_graph, - neighbor_image=paddle.to_tensor(data=image, dtype=DTYPE), - directed2undirected=directed2undirected, - undirected2directed=undirected2directed, - bond_graph=bond_graph, - lattice=lattice, - graph_id=graph_id, - mp_id=mp_id, - composition=structure.composition.formula, - atom_graph_cutoff=self.atom_graph_cutoff, - bond_graph_cutoff=self.bond_graph_cutoff, - ) - - @staticmethod - def _create_graph_legacy( - n_atoms: int, - center_index: np.ndarray, - neighbor_index: np.ndarray, - image: np.ndarray, - distance: np.ndarray, - ) -> Graph: - """Given structure information, create a Graph structure to be used to - create Crystal_Graph using pure python implementation. - - Args: - n_atoms (int): the number of atoms in the structure - center_index (np.ndarray): np array of indices of center atoms. - [num_undirected_bonds] - neighbor_index (np.ndarray): np array of indices of neighbor atoms. - [num_undirected_bonds] - image (np.ndarray): np array of images for each edge. - [num_undirected_bonds, 3] - distance (np.ndarray): np array of distances. - [num_undirected_bonds] - - Return: - Graph data structure used to create Crystal_Graph object - """ - graph = Graph([Node(index=idx) for idx in range(n_atoms)]) - for ii, jj, img, dist in zip( - center_index, neighbor_index, image, distance, strict=True - ): - graph.add_edge(center_index=ii, neighbor_index=jj, image=img, distance=dist) - return graph - - @staticmethod - def _create_graph_fast( - n_atoms: int, - center_index: np.ndarray, - neighbor_index: np.ndarray, - image: np.ndarray, - distance: np.ndarray, - ) -> Graph: - """Given structure information, create a Graph structure to be used to - create Crystal_Graph using C implementation. - - NOTE: this is the fast version of _create_graph_legacy optimized - in c (~3x speedup). - - Args: - n_atoms (int): the number of atoms in the structure - center_index (np.ndarray): np array of indices of center atoms. - [num_undirected_bonds] - neighbor_index (np.ndarray): np array of indices of neighbor atoms. - [num_undirected_bonds] - image (np.ndarray): np array of images for each edge. - [num_undirected_bonds, 3] - distance (np.ndarray): np array of distances. - [num_undirected_bonds] - - Return: - Graph data structure used to create Crystal_Graph object - """ - center_index = np.ascontiguousarray(center_index) - neighbor_index = np.ascontiguousarray(neighbor_index) - image = np.ascontiguousarray(image, dtype=np.int64) - distance = np.ascontiguousarray(distance) - gc_saved = gc.get_threshold() - gc.set_threshold(0) - nodes, dir_edges_list, undir_edges_list, undirected_edges = make_graph( - center_index, len(center_index), neighbor_index, image, distance, n_atoms - ) - graph = Graph(nodes=nodes) - graph.directed_edges_list = dir_edges_list - graph.undirected_edges_list = undir_edges_list - graph.undirected_edges = undirected_edges - gc.set_threshold(gc_saved[0]) - return graph - - def set_isolated_atom_response( - self, on_isolated_atoms: Literal["ignore", "warn", "error"] - ) -> None: - """Set the graph converter's response to isolated atom graph - Args: - on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures - with isolated atoms. - Default = 'error'. 
- - Returns: - None - """ - self.on_isolated_atoms = on_isolated_atoms - - def as_dict(self) -> dict[str, str | float]: - """Save the args of the graph converter.""" - return { - "atom_graph_cutoff": self.atom_graph_cutoff, - "bond_graph_cutoff": self.bond_graph_cutoff, - "algorithm": self.algorithm, - } - - @classmethod - def from_dict(cls, dct: dict) -> Self: - """Create converter from dictionary.""" - return cls(**dct) +from __future__ import annotations + +import gc +import sys +import warnings +from typing import TYPE_CHECKING + +import numpy as np +import paddle +from chgnet.graph.crystalgraph import CrystalGraph +from chgnet.graph.graph import Graph +from chgnet.graph.graph import Node + +if TYPE_CHECKING: + from typing import Literal + + from pymatgen.core import Structure + from typing_extensions import Self +# try: +# from chgnet.graph.cygraph import make_graph +# except (ImportError, AttributeError): +# make_graph = None +make_graph = None +DTYPE = "float32" + + +class CrystalGraphConverter(paddle.nn.Layer): + """Convert a pymatgen.core.Structure to a CrystalGraph + The CrystalGraph dataclass stores essential field to make sure that + gradients like force and stress can be calculated through back-propagation later. + """ + + make_graph = None + + def __init__( + self, + *, + atom_graph_cutoff: float = 6, + bond_graph_cutoff: float = 3, + algorithm: Literal["legacy", "fast"] = "fast", + on_isolated_atoms: Literal["ignore", "warn", "error"] = "error", + verbose: bool = False, + ) -> None: + """Initialize the Crystal Graph Converter. + + Args: + atom_graph_cutoff (float): cutoff radius to search for neighboring atom in + atom_graph. Default = 5. + bond_graph_cutoff (float): bond length threshold to include bond in + bond_graph. Default = 3. + algorithm ('legacy' | 'fast'): algorithm to use for converting graphs. + 'legacy': python implementation of graph creation + 'fast': C implementation of graph creation, this is faster, + but will need the cygraph.c file correctly compiled from pip install + Default = 'fast' + on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures + with isolated atoms. + Default = 'error' + verbose (bool): whether to print the CrystalGraphConverter + initialization message. Default = False. + """ + super().__init__() + self.atom_graph_cutoff = atom_graph_cutoff + self.bond_graph_cutoff = ( + atom_graph_cutoff if bond_graph_cutoff is None else bond_graph_cutoff + ) + self.on_isolated_atoms = on_isolated_atoms + self.create_graph = self._create_graph_legacy + self.algorithm = "legacy" + if algorithm == "fast": + if make_graph is not None: + self.create_graph = self._create_graph_fast + self.algorithm = "fast" + else: + warnings.warn( + "`fast` algorithm is not available, using `legacy`", + UserWarning, + stacklevel=1, + ) + elif algorithm != "legacy": + warnings.warn( + f"Unknown algorithm={algorithm!r}, using `legacy`", + UserWarning, + stacklevel=1, + ) + if verbose: + print(self) + + def __repr__(self) -> str: + """String representation of the CrystalGraphConverter.""" + atom_graph_cutoff = self.atom_graph_cutoff + bond_graph_cutoff = self.bond_graph_cutoff + algorithm = self.algorithm + cls_name = type(self).__name__ + return f"{cls_name}(algorithm={algorithm!r}, atom_graph_cutoff={atom_graph_cutoff!r}, bond_graph_cutoff={bond_graph_cutoff!r})" + + def forward(self, structure: Structure, graph_id=None, mp_id=None) -> CrystalGraph: + """Convert a structure, return a CrystalGraph. 
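# Illustrative usage sketch (not applied by this patch): converting a pymatgen
# Structure into a CrystalGraph. The NaCl structure is a toy placeholder;
# calling the converter instance dispatches to forward() and returns a
# CrystalGraph ready for CHGNet.
from pymatgen.core import Lattice, Structure

from chgnet.graph import CrystalGraphConverter

converter = CrystalGraphConverter(atom_graph_cutoff=6, bond_graph_cutoff=3)
structure = Structure(Lattice.cubic(4.0), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
crystal_graph = converter(structure, graph_id="toy-NaCl", mp_id=None)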
+ + Args: + structure (pymatgen.core.Structure): structure to convert + graph_id (str): an id to keep track of this crystal graph + Default = None + mp_id (str): Materials Project id of this structure + Default = None + + Return: + CrystalGraph that is ready to use by CHGNet + """ + n_atoms = len(structure) + data = [site.specie.Z for site in structure] + atomic_number = paddle.to_tensor(data, dtype="int32", stop_gradient=not False) + atom_frac_coord = paddle.to_tensor( + data=structure.frac_coords, dtype=DTYPE, stop_gradient=not True + ) + lattice = paddle.to_tensor( + data=structure.lattice.matrix, dtype=DTYPE, stop_gradient=not True + ) + center_index, neighbor_index, image, distance = structure.get_neighbor_list( + r=self.atom_graph_cutoff, sites=structure.sites, numerical_tol=1e-08 + ) + graph = self.create_graph( + n_atoms, center_index, neighbor_index, image, distance + ) + atom_graph, directed2undirected = graph.adjacency_list() + atom_graph = paddle.to_tensor(data=atom_graph, dtype="int32") + directed2undirected = paddle.to_tensor(data=directed2undirected, dtype="int32") + try: + bond_graph, undirected2directed = graph.line_graph_adjacency_list( + cutoff=self.bond_graph_cutoff + ) + except Exception as exc: + structure.to(filename="bond_graph_error.cif") + raise RuntimeError( + f"Failed creating bond graph for {graph_id}, check bond_graph_error.cif" + ) from exc + bond_graph = paddle.to_tensor(data=bond_graph, dtype="int32") + undirected2directed = paddle.to_tensor(data=undirected2directed, dtype="int32") + n_isolated_atoms = len({*range(n_atoms)} - {*center_index}) + if n_isolated_atoms: + atom_graph_cutoff = self.atom_graph_cutoff + msg = f"Structure graph_id={graph_id!r} has {n_isolated_atoms} isolated atom(s) with atom_graph_cutoff={atom_graph_cutoff!r}. CHGNet calculation will likely go wrong" + if self.on_isolated_atoms == "error": + raise ValueError(msg) + elif self.on_isolated_atoms == "warn": + print(msg, file=sys.stderr) + return CrystalGraph( + atomic_number=atomic_number, + atom_frac_coord=atom_frac_coord, + atom_graph=atom_graph, + neighbor_image=paddle.to_tensor(data=image, dtype=DTYPE), + directed2undirected=directed2undirected, + undirected2directed=undirected2directed, + bond_graph=bond_graph, + lattice=lattice, + graph_id=graph_id, + mp_id=mp_id, + composition=structure.composition.formula, + atom_graph_cutoff=self.atom_graph_cutoff, + bond_graph_cutoff=self.bond_graph_cutoff, + ) + + @staticmethod + def _create_graph_legacy( + n_atoms: int, + center_index: np.ndarray, + neighbor_index: np.ndarray, + image: np.ndarray, + distance: np.ndarray, + ) -> Graph: + """Given structure information, create a Graph structure to be used to + create Crystal_Graph using pure python implementation. + + Args: + n_atoms (int): the number of atoms in the structure + center_index (np.ndarray): np array of indices of center atoms. + [num_undirected_bonds] + neighbor_index (np.ndarray): np array of indices of neighbor atoms. + [num_undirected_bonds] + image (np.ndarray): np array of images for each edge. + [num_undirected_bonds, 3] + distance (np.ndarray): np array of distances. 
+ [num_undirected_bonds] + + Return: + Graph data structure used to create Crystal_Graph object + """ + graph = Graph([Node(index=idx) for idx in range(n_atoms)]) + for ii, jj, img, dist in zip( + center_index, neighbor_index, image, distance, strict=True + ): + graph.add_edge(center_index=ii, neighbor_index=jj, image=img, distance=dist) + return graph + + @staticmethod + def _create_graph_fast( + n_atoms: int, + center_index: np.ndarray, + neighbor_index: np.ndarray, + image: np.ndarray, + distance: np.ndarray, + ) -> Graph: + """Given structure information, create a Graph structure to be used to + create Crystal_Graph using C implementation. + + NOTE: this is the fast version of _create_graph_legacy optimized + in c (~3x speedup). + + Args: + n_atoms (int): the number of atoms in the structure + center_index (np.ndarray): np array of indices of center atoms. + [num_undirected_bonds] + neighbor_index (np.ndarray): np array of indices of neighbor atoms. + [num_undirected_bonds] + image (np.ndarray): np array of images for each edge. + [num_undirected_bonds, 3] + distance (np.ndarray): np array of distances. + [num_undirected_bonds] + + Return: + Graph data structure used to create Crystal_Graph object + """ + center_index = np.ascontiguousarray(center_index) + neighbor_index = np.ascontiguousarray(neighbor_index) + image = np.ascontiguousarray(image, dtype=np.int64) + distance = np.ascontiguousarray(distance) + gc_saved = gc.get_threshold() + gc.set_threshold(0) + nodes, dir_edges_list, undir_edges_list, undirected_edges = make_graph( + center_index, len(center_index), neighbor_index, image, distance, n_atoms + ) + graph = Graph(nodes=nodes) + graph.directed_edges_list = dir_edges_list + graph.undirected_edges_list = undir_edges_list + graph.undirected_edges = undirected_edges + gc.set_threshold(gc_saved[0]) + return graph + + def set_isolated_atom_response( + self, on_isolated_atoms: Literal["ignore", "warn", "error"] + ) -> None: + """Set the graph converter's response to isolated atom graph + Args: + on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures + with isolated atoms. + Default = 'error'. 
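[editor note] The converter defined in this hunk can be exercised end to end; the sketch below is not part of the diff and assumes the ported package is importable as `chgnet` with the converter at `chgnet.graph.converter` (as in upstream CHGNet). The NaCl structure is purely illustrative.

# Usage sketch for CrystalGraphConverter (editorial example, not in the diff).
from pymatgen.core import Lattice, Structure

from chgnet.graph.converter import CrystalGraphConverter  # assumed module path

# Conventional rock-salt NaCl cell; lattice parameter is illustrative.
structure = Structure.from_spacegroup(
    "Fm-3m", Lattice.cubic(5.69), ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]]
)
converter = CrystalGraphConverter(
    atom_graph_cutoff=6, bond_graph_cutoff=3, algorithm="legacy", verbose=True
)
graph = converter(structure, graph_id="NaCl-demo", mp_id=None)
print(graph)                     # CrystalGraph(...) summary with cutoffs and graph sizes
print(graph.num_isolated_atoms)  # 0 for this dense rock-salt cell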
+ + Returns: + None + """ + self.on_isolated_atoms = on_isolated_atoms + + def as_dict(self) -> dict[str, str | float]: + """Save the args of the graph converter.""" + return { + "atom_graph_cutoff": self.atom_graph_cutoff, + "bond_graph_cutoff": self.bond_graph_cutoff, + "algorithm": self.algorithm, + } + + @classmethod + def from_dict(cls, dct: dict) -> Self: + """Create converter from dictionary.""" + return cls(**dct) diff --git a/jointContribution/CHGNet/chgnet/graph/crystalgraph.py b/jointContribution/CHGNet/chgnet/graph/crystalgraph.py index acff053972..24f6b424dc 100644 --- a/jointContribution/CHGNet/chgnet/graph/crystalgraph.py +++ b/jointContribution/CHGNet/chgnet/graph/crystalgraph.py @@ -1,193 +1,193 @@ -from __future__ import annotations - -import os -from typing import TYPE_CHECKING -from typing import Any - -import paddle - -if TYPE_CHECKING: - from typing_extensions import Self -DTYPE = "float32" - - -class CrystalGraph: - """A data class for crystal graph.""" - - def __init__( - self, - atomic_number: paddle.Tensor, - atom_frac_coord: paddle.Tensor, - atom_graph: paddle.Tensor, - atom_graph_cutoff: float, - neighbor_image: paddle.Tensor, - directed2undirected: paddle.Tensor, - undirected2directed: paddle.Tensor, - bond_graph: paddle.Tensor, - bond_graph_cutoff: float, - lattice: paddle.Tensor, - graph_id: (str | None) = None, - mp_id: (str | None) = None, - composition: (str | None) = None, - ) -> None: - """Initialize the crystal graph. - - Attention! This data class is not intended to be created manually. CrystalGraph - should be returned by a CrystalGraphConverter - - Args: - atomic_number (Tensor): the atomic numbers of atoms in the structure - [n_atom] - atom_frac_coord (Tensor): the fractional coordinates of the atoms - [n_atom, 3] - atom_graph (Tensor): a directed graph adjacency list, - (center atom indices, neighbor atom indices, undirected bond index) - for bonds in bond_fea - [num_directed_bonds, 2] - atom_graph_cutoff (float): the cutoff radius to draw edges in atom_graph - neighbor_image (Tensor): the periodic image specifying the location of - neighboring atom - see: https://github.com/materialsproject/pymatgen/blob/ca2175c762e37ea7 - c9f3950ef249bc540e683da1/pymatgen/core/structure.py#L1485-L1541 - [num_directed_bonds, 3] - directed2undirected (Tensor): the mapping from directed edge index to - undirected edge index for the atom graph - [num_directed_bonds] - undirected2directed (Tensor): the mapping from undirected edge index to - one of its directed edge index, this is essentially the inverse - mapping of the directed2undirected this tensor is needed for - computation efficiency. - Note that num_directed_bonds = 2 * num_undirected_bonds - [num_undirected_bonds] - bond_graph (Tensor): a directed graph adjacency list, - (atom indices, 1st undirected bond idx, 1st directed bond idx, - 2nd undirected bond idx, 2nd directed bond idx) for angles in angle_fea - [n_angle, 5] - bond_graph_cutoff (float): the cutoff bond length to include bond - as nodes in bond_graph - lattice (Tensor): lattices of the input structure - [3, 3] - graph_id (str | None): an id to keep track of this crystal graph - Default = None - mp_id (str | None): Materials Project id of this structure - Default = None - composition: Chemical composition of the compound, used just for - better tracking of the graph - Default = None. 
- - Raises: - ValueError: if len(directed2undirected) != 2 * len(undirected2directed) - """ - super().__init__() - self.atomic_number = atomic_number - self.atom_frac_coord = atom_frac_coord - self.atom_graph = atom_graph - self.atom_graph_cutoff = atom_graph_cutoff - self.neighbor_image = neighbor_image - self.directed2undirected = directed2undirected - self.undirected2directed = undirected2directed - self.bond_graph = bond_graph - self.bond_graph_cutoff = bond_graph_cutoff - self.lattice = lattice - self.graph_id = graph_id - self.mp_id = mp_id - self.composition = composition - if len(directed2undirected) != 2 * len(undirected2directed): - raise ValueError( - f"{graph_id} number of directed indices ({len(directed2undirected)}) != 2 * number of undirected indices ({2 * len(undirected2directed)})!" - ) - - def to(self, device: str = "cpu") -> CrystalGraph: - """Move the graph to a device. Default = 'cpu'.""" - return CrystalGraph( - atomic_number=self.atomic_number.to(device), - atom_frac_coord=self.atom_frac_coord.to(device), - atom_graph=self.atom_graph.to(device), - atom_graph_cutoff=self.atom_graph_cutoff, - neighbor_image=self.neighbor_image.to(device), - directed2undirected=self.directed2undirected.to(device), - undirected2directed=self.undirected2directed.to(device), - bond_graph=self.bond_graph.to(device), - bond_graph_cutoff=self.bond_graph_cutoff, - lattice=self.lattice.to(device), - graph_id=self.graph_id, - mp_id=self.mp_id, - composition=self.composition, - ) - - def to_dict(self) -> dict[str, Any]: - """Convert the graph to a dictionary.""" - return { - "atomic_number": self.atomic_number, - "atom_frac_coord": self.atom_frac_coord, - "atom_graph": self.atom_graph, - "atom_graph_cutoff": self.atom_graph_cutoff, - "neighbor_image": self.neighbor_image, - "directed2undirected": self.directed2undirected, - "undirected2directed": self.undirected2directed, - "bond_graph": self.bond_graph, - "bond_graph_cutoff": self.bond_graph_cutoff, - "lattice": self.lattice, - "graph_id": self.graph_id, - "mp_id": self.mp_id, - "composition": self.composition, - } - - def save(self, fname: (str | None) = None, save_dir: str = ".") -> str: - """Save the graph to a file. - - Args: - fname (str, optional): File name. Defaults to None. - save_dir (str, optional): Directory to save the file. Defaults to ".". - - Returns: - str: The path to the saved file. - """ - if fname is not None: - save_name = os.path.join(save_dir, fname) - elif self.graph_id is not None: - save_name = os.path.join(save_dir, f"{self.graph_id}.pt") - else: - save_name = os.path.join(save_dir, f"{self.composition}.pt") - paddle.save(obj=self.to_dict(), path=save_name) - return save_name - - @classmethod - def from_file(cls, file_name: str) -> Self: - """Load a crystal graph from a file. - - Args: - file_name (str): The path to the file. - - Returns: - CrystalGraph: The loaded graph. 
- """ - return paddle.load(path=str(file_name)) - - @classmethod - def from_dict(cls, dic: dict[str, Any]) -> Self: - """Load a CrystalGraph from a dictionary.""" - return cls(**dic) - - def __repr__(self) -> str: - """String representation of the graph.""" - composition = self.composition - atom_graph_cutoff = self.atom_graph_cutoff - bond_graph_cutoff = self.bond_graph_cutoff - atom_graph_len = self.atom_graph - n_atoms = len(self.atomic_number) - atom_graph_len = len(self.atom_graph) - bond_graph_len = len(self.bond_graph) - return f"CrystalGraph(composition={composition!r}, atom_graph_cutoff={atom_graph_cutoff!r}, bond_graph_cutoff={bond_graph_cutoff!r}, n_atoms={n_atoms!r}, atom_graph_len={atom_graph_len!r}, bond_graph_len={bond_graph_len!r})" - - @property - def num_isolated_atoms(self) -> int: - """Number of isolated atoms given the atom graph cutoff - Isolated atoms are disconnected nodes in the atom graph - that will not get updated in CHGNet. - These atoms will always have calculated force equal to zero. - - With the default CHGNet atom graph cutoff radius, only ~ 0.1% of MPtrj dataset - structures has isolated atoms. - """ - return len(self.atomic_number) - paddle.unique(x=self.atom_graph[:, 0]).size +from __future__ import annotations + +import os +from typing import TYPE_CHECKING +from typing import Any + +import paddle + +if TYPE_CHECKING: + from typing_extensions import Self +DTYPE = "float32" + + +class CrystalGraph: + """A data class for crystal graph.""" + + def __init__( + self, + atomic_number: paddle.Tensor, + atom_frac_coord: paddle.Tensor, + atom_graph: paddle.Tensor, + atom_graph_cutoff: float, + neighbor_image: paddle.Tensor, + directed2undirected: paddle.Tensor, + undirected2directed: paddle.Tensor, + bond_graph: paddle.Tensor, + bond_graph_cutoff: float, + lattice: paddle.Tensor, + graph_id: (str | None) = None, + mp_id: (str | None) = None, + composition: (str | None) = None, + ) -> None: + """Initialize the crystal graph. + + Attention! This data class is not intended to be created manually. CrystalGraph + should be returned by a CrystalGraphConverter + + Args: + atomic_number (Tensor): the atomic numbers of atoms in the structure + [n_atom] + atom_frac_coord (Tensor): the fractional coordinates of the atoms + [n_atom, 3] + atom_graph (Tensor): a directed graph adjacency list, + (center atom indices, neighbor atom indices, undirected bond index) + for bonds in bond_fea + [num_directed_bonds, 2] + atom_graph_cutoff (float): the cutoff radius to draw edges in atom_graph + neighbor_image (Tensor): the periodic image specifying the location of + neighboring atom + see: https://github.com/materialsproject/pymatgen/blob/ca2175c762e37ea7 + c9f3950ef249bc540e683da1/pymatgen/core/structure.py#L1485-L1541 + [num_directed_bonds, 3] + directed2undirected (Tensor): the mapping from directed edge index to + undirected edge index for the atom graph + [num_directed_bonds] + undirected2directed (Tensor): the mapping from undirected edge index to + one of its directed edge index, this is essentially the inverse + mapping of the directed2undirected this tensor is needed for + computation efficiency. 
+ Note that num_directed_bonds = 2 * num_undirected_bonds + [num_undirected_bonds] + bond_graph (Tensor): a directed graph adjacency list, + (atom indices, 1st undirected bond idx, 1st directed bond idx, + 2nd undirected bond idx, 2nd directed bond idx) for angles in angle_fea + [n_angle, 5] + bond_graph_cutoff (float): the cutoff bond length to include bond + as nodes in bond_graph + lattice (Tensor): lattices of the input structure + [3, 3] + graph_id (str | None): an id to keep track of this crystal graph + Default = None + mp_id (str | None): Materials Project id of this structure + Default = None + composition: Chemical composition of the compound, used just for + better tracking of the graph + Default = None. + + Raises: + ValueError: if len(directed2undirected) != 2 * len(undirected2directed) + """ + super().__init__() + self.atomic_number = atomic_number + self.atom_frac_coord = atom_frac_coord + self.atom_graph = atom_graph + self.atom_graph_cutoff = atom_graph_cutoff + self.neighbor_image = neighbor_image + self.directed2undirected = directed2undirected + self.undirected2directed = undirected2directed + self.bond_graph = bond_graph + self.bond_graph_cutoff = bond_graph_cutoff + self.lattice = lattice + self.graph_id = graph_id + self.mp_id = mp_id + self.composition = composition + if len(directed2undirected) != 2 * len(undirected2directed): + raise ValueError( + f"{graph_id} number of directed indices ({len(directed2undirected)}) != 2 * number of undirected indices ({2 * len(undirected2directed)})!" + ) + + def to(self, device: str = "cpu") -> CrystalGraph: + """Move the graph to a device. Default = 'cpu'.""" + return CrystalGraph( + atomic_number=self.atomic_number.to(device), + atom_frac_coord=self.atom_frac_coord.to(device), + atom_graph=self.atom_graph.to(device), + atom_graph_cutoff=self.atom_graph_cutoff, + neighbor_image=self.neighbor_image.to(device), + directed2undirected=self.directed2undirected.to(device), + undirected2directed=self.undirected2directed.to(device), + bond_graph=self.bond_graph.to(device), + bond_graph_cutoff=self.bond_graph_cutoff, + lattice=self.lattice.to(device), + graph_id=self.graph_id, + mp_id=self.mp_id, + composition=self.composition, + ) + + def to_dict(self) -> dict[str, Any]: + """Convert the graph to a dictionary.""" + return { + "atomic_number": self.atomic_number, + "atom_frac_coord": self.atom_frac_coord, + "atom_graph": self.atom_graph, + "atom_graph_cutoff": self.atom_graph_cutoff, + "neighbor_image": self.neighbor_image, + "directed2undirected": self.directed2undirected, + "undirected2directed": self.undirected2directed, + "bond_graph": self.bond_graph, + "bond_graph_cutoff": self.bond_graph_cutoff, + "lattice": self.lattice, + "graph_id": self.graph_id, + "mp_id": self.mp_id, + "composition": self.composition, + } + + def save(self, fname: (str | None) = None, save_dir: str = ".") -> str: + """Save the graph to a file. + + Args: + fname (str, optional): File name. Defaults to None. + save_dir (str, optional): Directory to save the file. Defaults to ".". + + Returns: + str: The path to the saved file. + """ + if fname is not None: + save_name = os.path.join(save_dir, fname) + elif self.graph_id is not None: + save_name = os.path.join(save_dir, f"{self.graph_id}.pt") + else: + save_name = os.path.join(save_dir, f"{self.composition}.pt") + paddle.save(obj=self.to_dict(), path=save_name) + return save_name + + @classmethod + def from_file(cls, file_name: str) -> Self: + """Load a crystal graph from a file. 
+ + Args: + file_name (str): The path to the file. + + Returns: + CrystalGraph: The loaded graph. + """ + return paddle.load(path=str(file_name)) + + @classmethod + def from_dict(cls, dic: dict[str, Any]) -> Self: + """Load a CrystalGraph from a dictionary.""" + return cls(**dic) + + def __repr__(self) -> str: + """String representation of the graph.""" + composition = self.composition + atom_graph_cutoff = self.atom_graph_cutoff + bond_graph_cutoff = self.bond_graph_cutoff + atom_graph_len = self.atom_graph + n_atoms = len(self.atomic_number) + atom_graph_len = len(self.atom_graph) + bond_graph_len = len(self.bond_graph) + return f"CrystalGraph(composition={composition!r}, atom_graph_cutoff={atom_graph_cutoff!r}, bond_graph_cutoff={bond_graph_cutoff!r}, n_atoms={n_atoms!r}, atom_graph_len={atom_graph_len!r}, bond_graph_len={bond_graph_len!r})" + + @property + def num_isolated_atoms(self) -> int: + """Number of isolated atoms given the atom graph cutoff + Isolated atoms are disconnected nodes in the atom graph + that will not get updated in CHGNet. + These atoms will always have calculated force equal to zero. + + With the default CHGNet atom graph cutoff radius, only ~ 0.1% of MPtrj dataset + structures has isolated atoms. + """ + return len(self.atomic_number) - paddle.unique(x=self.atom_graph[:, 0]).size diff --git a/jointContribution/CHGNet/chgnet/graph/graph.py b/jointContribution/CHGNet/chgnet/graph/graph.py index 013698d9d7..8757c74071 100644 --- a/jointContribution/CHGNet/chgnet/graph/graph.py +++ b/jointContribution/CHGNet/chgnet/graph/graph.py @@ -1,296 +1,296 @@ -from __future__ import annotations - -import sys -from abc import ABC -from abc import abstractmethod - -from chgnet.utils import write_json - - -class Node: - """A node in a graph.""" - - def __init__(self, index: int, info: (dict | None) = None) -> None: - """Initialize a Node. - - Args: - index (int): the index of this node - info (dict, optional): any additional information about this node. - """ - self.index = index - self.info = info - self.neighbors: dict[int, list[DirectedEdge | UndirectedEdge]] = {} - - def add_neighbor(self, index, edge) -> None: - """Draw an directed edge between self and the node specified by index. - - Args: - index (int): the index of neighboring node - edge (DirectedEdge): an DirectedEdge object pointing from self to the node. 
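[editor note] A serialization round trip for the CrystalGraph container added above; a sketch, not part of the diff. `graph` is assumed to come from a CrystalGraphConverter as in the earlier example, and the file name is illustrative.

from chgnet.graph.crystalgraph import CrystalGraph

def roundtrip(graph: CrystalGraph, fname: str = "demo_graph.pt") -> CrystalGraph:
    # In-memory round trip: to_dict() keys match the __init__ signature.
    rebuilt = CrystalGraph.from_dict(graph.to_dict())
    assert len(rebuilt.atomic_number) == len(graph.atomic_number)

    # On-disk round trip: save() stores to_dict() via paddle.save, and
    # from_file() returns that saved dict, so it is passed through from_dict().
    path = graph.save(fname=fname)
    return CrystalGraph.from_dict(CrystalGraph.from_file(path))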
- """ - if index not in self.neighbors: - self.neighbors[index] = [edge] - else: - self.neighbors[index].append(edge) - - -class Edge(ABC): - """Abstract base class for edges in a graph.""" - - def __init__( - self, nodes: list, index: (int | None) = None, info: (dict | None) = None - ) -> None: - """Initialize an Edge.""" - self.nodes = nodes - self.index = index - self.info = info - - def __repr__(self) -> str: - """String representation of this edge.""" - nodes, index, info = self.nodes, self.index, self.info - return f"{type(self).__name__}(nodes={nodes!r}, index={index!r}, info={info!r})" - - def __hash__(self) -> int: - """Hash this edge.""" - img = (self.info or {}).get("image") - img_str = "" if img is None else img.tobytes() - return hash((self.nodes[0], self.nodes[1], img_str)) - - @abstractmethod - def __eq__(self, other: object) -> bool: - """Check if two edges are equal.""" - raise NotImplementedError - - -class UndirectedEdge(Edge): - """An undirected/bi-directed edge in a graph.""" - - __hash__ = Edge.__hash__ - - def __eq__(self, other: object) -> bool: - """Check if two undirected edges are equal.""" - return set(self.nodes) == set(other.nodes) and self.info == other.info - - -class DirectedEdge(Edge): - """A directed edge in a graph.""" - - __hash__ = Edge.__hash__ - - def make_undirected(self, index: int, info: (dict | None) = None) -> UndirectedEdge: - """Make a directed edge undirected.""" - info = info or {} - info["distance"] = self.info["distance"] - return UndirectedEdge(self.nodes, index, info) - - def __eq__(self, other: object) -> bool: - """Check if the two directed edges are equal. - - Args: - other (DirectedEdge): another DirectedEdge to compare to - - Returns: - bool: True if other is the same directed edge, or if other is the directed - edge with reverse direction of self, else False. - """ - if not isinstance(other, DirectedEdge): - return False - self_img = (self.info or {}).get("image") - other_img = (other.info or {}).get("image") - none_img = self_img is other_img is None - if self.nodes == other.nodes and (none_img or all(self_img == other_img)): - print( - "!!!!!! the two directed edges are equal but this operation is not supposed to happen", - file=sys.stderr, - ) - return True - return self.nodes == other.nodes[::-1] and ( - none_img or all(self_img == -1 * other_img) - ) - - -class Graph: - """A graph for storing the neighbor information of atoms.""" - - def __init__(self, nodes: list[Node]) -> None: - """Initialize a Graph from a list of nodes.""" - self.nodes = nodes - self.directed_edges: dict[frozenset[int], list[DirectedEdge]] = {} - self.directed_edges_list: list[DirectedEdge] = [] - self.undirected_edges: dict[frozenset[int], list[UndirectedEdge]] = {} - self.undirected_edges_list: list[UndirectedEdge] = [] - - def add_edge( - self, center_index, neighbor_index, image, distance, dist_tol: float = 1e-06 - ) -> None: - """Add an directed edge to the graph. - - Args: - center_index (int): center node index - neighbor_index (int): neighbor node index - image (np.array): the periodic cell image the neighbor is from - distance (float): distance between center and neighbor. - dist_tol (float): tolerance for distance comparison between edges. 
- Default = 1e-6 - """ - directed_edge_index = len(self.directed_edges_list) - this_directed_edge = DirectedEdge( - [center_index, neighbor_index], - index=directed_edge_index, - info={"image": image, "distance": distance}, - ) - tmp = frozenset([center_index, neighbor_index]) - if tmp not in self.undirected_edges: - this_directed_edge.info["undirected_edge_index"] = len( - self.undirected_edges_list - ) - this_undirected_edge = this_directed_edge.make_undirected( - index=len(self.undirected_edges_list), - info={"directed_edge_index": [directed_edge_index]}, - ) - self.undirected_edges[tmp] = [this_undirected_edge] - self.undirected_edges_list.append(this_undirected_edge) - self.nodes[center_index].add_neighbor(neighbor_index, this_directed_edge) - self.directed_edges_list.append(this_directed_edge) - else: - for undirected_edge in self.undirected_edges[tmp]: - if ( - abs(undirected_edge.info["distance"] - distance) < dist_tol - and len(undirected_edge.info["directed_edge_index"]) == 1 - ): - added_dir_edge = self.directed_edges_list[ - undirected_edge.info["directed_edge_index"][0] - ] - if added_dir_edge == this_directed_edge: - this_directed_edge.info[ - "undirected_edge_index" - ] = added_dir_edge.info["undirected_edge_index"] - self.nodes[center_index].add_neighbor( - neighbor_index, this_directed_edge - ) - self.directed_edges_list.append(this_directed_edge) - undirected_edge.info["directed_edge_index"].append( - directed_edge_index - ) - return - this_directed_edge.info["undirected_edge_index"] = len( - self.undirected_edges_list - ) - this_undirected_edge = this_directed_edge.make_undirected( - index=len(self.undirected_edges_list), - info={"directed_edge_index": [directed_edge_index]}, - ) - self.undirected_edges[tmp].append(this_undirected_edge) - self.undirected_edges_list.append(this_undirected_edge) - self.nodes[center_index].add_neighbor(neighbor_index, this_directed_edge) - self.directed_edges_list.append(this_directed_edge) - - def adjacency_list(self) -> tuple[list[list[int]], list[int]]: - """Get the adjacency list - Return: - graph: the adjacency list - [[0, 1], - [0, 2], - ... - [5, 2] - ... ]] - the fist column specifies center/source node, - the second column specifies neighbor/destination node - directed2undirected: - [0, 1, ...] - a list of length = num_directed_edge that specifies - the undirected edge index corresponding to the directed edges - represented in each row in the graph adjacency list. - """ - graph = [edge.nodes for edge in self.directed_edges_list] - directed2undirected = [ - edge.info["undirected_edge_index"] for edge in self.directed_edges_list - ] - return graph, directed2undirected - - def line_graph_adjacency_list(self, cutoff) -> tuple[list[list[int]], list[int]]: - """Get the line graph adjacency list. - - Args: - cutoff (float): a float to indicate the maximum edge length to be included - in constructing the line graph, this is used to decrease computation - complexity - - Return: - line_graph: - [[0, 1, 1, 2, 2], - [0, 1, 1, 4, 23], - [1, 4, 23, 5, 66], - ... ... ] - the fist column specifies node(atom) index at this angle, - the second column specifies 1st undirected edge(left bond) index, - the third column specifies 1st directed edge(left bond) index, - the fourth column specifies 2nd undirected edge(right bond) index, - the fifth column specifies 2nd directed edge(right bond) index,. - undirected2directed: - [32, 45, ...] 
- a list of length = num_undirected_edge that - maps the undirected edge index to one of its directed edges indices - """ - if len(self.directed_edges_list) != 2 * len(self.undirected_edges_list): - raise ValueError( - f"Error: number of directed edges={len(self.directed_edges_list)} != 2 * number of undirected edges={len(self.undirected_edges_list)}!This indicates directed edges are not complete" - ) - line_graph = [] - undirected2directed = [] - for u_edge in self.undirected_edges_list: - undirected2directed.append(u_edge.info["directed_edge_index"][0]) - if u_edge.info["distance"] > cutoff: - continue - if len(u_edge.info["directed_edge_index"]) != 2: - raise ValueError( - f"Did not find 2 Directed_edges !!!undirected edge {u_edge} has:edge.info['directed_edge_index'] = {u_edge.info['directed_edge_index']}len directed_edges_list = {len(self.directed_edges_list)}len undirected_edges_list = {len(self.undirected_edges_list)}" - ) - for center, dir_edge in zip( - u_edge.nodes, u_edge.info["directed_edge_index"], strict=True - ): - for directed_edges in self.nodes[center].neighbors.values(): - for directed_edge in directed_edges: - if directed_edge.index == dir_edge: - continue - if directed_edge.info["distance"] < cutoff: - line_graph.append( - [ - center, - u_edge.index, - dir_edge, - directed_edge.info["undirected_edge_index"], - directed_edge.index, - ] - ) - return line_graph, undirected2directed - - def undirected2directed(self) -> list[int]: - """The index map from undirected_edge index to one of its directed_edge - index. - """ - return [ - undirected_edge.info["directed_edge_index"][0] - for undirected_edge in self.undirected_edges_list - ] - - def as_dict(self) -> dict: - """Return dictionary serialization of a Graph.""" - return { - "nodes": self.nodes, - "directed_edges": self.directed_edges, - "directed_edges_list": self.directed_edges_list, - "undirected_edges": self.undirected_edges, - "undirected_edges_list": self.undirected_edges_list, - } - - def to(self, filename="graph.json") -> None: - """Save graph dictionary to file.""" - write_json(self.as_dict(), filename) - - def __repr__(self) -> str: - """Return string representation of the Graph.""" - num_nodes = len(self.nodes) - num_directed_edges = len(self.directed_edges_list) - num_undirected_edges = len(self.undirected_edges_list) - return f"Graph(num_nodes={num_nodes!r}, num_directed_edges={num_directed_edges!r}, num_undirected_edges={num_undirected_edges!r})" +from __future__ import annotations + +import sys +from abc import ABC +from abc import abstractmethod + +from chgnet.utils import write_json + + +class Node: + """A node in a graph.""" + + def __init__(self, index: int, info: (dict | None) = None) -> None: + """Initialize a Node. + + Args: + index (int): the index of this node + info (dict, optional): any additional information about this node. + """ + self.index = index + self.info = info + self.neighbors: dict[int, list[DirectedEdge | UndirectedEdge]] = {} + + def add_neighbor(self, index, edge) -> None: + """Draw an directed edge between self and the node specified by index. + + Args: + index (int): the index of neighboring node + edge (DirectedEdge): an DirectedEdge object pointing from self to the node. 
+ """ + if index not in self.neighbors: + self.neighbors[index] = [edge] + else: + self.neighbors[index].append(edge) + + +class Edge(ABC): + """Abstract base class for edges in a graph.""" + + def __init__( + self, nodes: list, index: (int | None) = None, info: (dict | None) = None + ) -> None: + """Initialize an Edge.""" + self.nodes = nodes + self.index = index + self.info = info + + def __repr__(self) -> str: + """String representation of this edge.""" + nodes, index, info = self.nodes, self.index, self.info + return f"{type(self).__name__}(nodes={nodes!r}, index={index!r}, info={info!r})" + + def __hash__(self) -> int: + """Hash this edge.""" + img = (self.info or {}).get("image") + img_str = "" if img is None else img.tobytes() + return hash((self.nodes[0], self.nodes[1], img_str)) + + @abstractmethod + def __eq__(self, other: object) -> bool: + """Check if two edges are equal.""" + raise NotImplementedError + + +class UndirectedEdge(Edge): + """An undirected/bi-directed edge in a graph.""" + + __hash__ = Edge.__hash__ + + def __eq__(self, other: object) -> bool: + """Check if two undirected edges are equal.""" + return set(self.nodes) == set(other.nodes) and self.info == other.info + + +class DirectedEdge(Edge): + """A directed edge in a graph.""" + + __hash__ = Edge.__hash__ + + def make_undirected(self, index: int, info: (dict | None) = None) -> UndirectedEdge: + """Make a directed edge undirected.""" + info = info or {} + info["distance"] = self.info["distance"] + return UndirectedEdge(self.nodes, index, info) + + def __eq__(self, other: object) -> bool: + """Check if the two directed edges are equal. + + Args: + other (DirectedEdge): another DirectedEdge to compare to + + Returns: + bool: True if other is the same directed edge, or if other is the directed + edge with reverse direction of self, else False. + """ + if not isinstance(other, DirectedEdge): + return False + self_img = (self.info or {}).get("image") + other_img = (other.info or {}).get("image") + none_img = self_img is other_img is None + if self.nodes == other.nodes and (none_img or all(self_img == other_img)): + print( + "!!!!!! the two directed edges are equal but this operation is not supposed to happen", + file=sys.stderr, + ) + return True + return self.nodes == other.nodes[::-1] and ( + none_img or all(self_img == -1 * other_img) + ) + + +class Graph: + """A graph for storing the neighbor information of atoms.""" + + def __init__(self, nodes: list[Node]) -> None: + """Initialize a Graph from a list of nodes.""" + self.nodes = nodes + self.directed_edges: dict[frozenset[int], list[DirectedEdge]] = {} + self.directed_edges_list: list[DirectedEdge] = [] + self.undirected_edges: dict[frozenset[int], list[UndirectedEdge]] = {} + self.undirected_edges_list: list[UndirectedEdge] = [] + + def add_edge( + self, center_index, neighbor_index, image, distance, dist_tol: float = 1e-06 + ) -> None: + """Add an directed edge to the graph. + + Args: + center_index (int): center node index + neighbor_index (int): neighbor node index + image (np.array): the periodic cell image the neighbor is from + distance (float): distance between center and neighbor. + dist_tol (float): tolerance for distance comparison between edges. 
+ Default = 1e-6 + """ + directed_edge_index = len(self.directed_edges_list) + this_directed_edge = DirectedEdge( + [center_index, neighbor_index], + index=directed_edge_index, + info={"image": image, "distance": distance}, + ) + tmp = frozenset([center_index, neighbor_index]) + if tmp not in self.undirected_edges: + this_directed_edge.info["undirected_edge_index"] = len( + self.undirected_edges_list + ) + this_undirected_edge = this_directed_edge.make_undirected( + index=len(self.undirected_edges_list), + info={"directed_edge_index": [directed_edge_index]}, + ) + self.undirected_edges[tmp] = [this_undirected_edge] + self.undirected_edges_list.append(this_undirected_edge) + self.nodes[center_index].add_neighbor(neighbor_index, this_directed_edge) + self.directed_edges_list.append(this_directed_edge) + else: + for undirected_edge in self.undirected_edges[tmp]: + if ( + abs(undirected_edge.info["distance"] - distance) < dist_tol + and len(undirected_edge.info["directed_edge_index"]) == 1 + ): + added_dir_edge = self.directed_edges_list[ + undirected_edge.info["directed_edge_index"][0] + ] + if added_dir_edge == this_directed_edge: + this_directed_edge.info[ + "undirected_edge_index" + ] = added_dir_edge.info["undirected_edge_index"] + self.nodes[center_index].add_neighbor( + neighbor_index, this_directed_edge + ) + self.directed_edges_list.append(this_directed_edge) + undirected_edge.info["directed_edge_index"].append( + directed_edge_index + ) + return + this_directed_edge.info["undirected_edge_index"] = len( + self.undirected_edges_list + ) + this_undirected_edge = this_directed_edge.make_undirected( + index=len(self.undirected_edges_list), + info={"directed_edge_index": [directed_edge_index]}, + ) + self.undirected_edges[tmp].append(this_undirected_edge) + self.undirected_edges_list.append(this_undirected_edge) + self.nodes[center_index].add_neighbor(neighbor_index, this_directed_edge) + self.directed_edges_list.append(this_directed_edge) + + def adjacency_list(self) -> tuple[list[list[int]], list[int]]: + """Get the adjacency list + Return: + graph: the adjacency list + [[0, 1], + [0, 2], + ... + [5, 2] + ... ]] + the fist column specifies center/source node, + the second column specifies neighbor/destination node + directed2undirected: + [0, 1, ...] + a list of length = num_directed_edge that specifies + the undirected edge index corresponding to the directed edges + represented in each row in the graph adjacency list. + """ + graph = [edge.nodes for edge in self.directed_edges_list] + directed2undirected = [ + edge.info["undirected_edge_index"] for edge in self.directed_edges_list + ] + return graph, directed2undirected + + def line_graph_adjacency_list(self, cutoff) -> tuple[list[list[int]], list[int]]: + """Get the line graph adjacency list. + + Args: + cutoff (float): a float to indicate the maximum edge length to be included + in constructing the line graph, this is used to decrease computation + complexity + + Return: + line_graph: + [[0, 1, 1, 2, 2], + [0, 1, 1, 4, 23], + [1, 4, 23, 5, 66], + ... ... ] + the fist column specifies node(atom) index at this angle, + the second column specifies 1st undirected edge(left bond) index, + the third column specifies 1st directed edge(left bond) index, + the fourth column specifies 2nd undirected edge(right bond) index, + the fifth column specifies 2nd directed edge(right bond) index,. + undirected2directed: + [32, 45, ...] 
+ a list of length = num_undirected_edge that + maps the undirected edge index to one of its directed edges indices + """ + if len(self.directed_edges_list) != 2 * len(self.undirected_edges_list): + raise ValueError( + f"Error: number of directed edges={len(self.directed_edges_list)} != 2 * number of undirected edges={len(self.undirected_edges_list)}!This indicates directed edges are not complete" + ) + line_graph = [] + undirected2directed = [] + for u_edge in self.undirected_edges_list: + undirected2directed.append(u_edge.info["directed_edge_index"][0]) + if u_edge.info["distance"] > cutoff: + continue + if len(u_edge.info["directed_edge_index"]) != 2: + raise ValueError( + f"Did not find 2 Directed_edges !!!undirected edge {u_edge} has:edge.info['directed_edge_index'] = {u_edge.info['directed_edge_index']}len directed_edges_list = {len(self.directed_edges_list)}len undirected_edges_list = {len(self.undirected_edges_list)}" + ) + for center, dir_edge in zip( + u_edge.nodes, u_edge.info["directed_edge_index"], strict=True + ): + for directed_edges in self.nodes[center].neighbors.values(): + for directed_edge in directed_edges: + if directed_edge.index == dir_edge: + continue + if directed_edge.info["distance"] < cutoff: + line_graph.append( + [ + center, + u_edge.index, + dir_edge, + directed_edge.info["undirected_edge_index"], + directed_edge.index, + ] + ) + return line_graph, undirected2directed + + def undirected2directed(self) -> list[int]: + """The index map from undirected_edge index to one of its directed_edge + index. + """ + return [ + undirected_edge.info["directed_edge_index"][0] + for undirected_edge in self.undirected_edges_list + ] + + def as_dict(self) -> dict: + """Return dictionary serialization of a Graph.""" + return { + "nodes": self.nodes, + "directed_edges": self.directed_edges, + "directed_edges_list": self.directed_edges_list, + "undirected_edges": self.undirected_edges, + "undirected_edges_list": self.undirected_edges_list, + } + + def to(self, filename="graph.json") -> None: + """Save graph dictionary to file.""" + write_json(self.as_dict(), filename) + + def __repr__(self) -> str: + """Return string representation of the Graph.""" + num_nodes = len(self.nodes) + num_directed_edges = len(self.directed_edges_list) + num_undirected_edges = len(self.undirected_edges_list) + return f"Graph(num_nodes={num_nodes!r}, num_directed_edges={num_directed_edges!r}, num_undirected_edges={num_undirected_edges!r})" diff --git a/jointContribution/CHGNet/chgnet/model/__init__.py b/jointContribution/CHGNet/chgnet/model/__init__.py index 8a330beb07..e118faa2e5 100644 --- a/jointContribution/CHGNet/chgnet/model/__init__.py +++ b/jointContribution/CHGNet/chgnet/model/__init__.py @@ -1,6 +1,6 @@ -from __future__ import annotations - -from chgnet.model.dynamics import CHGNetCalculator # noqa -from chgnet.model.dynamics import MolecularDynamics # noqa -from chgnet.model.dynamics import StructOptimizer # noqa -from chgnet.model.model import CHGNet # noqa +from __future__ import annotations + +from chgnet.model.dynamics import CHGNetCalculator # noqa +from chgnet.model.dynamics import MolecularDynamics # noqa +from chgnet.model.dynamics import StructOptimizer # noqa +from chgnet.model.model import CHGNet # noqa diff --git a/jointContribution/CHGNet/chgnet/model/basis.py b/jointContribution/CHGNet/chgnet/model/basis.py index 00702a535d..c0d3b91788 100644 --- a/jointContribution/CHGNet/chgnet/model/basis.py +++ b/jointContribution/CHGNet/chgnet/model/basis.py @@ -1,219 +1,219 @@ -from 
__future__ import annotations - -import numpy as np -import paddle - - -class Fourier(paddle.nn.Layer): - """Fourier Expansion for angle features.""" - - def __init__(self, *, order: int = 5, learnable: bool = False) -> None: - """Initialize the Fourier expansion. - - Args: - order (int): the maximum order, refer to the N in eq 1 in CHGNet paper - Default = 5 - learnable (bool): whether to set the frequencies as learnable parameters - Default = False - """ - super().__init__() - self.order = order - if learnable: - self.frequencies = paddle.base.framework.EagerParamBase.from_tensor( - tensor=paddle.arange(start=1, end=order + 1, dtype="float32"), - trainable=True, - ) - else: - self.register_buffer( - name="frequencies", - tensor=paddle.arange(start=1, end=order + 1, dtype="float32"), - ) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - """Apply Fourier expansion to a feature Tensor.""" - - # result = paddle.zeros(shape=[tuple(x.shape)[0], 1 + 2 * self.order], - # dtype=x.dtype) - result = paddle.ones(shape=[tuple(x.shape)[0], 1], dtype=x.dtype) - result = result / paddle.sqrt(x=paddle.to_tensor(data=[2.0])) - - tmp = paddle.outer(x=x, y=self.frequencies) - # result[:, 1:self.order + 1] = paddle.sin(x=tmp) - # result[:, self.order + 1:] = paddle.cos(x=tmp) - - result = paddle.concat([result, paddle.sin(tmp), paddle.cos(tmp)], axis=1) - - return result / np.sqrt(np.pi) - - -class RadialBessel(paddle.nn.Layer): - """1D Bessel Basis - from: https://github.com/TUM-DAML/gemnet_pytorch/. - """ - - def __init__( - self, - *, - num_radial: int = 9, - cutoff: float = 5, - learnable: bool = False, - smooth_cutoff: int = 5, - ) -> None: - """Initialize the SmoothRBF function. - - Args: - num_radial (int): Controls maximum frequency - Default = 9 - cutoff (float): Cutoff distance in Angstrom. - Default = 5 - learnable (bool): whether to set the frequencies learnable - Default = False - smooth_cutoff (int): smooth cutoff strength - Default = 5 - """ - super().__init__() - self.num_radial = num_radial - self.inv_cutoff = 1 / cutoff - self.norm_const = (2 * self.inv_cutoff) ** 0.5 - if learnable: - self.frequencies = paddle.base.framework.EagerParamBase.from_tensor( - tensor=paddle.to_tensor( - data=np.pi * np.arange(1, self.num_radial + 1, dtype=np.float32), - dtype="float32", - ), - trainable=True, - ) - else: - self.register_buffer( - name="frequencies", - tensor=np.pi - * paddle.arange(start=1, end=self.num_radial + 1, dtype="float32"), - ) - if smooth_cutoff is not None: - self.smooth_cutoff = CutoffPolynomial( - cutoff=cutoff, cutoff_coeff=smooth_cutoff - ) - else: - self.smooth_cutoff = None - - def forward( - self, dist: paddle.Tensor, *, return_smooth_factor: bool = False - ) -> (paddle.Tensor | tuple[paddle.Tensor, paddle.Tensor]): - """Apply Bessel expansion to a feature Tensor. 
- - Args: - dist (Tensor): tensor of distances [n, 1] - return_smooth_factor (bool): whether to return the smooth factor - Default = False - - Returns: - out (Tensor): tensor of Bessel distances [n, dim] - where the expanded dimension will be num_radial - smooth_factor (Tensor): tensor of smooth factors [n, 1] - """ - dist = dist[:, None] - d_scaled = dist * self.inv_cutoff - out = self.norm_const * paddle.sin(x=self.frequencies * d_scaled) / dist - if self.smooth_cutoff is not None: - smooth_factor = self.smooth_cutoff(dist) - out = smooth_factor * out - if return_smooth_factor: - return out, smooth_factor - return out - - -class GaussianExpansion(paddle.nn.Layer): - """Expands the distance by Gaussian basis. - Unit: angstrom. - """ - - def __init__( - self, - min: float = 0, - max: float = 5, - step: float = 0.5, - var: (float | None) = None, - ) -> None: - """Gaussian Expansion - expand a scalar feature to a soft-one-hot feature vector. - - Args: - min (float): minimum Gaussian center value - max (float): maximum Gaussian center value - step (float): Step size between the Gaussian centers - var (float): variance in gaussian filter, default to step - """ - super().__init__() - if min >= max: - raise ValueError(f"min={min!r} must be less than max={max!r}") - if max - min <= step: - raise ValueError( - f"max - min={max - min!r} must be greater than step={step!r}" - ) - self.register_buffer( - name="gaussian_centers", - tensor=paddle.arange(start=min, end=max + step, step=step), - ) - self.var = var or step - if self.var <= 0: - raise ValueError(f"var={var!r} must be positive") - - def expand(self, features: paddle.Tensor) -> paddle.Tensor: - """Apply Gaussian filter to a feature Tensor. - - Args: - features (Tensor): tensor of features [n] - - Returns: - expanded features (Tensor): tensor of Gaussian distances [n, dim] - where the expanded dimension will be (dmax - dmin) / step + 1 - """ - return paddle.exp( - x=-((features.reshape(-1, 1) - self.gaussian_centers) ** 2) / self.var**2 - ) - - -class CutoffPolynomial(paddle.nn.Layer): - """Polynomial soft-cutoff function for atom graph - ref: https://github.com/TUM-DAML/gemnet_pytorch/blob/-/gemnet/model/layers/envelope.py. - """ - - def __init__(self, cutoff: float = 5, cutoff_coeff: float = 5) -> None: - """Initialize the polynomial cutoff function. - - Args: - cutoff (float): cutoff radius (A) in atom graph construction - Default = 5 - cutoff_coeff (float): the strength of soft-Cutoff - 0 will disable the cutoff, returning 1 at every r - for positive numbers > 0, the smaller cutoff_coeff is, the faster this - function decays. Default = 5. - """ - super().__init__() - self.cutoff = cutoff - self.p = cutoff_coeff - self.a = -(self.p + 1) * (self.p + 2) / 2 - self.b = self.p * (self.p + 2) - self.c = -self.p * (self.p + 1) / 2 - - def forward(self, r: paddle.Tensor) -> paddle.Tensor: - """Polynomial cutoff function. 
- - Args: - r (Tensor): radius distance tensor - - Returns: - polynomial cutoff functions: decaying from 1 at r=0 to 0 at r=cutoff - """ - if self.p != 0: - r_scaled = r / self.cutoff - env_val = ( - 1 - + self.a * r_scaled**self.p - + self.b * r_scaled ** (self.p + 1) - + self.c * r_scaled ** (self.p + 2) - ) - return paddle.where( - condition=r_scaled < 1, x=env_val, y=paddle.zeros_like(x=r_scaled) - ) - return paddle.ones(shape=tuple(r.shape), dtype=r.dtype) +from __future__ import annotations + +import numpy as np +import paddle + + +class Fourier(paddle.nn.Layer): + """Fourier Expansion for angle features.""" + + def __init__(self, *, order: int = 5, learnable: bool = False) -> None: + """Initialize the Fourier expansion. + + Args: + order (int): the maximum order, refer to the N in eq 1 in CHGNet paper + Default = 5 + learnable (bool): whether to set the frequencies as learnable parameters + Default = False + """ + super().__init__() + self.order = order + if learnable: + self.frequencies = paddle.base.framework.EagerParamBase.from_tensor( + tensor=paddle.arange(start=1, end=order + 1, dtype="float32"), + trainable=True, + ) + else: + self.register_buffer( + name="frequencies", + tensor=paddle.arange(start=1, end=order + 1, dtype="float32"), + ) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + """Apply Fourier expansion to a feature Tensor.""" + + # result = paddle.zeros(shape=[tuple(x.shape)[0], 1 + 2 * self.order], + # dtype=x.dtype) + result = paddle.ones(shape=[tuple(x.shape)[0], 1], dtype=x.dtype) + result = result / paddle.sqrt(x=paddle.to_tensor(data=[2.0])) + + tmp = paddle.outer(x=x, y=self.frequencies) + # result[:, 1:self.order + 1] = paddle.sin(x=tmp) + # result[:, self.order + 1:] = paddle.cos(x=tmp) + + result = paddle.concat([result, paddle.sin(tmp), paddle.cos(tmp)], axis=1) + + return result / np.sqrt(np.pi) + + +class RadialBessel(paddle.nn.Layer): + """1D Bessel Basis + from: https://github.com/TUM-DAML/gemnet_pytorch/. + """ + + def __init__( + self, + *, + num_radial: int = 9, + cutoff: float = 5, + learnable: bool = False, + smooth_cutoff: int = 5, + ) -> None: + """Initialize the SmoothRBF function. + + Args: + num_radial (int): Controls maximum frequency + Default = 9 + cutoff (float): Cutoff distance in Angstrom. + Default = 5 + learnable (bool): whether to set the frequencies learnable + Default = False + smooth_cutoff (int): smooth cutoff strength + Default = 5 + """ + super().__init__() + self.num_radial = num_radial + self.inv_cutoff = 1 / cutoff + self.norm_const = (2 * self.inv_cutoff) ** 0.5 + if learnable: + self.frequencies = paddle.base.framework.EagerParamBase.from_tensor( + tensor=paddle.to_tensor( + data=np.pi * np.arange(1, self.num_radial + 1, dtype=np.float32), + dtype="float32", + ), + trainable=True, + ) + else: + self.register_buffer( + name="frequencies", + tensor=np.pi + * paddle.arange(start=1, end=self.num_radial + 1, dtype="float32"), + ) + if smooth_cutoff is not None: + self.smooth_cutoff = CutoffPolynomial( + cutoff=cutoff, cutoff_coeff=smooth_cutoff + ) + else: + self.smooth_cutoff = None + + def forward( + self, dist: paddle.Tensor, *, return_smooth_factor: bool = False + ) -> (paddle.Tensor | tuple[paddle.Tensor, paddle.Tensor]): + """Apply Bessel expansion to a feature Tensor. 
+ + Args: + dist (Tensor): tensor of distances [n, 1] + return_smooth_factor (bool): whether to return the smooth factor + Default = False + + Returns: + out (Tensor): tensor of Bessel distances [n, dim] + where the expanded dimension will be num_radial + smooth_factor (Tensor): tensor of smooth factors [n, 1] + """ + dist = dist[:, None] + d_scaled = dist * self.inv_cutoff + out = self.norm_const * paddle.sin(x=self.frequencies * d_scaled) / dist + if self.smooth_cutoff is not None: + smooth_factor = self.smooth_cutoff(dist) + out = smooth_factor * out + if return_smooth_factor: + return out, smooth_factor + return out + + +class GaussianExpansion(paddle.nn.Layer): + """Expands the distance by Gaussian basis. + Unit: angstrom. + """ + + def __init__( + self, + min: float = 0, + max: float = 5, + step: float = 0.5, + var: (float | None) = None, + ) -> None: + """Gaussian Expansion + expand a scalar feature to a soft-one-hot feature vector. + + Args: + min (float): minimum Gaussian center value + max (float): maximum Gaussian center value + step (float): Step size between the Gaussian centers + var (float): variance in gaussian filter, default to step + """ + super().__init__() + if min >= max: + raise ValueError(f"min={min!r} must be less than max={max!r}") + if max - min <= step: + raise ValueError( + f"max - min={max - min!r} must be greater than step={step!r}" + ) + self.register_buffer( + name="gaussian_centers", + tensor=paddle.arange(start=min, end=max + step, step=step), + ) + self.var = var or step + if self.var <= 0: + raise ValueError(f"var={var!r} must be positive") + + def expand(self, features: paddle.Tensor) -> paddle.Tensor: + """Apply Gaussian filter to a feature Tensor. + + Args: + features (Tensor): tensor of features [n] + + Returns: + expanded features (Tensor): tensor of Gaussian distances [n, dim] + where the expanded dimension will be (dmax - dmin) / step + 1 + """ + return paddle.exp( + x=-((features.reshape(-1, 1) - self.gaussian_centers) ** 2) / self.var**2 + ) + + +class CutoffPolynomial(paddle.nn.Layer): + """Polynomial soft-cutoff function for atom graph + ref: https://github.com/TUM-DAML/gemnet_pytorch/blob/-/gemnet/model/layers/envelope.py. + """ + + def __init__(self, cutoff: float = 5, cutoff_coeff: float = 5) -> None: + """Initialize the polynomial cutoff function. + + Args: + cutoff (float): cutoff radius (A) in atom graph construction + Default = 5 + cutoff_coeff (float): the strength of soft-Cutoff + 0 will disable the cutoff, returning 1 at every r + for positive numbers > 0, the smaller cutoff_coeff is, the faster this + function decays. Default = 5. + """ + super().__init__() + self.cutoff = cutoff + self.p = cutoff_coeff + self.a = -(self.p + 1) * (self.p + 2) / 2 + self.b = self.p * (self.p + 2) + self.c = -self.p * (self.p + 1) / 2 + + def forward(self, r: paddle.Tensor) -> paddle.Tensor: + """Polynomial cutoff function. 
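[editor note] A quick shape check for the basis layers added above (a sketch, not part of the diff); the input values are illustrative.

import paddle

from chgnet.model.basis import Fourier, RadialBessel

angles = paddle.to_tensor([0.0, 1.0, 2.5], dtype="float32")  # angle features
dists = paddle.to_tensor([1.2, 2.3, 4.9], dtype="float32")   # bond lengths in Angstrom

fourier = Fourier(order=5)                  # output dim = 1 + 2 * order
rbf = RadialBessel(num_radial=9, cutoff=5)  # output dim = num_radial

print(fourier(angles).shape)  # [3, 11]
print(rbf(dists).shape)       # [3, 9]

out, smooth = rbf(dists, return_smooth_factor=True)
print(smooth.shape)           # [3, 1]; the polynomial cutoff decays to 0 at 5 Angstrom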
+ + Args: + r (Tensor): radius distance tensor + + Returns: + polynomial cutoff functions: decaying from 1 at r=0 to 0 at r=cutoff + """ + if self.p != 0: + r_scaled = r / self.cutoff + env_val = ( + 1 + + self.a * r_scaled**self.p + + self.b * r_scaled ** (self.p + 1) + + self.c * r_scaled ** (self.p + 2) + ) + return paddle.where( + condition=r_scaled < 1, x=env_val, y=paddle.zeros_like(x=r_scaled) + ) + return paddle.ones(shape=tuple(r.shape), dtype=r.dtype) diff --git a/jointContribution/CHGNet/chgnet/model/composition_model.py b/jointContribution/CHGNet/chgnet/model/composition_model.py index 86d542f69c..542b2d755d 100644 --- a/jointContribution/CHGNet/chgnet/model/composition_model.py +++ b/jointContribution/CHGNet/chgnet/model/composition_model.py @@ -1,439 +1,439 @@ -from __future__ import annotations - -import collections -from typing import TYPE_CHECKING - -import numpy as np -import paddle -from chgnet.model.functions import GatedMLP -from chgnet.model.functions import find_activation -from pymatgen.core import Structure - -if TYPE_CHECKING: - from collections.abc import Sequence - from pathlib import Path - - from chgnet.graph.crystalgraph import CrystalGraph - - -class CompositionModel(paddle.nn.Layer): - """A simple FC model that takes in a chemical composition (no structure info) - and outputs energy. - """ - - def __init__( - self, - *, - atom_fea_dim: int = 64, - activation: str = "silu", - is_intensive: bool = True, - max_num_elements: int = 94, - ) -> None: - """Initialize a CompositionModel.""" - super().__init__() - self.is_intensive = is_intensive - self.max_num_elements = max_num_elements - self.fc1 = paddle.nn.Linear( - in_features=max_num_elements, out_features=atom_fea_dim - ) - self.activation = find_activation(activation) - self.gated_mlp = GatedMLP( - input_dim=atom_fea_dim, - output_dim=atom_fea_dim, - hidden_dim=atom_fea_dim, - activation=activation, - ) - self.fc2 = paddle.nn.Linear(in_features=atom_fea_dim, out_features=1) - - def _get_energy(self, composition_feas: paddle.Tensor) -> paddle.Tensor: - """Predict the energy given composition encoding. - - Args: - composition_feas: batched atom feature matrix of shape - [batch_size, total_num_elements]. - - Returns: - prediction associated with each composition [batchsize]. - """ - composition_feas = self.activation(self.fc1(composition_feas)) - composition_feas += self.gated_mlp(composition_feas) - return self.fc2(composition_feas).reshape([-1]) - - def forward(self, graphs: list[CrystalGraph]) -> paddle.Tensor: - """Get the energy of a list of CrystalGraphs as Tensor.""" - composition_feas = self._assemble_graphs(graphs) - return self._get_energy(composition_feas) - - def _assemble_graphs(self, graphs: list[CrystalGraph]) -> paddle.Tensor: - """Assemble a list of graphs into one-hot composition encodings. - - Args: - graphs (list[CrystalGraph]): a list of CrystalGraphs - - Returns: - assembled batch_graph that contains all information for model. - """ - composition_feas = [] - for graph in graphs: - composition_fea = paddle.bincount( - x=graph.atomic_number - 1, minlength=self.max_num_elements - ) - if self.is_intensive: - n_atom = graph.atomic_number.shape[0] - composition_fea = composition_fea / n_atom - composition_feas.append(composition_fea) - return paddle.stack(x=composition_feas, axis=0) - - -class AtomRef(paddle.nn.Layer): - """A linear regression for elemental energy. - From: https://github.com/materialsvirtuallab/m3gnet/. 
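[editor note] The composition encoding used by `_assemble_graphs` in this file reduces to a shifted bincount over atomic numbers; a minimal sketch (not part of the diff) with an illustrative Li2O composition.

import paddle

atomic_number = paddle.to_tensor([3, 3, 8], dtype="int32")  # e.g. Li2O
max_num_elements = 94

# Atomic numbers are shifted by 1 and bin-counted into a fixed 94-element vector.
composition_fea = paddle.bincount(atomic_number - 1, minlength=max_num_elements)
print(composition_fea[2], composition_fea[7])  # 2 Li, 1 O

# Per-atom normalization, as applied when is_intensive=True.
intensive_fea = composition_fea / atomic_number.shape[0]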
- """ - - def __init__( - self, *, is_intensive: bool = True, max_num_elements: int = 94 - ) -> None: - """Initialize an AtomRef model.""" - super().__init__() - self.is_intensive = is_intensive - self.max_num_elements = max_num_elements - self.fc = paddle.nn.Linear( - in_features=max_num_elements, out_features=1, bias_attr=False - ) - self.fitted = False - - def forward(self, graphs: list[CrystalGraph]) -> paddle.Tensor: - """Get the energy of a list of CrystalGraphs. - - Args: - graphs (List(CrystalGraph)): a list of Crystal Graph to compute - - Returns: - energy (tensor) - """ - if not self.fitted: - raise ValueError("composition model needs to be fitted first!") - composition_feas = self._assemble_graphs(graphs) - return self._get_energy(composition_feas) - - def _get_energy(self, composition_feas: paddle.Tensor) -> paddle.Tensor: - """Predict the energy given composition encoding. - - Args: - composition_feas: batched atom feature matrix of shape - [batch_size, total_num_elements]. - - Returns: - prediction associated with each composition [batchsize]. - """ - return self.fc(composition_feas).flatten() - - # .view(-1) - - def fit( - self, - structures_or_graphs: Sequence[Structure | CrystalGraph], - energies: Sequence[float], - ) -> None: - """Fit the model to a list of crystals and energies. - - Args: - structures_or_graphs (list[Structure | CrystalGraph]): Any iterable of - pymatgen structures and/or graphs. - energies (list[float]): Target energies. - """ - num_data = len(energies) - composition_feas = paddle.zeros(shape=[num_data, self.max_num_elements]) - e = paddle.zeros(shape=[num_data]) - for index, (structure, energy) in enumerate( - zip(structures_or_graphs, energies, strict=True) - ): - - if isinstance(structure, Structure): - atomic_number = paddle.to_tensor( - [site.specie.Z for site in structure], dtype="int32" - ) - else: - atomic_number = structure.atomic_number - composition_fea = paddle.bincount( - atomic_number - 1, minlength=self.max_num_elements - ) - if self.is_intensive: - composition_fea = composition_fea / atomic_number.shape[0] - composition_feas[index, :] = composition_fea - e[index] = energy - - # Use numpy for pinv - self.feature_matrix = composition_feas.detach().numpy() - self.energies = e.detach().numpy() - state_dict = collections.OrderedDict() - weight = ( - np.linalg.pinv(self.feature_matrix.T @ self.feature_matrix) - @ self.feature_matrix.T - @ self.energies - ) - state_dict["weight"] = paddle.to_tensor(data=weight).view(94, 1) - self.fc.set_state_dict(state_dict) - self.fitted = True - - def _assemble_graphs(self, graphs: list[CrystalGraph]) -> paddle.Tensor: - """Assemble a list of graphs into one-hot composition encodings - Args: - graphs (list[Tensor]): a list of CrystalGraphs - Returns: - assembled batch_graph that contains all information for model. - """ - composition_feas = [] - for graph in graphs: - if not paddle.all(graph.atomic_number >= 0): - raise ValueError("atomic_number should be non-negative integers.") - composition_fea = paddle.bincount( - graph.atomic_number - 1, minlength=self.max_num_elements - ) - if self.is_intensive: - n_atom = graph.atomic_number.shape[0] - composition_fea = composition_fea / n_atom - composition_feas.append(composition_fea) - return paddle.stack(composition_feas, axis=0).astype("float32") - - def get_site_energies(self, graphs: list[CrystalGraph]) -> list[paddle.Tensor]: - """Predict the site energies given a list of CrystalGraphs. 
- - Args: - graphs (List(CrystalGraph)): a list of Crystal Graph to compute - - Returns: - a list of tensors corresponding to site energies of each graph [batchsize]. - """ - return [ - self.fc.state_dict()["weight"][0, graph.atomic_number - 1] - for graph in graphs - ] - - def initialize_from(self, dataset: str) -> None: - """Initialize pre-fitted weights from a dataset.""" - if dataset in {"MPtrj", "MPtrj_e"}: - self.initialize_from_MPtrj() - elif dataset == "MPF": - self.initialize_from_MPF() - else: - raise NotImplementedError(f"dataset={dataset!r} not supported yet") - - def initialize_from_MPtrj(self) -> None: - """Initialize pre-fitted weights from MPtrj dataset.""" - state_dict = collections.OrderedDict() - state_dict["weight"] = paddle.to_tensor( - data=[ - -3.4431, - -0.1279, - -2.83, - -3.4737, - -7.4946, - -8.2354, - -8.1611, - -8.3861, - -5.7498, - -0.0236, - -1.7406, - -1.6788, - -4.2833, - -6.2002, - -6.1315, - -5.8405, - -3.8795, - -0.0703, - -1.5668, - -3.4451, - -7.0549, - -9.1465, - -9.2594, - -9.3514, - -8.9843, - -8.0228, - -6.4955, - -5.6057, - -3.4002, - -0.9217, - -3.2499, - -4.9164, - -4.781, - -5.0191, - -3.3316, - 0.513, - -1.4043, - -3.2175, - -7.4994, - -9.3816, - -10.4386, - -9.9539, - -7.9555, - -8.544, - -7.3245, - -5.2771, - -1.9014, - -0.4034, - -2.6002, - -4.0054, - -4.1156, - -3.9928, - -2.7003, - 2.217, - -1.9671, - -3.718, - -6.8133, - -7.3502, - -6.0712, - -6.1699, - -5.1471, - -6.1925, - -11.5829, - -15.8841, - -5.9994, - -6.0798, - -5.9513, - -6.04, - -5.9773, - -2.5091, - -6.0767, - -10.6666, - -11.8761, - -11.8491, - -10.7397, - -9.61, - -8.4755, - -6.207, - -3.0337, - 0.4726, - -1.6425, - -3.1295, - -3.3328, - -0.1221, - -0.3448, - -0.4364, - -0.1661, - -0.368, - -4.1869, - -8.4233, - -10.0467, - -12.0953, - -12.5228, - -14.253, - ] - ).view([94, 1]) - self.fc.set_state_dict(state_dict=state_dict) - self.is_intensive = True - self.fitted = True - - def initialize_from_MPF(self) -> None: - """Initialize pre-fitted weights from MPF dataset.""" - state_dict = collections.OrderedDict() - state_dict["weight"] = paddle.to_tensor( - data=[ - -3.4654, - -0.62617, - -3.4622, - -4.7758, - -8.0362, - -8.4038, - -7.7681, - -7.3892, - -4.9472, - -5.4833, - -2.4783, - -2.0202, - -5.1548, - -7.9121, - -6.9135, - -4.6228, - -3.0155, - -2.1285, - -2.3174, - -4.7595, - -8.1742, - -11.421, - -8.9229, - -8.4901, - -8.1664, - -6.5826, - -5.2614, - -4.4841, - -3.2737, - -1.3498, - -3.6264, - -4.6727, - -4.1316, - -3.6755, - -2.803, - 6.4728, - -2.2469, - -4.251, - -10.245, - -11.666, - -11.802, - -8.6551, - -9.3641, - -7.5716, - -5.699, - -4.9716, - -1.8871, - -0.67951, - -2.7488, - -3.7945, - -3.3883, - -2.5588, - -1.9621, - 9.9793, - -2.5566, - -4.8803, - -8.8604, - -9.0537, - -7.9431, - -8.1259, - -6.3212, - -8.3025, - -12.289, - -17.31, - -7.5512, - -8.1959, - -8.3493, - -7.2591, - -8.417, - -3.3873, - -7.6823, - -12.63, - -13.626, - -9.5299, - -11.84, - -9.799, - -7.5561, - -5.469, - -2.6508, - 0.41746, - -2.3255, - -3.483, - -3.1808, - -0.016934, - -0.036191, - -0.010842, - 0.01317, - -0.065371, - -5.4892, - -10.335, - -11.13, - -14.312, - -14.7, - -15.473, - ] - ).view([94, 1]) - self.fc.set_state_dict(state_dict=state_dict) - self.is_intensive = False - self.fitted = True - - def initialize_from_numpy(self, file_name: (str | Path)) -> None: - """Initialize pre-fitted weights from numpy file.""" - atom_ref_np = np.load(file_name) - state_dict = collections.OrderedDict() - state_dict["weight"] = paddle.to_tensor(data=atom_ref_np).view([1, 94]) - 
self.fc.set_state_dict(state_dict=state_dict) - self.is_intensive = False - self.fitted = True +from __future__ import annotations + +import collections +from typing import TYPE_CHECKING + +import numpy as np +import paddle +from chgnet.model.functions import GatedMLP +from chgnet.model.functions import find_activation +from pymatgen.core import Structure + +if TYPE_CHECKING: + from collections.abc import Sequence + from pathlib import Path + + from chgnet.graph.crystalgraph import CrystalGraph + + +class CompositionModel(paddle.nn.Layer): + """A simple FC model that takes in a chemical composition (no structure info) + and outputs energy. + """ + + def __init__( + self, + *, + atom_fea_dim: int = 64, + activation: str = "silu", + is_intensive: bool = True, + max_num_elements: int = 94, + ) -> None: + """Initialize a CompositionModel.""" + super().__init__() + self.is_intensive = is_intensive + self.max_num_elements = max_num_elements + self.fc1 = paddle.nn.Linear( + in_features=max_num_elements, out_features=atom_fea_dim + ) + self.activation = find_activation(activation) + self.gated_mlp = GatedMLP( + input_dim=atom_fea_dim, + output_dim=atom_fea_dim, + hidden_dim=atom_fea_dim, + activation=activation, + ) + self.fc2 = paddle.nn.Linear(in_features=atom_fea_dim, out_features=1) + + def _get_energy(self, composition_feas: paddle.Tensor) -> paddle.Tensor: + """Predict the energy given composition encoding. + + Args: + composition_feas: batched atom feature matrix of shape + [batch_size, total_num_elements]. + + Returns: + prediction associated with each composition [batchsize]. + """ + composition_feas = self.activation(self.fc1(composition_feas)) + composition_feas += self.gated_mlp(composition_feas) + return self.fc2(composition_feas).reshape([-1]) + + def forward(self, graphs: list[CrystalGraph]) -> paddle.Tensor: + """Get the energy of a list of CrystalGraphs as Tensor.""" + composition_feas = self._assemble_graphs(graphs) + return self._get_energy(composition_feas) + + def _assemble_graphs(self, graphs: list[CrystalGraph]) -> paddle.Tensor: + """Assemble a list of graphs into one-hot composition encodings. + + Args: + graphs (list[CrystalGraph]): a list of CrystalGraphs + + Returns: + assembled batch_graph that contains all information for model. + """ + composition_feas = [] + for graph in graphs: + composition_fea = paddle.bincount( + x=graph.atomic_number - 1, minlength=self.max_num_elements + ) + if self.is_intensive: + n_atom = graph.atomic_number.shape[0] + composition_fea = composition_fea / n_atom + composition_feas.append(composition_fea) + return paddle.stack(x=composition_feas, axis=0) + + +class AtomRef(paddle.nn.Layer): + """A linear regression for elemental energy. + From: https://github.com/materialsvirtuallab/m3gnet/. + """ + + def __init__( + self, *, is_intensive: bool = True, max_num_elements: int = 94 + ) -> None: + """Initialize an AtomRef model.""" + super().__init__() + self.is_intensive = is_intensive + self.max_num_elements = max_num_elements + self.fc = paddle.nn.Linear( + in_features=max_num_elements, out_features=1, bias_attr=False + ) + self.fitted = False + + def forward(self, graphs: list[CrystalGraph]) -> paddle.Tensor: + """Get the energy of a list of CrystalGraphs. 
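A rough sketch of the one-hot composition encoding assembled by _assemble_graphs (assumes paddle is importable; the Fe2O3 atomic numbers are just an example):

import paddle

max_num_elements = 94
atomic_number = paddle.to_tensor([26, 26, 8, 8, 8], dtype="int64")  # Fe2O3: two Fe, three O
composition_fea = paddle.bincount(atomic_number - 1, minlength=max_num_elements)
composition_fea = composition_fea.astype("float32") / atomic_number.shape[0]  # intensive normalization
print(float(composition_fea[25]), float(composition_fea[7]))  # 0.4 for Fe (Z=26), 0.6 for O (Z=8)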
+ + Args: + graphs (List(CrystalGraph)): a list of Crystal Graph to compute + + Returns: + energy (tensor) + """ + if not self.fitted: + raise ValueError("composition model needs to be fitted first!") + composition_feas = self._assemble_graphs(graphs) + return self._get_energy(composition_feas) + + def _get_energy(self, composition_feas: paddle.Tensor) -> paddle.Tensor: + """Predict the energy given composition encoding. + + Args: + composition_feas: batched atom feature matrix of shape + [batch_size, total_num_elements]. + + Returns: + prediction associated with each composition [batchsize]. + """ + return self.fc(composition_feas).flatten() + + # .view(-1) + + def fit( + self, + structures_or_graphs: Sequence[Structure | CrystalGraph], + energies: Sequence[float], + ) -> None: + """Fit the model to a list of crystals and energies. + + Args: + structures_or_graphs (list[Structure | CrystalGraph]): Any iterable of + pymatgen structures and/or graphs. + energies (list[float]): Target energies. + """ + num_data = len(energies) + composition_feas = paddle.zeros(shape=[num_data, self.max_num_elements]) + e = paddle.zeros(shape=[num_data]) + for index, (structure, energy) in enumerate( + zip(structures_or_graphs, energies, strict=True) + ): + + if isinstance(structure, Structure): + atomic_number = paddle.to_tensor( + [site.specie.Z for site in structure], dtype="int32" + ) + else: + atomic_number = structure.atomic_number + composition_fea = paddle.bincount( + atomic_number - 1, minlength=self.max_num_elements + ) + if self.is_intensive: + composition_fea = composition_fea / atomic_number.shape[0] + composition_feas[index, :] = composition_fea + e[index] = energy + + # Use numpy for pinv + self.feature_matrix = composition_feas.detach().numpy() + self.energies = e.detach().numpy() + state_dict = collections.OrderedDict() + weight = ( + np.linalg.pinv(self.feature_matrix.T @ self.feature_matrix) + @ self.feature_matrix.T + @ self.energies + ) + state_dict["weight"] = paddle.to_tensor(data=weight).view(94, 1) + self.fc.set_state_dict(state_dict) + self.fitted = True + + def _assemble_graphs(self, graphs: list[CrystalGraph]) -> paddle.Tensor: + """Assemble a list of graphs into one-hot composition encodings + Args: + graphs (list[Tensor]): a list of CrystalGraphs + Returns: + assembled batch_graph that contains all information for model. + """ + composition_feas = [] + for graph in graphs: + if not paddle.all(graph.atomic_number >= 0): + raise ValueError("atomic_number should be non-negative integers.") + composition_fea = paddle.bincount( + graph.atomic_number - 1, minlength=self.max_num_elements + ) + if self.is_intensive: + n_atom = graph.atomic_number.shape[0] + composition_fea = composition_fea / n_atom + composition_feas.append(composition_fea) + return paddle.stack(composition_feas, axis=0).astype("float32") + + def get_site_energies(self, graphs: list[CrystalGraph]) -> list[paddle.Tensor]: + """Predict the site energies given a list of CrystalGraphs. + + Args: + graphs (List(CrystalGraph)): a list of Crystal Graph to compute + + Returns: + a list of tensors corresponding to site energies of each graph [batchsize]. 
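A usage sketch of the pre-fitted elemental references loaded by the initialize_from helpers in this file (the module path and an installed paddle are assumptions; the printed number comes from the MPtrj table shown in this diff):

from chgnet.model.composition_model import AtomRef  # module path assumed from this diff

atom_ref = AtomRef(is_intensive=True)
atom_ref.initialize_from("MPtrj")             # loads the pre-fitted MPtrj weights
weight = atom_ref.fc.state_dict()["weight"]   # shape [94, 1] after initialization
print(float(weight[0, 0]))                    # -3.4431, the reference energy for H (Z=1)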
+ """ + return [ + self.fc.state_dict()["weight"][0, graph.atomic_number - 1] + for graph in graphs + ] + + def initialize_from(self, dataset: str) -> None: + """Initialize pre-fitted weights from a dataset.""" + if dataset in {"MPtrj", "MPtrj_e"}: + self.initialize_from_MPtrj() + elif dataset == "MPF": + self.initialize_from_MPF() + else: + raise NotImplementedError(f"dataset={dataset!r} not supported yet") + + def initialize_from_MPtrj(self) -> None: + """Initialize pre-fitted weights from MPtrj dataset.""" + state_dict = collections.OrderedDict() + state_dict["weight"] = paddle.to_tensor( + data=[ + -3.4431, + -0.1279, + -2.83, + -3.4737, + -7.4946, + -8.2354, + -8.1611, + -8.3861, + -5.7498, + -0.0236, + -1.7406, + -1.6788, + -4.2833, + -6.2002, + -6.1315, + -5.8405, + -3.8795, + -0.0703, + -1.5668, + -3.4451, + -7.0549, + -9.1465, + -9.2594, + -9.3514, + -8.9843, + -8.0228, + -6.4955, + -5.6057, + -3.4002, + -0.9217, + -3.2499, + -4.9164, + -4.781, + -5.0191, + -3.3316, + 0.513, + -1.4043, + -3.2175, + -7.4994, + -9.3816, + -10.4386, + -9.9539, + -7.9555, + -8.544, + -7.3245, + -5.2771, + -1.9014, + -0.4034, + -2.6002, + -4.0054, + -4.1156, + -3.9928, + -2.7003, + 2.217, + -1.9671, + -3.718, + -6.8133, + -7.3502, + -6.0712, + -6.1699, + -5.1471, + -6.1925, + -11.5829, + -15.8841, + -5.9994, + -6.0798, + -5.9513, + -6.04, + -5.9773, + -2.5091, + -6.0767, + -10.6666, + -11.8761, + -11.8491, + -10.7397, + -9.61, + -8.4755, + -6.207, + -3.0337, + 0.4726, + -1.6425, + -3.1295, + -3.3328, + -0.1221, + -0.3448, + -0.4364, + -0.1661, + -0.368, + -4.1869, + -8.4233, + -10.0467, + -12.0953, + -12.5228, + -14.253, + ] + ).view([94, 1]) + self.fc.set_state_dict(state_dict=state_dict) + self.is_intensive = True + self.fitted = True + + def initialize_from_MPF(self) -> None: + """Initialize pre-fitted weights from MPF dataset.""" + state_dict = collections.OrderedDict() + state_dict["weight"] = paddle.to_tensor( + data=[ + -3.4654, + -0.62617, + -3.4622, + -4.7758, + -8.0362, + -8.4038, + -7.7681, + -7.3892, + -4.9472, + -5.4833, + -2.4783, + -2.0202, + -5.1548, + -7.9121, + -6.9135, + -4.6228, + -3.0155, + -2.1285, + -2.3174, + -4.7595, + -8.1742, + -11.421, + -8.9229, + -8.4901, + -8.1664, + -6.5826, + -5.2614, + -4.4841, + -3.2737, + -1.3498, + -3.6264, + -4.6727, + -4.1316, + -3.6755, + -2.803, + 6.4728, + -2.2469, + -4.251, + -10.245, + -11.666, + -11.802, + -8.6551, + -9.3641, + -7.5716, + -5.699, + -4.9716, + -1.8871, + -0.67951, + -2.7488, + -3.7945, + -3.3883, + -2.5588, + -1.9621, + 9.9793, + -2.5566, + -4.8803, + -8.8604, + -9.0537, + -7.9431, + -8.1259, + -6.3212, + -8.3025, + -12.289, + -17.31, + -7.5512, + -8.1959, + -8.3493, + -7.2591, + -8.417, + -3.3873, + -7.6823, + -12.63, + -13.626, + -9.5299, + -11.84, + -9.799, + -7.5561, + -5.469, + -2.6508, + 0.41746, + -2.3255, + -3.483, + -3.1808, + -0.016934, + -0.036191, + -0.010842, + 0.01317, + -0.065371, + -5.4892, + -10.335, + -11.13, + -14.312, + -14.7, + -15.473, + ] + ).view([94, 1]) + self.fc.set_state_dict(state_dict=state_dict) + self.is_intensive = False + self.fitted = True + + def initialize_from_numpy(self, file_name: (str | Path)) -> None: + """Initialize pre-fitted weights from numpy file.""" + atom_ref_np = np.load(file_name) + state_dict = collections.OrderedDict() + state_dict["weight"] = paddle.to_tensor(data=atom_ref_np).view([1, 94]) + self.fc.set_state_dict(state_dict=state_dict) + self.is_intensive = False + self.fitted = True diff --git a/jointContribution/CHGNet/chgnet/model/dynamics.py 
b/jointContribution/CHGNet/chgnet/model/dynamics.py index 8e91d8e6fc..ec0bb46dd9 100644 --- a/jointContribution/CHGNet/chgnet/model/dynamics.py +++ b/jointContribution/CHGNet/chgnet/model/dynamics.py @@ -1,840 +1,840 @@ -from __future__ import annotations - -import contextlib -import inspect -import io -import pickle -import sys -import warnings -from typing import TYPE_CHECKING -from typing import Literal - -import numpy as np -from ase import Atoms -from ase import units -from ase.calculators.calculator import Calculator -from ase.calculators.calculator import all_changes -from ase.calculators.calculator import all_properties -from ase.md.npt import NPT -from ase.md.nptberendsen import Inhomogeneous_NPTBerendsen -from ase.md.nptberendsen import NPTBerendsen -from ase.md.nptberendsen import NVTBerendsen -from ase.md.velocitydistribution import MaxwellBoltzmannDistribution -from ase.md.velocitydistribution import Stationary -from ase.md.verlet import VelocityVerlet -from ase.optimize.bfgs import BFGS -from ase.optimize.bfgslinesearch import BFGSLineSearch -from ase.optimize.fire import FIRE -from ase.optimize.lbfgs import LBFGS -from ase.optimize.lbfgs import LBFGSLineSearch -from ase.optimize.mdmin import MDMin -from ase.optimize.sciopt import SciPyFminBFGS -from ase.optimize.sciopt import SciPyFminCG -from chgnet.model.model import CHGNet -from chgnet.utils import determine_device -from pymatgen.analysis.eos import BirchMurnaghan -from pymatgen.core.structure import Molecule -from pymatgen.core.structure import Structure -from pymatgen.io.ase import AseAtomsAdaptor - -if TYPE_CHECKING: - from ase.io import Trajectory - from ase.optimize.optimize import Optimizer - from typing_extensions import Self -OPTIMIZERS = { - "FIRE": FIRE, - "BFGS": BFGS, - "LBFGS": LBFGS, - "LBFGSLineSearch": LBFGSLineSearch, - "MDMin": MDMin, - "SciPyFminCG": SciPyFminCG, - "SciPyFminBFGS": SciPyFminBFGS, - "BFGSLineSearch": BFGSLineSearch, -} - - -class CHGNetCalculator(Calculator): - """CHGNet Calculator for ASE applications.""" - - implemented_properties = "energy", "forces", "stress", "magmoms" - - def __init__( - self, - model: (CHGNet | None) = None, - *, - use_device: (str | None) = None, - check_cuda_mem: bool = False, - stress_weight: (float | None) = 1 / 160.21766208, - on_isolated_atoms: Literal["ignore", "warn", "error"] = "warn", - **kwargs, - ) -> None: - """Provide a CHGNet instance to calculate various atomic properties using ASE. - - Args: - model (CHGNet): instance of a chgnet model. If set to None, - the pretrained CHGNet is loaded. - Default = None - use_device (str, optional): The device to be used for predictions, - either "cpu", "cuda", or "mps". If not specified, the default device is - automatically selected based on the available options. - Default = None - check_cuda_mem (bool): Whether to use cuda with most available memory - Default = False - stress_weight (float): the conversion factor to convert GPa to eV/A^3. - Default = 1/160.21 - on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures - with isolated atoms. - Default = 'warn' - **kwargs: Passed to the Calculator parent class. 
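A minimal single-point sketch using the calculator above (assumes ase and the chgnet package in this diff are installed; "POSCAR" is a hypothetical input file):

from ase.io import read
from chgnet.model.dynamics import CHGNetCalculator

atoms = read("POSCAR")                           # hypothetical structure file
atoms.calc = CHGNetCalculator(use_device="cpu")  # loads the pretrained CHGNet when model=None
print("energy (eV):", atoms.get_potential_energy())
print("forces shape:", atoms.get_forces().shape)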
- """ - super().__init__(**kwargs) - device = determine_device(use_device=use_device, check_cuda_mem=check_cuda_mem) - self.device = device - if model is None: - self.model = CHGNet.load(verbose=False, use_device=self.device) - else: - self.model = model.to(self.device) - self.model.graph_converter.set_isolated_atom_response(on_isolated_atoms) - self.stress_weight = stress_weight - print(f"CHGNet will run on {self.device}") - - @classmethod - def from_file(cls, path: str, use_device: (str | None) = None, **kwargs) -> Self: - """Load a user's CHGNet model and initialize the Calculator.""" - return cls(model=CHGNet.from_file(path), use_device=use_device, **kwargs) - - @property - def version(self) -> (str | None): - """The version of CHGNet.""" - return self.model.version - - @property - def n_params(self) -> int: - """The number of parameters in CHGNet.""" - return self.model.n_params - - def calculate( - self, - atoms: (Atoms | None) = None, - properties: (list | None) = None, - system_changes: (list | None) = None, - ) -> None: - """Calculate various properties of the atoms using CHGNet. - - Args: - atoms (Atoms | None): The atoms object to calculate properties for. - properties (list | None): The properties to calculate. - Default is all properties. - system_changes (list | None): The changes made to the system. - Default is all changes. - """ - properties = properties or all_properties - system_changes = system_changes or all_changes - super().calculate( - atoms=atoms, properties=properties, system_changes=system_changes - ) - structure = AseAtomsAdaptor.get_structure(atoms) - graph = self.model.graph_converter(structure) - model_prediction = self.model.predict_graph( - graph.to(self.device), task="efsm", return_crystal_feas=True - ) - factor = 1 if not self.model.is_intensive else structure.composition.num_atoms - self.results.update( - energy=model_prediction["e"] * factor, - forces=model_prediction["f"], - free_energy=model_prediction["e"] * factor, - magmoms=model_prediction["m"], - stress=model_prediction["s"] * self.stress_weight, - crystal_fea=model_prediction["crystal_fea"], - ) - - -class StructOptimizer: - """Wrapper class for structural relaxation.""" - - def __init__( - self, - model: (CHGNet | CHGNetCalculator | None) = None, - optimizer_class: (Optimizer | str | None) = "FIRE", - use_device: (str | None) = None, - stress_weight: float = 1 / 160.21766208, - on_isolated_atoms: Literal["ignore", "warn", "error"] = "warn", - ) -> None: - """Provide a trained CHGNet model and an optimizer to relax crystal structures. - - Args: - model (CHGNet): instance of a CHGNet model or CHGNetCalculator. - If set to None, the pretrained CHGNet is loaded. - Default = None - optimizer_class (Optimizer,str): choose optimizer from ASE. - Default = "FIRE" - use_device (str, optional): The device to be used for predictions, - either "cpu", "cuda", or "mps". If not specified, the default device is - automatically selected based on the available options. - Default = None - stress_weight (float): the conversion factor to convert GPa to eV/A^3. - Default = 1/160.21 - on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures - with isolated atoms. - Default = 'warn' - """ - if isinstance(optimizer_class, str): - if optimizer_class in OPTIMIZERS: - optimizer_class = OPTIMIZERS[optimizer_class] - else: - raise ValueError( - f"Optimizer instance not found. 
Select from {list(OPTIMIZERS)}" - ) - self.optimizer_class: Optimizer = optimizer_class - if isinstance(model, CHGNetCalculator): - self.calculator = model - else: - self.calculator = CHGNetCalculator( - model=model, - stress_weight=stress_weight, - use_device=use_device, - on_isolated_atoms=on_isolated_atoms, - ) - - @property - def version(self) -> str: - """The version of CHGNet.""" - return self.calculator.model.version - - @property - def n_params(self) -> int: - """The number of parameters in CHGNet.""" - return self.calculator.model.n_params - - def relax( - self, - atoms: (Structure | Atoms), - *, - fmax: (float | None) = 0.1, - steps: (int | None) = 500, - relax_cell: (bool | None) = True, - ase_filter: (str | None) = "FrechetCellFilter", - save_path: (str | None) = None, - loginterval: (int | None) = 1, - crystal_feas_save_path: (str | None) = None, - verbose: bool = True, - assign_magmoms: bool = True, - **kwargs, - ) -> dict[str, Structure | TrajectoryObserver]: - """Relax the Structure/Atoms until maximum force is smaller than fmax. - - Args: - atoms (Structure | Atoms): A Structure or Atoms object to relax. - fmax (float | None): The maximum force tolerance for relaxation. - Default = 0.1 - steps (int | None): The maximum number of steps for relaxation. - Default = 500 - relax_cell (bool | None): Whether to relax the cell as well. - Default = True - ase_filter (str | ase.filters.Filter): The filter to apply to the atoms - object for relaxation. Default = FrechetCellFilter - Default used to be ExpCellFilter which was removed due to bug reported - in https://gitlab.com/ase/ase/-/issues/1321 and fixed in - https://gitlab.com/ase/ase/-/merge_requests/3024. - save_path (str | None): The path to save the trajectory. - Default = None - loginterval (int | None): Interval for logging trajectory and crystal - features. Default = 1 - crystal_feas_save_path (str | None): Path to save crystal feature vectors - which are logged at a loginterval rage - Default = None - verbose (bool): Whether to print the output of the ASE optimizer. - Default = True - assign_magmoms (bool): Whether to assign magnetic moments to the final - structure. Default = True - **kwargs: Additional parameters for the optimizer. - - Returns: - dict[str, Structure | TrajectoryObserver]: - A dictionary with 'final_structure' and 'trajectory'. - """ - from ase import filters - from ase.filters import Filter - - valid_filter_names = [ - name - for name, cls in inspect.getmembers(filters, inspect.isclass) - if issubclass(cls, Filter) - ] - if isinstance(ase_filter, str): - if ase_filter in valid_filter_names: - ase_filter = getattr(filters, ase_filter) - else: - raise ValueError( - f"Invalid ase_filter={ase_filter!r}, must be one of {valid_filter_names}. 
" - ) - if isinstance(atoms, Structure): - atoms = AseAtomsAdaptor().get_atoms(atoms) - atoms.calc = self.calculator - stream = sys.stdout if verbose else io.StringIO() - with contextlib.redirect_stdout(stream): - obs = TrajectoryObserver(atoms) - if crystal_feas_save_path: - cry_obs = CrystalFeasObserver(atoms) - if relax_cell: - atoms = ase_filter(atoms) - optimizer: Optimizer = self.optimizer_class(atoms, **kwargs) - optimizer.attach(obs, interval=loginterval) - if crystal_feas_save_path: - optimizer.attach(cry_obs, interval=loginterval) - optimizer.run(fmax=fmax, steps=steps) - obs() - if save_path is not None: - obs.save(save_path) - if crystal_feas_save_path: - cry_obs.save(crystal_feas_save_path) - if isinstance(atoms, Filter): - atoms = atoms.atoms - struct = AseAtomsAdaptor.get_structure(atoms) - if assign_magmoms: - for key in struct.site_properties: - struct.remove_site_property(property_name=key) - struct.add_site_property( - "magmom", [float(magmom) for magmom in atoms.get_magnetic_moments()] - ) - return {"final_structure": struct, "trajectory": obs} - - -class TrajectoryObserver: - """Trajectory observer is a hook in the relaxation process that saves the - intermediate structures. - """ - - def __init__(self, atoms: Atoms) -> None: - """Create a TrajectoryObserver from an Atoms object. - - Args: - atoms (Atoms): the structure to observe. - """ - self.atoms = atoms - self.energies: list[float] = [] - self.forces: list[np.ndarray] = [] - self.stresses: list[np.ndarray] = [] - self.magmoms: list[np.ndarray] = [] - self.atom_positions: list[np.ndarray] = [] - self.cells: list[np.ndarray] = [] - - def __call__(self) -> None: - """The logic for saving the properties of an Atoms during the relaxation.""" - self.energies.append(self.compute_energy()) - self.forces.append(self.atoms.get_forces()) - self.stresses.append(self.atoms.get_stress()) - self.magmoms.append(self.atoms.get_magnetic_moments()) - self.atom_positions.append(self.atoms.get_positions()) - self.cells.append(self.atoms.get_cell()[:]) - - def __len__(self) -> int: - """The number of steps in the trajectory.""" - return len(self.energies) - - def compute_energy(self) -> float: - """Calculate the potential energy. - - Returns: - energy (float): the potential energy. - """ - return self.atoms.get_potential_energy() - - def save(self, filename: str) -> None: - """Save the trajectory to file. - - Args: - filename (str): filename to save the trajectory - """ - out_pkl = { - "energy": self.energies, - "forces": self.forces, - "stresses": self.stresses, - "magmoms": self.magmoms, - "atom_positions": self.atom_positions, - "cell": self.cells, - "atomic_number": self.atoms.get_atomic_numbers(), - } - with open(filename, "wb") as file: - pickle.dump(out_pkl, file) - - -class CrystalFeasObserver: - """CrystalFeasObserver is a hook in the relaxation and MD process that saves the - intermediate crystal feature structures. 
- """ - - def __init__(self, atoms: Atoms) -> None: - """Create a CrystalFeasObserver from an Atoms object.""" - self.atoms = atoms - self.crystal_feature_vectors: list[np.ndarray] = [] - - def __call__(self) -> None: - """Record Atoms crystal feature vectors after an MD/relaxation step.""" - self.crystal_feature_vectors.append(self.atoms._calc.results["crystal_fea"]) - - def __len__(self) -> int: - """Number of recorded steps.""" - return len(self.crystal_feature_vectors) - - def save(self, filename: str) -> None: - """Save the crystal feature vectors to filename in pickle format.""" - out_pkl = {"crystal_feas": self.crystal_feature_vectors} - with open(filename, "wb") as file: - pickle.dump(out_pkl, file) - - -class MolecularDynamics: - """Molecular dynamics class.""" - - def __init__( - self, - atoms: (Atoms | Structure), - *, - model: (CHGNet | CHGNetCalculator | None) = None, - ensemble: str = "nvt", - thermostat: str = "Berendsen_inhomogeneous", - temperature: int = 300, - starting_temperature: (int | None) = None, - timestep: float = 2.0, - pressure: float = 0.000101325, - taut: (float | None) = None, - taup: (float | None) = None, - bulk_modulus: (float | None) = None, - trajectory: (str | Trajectory | None) = None, - logfile: (str | None) = None, - loginterval: int = 1, - crystal_feas_logfile: (str | None) = None, - append_trajectory: bool = False, - on_isolated_atoms: Literal["ignore", "warn", "error"] = "warn", - use_device: (str | None) = None, - ) -> None: - """Initialize the MD class. - - Args: - atoms (Atoms): atoms to run the MD - model (CHGNet): instance of a CHGNet model or CHGNetCalculator. - If set to None, the pretrained CHGNet is loaded. - Default = None - ensemble (str): choose from 'nve', 'nvt', 'npt' - Default = "nvt" - thermostat (str): Thermostat to use - choose from "Nose-Hoover", "Berendsen", "Berendsen_inhomogeneous" - Default = "Berendsen_inhomogeneous" - temperature (float): temperature for MD simulation, in K - Default = 300 - starting_temperature (float): starting temperature of MD simulation, in K - if set as None, the MD starts with the momentum carried by ase.Atoms - if input is a pymatgen.core.Structure, the MD starts at 0K - Default = None - timestep (float): time step in fs - Default = 2 - pressure (float): pressure in GPa - Can be 3x3 or 6 np.array if thermostat is "Nose-Hoover" - Default = 1.01325e-4 GPa = 1 atm - taut (float): time constant for temperature coupling in fs. - The temperature will be raised to target temperature in approximate - 10 * taut time. - Default = 100 * timestep - taup (float): time constant for pressure coupling in fs - Default = 1000 * timestep - bulk_modulus (float): bulk modulus of the material in GPa. - Used in NPT ensemble for the barostat pressure coupling. - The DFT bulk modulus can be found for most materials at - https://next-gen.materialsproject.org/ - - In NPT ensemble, the effective damping time for pressure is multiplied - by compressibility. In LAMMPS, Bulk modulus is defaulted to 10 - see: https://docs.lammps.org/fix_press_berendsen.html - and: https://gitlab.com/ase/ase/-/blob/master/ase/md/nptberendsen.py - - If bulk modulus is not provided here, it will be calculated by CHGNet - through Birch Murnaghan equation of state (EOS). - Note the EOS fitting can fail because of non-parabolic potential - energy surface, which is common with soft system like liquid and gas. 
- In such case, user should provide an input bulk modulus for better - barostat coupling, otherwise a guessed bulk modulus = 2 GPa will be used - (water's bulk modulus) - - Default = None - trajectory (str or Trajectory): Attach trajectory object - Default = None - logfile (str): open this file for recording MD outputs - Default = None - loginterval (int): write to log file every interval steps - Default = 1 - crystal_feas_logfile (str): open this file for recording crystal features - during MD. Default = None - append_trajectory (bool): Whether to append to prev trajectory. - If false, previous trajectory gets overwritten - Default = False - on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures - with isolated atoms. - Default = 'warn' - use_device (str): the device for the MD run - Default = None - """ - self.ensemble = ensemble - self.thermostat = thermostat - if isinstance(atoms, Structure | Molecule): - atoms = AseAtomsAdaptor().get_atoms(atoms) - if starting_temperature is not None: - MaxwellBoltzmannDistribution( - atoms, temperature_K=starting_temperature, force_temp=True - ) - Stationary(atoms) - self.atoms = atoms - if isinstance(model, CHGNetCalculator): - self.atoms.calc = model - else: - self.atoms.calc = CHGNetCalculator( - model=model, use_device=use_device, on_isolated_atoms=on_isolated_atoms - ) - if taut is None: - taut = 100 * timestep - if taup is None: - taup = 1000 * timestep - if ensemble.lower() == "nve": - """ - VelocityVerlet (constant N, V, E) molecular dynamics. - - Note: it's recommended to use smaller timestep for NVE compared to other - ensembles, since the VelocityVerlet algorithm assumes a strict conservative - force field. - """ - self.dyn = VelocityVerlet( - atoms=self.atoms, - timestep=timestep * units.fs, - trajectory=trajectory, - logfile=logfile, - loginterval=loginterval, - append_trajectory=append_trajectory, - ) - print("NVE-MD created") - elif ensemble.lower() == "nvt": - """ - Constant volume/temperature molecular dynamics. - """ - if thermostat.lower() == "nose-hoover": - """ - Nose-hoover (constant N, V, T) molecular dynamics. - ASE implementation currently only supports upper triangular lattice - """ - self.upper_triangular_cell() - self.dyn = NPT( - atoms=self.atoms, - timestep=timestep * units.fs, - temperature_K=temperature, - externalstress=pressure * units.GPa, - ttime=taut * units.fs, - pfactor=None, - trajectory=trajectory, - logfile=logfile, - loginterval=loginterval, - append_trajectory=append_trajectory, - ) - print("NVT-Nose-Hoover MD created") - elif thermostat.lower().startswith("berendsen"): - """ - Berendsen (constant N, V, T) molecular dynamics. - """ - self.dyn = NVTBerendsen( - atoms=self.atoms, - timestep=timestep * units.fs, - temperature_K=temperature, - taut=taut * units.fs, - trajectory=trajectory, - logfile=logfile, - loginterval=loginterval, - append_trajectory=append_trajectory, - ) - print("NVT-Berendsen-MD created") - else: - raise ValueError( - "Thermostat not supported, choose in 'Nose-Hoover', 'Berendsen', 'Berendsen_inhomogeneous'" - ) - elif ensemble.lower() == "npt": - """ - Constant pressure/temperature molecular dynamics. 
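The barostat setup that follows converts GPa to atomic units (eV/A^3) with the factor 160.2176. A small worked example (the 100 GPa bulk modulus is illustrative):

bulk_modulus_gpa = 100.0                        # illustrative stiff material
bulk_modulus_au = bulk_modulus_gpa / 160.2176   # ~0.624 eV/A^3
compressibility_au = 1 / bulk_modulus_au        # ~1.602 A^3/eV, fed to the Berendsen barostats
print(bulk_modulus_au, compressibility_au)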
- """ - if bulk_modulus is not None: - bulk_modulus_au = bulk_modulus / 160.2176 - compressibility_au = 1 / bulk_modulus_au - else: - try: - eos = EquationOfState(model=self.atoms.calc) - eos.fit(atoms=atoms, steps=500, fmax=0.1, verbose=False) - bulk_modulus = eos.get_bulk_modulus(unit="GPa") - bulk_modulus_au = eos.get_bulk_modulus(unit="eV/A^3") - compressibility_au = eos.get_compressibility(unit="A^3/eV") - print( - f"Completed bulk modulus calculation: k = {bulk_modulus:.3}GPa, {bulk_modulus_au:.3}eV/A^3" - ) - except Exception: - bulk_modulus_au = 2 / 160.2176 - compressibility_au = 1 / bulk_modulus_au - warnings.warn( - "Warning!!! Equation of State fitting failed, setting bulk modulus to 2 GPa. NPT simulation can proceed with incorrect pressure relaxation time.User input for bulk modulus is recommended.", - stacklevel=2, - ) - self.bulk_modulus = bulk_modulus - if thermostat.lower() == "nose-hoover": - """ - Combined Nose-Hoover and Parrinello-Rahman dynamics, creating an - NPT (or N,stress,T) ensemble. - see: https://gitlab.com/ase/ase/-/blob/master/ase/md/npt.py - ASE implementation currently only supports upper triangular lattice - """ - self.upper_triangular_cell() - ptime = taup * units.fs - self.dyn = NPT( - atoms=self.atoms, - timestep=timestep * units.fs, - temperature_K=temperature, - externalstress=pressure * units.GPa, - ttime=taut * units.fs, - pfactor=bulk_modulus * units.GPa * ptime * ptime, - trajectory=trajectory, - logfile=logfile, - loginterval=loginterval, - append_trajectory=append_trajectory, - ) - print("NPT-Nose-Hoover MD created") - elif thermostat.lower() == "berendsen_inhomogeneous": - """ - Inhomogeneous_NPTBerendsen thermo/barostat - This is a more flexible scheme that fixes three angles of the unit - cell but allows three lattice parameter to change independently. - see: https://gitlab.com/ase/ase/-/blob/master/ase/md/nptberendsen.py - """ - self.dyn = Inhomogeneous_NPTBerendsen( - atoms=self.atoms, - timestep=timestep * units.fs, - temperature_K=temperature, - pressure_au=pressure * units.GPa, - taut=taut * units.fs, - taup=taup * units.fs, - compressibility_au=compressibility_au, - trajectory=trajectory, - logfile=logfile, - loginterval=loginterval, - ) - print("NPT-Berendsen-inhomogeneous-MD created") - elif thermostat.lower() == "npt_berendsen": - """ - This is a similar scheme to the Inhomogeneous_NPTBerendsen. - This is a less flexible scheme that fixes the shape of the - cell - three angles are fixed and the ratios between the three - lattice constants. - see: https://gitlab.com/ase/ase/-/blob/master/ase/md/nptberendsen.py - """ - self.dyn = NPTBerendsen( - atoms=self.atoms, - timestep=timestep * units.fs, - temperature_K=temperature, - pressure_au=pressure * units.GPa, - taut=taut * units.fs, - taup=taup * units.fs, - compressibility_au=compressibility_au, - trajectory=trajectory, - logfile=logfile, - loginterval=loginterval, - append_trajectory=append_trajectory, - ) - print("NPT-Berendsen-MD created") - else: - raise ValueError( - "Thermostat not supported, choose in 'Nose-Hoover', 'Berendsen', 'Berendsen_inhomogeneous'" - ) - self.trajectory = trajectory - self.logfile = logfile - self.loginterval = loginterval - self.timestep = timestep - self.crystal_feas_logfile = crystal_feas_logfile - - def run(self, steps: int) -> None: - """Thin wrapper of ase MD run. 
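A minimal NVT run with the MolecularDynamics wrapper above (pretrained CHGNet; "POSCAR" and the output file names are placeholders):

from pymatgen.core import Structure
from chgnet.model.dynamics import MolecularDynamics

structure = Structure.from_file("POSCAR")   # hypothetical input file
md = MolecularDynamics(
    structure,
    ensemble="nvt",
    thermostat="Berendsen_inhomogeneous",
    temperature=300,        # K
    timestep=2.0,           # fs
    trajectory="md.traj",
    logfile="md.log",
    loginterval=10,
)
md.run(500)                 # 500 steps = 1 ps at a 2 fs timestep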
- - Args: - steps (int): number of MD steps - """ - if self.crystal_feas_logfile: - obs = CrystalFeasObserver(self.atoms) - self.dyn.attach(obs, interval=self.loginterval) - self.dyn.run(steps) - if self.crystal_feas_logfile: - obs.save(self.crystal_feas_logfile) - - def set_atoms(self, atoms: Atoms) -> None: - """Set new atoms to run MD. - - Args: - atoms (Atoms): new atoms for running MD - """ - calculator = self.atoms.calc - self.atoms = atoms - self.dyn.atoms = atoms - self.dyn.atoms.calc = calculator - - def upper_triangular_cell(self, *, verbose: (bool | None) = False) -> None: - """Transform to upper-triangular cell. - ASE Nose-Hoover implementation only supports upper-triangular cell - while ASE's canonical description is lower-triangular cell. - - Args: - verbose (bool): Whether to notify user about upper-triangular cell - transformation. Default = False - """ - if not NPT._isuppertriangular(self.atoms.get_cell()): - a, b, c, alpha, beta, gamma = self.atoms.cell.cellpar() - angles = np.radians((alpha, beta, gamma)) - sin_a, sin_b, _sin_g = np.sin(angles) - cos_a, cos_b, cos_g = np.cos(angles) - cos_p = (cos_g - cos_a * cos_b) / (sin_a * sin_b) - cos_p = np.clip(cos_p, -1, 1) - sin_p = (1 - cos_p**2) ** 0.5 - new_basis = [ - (a * sin_b * sin_p, a * sin_b * cos_p, a * cos_b), - (0, b * sin_a, b * cos_a), - (0, 0, c), - ] - self.atoms.set_cell(new_basis, scale_atoms=True) - if verbose: - print("Transformed to upper triangular unit cell.", flush=True) - - -class EquationOfState: - """Class to calculate equation of state.""" - - def __init__( - self, - model: (CHGNet | CHGNetCalculator | None) = None, - optimizer_class: (Optimizer | str | None) = "FIRE", - use_device: (str | None) = None, - stress_weight: float = 1 / 160.21766208, - on_isolated_atoms: Literal["ignore", "warn", "error"] = "error", - ) -> None: - """Initialize a structure optimizer object for calculation of bulk modulus. - - Args: - model (CHGNet): instance of a CHGNet model or CHGNetCalculator. - If set to None, the pretrained CHGNet is loaded. - Default = None - optimizer_class (Optimizer,str): choose optimizer from ASE. - Default = "FIRE" - use_device (str, optional): The device to be used for predictions, - either "cpu", "cuda", or "mps". If not specified, the default device is - automatically selected based on the available options. - Default = None - stress_weight (float): the conversion factor to convert GPa to eV/A^3. - Default = 1/160.21 - on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures - with isolated atoms. - Default = 'error' - """ - self.relaxer = StructOptimizer( - model=model, - optimizer_class=optimizer_class, - use_device=use_device, - stress_weight=stress_weight, - on_isolated_atoms=on_isolated_atoms, - ) - self.fitted = False - - def fit( - self, - atoms: (Structure | Atoms), - *, - n_points: int = 11, - fmax: (float | None) = 0.1, - steps: (int | None) = 500, - verbose: (bool | None) = False, - **kwargs, - ) -> None: - """Relax the Structure/Atoms and fit the Birch-Murnaghan equation of state. - - Args: - atoms (Structure | Atoms): A Structure or Atoms object to relax. - n_points (int): Number of structures used in fitting the equation of states - fmax (float | None): The maximum force tolerance for relaxation. - Default = 0.1 - steps (int | None): The maximum number of steps for relaxation. - Default = 500 - verbose (bool): Whether to print the output of the ASE optimizer. - Default = False - **kwargs: Additional parameters for the optimizer. 
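A sketch of the bulk-modulus workflow provided by EquationOfState above (pretrained CHGNet; the input file is hypothetical):

from pymatgen.core import Structure
from chgnet.model.dynamics import EquationOfState

structure = Structure.from_file("POSCAR")   # hypothetical input file
eos = EquationOfState()                     # wraps StructOptimizer with the pretrained model
eos.fit(structure, n_points=11, fmax=0.1, steps=500)
print("B0 =", eos.get_bulk_modulus(unit="GPa"), "GPa")
print("beta =", eos.get_compressibility(unit="GPa^-1"), "GPa^-1")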
- - Returns: - Bulk Modulus (float) - """ - if isinstance(atoms, Atoms): - atoms = AseAtomsAdaptor.get_structure(atoms) - primitive_cell = atoms.get_primitive_structure() - local_minima = self.relaxer.relax( - primitive_cell, - relax_cell=True, - fmax=fmax, - steps=steps, - verbose=verbose, - **kwargs, - ) - volumes, energies = [], [] - for idx in np.linspace(-0.1, 0.1, n_points): - structure_strained = local_minima["final_structure"].copy() - structure_strained.apply_strain([idx, idx, idx]) - result = self.relaxer.relax( - structure_strained, - relax_cell=False, - fmax=fmax, - steps=steps, - verbose=verbose, - **kwargs, - ) - volumes.append(result["final_structure"].volume) - energies.append(result["trajectory"].energies[-1]) - self.bm = BirchMurnaghan(volumes=volumes, energies=energies) - self.bm.fit() - self.fitted = True - - def get_bulk_modulus(self, unit: str = "eV/A^3") -> float: - """Get the bulk modulus of from the fitted Birch-Murnaghan equation of state. - - Args: - unit (str): The unit of bulk modulus. Can be "eV/A^3" or "GPa" - Default = "eV/A^3" - - Returns: - Bulk Modulus (float) - """ - if self.fitted is False: - raise ValueError( - "Equation of state needs to be fitted first through self.fit()" - ) - if unit == "eV/A^3": - return self.bm.b0 - if unit == "GPa": - return self.bm.b0_GPa - raise NotImplementedError("unit has to be eV/A^3 or GPa") - - def get_compressibility(self, unit: str = "A^3/eV") -> float: - """Get the bulk modulus of from the fitted Birch-Murnaghan equation of state. - - Args: - unit (str): The unit of bulk modulus. Can be "A^3/eV", - "GPa^-1" "Pa^-1" or "m^2/N" - Default = "A^3/eV" - - Returns: - Bulk Modulus (float) - """ - if self.fitted is False: - raise ValueError( - "Equation of state needs to be fitted first through self.fit()" - ) - if unit == "A^3/eV": - return 1 / self.bm.b0 - if unit == "GPa^-1": - return 1 / self.bm.b0_GPa - if unit in {"Pa^-1", "m^2/N"}: - return 1 / (self.bm.b0_GPa * 1000000000.0) - raise NotImplementedError("unit has to be one of A^3/eV, GPa^-1 Pa^-1 or m^2/N") +from __future__ import annotations + +import contextlib +import inspect +import io +import pickle +import sys +import warnings +from typing import TYPE_CHECKING +from typing import Literal + +import numpy as np +from ase import Atoms +from ase import units +from ase.calculators.calculator import Calculator +from ase.calculators.calculator import all_changes +from ase.calculators.calculator import all_properties +from ase.md.npt import NPT +from ase.md.nptberendsen import Inhomogeneous_NPTBerendsen +from ase.md.nptberendsen import NPTBerendsen +from ase.md.nptberendsen import NVTBerendsen +from ase.md.velocitydistribution import MaxwellBoltzmannDistribution +from ase.md.velocitydistribution import Stationary +from ase.md.verlet import VelocityVerlet +from ase.optimize.bfgs import BFGS +from ase.optimize.bfgslinesearch import BFGSLineSearch +from ase.optimize.fire import FIRE +from ase.optimize.lbfgs import LBFGS +from ase.optimize.lbfgs import LBFGSLineSearch +from ase.optimize.mdmin import MDMin +from ase.optimize.sciopt import SciPyFminBFGS +from ase.optimize.sciopt import SciPyFminCG +from chgnet.model.model import CHGNet +from chgnet.utils import determine_device +from pymatgen.analysis.eos import BirchMurnaghan +from pymatgen.core.structure import Molecule +from pymatgen.core.structure import Structure +from pymatgen.io.ase import AseAtomsAdaptor + +if TYPE_CHECKING: + from ase.io import Trajectory + from ase.optimize.optimize import Optimizer + from 
typing_extensions import Self +OPTIMIZERS = { + "FIRE": FIRE, + "BFGS": BFGS, + "LBFGS": LBFGS, + "LBFGSLineSearch": LBFGSLineSearch, + "MDMin": MDMin, + "SciPyFminCG": SciPyFminCG, + "SciPyFminBFGS": SciPyFminBFGS, + "BFGSLineSearch": BFGSLineSearch, +} + + +class CHGNetCalculator(Calculator): + """CHGNet Calculator for ASE applications.""" + + implemented_properties = "energy", "forces", "stress", "magmoms" + + def __init__( + self, + model: (CHGNet | None) = None, + *, + use_device: (str | None) = None, + check_cuda_mem: bool = False, + stress_weight: (float | None) = 1 / 160.21766208, + on_isolated_atoms: Literal["ignore", "warn", "error"] = "warn", + **kwargs, + ) -> None: + """Provide a CHGNet instance to calculate various atomic properties using ASE. + + Args: + model (CHGNet): instance of a chgnet model. If set to None, + the pretrained CHGNet is loaded. + Default = None + use_device (str, optional): The device to be used for predictions, + either "cpu", "cuda", or "mps". If not specified, the default device is + automatically selected based on the available options. + Default = None + check_cuda_mem (bool): Whether to use cuda with most available memory + Default = False + stress_weight (float): the conversion factor to convert GPa to eV/A^3. + Default = 1/160.21 + on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures + with isolated atoms. + Default = 'warn' + **kwargs: Passed to the Calculator parent class. + """ + super().__init__(**kwargs) + device = determine_device(use_device=use_device, check_cuda_mem=check_cuda_mem) + self.device = device + if model is None: + self.model = CHGNet.load(verbose=False, use_device=self.device) + else: + self.model = model.to(self.device) + self.model.graph_converter.set_isolated_atom_response(on_isolated_atoms) + self.stress_weight = stress_weight + print(f"CHGNet will run on {self.device}") + + @classmethod + def from_file(cls, path: str, use_device: (str | None) = None, **kwargs) -> Self: + """Load a user's CHGNet model and initialize the Calculator.""" + return cls(model=CHGNet.from_file(path), use_device=use_device, **kwargs) + + @property + def version(self) -> (str | None): + """The version of CHGNet.""" + return self.model.version + + @property + def n_params(self) -> int: + """The number of parameters in CHGNet.""" + return self.model.n_params + + def calculate( + self, + atoms: (Atoms | None) = None, + properties: (list | None) = None, + system_changes: (list | None) = None, + ) -> None: + """Calculate various properties of the atoms using CHGNet. + + Args: + atoms (Atoms | None): The atoms object to calculate properties for. + properties (list | None): The properties to calculate. + Default is all properties. + system_changes (list | None): The changes made to the system. + Default is all changes. 
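To run with a user-trained checkpoint instead of the pretrained weights, the from_file constructor above can be used; a sketch with a hypothetical checkpoint path:

from chgnet.model.dynamics import CHGNetCalculator

calc = CHGNetCalculator.from_file("path/to/my_chgnet_checkpoint", use_device="cpu")  # hypothetical path
print(calc.version, calc.n_params)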
+ """ + properties = properties or all_properties + system_changes = system_changes or all_changes + super().calculate( + atoms=atoms, properties=properties, system_changes=system_changes + ) + structure = AseAtomsAdaptor.get_structure(atoms) + graph = self.model.graph_converter(structure) + model_prediction = self.model.predict_graph( + graph.to(self.device), task="efsm", return_crystal_feas=True + ) + factor = 1 if not self.model.is_intensive else structure.composition.num_atoms + self.results.update( + energy=model_prediction["e"] * factor, + forces=model_prediction["f"], + free_energy=model_prediction["e"] * factor, + magmoms=model_prediction["m"], + stress=model_prediction["s"] * self.stress_weight, + crystal_fea=model_prediction["crystal_fea"], + ) + + +class StructOptimizer: + """Wrapper class for structural relaxation.""" + + def __init__( + self, + model: (CHGNet | CHGNetCalculator | None) = None, + optimizer_class: (Optimizer | str | None) = "FIRE", + use_device: (str | None) = None, + stress_weight: float = 1 / 160.21766208, + on_isolated_atoms: Literal["ignore", "warn", "error"] = "warn", + ) -> None: + """Provide a trained CHGNet model and an optimizer to relax crystal structures. + + Args: + model (CHGNet): instance of a CHGNet model or CHGNetCalculator. + If set to None, the pretrained CHGNet is loaded. + Default = None + optimizer_class (Optimizer,str): choose optimizer from ASE. + Default = "FIRE" + use_device (str, optional): The device to be used for predictions, + either "cpu", "cuda", or "mps". If not specified, the default device is + automatically selected based on the available options. + Default = None + stress_weight (float): the conversion factor to convert GPa to eV/A^3. + Default = 1/160.21 + on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures + with isolated atoms. + Default = 'warn' + """ + if isinstance(optimizer_class, str): + if optimizer_class in OPTIMIZERS: + optimizer_class = OPTIMIZERS[optimizer_class] + else: + raise ValueError( + f"Optimizer instance not found. Select from {list(OPTIMIZERS)}" + ) + self.optimizer_class: Optimizer = optimizer_class + if isinstance(model, CHGNetCalculator): + self.calculator = model + else: + self.calculator = CHGNetCalculator( + model=model, + stress_weight=stress_weight, + use_device=use_device, + on_isolated_atoms=on_isolated_atoms, + ) + + @property + def version(self) -> str: + """The version of CHGNet.""" + return self.calculator.model.version + + @property + def n_params(self) -> int: + """The number of parameters in CHGNet.""" + return self.calculator.model.n_params + + def relax( + self, + atoms: (Structure | Atoms), + *, + fmax: (float | None) = 0.1, + steps: (int | None) = 500, + relax_cell: (bool | None) = True, + ase_filter: (str | None) = "FrechetCellFilter", + save_path: (str | None) = None, + loginterval: (int | None) = 1, + crystal_feas_save_path: (str | None) = None, + verbose: bool = True, + assign_magmoms: bool = True, + **kwargs, + ) -> dict[str, Structure | TrajectoryObserver]: + """Relax the Structure/Atoms until maximum force is smaller than fmax. + + Args: + atoms (Structure | Atoms): A Structure or Atoms object to relax. + fmax (float | None): The maximum force tolerance for relaxation. + Default = 0.1 + steps (int | None): The maximum number of steps for relaxation. + Default = 500 + relax_cell (bool | None): Whether to relax the cell as well. + Default = True + ase_filter (str | ase.filters.Filter): The filter to apply to the atoms + object for relaxation. 
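A relaxation sketch for the relax method above (pretrained CHGNet, FIRE optimizer by default; the input file is hypothetical):

from pymatgen.core import Structure
from chgnet.model.dynamics import StructOptimizer

structure = Structure.from_file("POSCAR")       # hypothetical input file
relaxer = StructOptimizer()
result = relaxer.relax(structure, fmax=0.1, steps=500, relax_cell=True)
final = result["final_structure"]               # pymatgen Structure with a "magmom" site property
traj = result["trajectory"]                     # TrajectoryObserver with energies, forces, stresses, ...
print(len(traj), "steps, final energy:", traj.energies[-1])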
Default = FrechetCellFilter + Default used to be ExpCellFilter which was removed due to bug reported + in https://gitlab.com/ase/ase/-/issues/1321 and fixed in + https://gitlab.com/ase/ase/-/merge_requests/3024. + save_path (str | None): The path to save the trajectory. + Default = None + loginterval (int | None): Interval for logging trajectory and crystal + features. Default = 1 + crystal_feas_save_path (str | None): Path to save crystal feature vectors + which are logged at a loginterval rage + Default = None + verbose (bool): Whether to print the output of the ASE optimizer. + Default = True + assign_magmoms (bool): Whether to assign magnetic moments to the final + structure. Default = True + **kwargs: Additional parameters for the optimizer. + + Returns: + dict[str, Structure | TrajectoryObserver]: + A dictionary with 'final_structure' and 'trajectory'. + """ + from ase import filters + from ase.filters import Filter + + valid_filter_names = [ + name + for name, cls in inspect.getmembers(filters, inspect.isclass) + if issubclass(cls, Filter) + ] + if isinstance(ase_filter, str): + if ase_filter in valid_filter_names: + ase_filter = getattr(filters, ase_filter) + else: + raise ValueError( + f"Invalid ase_filter={ase_filter!r}, must be one of {valid_filter_names}. " + ) + if isinstance(atoms, Structure): + atoms = AseAtomsAdaptor().get_atoms(atoms) + atoms.calc = self.calculator + stream = sys.stdout if verbose else io.StringIO() + with contextlib.redirect_stdout(stream): + obs = TrajectoryObserver(atoms) + if crystal_feas_save_path: + cry_obs = CrystalFeasObserver(atoms) + if relax_cell: + atoms = ase_filter(atoms) + optimizer: Optimizer = self.optimizer_class(atoms, **kwargs) + optimizer.attach(obs, interval=loginterval) + if crystal_feas_save_path: + optimizer.attach(cry_obs, interval=loginterval) + optimizer.run(fmax=fmax, steps=steps) + obs() + if save_path is not None: + obs.save(save_path) + if crystal_feas_save_path: + cry_obs.save(crystal_feas_save_path) + if isinstance(atoms, Filter): + atoms = atoms.atoms + struct = AseAtomsAdaptor.get_structure(atoms) + if assign_magmoms: + for key in struct.site_properties: + struct.remove_site_property(property_name=key) + struct.add_site_property( + "magmom", [float(magmom) for magmom in atoms.get_magnetic_moments()] + ) + return {"final_structure": struct, "trajectory": obs} + + +class TrajectoryObserver: + """Trajectory observer is a hook in the relaxation process that saves the + intermediate structures. + """ + + def __init__(self, atoms: Atoms) -> None: + """Create a TrajectoryObserver from an Atoms object. + + Args: + atoms (Atoms): the structure to observe. + """ + self.atoms = atoms + self.energies: list[float] = [] + self.forces: list[np.ndarray] = [] + self.stresses: list[np.ndarray] = [] + self.magmoms: list[np.ndarray] = [] + self.atom_positions: list[np.ndarray] = [] + self.cells: list[np.ndarray] = [] + + def __call__(self) -> None: + """The logic for saving the properties of an Atoms during the relaxation.""" + self.energies.append(self.compute_energy()) + self.forces.append(self.atoms.get_forces()) + self.stresses.append(self.atoms.get_stress()) + self.magmoms.append(self.atoms.get_magnetic_moments()) + self.atom_positions.append(self.atoms.get_positions()) + self.cells.append(self.atoms.get_cell()[:]) + + def __len__(self) -> int: + """The number of steps in the trajectory.""" + return len(self.energies) + + def compute_energy(self) -> float: + """Calculate the potential energy. 
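The trajectory pickle written by TrajectoryObserver.save (and by relax(..., save_path=...)) can be inspected directly; a sketch with a hypothetical file name:

import pickle

with open("relax_traj.pkl", "rb") as file:      # hypothetical file name
    traj = pickle.load(file)

# keys written by TrajectoryObserver.save: "energy", "forces", "stresses",
# "magmoms", "atom_positions", "cell", "atomic_number"
print("steps:", len(traj["energy"]))
print("last-step max |force|:", abs(traj["forces"][-1]).max())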
+ + Returns: + energy (float): the potential energy. + """ + return self.atoms.get_potential_energy() + + def save(self, filename: str) -> None: + """Save the trajectory to file. + + Args: + filename (str): filename to save the trajectory + """ + out_pkl = { + "energy": self.energies, + "forces": self.forces, + "stresses": self.stresses, + "magmoms": self.magmoms, + "atom_positions": self.atom_positions, + "cell": self.cells, + "atomic_number": self.atoms.get_atomic_numbers(), + } + with open(filename, "wb") as file: + pickle.dump(out_pkl, file) + + +class CrystalFeasObserver: + """CrystalFeasObserver is a hook in the relaxation and MD process that saves the + intermediate crystal feature structures. + """ + + def __init__(self, atoms: Atoms) -> None: + """Create a CrystalFeasObserver from an Atoms object.""" + self.atoms = atoms + self.crystal_feature_vectors: list[np.ndarray] = [] + + def __call__(self) -> None: + """Record Atoms crystal feature vectors after an MD/relaxation step.""" + self.crystal_feature_vectors.append(self.atoms._calc.results["crystal_fea"]) + + def __len__(self) -> int: + """Number of recorded steps.""" + return len(self.crystal_feature_vectors) + + def save(self, filename: str) -> None: + """Save the crystal feature vectors to filename in pickle format.""" + out_pkl = {"crystal_feas": self.crystal_feature_vectors} + with open(filename, "wb") as file: + pickle.dump(out_pkl, file) + + +class MolecularDynamics: + """Molecular dynamics class.""" + + def __init__( + self, + atoms: (Atoms | Structure), + *, + model: (CHGNet | CHGNetCalculator | None) = None, + ensemble: str = "nvt", + thermostat: str = "Berendsen_inhomogeneous", + temperature: int = 300, + starting_temperature: (int | None) = None, + timestep: float = 2.0, + pressure: float = 0.000101325, + taut: (float | None) = None, + taup: (float | None) = None, + bulk_modulus: (float | None) = None, + trajectory: (str | Trajectory | None) = None, + logfile: (str | None) = None, + loginterval: int = 1, + crystal_feas_logfile: (str | None) = None, + append_trajectory: bool = False, + on_isolated_atoms: Literal["ignore", "warn", "error"] = "warn", + use_device: (str | None) = None, + ) -> None: + """Initialize the MD class. + + Args: + atoms (Atoms): atoms to run the MD + model (CHGNet): instance of a CHGNet model or CHGNetCalculator. + If set to None, the pretrained CHGNet is loaded. + Default = None + ensemble (str): choose from 'nve', 'nvt', 'npt' + Default = "nvt" + thermostat (str): Thermostat to use + choose from "Nose-Hoover", "Berendsen", "Berendsen_inhomogeneous" + Default = "Berendsen_inhomogeneous" + temperature (float): temperature for MD simulation, in K + Default = 300 + starting_temperature (float): starting temperature of MD simulation, in K + if set as None, the MD starts with the momentum carried by ase.Atoms + if input is a pymatgen.core.Structure, the MD starts at 0K + Default = None + timestep (float): time step in fs + Default = 2 + pressure (float): pressure in GPa + Can be 3x3 or 6 np.array if thermostat is "Nose-Hoover" + Default = 1.01325e-4 GPa = 1 atm + taut (float): time constant for temperature coupling in fs. + The temperature will be raised to target temperature in approximate + 10 * taut time. + Default = 100 * timestep + taup (float): time constant for pressure coupling in fs + Default = 1000 * timestep + bulk_modulus (float): bulk modulus of the material in GPa. + Used in NPT ensemble for the barostat pressure coupling. 
+ The DFT bulk modulus can be found for most materials at + https://next-gen.materialsproject.org/ + + In NPT ensemble, the effective damping time for pressure is multiplied + by compressibility. In LAMMPS, Bulk modulus is defaulted to 10 + see: https://docs.lammps.org/fix_press_berendsen.html + and: https://gitlab.com/ase/ase/-/blob/master/ase/md/nptberendsen.py + + If bulk modulus is not provided here, it will be calculated by CHGNet + through Birch Murnaghan equation of state (EOS). + Note the EOS fitting can fail because of non-parabolic potential + energy surface, which is common with soft system like liquid and gas. + In such case, user should provide an input bulk modulus for better + barostat coupling, otherwise a guessed bulk modulus = 2 GPa will be used + (water's bulk modulus) + + Default = None + trajectory (str or Trajectory): Attach trajectory object + Default = None + logfile (str): open this file for recording MD outputs + Default = None + loginterval (int): write to log file every interval steps + Default = 1 + crystal_feas_logfile (str): open this file for recording crystal features + during MD. Default = None + append_trajectory (bool): Whether to append to prev trajectory. + If false, previous trajectory gets overwritten + Default = False + on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures + with isolated atoms. + Default = 'warn' + use_device (str): the device for the MD run + Default = None + """ + self.ensemble = ensemble + self.thermostat = thermostat + if isinstance(atoms, Structure | Molecule): + atoms = AseAtomsAdaptor().get_atoms(atoms) + if starting_temperature is not None: + MaxwellBoltzmannDistribution( + atoms, temperature_K=starting_temperature, force_temp=True + ) + Stationary(atoms) + self.atoms = atoms + if isinstance(model, CHGNetCalculator): + self.atoms.calc = model + else: + self.atoms.calc = CHGNetCalculator( + model=model, use_device=use_device, on_isolated_atoms=on_isolated_atoms + ) + if taut is None: + taut = 100 * timestep + if taup is None: + taup = 1000 * timestep + if ensemble.lower() == "nve": + """ + VelocityVerlet (constant N, V, E) molecular dynamics. + + Note: it's recommended to use smaller timestep for NVE compared to other + ensembles, since the VelocityVerlet algorithm assumes a strict conservative + force field. + """ + self.dyn = VelocityVerlet( + atoms=self.atoms, + timestep=timestep * units.fs, + trajectory=trajectory, + logfile=logfile, + loginterval=loginterval, + append_trajectory=append_trajectory, + ) + print("NVE-MD created") + elif ensemble.lower() == "nvt": + """ + Constant volume/temperature molecular dynamics. + """ + if thermostat.lower() == "nose-hoover": + """ + Nose-hoover (constant N, V, T) molecular dynamics. + ASE implementation currently only supports upper triangular lattice + """ + self.upper_triangular_cell() + self.dyn = NPT( + atoms=self.atoms, + timestep=timestep * units.fs, + temperature_K=temperature, + externalstress=pressure * units.GPa, + ttime=taut * units.fs, + pfactor=None, + trajectory=trajectory, + logfile=logfile, + loginterval=loginterval, + append_trajectory=append_trajectory, + ) + print("NVT-Nose-Hoover MD created") + elif thermostat.lower().startswith("berendsen"): + """ + Berendsen (constant N, V, T) molecular dynamics. 
+ """ + self.dyn = NVTBerendsen( + atoms=self.atoms, + timestep=timestep * units.fs, + temperature_K=temperature, + taut=taut * units.fs, + trajectory=trajectory, + logfile=logfile, + loginterval=loginterval, + append_trajectory=append_trajectory, + ) + print("NVT-Berendsen-MD created") + else: + raise ValueError( + "Thermostat not supported, choose in 'Nose-Hoover', 'Berendsen', 'Berendsen_inhomogeneous'" + ) + elif ensemble.lower() == "npt": + """ + Constant pressure/temperature molecular dynamics. + """ + if bulk_modulus is not None: + bulk_modulus_au = bulk_modulus / 160.2176 + compressibility_au = 1 / bulk_modulus_au + else: + try: + eos = EquationOfState(model=self.atoms.calc) + eos.fit(atoms=atoms, steps=500, fmax=0.1, verbose=False) + bulk_modulus = eos.get_bulk_modulus(unit="GPa") + bulk_modulus_au = eos.get_bulk_modulus(unit="eV/A^3") + compressibility_au = eos.get_compressibility(unit="A^3/eV") + print( + f"Completed bulk modulus calculation: k = {bulk_modulus:.3}GPa, {bulk_modulus_au:.3}eV/A^3" + ) + except Exception: + bulk_modulus_au = 2 / 160.2176 + compressibility_au = 1 / bulk_modulus_au + warnings.warn( + "Warning!!! Equation of State fitting failed, setting bulk modulus to 2 GPa. NPT simulation can proceed with incorrect pressure relaxation time.User input for bulk modulus is recommended.", + stacklevel=2, + ) + self.bulk_modulus = bulk_modulus + if thermostat.lower() == "nose-hoover": + """ + Combined Nose-Hoover and Parrinello-Rahman dynamics, creating an + NPT (or N,stress,T) ensemble. + see: https://gitlab.com/ase/ase/-/blob/master/ase/md/npt.py + ASE implementation currently only supports upper triangular lattice + """ + self.upper_triangular_cell() + ptime = taup * units.fs + self.dyn = NPT( + atoms=self.atoms, + timestep=timestep * units.fs, + temperature_K=temperature, + externalstress=pressure * units.GPa, + ttime=taut * units.fs, + pfactor=bulk_modulus * units.GPa * ptime * ptime, + trajectory=trajectory, + logfile=logfile, + loginterval=loginterval, + append_trajectory=append_trajectory, + ) + print("NPT-Nose-Hoover MD created") + elif thermostat.lower() == "berendsen_inhomogeneous": + """ + Inhomogeneous_NPTBerendsen thermo/barostat + This is a more flexible scheme that fixes three angles of the unit + cell but allows three lattice parameter to change independently. + see: https://gitlab.com/ase/ase/-/blob/master/ase/md/nptberendsen.py + """ + self.dyn = Inhomogeneous_NPTBerendsen( + atoms=self.atoms, + timestep=timestep * units.fs, + temperature_K=temperature, + pressure_au=pressure * units.GPa, + taut=taut * units.fs, + taup=taup * units.fs, + compressibility_au=compressibility_au, + trajectory=trajectory, + logfile=logfile, + loginterval=loginterval, + ) + print("NPT-Berendsen-inhomogeneous-MD created") + elif thermostat.lower() == "npt_berendsen": + """ + This is a similar scheme to the Inhomogeneous_NPTBerendsen. + This is a less flexible scheme that fixes the shape of the + cell - three angles are fixed and the ratios between the three + lattice constants. 
+ see: https://gitlab.com/ase/ase/-/blob/master/ase/md/nptberendsen.py + """ + self.dyn = NPTBerendsen( + atoms=self.atoms, + timestep=timestep * units.fs, + temperature_K=temperature, + pressure_au=pressure * units.GPa, + taut=taut * units.fs, + taup=taup * units.fs, + compressibility_au=compressibility_au, + trajectory=trajectory, + logfile=logfile, + loginterval=loginterval, + append_trajectory=append_trajectory, + ) + print("NPT-Berendsen-MD created") + else: + raise ValueError( + "Thermostat not supported, choose in 'Nose-Hoover', 'Berendsen', 'Berendsen_inhomogeneous'" + ) + self.trajectory = trajectory + self.logfile = logfile + self.loginterval = loginterval + self.timestep = timestep + self.crystal_feas_logfile = crystal_feas_logfile + + def run(self, steps: int) -> None: + """Thin wrapper of ase MD run. + + Args: + steps (int): number of MD steps + """ + if self.crystal_feas_logfile: + obs = CrystalFeasObserver(self.atoms) + self.dyn.attach(obs, interval=self.loginterval) + self.dyn.run(steps) + if self.crystal_feas_logfile: + obs.save(self.crystal_feas_logfile) + + def set_atoms(self, atoms: Atoms) -> None: + """Set new atoms to run MD. + + Args: + atoms (Atoms): new atoms for running MD + """ + calculator = self.atoms.calc + self.atoms = atoms + self.dyn.atoms = atoms + self.dyn.atoms.calc = calculator + + def upper_triangular_cell(self, *, verbose: (bool | None) = False) -> None: + """Transform to upper-triangular cell. + ASE Nose-Hoover implementation only supports upper-triangular cell + while ASE's canonical description is lower-triangular cell. + + Args: + verbose (bool): Whether to notify user about upper-triangular cell + transformation. Default = False + """ + if not NPT._isuppertriangular(self.atoms.get_cell()): + a, b, c, alpha, beta, gamma = self.atoms.cell.cellpar() + angles = np.radians((alpha, beta, gamma)) + sin_a, sin_b, _sin_g = np.sin(angles) + cos_a, cos_b, cos_g = np.cos(angles) + cos_p = (cos_g - cos_a * cos_b) / (sin_a * sin_b) + cos_p = np.clip(cos_p, -1, 1) + sin_p = (1 - cos_p**2) ** 0.5 + new_basis = [ + (a * sin_b * sin_p, a * sin_b * cos_p, a * cos_b), + (0, b * sin_a, b * cos_a), + (0, 0, c), + ] + self.atoms.set_cell(new_basis, scale_atoms=True) + if verbose: + print("Transformed to upper triangular unit cell.", flush=True) + + +class EquationOfState: + """Class to calculate equation of state.""" + + def __init__( + self, + model: (CHGNet | CHGNetCalculator | None) = None, + optimizer_class: (Optimizer | str | None) = "FIRE", + use_device: (str | None) = None, + stress_weight: float = 1 / 160.21766208, + on_isolated_atoms: Literal["ignore", "warn", "error"] = "error", + ) -> None: + """Initialize a structure optimizer object for calculation of bulk modulus. + + Args: + model (CHGNet): instance of a CHGNet model or CHGNetCalculator. + If set to None, the pretrained CHGNet is loaded. + Default = None + optimizer_class (Optimizer,str): choose optimizer from ASE. + Default = "FIRE" + use_device (str, optional): The device to be used for predictions, + either "cpu", "cuda", or "mps". If not specified, the default device is + automatically selected based on the available options. + Default = None + stress_weight (float): the conversion factor to convert GPa to eV/A^3. + Default = 1/160.21 + on_isolated_atoms ('ignore' | 'warn' | 'error'): how to handle Structures + with isolated atoms. 
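# --- A standalone sketch of the cell transformation done by upper_triangular_cell()
# above: rebuild an upper-triangular lattice matrix from the cell parameters
# (a, b, c, alpha, beta, gamma in degrees). It mirrors the formula used in the
# method; the cubic-cell check at the end is only an illustration.
import numpy as np

def upper_triangular_basis(a, b, c, alpha, beta, gamma):
    sin_a, sin_b, _ = np.sin(np.radians((alpha, beta, gamma)))
    cos_a, cos_b, cos_g = np.cos(np.radians((alpha, beta, gamma)))
    cos_p = np.clip((cos_g - cos_a * cos_b) / (sin_a * sin_b), -1, 1)
    sin_p = (1 - cos_p**2) ** 0.5
    return np.array(
        [
            (a * sin_b * sin_p, a * sin_b * cos_p, a * cos_b),
            (0.0, b * sin_a, b * cos_a),
            (0.0, 0.0, c),
        ]
    )

# A cubic cell stays diagonal:
print(upper_triangular_basis(4.0, 4.0, 4.0, 90, 90, 90).round(6))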
+ Default = 'error' + """ + self.relaxer = StructOptimizer( + model=model, + optimizer_class=optimizer_class, + use_device=use_device, + stress_weight=stress_weight, + on_isolated_atoms=on_isolated_atoms, + ) + self.fitted = False + + def fit( + self, + atoms: (Structure | Atoms), + *, + n_points: int = 11, + fmax: (float | None) = 0.1, + steps: (int | None) = 500, + verbose: (bool | None) = False, + **kwargs, + ) -> None: + """Relax the Structure/Atoms and fit the Birch-Murnaghan equation of state. + + Args: + atoms (Structure | Atoms): A Structure or Atoms object to relax. + n_points (int): Number of structures used in fitting the equation of states + fmax (float | None): The maximum force tolerance for relaxation. + Default = 0.1 + steps (int | None): The maximum number of steps for relaxation. + Default = 500 + verbose (bool): Whether to print the output of the ASE optimizer. + Default = False + **kwargs: Additional parameters for the optimizer. + + Returns: + Bulk Modulus (float) + """ + if isinstance(atoms, Atoms): + atoms = AseAtomsAdaptor.get_structure(atoms) + primitive_cell = atoms.get_primitive_structure() + local_minima = self.relaxer.relax( + primitive_cell, + relax_cell=True, + fmax=fmax, + steps=steps, + verbose=verbose, + **kwargs, + ) + volumes, energies = [], [] + for idx in np.linspace(-0.1, 0.1, n_points): + structure_strained = local_minima["final_structure"].copy() + structure_strained.apply_strain([idx, idx, idx]) + result = self.relaxer.relax( + structure_strained, + relax_cell=False, + fmax=fmax, + steps=steps, + verbose=verbose, + **kwargs, + ) + volumes.append(result["final_structure"].volume) + energies.append(result["trajectory"].energies[-1]) + self.bm = BirchMurnaghan(volumes=volumes, energies=energies) + self.bm.fit() + self.fitted = True + + def get_bulk_modulus(self, unit: str = "eV/A^3") -> float: + """Get the bulk modulus of from the fitted Birch-Murnaghan equation of state. + + Args: + unit (str): The unit of bulk modulus. Can be "eV/A^3" or "GPa" + Default = "eV/A^3" + + Returns: + Bulk Modulus (float) + """ + if self.fitted is False: + raise ValueError( + "Equation of state needs to be fitted first through self.fit()" + ) + if unit == "eV/A^3": + return self.bm.b0 + if unit == "GPa": + return self.bm.b0_GPa + raise NotImplementedError("unit has to be eV/A^3 or GPa") + + def get_compressibility(self, unit: str = "A^3/eV") -> float: + """Get the bulk modulus of from the fitted Birch-Murnaghan equation of state. + + Args: + unit (str): The unit of bulk modulus. 
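# --- A quick sketch of the sampling used by EquationOfState.fit() above: an
# isotropic strain eps applied to all three lattice vectors scales the volume as
# V = V0 * (1 + eps)**3, so the default 11-point scan from -0.1 to +0.1 spans
# roughly -27% to +33% in volume around the relaxed cell. v0 is an example value.
import numpy as np

v0 = 100.0  # relaxed cell volume in A^3 (illustrative)
strains = np.linspace(-0.1, 0.1, 11)
volumes = v0 * (1 + strains) ** 3
print(volumes.min(), volumes.max())  # ~72.9 ... ~133.1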
Can be "A^3/eV", + "GPa^-1" "Pa^-1" or "m^2/N" + Default = "A^3/eV" + + Returns: + Bulk Modulus (float) + """ + if self.fitted is False: + raise ValueError( + "Equation of state needs to be fitted first through self.fit()" + ) + if unit == "A^3/eV": + return 1 / self.bm.b0 + if unit == "GPa^-1": + return 1 / self.bm.b0_GPa + if unit in {"Pa^-1", "m^2/N"}: + return 1 / (self.bm.b0_GPa * 1000000000.0) + raise NotImplementedError("unit has to be one of A^3/eV, GPa^-1 Pa^-1 or m^2/N") diff --git a/jointContribution/CHGNet/chgnet/model/encoders.py b/jointContribution/CHGNet/chgnet/model/encoders.py index 23222b0134..bb78218961 100644 --- a/jointContribution/CHGNet/chgnet/model/encoders.py +++ b/jointContribution/CHGNet/chgnet/model/encoders.py @@ -1,142 +1,142 @@ -from __future__ import annotations - -import paddle -from chgnet.model.basis import Fourier -from chgnet.model.basis import RadialBessel - - -class AtomEmbedding(paddle.nn.Layer): - """Encode an atom by its atomic number using a learnable embedding layer.""" - - def __init__(self, atom_feature_dim: int, max_num_elements: int = 94) -> None: - """Initialize the Atom featurizer. - - Args: - atom_feature_dim (int): dimension of atomic embedding. - max_num_elements (int): maximum number of elements in the dataset. - Default = 94 - """ - super().__init__() - self.embedding = paddle.nn.Embedding( - num_embeddings=max_num_elements, embedding_dim=atom_feature_dim - ) - - def forward(self, atomic_numbers: paddle.Tensor) -> paddle.Tensor: - """Convert the structure to a atom embedding tensor. - - Args: - atomic_numbers (Tensor): [n_atom, 1]. - - Returns: - atom_fea (Tensor): atom embeddings [n_atom, atom_feature_dim]. - """ - return self.embedding(atomic_numbers) - - -class BondEncoder(paddle.nn.Layer): - """Encode a chemical bond given the positions of two atoms using Gaussian - distance. - """ - - def __init__( - self, - *, - atom_graph_cutoff: float = 5, - bond_graph_cutoff: float = 3, - num_radial: int = 9, - cutoff_coeff: int = 5, - learnable: bool = False, - ) -> None: - """Initialize the bond encoder. - - Args: - atom_graph_cutoff (float): The cutoff for constructing AtomGraph default = 5 - bond_graph_cutoff (float): The cutoff for constructing BondGraph default = 3 - num_radial (int): The number of radial component. Default = 9 - cutoff_coeff (int): Strength for graph cutoff smoothness. Default = 5 - learnable(bool): Whether the frequency in rbf expansion is learnable. - Default = False - """ - super().__init__() - self.rbf_expansion_ag = RadialBessel( - num_radial=num_radial, - cutoff=atom_graph_cutoff, - smooth_cutoff=cutoff_coeff, - learnable=learnable, - ) - self.rbf_expansion_bg = RadialBessel( - num_radial=num_radial, - cutoff=bond_graph_cutoff, - smooth_cutoff=cutoff_coeff, - learnable=learnable, - ) - - def forward( - self, - center: paddle.Tensor, - neighbor: paddle.Tensor, - undirected2directed: paddle.Tensor, - image: paddle.Tensor, - lattice: paddle.Tensor, - ) -> tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: - """Compute the pairwise distance between 2 3d coordinates. 
- - Args: - center (Tensor): 3d cartesian coordinates of center atoms [n_bond, 3] - neighbor (Tensor): 3d cartesian coordinates of neighbor atoms [n_bond, 3] - undirected2directed (Tensor): mapping from undirected bond to one of its - directed bond [n_bond] - image (Tensor): the periodic image specifying the location of neighboring - atom [n_bond, 3] - lattice (Tensor): the lattice of this structure [3, 3] - - Returns: - bond_basis_ag (Tensor): the bond basis in AtomGraph [n_bond, num_radial] - bond_basis_ag (Tensor): the bond basis in BondGraph [n_bond, num_radial] - bond_vectors (Tensor): normalized bond vectors, for tracking the bond - directions [n_bond, 3] - """ - neighbor = neighbor + image @ lattice - bond_vectors = center - neighbor - bond_lengths = paddle.linalg.norm(x=bond_vectors, axis=1) - bond_vectors = bond_vectors / bond_lengths[:, None] - undirected_bond_lengths = paddle.index_select( - x=bond_lengths, axis=0, index=undirected2directed - ) - bond_basis_ag = self.rbf_expansion_ag(undirected_bond_lengths) - bond_basis_bg = self.rbf_expansion_bg(undirected_bond_lengths) - return bond_basis_ag, bond_basis_bg, bond_vectors - - -class AngleEncoder(paddle.nn.Layer): - """Encode an angle given the two bond vectors using Fourier Expansion.""" - - def __init__(self, *, num_angular: int = 9, learnable: bool = True) -> None: - """Initialize the angle encoder. - - Args: - num_angular (int): number of angular basis to use. Must be an odd integer. - learnable (bool): whether to set the frequencies of the Fourier expansion - as learnable parameters. Default = False - """ - super().__init__() - if num_angular % 2 != 1: - raise ValueError(f"num_angular={num_angular!r} must be an odd integer") - circular_harmonics_order = (num_angular - 1) // 2 - self.fourier_expansion = Fourier( - order=circular_harmonics_order, learnable=learnable - ) - - def forward(self, bond_i: paddle.Tensor, bond_j: paddle.Tensor) -> paddle.Tensor: - """Compute the angles between normalized vectors. - - Args: - bond_i (Tensor): normalized left bond vector [n_angle, 3] - bond_j (Tensor): normalized right bond vector [n_angle, 3] - - Returns: - angle_fea (Tensor): expanded cos_ij [n_angle, angle_feature_dim] - """ - cosine_ij = paddle.sum(x=bond_i * bond_j, axis=1) * (1 - 1e-06) - angle = paddle.acos(x=cosine_ij) - return self.fourier_expansion(angle) +from __future__ import annotations + +import paddle +from chgnet.model.basis import Fourier +from chgnet.model.basis import RadialBessel + + +class AtomEmbedding(paddle.nn.Layer): + """Encode an atom by its atomic number using a learnable embedding layer.""" + + def __init__(self, atom_feature_dim: int, max_num_elements: int = 94) -> None: + """Initialize the Atom featurizer. + + Args: + atom_feature_dim (int): dimension of atomic embedding. + max_num_elements (int): maximum number of elements in the dataset. + Default = 94 + """ + super().__init__() + self.embedding = paddle.nn.Embedding( + num_embeddings=max_num_elements, embedding_dim=atom_feature_dim + ) + + def forward(self, atomic_numbers: paddle.Tensor) -> paddle.Tensor: + """Convert the structure to a atom embedding tensor. + + Args: + atomic_numbers (Tensor): [n_atom, 1]. + + Returns: + atom_fea (Tensor): atom embeddings [n_atom, atom_feature_dim]. + """ + return self.embedding(atomic_numbers) + + +class BondEncoder(paddle.nn.Layer): + """Encode a chemical bond given the positions of two atoms using Gaussian + distance. 
+ """ + + def __init__( + self, + *, + atom_graph_cutoff: float = 5, + bond_graph_cutoff: float = 3, + num_radial: int = 9, + cutoff_coeff: int = 5, + learnable: bool = False, + ) -> None: + """Initialize the bond encoder. + + Args: + atom_graph_cutoff (float): The cutoff for constructing AtomGraph default = 5 + bond_graph_cutoff (float): The cutoff for constructing BondGraph default = 3 + num_radial (int): The number of radial component. Default = 9 + cutoff_coeff (int): Strength for graph cutoff smoothness. Default = 5 + learnable(bool): Whether the frequency in rbf expansion is learnable. + Default = False + """ + super().__init__() + self.rbf_expansion_ag = RadialBessel( + num_radial=num_radial, + cutoff=atom_graph_cutoff, + smooth_cutoff=cutoff_coeff, + learnable=learnable, + ) + self.rbf_expansion_bg = RadialBessel( + num_radial=num_radial, + cutoff=bond_graph_cutoff, + smooth_cutoff=cutoff_coeff, + learnable=learnable, + ) + + def forward( + self, + center: paddle.Tensor, + neighbor: paddle.Tensor, + undirected2directed: paddle.Tensor, + image: paddle.Tensor, + lattice: paddle.Tensor, + ) -> tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor]: + """Compute the pairwise distance between 2 3d coordinates. + + Args: + center (Tensor): 3d cartesian coordinates of center atoms [n_bond, 3] + neighbor (Tensor): 3d cartesian coordinates of neighbor atoms [n_bond, 3] + undirected2directed (Tensor): mapping from undirected bond to one of its + directed bond [n_bond] + image (Tensor): the periodic image specifying the location of neighboring + atom [n_bond, 3] + lattice (Tensor): the lattice of this structure [3, 3] + + Returns: + bond_basis_ag (Tensor): the bond basis in AtomGraph [n_bond, num_radial] + bond_basis_ag (Tensor): the bond basis in BondGraph [n_bond, num_radial] + bond_vectors (Tensor): normalized bond vectors, for tracking the bond + directions [n_bond, 3] + """ + neighbor = neighbor + image @ lattice + bond_vectors = center - neighbor + bond_lengths = paddle.linalg.norm(x=bond_vectors, axis=1) + bond_vectors = bond_vectors / bond_lengths[:, None] + undirected_bond_lengths = paddle.index_select( + x=bond_lengths, axis=0, index=undirected2directed + ) + bond_basis_ag = self.rbf_expansion_ag(undirected_bond_lengths) + bond_basis_bg = self.rbf_expansion_bg(undirected_bond_lengths) + return bond_basis_ag, bond_basis_bg, bond_vectors + + +class AngleEncoder(paddle.nn.Layer): + """Encode an angle given the two bond vectors using Fourier Expansion.""" + + def __init__(self, *, num_angular: int = 9, learnable: bool = True) -> None: + """Initialize the angle encoder. + + Args: + num_angular (int): number of angular basis to use. Must be an odd integer. + learnable (bool): whether to set the frequencies of the Fourier expansion + as learnable parameters. Default = False + """ + super().__init__() + if num_angular % 2 != 1: + raise ValueError(f"num_angular={num_angular!r} must be an odd integer") + circular_harmonics_order = (num_angular - 1) // 2 + self.fourier_expansion = Fourier( + order=circular_harmonics_order, learnable=learnable + ) + + def forward(self, bond_i: paddle.Tensor, bond_j: paddle.Tensor) -> paddle.Tensor: + """Compute the angles between normalized vectors. 
+ + Args: + bond_i (Tensor): normalized left bond vector [n_angle, 3] + bond_j (Tensor): normalized right bond vector [n_angle, 3] + + Returns: + angle_fea (Tensor): expanded cos_ij [n_angle, angle_feature_dim] + """ + cosine_ij = paddle.sum(x=bond_i * bond_j, axis=1) * (1 - 1e-06) + angle = paddle.acos(x=cosine_ij) + return self.fourier_expansion(angle) diff --git a/jointContribution/CHGNet/chgnet/model/functions.py b/jointContribution/CHGNet/chgnet/model/functions.py index 2dc5657c04..07eeddb1bb 100644 --- a/jointContribution/CHGNet/chgnet/model/functions.py +++ b/jointContribution/CHGNet/chgnet/model/functions.py @@ -1,251 +1,251 @@ -from __future__ import annotations - -import itertools -from collections.abc import Sequence - -import paddle -from paddle import nn - - -def aggregate( - data: paddle.Tensor, owners: paddle.Tensor, *, average=True, num_owner=None -) -> paddle.Tensor: - """Aggregate rows in data by specifying the owners. - - Args: - data (Tensor): data tensor to aggregate [n_row, feature_dim] - owners (Tensor): specify the owner of each row [n_row, 1] - average (bool): if True, average the rows, if False, sum the rows. - Default = True - num_owner (int, optional): the number of owners, this is needed if the - max idx of owner is not presented in owners tensor - Default = None - - Returns: - output (Tensor): [num_owner, feature_dim] - """ - - bin_count = paddle.bincount(x=owners.cast("int32")) - # bin_count = (bin_count!=0).cast(dtype=bin_count.dtype) - bin_count = paddle.where( - bin_count != 0, bin_count, paddle.ones([1], dtype=bin_count.dtype) - ) - # .where(bin_count != 0, y=paddle.ones( - # shape=[1], dtype=bin_count.dtype)) - # bin_count = bin_count.where(bin_count != 0, bin_count.new_ones(1)) - if num_owner is not None and tuple(bin_count.shape)[0] != num_owner: - difference = num_owner - tuple(bin_count.shape)[0] - bin_count = paddle.concat( - x=[bin_count, paddle.ones(shape=difference, dtype=bin_count.dtype)] - ) - - output0 = paddle.zeros( - shape=[tuple(bin_count.shape)[0], tuple(data.shape)[1]], dtype=data.dtype - ) - output0.stop_gradient = False - output = output0.index_add(axis=0, index=owners.cast("int32"), value=data) - - if average: - output = (output.T / bin_count).T - return output - - -class MLP(paddle.nn.Layer): - """Multi-Layer Perceptron used for non-linear regression.""" - - def __init__( - self, - input_dim: int, - *, - output_dim: int = 1, - hidden_dim: (int | Sequence[int] | None) = (64, 64), - dropout: float = 0, - activation: str = "silu", - bias: bool = True, - ) -> None: - """Initialize the MLP. - - Args: - input_dim (int): the input dimension - output_dim (int): the output dimension - hidden_dim (list[int] | int]): a list of integers or a single integer - representing the number of hidden units in each layer of the MLP. - Default = [64, 64] - dropout (float): the dropout rate before each linear layer. Default: 0 - activation (str, optional): The name of the activation function to use - in the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". - Default = "silu" - bias (bool): whether to use bias in each Linear layers. 
- Default = True - """ - super().__init__() - if hidden_dim is None or hidden_dim == 0: - layers = [ - paddle.nn.Dropout(p=dropout), - paddle.nn.Linear( - in_features=input_dim, out_features=output_dim, bias_attr=bias - ), - ] - elif isinstance(hidden_dim, int): - layers = [ - paddle.nn.Linear( - in_features=input_dim, out_features=hidden_dim, bias_attr=bias - ), - find_activation(activation), - paddle.nn.Dropout(p=dropout), - paddle.nn.Linear( - in_features=hidden_dim, out_features=output_dim, bias_attr=bias - ), - ] - elif isinstance(hidden_dim, Sequence): - layers = [ - paddle.nn.Linear( - in_features=input_dim, out_features=hidden_dim[0], bias_attr=bias - ), - find_activation(activation), - ] - if len(hidden_dim) != 1: - for h_in, h_out in itertools.pairwise(hidden_dim): - layers.append( - paddle.nn.Linear( - in_features=h_in, out_features=h_out, bias_attr=bias - ) - ) - layers.append(find_activation(activation)) - layers.append(paddle.nn.Dropout(p=dropout)) - layers.append( - paddle.nn.Linear( - in_features=hidden_dim[-1], out_features=output_dim, bias_attr=bias - ) - ) - else: - raise TypeError( - f"hidden_dim={hidden_dim!r} must be an integer, a list of integers, or None." - ) - self.layers = paddle.nn.Sequential(*layers) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - """Performs a forward pass through the MLP. - - Args: - x (Tensor): a tensor of shape (batch_size, input_dim) - - Returns: - Tensor: a tensor of shape (batch_size, output_dim) - """ - return self.layers(x) - - -class GatedMLP(paddle.nn.Layer): - """Gated MLP - similar model structure is used in CGCNN and M3GNet. - """ - - def __init__( - self, - input_dim: int, - output_dim: int, - *, - hidden_dim: (int | list[int] | None) = None, - dropout: float = 0, - activation: str = "silu", - norm: str = "batch", - bias: bool = True, - ) -> None: - """Initialize a gated MLP. - - Args: - input_dim (int): the input dimension - output_dim (int): the output dimension - hidden_dim (list[int] | int]): a list of integers or a single integer - representing the number of hidden units in each layer of the MLP. - Default = None - dropout (float): the dropout rate before each linear layer. - Default: 0 - activation (str, optional): The name of the activation function to use in - the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". - Default = "silu" - norm (str, optional): The name of the normalization layer to use on the - updated atom features. Must be one of "batch", "layer", or None. - Default = "batch" - bias (bool): whether to use bias in each Linear layers. - Default = True - """ - super().__init__() - self.mlp_core = MLP( - input_dim=input_dim, - output_dim=output_dim, - hidden_dim=hidden_dim, - dropout=dropout, - activation=activation, - bias=bias, - ) - self.mlp_gate = MLP( - input_dim=input_dim, - output_dim=output_dim, - hidden_dim=hidden_dim, - dropout=dropout, - activation=activation, - bias=bias, - ) - self.activation = find_activation(activation) - self.sigmoid = paddle.nn.Sigmoid() - self.norm = norm - self.bn1 = find_normalization(name=norm, dim=output_dim) - self.bn2 = find_normalization(name=norm, dim=output_dim) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - """Performs a forward pass through the MLP. 
- - Args: - x (Tensor): a tensor of shape (batch_size, input_dim) - - Returns: - Tensor: a tensor of shape (batch_size, output_dim) - """ - if self.norm is None: - core = self.activation(self.mlp_core(x)) - gate = self.sigmoid(self.mlp_gate(x)) - else: - core = self.activation(self.bn1(self.mlp_core(x))) - gate = self.sigmoid(self.bn2(self.mlp_gate(x))) - return core * gate - - -class ScaledSiLU(paddle.nn.Layer): - """Scaled Sigmoid Linear Unit.""" - - def __init__(self) -> None: - """Initialize a scaled SiLU.""" - super().__init__() - self.scale_factor = 1 / 0.6 - self._activation = paddle.nn.Silu() - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - """Forward pass.""" - return self._activation(x) * self.scale_factor - - -def find_activation(name: str) -> paddle.nn.Layer: - """Return an activation function using name.""" - try: - return { - "relu": paddle.nn.ReLU, - "silu": paddle.nn.Silu, - "scaledsilu": ScaledSiLU, - "gelu": paddle.nn.GELU, - "softplus": paddle.nn.Softplus, - "sigmoid": paddle.nn.Sigmoid, - "tanh": paddle.nn.Tanh, - }[name.lower()]() - except KeyError as exc: - raise NotImplementedError from exc - - -def find_normalization(name: str, dim: (int | None) = None) -> (paddle.nn.Layer | None): - """Return an normalization function using name.""" - if name is None: - return None - return {"batch": nn.BatchNorm1D(dim), "layer": nn.LayerNorm(dim)}.get( - name.lower(), None - ) +from __future__ import annotations + +import itertools +from collections.abc import Sequence + +import paddle +from paddle import nn + + +def aggregate( + data: paddle.Tensor, owners: paddle.Tensor, *, average=True, num_owner=None +) -> paddle.Tensor: + """Aggregate rows in data by specifying the owners. + + Args: + data (Tensor): data tensor to aggregate [n_row, feature_dim] + owners (Tensor): specify the owner of each row [n_row, 1] + average (bool): if True, average the rows, if False, sum the rows. + Default = True + num_owner (int, optional): the number of owners, this is needed if the + max idx of owner is not presented in owners tensor + Default = None + + Returns: + output (Tensor): [num_owner, feature_dim] + """ + + bin_count = paddle.bincount(x=owners.cast("int32")) + # bin_count = (bin_count!=0).cast(dtype=bin_count.dtype) + bin_count = paddle.where( + bin_count != 0, bin_count, paddle.ones([1], dtype=bin_count.dtype) + ) + # .where(bin_count != 0, y=paddle.ones( + # shape=[1], dtype=bin_count.dtype)) + # bin_count = bin_count.where(bin_count != 0, bin_count.new_ones(1)) + if num_owner is not None and tuple(bin_count.shape)[0] != num_owner: + difference = num_owner - tuple(bin_count.shape)[0] + bin_count = paddle.concat( + x=[bin_count, paddle.ones(shape=difference, dtype=bin_count.dtype)] + ) + + output0 = paddle.zeros( + shape=[tuple(bin_count.shape)[0], tuple(data.shape)[1]], dtype=data.dtype + ) + output0.stop_gradient = False + output = output0.index_add(axis=0, index=owners.cast("int32"), value=data) + + if average: + output = (output.T / bin_count).T + return output + + +class MLP(paddle.nn.Layer): + """Multi-Layer Perceptron used for non-linear regression.""" + + def __init__( + self, + input_dim: int, + *, + output_dim: int = 1, + hidden_dim: (int | Sequence[int] | None) = (64, 64), + dropout: float = 0, + activation: str = "silu", + bias: bool = True, + ) -> None: + """Initialize the MLP. 
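# --- A plain-numpy sketch of what aggregate() above computes: a scatter-sum
# (or scatter-mean) of rows grouped by an "owner" index, with empty owners kept
# at zero instead of dividing by zero. The example data is made up.
import numpy as np

def aggregate_np(data, owners, average=True, num_owner=None):
    num_owner = num_owner or owners.max() + 1
    out = np.zeros((num_owner, data.shape[1]), dtype=data.dtype)
    np.add.at(out, owners, data)                    # scatter-sum rows per owner
    if average:
        counts = np.bincount(owners, minlength=num_owner)
        out = out / np.maximum(counts, 1)[:, None]  # avoid division by zero
    return out

data = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
owners = np.array([0, 0, 2])
print(aggregate_np(data, owners, num_owner=3))
# [[2. 3.]   mean of rows 0 and 1
#  [0. 0.]   owner 1 has no rows
#  [5. 6.]]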
+ + Args: + input_dim (int): the input dimension + output_dim (int): the output dimension + hidden_dim (list[int] | int]): a list of integers or a single integer + representing the number of hidden units in each layer of the MLP. + Default = [64, 64] + dropout (float): the dropout rate before each linear layer. Default: 0 + activation (str, optional): The name of the activation function to use + in the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". + Default = "silu" + bias (bool): whether to use bias in each Linear layers. + Default = True + """ + super().__init__() + if hidden_dim is None or hidden_dim == 0: + layers = [ + paddle.nn.Dropout(p=dropout), + paddle.nn.Linear( + in_features=input_dim, out_features=output_dim, bias_attr=bias + ), + ] + elif isinstance(hidden_dim, int): + layers = [ + paddle.nn.Linear( + in_features=input_dim, out_features=hidden_dim, bias_attr=bias + ), + find_activation(activation), + paddle.nn.Dropout(p=dropout), + paddle.nn.Linear( + in_features=hidden_dim, out_features=output_dim, bias_attr=bias + ), + ] + elif isinstance(hidden_dim, Sequence): + layers = [ + paddle.nn.Linear( + in_features=input_dim, out_features=hidden_dim[0], bias_attr=bias + ), + find_activation(activation), + ] + if len(hidden_dim) != 1: + for h_in, h_out in itertools.pairwise(hidden_dim): + layers.append( + paddle.nn.Linear( + in_features=h_in, out_features=h_out, bias_attr=bias + ) + ) + layers.append(find_activation(activation)) + layers.append(paddle.nn.Dropout(p=dropout)) + layers.append( + paddle.nn.Linear( + in_features=hidden_dim[-1], out_features=output_dim, bias_attr=bias + ) + ) + else: + raise TypeError( + f"hidden_dim={hidden_dim!r} must be an integer, a list of integers, or None." + ) + self.layers = paddle.nn.Sequential(*layers) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + """Performs a forward pass through the MLP. + + Args: + x (Tensor): a tensor of shape (batch_size, input_dim) + + Returns: + Tensor: a tensor of shape (batch_size, output_dim) + """ + return self.layers(x) + + +class GatedMLP(paddle.nn.Layer): + """Gated MLP + similar model structure is used in CGCNN and M3GNet. + """ + + def __init__( + self, + input_dim: int, + output_dim: int, + *, + hidden_dim: (int | list[int] | None) = None, + dropout: float = 0, + activation: str = "silu", + norm: str = "batch", + bias: bool = True, + ) -> None: + """Initialize a gated MLP. + + Args: + input_dim (int): the input dimension + output_dim (int): the output dimension + hidden_dim (list[int] | int]): a list of integers or a single integer + representing the number of hidden units in each layer of the MLP. + Default = None + dropout (float): the dropout rate before each linear layer. + Default: 0 + activation (str, optional): The name of the activation function to use in + the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". + Default = "silu" + norm (str, optional): The name of the normalization layer to use on the + updated atom features. Must be one of "batch", "layer", or None. + Default = "batch" + bias (bool): whether to use bias in each Linear layers. 
+ Default = True + """ + super().__init__() + self.mlp_core = MLP( + input_dim=input_dim, + output_dim=output_dim, + hidden_dim=hidden_dim, + dropout=dropout, + activation=activation, + bias=bias, + ) + self.mlp_gate = MLP( + input_dim=input_dim, + output_dim=output_dim, + hidden_dim=hidden_dim, + dropout=dropout, + activation=activation, + bias=bias, + ) + self.activation = find_activation(activation) + self.sigmoid = paddle.nn.Sigmoid() + self.norm = norm + self.bn1 = find_normalization(name=norm, dim=output_dim) + self.bn2 = find_normalization(name=norm, dim=output_dim) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + """Performs a forward pass through the MLP. + + Args: + x (Tensor): a tensor of shape (batch_size, input_dim) + + Returns: + Tensor: a tensor of shape (batch_size, output_dim) + """ + if self.norm is None: + core = self.activation(self.mlp_core(x)) + gate = self.sigmoid(self.mlp_gate(x)) + else: + core = self.activation(self.bn1(self.mlp_core(x))) + gate = self.sigmoid(self.bn2(self.mlp_gate(x))) + return core * gate + + +class ScaledSiLU(paddle.nn.Layer): + """Scaled Sigmoid Linear Unit.""" + + def __init__(self) -> None: + """Initialize a scaled SiLU.""" + super().__init__() + self.scale_factor = 1 / 0.6 + self._activation = paddle.nn.Silu() + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + """Forward pass.""" + return self._activation(x) * self.scale_factor + + +def find_activation(name: str) -> paddle.nn.Layer: + """Return an activation function using name.""" + try: + return { + "relu": paddle.nn.ReLU, + "silu": paddle.nn.Silu, + "scaledsilu": ScaledSiLU, + "gelu": paddle.nn.GELU, + "softplus": paddle.nn.Softplus, + "sigmoid": paddle.nn.Sigmoid, + "tanh": paddle.nn.Tanh, + }[name.lower()]() + except KeyError as exc: + raise NotImplementedError from exc + + +def find_normalization(name: str, dim: (int | None) = None) -> (paddle.nn.Layer | None): + """Return an normalization function using name.""" + if name is None: + return None + return {"batch": nn.BatchNorm1D(dim), "layer": nn.LayerNorm(dim)}.get( + name.lower(), None + ) diff --git a/jointContribution/CHGNet/chgnet/model/layers.py b/jointContribution/CHGNet/chgnet/model/layers.py index d89e9a71f4..7e10b3e703 100644 --- a/jointContribution/CHGNet/chgnet/model/layers.py +++ b/jointContribution/CHGNet/chgnet/model/layers.py @@ -1,436 +1,436 @@ -from __future__ import annotations - -import paddle -from chgnet.model.functions import MLP -from chgnet.model.functions import GatedMLP -from chgnet.model.functions import aggregate -from chgnet.model.functions import find_activation -from chgnet.model.functions import find_normalization - - -class AtomConv(paddle.nn.Layer): - """A convolution Layer to update atom features.""" - - def __init__( - self, - *, - atom_fea_dim: int, - bond_fea_dim: int, - hidden_dim: int = 64, - dropout: float = 0, - activation: str = "silu", - norm: (str | None) = None, - use_mlp_out: bool = True, - mlp_out_bias: bool = False, - resnet: bool = True, - gMLP_norm: (str | None) = None, - ) -> None: - """Initialize the AtomConv layer. - - Args: - atom_fea_dim (int): The dimensionality of the input atom features. - bond_fea_dim (int): The dimensionality of the input bond features. - hidden_dim (int, optional): The dimensionality of the hidden layers in the - gated MLP. - Default = 64 - dropout (float, optional): The dropout rate to apply to the gated MLP. - Default = 0. - activation (str, optional): The name of the activation function to use in - the gated MLP. 
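# --- A toy numpy rendering of the gating used by GatedMLP.forward above: two
# parallel projections of the same input, one passed through the activation
# ("core") and one through a sigmoid ("gate"), multiplied elementwise. The
# random weights stand in for mlp_core / mlp_gate and are not model parameters.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 8))          # (batch, input_dim)
w_core = rng.normal(size=(8, 16))
w_gate = rng.normal(size=(8, 16))

def silu(z):
    return z / (1 + np.exp(-z))      # SiLU: z * sigmoid(z)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

out = silu(x @ w_core) * sigmoid(x @ w_gate)   # core * gate
print(out.shape)                                # (4, 16)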
Must be one of "relu", "silu", "tanh", or "gelu". - Default = "silu" - norm (str, optional): The name of the normalization layer to use on the - updated atom features. Must be one of "batch", "layer", or None. - Default = None - use_mlp_out (bool, optional): Whether to apply an MLP output layer to the - updated atom features. - Default = True - mlp_out_bias (bool): whether to use bias in the output MLP Linear layer. - Default = False - resnet (bool, optional): Whether to apply a residual connection to the - updated atom features. - Default = True - gMLP_norm (str, optional): The name of the normalization layer to use on the - gated MLP. Must be one of "batch", "layer", or None. - Default = None - """ - super().__init__() - self.use_mlp_out = use_mlp_out - self.resnet = resnet - self.activation = find_activation(activation) - self.twoBody_atom = GatedMLP( - input_dim=2 * atom_fea_dim + bond_fea_dim, - output_dim=atom_fea_dim, - hidden_dim=hidden_dim, - dropout=dropout, - norm=gMLP_norm, - activation=activation, - ) - if self.use_mlp_out: - self.mlp_out = MLP( - input_dim=atom_fea_dim, - output_dim=atom_fea_dim, - hidden_dim=0, - bias=mlp_out_bias, - ) - self.atom_norm = find_normalization(name=norm, dim=atom_fea_dim) - - def forward( - self, - atom_feas: paddle.Tensor, - bond_feas: paddle.Tensor, - bond_weights: paddle.Tensor, - atom_graph: paddle.Tensor, - directed2undirected: paddle.Tensor, - ) -> paddle.Tensor: - """Forward pass of AtomConv module that updates the atom features and - optionally bond features. - - Args: - atom_feas (Tensor): Input tensor with shape - [num_batch_atoms, atom_fea_dim] - bond_feas (Tensor): Input tensor with shape - [num_undirected_bonds, bond_fea_dim] - bond_weights (Tensor): AtomGraph bond weights with shape - [num_undirected_bonds, bond_fea_dim] - atom_graph (Tensor): Directed AtomGraph adjacency list with shape - [num_directed_bonds, 2] - directed2undirected (Tensor): Index tensor that maps directed bonds to - undirected bonds.with shape - [num_undirected_bonds] - - Returns: - Tensor: the updated atom features tensor with shape - [num_batch_atom, atom_fea_dim] - - Notes: - - num_batch_atoms = sum(num_atoms) in batch - """ - center_atoms = paddle.index_select(x=atom_feas, axis=0, index=atom_graph[:, 0]) - nbr_atoms = paddle.index_select(x=atom_feas, axis=0, index=atom_graph[:, 1]) - bonds = paddle.index_select(x=bond_feas, axis=0, index=directed2undirected) - messages = paddle.concat(x=[center_atoms, bonds, nbr_atoms], axis=1) - messages = self.twoBody_atom(messages) - bond_weight = paddle.index_select( - x=bond_weights, axis=0, index=directed2undirected - ) - messages *= bond_weight - new_atom_feas = aggregate( - messages, atom_graph[:, 0], average=False, num_owner=len(atom_feas) - ) - if self.use_mlp_out: - new_atom_feas = self.mlp_out(new_atom_feas) - if self.resnet: - new_atom_feas += atom_feas - if self.atom_norm is not None: - new_atom_feas = self.atom_norm(new_atom_feas) - return new_atom_feas - - -class BondConv(paddle.nn.Layer): - """A convolution Layer to update bond features.""" - - def __init__( - self, - atom_fea_dim: int, - bond_fea_dim: int, - angle_fea_dim: int, - *, - hidden_dim: int = 64, - dropout: float = 0, - activation: str = "silu", - norm: (str | None) = None, - use_mlp_out: bool = True, - mlp_out_bias: bool = False, - resnet=True, - gMLP_norm: (str | None) = None, - ) -> None: - """Initialize the BondConv layer. - - Args: - atom_fea_dim (int): The dimensionality of the input atom features. 
- bond_fea_dim (int): The dimensionality of the input bond features. - angle_fea_dim (int): The dimensionality of the input angle features. - hidden_dim (int, optional): The dimensionality of the hidden layers - in the gated MLP. - Default = 64 - dropout (float, optional): The dropout rate to apply to the gated MLP. - Default = 0. - activation (str, optional): The name of the activation function to use - in the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". - Default = "silu" - norm (str, optional): The name of the normalization layer to use on the - updated atom features. Must be one of "batch", "layer", or None. - Default = None - use_mlp_out (bool, optional): Whether to apply an MLP output layer to the - updated atom features. - Default = True - mlp_out_bias (bool): whether to use bias in the output MLP Linear layer. - Default = False - resnet (bool, optional): Whether to apply a residual connection to the - updated atom features. - Default = True - gMLP_norm (str, optional): The name of the normalization layer to use on the - gated MLP. Must be one of "batch", "layer", or None. - Default = None - """ - super().__init__() - self.use_mlp_out = use_mlp_out - self.resnet = resnet - self.activation = find_activation(activation) - self.twoBody_bond = GatedMLP( - input_dim=atom_fea_dim + 2 * bond_fea_dim + angle_fea_dim, - output_dim=bond_fea_dim, - hidden_dim=hidden_dim, - dropout=dropout, - norm=gMLP_norm, - activation=activation, - ) - if self.use_mlp_out: - self.mlp_out = MLP( - input_dim=bond_fea_dim, - output_dim=bond_fea_dim, - hidden_dim=0, - bias=mlp_out_bias, - ) - self.bond_norm = find_normalization(name=norm, dim=bond_fea_dim) - - def forward( - self, - atom_feas: paddle.Tensor, - bond_feas: paddle.Tensor, - bond_weights: paddle.Tensor, - angle_feas: paddle.Tensor, - bond_graph: paddle.Tensor, - ) -> paddle.Tensor: - """Update the bond features. 
- - Args: - atom_feas (Tensor): atom features tensor with shape - [num_batch_atoms, atom_fea_dim] - bond_feas (Tensor): bond features tensor with shape - [num_undirected_bonds, bond_fea_dim] - bond_weights (Tensor): BondGraph bond weights with shape - [num_undirected_bonds, bond_fea_dim] - angle_feas (Tensor): angle features tensor with shape - [num_batch_angles, angle_fea_dim] - bond_graph (Tensor): Directed BondGraph tensor with shape - [num_batched_angles, 3] - - Returns: - new_bond_feas (Tensor): bond feature tensor with shape - [num_undirected_bonds, bond_fea_dim] - - Notes: - - num_batch_atoms = sum(num_atoms) in batch - """ - center_atoms = paddle.index_select( - x=atom_feas, axis=0, index=bond_graph[:, 0].cast("int32") - ) - bond_feas_i = paddle.index_select( - x=bond_feas, axis=0, index=bond_graph[:, 1].cast("int32") - ) - bond_feas_j = paddle.index_select( - x=bond_feas, axis=0, index=bond_graph[:, 2].cast("int32") - ) - total_fea = paddle.concat( - x=[bond_feas_i, bond_feas_j, angle_feas, center_atoms], axis=1 - ) - bond_update = self.twoBody_bond(total_fea) - bond_weights_i = paddle.index_select( - x=bond_weights, axis=0, index=bond_graph[:, 1].cast("int32") - ) - bond_weights_j = paddle.index_select( - x=bond_weights, axis=0, index=bond_graph[:, 2].cast("int32") - ) - bond_update = bond_update * bond_weights_i * bond_weights_j - new_bond_feas = aggregate( - bond_update, bond_graph[:, 1], average=False, num_owner=len(bond_feas) - ) - if self.use_mlp_out: - new_bond_feas = self.mlp_out(new_bond_feas) - if self.resnet: - new_bond_feas += bond_feas - if self.bond_norm is not None: - new_bond_feas = self.bond_norm(new_bond_feas) - return new_bond_feas - - -class AngleUpdate(paddle.nn.Layer): - """Update angle features.""" - - def __init__( - self, - atom_fea_dim: int, - bond_fea_dim: int, - angle_fea_dim: int, - *, - hidden_dim: int = 0, - dropout: float = 0, - activation: str = "silu", - norm: (str | None) = None, - resnet: bool = True, - gMLP_norm: (str | None) = None, - ) -> None: - """Initialize the AngleUpdate layer. - - Args: - atom_fea_dim (int): The dimensionality of the input atom features. - bond_fea_dim (int): The dimensionality of the input bond features. - angle_fea_dim (int): The dimensionality of the input angle features. - hidden_dim (int, optional): The dimensionality of the hidden layers - in the gated MLP. - Default = 0 - dropout (float, optional): The dropout rate to apply to the gated MLP. - Default = 0. - activation (str, optional): The name of the activation function to use - in the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". - Default = "silu" - norm (str, optional): The name of the normalization layer to use on the - updated atom features. Must be one of "batch", "layer", or None. - Default = None - resnet (bool, optional): Whether to apply a residual connection to the - updated atom features. - Default = True - gMLP_norm (str, optional): The name of the normalization layer to use on the - gated MLP. Must be one of "batch", "layer", or None. 
- Default = None - """ - super().__init__() - self.resnet = resnet - self.activation = find_activation(activation) - self.twoBody_bond = GatedMLP( - input_dim=atom_fea_dim + 2 * bond_fea_dim + angle_fea_dim, - output_dim=angle_fea_dim, - hidden_dim=hidden_dim, - dropout=dropout, - norm=gMLP_norm, - activation=activation, - ) - self.angle_norm = find_normalization(norm, dim=angle_fea_dim) - - def forward( - self, - atom_feas: paddle.Tensor, - bond_feas: paddle.Tensor, - angle_feas: paddle.Tensor, - bond_graph: paddle.Tensor, - ) -> paddle.Tensor: - """Update the angle features using bond graph. - - Args: - atom_feas (Tensor): atom features tensor with shape - [num_batch_atoms, atom_fea_dim] - bond_feas (Tensor): bond features tensor with shape - [num_undirected_bonds, bond_fea_dim] - angle_feas (Tensor): angle features tensor with shape - [num_batch_angles, angle_fea_dim] - bond_graph (Tensor): Directed BondGraph tensor with shape - [num_batched_angles, 3] - - Returns: - new_angle_feas (Tensor): angle features tensor with shape - [num_batch_angles, angle_fea_dim] - - Notes: - - num_batch_atoms = sum(num_atoms) in batch - """ - bond_graph = bond_graph.astype("int64") - center_atoms = paddle.index_select(x=atom_feas, axis=0, index=bond_graph[:, 0]) - bond_feas_i = paddle.index_select(x=bond_feas, axis=0, index=bond_graph[:, 1]) - bond_feas_j = paddle.index_select(x=bond_feas, axis=0, index=bond_graph[:, 2]) - total_fea = paddle.concat( - x=[bond_feas_i, bond_feas_j, angle_feas, center_atoms], axis=1 - ) - new_angle_feas = self.twoBody_bond(total_fea) - - if self.resnet: - new_angle_feas += angle_feas - if self.angle_norm is not None: - new_angle_feas = self.angle_norm(new_angle_feas) - return new_angle_feas - - -class GraphPooling(paddle.nn.Layer): - """Pooling the sub-graphs in the batched graph.""" - - def __init__(self, *, average: bool = False) -> None: - """Args: - average (bool): whether to average the features. - """ - super().__init__() - self.average = average - - def forward( - self, atom_feas: paddle.Tensor, atom_owner: paddle.Tensor - ) -> paddle.Tensor: - """Merge the atom features that belong to same graph in a batched graph. - - Args: - atom_feas (Tensor): batched atom features after convolution layers. - [num_batch_atoms, atom_fea_dim or 1] - atom_owner (Tensor): graph indices for each atom. - [num_batch_atoms] - - Returns: - crystal_feas (Tensor): crystal feature matrix. - [n_crystals, atom_fea_dim or 1] - """ - return aggregate(atom_feas, atom_owner, average=self.average) - - -class GraphAttentionReadOut(paddle.nn.Layer): - """Multi Head Attention Read Out Layer - merge the information from atom_feas to crystal_fea. - """ - - def __init__( - self, - atom_fea_dim: int, - num_head: int = 3, - hidden_dim: int = 32, - *, - average=False, - ) -> None: - """Initialize the layer. - - Args: - atom_fea_dim (int): atom feature dimension - num_head (int): number of attention heads used - hidden_dim (int): dimension of hidden layer - average (bool): whether to average the features - """ - super().__init__() - self.key = MLP( - input_dim=atom_fea_dim, output_dim=num_head, hidden_dim=hidden_dim - ) - self.softmax = paddle.nn.Softmax(axis=0) - self.average = average - - def forward( - self, atom_feas: paddle.Tensor, atom_owner: paddle.Tensor - ) -> paddle.Tensor: - """Merge the atom features that belong to same graph in a batched graph. - - Args: - atom_feas (Tensor): batched atom features after convolution layers. 
- [num_batch_atoms, atom_fea_dim] - atom_owner (Tensor): graph indices for each atom. - [num_batch_atoms] - - Returns: - crystal_feas (Tensor): crystal feature matrix. - [n_crystals, atom_fea_dim] - """ - crystal_feas = [] - weights = self.key(atom_feas) - bin_count = paddle.bincount(x=atom_owner) - start_index = 0 - for n_atom in bin_count: - atom_fea = atom_feas[start_index : start_index + n_atom, :] - weight = self.softmax(weights[start_index : start_index + n_atom, :]) - crystal_fea = (atom_fea.T @ weight).reshape([-1]) - if self.average: - crystal_fea /= n_atom - crystal_feas.append(crystal_fea) - start_index += n_atom - return paddle.stack(x=crystal_feas, axis=0) +from __future__ import annotations + +import paddle +from chgnet.model.functions import MLP +from chgnet.model.functions import GatedMLP +from chgnet.model.functions import aggregate +from chgnet.model.functions import find_activation +from chgnet.model.functions import find_normalization + + +class AtomConv(paddle.nn.Layer): + """A convolution Layer to update atom features.""" + + def __init__( + self, + *, + atom_fea_dim: int, + bond_fea_dim: int, + hidden_dim: int = 64, + dropout: float = 0, + activation: str = "silu", + norm: (str | None) = None, + use_mlp_out: bool = True, + mlp_out_bias: bool = False, + resnet: bool = True, + gMLP_norm: (str | None) = None, + ) -> None: + """Initialize the AtomConv layer. + + Args: + atom_fea_dim (int): The dimensionality of the input atom features. + bond_fea_dim (int): The dimensionality of the input bond features. + hidden_dim (int, optional): The dimensionality of the hidden layers in the + gated MLP. + Default = 64 + dropout (float, optional): The dropout rate to apply to the gated MLP. + Default = 0. + activation (str, optional): The name of the activation function to use in + the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". + Default = "silu" + norm (str, optional): The name of the normalization layer to use on the + updated atom features. Must be one of "batch", "layer", or None. + Default = None + use_mlp_out (bool, optional): Whether to apply an MLP output layer to the + updated atom features. + Default = True + mlp_out_bias (bool): whether to use bias in the output MLP Linear layer. + Default = False + resnet (bool, optional): Whether to apply a residual connection to the + updated atom features. + Default = True + gMLP_norm (str, optional): The name of the normalization layer to use on the + gated MLP. Must be one of "batch", "layer", or None. + Default = None + """ + super().__init__() + self.use_mlp_out = use_mlp_out + self.resnet = resnet + self.activation = find_activation(activation) + self.twoBody_atom = GatedMLP( + input_dim=2 * atom_fea_dim + bond_fea_dim, + output_dim=atom_fea_dim, + hidden_dim=hidden_dim, + dropout=dropout, + norm=gMLP_norm, + activation=activation, + ) + if self.use_mlp_out: + self.mlp_out = MLP( + input_dim=atom_fea_dim, + output_dim=atom_fea_dim, + hidden_dim=0, + bias=mlp_out_bias, + ) + self.atom_norm = find_normalization(name=norm, dim=atom_fea_dim) + + def forward( + self, + atom_feas: paddle.Tensor, + bond_feas: paddle.Tensor, + bond_weights: paddle.Tensor, + atom_graph: paddle.Tensor, + directed2undirected: paddle.Tensor, + ) -> paddle.Tensor: + """Forward pass of AtomConv module that updates the atom features and + optionally bond features. 
+ + Args: + atom_feas (Tensor): Input tensor with shape + [num_batch_atoms, atom_fea_dim] + bond_feas (Tensor): Input tensor with shape + [num_undirected_bonds, bond_fea_dim] + bond_weights (Tensor): AtomGraph bond weights with shape + [num_undirected_bonds, bond_fea_dim] + atom_graph (Tensor): Directed AtomGraph adjacency list with shape + [num_directed_bonds, 2] + directed2undirected (Tensor): Index tensor that maps directed bonds to + undirected bonds.with shape + [num_undirected_bonds] + + Returns: + Tensor: the updated atom features tensor with shape + [num_batch_atom, atom_fea_dim] + + Notes: + - num_batch_atoms = sum(num_atoms) in batch + """ + center_atoms = paddle.index_select(x=atom_feas, axis=0, index=atom_graph[:, 0]) + nbr_atoms = paddle.index_select(x=atom_feas, axis=0, index=atom_graph[:, 1]) + bonds = paddle.index_select(x=bond_feas, axis=0, index=directed2undirected) + messages = paddle.concat(x=[center_atoms, bonds, nbr_atoms], axis=1) + messages = self.twoBody_atom(messages) + bond_weight = paddle.index_select( + x=bond_weights, axis=0, index=directed2undirected + ) + messages *= bond_weight + new_atom_feas = aggregate( + messages, atom_graph[:, 0], average=False, num_owner=len(atom_feas) + ) + if self.use_mlp_out: + new_atom_feas = self.mlp_out(new_atom_feas) + if self.resnet: + new_atom_feas += atom_feas + if self.atom_norm is not None: + new_atom_feas = self.atom_norm(new_atom_feas) + return new_atom_feas + + +class BondConv(paddle.nn.Layer): + """A convolution Layer to update bond features.""" + + def __init__( + self, + atom_fea_dim: int, + bond_fea_dim: int, + angle_fea_dim: int, + *, + hidden_dim: int = 64, + dropout: float = 0, + activation: str = "silu", + norm: (str | None) = None, + use_mlp_out: bool = True, + mlp_out_bias: bool = False, + resnet=True, + gMLP_norm: (str | None) = None, + ) -> None: + """Initialize the BondConv layer. + + Args: + atom_fea_dim (int): The dimensionality of the input atom features. + bond_fea_dim (int): The dimensionality of the input bond features. + angle_fea_dim (int): The dimensionality of the input angle features. + hidden_dim (int, optional): The dimensionality of the hidden layers + in the gated MLP. + Default = 64 + dropout (float, optional): The dropout rate to apply to the gated MLP. + Default = 0. + activation (str, optional): The name of the activation function to use + in the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". + Default = "silu" + norm (str, optional): The name of the normalization layer to use on the + updated atom features. Must be one of "batch", "layer", or None. + Default = None + use_mlp_out (bool, optional): Whether to apply an MLP output layer to the + updated atom features. + Default = True + mlp_out_bias (bool): whether to use bias in the output MLP Linear layer. + Default = False + resnet (bool, optional): Whether to apply a residual connection to the + updated atom features. + Default = True + gMLP_norm (str, optional): The name of the normalization layer to use on the + gated MLP. Must be one of "batch", "layer", or None. 
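# --- A compact numpy sketch of the message-passing step in AtomConv.forward
# above: gather center/neighbor atom features and the shared bond feature per
# directed bond, build a message, then scatter-sum the messages back onto the
# center atoms. The "mlp" is a stand-in for the gated MLP and all sizes are toy.
import numpy as np

n_atoms, fea = 3, 4
atom_feas = np.arange(n_atoms * fea, dtype=float).reshape(n_atoms, fea)
atom_graph = np.array([[0, 1], [1, 0], [1, 2], [2, 1]])   # directed bonds
bond_feas = np.ones((2, fea))                             # one row per undirected bond
directed2undirected = np.array([0, 0, 1, 1])

center = atom_feas[atom_graph[:, 0]]
nbr = atom_feas[atom_graph[:, 1]]
bonds = bond_feas[directed2undirected]
messages = np.concatenate([center, bonds, nbr], axis=1)

def mlp(m):
    return m[:, :fea]                 # placeholder for the twoBody_atom gated MLP

new_atom_feas = np.zeros_like(atom_feas)
np.add.at(new_atom_feas, atom_graph[:, 0], mlp(messages))  # sum messages per center atom
new_atom_feas += atom_feas            # residual connection, as with resnet=True
print(new_atom_feas)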
+ Default = None + """ + super().__init__() + self.use_mlp_out = use_mlp_out + self.resnet = resnet + self.activation = find_activation(activation) + self.twoBody_bond = GatedMLP( + input_dim=atom_fea_dim + 2 * bond_fea_dim + angle_fea_dim, + output_dim=bond_fea_dim, + hidden_dim=hidden_dim, + dropout=dropout, + norm=gMLP_norm, + activation=activation, + ) + if self.use_mlp_out: + self.mlp_out = MLP( + input_dim=bond_fea_dim, + output_dim=bond_fea_dim, + hidden_dim=0, + bias=mlp_out_bias, + ) + self.bond_norm = find_normalization(name=norm, dim=bond_fea_dim) + + def forward( + self, + atom_feas: paddle.Tensor, + bond_feas: paddle.Tensor, + bond_weights: paddle.Tensor, + angle_feas: paddle.Tensor, + bond_graph: paddle.Tensor, + ) -> paddle.Tensor: + """Update the bond features. + + Args: + atom_feas (Tensor): atom features tensor with shape + [num_batch_atoms, atom_fea_dim] + bond_feas (Tensor): bond features tensor with shape + [num_undirected_bonds, bond_fea_dim] + bond_weights (Tensor): BondGraph bond weights with shape + [num_undirected_bonds, bond_fea_dim] + angle_feas (Tensor): angle features tensor with shape + [num_batch_angles, angle_fea_dim] + bond_graph (Tensor): Directed BondGraph tensor with shape + [num_batched_angles, 3] + + Returns: + new_bond_feas (Tensor): bond feature tensor with shape + [num_undirected_bonds, bond_fea_dim] + + Notes: + - num_batch_atoms = sum(num_atoms) in batch + """ + center_atoms = paddle.index_select( + x=atom_feas, axis=0, index=bond_graph[:, 0].cast("int32") + ) + bond_feas_i = paddle.index_select( + x=bond_feas, axis=0, index=bond_graph[:, 1].cast("int32") + ) + bond_feas_j = paddle.index_select( + x=bond_feas, axis=0, index=bond_graph[:, 2].cast("int32") + ) + total_fea = paddle.concat( + x=[bond_feas_i, bond_feas_j, angle_feas, center_atoms], axis=1 + ) + bond_update = self.twoBody_bond(total_fea) + bond_weights_i = paddle.index_select( + x=bond_weights, axis=0, index=bond_graph[:, 1].cast("int32") + ) + bond_weights_j = paddle.index_select( + x=bond_weights, axis=0, index=bond_graph[:, 2].cast("int32") + ) + bond_update = bond_update * bond_weights_i * bond_weights_j + new_bond_feas = aggregate( + bond_update, bond_graph[:, 1], average=False, num_owner=len(bond_feas) + ) + if self.use_mlp_out: + new_bond_feas = self.mlp_out(new_bond_feas) + if self.resnet: + new_bond_feas += bond_feas + if self.bond_norm is not None: + new_bond_feas = self.bond_norm(new_bond_feas) + return new_bond_feas + + +class AngleUpdate(paddle.nn.Layer): + """Update angle features.""" + + def __init__( + self, + atom_fea_dim: int, + bond_fea_dim: int, + angle_fea_dim: int, + *, + hidden_dim: int = 0, + dropout: float = 0, + activation: str = "silu", + norm: (str | None) = None, + resnet: bool = True, + gMLP_norm: (str | None) = None, + ) -> None: + """Initialize the AngleUpdate layer. + + Args: + atom_fea_dim (int): The dimensionality of the input atom features. + bond_fea_dim (int): The dimensionality of the input bond features. + angle_fea_dim (int): The dimensionality of the input angle features. + hidden_dim (int, optional): The dimensionality of the hidden layers + in the gated MLP. + Default = 0 + dropout (float, optional): The dropout rate to apply to the gated MLP. + Default = 0. + activation (str, optional): The name of the activation function to use + in the gated MLP. Must be one of "relu", "silu", "tanh", or "gelu". + Default = "silu" + norm (str, optional): The name of the normalization layer to use on the + updated atom features. 
Must be one of "batch", "layer", or None. + Default = None + resnet (bool, optional): Whether to apply a residual connection to the + updated atom features. + Default = True + gMLP_norm (str, optional): The name of the normalization layer to use on the + gated MLP. Must be one of "batch", "layer", or None. + Default = None + """ + super().__init__() + self.resnet = resnet + self.activation = find_activation(activation) + self.twoBody_bond = GatedMLP( + input_dim=atom_fea_dim + 2 * bond_fea_dim + angle_fea_dim, + output_dim=angle_fea_dim, + hidden_dim=hidden_dim, + dropout=dropout, + norm=gMLP_norm, + activation=activation, + ) + self.angle_norm = find_normalization(norm, dim=angle_fea_dim) + + def forward( + self, + atom_feas: paddle.Tensor, + bond_feas: paddle.Tensor, + angle_feas: paddle.Tensor, + bond_graph: paddle.Tensor, + ) -> paddle.Tensor: + """Update the angle features using bond graph. + + Args: + atom_feas (Tensor): atom features tensor with shape + [num_batch_atoms, atom_fea_dim] + bond_feas (Tensor): bond features tensor with shape + [num_undirected_bonds, bond_fea_dim] + angle_feas (Tensor): angle features tensor with shape + [num_batch_angles, angle_fea_dim] + bond_graph (Tensor): Directed BondGraph tensor with shape + [num_batched_angles, 3] + + Returns: + new_angle_feas (Tensor): angle features tensor with shape + [num_batch_angles, angle_fea_dim] + + Notes: + - num_batch_atoms = sum(num_atoms) in batch + """ + bond_graph = bond_graph.astype("int64") + center_atoms = paddle.index_select(x=atom_feas, axis=0, index=bond_graph[:, 0]) + bond_feas_i = paddle.index_select(x=bond_feas, axis=0, index=bond_graph[:, 1]) + bond_feas_j = paddle.index_select(x=bond_feas, axis=0, index=bond_graph[:, 2]) + total_fea = paddle.concat( + x=[bond_feas_i, bond_feas_j, angle_feas, center_atoms], axis=1 + ) + new_angle_feas = self.twoBody_bond(total_fea) + + if self.resnet: + new_angle_feas += angle_feas + if self.angle_norm is not None: + new_angle_feas = self.angle_norm(new_angle_feas) + return new_angle_feas + + +class GraphPooling(paddle.nn.Layer): + """Pooling the sub-graphs in the batched graph.""" + + def __init__(self, *, average: bool = False) -> None: + """Args: + average (bool): whether to average the features. + """ + super().__init__() + self.average = average + + def forward( + self, atom_feas: paddle.Tensor, atom_owner: paddle.Tensor + ) -> paddle.Tensor: + """Merge the atom features that belong to same graph in a batched graph. + + Args: + atom_feas (Tensor): batched atom features after convolution layers. + [num_batch_atoms, atom_fea_dim or 1] + atom_owner (Tensor): graph indices for each atom. + [num_batch_atoms] + + Returns: + crystal_feas (Tensor): crystal feature matrix. + [n_crystals, atom_fea_dim or 1] + """ + return aggregate(atom_feas, atom_owner, average=self.average) + + +class GraphAttentionReadOut(paddle.nn.Layer): + """Multi Head Attention Read Out Layer + merge the information from atom_feas to crystal_fea. + """ + + def __init__( + self, + atom_fea_dim: int, + num_head: int = 3, + hidden_dim: int = 32, + *, + average=False, + ) -> None: + """Initialize the layer. 
+ + Args: + atom_fea_dim (int): atom feature dimension + num_head (int): number of attention heads used + hidden_dim (int): dimension of hidden layer + average (bool): whether to average the features + """ + super().__init__() + self.key = MLP( + input_dim=atom_fea_dim, output_dim=num_head, hidden_dim=hidden_dim + ) + self.softmax = paddle.nn.Softmax(axis=0) + self.average = average + + def forward( + self, atom_feas: paddle.Tensor, atom_owner: paddle.Tensor + ) -> paddle.Tensor: + """Merge the atom features that belong to same graph in a batched graph. + + Args: + atom_feas (Tensor): batched atom features after convolution layers. + [num_batch_atoms, atom_fea_dim] + atom_owner (Tensor): graph indices for each atom. + [num_batch_atoms] + + Returns: + crystal_feas (Tensor): crystal feature matrix. + [n_crystals, atom_fea_dim] + """ + crystal_feas = [] + weights = self.key(atom_feas) + bin_count = paddle.bincount(x=atom_owner) + start_index = 0 + for n_atom in bin_count: + atom_fea = atom_feas[start_index : start_index + n_atom, :] + weight = self.softmax(weights[start_index : start_index + n_atom, :]) + crystal_fea = (atom_fea.T @ weight).reshape([-1]) + if self.average: + crystal_fea /= n_atom + crystal_feas.append(crystal_fea) + start_index += n_atom + return paddle.stack(x=crystal_feas, axis=0) diff --git a/jointContribution/CHGNet/chgnet/model/model.py b/jointContribution/CHGNet/chgnet/model/model.py index 0b981d293a..9dd4573f64 100644 --- a/jointContribution/CHGNet/chgnet/model/model.py +++ b/jointContribution/CHGNet/chgnet/model/model.py @@ -1,839 +1,839 @@ -from __future__ import annotations - -import math -import os -from collections.abc import Sequence -from dataclasses import dataclass -from typing import TYPE_CHECKING -from typing import Literal - -import paddle -from chgnet.graph import CrystalGraph -from chgnet.graph import CrystalGraphConverter -from chgnet.graph.crystalgraph import DTYPE -from chgnet.model.composition_model import AtomRef -from chgnet.model.encoders import AngleEncoder -from chgnet.model.encoders import AtomEmbedding -from chgnet.model.encoders import BondEncoder -from chgnet.model.functions import MLP -from chgnet.model.functions import GatedMLP -from chgnet.model.functions import find_normalization -from chgnet.model.layers import AngleUpdate -from chgnet.model.layers import AtomConv -from chgnet.model.layers import BondConv -from chgnet.model.layers import GraphAttentionReadOut -from chgnet.model.layers import GraphPooling -from chgnet.utils import determine_device -from pymatgen.core import Structure - -if TYPE_CHECKING: - from chgnet import PredTask - from typing_extensions import Self -module_dir = os.path.dirname(os.path.abspath(__file__)) - - -class CHGNet(paddle.nn.Layer): - """Crystal Hamiltonian Graph neural Network - A model that takes in a crystal graph and output energy, force, magmom, stress. 
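For orientation, a minimal usage sketch of the model defined in this file, assuming the pretrained 0.3.0 Paddle checkpoint and the bundled mp-18767-LiMnO2.cif are available at the paths referenced elsewhere in this patch (both are assumptions about the local checkout, not guaranteed by the diff itself):

    from pymatgen.core import Structure
    from chgnet.model.model import CHGNet

    chgnet = CHGNet.load(model_name="0.3.0")                 # pretrained checkpoint registered below
    structure = Structure.from_file("mp-18767-LiMnO2.cif")   # CIF shipped alongside this module
    prediction = chgnet.predict_structure(structure, task="efsm")
    print(prediction["e"])   # energy in eV/atom per the predict_structure docstring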
- """ - - def __init__( - self, - *, - atom_fea_dim: int = 64, - bond_fea_dim: int = 64, - angle_fea_dim: int = 64, - composition_model: (str | paddle.nn.Layer) = "MPtrj", - num_radial: int = 31, - num_angular: int = 31, - n_conv: int = 4, - atom_conv_hidden_dim: (Sequence[int] | int) = 64, - update_bond: bool = True, - bond_conv_hidden_dim: (Sequence[int] | int) = 64, - update_angle: bool = True, - angle_layer_hidden_dim: (Sequence[int] | int) = 0, - conv_dropout: float = 0, - read_out: str = "ave", - mlp_hidden_dims: (Sequence[int] | int) = (64, 64, 64), - mlp_dropout: float = 0, - mlp_first: bool = True, - is_intensive: bool = True, - non_linearity: Literal["silu", "relu", "tanh", "gelu"] = "silu", - atom_graph_cutoff: float = 6, - bond_graph_cutoff: float = 3, - graph_converter_algorithm: Literal["legacy", "fast"] = "fast", - cutoff_coeff: int = 8, - learnable_rbf: bool = True, - gMLP_norm: (str | None) = "layer", - readout_norm: (str | None) = "layer", - version: (str | None) = None, - **kwargs, - ) -> None: - """Initialize CHGNet. - - Args: - atom_fea_dim (int): atom feature vector embedding dimension. - Default = 64 - bond_fea_dim (int): bond feature vector embedding dimension. - Default = 64 - angle_fea_dim (int): angle feature vector embedding dimension. - Default = 64 - bond_fea_dim (int): angle feature vector embedding dimension. - Default = 64 - composition_model (nn.Module, optional): attach a composition model to - predict energy or initialize a pretrained linear regression (AtomRef). - The default 'MPtrj' is the atom reference energy linear regression - trained on all Materials Project relaxation trajectories - Default = 'MPtrj' - num_radial (int): number of radial basis used in bond basis expansion. - Default = 9 - num_angular (int): number of angular basis used in angle basis expansion. - Default = 9 - n_conv (int): number of interaction blocks. - Default = 4 - Note: last interaction block contain only an atom_conv layer - atom_conv_hidden_dim (List or int): hidden dimensions of - atom convolution layers. - Default = 64 - update_bond (bool): whether to use bond_conv_layer in bond graph to - update bond embeddings - Default = True. - bond_conv_hidden_dim (List or int): hidden dimensions of - bond convolution layers. - Default = 64 - update_angle (bool): whether to use angle_update_layer to - update angle embeddings. - Default = True - angle_layer_hidden_dim (List or int): hidden dimensions of angle layers. - Default = 0 - conv_dropout (float): dropout rate in all conv_layers. - Default = 0 - read_out (str): method for pooling layer, 'ave' for standard - average pooling, 'attn' for multi-head attention. - Default = "ave" - mlp_hidden_dims (int or list): readout multilayer perceptron - hidden dimensions. - Default = [64, 64] - mlp_dropout (float): dropout rate in readout MLP. - Default = 0. - is_intensive (bool): whether the energy training label is intensive - i.e. energy per atom. - Default = True - non_linearity ('silu' | 'relu' | 'tanh' | 'gelu'): The name of the - activation function to use in the gated MLP. - Default = "silu". - mlp_first (bool): whether to apply mlp first then pooling. 
- if set to True, then CHGNet is essentially calculating energy for each - atom, them sum them up, this is used for the pretrained model - Default = True - atom_graph_cutoff (float): cutoff radius (A) in creating atom_graph, - this need to be consistent with the value in training dataloader - Default = 5 - bond_graph_cutoff (float): cutoff radius (A) in creating bond_graph, - this need to be consistent with value in training dataloader - Default = 3 - graph_converter_algorithm ('legacy' | 'fast'): algorithm to use - for converting pymatgen.core.Structure to CrystalGraph. - 'legacy': python implementation of graph creation - 'fast': C implementation of graph creation, this is faster, - but will need the cygraph.c file correctly compiled from pip install - default = 'fast' - cutoff_coeff (float): cutoff strength used in graph smooth cutoff function. - the smaller this coeff is, the smoother the basis is - Default = 5 - learnable_rbf (bool): whether to set the frequencies in rbf and Fourier - basis functions learnable. - Default = True - gMLP_norm (str): normalization layer to use in gate-MLP - Default = 'layer' - readout_norm (str): normalization layer to use before readout layer - Default = 'layer' - version (str): Pretrained checkpoint version. - **kwargs: Additional keyword arguments - """ - self.model_args = { - key: val - for key, val in locals().items() - if key not in {"self", "__class__", "kwargs"} - } - self.model_args.update(kwargs) - if version: - self.model_args["version"] = version - super().__init__() - self.atom_fea_dim = atom_fea_dim - self.bond_fea_dim = bond_fea_dim - self.is_intensive = is_intensive - self.n_conv = n_conv - if isinstance(composition_model, paddle.nn.Layer): - self.composition_model = composition_model - elif isinstance(composition_model, str): - self.composition_model = AtomRef(is_intensive=is_intensive) - # import pdb - # pdb.set_trace() - self.composition_model.initialize_from(composition_model) - else: - self.composition_model = None - if self.composition_model is not None: - for param in self.composition_model.parameters(): - param.stop_gradient = not False - self.graph_converter = CrystalGraphConverter( - atom_graph_cutoff=atom_graph_cutoff, - bond_graph_cutoff=bond_graph_cutoff, - algorithm=graph_converter_algorithm, - verbose=kwargs.pop("converter_verbose", False), - ) - self.atom_embedding = AtomEmbedding(atom_feature_dim=atom_fea_dim) - self.bond_basis_expansion = BondEncoder( - atom_graph_cutoff=atom_graph_cutoff, - bond_graph_cutoff=bond_graph_cutoff, - num_radial=num_radial, - cutoff_coeff=cutoff_coeff, - learnable=learnable_rbf, - ) - self.bond_embedding = paddle.nn.Linear( - in_features=num_radial, out_features=bond_fea_dim, bias_attr=False - ) - self.bond_weights_ag = paddle.nn.Linear( - in_features=num_radial, out_features=atom_fea_dim, bias_attr=False - ) - self.bond_weights_bg = paddle.nn.Linear( - in_features=num_radial, out_features=bond_fea_dim, bias_attr=False - ) - self.angle_basis_expansion = AngleEncoder( - num_angular=num_angular, learnable=learnable_rbf - ) - self.angle_embedding = paddle.nn.Linear( - in_features=num_angular, out_features=angle_fea_dim, bias_attr=False - ) - conv_norm = kwargs.pop("conv_norm", None) - mlp_out_bias = kwargs.pop("mlp_out_bias", False) - atom_graph_layers = [ - AtomConv( - atom_fea_dim=atom_fea_dim, - bond_fea_dim=bond_fea_dim, - hidden_dim=atom_conv_hidden_dim, - dropout=conv_dropout, - activation=non_linearity, - norm=conv_norm, - gMLP_norm=gMLP_norm, - use_mlp_out=True, - 
mlp_out_bias=mlp_out_bias, - resnet=True, - ) - for _ in range(n_conv) - ] - self.atom_conv_layers = paddle.nn.LayerList(sublayers=atom_graph_layers) - if update_bond: - bond_graph_layers = [ - BondConv( - atom_fea_dim=atom_fea_dim, - bond_fea_dim=bond_fea_dim, - angle_fea_dim=angle_fea_dim, - hidden_dim=bond_conv_hidden_dim, - dropout=conv_dropout, - activation=non_linearity, - norm=conv_norm, - gMLP_norm=gMLP_norm, - use_mlp_out=True, - mlp_out_bias=mlp_out_bias, - resnet=True, - ) - for _ in range(n_conv - 1) - ] - self.bond_conv_layers = paddle.nn.LayerList(sublayers=bond_graph_layers) - else: - self.bond_conv_layers = [None for _ in range(n_conv - 1)] - if update_angle: - angle_layers = [ - AngleUpdate( - atom_fea_dim=atom_fea_dim, - bond_fea_dim=bond_fea_dim, - angle_fea_dim=angle_fea_dim, - hidden_dim=angle_layer_hidden_dim, - dropout=conv_dropout, - activation=non_linearity, - norm=conv_norm, - gMLP_norm=gMLP_norm, - resnet=True, - ) - for _ in range(n_conv - 1) - ] - self.angle_layers = paddle.nn.LayerList(sublayers=angle_layers) - else: - self.angle_layers = [None for _ in range(n_conv - 1)] - self.site_wise = paddle.nn.Linear(in_features=atom_fea_dim, out_features=1) - self.readout_norm = find_normalization(readout_norm, dim=atom_fea_dim) - self.mlp_first = mlp_first - if mlp_first: - self.read_out_type = "sum" - input_dim = atom_fea_dim - self.pooling = GraphPooling(average=False) - elif read_out in {"attn", "weighted"}: - self.read_out_type = "attn" - num_heads = kwargs.pop("num_heads", 3) - self.pooling = GraphAttentionReadOut( - atom_fea_dim, num_head=num_heads, average=True - ) - input_dim = atom_fea_dim * num_heads - else: - self.read_out_type = "ave" - input_dim = atom_fea_dim - self.pooling = GraphPooling(average=True) - if kwargs.pop("final_mlp", "MLP") in {"normal", "MLP"}: - self.mlp = MLP( - input_dim=input_dim, - hidden_dim=mlp_hidden_dims, - output_dim=1, - dropout=mlp_dropout, - activation=non_linearity, - ) - else: - self.mlp = paddle.nn.Sequential( - GatedMLP( - input_dim=input_dim, - hidden_dim=mlp_hidden_dims, - output_dim=mlp_hidden_dims[-1], - dropout=mlp_dropout, - norm=gMLP_norm, - activation=non_linearity, - ), - paddle.nn.Linear(in_features=mlp_hidden_dims[-1], out_features=1), - ) - version_str = f" v{version}" if version else "" - print(f"CHGNet{version_str} initialized with {self.n_params:,} parameters") - - @property - def version(self) -> (str | None): - """Return the version of the loaded checkpoint.""" - return self.model_args.get("version") - - @property - def n_params(self) -> int: - """Return the number of parameters in the model.""" - return sum(p.size for p in self.parameters()) - - def forward( - self, - graphs: Sequence[CrystalGraph], - *, - task: PredTask = "e", - return_site_energies: bool = False, - return_atom_feas: bool = False, - return_crystal_feas: bool = False, - ) -> dict[str, paddle.Tensor]: - """Get prediction associated with input graphs - Args: - graphs (List): a list of CrystalGraphs - task (str): the prediction task. One of 'e', 'em', 'ef', 'efs', 'efsm'. - Default = 'e' - return_site_energies (bool): whether to return per-site energies, - only available if self.mlp_first == True - Default = False - return_atom_feas (bool): whether to return the atom features before last - conv layer. - Default = False - return_crystal_feas (bool): whether to return crystal feature. - Default = False - Returns: - model output (dict). 
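The task string is parsed with simple membership tests in forward/_compute, and the raw strain gradient is converted from eV/A^3 to GPa (1 eV/A^3 = 160.21766208 GPa, the constant used in _compute). A small illustration mirroring the expressions in the body that follows:

    task = "efsm"
    compute_force = "f" in task    # True: forces from -d(E)/d(atom_positions)
    compute_stress = "s" in task   # True: d(E)/d(strain) scaled by 160.21766208 / volume
    compute_magmom = "m" in task   # True: per-site |site_wise(atom_feas)|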
- """ - comp_energy = ( - 0 if self.composition_model is None else self.composition_model(graphs) - ) - batched_graph = BatchedGraph.from_graphs( - graphs, - bond_basis_expansion=self.bond_basis_expansion, - angle_basis_expansion=self.angle_basis_expansion, - compute_stress="s" in task, - ) - prediction = self._compute( - batched_graph, - compute_force="f" in task, - compute_stress="s" in task, - compute_magmom="m" in task, - return_site_energies=return_site_energies, - return_atom_feas=return_atom_feas, - return_crystal_feas=return_crystal_feas, - ) - prediction["e"] += comp_energy - if return_site_energies and self.composition_model is not None: - site_energy_shifts = self.composition_model.get_site_energies(graphs) - prediction["site_energies"] = [ - (i + j) - for i, j in zip( - prediction["site_energies"], site_energy_shifts, strict=True - ) - ] - return prediction - - def _compute( - self, - g: BatchedGraph, - *, - compute_force: bool = False, - compute_stress: bool = False, - compute_magmom: bool = False, - return_site_energies: bool = False, - return_atom_feas: bool = False, - return_crystal_feas: bool = False, - ) -> dict: - """Get Energy, Force, Stress, Magmom associated with input graphs - force = - d(Energy)/d(atom_positions) - stress = 1/V * d(Energy)/d(strain). - - Args: - g (BatchedGraph): batched graph - compute_force (bool): whether to compute force. - Default = False - compute_stress (bool): whether to compute stress. - Default = False - compute_magmom (bool): whether to compute magmom. - Default = False - return_site_energies (bool): whether to return per-site energies, - only available if self.mlp_first == True - Default = False - return_atom_feas (bool): whether to return atom features. - Default = False - return_crystal_feas (bool): whether to return crystal features. 
- Default = False - - Returns: - prediction (dict): containing the fields: - e (Tensor) : energy of structures [batch_size, 1] - f (Tensor) : force on atoms [num_batch_atoms, 3] - s (Tensor) : stress of structure [3 * batch_size, 3] - m (Tensor) : magnetic moments of sites [num_batch_atoms, 3] - """ - prediction = {} - atoms_per_graph = paddle.bincount(x=g.atom_owners) - prediction["atoms_per_graph"] = atoms_per_graph - atom_feas = self.atom_embedding(g.atomic_numbers - 1) - bond_feas = self.bond_embedding(g.bond_bases_ag) - bond_weights_ag = self.bond_weights_ag(g.bond_bases_ag) - bond_weights_bg = self.bond_weights_bg(g.bond_bases_bg) - if len(g.angle_bases) != 0: - angle_feas = self.angle_embedding(g.angle_bases) - for idx, (atom_layer, bond_layer, angle_layer) in enumerate( - zip( - self.atom_conv_layers[:-1], - self.bond_conv_layers, - self.angle_layers, - strict=False, - ) - ): - atom_feas = atom_layer( - atom_feas=atom_feas, - bond_feas=bond_feas, - bond_weights=bond_weights_ag, - atom_graph=g.batched_atom_graph, - directed2undirected=g.directed2undirected, - ) - if len(g.angle_bases) != 0 and bond_layer is not None: - bond_feas = bond_layer( - atom_feas=atom_feas, - bond_feas=bond_feas, - bond_weights=bond_weights_bg, - angle_feas=angle_feas, - bond_graph=g.batched_bond_graph, - ) - if angle_layer is not None: - angle_feas = angle_layer( - atom_feas=atom_feas, - bond_feas=bond_feas, - angle_feas=angle_feas, - bond_graph=g.batched_bond_graph, - ) - if idx == self.n_conv - 2: - if return_atom_feas: - prediction["atom_fea"] = paddle.split( - x=atom_feas, num_or_sections=atoms_per_graph.tolist() - ) - if compute_magmom: - magmom = paddle.abs(x=self.site_wise(atom_feas)) - prediction["m"] = list( - paddle.split( - x=magmom.reshape([-1]), - num_or_sections=atoms_per_graph.tolist(), - ) - ) - atom_feas = self.atom_conv_layers[-1]( - atom_feas=atom_feas, - bond_feas=bond_feas, - bond_weights=bond_weights_ag, - atom_graph=g.batched_atom_graph, - directed2undirected=g.directed2undirected, - ) - if self.readout_norm is not None: - atom_feas = self.readout_norm(atom_feas) - if self.mlp_first: - energies = self.mlp(atom_feas) - energy = self.pooling(energies, g.atom_owners).reshape([-1]) - if return_site_energies: - prediction["site_energies"] = paddle.split( - x=energies.squeeze(axis=1), num_or_sections=atoms_per_graph.tolist() - ) - if return_crystal_feas: - prediction["crystal_fea"] = self.pooling(atom_feas, g.atom_owners) - else: - crystal_feas = self.pooling(atom_feas, g.atom_owners) - energy = self.mlp(crystal_feas).reshape([-1]) * atoms_per_graph - if return_crystal_feas: - prediction["crystal_fea"] = crystal_feas - - if compute_force: - force = paddle.grad( - outputs=energy.sum(), - inputs=g.atom_positions, - create_graph=False, - retain_graph=True, - ) - prediction["f"] = [(-1 * force_dim) for force_dim in force] - if compute_stress: - stress = paddle.grad( - outputs=energy.sum(), - inputs=g.strains, - create_graph=False, - retain_graph=True, - ) - scale = 1 / g.volumes * 160.21766208 - stress = [(i * j) for i, j in zip(stress, scale, strict=False)] - prediction["s"] = stress - - if self.is_intensive: - energy /= atoms_per_graph.cast("float32") - prediction["e"] = energy - return prediction - - def predict_structure( - self, - structure: (Structure | Sequence[Structure]), - *, - task: PredTask = "efsm", - return_site_energies: bool = False, - return_atom_feas: bool = False, - return_crystal_feas: bool = False, - batch_size: int = 16, - ) -> (dict[str, paddle.Tensor] | list[dict[str, 
paddle.Tensor]]): - """Predict from pymatgen.core.Structure. - - Args: - structure (Structure | Sequence[Structure]): structure or a list of - structures to predict. - task (str): can be 'e' 'ef', 'em', 'efs', 'efsm' - Default = "efsm" - return_site_energies (bool): whether to return per-site energies. - Default = False - return_atom_feas (bool): whether to return atom features. - Default = False - return_crystal_feas (bool): whether to return crystal features. - Default = False - batch_size (int): batch_size for predict structures. - Default = 16 - - Returns: - prediction (dict): dict or list of dict containing the fields: - e (Tensor) : energy of structures float in eV/atom - f (Tensor) : force on atoms [num_atoms, 3] in eV/A - s (Tensor) : stress of structure [3, 3] in GPa - m (Tensor) : magnetic moments of sites [num_atoms, 3] in Bohr - magneton mu_B - """ - if self.graph_converter is None: - raise ValueError("graph_converter cannot be None!") - structures = [structure] if isinstance(structure, Structure) else structure - graphs = [self.graph_converter(struct) for struct in structures] - return self.predict_graph( - graphs, - task=task, - return_site_energies=return_site_energies, - return_atom_feas=return_atom_feas, - return_crystal_feas=return_crystal_feas, - batch_size=batch_size, - ) - - def predict_graph( - self, - graph: (CrystalGraph | Sequence[CrystalGraph]), - *, - task: PredTask = "efsm", - return_site_energies: bool = False, - return_atom_feas: bool = False, - return_crystal_feas: bool = False, - batch_size: int = 16, - ) -> (dict[str, paddle.Tensor] | list[dict[str, paddle.Tensor]]): - """Predict from CrustalGraph. - - Args: - graph (CrystalGraph | Sequence[CrystalGraph]): CrystalGraph(s) to predict. - task (str): can be 'e' 'ef', 'em', 'efs', 'efsm' - Default = "efsm" - return_site_energies (bool): whether to return per-site energies. - Default = False - return_atom_feas (bool): whether to return atom features. - Default = False - return_crystal_feas (bool): whether to return crystal features. - Default = False - batch_size (int): batch_size for predict structures. 
- Default = 16 - - Returns: - prediction (dict): dict or list of dict containing the fields: - e (Tensor) : energy of structures float in eV/atom - f (Tensor) : force on atoms [num_atoms, 3] in eV/A - s (Tensor) : stress of structure [3, 3] in GPa - m (Tensor) : magnetic moments of sites [num_atoms, 3] in Bohr - magneton mu_B - """ - if not isinstance(graph, CrystalGraph | Sequence): - raise TypeError( - f"type(graph)={type(graph)!r} must be CrystalGraph or list of CrystalGraphs" - ) - # next(iter(self.parameters())).place - graphs = [graph] if isinstance(graph, CrystalGraph) else graph - self.eval() - predictions: list[dict[str, paddle.Tensor]] = [{} for _ in range(len(graphs))] - n_steps = math.ceil(len(graphs) / batch_size) - for step in range(n_steps): - prediction = self.forward( - [g for g in graphs[batch_size * step : batch_size * (step + 1)]], - task=task, - return_site_energies=return_site_energies, - return_atom_feas=return_atom_feas, - return_crystal_feas=return_crystal_feas, - ) - for key in { - "e", - "f", - "s", - "m", - "site_energies", - "atom_fea", - "crystal_fea", - } & {*prediction}: - for idx, tensor in enumerate(prediction[key]): - predictions[step * batch_size + idx][key] = ( - tensor.cpu().detach().numpy() - ) - return predictions[0] if len(graphs) == 1 else predictions - - def as_dict(self) -> dict: - """Return the CHGNet weights and args in a dictionary.""" - return {"state_dict": self.state_dict(), "model_args": self.model_args} - - def todict(self) -> dict: - """Needed for ASE JSON serialization when saving CHGNet potential to - trajectory file (https://github.com/CederGroupHub/chgnet/issues/48). - """ - return {"model_name": type(self).__name__, "model_args": self.model_args} - - @classmethod - def from_dict(cls, dct: dict, **kwargs) -> Self: - """Build a CHGNet from a saved dictionary.""" - chgnet = cls(**dct["model_args"], **kwargs) - chgnet.set_state_dict(state_dict=dct["state_dict"]) - return chgnet - - @classmethod - def from_file(cls, path: str, **kwargs) -> Self: - """Build a CHGNet from a saved file.""" - state = paddle.load(path=str(path)) - return cls.from_dict(state["model"], **kwargs) - - @classmethod - def load( - cls, - *, - model_name: str = "0.3.0", - use_device: (str | None) = None, - check_cuda_mem: bool = False, - verbose: bool = True, - ) -> Self: - """Load pretrained CHGNet model. - - Args: - model_name (str, optional): - Default = "0.3.0". - use_device (str, optional): The device to be used for predictions, - either "cpu", "cuda", or "mps". If not specified, the default device is - automatically selected based on the available options. - Default = None - check_cuda_mem (bool): Whether to use cuda with most available memory - Default = False - verbose (bool): whether to print model device information - Default = True - Raises: - ValueError: On unknown model_name. - """ - checkpoint_path = { - "0.3.0": "../pretrained/0.3.0/chgnet_0.3.0_paddle.pdparams", - "0.2.0": "../pretrained/0.2.0/chgnet_0.2.0_e30f77s348m32.pth.tar", - }.get(model_name) - if checkpoint_path is None: - raise ValueError(f"Unknown model_name={model_name!r}") - model = cls.from_file( - os.path.join(module_dir, checkpoint_path), - mlp_out_bias=model_name == "0.2.0", - version=model_name, - ) - device = determine_device(use_device=use_device, check_cuda_mem=check_cuda_mem) - if verbose: - print(f"CHGNet will run on {device}") - return model - - -@dataclass -class BatchedGraph: - """Batched crystal graph for parallel computing. 
- - Attributes: - atomic_numbers (Tensor): atomic numbers vector - [num_batch_atoms] - bond_bases_ag (Tensor): bond bases vector for atom_graph - [num_batch_bonds_ag, num_radial] - bond_bases_bg (Tensor): bond bases vector for atom_graph - [num_batch_bonds_bg, num_radial] - angle_bases (Tensor): angle bases vector - [num_batch_angles, num_angular] - batched_atom_graph (Tensor) : batched atom graph adjacency list - [num_batch_bonds, 2] - batched_bond_graph (Tensor) : bond graph adjacency list - [num_batch_angles, 3] - atom_owners (Tensor): graph indices for each atom, used aggregate batched - graph back to single graph - [num_batch_atoms] - directed2undirected (Tensor): the utility tensor used to quickly - map directed edges to undirected edges in graph - [num_directed] - atom_positions (list[Tensor]): cartesian coordinates of the atoms - from structures - [[num_atoms_1, 3], [num_atoms_2, 3], ...] - strains (list[Tensor]): a list of strains that's initialized to be zeros - [[3, 3], [3, 3], ...] - volumes (Tensor): the volume of each structure in the batch - [batch_size] - """ - - atomic_numbers: paddle.Tensor - bond_bases_ag: paddle.Tensor - bond_bases_bg: paddle.Tensor - angle_bases: paddle.Tensor - batched_atom_graph: paddle.Tensor - batched_bond_graph: paddle.Tensor - atom_owners: paddle.Tensor - directed2undirected: paddle.Tensor - atom_positions: Sequence[paddle.Tensor] - strains: Sequence[paddle.Tensor] - volumes: Sequence[paddle.Tensor] | paddle.Tensor - - @classmethod - def from_graphs( - cls, - graphs: Sequence[CrystalGraph], - bond_basis_expansion: paddle.nn.Layer, - angle_basis_expansion: paddle.nn.Layer, - *, - compute_stress: bool = False, - ) -> Self: - """Featurize and assemble a list of graphs. - - Args: - graphs (list[Tensor]): a list of CrystalGraphs - bond_basis_expansion (nn.Module): bond basis expansion layer in CHGNet - angle_basis_expansion (nn.Module): angle basis expansion layer in CHGNet - compute_stress (bool): whether to compute stress. 
Default = False - - Returns: - BatchedGraph: assembled graphs ready for batched CHGNet forward pass - """ - atomic_numbers, atom_positions = [], [] - strains, volumes = [], [] - bond_bases_ag, bond_bases_bg, angle_bases = [], [], [] - batched_atom_graph, batched_bond_graph = [], [] - directed2undirected = [] - atom_owners = [] - atom_offset_idx = n_undirected = 0 - for graph_idx, graph in enumerate(graphs): - n_atom = graph.atomic_number.shape[0] - atomic_numbers.append(graph.atomic_number) - if compute_stress: - strain = paddle.to_tensor( - paddle.zeros([3, 3], dtype="float32"), stop_gradient=False - ) - lattice = paddle.matmul( - graph.lattice, paddle.eye(3, dtype="float32") + strain - ) - else: - strain = None - lattice = graph.lattice - volumes.append( - paddle.dot( - x=lattice[0], y=paddle.cross(x=lattice[1], y=lattice[2], axis=-1) - ) - ) - strains.append(strain) - atom_cart_coords = graph.atom_frac_coord @ lattice - if graph.atom_graph.dim() == 1: - graph.atom_graph = graph.atom_graph.reshape(0, 2) - bond_basis_ag, bond_basis_bg, bond_vectors = bond_basis_expansion( - center=atom_cart_coords[graph.atom_graph[:, 0]], - neighbor=atom_cart_coords[graph.atom_graph[:, 1]], - undirected2directed=graph.undirected2directed, - image=graph.neighbor_image, - lattice=lattice, - ) - atom_positions.append(atom_cart_coords) - bond_bases_ag.append(bond_basis_ag) - bond_bases_bg.append(bond_basis_bg) - batched_atom_graph.append(graph.atom_graph + atom_offset_idx) - directed2undirected.append(graph.directed2undirected + n_undirected) - if len(graph.bond_graph) != 0: - bond_vecs_i = paddle.gather( - x=bond_vectors, axis=0, index=graph.bond_graph[:, 2] - ) - bond_vecs_j = paddle.gather( - x=bond_vectors, axis=0, index=graph.bond_graph[:, 4] - ) - angle_basis = angle_basis_expansion(bond_vecs_i, bond_vecs_j) - angle_bases.append(angle_basis) - bond_graph = paddle.zeros([graph.bond_graph.shape[0], 3]) - # graph.bond_graph.new_zeros() - bond_graph[:, 0] = graph.bond_graph[:, 0] + atom_offset_idx - bond_graph[:, 1] = graph.bond_graph[:, 1] + n_undirected - bond_graph[:, 2] = graph.bond_graph[:, 3] + n_undirected - batched_bond_graph.append(bond_graph) - out_0 = paddle.ones(shape=n_atom) - out_0.stop_gradient = not False - atom_owners.append(out_0 * graph_idx) - atom_offset_idx += n_atom - n_undirected += len(bond_basis_ag) - atomic_numbers = paddle.concat(x=atomic_numbers, axis=0) - bond_bases_ag = paddle.concat(x=bond_bases_ag, axis=0) - bond_bases_bg = paddle.concat(x=bond_bases_bg, axis=0) - angle_bases = ( - paddle.concat(x=angle_bases, axis=0) - if len(angle_bases) != 0 - else paddle.to_tensor(data=[]) - ) - batched_atom_graph = paddle.concat(x=batched_atom_graph, axis=0) - if batched_bond_graph != []: - batched_bond_graph = paddle.concat(x=batched_bond_graph, axis=0) - else: - batched_bond_graph = paddle.to_tensor(data=[]) - atom_owners = paddle.concat(x=atom_owners, axis=0).astype("int32") - directed2undirected = paddle.concat(x=directed2undirected, axis=0) - volumes = paddle.to_tensor( - data=volumes, dtype=DTYPE, place=atomic_numbers.place - ) - return cls( - atomic_numbers=atomic_numbers, - bond_bases_ag=bond_bases_ag, - bond_bases_bg=bond_bases_bg, - angle_bases=angle_bases, - batched_atom_graph=batched_atom_graph, - batched_bond_graph=batched_bond_graph, - atom_owners=atom_owners, - directed2undirected=directed2undirected, - atom_positions=atom_positions, - strains=strains, - volumes=volumes, - ) +from __future__ import annotations + +import math +import os +from collections.abc import Sequence 
+from dataclasses import dataclass +from typing import TYPE_CHECKING +from typing import Literal + +import paddle +from chgnet.graph import CrystalGraph +from chgnet.graph import CrystalGraphConverter +from chgnet.graph.crystalgraph import DTYPE +from chgnet.model.composition_model import AtomRef +from chgnet.model.encoders import AngleEncoder +from chgnet.model.encoders import AtomEmbedding +from chgnet.model.encoders import BondEncoder +from chgnet.model.functions import MLP +from chgnet.model.functions import GatedMLP +from chgnet.model.functions import find_normalization +from chgnet.model.layers import AngleUpdate +from chgnet.model.layers import AtomConv +from chgnet.model.layers import BondConv +from chgnet.model.layers import GraphAttentionReadOut +from chgnet.model.layers import GraphPooling +from chgnet.utils import determine_device +from pymatgen.core import Structure + +if TYPE_CHECKING: + from chgnet import PredTask + from typing_extensions import Self +module_dir = os.path.dirname(os.path.abspath(__file__)) + + +class CHGNet(paddle.nn.Layer): + """Crystal Hamiltonian Graph neural Network + A model that takes in a crystal graph and output energy, force, magmom, stress. + """ + + def __init__( + self, + *, + atom_fea_dim: int = 64, + bond_fea_dim: int = 64, + angle_fea_dim: int = 64, + composition_model: (str | paddle.nn.Layer) = "MPtrj", + num_radial: int = 31, + num_angular: int = 31, + n_conv: int = 4, + atom_conv_hidden_dim: (Sequence[int] | int) = 64, + update_bond: bool = True, + bond_conv_hidden_dim: (Sequence[int] | int) = 64, + update_angle: bool = True, + angle_layer_hidden_dim: (Sequence[int] | int) = 0, + conv_dropout: float = 0, + read_out: str = "ave", + mlp_hidden_dims: (Sequence[int] | int) = (64, 64, 64), + mlp_dropout: float = 0, + mlp_first: bool = True, + is_intensive: bool = True, + non_linearity: Literal["silu", "relu", "tanh", "gelu"] = "silu", + atom_graph_cutoff: float = 6, + bond_graph_cutoff: float = 3, + graph_converter_algorithm: Literal["legacy", "fast"] = "fast", + cutoff_coeff: int = 8, + learnable_rbf: bool = True, + gMLP_norm: (str | None) = "layer", + readout_norm: (str | None) = "layer", + version: (str | None) = None, + **kwargs, + ) -> None: + """Initialize CHGNet. + + Args: + atom_fea_dim (int): atom feature vector embedding dimension. + Default = 64 + bond_fea_dim (int): bond feature vector embedding dimension. + Default = 64 + angle_fea_dim (int): angle feature vector embedding dimension. + Default = 64 + bond_fea_dim (int): angle feature vector embedding dimension. + Default = 64 + composition_model (nn.Module, optional): attach a composition model to + predict energy or initialize a pretrained linear regression (AtomRef). + The default 'MPtrj' is the atom reference energy linear regression + trained on all Materials Project relaxation trajectories + Default = 'MPtrj' + num_radial (int): number of radial basis used in bond basis expansion. + Default = 9 + num_angular (int): number of angular basis used in angle basis expansion. + Default = 9 + n_conv (int): number of interaction blocks. + Default = 4 + Note: last interaction block contain only an atom_conv layer + atom_conv_hidden_dim (List or int): hidden dimensions of + atom convolution layers. + Default = 64 + update_bond (bool): whether to use bond_conv_layer in bond graph to + update bond embeddings + Default = True. + bond_conv_hidden_dim (List or int): hidden dimensions of + bond convolution layers. 
+ Default = 64 + update_angle (bool): whether to use angle_update_layer to + update angle embeddings. + Default = True + angle_layer_hidden_dim (List or int): hidden dimensions of angle layers. + Default = 0 + conv_dropout (float): dropout rate in all conv_layers. + Default = 0 + read_out (str): method for pooling layer, 'ave' for standard + average pooling, 'attn' for multi-head attention. + Default = "ave" + mlp_hidden_dims (int or list): readout multilayer perceptron + hidden dimensions. + Default = [64, 64] + mlp_dropout (float): dropout rate in readout MLP. + Default = 0. + is_intensive (bool): whether the energy training label is intensive + i.e. energy per atom. + Default = True + non_linearity ('silu' | 'relu' | 'tanh' | 'gelu'): The name of the + activation function to use in the gated MLP. + Default = "silu". + mlp_first (bool): whether to apply mlp first then pooling. + if set to True, then CHGNet is essentially calculating energy for each + atom, them sum them up, this is used for the pretrained model + Default = True + atom_graph_cutoff (float): cutoff radius (A) in creating atom_graph, + this need to be consistent with the value in training dataloader + Default = 5 + bond_graph_cutoff (float): cutoff radius (A) in creating bond_graph, + this need to be consistent with value in training dataloader + Default = 3 + graph_converter_algorithm ('legacy' | 'fast'): algorithm to use + for converting pymatgen.core.Structure to CrystalGraph. + 'legacy': python implementation of graph creation + 'fast': C implementation of graph creation, this is faster, + but will need the cygraph.c file correctly compiled from pip install + default = 'fast' + cutoff_coeff (float): cutoff strength used in graph smooth cutoff function. + the smaller this coeff is, the smoother the basis is + Default = 5 + learnable_rbf (bool): whether to set the frequencies in rbf and Fourier + basis functions learnable. + Default = True + gMLP_norm (str): normalization layer to use in gate-MLP + Default = 'layer' + readout_norm (str): normalization layer to use before readout layer + Default = 'layer' + version (str): Pretrained checkpoint version. 
+ **kwargs: Additional keyword arguments + """ + self.model_args = { + key: val + for key, val in locals().items() + if key not in {"self", "__class__", "kwargs"} + } + self.model_args.update(kwargs) + if version: + self.model_args["version"] = version + super().__init__() + self.atom_fea_dim = atom_fea_dim + self.bond_fea_dim = bond_fea_dim + self.is_intensive = is_intensive + self.n_conv = n_conv + if isinstance(composition_model, paddle.nn.Layer): + self.composition_model = composition_model + elif isinstance(composition_model, str): + self.composition_model = AtomRef(is_intensive=is_intensive) + # import pdb + # pdb.set_trace() + self.composition_model.initialize_from(composition_model) + else: + self.composition_model = None + if self.composition_model is not None: + for param in self.composition_model.parameters(): + param.stop_gradient = not False + self.graph_converter = CrystalGraphConverter( + atom_graph_cutoff=atom_graph_cutoff, + bond_graph_cutoff=bond_graph_cutoff, + algorithm=graph_converter_algorithm, + verbose=kwargs.pop("converter_verbose", False), + ) + self.atom_embedding = AtomEmbedding(atom_feature_dim=atom_fea_dim) + self.bond_basis_expansion = BondEncoder( + atom_graph_cutoff=atom_graph_cutoff, + bond_graph_cutoff=bond_graph_cutoff, + num_radial=num_radial, + cutoff_coeff=cutoff_coeff, + learnable=learnable_rbf, + ) + self.bond_embedding = paddle.nn.Linear( + in_features=num_radial, out_features=bond_fea_dim, bias_attr=False + ) + self.bond_weights_ag = paddle.nn.Linear( + in_features=num_radial, out_features=atom_fea_dim, bias_attr=False + ) + self.bond_weights_bg = paddle.nn.Linear( + in_features=num_radial, out_features=bond_fea_dim, bias_attr=False + ) + self.angle_basis_expansion = AngleEncoder( + num_angular=num_angular, learnable=learnable_rbf + ) + self.angle_embedding = paddle.nn.Linear( + in_features=num_angular, out_features=angle_fea_dim, bias_attr=False + ) + conv_norm = kwargs.pop("conv_norm", None) + mlp_out_bias = kwargs.pop("mlp_out_bias", False) + atom_graph_layers = [ + AtomConv( + atom_fea_dim=atom_fea_dim, + bond_fea_dim=bond_fea_dim, + hidden_dim=atom_conv_hidden_dim, + dropout=conv_dropout, + activation=non_linearity, + norm=conv_norm, + gMLP_norm=gMLP_norm, + use_mlp_out=True, + mlp_out_bias=mlp_out_bias, + resnet=True, + ) + for _ in range(n_conv) + ] + self.atom_conv_layers = paddle.nn.LayerList(sublayers=atom_graph_layers) + if update_bond: + bond_graph_layers = [ + BondConv( + atom_fea_dim=atom_fea_dim, + bond_fea_dim=bond_fea_dim, + angle_fea_dim=angle_fea_dim, + hidden_dim=bond_conv_hidden_dim, + dropout=conv_dropout, + activation=non_linearity, + norm=conv_norm, + gMLP_norm=gMLP_norm, + use_mlp_out=True, + mlp_out_bias=mlp_out_bias, + resnet=True, + ) + for _ in range(n_conv - 1) + ] + self.bond_conv_layers = paddle.nn.LayerList(sublayers=bond_graph_layers) + else: + self.bond_conv_layers = [None for _ in range(n_conv - 1)] + if update_angle: + angle_layers = [ + AngleUpdate( + atom_fea_dim=atom_fea_dim, + bond_fea_dim=bond_fea_dim, + angle_fea_dim=angle_fea_dim, + hidden_dim=angle_layer_hidden_dim, + dropout=conv_dropout, + activation=non_linearity, + norm=conv_norm, + gMLP_norm=gMLP_norm, + resnet=True, + ) + for _ in range(n_conv - 1) + ] + self.angle_layers = paddle.nn.LayerList(sublayers=angle_layers) + else: + self.angle_layers = [None for _ in range(n_conv - 1)] + self.site_wise = paddle.nn.Linear(in_features=atom_fea_dim, out_features=1) + self.readout_norm = find_normalization(readout_norm, dim=atom_fea_dim) + self.mlp_first = 
mlp_first + if mlp_first: + self.read_out_type = "sum" + input_dim = atom_fea_dim + self.pooling = GraphPooling(average=False) + elif read_out in {"attn", "weighted"}: + self.read_out_type = "attn" + num_heads = kwargs.pop("num_heads", 3) + self.pooling = GraphAttentionReadOut( + atom_fea_dim, num_head=num_heads, average=True + ) + input_dim = atom_fea_dim * num_heads + else: + self.read_out_type = "ave" + input_dim = atom_fea_dim + self.pooling = GraphPooling(average=True) + if kwargs.pop("final_mlp", "MLP") in {"normal", "MLP"}: + self.mlp = MLP( + input_dim=input_dim, + hidden_dim=mlp_hidden_dims, + output_dim=1, + dropout=mlp_dropout, + activation=non_linearity, + ) + else: + self.mlp = paddle.nn.Sequential( + GatedMLP( + input_dim=input_dim, + hidden_dim=mlp_hidden_dims, + output_dim=mlp_hidden_dims[-1], + dropout=mlp_dropout, + norm=gMLP_norm, + activation=non_linearity, + ), + paddle.nn.Linear(in_features=mlp_hidden_dims[-1], out_features=1), + ) + version_str = f" v{version}" if version else "" + print(f"CHGNet{version_str} initialized with {self.n_params:,} parameters") + + @property + def version(self) -> (str | None): + """Return the version of the loaded checkpoint.""" + return self.model_args.get("version") + + @property + def n_params(self) -> int: + """Return the number of parameters in the model.""" + return sum(p.size for p in self.parameters()) + + def forward( + self, + graphs: Sequence[CrystalGraph], + *, + task: PredTask = "e", + return_site_energies: bool = False, + return_atom_feas: bool = False, + return_crystal_feas: bool = False, + ) -> dict[str, paddle.Tensor]: + """Get prediction associated with input graphs + Args: + graphs (List): a list of CrystalGraphs + task (str): the prediction task. One of 'e', 'em', 'ef', 'efs', 'efsm'. + Default = 'e' + return_site_energies (bool): whether to return per-site energies, + only available if self.mlp_first == True + Default = False + return_atom_feas (bool): whether to return the atom features before last + conv layer. + Default = False + return_crystal_feas (bool): whether to return crystal feature. + Default = False + Returns: + model output (dict). + """ + comp_energy = ( + 0 if self.composition_model is None else self.composition_model(graphs) + ) + batched_graph = BatchedGraph.from_graphs( + graphs, + bond_basis_expansion=self.bond_basis_expansion, + angle_basis_expansion=self.angle_basis_expansion, + compute_stress="s" in task, + ) + prediction = self._compute( + batched_graph, + compute_force="f" in task, + compute_stress="s" in task, + compute_magmom="m" in task, + return_site_energies=return_site_energies, + return_atom_feas=return_atom_feas, + return_crystal_feas=return_crystal_feas, + ) + prediction["e"] += comp_energy + if return_site_energies and self.composition_model is not None: + site_energy_shifts = self.composition_model.get_site_energies(graphs) + prediction["site_energies"] = [ + (i + j) + for i, j in zip( + prediction["site_energies"], site_energy_shifts, strict=True + ) + ] + return prediction + + def _compute( + self, + g: BatchedGraph, + *, + compute_force: bool = False, + compute_stress: bool = False, + compute_magmom: bool = False, + return_site_energies: bool = False, + return_atom_feas: bool = False, + return_crystal_feas: bool = False, + ) -> dict: + """Get Energy, Force, Stress, Magmom associated with input graphs + force = - d(Energy)/d(atom_positions) + stress = 1/V * d(Energy)/d(strain). + + Args: + g (BatchedGraph): batched graph + compute_force (bool): whether to compute force. 
+ Default = False + compute_stress (bool): whether to compute stress. + Default = False + compute_magmom (bool): whether to compute magmom. + Default = False + return_site_energies (bool): whether to return per-site energies, + only available if self.mlp_first == True + Default = False + return_atom_feas (bool): whether to return atom features. + Default = False + return_crystal_feas (bool): whether to return crystal features. + Default = False + + Returns: + prediction (dict): containing the fields: + e (Tensor) : energy of structures [batch_size, 1] + f (Tensor) : force on atoms [num_batch_atoms, 3] + s (Tensor) : stress of structure [3 * batch_size, 3] + m (Tensor) : magnetic moments of sites [num_batch_atoms, 3] + """ + prediction = {} + atoms_per_graph = paddle.bincount(x=g.atom_owners) + prediction["atoms_per_graph"] = atoms_per_graph + atom_feas = self.atom_embedding(g.atomic_numbers - 1) + bond_feas = self.bond_embedding(g.bond_bases_ag) + bond_weights_ag = self.bond_weights_ag(g.bond_bases_ag) + bond_weights_bg = self.bond_weights_bg(g.bond_bases_bg) + if len(g.angle_bases) != 0: + angle_feas = self.angle_embedding(g.angle_bases) + for idx, (atom_layer, bond_layer, angle_layer) in enumerate( + zip( + self.atom_conv_layers[:-1], + self.bond_conv_layers, + self.angle_layers, + strict=False, + ) + ): + atom_feas = atom_layer( + atom_feas=atom_feas, + bond_feas=bond_feas, + bond_weights=bond_weights_ag, + atom_graph=g.batched_atom_graph, + directed2undirected=g.directed2undirected, + ) + if len(g.angle_bases) != 0 and bond_layer is not None: + bond_feas = bond_layer( + atom_feas=atom_feas, + bond_feas=bond_feas, + bond_weights=bond_weights_bg, + angle_feas=angle_feas, + bond_graph=g.batched_bond_graph, + ) + if angle_layer is not None: + angle_feas = angle_layer( + atom_feas=atom_feas, + bond_feas=bond_feas, + angle_feas=angle_feas, + bond_graph=g.batched_bond_graph, + ) + if idx == self.n_conv - 2: + if return_atom_feas: + prediction["atom_fea"] = paddle.split( + x=atom_feas, num_or_sections=atoms_per_graph.tolist() + ) + if compute_magmom: + magmom = paddle.abs(x=self.site_wise(atom_feas)) + prediction["m"] = list( + paddle.split( + x=magmom.reshape([-1]), + num_or_sections=atoms_per_graph.tolist(), + ) + ) + atom_feas = self.atom_conv_layers[-1]( + atom_feas=atom_feas, + bond_feas=bond_feas, + bond_weights=bond_weights_ag, + atom_graph=g.batched_atom_graph, + directed2undirected=g.directed2undirected, + ) + if self.readout_norm is not None: + atom_feas = self.readout_norm(atom_feas) + if self.mlp_first: + energies = self.mlp(atom_feas) + energy = self.pooling(energies, g.atom_owners).reshape([-1]) + if return_site_energies: + prediction["site_energies"] = paddle.split( + x=energies.squeeze(axis=1), num_or_sections=atoms_per_graph.tolist() + ) + if return_crystal_feas: + prediction["crystal_fea"] = self.pooling(atom_feas, g.atom_owners) + else: + crystal_feas = self.pooling(atom_feas, g.atom_owners) + energy = self.mlp(crystal_feas).reshape([-1]) * atoms_per_graph + if return_crystal_feas: + prediction["crystal_fea"] = crystal_feas + + if compute_force: + force = paddle.grad( + outputs=energy.sum(), + inputs=g.atom_positions, + create_graph=False, + retain_graph=True, + ) + prediction["f"] = [(-1 * force_dim) for force_dim in force] + if compute_stress: + stress = paddle.grad( + outputs=energy.sum(), + inputs=g.strains, + create_graph=False, + retain_graph=True, + ) + scale = 1 / g.volumes * 160.21766208 + stress = [(i * j) for i, j in zip(stress, scale, strict=False)] + 
prediction["s"] = stress + + if self.is_intensive: + energy /= atoms_per_graph.cast("float32") + prediction["e"] = energy + return prediction + + def predict_structure( + self, + structure: (Structure | Sequence[Structure]), + *, + task: PredTask = "efsm", + return_site_energies: bool = False, + return_atom_feas: bool = False, + return_crystal_feas: bool = False, + batch_size: int = 16, + ) -> (dict[str, paddle.Tensor] | list[dict[str, paddle.Tensor]]): + """Predict from pymatgen.core.Structure. + + Args: + structure (Structure | Sequence[Structure]): structure or a list of + structures to predict. + task (str): can be 'e' 'ef', 'em', 'efs', 'efsm' + Default = "efsm" + return_site_energies (bool): whether to return per-site energies. + Default = False + return_atom_feas (bool): whether to return atom features. + Default = False + return_crystal_feas (bool): whether to return crystal features. + Default = False + batch_size (int): batch_size for predict structures. + Default = 16 + + Returns: + prediction (dict): dict or list of dict containing the fields: + e (Tensor) : energy of structures float in eV/atom + f (Tensor) : force on atoms [num_atoms, 3] in eV/A + s (Tensor) : stress of structure [3, 3] in GPa + m (Tensor) : magnetic moments of sites [num_atoms, 3] in Bohr + magneton mu_B + """ + if self.graph_converter is None: + raise ValueError("graph_converter cannot be None!") + structures = [structure] if isinstance(structure, Structure) else structure + graphs = [self.graph_converter(struct) for struct in structures] + return self.predict_graph( + graphs, + task=task, + return_site_energies=return_site_energies, + return_atom_feas=return_atom_feas, + return_crystal_feas=return_crystal_feas, + batch_size=batch_size, + ) + + def predict_graph( + self, + graph: (CrystalGraph | Sequence[CrystalGraph]), + *, + task: PredTask = "efsm", + return_site_energies: bool = False, + return_atom_feas: bool = False, + return_crystal_feas: bool = False, + batch_size: int = 16, + ) -> (dict[str, paddle.Tensor] | list[dict[str, paddle.Tensor]]): + """Predict from CrustalGraph. + + Args: + graph (CrystalGraph | Sequence[CrystalGraph]): CrystalGraph(s) to predict. + task (str): can be 'e' 'ef', 'em', 'efs', 'efsm' + Default = "efsm" + return_site_energies (bool): whether to return per-site energies. + Default = False + return_atom_feas (bool): whether to return atom features. + Default = False + return_crystal_feas (bool): whether to return crystal features. + Default = False + batch_size (int): batch_size for predict structures. 
+ Default = 16 + + Returns: + prediction (dict): dict or list of dict containing the fields: + e (Tensor) : energy of structures float in eV/atom + f (Tensor) : force on atoms [num_atoms, 3] in eV/A + s (Tensor) : stress of structure [3, 3] in GPa + m (Tensor) : magnetic moments of sites [num_atoms, 3] in Bohr + magneton mu_B + """ + if not isinstance(graph, CrystalGraph | Sequence): + raise TypeError( + f"type(graph)={type(graph)!r} must be CrystalGraph or list of CrystalGraphs" + ) + # next(iter(self.parameters())).place + graphs = [graph] if isinstance(graph, CrystalGraph) else graph + self.eval() + predictions: list[dict[str, paddle.Tensor]] = [{} for _ in range(len(graphs))] + n_steps = math.ceil(len(graphs) / batch_size) + for step in range(n_steps): + prediction = self.forward( + [g for g in graphs[batch_size * step : batch_size * (step + 1)]], + task=task, + return_site_energies=return_site_energies, + return_atom_feas=return_atom_feas, + return_crystal_feas=return_crystal_feas, + ) + for key in { + "e", + "f", + "s", + "m", + "site_energies", + "atom_fea", + "crystal_fea", + } & {*prediction}: + for idx, tensor in enumerate(prediction[key]): + predictions[step * batch_size + idx][key] = ( + tensor.cpu().detach().numpy() + ) + return predictions[0] if len(graphs) == 1 else predictions + + def as_dict(self) -> dict: + """Return the CHGNet weights and args in a dictionary.""" + return {"state_dict": self.state_dict(), "model_args": self.model_args} + + def todict(self) -> dict: + """Needed for ASE JSON serialization when saving CHGNet potential to + trajectory file (https://github.com/CederGroupHub/chgnet/issues/48). + """ + return {"model_name": type(self).__name__, "model_args": self.model_args} + + @classmethod + def from_dict(cls, dct: dict, **kwargs) -> Self: + """Build a CHGNet from a saved dictionary.""" + chgnet = cls(**dct["model_args"], **kwargs) + chgnet.set_state_dict(state_dict=dct["state_dict"]) + return chgnet + + @classmethod + def from_file(cls, path: str, **kwargs) -> Self: + """Build a CHGNet from a saved file.""" + state = paddle.load(path=str(path)) + return cls.from_dict(state["model"], **kwargs) + + @classmethod + def load( + cls, + *, + model_name: str = "0.3.0", + use_device: (str | None) = None, + check_cuda_mem: bool = False, + verbose: bool = True, + ) -> Self: + """Load pretrained CHGNet model. + + Args: + model_name (str, optional): + Default = "0.3.0". + use_device (str, optional): The device to be used for predictions, + either "cpu", "cuda", or "mps". If not specified, the default device is + automatically selected based on the available options. + Default = None + check_cuda_mem (bool): Whether to use cuda with most available memory + Default = False + verbose (bool): whether to print model device information + Default = True + Raises: + ValueError: On unknown model_name. + """ + checkpoint_path = { + "0.3.0": "../pretrained/0.3.0/chgnet_0.3.0_paddle.pdparams", + "0.2.0": "../pretrained/0.2.0/chgnet_0.2.0_e30f77s348m32.pth.tar", + }.get(model_name) + if checkpoint_path is None: + raise ValueError(f"Unknown model_name={model_name!r}") + model = cls.from_file( + os.path.join(module_dir, checkpoint_path), + mlp_out_bias=model_name == "0.2.0", + version=model_name, + ) + device = determine_device(use_device=use_device, check_cuda_mem=check_cuda_mem) + if verbose: + print(f"CHGNet will run on {device}") + return model + + +@dataclass +class BatchedGraph: + """Batched crystal graph for parallel computing. 
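from_graphs (below) concatenates the per-crystal tensors and shifts every adjacency index so the sub-graphs stay disjoint in one batch: atom indices are offset by the running atom count (atom_offset_idx), bond indices by the running undirected-bond count (n_undirected), and atom_owners records which crystal each atom came from. A toy illustration with assumed sizes (two crystals of 2 and 3 atoms):

    import paddle

    # per-crystal atom_graph edge lists, [src, dst] pairs
    g0 = paddle.to_tensor([[0, 1], [1, 0]])
    g1 = paddle.to_tensor([[0, 1], [1, 2], [2, 0]])
    batched_atom_graph = paddle.concat([g0, g1 + 2])   # second graph shifted by atom_offset_idx = 2
    atom_owners = paddle.to_tensor([0, 0, 1, 1, 1])    # graph index per atom, cast to int32 in the real code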
+ + Attributes: + atomic_numbers (Tensor): atomic numbers vector + [num_batch_atoms] + bond_bases_ag (Tensor): bond bases vector for atom_graph + [num_batch_bonds_ag, num_radial] + bond_bases_bg (Tensor): bond bases vector for atom_graph + [num_batch_bonds_bg, num_radial] + angle_bases (Tensor): angle bases vector + [num_batch_angles, num_angular] + batched_atom_graph (Tensor) : batched atom graph adjacency list + [num_batch_bonds, 2] + batched_bond_graph (Tensor) : bond graph adjacency list + [num_batch_angles, 3] + atom_owners (Tensor): graph indices for each atom, used aggregate batched + graph back to single graph + [num_batch_atoms] + directed2undirected (Tensor): the utility tensor used to quickly + map directed edges to undirected edges in graph + [num_directed] + atom_positions (list[Tensor]): cartesian coordinates of the atoms + from structures + [[num_atoms_1, 3], [num_atoms_2, 3], ...] + strains (list[Tensor]): a list of strains that's initialized to be zeros + [[3, 3], [3, 3], ...] + volumes (Tensor): the volume of each structure in the batch + [batch_size] + """ + + atomic_numbers: paddle.Tensor + bond_bases_ag: paddle.Tensor + bond_bases_bg: paddle.Tensor + angle_bases: paddle.Tensor + batched_atom_graph: paddle.Tensor + batched_bond_graph: paddle.Tensor + atom_owners: paddle.Tensor + directed2undirected: paddle.Tensor + atom_positions: Sequence[paddle.Tensor] + strains: Sequence[paddle.Tensor] + volumes: Sequence[paddle.Tensor] | paddle.Tensor + + @classmethod + def from_graphs( + cls, + graphs: Sequence[CrystalGraph], + bond_basis_expansion: paddle.nn.Layer, + angle_basis_expansion: paddle.nn.Layer, + *, + compute_stress: bool = False, + ) -> Self: + """Featurize and assemble a list of graphs. + + Args: + graphs (list[Tensor]): a list of CrystalGraphs + bond_basis_expansion (nn.Module): bond basis expansion layer in CHGNet + angle_basis_expansion (nn.Module): angle basis expansion layer in CHGNet + compute_stress (bool): whether to compute stress. 
Default = False + + Returns: + BatchedGraph: assembled graphs ready for batched CHGNet forward pass + """ + atomic_numbers, atom_positions = [], [] + strains, volumes = [], [] + bond_bases_ag, bond_bases_bg, angle_bases = [], [], [] + batched_atom_graph, batched_bond_graph = [], [] + directed2undirected = [] + atom_owners = [] + atom_offset_idx = n_undirected = 0 + for graph_idx, graph in enumerate(graphs): + n_atom = graph.atomic_number.shape[0] + atomic_numbers.append(graph.atomic_number) + if compute_stress: + strain = paddle.to_tensor( + paddle.zeros([3, 3], dtype="float32"), stop_gradient=False + ) + lattice = paddle.matmul( + graph.lattice, paddle.eye(3, dtype="float32") + strain + ) + else: + strain = None + lattice = graph.lattice + volumes.append( + paddle.dot( + x=lattice[0], y=paddle.cross(x=lattice[1], y=lattice[2], axis=-1) + ) + ) + strains.append(strain) + atom_cart_coords = graph.atom_frac_coord @ lattice + if graph.atom_graph.dim() == 1: + graph.atom_graph = graph.atom_graph.reshape(0, 2) + bond_basis_ag, bond_basis_bg, bond_vectors = bond_basis_expansion( + center=atom_cart_coords[graph.atom_graph[:, 0]], + neighbor=atom_cart_coords[graph.atom_graph[:, 1]], + undirected2directed=graph.undirected2directed, + image=graph.neighbor_image, + lattice=lattice, + ) + atom_positions.append(atom_cart_coords) + bond_bases_ag.append(bond_basis_ag) + bond_bases_bg.append(bond_basis_bg) + batched_atom_graph.append(graph.atom_graph + atom_offset_idx) + directed2undirected.append(graph.directed2undirected + n_undirected) + if len(graph.bond_graph) != 0: + bond_vecs_i = paddle.gather( + x=bond_vectors, axis=0, index=graph.bond_graph[:, 2] + ) + bond_vecs_j = paddle.gather( + x=bond_vectors, axis=0, index=graph.bond_graph[:, 4] + ) + angle_basis = angle_basis_expansion(bond_vecs_i, bond_vecs_j) + angle_bases.append(angle_basis) + bond_graph = paddle.zeros([graph.bond_graph.shape[0], 3]) + # graph.bond_graph.new_zeros() + bond_graph[:, 0] = graph.bond_graph[:, 0] + atom_offset_idx + bond_graph[:, 1] = graph.bond_graph[:, 1] + n_undirected + bond_graph[:, 2] = graph.bond_graph[:, 3] + n_undirected + batched_bond_graph.append(bond_graph) + out_0 = paddle.ones(shape=n_atom) + out_0.stop_gradient = not False + atom_owners.append(out_0 * graph_idx) + atom_offset_idx += n_atom + n_undirected += len(bond_basis_ag) + atomic_numbers = paddle.concat(x=atomic_numbers, axis=0) + bond_bases_ag = paddle.concat(x=bond_bases_ag, axis=0) + bond_bases_bg = paddle.concat(x=bond_bases_bg, axis=0) + angle_bases = ( + paddle.concat(x=angle_bases, axis=0) + if len(angle_bases) != 0 + else paddle.to_tensor(data=[]) + ) + batched_atom_graph = paddle.concat(x=batched_atom_graph, axis=0) + if batched_bond_graph != []: + batched_bond_graph = paddle.concat(x=batched_bond_graph, axis=0) + else: + batched_bond_graph = paddle.to_tensor(data=[]) + atom_owners = paddle.concat(x=atom_owners, axis=0).astype("int32") + directed2undirected = paddle.concat(x=directed2undirected, axis=0) + volumes = paddle.to_tensor( + data=volumes, dtype=DTYPE, place=atomic_numbers.place + ) + return cls( + atomic_numbers=atomic_numbers, + bond_bases_ag=bond_bases_ag, + bond_bases_bg=bond_bases_bg, + angle_bases=angle_bases, + batched_atom_graph=batched_atom_graph, + batched_bond_graph=batched_bond_graph, + atom_owners=atom_owners, + directed2undirected=directed2undirected, + atom_positions=atom_positions, + strains=strains, + volumes=volumes, + ) diff --git a/jointContribution/CHGNet/chgnet/mp-18767-LiMnO2.cif 
b/jointContribution/CHGNet/chgnet/mp-18767-LiMnO2.cif index 7e04fa343a..234c620be5 100644 --- a/jointContribution/CHGNet/chgnet/mp-18767-LiMnO2.cif +++ b/jointContribution/CHGNet/chgnet/mp-18767-LiMnO2.cif @@ -1,40 +1,40 @@ -# generated using pymatgen -data_LiMnO2 -_symmetry_space_group_name_H-M 'P 1' -_cell_length_a 2.86877900 -_cell_length_b 4.63447500 -_cell_length_c 5.83250700 -_cell_angle_alpha 90.00000000 -_cell_angle_beta 90.00000000 -_cell_angle_gamma 90.00000000 -_symmetry_Int_Tables_number 1 -_chemical_formula_structural LiMnO2 -_chemical_formula_sum 'Li2 Mn2 O4' -_cell_volume 77.54484024 -_cell_formula_units_Z 2 -loop_ - _symmetry_equiv_pos_site_id - _symmetry_equiv_pos_as_xyz - 1 'x, y, z' -loop_ - _atom_type_symbol - _atom_type_oxidation_number - Li+ 1.0 - Mn3+ 3.0 - O2- -2.0 -loop_ - _atom_site_type_symbol - _atom_site_label - _atom_site_symmetry_multiplicity - _atom_site_fract_x - _atom_site_fract_y - _atom_site_fract_z - _atom_site_occupancy - Li+ Li0 1 0.50000000 0.50000000 0.37975050 1 - Li+ Li1 1 0.00000000 0.00000000 0.62024950 1 - Mn3+ Mn2 1 0.50000000 0.50000000 0.86325250 1 - Mn3+ Mn3 1 0.00000000 0.00000000 0.13674750 1 - O2- O4 1 0.50000000 0.00000000 0.36082450 1 - O2- O5 1 0.00000000 0.50000000 0.09851350 1 - O2- O6 1 0.50000000 0.00000000 0.90148650 1 - O2- O7 1 0.00000000 0.50000000 0.63917550 1 +# generated using pymatgen +data_LiMnO2 +_symmetry_space_group_name_H-M 'P 1' +_cell_length_a 2.86877900 +_cell_length_b 4.63447500 +_cell_length_c 5.83250700 +_cell_angle_alpha 90.00000000 +_cell_angle_beta 90.00000000 +_cell_angle_gamma 90.00000000 +_symmetry_Int_Tables_number 1 +_chemical_formula_structural LiMnO2 +_chemical_formula_sum 'Li2 Mn2 O4' +_cell_volume 77.54484024 +_cell_formula_units_Z 2 +loop_ + _symmetry_equiv_pos_site_id + _symmetry_equiv_pos_as_xyz + 1 'x, y, z' +loop_ + _atom_type_symbol + _atom_type_oxidation_number + Li+ 1.0 + Mn3+ 3.0 + O2- -2.0 +loop_ + _atom_site_type_symbol + _atom_site_label + _atom_site_symmetry_multiplicity + _atom_site_fract_x + _atom_site_fract_y + _atom_site_fract_z + _atom_site_occupancy + Li+ Li0 1 0.50000000 0.50000000 0.37975050 1 + Li+ Li1 1 0.00000000 0.00000000 0.62024950 1 + Mn3+ Mn2 1 0.50000000 0.50000000 0.86325250 1 + Mn3+ Mn3 1 0.00000000 0.00000000 0.13674750 1 + O2- O4 1 0.50000000 0.00000000 0.36082450 1 + O2- O5 1 0.00000000 0.50000000 0.09851350 1 + O2- O6 1 0.50000000 0.00000000 0.90148650 1 + O2- O7 1 0.00000000 0.50000000 0.63917550 1 diff --git a/jointContribution/CHGNet/chgnet/utils/__init__.py b/jointContribution/CHGNet/chgnet/utils/__init__.py index f6c1da468c..76f5a9f188 100644 --- a/jointContribution/CHGNet/chgnet/utils/__init__.py +++ b/jointContribution/CHGNet/chgnet/utils/__init__.py @@ -1,11 +1,11 @@ -from __future__ import annotations - -from chgnet.utils.common_utils import AverageMeter # noqa -from chgnet.utils.common_utils import cuda_devices_sorted_by_free_mem # noqa -from chgnet.utils.common_utils import determine_device # noqa -from chgnet.utils.common_utils import mae # noqa -from chgnet.utils.common_utils import mkdir # noqa -from chgnet.utils.common_utils import read_json # noqa -from chgnet.utils.common_utils import write_json # noqa -from chgnet.utils.vasp_utils import parse_vasp_dir # noqa -from chgnet.utils.vasp_utils import solve_charge_by_mag # noqa +from __future__ import annotations + +from chgnet.utils.common_utils import AverageMeter # noqa +from chgnet.utils.common_utils import cuda_devices_sorted_by_free_mem # noqa +from chgnet.utils.common_utils import 
determine_device # noqa +from chgnet.utils.common_utils import mae # noqa +from chgnet.utils.common_utils import mkdir # noqa +from chgnet.utils.common_utils import read_json # noqa +from chgnet.utils.common_utils import write_json # noqa +from chgnet.utils.vasp_utils import parse_vasp_dir # noqa +from chgnet.utils.vasp_utils import solve_charge_by_mag # noqa diff --git a/jointContribution/CHGNet/chgnet/utils/common_utils.py b/jointContribution/CHGNet/chgnet/utils/common_utils.py index 109f3fa9c3..bbed91568d 100644 --- a/jointContribution/CHGNet/chgnet/utils/common_utils.py +++ b/jointContribution/CHGNet/chgnet/utils/common_utils.py @@ -1,143 +1,143 @@ -from __future__ import annotations - -import json -import os - -import numpy as np -import nvidia_smi -import paddle - - -def determine_device( - use_device: (str | None) = None, *, check_cuda_mem: bool = False -) -> str: - """Determine the device to use for model. - - Args: - use_device (str): User specify device name - check_cuda_mem (bool): Whether to return cuda with most available memory - Default = False - - Returns: - device (str): device name to be passed to model.to(device) - """ - use_device = use_device or os.getenv("CHGNET_DEVICE") - if use_device in {"gpu", None} or paddle.is_compiled_with_cuda(): - device = "gpu" - else: - device = "cpu" - return device - - -def cuda_devices_sorted_by_free_mem() -> list[int]: - """List available CUDA devices sorted by increasing available memory. - - To get the device with the most free memory, use the last list item. - - Returns: - list[int]: CUDA device numbers sorted by increasing free memory. - """ - if not paddle.device.cuda.device_count() >= 1: - return [] - free_memories = [] - nvidia_smi.nvmlInit() - device_count = nvidia_smi.nvmlDeviceGetCount() - for idx in range(device_count): - handle = nvidia_smi.nvmlDeviceGetHandleByIndex(idx) - info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle) - free_memories.append(info.free) - nvidia_smi.nvmlShutdown() - return sorted(range(len(free_memories)), key=lambda x: free_memories[x]) - - -class AverageMeter: - """Computes and stores the average and current value.""" - - def __init__(self) -> None: - """Initialize the meter.""" - self.reset() - - def reset(self) -> None: - """Reset the meter value, average, sum and count to 0.""" - self.val = self.avg = self.sum = self.count = 0.0 - - def update(self, val: float, n: int = 1) -> None: - """Update the meter value, average, sum and count. - - Args: - val (float): New value to be added to the running average. - n (int, optional): Number of times the value is added. Default = 1. - """ - self.val = val - self.sum += val * n - self.count += n - if self.count != 0: - self.avg = self.sum / self.count - - -def mae(prediction: paddle.Tensor, target: paddle.Tensor) -> paddle.Tensor: - """Computes the mean absolute error between prediction and target. - - Args: - prediction: Tensor (N, 1) - target: Tensor (N, 1). - - Returns: - tensor - """ - return paddle.mean(x=paddle.abs(x=target - prediction)) - - -def read_json(filepath: str) -> dict: - """Read the JSON file. - - Args: - filepath (str): file name of JSON to read. - - Returns: - dict: data stored in filepath - """ - with open(filepath) as file: - return json.load(file) - - -def write_json(dct: dict, filepath: str) -> dict: - """Write the JSON file. - - Args: - dct (dict): dictionary to write - filepath (str): file name of JSON to write. - """ - - def handler(obj: object) -> (int | object): - """Convert numpy int64 to int. 
- - Fixes TypeError: Object of type int64 is not JSON serializable - reported in https://github.com/CederGroupHub/chgnet/issues/168. - - Returns: - int | object: object for serialization - """ - if isinstance(obj, np.integer): - return int(obj) - return obj - - with open(filepath, mode="w") as file: - json.dump(dct, file, default=handler) - - -def mkdir(path: str) -> str: - """Make directory. - - Args: - path (str): directory name - - Returns: - path - """ - folder = os.path.exists(path) - if not folder: - os.makedirs(path) - else: - print("Folder exists") - return path +from __future__ import annotations + +import json +import os + +import numpy as np +import nvidia_smi +import paddle + + +def determine_device( + use_device: (str | None) = None, *, check_cuda_mem: bool = False +) -> str: + """Determine the device to use for model. + + Args: + use_device (str): User specify device name + check_cuda_mem (bool): Whether to return cuda with most available memory + Default = False + + Returns: + device (str): device name to be passed to model.to(device) + """ + use_device = use_device or os.getenv("CHGNET_DEVICE") + if use_device in {"gpu", None} or paddle.is_compiled_with_cuda(): + device = "gpu" + else: + device = "cpu" + return device + + +def cuda_devices_sorted_by_free_mem() -> list[int]: + """List available CUDA devices sorted by increasing available memory. + + To get the device with the most free memory, use the last list item. + + Returns: + list[int]: CUDA device numbers sorted by increasing free memory. + """ + if not paddle.device.cuda.device_count() >= 1: + return [] + free_memories = [] + nvidia_smi.nvmlInit() + device_count = nvidia_smi.nvmlDeviceGetCount() + for idx in range(device_count): + handle = nvidia_smi.nvmlDeviceGetHandleByIndex(idx) + info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle) + free_memories.append(info.free) + nvidia_smi.nvmlShutdown() + return sorted(range(len(free_memories)), key=lambda x: free_memories[x]) + + +class AverageMeter: + """Computes and stores the average and current value.""" + + def __init__(self) -> None: + """Initialize the meter.""" + self.reset() + + def reset(self) -> None: + """Reset the meter value, average, sum and count to 0.""" + self.val = self.avg = self.sum = self.count = 0.0 + + def update(self, val: float, n: int = 1) -> None: + """Update the meter value, average, sum and count. + + Args: + val (float): New value to be added to the running average. + n (int, optional): Number of times the value is added. Default = 1. + """ + self.val = val + self.sum += val * n + self.count += n + if self.count != 0: + self.avg = self.sum / self.count + + +def mae(prediction: paddle.Tensor, target: paddle.Tensor) -> paddle.Tensor: + """Computes the mean absolute error between prediction and target. + + Args: + prediction: Tensor (N, 1) + target: Tensor (N, 1). + + Returns: + tensor + """ + return paddle.mean(x=paddle.abs(x=target - prediction)) + + +def read_json(filepath: str) -> dict: + """Read the JSON file. + + Args: + filepath (str): file name of JSON to read. + + Returns: + dict: data stored in filepath + """ + with open(filepath) as file: + return json.load(file) + + +def write_json(dct: dict, filepath: str) -> dict: + """Write the JSON file. + + Args: + dct (dict): dictionary to write + filepath (str): file name of JSON to write. + """ + + def handler(obj: object) -> (int | object): + """Convert numpy int64 to int. 
+ + Fixes TypeError: Object of type int64 is not JSON serializable + reported in https://github.com/CederGroupHub/chgnet/issues/168. + + Returns: + int | object: object for serialization + """ + if isinstance(obj, np.integer): + return int(obj) + return obj + + with open(filepath, mode="w") as file: + json.dump(dct, file, default=handler) + + +def mkdir(path: str) -> str: + """Make directory. + + Args: + path (str): directory name + + Returns: + path + """ + folder = os.path.exists(path) + if not folder: + os.makedirs(path) + else: + print("Folder exists") + return path diff --git a/jointContribution/CHGNet/chgnet/utils/vasp_utils.py b/jointContribution/CHGNet/chgnet/utils/vasp_utils.py index e7aebdc836..ebd2cf201e 100644 --- a/jointContribution/CHGNet/chgnet/utils/vasp_utils.py +++ b/jointContribution/CHGNet/chgnet/utils/vasp_utils.py @@ -1,195 +1,195 @@ -from __future__ import annotations - -import os -import re -import warnings -from typing import TYPE_CHECKING - -from chgnet.utils import write_json -from monty.io import zopen -from monty.os.path import zpath -from pymatgen.io.vasp.outputs import Oszicar -from pymatgen.io.vasp.outputs import Vasprun - -if TYPE_CHECKING: - from pymatgen.core import Structure - - -def parse_vasp_dir( - base_dir: str, - *, - check_electronic_convergence: bool = True, - save_path: (str | None) = None, -) -> dict[str, list]: - """Parse VASP output files into structures and labels - By default, the magnetization is read from mag_x from VASP, - plz modify the code if magnetization is for (y) and (z). - - Args: - base_dir (str): the directory of the VASP calculation outputs - check_electronic_convergence (bool): if set to True, this function will raise - Exception to VASP calculation that did not achieve electronic convergence. - Default = True - save_path (str): path to save the parsed VASP labels - - Raises: - NotADirectoryError: if the base_dir is not a directory - - Returns: - dict: a dictionary of lists with keys for structure, uncorrected_total_energy, - energy_per_atom, force, magmom, stress. 
- """ - if not os.path.isdir(base_dir): - raise NotADirectoryError(f"base_dir={base_dir!r} is not a directory") - oszicar_path = zpath(f"{base_dir}/OSZICAR") - vasprun_path = zpath(f"{base_dir}/vasprun.xml") - outcar_path = zpath(f"{base_dir}/OUTCAR") - if not os.path.exists(oszicar_path) or not os.path.exists(vasprun_path): - raise RuntimeError(f"No data parsed from {base_dir}!") - oszicar = Oszicar(oszicar_path) - vasprun_orig = Vasprun( - vasprun_path, - parse_dos=False, - parse_eigen=False, - parse_projected_eigen=False, - parse_potcar_file=False, - exception_on_bad_xml=False, - ) - charge, mag_x, mag_y, mag_z, header = [], [], [], [], [] - with zopen(outcar_path, encoding="utf-8") as file: - all_lines = [line.strip() for line in file.readlines()] - read_charge = read_mag_x = read_mag_y = read_mag_z = False - mag_x_all = [] - ion_step_count = 0 - for clean in all_lines: - if "magnetization (x)" in clean: - ion_step_count += 1 - if read_charge or read_mag_x or read_mag_y or read_mag_z: - if clean.startswith("# of ion"): - header = re.split("\\s{2,}", clean.strip()) - header.pop(0) - elif re.match("\\s*(\\d+)\\s+(([\\d\\.\\-]+)\\s+)+", clean): - tokens = [float(token) for token in re.findall("[\\d\\.\\-]+", clean)] - tokens.pop(0) - if read_charge: - charge.append(dict(zip(header, tokens, strict=True))) - elif read_mag_x: - mag_x.append(dict(zip(header, tokens, strict=True))) - elif read_mag_y: - mag_y.append(dict(zip(header, tokens, strict=True))) - elif read_mag_z: - mag_z.append(dict(zip(header, tokens, strict=True))) - elif clean.startswith("tot"): - if ion_step_count == len(mag_x_all) + 1: - mag_x_all.append(mag_x) - read_charge = read_mag_x = read_mag_y = read_mag_z = False - if clean == "total charge": - read_charge = True - read_mag_x = read_mag_y = read_mag_z = False - elif clean == "magnetization (x)": - mag_x = [] - read_mag_x = True - read_charge = read_mag_y = read_mag_z = False - elif clean == "magnetization (y)": - mag_y = [] - read_mag_y = True - read_charge = read_mag_x = read_mag_z = False - elif clean == "magnetization (z)": - mag_z = [] - read_mag_z = True - read_charge = read_mag_x = read_mag_y = False - elif re.search("electrostatic", clean): - read_charge = read_mag_x = read_mag_y = read_mag_z = False - if len(oszicar.ionic_steps) == len(mag_x_all): - warnings.warn("Unfinished OUTCAR", stacklevel=2) - elif len(oszicar.ionic_steps) == len(mag_x_all) - 1: - mag_x_all.pop(-1) - n_atoms = len(vasprun_orig.ionic_steps[0]["structure"]) - dataset = { - "structure": [], - "uncorrected_total_energy": [], - "energy_per_atom": [], - "force": [], - "magmom": [], - "stress": None if "stress" not in vasprun_orig.ionic_steps[0] else [], - } - for index, ionic_step in enumerate(vasprun_orig.ionic_steps): - if ( - check_electronic_convergence - and len(ionic_step["electronic_steps"]) >= vasprun_orig.parameters["NELM"] - ): - continue - dataset["structure"].append(ionic_step["structure"]) - dataset["uncorrected_total_energy"].append(ionic_step["e_0_energy"]) - dataset["energy_per_atom"].append(ionic_step["e_0_energy"] / n_atoms) - dataset["force"].append(ionic_step["forces"]) - if mag_x_all != []: - dataset["magmom"].append([site["tot"] for site in mag_x_all[index]]) - if "stress" in ionic_step: - dataset["stress"].append(ionic_step["stress"]) - if dataset["uncorrected_total_energy"] == []: - raise RuntimeError(f"No data parsed from {base_dir}!") - if save_path is not None: - save_dict = dataset.copy() - save_dict["structure"] = [struct.as_dict() for struct in dataset["structure"]] 
- write_json(save_dict, save_path) - return dataset - - -def solve_charge_by_mag( - structure: Structure, - default_ox: (dict[str, float] | None) = None, - ox_ranges: (dict[str, dict[tuple[float, float], int]] | None) = None, -) -> (Structure | None): - """Solve oxidation states by magmom. - - Args: - structure (Structure): pymatgen structure with magmoms in site_properties. Dict - key must be either magmom or final_magmom. - default_ox (dict[str, float]): default oxidation state for elements. - Default = dict(Li=1, O=-2) - ox_ranges (dict[str, dict[tuple[float, float], int]]): user-defined range to - convert magmoms into formal valence. - Example for Mn (Default): - ("Mn": ( - (0.5, 1.5): 2, - (1.5, 2.5): 3, - (2.5, 3.5): 4, - (3.5, 4.2): 3, - (4.2, 5): 2 - )) - - Returns: - Structure: pymatgen Structure with oxidation states assigned based on magmoms. - """ - out_structure = structure.copy() - out_structure.remove_oxidation_states() - ox_list = [] - solved_ox = True - default_ox = default_ox or {"Li": 1, "O": -2} - ox_ranges = ox_ranges or { - "Mn": {(0.5, 1.5): 2, (1.5, 2.5): 3, (2.5, 3.5): 4, (3.5, 4.2): 3, (4.2, 5): 2} - } - magmoms = structure.site_properties.get( - "final_magmom", structure.site_properties.get("magmom") - ) - for idx, site in enumerate(out_structure): - assigned = False - if site.species_string in ox_ranges: - for (min_mag, max_mag), mag_ox in ox_ranges[site.species_string].items(): - if min_mag <= magmoms[idx] < max_mag: - ox_list.append(mag_ox) - assigned = True - break - elif site.species_string in default_ox: - ox_list.append(default_ox[site.species_string]) - assigned = True - if not assigned: - solved_ox = False - if solved_ox: - total_charge = sum(ox_list) - print(f"Solved oxidation state, total_charge={total_charge!r}") - out_structure.add_oxidation_state_by_site(ox_list) - return out_structure - warnings.warn("Failed to solve oxidation state", stacklevel=2) - return None +from __future__ import annotations + +import os +import re +import warnings +from typing import TYPE_CHECKING + +from chgnet.utils import write_json +from monty.io import zopen +from monty.os.path import zpath +from pymatgen.io.vasp.outputs import Oszicar +from pymatgen.io.vasp.outputs import Vasprun + +if TYPE_CHECKING: + from pymatgen.core import Structure + + +def parse_vasp_dir( + base_dir: str, + *, + check_electronic_convergence: bool = True, + save_path: (str | None) = None, +) -> dict[str, list]: + """Parse VASP output files into structures and labels + By default, the magnetization is read from mag_x from VASP, + plz modify the code if magnetization is for (y) and (z). + + Args: + base_dir (str): the directory of the VASP calculation outputs + check_electronic_convergence (bool): if set to True, this function will raise + Exception to VASP calculation that did not achieve electronic convergence. + Default = True + save_path (str): path to save the parsed VASP labels + + Raises: + NotADirectoryError: if the base_dir is not a directory + + Returns: + dict: a dictionary of lists with keys for structure, uncorrected_total_energy, + energy_per_atom, force, magmom, stress. 
+ """ + if not os.path.isdir(base_dir): + raise NotADirectoryError(f"base_dir={base_dir!r} is not a directory") + oszicar_path = zpath(f"{base_dir}/OSZICAR") + vasprun_path = zpath(f"{base_dir}/vasprun.xml") + outcar_path = zpath(f"{base_dir}/OUTCAR") + if not os.path.exists(oszicar_path) or not os.path.exists(vasprun_path): + raise RuntimeError(f"No data parsed from {base_dir}!") + oszicar = Oszicar(oszicar_path) + vasprun_orig = Vasprun( + vasprun_path, + parse_dos=False, + parse_eigen=False, + parse_projected_eigen=False, + parse_potcar_file=False, + exception_on_bad_xml=False, + ) + charge, mag_x, mag_y, mag_z, header = [], [], [], [], [] + with zopen(outcar_path, encoding="utf-8") as file: + all_lines = [line.strip() for line in file.readlines()] + read_charge = read_mag_x = read_mag_y = read_mag_z = False + mag_x_all = [] + ion_step_count = 0 + for clean in all_lines: + if "magnetization (x)" in clean: + ion_step_count += 1 + if read_charge or read_mag_x or read_mag_y or read_mag_z: + if clean.startswith("# of ion"): + header = re.split("\\s{2,}", clean.strip()) + header.pop(0) + elif re.match("\\s*(\\d+)\\s+(([\\d\\.\\-]+)\\s+)+", clean): + tokens = [float(token) for token in re.findall("[\\d\\.\\-]+", clean)] + tokens.pop(0) + if read_charge: + charge.append(dict(zip(header, tokens, strict=True))) + elif read_mag_x: + mag_x.append(dict(zip(header, tokens, strict=True))) + elif read_mag_y: + mag_y.append(dict(zip(header, tokens, strict=True))) + elif read_mag_z: + mag_z.append(dict(zip(header, tokens, strict=True))) + elif clean.startswith("tot"): + if ion_step_count == len(mag_x_all) + 1: + mag_x_all.append(mag_x) + read_charge = read_mag_x = read_mag_y = read_mag_z = False + if clean == "total charge": + read_charge = True + read_mag_x = read_mag_y = read_mag_z = False + elif clean == "magnetization (x)": + mag_x = [] + read_mag_x = True + read_charge = read_mag_y = read_mag_z = False + elif clean == "magnetization (y)": + mag_y = [] + read_mag_y = True + read_charge = read_mag_x = read_mag_z = False + elif clean == "magnetization (z)": + mag_z = [] + read_mag_z = True + read_charge = read_mag_x = read_mag_y = False + elif re.search("electrostatic", clean): + read_charge = read_mag_x = read_mag_y = read_mag_z = False + if len(oszicar.ionic_steps) == len(mag_x_all): + warnings.warn("Unfinished OUTCAR", stacklevel=2) + elif len(oszicar.ionic_steps) == len(mag_x_all) - 1: + mag_x_all.pop(-1) + n_atoms = len(vasprun_orig.ionic_steps[0]["structure"]) + dataset = { + "structure": [], + "uncorrected_total_energy": [], + "energy_per_atom": [], + "force": [], + "magmom": [], + "stress": None if "stress" not in vasprun_orig.ionic_steps[0] else [], + } + for index, ionic_step in enumerate(vasprun_orig.ionic_steps): + if ( + check_electronic_convergence + and len(ionic_step["electronic_steps"]) >= vasprun_orig.parameters["NELM"] + ): + continue + dataset["structure"].append(ionic_step["structure"]) + dataset["uncorrected_total_energy"].append(ionic_step["e_0_energy"]) + dataset["energy_per_atom"].append(ionic_step["e_0_energy"] / n_atoms) + dataset["force"].append(ionic_step["forces"]) + if mag_x_all != []: + dataset["magmom"].append([site["tot"] for site in mag_x_all[index]]) + if "stress" in ionic_step: + dataset["stress"].append(ionic_step["stress"]) + if dataset["uncorrected_total_energy"] == []: + raise RuntimeError(f"No data parsed from {base_dir}!") + if save_path is not None: + save_dict = dataset.copy() + save_dict["structure"] = [struct.as_dict() for struct in dataset["structure"]] 
+ write_json(save_dict, save_path) + return dataset + + +def solve_charge_by_mag( + structure: Structure, + default_ox: (dict[str, float] | None) = None, + ox_ranges: (dict[str, dict[tuple[float, float], int]] | None) = None, +) -> (Structure | None): + """Solve oxidation states by magmom. + + Args: + structure (Structure): pymatgen structure with magmoms in site_properties. Dict + key must be either magmom or final_magmom. + default_ox (dict[str, float]): default oxidation state for elements. + Default = dict(Li=1, O=-2) + ox_ranges (dict[str, dict[tuple[float, float], int]]): user-defined range to + convert magmoms into formal valence. + Example for Mn (Default): + ("Mn": ( + (0.5, 1.5): 2, + (1.5, 2.5): 3, + (2.5, 3.5): 4, + (3.5, 4.2): 3, + (4.2, 5): 2 + )) + + Returns: + Structure: pymatgen Structure with oxidation states assigned based on magmoms. + """ + out_structure = structure.copy() + out_structure.remove_oxidation_states() + ox_list = [] + solved_ox = True + default_ox = default_ox or {"Li": 1, "O": -2} + ox_ranges = ox_ranges or { + "Mn": {(0.5, 1.5): 2, (1.5, 2.5): 3, (2.5, 3.5): 4, (3.5, 4.2): 3, (4.2, 5): 2} + } + magmoms = structure.site_properties.get( + "final_magmom", structure.site_properties.get("magmom") + ) + for idx, site in enumerate(out_structure): + assigned = False + if site.species_string in ox_ranges: + for (min_mag, max_mag), mag_ox in ox_ranges[site.species_string].items(): + if min_mag <= magmoms[idx] < max_mag: + ox_list.append(mag_ox) + assigned = True + break + elif site.species_string in default_ox: + ox_list.append(default_ox[site.species_string]) + assigned = True + if not assigned: + solved_ox = False + if solved_ox: + total_charge = sum(ox_list) + print(f"Solved oxidation state, total_charge={total_charge!r}") + out_structure.add_oxidation_state_by_site(ox_list) + return out_structure + warnings.warn("Failed to solve oxidation state", stacklevel=2) + return None diff --git a/jointContribution/CHGNet/main.py b/jointContribution/CHGNet/main.py index 83c2985362..c8e0a9d321 100644 --- a/jointContribution/CHGNet/main.py +++ b/jointContribution/CHGNet/main.py @@ -1,65 +1,65 @@ -from ase.io.trajectory import Trajectory -from chgnet.model import StructOptimizer -from chgnet.model.dynamics import MolecularDynamics -from chgnet.model.model import CHGNet -from chgnet.utils import solve_charge_by_mag -from pymatgen.core import Structure -from pymatgen.io.ase import AseAtomsAdaptor - -# This is the paddle version of chgnet, which heavily references -# https://github.com/CederGroupHub/chgnet. 
- -# load the CHGNet model -chgnet = CHGNet.load() -structure = Structure.from_file("chgnet/mp-18767-LiMnO2.cif") - -# predict the structure -prediction = chgnet.predict_structure(structure) - - -for key, unit in [ - ("energy", "eV/atom"), - ("forces", "eV/A"), - ("stress", "GPa"), - ("magmom", "mu_B"), -]: - print(f"CHGNet-predicted {key} ({unit}):\n{prediction[key[0]]}\n") - - -# structure optimizer -relaxer = StructOptimizer() -# Perturb the structure -structure.perturb(0.1) -# Relax the perturbed structure -result = relaxer.relax(structure, verbose=True) - -print("Relaxed structure:\n") -print(result["final_structure"]) - -print(result["trajectory"].energies) - - -# run molecular dynamics -md = MolecularDynamics( - atoms=structure, - model=chgnet, - ensemble="nvt", - temperature=1000, # in k - timestep=2, # in fs - trajectory="md_out.traj", - logfile="md_out.log", - loginterval=100, -) -md.run(50) # run a 0.1 ps MD simulation - - -traj = Trajectory("md_out.traj") -mag = traj[-1].get_magnetic_moments() - -# get the non-charge-decorated structure -structure = AseAtomsAdaptor.get_structure(traj[-1]) -print(structure) - -# get the charge-decorated structure -struct_with_chg = solve_charge_by_mag(structure) -print(struct_with_chg) +from ase.io.trajectory import Trajectory +from chgnet.model import StructOptimizer +from chgnet.model.dynamics import MolecularDynamics +from chgnet.model.model import CHGNet +from chgnet.utils import solve_charge_by_mag +from pymatgen.core import Structure +from pymatgen.io.ase import AseAtomsAdaptor + +# This is the paddle version of chgnet, which heavily references +# https://github.com/CederGroupHub/chgnet. + +# load the CHGNet model +chgnet = CHGNet.load() +structure = Structure.from_file("chgnet/mp-18767-LiMnO2.cif") + +# predict the structure +prediction = chgnet.predict_structure(structure) + + +for key, unit in [ + ("energy", "eV/atom"), + ("forces", "eV/A"), + ("stress", "GPa"), + ("magmom", "mu_B"), +]: + print(f"CHGNet-predicted {key} ({unit}):\n{prediction[key[0]]}\n") + + +# structure optimizer +relaxer = StructOptimizer() +# Perturb the structure +structure.perturb(0.1) +# Relax the perturbed structure +result = relaxer.relax(structure, verbose=True) + +print("Relaxed structure:\n") +print(result["final_structure"]) + +print(result["trajectory"].energies) + + +# run molecular dynamics +md = MolecularDynamics( + atoms=structure, + model=chgnet, + ensemble="nvt", + temperature=1000, # in k + timestep=2, # in fs + trajectory="md_out.traj", + logfile="md_out.log", + loginterval=100, +) +md.run(50) # run a 0.1 ps MD simulation + + +traj = Trajectory("md_out.traj") +mag = traj[-1].get_magnetic_moments() + +# get the non-charge-decorated structure +structure = AseAtomsAdaptor.get_structure(traj[-1]) +print(structure) + +# get the charge-decorated structure +struct_with_chg = solve_charge_by_mag(structure) +print(struct_with_chg) diff --git a/jointContribution/DU_CNN/.gitignore b/jointContribution/DU_CNN/.gitignore index fe99f7b353..a3a3aedfb8 100644 --- a/jointContribution/DU_CNN/.gitignore +++ b/jointContribution/DU_CNN/.gitignore @@ -1 +1 @@ -data/rawdataM.mat +data/rawdataM.mat diff --git a/jointContribution/DU_CNN/config.yaml b/jointContribution/DU_CNN/config.yaml index e8e097a286..11f90cea62 100644 --- a/jointContribution/DU_CNN/config.yaml +++ b/jointContribution/DU_CNN/config.yaml @@ -1,15 +1,15 @@ -Load_Data: false -Scheduler: true -Randomintensity: true -num_epochs: 50 -batch_size: 128 -learning_rate: 1.0e-5 -layers: 7 -kernels: 15 -channels: 32 
-downsamples: 16 -lossnum: 2 #0: Difference Loss, 1: POD loss, 2: norm POD loss, 3: only POD loss, 4: only normalized POD loss -criterion: MSE -data_directory: ./data -data_name: rawdataM.mat -pod_data_name: poddata.mat +Load_Data: false +Scheduler: true +Randomintensity: true +num_epochs: 50 +batch_size: 128 +learning_rate: 1.0e-5 +layers: 7 +kernels: 15 +channels: 32 +downsamples: 16 +lossnum: 2 #0: Difference Loss, 1: POD loss, 2: norm POD loss, 3: only POD loss, 4: only normalized POD loss +criterion: MSE +data_directory: ./data +data_name: rawdataM.mat +pod_data_name: poddata.mat diff --git a/jointContribution/DU_CNN/main.py b/jointContribution/DU_CNN/main.py index 4294b8cd15..62a20d88f6 100644 --- a/jointContribution/DU_CNN/main.py +++ b/jointContribution/DU_CNN/main.py @@ -1,105 +1,105 @@ -import os - -import paddle -from models import network_model_batch -from paddle import nn -from sklearn import model_selection -from utils import lr_scheduler -from utils import util - - -def main(): - # Function to load yaml configuration file - config = util.load_config("./config.yaml") - # data directory - data_path, data_name, poddata_name = ( - config["data_directory"], - config["data_name"], - config["pod_data_name"], - ) - # structure and training hyperparameters - nlayer, nkernel, nchannel, ndownsample, lossnum = ( - config["layers"], - config["kernels"], - config["channels"], - config["downsamples"], - config["lossnum"], - ) - print( - f"nlayer: {nlayer}, nkernel: {nkernel}, nchannel: {nchannel}, ndownsample: {ndownsample}, lossnum: {lossnum}" - ) - - # load data - data, poddata = util.get_dataset(data_path, data_name, poddata_name) - # split data case - trainnum, testnum = model_selection.train_test_split( - range(len(data["shortdata"])), test_size=0.1, random_state=2 - ) - # dataset loading - trainloader, testloader = util.get_dataloader(config, data, trainnum, testnum) - # make directory for save the results - output_dir = util.setup_log_directory(config) - - # model initialization - criterion = nn.MSELoss() - model = network_model_batch.Networkn( - nlayer, ndownsample, nkernel, nchannel, in_nc=1, out_nc=1, act_mode="BR" - ) - if config["Scheduler"]: - scheduler = lr_scheduler.CosineAnnealingWarmUpRestarts( - config["learning_rate"], T_0=50, T_mult=1, eta_max=0.005, T_up=10, gamma=0.1 - ) - optimizer = paddle.optimizer.Adam( - parameters=model.parameters(), - learning_rate=scheduler, - weight_decay=1e-6, - ) - else: - scheduler = False - optimizer = paddle.optimizer.Adam( - parameters=model.parameters(), - learning_rate=config["learning_rate"], - weight_decay=1e-6, - ) - epoch, loss_, evalloss_ = (0, [], []) # train loss, test loss - - # load pretrained model - if config["Load_Data"] == 1 and os.path.isfile( - os.path.join(output_dir, "checkpoint.ckpt") - ): - model, optimizer, epoch, loss_ = util.load_network( - os.path.join(output_dir, "checkpoint.ckpt") - ) - - # training - total_epochs = config["num_epochs"] + 1 - while epoch < total_epochs: - util.train_one_epoch( - epoch, - model, - trainloader, - optimizer, - criterion, - scheduler, - config, - poddata, - loss_, - ) - if epoch % 10 == 0: - util.evaluate_testloss(model, testloader, criterion, evalloss_) - util.save_figures( - os.path.join(output_dir, "logs"), epoch, model, testnum, data - ) - epoch += 1 - - # Save model - util.save_model( - epoch, model, optimizer, loss_, evalloss_, testnum, trainnum, output_dir - ) - util.save_figures(output_dir, epoch, model, testnum, data) - util.save_lossfigure(output_dir, loss_, evalloss_) - 
paddle.device.cuda.empty_cache() - - -if __name__ == "__main__": - main() +import os + +import paddle +from models import network_model_batch +from paddle import nn +from sklearn import model_selection +from utils import lr_scheduler +from utils import util + + +def main(): + # Function to load yaml configuration file + config = util.load_config("./config.yaml") + # data directory + data_path, data_name, poddata_name = ( + config["data_directory"], + config["data_name"], + config["pod_data_name"], + ) + # structure and training hyperparameters + nlayer, nkernel, nchannel, ndownsample, lossnum = ( + config["layers"], + config["kernels"], + config["channels"], + config["downsamples"], + config["lossnum"], + ) + print( + f"nlayer: {nlayer}, nkernel: {nkernel}, nchannel: {nchannel}, ndownsample: {ndownsample}, lossnum: {lossnum}" + ) + + # load data + data, poddata = util.get_dataset(data_path, data_name, poddata_name) + # split data case + trainnum, testnum = model_selection.train_test_split( + range(len(data["shortdata"])), test_size=0.1, random_state=2 + ) + # dataset loading + trainloader, testloader = util.get_dataloader(config, data, trainnum, testnum) + # make directory for save the results + output_dir = util.setup_log_directory(config) + + # model initialization + criterion = nn.MSELoss() + model = network_model_batch.Networkn( + nlayer, ndownsample, nkernel, nchannel, in_nc=1, out_nc=1, act_mode="BR" + ) + if config["Scheduler"]: + scheduler = lr_scheduler.CosineAnnealingWarmUpRestarts( + config["learning_rate"], T_0=50, T_mult=1, eta_max=0.005, T_up=10, gamma=0.1 + ) + optimizer = paddle.optimizer.Adam( + parameters=model.parameters(), + learning_rate=scheduler, + weight_decay=1e-6, + ) + else: + scheduler = False + optimizer = paddle.optimizer.Adam( + parameters=model.parameters(), + learning_rate=config["learning_rate"], + weight_decay=1e-6, + ) + epoch, loss_, evalloss_ = (0, [], []) # train loss, test loss + + # load pretrained model + if config["Load_Data"] == 1 and os.path.isfile( + os.path.join(output_dir, "checkpoint.ckpt") + ): + model, optimizer, epoch, loss_ = util.load_network( + os.path.join(output_dir, "checkpoint.ckpt") + ) + + # training + total_epochs = config["num_epochs"] + 1 + while epoch < total_epochs: + util.train_one_epoch( + epoch, + model, + trainloader, + optimizer, + criterion, + scheduler, + config, + poddata, + loss_, + ) + if epoch % 10 == 0: + util.evaluate_testloss(model, testloader, criterion, evalloss_) + util.save_figures( + os.path.join(output_dir, "logs"), epoch, model, testnum, data + ) + epoch += 1 + + # Save model + util.save_model( + epoch, model, optimizer, loss_, evalloss_, testnum, trainnum, output_dir + ) + util.save_figures(output_dir, epoch, model, testnum, data) + util.save_lossfigure(output_dir, loss_, evalloss_) + paddle.device.cuda.empty_cache() + + +if __name__ == "__main__": + main() diff --git a/jointContribution/DU_CNN/models/basicblock.py b/jointContribution/DU_CNN/models/basicblock.py index 19fc9f7173..733a2c8b29 100644 --- a/jointContribution/DU_CNN/models/basicblock.py +++ b/jointContribution/DU_CNN/models/basicblock.py @@ -1,98 +1,98 @@ -""" -# -------------------------------------------- -# Advanced nn.Sequential -# https://github.com/xinntao/BasicSR -# -------------------------------------------- -""" -from collections import OrderedDict - -from paddle import nn - - -def to_sequential(*args): - """Advanced nn.Sequential. - - Raises: - NotImplementedError: Sequential does not support OrderedDict input. 
- - Returns: - nn.Sequential: The nn.Sequential of module list. - """ - if len(args) == 1: - if isinstance(args[0], OrderedDict): - raise NotImplementedError("Sequential does not support OrderedDict input.") - return args[0] # No sequential is needed. - modules = [] - for module in args: - if isinstance(module, nn.Sequential): - for submodule in module.children(): - modules.append(submodule) - elif isinstance(module, nn.Layer): - modules.append(module) - return nn.Sequential(*modules) - - -# -------------------------------------------- -# return nn.Sequential of (Conv + BN + ReLU) -# -------------------------------------------- -def conv1( - in_channels=64, - out_channels=64, - kernel_size=3, - stride=1, - padding=1, - bias=True, - mode="CBR", - negative_slope=0.2, -): - if bias: - bias_attr = None - else: - bias_attr = False - L = [] - for t in mode: - if t == "C": - L.append( - nn.Conv1D( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - bias_attr=bias_attr, - ) - ) - elif t == "T": - L.append( - nn.Conv1DTranspose( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=padding, - bias_attr=bias_attr, - ) - ) - elif t == "B": - L.append( - nn.BatchNorm1D( - out_channels, - momentum=0.9, - epsilon=1e-04, - weight_attr=None, - bias_attr=None, - ) - ) - elif t == "I": - L.append(nn.InstanceNorm1D(out_channels, weight_attr=None, bias_attr=None)) - elif t in ("R", "r"): - L.append(nn.ReLU()) - elif t in ("L", "l"): - L.append(nn.LeakyReLU(negative_slope=negative_slope)) - elif t == "M": - L.append(nn.MaxPool1D(kernel_size=kernel_size, stride=stride, padding=0)) - elif t == "A": - L.append(nn.AvgPool1D(kernel_size=kernel_size, stride=stride, padding=0)) - else: - raise NotImplementedError(f"Undefined type: {t}") - return to_sequential(*L) +""" +# -------------------------------------------- +# Advanced nn.Sequential +# https://github.com/xinntao/BasicSR +# -------------------------------------------- +""" +from collections import OrderedDict + +from paddle import nn + + +def to_sequential(*args): + """Advanced nn.Sequential. + + Raises: + NotImplementedError: Sequential does not support OrderedDict input. + + Returns: + nn.Sequential: The nn.Sequential of module list. + """ + if len(args) == 1: + if isinstance(args[0], OrderedDict): + raise NotImplementedError("Sequential does not support OrderedDict input.") + return args[0] # No sequential is needed. 
+ modules = [] + for module in args: + if isinstance(module, nn.Sequential): + for submodule in module.children(): + modules.append(submodule) + elif isinstance(module, nn.Layer): + modules.append(module) + return nn.Sequential(*modules) + + +# -------------------------------------------- +# return nn.Sequential of (Conv + BN + ReLU) +# -------------------------------------------- +def conv1( + in_channels=64, + out_channels=64, + kernel_size=3, + stride=1, + padding=1, + bias=True, + mode="CBR", + negative_slope=0.2, +): + if bias: + bias_attr = None + else: + bias_attr = False + L = [] + for t in mode: + if t == "C": + L.append( + nn.Conv1D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias_attr=bias_attr, + ) + ) + elif t == "T": + L.append( + nn.Conv1DTranspose( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias_attr=bias_attr, + ) + ) + elif t == "B": + L.append( + nn.BatchNorm1D( + out_channels, + momentum=0.9, + epsilon=1e-04, + weight_attr=None, + bias_attr=None, + ) + ) + elif t == "I": + L.append(nn.InstanceNorm1D(out_channels, weight_attr=None, bias_attr=None)) + elif t in ("R", "r"): + L.append(nn.ReLU()) + elif t in ("L", "l"): + L.append(nn.LeakyReLU(negative_slope=negative_slope)) + elif t == "M": + L.append(nn.MaxPool1D(kernel_size=kernel_size, stride=stride, padding=0)) + elif t == "A": + L.append(nn.AvgPool1D(kernel_size=kernel_size, stride=stride, padding=0)) + else: + raise NotImplementedError(f"Undefined type: {t}") + return to_sequential(*L) diff --git a/jointContribution/DU_CNN/models/network_model_batch.py b/jointContribution/DU_CNN/models/network_model_batch.py index 4eed366ccb..ffdee075fa 100644 --- a/jointContribution/DU_CNN/models/network_model_batch.py +++ b/jointContribution/DU_CNN/models/network_model_batch.py @@ -1,59 +1,59 @@ -# https://github.com/viktor-ktorvi/1d-convolutional-neural-networks - -import paddle.nn as nn -from models import basicblock -from utils import pixelshuffle1d - - -class Networkn(nn.Layer): - """Init function. - - Args: - nb (int): Total number of conv layers. - nc (int): Channel number. - downsample (int): Downsample number. - kerneln (int): Kernel number. - in_nc (int, optional): Channel number of input. Defaults to 1. - out_nc (int, optional): Channel number of output. Defaults to 1. - act_mode (str, optional): Batch norm + activation function; 'BR' means BN+ReLU. Defaults to 'BR'. - - Raises: - ValueError: Examples of activation function: R, L, BR, BL, IR, IL. 
- """ - - def __init__(self, nb, downsample, kerneln, nc, in_nc=1, out_nc=1, act_mode="BR"): - super(Networkn, self).__init__() - - # encoder - self.down = pixelshuffle1d.PixelUnshuffle1D(downsample) - self.up = pixelshuffle1d.PixelShuffle1D(downsample) - - if "R" not in act_mode and "L" not in act_mode: - raise ValueError("Examples of activation function: R, L, BR, BL, IR, IL.") - bias = True - - m_head = basicblock.conv1( - in_nc * downsample, - nc, - kerneln, - padding=kerneln // 2, - mode="C" + act_mode[-1], - bias=bias, - ) - m_body = [ - basicblock.conv1( - nc, nc, kerneln, padding=kerneln // 2, mode="C" + act_mode, bias=bias - ) - for _ in range(nb - 2) - ] - m_tail = basicblock.conv1( - nc, out_nc * downsample, kerneln, padding=kerneln // 2, mode="C", bias=bias - ) - - self.model = basicblock.to_sequential(m_head, *m_body, m_tail) - - def forward(self, x): - x = self.down(x) - x = self.model(x) - x = self.up(x) - return x +# https://github.com/viktor-ktorvi/1d-convolutional-neural-networks + +import paddle.nn as nn +from models import basicblock +from utils import pixelshuffle1d + + +class Networkn(nn.Layer): + """Init function. + + Args: + nb (int): Total number of conv layers. + nc (int): Channel number. + downsample (int): Downsample number. + kerneln (int): Kernel number. + in_nc (int, optional): Channel number of input. Defaults to 1. + out_nc (int, optional): Channel number of output. Defaults to 1. + act_mode (str, optional): Batch norm + activation function; 'BR' means BN+ReLU. Defaults to 'BR'. + + Raises: + ValueError: Examples of activation function: R, L, BR, BL, IR, IL. + """ + + def __init__(self, nb, downsample, kerneln, nc, in_nc=1, out_nc=1, act_mode="BR"): + super(Networkn, self).__init__() + + # encoder + self.down = pixelshuffle1d.PixelUnshuffle1D(downsample) + self.up = pixelshuffle1d.PixelShuffle1D(downsample) + + if "R" not in act_mode and "L" not in act_mode: + raise ValueError("Examples of activation function: R, L, BR, BL, IR, IL.") + bias = True + + m_head = basicblock.conv1( + in_nc * downsample, + nc, + kerneln, + padding=kerneln // 2, + mode="C" + act_mode[-1], + bias=bias, + ) + m_body = [ + basicblock.conv1( + nc, nc, kerneln, padding=kerneln // 2, mode="C" + act_mode, bias=bias + ) + for _ in range(nb - 2) + ] + m_tail = basicblock.conv1( + nc, out_nc * downsample, kerneln, padding=kerneln // 2, mode="C", bias=bias + ) + + self.model = basicblock.to_sequential(m_head, *m_body, m_tail) + + def forward(self, x): + x = self.down(x) + x = self.model(x) + x = self.up(x) + return x diff --git a/jointContribution/DU_CNN/requirements.txt b/jointContribution/DU_CNN/requirements.txt index df2b9a3181..06247a4260 100644 --- a/jointContribution/DU_CNN/requirements.txt +++ b/jointContribution/DU_CNN/requirements.txt @@ -1,7 +1,7 @@ -numpy -pyaml -tqdm -matplotlib -sklearn -mat73 -paddlepaddle-gpu +mat73 +matplotlib +numpy +paddlepaddle-gpu +pyaml +sklearn +tqdm diff --git a/jointContribution/DU_CNN/utils/lr_scheduler.py b/jointContribution/DU_CNN/utils/lr_scheduler.py index 66bffe8c32..b2df2cd297 100644 --- a/jointContribution/DU_CNN/utils/lr_scheduler.py +++ b/jointContribution/DU_CNN/utils/lr_scheduler.py @@ -1,87 +1,87 @@ -import math - -from paddle.optimizer import lr - - -class CosineAnnealingWarmUpRestarts(lr.LRScheduler): - def __init__( - self, - learning_rate, - T_0, - T_mult=1, - eta_max=0.1, - T_up=0, - gamma=1.0, - last_epoch=-1, - ): - if T_0 <= 0 or not isinstance(T_0, int): - raise ValueError("Expected positive integer T_0, but got {}".format(T_0)) 
- if T_mult < 1 or not isinstance(T_mult, int): - raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult)) - if T_up < 0 or not isinstance(T_up, int): - raise ValueError("Expected positive integer T_up, but got {}".format(T_up)) - self.T_0 = T_0 - self.T_mult = T_mult - self.base_eta_max = eta_max - self.eta_max = eta_max - self.T_up = T_up - self.T_i = T_0 - self.gamma = gamma - self.cycle = 0 - self.T_cur = last_epoch - self.base_lrs = [learning_rate] - super(CosineAnnealingWarmUpRestarts, self).__init__(learning_rate, last_epoch) - - def get_lr(self): - if self.T_cur == -1: - return self.base_lrs - elif self.T_cur < self.T_up: - return [ - (self.eta_max - base_lr) * self.T_cur / self.T_up + base_lr - for base_lr in self.base_lrs - ] - else: - return [ - base_lr - + (self.eta_max - base_lr) - * ( - 1 - + math.cos( - math.pi * (self.T_cur - self.T_up) / (self.T_i - self.T_up) - ) - ) - / 2 - for base_lr in self.base_lrs - ] - - def step(self, epoch=None): - if epoch is None: - epoch = self.last_epoch + 1 - self.T_cur = self.T_cur + 1 - if self.T_cur >= self.T_i: - self.cycle += 1 - self.T_cur = self.T_cur - self.T_i - self.T_i = (self.T_i - self.T_up) * self.T_mult + self.T_up - else: - if epoch >= self.T_0: - if self.T_mult == 1: - self.T_cur = epoch % self.T_0 - self.cycle = epoch // self.T_0 - else: - n = int( - math.log( - (epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult - ) - ) - self.cycle = n - self.T_cur = epoch - self.T_0 * (self.T_mult**n - 1) / ( - self.T_mult - 1 - ) - self.T_i = self.T_0 * self.T_mult ** (n) - else: - self.T_i = self.T_0 - self.T_cur = epoch - - self.eta_max = self.base_eta_max * (self.gamma**self.cycle) - self.last_epoch = math.floor(epoch) - self.last_lr = self.get_lr()[0] +import math + +from paddle.optimizer import lr + + +class CosineAnnealingWarmUpRestarts(lr.LRScheduler): + def __init__( + self, + learning_rate, + T_0, + T_mult=1, + eta_max=0.1, + T_up=0, + gamma=1.0, + last_epoch=-1, + ): + if T_0 <= 0 or not isinstance(T_0, int): + raise ValueError("Expected positive integer T_0, but got {}".format(T_0)) + if T_mult < 1 or not isinstance(T_mult, int): + raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult)) + if T_up < 0 or not isinstance(T_up, int): + raise ValueError("Expected positive integer T_up, but got {}".format(T_up)) + self.T_0 = T_0 + self.T_mult = T_mult + self.base_eta_max = eta_max + self.eta_max = eta_max + self.T_up = T_up + self.T_i = T_0 + self.gamma = gamma + self.cycle = 0 + self.T_cur = last_epoch + self.base_lrs = [learning_rate] + super(CosineAnnealingWarmUpRestarts, self).__init__(learning_rate, last_epoch) + + def get_lr(self): + if self.T_cur == -1: + return self.base_lrs + elif self.T_cur < self.T_up: + return [ + (self.eta_max - base_lr) * self.T_cur / self.T_up + base_lr + for base_lr in self.base_lrs + ] + else: + return [ + base_lr + + (self.eta_max - base_lr) + * ( + 1 + + math.cos( + math.pi * (self.T_cur - self.T_up) / (self.T_i - self.T_up) + ) + ) + / 2 + for base_lr in self.base_lrs + ] + + def step(self, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + self.T_cur = self.T_cur + 1 + if self.T_cur >= self.T_i: + self.cycle += 1 + self.T_cur = self.T_cur - self.T_i + self.T_i = (self.T_i - self.T_up) * self.T_mult + self.T_up + else: + if epoch >= self.T_0: + if self.T_mult == 1: + self.T_cur = epoch % self.T_0 + self.cycle = epoch // self.T_0 + else: + n = int( + math.log( + (epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult + ) + ) + self.cycle = 
n + self.T_cur = epoch - self.T_0 * (self.T_mult**n - 1) / ( + self.T_mult - 1 + ) + self.T_i = self.T_0 * self.T_mult ** (n) + else: + self.T_i = self.T_0 + self.T_cur = epoch + + self.eta_max = self.base_eta_max * (self.gamma**self.cycle) + self.last_epoch = math.floor(epoch) + self.last_lr = self.get_lr()[0] diff --git a/jointContribution/DU_CNN/utils/mydataset.py b/jointContribution/DU_CNN/utils/mydataset.py index bd8e7f11ea..b7347841c6 100644 --- a/jointContribution/DU_CNN/utils/mydataset.py +++ b/jointContribution/DU_CNN/utils/mydataset.py @@ -1,31 +1,31 @@ -import numpy as np -import paddle -from paddle import io - - -class MapDataset(io.Dataset): - def __init__(self, datashort, datalong, num): - self.datashort = np.array(datashort).astype(paddle.get_default_dtype()) - self.datalong = np.array(datalong).astype(paddle.get_default_dtype()) - self.datarange = len(num) - self.datarangeshort = datashort[0][0].shape[1] - self.datarangelong = datalong[0][0].shape[1] - self.datarange2 = datashort[0][0].shape[1] * datalong[0][0].shape[1] - self.num = num - self.length = 0 - - for i in num: - self.length += datashort[i][0].shape[1] * datalong[i][0].shape[1] - - def __len__(self): - return self.length - - def __getitem__(self, idx): - return ( - self.datashort[self.num[idx // self.datarange2]][0][ - :, idx % self.datarange2 // self.datarangelong - ], - self.datalong[self.num[idx // self.datarange2]][0][ - :, idx % self.datarange2 % self.datarangelong - ], - ) +import numpy as np +import paddle +from paddle import io + + +class MapDataset(io.Dataset): + def __init__(self, datashort, datalong, num): + self.datashort = np.array(datashort).astype(paddle.get_default_dtype()) + self.datalong = np.array(datalong).astype(paddle.get_default_dtype()) + self.datarange = len(num) + self.datarangeshort = datashort[0][0].shape[1] + self.datarangelong = datalong[0][0].shape[1] + self.datarange2 = datashort[0][0].shape[1] * datalong[0][0].shape[1] + self.num = num + self.length = 0 + + for i in num: + self.length += datashort[i][0].shape[1] * datalong[i][0].shape[1] + + def __len__(self): + return self.length + + def __getitem__(self, idx): + return ( + self.datashort[self.num[idx // self.datarange2]][0][ + :, idx % self.datarange2 // self.datarangelong + ], + self.datalong[self.num[idx // self.datarange2]][0][ + :, idx % self.datarange2 % self.datarangelong + ], + ) diff --git a/jointContribution/DU_CNN/utils/pixelshuffle1d.py b/jointContribution/DU_CNN/utils/pixelshuffle1d.py index d54b97d0b7..d8817b3a8b 100644 --- a/jointContribution/DU_CNN/utils/pixelshuffle1d.py +++ b/jointContribution/DU_CNN/utils/pixelshuffle1d.py @@ -1,56 +1,56 @@ -from paddle import nn - -# "long" and "short" denote longer and shorter samples - - -class PixelShuffle1D(nn.Layer): - """ - 1D pixel shuffler. 
https://arxiv.org/pdf/1609.05158.pdf - Upscales sample length, downscales channel length - "short" is input, "long" is output - """ - - def __init__(self, upscale_factor): - super(PixelShuffle1D, self).__init__() - self.upscale_factor = upscale_factor - - def forward(self, x): - batch_size = x.shape[0] - short_channel_len = x.shape[1] - short_width = x.shape[2] - - long_channel_len = short_channel_len // self.upscale_factor - long_width = self.upscale_factor * short_width - - x = x.reshape([batch_size, self.upscale_factor, long_channel_len, short_width]) - x = x.transpose(perm=(0, 2, 3, 1)) - x = x.reshape((batch_size, long_channel_len, long_width)) - - return x - - -class PixelUnshuffle1D(nn.Layer): - """ - Inverse of 1D pixel shuffler - Upscales channel length, downscales sample length - "long" is input, "short" is output - """ - - def __init__(self, downscale_factor): - super(PixelUnshuffle1D, self).__init__() - self.downscale_factor = downscale_factor - - def forward(self, x): - batch_size = x.shape[0] - long_channel_len = x.shape[1] - long_width = x.shape[2] - - short_channel_len = long_channel_len * self.downscale_factor - short_width = long_width // self.downscale_factor - - x = x.reshape( - [batch_size, long_channel_len, short_width, self.downscale_factor] - ) - x = x.transpose(perm=(0, 3, 1, 2)) - x = x.reshape([batch_size, short_channel_len, short_width]) - return x +from paddle import nn + +# "long" and "short" denote longer and shorter samples + + +class PixelShuffle1D(nn.Layer): + """ + 1D pixel shuffler. https://arxiv.org/pdf/1609.05158.pdf + Upscales sample length, downscales channel length + "short" is input, "long" is output + """ + + def __init__(self, upscale_factor): + super(PixelShuffle1D, self).__init__() + self.upscale_factor = upscale_factor + + def forward(self, x): + batch_size = x.shape[0] + short_channel_len = x.shape[1] + short_width = x.shape[2] + + long_channel_len = short_channel_len // self.upscale_factor + long_width = self.upscale_factor * short_width + + x = x.reshape([batch_size, self.upscale_factor, long_channel_len, short_width]) + x = x.transpose(perm=(0, 2, 3, 1)) + x = x.reshape((batch_size, long_channel_len, long_width)) + + return x + + +class PixelUnshuffle1D(nn.Layer): + """ + Inverse of 1D pixel shuffler + Upscales channel length, downscales sample length + "long" is input, "short" is output + """ + + def __init__(self, downscale_factor): + super(PixelUnshuffle1D, self).__init__() + self.downscale_factor = downscale_factor + + def forward(self, x): + batch_size = x.shape[0] + long_channel_len = x.shape[1] + long_width = x.shape[2] + + short_channel_len = long_channel_len * self.downscale_factor + short_width = long_width // self.downscale_factor + + x = x.reshape( + [batch_size, long_channel_len, short_width, self.downscale_factor] + ) + x = x.transpose(perm=(0, 3, 1, 2)) + x = x.reshape([batch_size, short_channel_len, short_width]) + return x diff --git a/jointContribution/DU_CNN/utils/util.py b/jointContribution/DU_CNN/utils/util.py index 730d6f3e51..2012487362 100644 --- a/jointContribution/DU_CNN/utils/util.py +++ b/jointContribution/DU_CNN/utils/util.py @@ -1,296 +1,296 @@ -import os - -import mat73 -import matplotlib.pyplot as plt -import numpy as np -import paddle -import yaml -from paddle import io -from tqdm import tqdm -from utils import mydataset - - -def load_config(config_path): - with open(config_path) as file: - config = yaml.safe_load(file) - return config - - -def get_dataset(data_path, datafile_name, podfile_name): - data = 
{} - poddata = {} - specfile = mat73.loadmat(os.path.join(data_path, datafile_name)) - data["wv"] = np.transpose(specfile["xframe"]) - data["shortdata"] = specfile["val"] - data["longdata"] = specfile["vallong"] - podfile = mat73.loadmat(os.path.join(data_path, podfile_name)) - - poddata["snapshot_mean"] = paddle.to_tensor( - np.array(podfile["Snapshot_mean"]), dtype=paddle.get_default_dtype() - ) - poddata["V"] = paddle.to_tensor( - np.array(podfile["V"]), dtype=paddle.get_default_dtype() - ) - poddata["Dvbd"] = paddle.to_tensor( - np.array(podfile["DvBoundary"]), dtype=paddle.get_default_dtype() - ) - return data, poddata - - -def get_dataloader(config, data, trainnum, testnum): - trainloader = io.DataLoader( - mydataset.MapDataset(data["shortdata"], data["longdata"], trainnum), - batch_size=config["batch_size"], - shuffle=True, - num_workers=4, - persistent_workers=True, - ) - - testloader = io.DataLoader( - mydataset.MapDataset(data["shortdata"], data["longdata"], testnum), - batch_size=2, - shuffle=False, - ) - - return trainloader, testloader - - -def setup_log_directory(config): - nlayer, nkernel, nchannel, ndownsample, lossnum = ( - config["layers"], - config["kernels"], - config["channels"], - config["downsamples"], - config["lossnum"], - ) - output_dir = f"./{nlayer}layers_{nkernel}kernel_{nchannel}channel_down{ndownsample}_loss{lossnum}" - output_logs_dir = os.path.join(output_dir, "logs") - os.makedirs(output_logs_dir, exist_ok=True) - return output_dir - - -def load_network(path, model, optimizer): - checkpoint = paddle.load(path) - model.load_state_dict(checkpoint["model_state_dict"]) - optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) - epoch = checkpoint["epoch"] - loss = checkpoint["loss"] - return model, optimizer, epoch, loss - - -def train_one_epoch( - epoch, model, trainloader, optimizer, criterion, scheduler, config, poddata, loss_ -): - start, end = paddle.device.cuda.Event(enable_timing=True), paddle.device.cuda.Event( - enable_timing=True - ) - start.record() - running_loss = 0.0 - model.train() - totalepoch = int(config["num_epochs"]) - with tqdm(total=len(trainloader), dynamic_ncols=True) as tq: - tq.set_description(f"Train :: Epoch: {epoch}/{totalepoch}") - data_count = 0 - for data, data_label in trainloader: - data_count = data_count + len(data) - tq.update(1) - if config["Randomintensity"] == 1: - randint = paddle.to_tensor( - np.random.uniform(0.8, 1.2, size=(data.shape[0], 1)), - dtype=paddle.get_default_dtype(), - ) - else: - randint = 1 - spec = data * randint - label = data_label * randint - output = model(spec.unsqueeze(1)) - - loss = calloss( - label, output, randint, criterion, poddata, config["lossnum"] - ) - - optimizer.clear_grad() - loss.backward() - optimizer.step() - running_loss += loss.item() * data.shape[0] - - tq.set_postfix_str(s=f"Loss: {loss.item():.4f}") - if config["Scheduler"]: - scheduler.step() - - # ===================log======================== - end.record() # Waits for everything to finish running - loss_.append(running_loss / data_count) - paddle.device.cuda.synchronize() - tq.set_postfix_str(s=f"Epoch Loss: {loss_[-1]:.4f}") - - -def calloss(label, output, randint, criterion, poddata, lossnum): - if lossnum == 0: - loss = criterion(output.squeeze(), label) - elif lossnum == 1: - ouputpca = paddle.matmul( - output.squeeze().subtract(randint * poddata["snapshot_mean"]), - poddata["V"], - ) - labelpca = paddle.matmul( - label.subtract(randint * poddata["snapshot_mean"]), poddata["V"] - ) - loss = 0.9 * 
criterion(output.squeeze(), label) + 0.1 * criterion( - ouputpca, labelpca - ) - elif lossnum == 2: - ouputpca = paddle.matmul( - output.squeeze().subtract(randint * poddata["snapshot_mean"]), - poddata["V"], - ) - labelpca = paddle.matmul( - label.subtract(randint * poddata["snapshot_mean"]), poddata["V"] - ) - normouputpca = (ouputpca - poddata["Dvbd"][:, 1]) / ( - poddata["Dvbd"][:, 0] - poddata["Dvbd"][:, 1] - ).tile((labelpca.shape[0], 1)) - normlabelpca = (labelpca - poddata["Dvbd"][:, 1]) / ( - poddata["Dvbd"][:, 0] - poddata["Dvbd"][:, 1] - ).tile((labelpca.shape[0], 1)) - loss = 0.9 * criterion(output.squeeze(), label) + 0.1 * criterion( - normouputpca, normlabelpca - ) - elif lossnum == 3: - ouputpca = paddle.matmul( - output.squeeze().subtract(randint * poddata["snapshot_mean"]), - poddata["V"], - ) - labelpca = paddle.matmul( - label.subtract(randint * poddata["snapshot_mean"]), poddata["V"] - ) - loss = criterion(ouputpca, labelpca) - elif lossnum == 4: - ouputpca = paddle.matmul( - output.squeeze().subtract(randint * poddata["snapshot_mean"]), - poddata["V"], - ) - labelpca = paddle.matmul( - label.subtract(randint * poddata["snapshot_mean"]), poddata["V"] - ) - normouputpca = (ouputpca - poddata["Dvbd"][:, 1]) / ( - poddata["Dvbd"][:, 0] - poddata["Dvbd"][:, 1] - ).tile((labelpca.shape[0], 1)) - normlabelpca = (labelpca - poddata["Dvbd"][:, 1]) / ( - poddata["Dvbd"][:, 0] - poddata["Dvbd"][:, 1] - ).tile((labelpca.shape[0], 1)) - loss = criterion(normouputpca, normlabelpca) - return loss - - -def save_model( - epoch, model, optimizer, loss_, evalloss_, testnum, trainnum, output_dir -): - paddle.save( - { - "epoch": epoch, - "model_state_dict": model.state_dict(), - "optimizer_state_dict": optimizer.state_dict(), - "loss": loss_, - "testloss": evalloss_, - "testnum": testnum, - "trainnum": trainnum, - }, - os.path.join(output_dir, "tut.ckpt"), - ) - - -def evaluate_testloss(model, testloader, criterion, evalloss_): - model.eval() - eval_loss = 0.0 - for evaldata, eval_label in testloader: - spec = evaldata - label = eval_label - with paddle.no_grad(): - out = model(spec.unsqueeze(1)) - loss = criterion(out.squeeze(), label) - eval_loss += loss.item() * evaldata.shape[0] - evalloss_.append(eval_loss / len(testloader)) - - -def save_figures(output_dir, epoch, model, testnum, data): - wv = data["wv"] - testshow = [] - testshow_label = [] - - for i in range(len(testnum)): - testshow.append(data["shortdata"][testnum[i]][0][:, 1]) - testshow_label.append(data["longdata"][testnum[i]][0][:, 1]) - testshow = paddle.to_tensor(np.array(testshow), dtype=paddle.get_default_dtype()) - testshow_label = paddle.to_tensor( - np.array(testshow_label), dtype=paddle.get_default_dtype() - ) - - testshow_predict = model(testshow.unsqueeze(1)).numpy() - plt.figure(figsize=(20, 10)) - for i in range(len(testnum)): - plt.subplot(len(testnum), 4, 4 * i + 1) # nrows=2, ncols=1, index=1 - plt.plot(wv, testshow[i].numpy()) - - if i == 0: - plt.title("input") - - if i == len(testnum) - 1: - plt.xticks(visible=True) - plt.xlabel("wavelength (nm)") - else: - plt.xticks(visible=False) - - plt.subplot(len(testnum), 4, 4 * i + 2) # nrows=2, ncols=1, index=2 - plt.plot(wv, testshow_predict[i].squeeze()) - if i == 0: - plt.title("output") - if i == len(testnum) - 1: - plt.xlabel("wavelength (nm)") - plt.xticks(visible=True) - else: - plt.xticks(visible=False) - - plt.subplot(len(testnum), 4, 4 * i + 3) # nrows=2, ncols=1, index=2 - plt.plot(wv, testshow_label[i].numpy()) - - if i == 0: - plt.title("real output") 
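# Each row of the diagnostic figure shows four panels for one test spectrum:
# the "short" input, the network prediction ("output"), the reference "long"
# spectrum ("real output"), and their normalized difference
# (prediction - reference) / reference, with the y-axis clipped to +/-0.2.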
- if i == len(testnum) - 1: - plt.xlabel("wavelength (nm)") - plt.xticks(visible=True) - else: - plt.xticks(visible=False) - - plt.subplot(len(testnum), 4, 4 * i + 4) # nrows=2, ncols=1, index=2 - plt.plot( - wv, - (testshow_predict[i].squeeze() - testshow_label[i].numpy()) - / testshow_label[i].numpy(), - ) - plt.ylim([-0.2, 0.2]) - if i == 0: - plt.title("Normalized difference") - if i == len(testnum) - 1: - plt.xlabel("wavelength (nm)") - plt.xticks(visible=True) - else: - plt.xticks(visible=False) - - plt.tight_layout() - plt.savefig(os.path.join(output_dir, f"image_{epoch}.png"), bbox_inches="tight") - plt.close("all") - plt.clf() - - -def save_lossfigure(output_dir, loss_, evalloss_): - plt.figure(2) - plt.plot(range(1, len(loss_) + 1), loss_) - plt.title("Loss") - plt.xlabel("epoch") - plt.plot(range(1, len(evalloss_) * 10 + 1, 10), evalloss_) - plt.legend(["train loss", "test loss"]) - plt.yscale("log") - plt.savefig(os.path.join(output_dir, "loss.png"), bbox_inches="tight") - plt.close("all") - plt.clf() +import os + +import mat73 +import matplotlib.pyplot as plt +import numpy as np +import paddle +import yaml +from paddle import io +from tqdm import tqdm +from utils import mydataset + + +def load_config(config_path): + with open(config_path) as file: + config = yaml.safe_load(file) + return config + + +def get_dataset(data_path, datafile_name, podfile_name): + data = {} + poddata = {} + specfile = mat73.loadmat(os.path.join(data_path, datafile_name)) + data["wv"] = np.transpose(specfile["xframe"]) + data["shortdata"] = specfile["val"] + data["longdata"] = specfile["vallong"] + podfile = mat73.loadmat(os.path.join(data_path, podfile_name)) + + poddata["snapshot_mean"] = paddle.to_tensor( + np.array(podfile["Snapshot_mean"]), dtype=paddle.get_default_dtype() + ) + poddata["V"] = paddle.to_tensor( + np.array(podfile["V"]), dtype=paddle.get_default_dtype() + ) + poddata["Dvbd"] = paddle.to_tensor( + np.array(podfile["DvBoundary"]), dtype=paddle.get_default_dtype() + ) + return data, poddata + + +def get_dataloader(config, data, trainnum, testnum): + trainloader = io.DataLoader( + mydataset.MapDataset(data["shortdata"], data["longdata"], trainnum), + batch_size=config["batch_size"], + shuffle=True, + num_workers=4, + persistent_workers=True, + ) + + testloader = io.DataLoader( + mydataset.MapDataset(data["shortdata"], data["longdata"], testnum), + batch_size=2, + shuffle=False, + ) + + return trainloader, testloader + + +def setup_log_directory(config): + nlayer, nkernel, nchannel, ndownsample, lossnum = ( + config["layers"], + config["kernels"], + config["channels"], + config["downsamples"], + config["lossnum"], + ) + output_dir = f"./{nlayer}layers_{nkernel}kernel_{nchannel}channel_down{ndownsample}_loss{lossnum}" + output_logs_dir = os.path.join(output_dir, "logs") + os.makedirs(output_logs_dir, exist_ok=True) + return output_dir + + +def load_network(path, model, optimizer): + checkpoint = paddle.load(path) + model.load_state_dict(checkpoint["model_state_dict"]) + optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + epoch = checkpoint["epoch"] + loss = checkpoint["loss"] + return model, optimizer, epoch, loss + + +def train_one_epoch( + epoch, model, trainloader, optimizer, criterion, scheduler, config, poddata, loss_ +): + start, end = paddle.device.cuda.Event(enable_timing=True), paddle.device.cuda.Event( + enable_timing=True + ) + start.record() + running_loss = 0.0 + model.train() + totalepoch = int(config["num_epochs"]) + with tqdm(total=len(trainloader), 
dynamic_ncols=True) as tq: + tq.set_description(f"Train :: Epoch: {epoch}/{totalepoch}") + data_count = 0 + for data, data_label in trainloader: + data_count = data_count + len(data) + tq.update(1) + if config["Randomintensity"] == 1: + randint = paddle.to_tensor( + np.random.uniform(0.8, 1.2, size=(data.shape[0], 1)), + dtype=paddle.get_default_dtype(), + ) + else: + randint = 1 + spec = data * randint + label = data_label * randint + output = model(spec.unsqueeze(1)) + + loss = calloss( + label, output, randint, criterion, poddata, config["lossnum"] + ) + + optimizer.clear_grad() + loss.backward() + optimizer.step() + running_loss += loss.item() * data.shape[0] + + tq.set_postfix_str(s=f"Loss: {loss.item():.4f}") + if config["Scheduler"]: + scheduler.step() + + # ===================log======================== + end.record() # Waits for everything to finish running + loss_.append(running_loss / data_count) + paddle.device.cuda.synchronize() + tq.set_postfix_str(s=f"Epoch Loss: {loss_[-1]:.4f}") + + +def calloss(label, output, randint, criterion, poddata, lossnum): + if lossnum == 0: + loss = criterion(output.squeeze(), label) + elif lossnum == 1: + ouputpca = paddle.matmul( + output.squeeze().subtract(randint * poddata["snapshot_mean"]), + poddata["V"], + ) + labelpca = paddle.matmul( + label.subtract(randint * poddata["snapshot_mean"]), poddata["V"] + ) + loss = 0.9 * criterion(output.squeeze(), label) + 0.1 * criterion( + ouputpca, labelpca + ) + elif lossnum == 2: + ouputpca = paddle.matmul( + output.squeeze().subtract(randint * poddata["snapshot_mean"]), + poddata["V"], + ) + labelpca = paddle.matmul( + label.subtract(randint * poddata["snapshot_mean"]), poddata["V"] + ) + normouputpca = (ouputpca - poddata["Dvbd"][:, 1]) / ( + poddata["Dvbd"][:, 0] - poddata["Dvbd"][:, 1] + ).tile((labelpca.shape[0], 1)) + normlabelpca = (labelpca - poddata["Dvbd"][:, 1]) / ( + poddata["Dvbd"][:, 0] - poddata["Dvbd"][:, 1] + ).tile((labelpca.shape[0], 1)) + loss = 0.9 * criterion(output.squeeze(), label) + 0.1 * criterion( + normouputpca, normlabelpca + ) + elif lossnum == 3: + ouputpca = paddle.matmul( + output.squeeze().subtract(randint * poddata["snapshot_mean"]), + poddata["V"], + ) + labelpca = paddle.matmul( + label.subtract(randint * poddata["snapshot_mean"]), poddata["V"] + ) + loss = criterion(ouputpca, labelpca) + elif lossnum == 4: + ouputpca = paddle.matmul( + output.squeeze().subtract(randint * poddata["snapshot_mean"]), + poddata["V"], + ) + labelpca = paddle.matmul( + label.subtract(randint * poddata["snapshot_mean"]), poddata["V"] + ) + normouputpca = (ouputpca - poddata["Dvbd"][:, 1]) / ( + poddata["Dvbd"][:, 0] - poddata["Dvbd"][:, 1] + ).tile((labelpca.shape[0], 1)) + normlabelpca = (labelpca - poddata["Dvbd"][:, 1]) / ( + poddata["Dvbd"][:, 0] - poddata["Dvbd"][:, 1] + ).tile((labelpca.shape[0], 1)) + loss = criterion(normouputpca, normlabelpca) + return loss + + +def save_model( + epoch, model, optimizer, loss_, evalloss_, testnum, trainnum, output_dir +): + paddle.save( + { + "epoch": epoch, + "model_state_dict": model.state_dict(), + "optimizer_state_dict": optimizer.state_dict(), + "loss": loss_, + "testloss": evalloss_, + "testnum": testnum, + "trainnum": trainnum, + }, + os.path.join(output_dir, "tut.ckpt"), + ) + + +def evaluate_testloss(model, testloader, criterion, evalloss_): + model.eval() + eval_loss = 0.0 + for evaldata, eval_label in testloader: + spec = evaldata + label = eval_label + with paddle.no_grad(): + out = model(spec.unsqueeze(1)) + loss = 
criterion(out.squeeze(), label) + eval_loss += loss.item() * evaldata.shape[0] + evalloss_.append(eval_loss / len(testloader)) + + +def save_figures(output_dir, epoch, model, testnum, data): + wv = data["wv"] + testshow = [] + testshow_label = [] + + for i in range(len(testnum)): + testshow.append(data["shortdata"][testnum[i]][0][:, 1]) + testshow_label.append(data["longdata"][testnum[i]][0][:, 1]) + testshow = paddle.to_tensor(np.array(testshow), dtype=paddle.get_default_dtype()) + testshow_label = paddle.to_tensor( + np.array(testshow_label), dtype=paddle.get_default_dtype() + ) + + testshow_predict = model(testshow.unsqueeze(1)).numpy() + plt.figure(figsize=(20, 10)) + for i in range(len(testnum)): + plt.subplot(len(testnum), 4, 4 * i + 1) # nrows=2, ncols=1, index=1 + plt.plot(wv, testshow[i].numpy()) + + if i == 0: + plt.title("input") + + if i == len(testnum) - 1: + plt.xticks(visible=True) + plt.xlabel("wavelength (nm)") + else: + plt.xticks(visible=False) + + plt.subplot(len(testnum), 4, 4 * i + 2) # nrows=2, ncols=1, index=2 + plt.plot(wv, testshow_predict[i].squeeze()) + if i == 0: + plt.title("output") + if i == len(testnum) - 1: + plt.xlabel("wavelength (nm)") + plt.xticks(visible=True) + else: + plt.xticks(visible=False) + + plt.subplot(len(testnum), 4, 4 * i + 3) # nrows=2, ncols=1, index=2 + plt.plot(wv, testshow_label[i].numpy()) + + if i == 0: + plt.title("real output") + if i == len(testnum) - 1: + plt.xlabel("wavelength (nm)") + plt.xticks(visible=True) + else: + plt.xticks(visible=False) + + plt.subplot(len(testnum), 4, 4 * i + 4) # nrows=2, ncols=1, index=2 + plt.plot( + wv, + (testshow_predict[i].squeeze() - testshow_label[i].numpy()) + / testshow_label[i].numpy(), + ) + plt.ylim([-0.2, 0.2]) + if i == 0: + plt.title("Normalized difference") + if i == len(testnum) - 1: + plt.xlabel("wavelength (nm)") + plt.xticks(visible=True) + else: + plt.xticks(visible=False) + + plt.tight_layout() + plt.savefig(os.path.join(output_dir, f"image_{epoch}.png"), bbox_inches="tight") + plt.close("all") + plt.clf() + + +def save_lossfigure(output_dir, loss_, evalloss_): + plt.figure(2) + plt.plot(range(1, len(loss_) + 1), loss_) + plt.title("Loss") + plt.xlabel("epoch") + plt.plot(range(1, len(evalloss_) * 10 + 1, 10), evalloss_) + plt.legend(["train loss", "test loss"]) + plt.yscale("log") + plt.savefig(os.path.join(output_dir, "loss.png"), bbox_inches="tight") + plt.close("all") + plt.clf() diff --git a/jointContribution/Deep-Spatio-Temporal/getNRELdata.py b/jointContribution/Deep-Spatio-Temporal/getNRELdata.py index 7e7c036de8..f134cf8023 100644 --- a/jointContribution/Deep-Spatio-Temporal/getNRELdata.py +++ b/jointContribution/Deep-Spatio-Temporal/getNRELdata.py @@ -1,51 +1,51 @@ -""" - Get wind speed data from NREL WIND - https://www.nrel.gov/grid/wind-toolkit.html - Select one wind farm with 100 turbines from Wyoming -""" -import h5pyd -import numpy as np -import pandas as pd - -f = h5pyd.File("/nrel/wtk/conus/wtk_conus_2012.h5", "r") -meta = pd.DataFrame(f["meta"][()]) - -lon = -105.243988 -lat = 41.868515 -df = meta[ - (meta["longitude"] < lon + 0.25) - & (meta["longitude"] >= lon) - & (meta["latitude"] <= lat + 0.03) - & (meta["latitude"] > lat - 0.18) -] - -df = df.drop( - [ - 864121, - 868456, - 869542, - 870629, - 871718, - 872807, - 873897, - 876088, - 866300, - 867383, - 868467, - 869553, - 870640, - ] -) -df.to_csv("./data/wind_speed_meta.csv") -gid_list = list(df.index) -wind_speed_list = [] -for gid in gid_list: - wind_speed_list.append(f["windspeed_100m"][:, gid]) - 
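# The WIND Toolkit stores wind speed as scaled integers, which is why the
# values read here are later divided by the dataset's "scale_factor"
# attribute to recover m/s. A minimal stand-alone sketch of that conversion
# for a single grid point (assumes an h5pyd/HSDS configuration for the NREL
# endpoint; the gid below is one of those referenced above, chosen purely
# for illustration):
import h5pyd

hf = h5pyd.File("/nrel/wtk/conus/wtk_conus_2012.h5", "r")
ds = hf["windspeed_100m"]
speed_mps = ds[:, 864121] / ds.attrs["scale_factor"]  # scaled int -> m/s
hf.close()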
-time_array = f["time_index"][()] -wind_speed_array = np.vstack(wind_speed_list) - -wind_speed_df = pd.DataFrame(wind_speed_array, index=df.index, columns=time_array) -wind_speed_df = wind_speed_df / f["windspeed_100m"].attrs["scale_factor"] - -wind_speed_df.to_csv("./data/wind_speed.csv") +""" + Get wind speed data from NREL WIND + https://www.nrel.gov/grid/wind-toolkit.html + Select one wind farm with 100 turbines from Wyoming +""" +import h5pyd +import numpy as np +import pandas as pd + +f = h5pyd.File("/nrel/wtk/conus/wtk_conus_2012.h5", "r") +meta = pd.DataFrame(f["meta"][()]) + +lon = -105.243988 +lat = 41.868515 +df = meta[ + (meta["longitude"] < lon + 0.25) + & (meta["longitude"] >= lon) + & (meta["latitude"] <= lat + 0.03) + & (meta["latitude"] > lat - 0.18) +] + +df = df.drop( + [ + 864121, + 868456, + 869542, + 870629, + 871718, + 872807, + 873897, + 876088, + 866300, + 867383, + 868467, + 869553, + 870640, + ] +) +df.to_csv("./data/wind_speed_meta.csv") +gid_list = list(df.index) +wind_speed_list = [] +for gid in gid_list: + wind_speed_list.append(f["windspeed_100m"][:, gid]) + +time_array = f["time_index"][()] +wind_speed_array = np.vstack(wind_speed_list) + +wind_speed_df = pd.DataFrame(wind_speed_array, index=df.index, columns=time_array) +wind_speed_df = wind_speed_df / f["windspeed_100m"].attrs["scale_factor"] + +wind_speed_df.to_csv("./data/wind_speed.csv") diff --git a/jointContribution/Deep-Spatio-Temporal/src/datamgr.py b/jointContribution/Deep-Spatio-Temporal/src/datamgr.py index 6fb0750b47..6dba36f8d5 100644 --- a/jointContribution/Deep-Spatio-Temporal/src/datamgr.py +++ b/jointContribution/Deep-Spatio-Temporal/src/datamgr.py @@ -1,308 +1,308 @@ -import numpy as np -import paddle -import pandas as pd -from paddle import io -from sklearn import neighbors -from sklearn import preprocessing - - -class CRSData: - def __init__( - self, - file_path="../data/Wind Spatio-Temporal Dataset2.csv", - speed_scale=1.0, - speed_lwrbd=0.0, - speed_uprbd=25.0, - power_scale=1.0, - power_lwrbd=0.0, - power_uprbd=1.0, - K=5, - ): - self.csv_data = pd.read_csv(file_path, index_col=0, low_memory=False) - self.spatio_info = self.csv_data.iloc[0:2, range(0, 200)].transpose() - self.norm_spatio = 10.0 * preprocessing.scale(self.spatio_info, with_std=False) - self.K = K - - self.speed_scale = speed_scale - self.speed_lwrbd = speed_lwrbd - self.speed_uprbd = speed_uprbd - - self.power_scale = power_scale - self.power_lwrbd = power_lwrbd - self.power_uprbd = power_uprbd - - temporal_data = self.csv_data.iloc[4:].astype("float").transpose() - temporal_data.index = self.csv_data.iloc[3] - self.temporal_data = self.correct_ts_index(temporal_data) - self.temporal_data.columns = pd.to_datetime(self.temporal_data.columns) - - self.turbine_speed = self.temporal_data.iloc[range(0, 400, 2)] - self.turbine_power = self.temporal_data.iloc[range(1, 400, 2)] - self.mast_speed = self.temporal_data.iloc[[400, 402, 404]] - self.mast_direction = self.temporal_data.iloc[[401, 403, 405]] - - self.scale_data( - self.speed_scale, - self.speed_lwrbd, - self.speed_uprbd, - self.power_scale, - self.power_lwrbd, - self.power_uprbd, - ) - - self._get_neighbors() - - def correct_ts_index(self, original_df): - corrected_time_index = pd.date_range( - original_df.columns[0], original_df.columns[-1], freq="1H" - ) - # expand one hour - corrected_time_index = corrected_time_index.union( - [corrected_time_index[-1] + pd.Timedelta(1, unit="H")] - ) - corrected_data = original_df.copy() - corrected_data.columns = 
corrected_time_index - return corrected_data - - def scale_data( - self, - speed_scale=1.0, - speed_lwrbd=0.0, - speed_uprbd=25.0, - power_scale=1.0, - power_lwrbd=0.0, - power_uprbd=1.0, - ): - """Scale Data""" - self.norm_speed = ( - 2 - * speed_scale - * (self.turbine_speed - speed_lwrbd) - / (speed_uprbd - speed_lwrbd) - - speed_scale - ) - self.norm_power = ( - 2 - * power_scale - * (self.turbine_power - power_lwrbd) - / (power_uprbd - power_lwrbd) - - power_scale - ) - - def _get_neighbors(self): - nbrs = neighbors.NearestNeighbors(n_neighbors=self.K, algorithm="auto").fit( - self.norm_spatio - ) - self.kNN_neighbors = nbrs.kneighbors(self.norm_spatio)[1] - - speed_mat = self.norm_speed.to_numpy() - speed_diff = speed_mat[:, range(1, 30 * 24)] - speed_mat[:, range(30 * 24 - 1)] - norm_speed_diff = preprocessing.normalize(speed_diff) - - speed_diff_sim = np.matmul(norm_speed_diff, norm_speed_diff.transpose()) - - self.speed_diff_neighbors = (-speed_diff_sim).argsort()[:, : self.K] - - -class DataMgr(CRSData): - def __init__( - self, - file_path="../../data/Wind Spatio-Temporal Dataset2.csv", - train_len=60 * 24, - val_len=30 * 24, - time_len=365 * 24, - ENC_LEN=48, - DEC_LEN=12, - K=5, - similarity="spatio", - ): - super().__init__(file_path=file_path, K=K) - self.ids = ( - paddle.arange(200).astype(paddle.get_default_dtype()).reshape((200, 1)) - ) - if K <= 1: - self.K = 1 - self.neighbors = paddle.arange(200).reshape((200, 1)) - elif similarity == "spatio": - self.K = K - self.neighbors = self.kNN_neighbors - elif similarity == "speed diff": - self.K = K - self.neighbors = self.speed_diff_neighbors - else: - raise NameError("K or similarity not correctly defined") - - self.speed_tensor = paddle.to_tensor( - self.norm_speed.to_numpy(), dtype=paddle.get_default_dtype() - ).unsqueeze(-1) - self.power_tensor = paddle.to_tensor( - self.norm_power.to_numpy(), dtype=paddle.get_default_dtype() - ).unsqueeze(-1) - - self.time_features = ( - paddle.to_tensor( - self.norm_speed.columns.hour.values, dtype=paddle.get_default_dtype() - ) - .tile(repeat_times=(200, 1)) - .unsqueeze(-1) - ) - self.spatio_tensor = ( - paddle.to_tensor(self.norm_spatio, dtype=paddle.get_default_dtype()) - .unsqueeze(1) - .tile(repeat_times=(1, 8760, 1)) - ) - - knn_speed_list = [] - for idx in self.neighbors: - t_select = paddle.index_select( - self.speed_tensor, paddle.to_tensor(idx), axis=0 - ) - ndim = t_select.ndim - perm = list(range(ndim)) - perm[0] = 2 - perm[2] = 0 - knn_speed_list.append(t_select.transpose(perm=perm)) - - self.data = paddle.concat(knn_speed_list, axis=0) - - self.data = paddle.concat( - (self.data, self.power_tensor, self.time_features, self.spatio_tensor), - axis=2, - ) - - self.train_len = train_len - self.time_len = time_len - - self.enc_len = ENC_LEN - self.dec_len = DEC_LEN - self.total_len = ENC_LEN + DEC_LEN - - self.train_data = self.data[:, :train_len, :] - self.val_data = self.data[:, train_len : (train_len + val_len), :] - self.test_data = self.data[:, (train_len + val_len) :, :] - - -class wpDataset(io.Dataset): - def __init__(self, data, ENC_LEN=48, DEC_LEN=12, K=5): - self.data = data - self.enc_len = ENC_LEN - self.total_len = ENC_LEN + DEC_LEN - - def __getitem__(self, index): - tim = index // 200 - turbine = index % 200 - one_point = self.data[turbine, tim : (tim + self.total_len), :] - x = paddle.index_select( - one_point[: self.enc_len, :], - paddle.to_tensor([*range(5), *range(6, 9)]), - axis=1, - ) - y = one_point[(self.enc_len - 1) : self.total_len, :] - - y = y[:, 5:9] 
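# Feature columns of `data` are laid out as: [0 .. K-1] kNN neighbour wind
# speeds (K = 5 here), [5] turbine power, [6] hour of day, [7 .. 8] normalized
# coordinates. The encoder input `x` therefore drops the power column
# (indices 0-4 and 6-8), while the decoder tensor `y` keeps power, hour and
# coordinates (columns 5:9); the trainer later reads the power target as
# y[:, 1:, 0].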
- return paddle.to_tensor(turbine, dtype=paddle.int64), x, y - - def __len__(self): - return self.data.shape[0] * (self.data.shape[1] - self.total_len) - - -class NRELDataKneighbors: - def __init__( - self, - folder_path="../data/", - file_path="1h_wyoming_wind_speed_100m.csv", - meta_path="1h_wyoming_meta.csv", - K=9, - ): - self.K = K - - self.df_wind_speed = pd.read_csv(folder_path + file_path, index_col=0) - self.df_meta = pd.read_csv(folder_path + meta_path, index_col=0) - self.wind_speed_tensor = paddle.to_tensor( - self.df_wind_speed.to_numpy(), dtype=paddle.get_default_dtype() - ) - self.time_series = pd.to_datetime( - [x.split("'")[1] for x in self.df_wind_speed.columns] - ) - self._scale_data() - self._get_neighbors() - - def _scale_data(self, lwr=0, upr=40, scale=1.0): - self.wind_speed_tensor = ( - 2 * scale * (self.wind_speed_tensor - lwr) / upr - scale - ) - - def _get_neighbors(self): - nbrs = neighbors.NearestNeighbors(n_neighbors=9, algorithm="auto").fit( - self.df_meta.iloc[:, :2] - ) - self.kNN_neighbors = nbrs.kneighbors(self.df_meta.iloc[:, :2])[1] - self.norm_spatio = 10.0 * preprocessing.scale( - self.df_meta.iloc[:, :2], with_std=False - ) - - -class NRELDataMgr(NRELDataKneighbors): - def __init__( - self, - folder_path="../data/", - file_path="1h_wyoming_wind_speed_100m.csv", - meta_path="1h_wyoming_meta.csv", - train_len=6 * 30 * 24, - val_len=2 * 30 * 24, - K=9, - ): - super().__init__( - folder_path=folder_path, file_path=file_path, meta_path=meta_path, K=K - ) - self.neighbors = self.kNN_neighbors - self.wind_speed_tensor = self.wind_speed_tensor.unsqueeze(-1) - self.spatio_tensor = paddle.to_tensor( - self.norm_spatio, dtype=paddle.get_default_dtype() - ).unsqueeze(1) - self.spatio_tensor = self.spatio_tensor.tile(repeat_times=(1, 8784, 1)) - - knn_speed_list = [] - for idx in self.neighbors: - select1 = paddle.index_select( - self.wind_speed_tensor, paddle.to_tensor(idx), axis=0 - ) - knn_speed_list.append(select1.transpose((2, 1, 0))) - self.data = paddle.concat(knn_speed_list, axis=0) - - self.data = paddle.concat((self.data, self.spatio_tensor), axis=2) - - self.hour = paddle.to_tensor( - self.time_series.hour.values, dtype=paddle.get_default_dtype() - ) - self.hour = self.hour.unsqueeze(0) - self.hour = self.hour.unsqueeze(-1) - self.hour = self.hour.tile(repeat_times=(100, 1, 1)) - - self.data = paddle.concat((self.data, self.hour), axis=2) - - self.train_data = self.data[:, :train_len, :] - self.val_data = self.data[:, train_len : (train_len + val_len), :] - self.test_data = self.data[:, (train_len + val_len) :, :] - - -class NRELwpDataset(io.Dataset): - def __init__(self, data, ENC_LEN=48, DEC_LEN=12, K=9): - self.data = data - self.enc_len = ENC_LEN - self.total_len = ENC_LEN + DEC_LEN - self.K = K - - def __getitem__(self, index): - tim = index // 100 - turbine = index % 100 - one_point = self.data[turbine, tim : (tim + self.total_len), :] - x = one_point[: self.enc_len, :] - y = one_point[(self.enc_len - 1) : self.total_len, :] - y = paddle.index_select( - y, paddle.to_tensor([0, self.K, self.K + 1, self.K + 2]), axis=1 - ) - return paddle.to_tensor(turbine, dtype=paddle.int64), x, y - - def __len__(self): - return self.data.shape[0] * (self.data.shape[1] - self.total_len) +import numpy as np +import paddle +import pandas as pd +from paddle import io +from sklearn import neighbors +from sklearn import preprocessing + + +class CRSData: + def __init__( + self, + file_path="../data/Wind Spatio-Temporal Dataset2.csv", + speed_scale=1.0, + 
speed_lwrbd=0.0, + speed_uprbd=25.0, + power_scale=1.0, + power_lwrbd=0.0, + power_uprbd=1.0, + K=5, + ): + self.csv_data = pd.read_csv(file_path, index_col=0, low_memory=False) + self.spatio_info = self.csv_data.iloc[0:2, range(0, 200)].transpose() + self.norm_spatio = 10.0 * preprocessing.scale(self.spatio_info, with_std=False) + self.K = K + + self.speed_scale = speed_scale + self.speed_lwrbd = speed_lwrbd + self.speed_uprbd = speed_uprbd + + self.power_scale = power_scale + self.power_lwrbd = power_lwrbd + self.power_uprbd = power_uprbd + + temporal_data = self.csv_data.iloc[4:].astype("float").transpose() + temporal_data.index = self.csv_data.iloc[3] + self.temporal_data = self.correct_ts_index(temporal_data) + self.temporal_data.columns = pd.to_datetime(self.temporal_data.columns) + + self.turbine_speed = self.temporal_data.iloc[range(0, 400, 2)] + self.turbine_power = self.temporal_data.iloc[range(1, 400, 2)] + self.mast_speed = self.temporal_data.iloc[[400, 402, 404]] + self.mast_direction = self.temporal_data.iloc[[401, 403, 405]] + + self.scale_data( + self.speed_scale, + self.speed_lwrbd, + self.speed_uprbd, + self.power_scale, + self.power_lwrbd, + self.power_uprbd, + ) + + self._get_neighbors() + + def correct_ts_index(self, original_df): + corrected_time_index = pd.date_range( + original_df.columns[0], original_df.columns[-1], freq="1H" + ) + # expand one hour + corrected_time_index = corrected_time_index.union( + [corrected_time_index[-1] + pd.Timedelta(1, unit="H")] + ) + corrected_data = original_df.copy() + corrected_data.columns = corrected_time_index + return corrected_data + + def scale_data( + self, + speed_scale=1.0, + speed_lwrbd=0.0, + speed_uprbd=25.0, + power_scale=1.0, + power_lwrbd=0.0, + power_uprbd=1.0, + ): + """Scale Data""" + self.norm_speed = ( + 2 + * speed_scale + * (self.turbine_speed - speed_lwrbd) + / (speed_uprbd - speed_lwrbd) + - speed_scale + ) + self.norm_power = ( + 2 + * power_scale + * (self.turbine_power - power_lwrbd) + / (power_uprbd - power_lwrbd) + - power_scale + ) + + def _get_neighbors(self): + nbrs = neighbors.NearestNeighbors(n_neighbors=self.K, algorithm="auto").fit( + self.norm_spatio + ) + self.kNN_neighbors = nbrs.kneighbors(self.norm_spatio)[1] + + speed_mat = self.norm_speed.to_numpy() + speed_diff = speed_mat[:, range(1, 30 * 24)] - speed_mat[:, range(30 * 24 - 1)] + norm_speed_diff = preprocessing.normalize(speed_diff) + + speed_diff_sim = np.matmul(norm_speed_diff, norm_speed_diff.transpose()) + + self.speed_diff_neighbors = (-speed_diff_sim).argsort()[:, : self.K] + + +class DataMgr(CRSData): + def __init__( + self, + file_path="../../data/Wind Spatio-Temporal Dataset2.csv", + train_len=60 * 24, + val_len=30 * 24, + time_len=365 * 24, + ENC_LEN=48, + DEC_LEN=12, + K=5, + similarity="spatio", + ): + super().__init__(file_path=file_path, K=K) + self.ids = ( + paddle.arange(200).astype(paddle.get_default_dtype()).reshape((200, 1)) + ) + if K <= 1: + self.K = 1 + self.neighbors = paddle.arange(200).reshape((200, 1)) + elif similarity == "spatio": + self.K = K + self.neighbors = self.kNN_neighbors + elif similarity == "speed diff": + self.K = K + self.neighbors = self.speed_diff_neighbors + else: + raise NameError("K or similarity not correctly defined") + + self.speed_tensor = paddle.to_tensor( + self.norm_speed.to_numpy(), dtype=paddle.get_default_dtype() + ).unsqueeze(-1) + self.power_tensor = paddle.to_tensor( + self.norm_power.to_numpy(), dtype=paddle.get_default_dtype() + ).unsqueeze(-1) + + self.time_features = ( + 
paddle.to_tensor( + self.norm_speed.columns.hour.values, dtype=paddle.get_default_dtype() + ) + .tile(repeat_times=(200, 1)) + .unsqueeze(-1) + ) + self.spatio_tensor = ( + paddle.to_tensor(self.norm_spatio, dtype=paddle.get_default_dtype()) + .unsqueeze(1) + .tile(repeat_times=(1, 8760, 1)) + ) + + knn_speed_list = [] + for idx in self.neighbors: + t_select = paddle.index_select( + self.speed_tensor, paddle.to_tensor(idx), axis=0 + ) + ndim = t_select.ndim + perm = list(range(ndim)) + perm[0] = 2 + perm[2] = 0 + knn_speed_list.append(t_select.transpose(perm=perm)) + + self.data = paddle.concat(knn_speed_list, axis=0) + + self.data = paddle.concat( + (self.data, self.power_tensor, self.time_features, self.spatio_tensor), + axis=2, + ) + + self.train_len = train_len + self.time_len = time_len + + self.enc_len = ENC_LEN + self.dec_len = DEC_LEN + self.total_len = ENC_LEN + DEC_LEN + + self.train_data = self.data[:, :train_len, :] + self.val_data = self.data[:, train_len : (train_len + val_len), :] + self.test_data = self.data[:, (train_len + val_len) :, :] + + +class wpDataset(io.Dataset): + def __init__(self, data, ENC_LEN=48, DEC_LEN=12, K=5): + self.data = data + self.enc_len = ENC_LEN + self.total_len = ENC_LEN + DEC_LEN + + def __getitem__(self, index): + tim = index // 200 + turbine = index % 200 + one_point = self.data[turbine, tim : (tim + self.total_len), :] + x = paddle.index_select( + one_point[: self.enc_len, :], + paddle.to_tensor([*range(5), *range(6, 9)]), + axis=1, + ) + y = one_point[(self.enc_len - 1) : self.total_len, :] + + y = y[:, 5:9] + return paddle.to_tensor(turbine, dtype=paddle.int64), x, y + + def __len__(self): + return self.data.shape[0] * (self.data.shape[1] - self.total_len) + + +class NRELDataKneighbors: + def __init__( + self, + folder_path="../data/", + file_path="1h_wyoming_wind_speed_100m.csv", + meta_path="1h_wyoming_meta.csv", + K=9, + ): + self.K = K + + self.df_wind_speed = pd.read_csv(folder_path + file_path, index_col=0) + self.df_meta = pd.read_csv(folder_path + meta_path, index_col=0) + self.wind_speed_tensor = paddle.to_tensor( + self.df_wind_speed.to_numpy(), dtype=paddle.get_default_dtype() + ) + self.time_series = pd.to_datetime( + [x.split("'")[1] for x in self.df_wind_speed.columns] + ) + self._scale_data() + self._get_neighbors() + + def _scale_data(self, lwr=0, upr=40, scale=1.0): + self.wind_speed_tensor = ( + 2 * scale * (self.wind_speed_tensor - lwr) / upr - scale + ) + + def _get_neighbors(self): + nbrs = neighbors.NearestNeighbors(n_neighbors=9, algorithm="auto").fit( + self.df_meta.iloc[:, :2] + ) + self.kNN_neighbors = nbrs.kneighbors(self.df_meta.iloc[:, :2])[1] + self.norm_spatio = 10.0 * preprocessing.scale( + self.df_meta.iloc[:, :2], with_std=False + ) + + +class NRELDataMgr(NRELDataKneighbors): + def __init__( + self, + folder_path="../data/", + file_path="1h_wyoming_wind_speed_100m.csv", + meta_path="1h_wyoming_meta.csv", + train_len=6 * 30 * 24, + val_len=2 * 30 * 24, + K=9, + ): + super().__init__( + folder_path=folder_path, file_path=file_path, meta_path=meta_path, K=K + ) + self.neighbors = self.kNN_neighbors + self.wind_speed_tensor = self.wind_speed_tensor.unsqueeze(-1) + self.spatio_tensor = paddle.to_tensor( + self.norm_spatio, dtype=paddle.get_default_dtype() + ).unsqueeze(1) + self.spatio_tensor = self.spatio_tensor.tile(repeat_times=(1, 8784, 1)) + + knn_speed_list = [] + for idx in self.neighbors: + select1 = paddle.index_select( + self.wind_speed_tensor, paddle.to_tensor(idx), axis=0 + ) + 
knn_speed_list.append(select1.transpose((2, 1, 0))) + self.data = paddle.concat(knn_speed_list, axis=0) + + self.data = paddle.concat((self.data, self.spatio_tensor), axis=2) + + self.hour = paddle.to_tensor( + self.time_series.hour.values, dtype=paddle.get_default_dtype() + ) + self.hour = self.hour.unsqueeze(0) + self.hour = self.hour.unsqueeze(-1) + self.hour = self.hour.tile(repeat_times=(100, 1, 1)) + + self.data = paddle.concat((self.data, self.hour), axis=2) + + self.train_data = self.data[:, :train_len, :] + self.val_data = self.data[:, train_len : (train_len + val_len), :] + self.test_data = self.data[:, (train_len + val_len) :, :] + + +class NRELwpDataset(io.Dataset): + def __init__(self, data, ENC_LEN=48, DEC_LEN=12, K=9): + self.data = data + self.enc_len = ENC_LEN + self.total_len = ENC_LEN + DEC_LEN + self.K = K + + def __getitem__(self, index): + tim = index // 100 + turbine = index % 100 + one_point = self.data[turbine, tim : (tim + self.total_len), :] + x = one_point[: self.enc_len, :] + y = one_point[(self.enc_len - 1) : self.total_len, :] + y = paddle.index_select( + y, paddle.to_tensor([0, self.K, self.K + 1, self.K + 2]), axis=1 + ) + return paddle.to_tensor(turbine, dtype=paddle.int64), x, y + + def __len__(self): + return self.data.shape[0] * (self.data.shape[1] - self.total_len) diff --git a/jointContribution/Deep-Spatio-Temporal/src/model.py b/jointContribution/Deep-Spatio-Temporal/src/model.py index 67da63cbaf..d8cc24cd52 100644 --- a/jointContribution/Deep-Spatio-Temporal/src/model.py +++ b/jointContribution/Deep-Spatio-Temporal/src/model.py @@ -1,335 +1,335 @@ -import paddle -import paddle.nn.functional as F -from paddle import nn -from src import utils - - -class Embedding(nn.Layer): - def __init__(self, embd_dim=5, num_pts=200): - super().__init__() - self.num_pts = num_pts - self.embd_dim = embd_dim - self.embedding = nn.Embedding( - num_pts, embd_dim, weight_attr=utils.get_weight_attr() - ) - - def forward(self, batch_ids): - pts_embedded = self.embedding(batch_ids) - return pts_embedded - - -class Encoder(nn.Layer): - def __init__( - self, - enc_dim=64, - dec_dim=32, - input_dim=8, - embedding_layer=None, - GRU_LSTM="GRU", - is_bidirectional=True, - ): - super().__init__() - self.enc_dim = enc_dim - self.dec_dim = dec_dim - - self.GRU_LSTM = GRU_LSTM - self.is_bidirectional = is_bidirectional - - self.embedding_layer = embedding_layer - if embedding_layer is not None: - embd_dim = embedding_layer.embd_dim - else: - embd_dim = 0 - - self.input_dim = input_dim + embd_dim - - if is_bidirectional: - direction = "bidirectional" - else: - direction = "forward" - - if GRU_LSTM == "GRU": - self.rnn = nn.GRU( - self.input_dim, - self.enc_dim, - direction=direction, - time_major=True, - weight_ih_attr=utils.get_weight_attr(), - weight_hh_attr=utils.get_weight_attr(), - bias_ih_attr=utils.get_bias_attr(), - bias_hh_attr=utils.get_bias_attr(), - ) - if GRU_LSTM == "LSTM": - self.rnn = nn.LSTM( - self.input_dim, - self.enc_dim, - direction=direction, - time_major=True, - weight_ih_attr=utils.get_weight_attr(), - weight_hh_attr=utils.get_weight_attr(), - bias_ih_attr=utils.get_bias_attr(), - bias_hh_attr=utils.get_bias_attr(), - ) - - if self.is_bidirectional: - self.fc = nn.Linear( - enc_dim * 2, - dec_dim, - weight_attr=utils.get_weight_attr(), - bias_attr=utils.get_bias_attr(), - ) - else: - self.fc = nn.Linear( - enc_dim, - dec_dim, - weight_attr=utils.get_weight_attr(), - bias_attr=utils.get_bias_attr(), - ) - - def forward(self, one_batch): - """The forward 
function of the encoder. - - Args: - one_batch (Tensor): The input batch. - Returns: - Tuple[Tensor, Tensor]: The encoder outputs and the hidden variable returned by the decoder. - """ - batch_ids = one_batch[0] - - rnn_input = one_batch[1].transpose(perm=[1, 0, 2]) - rnn_input = paddle.nan_to_num(rnn_input) - - enc_len = rnn_input.shape[0] - - if self.embedding_layer is not None: - pts_embedded = ( - self.embedding_layer(batch_ids) - .unsqueeze(0) - .tile(repeat_times=(enc_len, 1, 1)) - ) - rnn_input = paddle.concat((rnn_input, pts_embedded), axis=2) - - if self.GRU_LSTM == "GRU": - outputs, hidden = self.rnn(rnn_input) - elif self.GRU_LSTM == "LSTM": - outputs, (hidden, _) = self.rnn(rnn_input) - - if self.is_bidirectional: - hidden = paddle.tanh( - self.fc(paddle.concat((hidden[-2, :, :], hidden[-1, :, :]), axis=-1)) - ) - else: - hidden = paddle.tanh(self.fc(hidden[-1, :, :])) - return outputs, hidden - - -class Decoder(nn.Layer): - def __init__( - self, - enc_dim=64, - dec_dim=32, - dec_input_dim=4, - enc_len=48, - embedding_layer=None, - attention_ind=False, - GRU_LSTM="GRU", - is_bidirectional=True, - ): - super().__init__() - self.enc_dim = enc_dim - self.dec_dim = dec_dim - self.attention_ind = attention_ind - - self.GRU_LSTM = GRU_LSTM - self.is_bidirectional = is_bidirectional - - self.embedding_layer = embedding_layer - if embedding_layer is not None: - embd_dim = embedding_layer.embd_dim - else: - embd_dim = 0 - self.dec_input_dim = dec_input_dim + embd_dim - if GRU_LSTM == "GRU": - self.rnn = nn.GRU( - self.dec_input_dim, - dec_dim, - time_major=True, - weight_ih_attr=utils.get_weight_attr(), - weight_hh_attr=utils.get_weight_attr(), - bias_ih_attr=utils.get_bias_attr(), - bias_hh_attr=utils.get_bias_attr(), - ) - if GRU_LSTM == "LSTM": - self.rnn = nn.LSTM( - self.dec_input_dim, - dec_dim, - time_major=True, - weight_ih_attr=utils.get_weight_attr(), - weight_hh_attr=utils.get_weight_attr(), - bias_ih_attr=utils.get_bias_attr(), - bias_hh_attr=utils.get_bias_attr(), - ) - self.fc = nn.Linear( - dec_dim, - 1, - weight_attr=utils.get_weight_attr(), - bias_attr=utils.get_bias_attr(), - ) - - if self.attention_ind: - self.attn = nn.Linear(self.dec_input_dim + dec_dim, enc_len) - if self.is_bidirectional: - self.attn_combined = nn.Linear( - self.dec_input_dim + 2 * enc_dim, - self.dec_input_dim, - weight_attr=utils.get_weight_attr(), - bias_attr=utils.get_bias_attr(), - ) - else: - self.attn_combined = nn.Linear( - self.dec_input_dim + enc_dim, - self.dec_input_dim, - weight_attr=utils.get_weight_attr(), - bias_attr=utils.get_bias_attr(), - ) - - def forward(self, one_batch, encoder_outputs, hidden): - """The forward function of the decoder. - - Args: - one_batch (Tensor): The input batch. - encoder_outputs (Tensor): The encoder outputs. - hidden (Tensor): The hidden variable returned by the decoder. - Returns: - Tensor: The predicted value. 
- """ - # change size to 1 * size * dim - batch_ids = one_batch[0] - y_ = one_batch[2].transpose(perm=[1, 0, 2]) - encoder_outputs = encoder_outputs.transpose(perm=[1, 0, 2]) - - rnn_input = paddle.index_select(y_, paddle.to_tensor([0])) - rnn_input = paddle.nan_to_num(rnn_input) - - cell_state = paddle.zeros_like(hidden).unsqueeze(0) - - output_list = [] - for i in range(1, 13): - if self.embedding_layer is not None: - pts_embedded = self.embedding_layer(batch_ids).unsqueeze(0) - rnn_input = paddle.concat((rnn_input, pts_embedded), axis=2) - - if self.attention_ind: - attn_weights = F.softmax( - self.attn(paddle.concat((rnn_input[0, :, :], hidden), axis=1)) - ) - - attn_applied = paddle.bmm(attn_weights.unsqueeze(1), encoder_outputs) - attn_applied = attn_applied.squeeze(1) - - rnn_input = rnn_input.squeeze(0) - rnn_input = self.attn_combined( - paddle.concat((attn_applied, rnn_input), axis=1) - ) - rnn_input = rnn_input.unsqueeze(0) - - hidden = hidden.unsqueeze(0) - if self.GRU_LSTM == "GRU": - output, hidden = self.rnn(rnn_input, hidden) - elif self.GRU_LSTM == "LSTM": - output, (hidden, cell_state) = self.rnn(rnn_input, (hidden, cell_state)) - - assert (output == hidden).all() - # 1 * size * dec_dim - - output = output.squeeze(0) - hidden = hidden.squeeze(0) - output = self.fc(output) # output with size*1 prediction - - rnn_input = y_[i, :, 1:] - rnn_input = paddle.concat((rnn_input, output), axis=1) - rnn_input = rnn_input.unsqueeze(0) - - output_list.append(output) - - t_tmp = paddle.concat(output_list, axis=1) - perm = list(range(t_tmp.ndim)) - perm[0] = 1 - perm[1] = 0 - y_pred = t_tmp.transpose(perm=perm) - - return y_pred - - -class Seq2Seq(nn.Layer): - def __init__( - self, - enc_dim=64, - dec_dim=32, - input_dim=4, - K=5, - enc_len=48, - embedding_dim=5, - attention_ind=False, - GRU_LSTM="GRU", - is_bidirectional=False, - n_turbines=200, - device="cpu", - ): - super().__init__() - self.enc_dim = enc_dim - self.dec_dim = dec_dim - self.enc_input_dim = input_dim + K - 1 - self.dec_input_dim = input_dim - self.device = device - - if embedding_dim > 0: - self.embedding_layer = Embedding(embedding_dim, num_pts=n_turbines) - else: - self.embedding_layer = None - - self.attention_ind = attention_ind - - if GRU_LSTM == "GRU": - self.encoder = Encoder( - enc_dim, - dec_dim, - self.enc_input_dim, - self.embedding_layer, - GRU_LSTM="GRU", - is_bidirectional=is_bidirectional, - ) - self.decoder = Decoder( - enc_dim, - dec_dim, - self.dec_input_dim, - enc_len, - self.embedding_layer, - self.attention_ind, - GRU_LSTM="GRU", - is_bidirectional=is_bidirectional, - ) - if GRU_LSTM == "LSTM": - self.encoder = Encoder( - enc_dim, - dec_dim, - self.enc_input_dim, - self.embedding_layer, - GRU_LSTM="LSTM", - is_bidirectional=is_bidirectional, - ) - self.decoder = Decoder( - enc_dim, - dec_dim, - self.dec_input_dim, - enc_len, - self.embedding_layer, - self.attention_ind, - GRU_LSTM="LSTM", - is_bidirectional=is_bidirectional, - ) - - def forward(self, one_batch): - encoder_outputs, hidden = self.encoder(one_batch) - y_pred = self.decoder(one_batch, encoder_outputs, hidden) - return y_pred +import paddle +import paddle.nn.functional as F +from paddle import nn +from src import utils + + +class Embedding(nn.Layer): + def __init__(self, embd_dim=5, num_pts=200): + super().__init__() + self.num_pts = num_pts + self.embd_dim = embd_dim + self.embedding = nn.Embedding( + num_pts, embd_dim, weight_attr=utils.get_weight_attr() + ) + + def forward(self, batch_ids): + pts_embedded = 
self.embedding(batch_ids) + return pts_embedded + + +class Encoder(nn.Layer): + def __init__( + self, + enc_dim=64, + dec_dim=32, + input_dim=8, + embedding_layer=None, + GRU_LSTM="GRU", + is_bidirectional=True, + ): + super().__init__() + self.enc_dim = enc_dim + self.dec_dim = dec_dim + + self.GRU_LSTM = GRU_LSTM + self.is_bidirectional = is_bidirectional + + self.embedding_layer = embedding_layer + if embedding_layer is not None: + embd_dim = embedding_layer.embd_dim + else: + embd_dim = 0 + + self.input_dim = input_dim + embd_dim + + if is_bidirectional: + direction = "bidirectional" + else: + direction = "forward" + + if GRU_LSTM == "GRU": + self.rnn = nn.GRU( + self.input_dim, + self.enc_dim, + direction=direction, + time_major=True, + weight_ih_attr=utils.get_weight_attr(), + weight_hh_attr=utils.get_weight_attr(), + bias_ih_attr=utils.get_bias_attr(), + bias_hh_attr=utils.get_bias_attr(), + ) + if GRU_LSTM == "LSTM": + self.rnn = nn.LSTM( + self.input_dim, + self.enc_dim, + direction=direction, + time_major=True, + weight_ih_attr=utils.get_weight_attr(), + weight_hh_attr=utils.get_weight_attr(), + bias_ih_attr=utils.get_bias_attr(), + bias_hh_attr=utils.get_bias_attr(), + ) + + if self.is_bidirectional: + self.fc = nn.Linear( + enc_dim * 2, + dec_dim, + weight_attr=utils.get_weight_attr(), + bias_attr=utils.get_bias_attr(), + ) + else: + self.fc = nn.Linear( + enc_dim, + dec_dim, + weight_attr=utils.get_weight_attr(), + bias_attr=utils.get_bias_attr(), + ) + + def forward(self, one_batch): + """The forward function of the encoder. + + Args: + one_batch (Tensor): The input batch. + Returns: + Tuple[Tensor, Tensor]: The encoder outputs and the hidden variable returned by the decoder. + """ + batch_ids = one_batch[0] + + rnn_input = one_batch[1].transpose(perm=[1, 0, 2]) + rnn_input = paddle.nan_to_num(rnn_input) + + enc_len = rnn_input.shape[0] + + if self.embedding_layer is not None: + pts_embedded = ( + self.embedding_layer(batch_ids) + .unsqueeze(0) + .tile(repeat_times=(enc_len, 1, 1)) + ) + rnn_input = paddle.concat((rnn_input, pts_embedded), axis=2) + + if self.GRU_LSTM == "GRU": + outputs, hidden = self.rnn(rnn_input) + elif self.GRU_LSTM == "LSTM": + outputs, (hidden, _) = self.rnn(rnn_input) + + if self.is_bidirectional: + hidden = paddle.tanh( + self.fc(paddle.concat((hidden[-2, :, :], hidden[-1, :, :]), axis=-1)) + ) + else: + hidden = paddle.tanh(self.fc(hidden[-1, :, :])) + return outputs, hidden + + +class Decoder(nn.Layer): + def __init__( + self, + enc_dim=64, + dec_dim=32, + dec_input_dim=4, + enc_len=48, + embedding_layer=None, + attention_ind=False, + GRU_LSTM="GRU", + is_bidirectional=True, + ): + super().__init__() + self.enc_dim = enc_dim + self.dec_dim = dec_dim + self.attention_ind = attention_ind + + self.GRU_LSTM = GRU_LSTM + self.is_bidirectional = is_bidirectional + + self.embedding_layer = embedding_layer + if embedding_layer is not None: + embd_dim = embedding_layer.embd_dim + else: + embd_dim = 0 + self.dec_input_dim = dec_input_dim + embd_dim + if GRU_LSTM == "GRU": + self.rnn = nn.GRU( + self.dec_input_dim, + dec_dim, + time_major=True, + weight_ih_attr=utils.get_weight_attr(), + weight_hh_attr=utils.get_weight_attr(), + bias_ih_attr=utils.get_bias_attr(), + bias_hh_attr=utils.get_bias_attr(), + ) + if GRU_LSTM == "LSTM": + self.rnn = nn.LSTM( + self.dec_input_dim, + dec_dim, + time_major=True, + weight_ih_attr=utils.get_weight_attr(), + weight_hh_attr=utils.get_weight_attr(), + bias_ih_attr=utils.get_bias_attr(), + bias_hh_attr=utils.get_bias_attr(), 
+ ) + self.fc = nn.Linear( + dec_dim, + 1, + weight_attr=utils.get_weight_attr(), + bias_attr=utils.get_bias_attr(), + ) + + if self.attention_ind: + self.attn = nn.Linear(self.dec_input_dim + dec_dim, enc_len) + if self.is_bidirectional: + self.attn_combined = nn.Linear( + self.dec_input_dim + 2 * enc_dim, + self.dec_input_dim, + weight_attr=utils.get_weight_attr(), + bias_attr=utils.get_bias_attr(), + ) + else: + self.attn_combined = nn.Linear( + self.dec_input_dim + enc_dim, + self.dec_input_dim, + weight_attr=utils.get_weight_attr(), + bias_attr=utils.get_bias_attr(), + ) + + def forward(self, one_batch, encoder_outputs, hidden): + """The forward function of the decoder. + + Args: + one_batch (Tensor): The input batch. + encoder_outputs (Tensor): The encoder outputs. + hidden (Tensor): The hidden variable returned by the decoder. + Returns: + Tensor: The predicted value. + """ + # change size to 1 * size * dim + batch_ids = one_batch[0] + y_ = one_batch[2].transpose(perm=[1, 0, 2]) + encoder_outputs = encoder_outputs.transpose(perm=[1, 0, 2]) + + rnn_input = paddle.index_select(y_, paddle.to_tensor([0])) + rnn_input = paddle.nan_to_num(rnn_input) + + cell_state = paddle.zeros_like(hidden).unsqueeze(0) + + output_list = [] + for i in range(1, 13): + if self.embedding_layer is not None: + pts_embedded = self.embedding_layer(batch_ids).unsqueeze(0) + rnn_input = paddle.concat((rnn_input, pts_embedded), axis=2) + + if self.attention_ind: + attn_weights = F.softmax( + self.attn(paddle.concat((rnn_input[0, :, :], hidden), axis=1)) + ) + + attn_applied = paddle.bmm(attn_weights.unsqueeze(1), encoder_outputs) + attn_applied = attn_applied.squeeze(1) + + rnn_input = rnn_input.squeeze(0) + rnn_input = self.attn_combined( + paddle.concat((attn_applied, rnn_input), axis=1) + ) + rnn_input = rnn_input.unsqueeze(0) + + hidden = hidden.unsqueeze(0) + if self.GRU_LSTM == "GRU": + output, hidden = self.rnn(rnn_input, hidden) + elif self.GRU_LSTM == "LSTM": + output, (hidden, cell_state) = self.rnn(rnn_input, (hidden, cell_state)) + + assert (output == hidden).all() + # 1 * size * dec_dim + + output = output.squeeze(0) + hidden = hidden.squeeze(0) + output = self.fc(output) # output with size*1 prediction + + rnn_input = y_[i, :, 1:] + rnn_input = paddle.concat((rnn_input, output), axis=1) + rnn_input = rnn_input.unsqueeze(0) + + output_list.append(output) + + t_tmp = paddle.concat(output_list, axis=1) + perm = list(range(t_tmp.ndim)) + perm[0] = 1 + perm[1] = 0 + y_pred = t_tmp.transpose(perm=perm) + + return y_pred + + +class Seq2Seq(nn.Layer): + def __init__( + self, + enc_dim=64, + dec_dim=32, + input_dim=4, + K=5, + enc_len=48, + embedding_dim=5, + attention_ind=False, + GRU_LSTM="GRU", + is_bidirectional=False, + n_turbines=200, + device="cpu", + ): + super().__init__() + self.enc_dim = enc_dim + self.dec_dim = dec_dim + self.enc_input_dim = input_dim + K - 1 + self.dec_input_dim = input_dim + self.device = device + + if embedding_dim > 0: + self.embedding_layer = Embedding(embedding_dim, num_pts=n_turbines) + else: + self.embedding_layer = None + + self.attention_ind = attention_ind + + if GRU_LSTM == "GRU": + self.encoder = Encoder( + enc_dim, + dec_dim, + self.enc_input_dim, + self.embedding_layer, + GRU_LSTM="GRU", + is_bidirectional=is_bidirectional, + ) + self.decoder = Decoder( + enc_dim, + dec_dim, + self.dec_input_dim, + enc_len, + self.embedding_layer, + self.attention_ind, + GRU_LSTM="GRU", + is_bidirectional=is_bidirectional, + ) + if GRU_LSTM == "LSTM": + self.encoder = Encoder( + 
enc_dim, + dec_dim, + self.enc_input_dim, + self.embedding_layer, + GRU_LSTM="LSTM", + is_bidirectional=is_bidirectional, + ) + self.decoder = Decoder( + enc_dim, + dec_dim, + self.dec_input_dim, + enc_len, + self.embedding_layer, + self.attention_ind, + GRU_LSTM="LSTM", + is_bidirectional=is_bidirectional, + ) + + def forward(self, one_batch): + encoder_outputs, hidden = self.encoder(one_batch) + y_pred = self.decoder(one_batch, encoder_outputs, hidden) + return y_pred diff --git a/jointContribution/Deep-Spatio-Temporal/src/trainer.py b/jointContribution/Deep-Spatio-Temporal/src/trainer.py index ea8396f77d..c7804e961c 100644 --- a/jointContribution/Deep-Spatio-Temporal/src/trainer.py +++ b/jointContribution/Deep-Spatio-Temporal/src/trainer.py @@ -1,233 +1,233 @@ -import os - -import numpy as np -import paddle -import tqdm -from paddle import io -from src import datamgr -from src import utils - - -class EarlyStopping: - def __init__(self, patience=10, min_delta=0): - self.patience = patience - self.min_delta = min_delta - self.counter = 0 - self.best_loss = None - self.early_stop = False - - def __call__(self, val_loss): - if self.best_loss is None: - self.best_loss = val_loss - elif self.best_loss - val_loss > self.min_delta: - self.best_loss = val_loss - self.counter = 0 - elif self.best_loss - val_loss < self.min_delta: - self.counter += 1 - print(f"INFO: Early stopping counter {self.counter} of {self.patience}") - if self.counter >= self.patience: - print("INFO: Early stopping") - self.early_stop = True - - -class Trainer: - def __init__( - self, - model, - data_mgr, - optimizer, - criterion, - SAVE_FILE, - BATCH_SIZE, - ENC_LEN=48, - DEC_LEN=12, - name="wind_power", - ): - self.model = model - self.name = name - if name == "wind_power": - train_dataset = datamgr.wpDataset( - data_mgr.train_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN - ) - val_dataset = datamgr.wpDataset( - data_mgr.val_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN - ) - test_dataset = datamgr.wpDataset( - data_mgr.test_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN - ) - else: - train_dataset = datamgr.NRELwpDataset( - data_mgr.train_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN - ) - val_dataset = datamgr.NRELwpDataset( - data_mgr.val_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN - ) - test_dataset = datamgr.NRELwpDataset( - data_mgr.test_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN - ) - train_dataloader = io.DataLoader(train_dataset, batch_size=BATCH_SIZE) - val_dataloader = io.DataLoader(val_dataset, batch_size=BATCH_SIZE) - test_dataloader = io.DataLoader(test_dataset, batch_size=BATCH_SIZE) - - self.train_dataloader = train_dataloader - self.val_dataloader = val_dataloader - self.test_dataloader = test_dataloader - - self.train_dataset = train_dataset - self.val_dataset = val_dataset - self.test_dataset = test_dataset - - self.optimizer = optimizer - self.criterion = criterion - - self.SAVE_FILE = SAVE_FILE - - def train(self, epochs): - early_stopping = EarlyStopping() - for epoch in range(epochs): - print(" ") - print(f"Epoch {epoch+1} of {epochs}") - train_loss, train_mae, train_rmse = self.fit() - print(f"Train Loss: {train_loss:.4f}") - print(f"Train MAE: {np.array(train_mae).reshape(2,6)}") - print(f"Train RMSE: {np.array(train_rmse).reshape(2,6)}") - - val_loss, val_mae, val_rmse = self.validate() - print(f"Val Loss: {val_loss:.4f}") - print(f"Val MAE: {np.array(val_mae).reshape(2,6)}") - print(f"Val RMSE: {np.array(val_rmse).reshape(2,6)}") - - early_stopping(val_loss) - print(f"Best Val Loss: {early_stopping.best_loss:.4f}") - - if 
early_stopping.early_stop: - paddle.save( - self.model.state_dict(), - os.path.join("outputs", self.SAVE_FILE + ".pdparams"), - ) - break - else: - paddle.save( - self.model.state_dict(), - os.path.join("outputs", self.SAVE_FILE + ".pdparams"), - ) - - def fit(self): - print("Training") - self.model.train() - counter = 0 - running_loss = 0.0 - running_mae = [0.0] * 12 - running_rmse = [0.0] * 12 - prog_bar = tqdm.tqdm( - enumerate(self.train_dataloader), - total=int(len(self.train_dataset) / self.train_dataloader.batch_size), - ) - for i, data in prog_bar: - counter += 1 - self.optimizer.clear_grad() - y_pred = self.model(data) - y_true = data[2] - y_true = y_true[:, 1:, 0] - y_pred = y_pred.transpose((1, 0)) - mae, rmse = utils.cal_loss(y_true, y_pred, self.name) - - y_true, y_pred = self.rescale_output(y_true, y_pred) - - idx = ~paddle.isnan(y_true) - loss = self.criterion(y_pred[idx], y_true[idx]) - running_loss += loss.item() - loss.backward() - self.optimizer.step() - - running_mae = [x + y for x, y in zip(running_mae, mae)] - running_rmse = [x + y for x, y in zip(running_rmse, rmse)] - - train_loss = running_loss / counter - train_mae = [x / counter for x in running_mae] - train_rmse = [x / counter for x in running_rmse] - - return train_loss, train_mae, train_rmse - - def validate(self): - print("Validating") - self.model.eval() - counter = 0 - running_loss = 0.0 - running_mae = [0.0] * 12 - running_rmse = [0.0] * 12 - - prog_bar = tqdm.tqdm( - enumerate(self.val_dataloader), - total=int(len(self.val_dataset) / self.val_dataloader.batch_size), - ) - with paddle.no_grad(): - for i, data in prog_bar: - counter += 1 - y_pred = self.model(data) - y_true = data[2] - y_true = y_true[:, 1:, 0] - y_pred = y_pred.transpose((1, 0)) - mae, rmse = utils.cal_loss(y_true, y_pred, self.name) - - y_true, y_pred = self.rescale_output(y_true, y_pred) - - idx = ~paddle.isnan(y_true) - loss = self.criterion(y_pred[idx], y_true[idx]) - running_loss += loss.item() - - running_mae = [x + y for x, y in zip(running_mae, mae)] - running_rmse = [x + y for x, y in zip(running_rmse, rmse)] - - val_loss = running_loss / counter - val_mae = [x / counter for x in running_mae] - val_rmse = [x / counter for x in running_rmse] - - return val_loss, val_mae, val_rmse - - def report_test_error(self): - print("Calculating Test Error") - self.model.eval() - counter = 0 - running_loss = 0.0 - running_mae = [0.0] * 12 - running_rmse = [0.0] * 12 - - prog_bar = tqdm.tqdm( - enumerate(self.test_dataloader), - total=int(len(self.test_dataset) / self.test_dataloader.batch_size), - ) - with paddle.no_grad(): - for i, data in prog_bar: - counter += 1 - y_pred = self.model(data) - y_true = data[2] - y_true = y_true[:, 1:, 0] - y_pred = y_pred.transpose((1, 0)) - mae, rmse = utils.cal_loss(y_true, y_pred, self.name) - - y_true, y_pred = self.rescale_output(y_true, y_pred) - idx = ~paddle.isnan(y_true) - - loss = self.criterion(y_pred[idx], y_true[idx]) - running_loss += loss.item() - - running_mae = [x + y for x, y in zip(running_mae, mae)] - running_rmse = [x + y for x, y in zip(running_rmse, rmse)] - - test_loss = running_loss / counter - test_mae = [x / counter for x in running_mae] - test_rmse = [x / counter for x in running_rmse] - - print(f"Test Loss: {test_loss:.4f}") - print(f"Test MAE: {np.array(test_mae).reshape(2,6)}") - print(f"Test RMSE: {np.array(test_rmse).reshape(2,6)}") - - return test_loss, test_mae, test_rmse - - def rescale_output(self, y_true, y_pred): - for i in range(12): - y_true[:, i] = y_true[:, i] * 
np.sqrt(12 - i) - y_pred[:, i] = y_pred[:, i] * np.sqrt(12 - i) - - return y_true, y_pred +import os + +import numpy as np +import paddle +import tqdm +from paddle import io +from src import datamgr +from src import utils + + +class EarlyStopping: + def __init__(self, patience=10, min_delta=0): + self.patience = patience + self.min_delta = min_delta + self.counter = 0 + self.best_loss = None + self.early_stop = False + + def __call__(self, val_loss): + if self.best_loss is None: + self.best_loss = val_loss + elif self.best_loss - val_loss > self.min_delta: + self.best_loss = val_loss + self.counter = 0 + elif self.best_loss - val_loss < self.min_delta: + self.counter += 1 + print(f"INFO: Early stopping counter {self.counter} of {self.patience}") + if self.counter >= self.patience: + print("INFO: Early stopping") + self.early_stop = True + + +class Trainer: + def __init__( + self, + model, + data_mgr, + optimizer, + criterion, + SAVE_FILE, + BATCH_SIZE, + ENC_LEN=48, + DEC_LEN=12, + name="wind_power", + ): + self.model = model + self.name = name + if name == "wind_power": + train_dataset = datamgr.wpDataset( + data_mgr.train_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN + ) + val_dataset = datamgr.wpDataset( + data_mgr.val_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN + ) + test_dataset = datamgr.wpDataset( + data_mgr.test_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN + ) + else: + train_dataset = datamgr.NRELwpDataset( + data_mgr.train_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN + ) + val_dataset = datamgr.NRELwpDataset( + data_mgr.val_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN + ) + test_dataset = datamgr.NRELwpDataset( + data_mgr.test_data, ENC_LEN=ENC_LEN, DEC_LEN=DEC_LEN + ) + train_dataloader = io.DataLoader(train_dataset, batch_size=BATCH_SIZE) + val_dataloader = io.DataLoader(val_dataset, batch_size=BATCH_SIZE) + test_dataloader = io.DataLoader(test_dataset, batch_size=BATCH_SIZE) + + self.train_dataloader = train_dataloader + self.val_dataloader = val_dataloader + self.test_dataloader = test_dataloader + + self.train_dataset = train_dataset + self.val_dataset = val_dataset + self.test_dataset = test_dataset + + self.optimizer = optimizer + self.criterion = criterion + + self.SAVE_FILE = SAVE_FILE + + def train(self, epochs): + early_stopping = EarlyStopping() + for epoch in range(epochs): + print(" ") + print(f"Epoch {epoch+1} of {epochs}") + train_loss, train_mae, train_rmse = self.fit() + print(f"Train Loss: {train_loss:.4f}") + print(f"Train MAE: {np.array(train_mae).reshape(2,6)}") + print(f"Train RMSE: {np.array(train_rmse).reshape(2,6)}") + + val_loss, val_mae, val_rmse = self.validate() + print(f"Val Loss: {val_loss:.4f}") + print(f"Val MAE: {np.array(val_mae).reshape(2,6)}") + print(f"Val RMSE: {np.array(val_rmse).reshape(2,6)}") + + early_stopping(val_loss) + print(f"Best Val Loss: {early_stopping.best_loss:.4f}") + + if early_stopping.early_stop: + paddle.save( + self.model.state_dict(), + os.path.join("outputs", self.SAVE_FILE + ".pdparams"), + ) + break + else: + paddle.save( + self.model.state_dict(), + os.path.join("outputs", self.SAVE_FILE + ".pdparams"), + ) + + def fit(self): + print("Training") + self.model.train() + counter = 0 + running_loss = 0.0 + running_mae = [0.0] * 12 + running_rmse = [0.0] * 12 + prog_bar = tqdm.tqdm( + enumerate(self.train_dataloader), + total=int(len(self.train_dataset) / self.train_dataloader.batch_size), + ) + for i, data in prog_bar: + counter += 1 + self.optimizer.clear_grad() + y_pred = self.model(data) + y_true = data[2] + y_true = y_true[:, 1:, 0] + y_pred = 
y_pred.transpose((1, 0)) + mae, rmse = utils.cal_loss(y_true, y_pred, self.name) + + y_true, y_pred = self.rescale_output(y_true, y_pred) + + idx = ~paddle.isnan(y_true) + loss = self.criterion(y_pred[idx], y_true[idx]) + running_loss += loss.item() + loss.backward() + self.optimizer.step() + + running_mae = [x + y for x, y in zip(running_mae, mae)] + running_rmse = [x + y for x, y in zip(running_rmse, rmse)] + + train_loss = running_loss / counter + train_mae = [x / counter for x in running_mae] + train_rmse = [x / counter for x in running_rmse] + + return train_loss, train_mae, train_rmse + + def validate(self): + print("Validating") + self.model.eval() + counter = 0 + running_loss = 0.0 + running_mae = [0.0] * 12 + running_rmse = [0.0] * 12 + + prog_bar = tqdm.tqdm( + enumerate(self.val_dataloader), + total=int(len(self.val_dataset) / self.val_dataloader.batch_size), + ) + with paddle.no_grad(): + for i, data in prog_bar: + counter += 1 + y_pred = self.model(data) + y_true = data[2] + y_true = y_true[:, 1:, 0] + y_pred = y_pred.transpose((1, 0)) + mae, rmse = utils.cal_loss(y_true, y_pred, self.name) + + y_true, y_pred = self.rescale_output(y_true, y_pred) + + idx = ~paddle.isnan(y_true) + loss = self.criterion(y_pred[idx], y_true[idx]) + running_loss += loss.item() + + running_mae = [x + y for x, y in zip(running_mae, mae)] + running_rmse = [x + y for x, y in zip(running_rmse, rmse)] + + val_loss = running_loss / counter + val_mae = [x / counter for x in running_mae] + val_rmse = [x / counter for x in running_rmse] + + return val_loss, val_mae, val_rmse + + def report_test_error(self): + print("Calculating Test Error") + self.model.eval() + counter = 0 + running_loss = 0.0 + running_mae = [0.0] * 12 + running_rmse = [0.0] * 12 + + prog_bar = tqdm.tqdm( + enumerate(self.test_dataloader), + total=int(len(self.test_dataset) / self.test_dataloader.batch_size), + ) + with paddle.no_grad(): + for i, data in prog_bar: + counter += 1 + y_pred = self.model(data) + y_true = data[2] + y_true = y_true[:, 1:, 0] + y_pred = y_pred.transpose((1, 0)) + mae, rmse = utils.cal_loss(y_true, y_pred, self.name) + + y_true, y_pred = self.rescale_output(y_true, y_pred) + idx = ~paddle.isnan(y_true) + + loss = self.criterion(y_pred[idx], y_true[idx]) + running_loss += loss.item() + + running_mae = [x + y for x, y in zip(running_mae, mae)] + running_rmse = [x + y for x, y in zip(running_rmse, rmse)] + + test_loss = running_loss / counter + test_mae = [x / counter for x in running_mae] + test_rmse = [x / counter for x in running_rmse] + + print(f"Test Loss: {test_loss:.4f}") + print(f"Test MAE: {np.array(test_mae).reshape(2,6)}") + print(f"Test RMSE: {np.array(test_rmse).reshape(2,6)}") + + return test_loss, test_mae, test_rmse + + def rescale_output(self, y_true, y_pred): + for i in range(12): + y_true[:, i] = y_true[:, i] * np.sqrt(12 - i) + y_pred[:, i] = y_pred[:, i] * np.sqrt(12 - i) + + return y_true, y_pred diff --git a/jointContribution/Deep-Spatio-Temporal/src/utils.py b/jointContribution/Deep-Spatio-Temporal/src/utils.py index b60c7ad84c..8b6cbe0135 100644 --- a/jointContribution/Deep-Spatio-Temporal/src/utils.py +++ b/jointContribution/Deep-Spatio-Temporal/src/utils.py @@ -1,33 +1,33 @@ -import paddle - - -def cal_loss(y_true, y_pred, name="wind_power"): - y_true = (y_true + 1) / 2 - y_pred = (y_pred + 1) / 2 - diff = y_true - y_pred - if name == "wind_speed": - diff = diff * 40 - mae = [] - rmse = [] - for i in range(y_true.shape[1]): - x = diff[:, i].detach() - idx = ~paddle.isnan(x) - l1_x = 
paddle.abs(x[idx]).mean().item() - l2_x = (x[idx] ** 2).mean().item() ** 0.5 - - mae.append(l1_x) - rmse.append(l2_x) - - return mae, rmse - - -def get_weight_attr(): - return paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0, std=0.1)) - - -def get_bias_attr(): - return paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0)) - - -def count_parameters(model): - return sum(p.numel() for p in model.parameters() if not p.stop_gradient) +import paddle + + +def cal_loss(y_true, y_pred, name="wind_power"): + y_true = (y_true + 1) / 2 + y_pred = (y_pred + 1) / 2 + diff = y_true - y_pred + if name == "wind_speed": + diff = diff * 40 + mae = [] + rmse = [] + for i in range(y_true.shape[1]): + x = diff[:, i].detach() + idx = ~paddle.isnan(x) + l1_x = paddle.abs(x[idx]).mean().item() + l2_x = (x[idx] ** 2).mean().item() ** 0.5 + + mae.append(l1_x) + rmse.append(l2_x) + + return mae, rmse + + +def get_weight_attr(): + return paddle.ParamAttr(initializer=paddle.nn.initializer.Normal(mean=0, std=0.1)) + + +def get_bias_attr(): + return paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0)) + + +def count_parameters(model): + return sum(p.numel() for p in model.parameters() if not p.stop_gradient) diff --git a/jointContribution/Deep-Spatio-Temporal/train.py b/jointContribution/Deep-Spatio-Temporal/train.py index ed0619cb76..6a131e73a8 100644 --- a/jointContribution/Deep-Spatio-Temporal/train.py +++ b/jointContribution/Deep-Spatio-Temporal/train.py @@ -1,81 +1,81 @@ -import argparse -import os -import sys - -import paddle -import pandas as pd -from paddle import nn -from src import datamgr -from src import model -from src import trainer -from src import utils - - -def main(): - parser = argparse.ArgumentParser( - description="Deep Spatio Temporal Wind Forecasting" - ) - parser.add_argument("--name", default="wind_power", type=str, help="model name") - parser.add_argument("--epoch", default=300, type=int, help="max epochs") - parser.add_argument("--batch_size", default=20000, type=int, help="batch size") - parser.add_argument("--lr", default=0.001, type=float, help="learning rate") - parser.add_argument("--k", default=5, type=int, help="number of spatio neighbors") - parser.add_argument( - "--n_turbines", default=200, type=int, help="number of turbines" - ) - - args = parser.parse_args() - - print(f"Running with following command line arguments: {args}") - - BATCH_SIZE = args.batch_size - EPOCHS = args.epoch - LR = args.lr - K = args.k - SAVE_FILE = args.name - - if args.name == "wind_power": - if not os.path.exists("./data/wind_power.csv"): - sys.exit( - "No data found!!!\n" - "Download wind power data first (follow the instructions in readme)." - ) - data_mgr = datamgr.DataMgr( - file_path=os.path.join("./data", args.name + ".csv"), K=K - ) - elif args.name == "wind_speed": - if not os.path.exists("./data/wind_speed.csv"): - sys.exit( - "No data found!!!\n" - "Download wind speed data first (follow the instructions in readme)." 
- ) - data_mgr = datamgr.NRELDataMgr( - folder_path="./data/", - file_path="wind_speed.csv", - meta_path="wind_speed_meta.csv", - ) - model_obj = model.Seq2Seq(K=K, n_turbines=args.n_turbines) - - criterion = nn.MSELoss() - optimizer = paddle.optimizer.Adam( - parameters=model_obj.parameters(), learning_rate=LR - ) - print(f"model parameters:{utils.count_parameters(model_obj)}") - trainer_obj = trainer.Trainer( - model=model_obj, - data_mgr=data_mgr, - optimizer=optimizer, - criterion=criterion, - SAVE_FILE=SAVE_FILE, - BATCH_SIZE=BATCH_SIZE, - name=args.name, - ) - trainer_obj.train(epochs=EPOCHS) - loss, mae, rmse = trainer_obj.report_test_error() - outdf = pd.DataFrame({"MAE": mae, "RMSE": rmse}, index=[*range(1, 13)]) - outdf.to_csv(os.path.join("outputs", SAVE_FILE + "_metrics.csv")) - - -if __name__ == "__main__": - main() - print("Done!") +import argparse +import os +import sys + +import paddle +import pandas as pd +from paddle import nn +from src import datamgr +from src import model +from src import trainer +from src import utils + + +def main(): + parser = argparse.ArgumentParser( + description="Deep Spatio Temporal Wind Forecasting" + ) + parser.add_argument("--name", default="wind_power", type=str, help="model name") + parser.add_argument("--epoch", default=300, type=int, help="max epochs") + parser.add_argument("--batch_size", default=20000, type=int, help="batch size") + parser.add_argument("--lr", default=0.001, type=float, help="learning rate") + parser.add_argument("--k", default=5, type=int, help="number of spatio neighbors") + parser.add_argument( + "--n_turbines", default=200, type=int, help="number of turbines" + ) + + args = parser.parse_args() + + print(f"Running with following command line arguments: {args}") + + BATCH_SIZE = args.batch_size + EPOCHS = args.epoch + LR = args.lr + K = args.k + SAVE_FILE = args.name + + if args.name == "wind_power": + if not os.path.exists("./data/wind_power.csv"): + sys.exit( + "No data found!!!\n" + "Download wind power data first (follow the instructions in readme)." + ) + data_mgr = datamgr.DataMgr( + file_path=os.path.join("./data", args.name + ".csv"), K=K + ) + elif args.name == "wind_speed": + if not os.path.exists("./data/wind_speed.csv"): + sys.exit( + "No data found!!!\n" + "Download wind speed data first (follow the instructions in readme)." 
+ ) + data_mgr = datamgr.NRELDataMgr( + folder_path="./data/", + file_path="wind_speed.csv", + meta_path="wind_speed_meta.csv", + ) + model_obj = model.Seq2Seq(K=K, n_turbines=args.n_turbines) + + criterion = nn.MSELoss() + optimizer = paddle.optimizer.Adam( + parameters=model_obj.parameters(), learning_rate=LR + ) + print(f"model parameters:{utils.count_parameters(model_obj)}") + trainer_obj = trainer.Trainer( + model=model_obj, + data_mgr=data_mgr, + optimizer=optimizer, + criterion=criterion, + SAVE_FILE=SAVE_FILE, + BATCH_SIZE=BATCH_SIZE, + name=args.name, + ) + trainer_obj.train(epochs=EPOCHS) + loss, mae, rmse = trainer_obj.report_test_error() + outdf = pd.DataFrame({"MAE": mae, "RMSE": rmse}, index=[*range(1, 13)]) + outdf.to_csv(os.path.join("outputs", SAVE_FILE + "_metrics.csv")) + + +if __name__ == "__main__": + main() + print("Done!") diff --git a/jointContribution/HighResolution/ffd/engine.py b/jointContribution/HighResolution/ffd/engine.py index 61a4b244da..4d23bce02a 100644 --- a/jointContribution/HighResolution/ffd/engine.py +++ b/jointContribution/HighResolution/ffd/engine.py @@ -1,211 +1,211 @@ -from __future__ import annotations - -import math -import weakref -from collections import OrderedDict -from timeit import default_timer as timer -from typing import Any -from typing import Callable -from typing import Tuple - -import paddle - -from .losses import RegistrationLoss -from .losses import RegistrationResult -from .optim import slope_of_least_squares_fit - -PROFILING = False - - -class RegistrationEngine: - """Minimize registration loss until convergence.""" - - def __init__( - self, - model: paddle.nn.Layer, - loss: RegistrationLoss, - optimizer: paddle.optimizer.Optimizer, - max_steps: int = 500, - min_delta: float = 1e-06, - min_value: float = float("nan"), - max_history: int = 10, - ): - """Initialize registration loop.""" - self.model = model - self.loss = loss - self.optimizer = optimizer - self.num_steps = 0 - self.max_steps = max_steps - self.min_delta = min_delta - self.min_value = min_value - self.max_history = max(2, max_history) - self.loss_values = [] - self._eval_hooks = OrderedDict() - self._step_hooks = OrderedDict() - - @property - def loss_value(self) -> float: - if not self.loss_values: - return float("inf") - return self.loss_values[-1] - - def step(self) -> float: - """Perform one registration step. - - Returns: - Loss value prior to taking gradient step. - - """ - num_evals = 0 - - def closure() -> float: - self.optimizer.clear_grad() - t_start = timer() - result = self.loss.eval() - if PROFILING: - print(f"Forward pass in {timer() - t_start:.3f}s") - loss = result["loss"] - assert isinstance(loss, paddle.Tensor) - t_start = timer() - loss.backward() - if PROFILING: - print(f"Backward pass in {timer() - t_start:.3f}s") - nonlocal num_evals - num_evals += 1 - with paddle.no_grad(): - for hook in self._eval_hooks.values(): - hook(self, self.num_steps, num_evals, result) - return float(loss) - - loss_value = closure() - self.optimizer.step() - assert loss_value is not None - with paddle.no_grad(): - for hook in self._step_hooks.values(): - hook(self, self.num_steps, num_evals, loss_value) - return loss_value - - def run(self) -> float: - """Perform registration steps until convergence. - - Returns: - Loss value prior to taking last gradient step. 
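`run()` above keeps calling `step()` until `converged()` returns true or `max_steps` is reached: convergence is declared when the least-squares slope of the recent loss history (`slope_of_least_squares_fit` from `optim.py`) falls below `min_delta` in magnitude (treated as a relative tolerance when `min_delta` is negative), or when the loss drops under `min_value`. A small standalone check of that slope formula; the three loss values are made up for illustration:

# Sketch of the closed-form slope used by RegistrationEngine.converged():
# for values y_1..y_n at x = 1..n, slope = (sum(x*y) - mean(x)*sum(y)) / (sum(x^2) - n*mean(x)^2).
def slope(values):
    n = len(values)
    sum_x1 = (n + 1) / 2                    # mean of x = 1..n
    sum_x2 = n * (n + 1) * (2 * n + 1) / 6  # sum of squares 1^2 + ... + n^2
    sum_y1 = sum(values)
    sum_xy = sum((x + 1) * y for x, y in enumerate(values))
    return (sum_xy - sum_x1 * sum_y1) / (sum_x2 - n * sum_x1 * sum_x1)

print(slope([3.0, 2.0, 1.0]))  # -1.0: the loss falls by one unit per step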
- - """ - self.loss_values = [] - self.num_steps = 0 - while self.num_steps < self.max_steps and not self.converged(): - value = self.step() - self.num_steps += 1 - if math.isnan(value): - raise RuntimeError( - f"NaN value in registration loss at gradient step {self.num_steps}" - ) - if math.isinf(value): - raise RuntimeError( - f"Inf value in registration loss at gradient step {self.num_steps}" - ) - self.loss_values.append(value) - if len(self.loss_values) > self.max_history: - self.loss_values.pop(0) - return self.loss_value - - def converged(self) -> bool: - """Check convergence criteria.""" - values = self.loss_values - if not values: - return False - value = values[-1] - if self.min_delta < 0: - epsilon = abs(self.min_delta * value) - else: - epsilon = self.min_delta - slope = slope_of_least_squares_fit(values) - if abs(slope) < epsilon: - return True - if value < self.min_value: - return True - return False - - def register_eval_hook( - self, - hook: Callable[["RegistrationEngine", int, int, "RegistrationResult"], None], - ) -> "RemovableHandle": - r"""Registers an evaluation hook.""" - handle = RemovableHandle(self._eval_hooks) - self._eval_hooks[handle.id] = hook - return handle - - def register_step_hook( - self, hook: Callable[["RegistrationEngine", int, int, float], None] - ) -> "RemovableHandle": - r"""Registers a gradient step hook.""" - handle = RemovableHandle(self._step_hooks) - self._step_hooks[handle.id] = hook - return handle - - -class RemovableHandle: - r""" - A handle which provides the capability to remove a hook. - - Args: - hooks_dict (dict): A dictionary of hooks, indexed by hook ``id``. - extra_dict (Union[dict, List[dict]]): An additional dictionary or list of - dictionaries whose keys will be deleted when the same keys are - removed from ``hooks_dict``. 
- """ - - id: int - next_id: int = 0 - - def __init__(self, hooks_dict: Any, *, extra_dict: Any = None) -> None: - self.hooks_dict_ref = weakref.ref(hooks_dict) - self.id = RemovableHandle.next_id - RemovableHandle.next_id += 1 - - self.extra_dict_ref: Tuple = () - if isinstance(extra_dict, dict): - self.extra_dict_ref = (weakref.ref(extra_dict),) - elif isinstance(extra_dict, list): - self.extra_dict_ref = tuple(weakref.ref(d) for d in extra_dict) - - def remove(self) -> None: - hooks_dict = self.hooks_dict_ref() - if hooks_dict is not None and self.id in hooks_dict: - del hooks_dict[self.id] - - for ref in self.extra_dict_ref: - extra_dict = ref() - if extra_dict is not None and self.id in extra_dict: - del extra_dict[self.id] - - def __getstate__(self): - if self.extra_dict_ref is None: - return (self.hooks_dict_ref(), self.id) - else: - return ( - self.hooks_dict_ref(), - self.id, - tuple(ref() for ref in self.extra_dict_ref), - ) - - def __setstate__(self, state) -> None: - if state[0] is None: - # create a dead reference - self.hooks_dict_ref = weakref.ref(OrderedDict()) - else: - self.hooks_dict_ref = weakref.ref(state[0]) - self.id = state[1] - RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1) - - if len(state) < 3 or state[2] is None: - self.extra_dict_ref = () - else: - self.extra_dict_ref = tuple(weakref.ref(d) for d in state[2]) - - def __enter__(self) -> "RemovableHandle": - return self - - def __exit__(self, type: Any, value: Any, tb: Any) -> None: - self.remove() +from __future__ import annotations + +import math +import weakref +from collections import OrderedDict +from timeit import default_timer as timer +from typing import Any +from typing import Callable +from typing import Tuple + +import paddle + +from .losses import RegistrationLoss +from .losses import RegistrationResult +from .optim import slope_of_least_squares_fit + +PROFILING = False + + +class RegistrationEngine: + """Minimize registration loss until convergence.""" + + def __init__( + self, + model: paddle.nn.Layer, + loss: RegistrationLoss, + optimizer: paddle.optimizer.Optimizer, + max_steps: int = 500, + min_delta: float = 1e-06, + min_value: float = float("nan"), + max_history: int = 10, + ): + """Initialize registration loop.""" + self.model = model + self.loss = loss + self.optimizer = optimizer + self.num_steps = 0 + self.max_steps = max_steps + self.min_delta = min_delta + self.min_value = min_value + self.max_history = max(2, max_history) + self.loss_values = [] + self._eval_hooks = OrderedDict() + self._step_hooks = OrderedDict() + + @property + def loss_value(self) -> float: + if not self.loss_values: + return float("inf") + return self.loss_values[-1] + + def step(self) -> float: + """Perform one registration step. + + Returns: + Loss value prior to taking gradient step. 
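A single `step()` evaluates the loss inside a closure, backpropagates, lets every registered evaluation hook inspect the `RegistrationResult`, and only then applies `optimizer.step()`. A minimal driver sketch; `grid_transform`, `loss`, and `optimizer` are placeholder names for objects built elsewhere (e.g. as in `pairwise.py`):

# Sketch only: wiring and running a RegistrationEngine.
engine = RegistrationEngine(
    model=grid_transform,   # assumed: a SequentialTransform or similar module
    loss=loss,              # assumed: a RegistrationLoss such as PairwiseImageRegistrationLoss
    optimizer=optimizer,    # assumed: a paddle.optimizer.Optimizer over the model parameters
    max_steps=250,
    min_delta=1e-6,
)
final_loss = engine.run()   # repeats step() until converged() or max_steps
print(engine.num_steps, final_loss)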
+ + """ + num_evals = 0 + + def closure() -> float: + self.optimizer.clear_grad() + t_start = timer() + result = self.loss.eval() + if PROFILING: + print(f"Forward pass in {timer() - t_start:.3f}s") + loss = result["loss"] + assert isinstance(loss, paddle.Tensor) + t_start = timer() + loss.backward() + if PROFILING: + print(f"Backward pass in {timer() - t_start:.3f}s") + nonlocal num_evals + num_evals += 1 + with paddle.no_grad(): + for hook in self._eval_hooks.values(): + hook(self, self.num_steps, num_evals, result) + return float(loss) + + loss_value = closure() + self.optimizer.step() + assert loss_value is not None + with paddle.no_grad(): + for hook in self._step_hooks.values(): + hook(self, self.num_steps, num_evals, loss_value) + return loss_value + + def run(self) -> float: + """Perform registration steps until convergence. + + Returns: + Loss value prior to taking last gradient step. + + """ + self.loss_values = [] + self.num_steps = 0 + while self.num_steps < self.max_steps and not self.converged(): + value = self.step() + self.num_steps += 1 + if math.isnan(value): + raise RuntimeError( + f"NaN value in registration loss at gradient step {self.num_steps}" + ) + if math.isinf(value): + raise RuntimeError( + f"Inf value in registration loss at gradient step {self.num_steps}" + ) + self.loss_values.append(value) + if len(self.loss_values) > self.max_history: + self.loss_values.pop(0) + return self.loss_value + + def converged(self) -> bool: + """Check convergence criteria.""" + values = self.loss_values + if not values: + return False + value = values[-1] + if self.min_delta < 0: + epsilon = abs(self.min_delta * value) + else: + epsilon = self.min_delta + slope = slope_of_least_squares_fit(values) + if abs(slope) < epsilon: + return True + if value < self.min_value: + return True + return False + + def register_eval_hook( + self, + hook: Callable[["RegistrationEngine", int, int, "RegistrationResult"], None], + ) -> "RemovableHandle": + r"""Registers an evaluation hook.""" + handle = RemovableHandle(self._eval_hooks) + self._eval_hooks[handle.id] = hook + return handle + + def register_step_hook( + self, hook: Callable[["RegistrationEngine", int, int, float], None] + ) -> "RemovableHandle": + r"""Registers a gradient step hook.""" + handle = RemovableHandle(self._step_hooks) + self._step_hooks[handle.id] = hook + return handle + + +class RemovableHandle: + r""" + A handle which provides the capability to remove a hook. + + Args: + hooks_dict (dict): A dictionary of hooks, indexed by hook ``id``. + extra_dict (Union[dict, List[dict]]): An additional dictionary or list of + dictionaries whose keys will be deleted when the same keys are + removed from ``hooks_dict``. 
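`register_eval_hook()` and `register_step_hook()` both return such a handle, so a hook can either be detached explicitly with `remove()` or scoped with a `with` block. A short sketch; `engine` is an assumed, already constructed `RegistrationEngine`:

# Sketch: temporarily attach a step hook that logs the pre-step loss value.
def log_step(reg, num_steps, num_evals, loss_value):
    print(f"step {num_steps}: loss={loss_value:.5f} ({num_evals} evals)")

with engine.register_step_hook(log_step):  # handle is removed on exit
    engine.run()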
+ """ + + id: int + next_id: int = 0 + + def __init__(self, hooks_dict: Any, *, extra_dict: Any = None) -> None: + self.hooks_dict_ref = weakref.ref(hooks_dict) + self.id = RemovableHandle.next_id + RemovableHandle.next_id += 1 + + self.extra_dict_ref: Tuple = () + if isinstance(extra_dict, dict): + self.extra_dict_ref = (weakref.ref(extra_dict),) + elif isinstance(extra_dict, list): + self.extra_dict_ref = tuple(weakref.ref(d) for d in extra_dict) + + def remove(self) -> None: + hooks_dict = self.hooks_dict_ref() + if hooks_dict is not None and self.id in hooks_dict: + del hooks_dict[self.id] + + for ref in self.extra_dict_ref: + extra_dict = ref() + if extra_dict is not None and self.id in extra_dict: + del extra_dict[self.id] + + def __getstate__(self): + if self.extra_dict_ref is None: + return (self.hooks_dict_ref(), self.id) + else: + return ( + self.hooks_dict_ref(), + self.id, + tuple(ref() for ref in self.extra_dict_ref), + ) + + def __setstate__(self, state) -> None: + if state[0] is None: + # create a dead reference + self.hooks_dict_ref = weakref.ref(OrderedDict()) + else: + self.hooks_dict_ref = weakref.ref(state[0]) + self.id = state[1] + RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1) + + if len(state) < 3 or state[2] is None: + self.extra_dict_ref = () + else: + self.extra_dict_ref = tuple(weakref.ref(d) for d in state[2]) + + def __enter__(self) -> "RemovableHandle": + return self + + def __exit__(self, type: Any, value: Any, tb: Any) -> None: + self.remove() diff --git a/jointContribution/HighResolution/ffd/hooks.py b/jointContribution/HighResolution/ffd/hooks.py index 628f4fa606..045b9c4609 100644 --- a/jointContribution/HighResolution/ffd/hooks.py +++ b/jointContribution/HighResolution/ffd/hooks.py @@ -1,72 +1,72 @@ -from typing import Callable - -import paddle -from deepali.core import functional as U -from deepali.core.kernels import gaussian1d -from deepali.spatial import is_linear_transform - -from .engine import RegistrationEngine -from .engine import RegistrationResult - -RegistrationEvalHook = Callable[ - [RegistrationEngine, int, int, RegistrationResult], None -] -RegistrationStepHook = Callable[[RegistrationEngine, int, int, float], None] - - -def noop(reg: RegistrationEngine, *args, **kwargs) -> None: - """Dummy no-op loss evaluation hook.""" - ... 
- - -def normalize_linear_grad(reg: RegistrationEngine, *args, **kwargs) -> None: - """Loss evaluation hook for normalization of linear transformation gradient after backward pass.""" - denom = None - for param in reg.model.parameters(): - if not param.stop_gradient and param.grad is not None: - max_abs_grad = paddle.max(paddle.abs(param.grad)) - if denom is None or denom < max_abs_grad: - denom = max_abs_grad - if denom is None: - return - for param in reg.model.parameters(): - if not param.stop_gradient and param.grad is not None: - param.grad /= denom - - -def normalize_nonrigid_grad(reg: RegistrationEngine, *args, **kwargs) -> None: - """Loss evaluation hook for normalization of non-rigid transformation gradient after backward pass.""" - for param in reg.model.parameters(): - if not param.stop_gradient and param.grad is not None: - paddle.assign( - paddle.nn.functional.normalize(x=param.grad, p=2, axis=1), - output=param.grad, - ) - - -def normalize_grad_hook(transform) -> RegistrationEvalHook: - """Loss evaluation hook for normalization of transformation gradient after backward pass.""" - if is_linear_transform(transform): - return normalize_linear_grad - return normalize_nonrigid_grad - - -def _smooth_nonrigid_grad(reg: RegistrationEngine, sigma: float = 1) -> None: - """Loss evaluation hook for Gaussian smoothing of non-rigid transformation gradient after backward pass.""" - if sigma <= 0: - return - kernel = gaussian1d(sigma) - for param in reg.model.parameters(): - if not param.stop_gradient and param.grad is not None: - param.grad.copy_(U.conv(param.grad, kernel)) - - -def smooth_grad_hook(transform, sigma: float) -> RegistrationEvalHook: - """Loss evaluation hook for Gaussian smoothing of non-rigid gradient after backward pass.""" - if is_linear_transform(transform): - return noop - - def fn(reg: RegistrationEngine, *args, **kwargs): - return _smooth_nonrigid_grad(reg, sigma=sigma) - - return fn +from typing import Callable + +import paddle +from deepali.core import functional as U +from deepali.core.kernels import gaussian1d +from deepali.spatial import is_linear_transform + +from .engine import RegistrationEngine +from .engine import RegistrationResult + +RegistrationEvalHook = Callable[ + [RegistrationEngine, int, int, RegistrationResult], None +] +RegistrationStepHook = Callable[[RegistrationEngine, int, int, float], None] + + +def noop(reg: RegistrationEngine, *args, **kwargs) -> None: + """Dummy no-op loss evaluation hook.""" + ... 
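The hook factories below are meant to be registered as evaluation hooks on a `RegistrationEngine`, so they post-process parameter gradients right after `loss.backward()` and before `optimizer.step()`. A minimal wiring sketch mirroring how `register_pairwise()` in `pairwise.py` attaches them; `engine` and `transform` are assumed to already exist, and `NonRigidTransform` comes from `deepali.spatial`:

# Sketch: attach gradient post-processing hooks to an existing engine.
grad_sigma = 1.0  # Gaussian smoothing strength for non-rigid gradients
if isinstance(transform, NonRigidTransform) and grad_sigma > 0:
    engine.register_eval_hook(smooth_grad_hook(transform, sigma=grad_sigma))
engine.register_eval_hook(normalize_grad_hook(transform))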
+ + +def normalize_linear_grad(reg: RegistrationEngine, *args, **kwargs) -> None: + """Loss evaluation hook for normalization of linear transformation gradient after backward pass.""" + denom = None + for param in reg.model.parameters(): + if not param.stop_gradient and param.grad is not None: + max_abs_grad = paddle.max(paddle.abs(param.grad)) + if denom is None or denom < max_abs_grad: + denom = max_abs_grad + if denom is None: + return + for param in reg.model.parameters(): + if not param.stop_gradient and param.grad is not None: + param.grad /= denom + + +def normalize_nonrigid_grad(reg: RegistrationEngine, *args, **kwargs) -> None: + """Loss evaluation hook for normalization of non-rigid transformation gradient after backward pass.""" + for param in reg.model.parameters(): + if not param.stop_gradient and param.grad is not None: + paddle.assign( + paddle.nn.functional.normalize(x=param.grad, p=2, axis=1), + output=param.grad, + ) + + +def normalize_grad_hook(transform) -> RegistrationEvalHook: + """Loss evaluation hook for normalization of transformation gradient after backward pass.""" + if is_linear_transform(transform): + return normalize_linear_grad + return normalize_nonrigid_grad + + +def _smooth_nonrigid_grad(reg: RegistrationEngine, sigma: float = 1) -> None: + """Loss evaluation hook for Gaussian smoothing of non-rigid transformation gradient after backward pass.""" + if sigma <= 0: + return + kernel = gaussian1d(sigma) + for param in reg.model.parameters(): + if not param.stop_gradient and param.grad is not None: + param.grad.copy_(U.conv(param.grad, kernel)) + + +def smooth_grad_hook(transform, sigma: float) -> RegistrationEvalHook: + """Loss evaluation hook for Gaussian smoothing of non-rigid gradient after backward pass.""" + if is_linear_transform(transform): + return noop + + def fn(reg: RegistrationEngine, *args, **kwargs): + return _smooth_nonrigid_grad(reg, sigma=sigma) + + return fn diff --git a/jointContribution/HighResolution/ffd/losses.py b/jointContribution/HighResolution/ffd/losses.py index 7694cbd9d1..096b541407 100644 --- a/jointContribution/HighResolution/ffd/losses.py +++ b/jointContribution/HighResolution/ffd/losses.py @@ -1,382 +1,382 @@ -import re -from collections import defaultdict -from typing import Dict -from typing import Generator -from typing import List -from typing import Mapping -from typing import Optional -from typing import Sequence -from typing import Tuple -from typing import Type -from typing import TypeVar -from typing import Union - -import paddle -from deepali.core import Grid -from deepali.core import PaddingMode -from deepali.core import Sampling -from deepali.core import functional as U -from deepali.losses import BSplineLoss -from deepali.losses import DisplacementLoss -from deepali.losses import LandmarkPointDistance -from deepali.losses import PairwiseImageLoss -from deepali.losses import ParamsLoss -from deepali.losses import PointSetDistance -from deepali.losses import RegistrationLoss -from deepali.losses import RegistrationLosses -from deepali.losses import RegistrationResult -from deepali.modules import SampleImage -from deepali.spatial import BSplineTransform -from deepali.spatial import CompositeTransform -from deepali.spatial import SequentialTransform -from deepali.spatial import SpatialTransform -from paddle import Tensor -from paddle.nn import Layer - -RE_WEIGHT = re.compile( - "^((?P[0-9]+(\\.[0-9]+)?)\\s*[\\* ])?\\s*(?P[a-zA-Z0-9_-]+)\\s*(\\+\\s*(?P[0-9]+(\\.[0-9]+)?))?$" -) -RE_TERM_VAR = 
re.compile("^[a-zA-Z0-9_-]+\\((?P[a-zA-Z0-9_]+)\\)$") -TLayer = TypeVar("TLayer", bound=Layer) -TSpatialTransform = TypeVar("TSpatialTransform", bound=SpatialTransform) - - -class PairwiseImageRegistrationLoss(RegistrationLoss): - """Loss function for pairwise multi-channel image registration.""" - - def __init__( - self, - source_data: paddle.Tensor, - target_data: paddle.Tensor, - source_grid: Grid, - target_grid: Grid, - source_chns: Mapping[str, Union[int, Tuple[int, int]]], - target_chns: Mapping[str, Union[int, Tuple[int, int]]], - source_pset: Optional[paddle.Tensor] = None, - target_pset: Optional[paddle.Tensor] = None, - source_landmarks: Optional[paddle.Tensor] = None, - target_landmarks: Optional[paddle.Tensor] = None, - losses: Optional[RegistrationLosses] = None, - weights: Mapping[str, Union[float, str]] = None, - transform: Optional[Union[CompositeTransform, SpatialTransform]] = None, - sampling: Union[Sampling, str] = Sampling.LINEAR, - ): - """Initialize multi-channel registration loss function. - - Args: - source_data: Moving normalized multi-channel source image batch tensor. - source_data: Fixed normalized multi-channel target image batch tensor. - source_grid: Sampling grid of source image. - source_grid: Sampling grid of target image. - source_chns: Mapping from channel (loss, weight) name to index or range. - target_chns: Mapping from channel (loss, weight) name to index or range. - source_pset: Point sets defined with respect to source image grid. - target_pset: Point sets defined with respect to target image grid. - source_landmarks: Landmark points defined with respect to source image grid. - target_landmarks: Landmark points defined with respect to target image grid. - losses: Dictionary of named loss terms. Loss terms must be either a subclass of - ``PairwiseImageLoss``, ``DisplacementLoss``, ``PointSetDistance``, ``ParamsLoss``, - or ``paddle.nn.Layer``. In case of a ``PairwiseImageLoss``, the key (name) of the - loss term must be found in ``channels`` which identifies the corresponding ``target`` - and ``source`` data channels that this loss term relates to. If the name is not found - in the ``channels`` mapping, the loss term is called with all image channels as input. - If a loss term is not an instance of a known registration loss type, it is assumed to be a - regularization term without arguments, e.g., a ``paddle.nn.Layer`` which itself has a reference - to the parameters of the transformation that it is based on. - weights: Scalar weights of loss terms or name of channel with locally adaptive weights. - transform: Spatial transformation to apply to ``source`` image. - sampling: Image interpolation mode. 
- - """ - super().__init__() - self.register_buffer(name="_source_data", tensor=source_data) - self.register_buffer(name="_target_data", tensor=target_data) - self.source_grid = source_grid - self.target_grid = target_grid - self.source_chns = dict(source_chns or {}) - self.target_chns = dict(target_chns or {}) - self.source_pset = source_pset - self.target_pset = target_pset - self.source_landmarks = source_landmarks - self.target_landmarks = target_landmarks - if transform is None: - transform = SequentialTransform(self.target_grid) - elif isinstance(transform, SpatialTransform): - transform = SequentialTransform(transform) - elif not isinstance(transform, CompositeTransform): - raise TypeError( - "PairwiseImageRegistrationLoss() 'transform' must be of type CompositeTransform" - ) - self.transform = transform - self._sample_image = SampleImage( - target=self.target_grid, - source=self.source_grid, - sampling=sampling, - padding=PaddingMode.ZEROS, - align_centers=False, - ) - points = self.target_grid.coords(device=self._target_data.place) - self.register_buffer(name="grid_points", tensor=points.unsqueeze(axis=0)) - self.loss_terms = self.as_module_dict(losses) - self.weights = dict(weights or {}) - - @property - def device(self) -> str: - """Device on which loss is evaluated.""" - device = self._target_data.place - assert isinstance(device, paddle.base.libpaddle.Place) - return device - - def loss_terms_of_type(self, loss_type: Type[TLayer]) -> Dict[str, TLayer]: - """Get dictionary of loss terms of a specifictype.""" - return { - name: module - for name, module in self.loss_terms.items() - if isinstance(module, loss_type) - } - - def transforms_of_type( - self, transform_type: Type[TSpatialTransform] - ) -> List[TSpatialTransform]: - """Get list of spatial transformations of a specific type.""" - - def _iter_transforms(transform) -> Generator[SpatialTransform, None, None]: - if isinstance(transform, transform_type): - yield transform - elif isinstance(transform, CompositeTransform): - for t in transform.transforms(): - yield from _iter_transforms(t) - - transforms = list(_iter_transforms(self.transform)) - return transforms - - @property - def has_transform(self) -> bool: - """Whether a spatial transformation is set.""" - return len(self.transform) > 0 - - def target_data(self) -> paddle.Tensor: - """Target image tensor.""" - data = self._target_data - assert isinstance(data, paddle.Tensor) - return data - - def source_data(self, grid: Optional[paddle.Tensor] = None) -> paddle.Tensor: - """Sample source image at transformed target grid points.""" - data = self._source_data - assert isinstance(data, paddle.Tensor) - if grid is None: - return data - return self._sample_image(grid, data) - - def data_mask( - self, data: paddle.Tensor, channels: Dict[str, Union[int, Tuple[int, int]]] - ) -> paddle.Tensor: - """Get boolean mask from data tensor.""" - slice_ = self.as_slice(channels["msk"]) - start, stop = slice_.start, slice_.stop - start_0 = data.shape[1] + start if start < 0 else start - mask = paddle.slice(data, [1], [start_0], [start_0 + (stop - start)]) - return mask > 0.9 - - def overlap_mask( - self, source: paddle.Tensor, target: paddle.Tensor - ) -> Optional[paddle.Tensor]: - """Overlap mask at which to evaluate pairwise data term.""" - mask = self.data_mask(source, self.source_chns) - mask &= self.data_mask(target, self.target_chns) - return mask - - @classmethod - def as_slice(cls, arg: Union[int, Sequence[int]]) -> slice: - """Slice of image data channels associated with the 
specified name.""" - if isinstance(arg, int): - arg = (arg,) - if len(arg) == 1: - arg = arg[0], arg[0] + 1 - if len(arg) == 2: - arg = arg[0], arg[1], 1 - if len(arg) != 3: - raise ValueError( - f"{cls.__name__}.as_slice() 'arg' must be int or sequence of length 1, 2, or 3" - ) - return slice(*arg) - - @classmethod - def data_channels(cls, data: paddle.Tensor, c: slice) -> paddle.Tensor: - """Get subimage data tensor of named channel.""" - i = (slice(0, tuple(data.shape)[0]), c) + tuple( - slice(0, tuple(data.shape)[dim]) for dim in range(2, data.ndim) - ) - return data[i] - - def loss_input( - self, - name: str, - data: paddle.Tensor, - channels: Dict[str, Union[int, Tuple[int, int]]], - ) -> paddle.Tensor: - """Get input for named loss term.""" - if name in channels: - c = channels[name] - elif "img" not in channels: - raise RuntimeError( - f"Channels map contains neither entry for '{name}' nor 'img'" - ) - else: - c = channels["img"] - i: slice = self.as_slice(c) - return self.data_channels(data, i) - - def loss_mask( - self, - name: str, - data: paddle.Tensor, - channels: Dict[str, Union[int, Tuple[int, int]]], - mask: paddle.Tensor, - ) -> paddle.Tensor: - """Get mask for named loss term.""" - weight = self.weights.get(name, 1.0) - if not isinstance(weight, str): - return mask - match = RE_WEIGHT.match(weight) - if match is None: - raise RuntimeError( - f"Invalid weight string ('{weight}') for loss term '{name}'" - ) - chn = match.group("chn") - mul = match.group("mul") - add = match.group("add") - c = channels.get(chn) - if c is None: - raise RuntimeError( - f"Channels map contains no entry for '{name}' weight string '{weight}'" - ) - i = self.as_slice(c) - w = self.data_channels(data, i) - if mul is not None: - w = w * float(mul) - if add is not None: - w = w + float(add) - return w * mask - - def eval(self) -> RegistrationResult: - """Evaluate pairwise image registration loss.""" - result = {} - losses = {} - misc_excl = set() - x: Tensor = self.grid_points - y: Tensor = self.transform(x, grid=True) - variables = defaultdict(list) - for name, buf in self.transform.named_buffers(): - if not buf.stop_gradient: - var = name.rsplit(".", 1)[-1] - variables[var].append(buf) - variables["w"] = [U.move_dim(y - x, -1, 1)] - data_terms = self.loss_terms_of_type(PairwiseImageLoss) - misc_excl |= set(data_terms.keys()) - if data_terms: - source = self.source_data(y) - target = self.target_data() - mask = self.overlap_mask(source, target) - for name, term in data_terms.items(): - s = self.loss_input(name, source, self.source_chns) - t = self.loss_input(name, target, self.target_chns) - m = self.loss_mask(name, target, self.target_chns, mask) - losses[name] = term(s, t, mask=m) - result["source"] = source - result["target"] = target - result["mask"] = mask - dist_terms = self.loss_terms_of_type(PointSetDistance) - misc_excl |= set(dist_terms.keys()) - ldist_terms = { - k: v for k, v in dist_terms.items() if isinstance(v, LandmarkPointDistance) - } - dist_terms = {k: v for k, v in dist_terms.items() if k not in ldist_terms} - if dist_terms: - if self.source_pset is None: - raise RuntimeError(f"{type(self).__name__}() missing source point set") - if self.target_pset is None: - raise RuntimeError(f"{type(self).__name__}() missing target point set") - s = self.source_pset - t = self.transform(self.target_pset) - for name, term in dist_terms.items(): - losses[name] = term(t, s) - if ldist_terms: - if self.source_landmarks is None: - raise RuntimeError(f"{type(self).__name__}() missing source 
landmarks") - if self.target_landmarks is None: - raise RuntimeError(f"{type(self).__name__}() missing target landmarks") - s = self.source_landmarks - t = self.transform(self.target_landmarks) - for name, term in ldist_terms.items(): - losses[name] = term(t, s) - disp_terms = self.loss_terms_of_type(DisplacementLoss) - misc_excl |= set(disp_terms.keys()) - for name, term in disp_terms.items(): - match = RE_TERM_VAR.match(name) - if match: - var = match.group("var") - elif "v" in variables: - var = "v" - elif "u" in variables: - var = "u" - else: - var = "w" - bufs = variables.get(var) - if not bufs: - raise RuntimeError(f"Unknown variable in loss term name '{name}'") - value = paddle.to_tensor(data=0, dtype="float32", place=self.device) - for buf in bufs: - value += term(buf) - losses[name] = value - bspline_transforms = self.transforms_of_type(BSplineTransform) - bspline_terms = self.loss_terms_of_type(BSplineLoss) - misc_excl |= set(bspline_terms.keys()) - for name, term in bspline_terms.items(): - value = paddle.to_tensor(data=0, dtype="float32", place=self.device) - for bspline_transform in bspline_transforms: - value += term(bspline_transform.data()) - losses[name] = value - params_terms = self.loss_terms_of_type(ParamsLoss) - misc_excl |= set(params_terms.keys()) - for name, term in params_terms.items(): - value = paddle.to_tensor(data=0, dtype="float32", place=self.device) - count = 0 - for params in self.transform.parameters(): - value += term(params) - count += 1 - if count > 1: - value /= count - losses[name] = value - misc_terms = {k: v for k, v in self.loss_terms.items() if k not in misc_excl} - for name, term in misc_terms.items(): - losses[name] = term() - result["losses"] = losses - result["weights"] = self.weights - result["loss"] = self._weighted_sum(losses) - return result - - def _weighted_sum(self, losses: Mapping[str, paddle.Tensor]) -> paddle.Tensor: - """Compute weighted sum of loss terms.""" - loss = paddle.to_tensor(data=0, dtype="float32", place=self.device) - weights = self.weights - for name, value in losses.items(): - w = weights.get(name, 1.0) - if not isinstance(w, str): - value = w * value - loss += value.sum() - return loss - - -def weight_channel_names(weights: Mapping[str, Union[float, str]]) -> Dict[str, str]: - """Get names of channels that are used to weight loss term of another channel.""" - names = {} - for term, weight in weights.items(): - if not isinstance(weight, str): - continue - match = RE_WEIGHT.match(weight) - if match is None: - continue - names[term] = match.group("chn") - return names +import re +from collections import defaultdict +from typing import Dict +from typing import Generator +from typing import List +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +import paddle +from deepali.core import Grid +from deepali.core import PaddingMode +from deepali.core import Sampling +from deepali.core import functional as U +from deepali.losses import BSplineLoss +from deepali.losses import DisplacementLoss +from deepali.losses import LandmarkPointDistance +from deepali.losses import PairwiseImageLoss +from deepali.losses import ParamsLoss +from deepali.losses import PointSetDistance +from deepali.losses import RegistrationLoss +from deepali.losses import RegistrationLosses +from deepali.losses import RegistrationResult +from deepali.modules import SampleImage +from deepali.spatial import 
BSplineTransform +from deepali.spatial import CompositeTransform +from deepali.spatial import SequentialTransform +from deepali.spatial import SpatialTransform +from paddle import Tensor +from paddle.nn import Layer + +RE_WEIGHT = re.compile( + "^((?P[0-9]+(\\.[0-9]+)?)\\s*[\\* ])?\\s*(?P[a-zA-Z0-9_-]+)\\s*(\\+\\s*(?P[0-9]+(\\.[0-9]+)?))?$" +) +RE_TERM_VAR = re.compile("^[a-zA-Z0-9_-]+\\((?P[a-zA-Z0-9_]+)\\)$") +TLayer = TypeVar("TLayer", bound=Layer) +TSpatialTransform = TypeVar("TSpatialTransform", bound=SpatialTransform) + + +class PairwiseImageRegistrationLoss(RegistrationLoss): + """Loss function for pairwise multi-channel image registration.""" + + def __init__( + self, + source_data: paddle.Tensor, + target_data: paddle.Tensor, + source_grid: Grid, + target_grid: Grid, + source_chns: Mapping[str, Union[int, Tuple[int, int]]], + target_chns: Mapping[str, Union[int, Tuple[int, int]]], + source_pset: Optional[paddle.Tensor] = None, + target_pset: Optional[paddle.Tensor] = None, + source_landmarks: Optional[paddle.Tensor] = None, + target_landmarks: Optional[paddle.Tensor] = None, + losses: Optional[RegistrationLosses] = None, + weights: Mapping[str, Union[float, str]] = None, + transform: Optional[Union[CompositeTransform, SpatialTransform]] = None, + sampling: Union[Sampling, str] = Sampling.LINEAR, + ): + """Initialize multi-channel registration loss function. + + Args: + source_data: Moving normalized multi-channel source image batch tensor. + source_data: Fixed normalized multi-channel target image batch tensor. + source_grid: Sampling grid of source image. + source_grid: Sampling grid of target image. + source_chns: Mapping from channel (loss, weight) name to index or range. + target_chns: Mapping from channel (loss, weight) name to index or range. + source_pset: Point sets defined with respect to source image grid. + target_pset: Point sets defined with respect to target image grid. + source_landmarks: Landmark points defined with respect to source image grid. + target_landmarks: Landmark points defined with respect to target image grid. + losses: Dictionary of named loss terms. Loss terms must be either a subclass of + ``PairwiseImageLoss``, ``DisplacementLoss``, ``PointSetDistance``, ``ParamsLoss``, + or ``paddle.nn.Layer``. In case of a ``PairwiseImageLoss``, the key (name) of the + loss term must be found in ``channels`` which identifies the corresponding ``target`` + and ``source`` data channels that this loss term relates to. If the name is not found + in the ``channels`` mapping, the loss term is called with all image channels as input. + If a loss term is not an instance of a known registration loss type, it is assumed to be a + regularization term without arguments, e.g., a ``paddle.nn.Layer`` which itself has a reference + to the parameters of the transformation that it is based on. + weights: Scalar weights of loss terms or name of channel with locally adaptive weights. + transform: Spatial transformation to apply to ``source`` image. + sampling: Image interpolation mode. 
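The `*_chns` mappings name channel slices of the stacked image tensors (`"img"` is the fallback channel used by `loss_input()`, and `"msk"` is required by `data_mask()`), while a string-valued weight is parsed by `RE_WEIGHT` into the `mul`, `chn`, and `add` groups referenced later via `match.group(...)`, i.e. `mul * chn + add`, which selects a channel as a locally adaptive weight map. A sketch of such a configuration; the loss-term names `"sim"`/`"reg"` and the `"wgt"` channel are illustrative assumptions:

# Sketch: channel layout for a 3-channel image stack
# (channel 0: intensities, channel 1: foreground mask, channel 2: confidence weights).
target_chns = {"img": (0, 1), "msk": (1, 2), "wgt": (2, 3)}
source_chns = {"img": (0, 1), "msk": (1, 2), "wgt": (2, 3)}
weights = {
    "sim": "0.5 * wgt + 0.1",  # weight map: 0.5 * channel "wgt" + 0.1
    "reg": 0.001,              # plain scalar weight
}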
+ + """ + super().__init__() + self.register_buffer(name="_source_data", tensor=source_data) + self.register_buffer(name="_target_data", tensor=target_data) + self.source_grid = source_grid + self.target_grid = target_grid + self.source_chns = dict(source_chns or {}) + self.target_chns = dict(target_chns or {}) + self.source_pset = source_pset + self.target_pset = target_pset + self.source_landmarks = source_landmarks + self.target_landmarks = target_landmarks + if transform is None: + transform = SequentialTransform(self.target_grid) + elif isinstance(transform, SpatialTransform): + transform = SequentialTransform(transform) + elif not isinstance(transform, CompositeTransform): + raise TypeError( + "PairwiseImageRegistrationLoss() 'transform' must be of type CompositeTransform" + ) + self.transform = transform + self._sample_image = SampleImage( + target=self.target_grid, + source=self.source_grid, + sampling=sampling, + padding=PaddingMode.ZEROS, + align_centers=False, + ) + points = self.target_grid.coords(device=self._target_data.place) + self.register_buffer(name="grid_points", tensor=points.unsqueeze(axis=0)) + self.loss_terms = self.as_module_dict(losses) + self.weights = dict(weights or {}) + + @property + def device(self) -> str: + """Device on which loss is evaluated.""" + device = self._target_data.place + assert isinstance(device, paddle.base.libpaddle.Place) + return device + + def loss_terms_of_type(self, loss_type: Type[TLayer]) -> Dict[str, TLayer]: + """Get dictionary of loss terms of a specifictype.""" + return { + name: module + for name, module in self.loss_terms.items() + if isinstance(module, loss_type) + } + + def transforms_of_type( + self, transform_type: Type[TSpatialTransform] + ) -> List[TSpatialTransform]: + """Get list of spatial transformations of a specific type.""" + + def _iter_transforms(transform) -> Generator[SpatialTransform, None, None]: + if isinstance(transform, transform_type): + yield transform + elif isinstance(transform, CompositeTransform): + for t in transform.transforms(): + yield from _iter_transforms(t) + + transforms = list(_iter_transforms(self.transform)) + return transforms + + @property + def has_transform(self) -> bool: + """Whether a spatial transformation is set.""" + return len(self.transform) > 0 + + def target_data(self) -> paddle.Tensor: + """Target image tensor.""" + data = self._target_data + assert isinstance(data, paddle.Tensor) + return data + + def source_data(self, grid: Optional[paddle.Tensor] = None) -> paddle.Tensor: + """Sample source image at transformed target grid points.""" + data = self._source_data + assert isinstance(data, paddle.Tensor) + if grid is None: + return data + return self._sample_image(grid, data) + + def data_mask( + self, data: paddle.Tensor, channels: Dict[str, Union[int, Tuple[int, int]]] + ) -> paddle.Tensor: + """Get boolean mask from data tensor.""" + slice_ = self.as_slice(channels["msk"]) + start, stop = slice_.start, slice_.stop + start_0 = data.shape[1] + start if start < 0 else start + mask = paddle.slice(data, [1], [start_0], [start_0 + (stop - start)]) + return mask > 0.9 + + def overlap_mask( + self, source: paddle.Tensor, target: paddle.Tensor + ) -> Optional[paddle.Tensor]: + """Overlap mask at which to evaluate pairwise data term.""" + mask = self.data_mask(source, self.source_chns) + mask &= self.data_mask(target, self.target_chns) + return mask + + @classmethod + def as_slice(cls, arg: Union[int, Sequence[int]]) -> slice: + """Slice of image data channels associated with the 
specified name.""" + if isinstance(arg, int): + arg = (arg,) + if len(arg) == 1: + arg = arg[0], arg[0] + 1 + if len(arg) == 2: + arg = arg[0], arg[1], 1 + if len(arg) != 3: + raise ValueError( + f"{cls.__name__}.as_slice() 'arg' must be int or sequence of length 1, 2, or 3" + ) + return slice(*arg) + + @classmethod + def data_channels(cls, data: paddle.Tensor, c: slice) -> paddle.Tensor: + """Get subimage data tensor of named channel.""" + i = (slice(0, tuple(data.shape)[0]), c) + tuple( + slice(0, tuple(data.shape)[dim]) for dim in range(2, data.ndim) + ) + return data[i] + + def loss_input( + self, + name: str, + data: paddle.Tensor, + channels: Dict[str, Union[int, Tuple[int, int]]], + ) -> paddle.Tensor: + """Get input for named loss term.""" + if name in channels: + c = channels[name] + elif "img" not in channels: + raise RuntimeError( + f"Channels map contains neither entry for '{name}' nor 'img'" + ) + else: + c = channels["img"] + i: slice = self.as_slice(c) + return self.data_channels(data, i) + + def loss_mask( + self, + name: str, + data: paddle.Tensor, + channels: Dict[str, Union[int, Tuple[int, int]]], + mask: paddle.Tensor, + ) -> paddle.Tensor: + """Get mask for named loss term.""" + weight = self.weights.get(name, 1.0) + if not isinstance(weight, str): + return mask + match = RE_WEIGHT.match(weight) + if match is None: + raise RuntimeError( + f"Invalid weight string ('{weight}') for loss term '{name}'" + ) + chn = match.group("chn") + mul = match.group("mul") + add = match.group("add") + c = channels.get(chn) + if c is None: + raise RuntimeError( + f"Channels map contains no entry for '{name}' weight string '{weight}'" + ) + i = self.as_slice(c) + w = self.data_channels(data, i) + if mul is not None: + w = w * float(mul) + if add is not None: + w = w + float(add) + return w * mask + + def eval(self) -> RegistrationResult: + """Evaluate pairwise image registration loss.""" + result = {} + losses = {} + misc_excl = set() + x: Tensor = self.grid_points + y: Tensor = self.transform(x, grid=True) + variables = defaultdict(list) + for name, buf in self.transform.named_buffers(): + if not buf.stop_gradient: + var = name.rsplit(".", 1)[-1] + variables[var].append(buf) + variables["w"] = [U.move_dim(y - x, -1, 1)] + data_terms = self.loss_terms_of_type(PairwiseImageLoss) + misc_excl |= set(data_terms.keys()) + if data_terms: + source = self.source_data(y) + target = self.target_data() + mask = self.overlap_mask(source, target) + for name, term in data_terms.items(): + s = self.loss_input(name, source, self.source_chns) + t = self.loss_input(name, target, self.target_chns) + m = self.loss_mask(name, target, self.target_chns, mask) + losses[name] = term(s, t, mask=m) + result["source"] = source + result["target"] = target + result["mask"] = mask + dist_terms = self.loss_terms_of_type(PointSetDistance) + misc_excl |= set(dist_terms.keys()) + ldist_terms = { + k: v for k, v in dist_terms.items() if isinstance(v, LandmarkPointDistance) + } + dist_terms = {k: v for k, v in dist_terms.items() if k not in ldist_terms} + if dist_terms: + if self.source_pset is None: + raise RuntimeError(f"{type(self).__name__}() missing source point set") + if self.target_pset is None: + raise RuntimeError(f"{type(self).__name__}() missing target point set") + s = self.source_pset + t = self.transform(self.target_pset) + for name, term in dist_terms.items(): + losses[name] = term(t, s) + if ldist_terms: + if self.source_landmarks is None: + raise RuntimeError(f"{type(self).__name__}() missing source 
landmarks") + if self.target_landmarks is None: + raise RuntimeError(f"{type(self).__name__}() missing target landmarks") + s = self.source_landmarks + t = self.transform(self.target_landmarks) + for name, term in ldist_terms.items(): + losses[name] = term(t, s) + disp_terms = self.loss_terms_of_type(DisplacementLoss) + misc_excl |= set(disp_terms.keys()) + for name, term in disp_terms.items(): + match = RE_TERM_VAR.match(name) + if match: + var = match.group("var") + elif "v" in variables: + var = "v" + elif "u" in variables: + var = "u" + else: + var = "w" + bufs = variables.get(var) + if not bufs: + raise RuntimeError(f"Unknown variable in loss term name '{name}'") + value = paddle.to_tensor(data=0, dtype="float32", place=self.device) + for buf in bufs: + value += term(buf) + losses[name] = value + bspline_transforms = self.transforms_of_type(BSplineTransform) + bspline_terms = self.loss_terms_of_type(BSplineLoss) + misc_excl |= set(bspline_terms.keys()) + for name, term in bspline_terms.items(): + value = paddle.to_tensor(data=0, dtype="float32", place=self.device) + for bspline_transform in bspline_transforms: + value += term(bspline_transform.data()) + losses[name] = value + params_terms = self.loss_terms_of_type(ParamsLoss) + misc_excl |= set(params_terms.keys()) + for name, term in params_terms.items(): + value = paddle.to_tensor(data=0, dtype="float32", place=self.device) + count = 0 + for params in self.transform.parameters(): + value += term(params) + count += 1 + if count > 1: + value /= count + losses[name] = value + misc_terms = {k: v for k, v in self.loss_terms.items() if k not in misc_excl} + for name, term in misc_terms.items(): + losses[name] = term() + result["losses"] = losses + result["weights"] = self.weights + result["loss"] = self._weighted_sum(losses) + return result + + def _weighted_sum(self, losses: Mapping[str, paddle.Tensor]) -> paddle.Tensor: + """Compute weighted sum of loss terms.""" + loss = paddle.to_tensor(data=0, dtype="float32", place=self.device) + weights = self.weights + for name, value in losses.items(): + w = weights.get(name, 1.0) + if not isinstance(w, str): + value = w * value + loss += value.sum() + return loss + + +def weight_channel_names(weights: Mapping[str, Union[float, str]]) -> Dict[str, str]: + """Get names of channels that are used to weight loss term of another channel.""" + names = {} + for term, weight in weights.items(): + if not isinstance(weight, str): + continue + match = RE_WEIGHT.match(weight) + if match is None: + continue + names[term] = match.group("chn") + return names diff --git a/jointContribution/HighResolution/ffd/optim.py b/jointContribution/HighResolution/ffd/optim.py index 3e9626755c..d774978d5e 100644 --- a/jointContribution/HighResolution/ffd/optim.py +++ b/jointContribution/HighResolution/ffd/optim.py @@ -1,56 +1,56 @@ -from typing import Sequence - -import paddle -import paddle.optimizer as optim - - -def new_optimizer( - name: str, model: paddle.nn.Layer, **kwargs -) -> paddle.optimizer.Optimizer: - """Initialize new optimizer for parameters of given model. - - Args: - name: Name of optimizer. - model: Module whose parameters are to be optimized. - kwargs: Keyword arguments for named optimizer. - - Returns: - New optimizer instance. 
- - """ - cls = getattr(optim, name, None) - if cls is None: - raise ValueError(f"Unknown optimizer: {name}") - if not issubclass(cls, paddle.optimizer.Optimizer): - raise TypeError( - f"Requested type '{name}' is not a subclass of paddle.optimizer.Optimizer" - ) - if "learning_rate" in kwargs: - if "lr" in kwargs: - raise ValueError( - "new_optimizer() 'lr' and 'learning_rate' are mutually exclusive" - ) - kwargs["lr"] = kwargs.pop("learning_rate") - kwargs["learning_rate"] = kwargs.pop("lr") - return cls(parameters=model.parameters(), **kwargs) - - -def slope_of_least_squares_fit(values: Sequence[float]) -> float: - """Compute slope of least squares fit of line to last n objective function values - - See also: - - https://www.che.udel.edu/pdf/FittingData.pdf - - https://en.wikipedia.org/wiki/1_%2B_2_%2B_3_%2B_4_%2B_%E2%8B%AF - - https://proofwiki.org/wiki/Sum_of_Sequence_of_Squares - - """ - n = len(values) - if n < 2: - return float("nan") - if n == 2: - return values[1] - values[0] - sum_x1 = (n + 1) / 2 - sum_x2 = n * (n + 1) * (2 * n + 1) / 6 - sum_y1 = sum(values) - sum_xy = sum((x + 1) * y for x, y in enumerate(values)) - return (sum_xy - sum_x1 * sum_y1) / (sum_x2 - n * sum_x1 * sum_x1) +from typing import Sequence + +import paddle +import paddle.optimizer as optim + + +def new_optimizer( + name: str, model: paddle.nn.Layer, **kwargs +) -> paddle.optimizer.Optimizer: + """Initialize new optimizer for parameters of given model. + + Args: + name: Name of optimizer. + model: Module whose parameters are to be optimized. + kwargs: Keyword arguments for named optimizer. + + Returns: + New optimizer instance. + + """ + cls = getattr(optim, name, None) + if cls is None: + raise ValueError(f"Unknown optimizer: {name}") + if not issubclass(cls, paddle.optimizer.Optimizer): + raise TypeError( + f"Requested type '{name}' is not a subclass of paddle.optimizer.Optimizer" + ) + if "learning_rate" in kwargs: + if "lr" in kwargs: + raise ValueError( + "new_optimizer() 'lr' and 'learning_rate' are mutually exclusive" + ) + kwargs["lr"] = kwargs.pop("learning_rate") + kwargs["learning_rate"] = kwargs.pop("lr") + return cls(parameters=model.parameters(), **kwargs) + + +def slope_of_least_squares_fit(values: Sequence[float]) -> float: + """Compute slope of least squares fit of line to last n objective function values + + See also: + - https://www.che.udel.edu/pdf/FittingData.pdf + - https://en.wikipedia.org/wiki/1_%2B_2_%2B_3_%2B_4_%2B_%E2%8B%AF + - https://proofwiki.org/wiki/Sum_of_Sequence_of_Squares + + """ + n = len(values) + if n < 2: + return float("nan") + if n == 2: + return values[1] - values[0] + sum_x1 = (n + 1) / 2 + sum_x2 = n * (n + 1) * (2 * n + 1) / 6 + sum_y1 = sum(values) + sum_xy = sum((x + 1) * y for x, y in enumerate(values)) + return (sum_xy - sum_x1 * sum_y1) / (sum_x2 - n * sum_x1 * sum_x1) diff --git a/jointContribution/HighResolution/ffd/pairwise.py b/jointContribution/HighResolution/ffd/pairwise.py index 0d10917b27..6769f13e2a 100644 --- a/jointContribution/HighResolution/ffd/pairwise.py +++ b/jointContribution/HighResolution/ffd/pairwise.py @@ -1,869 +1,869 @@ -from pathlib import Path -from timeit import default_timer as timer -from typing import Any -from typing import Dict -from typing import Mapping -from typing import Optional -from typing import Sequence -from typing import Set -from typing import Tuple -from typing import Union - -import paddle -from deepali.core import Axes -from deepali.core import Device -from deepali.core import Grid -from deepali.core import 
PathStr -from deepali.core import functional as U -from deepali.core.config import join_kwargs_in_sequence -from deepali.data import FlowField -from deepali.data import Image -from deepali.losses import RegistrationResult -from deepali.losses import new_loss -from deepali.spatial import DisplacementFieldTransform -from deepali.spatial import HomogeneousTransform -from deepali.spatial import NonRigidTransform -from deepali.spatial import QuaternionRotation -from deepali.spatial import RigidQuaternionTransform -from deepali.spatial import SequentialTransform -from deepali.spatial import SpatialTransform -from deepali.spatial import Translation -from deepali.spatial import new_spatial_transform - -from .engine import RegistrationEngine -from .hooks import RegistrationEvalHook -from .hooks import RegistrationStepHook -from .hooks import normalize_grad_hook -from .hooks import smooth_grad_hook -from .losses import PairwiseImageRegistrationLoss -from .losses import weight_channel_names -from .optim import new_optimizer - - -def register_pairwise( - target: Union[PathStr, Dict[str, PathStr]], - source: Union[PathStr, Dict[str, PathStr]], - config: Optional[Dict[str, Any]] = None, - outdir: Optional[PathStr] = None, - verbose: Union[bool, int] = False, - debug: Union[bool, int] = False, - device: Optional[Device] = None, -) -> SpatialTransform: - """Register pair of images.""" - if config is None: - config = {} - if outdir is not None: - outdir = Path(outdir).absolute() - outdir.mkdir(parents=True, exist_ok=True) - loss_config, loss_weights = get_loss_config(config) - model_name, model_args, model_init = get_model_config(config) - optim_name, optim_args, optim_loop = get_optim_config(config) - levels, coarsest_level, finest_level = get_levels_config(config) - finest_spacing, min_size, pyramid_dims = get_pyramid_config(config) - device = get_device_config(config, device) - verbose = int(verbose) - debug = int(debug) - if verbose > 0: - print() - start = timer() - target_keys = set(loss_config.keys()) | set( - weight_channel_names(loss_weights).values() - ) - target_image, target_chns = read_images(target, names=target_keys, device=device) - source_image, source_chns = read_images( - source, names=loss_config.keys(), device=device - ) - if verbose > 3: - print(f"Read images from files in {timer() - start:.3f}s") - start_reg = timer() - target_image = append_mask(target_image, target_chns, config) - source_image = append_mask(source_image, source_chns, config) - norm_params = get_normalize_config(config, target_image, target_chns) - target_image = normalize_data_(target_image, target_chns, **norm_params) - source_image = normalize_data_(source_image, source_chns, **norm_params) - start = timer() - target_pyramid = target_image.pyramid( - levels, - start=finest_level, - end=coarsest_level, - dims=pyramid_dims, - spacing=finest_spacing, - min_size=min_size, - ) - source_pyramid = source_image.pyramid( - levels, - start=finest_level, - end=coarsest_level, - dims=pyramid_dims, - spacing=finest_spacing, - min_size=min_size, - ) - if verbose > 3: - print(f"Constructed Gaussian resolution pyramids in {timer() - start:.3f}s\n") - if verbose > 2: - print("Target image pyramid:") - print_pyramid_info(target_pyramid) - print("Source image pyramid:") - print_pyramid_info(source_pyramid) - del target_image - del source_image - source_grid = source_pyramid[finest_level].grid() - finest_grid = target_pyramid[finest_level].grid() - coarsest_grid = target_pyramid[coarsest_level].grid() - post_transform = 
get_post_transform(config, finest_grid, source_grid) - transform_downsample = model_args.pop("downsample", 0) - transform_grid = coarsest_grid.downsample(transform_downsample) - # here is ok - transform = new_spatial_transform( - model_name, grid=transform_grid, groups=1, **model_args - ) - if model_init: - if verbose > 1: - print(f"Fitting '{model_init}'...") - disp_field = FlowField.read(model_init).to(device=device) - assert isinstance(disp_field, FlowField) - start = timer() - transform = transform.to(device=device).fit(disp_field.batch()) - if verbose > 0: - print(f"Fitted initial displacement field in {timer() - start:.3f}s") - del disp_field - grid_transform = SequentialTransform(transform, post_transform) - grid_transform = grid_transform.to(device=device) - for level in range(coarsest_level, finest_level - 1, -1): - target_image = target_pyramid[level] - source_image = source_pyramid[level] - # here is ok - if outdir and debug > 0: - write_channels( - data=target_image.tensor(), - grid=target_image.grid(), - channels=target_chns, - outdir=outdir, - prefix=f"level_{level}_target_", - ) - write_channels( - data=source_image.tensor(), - grid=source_image.grid(), - channels=source_chns, - outdir=outdir, - prefix=f"level_{level}_source_", - ) - if level != coarsest_level: - start = timer() - transform_grid = target_image.grid().downsample(transform_downsample) - transform.grid_(transform_grid) - if verbose > 3: - print(f"Subdivided control point grid in {timer() - start:.3f}s") - grid_transform.grid_(target_image.grid()) - loss_terms = new_loss_terms(loss_config) - loss = PairwiseImageRegistrationLoss( - losses=loss_terms, - source_data=source_image.tensor().unsqueeze(axis=0), - target_data=target_image.tensor().unsqueeze(axis=0), - source_grid=source_image.grid(), - target_grid=target_image.grid(), - source_chns=source_chns, - target_chns=target_chns, - transform=grid_transform, - weights=loss_weights, - ) - loss = loss.to(device=device) - if outdir and debug > 1: - start = timer() - result = loss.eval() - if verbose > 3: - print(f"Evaluated initial loss in {timer() - start:.3f}s") - write_result( - result, - grid=target_image.grid(), - channels=source_chns, - outdir=outdir, - prefix=f"level_{level}_initial_", - ) - flow = grid_transform.flow(target_image.grid(), device=device) - flow[0].write(outdir / f"level_{level}_initial_def.mha") - optimizer = new_optimizer(optim_name, model=grid_transform, **optim_args) - engine = RegistrationEngine( - model=grid_transform, - loss=loss, - optimizer=optimizer, - max_steps=optim_loop.get("max_steps", 250), - min_delta=float(optim_loop.get("min_delta", "nan")), - ) - grad_sigma = float(optim_loop.get("smooth_grad", 0)) - if isinstance(transform, NonRigidTransform) and grad_sigma > 0: - engine.register_eval_hook(smooth_grad_hook(transform, sigma=grad_sigma)) - engine.register_eval_hook(normalize_grad_hook(transform)) - if verbose > 2: - engine.register_eval_hook(print_eval_loss_hook(level)) - elif verbose > 1: - engine.register_step_hook(print_step_loss_hook(level)) - if outdir and debug > 2: - engine.register_eval_hook( - write_result_hook( - level=level, - grid=target_image.grid(), - channels=source_chns, - outdir=outdir, - ) - ) - engine.run() - if verbose > 0 or outdir and debug > 0: - start = timer() - result = loss.eval() - if verbose > 3: - print(f"Evaluated final loss in {timer() - start:.3f}s") - if verbose > 0: - loss_value = float(result["loss"]) - print( - f"level={level:d}: loss={loss_value:.5f} ({engine.num_steps:d} steps)", - 
flush=True, - ) - if outdir and debug > 0: - write_result( - result, - grid=target_image.grid(), - channels=source_chns, - outdir=outdir, - prefix=f"level_{level}_final_", - ) - flow = grid_transform.flow(device=device) - flow[0].write(outdir / f"level_{level}_final_def.mha") - if verbose > 3: - print(f"Registered images in {timer() - start_reg:.3f}s") - if verbose > 0: - print() - return grid_transform - - -def append_mask( - image: Image, channels: Dict[str, Tuple[int, int]], config: Dict[str, Any] -) -> Image: - """Append foreground mask to data tensor.""" - data = image.tensor() - if "img" in channels: - lower_threshold, upper_threshold = get_clamp_config(config, "img") - mask = U.threshold( - data[slice(*channels["img"])], lower_threshold, upper_threshold - ) - else: - mask = paddle.ones(shape=(1,) + tuple(data.shape)[1:], dtype=data.dtype) - data = paddle.concat(x=[data, mask.astype(data.dtype)], axis=0) - channels["msk"] = tuple(data.shape)[0] - 1, tuple(data.shape)[0] - return Image(data, image.grid()) - - -def append_data( - data: Optional[paddle.Tensor], - channels: Dict[str, Tuple[int, int]], - name: str, - other: paddle.Tensor, -) -> paddle.Tensor: - """Append image data.""" - if data is None: - data = other - else: - data = paddle.concat(x=[data, other], axis=0) - channels[name] = tuple(data.shape)[0] - tuple(other.shape)[0], tuple(data.shape)[0] - return data - - -def read_images( - sample: Union[PathStr, Dict[str, PathStr]], names: Set[str], device: str -) -> Tuple[Image, Dict[str, Tuple[int, int]]]: - """Read image data from input files.""" - data = None - grid = None - if isinstance(sample, (Path, str)): - sample = {"img": sample} - img_path = sample.get("img") - seg_path = sample.get("seg") - sdf_path = sample.get("sdf") - for path in (img_path, seg_path, sdf_path): - if not path: - continue - grid = Grid.from_file(path).align_corners_(True) - break - else: - raise ValueError( - "One of 'img', 'seg', or 'sdf' input image file paths is required" - ) - assert grid is not None - dtype = "float32" - channels = {} - if "img" in names: - temp = Image.read(img_path, dtype=dtype, device=device) - data = append_data(data, channels, "img", temp.tensor()) - if "seg" in names: - if seg_path is None: - raise ValueError("Missing segmentation label image file path") - temp = Image.read(seg_path, dtype="int64", device=device) - temp_grid = temp.grid() - num_classes = int(temp.max()) + 1 - temp = temp.tensor().unsqueeze(axis=0) - temp = U.as_one_hot_tensor(temp, num_classes).to(dtype=dtype) - temp = temp.squeeze(axis=0) - temp = Image(temp, grid=temp_grid).sample(grid) - data = append_data(data, channels, "seg", temp.tensor()) - if "sdf" in names: - if sdf_path is None: - raise ValueError( - "Missing segmentation boundary signed distance field file path" - ) - temp = Image.read(sdf_path, dtype=dtype, device=device) - temp = temp.sample(shape=grid) - data = append_data(data, channels, "sdf", temp.tensor()) - if data is None: - if img_path is None: - raise ValueError("Missing intensity image file path") - data = Image.read(img_path, dtype=dtype, device=device) - channels = {"img": (0, 1)} - image = Image(data, grid=grid) - return image, channels - - -def get_device_config( - config: Dict[str, Any], device: Optional[Union[str, str]] = None -) -> str: - """Get configured PyTorch device.""" - if device is None: - device = config.get("device", "cpu") - if isinstance(device, int): - device = f"cuda:{device}" - elif device == "cuda": - device = "cuda:0" - return str(device).replace("cuda", "gpu") 
- - -def load_transform(path: PathStr, grid: Grid) -> SpatialTransform: - """Load transformation from file. - - Args: - path: File path from which to load spatial transformation. - grid: Target domain grid with respect to which transformation is defined. - - Returns: - Loaded spatial transformation. - - """ - target_grid = grid - - def convert_matrix( - matrix: paddle.Tensor, grid: Optional[Grid] = None - ) -> paddle.Tensor: - if grid is None: - pre = target_grid.transform(Axes.CUBE_CORNERS, Axes.WORLD) - post = target_grid.transform(Axes.WORLD, Axes.CUBE_CORNERS) - matrix = U.homogeneous_matmul(post, matrix, pre) - elif grid != target_grid: - pre = target_grid.transform(Axes.CUBE_CORNERS, grid=grid) - post = grid.transform(Axes.CUBE_CORNERS, grid=target_grid) - matrix = U.homogeneous_matmul(post, matrix, pre) - return matrix - - path = Path(path) - if path.suffix == ".pt": - value = paddle.load(path=path) - if isinstance(value, dict): - matrix = value.get("matrix") - if matrix is None: - raise KeyError( - "load_transform() .pt file dict must contain key 'matrix'" - ) - grid = value.get("grid") - elif isinstance(value, paddle.Tensor): - matrix = value - grid = None - else: - raise RuntimeError("load_transform() .pt file must contain tensor or dict") - if matrix.ndim == 2: - matrix = matrix.unsqueeze(axis=0) - if matrix.ndim != 3 or tuple(matrix.shape)[1:] != (3, 4): - raise RuntimeError( - "load_transform() .pt file tensor must have shape (N, 3, 4)" - ) - params = convert_matrix(matrix, grid) - return HomogeneousTransform(target_grid, params=params) - flow = FlowField.read(path, axes=Axes.WORLD) - flow = flow.axes(Axes.from_grid(target_grid)) - flow = flow.sample(shape=target_grid) - return DisplacementFieldTransform( - target_grid, params=flow.tensor().unsqueeze(axis=0) - ) - - -def get_post_transform( - config: Dict[str, Any], target_grid: Grid, source_grid: Grid -) -> Optional[SpatialTransform]: - """Get constant rigid transformation between image grid domains.""" - align = config.get("align", False) - if align is False or align is None: - return None - if isinstance(align, (Path, str)): - return load_transform(align, target_grid) - if align is True: - align_centers = True - align_directions = True - elif isinstance(align, dict): - align_centers = bool(align.get("centers", True)) - align_directions = bool(align.get("directions", True)) - else: - raise ValueError( - "get_post_transform() 'config' has invalid 'align' value: {align}" - ) - center_offset = ( - target_grid.world_to_cube(source_grid.center()).unsqueeze(axis=0) - if align_centers - else None - ) - rotation_matrix = ( - source_grid.direction() @ target_grid.direction().t().unsqueeze(axis=0) - if align_directions - else None - ) - transform = None - if center_offset is not None and rotation_matrix is not None: - transform = RigidQuaternionTransform( - target_grid, translation=center_offset, rotation=False - ) - transform.rotation.matrix_(rotation_matrix) - elif center_offset is not None: - transform = Translation(target_grid, params=center_offset) - elif rotation_matrix is not None: - transform = QuaternionRotation(target_grid, params=False) - transform.matrix_(rotation_matrix) - return transform - - -def get_clamp_config( - config: Dict[str, Any], channel: str -) -> Tuple[Optional[float], Optional[float]]: - """Get thresholds for named image channel. - - Args: - config: Configuration. - channel: Name of image channel. - - Returns: - lower_threshold: Lower threshold. - upper_threshold: Upper threshold. 
- - """ - input_config = config.get("input", {}) - if not isinstance(input_config, dict): - raise ValueError("get_clamp_config() 'input' value must be dict") - channel_config = input_config.get(channel) - if not isinstance(channel_config, dict): - channel_config = {"clamp": channel_config} - thresholds = channel_config.get("clamp", input_config.get("clamp")) - if thresholds is None: - thresholds = None, None - elif isinstance(thresholds, (int, float)): - thresholds = float(thresholds), None - if not isinstance(thresholds, (list, tuple)): - raise ValueError("get_clamp_config() value must be scalar or sequence") - if len(thresholds) != 2: - raise ValueError("get_clamp_config() value must be scalar or [min, max]") - thresholds = tuple(None if v is None else float(v) for v in thresholds) - lower_threshold, upper_threshold = thresholds - return lower_threshold, upper_threshold - - -def get_scale_config(config: Dict[str, Any], channel: str) -> Optional[float]: - """Get channel scaling factor.""" - input_config = config.get("input", {}) - if not isinstance(input_config, dict): - return None - channel_config = input_config.get(channel) - if not isinstance(channel_config, dict): - return None - value = channel_config.get("scale", input_config.get("scale")) - if value is None: - return None - return float(value) - - -def get_normalize_config( - config: Dict[str, Any], image: Image, channels: Dict[str, Tuple[int, int]] -) -> Dict[str, Dict[str, paddle.Tensor]]: - """Calculate data normalization parameters. - - Args: - config: Configuration. - image: Image data. - channels: Map of image channel slices. - - Returns: - Dictionary of normalization parameters. - - """ - scale = {} - shift = {} - for channel, (start, stop) in channels.items(): - start_1 = image.tensor().shape[0] + start if start < 0 else start - data = paddle.slice(image.tensor(), [0], [start_1], [start_1 + (stop - start)]) - lower_threshold, upper_threshold = get_clamp_config(config, channel) - scale_factor = get_scale_config(config, channel) - if channel in ("msk", "seg"): - if lower_threshold is None: - lower_threshold = 0 - if upper_threshold is None: - upper_threshold = 1 - else: - if lower_threshold is None: - lower_threshold = data.min() - if upper_threshold is None: - upper_threshold = data.max() - if scale_factor is None: - if upper_threshold > lower_threshold: - scale_factor = upper_threshold - lower_threshold - else: - scale_factor = 1 - else: - scale_factor = 1 / scale_factor - shift[channel] = lower_threshold - scale[channel] = scale_factor - return dict(shift=shift, scale=scale) - - -def normalize_data_( - image: Image, - channels: Dict[str, Tuple[int, int]], - shift: Optional[Dict[str, paddle.Tensor]] = None, - scale: Optional[Dict[str, paddle.Tensor]] = None, -) -> Image: - """Normalize image data.""" - if shift is None: - shift = {} - if scale is None: - scale = {} - for channel, (start, stop) in channels.items(): - start_2 = image.tensor().shape[0] + start if start < 0 else start - data = paddle.slice(image.tensor(), [0], [start_2], [start_2 + (stop - start)]) - offset = shift.get(channel) - if offset is not None: - data -= offset - norm = scale.get(channel) - if norm is not None: - data /= norm - if channel in ("msk", "seg"): - data.clip_(min=0, max=1) - return image - - -def get_levels_config(config: Dict[str, Any]) -> Tuple[int, int, int]: - """Get indices of coarsest and finest level from configuration.""" - cfg = config.get("pyramid", {}) - levels = cfg.get("levels", 4) - if isinstance(levels, int): - levels = levels 
- 1, 0 - if not isinstance(levels, (list, tuple)): - raise TypeError( - "register_pairwise() 'config' key 'pyramid.levels': value must be int, tuple, or list" - ) - coarsest_level, finest_level = levels - if finest_level > coarsest_level: - raise ValueError( - "register_pairwise() 'config' key 'pyramid.levels':" - + " finest level must be less or equal than coarsest level" - ) - levels = coarsest_level + 1 - if "max_level" in cfg: - levels = max(levels, cfg["max_level"]) - return levels, coarsest_level, finest_level - - -def get_pyramid_config( - config: Dict[str, Any] -) -> Tuple[Optional[Union[float, Sequence[float]]], int, Optional[Union[str, int]]]: - """Get settings of Gaussian resolution pyramid from configuration.""" - cfg = config.get("pyramid", {}) - min_size = cfg.get("min_size", 16) - finest_spacing = cfg.get("spacing") - dims = cfg.get("dims") - return finest_spacing, min_size, dims - - -def get_loss_config( - config: Dict[str, Any] -) -> Tuple[Dict[str, Dict[str, Any]], Dict[str, float]]: - """Instantiate terms of registration loss given configuration object. - - Args: - config: Configuration. - - Returns: - losses: Preparsed configuration of loss terms (cf. ``new_loss_terms()``). - weights: Weights of loss terms. - - """ - cfg = None - losses = {} - weights = {} - sections = "loss", "losses", "energy" - for name in sections: - if name in config: - if cfg is not None: - raise ValueError( - "get_loss_config() keys {sections} are mutually exclusive" - ) - cfg = config[name] - if not cfg: - cfg = "SSD" - if isinstance(cfg, str): - cfg = (cfg,) - if isinstance(cfg, Sequence): - names, cfg = cfg, {} - for i, name in enumerate(names): - cfg[f"loss_{i}"] = str(name) - if isinstance(cfg, dict): - for key, value in cfg.items(): - name = None - weight = 1 - kwargs = {} - if isinstance(value, str): - name = value - elif isinstance(value, Sequence): - if not value: - raise ValueError(f"get_loss_config() '{key}' loss entry is empty") - if len(value) == 1: - if isinstance(value[0], str): - value = {"name": value[0]} - elif len(value) > 1: - if isinstance(value[0], (int, float)): - value[0] = {"weight": value[0]} - if isinstance(value[1], str): - value[1] = {"name": value[1]} - value = join_kwargs_in_sequence(value) - if isinstance(value, dict): - kwargs = dict(value) - name = kwargs.pop("name", None) - weight = kwargs.pop("weight", 1) - elif len(value) == 2: - name = value[0] - kwargs = dict(value[1]) - elif len(value) == 3: - weight = float(value[0]) - name = value[1] - kwargs = dict(value[2]) - else: - raise ValueError( - f"get_loss_config() '{key}' invalid loss configuration" - ) - elif isinstance(value, dict): - kwargs = dict(value) - name = kwargs.pop("name", None) - weight = kwargs.pop("weight", 1) - else: - weight, name = value - if name is None: - raise ValueError(f"get_loss_config() missing 'name' for loss '{key}'") - if not isinstance(name, str): - raise TypeError(f"get_loss_config() 'name' of loss '{key}' must be str") - kwargs["name"] = name - losses[key] = kwargs - weights[key] = weight - else: - raise TypeError( - "get_loss_config() 'config' \"losses\" must be str, tuple, list, or dict" - ) - weights_config = config.get("weights", {}) - if isinstance(weights_config, (int, float)): - weights_config = (weights_config,) - if isinstance(weights_config, (list, tuple)): - names, weights_config = weights_config, {} - for i, weight in enumerate(names): - weights_config[f"loss_{i}"] = weight - if not isinstance(weights_config, dict): - raise TypeError( - "get_loss_config() 'weights' 
must be scalar, tuple, list, or dict" - ) - weights.update(weights_config) - losses = {k: v for k, v in losses.items() if weights.get(k, 0)} - weights = {k: v for k, v in weights.items() if k in losses} - return losses, weights - - -def new_loss_terms(config: Dict[str, Any]) -> Dict[str, paddle.nn.Layer]: - """Instantiate terms of registration loss. - - Args: - config: Preparsed configuration of loss terms. - target_tree: Target vessel centerline tree. - - Returns: - Mapping from channel or loss name to loss module instance. - - """ - losses = {} - for key, value in config.items(): - kwargs = dict(value) - name = kwargs.pop("name", None) - _ = kwargs.pop("weight", None) - if name is None: - raise ValueError(f"new_loss_terms() missing 'name' for loss '{key}'") - if not isinstance(name, str): - raise TypeError(f"new_loss_terms() 'name' of loss '{key}' must be str") - loss = new_loss(name, **kwargs) - losses[key] = loss - return losses - - -def get_model_config( - config: Dict[str, Any] -) -> Tuple[str, Dict[str, Any], Optional[str]]: - """Get configuration of transformation model to use.""" - cfg = config.get("model", {}) - cfg = dict(name=cfg) if isinstance(cfg, str) else dict(cfg) - model_name = cfg.pop("name") - assert isinstance(model_name, str) - assert model_name != "" - model_init = cfg.pop("init", None) - if model_init is not None: - model_init = Path(model_init).as_posix() - model_args = dict(cfg.get(model_name, cfg)) - return model_name, model_args, model_init - - -def get_optim_config( - config: Dict[str, Any] -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: - """Get configuration of optimizer to use.""" - cfg = config.get("optim", {}) - cfg = dict(name=cfg) if isinstance(cfg, str) else dict(cfg) - if "optimizer" in cfg: - if "name" in cfg: - raise ValueError( - "get_optim_config() keys ('name', 'optimizer') are mutually exclusive" - ) - cfg["name"] = cfg.pop("optimizer") - optim_name = str(cfg.pop("name", "LBFGS")) - optim_loop = {} - for key in ("max_steps", "min_delta", "smooth_grad"): - if key in cfg: - optim_loop[key] = cfg.pop(key) - optim_args = {k: v for k, v in cfg.items() if isinstance(k, str) and k[0].islower()} - optim_args.update(cfg.get(optim_name, {})) - lr_keys = "step_size", "learning_rate" - for lr_key in ("step_size", "learning_rate"): - if lr_key in optim_args: - if "lr" in optim_args: - raise ValueError( - f"get_optim_config() keys {lr_keys + ('lr',)} are mutually exclusive" - ) - optim_args["lr"] = optim_args.pop(lr_key) - return optim_name, optim_args, optim_loop - - -@paddle.no_grad() -def write_channels( - data: paddle.Tensor, - grid: Grid, - channels: Mapping[str, Tuple[int, int]], - outdir: PathStr, - prefix: str = "", -) -> None: - """Write image channels.""" - for name, (start, stop) in channels.items(): - image = data[slice(start, stop, 1)] - if name == "seg": - image = image.argmax(axis=0, keepdim=True).astype("uint8") - elif name == "msk": - image = image.mul(255).clip_(min=0, max=255).astype("uint8") - if not isinstance(image, Image): - image = Image(image, grid=grid) - image.write(outdir / f"{prefix}{name}.mha") - - -@paddle.no_grad() -def write_result( - result: RegistrationResult, - grid: Grid, - channels: Mapping[str, Tuple[int, int]], - outdir: PathStr, - prefix: str = "", -) -> None: - """Write registration result to output directory.""" - data = result["source"] - assert isinstance(data, paddle.Tensor) - write_channels(data[0], grid=grid, channels=channels, outdir=outdir, prefix=prefix) - data = result["mask"] - assert isinstance(data, 
paddle.Tensor) - if data.dtype == "bool": - data = data.astype("uint8").multiply_(y=paddle.to_tensor(255)) - mask = Image(data[0], grid=grid) - mask.write(outdir / f"{prefix}olm.mha") - - -def write_result_hook( - level: int, grid: Grid, channels: Mapping[str, Tuple[int, int]], outdir: Path -) -> RegistrationEvalHook: - """Get callback function for writing results after each evaluation.""" - - def fn( - _: RegistrationEngine, - num_steps: int, - num_evals: int, - result: RegistrationResult, - ) -> None: - prefix = f"level_{level}_step_{num_steps:03d}_eval_{num_evals}_" - write_result(result, grid=grid, channels=channels, outdir=outdir, prefix=prefix) - - return fn - - -def print_eval_loss_hook(level: int) -> RegistrationEvalHook: - """Get callback function for printing loss after each evaluation.""" - - def fn( - _: RegistrationEngine, num_steps: int, num_eval: int, result: RegistrationResult - ) -> None: - loss = float(result["loss"]) - message = f" {num_steps:>4d}:" - message += f" {loss:>12.05f} (loss)" - weights: Dict[str, Union[str, float]] = result.get("weights", {}) - losses: Dict[str, paddle.Tensor] = result["losses"] - for name, value in losses.items(): - value = float(value) - weight = weights.get(name, 1.0) - if not isinstance(weight, str): - value *= weight - elif "+" in weight: - weight = f"({weight})" - message += f", {value:>12.05f} [{weight} * {name}]" - if num_eval > 1: - message += " [evals={num_eval:d}]" - print(message, flush=True) - - return fn - - -def print_step_loss_hook(level: int) -> RegistrationStepHook: - """Get callback function for printing loss after each step.""" - - def fn(_: RegistrationEngine, num_steps: int, num_eval: int, loss: float) -> None: - message = f" {num_steps:>4d}: {loss:>12.05f}" - if num_eval > 1: - message += " [evals={num_eval:d}]" - print(message, flush=True) - - return fn - - -def print_pyramid_info(pyramid: Dict[str, Image]) -> None: - """Print information of image resolution pyramid.""" - levels = sorted(pyramid.keys()) - for level in reversed(levels): - grid = pyramid[level].grid() - size = ", ".join([f"{n:>3d}" for n in tuple(grid.shape)]) - origin = ", ".join([f"{n:.2f}" for n in grid.origin()]) - extent = ", ".join([f"{n:.2f}" for n in grid.extent()]) - domain = ", ".join([f"{n:.2f}" for n in grid.cube_extent()]) - print( - f"- Level {level}:" - + f" size=({size})" - + f", origin=({origin})" - + f", extent=({extent})" - + f", domain=({domain})" - ) - print() +from pathlib import Path +from timeit import default_timer as timer +from typing import Any +from typing import Dict +from typing import Mapping +from typing import Optional +from typing import Sequence +from typing import Set +from typing import Tuple +from typing import Union + +import paddle +from deepali.core import Axes +from deepali.core import Device +from deepali.core import Grid +from deepali.core import PathStr +from deepali.core import functional as U +from deepali.core.config import join_kwargs_in_sequence +from deepali.data import FlowField +from deepali.data import Image +from deepali.losses import RegistrationResult +from deepali.losses import new_loss +from deepali.spatial import DisplacementFieldTransform +from deepali.spatial import HomogeneousTransform +from deepali.spatial import NonRigidTransform +from deepali.spatial import QuaternionRotation +from deepali.spatial import RigidQuaternionTransform +from deepali.spatial import SequentialTransform +from deepali.spatial import SpatialTransform +from deepali.spatial import Translation +from deepali.spatial 
import new_spatial_transform + +from .engine import RegistrationEngine +from .hooks import RegistrationEvalHook +from .hooks import RegistrationStepHook +from .hooks import normalize_grad_hook +from .hooks import smooth_grad_hook +from .losses import PairwiseImageRegistrationLoss +from .losses import weight_channel_names +from .optim import new_optimizer + + +def register_pairwise( + target: Union[PathStr, Dict[str, PathStr]], + source: Union[PathStr, Dict[str, PathStr]], + config: Optional[Dict[str, Any]] = None, + outdir: Optional[PathStr] = None, + verbose: Union[bool, int] = False, + debug: Union[bool, int] = False, + device: Optional[Device] = None, +) -> SpatialTransform: + """Register pair of images.""" + if config is None: + config = {} + if outdir is not None: + outdir = Path(outdir).absolute() + outdir.mkdir(parents=True, exist_ok=True) + loss_config, loss_weights = get_loss_config(config) + model_name, model_args, model_init = get_model_config(config) + optim_name, optim_args, optim_loop = get_optim_config(config) + levels, coarsest_level, finest_level = get_levels_config(config) + finest_spacing, min_size, pyramid_dims = get_pyramid_config(config) + device = get_device_config(config, device) + verbose = int(verbose) + debug = int(debug) + if verbose > 0: + print() + start = timer() + target_keys = set(loss_config.keys()) | set( + weight_channel_names(loss_weights).values() + ) + target_image, target_chns = read_images(target, names=target_keys, device=device) + source_image, source_chns = read_images( + source, names=loss_config.keys(), device=device + ) + if verbose > 3: + print(f"Read images from files in {timer() - start:.3f}s") + start_reg = timer() + target_image = append_mask(target_image, target_chns, config) + source_image = append_mask(source_image, source_chns, config) + norm_params = get_normalize_config(config, target_image, target_chns) + target_image = normalize_data_(target_image, target_chns, **norm_params) + source_image = normalize_data_(source_image, source_chns, **norm_params) + start = timer() + target_pyramid = target_image.pyramid( + levels, + start=finest_level, + end=coarsest_level, + dims=pyramid_dims, + spacing=finest_spacing, + min_size=min_size, + ) + source_pyramid = source_image.pyramid( + levels, + start=finest_level, + end=coarsest_level, + dims=pyramid_dims, + spacing=finest_spacing, + min_size=min_size, + ) + if verbose > 3: + print(f"Constructed Gaussian resolution pyramids in {timer() - start:.3f}s\n") + if verbose > 2: + print("Target image pyramid:") + print_pyramid_info(target_pyramid) + print("Source image pyramid:") + print_pyramid_info(source_pyramid) + del target_image + del source_image + source_grid = source_pyramid[finest_level].grid() + finest_grid = target_pyramid[finest_level].grid() + coarsest_grid = target_pyramid[coarsest_level].grid() + post_transform = get_post_transform(config, finest_grid, source_grid) + transform_downsample = model_args.pop("downsample", 0) + transform_grid = coarsest_grid.downsample(transform_downsample) + # here is ok + transform = new_spatial_transform( + model_name, grid=transform_grid, groups=1, **model_args + ) + if model_init: + if verbose > 1: + print(f"Fitting '{model_init}'...") + disp_field = FlowField.read(model_init).to(device=device) + assert isinstance(disp_field, FlowField) + start = timer() + transform = transform.to(device=device).fit(disp_field.batch()) + if verbose > 0: + print(f"Fitted initial displacement field in {timer() - start:.3f}s") + del disp_field + grid_transform = 
SequentialTransform(transform, post_transform) + grid_transform = grid_transform.to(device=device) + for level in range(coarsest_level, finest_level - 1, -1): + target_image = target_pyramid[level] + source_image = source_pyramid[level] + # here is ok + if outdir and debug > 0: + write_channels( + data=target_image.tensor(), + grid=target_image.grid(), + channels=target_chns, + outdir=outdir, + prefix=f"level_{level}_target_", + ) + write_channels( + data=source_image.tensor(), + grid=source_image.grid(), + channels=source_chns, + outdir=outdir, + prefix=f"level_{level}_source_", + ) + if level != coarsest_level: + start = timer() + transform_grid = target_image.grid().downsample(transform_downsample) + transform.grid_(transform_grid) + if verbose > 3: + print(f"Subdivided control point grid in {timer() - start:.3f}s") + grid_transform.grid_(target_image.grid()) + loss_terms = new_loss_terms(loss_config) + loss = PairwiseImageRegistrationLoss( + losses=loss_terms, + source_data=source_image.tensor().unsqueeze(axis=0), + target_data=target_image.tensor().unsqueeze(axis=0), + source_grid=source_image.grid(), + target_grid=target_image.grid(), + source_chns=source_chns, + target_chns=target_chns, + transform=grid_transform, + weights=loss_weights, + ) + loss = loss.to(device=device) + if outdir and debug > 1: + start = timer() + result = loss.eval() + if verbose > 3: + print(f"Evaluated initial loss in {timer() - start:.3f}s") + write_result( + result, + grid=target_image.grid(), + channels=source_chns, + outdir=outdir, + prefix=f"level_{level}_initial_", + ) + flow = grid_transform.flow(target_image.grid(), device=device) + flow[0].write(outdir / f"level_{level}_initial_def.mha") + optimizer = new_optimizer(optim_name, model=grid_transform, **optim_args) + engine = RegistrationEngine( + model=grid_transform, + loss=loss, + optimizer=optimizer, + max_steps=optim_loop.get("max_steps", 250), + min_delta=float(optim_loop.get("min_delta", "nan")), + ) + grad_sigma = float(optim_loop.get("smooth_grad", 0)) + if isinstance(transform, NonRigidTransform) and grad_sigma > 0: + engine.register_eval_hook(smooth_grad_hook(transform, sigma=grad_sigma)) + engine.register_eval_hook(normalize_grad_hook(transform)) + if verbose > 2: + engine.register_eval_hook(print_eval_loss_hook(level)) + elif verbose > 1: + engine.register_step_hook(print_step_loss_hook(level)) + if outdir and debug > 2: + engine.register_eval_hook( + write_result_hook( + level=level, + grid=target_image.grid(), + channels=source_chns, + outdir=outdir, + ) + ) + engine.run() + if verbose > 0 or outdir and debug > 0: + start = timer() + result = loss.eval() + if verbose > 3: + print(f"Evaluated final loss in {timer() - start:.3f}s") + if verbose > 0: + loss_value = float(result["loss"]) + print( + f"level={level:d}: loss={loss_value:.5f} ({engine.num_steps:d} steps)", + flush=True, + ) + if outdir and debug > 0: + write_result( + result, + grid=target_image.grid(), + channels=source_chns, + outdir=outdir, + prefix=f"level_{level}_final_", + ) + flow = grid_transform.flow(device=device) + flow[0].write(outdir / f"level_{level}_final_def.mha") + if verbose > 3: + print(f"Registered images in {timer() - start_reg:.3f}s") + if verbose > 0: + print() + return grid_transform + + +def append_mask( + image: Image, channels: Dict[str, Tuple[int, int]], config: Dict[str, Any] +) -> Image: + """Append foreground mask to data tensor.""" + data = image.tensor() + if "img" in channels: + lower_threshold, upper_threshold = get_clamp_config(config, "img") 
+ mask = U.threshold( + data[slice(*channels["img"])], lower_threshold, upper_threshold + ) + else: + mask = paddle.ones(shape=(1,) + tuple(data.shape)[1:], dtype=data.dtype) + data = paddle.concat(x=[data, mask.astype(data.dtype)], axis=0) + channels["msk"] = tuple(data.shape)[0] - 1, tuple(data.shape)[0] + return Image(data, image.grid()) + + +def append_data( + data: Optional[paddle.Tensor], + channels: Dict[str, Tuple[int, int]], + name: str, + other: paddle.Tensor, +) -> paddle.Tensor: + """Append image data.""" + if data is None: + data = other + else: + data = paddle.concat(x=[data, other], axis=0) + channels[name] = tuple(data.shape)[0] - tuple(other.shape)[0], tuple(data.shape)[0] + return data + + +def read_images( + sample: Union[PathStr, Dict[str, PathStr]], names: Set[str], device: str +) -> Tuple[Image, Dict[str, Tuple[int, int]]]: + """Read image data from input files.""" + data = None + grid = None + if isinstance(sample, (Path, str)): + sample = {"img": sample} + img_path = sample.get("img") + seg_path = sample.get("seg") + sdf_path = sample.get("sdf") + for path in (img_path, seg_path, sdf_path): + if not path: + continue + grid = Grid.from_file(path).align_corners_(True) + break + else: + raise ValueError( + "One of 'img', 'seg', or 'sdf' input image file paths is required" + ) + assert grid is not None + dtype = "float32" + channels = {} + if "img" in names: + temp = Image.read(img_path, dtype=dtype, device=device) + data = append_data(data, channels, "img", temp.tensor()) + if "seg" in names: + if seg_path is None: + raise ValueError("Missing segmentation label image file path") + temp = Image.read(seg_path, dtype="int64", device=device) + temp_grid = temp.grid() + num_classes = int(temp.max()) + 1 + temp = temp.tensor().unsqueeze(axis=0) + temp = U.as_one_hot_tensor(temp, num_classes).to(dtype=dtype) + temp = temp.squeeze(axis=0) + temp = Image(temp, grid=temp_grid).sample(grid) + data = append_data(data, channels, "seg", temp.tensor()) + if "sdf" in names: + if sdf_path is None: + raise ValueError( + "Missing segmentation boundary signed distance field file path" + ) + temp = Image.read(sdf_path, dtype=dtype, device=device) + temp = temp.sample(shape=grid) + data = append_data(data, channels, "sdf", temp.tensor()) + if data is None: + if img_path is None: + raise ValueError("Missing intensity image file path") + data = Image.read(img_path, dtype=dtype, device=device) + channels = {"img": (0, 1)} + image = Image(data, grid=grid) + return image, channels + + +def get_device_config( + config: Dict[str, Any], device: Optional[Union[str, str]] = None +) -> str: + """Get configured PyTorch device.""" + if device is None: + device = config.get("device", "cpu") + if isinstance(device, int): + device = f"cuda:{device}" + elif device == "cuda": + device = "cuda:0" + return str(device).replace("cuda", "gpu") + + +def load_transform(path: PathStr, grid: Grid) -> SpatialTransform: + """Load transformation from file. + + Args: + path: File path from which to load spatial transformation. + grid: Target domain grid with respect to which transformation is defined. + + Returns: + Loaded spatial transformation. 
+ + """ + target_grid = grid + + def convert_matrix( + matrix: paddle.Tensor, grid: Optional[Grid] = None + ) -> paddle.Tensor: + if grid is None: + pre = target_grid.transform(Axes.CUBE_CORNERS, Axes.WORLD) + post = target_grid.transform(Axes.WORLD, Axes.CUBE_CORNERS) + matrix = U.homogeneous_matmul(post, matrix, pre) + elif grid != target_grid: + pre = target_grid.transform(Axes.CUBE_CORNERS, grid=grid) + post = grid.transform(Axes.CUBE_CORNERS, grid=target_grid) + matrix = U.homogeneous_matmul(post, matrix, pre) + return matrix + + path = Path(path) + if path.suffix == ".pt": + value = paddle.load(path=path) + if isinstance(value, dict): + matrix = value.get("matrix") + if matrix is None: + raise KeyError( + "load_transform() .pt file dict must contain key 'matrix'" + ) + grid = value.get("grid") + elif isinstance(value, paddle.Tensor): + matrix = value + grid = None + else: + raise RuntimeError("load_transform() .pt file must contain tensor or dict") + if matrix.ndim == 2: + matrix = matrix.unsqueeze(axis=0) + if matrix.ndim != 3 or tuple(matrix.shape)[1:] != (3, 4): + raise RuntimeError( + "load_transform() .pt file tensor must have shape (N, 3, 4)" + ) + params = convert_matrix(matrix, grid) + return HomogeneousTransform(target_grid, params=params) + flow = FlowField.read(path, axes=Axes.WORLD) + flow = flow.axes(Axes.from_grid(target_grid)) + flow = flow.sample(shape=target_grid) + return DisplacementFieldTransform( + target_grid, params=flow.tensor().unsqueeze(axis=0) + ) + + +def get_post_transform( + config: Dict[str, Any], target_grid: Grid, source_grid: Grid +) -> Optional[SpatialTransform]: + """Get constant rigid transformation between image grid domains.""" + align = config.get("align", False) + if align is False or align is None: + return None + if isinstance(align, (Path, str)): + return load_transform(align, target_grid) + if align is True: + align_centers = True + align_directions = True + elif isinstance(align, dict): + align_centers = bool(align.get("centers", True)) + align_directions = bool(align.get("directions", True)) + else: + raise ValueError( + "get_post_transform() 'config' has invalid 'align' value: {align}" + ) + center_offset = ( + target_grid.world_to_cube(source_grid.center()).unsqueeze(axis=0) + if align_centers + else None + ) + rotation_matrix = ( + source_grid.direction() @ target_grid.direction().t().unsqueeze(axis=0) + if align_directions + else None + ) + transform = None + if center_offset is not None and rotation_matrix is not None: + transform = RigidQuaternionTransform( + target_grid, translation=center_offset, rotation=False + ) + transform.rotation.matrix_(rotation_matrix) + elif center_offset is not None: + transform = Translation(target_grid, params=center_offset) + elif rotation_matrix is not None: + transform = QuaternionRotation(target_grid, params=False) + transform.matrix_(rotation_matrix) + return transform + + +def get_clamp_config( + config: Dict[str, Any], channel: str +) -> Tuple[Optional[float], Optional[float]]: + """Get thresholds for named image channel. + + Args: + config: Configuration. + channel: Name of image channel. + + Returns: + lower_threshold: Lower threshold. + upper_threshold: Upper threshold. 
+ + """ + input_config = config.get("input", {}) + if not isinstance(input_config, dict): + raise ValueError("get_clamp_config() 'input' value must be dict") + channel_config = input_config.get(channel) + if not isinstance(channel_config, dict): + channel_config = {"clamp": channel_config} + thresholds = channel_config.get("clamp", input_config.get("clamp")) + if thresholds is None: + thresholds = None, None + elif isinstance(thresholds, (int, float)): + thresholds = float(thresholds), None + if not isinstance(thresholds, (list, tuple)): + raise ValueError("get_clamp_config() value must be scalar or sequence") + if len(thresholds) != 2: + raise ValueError("get_clamp_config() value must be scalar or [min, max]") + thresholds = tuple(None if v is None else float(v) for v in thresholds) + lower_threshold, upper_threshold = thresholds + return lower_threshold, upper_threshold + + +def get_scale_config(config: Dict[str, Any], channel: str) -> Optional[float]: + """Get channel scaling factor.""" + input_config = config.get("input", {}) + if not isinstance(input_config, dict): + return None + channel_config = input_config.get(channel) + if not isinstance(channel_config, dict): + return None + value = channel_config.get("scale", input_config.get("scale")) + if value is None: + return None + return float(value) + + +def get_normalize_config( + config: Dict[str, Any], image: Image, channels: Dict[str, Tuple[int, int]] +) -> Dict[str, Dict[str, paddle.Tensor]]: + """Calculate data normalization parameters. + + Args: + config: Configuration. + image: Image data. + channels: Map of image channel slices. + + Returns: + Dictionary of normalization parameters. + + """ + scale = {} + shift = {} + for channel, (start, stop) in channels.items(): + start_1 = image.tensor().shape[0] + start if start < 0 else start + data = paddle.slice(image.tensor(), [0], [start_1], [start_1 + (stop - start)]) + lower_threshold, upper_threshold = get_clamp_config(config, channel) + scale_factor = get_scale_config(config, channel) + if channel in ("msk", "seg"): + if lower_threshold is None: + lower_threshold = 0 + if upper_threshold is None: + upper_threshold = 1 + else: + if lower_threshold is None: + lower_threshold = data.min() + if upper_threshold is None: + upper_threshold = data.max() + if scale_factor is None: + if upper_threshold > lower_threshold: + scale_factor = upper_threshold - lower_threshold + else: + scale_factor = 1 + else: + scale_factor = 1 / scale_factor + shift[channel] = lower_threshold + scale[channel] = scale_factor + return dict(shift=shift, scale=scale) + + +def normalize_data_( + image: Image, + channels: Dict[str, Tuple[int, int]], + shift: Optional[Dict[str, paddle.Tensor]] = None, + scale: Optional[Dict[str, paddle.Tensor]] = None, +) -> Image: + """Normalize image data.""" + if shift is None: + shift = {} + if scale is None: + scale = {} + for channel, (start, stop) in channels.items(): + start_2 = image.tensor().shape[0] + start if start < 0 else start + data = paddle.slice(image.tensor(), [0], [start_2], [start_2 + (stop - start)]) + offset = shift.get(channel) + if offset is not None: + data -= offset + norm = scale.get(channel) + if norm is not None: + data /= norm + if channel in ("msk", "seg"): + data.clip_(min=0, max=1) + return image + + +def get_levels_config(config: Dict[str, Any]) -> Tuple[int, int, int]: + """Get indices of coarsest and finest level from configuration.""" + cfg = config.get("pyramid", {}) + levels = cfg.get("levels", 4) + if isinstance(levels, int): + levels = levels 
- 1, 0 + if not isinstance(levels, (list, tuple)): + raise TypeError( + "register_pairwise() 'config' key 'pyramid.levels': value must be int, tuple, or list" + ) + coarsest_level, finest_level = levels + if finest_level > coarsest_level: + raise ValueError( + "register_pairwise() 'config' key 'pyramid.levels':" + + " finest level must be less or equal than coarsest level" + ) + levels = coarsest_level + 1 + if "max_level" in cfg: + levels = max(levels, cfg["max_level"]) + return levels, coarsest_level, finest_level + + +def get_pyramid_config( + config: Dict[str, Any] +) -> Tuple[Optional[Union[float, Sequence[float]]], int, Optional[Union[str, int]]]: + """Get settings of Gaussian resolution pyramid from configuration.""" + cfg = config.get("pyramid", {}) + min_size = cfg.get("min_size", 16) + finest_spacing = cfg.get("spacing") + dims = cfg.get("dims") + return finest_spacing, min_size, dims + + +def get_loss_config( + config: Dict[str, Any] +) -> Tuple[Dict[str, Dict[str, Any]], Dict[str, float]]: + """Instantiate terms of registration loss given configuration object. + + Args: + config: Configuration. + + Returns: + losses: Preparsed configuration of loss terms (cf. ``new_loss_terms()``). + weights: Weights of loss terms. + + """ + cfg = None + losses = {} + weights = {} + sections = "loss", "losses", "energy" + for name in sections: + if name in config: + if cfg is not None: + raise ValueError( + "get_loss_config() keys {sections} are mutually exclusive" + ) + cfg = config[name] + if not cfg: + cfg = "SSD" + if isinstance(cfg, str): + cfg = (cfg,) + if isinstance(cfg, Sequence): + names, cfg = cfg, {} + for i, name in enumerate(names): + cfg[f"loss_{i}"] = str(name) + if isinstance(cfg, dict): + for key, value in cfg.items(): + name = None + weight = 1 + kwargs = {} + if isinstance(value, str): + name = value + elif isinstance(value, Sequence): + if not value: + raise ValueError(f"get_loss_config() '{key}' loss entry is empty") + if len(value) == 1: + if isinstance(value[0], str): + value = {"name": value[0]} + elif len(value) > 1: + if isinstance(value[0], (int, float)): + value[0] = {"weight": value[0]} + if isinstance(value[1], str): + value[1] = {"name": value[1]} + value = join_kwargs_in_sequence(value) + if isinstance(value, dict): + kwargs = dict(value) + name = kwargs.pop("name", None) + weight = kwargs.pop("weight", 1) + elif len(value) == 2: + name = value[0] + kwargs = dict(value[1]) + elif len(value) == 3: + weight = float(value[0]) + name = value[1] + kwargs = dict(value[2]) + else: + raise ValueError( + f"get_loss_config() '{key}' invalid loss configuration" + ) + elif isinstance(value, dict): + kwargs = dict(value) + name = kwargs.pop("name", None) + weight = kwargs.pop("weight", 1) + else: + weight, name = value + if name is None: + raise ValueError(f"get_loss_config() missing 'name' for loss '{key}'") + if not isinstance(name, str): + raise TypeError(f"get_loss_config() 'name' of loss '{key}' must be str") + kwargs["name"] = name + losses[key] = kwargs + weights[key] = weight + else: + raise TypeError( + "get_loss_config() 'config' \"losses\" must be str, tuple, list, or dict" + ) + weights_config = config.get("weights", {}) + if isinstance(weights_config, (int, float)): + weights_config = (weights_config,) + if isinstance(weights_config, (list, tuple)): + names, weights_config = weights_config, {} + for i, weight in enumerate(names): + weights_config[f"loss_{i}"] = weight + if not isinstance(weights_config, dict): + raise TypeError( + "get_loss_config() 'weights' 
must be scalar, tuple, list, or dict" + ) + weights.update(weights_config) + losses = {k: v for k, v in losses.items() if weights.get(k, 0)} + weights = {k: v for k, v in weights.items() if k in losses} + return losses, weights + + +def new_loss_terms(config: Dict[str, Any]) -> Dict[str, paddle.nn.Layer]: + """Instantiate terms of registration loss. + + Args: + config: Preparsed configuration of loss terms. + target_tree: Target vessel centerline tree. + + Returns: + Mapping from channel or loss name to loss module instance. + + """ + losses = {} + for key, value in config.items(): + kwargs = dict(value) + name = kwargs.pop("name", None) + _ = kwargs.pop("weight", None) + if name is None: + raise ValueError(f"new_loss_terms() missing 'name' for loss '{key}'") + if not isinstance(name, str): + raise TypeError(f"new_loss_terms() 'name' of loss '{key}' must be str") + loss = new_loss(name, **kwargs) + losses[key] = loss + return losses + + +def get_model_config( + config: Dict[str, Any] +) -> Tuple[str, Dict[str, Any], Optional[str]]: + """Get configuration of transformation model to use.""" + cfg = config.get("model", {}) + cfg = dict(name=cfg) if isinstance(cfg, str) else dict(cfg) + model_name = cfg.pop("name") + assert isinstance(model_name, str) + assert model_name != "" + model_init = cfg.pop("init", None) + if model_init is not None: + model_init = Path(model_init).as_posix() + model_args = dict(cfg.get(model_name, cfg)) + return model_name, model_args, model_init + + +def get_optim_config( + config: Dict[str, Any] +) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: + """Get configuration of optimizer to use.""" + cfg = config.get("optim", {}) + cfg = dict(name=cfg) if isinstance(cfg, str) else dict(cfg) + if "optimizer" in cfg: + if "name" in cfg: + raise ValueError( + "get_optim_config() keys ('name', 'optimizer') are mutually exclusive" + ) + cfg["name"] = cfg.pop("optimizer") + optim_name = str(cfg.pop("name", "LBFGS")) + optim_loop = {} + for key in ("max_steps", "min_delta", "smooth_grad"): + if key in cfg: + optim_loop[key] = cfg.pop(key) + optim_args = {k: v for k, v in cfg.items() if isinstance(k, str) and k[0].islower()} + optim_args.update(cfg.get(optim_name, {})) + lr_keys = "step_size", "learning_rate" + for lr_key in ("step_size", "learning_rate"): + if lr_key in optim_args: + if "lr" in optim_args: + raise ValueError( + f"get_optim_config() keys {lr_keys + ('lr',)} are mutually exclusive" + ) + optim_args["lr"] = optim_args.pop(lr_key) + return optim_name, optim_args, optim_loop + + +@paddle.no_grad() +def write_channels( + data: paddle.Tensor, + grid: Grid, + channels: Mapping[str, Tuple[int, int]], + outdir: PathStr, + prefix: str = "", +) -> None: + """Write image channels.""" + for name, (start, stop) in channels.items(): + image = data[slice(start, stop, 1)] + if name == "seg": + image = image.argmax(axis=0, keepdim=True).astype("uint8") + elif name == "msk": + image = image.mul(255).clip_(min=0, max=255).astype("uint8") + if not isinstance(image, Image): + image = Image(image, grid=grid) + image.write(outdir / f"{prefix}{name}.mha") + + +@paddle.no_grad() +def write_result( + result: RegistrationResult, + grid: Grid, + channels: Mapping[str, Tuple[int, int]], + outdir: PathStr, + prefix: str = "", +) -> None: + """Write registration result to output directory.""" + data = result["source"] + assert isinstance(data, paddle.Tensor) + write_channels(data[0], grid=grid, channels=channels, outdir=outdir, prefix=prefix) + data = result["mask"] + assert isinstance(data, 
paddle.Tensor) + if data.dtype == "bool": + data = data.astype("uint8").multiply_(y=paddle.to_tensor(255)) + mask = Image(data[0], grid=grid) + mask.write(outdir / f"{prefix}olm.mha") + + +def write_result_hook( + level: int, grid: Grid, channels: Mapping[str, Tuple[int, int]], outdir: Path +) -> RegistrationEvalHook: + """Get callback function for writing results after each evaluation.""" + + def fn( + _: RegistrationEngine, + num_steps: int, + num_evals: int, + result: RegistrationResult, + ) -> None: + prefix = f"level_{level}_step_{num_steps:03d}_eval_{num_evals}_" + write_result(result, grid=grid, channels=channels, outdir=outdir, prefix=prefix) + + return fn + + +def print_eval_loss_hook(level: int) -> RegistrationEvalHook: + """Get callback function for printing loss after each evaluation.""" + + def fn( + _: RegistrationEngine, num_steps: int, num_eval: int, result: RegistrationResult + ) -> None: + loss = float(result["loss"]) + message = f" {num_steps:>4d}:" + message += f" {loss:>12.05f} (loss)" + weights: Dict[str, Union[str, float]] = result.get("weights", {}) + losses: Dict[str, paddle.Tensor] = result["losses"] + for name, value in losses.items(): + value = float(value) + weight = weights.get(name, 1.0) + if not isinstance(weight, str): + value *= weight + elif "+" in weight: + weight = f"({weight})" + message += f", {value:>12.05f} [{weight} * {name}]" + if num_eval > 1: + message += " [evals={num_eval:d}]" + print(message, flush=True) + + return fn + + +def print_step_loss_hook(level: int) -> RegistrationStepHook: + """Get callback function for printing loss after each step.""" + + def fn(_: RegistrationEngine, num_steps: int, num_eval: int, loss: float) -> None: + message = f" {num_steps:>4d}: {loss:>12.05f}" + if num_eval > 1: + message += " [evals={num_eval:d}]" + print(message, flush=True) + + return fn + + +def print_pyramid_info(pyramid: Dict[str, Image]) -> None: + """Print information of image resolution pyramid.""" + levels = sorted(pyramid.keys()) + for level in reversed(levels): + grid = pyramid[level].grid() + size = ", ".join([f"{n:>3d}" for n in tuple(grid.shape)]) + origin = ", ".join([f"{n:.2f}" for n in grid.origin()]) + extent = ", ".join([f"{n:.2f}" for n in grid.extent()]) + domain = ", ".join([f"{n:.2f}" for n in grid.cube_extent()]) + print( + f"- Level {level}:" + + f" size=({size})" + + f", origin=({origin})" + + f", extent=({extent})" + + f", domain=({domain})" + ) + print() diff --git a/jointContribution/HighResolution/ffd/params_atlas_affine.yaml b/jointContribution/HighResolution/ffd/params_atlas_affine.yaml index 5c43427ef6..4ea66aabe1 100644 --- a/jointContribution/HighResolution/ffd/params_atlas_affine.yaml +++ b/jointContribution/HighResolution/ffd/params_atlas_affine.yaml @@ -1,17 +1,17 @@ -# Using free-form deformation model -model: - name: FullAffine -# Loss terms of objective function to minimize -energy: - seg: [1, MSE] -# Optimization scheme and parameters -optim: - name: Adam - step_size: 0.01 - min_delta: -0.01 - max_steps: 100 -# Gaussian resolution pyramid -pyramid: - dims: ["x", "y", "z"] - levels: 3 - spacing: [1., 1., 1.] +# Using free-form deformation model +model: + name: FullAffine +# Loss terms of objective function to minimize +energy: + seg: [1, MSE] +# Optimization scheme and parameters +optim: + name: Adam + step_size: 0.01 + min_delta: -0.01 + max_steps: 100 +# Gaussian resolution pyramid +pyramid: + dims: ["x", "y", "z"] + levels: 3 + spacing: [1., 1., 1.] 
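
Note on the two parameter files above: they are the configuration objects consumed by register_pairwise() (via load_config_para() in register.py further below), with "model" selecting the spatial transform, "energy" the weighted loss terms, "optim" the optimizer settings, and "pyramid" the Gaussian resolution pyramid. A minimal usage sketch, assuming hypothetical input file paths and that the package is importable as "ffd" (the import path is an assumption; the register_pairwise() keyword arguments match the function defined in pairwise.py in this diff):

    import yaml

    # Assumed import path for the module shown in this diff.
    from ffd.pairwise import register_pairwise

    # Load one of the parameter files shown above (file name is a placeholder).
    with open("params_atlas_affine.yaml") as f:
        config = yaml.safe_load(f)

    # Register a moving source image to a fixed target image; the image and
    # segmentation paths are placeholders for illustration only.
    transform = register_pairwise(
        target={"img": "target.nii.gz", "seg": "target_seg.nii.gz"},
        source={"img": "source.nii.gz", "seg": "source_seg.nii.gz"},
        config=config,
        outdir=None,   # set to a directory together with debug > 0 to dump intermediates
        device="cpu",  # "cuda:0" is mapped to a Paddle "gpu" device internally
        verbose=1,
        debug=0,
    )

The returned SequentialTransform can then be applied to the source image (as register.py does with TransformImage) or written out as a displacement field.
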
diff --git a/jointContribution/HighResolution/ffd/params_seg.yaml b/jointContribution/HighResolution/ffd/params_seg.yaml index a1aa7701bb..f87966bd98 100644 --- a/jointContribution/HighResolution/ffd/params_seg.yaml +++ b/jointContribution/HighResolution/ffd/params_seg.yaml @@ -1,22 +1,22 @@ -# Using free-form deformation model -model: - name: FFD - stride: &stride [8, 8, 8] -# Loss terms of objective function to minimize -energy: - seg: [1, MSE] -# seg: [1, MSE] - be: [0.01, BSplineBending, stride: *stride] - # To approximate bending energy on coarser grid, use smaller stride, e.g.: - # be: [0.005, BSplineBending, stride: 1] -# Optimization scheme and parameters -optim: - name: Adam - step_size: 0.001 - min_delta: -0.01 - max_steps: 100 -# Gaussian resolution pyramid -pyramid: - dims: ["x", "y", "z"] - levels: 3 - spacing: [1., 1., 1.] +# Using free-form deformation model +model: + name: FFD + stride: &stride [8, 8, 8] +# Loss terms of objective function to minimize +energy: + seg: [1, MSE] +# seg: [1, MSE] + be: [0.01, BSplineBending, stride: *stride] + # To approximate bending energy on coarser grid, use smaller stride, e.g.: + # be: [0.005, BSplineBending, stride: 1] +# Optimization scheme and parameters +optim: + name: Adam + step_size: 0.001 + min_delta: -0.01 + max_steps: 100 +# Gaussian resolution pyramid +pyramid: + dims: ["x", "y", "z"] + levels: 3 + spacing: [1., 1., 1.] diff --git a/jointContribution/HighResolution/ffd/register.py b/jointContribution/HighResolution/ffd/register.py index 4763c0a223..be854abd60 100644 --- a/jointContribution/HighResolution/ffd/register.py +++ b/jointContribution/HighResolution/ffd/register.py @@ -1,172 +1,172 @@ -import json -import logging -import sys -from pathlib import Path -from timeit import default_timer as timer -from typing import Any -from typing import Dict - -import deepali -import paddle -import yaml -from deepali.core import Grid -from deepali.core import PathStr - -# from deepali.utils.cli import filter_warning_of_experimental_named_tensors_feature -from deepali.core.argparse import Args -from deepali.core.argparse import ArgumentParser -from deepali.core.argparse import main_func -from deepali.core.environ import cuda_visible_devices -from deepali.core.logging import configure_logging -from deepali.core.pathlib import unlink_or_mkdir -from deepali.data import Image -from deepali.modules import TransformImage -from paddle import Tensor - -from .pairwise import register_pairwise - -log = logging.getLogger() - - -def parser(**kwargs) -> ArgumentParser: - """Construct argument parser.""" - if "description" not in kwargs: - kwargs["description"] = globals()["__doc__"] - parser = ArgumentParser(**kwargs) - parser.add_argument( - "-c", - "--config", - help="Configuration file", - default=Path(__file__).parent / "params_seg.yaml", - ) - parser.add_argument( - "-t", "--target", "--target-img", dest="target_img", help="Fixed target image" - ) - parser.add_argument( - "-s", "--source", "--source-img", dest="source_img", help="Moving source image" - ) - parser.add_argument("--target-seg", help="Fixed target segmentation label image") - parser.add_argument("--source-seg", help="Moving source segmentation label image") - parser.add_argument( - "-o", - "--output", - "--output-transform", - dest="output_transform", - help="Output transformation parameters", - ) - parser.add_argument( - "-w", - "--warped", - "--warped-img", - "--output-img", - dest="warped_img", - help="Deformed source image", - ) - parser.add_argument( - "--warped-seg", - 
"--output-seg", - dest="warped_seg", - help="Deformed source segmentation label image", - ) - parser.add_argument( - "--device", - help="Device on which to execute registration", - choices=("cpu", "cuda"), - default="cpu", - ) - parser.add_argument("--debug-dir", help="Output directory for intermediate files") - parser.add_argument( - "--debug", "--debug-level", help="Debug level", type=int, default=0 - ) - parser.add_argument( - "-v", "--verbose", help="Verbosity of output messages", type=int, default=0 - ) - parser.add_argument( - "--log-level", - help="Logging level", - choices=["DEBUG", "INFO", "WARNING", "ERROR"], - default="INFO", - ) - return parser - - -def init(args: Args) -> int: - """Initialize registration.""" - configure_logging(log, args) - if args.device == "cuda": - if not paddle.device.cuda.device_count() >= 1: - log.error("Cannot use --device 'cuda' ") - return 1 - gpu_ids = cuda_visible_devices() - if len(gpu_ids) != 1: - log.error("CUDA_VISIBLE_DEVICES must be set to one GPU") - return 1 - # filter_warning_of_experimental_named_tensors_feature() - return 0 - - -def register_func(args: Args) -> deepali.spatial.SpatialTransform: - """Execute registration given parsed arguments.""" - config = load_config_para(args.config) - device = str("cuda:0" if args.device == "cuda" else "cpu").replace("cuda", "gpu") - start = timer() - transform = register_pairwise( - target={"img": args.target_img, "seg": args.target_seg}, - source={"img": args.source_img, "seg": args.source_seg}, - config=config, - outdir=args.debug_dir, - device=args.device, - verbose=args.verbose, - debug=args.debug, - ) - log.info(f"Elapsed time: {timer() - start:.3f}s") - if args.warped_img: - target_grid = Grid.from_file(args.target_img) - source_image = Image.read(args.source_img, device=device) - warp_image = TransformImage( - target=target_grid, - source=source_image.grid(), - sampling="linear", - padding=source_image.min(), - ).to(device) - # here is ok - data: Tensor = warp_image(transform.tensor(), source_image) - warped_image = Image(data, target_grid) - warped_image.write(unlink_or_mkdir(args.warped_img)) - if args.warped_seg: - target_grid = Grid.from_file(args.target_seg) - source_image = Image.read(args.source_seg, device=device) - warp_labels = TransformImage( - target=target_grid, - source=source_image.grid(), - sampling="nearest", - padding=0, - ).to(device) - data: Tensor = warp_labels(transform.tensor(), source_image) - warped_image = Image(data, target_grid) - warped_image.write(unlink_or_mkdir(args.warped_seg)) - if args.output_transform: - path = unlink_or_mkdir(args.output_transform) - if path.suffix == ".pt": - transform.clear_buffers() - paddle.save(obj=transform, path=path) - else: - transform.flow()[0].write(path) - return transform - - -main = main_func(parser, register_func, init=init) - - -def load_config_para(path: PathStr) -> Dict[str, Any]: - """Load registration parameters from configuration file.""" - config_path = Path(path).absolute() - log.info(f"Load configuration from {config_path}") - config_text = config_path.read_text() - if config_path.suffix == ".json": - return json.loads(config_text) - return yaml.safe_load(config_text) - - -if __name__ == "__main__": - sys.exit(main()) +import json +import logging +import sys +from pathlib import Path +from timeit import default_timer as timer +from typing import Any +from typing import Dict + +import deepali +import paddle +import yaml +from deepali.core import Grid +from deepali.core import PathStr + +# from deepali.utils.cli 
import filter_warning_of_experimental_named_tensors_feature +from deepali.core.argparse import Args +from deepali.core.argparse import ArgumentParser +from deepali.core.argparse import main_func +from deepali.core.environ import cuda_visible_devices +from deepali.core.logging import configure_logging +from deepali.core.pathlib import unlink_or_mkdir +from deepali.data import Image +from deepali.modules import TransformImage +from paddle import Tensor + +from .pairwise import register_pairwise + +log = logging.getLogger() + + +def parser(**kwargs) -> ArgumentParser: + """Construct argument parser.""" + if "description" not in kwargs: + kwargs["description"] = globals()["__doc__"] + parser = ArgumentParser(**kwargs) + parser.add_argument( + "-c", + "--config", + help="Configuration file", + default=Path(__file__).parent / "params_seg.yaml", + ) + parser.add_argument( + "-t", "--target", "--target-img", dest="target_img", help="Fixed target image" + ) + parser.add_argument( + "-s", "--source", "--source-img", dest="source_img", help="Moving source image" + ) + parser.add_argument("--target-seg", help="Fixed target segmentation label image") + parser.add_argument("--source-seg", help="Moving source segmentation label image") + parser.add_argument( + "-o", + "--output", + "--output-transform", + dest="output_transform", + help="Output transformation parameters", + ) + parser.add_argument( + "-w", + "--warped", + "--warped-img", + "--output-img", + dest="warped_img", + help="Deformed source image", + ) + parser.add_argument( + "--warped-seg", + "--output-seg", + dest="warped_seg", + help="Deformed source segmentation label image", + ) + parser.add_argument( + "--device", + help="Device on which to execute registration", + choices=("cpu", "cuda"), + default="cpu", + ) + parser.add_argument("--debug-dir", help="Output directory for intermediate files") + parser.add_argument( + "--debug", "--debug-level", help="Debug level", type=int, default=0 + ) + parser.add_argument( + "-v", "--verbose", help="Verbosity of output messages", type=int, default=0 + ) + parser.add_argument( + "--log-level", + help="Logging level", + choices=["DEBUG", "INFO", "WARNING", "ERROR"], + default="INFO", + ) + return parser + + +def init(args: Args) -> int: + """Initialize registration.""" + configure_logging(log, args) + if args.device == "cuda": + if not paddle.device.cuda.device_count() >= 1: + log.error("Cannot use --device 'cuda' ") + return 1 + gpu_ids = cuda_visible_devices() + if len(gpu_ids) != 1: + log.error("CUDA_VISIBLE_DEVICES must be set to one GPU") + return 1 + # filter_warning_of_experimental_named_tensors_feature() + return 0 + + +def register_func(args: Args) -> deepali.spatial.SpatialTransform: + """Execute registration given parsed arguments.""" + config = load_config_para(args.config) + device = str("cuda:0" if args.device == "cuda" else "cpu").replace("cuda", "gpu") + start = timer() + transform = register_pairwise( + target={"img": args.target_img, "seg": args.target_seg}, + source={"img": args.source_img, "seg": args.source_seg}, + config=config, + outdir=args.debug_dir, + device=args.device, + verbose=args.verbose, + debug=args.debug, + ) + log.info(f"Elapsed time: {timer() - start:.3f}s") + if args.warped_img: + target_grid = Grid.from_file(args.target_img) + source_image = Image.read(args.source_img, device=device) + warp_image = TransformImage( + target=target_grid, + source=source_image.grid(), + sampling="linear", + padding=source_image.min(), + ).to(device) + # here is ok + data: Tensor = 
warp_image(transform.tensor(), source_image) + warped_image = Image(data, target_grid) + warped_image.write(unlink_or_mkdir(args.warped_img)) + if args.warped_seg: + target_grid = Grid.from_file(args.target_seg) + source_image = Image.read(args.source_seg, device=device) + warp_labels = TransformImage( + target=target_grid, + source=source_image.grid(), + sampling="nearest", + padding=0, + ).to(device) + data: Tensor = warp_labels(transform.tensor(), source_image) + warped_image = Image(data, target_grid) + warped_image.write(unlink_or_mkdir(args.warped_seg)) + if args.output_transform: + path = unlink_or_mkdir(args.output_transform) + if path.suffix == ".pt": + transform.clear_buffers() + paddle.save(obj=transform, path=path) + else: + transform.flow()[0].write(path) + return transform + + +main = main_func(parser, register_func, init=init) + + +def load_config_para(path: PathStr) -> Dict[str, Any]: + """Load registration parameters from configuration file.""" + config_path = Path(path).absolute() + log.info(f"Load configuration from {config_path}") + config_text = config_path.read_text() + if config_path.suffix == ".json": + return json.loads(config_text) + return yaml.safe_load(config_text) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/jointContribution/HighResolution/main_ACDC.py b/jointContribution/HighResolution/main_ACDC.py index 3494583ecc..757b7aba7b 100644 --- a/jointContribution/HighResolution/main_ACDC.py +++ b/jointContribution/HighResolution/main_ACDC.py @@ -1,221 +1,221 @@ -import os -import shutil - -import nibabel as nib -import numpy as np -import util.pre_process as pre_process -import util.utils as util -from scipy.ndimage import zoom -from util.image_utils import combine_labels -from util.image_utils import crop_3Dimage -from util.image_utils import np_mean_dice -from util.image_utils import refineFusionResults - - -def atlas_selection( - atlas_path, - atlas_img_type, - atlas_nums_top, - target_path, - frame, - dice_scores, - parameter_file, -): - # calculate the similarity between the target image and the ED/ES of atlas - # decide to use ED or ES - atlas_top_list = [] - for index, atlas_id in enumerate(dice_scores[:atlas_nums_top]): - source_img = f"{atlas_path}/{atlas_id[0]}/seg_{frame}_{atlas_img_type}.nii.gz" - target_img_atlas = f"{target_path}/tmps/seg_{frame}_{index}.nii.gz" - affine_warped_image_path = f"{target_path}/tmps/affine_warped_source.nii.gz" - # read atlas sa and seg, and save them in the image space - - pre_process.register_with_deepali( - target_img, - source_img, - target_seg_file=target_img, - source_seg_file=source_img, - ffd_params_file=f"{parameter_file}_atlas_affine.yaml", - warped_img_path=affine_warped_image_path, - ) - - pre_process.register_with_deepali( - target_img, - affine_warped_image_path, - target_seg_file=target_img, - source_seg_file=affine_warped_image_path, - ffd_params_file=f"{parameter_file}_seg.yaml", - warped_img_path=target_img_atlas, - ) - - target_img_atlas = f"{target_path}/tmps/seg_{frame}_{index}.nii.gz" - seg_EDES = nib.load( - f"{target_path}/tmps/seg_{frame}_{index}.nii.gz" - ).get_fdata() - seg_EDES = refineFusionResults(seg_EDES, 2) - nib.save(nib.Nifti1Image(seg_EDES, np.eye(4)), target_img_atlas) - atlas_top_list.append(target_img_atlas) - return atlas_top_list - - -def crop_data_into_atlas_size(seg_nib, img_nib, atlas_size): - # 0 - background, 1 - LV, 2 - MYO, 4 - RV, img_data: 4D, WHD*time_t - # the template information 140, 140, 56 - seg_data = seg_nib.get_fdata() - img_data = 
img_nib.get_fdata().squeeze() - seg_data[seg_data == 1] = 0 - seg_data[seg_data == 3] = 1 - - affine_sa = seg_nib.affine - new_affine = affine_sa.copy() - # align with the atlas data - seg_data = np.flip(np.flip(seg_data, 2), 1) - img_data = np.flip(np.flip(img_data, 2), 1) - - res_xy, res_z = seg_nib.header["pixdim"][1], seg_nib.header["pixdim"][3] - atlas_xy, atlas_z = 1.25, 2.0 - - raw_size = seg_data.shape - # resize to atlas - if seg_data.ndim == 3: - seg_data = zoom( - seg_data, - zoom=(res_xy / atlas_xy, res_xy / atlas_xy, res_z / atlas_z), - order=0, - ) - img_data = zoom( - img_data, - zoom=(res_xy / atlas_xy, res_xy / atlas_xy, res_z / atlas_z), - order=1, - ) - else: - seg_data = zoom( - seg_data, - zoom=(res_xy / atlas_xy, res_xy / atlas_xy, res_z / atlas_z, 1), - order=0, - ) - img_data = zoom( - img_data, - zoom=(res_xy / atlas_xy, res_xy / atlas_xy, res_z / atlas_z, 1), - order=1, - ) - new_affine[:3, 0] /= seg_data.shape[0] / raw_size[0] - new_affine[:3, 1] /= seg_data.shape[1] / raw_size[1] - new_affine[:3, 2] /= seg_data.shape[2] / raw_size[2] - - # calculate coordinates of heart center and crop - heart_mask = (seg_data > 0).astype(np.uint8) - - c0 = np.median(np.where(heart_mask.sum(axis=-1).sum(axis=-1))[0]).astype(int) - c1 = np.median(np.where(heart_mask.sum(axis=0).sum(axis=-1))[0]).astype(int) - c2 = np.median(np.where(heart_mask.sum(axis=0).sum(axis=0))[0]).astype(int) - - crop_seg, crop_sa, new_affine = crop_3Dimage( - seg_data, img_data, (c0, c1, c2), atlas_size, affine_matrix=new_affine - ) - - return crop_seg, crop_sa, new_affine - - -if __name__ == "__main__": - data_dir = "./data" - atlas_path = "./Hammersmith_myo2" - parameter_file = "./ffd/params" - atlas_img_type = "image_space_crop" - atlas_nums_top = 3 - atlas_list = sorted(os.listdir(atlas_path)) - - device = "gpu" - tag = 0 - os.environ["CUDA_VISIBLE_DEVICES"] = str(0) - - subject_list = sorted(os.listdir(data_dir)) - interval = len(subject_list) - - print(f"----- from dataset {tag * interval} to {(tag + 1) * interval} ------") - - for i in range(tag * interval, (tag + 1) * interval): - subid = subject_list[i] - - print(f"----- processing {i}:{subid} ------") - img_path = f"{data_dir}/{subid}" - target_path = f"{data_dir}/{subid}/image_space_pipemesh" - util.setup_dir(target_path) - - Info_path = os.path.join(img_path, "Info.cfg") - with open(Info_path, "r") as file: - lines = file.readlines() - config = {} - for line in lines: - if ":" in line: - key, value = line.strip().split(":", 1) - config[key.strip()] = value.strip() - ED = int(config.get("ED", 0)) - ED = str(ED).zfill(2) - ES = int(config.get("ES", 0)) - ES = str(ES).zfill(2) - - sa_ED_path = f"{img_path}/{subid}_frame{ED}.nii.gz" - seg_sa_ED_path = f"{img_path}/{subid}_frame{ED}_gt.nii.gz" - sa_ED_nib = nib.load(sa_ED_path) - seg_ED_nib = nib.load(seg_sa_ED_path) - crop_seg_ED, crop_sa_ED, new_affine_ED = crop_data_into_atlas_size( - seg_ED_nib, sa_ED_nib, (140, 140, 56) - ) - - sa_ES_path = f"{img_path}/{subid}_frame{ES}.nii.gz" - seg_sa_ES_path = f"{img_path}/{subid}_frame{ES}_gt.nii.gz" - sa_ES_nib = nib.load(sa_ES_path) - seg_ES_nib = nib.load(seg_sa_ES_path) - crop_seg_ES, crop_sa_ES, new_affine_ES = crop_data_into_atlas_size( - seg_ES_nib, sa_ES_nib, (140, 140, 56) - ) - - # calculate top 3 similar atlases - dice_scores = [] - seg_ED = crop_seg_ED - for atlas_id in atlas_list: - source_img = f"{atlas_path}/{atlas_id}/seg_ED_{atlas_img_type}.nii.gz" - atlas_img = nib.load(source_img).get_fdata() - if atlas_img.shape[2] == 56: - # 
calculate the similarity between the target image and the atlas - dice_score = np_mean_dice(seg_ED, atlas_img) - dice_scores.append((atlas_id, dice_score)) - dice_scores.sort(key=lambda x: x[1], reverse=True) - - for frame in ["ED", "ES"]: - # save it in the image space - if frame == "ED": - seg_flip_time = crop_seg_ED - sa_flip_time = crop_sa_ED - else: - seg_flip_time = crop_seg_ES - sa_flip_time = crop_sa_ES - - target_img = f"{target_path}/seg_sa_{frame}.nii.gz" - nib.save(nib.Nifti1Image(seg_flip_time, np.eye(4)), target_img) - util.setup_dir(f"{target_path}/tmps") - - atlas_top_list = atlas_selection( - atlas_path, - atlas_img_type, - atlas_nums_top, - target_path, - frame, - dice_scores, - parameter_file, - ) - # vote the top 3 atlases - seg = combine_labels(atlas_top_list) - - nib.save( - nib.Nifti1Image(sa_flip_time, np.eye(4)), - f"{target_path}/sa_{frame}.nii.gz", - ) - - nib.save(nib.Nifti1Image(seg, np.eye(4)), target_img) - - try: - shutil.rmtree(f"{target_path}/tmps") - except FileNotFoundError: - pass +import os +import shutil + +import nibabel as nib +import numpy as np +import util.pre_process as pre_process +import util.utils as util +from scipy.ndimage import zoom +from util.image_utils import combine_labels +from util.image_utils import crop_3Dimage +from util.image_utils import np_mean_dice +from util.image_utils import refineFusionResults + + +def atlas_selection( + atlas_path, + atlas_img_type, + atlas_nums_top, + target_path, + frame, + dice_scores, + parameter_file, +): + # calculate the similarity between the target image and the ED/ES of atlas + # decide to use ED or ES + atlas_top_list = [] + for index, atlas_id in enumerate(dice_scores[:atlas_nums_top]): + source_img = f"{atlas_path}/{atlas_id[0]}/seg_{frame}_{atlas_img_type}.nii.gz" + target_img_atlas = f"{target_path}/tmps/seg_{frame}_{index}.nii.gz" + affine_warped_image_path = f"{target_path}/tmps/affine_warped_source.nii.gz" + # read atlas sa and seg, and save them in the image space + + pre_process.register_with_deepali( + target_img, + source_img, + target_seg_file=target_img, + source_seg_file=source_img, + ffd_params_file=f"{parameter_file}_atlas_affine.yaml", + warped_img_path=affine_warped_image_path, + ) + + pre_process.register_with_deepali( + target_img, + affine_warped_image_path, + target_seg_file=target_img, + source_seg_file=affine_warped_image_path, + ffd_params_file=f"{parameter_file}_seg.yaml", + warped_img_path=target_img_atlas, + ) + + target_img_atlas = f"{target_path}/tmps/seg_{frame}_{index}.nii.gz" + seg_EDES = nib.load( + f"{target_path}/tmps/seg_{frame}_{index}.nii.gz" + ).get_fdata() + seg_EDES = refineFusionResults(seg_EDES, 2) + nib.save(nib.Nifti1Image(seg_EDES, np.eye(4)), target_img_atlas) + atlas_top_list.append(target_img_atlas) + return atlas_top_list + + +def crop_data_into_atlas_size(seg_nib, img_nib, atlas_size): + # 0 - background, 1 - LV, 2 - MYO, 4 - RV, img_data: 4D, WHD*time_t + # the template information 140, 140, 56 + seg_data = seg_nib.get_fdata() + img_data = img_nib.get_fdata().squeeze() + seg_data[seg_data == 1] = 0 + seg_data[seg_data == 3] = 1 + + affine_sa = seg_nib.affine + new_affine = affine_sa.copy() + # align with the atlas data + seg_data = np.flip(np.flip(seg_data, 2), 1) + img_data = np.flip(np.flip(img_data, 2), 1) + + res_xy, res_z = seg_nib.header["pixdim"][1], seg_nib.header["pixdim"][3] + atlas_xy, atlas_z = 1.25, 2.0 + + raw_size = seg_data.shape + # resize to atlas + if seg_data.ndim == 3: + seg_data = zoom( + seg_data, + zoom=(res_xy / 
atlas_xy, res_xy / atlas_xy, res_z / atlas_z), + order=0, + ) + img_data = zoom( + img_data, + zoom=(res_xy / atlas_xy, res_xy / atlas_xy, res_z / atlas_z), + order=1, + ) + else: + seg_data = zoom( + seg_data, + zoom=(res_xy / atlas_xy, res_xy / atlas_xy, res_z / atlas_z, 1), + order=0, + ) + img_data = zoom( + img_data, + zoom=(res_xy / atlas_xy, res_xy / atlas_xy, res_z / atlas_z, 1), + order=1, + ) + new_affine[:3, 0] /= seg_data.shape[0] / raw_size[0] + new_affine[:3, 1] /= seg_data.shape[1] / raw_size[1] + new_affine[:3, 2] /= seg_data.shape[2] / raw_size[2] + + # calculate coordinates of heart center and crop + heart_mask = (seg_data > 0).astype(np.uint8) + + c0 = np.median(np.where(heart_mask.sum(axis=-1).sum(axis=-1))[0]).astype(int) + c1 = np.median(np.where(heart_mask.sum(axis=0).sum(axis=-1))[0]).astype(int) + c2 = np.median(np.where(heart_mask.sum(axis=0).sum(axis=0))[0]).astype(int) + + crop_seg, crop_sa, new_affine = crop_3Dimage( + seg_data, img_data, (c0, c1, c2), atlas_size, affine_matrix=new_affine + ) + + return crop_seg, crop_sa, new_affine + + +if __name__ == "__main__": + data_dir = "./data" + atlas_path = "./Hammersmith_myo2" + parameter_file = "./ffd/params" + atlas_img_type = "image_space_crop" + atlas_nums_top = 3 + atlas_list = sorted(os.listdir(atlas_path)) + + device = "gpu" + tag = 0 + os.environ["CUDA_VISIBLE_DEVICES"] = str(0) + + subject_list = sorted(os.listdir(data_dir)) + interval = len(subject_list) + + print(f"----- from dataset {tag * interval} to {(tag + 1) * interval} ------") + + for i in range(tag * interval, (tag + 1) * interval): + subid = subject_list[i] + + print(f"----- processing {i}:{subid} ------") + img_path = f"{data_dir}/{subid}" + target_path = f"{data_dir}/{subid}/image_space_pipemesh" + util.setup_dir(target_path) + + Info_path = os.path.join(img_path, "Info.cfg") + with open(Info_path, "r") as file: + lines = file.readlines() + config = {} + for line in lines: + if ":" in line: + key, value = line.strip().split(":", 1) + config[key.strip()] = value.strip() + ED = int(config.get("ED", 0)) + ED = str(ED).zfill(2) + ES = int(config.get("ES", 0)) + ES = str(ES).zfill(2) + + sa_ED_path = f"{img_path}/{subid}_frame{ED}.nii.gz" + seg_sa_ED_path = f"{img_path}/{subid}_frame{ED}_gt.nii.gz" + sa_ED_nib = nib.load(sa_ED_path) + seg_ED_nib = nib.load(seg_sa_ED_path) + crop_seg_ED, crop_sa_ED, new_affine_ED = crop_data_into_atlas_size( + seg_ED_nib, sa_ED_nib, (140, 140, 56) + ) + + sa_ES_path = f"{img_path}/{subid}_frame{ES}.nii.gz" + seg_sa_ES_path = f"{img_path}/{subid}_frame{ES}_gt.nii.gz" + sa_ES_nib = nib.load(sa_ES_path) + seg_ES_nib = nib.load(seg_sa_ES_path) + crop_seg_ES, crop_sa_ES, new_affine_ES = crop_data_into_atlas_size( + seg_ES_nib, sa_ES_nib, (140, 140, 56) + ) + + # calculate top 3 similar atlases + dice_scores = [] + seg_ED = crop_seg_ED + for atlas_id in atlas_list: + source_img = f"{atlas_path}/{atlas_id}/seg_ED_{atlas_img_type}.nii.gz" + atlas_img = nib.load(source_img).get_fdata() + if atlas_img.shape[2] == 56: + # calculate the similarity between the target image and the atlas + dice_score = np_mean_dice(seg_ED, atlas_img) + dice_scores.append((atlas_id, dice_score)) + dice_scores.sort(key=lambda x: x[1], reverse=True) + + for frame in ["ED", "ES"]: + # save it in the image space + if frame == "ED": + seg_flip_time = crop_seg_ED + sa_flip_time = crop_sa_ED + else: + seg_flip_time = crop_seg_ES + sa_flip_time = crop_sa_ES + + target_img = f"{target_path}/seg_sa_{frame}.nii.gz" + nib.save(nib.Nifti1Image(seg_flip_time, 
np.eye(4)), target_img) + util.setup_dir(f"{target_path}/tmps") + + atlas_top_list = atlas_selection( + atlas_path, + atlas_img_type, + atlas_nums_top, + target_path, + frame, + dice_scores, + parameter_file, + ) + # vote the top 3 atlases + seg = combine_labels(atlas_top_list) + + nib.save( + nib.Nifti1Image(sa_flip_time, np.eye(4)), + f"{target_path}/sa_{frame}.nii.gz", + ) + + nib.save(nib.Nifti1Image(seg, np.eye(4)), target_img) + + try: + shutil.rmtree(f"{target_path}/tmps") + except FileNotFoundError: + pass diff --git a/jointContribution/HighResolution/requirements.txt b/jointContribution/HighResolution/requirements.txt index 72320b3db7..21dbfa2713 100644 --- a/jointContribution/HighResolution/requirements.txt +++ b/jointContribution/HighResolution/requirements.txt @@ -1,11 +1,11 @@ -boto3==1.35.37 -dacite==1.8.1 -deprecation==2.1.0 -nibabel==5.3.0 -pandas==2.2.3 -pyvista==0.44.1 -PyYAML==6.0.2 -scipy==1.14.1 -setuptools==72.1.0 -SimpleITK==2.4.0 -typing_extensions==4.12.2 +boto3==1.35.37 +dacite==1.8.1 +deprecation==2.1.0 +nibabel==5.3.0 +pandas==2.2.3 +pyvista==0.44.1 +PyYAML==6.0.2 +scipy==1.14.1 +setuptools==72.1.0 +SimpleITK==2.4.0 +typing_extensions==4.12.2 diff --git a/jointContribution/HighResolution/util/image_utils.py b/jointContribution/HighResolution/util/image_utils.py index f7bb8e485f..739298ed59 100644 --- a/jointContribution/HighResolution/util/image_utils.py +++ b/jointContribution/HighResolution/util/image_utils.py @@ -1,183 +1,183 @@ -import nibabel as nib -import numpy as np -from scipy.ndimage import gaussian_filter - - -def crop_3Dimage(seg, image_sa, center, size, affine_matrix=None): - """Crop a 3D image using a bounding box centred at (c0, c1, c2) with specified size (size0, size1, size2)""" - c0, c1, c2 = center - size0, size1, size2 = size - S_seg = tuple(seg.shape) - S0, S1, S2 = S_seg[0], S_seg[1], S_seg[2] - r0, r1, r2 = int(size0 / 2), int(size1 / 2), int(size2 / 2) - start0, end0 = c0 - r0, c0 + r0 - start1, end1 = c1 - r1, c1 + r1 - start2, end2 = c2 - r2, c2 + r2 - start0_, end0_ = max(start0, 0), min(end0, S0) - start1_, end1_ = max(start1, 0), min(end1, S1) - start2_, end2_ = max(start2, 0), min(end2, S2) - crop = seg[start0_:end0_, start1_:end1_, start2_:end2_] - crop_img = image_sa[start0_:end0_, start1_:end1_, start2_:end2_] - if crop_img.ndim == 3: - crop_img = np.pad( - crop_img, - ( - (start0_ - start0, end0 - end0_), - (start1_ - start1, end1 - end1_), - (start2_ - start2, end2 - end2_), - ), - "constant", - ) - crop = np.pad( - crop, - ( - (start0_ - start0, end0 - end0_), - (start1_ - start1, end1 - end1_), - (start2_ - start2, end2 - end2_), - ), - "constant", - ) - else: - crop_img = np.pad( - crop_img, - ( - (start0_ - start0, end0 - end0_), - (start1_ - start1, end1 - end1_), - (start2_ - start2, end2 - end2_), - (0, 0), - ), - "constant", - ) - crop = np.pad( - crop, - ( - (start0_ - start0, end0 - end0_), - (start1_ - start1, end1 - end1_), - (start2_ - start2, end2 - end2_), - (0, 0), - ), - "constant", - ) - if affine_matrix is None: - return crop, crop_img - else: - R, b = affine_matrix[0:3, 0:3], affine_matrix[0:3, -1] - affine_matrix[0:3, -1] = R.dot(np.array([c0 - r0, c1 - r1, c2 - r2])) + b - return crop, crop_img, affine_matrix - - -def np_categorical_dice(pred, truth, k): - """Dice overlap metric for label k""" - A = (pred == k).astype(np.float32) - B = (truth == k).astype(np.float32) - return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B)) - - -def np_mean_dice(pred, truth): - """Dice mean metric""" - dsc = [] - for k in 
np.unique(truth)[1:]: - dsc.append(np_categorical_dice(pred, truth, k)) - return np.mean(dsc) - - -def combine_labels(input_paths, pad=-1, seed=None): - def get_most_popular(count_map): - return max(count_map, key=count_map.get) - - def is_equivocal(count_map): - return len(set(count_map.values())) > 1 - - def decide_on_tie(count_map, rng): - max_count = max(count_map.values()) - tied_labels = [ - label for label, count in count_map.items() if count == max_count - ] - return rng.choice(tied_labels) - - def calculate_counts(input_paths, output_shape): - counts = [{} for _ in range(np.prod(output_shape))] - for input_path in input_paths: - input_image = nib.load(input_path).get_fdata().astype(np.int32) - contended_voxel_indices = np.where( - np.logical_and( - output != input_image, - np.logical_or(output > pad, input_image > pad), - ) - ) - idx = np.ravel_multi_index(contended_voxel_indices, output_shape) - labels = input_image[contended_voxel_indices] - _, counts_per_label = np.unique(idx, return_counts=True) - for idx, label, count in zip(idx, labels, counts_per_label): - counts[idx][label] = counts[idx].get(label, 0) + count - return counts - - output_image = nib.load(input_paths[0]) - output_data = output_image.get_fdata().astype(np.uint8) - output_shape = tuple(output_data.shape) - unanimous_mask = np.ones(output_shape, dtype=np.uint8) - output = output_data.copy() - counts = calculate_counts(input_paths, output_shape) - contended_voxel_indices = np.where(unanimous_mask == 0) - idx = np.ravel_multi_index(contended_voxel_indices, output_shape) - for idx, (z, y, x) in zip(idx, np.transpose(contended_voxel_indices)): - output[z, y, x] = get_most_popular(counts[idx]) - if seed is not None: - rng = np.random.default_rng(seed) - else: - rng = np.random.default_rng() - equivocal_voxel_indices = np.where(unanimous_mask == 0) - idx = np.ravel_multi_index(equivocal_voxel_indices, output_shape) - unique_indices, counts_per_voxel = np.unique(idx, return_counts=True) - for idx, (z, y, x) in zip(unique_indices, np.transpose(equivocal_voxel_indices)): - if is_equivocal(counts[idx]): - output[z, y, x] = decide_on_tie(counts[idx], rng) - return output - - -def threshold_image(data, threshold=130): - # Perform thresholding using NumPy operations - thresholded_data = data.copy() - thresholded_data[data <= threshold] = 0 - thresholded_data[(data > threshold)] = 1 - return thresholded_data - - -def blur_image(data, sigma): - # Apply Gaussian blurring to the data using scipy.ndimage.gaussian_filter - blurred_data = gaussian_filter(data, sigma=sigma) - return blurred_data - - -def binarize_image(data, lower_threshold=4, upper_threshold=4, binary_value=255): - # Perform binarization using NumPy operations - binarized_data = np.zeros_like(data) - binarized_data[(data >= lower_threshold) & (data <= upper_threshold)] = binary_value - return binarized_data - - -def padding(imageA, imageB, threshold, padding, invert=False): - # Create a mask for positions that require padding - if invert: - mask = imageB != threshold - else: - mask = imageB == threshold - - # Update 'imageA' using the mask and padding value - imageA[mask] = padding - return imageA - - -def refineFusionResults(data, alfa): - data = np.round(data) - - hrt = threshold_image(blur_image(binarize_image(data, 1, 4), alfa), 130) - rvendo = threshold_image(blur_image(binarize_image(data, 4, 4), alfa), 130) - lvepi = threshold_image(blur_image(binarize_image(data, 1, 2), alfa), 115) - lvendo = threshold_image(blur_image(binarize_image(data, 1, 1), alfa), 
130) - - hrt = padding(hrt, hrt, 1, 4) - rvendo = padding(hrt, rvendo, 1, 4) - lvepi = padding(rvendo, lvepi, 1, 2) - data_final = padding(lvepi, lvendo, 1, 1) - return data_final +import nibabel as nib +import numpy as np +from scipy.ndimage import gaussian_filter + + +def crop_3Dimage(seg, image_sa, center, size, affine_matrix=None): + """Crop a 3D image using a bounding box centred at (c0, c1, c2) with specified size (size0, size1, size2)""" + c0, c1, c2 = center + size0, size1, size2 = size + S_seg = tuple(seg.shape) + S0, S1, S2 = S_seg[0], S_seg[1], S_seg[2] + r0, r1, r2 = int(size0 / 2), int(size1 / 2), int(size2 / 2) + start0, end0 = c0 - r0, c0 + r0 + start1, end1 = c1 - r1, c1 + r1 + start2, end2 = c2 - r2, c2 + r2 + start0_, end0_ = max(start0, 0), min(end0, S0) + start1_, end1_ = max(start1, 0), min(end1, S1) + start2_, end2_ = max(start2, 0), min(end2, S2) + crop = seg[start0_:end0_, start1_:end1_, start2_:end2_] + crop_img = image_sa[start0_:end0_, start1_:end1_, start2_:end2_] + if crop_img.ndim == 3: + crop_img = np.pad( + crop_img, + ( + (start0_ - start0, end0 - end0_), + (start1_ - start1, end1 - end1_), + (start2_ - start2, end2 - end2_), + ), + "constant", + ) + crop = np.pad( + crop, + ( + (start0_ - start0, end0 - end0_), + (start1_ - start1, end1 - end1_), + (start2_ - start2, end2 - end2_), + ), + "constant", + ) + else: + crop_img = np.pad( + crop_img, + ( + (start0_ - start0, end0 - end0_), + (start1_ - start1, end1 - end1_), + (start2_ - start2, end2 - end2_), + (0, 0), + ), + "constant", + ) + crop = np.pad( + crop, + ( + (start0_ - start0, end0 - end0_), + (start1_ - start1, end1 - end1_), + (start2_ - start2, end2 - end2_), + (0, 0), + ), + "constant", + ) + if affine_matrix is None: + return crop, crop_img + else: + R, b = affine_matrix[0:3, 0:3], affine_matrix[0:3, -1] + affine_matrix[0:3, -1] = R.dot(np.array([c0 - r0, c1 - r1, c2 - r2])) + b + return crop, crop_img, affine_matrix + + +def np_categorical_dice(pred, truth, k): + """Dice overlap metric for label k""" + A = (pred == k).astype(np.float32) + B = (truth == k).astype(np.float32) + return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B)) + + +def np_mean_dice(pred, truth): + """Dice mean metric""" + dsc = [] + for k in np.unique(truth)[1:]: + dsc.append(np_categorical_dice(pred, truth, k)) + return np.mean(dsc) + + +def combine_labels(input_paths, pad=-1, seed=None): + def get_most_popular(count_map): + return max(count_map, key=count_map.get) + + def is_equivocal(count_map): + return len(set(count_map.values())) > 1 + + def decide_on_tie(count_map, rng): + max_count = max(count_map.values()) + tied_labels = [ + label for label, count in count_map.items() if count == max_count + ] + return rng.choice(tied_labels) + + def calculate_counts(input_paths, output_shape): + counts = [{} for _ in range(np.prod(output_shape))] + for input_path in input_paths: + input_image = nib.load(input_path).get_fdata().astype(np.int32) + contended_voxel_indices = np.where( + np.logical_and( + output != input_image, + np.logical_or(output > pad, input_image > pad), + ) + ) + idx = np.ravel_multi_index(contended_voxel_indices, output_shape) + labels = input_image[contended_voxel_indices] + _, counts_per_label = np.unique(idx, return_counts=True) + for idx, label, count in zip(idx, labels, counts_per_label): + counts[idx][label] = counts[idx].get(label, 0) + count + return counts + + output_image = nib.load(input_paths[0]) + output_data = output_image.get_fdata().astype(np.uint8) + output_shape = tuple(output_data.shape) + 
unanimous_mask = np.ones(output_shape, dtype=np.uint8) + output = output_data.copy() + counts = calculate_counts(input_paths, output_shape) + contended_voxel_indices = np.where(unanimous_mask == 0) + idx = np.ravel_multi_index(contended_voxel_indices, output_shape) + for idx, (z, y, x) in zip(idx, np.transpose(contended_voxel_indices)): + output[z, y, x] = get_most_popular(counts[idx]) + if seed is not None: + rng = np.random.default_rng(seed) + else: + rng = np.random.default_rng() + equivocal_voxel_indices = np.where(unanimous_mask == 0) + idx = np.ravel_multi_index(equivocal_voxel_indices, output_shape) + unique_indices, counts_per_voxel = np.unique(idx, return_counts=True) + for idx, (z, y, x) in zip(unique_indices, np.transpose(equivocal_voxel_indices)): + if is_equivocal(counts[idx]): + output[z, y, x] = decide_on_tie(counts[idx], rng) + return output + + +def threshold_image(data, threshold=130): + # Perform thresholding using NumPy operations + thresholded_data = data.copy() + thresholded_data[data <= threshold] = 0 + thresholded_data[(data > threshold)] = 1 + return thresholded_data + + +def blur_image(data, sigma): + # Apply Gaussian blurring to the data using scipy.ndimage.gaussian_filter + blurred_data = gaussian_filter(data, sigma=sigma) + return blurred_data + + +def binarize_image(data, lower_threshold=4, upper_threshold=4, binary_value=255): + # Perform binarization using NumPy operations + binarized_data = np.zeros_like(data) + binarized_data[(data >= lower_threshold) & (data <= upper_threshold)] = binary_value + return binarized_data + + +def padding(imageA, imageB, threshold, padding, invert=False): + # Create a mask for positions that require padding + if invert: + mask = imageB != threshold + else: + mask = imageB == threshold + + # Update 'imageA' using the mask and padding value + imageA[mask] = padding + return imageA + + +def refineFusionResults(data, alfa): + data = np.round(data) + + hrt = threshold_image(blur_image(binarize_image(data, 1, 4), alfa), 130) + rvendo = threshold_image(blur_image(binarize_image(data, 4, 4), alfa), 130) + lvepi = threshold_image(blur_image(binarize_image(data, 1, 2), alfa), 115) + lvendo = threshold_image(blur_image(binarize_image(data, 1, 1), alfa), 130) + + hrt = padding(hrt, hrt, 1, 4) + rvendo = padding(hrt, rvendo, 1, 4) + lvepi = padding(rvendo, lvepi, 1, 2) + data_final = padding(lvepi, lvendo, 1, 1) + return data_final diff --git a/jointContribution/HighResolution/util/paddle_aux.py b/jointContribution/HighResolution/util/paddle_aux.py index d11f7d8de6..28ce6909c4 100644 --- a/jointContribution/HighResolution/util/paddle_aux.py +++ b/jointContribution/HighResolution/util/paddle_aux.py @@ -1,63 +1,63 @@ -import paddle - - -def min_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.minimum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.minimum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.min(self, *args, **kwargs), paddle.argmin( - self, *args, **kwargs - ) - else: - ret = paddle.min(self, *args, **kwargs) - - return ret - - -def max_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.maximum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.maximum(self, *args, **kwargs) - else: - if "dim" in kwargs: - 
kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.max(self, *args, **kwargs), paddle.argmax( - self, *args, **kwargs - ) - else: - ret = paddle.max(self, *args, **kwargs) - - return ret - - -setattr(paddle.Tensor, "min", min_class_func) -setattr(paddle.Tensor, "max", max_class_func) - - -def mul(self, *args, **kwargs): - if "other" in kwargs: - y = kwargs["other"] - elif "y" in kwargs: - y = kwargs["y"] - else: - y = args[0] - - if not isinstance(y, paddle.Tensor): - y = paddle.to_tensor(y) - - return paddle.multiply(self, y.astype(self.dtype)) - - -setattr(paddle.Tensor, "mul", mul) -setattr(paddle.Tensor, "multiply", mul) +import paddle + + +def min_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.minimum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.minimum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.min(self, *args, **kwargs), paddle.argmin( + self, *args, **kwargs + ) + else: + ret = paddle.min(self, *args, **kwargs) + + return ret + + +def max_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.maximum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.maximum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.max(self, *args, **kwargs), paddle.argmax( + self, *args, **kwargs + ) + else: + ret = paddle.max(self, *args, **kwargs) + + return ret + + +setattr(paddle.Tensor, "min", min_class_func) +setattr(paddle.Tensor, "max", max_class_func) + + +def mul(self, *args, **kwargs): + if "other" in kwargs: + y = kwargs["other"] + elif "y" in kwargs: + y = kwargs["y"] + else: + y = args[0] + + if not isinstance(y, paddle.Tensor): + y = paddle.to_tensor(y) + + return paddle.multiply(self, y.astype(self.dtype)) + + +setattr(paddle.Tensor, "mul", mul) +setattr(paddle.Tensor, "multiply", mul) diff --git a/jointContribution/HighResolution/util/pre_process.py b/jointContribution/HighResolution/util/pre_process.py index 731d331942..e191a2cd1f 100644 --- a/jointContribution/HighResolution/util/pre_process.py +++ b/jointContribution/HighResolution/util/pre_process.py @@ -1,64 +1,64 @@ -from dataclasses import dataclass - -import ffd.register as ffd_register -import paddle -import pyvista as pv -from deepali.core import PathStr - - -@dataclass -class DeepaliFFDRuntimeArgs: - """Dataclass packing registration arguments""" - - target_img: PathStr - source_img: PathStr - target_seg: PathStr = None - source_seg: PathStr = None - config: PathStr = None - output_transform: PathStr = None - warped_img: PathStr = None - warped_seg: PathStr = None - device: str = "cuda" - debug_dir: PathStr = None - debug: int = 0 - verbose: int = 0 - log_level: str = "WARNING" - - -def register_with_deepali( - target_img_file: PathStr = None, - source_img_file: PathStr = None, - target_seg_file: PathStr = None, - source_seg_file: PathStr = None, - target_mesh_file: PathStr = None, - ffd_params_file: PathStr = None, - output_transform_path: PathStr = None, - warped_img_path: PathStr = None, - warped_mesh_path: PathStr = None, - warped_seg_path: PathStr = None, -): - """Register two images using FFD with GPU-enabled Deepali and transform the mesh.""" - args = 
DeepaliFFDRuntimeArgs( - target_img=target_img_file, - source_img=source_img_file, - target_seg=target_seg_file, - source_seg=source_seg_file, - config=ffd_params_file, - output_transform=output_transform_path, - warped_img=warped_img_path, - warped_seg=warped_seg_path, - ) - ffd_register.init(args) - transform = ffd_register.register_func(args) - if target_mesh_file is not None: - warp_transform_on_mesh(transform, target_mesh_file, warped_mesh_path) - return transform - - -def warp_transform_on_mesh(transform, target_mesh_file, warped_mesh_path): - target_mesh = pv.read(target_mesh_file) - target_points = paddle.to_tensor(data=target_mesh.points).unsqueeze(axis=0) - target_points = target_points.to(device=transform.place) - warped_target_points = transform.points(target_points, axes="grid") - target_mesh.points = warped_target_points.squeeze(axis=0).detach().cpu().numpy() - target_mesh.save(warped_mesh_path) +from dataclasses import dataclass + +import ffd.register as ffd_register +import paddle +import pyvista as pv +from deepali.core import PathStr + + +@dataclass +class DeepaliFFDRuntimeArgs: + """Dataclass packing registration arguments""" + + target_img: PathStr + source_img: PathStr + target_seg: PathStr = None + source_seg: PathStr = None + config: PathStr = None + output_transform: PathStr = None + warped_img: PathStr = None + warped_seg: PathStr = None + device: str = "cuda" + debug_dir: PathStr = None + debug: int = 0 + verbose: int = 0 + log_level: str = "WARNING" + + +def register_with_deepali( + target_img_file: PathStr = None, + source_img_file: PathStr = None, + target_seg_file: PathStr = None, + source_seg_file: PathStr = None, + target_mesh_file: PathStr = None, + ffd_params_file: PathStr = None, + output_transform_path: PathStr = None, + warped_img_path: PathStr = None, + warped_mesh_path: PathStr = None, + warped_seg_path: PathStr = None, +): + """Register two images using FFD with GPU-enabled Deepali and transform the mesh.""" + args = DeepaliFFDRuntimeArgs( + target_img=target_img_file, + source_img=source_img_file, + target_seg=target_seg_file, + source_seg=source_seg_file, + config=ffd_params_file, + output_transform=output_transform_path, + warped_img=warped_img_path, + warped_seg=warped_seg_path, + ) + ffd_register.init(args) + transform = ffd_register.register_func(args) + if target_mesh_file is not None: + warp_transform_on_mesh(transform, target_mesh_file, warped_mesh_path) + return transform + + +def warp_transform_on_mesh(transform, target_mesh_file, warped_mesh_path): + target_mesh = pv.read(target_mesh_file) + target_points = paddle.to_tensor(data=target_mesh.points).unsqueeze(axis=0) + target_points = target_points.to(device=transform.place) + warped_target_points = transform.points(target_points, axes="grid") + target_mesh.points = warped_target_points.squeeze(axis=0).detach().cpu().numpy() + target_mesh.save(warped_mesh_path) diff --git a/jointContribution/HighResolution/util/utils.py b/jointContribution/HighResolution/util/utils.py index 776e1b17eb..ac4b927299 100644 --- a/jointContribution/HighResolution/util/utils.py +++ b/jointContribution/HighResolution/util/utils.py @@ -1,7 +1,7 @@ -import os - - -def setup_dir(dir_path): - if not os.path.exists(dir_path): - os.makedirs(dir_path) - return dir_path +import os + + +def setup_dir(dir_path): + if not os.path.exists(dir_path): + os.makedirs(dir_path) + return dir_path diff --git a/jointContribution/IJCAI_2024/aminos/Extract_mesh/merge_h5.py b/jointContribution/IJCAI_2024/aminos/Extract_mesh/merge_h5.py 
index 90f51750f7..48b798a03f 100644 --- a/jointContribution/IJCAI_2024/aminos/Extract_mesh/merge_h5.py +++ b/jointContribution/IJCAI_2024/aminos/Extract_mesh/merge_h5.py @@ -1,121 +1,121 @@ -import argparse -import os - -import h5py -import numpy as np -import paddle - - -def load_ds_trackA_info(file_path, key_list): - path_trackA_ds = file_path - key_list = np.sort([int(key) for key in key_list]) - key_list = [str(key) for key in key_list] - bounds = np.loadtxt(path_trackA_ds + "/watertight_global_bounds.txt") - pressure_mean_std = paddle.to_tensor( - data=np.loadtxt(path_trackA_ds + "/train_pressure_min_std.txt") - ).to("float32") - voxel_mean_std = paddle.to_tensor( - data=np.loadtxt(path_trackA_ds + "/voxel_mean_std.txt") - ).to("float32") - pos_mean_std = np.loadtxt(path_trackA_ds + "/pos_mean_std.txt") - normal_mean_std = np.loadtxt(path_trackA_ds + "/normal_mean_std.txt") - PN_mean_std = paddle.to_tensor( - data=np.concatenate([pos_mean_std, normal_mean_std], axis=-1) - ).to("float32") - physics_info = { - "key_list": key_list, - "bounds": bounds, - "voxel_mean_std": voxel_mean_std, - "pressure_mean_std": pressure_mean_std, - "PN_mean_std": PN_mean_std, - } - return physics_info - - -def load_ds_trackB_info(file_path, key_list): - path_trackB_ds = file_path - key_list = np.sort([int(key) for key in key_list]) - key_list = [str(key) for key in key_list] - pressure_mean_std = paddle.to_tensor( - data=np.loadtxt(path_trackB_ds + "/train_pressure_mean_std.txt") - ).to("float32") - bounds = np.loadtxt(path_trackB_ds + "/global_bounds.txt") - voxel_mean_std = paddle.to_tensor( - data=np.loadtxt(path_trackB_ds + "/voxel_mean_std.txt") - ).to("float32") - PNA_mean_std = paddle.to_tensor( - data=np.loadtxt(path_trackB_ds + "/PosNormalArea_mean_std.txt") - ).to("float32") - PN_mean_std = PNA_mean_std[:, :6] - physics_info = { - "key_list": key_list, - "bounds": bounds, - "voxel_mean_std": voxel_mean_std, - "pressure_mean_std": pressure_mean_std, - "PN_mean_std": PN_mean_std, - } - return physics_info - - -def load_extra_info(file_path, key_list, track_type="A"): - if track_type == "A": - physics_info = load_ds_trackA_info(file_path, key_list) - else: - physics_info = load_ds_trackB_info(file_path, key_list) - return physics_info - - -def add_physics_info_to_group(group, physics_info): - for key, value in physics_info.items(): - group.create_dataset(key, data=value) - - -def merge_h5_files(fileA_path, fileB_path, merged_file_path): - with h5py.File(fileA_path, "r") as fileA, h5py.File( - fileB_path, "r" - ) as fileB, h5py.File(merged_file_path, "w") as merged_file: - key_list_A = list(fileA.keys()) - key_list_B = list(fileB.keys()) - physics_info_A = load_extra_info( - os.path.dirname(fileA_path), key_list_A, track_type="A" - ) - physics_info_B = load_extra_info( - os.path.dirname(fileB_path), key_list_B, track_type="B" - ) - for key in fileA.keys(): - group = fileA[key] - new_key = "A_" + key - merged_file.copy(group, new_key) - add_physics_info_to_group(merged_file[new_key], physics_info_A) - for key in fileB.keys(): - group = fileB[key] - new_key = "B_" + key - merged_file.copy(group, new_key) - add_physics_info_to_group(merged_file[new_key], physics_info_B) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="train / test a paddle model to predict frames" - ) - parser.add_argument( - "--A_dir", - default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/converted_dataset/trackA/test.h5", - type=str, - help="", - ) - parser.add_argument( - "--B_dir", - 
default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/converted_dataset/trackB/test.h5", - type=str, - help="", - ) - parser.add_argument( - "--C_dir", - default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/converted_dataset/trackC/k1.h5", - type=str, - help="", - ) - params = parser.parse_args() - merge_h5_files(params.A_dir, params.B_dir, params.C_dir) -print("done") +import argparse +import os + +import h5py +import numpy as np +import paddle + + +def load_ds_trackA_info(file_path, key_list): + path_trackA_ds = file_path + key_list = np.sort([int(key) for key in key_list]) + key_list = [str(key) for key in key_list] + bounds = np.loadtxt(path_trackA_ds + "/watertight_global_bounds.txt") + pressure_mean_std = paddle.to_tensor( + data=np.loadtxt(path_trackA_ds + "/train_pressure_min_std.txt") + ).to("float32") + voxel_mean_std = paddle.to_tensor( + data=np.loadtxt(path_trackA_ds + "/voxel_mean_std.txt") + ).to("float32") + pos_mean_std = np.loadtxt(path_trackA_ds + "/pos_mean_std.txt") + normal_mean_std = np.loadtxt(path_trackA_ds + "/normal_mean_std.txt") + PN_mean_std = paddle.to_tensor( + data=np.concatenate([pos_mean_std, normal_mean_std], axis=-1) + ).to("float32") + physics_info = { + "key_list": key_list, + "bounds": bounds, + "voxel_mean_std": voxel_mean_std, + "pressure_mean_std": pressure_mean_std, + "PN_mean_std": PN_mean_std, + } + return physics_info + + +def load_ds_trackB_info(file_path, key_list): + path_trackB_ds = file_path + key_list = np.sort([int(key) for key in key_list]) + key_list = [str(key) for key in key_list] + pressure_mean_std = paddle.to_tensor( + data=np.loadtxt(path_trackB_ds + "/train_pressure_mean_std.txt") + ).to("float32") + bounds = np.loadtxt(path_trackB_ds + "/global_bounds.txt") + voxel_mean_std = paddle.to_tensor( + data=np.loadtxt(path_trackB_ds + "/voxel_mean_std.txt") + ).to("float32") + PNA_mean_std = paddle.to_tensor( + data=np.loadtxt(path_trackB_ds + "/PosNormalArea_mean_std.txt") + ).to("float32") + PN_mean_std = PNA_mean_std[:, :6] + physics_info = { + "key_list": key_list, + "bounds": bounds, + "voxel_mean_std": voxel_mean_std, + "pressure_mean_std": pressure_mean_std, + "PN_mean_std": PN_mean_std, + } + return physics_info + + +def load_extra_info(file_path, key_list, track_type="A"): + if track_type == "A": + physics_info = load_ds_trackA_info(file_path, key_list) + else: + physics_info = load_ds_trackB_info(file_path, key_list) + return physics_info + + +def add_physics_info_to_group(group, physics_info): + for key, value in physics_info.items(): + group.create_dataset(key, data=value) + + +def merge_h5_files(fileA_path, fileB_path, merged_file_path): + with h5py.File(fileA_path, "r") as fileA, h5py.File( + fileB_path, "r" + ) as fileB, h5py.File(merged_file_path, "w") as merged_file: + key_list_A = list(fileA.keys()) + key_list_B = list(fileB.keys()) + physics_info_A = load_extra_info( + os.path.dirname(fileA_path), key_list_A, track_type="A" + ) + physics_info_B = load_extra_info( + os.path.dirname(fileB_path), key_list_B, track_type="B" + ) + for key in fileA.keys(): + group = fileA[key] + new_key = "A_" + key + merged_file.copy(group, new_key) + add_physics_info_to_group(merged_file[new_key], physics_info_A) + for key in fileB.keys(): + group = fileB[key] + new_key = "B_" + key + merged_file.copy(group, new_key) + add_physics_info_to_group(merged_file[new_key], physics_info_B) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="train / test a paddle model to predict frames" + ) + 
parser.add_argument( + "--A_dir", + default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/converted_dataset/trackA/test.h5", + type=str, + help="", + ) + parser.add_argument( + "--B_dir", + default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/converted_dataset/trackB/test.h5", + type=str, + help="", + ) + parser.add_argument( + "--C_dir", + default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/converted_dataset/trackC/k1.h5", + type=str, + help="", + ) + params = parser.parse_args() + merge_h5_files(params.A_dir, params.B_dir, params.C_dir) +print("done") diff --git a/jointContribution/IJCAI_2024/aminos/Extract_mesh/parse_dataset.py b/jointContribution/IJCAI_2024/aminos/Extract_mesh/parse_dataset.py index c18ff03e52..f5f3c03a8a 100644 --- a/jointContribution/IJCAI_2024/aminos/Extract_mesh/parse_dataset.py +++ b/jointContribution/IJCAI_2024/aminos/Extract_mesh/parse_dataset.py @@ -1,678 +1,678 @@ -import argparse -import math -import multiprocessing -import os -import shutil -import subprocess -import sys -import threading - -import h5py -import numpy as np -import paddle -import trimesh -import utils.DS_utils as DS_utils -from paddle_aux import scatter_paddle -from utils.knn import knn_graph -from utils.knn import knn_scipy_batched -from utils.utilities import calc_cell_centered_with_node_attr -from utils.utilities import calc_node_centered_with_cell_attr - -sys.path.append( - os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) -) - - -sys.stdout.flush() -lock = threading.Lock() - - -class Basemanager: - def polygon_area(self, vertices): - x = vertices[:, 0] - y = vertices[:, 1] - return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) - - def triangles_to_faces(self, faces): - """Computes mesh edges from triangles.""" - cells_face_node = paddle.concat( - x=( - faces[:, 0:2], - faces[:, 1:3], - paddle.stack(x=(faces[:, 2], faces[:, 0]), axis=1), - ), - axis=0, - ) - return cells_face_node.numpy() - - def position_relative_to_line_paddle(A, B, angle_c): - A = paddle.to_tensor(data=A, dtype="float64") - B = paddle.to_tensor(data=B, dtype="float64") - angle_c = paddle.to_tensor(data=angle_c, dtype="float64") - direction_vector = paddle.to_tensor( - data=[ - paddle.cos(x=angle_c * math.pi / 180.0), - paddle.sin(x=angle_c * math.pi / 180.0), - ], - dtype="float64", - ) - vector_AB = B - A - cross_product = ( - direction_vector[0] * vector_AB[:, 1] - - direction_vector[1] * vector_AB[:, 0] - ) - mask = cross_product > 0 - return mask.view(-1, 1) - - def is_convex(self, polygon): - n = len(polygon) - for i in range(n): - a = polygon[i] - b = polygon[(i + 1) % n] - c = polygon[(i + 2) % n] - ba = a - b - bc = c - b - cross_product = np.cross(ba, bc) - if cross_product < 0: - return False - return True - - def reorder_polygon(self, polygon): - centroid = np.mean(polygon, axis=0) - sorted_polygon = sorted( - polygon, key=lambda p: np.arctan2(p[1] - centroid[1], p[0] - centroid[0]) - ) - return np.array(sorted_polygon) - - def ensure_counterclockwise(self, cells, mesh_pos): - for i, cell in enumerate(cells): - vertices = mesh_pos[cell] - if not self.is_convex(vertices): - vertices = self.reorder_polygon(vertices) - sorted_indices = sorted( - range(len(cell)), - key=lambda k: list(map(list, vertices)).index( - list(mesh_pos[cell][k]) - ), - ) - cells[i] = np.array(cell)[sorted_indices] - return cell - - def is_equal(self, x, pivot): - """ - Determine if a value x is between two other values a and b. 
- - Parameters: - - a (float or int): The lower bound. - - b (float or int): The upper bound. - - x (float or int): The value to check. - - Returns: - - (bool): True if x is between a and b (inclusive), False otherwise. - """ - a = abs(pivot) - float(1e-08) - b = abs(pivot) + float(1e-08) - if a <= abs(x) <= b: - return True - else: - return False - - def make_edges_unique(self, cells_face_node, cells_node, cells_index): - """Computes mesh edges from triangles.""" - cells_face_node_biased = ( - paddle.sort(x=cells_face_node, axis=1), - paddle.argsort(x=cells_face_node, axis=1), - )[0] - senders, receivers = cells_face_node_biased[:, 0], cells_face_node_biased[:, 1] - packed_edges = paddle.stack(x=(senders, receivers), axis=1) - singleway_edge_index = paddle.unique( - x=packed_edges, return_inverse=False, return_counts=False, axis=0 - ).to("int64") - cells_face = [] - edge_indice = paddle.arange(end=tuple(singleway_edge_index.shape)[0]) - for i_edge in range(tuple(cells_face_node.shape)[0]): - current_edge = ( - paddle.sort(x=cells_face_node[i_edge : i_edge + 1, :], axis=-1), - paddle.argsort(x=cells_face_node[i_edge : i_edge + 1, :], axis=-1), - )[0] - mask = (singleway_edge_index == current_edge).astype("bool").all(axis=-1) - cells_face.append(edge_indice[mask]) - cells_face = paddle.concat(x=cells_face).view(-1, 1) - if tuple(cells_face.shape)[0] != tuple(cells_face_node.shape)[0]: - raise ValueError("cells_face shape is not equal to cells_face_node shape") - return { - "edge_index": singleway_edge_index, - "cells_face": cells_face, - "cells_face_node_unbiased": cells_face_node, - "cells_face_node_biased": packed_edges, - } - - def create_neighbor_matrix(self, vertex_coords, edges): - """ - Create a matrix representing the neighbors for each vertex in a graph. - - Parameters: - vertex_coords (Tensor): A tensor of shape [n, 2] representing n vertex coordinates. - edges (Tensor): A tensor of shape [m, 2] representing m edges, - where each edge is a pair of vertex indices. - - Returns: - Tensor: A matrix where each row corresponds to a vertex and contains the indices of its neighbors. 
- """ - edges_mod = edges % tuple(vertex_coords.shape)[0] - counts = paddle.zeros(shape=tuple(vertex_coords.shape)[0], dtype="int64") - counts.put_along_axis_( - axis=0, - indices=edges_mod.view(-1), - values=paddle.ones_like(x=edges_mod.view(-1)), - reduce="add", - ) - max_neighbors = counts.max() - neighbor_matrix = paddle.full( - shape=(tuple(vertex_coords.shape)[0], max_neighbors), - fill_value=-1, - dtype="int64", - ) - current_count = paddle.zeros(shape=tuple(vertex_coords.shape)[0], dtype="int64") - for edge in edges_mod: - start, end = edge - neighbor_matrix[start, current_count[start]] = end - current_count[start] += 1 - neighbor_matrix[end, current_count[end]] = start - current_count[end] += 1 - return neighbor_matrix, max_neighbors - - def generate_directed_edges(self, cells_node): - edges = [] - for i in range(len(cells_node)): - for j in range(i + 1, len(cells_node)): - edge = [cells_node[i], cells_node[j]] - reversed_edge = [cells_node[j], cells_node[i]] - if reversed_edge not in edges: - edges.append(edge) - return edges - - def compose_edge_index_x( - self, face_node, cells_face_node_biased, cells_node, cells_index - ): - face_node_x = face_node.clone() - for i in range(cells_index.max() + 1): - mask_cell = (cells_index == i).view(-1) - current_cells_face_node_biased = cells_face_node_biased[mask_cell] - current_cells_node = cells_node[mask_cell] - all_possible_edges, _ = paddle.sort( - x=paddle.to_tensor( - data=self.generate_directed_edges(current_cells_node) - ), - axis=-1, - ), paddle.argsort( - x=paddle.to_tensor( - data=self.generate_directed_edges(current_cells_node) - ), - axis=-1, - ) - for edge in all_possible_edges: - edge = edge.unsqueeze(axis=0) - if (edge.unsqueeze(axis=0) == current_cells_face_node_biased).astype( - "bool" - ).all(axis=-1).sum() < 1: - face_node_x = paddle.concat(x=(face_node_x, edge), axis=0) - return face_node_x - - def convert_to_tensors(self, input_dict): - if isinstance(input_dict, dict): - for key in input_dict.keys(): - value = input_dict[key] - if isinstance(value, np.ndarray): - input_dict[key] = paddle.to_tensor(data=value) - elif not isinstance(value, paddle.Tensor): - input_dict[key] = paddle.to_tensor(data=value) - elif isinstance(input_dict, list): - for i in range(len(input_dict)): - value = input_dict[i] - if isinstance(value, np.ndarray): - input_dict[i] = paddle.to_tensor(data=value) - elif not isinstance(value, paddle.Tensor): - input_dict[i] = paddle.to_tensor(data=value) - return input_dict - - def convert_to_numpy(self, input_dict): - if isinstance(input_dict, dict): - for key in input_dict.keys(): - value = input_dict[key] - if isinstance(value, paddle.Tensor): - input_dict[key] = value.numpy() - elif not isinstance(value, paddle.Tensor): - input_dict[key] = paddle.to_tensor(data=value).numpy() - elif isinstance(input_dict, list): - for i in range(len(input)): - value = input_dict[i] - if isinstance(value, paddle.Tensor): - input_dict[i] = value.numpy() - elif not isinstance(value, paddle.Tensor): - input_dict[i] = paddle.to_tensor(data=value).numpy() - return input_dict - - def compute_unit_normals( - self, - mesh_pos: paddle.Tensor, - cells_node: paddle.Tensor, - centroid: paddle.Tensor = None, - ): - cells_node = cells_node.reshape(-1, 3) - A = mesh_pos[cells_node[:, 0]] - B = mesh_pos[cells_node[:, 1]] - C = mesh_pos[cells_node[:, 2]] - AB = B - A - AC = C - A - N = paddle.cross(x=AB, y=AC, axis=-1) - norm = paddle.linalg.norm(x=N, axis=-1, keepdim=True) - unit_N = N / norm - geo_center = paddle.mean(x=centroid, 
axis=0, keepdim=True) - outward = centroid - geo_center - mask_outward = (unit_N * outward).sum(axis=-1, keepdim=True) > 0 - unit_N = paddle.where(condition=mask_outward.repeat(1, 3), x=unit_N, y=-unit_N) - return unit_N - - -class PlyMesh(Basemanager): - """ - Tecplot .dat file is only supported with Tobias`s airfoil dataset ,No more data file supported - """ - - def __init__(self, path=None): - mesh_pos, cells_node = DS_utils.load_mesh_ply_vtk(path["mesh_file_path"]) - self.mesh_pos = mesh_pos - self.cells_node = cells_node - cells_face_node = self.triangles_to_faces(paddle.to_tensor(data=cells_node)) - cells_index = ( - paddle.arange(end=tuple(cells_node.shape)[0]) - .view(-1, 1) - .repeat(1, 3) - .numpy() - ) - try: - pressuredata = np.expand_dims(np.load(path["data_file_path"]), axis=1) - except Exception: - pressuredata = np.zeros((tuple(mesh_pos.shape)[0], 1), dtype=np.float32) - if tuple(mesh_pos.shape)[0] < 10000: - self.mesh_info = { - "node|pos": mesh_pos, - "cell|cells_node": cells_node, - "cells_node": cells_node.reshape(-1, 1), - "cells_index": cells_index.reshape(-1, 1), - "cells_face_node": cells_face_node, - "node|pressure": np.concatenate( - (pressuredata[0:16], pressuredata[112:]), axis=0 - ), - } - else: - mesh_idx = ( - path["mesh_file_path"].rsplit("/")[-1].split("_")[1].split(".")[0] - ) - if path["split"] == "train": - centroid = np.load( - os.path.join(path["label_dir"], f"centroid_{mesh_idx}.npy") - ) - else: - infer_dir = path["mesh_file_path"].rsplit("/", maxsplit=1)[0] - centroid = np.load(os.path.join(infer_dir, f"centroid_{mesh_idx}.npy")) - self.mesh_info = { - "node|pos": mesh_pos, - "cell|cells_node": cells_node, - "cells_node": cells_node.reshape(-1, 1), - "cells_index": cells_index.reshape(-1, 1), - "cells_face_node": cells_face_node, - "cell|pressure": pressuredata, - "cell|centroid": centroid, - } - self.path = path - - def extract_mesh_A(self, data_index=None): - """ - all input dataset values should be paddle tensor object - """ - dataset = self.convert_to_tensors(self.mesh_info) - cells_node = dataset["cells_node"][:, 0] - cells_index = dataset["cells_index"][:, 0] - """>>>compute centroid crds>>>""" - mesh_pos = dataset["node|pos"] - centroid = calc_cell_centered_with_node_attr( - node_attr=dataset["node|pos"], - cells_node=cells_node, - cells_index=cells_index, - reduce="mean", - ) - dataset["centroid"] = centroid - """<<>> compose face and face_center_pos >>> """ - decomposed_cells = self.make_edges_unique( - dataset["cells_face_node"], cells_node.view(-1, 1), cells_index.view(-1, 1) - ) - cells_face_node = decomposed_cells["cells_face_node_biased"] - cells_face = decomposed_cells["cells_face"] - dataset["cells_face"] = cells_face - face_node = decomposed_cells["edge_index"].T - dataset["face_node"] = face_node - face_center_pos = (mesh_pos[face_node[0]] + mesh_pos[face_node[1]]) / 2.0 - dataset["face_center_pos"] = face_center_pos - """ <<< compose face <<< """ - """ >>> compute face length >>>""" - face_length = paddle.linalg.norm( - x=mesh_pos[face_node[0]] - mesh_pos[face_node[1]], axis=1, keepdim=True - ) - dataset["face_length"] = face_length - """ <<< compute face length <<<""" - """ >>> compute cells_face and neighbor_cell >>> """ - senders_cell = calc_node_centered_with_cell_attr( - cell_attr=cells_index.view(-1), - cells_node=cells_face.view(-1), - cells_index=cells_index.view(-1), - reduce="max", - map=False, - ) - recivers_cell = calc_node_centered_with_cell_attr( - cell_attr=cells_index.view(-1), - cells_node=cells_face.view(-1), - 
cells_index=cells_index.view(-1), - reduce="min", - map=False, - ) - neighbour_cell = paddle.stack(x=(recivers_cell, senders_cell), axis=0) - dataset["neighbour_cell"] = neighbour_cell.to("int64") - """ <<< compute cells_face and neighbor_cell <<< """ - """ >>> compute cell_area >>> """ - cells_node_reshape = cells_node.reshape(-1, 3) - cells_face_node = paddle.stack( - x=( - cells_node_reshape[:, 0:2], - cells_node_reshape[:, 1:3], - paddle.stack( - x=(cells_node_reshape[:, 2], cells_node_reshape[:, 0]), axis=1 - ), - ), - axis=1, - ) - cells_length = paddle.linalg.norm( - x=mesh_pos[cells_face_node[:, :, 0]] - mesh_pos[cells_face_node[:, :, 1]], - axis=-1, - keepdim=True, - ) - circum = cells_length.sum(axis=1, keepdim=True) * 0.5 - mul = ( - circum[:, 0] - * (circum - cells_length)[:, 0] - * (circum - cells_length)[:, 1] - * (circum - cells_length)[:, 2] - ) - valid_cells_area = paddle.sqrt(x=mul) - dataset["cells_area"] = valid_cells_area - """ <<< compute cell_area <<< """ - """ >>> unit normal vector >>> """ - unv = self.compute_unit_normals(mesh_pos, cells_node, centroid=centroid) - node_unv = calc_node_centered_with_cell_attr( - cell_attr=unv[cells_index], - cells_node=cells_node.view(-1, 1), - cells_index=cells_index.view(-1, 1), - reduce="mean", - map=False, - ) - dataset["unit_norm_v"] = node_unv - """ <<< unit normal vector <<< """ - bounds = np.loadtxt( - os.path.join(self.path["aux_dir"], "watertight_global_bounds.txt") - ) - pos = dataset["node|pos"] - grid, sdf = DS_utils.compute_sdf_grid( - pos, dataset["cells_node"].reshape(-1, 3), bounds, [64, 64, 64] - ) - ply_file = self.path["mesh_file_path"] - ao = DS_utils.compute_ao(ply_file) - dataset["node|ao"] = ao - output_dict = { - "node|pos": dataset["node|pos"], - "node|pressure": dataset["node|pressure"], - "node|unit_norm_v": dataset["unit_norm_v"], - "node|ao": dataset["node|ao"], - "face|face_node": dataset["face_node"], - "face|face_center_pos": dataset["face_center_pos"], - "face|face_length": dataset["face_length"], - "face|neighbour_cell": dataset["neighbour_cell"], - "cell|cells_area": dataset["cells_area"], - "cell|centroid": dataset["centroid"], - "cells_node": dataset["cells_node"], - "cells_index": dataset["cells_index"], - "cells_face": dataset["cells_face"], - "voxel|grid": grid, - "voxel|sdf": sdf[:, None], - } - h5_dataset = output_dict - print("{0}th mesh has been extracted".format(data_index)) - return h5_dataset - - def extract_mesh_B(self, data_index=None): - """ - all input dataset values should be paddle tensor object - """ - dataset = self.convert_to_tensors(self.mesh_info) - car_model = trimesh.load(self.path["mesh_file_path"], force="mesh") - vertices = car_model.vertices - normals = car_model.vertex_normals - faces = car_model.faces - _, cells_node = vertices, faces - self.triangles_to_faces(paddle.to_tensor(data=cells_node)) - (paddle.arange(end=tuple(cells_node.shape)[0]).view(-1, 1).repeat(1, 3).numpy()) - areadata = np.zeros([tuple(dataset["cell|centroid"].shape)[0], 1]) - centroiddata = dataset["cell|centroid"] - centroid = centroiddata - device = "cuda:0" if paddle.device.cuda.device_count() >= 1 else "cpu" - normals_cuda = paddle.to_tensor(data=normals).to("float32").to(device) - points_cuda = paddle.to_tensor(data=vertices).to("float32").to(device) - centroid_cuda = centroiddata.to("float32").to(device) - knn_idx = knn_scipy_batched(points_cuda, centroid_cuda, 4) - centroid_normals = paddle.full( - shape=[tuple(centroid.shape)[0], 3], fill_value=0.0 - ) - centroid_normals = scatter_paddle( 
- normals_cuda[knn_idx[1]], index=knn_idx[0], dim=0, reduce="mean" - ).cpu() - factor = paddle.linalg.norm(x=centroid_normals, axis=-1, keepdim=True) - centroid_normals = centroid_normals / factor - centroid_normals = centroid_normals.cpu().numpy() - bounds = np.loadtxt(os.path.join(path["aux_dir"], "global_bounds.txt")) - pos = dataset["node|pos"] - grid, sdf = DS_utils.compute_sdf_grid( - pos, dataset["cells_node"].reshape(-1, 3), bounds, [64, 64, 64] - ) - pos_tensor = paddle.to_tensor(data=centroid, dtype="float32") - edge_index, _ = knn_graph(pos_tensor, k=4).sort(dim=0) - edge_index = paddle.unique(x=edge_index, axis=1) - edge_index_np = edge_index.numpy() - output_dict = { - "node|pos": dataset["node|pos"].cpu().numpy(), - "cell|cells_area": areadata, - "cell|centroid": centroid.cpu().numpy(), - "cells_node": dataset["cells_node"].cpu().numpy(), - "cell|unit_norm_v": centroid_normals, - "cell|pressure": dataset["cell|pressure"].cpu().numpy(), - "voxel|grid": grid, - "voxel|sdf": sdf, - "face|neighbour_cell": edge_index_np, - } - h5_dataset = output_dict - print("{0}th mesh has been extracted".format(data_index)) - return h5_dataset - - -def random_samples_no_replacement(arr, num_samples, num_iterations): - if num_samples * num_iterations > len(arr): - raise ValueError( - "Number of samples multiplied by iterations cannot be greater than the length of the array." - ) - samples = [] - arr_copy = arr.copy() - for _ in range(num_iterations): - sample_indices = np.random.choice(len(arr_copy), num_samples, replace=False) - sample = arr_copy[sample_indices] - samples.append(sample) - arr_copy = np.delete(arr_copy, sample_indices) - return samples, arr_copy - - -def process_file(file_index, file_path, path, queue): - file_name = os.path.basename(file_path) - mesh_name = file_name - path["mesh_file_path"] = file_path - if path["mesh_file_path"].endswith("ply"): - mesh_index = int("".join(char for char in mesh_name if char.isdigit())) - data_name = f"press_{''.join(char for char in mesh_name if char.isdigit())}.npy" - data_file_path = f"{path['label_dir']}/{data_name}" - path["mesh_file_path"] = file_path - path["data_file_path"] = data_file_path - data = PlyMesh(path=path) - if data.mesh_pos.shape[0] < 10000: - h5_data = data.extract_mesh_A(data_index=mesh_index) - else: - h5_data = data.extract_mesh_B(data_index=mesh_index) - else: - raise ValueError(f"wrong mesh file at {path['mesh_file_path']}") - queue.put((h5_data, mesh_index)) - - -def string_to_floats(s): - return np.asarray([float(ord(c)) for c in s]) - - -def floats_to_string(floats): - return "".join([chr(int(f)) for f in floats]) - - -def writer_process(queue, split, path): - os.makedirs(path["h5_save_path"], exist_ok=True) - h5_writer = h5py.File(f"{path['h5_save_path']}/{split}.h5", "w") - sdf_list = [] - while True: - h5_data, file_index = queue.get() - if h5_data is None: - break - if str(file_index) in h5_writer: - continue - current_traj = h5_writer.create_group(str(file_index)) - for key, value in h5_data.items(): - current_traj.create_dataset(key, data=value) - if key == "voxel|sdf": - sdf_list.append(paddle.to_tensor(data=value)) - print("{0}th mesh has been writed".format(file_index)) - if split == "train": - voxel_mean, voxel_std = DS_utils.compute_mean_std(sdf_list) - np.savetxt( - f"{path['h5_save_path']}/voxel_mean_std.txt", - np.array([voxel_mean.item(), voxel_std.item()]), - ) - if path["track"] == "A": - shutil.copy( - f"{path['aux_dir']}/train_pressure_min_std.txt", - 
f"{path['h5_save_path']}/train_pressure_min_std.txt", - ) - shutil.copy( - f"{path['aux_dir']}/watertight_global_bounds.txt", - f"{path['h5_save_path']}/watertight_global_bounds.txt", - ) - else: - shutil.copy( - f"{path['aux_dir']}/train_pressure_mean_std.txt", - f"{path['h5_save_path']}/train_pressure_mean_std.txt", - ) - shutil.copy( - f"{path['aux_dir']}/global_bounds.txt", - f"{path['h5_save_path']}/global_bounds.txt", - ) - h5_writer.close() - - -def run_command(tfrecord_file, idx_file): - subprocess.run( - ["python", "-m", "tfrecord.tools.tfrecord2idx", tfrecord_file, idx_file], - check=True, - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="train / test a paddle model to predict frames" - ) - parser.add_argument( - "--msh_dir", - default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/rawDataset/trackB/Train/Feature/dataset_3/train_mesh_0603", - type=str, - help="", - ) - parser.add_argument( - "--label_dir", - default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/rawDataset/trackB/Train/Label/dataset_2", - type=str, - help="", - ) - parser.add_argument( - "--aux_dir", - default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/rawDataset/trackB/Test/Testset_track_B/Auxiliary", - type=str, - help="", - ) - parser.add_argument( - "--h5_save_path", - default="/lvm_data/litianyu/mycode-new/3D-ShapeNet-car/src/Dataset/converted_dataset_test/trackB", - type=str, - help="", - ) - parser.add_argument("--split", default="test", type=str, help="") - parser.add_argument("--track", default="B", type=str, help="") - params = parser.parse_args() - debug_file_path = None - path = { - "msh_dir": params.msh_dir, - "label_dir": params.label_dir, - "aux_dir": params.aux_dir, - "h5_save_path": params.h5_save_path, - "split": params.split, - "track": params.track, - "plot": False, - } - os.makedirs(path["h5_save_path"], exist_ok=True) - total_samples = 0 - file_paths_list = [] - for subdir, _, files in os.walk(path["msh_dir"]): - for data_name in files: - if data_name.endswith(".ply"): - file_paths_list.append(os.path.join(subdir, data_name)) - np.random.shuffle(file_paths_list) - print(f"Total samples: {len(file_paths_list)}") - if debug_file_path is not None: - multi_process = 1 - elif len(file_paths_list) < multiprocessing.cpu_count(): - multi_process = len(file_paths_list) - else: - multi_process = int(multiprocessing.cpu_count() / 2) - global_data_index = 0 - with multiprocessing.Pool(multi_process) as pool: - manager = multiprocessing.Manager() - queue = manager.Queue() - writer_proc = multiprocessing.Process( - target=writer_process, args=(queue, params.split, path) - ) - writer_proc.start() - if debug_file_path is not None: - file_path = debug_file_path - results = [pool.apply_async(process_file, args=(0, file_path, path, queue))] - else: - results = [ - pool.apply_async( - process_file, args=(file_index, file_path, path, queue) - ) - for file_index, file_path in enumerate(file_paths_list) - ] - for res in results: - res.get() - queue.put((None, None)) - writer_proc.join() - print("Fininsh parsing train dataset calc mean and std") +import argparse +import math +import multiprocessing +import os +import shutil +import subprocess +import sys +import threading + +import h5py +import numpy as np +import paddle +import trimesh +import utils.DS_utils as DS_utils +from paddle_aux import scatter_paddle +from utils.knn import knn_graph +from utils.knn import knn_scipy_batched +from utils.utilities import calc_cell_centered_with_node_attr +from utils.utilities 
import calc_node_centered_with_cell_attr + +sys.path.append( + os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)) +) + + +sys.stdout.flush() +lock = threading.Lock() + + +class Basemanager: + def polygon_area(self, vertices): + x = vertices[:, 0] + y = vertices[:, 1] + return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + def triangles_to_faces(self, faces): + """Computes mesh edges from triangles.""" + cells_face_node = paddle.concat( + x=( + faces[:, 0:2], + faces[:, 1:3], + paddle.stack(x=(faces[:, 2], faces[:, 0]), axis=1), + ), + axis=0, + ) + return cells_face_node.numpy() + + def position_relative_to_line_paddle(A, B, angle_c): + A = paddle.to_tensor(data=A, dtype="float64") + B = paddle.to_tensor(data=B, dtype="float64") + angle_c = paddle.to_tensor(data=angle_c, dtype="float64") + direction_vector = paddle.to_tensor( + data=[ + paddle.cos(x=angle_c * math.pi / 180.0), + paddle.sin(x=angle_c * math.pi / 180.0), + ], + dtype="float64", + ) + vector_AB = B - A + cross_product = ( + direction_vector[0] * vector_AB[:, 1] + - direction_vector[1] * vector_AB[:, 0] + ) + mask = cross_product > 0 + return mask.view(-1, 1) + + def is_convex(self, polygon): + n = len(polygon) + for i in range(n): + a = polygon[i] + b = polygon[(i + 1) % n] + c = polygon[(i + 2) % n] + ba = a - b + bc = c - b + cross_product = np.cross(ba, bc) + if cross_product < 0: + return False + return True + + def reorder_polygon(self, polygon): + centroid = np.mean(polygon, axis=0) + sorted_polygon = sorted( + polygon, key=lambda p: np.arctan2(p[1] - centroid[1], p[0] - centroid[0]) + ) + return np.array(sorted_polygon) + + def ensure_counterclockwise(self, cells, mesh_pos): + for i, cell in enumerate(cells): + vertices = mesh_pos[cell] + if not self.is_convex(vertices): + vertices = self.reorder_polygon(vertices) + sorted_indices = sorted( + range(len(cell)), + key=lambda k: list(map(list, vertices)).index( + list(mesh_pos[cell][k]) + ), + ) + cells[i] = np.array(cell)[sorted_indices] + return cell + + def is_equal(self, x, pivot): + """ + Determine if a value x is between two other values a and b. + + Parameters: + - a (float or int): The lower bound. + - b (float or int): The upper bound. + - x (float or int): The value to check. + + Returns: + - (bool): True if x is between a and b (inclusive), False otherwise. 
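+
+        Note: despite the parameter names documented above, the implementation
+        receives (x, pivot) and checks whether abs(x) lies within abs(pivot) +/- 1e-08,
+        i.e. an absolute-value comparison with a fixed tolerance.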
+ """ + a = abs(pivot) - float(1e-08) + b = abs(pivot) + float(1e-08) + if a <= abs(x) <= b: + return True + else: + return False + + def make_edges_unique(self, cells_face_node, cells_node, cells_index): + """Computes mesh edges from triangles.""" + cells_face_node_biased = ( + paddle.sort(x=cells_face_node, axis=1), + paddle.argsort(x=cells_face_node, axis=1), + )[0] + senders, receivers = cells_face_node_biased[:, 0], cells_face_node_biased[:, 1] + packed_edges = paddle.stack(x=(senders, receivers), axis=1) + singleway_edge_index = paddle.unique( + x=packed_edges, return_inverse=False, return_counts=False, axis=0 + ).to("int64") + cells_face = [] + edge_indice = paddle.arange(end=tuple(singleway_edge_index.shape)[0]) + for i_edge in range(tuple(cells_face_node.shape)[0]): + current_edge = ( + paddle.sort(x=cells_face_node[i_edge : i_edge + 1, :], axis=-1), + paddle.argsort(x=cells_face_node[i_edge : i_edge + 1, :], axis=-1), + )[0] + mask = (singleway_edge_index == current_edge).astype("bool").all(axis=-1) + cells_face.append(edge_indice[mask]) + cells_face = paddle.concat(x=cells_face).view(-1, 1) + if tuple(cells_face.shape)[0] != tuple(cells_face_node.shape)[0]: + raise ValueError("cells_face shape is not equal to cells_face_node shape") + return { + "edge_index": singleway_edge_index, + "cells_face": cells_face, + "cells_face_node_unbiased": cells_face_node, + "cells_face_node_biased": packed_edges, + } + + def create_neighbor_matrix(self, vertex_coords, edges): + """ + Create a matrix representing the neighbors for each vertex in a graph. + + Parameters: + vertex_coords (Tensor): A tensor of shape [n, 2] representing n vertex coordinates. + edges (Tensor): A tensor of shape [m, 2] representing m edges, + where each edge is a pair of vertex indices. + + Returns: + Tensor: A matrix where each row corresponds to a vertex and contains the indices of its neighbors. 
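+
+        Example (toy input, for illustration only): with 3 vertices and
+        edges = [[0, 1], [1, 2]], the neighbor counts are [1, 2, 1], so
+        max_neighbors == 2 and the returned matrix is
+        [[1, -1], [0, 2], [1, -1]], where -1 pads vertices that have fewer
+        than max_neighbors neighbors. Note that the method returns the tuple
+        (neighbor_matrix, max_neighbors), not just the matrix.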
+ """ + edges_mod = edges % tuple(vertex_coords.shape)[0] + counts = paddle.zeros(shape=tuple(vertex_coords.shape)[0], dtype="int64") + counts.put_along_axis_( + axis=0, + indices=edges_mod.view(-1), + values=paddle.ones_like(x=edges_mod.view(-1)), + reduce="add", + ) + max_neighbors = counts.max() + neighbor_matrix = paddle.full( + shape=(tuple(vertex_coords.shape)[0], max_neighbors), + fill_value=-1, + dtype="int64", + ) + current_count = paddle.zeros(shape=tuple(vertex_coords.shape)[0], dtype="int64") + for edge in edges_mod: + start, end = edge + neighbor_matrix[start, current_count[start]] = end + current_count[start] += 1 + neighbor_matrix[end, current_count[end]] = start + current_count[end] += 1 + return neighbor_matrix, max_neighbors + + def generate_directed_edges(self, cells_node): + edges = [] + for i in range(len(cells_node)): + for j in range(i + 1, len(cells_node)): + edge = [cells_node[i], cells_node[j]] + reversed_edge = [cells_node[j], cells_node[i]] + if reversed_edge not in edges: + edges.append(edge) + return edges + + def compose_edge_index_x( + self, face_node, cells_face_node_biased, cells_node, cells_index + ): + face_node_x = face_node.clone() + for i in range(cells_index.max() + 1): + mask_cell = (cells_index == i).view(-1) + current_cells_face_node_biased = cells_face_node_biased[mask_cell] + current_cells_node = cells_node[mask_cell] + all_possible_edges, _ = paddle.sort( + x=paddle.to_tensor( + data=self.generate_directed_edges(current_cells_node) + ), + axis=-1, + ), paddle.argsort( + x=paddle.to_tensor( + data=self.generate_directed_edges(current_cells_node) + ), + axis=-1, + ) + for edge in all_possible_edges: + edge = edge.unsqueeze(axis=0) + if (edge.unsqueeze(axis=0) == current_cells_face_node_biased).astype( + "bool" + ).all(axis=-1).sum() < 1: + face_node_x = paddle.concat(x=(face_node_x, edge), axis=0) + return face_node_x + + def convert_to_tensors(self, input_dict): + if isinstance(input_dict, dict): + for key in input_dict.keys(): + value = input_dict[key] + if isinstance(value, np.ndarray): + input_dict[key] = paddle.to_tensor(data=value) + elif not isinstance(value, paddle.Tensor): + input_dict[key] = paddle.to_tensor(data=value) + elif isinstance(input_dict, list): + for i in range(len(input_dict)): + value = input_dict[i] + if isinstance(value, np.ndarray): + input_dict[i] = paddle.to_tensor(data=value) + elif not isinstance(value, paddle.Tensor): + input_dict[i] = paddle.to_tensor(data=value) + return input_dict + + def convert_to_numpy(self, input_dict): + if isinstance(input_dict, dict): + for key in input_dict.keys(): + value = input_dict[key] + if isinstance(value, paddle.Tensor): + input_dict[key] = value.numpy() + elif not isinstance(value, paddle.Tensor): + input_dict[key] = paddle.to_tensor(data=value).numpy() + elif isinstance(input_dict, list): + for i in range(len(input)): + value = input_dict[i] + if isinstance(value, paddle.Tensor): + input_dict[i] = value.numpy() + elif not isinstance(value, paddle.Tensor): + input_dict[i] = paddle.to_tensor(data=value).numpy() + return input_dict + + def compute_unit_normals( + self, + mesh_pos: paddle.Tensor, + cells_node: paddle.Tensor, + centroid: paddle.Tensor = None, + ): + cells_node = cells_node.reshape(-1, 3) + A = mesh_pos[cells_node[:, 0]] + B = mesh_pos[cells_node[:, 1]] + C = mesh_pos[cells_node[:, 2]] + AB = B - A + AC = C - A + N = paddle.cross(x=AB, y=AC, axis=-1) + norm = paddle.linalg.norm(x=N, axis=-1, keepdim=True) + unit_N = N / norm + geo_center = paddle.mean(x=centroid, 
axis=0, keepdim=True) + outward = centroid - geo_center + mask_outward = (unit_N * outward).sum(axis=-1, keepdim=True) > 0 + unit_N = paddle.where(condition=mask_outward.repeat(1, 3), x=unit_N, y=-unit_N) + return unit_N + + +class PlyMesh(Basemanager): + """ + Tecplot .dat file is only supported with Tobias`s airfoil dataset ,No more data file supported + """ + + def __init__(self, path=None): + mesh_pos, cells_node = DS_utils.load_mesh_ply_vtk(path["mesh_file_path"]) + self.mesh_pos = mesh_pos + self.cells_node = cells_node + cells_face_node = self.triangles_to_faces(paddle.to_tensor(data=cells_node)) + cells_index = ( + paddle.arange(end=tuple(cells_node.shape)[0]) + .view(-1, 1) + .repeat(1, 3) + .numpy() + ) + try: + pressuredata = np.expand_dims(np.load(path["data_file_path"]), axis=1) + except Exception: + pressuredata = np.zeros((tuple(mesh_pos.shape)[0], 1), dtype=np.float32) + if tuple(mesh_pos.shape)[0] < 10000: + self.mesh_info = { + "node|pos": mesh_pos, + "cell|cells_node": cells_node, + "cells_node": cells_node.reshape(-1, 1), + "cells_index": cells_index.reshape(-1, 1), + "cells_face_node": cells_face_node, + "node|pressure": np.concatenate( + (pressuredata[0:16], pressuredata[112:]), axis=0 + ), + } + else: + mesh_idx = ( + path["mesh_file_path"].rsplit("/")[-1].split("_")[1].split(".")[0] + ) + if path["split"] == "train": + centroid = np.load( + os.path.join(path["label_dir"], f"centroid_{mesh_idx}.npy") + ) + else: + infer_dir = path["mesh_file_path"].rsplit("/", maxsplit=1)[0] + centroid = np.load(os.path.join(infer_dir, f"centroid_{mesh_idx}.npy")) + self.mesh_info = { + "node|pos": mesh_pos, + "cell|cells_node": cells_node, + "cells_node": cells_node.reshape(-1, 1), + "cells_index": cells_index.reshape(-1, 1), + "cells_face_node": cells_face_node, + "cell|pressure": pressuredata, + "cell|centroid": centroid, + } + self.path = path + + def extract_mesh_A(self, data_index=None): + """ + all input dataset values should be paddle tensor object + """ + dataset = self.convert_to_tensors(self.mesh_info) + cells_node = dataset["cells_node"][:, 0] + cells_index = dataset["cells_index"][:, 0] + """>>>compute centroid crds>>>""" + mesh_pos = dataset["node|pos"] + centroid = calc_cell_centered_with_node_attr( + node_attr=dataset["node|pos"], + cells_node=cells_node, + cells_index=cells_index, + reduce="mean", + ) + dataset["centroid"] = centroid + """<<>> compose face and face_center_pos >>> """ + decomposed_cells = self.make_edges_unique( + dataset["cells_face_node"], cells_node.view(-1, 1), cells_index.view(-1, 1) + ) + cells_face_node = decomposed_cells["cells_face_node_biased"] + cells_face = decomposed_cells["cells_face"] + dataset["cells_face"] = cells_face + face_node = decomposed_cells["edge_index"].T + dataset["face_node"] = face_node + face_center_pos = (mesh_pos[face_node[0]] + mesh_pos[face_node[1]]) / 2.0 + dataset["face_center_pos"] = face_center_pos + """ <<< compose face <<< """ + """ >>> compute face length >>>""" + face_length = paddle.linalg.norm( + x=mesh_pos[face_node[0]] - mesh_pos[face_node[1]], axis=1, keepdim=True + ) + dataset["face_length"] = face_length + """ <<< compute face length <<<""" + """ >>> compute cells_face and neighbor_cell >>> """ + senders_cell = calc_node_centered_with_cell_attr( + cell_attr=cells_index.view(-1), + cells_node=cells_face.view(-1), + cells_index=cells_index.view(-1), + reduce="max", + map=False, + ) + recivers_cell = calc_node_centered_with_cell_attr( + cell_attr=cells_index.view(-1), + cells_node=cells_face.view(-1), + 
cells_index=cells_index.view(-1), + reduce="min", + map=False, + ) + neighbour_cell = paddle.stack(x=(recivers_cell, senders_cell), axis=0) + dataset["neighbour_cell"] = neighbour_cell.to("int64") + """ <<< compute cells_face and neighbor_cell <<< """ + """ >>> compute cell_area >>> """ + cells_node_reshape = cells_node.reshape(-1, 3) + cells_face_node = paddle.stack( + x=( + cells_node_reshape[:, 0:2], + cells_node_reshape[:, 1:3], + paddle.stack( + x=(cells_node_reshape[:, 2], cells_node_reshape[:, 0]), axis=1 + ), + ), + axis=1, + ) + cells_length = paddle.linalg.norm( + x=mesh_pos[cells_face_node[:, :, 0]] - mesh_pos[cells_face_node[:, :, 1]], + axis=-1, + keepdim=True, + ) + circum = cells_length.sum(axis=1, keepdim=True) * 0.5 + mul = ( + circum[:, 0] + * (circum - cells_length)[:, 0] + * (circum - cells_length)[:, 1] + * (circum - cells_length)[:, 2] + ) + valid_cells_area = paddle.sqrt(x=mul) + dataset["cells_area"] = valid_cells_area + """ <<< compute cell_area <<< """ + """ >>> unit normal vector >>> """ + unv = self.compute_unit_normals(mesh_pos, cells_node, centroid=centroid) + node_unv = calc_node_centered_with_cell_attr( + cell_attr=unv[cells_index], + cells_node=cells_node.view(-1, 1), + cells_index=cells_index.view(-1, 1), + reduce="mean", + map=False, + ) + dataset["unit_norm_v"] = node_unv + """ <<< unit normal vector <<< """ + bounds = np.loadtxt( + os.path.join(self.path["aux_dir"], "watertight_global_bounds.txt") + ) + pos = dataset["node|pos"] + grid, sdf = DS_utils.compute_sdf_grid( + pos, dataset["cells_node"].reshape(-1, 3), bounds, [64, 64, 64] + ) + ply_file = self.path["mesh_file_path"] + ao = DS_utils.compute_ao(ply_file) + dataset["node|ao"] = ao + output_dict = { + "node|pos": dataset["node|pos"], + "node|pressure": dataset["node|pressure"], + "node|unit_norm_v": dataset["unit_norm_v"], + "node|ao": dataset["node|ao"], + "face|face_node": dataset["face_node"], + "face|face_center_pos": dataset["face_center_pos"], + "face|face_length": dataset["face_length"], + "face|neighbour_cell": dataset["neighbour_cell"], + "cell|cells_area": dataset["cells_area"], + "cell|centroid": dataset["centroid"], + "cells_node": dataset["cells_node"], + "cells_index": dataset["cells_index"], + "cells_face": dataset["cells_face"], + "voxel|grid": grid, + "voxel|sdf": sdf[:, None], + } + h5_dataset = output_dict + print("{0}th mesh has been extracted".format(data_index)) + return h5_dataset + + def extract_mesh_B(self, data_index=None): + """ + all input dataset values should be paddle tensor object + """ + dataset = self.convert_to_tensors(self.mesh_info) + car_model = trimesh.load(self.path["mesh_file_path"], force="mesh") + vertices = car_model.vertices + normals = car_model.vertex_normals + faces = car_model.faces + _, cells_node = vertices, faces + self.triangles_to_faces(paddle.to_tensor(data=cells_node)) + (paddle.arange(end=tuple(cells_node.shape)[0]).view(-1, 1).repeat(1, 3).numpy()) + areadata = np.zeros([tuple(dataset["cell|centroid"].shape)[0], 1]) + centroiddata = dataset["cell|centroid"] + centroid = centroiddata + device = "cuda:0" if paddle.device.cuda.device_count() >= 1 else "cpu" + normals_cuda = paddle.to_tensor(data=normals).to("float32").to(device) + points_cuda = paddle.to_tensor(data=vertices).to("float32").to(device) + centroid_cuda = centroiddata.to("float32").to(device) + knn_idx = knn_scipy_batched(points_cuda, centroid_cuda, 4) + centroid_normals = paddle.full( + shape=[tuple(centroid.shape)[0], 3], fill_value=0.0 + ) + centroid_normals = scatter_paddle( 
+ normals_cuda[knn_idx[1]], index=knn_idx[0], dim=0, reduce="mean" + ).cpu() + factor = paddle.linalg.norm(x=centroid_normals, axis=-1, keepdim=True) + centroid_normals = centroid_normals / factor + centroid_normals = centroid_normals.cpu().numpy() + bounds = np.loadtxt(os.path.join(path["aux_dir"], "global_bounds.txt")) + pos = dataset["node|pos"] + grid, sdf = DS_utils.compute_sdf_grid( + pos, dataset["cells_node"].reshape(-1, 3), bounds, [64, 64, 64] + ) + pos_tensor = paddle.to_tensor(data=centroid, dtype="float32") + edge_index, _ = knn_graph(pos_tensor, k=4).sort(dim=0) + edge_index = paddle.unique(x=edge_index, axis=1) + edge_index_np = edge_index.numpy() + output_dict = { + "node|pos": dataset["node|pos"].cpu().numpy(), + "cell|cells_area": areadata, + "cell|centroid": centroid.cpu().numpy(), + "cells_node": dataset["cells_node"].cpu().numpy(), + "cell|unit_norm_v": centroid_normals, + "cell|pressure": dataset["cell|pressure"].cpu().numpy(), + "voxel|grid": grid, + "voxel|sdf": sdf, + "face|neighbour_cell": edge_index_np, + } + h5_dataset = output_dict + print("{0}th mesh has been extracted".format(data_index)) + return h5_dataset + + +def random_samples_no_replacement(arr, num_samples, num_iterations): + if num_samples * num_iterations > len(arr): + raise ValueError( + "Number of samples multiplied by iterations cannot be greater than the length of the array." + ) + samples = [] + arr_copy = arr.copy() + for _ in range(num_iterations): + sample_indices = np.random.choice(len(arr_copy), num_samples, replace=False) + sample = arr_copy[sample_indices] + samples.append(sample) + arr_copy = np.delete(arr_copy, sample_indices) + return samples, arr_copy + + +def process_file(file_index, file_path, path, queue): + file_name = os.path.basename(file_path) + mesh_name = file_name + path["mesh_file_path"] = file_path + if path["mesh_file_path"].endswith("ply"): + mesh_index = int("".join(char for char in mesh_name if char.isdigit())) + data_name = f"press_{''.join(char for char in mesh_name if char.isdigit())}.npy" + data_file_path = f"{path['label_dir']}/{data_name}" + path["mesh_file_path"] = file_path + path["data_file_path"] = data_file_path + data = PlyMesh(path=path) + if data.mesh_pos.shape[0] < 10000: + h5_data = data.extract_mesh_A(data_index=mesh_index) + else: + h5_data = data.extract_mesh_B(data_index=mesh_index) + else: + raise ValueError(f"wrong mesh file at {path['mesh_file_path']}") + queue.put((h5_data, mesh_index)) + + +def string_to_floats(s): + return np.asarray([float(ord(c)) for c in s]) + + +def floats_to_string(floats): + return "".join([chr(int(f)) for f in floats]) + + +def writer_process(queue, split, path): + os.makedirs(path["h5_save_path"], exist_ok=True) + h5_writer = h5py.File(f"{path['h5_save_path']}/{split}.h5", "w") + sdf_list = [] + while True: + h5_data, file_index = queue.get() + if h5_data is None: + break + if str(file_index) in h5_writer: + continue + current_traj = h5_writer.create_group(str(file_index)) + for key, value in h5_data.items(): + current_traj.create_dataset(key, data=value) + if key == "voxel|sdf": + sdf_list.append(paddle.to_tensor(data=value)) + print("{0}th mesh has been writed".format(file_index)) + if split == "train": + voxel_mean, voxel_std = DS_utils.compute_mean_std(sdf_list) + np.savetxt( + f"{path['h5_save_path']}/voxel_mean_std.txt", + np.array([voxel_mean.item(), voxel_std.item()]), + ) + if path["track"] == "A": + shutil.copy( + f"{path['aux_dir']}/train_pressure_min_std.txt", + 
f"{path['h5_save_path']}/train_pressure_min_std.txt", + ) + shutil.copy( + f"{path['aux_dir']}/watertight_global_bounds.txt", + f"{path['h5_save_path']}/watertight_global_bounds.txt", + ) + else: + shutil.copy( + f"{path['aux_dir']}/train_pressure_mean_std.txt", + f"{path['h5_save_path']}/train_pressure_mean_std.txt", + ) + shutil.copy( + f"{path['aux_dir']}/global_bounds.txt", + f"{path['h5_save_path']}/global_bounds.txt", + ) + h5_writer.close() + + +def run_command(tfrecord_file, idx_file): + subprocess.run( + ["python", "-m", "tfrecord.tools.tfrecord2idx", tfrecord_file, idx_file], + check=True, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="train / test a paddle model to predict frames" + ) + parser.add_argument( + "--msh_dir", + default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/rawDataset/trackB/Train/Feature/dataset_3/train_mesh_0603", + type=str, + help="", + ) + parser.add_argument( + "--label_dir", + default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/rawDataset/trackB/Train/Label/dataset_2", + type=str, + help="", + ) + parser.add_argument( + "--aux_dir", + default="/home/xiaoli/project/3D-ShapeNet-car/src/Dataset/rawDataset/trackB/Test/Testset_track_B/Auxiliary", + type=str, + help="", + ) + parser.add_argument( + "--h5_save_path", + default="/lvm_data/litianyu/mycode-new/3D-ShapeNet-car/src/Dataset/converted_dataset_test/trackB", + type=str, + help="", + ) + parser.add_argument("--split", default="test", type=str, help="") + parser.add_argument("--track", default="B", type=str, help="") + params = parser.parse_args() + debug_file_path = None + path = { + "msh_dir": params.msh_dir, + "label_dir": params.label_dir, + "aux_dir": params.aux_dir, + "h5_save_path": params.h5_save_path, + "split": params.split, + "track": params.track, + "plot": False, + } + os.makedirs(path["h5_save_path"], exist_ok=True) + total_samples = 0 + file_paths_list = [] + for subdir, _, files in os.walk(path["msh_dir"]): + for data_name in files: + if data_name.endswith(".ply"): + file_paths_list.append(os.path.join(subdir, data_name)) + np.random.shuffle(file_paths_list) + print(f"Total samples: {len(file_paths_list)}") + if debug_file_path is not None: + multi_process = 1 + elif len(file_paths_list) < multiprocessing.cpu_count(): + multi_process = len(file_paths_list) + else: + multi_process = int(multiprocessing.cpu_count() / 2) + global_data_index = 0 + with multiprocessing.Pool(multi_process) as pool: + manager = multiprocessing.Manager() + queue = manager.Queue() + writer_proc = multiprocessing.Process( + target=writer_process, args=(queue, params.split, path) + ) + writer_proc.start() + if debug_file_path is not None: + file_path = debug_file_path + results = [pool.apply_async(process_file, args=(0, file_path, path, queue))] + else: + results = [ + pool.apply_async( + process_file, args=(file_index, file_path, path, queue) + ) + for file_index, file_path in enumerate(file_paths_list) + ] + for res in results: + res.get() + queue.put((None, None)) + writer_proc.join() + print("Fininsh parsing train dataset calc mean and std") diff --git a/jointContribution/IJCAI_2024/aminos/NN/GNN/FiniteVolumeGN/EPDbackbone.py b/jointContribution/IJCAI_2024/aminos/NN/GNN/FiniteVolumeGN/EPDbackbone.py index 9e058ea03c..a6d69cd50a 100644 --- a/jointContribution/IJCAI_2024/aminos/NN/GNN/FiniteVolumeGN/EPDbackbone.py +++ b/jointContribution/IJCAI_2024/aminos/NN/GNN/FiniteVolumeGN/EPDbackbone.py @@ -1,263 +1,263 @@ -import paddle -import pgl -from dataset.Load_mesh 
import CustomGraphData -from utils.normalization import Normalizer -from utils.paddle_aux import scatter_paddle -from utils.utilities import copy_geometric_data -from utils.utilities import decompose_and_trans_node_attr_to_cell_attr_graph - -from .blocks import EdgeBlock -from .blocks import NodeBlock - - -def build_mlp( - in_size, hidden_size, out_size, drop_out=True, lay_norm=True, dropout_prob=0.2 -): - if drop_out: - module = paddle.nn.Sequential( - paddle.nn.Linear(in_features=in_size, out_features=hidden_size), - paddle.nn.Dropout(p=dropout_prob), - paddle.nn.GELU(), - paddle.nn.Linear(in_features=hidden_size, out_features=hidden_size), - paddle.nn.Dropout(p=dropout_prob), - paddle.nn.GELU(), - paddle.nn.Linear(in_features=hidden_size, out_features=out_size), - ) - else: - module = paddle.nn.Sequential( - paddle.nn.Linear(in_features=in_size, out_features=hidden_size), - paddle.nn.GELU(), - paddle.nn.Linear(in_features=hidden_size, out_features=hidden_size), - paddle.nn.GELU(), - paddle.nn.Linear(in_features=hidden_size, out_features=out_size), - ) - if lay_norm: - return paddle.nn.Sequential( - module, paddle.nn.LayerNorm(normalized_shape=out_size) - ) - return module - - -def build_mlp_test( - in_size, - hidden_size, - out_size, - drop_out=False, - lay_norm=True, - dropout_prob=0.2, - specify_hidden_layer_num=2, -): - layers = [] - layers.append(paddle.nn.Linear(in_features=in_size, out_features=hidden_size)) - if drop_out: - layers.append(paddle.nn.Dropout(p=dropout_prob)) - layers.append(paddle.nn.GELU()) - for i in range(specify_hidden_layer_num - 1): - layers.append( - paddle.nn.Linear(in_features=hidden_size, out_features=hidden_size) - ) - if drop_out: - layers.append(paddle.nn.Dropout(p=dropout_prob)) - layers.append(paddle.nn.GELU()) - layers.append(paddle.nn.Linear(in_features=hidden_size, out_features=out_size)) - if lay_norm: - layers.append(paddle.nn.LayerNorm(normalized_shape=out_size)) - return paddle.nn.Sequential(*layers) - - -class Encoder(paddle.nn.Layer): - def __init__( - self, - node_input_size=128, - edge_input_size=128, - cell_input_size=128, - hidden_size=128, - attention=False, - ): - super(Encoder, self).__init__() - self.eb_encoder = build_mlp( - edge_input_size, hidden_size, int(hidden_size), drop_out=False - ) - self.nb_encoder = build_mlp( - node_input_size, hidden_size, int(hidden_size), drop_out=False - ) - self.attention = attention - self.scale = paddle.sqrt( - x=paddle.to_tensor(data=hidden_size, dtype=paddle.get_default_dtype()) - ) - - def forward(self, graph_node, graph_cell): - ( - node_attr, - edge_index, - edge_attr, - face, - _, - _, - ) = decompose_and_trans_node_attr_to_cell_attr_graph( - graph_node, has_changed_node_attr_to_cell_attr=False - ) - node_ = self.nb_encoder(node_attr) - edge_ = self.eb_encoder(edge_attr) - ret = CustomGraphData( - x=node_, - edge_attr=edge_, - edge_index=edge_index, - face=face, - num_graphs=graph_node.num_graph, - batch=graph_node.batch, - ) - ret.keys = ["x", "num_graphs", "edge_index", "batch", "edge_attr"] - return ret, edge_, node_ - - -class GraphSCA3D(paddle.nn.Layer): - def __init__(self, channel, reduction=2): - super().__init__() - self.channel_excitation = paddle.nn.Sequential( - paddle.nn.Linear( - in_features=channel, out_features=int(channel // reduction) - ), - paddle.nn.ReLU(), - paddle.nn.Linear( - in_features=int(channel // reduction), out_features=channel - ), - ) - self.spatial_se = pgl.nn.GCNConv(input_size=channel, output_size=1) - - def forward(self, x, batch, edge_index): - BN, C = 
tuple(x.shape) - chn_se = scatter_paddle(x, index=batch, dim=0, reduce="mean").view(-1, C) - chn_se = paddle.nn.functional.sigmoid(x=self.channel_excitation(chn_se)) - chn_se = x * chn_se[batch] - spa_se = paddle.nn.functional.sigmoid(x=self.spatial_se(x, edge_index)) - spa_se = x * spa_se - net_out = spa_se + x + chn_se - return net_out - - -class GnBlock(paddle.nn.Layer): - def __init__(self, hidden_size=128, drop_out=False, attention=True, MultiHead=1): - super(GnBlock, self).__init__() - eb_input_dim = int(3 * hidden_size) - nb_input_dim = int(hidden_size + hidden_size // 2.0) - self.nb_module = NodeBlock( - hidden_size, - hidden_size, - attention=attention, - MultiHead=MultiHead, - custom_func=build_mlp( - nb_input_dim, hidden_size, int(hidden_size), drop_out=False - ), - ) - self.eb_module = EdgeBlock( - input_size=hidden_size, - custom_func=build_mlp( - eb_input_dim, hidden_size, int(hidden_size), drop_out=False - ), - ) - - def forward(self, graph_node, graph_cell=None): - graph_node_last = copy_geometric_data( - graph_node, has_changed_node_attr_to_cell_attr=True - ) - graph_node = self.eb_module(graph_node, graph_cell=None) - graph_node = self.nb_module(graph_node, graph_cell=None) - x = graph_node.x + graph_node_last.x - edge_attr = graph_node.edge_attr + graph_node_last.edge_attr - ret = CustomGraphData( - x=x, - edge_attr=edge_attr, - edge_index=graph_node.edge_index, - face=graph_node.face, - num_graphs=graph_node.num_graph, - batch=graph_node.batch, - ) - return ret - - -class Decoder(paddle.nn.Layer): - def __init__( - self, - edge_hidden_size=128, - cell_hidden_size=128, - edge_output_size=3, - cell_output_size=2, - cell_input_size=2, - node_output_size=2, - attention=False, - ): - super(Decoder, self).__init__() - self.node_decode_module = build_mlp_test( - cell_hidden_size, - cell_hidden_size, - node_output_size, - drop_out=False, - lay_norm=False, - specify_hidden_layer_num=2, - ) - - def forward(self, trans_feature=None, latent_graph_node=None): - node_attr, _, _, _, _, _ = decompose_and_trans_node_attr_to_cell_attr_graph( - latent_graph_node, has_changed_node_attr_to_cell_attr=True - ) - node_decode_attr = self.node_decode_module(node_attr) - return node_decode_attr - - -class EncoderProcesserDecoder(paddle.nn.Layer): - def __init__( - self, - message_passing_num, - cell_input_size, - edge_input_size, - node_input_size, - cell_output_size, - edge_output_size, - node_output_size, - drop_out=False, - hidden_size=128, - attention=False, - params=None, - MultiHead=1, - ): - super(EncoderProcesserDecoder, self).__init__() - self.encoder = Encoder( - node_input_size=node_input_size, - edge_input_size=edge_input_size, - cell_input_size=cell_input_size, - hidden_size=hidden_size, - attention=attention, - ) - try: - satistic_times = params.dataset_size // params.batch_size - except Exception: - satistic_times = 500 - self.node_norm = Normalizer(node_input_size, satistic_times) - self.edge_norm = Normalizer(edge_input_size, satistic_times) - GN_block_list = [] - for _ in range(message_passing_num): - GN_block_list.append( - GnBlock( - hidden_size=hidden_size, - drop_out=drop_out, - attention=attention, - MultiHead=MultiHead, - ) - ) - self.GN_block_list = paddle.nn.LayerList(sublayers=GN_block_list) - - def forward( - self, - graph_node=None, - graph_edge=None, - graph_cell=None, - params=None, - is_training=True, - ): - graph_node.x = self.node_norm(graph_node.x) - graph_node.edge_attr = self.edge_norm(graph_node.edge_attr) - latent_graph_node, _, _ = self.encoder(graph_node, 
graph_cell=graph_cell) - for model in self.GN_block_list: - latent_graph_node = model(latent_graph_node, graph_cell=graph_cell) - return latent_graph_node.x +import paddle +import pgl +from dataset.Load_mesh import CustomGraphData +from utils.normalization import Normalizer +from utils.paddle_aux import scatter_paddle +from utils.utilities import copy_geometric_data +from utils.utilities import decompose_and_trans_node_attr_to_cell_attr_graph + +from .blocks import EdgeBlock +from .blocks import NodeBlock + + +def build_mlp( + in_size, hidden_size, out_size, drop_out=True, lay_norm=True, dropout_prob=0.2 +): + if drop_out: + module = paddle.nn.Sequential( + paddle.nn.Linear(in_features=in_size, out_features=hidden_size), + paddle.nn.Dropout(p=dropout_prob), + paddle.nn.GELU(), + paddle.nn.Linear(in_features=hidden_size, out_features=hidden_size), + paddle.nn.Dropout(p=dropout_prob), + paddle.nn.GELU(), + paddle.nn.Linear(in_features=hidden_size, out_features=out_size), + ) + else: + module = paddle.nn.Sequential( + paddle.nn.Linear(in_features=in_size, out_features=hidden_size), + paddle.nn.GELU(), + paddle.nn.Linear(in_features=hidden_size, out_features=hidden_size), + paddle.nn.GELU(), + paddle.nn.Linear(in_features=hidden_size, out_features=out_size), + ) + if lay_norm: + return paddle.nn.Sequential( + module, paddle.nn.LayerNorm(normalized_shape=out_size) + ) + return module + + +def build_mlp_test( + in_size, + hidden_size, + out_size, + drop_out=False, + lay_norm=True, + dropout_prob=0.2, + specify_hidden_layer_num=2, +): + layers = [] + layers.append(paddle.nn.Linear(in_features=in_size, out_features=hidden_size)) + if drop_out: + layers.append(paddle.nn.Dropout(p=dropout_prob)) + layers.append(paddle.nn.GELU()) + for i in range(specify_hidden_layer_num - 1): + layers.append( + paddle.nn.Linear(in_features=hidden_size, out_features=hidden_size) + ) + if drop_out: + layers.append(paddle.nn.Dropout(p=dropout_prob)) + layers.append(paddle.nn.GELU()) + layers.append(paddle.nn.Linear(in_features=hidden_size, out_features=out_size)) + if lay_norm: + layers.append(paddle.nn.LayerNorm(normalized_shape=out_size)) + return paddle.nn.Sequential(*layers) + + +class Encoder(paddle.nn.Layer): + def __init__( + self, + node_input_size=128, + edge_input_size=128, + cell_input_size=128, + hidden_size=128, + attention=False, + ): + super(Encoder, self).__init__() + self.eb_encoder = build_mlp( + edge_input_size, hidden_size, int(hidden_size), drop_out=False + ) + self.nb_encoder = build_mlp( + node_input_size, hidden_size, int(hidden_size), drop_out=False + ) + self.attention = attention + self.scale = paddle.sqrt( + x=paddle.to_tensor(data=hidden_size, dtype=paddle.get_default_dtype()) + ) + + def forward(self, graph_node, graph_cell): + ( + node_attr, + edge_index, + edge_attr, + face, + _, + _, + ) = decompose_and_trans_node_attr_to_cell_attr_graph( + graph_node, has_changed_node_attr_to_cell_attr=False + ) + node_ = self.nb_encoder(node_attr) + edge_ = self.eb_encoder(edge_attr) + ret = CustomGraphData( + x=node_, + edge_attr=edge_, + edge_index=edge_index, + face=face, + num_graphs=graph_node.num_graph, + batch=graph_node.batch, + ) + ret.keys = ["x", "num_graphs", "edge_index", "batch", "edge_attr"] + return ret, edge_, node_ + + +class GraphSCA3D(paddle.nn.Layer): + def __init__(self, channel, reduction=2): + super().__init__() + self.channel_excitation = paddle.nn.Sequential( + paddle.nn.Linear( + in_features=channel, out_features=int(channel // reduction) + ), + paddle.nn.ReLU(), + 
paddle.nn.Linear( + in_features=int(channel // reduction), out_features=channel + ), + ) + self.spatial_se = pgl.nn.GCNConv(input_size=channel, output_size=1) + + def forward(self, x, batch, edge_index): + BN, C = tuple(x.shape) + chn_se = scatter_paddle(x, index=batch, dim=0, reduce="mean").view(-1, C) + chn_se = paddle.nn.functional.sigmoid(x=self.channel_excitation(chn_se)) + chn_se = x * chn_se[batch] + spa_se = paddle.nn.functional.sigmoid(x=self.spatial_se(x, edge_index)) + spa_se = x * spa_se + net_out = spa_se + x + chn_se + return net_out + + +class GnBlock(paddle.nn.Layer): + def __init__(self, hidden_size=128, drop_out=False, attention=True, MultiHead=1): + super(GnBlock, self).__init__() + eb_input_dim = int(3 * hidden_size) + nb_input_dim = int(hidden_size + hidden_size // 2.0) + self.nb_module = NodeBlock( + hidden_size, + hidden_size, + attention=attention, + MultiHead=MultiHead, + custom_func=build_mlp( + nb_input_dim, hidden_size, int(hidden_size), drop_out=False + ), + ) + self.eb_module = EdgeBlock( + input_size=hidden_size, + custom_func=build_mlp( + eb_input_dim, hidden_size, int(hidden_size), drop_out=False + ), + ) + + def forward(self, graph_node, graph_cell=None): + graph_node_last = copy_geometric_data( + graph_node, has_changed_node_attr_to_cell_attr=True + ) + graph_node = self.eb_module(graph_node, graph_cell=None) + graph_node = self.nb_module(graph_node, graph_cell=None) + x = graph_node.x + graph_node_last.x + edge_attr = graph_node.edge_attr + graph_node_last.edge_attr + ret = CustomGraphData( + x=x, + edge_attr=edge_attr, + edge_index=graph_node.edge_index, + face=graph_node.face, + num_graphs=graph_node.num_graph, + batch=graph_node.batch, + ) + return ret + + +class Decoder(paddle.nn.Layer): + def __init__( + self, + edge_hidden_size=128, + cell_hidden_size=128, + edge_output_size=3, + cell_output_size=2, + cell_input_size=2, + node_output_size=2, + attention=False, + ): + super(Decoder, self).__init__() + self.node_decode_module = build_mlp_test( + cell_hidden_size, + cell_hidden_size, + node_output_size, + drop_out=False, + lay_norm=False, + specify_hidden_layer_num=2, + ) + + def forward(self, trans_feature=None, latent_graph_node=None): + node_attr, _, _, _, _, _ = decompose_and_trans_node_attr_to_cell_attr_graph( + latent_graph_node, has_changed_node_attr_to_cell_attr=True + ) + node_decode_attr = self.node_decode_module(node_attr) + return node_decode_attr + + +class EncoderProcesserDecoder(paddle.nn.Layer): + def __init__( + self, + message_passing_num, + cell_input_size, + edge_input_size, + node_input_size, + cell_output_size, + edge_output_size, + node_output_size, + drop_out=False, + hidden_size=128, + attention=False, + params=None, + MultiHead=1, + ): + super(EncoderProcesserDecoder, self).__init__() + self.encoder = Encoder( + node_input_size=node_input_size, + edge_input_size=edge_input_size, + cell_input_size=cell_input_size, + hidden_size=hidden_size, + attention=attention, + ) + try: + satistic_times = params.dataset_size // params.batch_size + except Exception: + satistic_times = 500 + self.node_norm = Normalizer(node_input_size, satistic_times) + self.edge_norm = Normalizer(edge_input_size, satistic_times) + GN_block_list = [] + for _ in range(message_passing_num): + GN_block_list.append( + GnBlock( + hidden_size=hidden_size, + drop_out=drop_out, + attention=attention, + MultiHead=MultiHead, + ) + ) + self.GN_block_list = paddle.nn.LayerList(sublayers=GN_block_list) + + def forward( + self, + graph_node=None, + graph_edge=None, + 
graph_cell=None, + params=None, + is_training=True, + ): + graph_node.x = self.node_norm(graph_node.x) + graph_node.edge_attr = self.edge_norm(graph_node.edge_attr) + latent_graph_node, _, _ = self.encoder(graph_node, graph_cell=graph_cell) + for model in self.GN_block_list: + latent_graph_node = model(latent_graph_node, graph_cell=graph_cell) + return latent_graph_node.x diff --git a/jointContribution/IJCAI_2024/aminos/NN/GNN/FiniteVolumeGN/blocks.py b/jointContribution/IJCAI_2024/aminos/NN/GNN/FiniteVolumeGN/blocks.py index a6c3bd955d..4c9980b26d 100644 --- a/jointContribution/IJCAI_2024/aminos/NN/GNN/FiniteVolumeGN/blocks.py +++ b/jointContribution/IJCAI_2024/aminos/NN/GNN/FiniteVolumeGN/blocks.py @@ -1,219 +1,219 @@ -import paddle -import pgl -import utils.paddle_aux as paddle_aux -from dataset.Load_mesh import CustomGraphData -from utils.utilities import calc_cell_centered_with_node_attr -from utils.utilities import decompose_and_trans_node_attr_to_cell_attr_graph - - -class GraphSCA3D(paddle.nn.Layer): - def __init__(self, channel, reduction=16): - super().__init__() - self.channel_excitation = paddle.nn.Sequential( - paddle.nn.Linear( - in_features=channel, out_features=int(channel // reduction) - ), - paddle.nn.ReLU(), - paddle.nn.Linear( - in_features=int(channel // reduction), out_features=channel - ), - ) - self.spatial_se = pgl.nn.GCNConv(input_size=channel, output_size=1) - - def forward(self, x, batch, edge_index): - BN, C = tuple(x.shape) - chn_se = paddle_aux.scatter_paddle(x, index=batch, dim=0, reduce="mean").view( - -1, C - ) - chn_se = paddle.nn.functional.sigmoid(x=self.channel_excitation(chn_se)) - chn_se = x * chn_se[batch] - spa_se = paddle.nn.functional.sigmoid(x=self.spatial_se(x, edge_index)) - spa_se = x * spa_se - net_out = spa_se + x + chn_se - return net_out - - -class GraphCA3D(paddle.nn.Layer): - def __init__(self, channel, reduction=16): - super().__init__() - self.channel_excitation = paddle.nn.Sequential( - paddle.nn.Linear( - in_features=channel, out_features=int(channel // reduction) - ), - paddle.nn.ReLU(), - paddle.nn.Linear( - in_features=int(channel // reduction), out_features=channel - ), - ) - - def forward(self, x, batch): - BN, C = tuple(x.shape) - chn_se = paddle_aux.scatter_paddle(x, index=batch, dim=0, reduce="mean").view( - -1, C - ) - chn_se = paddle.nn.functional.sigmoid(x=self.channel_excitation(chn_se)) - chn_se = x * chn_se[batch] - net_out = x + chn_se - return net_out - - -class NodeBlock(paddle.nn.Layer): - def __init__( - self, input_size, attention_size, attention=True, MultiHead=1, custom_func=None - ): - super(NodeBlock, self).__init__() - self.net = custom_func - - def forward(self, graph_node, graph_cell=None): - ( - node_attr, - edge_index, - edge_attr, - face, - _, - _, - ) = decompose_and_trans_node_attr_to_cell_attr_graph( - graph_node, has_changed_node_attr_to_cell_attr=True - ) - """ cell-based two step message passing algorithm """ - if graph_cell is not None: - senders_cell_idx, receivers_cell_idx = graph_cell.edge_index - twoway_edge_attr = paddle.concat( - x=paddle.chunk(x=edge_attr, chunks=2, axis=-1), axis=0 - ) - twoway_cell_connections_indegree = paddle.concat( - x=[senders_cell_idx, receivers_cell_idx], axis=0 - ) - twoway_cell_connections_outdegree = paddle.concat( - x=[receivers_cell_idx, senders_cell_idx], axis=0 - ) - - cell_agg_received_edges = paddle_aux.scatter_paddle( - twoway_edge_attr, twoway_cell_connections_indegree, dim=0, reduce="add" - ) - cell_agg_neighbour_cell = paddle_aux.scatter_paddle( - 
cell_agg_received_edges[twoway_cell_connections_indegree], - twoway_cell_connections_outdegree, - dim=0, - reduce="add", - ) - cells_node = graph_node.face[0] - cells_index = graph_cell.face[0] - cell_to_node = cell_agg_neighbour_cell[cells_index] - node_agg_received_edges = paddle_aux.scatter_paddle( - cell_to_node, index=cells_node, dim=0, reduce="mean" - ) - x = self.net(paddle.concat(x=(node_agg_received_edges, node_attr), axis=1)) - else: - """node-based two step message passing algorithm""" - senders_node_idx, receivers_node_idx = edge_index - twoway_node_connections_indegree = paddle.concat( - x=[senders_node_idx, receivers_node_idx], axis=0 - ) - twoway_node_connections_outdegree = paddle.concat( - x=[receivers_node_idx, senders_node_idx], axis=0 - ) - twoway_edge_attr = paddle.concat( - x=paddle.chunk(x=edge_attr, chunks=2, axis=-1), axis=0 - ) - - node_agg_received_edges = paddle_aux.scatter_paddle( - twoway_edge_attr, - twoway_node_connections_indegree, - dim=0, - out=paddle.zeros(shape=(node_attr.shape[0], twoway_edge_attr.shape[1])), - reduce="add", - ) - node_avg_neighbour_node = paddle_aux.scatter_paddle( - node_agg_received_edges[twoway_node_connections_outdegree], - twoway_node_connections_indegree, - dim=0, - out=paddle.zeros(shape=(node_attr.shape[0], twoway_edge_attr.shape[1])), - reduce="mean", - ) - x = self.net(paddle.concat(x=(node_avg_neighbour_node, node_attr), axis=1)) - ret = CustomGraphData( - x=x, - edge_attr=edge_attr, - edge_index=edge_index, - face=face, - num_graphs=graph_node.num_graph, - batch=graph_node.batch, - ) - return ret - - -class EdgeBlock(paddle.nn.Layer): - def __init__(self, input_size=None, custom_func=None): - super(EdgeBlock, self).__init__() - self.net = custom_func - - def forward(self, graph_node, graph_cell=None): - ( - node_attr, - edge_index, - edge_attr, - face, - _, - _, - ) = decompose_and_trans_node_attr_to_cell_attr_graph( - graph_node, has_changed_node_attr_to_cell_attr=True - ) - edges_to_collect = [] - """ >>> node to cell and concancentendate to edge >>> """ - if graph_cell is not None: - cells_node = graph_node.face - cells_index = graph_cell.face - cell_attr = calc_cell_centered_with_node_attr( - node_attr=node_attr, - cells_node=cells_node, - cells_index=cells_index, - reduce="sum", - map=True, - ) - senders_cell_idx, receivers_cell_idx = graph_cell.edge_index - mask = paddle.logical_not( - x=senders_cell_idx == receivers_cell_idx - ).unsqueeze(axis=1) - senders_attr = cell_attr[senders_cell_idx] - receivers_attr = cell_attr[receivers_cell_idx] - edges_to_collect.append(senders_attr) - edges_to_collect.append(receivers_attr * mask.astype(dtype="int64")) - edges_to_collect.append(edge_attr) - """ <<< node to cell and concancentendate to edge <<< """ - collected_edges = paddle.concat(x=edges_to_collect, axis=1) - edge_attr_ = self.net(collected_edges) - else: - """>>> only node concancentendate to edge >>>""" - senders_node_idx, receivers_node_idx = edge_index - twoway_node_connections_indegree = paddle.concat( - x=[senders_node_idx, receivers_node_idx], axis=0 - ) - twoway_node_connections_outdegree = paddle.concat( - x=[receivers_node_idx, senders_node_idx], axis=0 - ) - - node_avg_neighbour_node = paddle_aux.scatter_paddle( - node_attr[twoway_node_connections_outdegree], - twoway_node_connections_indegree, - dim=0, - out=paddle.zeros(shape=(node_attr.shape[0], node_attr.shape[1])), - reduce="add", - ) - senders_attr = node_avg_neighbour_node[senders_node_idx] - receivers_attr = node_avg_neighbour_node[receivers_node_idx] 
- edges_to_collect.append(senders_attr) - edges_to_collect.append(receivers_attr) - edges_to_collect.append(edge_attr) - """ >>>> only node concancentendate to edge >>> """ - collected_edges = paddle.concat(x=edges_to_collect, axis=1) - edge_attr_ = self.net(collected_edges) - ret = CustomGraphData( - x=node_attr, - edge_attr=edge_attr_, - edge_index=edge_index, - face=face, - num_graphs=graph_node.num_graph, - batch=graph_node.batch, - ) - return ret +import paddle +import pgl +import utils.paddle_aux as paddle_aux +from dataset.Load_mesh import CustomGraphData +from utils.utilities import calc_cell_centered_with_node_attr +from utils.utilities import decompose_and_trans_node_attr_to_cell_attr_graph + + +class GraphSCA3D(paddle.nn.Layer): + def __init__(self, channel, reduction=16): + super().__init__() + self.channel_excitation = paddle.nn.Sequential( + paddle.nn.Linear( + in_features=channel, out_features=int(channel // reduction) + ), + paddle.nn.ReLU(), + paddle.nn.Linear( + in_features=int(channel // reduction), out_features=channel + ), + ) + self.spatial_se = pgl.nn.GCNConv(input_size=channel, output_size=1) + + def forward(self, x, batch, edge_index): + BN, C = tuple(x.shape) + chn_se = paddle_aux.scatter_paddle(x, index=batch, dim=0, reduce="mean").view( + -1, C + ) + chn_se = paddle.nn.functional.sigmoid(x=self.channel_excitation(chn_se)) + chn_se = x * chn_se[batch] + spa_se = paddle.nn.functional.sigmoid(x=self.spatial_se(x, edge_index)) + spa_se = x * spa_se + net_out = spa_se + x + chn_se + return net_out + + +class GraphCA3D(paddle.nn.Layer): + def __init__(self, channel, reduction=16): + super().__init__() + self.channel_excitation = paddle.nn.Sequential( + paddle.nn.Linear( + in_features=channel, out_features=int(channel // reduction) + ), + paddle.nn.ReLU(), + paddle.nn.Linear( + in_features=int(channel // reduction), out_features=channel + ), + ) + + def forward(self, x, batch): + BN, C = tuple(x.shape) + chn_se = paddle_aux.scatter_paddle(x, index=batch, dim=0, reduce="mean").view( + -1, C + ) + chn_se = paddle.nn.functional.sigmoid(x=self.channel_excitation(chn_se)) + chn_se = x * chn_se[batch] + net_out = x + chn_se + return net_out + + +class NodeBlock(paddle.nn.Layer): + def __init__( + self, input_size, attention_size, attention=True, MultiHead=1, custom_func=None + ): + super(NodeBlock, self).__init__() + self.net = custom_func + + def forward(self, graph_node, graph_cell=None): + ( + node_attr, + edge_index, + edge_attr, + face, + _, + _, + ) = decompose_and_trans_node_attr_to_cell_attr_graph( + graph_node, has_changed_node_attr_to_cell_attr=True + ) + """ cell-based two step message passing algorithm """ + if graph_cell is not None: + senders_cell_idx, receivers_cell_idx = graph_cell.edge_index + twoway_edge_attr = paddle.concat( + x=paddle.chunk(x=edge_attr, chunks=2, axis=-1), axis=0 + ) + twoway_cell_connections_indegree = paddle.concat( + x=[senders_cell_idx, receivers_cell_idx], axis=0 + ) + twoway_cell_connections_outdegree = paddle.concat( + x=[receivers_cell_idx, senders_cell_idx], axis=0 + ) + + cell_agg_received_edges = paddle_aux.scatter_paddle( + twoway_edge_attr, twoway_cell_connections_indegree, dim=0, reduce="add" + ) + cell_agg_neighbour_cell = paddle_aux.scatter_paddle( + cell_agg_received_edges[twoway_cell_connections_indegree], + twoway_cell_connections_outdegree, + dim=0, + reduce="add", + ) + cells_node = graph_node.face[0] + cells_index = graph_cell.face[0] + cell_to_node = cell_agg_neighbour_cell[cells_index] + node_agg_received_edges = 
paddle_aux.scatter_paddle( + cell_to_node, index=cells_node, dim=0, reduce="mean" + ) + x = self.net(paddle.concat(x=(node_agg_received_edges, node_attr), axis=1)) + else: + """node-based two step message passing algorithm""" + senders_node_idx, receivers_node_idx = edge_index + twoway_node_connections_indegree = paddle.concat( + x=[senders_node_idx, receivers_node_idx], axis=0 + ) + twoway_node_connections_outdegree = paddle.concat( + x=[receivers_node_idx, senders_node_idx], axis=0 + ) + twoway_edge_attr = paddle.concat( + x=paddle.chunk(x=edge_attr, chunks=2, axis=-1), axis=0 + ) + + node_agg_received_edges = paddle_aux.scatter_paddle( + twoway_edge_attr, + twoway_node_connections_indegree, + dim=0, + out=paddle.zeros(shape=(node_attr.shape[0], twoway_edge_attr.shape[1])), + reduce="add", + ) + node_avg_neighbour_node = paddle_aux.scatter_paddle( + node_agg_received_edges[twoway_node_connections_outdegree], + twoway_node_connections_indegree, + dim=0, + out=paddle.zeros(shape=(node_attr.shape[0], twoway_edge_attr.shape[1])), + reduce="mean", + ) + x = self.net(paddle.concat(x=(node_avg_neighbour_node, node_attr), axis=1)) + ret = CustomGraphData( + x=x, + edge_attr=edge_attr, + edge_index=edge_index, + face=face, + num_graphs=graph_node.num_graph, + batch=graph_node.batch, + ) + return ret + + +class EdgeBlock(paddle.nn.Layer): + def __init__(self, input_size=None, custom_func=None): + super(EdgeBlock, self).__init__() + self.net = custom_func + + def forward(self, graph_node, graph_cell=None): + ( + node_attr, + edge_index, + edge_attr, + face, + _, + _, + ) = decompose_and_trans_node_attr_to_cell_attr_graph( + graph_node, has_changed_node_attr_to_cell_attr=True + ) + edges_to_collect = [] + """ >>> node to cell and concancentendate to edge >>> """ + if graph_cell is not None: + cells_node = graph_node.face + cells_index = graph_cell.face + cell_attr = calc_cell_centered_with_node_attr( + node_attr=node_attr, + cells_node=cells_node, + cells_index=cells_index, + reduce="sum", + map=True, + ) + senders_cell_idx, receivers_cell_idx = graph_cell.edge_index + mask = paddle.logical_not( + x=senders_cell_idx == receivers_cell_idx + ).unsqueeze(axis=1) + senders_attr = cell_attr[senders_cell_idx] + receivers_attr = cell_attr[receivers_cell_idx] + edges_to_collect.append(senders_attr) + edges_to_collect.append(receivers_attr * mask.astype(dtype="int64")) + edges_to_collect.append(edge_attr) + """ <<< node to cell and concancentendate to edge <<< """ + collected_edges = paddle.concat(x=edges_to_collect, axis=1) + edge_attr_ = self.net(collected_edges) + else: + """>>> only node concancentendate to edge >>>""" + senders_node_idx, receivers_node_idx = edge_index + twoway_node_connections_indegree = paddle.concat( + x=[senders_node_idx, receivers_node_idx], axis=0 + ) + twoway_node_connections_outdegree = paddle.concat( + x=[receivers_node_idx, senders_node_idx], axis=0 + ) + + node_avg_neighbour_node = paddle_aux.scatter_paddle( + node_attr[twoway_node_connections_outdegree], + twoway_node_connections_indegree, + dim=0, + out=paddle.zeros(shape=(node_attr.shape[0], node_attr.shape[1])), + reduce="add", + ) + senders_attr = node_avg_neighbour_node[senders_node_idx] + receivers_attr = node_avg_neighbour_node[receivers_node_idx] + edges_to_collect.append(senders_attr) + edges_to_collect.append(receivers_attr) + edges_to_collect.append(edge_attr) + """ >>>> only node concancentendate to edge >>> """ + collected_edges = paddle.concat(x=edges_to_collect, axis=1) + edge_attr_ = self.net(collected_edges) 
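Note the width of `collected_edges`: sender features, receiver features and the previous edge features are concatenated, so whatever `custom_func` is passed into EdgeBlock has to accept an input of size 2 * node_dim + edge_dim. A minimal sketch of a compatible MLP with hypothetical sizes (the actual widths come from the EPD backbone configuration):

import paddle

node_dim, edge_dim, hidden = 128, 128, 128  # hypothetical sizes, not the repo defaults
edge_mlp = paddle.nn.Sequential(
    paddle.nn.Linear(in_features=2 * node_dim + edge_dim, out_features=hidden),
    paddle.nn.ReLU(),
    paddle.nn.Linear(in_features=hidden, out_features=edge_dim),
)
# EdgeBlock(custom_func=edge_mlp) then maps
# concat([senders_attr, receivers_attr, edge_attr], axis=1) -> updated edge_attr_.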
+ ret = CustomGraphData( + x=node_attr, + edge_attr=edge_attr_, + edge_index=edge_index, + face=face, + num_graphs=graph_node.num_graph, + batch=graph_node.batch, + ) + return ret diff --git a/jointContribution/IJCAI_2024/aminos/NN/Transolver/FVGNAttUnet.py b/jointContribution/IJCAI_2024/aminos/NN/Transolver/FVGNAttUnet.py index bd68807362..96f6d0ff81 100644 --- a/jointContribution/IJCAI_2024/aminos/NN/Transolver/FVGNAttUnet.py +++ b/jointContribution/IJCAI_2024/aminos/NN/Transolver/FVGNAttUnet.py @@ -1,104 +1,104 @@ -import os -import sys - -import paddle -from NN.GNN.FiniteVolumeGN.EPDbackbone import EncoderProcesserDecoder -from NN.UNet.attention_unet import UNet3DWithSamplePoints - -import ppsci - -cur_path = os.path.split(__file__)[0] -sys.path.append(cur_path) -sys.path.append(os.path.join(cur_path, "..")) - - -class Model(paddle.nn.Layer): - def __init__( - self, - space_dim=1, - n_layers=5, - n_hidden=256, - dropout=0, - n_head=8, - act="gelu", - mlp_ratio=1, - fun_dim=1, - out_dim=1, - slice_num=32, - ref=8, - unified_pos=False, - params=None, - ): - super(Model, self).__init__() - self.unet_o_dim = n_hidden - self.unet = UNet3DWithSamplePoints( - in_channels=1, - out_channels=self.unet_o_dim, - hidden_channels=self.unet_o_dim, - num_levels=4, - ) - self.fvgn = EncoderProcesserDecoder( - message_passing_num=params.message_passing_num, - cell_input_size=params.cell_input_size, - edge_input_size=params.edge_input_size, - node_input_size=params.node_input_size, - cell_output_size=params.cell_output_size, - edge_output_size=params.edge_output_size, - node_output_size=params.node_output_size, - hidden_size=n_hidden, - params=params, - ) - self.last_layer = paddle.nn.Sequential( - paddle.nn.Linear(in_features=n_hidden * 2, out_features=n_hidden * 4), - paddle.nn.GELU(), - paddle.nn.Linear(in_features=n_hidden * 4, out_features=out_dim), - ) - self.initialize_weights() - - def initialize_weights(self): - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, paddle.nn.Linear): - m.weight = ppsci.utils.initializer.trunc_normal_(m.weight, std=0.02) - if isinstance(m, paddle.nn.Linear) and m.bias is not None: - init_Constant = paddle.nn.initializer.Constant(value=0) - init_Constant(m.bias) - elif isinstance(m, (paddle.nn.LayerNorm, paddle.nn.BatchNorm1D)): - init_Constant = paddle.nn.initializer.Constant(value=0) - init_Constant(m.bias) - init_Constant = paddle.nn.initializer.Constant(value=1.0) - init_Constant(m.weight) - - def forward( - self, - x, - graph_node=None, - graph_edge=None, - graph_cell=None, - params=None, - is_training=True, - ): - query_list = [] - for i in range(graph_cell.num_graph): - # graph_cell.batch is a function in pgl - # mask = i == graph_cell.batch - # cur_query = graph_cell.query[mask] - cur_query = graph_cell.query - - # use paddle<2.6 to support pgl so that cur_query[:, [2, 0, 1]] not work - # cur_query = cur_query[:, [2, 0, 1]][None,] # B, N, 3 - cur_query = paddle.stack( - [cur_query[:, 2], cur_query[:, 0], cur_query[:, 1]], axis=-1 - )[ - None, - ] - - cur_query = cur_query.unsqueeze(axis=2).unsqueeze(axis=2) - query_list.append(cur_query) - ufeatures = self.unet(graph_cell.voxel, query_list, half=False) - graph_feature = self.fvgn( - graph_node=graph_cell, params=params, is_training=is_training - ) - fx = self.last_layer(paddle.concat(x=(ufeatures, graph_feature), axis=-1)) - return fx +import os +import sys + +import paddle +from NN.GNN.FiniteVolumeGN.EPDbackbone import EncoderProcesserDecoder +from NN.UNet.attention_unet import 
UNet3DWithSamplePoints + +import ppsci + +cur_path = os.path.split(__file__)[0] +sys.path.append(cur_path) +sys.path.append(os.path.join(cur_path, "..")) + + +class Model(paddle.nn.Layer): + def __init__( + self, + space_dim=1, + n_layers=5, + n_hidden=256, + dropout=0, + n_head=8, + act="gelu", + mlp_ratio=1, + fun_dim=1, + out_dim=1, + slice_num=32, + ref=8, + unified_pos=False, + params=None, + ): + super(Model, self).__init__() + self.unet_o_dim = n_hidden + self.unet = UNet3DWithSamplePoints( + in_channels=1, + out_channels=self.unet_o_dim, + hidden_channels=self.unet_o_dim, + num_levels=4, + ) + self.fvgn = EncoderProcesserDecoder( + message_passing_num=params.message_passing_num, + cell_input_size=params.cell_input_size, + edge_input_size=params.edge_input_size, + node_input_size=params.node_input_size, + cell_output_size=params.cell_output_size, + edge_output_size=params.edge_output_size, + node_output_size=params.node_output_size, + hidden_size=n_hidden, + params=params, + ) + self.last_layer = paddle.nn.Sequential( + paddle.nn.Linear(in_features=n_hidden * 2, out_features=n_hidden * 4), + paddle.nn.GELU(), + paddle.nn.Linear(in_features=n_hidden * 4, out_features=out_dim), + ) + self.initialize_weights() + + def initialize_weights(self): + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, paddle.nn.Linear): + m.weight = ppsci.utils.initializer.trunc_normal_(m.weight, std=0.02) + if isinstance(m, paddle.nn.Linear) and m.bias is not None: + init_Constant = paddle.nn.initializer.Constant(value=0) + init_Constant(m.bias) + elif isinstance(m, (paddle.nn.LayerNorm, paddle.nn.BatchNorm1D)): + init_Constant = paddle.nn.initializer.Constant(value=0) + init_Constant(m.bias) + init_Constant = paddle.nn.initializer.Constant(value=1.0) + init_Constant(m.weight) + + def forward( + self, + x, + graph_node=None, + graph_edge=None, + graph_cell=None, + params=None, + is_training=True, + ): + query_list = [] + for i in range(graph_cell.num_graph): + # graph_cell.batch is a function in pgl + # mask = i == graph_cell.batch + # cur_query = graph_cell.query[mask] + cur_query = graph_cell.query + + # use paddle<2.6 to support pgl so that cur_query[:, [2, 0, 1]] not work + # cur_query = cur_query[:, [2, 0, 1]][None,] # B, N, 3 + cur_query = paddle.stack( + [cur_query[:, 2], cur_query[:, 0], cur_query[:, 1]], axis=-1 + )[ + None, + ] + + cur_query = cur_query.unsqueeze(axis=2).unsqueeze(axis=2) + query_list.append(cur_query) + ufeatures = self.unet(graph_cell.voxel, query_list, half=False) + graph_feature = self.fvgn( + graph_node=graph_cell, params=params, is_training=is_training + ) + fx = self.last_layer(paddle.concat(x=(ufeatures, graph_feature), axis=-1)) + return fx diff --git a/jointContribution/IJCAI_2024/aminos/NN/Transolver/SageTrans_importer_B.py b/jointContribution/IJCAI_2024/aminos/NN/Transolver/SageTrans_importer_B.py index 6120e5fd52..8ec5225541 100644 --- a/jointContribution/IJCAI_2024/aminos/NN/Transolver/SageTrans_importer_B.py +++ b/jointContribution/IJCAI_2024/aminos/NN/Transolver/SageTrans_importer_B.py @@ -1,86 +1,86 @@ -import paddle - -from .FVGNAttUnet import Model - - -class FVGN(paddle.nn.Layer): - def __init__(self, params) -> None: - super().__init__() - self.params = params - space_dim = params.cell_input_size - self.nn = Model( - space_dim=space_dim, - n_hidden=params.hidden_size, - n_layers=3, - fun_dim=0, - n_head=4, - mlp_ratio=2, - out_dim=1, - slice_num=32, - unified_pos=0, - params=params, - ) - - def forward( - self, - graph_cell=None, 
- graph_edge=None, - graph_node=None, - params=None, - is_training=True, - ): - x = graph_cell.x - z = x - output = self.nn( - z, - graph_node=graph_node, - graph_edge=graph_edge, - graph_cell=graph_cell, - params=params, - ) - return output - - def load_checkpoint( - self, optimizer=None, scheduler=None, ckpdir=None, device=None, is_training=True - ): - if ckpdir is None: - ckpdir = self.model_dir - dicts = paddle.load(path=ckpdir) - self.set_state_dict(state_dict=dicts["model"]) - keys = list(dicts.keys()) - keys.remove("model") - if optimizer is not None: - if type(optimizer) is not list: - optimizer = [optimizer] - for i, o in enumerate(optimizer): - o.set_state_dict(state_dict=dicts["optimizer{}".format(i)]) - keys.remove("optimizer{}".format(i)) - if scheduler is not None: - if type(scheduler) is not list: - scheduler = [scheduler] - for i, s in enumerate(scheduler): - s.set_state_dict(state_dict=dicts["scheduler{}".format(i)]) - keys.remove("scheduler{}".format(i)) - if not is_training: - for key in keys.copy(): - if key.find("optimizer") >= 0: - keys.remove(key) - elif key.find("scheduler") >= 0: - keys.remove(key) - print("Simulator model and optimizer/scheduler loaded checkpoint %s" % ckpdir) - - def save_checkpoint(self, path=None, optimizer=None, scheduler=None): - if path is None: - path = self.model_dir - model = self.state_dict() - to_save = {"model": model} - if type(optimizer) is not list: - optimizer = [optimizer] - for i, o in enumerate(optimizer): - to_save.update({"optimizer{}".format(i): o.state_dict()}) - if type(scheduler) is not list: - scheduler = [scheduler] - for i, s in enumerate(scheduler): - to_save.update({"scheduler{}".format(i): s.state_dict()}) - paddle.save(obj=to_save, path=path) - print("Simulator model saved at %s" % path) +import paddle + +from .FVGNAttUnet import Model + + +class FVGN(paddle.nn.Layer): + def __init__(self, params) -> None: + super().__init__() + self.params = params + space_dim = params.cell_input_size + self.nn = Model( + space_dim=space_dim, + n_hidden=params.hidden_size, + n_layers=3, + fun_dim=0, + n_head=4, + mlp_ratio=2, + out_dim=1, + slice_num=32, + unified_pos=0, + params=params, + ) + + def forward( + self, + graph_cell=None, + graph_edge=None, + graph_node=None, + params=None, + is_training=True, + ): + x = graph_cell.x + z = x + output = self.nn( + z, + graph_node=graph_node, + graph_edge=graph_edge, + graph_cell=graph_cell, + params=params, + ) + return output + + def load_checkpoint( + self, optimizer=None, scheduler=None, ckpdir=None, device=None, is_training=True + ): + if ckpdir is None: + ckpdir = self.model_dir + dicts = paddle.load(path=ckpdir) + self.set_state_dict(state_dict=dicts["model"]) + keys = list(dicts.keys()) + keys.remove("model") + if optimizer is not None: + if type(optimizer) is not list: + optimizer = [optimizer] + for i, o in enumerate(optimizer): + o.set_state_dict(state_dict=dicts["optimizer{}".format(i)]) + keys.remove("optimizer{}".format(i)) + if scheduler is not None: + if type(scheduler) is not list: + scheduler = [scheduler] + for i, s in enumerate(scheduler): + s.set_state_dict(state_dict=dicts["scheduler{}".format(i)]) + keys.remove("scheduler{}".format(i)) + if not is_training: + for key in keys.copy(): + if key.find("optimizer") >= 0: + keys.remove(key) + elif key.find("scheduler") >= 0: + keys.remove(key) + print("Simulator model and optimizer/scheduler loaded checkpoint %s" % ckpdir) + + def save_checkpoint(self, path=None, optimizer=None, scheduler=None): + if path is None: + path 
= self.model_dir + model = self.state_dict() + to_save = {"model": model} + if type(optimizer) is not list: + optimizer = [optimizer] + for i, o in enumerate(optimizer): + to_save.update({"optimizer{}".format(i): o.state_dict()}) + if type(scheduler) is not list: + scheduler = [scheduler] + for i, s in enumerate(scheduler): + to_save.update({"scheduler{}".format(i): s.state_dict()}) + paddle.save(obj=to_save, path=path) + print("Simulator model saved at %s" % path) diff --git a/jointContribution/IJCAI_2024/aminos/NN/UNet/BuildingBlocks.py b/jointContribution/IJCAI_2024/aminos/NN/UNet/BuildingBlocks.py index 979c261b7f..cc59092805 100644 --- a/jointContribution/IJCAI_2024/aminos/NN/UNet/BuildingBlocks.py +++ b/jointContribution/IJCAI_2024/aminos/NN/UNet/BuildingBlocks.py @@ -1,421 +1,421 @@ -import paddle - - -class SCA3D(paddle.nn.Layer): - def __init__(self, channel, reduction=16): - super().__init__() - self.avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=1) - self.channel_excitation = paddle.nn.Sequential( - paddle.nn.Linear( - in_features=channel, out_features=int(channel // reduction) - ), - paddle.nn.ReLU(), - paddle.nn.Linear( - in_features=int(channel // reduction), out_features=channel - ), - ) - self.spatial_se = paddle.nn.Conv3D( - in_channels=channel, - out_channels=1, - kernel_size=1, - stride=1, - padding=0, - bias_attr=False, - ) - - def forward(self, x): - bahs, chs, _, _, _ = tuple(x.shape) - chn_se = self.avg_pool(x).view(bahs, chs) - chn_se = paddle.nn.functional.sigmoid( - x=self.channel_excitation(chn_se).view(bahs, chs, 1, 1, 1) - ) - chn_se = paddle.multiply(x=x, y=paddle.to_tensor(chn_se)) - spa_se = paddle.nn.functional.sigmoid(x=self.spatial_se(x)) - spa_se = paddle.multiply(x=x, y=paddle.to_tensor(spa_se)) - net_out = spa_se + x + chn_se - return net_out - - -def conv3d(in_channels, out_channels, kernel_size, bias, padding=1): - return paddle.nn.Conv3D( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - padding=padding, - bias_attr=bias, - ) - - -def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1): - """ - Create a list of modules with together constitute a single conv layer with non-linearity - and optional batchnorm/groupnorm. - Args: - in_channels (int): number of input channels - out_channels (int): number of output channels - order (string): order of things, e.g. 
- 'cr' -> conv + ReLU - 'crg' -> conv + ReLU + groupnorm - 'cl' -> conv + LeakyReLU - 'ce' -> conv + ELU - num_groups (int): number of groups for the GroupNorm - padding (int): add zero-padding to the input - Return: - list of tuple (name, module) - """ - assert "c" in order, "Conv layer MUST be present" - assert ( - order[0] not in "rle" - ), "Non-linearity cannot be the first operation in the layer" - modules = [] - for i, char in enumerate(order): - if char == "r": - modules.append(("ReLU", paddle.nn.ReLU())) - elif char == "l": - modules.append(("LeakyReLU", paddle.nn.LeakyReLU(negative_slope=0.1))) - elif char == "e": - modules.append(("ELU", paddle.nn.ELU())) - elif char == "c": - bias = not ("g" in order or "b" in order) - modules.append( - ( - "conv", - conv3d( - in_channels, out_channels, kernel_size, bias, padding=padding - ), - ) - ) - elif char == "g": - is_before_conv = i < order.index("c") - assert not is_before_conv, "GroupNorm MUST go after the Conv3d" - if out_channels < num_groups: - num_groups = out_channels - modules.append( - ( - "groupnorm", - paddle.nn.GroupNorm( - num_groups=num_groups, num_channels=out_channels - ), - ) - ) - elif char == "b": - is_before_conv = i < order.index("c") - if is_before_conv: - modules.append( - ("batchnorm", paddle.nn.BatchNorm3D(num_features=in_channels)) - ) - else: - modules.append( - ("batchnorm", paddle.nn.BatchNorm3D(num_features=out_channels)) - ) - else: - raise ValueError( - "Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']" - ) - return modules - - -class SingleConv(paddle.nn.Sequential): - """ - Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order - of operations can be specified via the `order` parameter - Args: - in_channels (int): number of input channels - out_channels (int): number of output channels - kernel_size (int): size of the convolving kernel - order (string): determines the order of layers, e.g. - 'cr' -> conv + ReLU - 'crg' -> conv + ReLU + groupnorm - 'cl' -> conv + LeakyReLU - 'ce' -> conv + ELU - num_groups (int): number of groups for the GroupNorm - """ - - def __init__( - self, - in_channels, - out_channels, - kernel_size=3, - order="crg", - num_groups=8, - padding=1, - ): - super(SingleConv, self).__init__() - for name, module in create_conv( - in_channels, out_channels, kernel_size, order, num_groups, padding=padding - ): - self.add_sublayer(name=name, sublayer=module) - - -class DoubleConv(paddle.nn.Sequential): - """ - A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d). - We use (Conv3d+ReLU+GroupNorm3d) by default. - This can be changed however by providing the 'order' argument, e.g. in order - to change to Conv3d+BatchNorm3d+ELU use order='cbe'. - Use padded convolutions to make sure that the output (H_out, W_out) is the same - as (H_in, W_in), so that you don't have to crop in the decoder path. - Args: - in_channels (int): number of input channels - out_channels (int): number of output channels - encoder (bool): if True we're in the encoder path, otherwise we're in the decoder - kernel_size (int): size of the convolving kernel - order (string): determines the order of layers, e.g. 
- 'cr' -> conv + ReLU - 'crg' -> conv + ReLU + groupnorm - 'cl' -> conv + LeakyReLU - 'ce' -> conv + ELU - num_groups (int): number of groups for the GroupNorm - """ - - def __init__( - self, - in_channels, - out_channels, - encoder, - kernel_size=3, - order="crg", - num_groups=8, - ): - super(DoubleConv, self).__init__() - if encoder: - conv1_in_channels = in_channels - conv1_out_channels = out_channels // 2 - if conv1_out_channels < in_channels: - conv1_out_channels = in_channels - conv2_in_channels, conv2_out_channels = (conv1_out_channels, out_channels) - else: - conv1_in_channels, conv1_out_channels = in_channels, out_channels - conv2_in_channels, conv2_out_channels = out_channels, out_channels - self.add_sublayer( - name="SingleConv1", - sublayer=SingleConv( - conv1_in_channels, conv1_out_channels, kernel_size, order, num_groups - ), - ) - self.add_sublayer( - name="SingleConv2", - sublayer=SingleConv( - conv2_in_channels, conv2_out_channels, kernel_size, order, num_groups - ), - ) - - -class ExtResNetBlock(paddle.nn.Layer): - """ - Basic UNet block consisting of a SingleConv followed by the residual block. - The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number - of output channels is compatible with the residual block that follows. - This block can be used instead of standard DoubleConv in the Encoder module. - Motivated by: https://arxiv.org/pdf/1706.00120.pdf - Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. - """ - - def __init__( - self, - in_channels, - out_channels, - kernel_size=3, - order="cge", - num_groups=8, - **kwargs - ): - super(ExtResNetBlock, self).__init__() - self.conv1 = SingleConv( - in_channels, - out_channels, - kernel_size=kernel_size, - order=order, - num_groups=num_groups, - ) - self.conv2 = SingleConv( - out_channels, - out_channels, - kernel_size=kernel_size, - order=order, - num_groups=num_groups, - ) - n_order = order - for c in "rel": - n_order = n_order.replace(c, "") - self.conv3 = SingleConv( - out_channels, - out_channels, - kernel_size=kernel_size, - order=n_order, - num_groups=num_groups, - ) - if "l" in order: - self.non_linearity = paddle.nn.LeakyReLU(negative_slope=0.1) - elif "e" in order: - self.non_linearity = paddle.nn.ELU() - else: - self.non_linearity = paddle.nn.ReLU() - - def forward(self, x): - out = self.conv1(x) - residual = out - out = self.conv2(out) - out = self.conv3(out) - out += residual - out = self.non_linearity(out) - return out - - -class Encoder(paddle.nn.Layer): - """ - A single module from the encoder path consisting of the optional max - pooling layer (one may specify the MaxPool kernel_size to be different - than the standard (2,2,2), e.g. if the volumetric data is anisotropic - (make sure to use complementary scale_factor in the decoder path) followed by - a DoubleConv module. - Args: - in_channels (int): number of input channels - out_channels (int): number of output channels - conv_kernel_size (int): size of the convolving kernel - apply_pooling (bool): if True use MaxPool3d before DoubleConv - pool_kernel_size (tuple): the size of the window to take a max over - pool_type (str): pooling layer: 'max' or 'avg' - basic_module(nn.Module): either ResNetBlock or DoubleConv - conv_layer_order (string): determines the order of layers - in `DoubleConv` module. See `DoubleConv` for more info. 
- num_groups (int): number of groups for the GroupNorm - """ - - def __init__( - self, - in_channels, - out_channels, - conv_kernel_size=3, - apply_pooling=True, - pool_kernel_size=(2, 2, 2), - pool_type="max", - basic_module=DoubleConv, - conv_layer_order="crg", - num_groups=8, - ): - super(Encoder, self).__init__() - assert pool_type in ["max", "avg"] - if apply_pooling: - if pool_type == "max": - self.pooling = paddle.nn.MaxPool3D(kernel_size=pool_kernel_size) - else: - self.pooling = paddle.nn.AvgPool3D( - kernel_size=pool_kernel_size, exclusive=False - ) - else: - self.pooling = None - self.basic_module = basic_module( - in_channels, - out_channels, - encoder=True, - kernel_size=conv_kernel_size, - order=conv_layer_order, - num_groups=num_groups, - ) - - def forward(self, x): - if self.pooling is not None: - x = self.pooling(x) - x = self.basic_module(x) - return x - - -class Decoder(paddle.nn.Layer): - """ - A single module for decoder path consisting of the upsample layer - (either learned ConvTranspose3d or interpolation) followed by a DoubleConv - module. - Args: - in_channels (int): number of input channels - out_channels (int): number of output channels - kernel_size (int): size of the convolving kernel - scale_factor (tuple): used as the multiplier for the image H/W/D in - case of nn.Upsample or as stride in case of ConvTranspose3d, must reverse the MaxPool3d operation - from the corresponding encoder - basic_module(nn.Module): either ResNetBlock or DoubleConv - conv_layer_order (string): determines the order of layers - in `DoubleConv` module. See `DoubleConv` for more info. - num_groups (int): number of groups for the GroupNorm - """ - - def __init__( - self, - in_channels, - out_channels, - kernel_size=3, - scale_factor=(2, 2, 2), - basic_module=DoubleConv, - conv_layer_order="crg", - num_groups=8, - ): - super(Decoder, self).__init__() - if basic_module == DoubleConv: - self.upsample = None - else: - self.upsample = paddle.nn.Conv3DTranspose( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=scale_factor, - padding=1, - output_padding=1, - ) - in_channels = out_channels - self.scse = SCA3D(in_channels) - self.basic_module = basic_module( - in_channels, - out_channels, - encoder=False, - kernel_size=kernel_size, - order=conv_layer_order, - num_groups=num_groups, - ) - - def forward(self, encoder_features, x): - if self.upsample is None: - output_size = tuple(encoder_features.shape)[2:] - x = paddle.nn.functional.interpolate( - x=x, size=output_size, mode="nearest", data_format="NCDHW" - ) - x = paddle.concat(x=(encoder_features, x), axis=1) - else: - x = self.upsample(x) - x += encoder_features - x = self.scse(x) - x = self.basic_module(x) - return x - - -class FinalConv(paddle.nn.Sequential): - """ - A module consisting of a convolution layer (e.g. Conv3d+ReLU+GroupNorm3d) and the final 1x1 convolution - which reduces the number of channels to 'out_channels'. - with the number of output channels 'out_channels // 2' and 'out_channels' respectively. - We use (Conv3d+ReLU+GroupNorm3d) by default. - This can be change however by providing the 'order' argument, e.g. in order - to change to Conv3d+BatchNorm3d+ReLU use order='cbr'. - Args: - in_channels (int): number of input channels - out_channels (int): number of output channels - kernel_size (int): size of the convolving kernel - order (string): determines the order of layers, e.g. 
- 'cr' -> conv + ReLU - 'crg' -> conv + ReLU + groupnorm - num_groups (int): number of groups for the GroupNorm - """ - - def __init__( - self, in_channels, out_channels, kernel_size=3, order="crg", num_groups=8 - ): - super(FinalConv, self).__init__() - self.add_sublayer( - name="SingleConv", - sublayer=SingleConv( - in_channels, in_channels, kernel_size, order, num_groups - ), - ) - final_conv = paddle.nn.Conv3D( - in_channels=in_channels, out_channels=out_channels, kernel_size=1 - ) - self.add_sublayer(name="final_conv", sublayer=final_conv) +import paddle + + +class SCA3D(paddle.nn.Layer): + def __init__(self, channel, reduction=16): + super().__init__() + self.avg_pool = paddle.nn.AdaptiveAvgPool3D(output_size=1) + self.channel_excitation = paddle.nn.Sequential( + paddle.nn.Linear( + in_features=channel, out_features=int(channel // reduction) + ), + paddle.nn.ReLU(), + paddle.nn.Linear( + in_features=int(channel // reduction), out_features=channel + ), + ) + self.spatial_se = paddle.nn.Conv3D( + in_channels=channel, + out_channels=1, + kernel_size=1, + stride=1, + padding=0, + bias_attr=False, + ) + + def forward(self, x): + bahs, chs, _, _, _ = tuple(x.shape) + chn_se = self.avg_pool(x).view(bahs, chs) + chn_se = paddle.nn.functional.sigmoid( + x=self.channel_excitation(chn_se).view(bahs, chs, 1, 1, 1) + ) + chn_se = paddle.multiply(x=x, y=paddle.to_tensor(chn_se)) + spa_se = paddle.nn.functional.sigmoid(x=self.spatial_se(x)) + spa_se = paddle.multiply(x=x, y=paddle.to_tensor(spa_se)) + net_out = spa_se + x + chn_se + return net_out + + +def conv3d(in_channels, out_channels, kernel_size, bias, padding=1): + return paddle.nn.Conv3D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + padding=padding, + bias_attr=bias, + ) + + +def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1): + """ + Create a list of modules with together constitute a single conv layer with non-linearity + and optional batchnorm/groupnorm. + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + order (string): order of things, e.g. 
+ 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + num_groups (int): number of groups for the GroupNorm + padding (int): add zero-padding to the input + Return: + list of tuple (name, module) + """ + assert "c" in order, "Conv layer MUST be present" + assert ( + order[0] not in "rle" + ), "Non-linearity cannot be the first operation in the layer" + modules = [] + for i, char in enumerate(order): + if char == "r": + modules.append(("ReLU", paddle.nn.ReLU())) + elif char == "l": + modules.append(("LeakyReLU", paddle.nn.LeakyReLU(negative_slope=0.1))) + elif char == "e": + modules.append(("ELU", paddle.nn.ELU())) + elif char == "c": + bias = not ("g" in order or "b" in order) + modules.append( + ( + "conv", + conv3d( + in_channels, out_channels, kernel_size, bias, padding=padding + ), + ) + ) + elif char == "g": + is_before_conv = i < order.index("c") + assert not is_before_conv, "GroupNorm MUST go after the Conv3d" + if out_channels < num_groups: + num_groups = out_channels + modules.append( + ( + "groupnorm", + paddle.nn.GroupNorm( + num_groups=num_groups, num_channels=out_channels + ), + ) + ) + elif char == "b": + is_before_conv = i < order.index("c") + if is_before_conv: + modules.append( + ("batchnorm", paddle.nn.BatchNorm3D(num_features=in_channels)) + ) + else: + modules.append( + ("batchnorm", paddle.nn.BatchNorm3D(num_features=out_channels)) + ) + else: + raise ValueError( + "Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']" + ) + return modules + + +class SingleConv(paddle.nn.Sequential): + """ + Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order + of operations can be specified via the `order` parameter + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + kernel_size (int): size of the convolving kernel + order (string): determines the order of layers, e.g. + 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + num_groups (int): number of groups for the GroupNorm + """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size=3, + order="crg", + num_groups=8, + padding=1, + ): + super(SingleConv, self).__init__() + for name, module in create_conv( + in_channels, out_channels, kernel_size, order, num_groups, padding=padding + ): + self.add_sublayer(name=name, sublayer=module) + + +class DoubleConv(paddle.nn.Sequential): + """ + A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d). + We use (Conv3d+ReLU+GroupNorm3d) by default. + This can be changed however by providing the 'order' argument, e.g. in order + to change to Conv3d+BatchNorm3d+ELU use order='cbe'. + Use padded convolutions to make sure that the output (H_out, W_out) is the same + as (H_in, W_in), so that you don't have to crop in the decoder path. + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + encoder (bool): if True we're in the encoder path, otherwise we're in the decoder + kernel_size (int): size of the convolving kernel + order (string): determines the order of layers, e.g. 
+ 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + 'cl' -> conv + LeakyReLU + 'ce' -> conv + ELU + num_groups (int): number of groups for the GroupNorm + """ + + def __init__( + self, + in_channels, + out_channels, + encoder, + kernel_size=3, + order="crg", + num_groups=8, + ): + super(DoubleConv, self).__init__() + if encoder: + conv1_in_channels = in_channels + conv1_out_channels = out_channels // 2 + if conv1_out_channels < in_channels: + conv1_out_channels = in_channels + conv2_in_channels, conv2_out_channels = (conv1_out_channels, out_channels) + else: + conv1_in_channels, conv1_out_channels = in_channels, out_channels + conv2_in_channels, conv2_out_channels = out_channels, out_channels + self.add_sublayer( + name="SingleConv1", + sublayer=SingleConv( + conv1_in_channels, conv1_out_channels, kernel_size, order, num_groups + ), + ) + self.add_sublayer( + name="SingleConv2", + sublayer=SingleConv( + conv2_in_channels, conv2_out_channels, kernel_size, order, num_groups + ), + ) + + +class ExtResNetBlock(paddle.nn.Layer): + """ + Basic UNet block consisting of a SingleConv followed by the residual block. + The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number + of output channels is compatible with the residual block that follows. + This block can be used instead of standard DoubleConv in the Encoder module. + Motivated by: https://arxiv.org/pdf/1706.00120.pdf + Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm. + """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size=3, + order="cge", + num_groups=8, + **kwargs + ): + super(ExtResNetBlock, self).__init__() + self.conv1 = SingleConv( + in_channels, + out_channels, + kernel_size=kernel_size, + order=order, + num_groups=num_groups, + ) + self.conv2 = SingleConv( + out_channels, + out_channels, + kernel_size=kernel_size, + order=order, + num_groups=num_groups, + ) + n_order = order + for c in "rel": + n_order = n_order.replace(c, "") + self.conv3 = SingleConv( + out_channels, + out_channels, + kernel_size=kernel_size, + order=n_order, + num_groups=num_groups, + ) + if "l" in order: + self.non_linearity = paddle.nn.LeakyReLU(negative_slope=0.1) + elif "e" in order: + self.non_linearity = paddle.nn.ELU() + else: + self.non_linearity = paddle.nn.ReLU() + + def forward(self, x): + out = self.conv1(x) + residual = out + out = self.conv2(out) + out = self.conv3(out) + out += residual + out = self.non_linearity(out) + return out + + +class Encoder(paddle.nn.Layer): + """ + A single module from the encoder path consisting of the optional max + pooling layer (one may specify the MaxPool kernel_size to be different + than the standard (2,2,2), e.g. if the volumetric data is anisotropic + (make sure to use complementary scale_factor in the decoder path) followed by + a DoubleConv module. + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + conv_kernel_size (int): size of the convolving kernel + apply_pooling (bool): if True use MaxPool3d before DoubleConv + pool_kernel_size (tuple): the size of the window to take a max over + pool_type (str): pooling layer: 'max' or 'avg' + basic_module(nn.Module): either ResNetBlock or DoubleConv + conv_layer_order (string): determines the order of layers + in `DoubleConv` module. See `DoubleConv` for more info. 
+ num_groups (int): number of groups for the GroupNorm + """ + + def __init__( + self, + in_channels, + out_channels, + conv_kernel_size=3, + apply_pooling=True, + pool_kernel_size=(2, 2, 2), + pool_type="max", + basic_module=DoubleConv, + conv_layer_order="crg", + num_groups=8, + ): + super(Encoder, self).__init__() + assert pool_type in ["max", "avg"] + if apply_pooling: + if pool_type == "max": + self.pooling = paddle.nn.MaxPool3D(kernel_size=pool_kernel_size) + else: + self.pooling = paddle.nn.AvgPool3D( + kernel_size=pool_kernel_size, exclusive=False + ) + else: + self.pooling = None + self.basic_module = basic_module( + in_channels, + out_channels, + encoder=True, + kernel_size=conv_kernel_size, + order=conv_layer_order, + num_groups=num_groups, + ) + + def forward(self, x): + if self.pooling is not None: + x = self.pooling(x) + x = self.basic_module(x) + return x + + +class Decoder(paddle.nn.Layer): + """ + A single module for decoder path consisting of the upsample layer + (either learned ConvTranspose3d or interpolation) followed by a DoubleConv + module. + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + kernel_size (int): size of the convolving kernel + scale_factor (tuple): used as the multiplier for the image H/W/D in + case of nn.Upsample or as stride in case of ConvTranspose3d, must reverse the MaxPool3d operation + from the corresponding encoder + basic_module(nn.Module): either ResNetBlock or DoubleConv + conv_layer_order (string): determines the order of layers + in `DoubleConv` module. See `DoubleConv` for more info. + num_groups (int): number of groups for the GroupNorm + """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size=3, + scale_factor=(2, 2, 2), + basic_module=DoubleConv, + conv_layer_order="crg", + num_groups=8, + ): + super(Decoder, self).__init__() + if basic_module == DoubleConv: + self.upsample = None + else: + self.upsample = paddle.nn.Conv3DTranspose( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=scale_factor, + padding=1, + output_padding=1, + ) + in_channels = out_channels + self.scse = SCA3D(in_channels) + self.basic_module = basic_module( + in_channels, + out_channels, + encoder=False, + kernel_size=kernel_size, + order=conv_layer_order, + num_groups=num_groups, + ) + + def forward(self, encoder_features, x): + if self.upsample is None: + output_size = tuple(encoder_features.shape)[2:] + x = paddle.nn.functional.interpolate( + x=x, size=output_size, mode="nearest", data_format="NCDHW" + ) + x = paddle.concat(x=(encoder_features, x), axis=1) + else: + x = self.upsample(x) + x += encoder_features + x = self.scse(x) + x = self.basic_module(x) + return x + + +class FinalConv(paddle.nn.Sequential): + """ + A module consisting of a convolution layer (e.g. Conv3d+ReLU+GroupNorm3d) and the final 1x1 convolution + which reduces the number of channels to 'out_channels'. + with the number of output channels 'out_channels // 2' and 'out_channels' respectively. + We use (Conv3d+ReLU+GroupNorm3d) by default. + This can be change however by providing the 'order' argument, e.g. in order + to change to Conv3d+BatchNorm3d+ReLU use order='cbr'. + Args: + in_channels (int): number of input channels + out_channels (int): number of output channels + kernel_size (int): size of the convolving kernel + order (string): determines the order of layers, e.g. 
+ 'cr' -> conv + ReLU + 'crg' -> conv + ReLU + groupnorm + num_groups (int): number of groups for the GroupNorm + """ + + def __init__( + self, in_channels, out_channels, kernel_size=3, order="crg", num_groups=8 + ): + super(FinalConv, self).__init__() + self.add_sublayer( + name="SingleConv", + sublayer=SingleConv( + in_channels, in_channels, kernel_size, order, num_groups + ), + ) + final_conv = paddle.nn.Conv3D( + in_channels=in_channels, out_channels=out_channels, kernel_size=1 + ) + self.add_sublayer(name="final_conv", sublayer=final_conv) diff --git a/jointContribution/IJCAI_2024/aminos/NN/UNet/attention_unet.py b/jointContribution/IJCAI_2024/aminos/NN/UNet/attention_unet.py index 8214b5e0e0..7988c43b18 100644 --- a/jointContribution/IJCAI_2024/aminos/NN/UNet/attention_unet.py +++ b/jointContribution/IJCAI_2024/aminos/NN/UNet/attention_unet.py @@ -1,151 +1,151 @@ -from typing import List - -import paddle - -from .BuildingBlocks import Decoder -from .BuildingBlocks import DoubleConv -from .BuildingBlocks import Encoder - - -def create_feature_maps(init_channel_number, number_of_fmaps): - return [(init_channel_number * 2**k) for k in range(number_of_fmaps)] - - -class UNet3D(paddle.nn.Layer): - """ - 3DUnet model from - `"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation" - `. - Args: - in_channels (int): number of input channels - out_channels (int): number of output segmentation masks; - Note that that the of out_channels might correspond to either - different semantic classes or to different binary segmentation mask. - It's up to the user of the class to interpret the out_channels and - use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class) - or BCEWithLogitsLoss (two-class) respectively) - f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number - of feature maps is given by the geometric progression: f_maps ^ k, k=1,2,3,4 - final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the - final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used - to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model. - layer_order (string): determines the order of layers - in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d. 
- See `SingleConv` for more info - init_channel_number (int): number of feature maps in the first conv layer of the encoder; default: 64 - num_groups (int): number of groups for the GroupNorm - """ - - def __init__( - self, - in_channels, - nlayers=4, - f_maps=16, - layer_order="crg", - num_groups=8, - **kwargs - ): - super(UNet3D, self).__init__() - if isinstance(f_maps, int): - f_maps = create_feature_maps(f_maps, number_of_fmaps=nlayers) - encoders = [] - for i, out_feature_num in enumerate(f_maps): - if i == 0: - encoder = Encoder( - in_channels, - out_feature_num, - apply_pooling=False, - basic_module=DoubleConv, - conv_layer_order=layer_order, - num_groups=num_groups, - ) - else: - encoder = Encoder( - f_maps[i - 1], - out_feature_num, - basic_module=DoubleConv, - conv_layer_order=layer_order, - num_groups=num_groups, - ) - encoders.append(encoder) - self.encoders = paddle.nn.LayerList(sublayers=encoders) - decoders = [] - reversed_f_maps = list(reversed(f_maps)) - for i in range(len(reversed_f_maps) - 1): - in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1] - out_feature_num = reversed_f_maps[i + 1] - decoder = Decoder( - in_feature_num, - out_feature_num, - basic_module=DoubleConv, - conv_layer_order=layer_order, - num_groups=num_groups, - ) - decoders.append(decoder) - self.decoders = paddle.nn.LayerList(sublayers=decoders) - - def forward(self, x): - encoders_features = [] - for encoder in self.encoders: - x = encoder(x) - encoders_features.insert(0, x) - encoders_features = encoders_features[1:] - for decoder, encoder_features in zip(self.decoders, encoders_features): - x = decoder(encoder_features, x) - return x - - -class UNet3DWithSamplePoints(paddle.nn.Layer): - def __init__( - self, - in_channels: int, - out_channels: int, - hidden_channels: int, - num_levels: int, - use_position_input: bool = True, - ): - super().__init__() - self.unet3d = UNet3D( - in_channels=in_channels, nlayers=num_levels, f_maps=hidden_channels - ) - self.out_ln = paddle.nn.LayerNorm(normalized_shape=hidden_channels) - - def voxel_expand(self, voxel_left): - indices = paddle.arange(start=31, end=-1, step=-1, dtype="int32") - voxel_right = voxel_left[:, :, :, indices, :] - voxel_merge = paddle.concat((voxel_left, voxel_right), axis=3) - return voxel_merge - - def forward(self, x, output_points, half=False): - x = self.unet3d.forward(x) - if half: - x = self.voxel_expand(x) - if isinstance(output_points, List): - rt_x = [] - for idx, output_point in enumerate(output_points): - cur_x = ( - paddle.nn.functional.grid_sample( - x=x[idx : idx + 1], grid=output_point, align_corners=False - ) - .squeeze() - .T - ) - rt_x.append(cur_x) - rt_x = paddle.concat(x=rt_x, axis=0) - else: - rt_x = ( - paddle.nn.functional.grid_sample( - x=x, grid=output_points, align_corners=False - ) - .squeeze() - .T - ) - return self.out_ln(rt_x) - - -if __name__ == "__main__": - net = UNet3DWithSamplePoints(1, 64, 64, 4, True) - sdf = paddle.randn(shape=[7, 1, 64, 64, 64]) - outputpoints = paddle.randn(shape=[7, 1471, 1, 1, 3]) - output = net.forward(sdf, outputpoints, half=False) - print(tuple(output.shape)) +from typing import List + +import paddle + +from .BuildingBlocks import Decoder +from .BuildingBlocks import DoubleConv +from .BuildingBlocks import Encoder + + +def create_feature_maps(init_channel_number, number_of_fmaps): + return [(init_channel_number * 2**k) for k in range(number_of_fmaps)] + + +class UNet3D(paddle.nn.Layer): + """ + 3DUnet model from + `"3D U-Net: Learning Dense Volumetric Segmentation from 
Sparse Annotation" + `. + Args: + in_channels (int): number of input channels + out_channels (int): number of output segmentation masks; + Note that that the of out_channels might correspond to either + different semantic classes or to different binary segmentation mask. + It's up to the user of the class to interpret the out_channels and + use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class) + or BCEWithLogitsLoss (two-class) respectively) + f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number + of feature maps is given by the geometric progression: f_maps ^ k, k=1,2,3,4 + final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the + final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used + to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model. + layer_order (string): determines the order of layers + in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d. + See `SingleConv` for more info + init_channel_number (int): number of feature maps in the first conv layer of the encoder; default: 64 + num_groups (int): number of groups for the GroupNorm + """ + + def __init__( + self, + in_channels, + nlayers=4, + f_maps=16, + layer_order="crg", + num_groups=8, + **kwargs + ): + super(UNet3D, self).__init__() + if isinstance(f_maps, int): + f_maps = create_feature_maps(f_maps, number_of_fmaps=nlayers) + encoders = [] + for i, out_feature_num in enumerate(f_maps): + if i == 0: + encoder = Encoder( + in_channels, + out_feature_num, + apply_pooling=False, + basic_module=DoubleConv, + conv_layer_order=layer_order, + num_groups=num_groups, + ) + else: + encoder = Encoder( + f_maps[i - 1], + out_feature_num, + basic_module=DoubleConv, + conv_layer_order=layer_order, + num_groups=num_groups, + ) + encoders.append(encoder) + self.encoders = paddle.nn.LayerList(sublayers=encoders) + decoders = [] + reversed_f_maps = list(reversed(f_maps)) + for i in range(len(reversed_f_maps) - 1): + in_feature_num = reversed_f_maps[i] + reversed_f_maps[i + 1] + out_feature_num = reversed_f_maps[i + 1] + decoder = Decoder( + in_feature_num, + out_feature_num, + basic_module=DoubleConv, + conv_layer_order=layer_order, + num_groups=num_groups, + ) + decoders.append(decoder) + self.decoders = paddle.nn.LayerList(sublayers=decoders) + + def forward(self, x): + encoders_features = [] + for encoder in self.encoders: + x = encoder(x) + encoders_features.insert(0, x) + encoders_features = encoders_features[1:] + for decoder, encoder_features in zip(self.decoders, encoders_features): + x = decoder(encoder_features, x) + return x + + +class UNet3DWithSamplePoints(paddle.nn.Layer): + def __init__( + self, + in_channels: int, + out_channels: int, + hidden_channels: int, + num_levels: int, + use_position_input: bool = True, + ): + super().__init__() + self.unet3d = UNet3D( + in_channels=in_channels, nlayers=num_levels, f_maps=hidden_channels + ) + self.out_ln = paddle.nn.LayerNorm(normalized_shape=hidden_channels) + + def voxel_expand(self, voxel_left): + indices = paddle.arange(start=31, end=-1, step=-1, dtype="int32") + voxel_right = voxel_left[:, :, :, indices, :] + voxel_merge = paddle.concat((voxel_left, voxel_right), axis=3) + return voxel_merge + + def forward(self, x, output_points, half=False): + x = self.unet3d.forward(x) + if half: + x = self.voxel_expand(x) + if isinstance(output_points, List): + rt_x = [] + for idx, output_point in 
enumerate(output_points): + cur_x = ( + paddle.nn.functional.grid_sample( + x=x[idx : idx + 1], grid=output_point, align_corners=False + ) + .squeeze() + .T + ) + rt_x.append(cur_x) + rt_x = paddle.concat(x=rt_x, axis=0) + else: + rt_x = ( + paddle.nn.functional.grid_sample( + x=x, grid=output_points, align_corners=False + ) + .squeeze() + .T + ) + return self.out_ln(rt_x) + + +if __name__ == "__main__": + net = UNet3DWithSamplePoints(1, 64, 64, 4, True) + sdf = paddle.randn(shape=[7, 1, 64, 64, 64]) + outputpoints = paddle.randn(shape=[7, 1471, 1, 1, 3]) + output = net.forward(sdf, outputpoints, half=False) + print(tuple(output.shape)) diff --git a/jointContribution/IJCAI_2024/aminos/NN/UNet/unet_parts.py b/jointContribution/IJCAI_2024/aminos/NN/UNet/unet_parts.py index ecead9dd48..0c1c3d0911 100644 --- a/jointContribution/IJCAI_2024/aminos/NN/UNet/unet_parts.py +++ b/jointContribution/IJCAI_2024/aminos/NN/UNet/unet_parts.py @@ -1,101 +1,101 @@ -import paddle -import paddle_aux - -""" Parts of the U-Net model """ - - -class DoubleConv(paddle.nn.Layer): - """(convolution => [BN] => ReLU) * 2""" - - def __init__(self, in_channels, out_channels, mid_channels=None): - super().__init__() - if not mid_channels: - mid_channels = out_channels - self.double_conv = paddle.nn.Sequential( - paddle.nn.Conv3D( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=3, - padding=1, - bias_attr=False, - ), - paddle.nn.BatchNorm3D(num_features=mid_channels), - paddle.nn.ReLU(), - paddle.nn.Conv3D( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=3, - padding=1, - bias_attr=False, - ), - paddle.nn.BatchNorm3D(num_features=out_channels), - paddle.nn.ReLU(), - ) - - def forward(self, x): - return self.double_conv(x) - - -class Down(paddle.nn.Layer): - """Downscaling with maxpool then double conv""" - - def __init__(self, in_channels, out_channels): - super().__init__() - self.out_dim = out_channels - self.maxpool_conv = paddle.nn.Sequential( - paddle.nn.MaxPool3D(kernel_size=2), DoubleConv(in_channels, out_channels) - ) - - def forward(self, x): - return self.maxpool_conv(x) - - -class Up(paddle.nn.Layer): - """Upscaling then double conv""" - - def __init__(self, in_channels, out_channels, bilinear=True): - super().__init__() - self.out_dim = out_channels - if bilinear: - self.up = paddle.nn.Upsample( - scale_factor=2, mode="bilinear", align_corners=True - ) - self.conv = DoubleConv(in_channels, out_channels, in_channels // 2) - else: - self.up = paddle.nn.Conv3DTranspose( - in_channels=in_channels, - out_channels=in_channels // 2, - kernel_size=2, - stride=2, - ) - self.conv = DoubleConv(in_channels, out_channels) - - def forward(self, x1, x2): - x1 = self.up(x1) - diffZ = tuple(x2.shape)[2] - tuple(x1.shape)[2] - diffY = tuple(x2.shape)[3] - tuple(x1.shape)[3] - diffX = tuple(x2.shape)[4] - tuple(x1.shape)[4] - x1 = paddle_aux._FUNCTIONAL_PAD( - pad=[ - diffZ // 2, - diffZ - diffZ // 2, - diffX // 2, - diffX - diffX // 2, - diffY // 2, - diffY - diffY // 2, - ], - x=x1, - ) - x = paddle.concat(x=[x2, x1], axis=1) - return self.conv(x) - - -class OutConv(paddle.nn.Layer): - def __init__(self, in_channels, out_channels): - super(OutConv, self).__init__() - self.conv = paddle.nn.Conv3D( - in_channels=in_channels, out_channels=out_channels, kernel_size=1 - ) - - def forward(self, x): - return self.conv(x) +import paddle +import paddle_aux + +""" Parts of the U-Net model """ + + +class DoubleConv(paddle.nn.Layer): + """(convolution => [BN] => ReLU) * 2""" + + def 
__init__(self, in_channels, out_channels, mid_channels=None): + super().__init__() + if not mid_channels: + mid_channels = out_channels + self.double_conv = paddle.nn.Sequential( + paddle.nn.Conv3D( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=3, + padding=1, + bias_attr=False, + ), + paddle.nn.BatchNorm3D(num_features=mid_channels), + paddle.nn.ReLU(), + paddle.nn.Conv3D( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=3, + padding=1, + bias_attr=False, + ), + paddle.nn.BatchNorm3D(num_features=out_channels), + paddle.nn.ReLU(), + ) + + def forward(self, x): + return self.double_conv(x) + + +class Down(paddle.nn.Layer): + """Downscaling with maxpool then double conv""" + + def __init__(self, in_channels, out_channels): + super().__init__() + self.out_dim = out_channels + self.maxpool_conv = paddle.nn.Sequential( + paddle.nn.MaxPool3D(kernel_size=2), DoubleConv(in_channels, out_channels) + ) + + def forward(self, x): + return self.maxpool_conv(x) + + +class Up(paddle.nn.Layer): + """Upscaling then double conv""" + + def __init__(self, in_channels, out_channels, bilinear=True): + super().__init__() + self.out_dim = out_channels + if bilinear: + self.up = paddle.nn.Upsample( + scale_factor=2, mode="bilinear", align_corners=True + ) + self.conv = DoubleConv(in_channels, out_channels, in_channels // 2) + else: + self.up = paddle.nn.Conv3DTranspose( + in_channels=in_channels, + out_channels=in_channels // 2, + kernel_size=2, + stride=2, + ) + self.conv = DoubleConv(in_channels, out_channels) + + def forward(self, x1, x2): + x1 = self.up(x1) + diffZ = tuple(x2.shape)[2] - tuple(x1.shape)[2] + diffY = tuple(x2.shape)[3] - tuple(x1.shape)[3] + diffX = tuple(x2.shape)[4] - tuple(x1.shape)[4] + x1 = paddle_aux._FUNCTIONAL_PAD( + pad=[ + diffZ // 2, + diffZ - diffZ // 2, + diffX // 2, + diffX - diffX // 2, + diffY // 2, + diffY - diffY // 2, + ], + x=x1, + ) + x = paddle.concat(x=[x2, x1], axis=1) + return self.conv(x) + + +class OutConv(paddle.nn.Layer): + def __init__(self, in_channels, out_channels): + super(OutConv, self).__init__() + self.conv = paddle.nn.Conv3D( + in_channels=in_channels, out_channels=out_channels, kernel_size=1 + ) + + def forward(self, x): + return self.conv(x) diff --git a/jointContribution/IJCAI_2024/aminos/NN/__init__.py b/jointContribution/IJCAI_2024/aminos/NN/__init__.py index e7eb8c7c49..a384d4ca43 100644 --- a/jointContribution/IJCAI_2024/aminos/NN/__init__.py +++ b/jointContribution/IJCAI_2024/aminos/NN/__init__.py @@ -1,5 +1,5 @@ -import os -import sys - -cur_path = os.path.split(__file__)[0] -sys.path.append(cur_path) +import os +import sys + +cur_path = os.path.split(__file__)[0] +sys.path.append(cur_path) diff --git a/jointContribution/IJCAI_2024/aminos/dataset/Load_mesh.py b/jointContribution/IJCAI_2024/aminos/dataset/Load_mesh.py index 231ceb4700..f4c230fa3f 100644 --- a/jointContribution/IJCAI_2024/aminos/dataset/Load_mesh.py +++ b/jointContribution/IJCAI_2024/aminos/dataset/Load_mesh.py @@ -1,564 +1,564 @@ -import os -import re - -import h5py -import numpy as np -import paddle -import pgl -from utils.knn import k_hop_subgraph - - -def trans_edge_index(edge_index): - assert isinstance(edge_index, paddle.Tensor) - edge_index_np = edge_index.numpy() - edges = list(zip(edge_index_np[0], edge_index_np[1])) - return edges - - -class DOF_Dataset(paddle.io.Dataset): - def __init__( - self, params=None, is_training=True, split="train", device=None, is_norm=False - ): - self.params = params - self.is_training = is_training 
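`trans_edge_index` above only reshapes connectivity: pgl's `Graph` expects a Python list of (src, dst) pairs rather than the [2, E] tensor used elsewhere in this code. A small usage sketch on a hypothetical three-edge graph:

import numpy as np
import paddle

# Hypothetical [2, E] connectivity with three directed edges 0->1, 1->2, 2->0.
edge_index = paddle.to_tensor(data=np.array([[0, 1, 2], [1, 2, 0]]), dtype="int64")
edge_index_np = edge_index.numpy()
edges = list(zip(edge_index_np[0], edge_index_np[1]))
# edges == [(0, 1), (1, 2), (2, 0)] (as numpy ints), the (src, dst) list that
# pgl.graph.Graph(num_nodes=..., edges=edges) expects.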
- self.device = device - self.epoch = 0 - self.pool = h5py.File(self.params.dataset_dir + f"/DOF_{split}.h5", "r") - self.split = split - self.is_norm = is_norm - self.key_list = list(self.pool.keys()) - self.key_list = np.sort([int(key) for key in self.key_list]) - self.key_list = [str(key) for key in self.key_list] - self.mean_std = paddle.to_tensor( - data=np.loadtxt(self.params.dataset_dir + "dof_mean_std.txt") - ).to("float32") - - def __getitem__(self, index): - dof = paddle.to_tensor(data=self.pool[self.key_list[index]][:]).to("float32") - if self.is_norm: - dof = dof.transpose(perm=[1, 2, 0]) - dof -= self.mean_std[0] - dof /= self.mean_std[1] - dof = dof.transpose(perm=[1, 2, 0]) - return dof - - def len(self): - return len(self.key_list) - - -class CarDataset(paddle.io.Dataset): - def __init__(self, path, mesh_indices): - super().__init__() - self.mesh_indices = mesh_indices - self.dataset_dir = path - self.file_handle = h5py.File(self.dataset_dir, "r") - - def __getitem__(self, index): - file_idx = str(int(self.mesh_indices[index])) - handle = self.file_handle[file_idx] - handle_dict = dict(handle) - for k, v in handle_dict.items(): - handle_dict[k] = v[:] - return handle_dict, file_idx - - def __len__(self): - return len(self.mesh_indices) - - -class CarDataset4UNet(CarDataset): - def __init__(self, path, mesh_indices, gt_exist=True): - super().__init__(path, mesh_indices) - self.current_idx = None - self.gt_exist = gt_exist - - def __getitem__(self, index): - data, file_idx = super().__getitem__(index) - self.current_idx = file_idx - rdata = {} - rdata["node|pos"] = paddle.to_tensor(data=data["node|pos"]).to("float32") - if self.gt_exist: - rdata["node|pressure"] = paddle.to_tensor(data=data["node|pressure"]).to( - "float32" - ) - rdata["voxel|sdf"] = ( - paddle.to_tensor(data=data["voxel|sdf"]) - .reshape(1, *tuple(data["voxel|grid"].shape)[:-1]) - .to("float32") - ) - rdata["node|unit_norm_v"] = paddle.to_tensor(data=data["node|unit_norm_v"]).to( - "float32" - ) - return rdata - - def get_cur_file_idx(self): - return self.current_idx - - -class CarDatasetGraph(CarDataset4UNet): - def __init__(self, path, mesh_indices, gt_exist=True): - super().__init__(path, mesh_indices, gt_exist) - - def __getitem__(self, index): - raw_data, _ = super().__getitem__(index) - data = super().__getitem__(index) - num_nodes = data["node|unit_norm_v"].shape[0] - edges = trans_edge_index(raw_data["face|face_node"]) - pgl.graph.Graph( - num_nodes=num_nodes, - edges=edges, - pos=data["node|pos"], - pressure=data["node|pressure"], - ) - - -def GetCarDatasetInfoList(params, path, split: list): - dataset_dir = os.path.join(path, "train.h5") - pressure_min_std = (np.loadtxt(os.path.join(path, "train_pressure_min_std.txt")),) - bounds = (np.loadtxt(os.path.join(path, "watertight_global_bounds.txt")),) - all_mesh_indices = np.loadtxt(os.path.join(path, "watertight_meshes.txt")).reshape( - -1 - ) - splited_mesh_indices = [ - all_mesh_indices[start:end] for start, end in zip(split[:-1], split[1:]) - ] - return dataset_dir, *pressure_min_std, *bounds, splited_mesh_indices - - -class CFDdatasetmap(paddle.io.Dataset): - def __init__( - self, params, path, split="train", dataset_type="h5", is_training=False - ): - super().__init__() - self.path = path - self.split = split - self.dataset_dir = path - self.params = params - self.is_training = is_training - if dataset_type == "h5": - self.file_handle = h5py.File(self.dataset_dir + f"/{split}.h5", "r") - else: - raise ValueError("invalid data format") - - def 
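For orientation, here is a tiny, self-contained sketch of the HDF5 layout that CarDataset above assumes: one group per mesh index, holding datasets such as "node|pos" and "node|pressure". The file name and array sizes below are made up; the real train.h5 carries many more keys (voxel|sdf, voxel|grid, node|unit_norm_v, ...).

import h5py
import numpy as np

with h5py.File("toy_cars.h5", "w") as f:
    g = f.create_group("0")  # group name == str(mesh index)
    g.create_dataset("node|pos", data=np.random.rand(5, 3).astype("float32"))
    g.create_dataset("node|pressure", data=np.random.rand(5, 1).astype("float32"))

with h5py.File("toy_cars.h5", "r") as f:
    handle = f["0"]
    # Same idiom as CarDataset.__getitem__: materialise every dataset with [:].
    sample = {k: v[:] for k, v in dict(handle).items()}

print(sample["node|pos"].shape, sample["node|pressure"].shape)  # (5, 3) (5, 1)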
__getitem__(self, index): - trajectory_handle = self.file_handle[str(index)] - trajectory = {} - for key in trajectory_handle.keys(): - trajectory[key] = paddle.to_tensor(data=trajectory_handle[key][:]) - return trajectory - - def __len__(self): - return len(self.file_handle) - - -def sort_key_list(in_list: list): - a_list = [] - b_list = [] - for k in in_list: - if k.startswith("A"): - a_list.append(k) - elif k.startswith("B"): - b_list.append(k) - sorted_a_list = sorted(a_list, key=lambda s: int(re.search("_(\\d+)", s).group(1))) - sorted_b_list = sorted(b_list, key=lambda s: int(re.search("_(\\d+)", s).group(1))) - rt_list = sorted_a_list + sorted_b_list - return rt_list - - -class Data_Pool: - def __init__(self, params=None, is_training=True, split="train", device=None): - self.params = params - self.is_training = is_training - self.device = device - self.epoch = 0 - self.load_mesh_to_cpu(split=split, dataset_dir=params.dataset_dir) - - def load_mesh_to_cpu(self, split="train", dataset_dir=None): - self.valid_pool = [] - if dataset_dir is not None: - self.pool = h5py.File(dataset_dir + f"/{split}.h5", "r") - else: - self.pool = h5py.File(self.params.dataset_dir + f"/{split}.h5", "r") - self.key_list = list(self.pool.keys()) - self.key_list = sort_key_list(self.key_list) - return self.params.dataset_dir - - @staticmethod - def datapreprocessing(graph_cell, is_training=False): - def randbool(*size, device="cuda"): - """Returns 50% channce of True of False""" - return paddle.randint(low=2, high=size) == paddle.randint(low=2, high=size) - - graph_cell.ball_edge_index = None - cell_attr = paddle.concat(x=(graph_cell.x, graph_cell.pos), axis=-1) - senders, receivers = graph_cell.edge_index - if is_training: - random_mask = randbool( - 1, tuple(senders.shape)[0], device=senders.place - ).repeat(2, 1) - random_direction_edge = paddle.where( - condition=random_mask, - x=paddle.stack(x=(senders, receivers), axis=0), - y=paddle.stack(x=(receivers, senders), axis=0), - ) - else: - random_direction_edge = paddle.stack(x=(senders, receivers), axis=0) - releative_node_attr = ( - cell_attr[random_direction_edge[0]] - cell_attr[random_direction_edge[1]] - ) - graph_cell.edge_index = random_direction_edge - graph_cell.edge_attr = releative_node_attr - return graph_cell - - -class CustomGraphData(pgl.graph.Graph): - def __init__(self, **kwargs): - for key, value in kwargs.items(): - setattr(self, key, value) - self.keys = [ - "norm_y", - "pos", - "voxel", - "origin_id", - "press_std", - "edge_attr", - "press_mean", - "edge_index", - "batch", - "graph_index", - "y", - "x", - "query", - "ptr", - "ao", - ] - edges = trans_edge_index(self.edge_index) - super().__init__(num_nodes=self.x.shape[0], edges=edges, **kwargs) - - def __inc__(self, key, value, *args, **kwargs): - offset_rules = { - "edge_index": self.num_nodes, - "face": self.num_nodes, - "cells_node": self.num_nodes, - "face_node": self.num_nodes, - "cells_face": self.num_nodes, - "neighbour_cell": self.num_nodes, - "face_node_x": self.num_nodes, - "pos": 0, - "A_node_to_node": 0, - "A_node_to_node_x": 0, - "B_node_to_node": 0, - "B_node_to_node_x": 0, - "cell_area": 0, - "node_type": 0, - "graph_index": 0, - "pde_theta": 0, - "neural_network_output_mask": 0, - "uvp_dim": 0, - "dt_graph": 0, - "x": 0, - "y": 0, - "m_ids": 0, - "m_gs": 0, - "case_global_index": 0, - } - return offset_rules.get(key, super().__inc__(key, value, *args, **kwargs)) - - def __cat_dim__(self, key, value, *args, **kwargs): - cat_dim_rules = { - "x": 0, - "pos": 0, - "y": 0, 
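The randbool helper inside Data_Pool.datapreprocessing above reads as if it should flip each edge's direction with probability 0.5; the sketch below re-implements that apparent intent on a toy graph (an illustration, not the repository's helper) and then builds the same sender-minus-receiver relative edge attributes.

import paddle

num_nodes, num_edges = 6, 10
cell_attr = paddle.randn([num_nodes, 4])               # per-node features (x concat pos)
senders = paddle.randint(0, num_nodes, shape=[num_edges])
receivers = paddle.randint(0, num_nodes, shape=[num_edges])

flip = paddle.rand([num_edges]) < 0.5                  # True for ~50% of edges
flip = paddle.stack([flip, flip], axis=0)              # apply the same choice to both rows
forward_edges = paddle.stack([senders, receivers], axis=0)
backward_edges = paddle.stack([receivers, senders], axis=0)
edge_index = paddle.where(flip, forward_edges, backward_edges)  # randomized direction

# Relative ("sender minus receiver") edge attributes, as in datapreprocessing.
edge_attr = cell_attr[edge_index[0]] - cell_attr[edge_index[1]]
print(edge_index.shape, edge_attr.shape)               # [2, 10] [10, 4]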
- "norm_y": 0, - "query": 0, - "edge_index": 1, - "voxel": 0, - "graph_index": 0, - } - return cat_dim_rules.get(key, super().__cat_dim__(key, value, *args, **kwargs)) - - -class GraphCellDataset(paddle.io.Dataset): - def __init__( - self, - base_dataset, - len_ds=None, - indices=None, - params=None, - subsampling=False, - sample_ratio=0.2, - ): - super().__init__() - self.base_dataset = base_dataset - self._len = len_ds - self.idx_indices = None - if indices is not None: - self._len = len(indices) - self.idx_indices = indices - self.params = params - self.subsampling = subsampling - self.sample_ratio = sample_ratio - self.k_hop = self.params.sample_khop - - @property - def pool(self): - return self.base_dataset.pool - - @property - def key_list(self): - return self.base_dataset.key_list - - def len(self): - return self._len - - def __len__(self): - return self._len - - def load_A_data(self, idx): - """""" - minibatch_data = self.pool[self.key_list[idx]] - mesh_pos = paddle.to_tensor( - data=minibatch_data["node|pos"][:], dtype=paddle.get_default_dtype() - ) - unit_norm_v = paddle.to_tensor( - data=minibatch_data["node|unit_norm_v"][:], dtype=paddle.get_default_dtype() - ) - face_node = paddle.to_tensor( - data=minibatch_data["face|face_node"][:], dtype=paddle.int64 - ) - ao = paddle.to_tensor( - data=minibatch_data["node|ao"][:], dtype=paddle.get_default_dtype() - ) - voxel = paddle.to_tensor( - data=minibatch_data["voxel|sdf"][:], dtype=paddle.get_default_dtype() - ).reshape(1, 1, *tuple(minibatch_data["voxel|grid"][:].shape)[:-1]) - voxel = (voxel - minibatch_data["voxel_mean_std"][0]) / minibatch_data[ - "voxel_mean_std" - ][1] - bounds = minibatch_data["bounds"] - mid = (bounds[0] + bounds[1]) / 2 - scale = (bounds[1] - bounds[0]) / 2 - canonical_query = (mesh_pos - mid) / scale - canonical_query = canonical_query.astype("float32") - y = paddle.to_tensor( - data=minibatch_data["node|pressure"][:], dtype=paddle.get_default_dtype() - ) - norm_y = (y - minibatch_data["pressure_mean_std"][0]) / minibatch_data[ - "pressure_mean_std" - ][1] - graph_node = CustomGraphData( - x=unit_norm_v, - edge_index=face_node, - pos=mesh_pos, - y=y, - norm_y=norm_y, - ao=ao, - voxel=voxel, - query=canonical_query, - graph_index=paddle.to_tensor(data=[idx], dtype="int64"), - origin_id=paddle.to_tensor( - data=[ord(char) for char in self.key_list[idx]], dtype="int64" - ), - press_mean=paddle.to_tensor(data=minibatch_data["pressure_mean_std"][0]), - press_std=paddle.to_tensor(data=minibatch_data["pressure_mean_std"][1]), - ) - return graph_node - - def load_B_data(self, idx): - """""" - minibatch_data = self.pool[self.key_list[idx]] - mesh_pos = paddle.to_tensor( - data=minibatch_data["cell|centroid"][:], dtype=paddle.get_default_dtype() - ) - normals = paddle.to_tensor( - data=minibatch_data["cell|unit_norm_v"][:], dtype=paddle.get_default_dtype() - ) - edge_index = paddle.to_tensor( - data=minibatch_data["face|neighbour_cell"][:], dtype=paddle.int64 - ) - y = paddle.to_tensor( - data=minibatch_data["cell|pressure"][:], dtype=paddle.get_default_dtype() - ) - norm_y = (y - minibatch_data["pressure_mean_std"][0]) / minibatch_data[ - "pressure_mean_std" - ][1] - voxel = paddle.to_tensor( - data=minibatch_data["voxel|sdf"][:], dtype=paddle.get_default_dtype() - ).reshape(1, 1, *tuple(minibatch_data["voxel|grid"][:].shape)[:-1]) - voxel = (voxel - minibatch_data["voxel_mean_std"][0]) / minibatch_data[ - "voxel_mean_std" - ][1] - bounds = minibatch_data["bounds"] - mid = (bounds[0] + bounds[1]) / 2 - scale = 
(bounds[1] - bounds[0]) / 2 - canonical_query = (mesh_pos - mid) / scale - canonical_query = canonical_query.astype("float32") - ao = paddle.zeros_like(x=y) - if self.subsampling: - sampled_nodes = paddle.randint( - low=0, high=tuple(normals.shape)[0], shape=[self.params.num_samples] - ) - subgraph_nodes, subgraph_edge_index, _, _ = k_hop_subgraph( - sampled_nodes, self.k_hop, edge_index, relabel_nodes=True - ) - normals = normals[subgraph_nodes] - mesh_pos = mesh_pos[subgraph_nodes] - y = y[subgraph_nodes] - norm_y = norm_y[subgraph_nodes] - ao = ao[subgraph_nodes] - canonical_query = canonical_query[subgraph_nodes] - edge_index = subgraph_edge_index - graph_cell = CustomGraphData( - x=normals, - edge_index=edge_index, - pos=mesh_pos, - y=y, - norm_y=norm_y, - ao=ao, - query=canonical_query, - voxel=voxel, - graph_index=paddle.to_tensor(data=[idx], dtype="int64"), - origin_id=paddle.to_tensor( - data=[ord(char) for char in self.key_list[idx]], dtype="int64" - ), - press_mean=paddle.to_tensor(data=minibatch_data["pressure_mean_std"][0]), - press_std=paddle.to_tensor(data=minibatch_data["pressure_mean_std"][1]), - ) - return graph_cell - - def get(self, idx): - """""" - if self.idx_indices is not None: - idx = self.idx_indices[idx] - if self.key_list[idx].startswith("A"): - graph_cell = self.load_A_data(idx) - elif self.key_list[idx].startswith("B"): - graph_cell = self.load_B_data(idx) - else: - minibatch_data = self.key_list[self.key_list[idx]] - if tuple(minibatch_data["cell|centroid"].shape)[0] < 10000: - graph_cell = self.load_A_data(idx) - else: - graph_cell = self.load_B_data(idx) - return graph_cell - - def __getitem__(self, idx): - return self.get(idx) - - -class CustomGraphDataLoader(paddle.io.DataLoader): - def __init__( - self, dataset, batch_size=1, shuffle=False, collate_fn=None, num_workers=0 - ): - super().__init__( - dataset, - batch_size=batch_size, - shuffle=shuffle, - collate_fn=collate_fn, - num_workers=num_workers, - ) - self.dataset = dataset - self.batch_size = batch_size - self.index = 0 - - def __iter__(self): - return self - - def __next__(self): - if self.index >= len(self.dataset): - raise StopIteration - - batch_data = [ - self.dataset[i] - for i in range( - self.index, min(self.index + self.batch_size, len(self.dataset)) - ) - ] - self.index += self.batch_size - - return batch_data - - -class DatasetFactory: - def __init__(self, params=None, device=None, split="test"): - self.params = params - self.train_dataset = Data_Pool( - params=params, is_training=True, split=split, device=device - ) - self.test_dataset = Data_Pool( - params=params, is_training=False, split=split, device=device - ) - - def create_trainset( - self, - batch_size=100, - num_workers=4, - pin_memory=True, - persistent_workers=True, - indices=None, - subsampling=True, - ratio=0.2, - ): - """training set""" - graph_cell_dataset = GraphCellDataset( - base_dataset=self.train_dataset, - len_ds=len(self.train_dataset.pool), - indices=indices, - params=self.params, - subsampling=subsampling, - sample_ratio=ratio, - ) - loader = paddle.io.DataLoader( - dataset=graph_cell_dataset, - batch_size=batch_size, - num_workers=num_workers, - shuffle=True, - ) - """ training set """ - return self.train_dataset, loader - - def create_testset( - self, - batch_size=1, - num_workers=0, - pin_memory=False, - persistent_workers=False, - valid_num=10, - subsampling=True, - indices=None, - ): - """test set""" - if indices is not None: - valid_num = len(indices) - graph_cell_dataset = GraphCellDataset( - 
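load_B_data above relies on utils.knn.k_hop_subgraph, which is not shown in this diff. As a rough mental model only, the stand-in below keeps every node within k hops of the sampled seeds and relabels the surviving edges into a compact index range; the real helper may differ in signature and behaviour.

import numpy as np

def k_hop_subgraph_np(seeds, k, edge_index):
    """edge_index: (2, E) int array; returns (kept_nodes, relabelled_edge_index)."""
    kept = set(int(s) for s in seeds)
    frontier = set(kept)
    for _ in range(k):
        touches = np.isin(edge_index[0], list(frontier)) | np.isin(edge_index[1], list(frontier))
        frontier = set(edge_index[:, touches].ravel().tolist()) - kept
        kept |= frontier
    kept_nodes = np.array(sorted(kept))
    keep_edges = np.isin(edge_index[0], kept_nodes) & np.isin(edge_index[1], kept_nodes)
    remap = {old: new for new, old in enumerate(kept_nodes)}
    sub_edges = np.vectorize(remap.get)(edge_index[:, keep_edges])
    return kept_nodes, sub_edges

edge_index = np.array([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]])  # a simple chain 0-1-2-3-4-5
nodes, sub = k_hop_subgraph_np(seeds=[2], k=1, edge_index=edge_index)
print(nodes)  # [1 2 3]
print(sub)    # edges relabelled into the 0..len(nodes)-1 range: [[0 1], [1 2]]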
base_dataset=self.test_dataset, - len_ds=valid_num, - params=self.params, - subsampling=subsampling, - indices=indices, - ) - loader = CustomGraphDataLoader( - dataset=graph_cell_dataset, - batch_size=batch_size, - num_workers=num_workers, - shuffle=True, - ) - """ test set """ - return self.test_dataset, loader +import os +import re + +import h5py +import numpy as np +import paddle +import pgl +from utils.knn import k_hop_subgraph + + +def trans_edge_index(edge_index): + assert isinstance(edge_index, paddle.Tensor) + edge_index_np = edge_index.numpy() + edges = list(zip(edge_index_np[0], edge_index_np[1])) + return edges + + +class DOF_Dataset(paddle.io.Dataset): + def __init__( + self, params=None, is_training=True, split="train", device=None, is_norm=False + ): + self.params = params + self.is_training = is_training + self.device = device + self.epoch = 0 + self.pool = h5py.File(self.params.dataset_dir + f"/DOF_{split}.h5", "r") + self.split = split + self.is_norm = is_norm + self.key_list = list(self.pool.keys()) + self.key_list = np.sort([int(key) for key in self.key_list]) + self.key_list = [str(key) for key in self.key_list] + self.mean_std = paddle.to_tensor( + data=np.loadtxt(self.params.dataset_dir + "dof_mean_std.txt") + ).to("float32") + + def __getitem__(self, index): + dof = paddle.to_tensor(data=self.pool[self.key_list[index]][:]).to("float32") + if self.is_norm: + dof = dof.transpose(perm=[1, 2, 0]) + dof -= self.mean_std[0] + dof /= self.mean_std[1] + dof = dof.transpose(perm=[1, 2, 0]) + return dof + + def len(self): + return len(self.key_list) + + +class CarDataset(paddle.io.Dataset): + def __init__(self, path, mesh_indices): + super().__init__() + self.mesh_indices = mesh_indices + self.dataset_dir = path + self.file_handle = h5py.File(self.dataset_dir, "r") + + def __getitem__(self, index): + file_idx = str(int(self.mesh_indices[index])) + handle = self.file_handle[file_idx] + handle_dict = dict(handle) + for k, v in handle_dict.items(): + handle_dict[k] = v[:] + return handle_dict, file_idx + + def __len__(self): + return len(self.mesh_indices) + + +class CarDataset4UNet(CarDataset): + def __init__(self, path, mesh_indices, gt_exist=True): + super().__init__(path, mesh_indices) + self.current_idx = None + self.gt_exist = gt_exist + + def __getitem__(self, index): + data, file_idx = super().__getitem__(index) + self.current_idx = file_idx + rdata = {} + rdata["node|pos"] = paddle.to_tensor(data=data["node|pos"]).to("float32") + if self.gt_exist: + rdata["node|pressure"] = paddle.to_tensor(data=data["node|pressure"]).to( + "float32" + ) + rdata["voxel|sdf"] = ( + paddle.to_tensor(data=data["voxel|sdf"]) + .reshape(1, *tuple(data["voxel|grid"].shape)[:-1]) + .to("float32") + ) + rdata["node|unit_norm_v"] = paddle.to_tensor(data=data["node|unit_norm_v"]).to( + "float32" + ) + return rdata + + def get_cur_file_idx(self): + return self.current_idx + + +class CarDatasetGraph(CarDataset4UNet): + def __init__(self, path, mesh_indices, gt_exist=True): + super().__init__(path, mesh_indices, gt_exist) + + def __getitem__(self, index): + raw_data, _ = super().__getitem__(index) + data = super().__getitem__(index) + num_nodes = data["node|unit_norm_v"].shape[0] + edges = trans_edge_index(raw_data["face|face_node"]) + pgl.graph.Graph( + num_nodes=num_nodes, + edges=edges, + pos=data["node|pos"], + pressure=data["node|pressure"], + ) + + +def GetCarDatasetInfoList(params, path, split: list): + dataset_dir = os.path.join(path, "train.h5") + pressure_min_std = 
(np.loadtxt(os.path.join(path, "train_pressure_min_std.txt")),) + bounds = (np.loadtxt(os.path.join(path, "watertight_global_bounds.txt")),) + all_mesh_indices = np.loadtxt(os.path.join(path, "watertight_meshes.txt")).reshape( + -1 + ) + splited_mesh_indices = [ + all_mesh_indices[start:end] for start, end in zip(split[:-1], split[1:]) + ] + return dataset_dir, *pressure_min_std, *bounds, splited_mesh_indices + + +class CFDdatasetmap(paddle.io.Dataset): + def __init__( + self, params, path, split="train", dataset_type="h5", is_training=False + ): + super().__init__() + self.path = path + self.split = split + self.dataset_dir = path + self.params = params + self.is_training = is_training + if dataset_type == "h5": + self.file_handle = h5py.File(self.dataset_dir + f"/{split}.h5", "r") + else: + raise ValueError("invalid data format") + + def __getitem__(self, index): + trajectory_handle = self.file_handle[str(index)] + trajectory = {} + for key in trajectory_handle.keys(): + trajectory[key] = paddle.to_tensor(data=trajectory_handle[key][:]) + return trajectory + + def __len__(self): + return len(self.file_handle) + + +def sort_key_list(in_list: list): + a_list = [] + b_list = [] + for k in in_list: + if k.startswith("A"): + a_list.append(k) + elif k.startswith("B"): + b_list.append(k) + sorted_a_list = sorted(a_list, key=lambda s: int(re.search("_(\\d+)", s).group(1))) + sorted_b_list = sorted(b_list, key=lambda s: int(re.search("_(\\d+)", s).group(1))) + rt_list = sorted_a_list + sorted_b_list + return rt_list + + +class Data_Pool: + def __init__(self, params=None, is_training=True, split="train", device=None): + self.params = params + self.is_training = is_training + self.device = device + self.epoch = 0 + self.load_mesh_to_cpu(split=split, dataset_dir=params.dataset_dir) + + def load_mesh_to_cpu(self, split="train", dataset_dir=None): + self.valid_pool = [] + if dataset_dir is not None: + self.pool = h5py.File(dataset_dir + f"/{split}.h5", "r") + else: + self.pool = h5py.File(self.params.dataset_dir + f"/{split}.h5", "r") + self.key_list = list(self.pool.keys()) + self.key_list = sort_key_list(self.key_list) + return self.params.dataset_dir + + @staticmethod + def datapreprocessing(graph_cell, is_training=False): + def randbool(*size, device="cuda"): + """Returns 50% channce of True of False""" + return paddle.randint(low=2, high=size) == paddle.randint(low=2, high=size) + + graph_cell.ball_edge_index = None + cell_attr = paddle.concat(x=(graph_cell.x, graph_cell.pos), axis=-1) + senders, receivers = graph_cell.edge_index + if is_training: + random_mask = randbool( + 1, tuple(senders.shape)[0], device=senders.place + ).repeat(2, 1) + random_direction_edge = paddle.where( + condition=random_mask, + x=paddle.stack(x=(senders, receivers), axis=0), + y=paddle.stack(x=(receivers, senders), axis=0), + ) + else: + random_direction_edge = paddle.stack(x=(senders, receivers), axis=0) + releative_node_attr = ( + cell_attr[random_direction_edge[0]] - cell_attr[random_direction_edge[1]] + ) + graph_cell.edge_index = random_direction_edge + graph_cell.edge_attr = releative_node_attr + return graph_cell + + +class CustomGraphData(pgl.graph.Graph): + def __init__(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) + self.keys = [ + "norm_y", + "pos", + "voxel", + "origin_id", + "press_std", + "edge_attr", + "press_mean", + "edge_index", + "batch", + "graph_index", + "y", + "x", + "query", + "ptr", + "ao", + ] + edges = trans_edge_index(self.edge_index) + 
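A quick usage note for sort_key_list above: keys are grouped by their "A"/"B" prefix and then ordered by the integer after the underscore, so "A_10" sorts after "A_2" (plain lexicographic sorting would reverse them); keys without either prefix are silently dropped. Assuming the snippet is run from the aminos directory so that dataset.Load_mesh is importable:

from dataset.Load_mesh import sort_key_list

keys = ["B_10", "A_2", "A_10", "B_2", "A_1"]
print(sort_key_list(keys))
# expected: ['A_1', 'A_2', 'A_10', 'B_2', 'B_10']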
super().__init__(num_nodes=self.x.shape[0], edges=edges, **kwargs) + + def __inc__(self, key, value, *args, **kwargs): + offset_rules = { + "edge_index": self.num_nodes, + "face": self.num_nodes, + "cells_node": self.num_nodes, + "face_node": self.num_nodes, + "cells_face": self.num_nodes, + "neighbour_cell": self.num_nodes, + "face_node_x": self.num_nodes, + "pos": 0, + "A_node_to_node": 0, + "A_node_to_node_x": 0, + "B_node_to_node": 0, + "B_node_to_node_x": 0, + "cell_area": 0, + "node_type": 0, + "graph_index": 0, + "pde_theta": 0, + "neural_network_output_mask": 0, + "uvp_dim": 0, + "dt_graph": 0, + "x": 0, + "y": 0, + "m_ids": 0, + "m_gs": 0, + "case_global_index": 0, + } + return offset_rules.get(key, super().__inc__(key, value, *args, **kwargs)) + + def __cat_dim__(self, key, value, *args, **kwargs): + cat_dim_rules = { + "x": 0, + "pos": 0, + "y": 0, + "norm_y": 0, + "query": 0, + "edge_index": 1, + "voxel": 0, + "graph_index": 0, + } + return cat_dim_rules.get(key, super().__cat_dim__(key, value, *args, **kwargs)) + + +class GraphCellDataset(paddle.io.Dataset): + def __init__( + self, + base_dataset, + len_ds=None, + indices=None, + params=None, + subsampling=False, + sample_ratio=0.2, + ): + super().__init__() + self.base_dataset = base_dataset + self._len = len_ds + self.idx_indices = None + if indices is not None: + self._len = len(indices) + self.idx_indices = indices + self.params = params + self.subsampling = subsampling + self.sample_ratio = sample_ratio + self.k_hop = self.params.sample_khop + + @property + def pool(self): + return self.base_dataset.pool + + @property + def key_list(self): + return self.base_dataset.key_list + + def len(self): + return self._len + + def __len__(self): + return self._len + + def load_A_data(self, idx): + """""" + minibatch_data = self.pool[self.key_list[idx]] + mesh_pos = paddle.to_tensor( + data=minibatch_data["node|pos"][:], dtype=paddle.get_default_dtype() + ) + unit_norm_v = paddle.to_tensor( + data=minibatch_data["node|unit_norm_v"][:], dtype=paddle.get_default_dtype() + ) + face_node = paddle.to_tensor( + data=minibatch_data["face|face_node"][:], dtype=paddle.int64 + ) + ao = paddle.to_tensor( + data=minibatch_data["node|ao"][:], dtype=paddle.get_default_dtype() + ) + voxel = paddle.to_tensor( + data=minibatch_data["voxel|sdf"][:], dtype=paddle.get_default_dtype() + ).reshape(1, 1, *tuple(minibatch_data["voxel|grid"][:].shape)[:-1]) + voxel = (voxel - minibatch_data["voxel_mean_std"][0]) / minibatch_data[ + "voxel_mean_std" + ][1] + bounds = minibatch_data["bounds"] + mid = (bounds[0] + bounds[1]) / 2 + scale = (bounds[1] - bounds[0]) / 2 + canonical_query = (mesh_pos - mid) / scale + canonical_query = canonical_query.astype("float32") + y = paddle.to_tensor( + data=minibatch_data["node|pressure"][:], dtype=paddle.get_default_dtype() + ) + norm_y = (y - minibatch_data["pressure_mean_std"][0]) / minibatch_data[ + "pressure_mean_std" + ][1] + graph_node = CustomGraphData( + x=unit_norm_v, + edge_index=face_node, + pos=mesh_pos, + y=y, + norm_y=norm_y, + ao=ao, + voxel=voxel, + query=canonical_query, + graph_index=paddle.to_tensor(data=[idx], dtype="int64"), + origin_id=paddle.to_tensor( + data=[ord(char) for char in self.key_list[idx]], dtype="int64" + ), + press_mean=paddle.to_tensor(data=minibatch_data["pressure_mean_std"][0]), + press_std=paddle.to_tensor(data=minibatch_data["pressure_mean_std"][1]), + ) + return graph_node + + def load_B_data(self, idx): + """""" + minibatch_data = self.pool[self.key_list[idx]] + mesh_pos = 
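The __inc__ / __cat_dim__ rules above exist for mini-batching: when several graphs are packed into one large graph, the "edge_index" entries of later graphs must be shifted by the node counts of the earlier graphs and concatenated along dimension 1. A small numpy illustration of that bookkeeping (not the pgl batching code itself):

import numpy as np

g1_edges = np.array([[0, 1], [1, 2]])  # graph 1: senders/receivers, 3 nodes
g2_edges = np.array([[0, 2], [2, 1]])  # graph 2: 3 nodes
g1_num_nodes = 3

batched = np.concatenate(
    [g1_edges, g2_edges + g1_num_nodes],  # offset by __inc__("edge_index") == num_nodes
    axis=1,                               # __cat_dim__("edge_index") == 1
)
print(batched)  # [[0 1 3 5]
                #  [1 2 5 4]]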
paddle.to_tensor( + data=minibatch_data["cell|centroid"][:], dtype=paddle.get_default_dtype() + ) + normals = paddle.to_tensor( + data=minibatch_data["cell|unit_norm_v"][:], dtype=paddle.get_default_dtype() + ) + edge_index = paddle.to_tensor( + data=minibatch_data["face|neighbour_cell"][:], dtype=paddle.int64 + ) + y = paddle.to_tensor( + data=minibatch_data["cell|pressure"][:], dtype=paddle.get_default_dtype() + ) + norm_y = (y - minibatch_data["pressure_mean_std"][0]) / minibatch_data[ + "pressure_mean_std" + ][1] + voxel = paddle.to_tensor( + data=minibatch_data["voxel|sdf"][:], dtype=paddle.get_default_dtype() + ).reshape(1, 1, *tuple(minibatch_data["voxel|grid"][:].shape)[:-1]) + voxel = (voxel - minibatch_data["voxel_mean_std"][0]) / minibatch_data[ + "voxel_mean_std" + ][1] + bounds = minibatch_data["bounds"] + mid = (bounds[0] + bounds[1]) / 2 + scale = (bounds[1] - bounds[0]) / 2 + canonical_query = (mesh_pos - mid) / scale + canonical_query = canonical_query.astype("float32") + ao = paddle.zeros_like(x=y) + if self.subsampling: + sampled_nodes = paddle.randint( + low=0, high=tuple(normals.shape)[0], shape=[self.params.num_samples] + ) + subgraph_nodes, subgraph_edge_index, _, _ = k_hop_subgraph( + sampled_nodes, self.k_hop, edge_index, relabel_nodes=True + ) + normals = normals[subgraph_nodes] + mesh_pos = mesh_pos[subgraph_nodes] + y = y[subgraph_nodes] + norm_y = norm_y[subgraph_nodes] + ao = ao[subgraph_nodes] + canonical_query = canonical_query[subgraph_nodes] + edge_index = subgraph_edge_index + graph_cell = CustomGraphData( + x=normals, + edge_index=edge_index, + pos=mesh_pos, + y=y, + norm_y=norm_y, + ao=ao, + query=canonical_query, + voxel=voxel, + graph_index=paddle.to_tensor(data=[idx], dtype="int64"), + origin_id=paddle.to_tensor( + data=[ord(char) for char in self.key_list[idx]], dtype="int64" + ), + press_mean=paddle.to_tensor(data=minibatch_data["pressure_mean_std"][0]), + press_std=paddle.to_tensor(data=minibatch_data["pressure_mean_std"][1]), + ) + return graph_cell + + def get(self, idx): + """""" + if self.idx_indices is not None: + idx = self.idx_indices[idx] + if self.key_list[idx].startswith("A"): + graph_cell = self.load_A_data(idx) + elif self.key_list[idx].startswith("B"): + graph_cell = self.load_B_data(idx) + else: + minibatch_data = self.key_list[self.key_list[idx]] + if tuple(minibatch_data["cell|centroid"].shape)[0] < 10000: + graph_cell = self.load_A_data(idx) + else: + graph_cell = self.load_B_data(idx) + return graph_cell + + def __getitem__(self, idx): + return self.get(idx) + + +class CustomGraphDataLoader(paddle.io.DataLoader): + def __init__( + self, dataset, batch_size=1, shuffle=False, collate_fn=None, num_workers=0 + ): + super().__init__( + dataset, + batch_size=batch_size, + shuffle=shuffle, + collate_fn=collate_fn, + num_workers=num_workers, + ) + self.dataset = dataset + self.batch_size = batch_size + self.index = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.index >= len(self.dataset): + raise StopIteration + + batch_data = [ + self.dataset[i] + for i in range( + self.index, min(self.index + self.batch_size, len(self.dataset)) + ) + ] + self.index += self.batch_size + + return batch_data + + +class DatasetFactory: + def __init__(self, params=None, device=None, split="test"): + self.params = params + self.train_dataset = Data_Pool( + params=params, is_training=True, split=split, device=device + ) + self.test_dataset = Data_Pool( + params=params, is_training=False, split=split, device=device + ) + + def 
create_trainset( + self, + batch_size=100, + num_workers=4, + pin_memory=True, + persistent_workers=True, + indices=None, + subsampling=True, + ratio=0.2, + ): + """training set""" + graph_cell_dataset = GraphCellDataset( + base_dataset=self.train_dataset, + len_ds=len(self.train_dataset.pool), + indices=indices, + params=self.params, + subsampling=subsampling, + sample_ratio=ratio, + ) + loader = paddle.io.DataLoader( + dataset=graph_cell_dataset, + batch_size=batch_size, + num_workers=num_workers, + shuffle=True, + ) + """ training set """ + return self.train_dataset, loader + + def create_testset( + self, + batch_size=1, + num_workers=0, + pin_memory=False, + persistent_workers=False, + valid_num=10, + subsampling=True, + indices=None, + ): + """test set""" + if indices is not None: + valid_num = len(indices) + graph_cell_dataset = GraphCellDataset( + base_dataset=self.test_dataset, + len_ds=valid_num, + params=self.params, + subsampling=subsampling, + indices=indices, + ) + loader = CustomGraphDataLoader( + dataset=graph_cell_dataset, + batch_size=batch_size, + num_workers=num_workers, + shuffle=True, + ) + """ test set """ + return self.test_dataset, loader diff --git a/jointContribution/IJCAI_2024/aminos/download_dataset.ipynb b/jointContribution/IJCAI_2024/aminos/download_dataset.ipynb index 48aa6357f8..43bc9d5f99 100644 --- a/jointContribution/IJCAI_2024/aminos/download_dataset.ipynb +++ b/jointContribution/IJCAI_2024/aminos/download_dataset.ipynb @@ -1,139 +1,139 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "kBRw5QHhBkax" - }, - "source": [ - "# 数据集导入(预制链接)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "\n", - "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=8ce890e0-0019-4e1e-ac63-14718948f612&at=APZUnTW-e7sn7C7k5UVU2BaxZPGT%3A1721020888524' -O dataset_1.zip\n", - "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1izP72pHtoXpQvOV8WFCnh_LekzLunyG5&export=download&authuser=0&confirm=t&uuid=8e453e3d-84ac-4f51-9cbf-45d47cbdcc65&at=APZUnTVfJYZBQwnHawB72aq5MPvv%3A1721020973099' -O dataset_2.zip\n", - "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1djT0tlmLBi15LYZG0dxci1RSjPI94sM8&export=download&authuser=0&confirm=t&uuid=4687dd5d-a001-47f2-bacd-e72d5c7361e4&at=APZUnTWWEM2OCtpaZNuS4UQjMzxc%3A1721021154071' -O dataset_3.zip\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gaD7ugivEL2R" - }, - "source": [ - "## 官方版本数据导入" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Unzip dataset file\n", - "import os\n", - "import shutil\n", - "\n", - "# 指定新文件夹的名称\n", - "new_folder_A = 'unziped/dataset_1'\n", - "new_folder_B = 'unziped/dataset_2'\n", - "new_folder_C = 'unziped/dataset_3'\n", - "\n", - "# 在当前目录下创建新文件夹\n", - "if not os.path.exists(new_folder_A):\n", - " os.makedirs(new_folder_A)\n", - "if not os.path.exists(new_folder_B):\n", - " os.makedirs(new_folder_B)\n", - "if not os.path.exists(new_folder_B):\n", - " os.makedirs(new_folder_B)\n", - "\n", - "# 使用!unzip命令将文件解压到新文件夹中\n", - "!unzip dataset_1.zip -d {new_folder_A}\n", - "!unzip dataset_2.zip -d {new_folder_B}\n", - "!unzip dataset_3.zip -d {new_folder_C}\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "z0Sek0wtEs5n" - }, - "source": [ - "## 百度Baseline版本数据导入" - ] - }, - 
{ - "cell_type": "markdown", - "metadata": { - "id": "kY81z-fCgPfK" - }, - "source": [ - "## 自定义导入(在下面代码块导入并解压您的数据集)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ITzT8s2wgZG0" - }, - "source": [ - "下载我们自己的数据集" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1lUV7mJaoWPfRks4DMhJgDCxTZwMr8LQI&export=download&authuser=0&confirm=t&uuid=fbd3c896-23e8-42e0-b09d-410cf3c91487&at=APZUnTXNoF-geyh6R_qbWipUdgeP%3A1721020120231' -O trackCtrain.zip\n", - "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1imPyFjFqAj5WTT_K-lHe7_6lFJ4azJ1L&export=download&authuser=0&confirm=t&uuid=ca9d62d3-394a-4798-b2f3-03455a381cf0&at=APZUnTVrYJw4okYQJSrdDXLPqdSX%3A1721020169107' -O trackCtest.zip\n", - "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1y23xCHepK-NamTm3OFCAy41RnYolCjzq&export=download&authuser=0' -O trackCsrc.zip" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!mkdir ./Datasets\n", - "!unzip trackCtrain.zip -d ./Datasets\n", - "!unzip trackCtest.zip -d ./Datasets\n", - "!unzip trackCsrc.zip" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "gpuType": "T4", - "machine_shape": "hm", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.14" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "kBRw5QHhBkax" + }, + "source": [ + "# 数据集导入(预制链接)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=8ce890e0-0019-4e1e-ac63-14718948f612&at=APZUnTW-e7sn7C7k5UVU2BaxZPGT%3A1721020888524' -O dataset_1.zip\n", + "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1izP72pHtoXpQvOV8WFCnh_LekzLunyG5&export=download&authuser=0&confirm=t&uuid=8e453e3d-84ac-4f51-9cbf-45d47cbdcc65&at=APZUnTVfJYZBQwnHawB72aq5MPvv%3A1721020973099' -O dataset_2.zip\n", + "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1djT0tlmLBi15LYZG0dxci1RSjPI94sM8&export=download&authuser=0&confirm=t&uuid=4687dd5d-a001-47f2-bacd-e72d5c7361e4&at=APZUnTWWEM2OCtpaZNuS4UQjMzxc%3A1721021154071' -O dataset_3.zip\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gaD7ugivEL2R" + }, + "source": [ + "## 官方版本数据导入" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Unzip dataset file\n", + "import os\n", + "import shutil\n", + "\n", + "# 指定新文件夹的名称\n", + "new_folder_A = 'unziped/dataset_1'\n", + "new_folder_B = 'unziped/dataset_2'\n", + "new_folder_C = 'unziped/dataset_3'\n", + "\n", + "# 在当前目录下创建新文件夹\n", + "if not os.path.exists(new_folder_A):\n", + " os.makedirs(new_folder_A)\n", + "if not os.path.exists(new_folder_B):\n", + " os.makedirs(new_folder_B)\n", + "if not os.path.exists(new_folder_B):\n", + " os.makedirs(new_folder_B)\n", + "\n", + 
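One note on the unzip cell above: it tests new_folder_B twice and never creates new_folder_C, so extracting dataset_3.zip can fail when that directory is missing. A compact alternative (illustrative only, to be run before the !unzip lines) is:

import os

for folder in ("unziped/dataset_1", "unziped/dataset_2", "unziped/dataset_3"):
    os.makedirs(folder, exist_ok=True)  # exist_ok replaces the explicit exists() checks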
"# 使用!unzip命令将文件解压到新文件夹中\n", + "!unzip dataset_1.zip -d {new_folder_A}\n", + "!unzip dataset_2.zip -d {new_folder_B}\n", + "!unzip dataset_3.zip -d {new_folder_C}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "z0Sek0wtEs5n" + }, + "source": [ + "## 百度Baseline版本数据导入" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kY81z-fCgPfK" + }, + "source": [ + "## 自定义导入(在下面代码块导入并解压您的数据集)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ITzT8s2wgZG0" + }, + "source": [ + "下载我们自己的数据集" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1lUV7mJaoWPfRks4DMhJgDCxTZwMr8LQI&export=download&authuser=0&confirm=t&uuid=fbd3c896-23e8-42e0-b09d-410cf3c91487&at=APZUnTXNoF-geyh6R_qbWipUdgeP%3A1721020120231' -O trackCtrain.zip\n", + "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1imPyFjFqAj5WTT_K-lHe7_6lFJ4azJ1L&export=download&authuser=0&confirm=t&uuid=ca9d62d3-394a-4798-b2f3-03455a381cf0&at=APZUnTVrYJw4okYQJSrdDXLPqdSX%3A1721020169107' -O trackCtest.zip\n", + "!wget --no-check-certificate 'https://drive.usercontent.google.com/download?id=1y23xCHepK-NamTm3OFCAy41RnYolCjzq&export=download&authuser=0' -O trackCsrc.zip" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!mkdir ./Datasets\n", + "!unzip trackCtrain.zip -d ./Datasets\n", + "!unzip trackCtest.zip -d ./Datasets\n", + "!unzip trackCsrc.zip" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "machine_shape": "hm", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/jointContribution/IJCAI_2024/aminos/infer.py b/jointContribution/IJCAI_2024/aminos/infer.py index 07738b67da..b12a1c62d8 100644 --- a/jointContribution/IJCAI_2024/aminos/infer.py +++ b/jointContribution/IJCAI_2024/aminos/infer.py @@ -1,72 +1,72 @@ -import time - -import paddle -from dataset.Load_mesh import DatasetFactory -from NN.Transolver.SageTrans_importer_B import FVGN -from utils import get_param -from utils.get_param import get_hyperparam -from utils.Logger import Logger - -params, git_info = get_param.params() -if params.load_index is None: - params.load_index = "90" -device = str("cuda" if paddle.device.cuda.device_count() >= 1 else "cpu").replace( - "cuda", "gpu" -) -print(f"Using device:{device}") -logger = Logger( - get_hyperparam(params), - use_csv=True, - use_tensorboard=False, - params=params, - git_info=git_info, - copy_code=False, - saving_path="./Logger", - loading_path="./Logger", -) -datasets_factory = DatasetFactory(params=params, device=device) -test_indices = list(range(0, 100)) -test_dataset, test_loader = datasets_factory.create_testset( - batch_size=1, - num_workers=0, - pin_memory=False, - persistent_workers=False, - valid_num=len(test_indices), - subsampling=False, - indices=test_indices, -) - - -model = FVGN(params) -fluid_model = model.to(device) -fluid_model.eval() -params.load_date_time, params.load_index = logger.load_state( - model=fluid_model, - optimizer=None, - scheduler=None, - 
datetime=logger.datetime, - index=params.load_index, - device=device, -) -params.load_index = params.load_index -print(f"loaded: {params.load_date_time}, {params.load_index}") -params.load_index = 0 if params.load_index is None else params.load_index -start = time.time() -with paddle.no_grad(): - epoc_val_loss = 0 - for batch_index, graph_cell in enumerate(test_loader): - graph_cell = graph_cell[0] - graph_cell = test_dataset.datapreprocessing(graph_cell, is_training=False) - pred_node_valid = fluid_model( - graph_cell=graph_cell, is_training=False, params=params - ) - reversed_node_press = ( - pred_node_valid - 1e-08 - ) * graph_cell.press_std + graph_cell.press_mean - logger.save_test_results( - value=reversed_node_press.cpu().detach().squeeze().numpy(), - num_id="".join( - [chr(ascii_code) for ascii_code in graph_cell.origin_id.cpu().tolist()] - )[2:], - ) -print(f"Generating answer completed completed in {time.time() - start:.2f} seconds") +import time + +import paddle +from dataset.Load_mesh import DatasetFactory +from NN.Transolver.SageTrans_importer_B import FVGN +from utils import get_param +from utils.get_param import get_hyperparam +from utils.Logger import Logger + +params, git_info = get_param.params() +if params.load_index is None: + params.load_index = "90" +device = str("cuda" if paddle.device.cuda.device_count() >= 1 else "cpu").replace( + "cuda", "gpu" +) +print(f"Using device:{device}") +logger = Logger( + get_hyperparam(params), + use_csv=True, + use_tensorboard=False, + params=params, + git_info=git_info, + copy_code=False, + saving_path="./Logger", + loading_path="./Logger", +) +datasets_factory = DatasetFactory(params=params, device=device) +test_indices = list(range(0, 100)) +test_dataset, test_loader = datasets_factory.create_testset( + batch_size=1, + num_workers=0, + pin_memory=False, + persistent_workers=False, + valid_num=len(test_indices), + subsampling=False, + indices=test_indices, +) + + +model = FVGN(params) +fluid_model = model.to(device) +fluid_model.eval() +params.load_date_time, params.load_index = logger.load_state( + model=fluid_model, + optimizer=None, + scheduler=None, + datetime=logger.datetime, + index=params.load_index, + device=device, +) +params.load_index = params.load_index +print(f"loaded: {params.load_date_time}, {params.load_index}") +params.load_index = 0 if params.load_index is None else params.load_index +start = time.time() +with paddle.no_grad(): + epoc_val_loss = 0 + for batch_index, graph_cell in enumerate(test_loader): + graph_cell = graph_cell[0] + graph_cell = test_dataset.datapreprocessing(graph_cell, is_training=False) + pred_node_valid = fluid_model( + graph_cell=graph_cell, is_training=False, params=params + ) + reversed_node_press = ( + pred_node_valid - 1e-08 + ) * graph_cell.press_std + graph_cell.press_mean + logger.save_test_results( + value=reversed_node_press.cpu().detach().squeeze().numpy(), + num_id="".join( + [chr(ascii_code) for ascii_code in graph_cell.origin_id.cpu().tolist()] + )[2:], + ) +print(f"Generating answer completed completed in {time.time() - start:.2f} seconds") diff --git a/jointContribution/IJCAI_2024/aminos/requirements.txt b/jointContribution/IJCAI_2024/aminos/requirements.txt index cecd9a0db6..ba6f3beccb 100644 --- a/jointContribution/IJCAI_2024/aminos/requirements.txt +++ b/jointContribution/IJCAI_2024/aminos/requirements.txt @@ -1,10 +1,10 @@ -h5py -natsort -numpy < 2 -paddlepaddle_gpu <= 2.5.2 -pgl -pyvista -scipy -trimesh -visualdl -vtk +h5py +natsort +numpy < 2 +paddlepaddle_gpu <= 2.5.2 
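For clarity, the post-processing in infer.py above boils down to two steps: undo the per-case pressure normalisation with the mean/std carried on the graph, and decode the HDF5 key from its ASCII-coded origin_id (the leading two characters, e.g. "B_", are stripped before saving). A toy, self-contained version with made-up numbers:

import paddle

press_mean, press_std = 12.5, 3.0
pred_norm = paddle.to_tensor([[0.1], [-0.4], [1.2]], dtype="float32")

# (prediction - 1e-08) * std + mean, the same reversal applied before saving results.
pred_pressure = (pred_norm - 1e-08) * press_std + press_mean
print(pred_pressure.numpy().squeeze())

origin_id = [ord(c) for c in "B_123"]            # how load_*_data stores the key
case_name = "".join(chr(code) for code in origin_id)
print(case_name, case_name[2:])                  # "B_123" and the numeric part "123"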
+pgl +pyvista +scipy +trimesh +visualdl +vtk diff --git a/jointContribution/IJCAI_2024/aminos/utils/DS_utils.py b/jointContribution/IJCAI_2024/aminos/utils/DS_utils.py index 4938774035..cf6b477b3d 100644 --- a/jointContribution/IJCAI_2024/aminos/utils/DS_utils.py +++ b/jointContribution/IJCAI_2024/aminos/utils/DS_utils.py @@ -1,302 +1,302 @@ -import os - -import h5py -import numpy as np -import paddle -import pyvista as pv -import trimesh -import trimesh.sample as sample -import vtk - - -def load_mesh_ply_vtk(file_path): - mesh = pv.read(file_path) - points = mesh.points - cells_vtk = list(mesh.cell) - cells = [] - for cell_vtk in cells_vtk: - cell = [] - for id in range(cell_vtk.GetNumberOfPoints()): - cell.append(cell_vtk.GetPointId(id)) - cells.append(cell) - points = np.array(points) - cells = np.array(cells) - return points, cells - - -def read_vtk(filename): - reader = vtk.vtkUnstructuredGridReader() - reader.SetFileName(filename) - reader.Update() - return reader.GetOutput() - - -def convert_quads_to_tris(unstructured_grid): - geometry_filter = vtk.vtkGeometryFilter() - geometry_filter.SetInputData(unstructured_grid) - geometry_filter.Update() - poly_data = geometry_filter.GetOutput() - triangle_filter = vtk.vtkTriangleFilter() - triangle_filter.SetInputData(poly_data) - triangle_filter.Update() - return triangle_filter.GetOutput() - - -def compute_and_add_normals(poly_data): - normal_generator = vtk.vtkPolyDataNormals() - normal_generator.SetInputData(poly_data) - normal_generator.ComputePointNormalsOn() - normal_generator.ComputeCellNormalsOff() - normal_generator.Update() - normals = normal_generator.GetOutput().GetPointData().GetNormals() - return normals - - -def get_points(poly_data): - points = poly_data.GetPoints() - num_points = points.GetNumberOfPoints() - points_array = np.zeros((num_points, 3)) - for i in range(num_points): - points_array[i, :] = points.GetPoint(i) - return points_array - - -def get_pressure_data(poly_data): - pressure_array = poly_data.GetPointData().GetArray("point_scalars") - num_points = poly_data.GetNumberOfPoints() - if pressure_array is None: - raise ValueError("Pressure data not found in the input VTK file.") - pressure = np.zeros((num_points, 1)) - for i in range(num_points): - pressure[i, 0] = pressure_array.GetValue(i) - return pressure - - -def extract_triangle_indices(poly_data): - poly_data.BuildLinks() - num_cells = poly_data.GetNumberOfCells() - triangle_indices = [] - for cell_id in range(num_cells): - cell = poly_data.GetCell(cell_id) - if cell.GetCellType() == vtk.VTK_TRIANGLE: - point_ids = cell.GetPointIds() - indices = [point_ids.GetId(i) for i in range(3)] - triangle_indices.append(indices) - return np.array(triangle_indices) - - -def write_to_vtk(data: dict, write_file_path): - grid = vtk.vtkUnstructuredGrid() - points = data["node|pos"] - points_vtk = vtk.vtkPoints() - [points_vtk.InsertNextPoint(point) for point in points] - grid.SetPoints(points_vtk) - point_data = grid.GetPointData() - for key in data.keys(): - if not key.startswith("node"): - continue - if key == "node|pos": - continue - array_data = data[key] - vtk_data_array = vtk.vtkFloatArray() - k = ( - 1 - if type(array_data[0]) is np.float64 or type(array_data[0]) is np.float32 - else len(array_data[0]) - ) - vtk_data_array.SetNumberOfComponents(k) - if k == 1: - [vtk_data_array.InsertNextTuple([value]) for value in array_data] - else: - [vtk_data_array.InsertNextTuple(value) for value in array_data] - vtk_data_array.SetName(key) - point_data.AddArray(vtk_data_array) - 
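To make the return contract of load_mesh_ply_vtk above concrete, the snippet below runs the same cell iteration on a pyvista Sphere instead of one of the competition .ply files (it assumes a pyvista version in which DataSet.cell yields VTK-compatible cell objects, as the helper itself does):

import numpy as np
import pyvista as pv

mesh = pv.Sphere(theta_resolution=8, phi_resolution=8)  # small triangulated surface
points = np.asarray(mesh.points)

cells = []
for cell in mesh.cell:                                  # same iteration as load_mesh_ply_vtk
    cells.append([cell.GetPointId(i) for i in range(cell.GetNumberOfPoints())])
cells = np.array(cells)

print(points.shape, cells.shape)                        # (N, 3) vertices, (M, 3) triangles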
cells = data["cells_node"].reshape(-1, 3) - cell_array = vtk.vtkCellArray() - for cell in cells: - triangle = vtk.vtkTriangle() - for i, id in enumerate(cell): - triangle.GetPointIds().SetId(i, id) - cell_array.InsertNextCell(triangle) - grid.SetCells(vtk.vtkTriangle().GetCellType(), cell_array) - cell_data = grid.GetCellData() - for key in data.keys(): - if not key.startswith("cell|"): - continue - if key == "cell|cells_node": - continue - array_data = data[key] - vtk_data_array = vtk.vtkFloatArray() - k = ( - 1 - if type(array_data[0]) is np.float64 or type(array_data[0]) is np.float32 - else len(array_data[0]) - ) - vtk_data_array.SetNumberOfComponents(k) - if k == 1: - [vtk_data_array.InsertNextTuple([value]) for value in array_data] - else: - [vtk_data_array.InsertNextTuple(value) for value in array_data] - vtk_data_array.SetName(key) - cell_data.AddArray(vtk_data_array) - writer = vtk.vtkXMLUnstructuredGridWriter() - writer.SetFileName(write_file_path) - writer.SetInputData(grid) - writer.Write() - print(f"vtu file saved:{write_file_path}") - - -def write_point_cloud_to_vtk(data: dict, write_file_path): - grid = vtk.vtkUnstructuredGrid() - points = data["node|pos"] - points_vtk = vtk.vtkPoints() - [points_vtk.InsertNextPoint(point) for point in points] - grid.SetPoints(points_vtk) - point_data = grid.GetPointData() - for key in data.keys(): - if not key.startswith("node"): - continue - if key == "node|pos": - continue - array_data = data[key] - vtk_data_array = vtk.vtkFloatArray() - k = ( - 1 - if type(array_data[0]) is np.float64 or type(array_data[0]) is np.float32 - else len(array_data[0]) - ) - vtk_data_array.SetNumberOfComponents(k) - if k == 1: - [vtk_data_array.InsertNextTuple([value]) for value in array_data] - else: - [vtk_data_array.InsertNextTuple(value) for value in array_data] - vtk_data_array.SetName(key) - point_data.AddArray(vtk_data_array) - writer = vtk.vtkXMLUnstructuredGridWriter() - writer.SetFileName(write_file_path) - writer.SetInputData(grid) - writer.Write() - print(f"vtu file saved:[{write_file_path}]") - - -def compute_sdf_query_points(points, cells, query_points) -> np.ndarray: - mesh = trimesh.Trimesh(vertices=points, faces=cells) - sds = mesh.nearest.signed_distance(query_points) - return sds - - -def compute_sdf_grid(points, cells, bounds, resolution: list, eq_res=False): - res = resolution - x, y, z = [ - np.linspace(bounds[0][i], bounds[1][i], res_i) for i, res_i in enumerate(res) - ] - xx, yy, zz = np.meshgrid(x, y, z) - grids = np.stack([xx, yy, zz], axis=-1) - query_points = np.vstack([xx.flatten(), yy.flatten(), zz.flatten()]).T - return grids, compute_sdf_query_points(points, cells, query_points) - - -def normalize_points(points, bounds): - """ - Normalize points to a cube defined by [-1, 1] in each dimension. - - Parameters: - points (numpy.ndarray): An array of shape (N, 3) representing the points. - bounds (numpy.ndarray): An array of shape (2, 3) representing the min and max bounds for x, y, z. - - Returns: - numpy.ndarray: The normalized points. 
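compute_sdf_grid above samples a regular lattice inside the global bounds and queries signed distances against the surface. The self-contained example below does the same on a watertight trimesh box with a coarse 16^3 grid; bounds, resolution, and geometry are all placeholders.

import numpy as np
import trimesh

mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))      # watertight toy geometry
bounds = np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])  # [min_xyz, max_xyz]
res = [16, 16, 16]

axes = [np.linspace(bounds[0][i], bounds[1][i], r) for i, r in enumerate(res)]
xx, yy, zz = np.meshgrid(*axes)
query_points = np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T

# Same call family as compute_sdf_query_points: positive inside, negative outside.
sdf = mesh.nearest.signed_distance(query_points)
print(sdf.shape, float(sdf.min()), float(sdf.max()))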
- """ - min_bounds = bounds[0] - max_bounds = bounds[1] - center = (min_bounds + max_bounds) / 2.0 - half_range = (max_bounds - min_bounds) / 2.0 - normalized_points = (points - center) / half_range - return normalized_points - - -def compute_mean_std(data): - mean = 0.0 - std = 0.0 - n_samples = 0 - for x in data: - x = x.reshape(-1, 1) - n_samples += x.shape[0] - mean += x.sum(axis=0) - mean /= n_samples - for x in data: - x = x.reshape(-1, 1) - std += ((x - mean) ** 2).sum(axis=0) - std = paddle.sqrt(x=std / n_samples) - return mean.to("float32"), std.to("float32") - - -def compute_mean_std_3dvector(data): - normals = np.concatenate(data, axis=0) - mean_vector = np.mean(normals, axis=0) - variance_vector = np.var(normals, axis=0) - return paddle.to_tensor(data=mean_vector).to("float32"), paddle.to_tensor( - data=variance_vector - ).to("float32") - - -def dict2Device(data: dict, device): - for key, v in data.items(): - data[key] = v.to("float32").to(device) - return data - - -def compute_sdf_for_h5_file(h5_file_path): - with h5py.File(h5_file_path, "r+") as h5file: - for key in h5file.keys(): - dataset = h5file[key] - pos = dataset["node|pos"][:] - cells_node = dataset["cells_node"][:].reshape(-1, 3) - bounds = np.array([1, 1, 1]) - bounds = np.loadtxt( - os.path.join( - os.path.dirname(h5_file_path), "watertight_global_bounds.txt" - ) - ) - if "voxel|sdf" in dataset.keys(): - del dataset["voxel|sdf"] - if "voxel|grid" in dataset.keys(): - del dataset["voxel|grid"] - grid, sdf = compute_sdf_grid(pos, cells_node, bounds, [64, 64, 64]) - dataset.create_dataset("voxel|sdf", data=sdf) - dataset.create_dataset("voxel|grid", data=grid) - print(f"process {key} done") - - -def compute_ao(ply_file, n_samples=64): - model = trimesh.load(ply_file, force="mesh") - assert isinstance(model, trimesh.Trimesh) - NDIRS = n_samples - RELSIZE = 0.05 - sphere_pts, _ = sample.sample_surface_even(trimesh.primitives.Sphere(), count=NDIRS) - normal_dir_similarities = model.vertex_normals @ sphere_pts.T - assert tuple(normal_dir_similarities.shape)[0] == len(model.vertex_normals) - assert tuple(normal_dir_similarities.shape)[1] == len(sphere_pts) - normal_dir_similarities[normal_dir_similarities <= 0] = 0 - normal_dir_similarities[normal_dir_similarities > 0] = 1 - vert_idxs, dir_idxs = np.where(normal_dir_similarities) - del normal_dir_similarities - normals = model.vertex_normals[vert_idxs] - origins = model.vertices[vert_idxs] + normals * model.scale * 0.0005 - directions = sphere_pts[dir_idxs] - assert len(origins) == len(directions) - hit_pts, idxs_rays, _ = model.ray.intersects_location( - ray_origins=origins, ray_directions=directions - ) - succ_origs = origins[idxs_rays] - distances = np.linalg.norm(succ_origs - hit_pts, axis=1) - idxs_rays = idxs_rays[distances < RELSIZE * model.scale] - idxs_orig = vert_idxs[idxs_rays] - uidxs, uidxscounts = np.unique(idxs_orig, return_counts=True) - assert len(uidxs) == len(uidxscounts) - counts_verts = np.zeros(len(model.vertices)) - counts_verts[uidxs] = uidxscounts - counts_verts = counts_verts / np.max(counts_verts) * 255 - counts_verts = 255 - counts_verts.astype(int).reshape(-1, 1) - AO = counts_verts / np.full_like(counts_verts, 255.0) - return AO +import os + +import h5py +import numpy as np +import paddle +import pyvista as pv +import trimesh +import trimesh.sample as sample +import vtk + + +def load_mesh_ply_vtk(file_path): + mesh = pv.read(file_path) + points = mesh.points + cells_vtk = list(mesh.cell) + cells = [] + for cell_vtk in cells_vtk: + cell = [] + 
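normalize_points above (and the "canonical_query" computed in load_A_data / load_B_data) is just an affine map into the [-1, 1]^3 cube defined by the bounds. A worked check with hand-picked numbers:

import numpy as np

bounds = np.array([[0.0, -2.0, 10.0],   # min corner
                   [4.0,  2.0, 30.0]])  # max corner
points = np.array([[0.0, -2.0, 10.0],   # min corner -> [-1, -1, -1]
                   [2.0,  0.0, 20.0],   # centre     -> [ 0,  0,  0]
                   [4.0,  2.0, 30.0]])  # max corner -> [ 1,  1,  1]

center = (bounds[0] + bounds[1]) / 2.0
half_range = (bounds[1] - bounds[0]) / 2.0
print((points - center) / half_range)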
for id in range(cell_vtk.GetNumberOfPoints()): + cell.append(cell_vtk.GetPointId(id)) + cells.append(cell) + points = np.array(points) + cells = np.array(cells) + return points, cells + + +def read_vtk(filename): + reader = vtk.vtkUnstructuredGridReader() + reader.SetFileName(filename) + reader.Update() + return reader.GetOutput() + + +def convert_quads_to_tris(unstructured_grid): + geometry_filter = vtk.vtkGeometryFilter() + geometry_filter.SetInputData(unstructured_grid) + geometry_filter.Update() + poly_data = geometry_filter.GetOutput() + triangle_filter = vtk.vtkTriangleFilter() + triangle_filter.SetInputData(poly_data) + triangle_filter.Update() + return triangle_filter.GetOutput() + + +def compute_and_add_normals(poly_data): + normal_generator = vtk.vtkPolyDataNormals() + normal_generator.SetInputData(poly_data) + normal_generator.ComputePointNormalsOn() + normal_generator.ComputeCellNormalsOff() + normal_generator.Update() + normals = normal_generator.GetOutput().GetPointData().GetNormals() + return normals + + +def get_points(poly_data): + points = poly_data.GetPoints() + num_points = points.GetNumberOfPoints() + points_array = np.zeros((num_points, 3)) + for i in range(num_points): + points_array[i, :] = points.GetPoint(i) + return points_array + + +def get_pressure_data(poly_data): + pressure_array = poly_data.GetPointData().GetArray("point_scalars") + num_points = poly_data.GetNumberOfPoints() + if pressure_array is None: + raise ValueError("Pressure data not found in the input VTK file.") + pressure = np.zeros((num_points, 1)) + for i in range(num_points): + pressure[i, 0] = pressure_array.GetValue(i) + return pressure + + +def extract_triangle_indices(poly_data): + poly_data.BuildLinks() + num_cells = poly_data.GetNumberOfCells() + triangle_indices = [] + for cell_id in range(num_cells): + cell = poly_data.GetCell(cell_id) + if cell.GetCellType() == vtk.VTK_TRIANGLE: + point_ids = cell.GetPointIds() + indices = [point_ids.GetId(i) for i in range(3)] + triangle_indices.append(indices) + return np.array(triangle_indices) + + +def write_to_vtk(data: dict, write_file_path): + grid = vtk.vtkUnstructuredGrid() + points = data["node|pos"] + points_vtk = vtk.vtkPoints() + [points_vtk.InsertNextPoint(point) for point in points] + grid.SetPoints(points_vtk) + point_data = grid.GetPointData() + for key in data.keys(): + if not key.startswith("node"): + continue + if key == "node|pos": + continue + array_data = data[key] + vtk_data_array = vtk.vtkFloatArray() + k = ( + 1 + if type(array_data[0]) is np.float64 or type(array_data[0]) is np.float32 + else len(array_data[0]) + ) + vtk_data_array.SetNumberOfComponents(k) + if k == 1: + [vtk_data_array.InsertNextTuple([value]) for value in array_data] + else: + [vtk_data_array.InsertNextTuple(value) for value in array_data] + vtk_data_array.SetName(key) + point_data.AddArray(vtk_data_array) + cells = data["cells_node"].reshape(-1, 3) + cell_array = vtk.vtkCellArray() + for cell in cells: + triangle = vtk.vtkTriangle() + for i, id in enumerate(cell): + triangle.GetPointIds().SetId(i, id) + cell_array.InsertNextCell(triangle) + grid.SetCells(vtk.vtkTriangle().GetCellType(), cell_array) + cell_data = grid.GetCellData() + for key in data.keys(): + if not key.startswith("cell|"): + continue + if key == "cell|cells_node": + continue + array_data = data[key] + vtk_data_array = vtk.vtkFloatArray() + k = ( + 1 + if type(array_data[0]) is np.float64 or type(array_data[0]) is np.float32 + else len(array_data[0]) + ) + 
vtk_data_array.SetNumberOfComponents(k) + if k == 1: + [vtk_data_array.InsertNextTuple([value]) for value in array_data] + else: + [vtk_data_array.InsertNextTuple(value) for value in array_data] + vtk_data_array.SetName(key) + cell_data.AddArray(vtk_data_array) + writer = vtk.vtkXMLUnstructuredGridWriter() + writer.SetFileName(write_file_path) + writer.SetInputData(grid) + writer.Write() + print(f"vtu file saved:{write_file_path}") + + +def write_point_cloud_to_vtk(data: dict, write_file_path): + grid = vtk.vtkUnstructuredGrid() + points = data["node|pos"] + points_vtk = vtk.vtkPoints() + [points_vtk.InsertNextPoint(point) for point in points] + grid.SetPoints(points_vtk) + point_data = grid.GetPointData() + for key in data.keys(): + if not key.startswith("node"): + continue + if key == "node|pos": + continue + array_data = data[key] + vtk_data_array = vtk.vtkFloatArray() + k = ( + 1 + if type(array_data[0]) is np.float64 or type(array_data[0]) is np.float32 + else len(array_data[0]) + ) + vtk_data_array.SetNumberOfComponents(k) + if k == 1: + [vtk_data_array.InsertNextTuple([value]) for value in array_data] + else: + [vtk_data_array.InsertNextTuple(value) for value in array_data] + vtk_data_array.SetName(key) + point_data.AddArray(vtk_data_array) + writer = vtk.vtkXMLUnstructuredGridWriter() + writer.SetFileName(write_file_path) + writer.SetInputData(grid) + writer.Write() + print(f"vtu file saved:[{write_file_path}]") + + +def compute_sdf_query_points(points, cells, query_points) -> np.ndarray: + mesh = trimesh.Trimesh(vertices=points, faces=cells) + sds = mesh.nearest.signed_distance(query_points) + return sds + + +def compute_sdf_grid(points, cells, bounds, resolution: list, eq_res=False): + res = resolution + x, y, z = [ + np.linspace(bounds[0][i], bounds[1][i], res_i) for i, res_i in enumerate(res) + ] + xx, yy, zz = np.meshgrid(x, y, z) + grids = np.stack([xx, yy, zz], axis=-1) + query_points = np.vstack([xx.flatten(), yy.flatten(), zz.flatten()]).T + return grids, compute_sdf_query_points(points, cells, query_points) + + +def normalize_points(points, bounds): + """ + Normalize points to a cube defined by [-1, 1] in each dimension. + + Parameters: + points (numpy.ndarray): An array of shape (N, 3) representing the points. + bounds (numpy.ndarray): An array of shape (2, 3) representing the min and max bounds for x, y, z. + + Returns: + numpy.ndarray: The normalized points. 
+ """ + min_bounds = bounds[0] + max_bounds = bounds[1] + center = (min_bounds + max_bounds) / 2.0 + half_range = (max_bounds - min_bounds) / 2.0 + normalized_points = (points - center) / half_range + return normalized_points + + +def compute_mean_std(data): + mean = 0.0 + std = 0.0 + n_samples = 0 + for x in data: + x = x.reshape(-1, 1) + n_samples += x.shape[0] + mean += x.sum(axis=0) + mean /= n_samples + for x in data: + x = x.reshape(-1, 1) + std += ((x - mean) ** 2).sum(axis=0) + std = paddle.sqrt(x=std / n_samples) + return mean.to("float32"), std.to("float32") + + +def compute_mean_std_3dvector(data): + normals = np.concatenate(data, axis=0) + mean_vector = np.mean(normals, axis=0) + variance_vector = np.var(normals, axis=0) + return paddle.to_tensor(data=mean_vector).to("float32"), paddle.to_tensor( + data=variance_vector + ).to("float32") + + +def dict2Device(data: dict, device): + for key, v in data.items(): + data[key] = v.to("float32").to(device) + return data + + +def compute_sdf_for_h5_file(h5_file_path): + with h5py.File(h5_file_path, "r+") as h5file: + for key in h5file.keys(): + dataset = h5file[key] + pos = dataset["node|pos"][:] + cells_node = dataset["cells_node"][:].reshape(-1, 3) + bounds = np.array([1, 1, 1]) + bounds = np.loadtxt( + os.path.join( + os.path.dirname(h5_file_path), "watertight_global_bounds.txt" + ) + ) + if "voxel|sdf" in dataset.keys(): + del dataset["voxel|sdf"] + if "voxel|grid" in dataset.keys(): + del dataset["voxel|grid"] + grid, sdf = compute_sdf_grid(pos, cells_node, bounds, [64, 64, 64]) + dataset.create_dataset("voxel|sdf", data=sdf) + dataset.create_dataset("voxel|grid", data=grid) + print(f"process {key} done") + + +def compute_ao(ply_file, n_samples=64): + model = trimesh.load(ply_file, force="mesh") + assert isinstance(model, trimesh.Trimesh) + NDIRS = n_samples + RELSIZE = 0.05 + sphere_pts, _ = sample.sample_surface_even(trimesh.primitives.Sphere(), count=NDIRS) + normal_dir_similarities = model.vertex_normals @ sphere_pts.T + assert tuple(normal_dir_similarities.shape)[0] == len(model.vertex_normals) + assert tuple(normal_dir_similarities.shape)[1] == len(sphere_pts) + normal_dir_similarities[normal_dir_similarities <= 0] = 0 + normal_dir_similarities[normal_dir_similarities > 0] = 1 + vert_idxs, dir_idxs = np.where(normal_dir_similarities) + del normal_dir_similarities + normals = model.vertex_normals[vert_idxs] + origins = model.vertices[vert_idxs] + normals * model.scale * 0.0005 + directions = sphere_pts[dir_idxs] + assert len(origins) == len(directions) + hit_pts, idxs_rays, _ = model.ray.intersects_location( + ray_origins=origins, ray_directions=directions + ) + succ_origs = origins[idxs_rays] + distances = np.linalg.norm(succ_origs - hit_pts, axis=1) + idxs_rays = idxs_rays[distances < RELSIZE * model.scale] + idxs_orig = vert_idxs[idxs_rays] + uidxs, uidxscounts = np.unique(idxs_orig, return_counts=True) + assert len(uidxs) == len(uidxscounts) + counts_verts = np.zeros(len(model.vertices)) + counts_verts[uidxs] = uidxscounts + counts_verts = counts_verts / np.max(counts_verts) * 255 + counts_verts = 255 - counts_verts.astype(int).reshape(-1, 1) + AO = counts_verts / np.full_like(counts_verts, 255.0) + return AO diff --git a/jointContribution/IJCAI_2024/aminos/utils/Logger.py b/jointContribution/IJCAI_2024/aminos/utils/Logger.py index 9ac4f3a82c..a1b4d63fd3 100644 --- a/jointContribution/IJCAI_2024/aminos/utils/Logger.py +++ b/jointContribution/IJCAI_2024/aminos/utils/Logger.py @@ -1,315 +1,315 @@ -import datetime as dt 
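For reference, a minimal sketch of how the SDF helpers in DS_utils.py above fit together. The unit box mesh, the 16x16x16 grid resolution, and the "from utils import DS_utils" import (which assumes the aminos directory is on PYTHONPATH) are illustrative assumptions, not part of the patch; numpy and trimesh must be installed.

import numpy as np
import trimesh

from utils import DS_utils  # assumption: run from the aminos package root

# toy watertight geometry standing in for a real surface mesh
mesh = trimesh.creation.box(extents=[1.0, 1.0, 1.0])
points, cells = mesh.vertices, mesh.faces

# (2, 3) array of min/max corners, as indexed by compute_sdf_grid and normalize_points
bounds = np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])

# signed distance sampled on a coarse 16x16x16 grid (compute_sdf_for_h5_file uses 64^3)
grid, sdf = DS_utils.compute_sdf_grid(points, cells, bounds, [16, 16, 16])
print(grid.shape, sdf.shape)  # (16, 16, 16, 3) and (4096,)

# map the mesh vertices into [-1, 1]^3 around the center of the bounds
normalized = DS_utils.normalize_points(points, bounds)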
-import json -import os -import pickle -import shutil -import time - -import numpy as np -import paddle -import visualdl -from natsort import natsorted -from utils import DS_utils - - -class Logger: - def __init__( - self, - name, - datetime=None, - use_csv=False, - use_tensorboard=True, - params=None, - git_info=None, - saving_path=None, - copy_code=True, - loading_path=None, - ): - """ - Logger logs metrics to CSV files / tensorboard - :name: logging name (e.g. model name / dataset name / ...) - :datetime: date and time of logging start (useful in case of multiple runs). - Default: current date and time is picked - :use_csv: log output to csv files (needed for plotting) - :use_tensorboard: log output to tensorboard - """ - self.name = name - self.params = params - self.log_item = {} - if datetime: - self.datetime = datetime - else: - self.datetime = dt.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") - if saving_path is not None: - self.saving_path = saving_path - else: - self.saving_path = os.getcwd() + f"/Logger/{name}/{self.datetime}" - if loading_path is not None: - self.loading_path = loading_path - source_valid_file_path = os.path.split(os.path.split(__file__)[0])[0] - target_valid_file_path = f"{self.saving_path}/source" - if copy_code: - os.makedirs(f"{self.saving_path}/source", exist_ok=True) - shutil.copytree( - source_valid_file_path, - target_valid_file_path, - ignore=self.ignore_files_and_folders, - dirs_exist_ok=True, - ) - self.target_valid_file_path = target_valid_file_path + "/validate.py" - self.use_tensorboard = use_tensorboard - self.use_csv = use_csv - if use_csv: - self.csv_file_path = f"{self.saving_path}/Loss_monitor.dat" - headers = ["epoch", "Epoch_loss", "Epoch_eval_loss"] - header_line = "Variables=" + " ".join(f'"{header}"' for header in headers) - with open(self.csv_file_path, "w") as file: - file.write(header_line + "\n") - if use_tensorboard: - directory = self.saving_path + "/tensorboard" - os.makedirs(directory, exist_ok=True) - self.writer = visualdl.LogWriter(directory) - self.git_info = git_info - - def ignore_files_and_folders(self, dir_name, names): - ignored = set() - files_to_ignore = {} - folders_to_ignore = {"rollout", "Logger", "Datasets"} - for name in names: - path = os.path.join(dir_name, name) - if os.path.isfile(path) and name in files_to_ignore: - ignored.add(name) - elif os.path.isdir(path) and name in folders_to_ignore: - ignored.add(name) - return ignored - - def add_log_item(self, item: str, value, index): - if item not in self.log_item: - self.log_item[item] = [value] - else: - self.log_item[item].append(value) - - def log(self, item, value, index): - """ - log index value couple for specific item into csv file / tensorboard - :item: string describing item (e.g. "training_loss","test_loss") - :value: value to log - :index: index (e.g. batchindex / epoch) - """ - if self.use_csv: - self.add_log_item(item, value, index) - with open(self.csv_file_path, "a") as file: - row = [index] - for _, v in self.log_item.items(): - row.append(v[-1]) - row_string = " ".join(str(item) for item in row) - file.write(row_string + "\n") - if self.use_tensorboard: - self.writer.add_scalar(item, value, index) - - def log_histogram(self, item, values, index): - """ - log index values-histogram couple for specific item to tensorboard - :item: string describing item (e.g. "training_loss","test_loss") - :values: values to log - :index: index (e.g. 
batchindex / epoch) - """ - if self.use_tensorboard: - self.writer.add_histogram(item, values, index) - - def log_model_gradients(self, item, model, index): - """ - log index model-gradients-histogram couple for specific item to tensorboard - :item: string describing model item (e.g. "encoder","discriminator") - :values: values to log - :index: index (e.g. batchindex / epoch) - """ - if self.use_tensorboard: - params = [p for p in model.parameters()] - if len(params) != 0: - gradients = paddle.concat( - x=[p.grad.view(-1) for p in params if p.grad is not None] - ) - self.writer.add_histogram(f"{item}_grad_histogram", gradients, index) - self.writer.add_scalar(f"{item}_grad_norm2", gradients.norm(p=2), index) - - def plot(self, res_dict=None, data_index=None, split="train"): - """ - plot item metrics - :item: item - :log: logarithmic scale. Default: False - :smoothing: smoothing of metric. Default: 0.025 - :ylim: y-axis limits [lower,upper] - """ - if split == "train": - res_saving_dir = f"{self.saving_path}/traing_results/{data_index}.vtu" - else: - res_saving_dir = f"{self.saving_path}/valid_case/{data_index}.vtu" - os.makedirs(os.path.dirname(res_saving_dir), exist_ok=True) - if "cells_node" in res_dict: - DS_utils.write_to_vtk(res_dict, res_saving_dir) - else: - DS_utils.write_point_cloud_to_vtk(res_dict, res_saving_dir) - - def save_test_results(self, value=None, num_id=0): - self.answer_saving_dir = f"{self.saving_path}/gen_answers_C" - os.makedirs(self.answer_saving_dir, exist_ok=True) - np.save(f"{self.answer_saving_dir}/press_{num_id}.npy", value) - print(f"npy file saved:{self.answer_saving_dir}/press_{num_id}.npy") - - def save_state(self, model, optimizer, scheduler, index="final"): - """ - saves state of model and optimizer - :model: model to save (if list: save multiple models) - :optimizer: optimizer (if list: save multiple optimizers) - :index: index of state to save (e.g. specific epoch) - """ - os.makedirs(self.saving_path + "/states", exist_ok=True) - path = self.saving_path + "/states" - with open(path + "/commandline_args.json", "wt") as f: - json.dump( - {**vars(self.params), **self.git_info}, f, indent=4, ensure_ascii=False - ) - model.save_checkpoint(path + "/{}.state".format(index), optimizer, scheduler) - return path + "/{}.state".format(index) - - def save_dict(self, dic, index="final"): - """ - saves dictionary - helpful to save the population state of an evolutionary optimization algorithm - :dic: dictionary to store - :index: index of state to save (e.g. specific evolution) - """ - os.makedirs( - "Logger/{}/{}/states".format(self.name, self.datetime), exist_ok=True - ) - path = "Logger/{}/{}/states/{}.dic".format(self.name, self.datetime, index) - with open(path, "wb") as f: - pickle.dump(dic, f) - - def load_state( - self, - model, - optimizer, - scheduler, - datetime=None, - index=None, - continue_datetime=False, - device=None, - ): - """ - loads state of model and optimizer - :model: model to load (if list: load multiple models) - :optimizer: optimizer to load (if list: load multiple optimizers; if None: don't load) - :datetime: date and time from run to load (if None: take latest folder) - :index: index of state to load (e.g. specific epoch) (if None: take latest index) - :continue_datetime: flag whether to continue on this run. 
Default: False - :return: datetime, index (helpful, if datetime / index wasn't given) - """ - if datetime is None: - for _, dirs, _ in os.walk("Logger/{}/".format(self.name)): - datetime = sorted(dirs)[-1] - if datetime == self.datetime: - datetime = sorted(dirs)[-2] - break - if continue_datetime: - os.rmdir() - self.datetime = datetime - if index is None: - for _, _, files in os.walk( - "Logger/{}/{}/states/".format(self.name, datetime) - ): - index = os.path.splitext(natsorted(files)[-1])[0] - break - if self.loading_path is not None: - path = os.path.join(self.loading_path, "states/{}.pdparams".format(index)) - else: - path = "Logger/{}/{}/states/{}.pdparams".format(self.name, datetime, index) - model.load_checkpoint( - optimizer=optimizer, scheduler=scheduler, ckpdir=path, device=device - ) - return datetime, index - - def load_dict(self, dic, datetime=None, index=None, continue_datetime=False): - """ - loads state of model and optimizer - :dic: (empty) dictionary to fill with state information - :datetime: date and time from run to load (if None: take latest folder) - :index: index of state to load (e.g. specific epoch) (if None: take latest index) - :continue_datetime: flag whether to continue on this run. Default: False - :return: datetime, index (helpful, if datetime / index wasn't given) - """ - if datetime is None: - for _, dirs, _ in os.walk("Logger/{}/".format(self.name)): - datetime = sorted(dirs)[-1] - if datetime == self.datetime: - datetime = sorted(dirs)[-2] - break - if continue_datetime: - os.rmdir() - self.datetime = datetime - if index is None: - for _, _, files in os.walk( - "Logger/{}/{}/states/".format(self.name, datetime) - ): - index = os.path.splitext(natsorted(files)[-1])[0] - break - if self.loading_path is not None: - path = os.path.join(self.loading_path, "states/{}.dic".format(index)) - else: - path = "Logger/{}/{}/states/{}.dic".format(self.name, datetime, index) - with open(path, "rb") as f: - state = pickle.load(f) - for key in state.keys(): - dic[key] = state[key] - return datetime, index - - def load_logger(self, datetime=None, load=False, saving_path=None): - """ - copy older tensorboard logger to new dir - :datetime: date and time from run to load (if None: take latest folder) - """ - if datetime is None: - for _, dirs, _ in os.walk("Logger/{}/".format(self.name)): - datetime = sorted(dirs)[-1] - if datetime == self.datetime: - datetime = sorted(dirs)[-2] - break - if load: - cwd = os.getcwd() - if self.loading_path is not None: - path = os.path.join(self.loading_path, "tensorboard/") - else: - path = "Logger/{0}/{1}/tensorboard/".format(self.name, datetime) - for _, _, files in os.walk(path): - for file in files: - older_tensorboard_n = file - older_tensorboard = path + older_tensorboard_n - newer_tensorboard = ( - cwd - + "/Logger/{0}/{1}/tensorboard/".format( - self.name, self.datetime - ) - + older_tensorboard_n - ) - shutil.copyfile(older_tensorboard, newer_tensorboard) - break - if os.path.exists(newer_tensorboard): - print( - "older tensorboard aleady been copied to {0}".format( - newer_tensorboard - ) - ) - - -t_start = 0 - - -def t_step(): - """ - returns delta t from last call of t_step() - """ - global t_start - t_end = time.perf_counter() - delta_t = t_end - t_start - t_start = t_end - return delta_t +import datetime as dt +import json +import os +import pickle +import shutil +import time + +import numpy as np +import paddle +import visualdl +from natsort import natsorted +from utils import DS_utils + + +class Logger: + def __init__( + 
self, + name, + datetime=None, + use_csv=False, + use_tensorboard=True, + params=None, + git_info=None, + saving_path=None, + copy_code=True, + loading_path=None, + ): + """ + Logger logs metrics to CSV files / tensorboard + :name: logging name (e.g. model name / dataset name / ...) + :datetime: date and time of logging start (useful in case of multiple runs). + Default: current date and time is picked + :use_csv: log output to csv files (needed for plotting) + :use_tensorboard: log output to tensorboard + """ + self.name = name + self.params = params + self.log_item = {} + if datetime: + self.datetime = datetime + else: + self.datetime = dt.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") + if saving_path is not None: + self.saving_path = saving_path + else: + self.saving_path = os.getcwd() + f"/Logger/{name}/{self.datetime}" + if loading_path is not None: + self.loading_path = loading_path + source_valid_file_path = os.path.split(os.path.split(__file__)[0])[0] + target_valid_file_path = f"{self.saving_path}/source" + if copy_code: + os.makedirs(f"{self.saving_path}/source", exist_ok=True) + shutil.copytree( + source_valid_file_path, + target_valid_file_path, + ignore=self.ignore_files_and_folders, + dirs_exist_ok=True, + ) + self.target_valid_file_path = target_valid_file_path + "/validate.py" + self.use_tensorboard = use_tensorboard + self.use_csv = use_csv + if use_csv: + self.csv_file_path = f"{self.saving_path}/Loss_monitor.dat" + headers = ["epoch", "Epoch_loss", "Epoch_eval_loss"] + header_line = "Variables=" + " ".join(f'"{header}"' for header in headers) + with open(self.csv_file_path, "w") as file: + file.write(header_line + "\n") + if use_tensorboard: + directory = self.saving_path + "/tensorboard" + os.makedirs(directory, exist_ok=True) + self.writer = visualdl.LogWriter(directory) + self.git_info = git_info + + def ignore_files_and_folders(self, dir_name, names): + ignored = set() + files_to_ignore = {} + folders_to_ignore = {"rollout", "Logger", "Datasets"} + for name in names: + path = os.path.join(dir_name, name) + if os.path.isfile(path) and name in files_to_ignore: + ignored.add(name) + elif os.path.isdir(path) and name in folders_to_ignore: + ignored.add(name) + return ignored + + def add_log_item(self, item: str, value, index): + if item not in self.log_item: + self.log_item[item] = [value] + else: + self.log_item[item].append(value) + + def log(self, item, value, index): + """ + log index value couple for specific item into csv file / tensorboard + :item: string describing item (e.g. "training_loss","test_loss") + :value: value to log + :index: index (e.g. batchindex / epoch) + """ + if self.use_csv: + self.add_log_item(item, value, index) + with open(self.csv_file_path, "a") as file: + row = [index] + for _, v in self.log_item.items(): + row.append(v[-1]) + row_string = " ".join(str(item) for item in row) + file.write(row_string + "\n") + if self.use_tensorboard: + self.writer.add_scalar(item, value, index) + + def log_histogram(self, item, values, index): + """ + log index values-histogram couple for specific item to tensorboard + :item: string describing item (e.g. "training_loss","test_loss") + :values: values to log + :index: index (e.g. batchindex / epoch) + """ + if self.use_tensorboard: + self.writer.add_histogram(item, values, index) + + def log_model_gradients(self, item, model, index): + """ + log index model-gradients-histogram couple for specific item to tensorboard + :item: string describing model item (e.g. 
"encoder","discriminator") + :values: values to log + :index: index (e.g. batchindex / epoch) + """ + if self.use_tensorboard: + params = [p for p in model.parameters()] + if len(params) != 0: + gradients = paddle.concat( + x=[p.grad.view(-1) for p in params if p.grad is not None] + ) + self.writer.add_histogram(f"{item}_grad_histogram", gradients, index) + self.writer.add_scalar(f"{item}_grad_norm2", gradients.norm(p=2), index) + + def plot(self, res_dict=None, data_index=None, split="train"): + """ + plot item metrics + :item: item + :log: logarithmic scale. Default: False + :smoothing: smoothing of metric. Default: 0.025 + :ylim: y-axis limits [lower,upper] + """ + if split == "train": + res_saving_dir = f"{self.saving_path}/traing_results/{data_index}.vtu" + else: + res_saving_dir = f"{self.saving_path}/valid_case/{data_index}.vtu" + os.makedirs(os.path.dirname(res_saving_dir), exist_ok=True) + if "cells_node" in res_dict: + DS_utils.write_to_vtk(res_dict, res_saving_dir) + else: + DS_utils.write_point_cloud_to_vtk(res_dict, res_saving_dir) + + def save_test_results(self, value=None, num_id=0): + self.answer_saving_dir = f"{self.saving_path}/gen_answers_C" + os.makedirs(self.answer_saving_dir, exist_ok=True) + np.save(f"{self.answer_saving_dir}/press_{num_id}.npy", value) + print(f"npy file saved:{self.answer_saving_dir}/press_{num_id}.npy") + + def save_state(self, model, optimizer, scheduler, index="final"): + """ + saves state of model and optimizer + :model: model to save (if list: save multiple models) + :optimizer: optimizer (if list: save multiple optimizers) + :index: index of state to save (e.g. specific epoch) + """ + os.makedirs(self.saving_path + "/states", exist_ok=True) + path = self.saving_path + "/states" + with open(path + "/commandline_args.json", "wt") as f: + json.dump( + {**vars(self.params), **self.git_info}, f, indent=4, ensure_ascii=False + ) + model.save_checkpoint(path + "/{}.state".format(index), optimizer, scheduler) + return path + "/{}.state".format(index) + + def save_dict(self, dic, index="final"): + """ + saves dictionary - helpful to save the population state of an evolutionary optimization algorithm + :dic: dictionary to store + :index: index of state to save (e.g. specific evolution) + """ + os.makedirs( + "Logger/{}/{}/states".format(self.name, self.datetime), exist_ok=True + ) + path = "Logger/{}/{}/states/{}.dic".format(self.name, self.datetime, index) + with open(path, "wb") as f: + pickle.dump(dic, f) + + def load_state( + self, + model, + optimizer, + scheduler, + datetime=None, + index=None, + continue_datetime=False, + device=None, + ): + """ + loads state of model and optimizer + :model: model to load (if list: load multiple models) + :optimizer: optimizer to load (if list: load multiple optimizers; if None: don't load) + :datetime: date and time from run to load (if None: take latest folder) + :index: index of state to load (e.g. specific epoch) (if None: take latest index) + :continue_datetime: flag whether to continue on this run. 
Default: False + :return: datetime, index (helpful, if datetime / index wasn't given) + """ + if datetime is None: + for _, dirs, _ in os.walk("Logger/{}/".format(self.name)): + datetime = sorted(dirs)[-1] + if datetime == self.datetime: + datetime = sorted(dirs)[-2] + break + if continue_datetime: + os.rmdir() + self.datetime = datetime + if index is None: + for _, _, files in os.walk( + "Logger/{}/{}/states/".format(self.name, datetime) + ): + index = os.path.splitext(natsorted(files)[-1])[0] + break + if self.loading_path is not None: + path = os.path.join(self.loading_path, "states/{}.pdparams".format(index)) + else: + path = "Logger/{}/{}/states/{}.pdparams".format(self.name, datetime, index) + model.load_checkpoint( + optimizer=optimizer, scheduler=scheduler, ckpdir=path, device=device + ) + return datetime, index + + def load_dict(self, dic, datetime=None, index=None, continue_datetime=False): + """ + loads state of model and optimizer + :dic: (empty) dictionary to fill with state information + :datetime: date and time from run to load (if None: take latest folder) + :index: index of state to load (e.g. specific epoch) (if None: take latest index) + :continue_datetime: flag whether to continue on this run. Default: False + :return: datetime, index (helpful, if datetime / index wasn't given) + """ + if datetime is None: + for _, dirs, _ in os.walk("Logger/{}/".format(self.name)): + datetime = sorted(dirs)[-1] + if datetime == self.datetime: + datetime = sorted(dirs)[-2] + break + if continue_datetime: + os.rmdir() + self.datetime = datetime + if index is None: + for _, _, files in os.walk( + "Logger/{}/{}/states/".format(self.name, datetime) + ): + index = os.path.splitext(natsorted(files)[-1])[0] + break + if self.loading_path is not None: + path = os.path.join(self.loading_path, "states/{}.dic".format(index)) + else: + path = "Logger/{}/{}/states/{}.dic".format(self.name, datetime, index) + with open(path, "rb") as f: + state = pickle.load(f) + for key in state.keys(): + dic[key] = state[key] + return datetime, index + + def load_logger(self, datetime=None, load=False, saving_path=None): + """ + copy older tensorboard logger to new dir + :datetime: date and time from run to load (if None: take latest folder) + """ + if datetime is None: + for _, dirs, _ in os.walk("Logger/{}/".format(self.name)): + datetime = sorted(dirs)[-1] + if datetime == self.datetime: + datetime = sorted(dirs)[-2] + break + if load: + cwd = os.getcwd() + if self.loading_path is not None: + path = os.path.join(self.loading_path, "tensorboard/") + else: + path = "Logger/{0}/{1}/tensorboard/".format(self.name, datetime) + for _, _, files in os.walk(path): + for file in files: + older_tensorboard_n = file + older_tensorboard = path + older_tensorboard_n + newer_tensorboard = ( + cwd + + "/Logger/{0}/{1}/tensorboard/".format( + self.name, self.datetime + ) + + older_tensorboard_n + ) + shutil.copyfile(older_tensorboard, newer_tensorboard) + break + if os.path.exists(newer_tensorboard): + print( + "older tensorboard aleady been copied to {0}".format( + newer_tensorboard + ) + ) + + +t_start = 0 + + +def t_step(): + """ + returns delta t from last call of t_step() + """ + global t_start + t_end = time.perf_counter() + delta_t = t_end - t_start + t_start = t_end + return delta_t diff --git a/jointContribution/IJCAI_2024/aminos/utils/get_param.py b/jointContribution/IJCAI_2024/aminos/utils/get_param.py index 4fef0a6c3f..cecc517d0a 100644 --- a/jointContribution/IJCAI_2024/aminos/utils/get_param.py +++ 
b/jointContribution/IJCAI_2024/aminos/utils/get_param.py @@ -1,279 +1,279 @@ -import argparse -import json - - -def str2bool(v): - """ - 'boolean type variable' for add_argument - """ - if v.lower() in ("yes", "true", "t", "y", "1"): - return True - elif v.lower() in ("no", "false", "f", "n", "0"): - return False - else: - raise argparse.ArgumentTypeError("boolean value expected.") - - -def params(load=None): - if load is not None: - parser = argparse.ArgumentParser( - description="train / test a paddle model to predict frames" - ) - params = vars(parser.parse_args([])) - with open(load + "/commandline_args.json", "rt") as f: - params.update(json.load(f)) - for k, v in params.items(): - parser.add_argument("--" + k, default=v) - args = parser.parse_args([]) - return args - else: - """ - return parameters for training / testing / plotting of models - :return: parameter-Namespace - """ - parser = argparse.ArgumentParser( - description="train / test a paddle model to predict frames" - ) - parser.add_argument( - "--net", - default="SAGE-Trans", - type=str, - help="network to train (default: GN-Cell)", - choices=[ - "GN-Cell", - "GN-Node", - "SAGE-Trans", - "GeoATT", - "MultiGN", - "MultiTrans", - "Trans", - "GM", - ], - ) - parser.add_argument( - "--SAGE_MIXING_TYPE", - default="FVGNAttUNet", - type=str, - help="SAGE & transolver features mixing type", - choices=[ - "Origin", - "AttMixing", - "transUnet", - "TransU-sep", - "PureUnet", - "FVGNUnet", - "SDFUnet", - "SageSDFUnet", - "TransolverUnet", - "TransAttUnet", - "FVGNAttUNet", - ], - ) - parser.add_argument( - "--GM_TYPE", - default="AttuMMLP", - type=str, - help="SAGE & transolver features mixing type", - choices=["FVGNAttUnet", "SageAttUnet", "transolunet", "AttuMMLP"], - ) - parser.add_argument( - "--n_epochs", - default=150, - type=int, - help="number of epochs (after each epoch, the model gets saved)", - ) - parser.add_argument( - "--batch_size", default=4, type=int, help="batch size (default: 100)" - ) - parser.add_argument( - "--dataset_size", default=500, type=int, help="dataset size (default: 500)" - ) - parser.add_argument( - "--batch_size_for_attn", - default=1, - type=int, - help="batch size (default: 100)", - ) - parser.add_argument( - "--lr", - default=0.001, - type=float, - help="learning rate of optimizer (default: 0.0001)", - ) - parser.add_argument( - "--lr_scheduler", - default="fixlr", - type=str, - help="choose learing rate scheduler (default: coslr)", - choices=["coslr", "fix"], - ) - parser.add_argument( - "--log", - default=True, - type=str2bool, - help="log models / metrics during training (turn off for debugging)", - ) - parser.add_argument( - "--on_gpu", default=0, type=int, help="set training on which gpu" - ) - parser.add_argument( - "--num_samples", default=5000, type=int, help="subsampling for trackB" - ) - parser.add_argument( - "--sample_khop", default=5, type=int, help="subsampling k-hop for trackB" - ) - parser.add_argument( - "--statistics_times", - default=20, - type=int, - help="accumlate data statistics for normalization before backprapagation (default: 1)", - ) - parser.add_argument( - "--before_explr_decay_steps", - default=500, - type=int, - help="steps before using exp lr decay technique (default:12000)", - ) - parser.add_argument( - "--loss", - default="square", - type=str, - help="loss type to train network (default: square)", - choices=["square"], - ) - parser.add_argument( - "--wgrad", - default=0, - type=float, - help="weight of gradient loss (default: 0.1)", - ) - parser.add_argument( - 
"--wpress", - default=1, - type=float, - help="weight of pressure loss (default: 0.9)", - ) - parser.add_argument( - "--load_date_time", - default=None, - type=str, - help="date_time of run to load (default: None)", - ) - parser.add_argument( - "--load_index", - default=None, - type=int, - help="index of run to load (default: None)", - ) - parser.add_argument( - "--load_optimizer", - default=False, - type=str2bool, - help="load state of optimizer (default: True)", - ) - parser.add_argument( - "--load_latest", - default=False, - type=str2bool, - help="load latest version for training (if True: leave load_date_time and load_index None. default: False)", - ) - parser.add_argument( - "--hidden_size", - default=128, - type=int, - help="hidden size of network (default: 20)", - ) - parser.add_argument( - "--message_passing_num", - default=8, - type=int, - help="message passing layer number (default:15)", - ) - parser.add_argument( - "--node_input_size", - default=3, - type=int, - help="node encoder node_input_size (default: 2)", - ) - parser.add_argument( - "--edge_input_size", - default=6, - type=int, - help="edge encoder edge_input_size, include edge center pos (x,y) (default: 3)", - ) - parser.add_argument( - "--cell_input_size", - default=3, - type=int, - help="cell encoder cell_input_size, include uvp (default: 3)", - ) - parser.add_argument( - "--node_output_size", - default=1, - type=int, - help="edge decoder edge_output_size uvp on edge center(default: 8)", - ) - parser.add_argument( - "--edge_output_size", - default=1, - type=int, - help="edge decoder edge_output_size uvp on edge center(default: 8)", - ) - parser.add_argument( - "--cell_output_size", - default=1, - type=int, - help="cell decoder cell_output_size uvp on cell center(default: 1)", - ) - parser.add_argument( - "--drop_out", - default=False, - type=str2bool, - help="using dropout technique in message passing layer(default: True)", - ) - parser.add_argument( - "--attention", - default=False, - type=str2bool, - help="using dropout technique in message passing layer(default: True)", - ) - parser.add_argument( - "--multihead", - default=1, - type=int, - help="using dropout technique in message passing layer(default: True)", - ) - parser.add_argument( - "--dataset_type", - default="h5", - type=str, - help="load latest version for training (if True: leave load_date_time and load_index None. default: False)", - ) - parser.add_argument( - "--dataset_dir", - default="./Datasets", - type=str, - help="load latest version for training (if True: leave load_date_time and load_index None. 
default: False)", - ) - parser.add_argument( - "--git_branch", - default="FVGN-pde-jtedu smaller tanh factor,test no prevent oversmooth still normalize,lr on bc=1e-2", - type=str, - help="current running code git branch", - ) - parser.add_argument( - "--git_commit_dates", - default="March 14th, 2023 10:56 PM", - type=str, - help="current running code git commit date", - ) - params = parser.parse_args([]) - git_info = { - "git_branch": params.git_branch, - "git_commit_dates": params.git_commit_dates, - } - return params, git_info - - -def get_hyperparam(params): - return f"net {params.net}; hs {params.hidden_size};" +import argparse +import json + + +def str2bool(v): + """ + 'boolean type variable' for add_argument + """ + if v.lower() in ("yes", "true", "t", "y", "1"): + return True + elif v.lower() in ("no", "false", "f", "n", "0"): + return False + else: + raise argparse.ArgumentTypeError("boolean value expected.") + + +def params(load=None): + if load is not None: + parser = argparse.ArgumentParser( + description="train / test a paddle model to predict frames" + ) + params = vars(parser.parse_args([])) + with open(load + "/commandline_args.json", "rt") as f: + params.update(json.load(f)) + for k, v in params.items(): + parser.add_argument("--" + k, default=v) + args = parser.parse_args([]) + return args + else: + """ + return parameters for training / testing / plotting of models + :return: parameter-Namespace + """ + parser = argparse.ArgumentParser( + description="train / test a paddle model to predict frames" + ) + parser.add_argument( + "--net", + default="SAGE-Trans", + type=str, + help="network to train (default: GN-Cell)", + choices=[ + "GN-Cell", + "GN-Node", + "SAGE-Trans", + "GeoATT", + "MultiGN", + "MultiTrans", + "Trans", + "GM", + ], + ) + parser.add_argument( + "--SAGE_MIXING_TYPE", + default="FVGNAttUNet", + type=str, + help="SAGE & transolver features mixing type", + choices=[ + "Origin", + "AttMixing", + "transUnet", + "TransU-sep", + "PureUnet", + "FVGNUnet", + "SDFUnet", + "SageSDFUnet", + "TransolverUnet", + "TransAttUnet", + "FVGNAttUNet", + ], + ) + parser.add_argument( + "--GM_TYPE", + default="AttuMMLP", + type=str, + help="SAGE & transolver features mixing type", + choices=["FVGNAttUnet", "SageAttUnet", "transolunet", "AttuMMLP"], + ) + parser.add_argument( + "--n_epochs", + default=150, + type=int, + help="number of epochs (after each epoch, the model gets saved)", + ) + parser.add_argument( + "--batch_size", default=4, type=int, help="batch size (default: 100)" + ) + parser.add_argument( + "--dataset_size", default=500, type=int, help="dataset size (default: 500)" + ) + parser.add_argument( + "--batch_size_for_attn", + default=1, + type=int, + help="batch size (default: 100)", + ) + parser.add_argument( + "--lr", + default=0.001, + type=float, + help="learning rate of optimizer (default: 0.0001)", + ) + parser.add_argument( + "--lr_scheduler", + default="fixlr", + type=str, + help="choose learing rate scheduler (default: coslr)", + choices=["coslr", "fix"], + ) + parser.add_argument( + "--log", + default=True, + type=str2bool, + help="log models / metrics during training (turn off for debugging)", + ) + parser.add_argument( + "--on_gpu", default=0, type=int, help="set training on which gpu" + ) + parser.add_argument( + "--num_samples", default=5000, type=int, help="subsampling for trackB" + ) + parser.add_argument( + "--sample_khop", default=5, type=int, help="subsampling k-hop for trackB" + ) + parser.add_argument( + "--statistics_times", + default=20, + 
type=int,
+            help="accumulate data statistics for normalization before backpropagation (default: 20)",
+        )
+        parser.add_argument(
+            "--before_explr_decay_steps",
+            default=500,
+            type=int,
+            help="steps before using exp lr decay technique (default: 500)",
+        )
+        parser.add_argument(
+            "--loss",
+            default="square",
+            type=str,
+            help="loss type to train network (default: square)",
+            choices=["square"],
+        )
+        parser.add_argument(
+            "--wgrad",
+            default=0,
+            type=float,
+            help="weight of gradient loss (default: 0)",
+        )
+        parser.add_argument(
+            "--wpress",
+            default=1,
+            type=float,
+            help="weight of pressure loss (default: 1)",
+        )
+        parser.add_argument(
+            "--load_date_time",
+            default=None,
+            type=str,
+            help="date_time of run to load (default: None)",
+        )
+        parser.add_argument(
+            "--load_index",
+            default=None,
+            type=int,
+            help="index of run to load (default: None)",
+        )
+        parser.add_argument(
+            "--load_optimizer",
+            default=False,
+            type=str2bool,
+            help="load state of optimizer (default: False)",
+        )
+        parser.add_argument(
+            "--load_latest",
+            default=False,
+            type=str2bool,
+            help="load latest version for training (if True: leave load_date_time and load_index None. default: False)",
+        )
+        parser.add_argument(
+            "--hidden_size",
+            default=128,
+            type=int,
+            help="hidden size of network (default: 128)",
+        )
+        parser.add_argument(
+            "--message_passing_num",
+            default=8,
+            type=int,
+            help="message passing layer number (default: 8)",
+        )
+        parser.add_argument(
+            "--node_input_size",
+            default=3,
+            type=int,
+            help="node encoder node_input_size (default: 3)",
+        )
+        parser.add_argument(
+            "--edge_input_size",
+            default=6,
+            type=int,
+            help="edge encoder edge_input_size, include edge center pos (x,y) (default: 6)",
+        )
+        parser.add_argument(
+            "--cell_input_size",
+            default=3,
+            type=int,
+            help="cell encoder cell_input_size, include uvp (default: 3)",
+        )
+        parser.add_argument(
+            "--node_output_size",
+            default=1,
+            type=int,
+            help="node decoder node_output_size, pressure on nodes (default: 1)",
+        )
+        parser.add_argument(
+            "--edge_output_size",
+            default=1,
+            type=int,
+            help="edge decoder edge_output_size, uvp on edge center (default: 1)",
+        )
+        parser.add_argument(
+            "--cell_output_size",
+            default=1,
+            type=int,
+            help="cell decoder cell_output_size, uvp on cell center (default: 1)",
+        )
+        parser.add_argument(
+            "--drop_out",
+            default=False,
+            type=str2bool,
+            help="use dropout in message passing layers (default: False)",
+        )
+        parser.add_argument(
+            "--attention",
+            default=False,
+            type=str2bool,
+            help="use attention in message passing layers (default: False)",
+        )
+        parser.add_argument(
+            "--multihead",
+            default=1,
+            type=int,
+            help="number of attention heads in message passing layers (default: 1)",
+        )
+        parser.add_argument(
+            "--dataset_type",
+            default="h5",
+            type=str,
+            help="dataset storage format (default: h5)",
+        )
+        parser.add_argument(
+            "--dataset_dir",
+            default="./Datasets",
+            type=str,
+            help="load latest version for training (if True: leave load_date_time and load_index None.
default: False)", + ) + parser.add_argument( + "--git_branch", + default="FVGN-pde-jtedu smaller tanh factor,test no prevent oversmooth still normalize,lr on bc=1e-2", + type=str, + help="current running code git branch", + ) + parser.add_argument( + "--git_commit_dates", + default="March 14th, 2023 10:56 PM", + type=str, + help="current running code git commit date", + ) + params = parser.parse_args([]) + git_info = { + "git_branch": params.git_branch, + "git_commit_dates": params.git_commit_dates, + } + return params, git_info + + +def get_hyperparam(params): + return f"net {params.net}; hs {params.hidden_size};" diff --git a/jointContribution/IJCAI_2024/aminos/utils/knn.py b/jointContribution/IJCAI_2024/aminos/utils/knn.py index 94889dc12a..02d4a03691 100644 --- a/jointContribution/IJCAI_2024/aminos/utils/knn.py +++ b/jointContribution/IJCAI_2024/aminos/utils/knn.py @@ -1,98 +1,98 @@ -from typing import List -from typing import Union - -import numpy as np -import paddle -from scipy.spatial import cKDTree - - -def knn_scipy_batched(x, y, k, batch_x=None, batch_y=None): - assert batch_x is not None and batch_y is not None, "Batch information is required." - - unique_batches = np.unique(batch_x) - all_distances = np.full((x.shape[0], k), np.inf) - all_indices = np.full((x.shape[0], k), -1) - - for batch in unique_batches: - mask_x = batch_x == batch - mask_y = batch_y == batch - batch_x_points = x[mask_x] - batch_y_points = y[mask_y] - - if batch_x_points.size == 0 or batch_y_points.size == 0: - continue - - tree = cKDTree(batch_y_points) - distances, indices = tree.query(batch_x_points, k=k) - - true_indices = np.where(mask_y)[0][indices] - - all_distances[mask_x] = distances - all_indices[mask_x] = true_indices - - return all_distances, all_indices - - -def knn_graph(pos_tensor, k): - dist = paddle.cdist(pos_tensor, pos_tensor) - - nn_indices = [] - - for i in range(pos_tensor.shape[0]): - distances = dist[i].numpy() - distances[i] = np.inf - - indices = np.argsort(distances)[:k] - nn_indices.append(indices) - - nn_indices_tensor = paddle.to_tensor(nn_indices) - - return nn_indices_tensor - - -def k_hop_subgraph( - edge_index: paddle.Tensor, - num_hops: int, - node_idx: Union[int, List[int], paddle.Tensor], - relabel_nodes: bool = False, -) -> paddle.Tensor: - if not isinstance(node_idx, paddle.Tensor): - node_idx = paddle.to_tensor(node_idx, dtype="int64") - - visited = paddle.zeros([edge_index.max() + 1], dtype="bool") - queue = node_idx.tolist() if isinstance(node_idx, paddle.Tensor) else node_idx - visited[queue] = True - sub_edge_index = [] - - current_hop = 0 - - while queue and current_hop < num_hops: - current_hop += 1 - next_queue = [] - - for node in queue: - neighbors = edge_index[1] == node - neighbors = edge_index[0][neighbors] - neighbors = neighbors[~visited[neighbors]] - - next_queue.extend(neighbors.tolist()) - visited[neighbors] = True - - for neighbor in neighbors: - if relabel_nodes: - original_idx = ( - paddle.nonzero(node_idx == node)[0].item() - if isinstance(node_idx, paddle.Tensor) - else node_idx.index(node) - ) - sub_edge_index.append([original_idx, len(sub_edge_index) // 2 + 1]) - else: - sub_edge_index.append([node, neighbor]) - - queue = next_queue - - sub_edge_index = paddle.to_tensor(sub_edge_index, dtype="int64") - if relabel_nodes: - return sub_edge_index.reshape([-1, 2])[:, 1] - else: - return sub_edge_index.reshape([-1, 2]) +from typing import List +from typing import Union + +import numpy as np +import paddle +from scipy.spatial import cKDTree + + 
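As a quick sanity check of the command-line helpers in get_param.py above, params() and get_hyperparam() can be exercised without any real command line, since params() parses an empty argv. The import path again assumes the aminos directory is on PYTHONPATH, and the reload path in the commented line is a hypothetical placeholder.

from utils.get_param import get_hyperparam, params

args, git_info = params()         # Namespace filled with the defaults listed above
print(get_hyperparam(args))       # "net SAGE-Trans; hs 128;"
print(args.batch_size, args.lr)   # 4 0.001
print(git_info["git_branch"])

# Reload a previous run's configuration from the commandline_args.json that
# Logger.save_state writes into <run_dir>/states (the path below is hypothetical):
# args = params(load="Logger/MyRun/2024-01-01-00:00:00/states")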
+def knn_scipy_batched(x, y, k, batch_x=None, batch_y=None): + assert batch_x is not None and batch_y is not None, "Batch information is required." + + unique_batches = np.unique(batch_x) + all_distances = np.full((x.shape[0], k), np.inf) + all_indices = np.full((x.shape[0], k), -1) + + for batch in unique_batches: + mask_x = batch_x == batch + mask_y = batch_y == batch + batch_x_points = x[mask_x] + batch_y_points = y[mask_y] + + if batch_x_points.size == 0 or batch_y_points.size == 0: + continue + + tree = cKDTree(batch_y_points) + distances, indices = tree.query(batch_x_points, k=k) + + true_indices = np.where(mask_y)[0][indices] + + all_distances[mask_x] = distances + all_indices[mask_x] = true_indices + + return all_distances, all_indices + + +def knn_graph(pos_tensor, k): + dist = paddle.cdist(pos_tensor, pos_tensor) + + nn_indices = [] + + for i in range(pos_tensor.shape[0]): + distances = dist[i].numpy() + distances[i] = np.inf + + indices = np.argsort(distances)[:k] + nn_indices.append(indices) + + nn_indices_tensor = paddle.to_tensor(nn_indices) + + return nn_indices_tensor + + +def k_hop_subgraph( + edge_index: paddle.Tensor, + num_hops: int, + node_idx: Union[int, List[int], paddle.Tensor], + relabel_nodes: bool = False, +) -> paddle.Tensor: + if not isinstance(node_idx, paddle.Tensor): + node_idx = paddle.to_tensor(node_idx, dtype="int64") + + visited = paddle.zeros([edge_index.max() + 1], dtype="bool") + queue = node_idx.tolist() if isinstance(node_idx, paddle.Tensor) else node_idx + visited[queue] = True + sub_edge_index = [] + + current_hop = 0 + + while queue and current_hop < num_hops: + current_hop += 1 + next_queue = [] + + for node in queue: + neighbors = edge_index[1] == node + neighbors = edge_index[0][neighbors] + neighbors = neighbors[~visited[neighbors]] + + next_queue.extend(neighbors.tolist()) + visited[neighbors] = True + + for neighbor in neighbors: + if relabel_nodes: + original_idx = ( + paddle.nonzero(node_idx == node)[0].item() + if isinstance(node_idx, paddle.Tensor) + else node_idx.index(node) + ) + sub_edge_index.append([original_idx, len(sub_edge_index) // 2 + 1]) + else: + sub_edge_index.append([node, neighbor]) + + queue = next_queue + + sub_edge_index = paddle.to_tensor(sub_edge_index, dtype="int64") + if relabel_nodes: + return sub_edge_index.reshape([-1, 2])[:, 1] + else: + return sub_edge_index.reshape([-1, 2]) diff --git a/jointContribution/IJCAI_2024/aminos/utils/losses.py b/jointContribution/IJCAI_2024/aminos/utils/losses.py index f904f2df32..cb02849e43 100644 --- a/jointContribution/IJCAI_2024/aminos/utils/losses.py +++ b/jointContribution/IJCAI_2024/aminos/utils/losses.py @@ -1,64 +1,64 @@ -import paddle - - -class LpLoss(object): - def __init__(self, d=2, p=2, size_average=True, reduction=True): - super(LpLoss, self).__init__() - assert d > 0 and p > 0 - self.d = d - self.p = p - self.reduction = reduction - self.size_average = size_average - - def abs(self, x, y): - num_examples = tuple(x.shape)[0] - h = 1.0 / (tuple(x.shape)[1] - 1.0) - all_norms = h ** (self.d / self.p) * paddle.linalg.norm( - x=x.view(num_examples, -1) - y.view(num_examples, -1), p=self.p, axis=1 - ) - if self.reduction: - if self.size_average: - return paddle.mean(x=all_norms) - else: - return paddle.sum(x=all_norms) - return all_norms - - def rel(self, x, y): - num_examples = tuple(x.shape)[0] - diff_norms = paddle.linalg.norm( - x=x.reshape(num_examples, -1) - y.reshape(num_examples, -1), - p=self.p, - axis=1, - ) - y_norms = 
paddle.linalg.norm(x=y.reshape(num_examples, -1), p=self.p, axis=1) - if self.reduction: - if self.size_average: - return paddle.mean(x=diff_norms / y_norms) - else: - return paddle.sum(x=diff_norms / y_norms) - return diff_norms / y_norms - - def rel_batch(self, x, y, batch, num_graphs): - loss = paddle.to_tensor(data=0.0, dtype=x.dtype, place=x.place) - for i in range(num_graphs): - mask = i == batch - rel_loss = self.rel( - x[mask][ - None, - ], - y[mask][ - None, - ] - + 1e-08, - ) - if paddle.isnan(x=rel_loss).astype("bool").any(): - raise ValueError(f"NaN detected in rel_loss for graph {i}") - loss = loss + rel_loss - loss /= num_graphs - return loss - - def __call__(self, x, y, batch=None, num_graphs=None): - if batch is None: - return self.rel(x, y) - else: - return self.rel_batch(x, y, batch, num_graphs=num_graphs) +import paddle + + +class LpLoss(object): + def __init__(self, d=2, p=2, size_average=True, reduction=True): + super(LpLoss, self).__init__() + assert d > 0 and p > 0 + self.d = d + self.p = p + self.reduction = reduction + self.size_average = size_average + + def abs(self, x, y): + num_examples = tuple(x.shape)[0] + h = 1.0 / (tuple(x.shape)[1] - 1.0) + all_norms = h ** (self.d / self.p) * paddle.linalg.norm( + x=x.view(num_examples, -1) - y.view(num_examples, -1), p=self.p, axis=1 + ) + if self.reduction: + if self.size_average: + return paddle.mean(x=all_norms) + else: + return paddle.sum(x=all_norms) + return all_norms + + def rel(self, x, y): + num_examples = tuple(x.shape)[0] + diff_norms = paddle.linalg.norm( + x=x.reshape(num_examples, -1) - y.reshape(num_examples, -1), + p=self.p, + axis=1, + ) + y_norms = paddle.linalg.norm(x=y.reshape(num_examples, -1), p=self.p, axis=1) + if self.reduction: + if self.size_average: + return paddle.mean(x=diff_norms / y_norms) + else: + return paddle.sum(x=diff_norms / y_norms) + return diff_norms / y_norms + + def rel_batch(self, x, y, batch, num_graphs): + loss = paddle.to_tensor(data=0.0, dtype=x.dtype, place=x.place) + for i in range(num_graphs): + mask = i == batch + rel_loss = self.rel( + x[mask][ + None, + ], + y[mask][ + None, + ] + + 1e-08, + ) + if paddle.isnan(x=rel_loss).astype("bool").any(): + raise ValueError(f"NaN detected in rel_loss for graph {i}") + loss = loss + rel_loss + loss /= num_graphs + return loss + + def __call__(self, x, y, batch=None, num_graphs=None): + if batch is None: + return self.rel(x, y) + else: + return self.rel_batch(x, y, batch, num_graphs=num_graphs) diff --git a/jointContribution/IJCAI_2024/aminos/utils/normalization.py b/jointContribution/IJCAI_2024/aminos/utils/normalization.py index 4c4e6e495b..ab70ca40aa 100644 --- a/jointContribution/IJCAI_2024/aminos/utils/normalization.py +++ b/jointContribution/IJCAI_2024/aminos/utils/normalization.py @@ -1,90 +1,90 @@ -import paddle -import utils.paddle_aux as paddle_aux - - -class Normalizer(paddle.nn.Layer): - def __init__(self, size, max_accumulations=10**7, epsilon=1e-08, device=None): - """ - Online normalization module - - size: feature dimension - max_accumulation: maximum number of batches - epsilon: std cutoff for constant variable - device: device - """ - super(Normalizer, self).__init__() - self.max_accumulations = max_accumulations - self.epsilon = epsilon - self.register_buffer( - name="acc_count", - tensor=paddle.to_tensor( - data=1.0, dtype=paddle.get_default_dtype(), place=device - ), - ) - self.register_buffer( - name="num_accumulations", - tensor=paddle.to_tensor( - data=1.0, dtype=paddle.get_default_dtype(), place=device - 
), - ) - self.register_buffer( - name="acc_sum", - tensor=paddle.zeros(shape=size, dtype=paddle.get_default_dtype()), - ) - self.register_buffer( - name="acc_sum_squared", - tensor=paddle.zeros(shape=size, dtype=paddle.get_default_dtype()), - ) - - def forward(self, batched_data, accumulate=True): - """ - Updates mean/standard deviation and normalizes input data - - batched_data: batch of data - accumulate: if True, update accumulation statistics - """ - if accumulate and self.num_accumulations < self.max_accumulations: - self._accumulate(batched_data) - return (batched_data - self._mean()) / self._std() - - def inverse(self, normalized_batch_data): - """ - Unnormalizes input data - """ - return normalized_batch_data * self._std().to( - normalized_batch_data.place - ) + self._mean().to(normalized_batch_data.place) - - def _accumulate(self, batched_data): - """ - Accumulates statistics for mean/standard deviation computation - """ - count = paddle.to_tensor(data=tuple(batched_data.shape)[0]).astype( - dtype="float32" - ) - data_sum = paddle.sum(x=batched_data, axis=0) - squared_data_sum = paddle.sum(x=batched_data**2, axis=0) - self.acc_sum += data_sum.to(self.acc_sum.place) - self.acc_sum_squared += squared_data_sum.to(self.acc_sum_squared.place) - self.acc_count += count.to(self.acc_count.place) - self.num_accumulations += 1 - - def _mean(self): - """ - Returns accumulated mean - """ - safe_count = paddle_aux.max( - self.acc_count, paddle.to_tensor(data=1.0).astype(dtype="float32") - ) - return self.acc_sum / safe_count - - def _std(self): - """ - Returns accumulated standard deviation - """ - safe_count = paddle_aux.max( - self.acc_count, paddle.to_tensor(data=1.0).astype(dtype="float32") - ) - std = paddle.sqrt(x=self.acc_sum_squared / safe_count - self._mean() ** 2) - std[std < self.epsilon] = 1.0 - return std +import paddle +import utils.paddle_aux as paddle_aux + + +class Normalizer(paddle.nn.Layer): + def __init__(self, size, max_accumulations=10**7, epsilon=1e-08, device=None): + """ + Online normalization module + + size: feature dimension + max_accumulation: maximum number of batches + epsilon: std cutoff for constant variable + device: device + """ + super(Normalizer, self).__init__() + self.max_accumulations = max_accumulations + self.epsilon = epsilon + self.register_buffer( + name="acc_count", + tensor=paddle.to_tensor( + data=1.0, dtype=paddle.get_default_dtype(), place=device + ), + ) + self.register_buffer( + name="num_accumulations", + tensor=paddle.to_tensor( + data=1.0, dtype=paddle.get_default_dtype(), place=device + ), + ) + self.register_buffer( + name="acc_sum", + tensor=paddle.zeros(shape=size, dtype=paddle.get_default_dtype()), + ) + self.register_buffer( + name="acc_sum_squared", + tensor=paddle.zeros(shape=size, dtype=paddle.get_default_dtype()), + ) + + def forward(self, batched_data, accumulate=True): + """ + Updates mean/standard deviation and normalizes input data + + batched_data: batch of data + accumulate: if True, update accumulation statistics + """ + if accumulate and self.num_accumulations < self.max_accumulations: + self._accumulate(batched_data) + return (batched_data - self._mean()) / self._std() + + def inverse(self, normalized_batch_data): + """ + Unnormalizes input data + """ + return normalized_batch_data * self._std().to( + normalized_batch_data.place + ) + self._mean().to(normalized_batch_data.place) + + def _accumulate(self, batched_data): + """ + Accumulates statistics for mean/standard deviation computation + """ + count = 
paddle.to_tensor(data=tuple(batched_data.shape)[0]).astype( + dtype="float32" + ) + data_sum = paddle.sum(x=batched_data, axis=0) + squared_data_sum = paddle.sum(x=batched_data**2, axis=0) + self.acc_sum += data_sum.to(self.acc_sum.place) + self.acc_sum_squared += squared_data_sum.to(self.acc_sum_squared.place) + self.acc_count += count.to(self.acc_count.place) + self.num_accumulations += 1 + + def _mean(self): + """ + Returns accumulated mean + """ + safe_count = paddle_aux.max( + self.acc_count, paddle.to_tensor(data=1.0).astype(dtype="float32") + ) + return self.acc_sum / safe_count + + def _std(self): + """ + Returns accumulated standard deviation + """ + safe_count = paddle_aux.max( + self.acc_count, paddle.to_tensor(data=1.0).astype(dtype="float32") + ) + std = paddle.sqrt(x=self.acc_sum_squared / safe_count - self._mean() ** 2) + std[std < self.epsilon] = 1.0 + return std diff --git a/jointContribution/IJCAI_2024/aminos/utils/paddle_aux.py b/jointContribution/IJCAI_2024/aminos/utils/paddle_aux.py index f1aa88d2c4..f9f9629000 100644 --- a/jointContribution/IJCAI_2024/aminos/utils/paddle_aux.py +++ b/jointContribution/IJCAI_2024/aminos/utils/paddle_aux.py @@ -1,239 +1,239 @@ -# This file is generated by PaConvert ToolKit, please Don't edit it! -import paddle - - -def reshape(self, *args, **kwargs): - if args: - if len(args) == 1 and isinstance(args[0], (tuple, list)): - return paddle.reshape(self, args[0]) - else: - return paddle.reshape(self, list(args)) - elif kwargs: - assert "shape" in kwargs - return paddle.reshape(self, shape=kwargs["shape"]) - - -setattr(paddle.Tensor, "reshape", reshape) - - -def view(self, *args, **kwargs): - if args: - if len(args) == 1: - if isinstance(args[0], (tuple, list)): - return paddle.reshape(self, args[0]) # To change reshape => view - elif isinstance(args[0], str): - return paddle.view(self, args[0]) - else: - return paddle.reshape(self, list(args)) # To change reshape => view - else: - return paddle.reshape(self, list(args)) # To change reshape => view - elif kwargs: - key = [k for k in kwargs.keys()] - if "dtype" in kwargs: - return paddle.view(self, shape_or_dtype=kwargs[key[0]]) - else: - return paddle.reshape( - self, shape=kwargs[key[0]] - ) # To change reshape => view - - -setattr(paddle.Tensor, "view", view) - - -def min(*args, **kwargs): - if "input" in kwargs: - kwargs["x"] = kwargs.pop("input") - - out_v = None - if "out" in kwargs: - out_v = kwargs.pop("out") - - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.minimum(*args, **kwargs) - elif len(args) == 2 and isinstance(args[1], paddle.Tensor): - ret = paddle.minimum(*args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 2: - if out_v: - ret = paddle.min(*args, **kwargs), paddle.argmin(*args, **kwargs) - paddle.assign(ret[0], out_v[0]) - paddle.assign(ret[1], out_v[1]) - return out_v - else: - ret = paddle.min(*args, **kwargs), paddle.argmin(*args, **kwargs) - return ret - else: - ret = paddle.min(*args, **kwargs) - return ret - - if out_v: - paddle.assign(ret, out_v) - return out_v - else: - return ret - - -def max(*args, **kwargs): - if "input" in kwargs: - kwargs["x"] = kwargs.pop("input") - - out_v = None - if "out" in kwargs: - out_v = kwargs.pop("out") - - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.maximum(*args, **kwargs) - elif len(args) == 2 and isinstance(args[1], paddle.Tensor): - ret = paddle.maximum(*args, **kwargs) - else: - if "dim" in kwargs: - 
kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 2: - if out_v: - ret = paddle.max(*args, **kwargs), paddle.argmax(*args, **kwargs) - paddle.assign(ret[0], out_v[0]) - paddle.assign(ret[1], out_v[1]) - return out_v - else: - ret = paddle.max(*args, **kwargs), paddle.argmax(*args, **kwargs) - return ret - return out_v - else: - ret = paddle.max(*args, **kwargs) - return ret - - if out_v: - paddle.assign(ret, out_v) - return out_v - else: - return ret - - -def add(self, *args, **kwargs): - if "other" in kwargs: - y = kwargs["other"] - elif "y" in kwargs: - y = kwargs["y"] - else: - y = args[0] - - if "alpha" in kwargs: - alpha = kwargs["alpha"] - if alpha != 1: - if not isinstance(y, paddle.Tensor): - y = paddle.to_tensor(alpha * y) - else: - y = alpha * y - else: - if not isinstance(y, paddle.Tensor): - y = paddle.to_tensor(y) - - return paddle.add(self, y) - - -setattr(paddle.Tensor, "add", add) - - -def _FUNCTIONAL_PAD(x, pad, mode="constant", value=0.0, data_format="NCHW"): - if len(x.shape) * 2 == len(pad) and mode == "constant": - pad = ( - paddle.to_tensor(pad, dtype="int32") - .reshape((-1, 2)) - .flip([0]) - .flatten() - .tolist() - ) - return paddle.nn.functional.pad(x, pad, mode, value, data_format) - - -def repeat(self, *args, **kwargs): - if args: - if len(args) == 1 and isinstance(args[0], (tuple, list)): - return paddle.tile(self, args[0]) - else: - return paddle.tile(self, list(args)) - elif kwargs: - assert "repeats" in kwargs - return paddle.tile(self, repeat_times=kwargs["repeats"]) - - -setattr(paddle.Tensor, "repeat", repeat) - - -def min_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.minimum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.minimum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.min(self, *args, **kwargs), paddle.argmin( - self, *args, **kwargs - ) - else: - ret = paddle.min(self, *args, **kwargs) - - return ret - - -def max_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.maximum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.maximum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.max(self, *args, **kwargs), paddle.argmax( - self, *args, **kwargs - ) - else: - ret = paddle.max(self, *args, **kwargs) - - return ret - - -setattr(paddle.Tensor, "min", min_class_func) -setattr(paddle.Tensor, "max", max_class_func) - - -def scatter_paddle(src, index, dim, out=None, reduce=None): - if reduce is None: - raise ValueError("'reduce' must be 'add', 'mean' or 'mul'.") - - if out is None: - out = paddle.zeros_like(src) - - if reduce == "add": - out = paddle.scatter_(out, index, src, overwrite=False) - elif reduce == "mean": - count = paddle.zeros_like(out) - count = paddle.scatter_(count, index, paddle.ones_like(src), overwrite=False) - out = paddle.scatter_(out, index, src, overwrite=False) - count = paddle.clip(count, min=1) - out = out / count - elif reduce == "mul": - out = paddle.scatter_(out, index, src, overwrite=False, reduce="multiply") - else: - raise ValueError("'reduce' must be 'add', 'mean' or 'mul'.") - - return out - - -def scatter_softmax_paddle(x_scaled, index, axis, overwrite=False): - x_softmax = 
paddle.nn.functional.softmax(x_scaled, axis=-1) - out = paddle.scatter(x_softmax, index, overwrite=False) - return out +# This file is generated by PaConvert ToolKit, please Don't edit it! +import paddle + + +def reshape(self, *args, **kwargs): + if args: + if len(args) == 1 and isinstance(args[0], (tuple, list)): + return paddle.reshape(self, args[0]) + else: + return paddle.reshape(self, list(args)) + elif kwargs: + assert "shape" in kwargs + return paddle.reshape(self, shape=kwargs["shape"]) + + +setattr(paddle.Tensor, "reshape", reshape) + + +def view(self, *args, **kwargs): + if args: + if len(args) == 1: + if isinstance(args[0], (tuple, list)): + return paddle.reshape(self, args[0]) # To change reshape => view + elif isinstance(args[0], str): + return paddle.view(self, args[0]) + else: + return paddle.reshape(self, list(args)) # To change reshape => view + else: + return paddle.reshape(self, list(args)) # To change reshape => view + elif kwargs: + key = [k for k in kwargs.keys()] + if "dtype" in kwargs: + return paddle.view(self, shape_or_dtype=kwargs[key[0]]) + else: + return paddle.reshape( + self, shape=kwargs[key[0]] + ) # To change reshape => view + + +setattr(paddle.Tensor, "view", view) + + +def min(*args, **kwargs): + if "input" in kwargs: + kwargs["x"] = kwargs.pop("input") + + out_v = None + if "out" in kwargs: + out_v = kwargs.pop("out") + + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.minimum(*args, **kwargs) + elif len(args) == 2 and isinstance(args[1], paddle.Tensor): + ret = paddle.minimum(*args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 2: + if out_v: + ret = paddle.min(*args, **kwargs), paddle.argmin(*args, **kwargs) + paddle.assign(ret[0], out_v[0]) + paddle.assign(ret[1], out_v[1]) + return out_v + else: + ret = paddle.min(*args, **kwargs), paddle.argmin(*args, **kwargs) + return ret + else: + ret = paddle.min(*args, **kwargs) + return ret + + if out_v: + paddle.assign(ret, out_v) + return out_v + else: + return ret + + +def max(*args, **kwargs): + if "input" in kwargs: + kwargs["x"] = kwargs.pop("input") + + out_v = None + if "out" in kwargs: + out_v = kwargs.pop("out") + + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.maximum(*args, **kwargs) + elif len(args) == 2 and isinstance(args[1], paddle.Tensor): + ret = paddle.maximum(*args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 2: + if out_v: + ret = paddle.max(*args, **kwargs), paddle.argmax(*args, **kwargs) + paddle.assign(ret[0], out_v[0]) + paddle.assign(ret[1], out_v[1]) + return out_v + else: + ret = paddle.max(*args, **kwargs), paddle.argmax(*args, **kwargs) + return ret + return out_v + else: + ret = paddle.max(*args, **kwargs) + return ret + + if out_v: + paddle.assign(ret, out_v) + return out_v + else: + return ret + + +def add(self, *args, **kwargs): + if "other" in kwargs: + y = kwargs["other"] + elif "y" in kwargs: + y = kwargs["y"] + else: + y = args[0] + + if "alpha" in kwargs: + alpha = kwargs["alpha"] + if alpha != 1: + if not isinstance(y, paddle.Tensor): + y = paddle.to_tensor(alpha * y) + else: + y = alpha * y + else: + if not isinstance(y, paddle.Tensor): + y = paddle.to_tensor(y) + + return paddle.add(self, y) + + +setattr(paddle.Tensor, "add", add) + + +def _FUNCTIONAL_PAD(x, pad, mode="constant", value=0.0, data_format="NCHW"): + if len(x.shape) * 2 == len(pad) and mode == "constant": + pad = 
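The `paddle_aux` shims in this hunk monkey-patch `paddle.Tensor` so PyTorch-style call sites emitted by PaConvert keep working: `dim=` is mapped to `axis=`, axis reductions return a `(values, indices)` pair, `.repeat(...)` is routed to `paddle.tile`, and `.view(...)` falls back to `paddle.reshape`. A minimal sketch of the equivalent native Paddle calls, assuming only that Paddle is installed (illustrative, not part of the patch):

import paddle

x = paddle.to_tensor([[1.0, 5.0], [3.0, 2.0]])

# torch-style x.max(dim=1) -> (values, indices); native Paddle equivalent:
values, indices = paddle.max(x, axis=1), paddle.argmax(x, axis=1)

# torch-style x.max(other) -> element-wise maximum; native equivalent:
elementwise = paddle.maximum(x, paddle.full_like(x, 2.0))

# torch-style x.repeat(2, 1) -> tiling; native equivalent:
tiled = paddle.tile(x, repeat_times=[2, 1])

# torch-style x.view(-1); the shim maps view onto reshape:
flat = paddle.reshape(x, [-1])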
( + paddle.to_tensor(pad, dtype="int32") + .reshape((-1, 2)) + .flip([0]) + .flatten() + .tolist() + ) + return paddle.nn.functional.pad(x, pad, mode, value, data_format) + + +def repeat(self, *args, **kwargs): + if args: + if len(args) == 1 and isinstance(args[0], (tuple, list)): + return paddle.tile(self, args[0]) + else: + return paddle.tile(self, list(args)) + elif kwargs: + assert "repeats" in kwargs + return paddle.tile(self, repeat_times=kwargs["repeats"]) + + +setattr(paddle.Tensor, "repeat", repeat) + + +def min_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.minimum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.minimum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.min(self, *args, **kwargs), paddle.argmin( + self, *args, **kwargs + ) + else: + ret = paddle.min(self, *args, **kwargs) + + return ret + + +def max_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.maximum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.maximum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.max(self, *args, **kwargs), paddle.argmax( + self, *args, **kwargs + ) + else: + ret = paddle.max(self, *args, **kwargs) + + return ret + + +setattr(paddle.Tensor, "min", min_class_func) +setattr(paddle.Tensor, "max", max_class_func) + + +def scatter_paddle(src, index, dim, out=None, reduce=None): + if reduce is None: + raise ValueError("'reduce' must be 'add', 'mean' or 'mul'.") + + if out is None: + out = paddle.zeros_like(src) + + if reduce == "add": + out = paddle.scatter_(out, index, src, overwrite=False) + elif reduce == "mean": + count = paddle.zeros_like(out) + count = paddle.scatter_(count, index, paddle.ones_like(src), overwrite=False) + out = paddle.scatter_(out, index, src, overwrite=False) + count = paddle.clip(count, min=1) + out = out / count + elif reduce == "mul": + out = paddle.scatter_(out, index, src, overwrite=False, reduce="multiply") + else: + raise ValueError("'reduce' must be 'add', 'mean' or 'mul'.") + + return out + + +def scatter_softmax_paddle(x_scaled, index, axis, overwrite=False): + x_softmax = paddle.nn.functional.softmax(x_scaled, axis=-1) + out = paddle.scatter(x_softmax, index, overwrite=False) + return out diff --git a/jointContribution/IJCAI_2024/aminos/utils/utilities.py b/jointContribution/IJCAI_2024/aminos/utils/utilities.py index a0e39d2d97..8669d34a41 100644 --- a/jointContribution/IJCAI_2024/aminos/utils/utilities.py +++ b/jointContribution/IJCAI_2024/aminos/utils/utilities.py @@ -1,112 +1,112 @@ -import enum - -import numpy as np -from dataset.Load_mesh import CustomGraphData -from utils.paddle_aux import scatter_paddle - - -class MeshType(enum.IntEnum): - Triangle = 1 - Tetrahedron = 2 - Quad = 3 - Line = 4 - Flat = 5 - - -def calc_cell_centered_with_node_attr( - node_attr, cells_node, cells_index, reduce="mean", map=True -): - if tuple(cells_node.shape) != tuple(cells_index.shape): - raise ValueError("wrong cells_node/cells_index dim") - if len(tuple(cells_node.shape)) > 1: - cells_node = cells_node.view(-1) - if len(tuple(cells_index.shape)) > 1: - cells_index = cells_index.view(-1) - if map: - mapped_node_attr = node_attr[cells_node] - else: - 
mapped_node_attr = node_attr - cell_attr = scatter_paddle( - src=mapped_node_attr, index=cells_index, dim=0, reduce=reduce - ) - return cell_attr - - -def calc_node_centered_with_cell_attr( - cell_attr, cells_node, cells_index, reduce="mean", map=True -): - if tuple(cells_node.shape) != tuple(cells_index.shape): - raise ValueError("wrong cells_node/cells_index dim ") - if len(tuple(cells_node.shape)) > 1: - cells_node = cells_node.view(-1) - if len(tuple(cells_index.shape)) > 1: - cells_index = cells_index.view(-1) - if map: - maped_cell_attr = cell_attr[cells_index] - else: - maped_cell_attr = cell_attr - cell_attr = scatter_paddle( - src=maped_cell_attr, index=cells_node, dim=0, reduce=reduce - ) - return cell_attr - - -def decompose_and_trans_node_attr_to_cell_attr_graph( - graph, has_changed_node_attr_to_cell_attr -): - x, edge_index, edge_attr, face, global_attr, ball_edge_index = ( - None, - None, - None, - None, - None, - None, - ) - for key in graph.keys: - if key == "x": - x = graph.x - elif key == "edge_index": - edge_index = graph.edge_index - elif key == "edge_attr": - edge_attr = graph.edge_attr - elif key == "global_attr": - global_attr = graph.global_attr - elif key == "face": - face = graph.face - elif key == "ball_edge_index": - ball_edge_index = graph.ball_edge_index - else: - pass - return x, edge_index, edge_attr, face, global_attr, ball_edge_index - - -def copy_geometric_data(graph, has_changed_node_attr_to_cell_attr): - """return a copy of gl.graph.Graph - This function should be carefully used based on - which keys in a given graph. - """ - ( - node_attr, - edge_index, - edge_attr, - face, - global_attr, - ball_edge_index, - ) = decompose_and_trans_node_attr_to_cell_attr_graph( - graph, has_changed_node_attr_to_cell_attr - ) - ret = CustomGraphData( - x=node_attr, - edge_index=edge_index, - edge_attr=edge_attr, - face=face, - ball_edge_index=ball_edge_index, - ) - ret.keys = ["x", "num_graphs", "edge_index", "batch", "edge_attr"] - return ret - - -def shuffle_np(array): - array_t = array.copy() - np.random.shuffle(array_t) - return array_t +import enum + +import numpy as np +from dataset.Load_mesh import CustomGraphData +from utils.paddle_aux import scatter_paddle + + +class MeshType(enum.IntEnum): + Triangle = 1 + Tetrahedron = 2 + Quad = 3 + Line = 4 + Flat = 5 + + +def calc_cell_centered_with_node_attr( + node_attr, cells_node, cells_index, reduce="mean", map=True +): + if tuple(cells_node.shape) != tuple(cells_index.shape): + raise ValueError("wrong cells_node/cells_index dim") + if len(tuple(cells_node.shape)) > 1: + cells_node = cells_node.view(-1) + if len(tuple(cells_index.shape)) > 1: + cells_index = cells_index.view(-1) + if map: + mapped_node_attr = node_attr[cells_node] + else: + mapped_node_attr = node_attr + cell_attr = scatter_paddle( + src=mapped_node_attr, index=cells_index, dim=0, reduce=reduce + ) + return cell_attr + + +def calc_node_centered_with_cell_attr( + cell_attr, cells_node, cells_index, reduce="mean", map=True +): + if tuple(cells_node.shape) != tuple(cells_index.shape): + raise ValueError("wrong cells_node/cells_index dim ") + if len(tuple(cells_node.shape)) > 1: + cells_node = cells_node.view(-1) + if len(tuple(cells_index.shape)) > 1: + cells_index = cells_index.view(-1) + if map: + maped_cell_attr = cell_attr[cells_index] + else: + maped_cell_attr = cell_attr + cell_attr = scatter_paddle( + src=maped_cell_attr, index=cells_node, dim=0, reduce=reduce + ) + return cell_attr + + +def decompose_and_trans_node_attr_to_cell_attr_graph( + 
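`calc_cell_centered_with_node_attr` above gathers per-node attributes along the flattened `cells_node` list and scatter-reduces them per cell via `cells_index`; `calc_node_centered_with_cell_attr` goes the other way. A minimal NumPy sketch of the `reduce="mean"` case with made-up indices (illustrative only; the real code routes through `scatter_paddle`):

import numpy as np

node_attr = np.array([[1.0], [2.0], [3.0], [4.0]])   # one feature per node
cells_node = np.array([0, 1, 2, 1, 2, 3])            # node ids, flattened cell by cell
cells_index = np.array([0, 0, 0, 1, 1, 1])           # owning cell of each entry

gathered = node_attr[cells_node]                      # the map=True branch
num_cells = cells_index.max() + 1
sums = np.zeros((num_cells, node_attr.shape[1]))
np.add.at(sums, cells_index, gathered)                # scatter-add per cell
counts = np.bincount(cells_index, minlength=num_cells)[:, None]
cell_attr = sums / np.maximum(counts, 1)              # reduce="mean"
# cell_attr == [[2.0], [3.0]]: means of nodes (0, 1, 2) and (1, 2, 3)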
graph, has_changed_node_attr_to_cell_attr +): + x, edge_index, edge_attr, face, global_attr, ball_edge_index = ( + None, + None, + None, + None, + None, + None, + ) + for key in graph.keys: + if key == "x": + x = graph.x + elif key == "edge_index": + edge_index = graph.edge_index + elif key == "edge_attr": + edge_attr = graph.edge_attr + elif key == "global_attr": + global_attr = graph.global_attr + elif key == "face": + face = graph.face + elif key == "ball_edge_index": + ball_edge_index = graph.ball_edge_index + else: + pass + return x, edge_index, edge_attr, face, global_attr, ball_edge_index + + +def copy_geometric_data(graph, has_changed_node_attr_to_cell_attr): + """return a copy of gl.graph.Graph + This function should be carefully used based on + which keys in a given graph. + """ + ( + node_attr, + edge_index, + edge_attr, + face, + global_attr, + ball_edge_index, + ) = decompose_and_trans_node_attr_to_cell_attr_graph( + graph, has_changed_node_attr_to_cell_attr + ) + ret = CustomGraphData( + x=node_attr, + edge_index=edge_index, + edge_attr=edge_attr, + face=face, + ball_edge_index=ball_edge_index, + ) + ret.keys = ["x", "num_graphs", "edge_index", "batch", "edge_attr"] + return ret + + +def shuffle_np(array): + array_t = array.copy() + np.random.shuffle(array_t) + return array_t diff --git a/jointContribution/IJCAI_2024/bju/cfd_params.yaml b/jointContribution/IJCAI_2024/bju/cfd_params.yaml index 1db73c9e7b..b4ed5f38b2 100644 --- a/jointContribution/IJCAI_2024/bju/cfd_params.yaml +++ b/jointContribution/IJCAI_2024/bju/cfd_params.yaml @@ -1,51 +1,51 @@ -GraphSAGE: - encoder: [7, 32, 64] - decoder: [64, 32, 4] - - nb_hidden_layers: 2 - size_hidden_layers: 64 - batch_size: 1 - nb_epochs: 400 - lr: 0.001 - bn_bool: True - res_bool: False - -MLP: - encoder: [3, 64, 64, 32, 64] - decoder: [64, 32, 64, 64, 1] - - nb_hidden_layers: 12 - size_hidden_layers: 64 - batch_size: 1 - nb_epochs: 1000 - lr: 0.001 - bn_bool: True - res_bool: False - r: 0.02 - -GAT: - encoder: [7, 32, 64] - decoder: [64, 32, 4] - - nb_hidden_layers: 2 - size_hidden_layers: 64 - batch_size: 1 - nb_epochs: 400 - lr: 0.0001 - bn_bool: True - nb_heads: 4 - res_bool: False - -GNO: - encoder: [7, 32, 32] - decoder: [32, 32, 4] - - nb_hidden_layers: 2 - size_hidden_layers: 32 - batch_size: 1 - nb_epochs: 200 - lr: 0.001 - bn_bool: True - kernel: [11, 64, 256, 1024] - res_bool: False - r: 0.2 +GraphSAGE: + encoder: [7, 32, 64] + decoder: [64, 32, 4] + + nb_hidden_layers: 2 + size_hidden_layers: 64 + batch_size: 1 + nb_epochs: 400 + lr: 0.001 + bn_bool: True + res_bool: False + +MLP: + encoder: [3, 64, 64, 32, 64] + decoder: [64, 32, 64, 64, 1] + + nb_hidden_layers: 12 + size_hidden_layers: 64 + batch_size: 1 + nb_epochs: 1000 + lr: 0.001 + bn_bool: True + res_bool: False + r: 0.02 + +GAT: + encoder: [7, 32, 64] + decoder: [64, 32, 4] + + nb_hidden_layers: 2 + size_hidden_layers: 64 + batch_size: 1 + nb_epochs: 400 + lr: 0.0001 + bn_bool: True + nb_heads: 4 + res_bool: False + +GNO: + encoder: [7, 32, 32] + decoder: [32, 32, 4] + + nb_hidden_layers: 2 + size_hidden_layers: 32 + batch_size: 1 + nb_epochs: 200 + lr: 0.001 + bn_bool: True + kernel: [11, 64, 256, 1024] + res_bool: False + r: 0.2 diff --git a/jointContribution/IJCAI_2024/bju/dataset.py b/jointContribution/IJCAI_2024/bju/dataset.py index c9e4649a25..9365b3263d 100644 --- a/jointContribution/IJCAI_2024/bju/dataset.py +++ b/jointContribution/IJCAI_2024/bju/dataset.py @@ -1,163 +1,163 @@ -import os -import random - -import numpy as np -import paddle -from 
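`cfd_params.yaml` above keeps one hyperparameter block per model (GraphSAGE, MLP, GAT, GNO). A minimal sketch of reading one block, assuming PyYAML as the loader (the loader itself is not part of this patch):

import yaml  # assumed dependency (PyYAML); not introduced by this patch

with open("cfd_params.yaml") as f:
    params = yaml.safe_load(f)

gno = params["GNO"]
print(gno["encoder"], gno["lr"], gno["kernel"])
# -> [7, 32, 32] 0.001 [11, 64, 256, 1024]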
utils.utils import k_hop_subgraph -from utils.utils import radius_graph - - -def read_data(args, norm=True): - with open(os.path.join(args.info_dir, "global_bounds.txt"), "r") as fp: - min_bounds = fp.readline().split(" ") - max_bounds = fp.readline().split(" ") - min_in = np.array([float(a) for a in min_bounds]) - max_in = np.array([float(a) for a in max_bounds]) - with open(os.path.join(args.info_dir, "train_pressure_mean_std.txt"), "r") as fp: - min_bounds = fp.readline().split(" ") - max_bounds = fp.readline().split(" ") - mean_out = np.array([float(a) for a in min_bounds]) - std_out = np.array([float(a) for a in max_bounds]) - coef_norm = min_in, max_in, mean_out, std_out - train_data_dir = args.train_data_dir - test_data_dir = args.test_data_dir - extra_data_dir = args.extra_data_dir - train_samples = [] - test_samples = [] - train_files = os.listdir(train_data_dir) - test_files = os.listdir(test_data_dir) - for file in train_files: - if file.startswith("press_"): - path = os.path.join(train_data_dir, file) - train_samples.append(path) - if extra_data_dir is not None: - extra_files = os.listdir(extra_data_dir) - for file in extra_files: - if file.startswith("press_"): - path = os.path.join(extra_data_dir, file) - train_samples.append(path) - for file in test_files: - if file.startswith("centroid_"): - path = os.path.join(test_data_dir, file) - test_samples.append(path) - val_samples = train_samples[-50:] - train_samples = train_samples[:-50] - train_dataset = [] - val_dataset = [] - test_dataset = [] - for k, s in enumerate(train_samples): - file_name_press = s - file_name_point = s.replace("press", "centroid") - if not (os.path.exists(file_name_press) or os.path.exists(file_name_point)): - continue - press = np.load(file_name_press) - points_press = np.load(file_name_point) - x = paddle.to_tensor(data=points_press) - y = paddle.to_tensor(data=press) - data = CustomData(x=x, y=y) - if norm is True: - data.x = ((data.x - min_in) / (max_in - min_in + 1e-08)).astype( - dtype="float32" - ) - data.y = ((data.y - mean_out) / (std_out + 1e-08)).astype(dtype="float32") - train_dataset.append(data) - for k, s in enumerate(val_samples): - file_name_press = s - file_name_point = s.replace("press", "centroid") - if not (os.path.exists(file_name_press) or os.path.exists(file_name_point)): - continue - press = np.load(file_name_press) - points_press = np.load(file_name_point) - x = paddle.to_tensor(data=points_press) - y = paddle.to_tensor(data=press) - data = CustomData(x=x, y=y) - if norm is True: - data.x = ((data.x - min_in) / (max_in - min_in + 1e-08)).astype( - dtype="float32" - ) - data.y = ((data.y - mean_out) / (std_out + 1e-08)).astype(dtype="float32") - val_dataset.append(data) - for k, s in enumerate(test_samples): - file_name_point = s - points_press = np.load(file_name_point) - x = paddle.to_tensor(data=points_press) - data = CustomData(x=x) - if norm is True: - data.x = ((data.x - min_in) / (max_in - min_in + 1e-08)).astype( - dtype="float32" - ) - test_dataset.append(data) - test_index = [ - int(os.path.basename(i).lstrip("centroid_").rstrip(".npy")) - for i in test_samples - ] - return train_dataset, val_dataset, test_dataset, coef_norm, test_index - - -def get_induced_graph(data, idx, num_hops): - subset, sub_edge_index, _, _ = k_hop_subgraph( - node_idx=idx, num_hops=num_hops, edge_index=data.edge_index, relabel_nodes=True - ) - return CustomData(x=data.x[subset], y=data.y[idx], edge_index=sub_edge_index) - - -def pc_normalize(pc): - centroid = paddle.mean(pc, axis=0) - pc = pc 
- centroid - m = paddle.max(x=paddle.sqrt(x=paddle.sum(pc**2, axis=1))) - pc = pc / m - return pc - - -def get_shape(data, max_n_point=3682, normalize=True, use_height=False): - if len(data.x) > max_n_point: - surf_indices = np.array(random.sample(range(len(data.x)), max_n_point)) - shape_pc = data.x[surf_indices].clone() - if normalize: - shape_pc = pc_normalize(shape_pc) - return shape_pc.astype(dtype="float32") - - -def create_edge_index_radius(data, r, max_neighbors=32): - data.edge_index = radius_graph( - x=data.pos, r=r, loop=True, max_num_neighbors=max_neighbors - ) - return data - - -class CustomData: - def __init__(self, **kwargs): - self.edge_index = None - for key, value in kwargs.items(): - setattr(self, key, value) - - -class GraphDataset(paddle.io.Dataset): - def __init__(self, datalist, use_height=False, use_cfd_mesh=True, r=None): - super().__init__() - self.datalist = datalist - self.use_height = use_height - if not use_cfd_mesh: - assert r is not None - for i in range(len(self.datalist)): - self.datalist[i] = create_edge_index_radius(self.datalist[i], r) - - def __len__(self): - return len(self.datalist) - - def get(self, idx): - data = self.datalist[idx] - shape = get_shape(data, use_height=self.use_height) - return self.datalist[idx], shape - - def __getitem__(self, idx): - return self.get(idx) - - def collate_fn(self, batch): - batch_data = [data for (data, _) in batch] - batch_shape = paddle.stack([shape for (_, shape) in batch], axis=0) - return batch_data, batch_shape - - -if __name__ == "__main__": - root = "./data/mlcfd_data/training_data" +import os +import random + +import numpy as np +import paddle +from utils.utils import k_hop_subgraph +from utils.utils import radius_graph + + +def read_data(args, norm=True): + with open(os.path.join(args.info_dir, "global_bounds.txt"), "r") as fp: + min_bounds = fp.readline().split(" ") + max_bounds = fp.readline().split(" ") + min_in = np.array([float(a) for a in min_bounds]) + max_in = np.array([float(a) for a in max_bounds]) + with open(os.path.join(args.info_dir, "train_pressure_mean_std.txt"), "r") as fp: + min_bounds = fp.readline().split(" ") + max_bounds = fp.readline().split(" ") + mean_out = np.array([float(a) for a in min_bounds]) + std_out = np.array([float(a) for a in max_bounds]) + coef_norm = min_in, max_in, mean_out, std_out + train_data_dir = args.train_data_dir + test_data_dir = args.test_data_dir + extra_data_dir = args.extra_data_dir + train_samples = [] + test_samples = [] + train_files = os.listdir(train_data_dir) + test_files = os.listdir(test_data_dir) + for file in train_files: + if file.startswith("press_"): + path = os.path.join(train_data_dir, file) + train_samples.append(path) + if extra_data_dir is not None: + extra_files = os.listdir(extra_data_dir) + for file in extra_files: + if file.startswith("press_"): + path = os.path.join(extra_data_dir, file) + train_samples.append(path) + for file in test_files: + if file.startswith("centroid_"): + path = os.path.join(test_data_dir, file) + test_samples.append(path) + val_samples = train_samples[-50:] + train_samples = train_samples[:-50] + train_dataset = [] + val_dataset = [] + test_dataset = [] + for k, s in enumerate(train_samples): + file_name_press = s + file_name_point = s.replace("press", "centroid") + if not (os.path.exists(file_name_press) or os.path.exists(file_name_point)): + continue + press = np.load(file_name_press) + points_press = np.load(file_name_point) + x = paddle.to_tensor(data=points_press) + y = paddle.to_tensor(data=press) + 
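`read_data` scales inputs with the global min/max bounds and standardizes pressure targets with the training mean/std, both guarded by a 1e-08 term. A worked toy example of the same scaling with made-up bounds (illustrative only; the real values come from global_bounds.txt and train_pressure_mean_std.txt):

import numpy as np

# Made-up stand-ins for global_bounds.txt and train_pressure_mean_std.txt.
min_in, max_in = np.array([0.0, 0.0, 0.0]), np.array([2.0, 4.0, 1.0])
mean_out, std_out = np.array([0.5]), np.array([0.1])

points = np.array([[1.0, 2.0, 0.5]])   # centroid coordinates (inputs)
press = np.array([0.7])                # pressure target

x = (points - min_in) / (max_in - min_in + 1e-08)   # inputs scaled to ~[0, 1]
y = (press - mean_out) / (std_out + 1e-08)          # target standardized
# x ≈ [[0.5, 0.5, 0.5]], y ≈ [2.0]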
data = CustomData(x=x, y=y) + if norm is True: + data.x = ((data.x - min_in) / (max_in - min_in + 1e-08)).astype( + dtype="float32" + ) + data.y = ((data.y - mean_out) / (std_out + 1e-08)).astype(dtype="float32") + train_dataset.append(data) + for k, s in enumerate(val_samples): + file_name_press = s + file_name_point = s.replace("press", "centroid") + if not (os.path.exists(file_name_press) or os.path.exists(file_name_point)): + continue + press = np.load(file_name_press) + points_press = np.load(file_name_point) + x = paddle.to_tensor(data=points_press) + y = paddle.to_tensor(data=press) + data = CustomData(x=x, y=y) + if norm is True: + data.x = ((data.x - min_in) / (max_in - min_in + 1e-08)).astype( + dtype="float32" + ) + data.y = ((data.y - mean_out) / (std_out + 1e-08)).astype(dtype="float32") + val_dataset.append(data) + for k, s in enumerate(test_samples): + file_name_point = s + points_press = np.load(file_name_point) + x = paddle.to_tensor(data=points_press) + data = CustomData(x=x) + if norm is True: + data.x = ((data.x - min_in) / (max_in - min_in + 1e-08)).astype( + dtype="float32" + ) + test_dataset.append(data) + test_index = [ + int(os.path.basename(i).lstrip("centroid_").rstrip(".npy")) + for i in test_samples + ] + return train_dataset, val_dataset, test_dataset, coef_norm, test_index + + +def get_induced_graph(data, idx, num_hops): + subset, sub_edge_index, _, _ = k_hop_subgraph( + node_idx=idx, num_hops=num_hops, edge_index=data.edge_index, relabel_nodes=True + ) + return CustomData(x=data.x[subset], y=data.y[idx], edge_index=sub_edge_index) + + +def pc_normalize(pc): + centroid = paddle.mean(pc, axis=0) + pc = pc - centroid + m = paddle.max(x=paddle.sqrt(x=paddle.sum(pc**2, axis=1))) + pc = pc / m + return pc + + +def get_shape(data, max_n_point=3682, normalize=True, use_height=False): + if len(data.x) > max_n_point: + surf_indices = np.array(random.sample(range(len(data.x)), max_n_point)) + shape_pc = data.x[surf_indices].clone() + if normalize: + shape_pc = pc_normalize(shape_pc) + return shape_pc.astype(dtype="float32") + + +def create_edge_index_radius(data, r, max_neighbors=32): + data.edge_index = radius_graph( + x=data.pos, r=r, loop=True, max_num_neighbors=max_neighbors + ) + return data + + +class CustomData: + def __init__(self, **kwargs): + self.edge_index = None + for key, value in kwargs.items(): + setattr(self, key, value) + + +class GraphDataset(paddle.io.Dataset): + def __init__(self, datalist, use_height=False, use_cfd_mesh=True, r=None): + super().__init__() + self.datalist = datalist + self.use_height = use_height + if not use_cfd_mesh: + assert r is not None + for i in range(len(self.datalist)): + self.datalist[i] = create_edge_index_radius(self.datalist[i], r) + + def __len__(self): + return len(self.datalist) + + def get(self, idx): + data = self.datalist[idx] + shape = get_shape(data, use_height=self.use_height) + return self.datalist[idx], shape + + def __getitem__(self, idx): + return self.get(idx) + + def collate_fn(self, batch): + batch_data = [data for (data, _) in batch] + batch_shape = paddle.stack([shape for (_, shape) in batch], axis=0) + return batch_data, batch_shape + + +if __name__ == "__main__": + root = "./data/mlcfd_data/training_data" diff --git a/jointContribution/IJCAI_2024/bju/download_dataset.ipynb b/jointContribution/IJCAI_2024/bju/download_dataset.ipynb index f4c0104739..e29d395141 100644 --- a/jointContribution/IJCAI_2024/bju/download_dataset.ipynb +++ b/jointContribution/IJCAI_2024/bju/download_dataset.ipynb @@ -1,2393 
+1,2393 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "SqRGNIxYlTr7" - }, - "source": [ - "# **官方版本数据导入**" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "yHcQ9wurwwFX" - }, - "source": [ - "赛道一" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "collapsed": true, - "executionInfo": { - "elapsed": 82228, - "status": "ok", - "timestamp": 1720765083865, - "user": { - "displayName": "Yuanwei Bin", - "userId": "04820485600131748919" - }, - "user_tz": -480 - }, - "id": "oL_v8aw8lZ72", - "outputId": "2fd464ba-e35f-423a-f157-5b20a5f71e3f" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--2024-07-12 06:16:43-- https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=dc3aa13c-c3a9-458f-983a-8586798cb635&at=APZUnTX25XMxi-z-3wBcgR93IGsL%3A1719235792953\n", - "Resolving drive.usercontent.google.com (drive.usercontent.google.com)... 142.251.12.132, 2404:6800:4003:c11::84\n", - "Connecting to drive.usercontent.google.com (drive.usercontent.google.com)|142.251.12.132|:443... connected.\n", - "HTTP request sent, awaiting response... 200 OK\n", - "Length: 1084182095 (1.0G) [application/octet-stream]\n", - "Saving to: ‘Dataset.zip’\n", - "\n", - "Dataset.zip 100%[===================>] 1.01G 27.5MB/s in 45s \n", - "\n", - "2024-07-12 06:17:30 (23.0 MB/s) - ‘Dataset.zip’ saved [1084182095/1084182095]\n", - "\n", - "Archive: Dataset.zip\n", - " creating: Dataset/\n", - " creating: Dataset/Testset_track_A/\n", - " creating: Dataset/Testset_track_A/Inference/\n", - " inflating: Dataset/Testset_track_A/Inference/mesh_658.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_659.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_660.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_662.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_663.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_664.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_665.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_666.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_667.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_668.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_672.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_673.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_674.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_675.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_676.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_677.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_678.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_679.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_681.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_683.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_684.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_686.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_687.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_688.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_689.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_690.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_691.ply \n", - " inflating: 
Dataset/Testset_track_A/Inference/mesh_692.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_693.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_695.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_696.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_697.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_700.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_701.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_702.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_703.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_704.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_705.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_708.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_709.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_710.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_711.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_712.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_713.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_715.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_717.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_718.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_719.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_721.ply \n", - " inflating: Dataset/Testset_track_A/Inference/mesh_722.ply \n", - " creating: Dataset/Testset_track_B/\n", - " creating: Dataset/Testset_track_B/Auxiliary/\n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_1.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_10.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_11.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_12.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_13.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_14.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_15.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_16.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_17.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_18.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_19.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_2.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_20.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_21.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_22.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_23.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_24.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_25.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_26.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_27.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_28.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_29.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_3.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_30.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_31.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_32.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_33.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_34.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_35.npy \n", - " inflating: 
Dataset/Testset_track_B/Auxiliary/area_36.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_37.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_38.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_39.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_4.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_40.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_41.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_42.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_43.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_44.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_45.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_46.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_47.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_48.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_49.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_5.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_50.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_6.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_7.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_8.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_9.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/area_bounds.txt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/global_bounds.txt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_1.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_10.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_11.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_12.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_13.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_14.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_15.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_16.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_17.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_18.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_19.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_2.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_20.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_21.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_22.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_23.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_24.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_25.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_26.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_27.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_28.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_29.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_3.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_30.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_31.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_32.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_33.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_34.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_35.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_36.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_37.pt \n", - " inflating: 
Dataset/Testset_track_B/Auxiliary/info_38.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_39.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_4.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_40.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_41.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_42.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_43.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_44.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_45.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_46.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_47.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_48.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_49.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_5.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_50.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_6.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_7.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_8.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_9.pt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/info_bounds.txt \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_1.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_10.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_11.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_12.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_13.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_14.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_15.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_16.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_17.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_18.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_19.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_2.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_20.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_21.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_22.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_23.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_24.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_25.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_26.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_27.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_28.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_29.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_3.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_30.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_31.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_32.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_33.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_34.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_35.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_36.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_37.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_38.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_39.npy \n", - " inflating: 
Dataset/Testset_track_B/Auxiliary/normal_4.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_40.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_41.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_42.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_43.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_44.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_45.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_46.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_47.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_48.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_49.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_5.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_50.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_6.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_7.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_8.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/normal_9.npy \n", - " inflating: Dataset/Testset_track_B/Auxiliary/train_pressure_mean_std.txt \n", - " inflating: Dataset/Testset_track_B/IJCAI_data_doc_v1.pdf \n", - " creating: Dataset/Testset_track_B/Inference/\n", - " inflating: Dataset/Testset_track_B/Inference/centroid_1.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_10.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_11.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_12.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_13.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_14.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_15.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_16.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_17.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_18.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_19.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_2.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_20.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_21.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_22.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_23.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_24.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_25.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_26.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_27.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_28.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_29.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_3.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_30.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_31.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_32.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_33.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_34.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_35.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_36.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_37.npy \n", - " inflating: 
Dataset/Testset_track_B/Inference/centroid_38.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_39.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_4.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_40.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_41.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_42.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_43.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_44.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_45.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_46.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_47.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_48.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_49.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_5.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_50.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_6.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_7.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_8.npy \n", - " inflating: Dataset/Testset_track_B/Inference/centroid_9.npy \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_1.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_10.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_11.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_12.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_13.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_14.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_15.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_16.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_17.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_18.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_19.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_2.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_20.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_21.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_22.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_23.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_24.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_25.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_26.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_27.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_28.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_29.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_3.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_30.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_31.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_32.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_33.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_34.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_35.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_36.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_37.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_38.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_39.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_4.ply \n", - " 
inflating: Dataset/Testset_track_B/Inference/mesh_40.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_41.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_42.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_43.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_44.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_45.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_46.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_47.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_48.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_49.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_5.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_50.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_6.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_7.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_8.ply \n", - " inflating: Dataset/Testset_track_B/Inference/mesh_9.ply \n", - " inflating: Dataset/Testset_track_B/track_B_data_dict.xlsx \n", - " creating: Dataset/Training_data/\n", - " creating: Dataset/Training_data/Feature/\n", - " inflating: Dataset/Training_data/Feature/mesh_001.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_002.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_004.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_005.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_006.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_007.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_008.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_010.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_012.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_013.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_017.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_018.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_021.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_022.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_023.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_025.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_026.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_027.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_028.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_029.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_030.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_031.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_032.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_034.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_035.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_039.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_040.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_043.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_044.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_045.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_046.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_047.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_048.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_049.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_050.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_051.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_052.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_054.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_055.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_056.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_058.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_059.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_060.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_061.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_062.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_063.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_064.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_065.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_067.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_069.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_070.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_071.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_072.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_073.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_074.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_075.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_076.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_077.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_078.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_079.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_080.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_081.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_083.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_084.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_085.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_086.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_087.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_088.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_090.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_091.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_092.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_094.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_095.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_096.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_097.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_100.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_101.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_102.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_105.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_106.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_107.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_109.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_110.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_111.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_112.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_113.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_114.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_115.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_116.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_117.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_118.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_119.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_120.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_121.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_123.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_124.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_125.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_126.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_127.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_128.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_129.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_130.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_131.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_133.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_134.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_136.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_137.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_138.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_139.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_140.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_141.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_142.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_143.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_144.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_145.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_146.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_147.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_148.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_149.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_150.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_151.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_152.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_153.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_155.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_156.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_157.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_158.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_159.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_160.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_161.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_162.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_163.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_165.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_166.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_170.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_172.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_173.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_175.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_176.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_177.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_178.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_179.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_180.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_181.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_182.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_183.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_184.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_186.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_190.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_191.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_192.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_193.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_195.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_196.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_198.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_199.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_200.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_201.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_202.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_203.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_205.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_207.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_210.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_211.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_212.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_213.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_214.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_215.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_217.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_219.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_220.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_221.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_222.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_223.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_224.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_225.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_227.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_228.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_229.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_230.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_231.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_232.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_233.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_234.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_235.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_236.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_237.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_241.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_243.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_244.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_245.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_246.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_247.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_248.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_249.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_251.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_252.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_253.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_255.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_257.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_258.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_259.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_260.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_261.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_262.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_263.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_264.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_266.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_267.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_268.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_269.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_271.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_272.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_273.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_274.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_275.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_276.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_277.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_278.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_279.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_280.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_281.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_282.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_283.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_285.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_286.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_289.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_290.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_291.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_292.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_293.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_294.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_295.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_296.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_297.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_298.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_299.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_300.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_301.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_302.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_304.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_305.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_306.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_308.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_309.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_310.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_311.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_312.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_313.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_314.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_315.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_319.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_320.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_321.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_322.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_323.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_324.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_325.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_327.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_328.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_329.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_331.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_332.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_333.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_334.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_335.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_337.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_338.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_339.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_340.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_341.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_344.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_345.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_347.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_348.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_349.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_350.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_352.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_353.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_354.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_355.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_356.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_357.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_358.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_360.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_362.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_364.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_365.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_366.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_367.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_369.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_371.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_372.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_373.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_374.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_375.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_376.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_378.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_379.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_380.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_381.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_384.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_385.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_389.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_392.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_393.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_397.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_398.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_399.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_401.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_402.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_403.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_404.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_405.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_407.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_408.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_410.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_412.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_413.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_414.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_415.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_417.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_418.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_419.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_420.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_422.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_424.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_425.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_427.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_430.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_431.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_433.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_435.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_436.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_437.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_439.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_440.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_443.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_444.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_446.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_447.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_448.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_449.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_450.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_451.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_452.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_453.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_454.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_455.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_456.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_457.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_459.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_460.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_462.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_463.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_464.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_465.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_466.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_467.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_468.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_469.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_470.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_472.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_473.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_474.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_475.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_476.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_478.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_479.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_480.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_482.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_483.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_486.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_487.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_488.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_490.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_493.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_494.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_495.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_496.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_497.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_498.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_499.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_501.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_502.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_503.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_504.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_505.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_507.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_508.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_509.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_511.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_512.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_513.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_514.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_515.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_516.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_518.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_519.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_521.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_522.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_523.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_524.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_525.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_527.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_529.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_530.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_532.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_533.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_536.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_538.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_539.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_540.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_542.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_543.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_545.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_547.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_548.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_549.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_550.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_551.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_552.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_553.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_554.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_555.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_560.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_561.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_562.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_564.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_565.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_566.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_567.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_568.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_569.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_572.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_573.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_574.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_576.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_577.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_579.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_581.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_582.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_583.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_584.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_587.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_588.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_589.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_591.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_593.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_594.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_595.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_596.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_597.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_598.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_600.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_602.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_604.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_608.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_610.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_611.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_612.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_613.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_615.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_616.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_617.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_618.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_620.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_621.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_622.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_623.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_625.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_626.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_627.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_628.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_629.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_630.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_631.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_632.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_633.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_634.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_635.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_636.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_638.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_639.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_640.ply \n", - " inflating: 
Dataset/Training_data/Feature/mesh_641.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_642.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_643.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_644.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_645.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_646.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_647.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_648.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_649.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_651.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_652.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_654.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_655.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_656.ply \n", - " inflating: Dataset/Training_data/Feature/mesh_657.ply \n", - " creating: Dataset/Training_data/Label/\n", - " inflating: Dataset/Training_data/Label/press_001.npy \n", - " inflating: Dataset/Training_data/Label/press_002.npy \n", - " inflating: Dataset/Training_data/Label/press_004.npy \n", - " inflating: Dataset/Training_data/Label/press_005.npy \n", - " inflating: Dataset/Training_data/Label/press_006.npy \n", - " inflating: Dataset/Training_data/Label/press_007.npy \n", - " inflating: Dataset/Training_data/Label/press_008.npy \n", - " inflating: Dataset/Training_data/Label/press_010.npy \n", - " inflating: Dataset/Training_data/Label/press_012.npy \n", - " inflating: Dataset/Training_data/Label/press_013.npy \n", - " inflating: Dataset/Training_data/Label/press_017.npy \n", - " inflating: Dataset/Training_data/Label/press_018.npy \n", - " inflating: Dataset/Training_data/Label/press_021.npy \n", - " inflating: Dataset/Training_data/Label/press_022.npy \n", - " inflating: Dataset/Training_data/Label/press_023.npy \n", - " inflating: Dataset/Training_data/Label/press_025.npy \n", - " inflating: Dataset/Training_data/Label/press_026.npy \n", - " inflating: Dataset/Training_data/Label/press_027.npy \n", - " inflating: Dataset/Training_data/Label/press_028.npy \n", - " inflating: Dataset/Training_data/Label/press_029.npy \n", - " inflating: Dataset/Training_data/Label/press_030.npy \n", - " inflating: Dataset/Training_data/Label/press_031.npy \n", - " inflating: Dataset/Training_data/Label/press_032.npy \n", - " inflating: Dataset/Training_data/Label/press_034.npy \n", - " inflating: Dataset/Training_data/Label/press_035.npy \n", - " inflating: Dataset/Training_data/Label/press_039.npy \n", - " inflating: Dataset/Training_data/Label/press_040.npy \n", - " inflating: Dataset/Training_data/Label/press_043.npy \n", - " inflating: Dataset/Training_data/Label/press_044.npy \n", - " inflating: Dataset/Training_data/Label/press_045.npy \n", - " inflating: Dataset/Training_data/Label/press_046.npy \n", - " inflating: Dataset/Training_data/Label/press_047.npy \n", - " inflating: Dataset/Training_data/Label/press_048.npy \n", - " inflating: Dataset/Training_data/Label/press_049.npy \n", - " inflating: Dataset/Training_data/Label/press_050.npy \n", - " inflating: Dataset/Training_data/Label/press_051.npy \n", - " inflating: Dataset/Training_data/Label/press_052.npy \n", - " inflating: Dataset/Training_data/Label/press_054.npy \n", - " inflating: Dataset/Training_data/Label/press_055.npy \n", - " inflating: Dataset/Training_data/Label/press_056.npy \n", - " inflating: Dataset/Training_data/Label/press_058.npy \n", - " inflating: 
Dataset/Training_data/Label/press_059.npy \n", - " inflating: Dataset/Training_data/Label/press_060.npy \n", - " inflating: Dataset/Training_data/Label/press_061.npy \n", - " inflating: Dataset/Training_data/Label/press_062.npy \n", - " inflating: Dataset/Training_data/Label/press_063.npy \n", - " inflating: Dataset/Training_data/Label/press_064.npy \n", - " inflating: Dataset/Training_data/Label/press_065.npy \n", - " inflating: Dataset/Training_data/Label/press_067.npy \n", - " inflating: Dataset/Training_data/Label/press_069.npy \n", - " inflating: Dataset/Training_data/Label/press_070.npy \n", - " inflating: Dataset/Training_data/Label/press_071.npy \n", - " inflating: Dataset/Training_data/Label/press_072.npy \n", - " inflating: Dataset/Training_data/Label/press_073.npy \n", - " inflating: Dataset/Training_data/Label/press_074.npy \n", - " inflating: Dataset/Training_data/Label/press_075.npy \n", - " inflating: Dataset/Training_data/Label/press_076.npy \n", - " inflating: Dataset/Training_data/Label/press_077.npy \n", - " inflating: Dataset/Training_data/Label/press_078.npy \n", - " inflating: Dataset/Training_data/Label/press_079.npy \n", - " inflating: Dataset/Training_data/Label/press_080.npy \n", - " inflating: Dataset/Training_data/Label/press_081.npy \n", - " inflating: Dataset/Training_data/Label/press_083.npy \n", - " inflating: Dataset/Training_data/Label/press_084.npy \n", - " inflating: Dataset/Training_data/Label/press_085.npy \n", - " inflating: Dataset/Training_data/Label/press_086.npy \n", - " inflating: Dataset/Training_data/Label/press_087.npy \n", - " inflating: Dataset/Training_data/Label/press_088.npy \n", - " inflating: Dataset/Training_data/Label/press_090.npy \n", - " inflating: Dataset/Training_data/Label/press_091.npy \n", - " inflating: Dataset/Training_data/Label/press_092.npy \n", - " inflating: Dataset/Training_data/Label/press_094.npy \n", - " inflating: Dataset/Training_data/Label/press_095.npy \n", - " inflating: Dataset/Training_data/Label/press_096.npy \n", - " inflating: Dataset/Training_data/Label/press_097.npy \n", - " inflating: Dataset/Training_data/Label/press_100.npy \n", - " inflating: Dataset/Training_data/Label/press_101.npy \n", - " inflating: Dataset/Training_data/Label/press_102.npy \n", - " inflating: Dataset/Training_data/Label/press_105.npy \n", - " inflating: Dataset/Training_data/Label/press_106.npy \n", - " inflating: Dataset/Training_data/Label/press_107.npy \n", - " inflating: Dataset/Training_data/Label/press_109.npy \n", - " inflating: Dataset/Training_data/Label/press_110.npy \n", - " inflating: Dataset/Training_data/Label/press_111.npy \n", - " inflating: Dataset/Training_data/Label/press_112.npy \n", - " inflating: Dataset/Training_data/Label/press_113.npy \n", - " inflating: Dataset/Training_data/Label/press_114.npy \n", - " inflating: Dataset/Training_data/Label/press_115.npy \n", - " inflating: Dataset/Training_data/Label/press_116.npy \n", - " inflating: Dataset/Training_data/Label/press_117.npy \n", - " inflating: Dataset/Training_data/Label/press_118.npy \n", - " inflating: Dataset/Training_data/Label/press_119.npy \n", - " inflating: Dataset/Training_data/Label/press_120.npy \n", - " inflating: Dataset/Training_data/Label/press_121.npy \n", - " inflating: Dataset/Training_data/Label/press_123.npy \n", - " inflating: Dataset/Training_data/Label/press_124.npy \n", - " inflating: Dataset/Training_data/Label/press_125.npy \n", - " inflating: Dataset/Training_data/Label/press_126.npy \n", - " inflating: 
Dataset/Training_data/Label/press_127.npy \n", - " inflating: Dataset/Training_data/Label/press_128.npy \n", - " inflating: Dataset/Training_data/Label/press_129.npy \n", - " inflating: Dataset/Training_data/Label/press_130.npy \n", - " inflating: Dataset/Training_data/Label/press_131.npy \n", - " inflating: Dataset/Training_data/Label/press_133.npy \n", - " inflating: Dataset/Training_data/Label/press_134.npy \n", - " inflating: Dataset/Training_data/Label/press_136.npy \n", - " inflating: Dataset/Training_data/Label/press_137.npy \n", - " inflating: Dataset/Training_data/Label/press_138.npy \n", - " inflating: Dataset/Training_data/Label/press_139.npy \n", - " inflating: Dataset/Training_data/Label/press_140.npy \n", - " inflating: Dataset/Training_data/Label/press_141.npy \n", - " inflating: Dataset/Training_data/Label/press_142.npy \n", - " inflating: Dataset/Training_data/Label/press_143.npy \n", - " inflating: Dataset/Training_data/Label/press_144.npy \n", - " inflating: Dataset/Training_data/Label/press_145.npy \n", - " inflating: Dataset/Training_data/Label/press_146.npy \n", - " inflating: Dataset/Training_data/Label/press_147.npy \n", - " inflating: Dataset/Training_data/Label/press_148.npy \n", - " inflating: Dataset/Training_data/Label/press_149.npy \n", - " inflating: Dataset/Training_data/Label/press_150.npy \n", - " inflating: Dataset/Training_data/Label/press_151.npy \n", - " inflating: Dataset/Training_data/Label/press_152.npy \n", - " inflating: Dataset/Training_data/Label/press_153.npy \n", - " inflating: Dataset/Training_data/Label/press_155.npy \n", - " inflating: Dataset/Training_data/Label/press_156.npy \n", - " inflating: Dataset/Training_data/Label/press_157.npy \n", - " inflating: Dataset/Training_data/Label/press_158.npy \n", - " inflating: Dataset/Training_data/Label/press_159.npy \n", - " inflating: Dataset/Training_data/Label/press_160.npy \n", - " inflating: Dataset/Training_data/Label/press_161.npy \n", - " inflating: Dataset/Training_data/Label/press_162.npy \n", - " inflating: Dataset/Training_data/Label/press_163.npy \n", - " inflating: Dataset/Training_data/Label/press_165.npy \n", - " inflating: Dataset/Training_data/Label/press_166.npy \n", - " inflating: Dataset/Training_data/Label/press_170.npy \n", - " inflating: Dataset/Training_data/Label/press_172.npy \n", - " inflating: Dataset/Training_data/Label/press_173.npy \n", - " inflating: Dataset/Training_data/Label/press_175.npy \n", - " inflating: Dataset/Training_data/Label/press_176.npy \n", - " inflating: Dataset/Training_data/Label/press_177.npy \n", - " inflating: Dataset/Training_data/Label/press_178.npy \n", - " inflating: Dataset/Training_data/Label/press_179.npy \n", - " inflating: Dataset/Training_data/Label/press_180.npy \n", - " inflating: Dataset/Training_data/Label/press_181.npy \n", - " inflating: Dataset/Training_data/Label/press_182.npy \n", - " inflating: Dataset/Training_data/Label/press_183.npy \n", - " inflating: Dataset/Training_data/Label/press_184.npy \n", - " inflating: Dataset/Training_data/Label/press_186.npy \n", - " inflating: Dataset/Training_data/Label/press_190.npy \n", - " inflating: Dataset/Training_data/Label/press_191.npy \n", - " inflating: Dataset/Training_data/Label/press_192.npy \n", - " inflating: Dataset/Training_data/Label/press_193.npy \n", - " inflating: Dataset/Training_data/Label/press_195.npy \n", - " inflating: Dataset/Training_data/Label/press_196.npy \n", - " inflating: Dataset/Training_data/Label/press_198.npy \n", - " inflating: 
Dataset/Training_data/Label/press_199.npy \n", - " inflating: Dataset/Training_data/Label/press_200.npy \n", - " inflating: Dataset/Training_data/Label/press_201.npy \n", - " inflating: Dataset/Training_data/Label/press_202.npy \n", - " inflating: Dataset/Training_data/Label/press_203.npy \n", - " inflating: Dataset/Training_data/Label/press_205.npy \n", - " inflating: Dataset/Training_data/Label/press_207.npy \n", - " inflating: Dataset/Training_data/Label/press_210.npy \n", - " inflating: Dataset/Training_data/Label/press_211.npy \n", - " inflating: Dataset/Training_data/Label/press_212.npy \n", - " inflating: Dataset/Training_data/Label/press_213.npy \n", - " inflating: Dataset/Training_data/Label/press_214.npy \n", - " inflating: Dataset/Training_data/Label/press_215.npy \n", - " inflating: Dataset/Training_data/Label/press_217.npy \n", - " inflating: Dataset/Training_data/Label/press_219.npy \n", - " inflating: Dataset/Training_data/Label/press_220.npy \n", - " inflating: Dataset/Training_data/Label/press_221.npy \n", - " inflating: Dataset/Training_data/Label/press_222.npy \n", - " inflating: Dataset/Training_data/Label/press_223.npy \n", - " inflating: Dataset/Training_data/Label/press_224.npy \n", - " inflating: Dataset/Training_data/Label/press_225.npy \n", - " inflating: Dataset/Training_data/Label/press_227.npy \n", - " inflating: Dataset/Training_data/Label/press_228.npy \n", - " inflating: Dataset/Training_data/Label/press_229.npy \n", - " inflating: Dataset/Training_data/Label/press_230.npy \n", - " inflating: Dataset/Training_data/Label/press_231.npy \n", - " inflating: Dataset/Training_data/Label/press_232.npy \n", - " inflating: Dataset/Training_data/Label/press_233.npy \n", - " inflating: Dataset/Training_data/Label/press_234.npy \n", - " inflating: Dataset/Training_data/Label/press_235.npy \n", - " inflating: Dataset/Training_data/Label/press_236.npy \n", - " inflating: Dataset/Training_data/Label/press_237.npy \n", - " inflating: Dataset/Training_data/Label/press_241.npy \n", - " inflating: Dataset/Training_data/Label/press_243.npy \n", - " inflating: Dataset/Training_data/Label/press_244.npy \n", - " inflating: Dataset/Training_data/Label/press_245.npy \n", - " inflating: Dataset/Training_data/Label/press_246.npy \n", - " inflating: Dataset/Training_data/Label/press_247.npy \n", - " inflating: Dataset/Training_data/Label/press_248.npy \n", - " inflating: Dataset/Training_data/Label/press_249.npy \n", - " inflating: Dataset/Training_data/Label/press_251.npy \n", - " inflating: Dataset/Training_data/Label/press_252.npy \n", - " inflating: Dataset/Training_data/Label/press_253.npy \n", - " inflating: Dataset/Training_data/Label/press_255.npy \n", - " inflating: Dataset/Training_data/Label/press_257.npy \n", - " inflating: Dataset/Training_data/Label/press_258.npy \n", - " inflating: Dataset/Training_data/Label/press_259.npy \n", - " inflating: Dataset/Training_data/Label/press_260.npy \n", - " inflating: Dataset/Training_data/Label/press_261.npy \n", - " inflating: Dataset/Training_data/Label/press_262.npy \n", - " inflating: Dataset/Training_data/Label/press_263.npy \n", - " inflating: Dataset/Training_data/Label/press_264.npy \n", - " inflating: Dataset/Training_data/Label/press_266.npy \n", - " inflating: Dataset/Training_data/Label/press_267.npy \n", - " inflating: Dataset/Training_data/Label/press_268.npy \n", - " inflating: Dataset/Training_data/Label/press_269.npy \n", - " inflating: Dataset/Training_data/Label/press_271.npy \n", - " inflating: 
Dataset/Training_data/Label/press_272.npy \n", - " inflating: Dataset/Training_data/Label/press_273.npy \n", - " inflating: Dataset/Training_data/Label/press_274.npy \n", - " inflating: Dataset/Training_data/Label/press_275.npy \n", - " inflating: Dataset/Training_data/Label/press_276.npy \n", - " inflating: Dataset/Training_data/Label/press_277.npy \n", - " inflating: Dataset/Training_data/Label/press_278.npy \n", - " inflating: Dataset/Training_data/Label/press_279.npy \n", - " inflating: Dataset/Training_data/Label/press_280.npy \n", - " inflating: Dataset/Training_data/Label/press_281.npy \n", - " inflating: Dataset/Training_data/Label/press_282.npy \n", - " inflating: Dataset/Training_data/Label/press_283.npy \n", - " inflating: Dataset/Training_data/Label/press_285.npy \n", - " inflating: Dataset/Training_data/Label/press_286.npy \n", - " inflating: Dataset/Training_data/Label/press_289.npy \n", - " inflating: Dataset/Training_data/Label/press_290.npy \n", - " inflating: Dataset/Training_data/Label/press_291.npy \n", - " inflating: Dataset/Training_data/Label/press_292.npy \n", - " inflating: Dataset/Training_data/Label/press_293.npy \n", - " inflating: Dataset/Training_data/Label/press_294.npy \n", - " inflating: Dataset/Training_data/Label/press_295.npy \n", - " inflating: Dataset/Training_data/Label/press_296.npy \n", - " inflating: Dataset/Training_data/Label/press_297.npy \n", - " inflating: Dataset/Training_data/Label/press_298.npy \n", - " inflating: Dataset/Training_data/Label/press_299.npy \n", - " inflating: Dataset/Training_data/Label/press_300.npy \n", - " inflating: Dataset/Training_data/Label/press_301.npy \n", - " inflating: Dataset/Training_data/Label/press_302.npy \n", - " inflating: Dataset/Training_data/Label/press_304.npy \n", - " inflating: Dataset/Training_data/Label/press_305.npy \n", - " inflating: Dataset/Training_data/Label/press_306.npy \n", - " inflating: Dataset/Training_data/Label/press_308.npy \n", - " inflating: Dataset/Training_data/Label/press_309.npy \n", - " inflating: Dataset/Training_data/Label/press_310.npy \n", - " inflating: Dataset/Training_data/Label/press_311.npy \n", - " inflating: Dataset/Training_data/Label/press_312.npy \n", - " inflating: Dataset/Training_data/Label/press_313.npy \n", - " inflating: Dataset/Training_data/Label/press_314.npy \n", - " inflating: Dataset/Training_data/Label/press_315.npy \n", - " inflating: Dataset/Training_data/Label/press_319.npy \n", - " inflating: Dataset/Training_data/Label/press_320.npy \n", - " inflating: Dataset/Training_data/Label/press_321.npy \n", - " inflating: Dataset/Training_data/Label/press_322.npy \n", - " inflating: Dataset/Training_data/Label/press_323.npy \n", - " inflating: Dataset/Training_data/Label/press_324.npy \n", - " inflating: Dataset/Training_data/Label/press_325.npy \n", - " inflating: Dataset/Training_data/Label/press_327.npy \n", - " inflating: Dataset/Training_data/Label/press_328.npy \n", - " inflating: Dataset/Training_data/Label/press_329.npy \n", - " inflating: Dataset/Training_data/Label/press_331.npy \n", - " inflating: Dataset/Training_data/Label/press_332.npy \n", - " inflating: Dataset/Training_data/Label/press_333.npy \n", - " inflating: Dataset/Training_data/Label/press_334.npy \n", - " inflating: Dataset/Training_data/Label/press_335.npy \n", - " inflating: Dataset/Training_data/Label/press_337.npy \n", - " inflating: Dataset/Training_data/Label/press_338.npy \n", - " inflating: Dataset/Training_data/Label/press_339.npy \n", - " inflating: 
Dataset/Training_data/Label/press_340.npy \n", - " inflating: Dataset/Training_data/Label/press_341.npy \n", - " inflating: Dataset/Training_data/Label/press_344.npy \n", - " inflating: Dataset/Training_data/Label/press_345.npy \n", - " inflating: Dataset/Training_data/Label/press_347.npy \n", - " inflating: Dataset/Training_data/Label/press_348.npy \n", - " inflating: Dataset/Training_data/Label/press_349.npy \n", - " inflating: Dataset/Training_data/Label/press_350.npy \n", - " inflating: Dataset/Training_data/Label/press_352.npy \n", - " inflating: Dataset/Training_data/Label/press_353.npy \n", - " inflating: Dataset/Training_data/Label/press_354.npy \n", - " inflating: Dataset/Training_data/Label/press_355.npy \n", - " inflating: Dataset/Training_data/Label/press_356.npy \n", - " inflating: Dataset/Training_data/Label/press_357.npy \n", - " inflating: Dataset/Training_data/Label/press_358.npy \n", - " inflating: Dataset/Training_data/Label/press_360.npy \n", - " inflating: Dataset/Training_data/Label/press_362.npy \n", - " inflating: Dataset/Training_data/Label/press_364.npy \n", - " inflating: Dataset/Training_data/Label/press_365.npy \n", - " inflating: Dataset/Training_data/Label/press_366.npy \n", - " inflating: Dataset/Training_data/Label/press_367.npy \n", - " inflating: Dataset/Training_data/Label/press_369.npy \n", - " inflating: Dataset/Training_data/Label/press_371.npy \n", - " inflating: Dataset/Training_data/Label/press_372.npy \n", - " inflating: Dataset/Training_data/Label/press_373.npy \n", - " inflating: Dataset/Training_data/Label/press_374.npy \n", - " inflating: Dataset/Training_data/Label/press_375.npy \n", - " inflating: Dataset/Training_data/Label/press_376.npy \n", - " inflating: Dataset/Training_data/Label/press_378.npy \n", - " inflating: Dataset/Training_data/Label/press_379.npy \n", - " inflating: Dataset/Training_data/Label/press_380.npy \n", - " inflating: Dataset/Training_data/Label/press_381.npy \n", - " inflating: Dataset/Training_data/Label/press_384.npy \n", - " inflating: Dataset/Training_data/Label/press_385.npy \n", - " inflating: Dataset/Training_data/Label/press_389.npy \n", - " inflating: Dataset/Training_data/Label/press_392.npy \n", - " inflating: Dataset/Training_data/Label/press_393.npy \n", - " inflating: Dataset/Training_data/Label/press_397.npy \n", - " inflating: Dataset/Training_data/Label/press_398.npy \n", - " inflating: Dataset/Training_data/Label/press_399.npy \n", - " inflating: Dataset/Training_data/Label/press_401.npy \n", - " inflating: Dataset/Training_data/Label/press_402.npy \n", - " inflating: Dataset/Training_data/Label/press_403.npy \n", - " inflating: Dataset/Training_data/Label/press_404.npy \n", - " inflating: Dataset/Training_data/Label/press_405.npy \n", - " inflating: Dataset/Training_data/Label/press_407.npy \n", - " inflating: Dataset/Training_data/Label/press_408.npy \n", - " inflating: Dataset/Training_data/Label/press_410.npy \n", - " inflating: Dataset/Training_data/Label/press_412.npy \n", - " inflating: Dataset/Training_data/Label/press_413.npy \n", - " inflating: Dataset/Training_data/Label/press_414.npy \n", - " inflating: Dataset/Training_data/Label/press_415.npy \n", - " inflating: Dataset/Training_data/Label/press_417.npy \n", - " inflating: Dataset/Training_data/Label/press_418.npy \n", - " inflating: Dataset/Training_data/Label/press_419.npy \n", - " inflating: Dataset/Training_data/Label/press_420.npy \n", - " inflating: Dataset/Training_data/Label/press_422.npy \n", - " inflating: 
Dataset/Training_data/Label/press_424.npy \n", - " inflating: Dataset/Training_data/Label/press_425.npy \n", - " inflating: Dataset/Training_data/Label/press_427.npy \n", - " inflating: Dataset/Training_data/Label/press_430.npy \n", - " inflating: Dataset/Training_data/Label/press_431.npy \n", - " inflating: Dataset/Training_data/Label/press_433.npy \n", - " inflating: Dataset/Training_data/Label/press_435.npy \n", - " inflating: Dataset/Training_data/Label/press_436.npy \n", - " inflating: Dataset/Training_data/Label/press_437.npy \n", - " inflating: Dataset/Training_data/Label/press_439.npy \n", - " inflating: Dataset/Training_data/Label/press_440.npy \n", - " inflating: Dataset/Training_data/Label/press_443.npy \n", - " inflating: Dataset/Training_data/Label/press_444.npy \n", - " inflating: Dataset/Training_data/Label/press_446.npy \n", - " inflating: Dataset/Training_data/Label/press_447.npy \n", - " inflating: Dataset/Training_data/Label/press_448.npy \n", - " inflating: Dataset/Training_data/Label/press_449.npy \n", - " inflating: Dataset/Training_data/Label/press_450.npy \n", - " inflating: Dataset/Training_data/Label/press_451.npy \n", - " inflating: Dataset/Training_data/Label/press_452.npy \n", - " inflating: Dataset/Training_data/Label/press_453.npy \n", - " inflating: Dataset/Training_data/Label/press_454.npy \n", - " inflating: Dataset/Training_data/Label/press_455.npy \n", - " inflating: Dataset/Training_data/Label/press_456.npy \n", - " inflating: Dataset/Training_data/Label/press_457.npy \n", - " inflating: Dataset/Training_data/Label/press_459.npy \n", - " inflating: Dataset/Training_data/Label/press_460.npy \n", - " inflating: Dataset/Training_data/Label/press_462.npy \n", - " inflating: Dataset/Training_data/Label/press_463.npy \n", - " inflating: Dataset/Training_data/Label/press_464.npy \n", - " inflating: Dataset/Training_data/Label/press_465.npy \n", - " inflating: Dataset/Training_data/Label/press_466.npy \n", - " inflating: Dataset/Training_data/Label/press_467.npy \n", - " inflating: Dataset/Training_data/Label/press_468.npy \n", - " inflating: Dataset/Training_data/Label/press_469.npy \n", - " inflating: Dataset/Training_data/Label/press_470.npy \n", - " inflating: Dataset/Training_data/Label/press_472.npy \n", - " inflating: Dataset/Training_data/Label/press_473.npy \n", - " inflating: Dataset/Training_data/Label/press_474.npy \n", - " inflating: Dataset/Training_data/Label/press_475.npy \n", - " inflating: Dataset/Training_data/Label/press_476.npy \n", - " inflating: Dataset/Training_data/Label/press_478.npy \n", - " inflating: Dataset/Training_data/Label/press_479.npy \n", - " inflating: Dataset/Training_data/Label/press_480.npy \n", - " inflating: Dataset/Training_data/Label/press_482.npy \n", - " inflating: Dataset/Training_data/Label/press_483.npy \n", - " inflating: Dataset/Training_data/Label/press_486.npy \n", - " inflating: Dataset/Training_data/Label/press_487.npy \n", - " inflating: Dataset/Training_data/Label/press_488.npy \n", - " inflating: Dataset/Training_data/Label/press_490.npy \n", - " inflating: Dataset/Training_data/Label/press_493.npy \n", - " inflating: Dataset/Training_data/Label/press_494.npy \n", - " inflating: Dataset/Training_data/Label/press_495.npy \n", - " inflating: Dataset/Training_data/Label/press_496.npy \n", - " inflating: Dataset/Training_data/Label/press_497.npy \n", - " inflating: Dataset/Training_data/Label/press_498.npy \n", - " inflating: Dataset/Training_data/Label/press_499.npy \n", - " inflating: 
Dataset/Training_data/Label/press_501.npy \n", - " inflating: Dataset/Training_data/Label/press_502.npy \n", - " inflating: Dataset/Training_data/Label/press_503.npy \n", - " inflating: Dataset/Training_data/Label/press_504.npy \n", - " inflating: Dataset/Training_data/Label/press_505.npy \n", - " inflating: Dataset/Training_data/Label/press_507.npy \n", - " inflating: Dataset/Training_data/Label/press_508.npy \n", - " inflating: Dataset/Training_data/Label/press_509.npy \n", - " inflating: Dataset/Training_data/Label/press_511.npy \n", - " inflating: Dataset/Training_data/Label/press_512.npy \n", - " inflating: Dataset/Training_data/Label/press_513.npy \n", - " inflating: Dataset/Training_data/Label/press_514.npy \n", - " inflating: Dataset/Training_data/Label/press_515.npy \n", - " inflating: Dataset/Training_data/Label/press_516.npy \n", - " inflating: Dataset/Training_data/Label/press_518.npy \n", - " inflating: Dataset/Training_data/Label/press_519.npy \n", - " inflating: Dataset/Training_data/Label/press_521.npy \n", - " inflating: Dataset/Training_data/Label/press_522.npy \n", - " inflating: Dataset/Training_data/Label/press_523.npy \n", - " inflating: Dataset/Training_data/Label/press_524.npy \n", - " inflating: Dataset/Training_data/Label/press_525.npy \n", - " inflating: Dataset/Training_data/Label/press_527.npy \n", - " inflating: Dataset/Training_data/Label/press_529.npy \n", - " inflating: Dataset/Training_data/Label/press_530.npy \n", - " inflating: Dataset/Training_data/Label/press_532.npy \n", - " inflating: Dataset/Training_data/Label/press_533.npy \n", - " inflating: Dataset/Training_data/Label/press_536.npy \n", - " inflating: Dataset/Training_data/Label/press_538.npy \n", - " inflating: Dataset/Training_data/Label/press_539.npy \n", - " inflating: Dataset/Training_data/Label/press_540.npy \n", - " inflating: Dataset/Training_data/Label/press_542.npy \n", - " inflating: Dataset/Training_data/Label/press_543.npy \n", - " inflating: Dataset/Training_data/Label/press_545.npy \n", - " inflating: Dataset/Training_data/Label/press_547.npy \n", - " inflating: Dataset/Training_data/Label/press_548.npy \n", - " inflating: Dataset/Training_data/Label/press_549.npy \n", - " inflating: Dataset/Training_data/Label/press_550.npy \n", - " inflating: Dataset/Training_data/Label/press_551.npy \n", - " inflating: Dataset/Training_data/Label/press_552.npy \n", - " inflating: Dataset/Training_data/Label/press_553.npy \n", - " inflating: Dataset/Training_data/Label/press_554.npy \n", - " inflating: Dataset/Training_data/Label/press_555.npy \n", - " inflating: Dataset/Training_data/Label/press_560.npy \n", - " inflating: Dataset/Training_data/Label/press_561.npy \n", - " inflating: Dataset/Training_data/Label/press_562.npy \n", - " inflating: Dataset/Training_data/Label/press_564.npy \n", - " inflating: Dataset/Training_data/Label/press_565.npy \n", - " inflating: Dataset/Training_data/Label/press_566.npy \n", - " inflating: Dataset/Training_data/Label/press_567.npy \n", - " inflating: Dataset/Training_data/Label/press_568.npy \n", - " inflating: Dataset/Training_data/Label/press_569.npy \n", - " inflating: Dataset/Training_data/Label/press_572.npy \n", - " inflating: Dataset/Training_data/Label/press_573.npy \n", - " inflating: Dataset/Training_data/Label/press_574.npy \n", - " inflating: Dataset/Training_data/Label/press_576.npy \n", - " inflating: Dataset/Training_data/Label/press_577.npy \n", - " inflating: Dataset/Training_data/Label/press_579.npy \n", - " inflating: 
Dataset/Training_data/Label/press_581.npy \n", - " inflating: Dataset/Training_data/Label/press_582.npy \n", - " inflating: Dataset/Training_data/Label/press_583.npy \n", - " inflating: Dataset/Training_data/Label/press_584.npy \n", - " inflating: Dataset/Training_data/Label/press_587.npy \n", - " inflating: Dataset/Training_data/Label/press_588.npy \n", - " inflating: Dataset/Training_data/Label/press_589.npy \n", - " inflating: Dataset/Training_data/Label/press_591.npy \n", - " inflating: Dataset/Training_data/Label/press_593.npy \n", - " inflating: Dataset/Training_data/Label/press_594.npy \n", - " inflating: Dataset/Training_data/Label/press_595.npy \n", - " inflating: Dataset/Training_data/Label/press_596.npy \n", - " inflating: Dataset/Training_data/Label/press_597.npy \n", - " inflating: Dataset/Training_data/Label/press_598.npy \n", - " inflating: Dataset/Training_data/Label/press_600.npy \n", - " inflating: Dataset/Training_data/Label/press_602.npy \n", - " inflating: Dataset/Training_data/Label/press_604.npy \n", - " inflating: Dataset/Training_data/Label/press_608.npy \n", - " inflating: Dataset/Training_data/Label/press_610.npy \n", - " inflating: Dataset/Training_data/Label/press_611.npy \n", - " inflating: Dataset/Training_data/Label/press_612.npy \n", - " inflating: Dataset/Training_data/Label/press_613.npy \n", - " inflating: Dataset/Training_data/Label/press_615.npy \n", - " inflating: Dataset/Training_data/Label/press_616.npy \n", - " inflating: Dataset/Training_data/Label/press_617.npy \n", - " inflating: Dataset/Training_data/Label/press_618.npy \n", - " inflating: Dataset/Training_data/Label/press_620.npy \n", - " inflating: Dataset/Training_data/Label/press_621.npy \n", - " inflating: Dataset/Training_data/Label/press_622.npy \n", - " inflating: Dataset/Training_data/Label/press_623.npy \n", - " inflating: Dataset/Training_data/Label/press_625.npy \n", - " inflating: Dataset/Training_data/Label/press_626.npy \n", - " inflating: Dataset/Training_data/Label/press_627.npy \n", - " inflating: Dataset/Training_data/Label/press_628.npy \n", - " inflating: Dataset/Training_data/Label/press_629.npy \n", - " inflating: Dataset/Training_data/Label/press_630.npy \n", - " inflating: Dataset/Training_data/Label/press_631.npy \n", - " inflating: Dataset/Training_data/Label/press_632.npy \n", - " inflating: Dataset/Training_data/Label/press_633.npy \n", - " inflating: Dataset/Training_data/Label/press_634.npy \n", - " inflating: Dataset/Training_data/Label/press_635.npy \n", - " inflating: Dataset/Training_data/Label/press_636.npy \n", - " inflating: Dataset/Training_data/Label/press_638.npy \n", - " inflating: Dataset/Training_data/Label/press_639.npy \n", - " inflating: Dataset/Training_data/Label/press_640.npy \n", - " inflating: Dataset/Training_data/Label/press_641.npy \n", - " inflating: Dataset/Training_data/Label/press_642.npy \n", - " inflating: Dataset/Training_data/Label/press_643.npy \n", - " inflating: Dataset/Training_data/Label/press_644.npy \n", - " inflating: Dataset/Training_data/Label/press_645.npy \n", - " inflating: Dataset/Training_data/Label/press_646.npy \n", - " inflating: Dataset/Training_data/Label/press_647.npy \n", - " inflating: Dataset/Training_data/Label/press_648.npy \n", - " inflating: Dataset/Training_data/Label/press_649.npy \n", - " inflating: Dataset/Training_data/Label/press_651.npy \n", - " inflating: Dataset/Training_data/Label/press_652.npy \n", - " inflating: Dataset/Training_data/Label/press_654.npy \n", - " inflating: 
Dataset/Training_data/Label/press_655.npy \n", - " inflating: Dataset/Training_data/Label/press_656.npy \n", - " inflating: Dataset/Training_data/Label/press_657.npy \n", - " inflating: Dataset/Training_data/train_pressure_min_std.txt \n", - " inflating: Dataset/Training_data/watertight_global_bounds.txt \n", - " inflating: Dataset/Training_data/watertight_meshes.txt \n" - ] - } - ], - "source": [ - "#### Download Dataset.zip\n", - "!wget --header=\"Host: drive.usercontent.google.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Cookie: __Secure-ENID=12.SE=Yd0Bj-CLJ14fnd4qzdJHmwUs4B5zz46UaPC1cPJigNqqFV9PtM2CYyBpSbCkOyzUwzlEdZ1nZFf-igtGi7wSdJ_gqQSfQfh84r9egqFQAy9-GKayCRbdQKdera-2mkpuIT-c64CyR9vfNojM3hxZ9Dej-dGvtxlGjal9ttEHybw; __gsas=ID=ae0421b9a34b478c:T=1710758437:RT=1710758437:S=ALNI_MZP13R9ZOHbCzC0rgHSMrGXj6GCsg; HSID=A-4I-ZudDNUIB6EKH; SSID=A7v_1v9un6xAwVNku; APISID=ctK8IbLjeuDUmgys/AFnMSLWt9KddceDI6; SAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-1PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-3PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; SID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_kzuBV1TvOhAIC8VF1e9fpgACgYKATQSARQSFQHGX2Mi8LXUwWoIwNCEPU8Sy3mXUxoVAUF8yKqGXVfjTGz9gQal7nwGr4Pl0076; __Secure-1PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_PDa-DzVmbdGFPyxMQpk9_QACgYKAewSARQSFQHGX2MiAeee4fn0OWglWZfAygqkyBoVAUF8yKp-Sfmtnueimxc-0QbJRF9I0076; __Secure-3PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_g9IrMeU98APBo9Stp6wEnAACgYKAQASARQSFQHGX2MiFWtc9ucONXnpxBzlRdudEhoVAUF8yKoeZwCpJDnjfAFjGssHSUGm0076; NID=515=GQhY9nKKFCx3qFDjE0MA4ubjWNdef6xCIY_RfWOPWKEtyfBN3nAUl8WHI2VczjNQ4rVkj1XBAY8WNWHXyqSK10CfT4FxsFlPzrHIJpeTtm1nWRNBd9AAfBKJHz4XpESszntVUTE_59RklZuKo0vk1poReVi2da1PZKC3CTKH2Ll3gB5xuB9wf4bmq8ylVUuIROPJczr0XnCuUHV3qLdBvgy9_870b6UwOq1iOlIxFQFm01EZ4pqF4q1Ub3QRSWpEMLh4LSZFpJ5O255R5OV7krmEdDvH_sHoTEPZAg2PoEpwAyGK6Xp9qcLIlldgx5-5V86N8Wtb93uTlQuA_CFXb5_2eP3bgeX8txwlJ5SrldVjg9ctzYtBU2RwJKTSvdHfIG7lpOkg6XlkvDOcJpR3DihT_OlqnPn7drCAJpvVDv29hZn5XPMXaSrNdbG64OJ9urJEw5odEwsLYkkpC1vmlUcuoo52S5f6RQu0Z8kZiV8iRW6XIqHsSmQHunVaxk6xWCStUg; __Secure-1PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; __Secure-3PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; SIDCC=AKEyXzVI6aMX8lSDja86Yts3FBAtBzPCzVNgaX5BCz78NWsWzlT3yFWKUV7ZE46SFzE1GiBI-cHdTw; __Secure-1PSIDCC=AKEyXzUo4NQAwqqPMxP2eye-MFEbZmBIm_sZqRU1amttg0YoQkc8ZKSNXdHl5jNCMEbhrUHhS9-K; __Secure-3PSIDCC=AKEyXzWf2lIdmDLeZKpXSi9GytVQb6XudrYiNUBA5gW952YuLh8kL6T3IbBlu8zOTfGEcdUp5O1R\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=dc3aa13c-c3a9-458f-983a-8586798cb635&at=APZUnTX25XMxi-z-3wBcgR93IGsL%3A1719235792953\" -c -O 'Dataset.zip'\n", - "#### Unzip Dataset.zip\n", - "!unzip Dataset.zip\n", - "#### Delete Dataset.zip\n", - "!rm Dataset.zip\n", - "#### Rename the Training_data directory\n", - "!mv Dataset/Training_data Dataset/Trainset_track_A" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hjHXYCzgw2v5" - }, - "source": [ - "Track B" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {
- "base_uri": "https://localhost:8080/" - }, - "collapsed": true, - "executionInfo": { - "elapsed": 231508, - "status": "ok", - "timestamp": 1720765946210, - "user": { - "displayName": "Yuanwei Bin", - "userId": "04820485600131748919" - }, - "user_tz": -480 - }, - "id": "B88H3zRrnfil", - "outputId": "b9243c63-5134-40a3-846b-910ab4a657b5" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "--2024-07-12 06:28:36-- https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\n", - "Resolving ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)... 103.235.47.176, 2409:8c04:1001:1203:0:ff:b0bb:4f27\n", - "Connecting to ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)|103.235.47.176|:443... connected.\n", - "HTTP request sent, awaiting response... 200 OK\n", - "Length: 4740031429 (4.4G) [application/octet-stream]\n", - "Saving to: ‘train_track_B.zip’\n", - "\n", - "train_track_B.zip 100%[===================>] 4.41G 27.9MB/s in 2m 48s \n", - "\n", - "2024-07-12 06:31:25 (26.9 MB/s) - ‘train_track_B.zip’ saved [4740031429/4740031429]\n", - "\n", - "Archive: train_track_B.zip\n", - " inflating: area_0002.npy \n", - " inflating: area_0003.npy \n", - " inflating: area_0004.npy \n", - " inflating: area_0005.npy \n", - " inflating: area_0006.npy \n", - " inflating: area_0011.npy \n", - " inflating: area_0012.npy \n", - " inflating: area_0013.npy \n", - " inflating: area_0015.npy \n", - " inflating: area_0017.npy \n", - " inflating: area_0018.npy \n", - " inflating: area_0020.npy \n", - " inflating: area_0021.npy \n", - " inflating: area_0022.npy \n", - " inflating: area_0023.npy \n", - " inflating: area_0024.npy \n", - " inflating: area_0026.npy \n", - " inflating: area_0029.npy \n", - " inflating: area_0030.npy \n", - " inflating: area_0036.npy \n", - " inflating: area_0037.npy \n", - " inflating: area_0038.npy \n", - " inflating: area_0039.npy \n", - " inflating: area_0040.npy \n", - " inflating: area_0041.npy \n", - " inflating: area_0042.npy \n", - " inflating: area_0043.npy \n", - " inflating: area_0044.npy \n", - " inflating: area_0048.npy \n", - " inflating: area_0049.npy \n", - " inflating: area_0051.npy \n", - " inflating: area_0052.npy \n", - " inflating: area_0055.npy \n", - " inflating: area_0056.npy \n", - " inflating: area_0057.npy \n", - " inflating: area_0059.npy \n", - " inflating: area_0062.npy \n", - " inflating: area_0064.npy \n", - " inflating: area_0066.npy \n", - " inflating: area_0067.npy \n", - " inflating: area_0068.npy \n", - " inflating: area_0071.npy \n", - " inflating: area_0074.npy \n", - " inflating: area_0075.npy \n", - " inflating: area_0077.npy \n", - " inflating: area_0078.npy \n", - " inflating: area_0080.npy \n", - " inflating: area_0081.npy \n", - " inflating: area_0082.npy \n", - " inflating: area_0084.npy \n", - " inflating: area_0085.npy \n", - " inflating: area_0086.npy \n", - " inflating: area_0087.npy \n", - " inflating: area_0088.npy \n", - " inflating: area_0089.npy \n", - " inflating: area_0090.npy \n", - " inflating: area_0092.npy \n", - " inflating: area_0093.npy \n", - " inflating: area_0094.npy \n", - " inflating: area_0095.npy \n", - " inflating: area_0097.npy \n", - " inflating: area_0098.npy \n", - " inflating: 
area_0100.npy \n",
- [... unzip log condensed: several hundred consecutive `inflating: area_XXXX.npy` (area_0101–area_0745) and `inflating: centroid_XXXX.npy` (centroid_0002–centroid_0401) entries from the train_track_B.zip extraction omitted; the log resumes below ...]
- " inflating: centroid_0402.npy 
\n", - " inflating: centroid_0403.npy \n", - " inflating: centroid_0404.npy \n", - " inflating: centroid_0405.npy \n", - " inflating: centroid_0407.npy \n", - " inflating: centroid_0408.npy \n", - " inflating: centroid_0409.npy \n", - " inflating: centroid_0410.npy \n", - " inflating: centroid_0411.npy \n", - " inflating: centroid_0413.npy \n", - " inflating: centroid_0416.npy \n", - " inflating: centroid_0417.npy \n", - " inflating: centroid_0421.npy \n", - " inflating: centroid_0422.npy \n", - " inflating: centroid_0423.npy \n", - " inflating: centroid_0424.npy \n", - " inflating: centroid_0425.npy \n", - " inflating: centroid_0428.npy \n", - " inflating: centroid_0429.npy \n", - " inflating: centroid_0430.npy " - ] - } - ], - "source": [ - "####下载train_track_B.zip\n", - "!wget --header=\"Host: ai-studio-online.bj.bcebos.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Referer: https://aistudio.baidu.com/\" \"https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\" -c -O 'train_track_B.zip'\n", - "####解压到train_track_B\n", - "!mkdir -p train_track_B && unzip -o train_track_B.zip -d train_track_B/\n", - "####将train_track_B移到Dataset下\n", - "!mv train_track_B Dataset/Trainset_track_B\n", - "####删除train_track_B.zip\n", - "!rm train_track_B.zip" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "J6DqAplSlOd1" - }, - "source": [ - "# **额外数据导入**" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2RNGoSyovQpc" - }, - "source": [ - "导入赛道二额外数据\n", - "\n", - "数据来源:https://github.com/Mohamedelrefaie/DrivAerNet\n", - "\n", - "论文:Elrefaie, Mohamed, Angela Dai, and Faez Ahmed. 
\"Drivaernet: A parametric car dataset for data-driven aerodynamic design and graph-based drag prediction.\" arXiv preprint arXiv:2403.08055 (2024).\n", - "\n", - "特别说明:额外数据中仅使用了id>745的数据,未踩到比赛测试数据,数据预处理见“centroidPressureFromDrivAerNet.py”" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "###################### 第一部分:npy_dataset_id_to2090 ######################\n", - "!curl 'https://drive.usercontent.google.com/download?id=1HrlAFfxmvidh4nmo5OA97hn7T1w2D3W2&export=download&authuser=0&confirm=t&uuid=fa8ce3e5-8e5e-47f0-bc88-a5255bb1d205&at=APZUnTX5vfaU7XGNnWVJO-3d3NnS%3A1721008623935' \\\n", - " -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n", - " -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,de;q=0.7' \\\n", - " -H 'cookie: SID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_P1oOKYaH1BLrHCo3iS0ORAACgYKARkSARESFQHGX2MiWPVwYo8_viuWERHrHR1NdhoVAUF8yKruIZ_4e1ei2Ekggpa8y8tn0076; __Secure-1PSID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_N4Ewz7Dr3VAo_wx2LK2r8gACgYKATwSARESFQHGX2MiQ8tfWf2JaqKvrnUyQ45v3BoVAUF8yKq8rQ4q5pVZQVPFSWPsFpAL0076; __Secure-3PSID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_2Uk70D_VaOy1CAOn_-q02wACgYKAdMSARESFQHGX2Milt8gLaNlTE2LPd0GCHBY8RoVAUF8yKpwkzBbaFeOpamEruSyO4KB0076; HSID=Aze2C-18CaajQB5ZY; SSID=AcSGa_zv1UHXYWXjD; APISID=63spz5_adFMvKs3X/AnRG94TS2UMLY3-Jn; SAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; __Secure-1PAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; __Secure-3PAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; AEC=AVYB7cqCjRuyMKwjdDYgIZrykK3GrPvfMCHPw6aftmy-i3jUaK-D4PahKUY; NID=515=qmBvly9f2yPayL5i0BHfVfz668yIlBHAnFQ5N3qoP2gVxqUWPJSsLoUDxXX25G2CDW_FvJW3NOTZDjiCnxDnUQhYDsqIfwa82Zh6xWwzC43u0L25cZKfNGerS4-eyAuiQbBgUl8Rf3PYriRkKJoCMs25jhZ_9z0Wuvch5zkc5zJoL1w9NBRjbk-F6HB9GTuSTBEX-uBWQFQDH5sWuQxwsXtSGRZPQOkKPUNF-LWGa26P1eP2syJlBSOLXIEj2J30p-ahuTAwSgkisPo5YTPFh6gX9iBztpmEyyx9CF4OESec830Sxcu-DgkeNBIwGeKapNLRdg-hyUA6HE2T-tVWQEyhv0k-B5aq95ig81bpWRwDsfUCW8f22q1eZM4Js5nw6nND2hQwwfJ9RlA6orbvTm8DIZZf1ZvwDf1ooTLMA3TZeOv2PEvtlYIhBLX_JxcY_hIc_qvgYrRsJkwOKkykafiRw6F58W9lIQ; __Secure-1PSIDTS=sidts-CjEB4E2dkSE6NJDghNHP1m15TJhjhK_3gk6CbiZHkVbttFbJCYIB2BwTObN5Q77lMAC4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkSE6NJDghNHP1m15TJhjhK_3gk6CbiZHkVbttFbJCYIB2BwTObN5Q77lMAC4EAA; SIDCC=AKEyXzXx3xq69yhLhL6faYN-CslMkFWFN9k2VGCpxUBix4n1USKcT0K9k1kr9z4WWKxOSWsLX9pf; __Secure-1PSIDCC=AKEyXzVJ1otwaPYrh7P23QePj9Uci7oNGp8LoptrV4SuIpcFNgvHA4BhonL7WRlE7TTsE-FeNO0; __Secure-3PSIDCC=AKEyXzV2hkupH10qZ9QKzyDBrGprWcq6ovH37ctI_OU7s25hBteNzXDK3IHj0Im1nsm4DhqdYKY' \\\n", - " -H 'priority: u=0, i' \\\n", - " -H 'sec-ch-ua: \"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"' \\\n", - " -H 'sec-ch-ua-arch: \"x86\"' \\\n", - " -H 'sec-ch-ua-bitness: \"64\"' \\\n", - " -H 'sec-ch-ua-form-factors: \"Desktop\"' \\\n", - " -H 'sec-ch-ua-full-version: \"126.0.6478.127\"' \\\n", - " -H 'sec-ch-ua-full-version-list: \"Not/A)Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"126.0.6478.127\", \"Google Chrome\";v=\"126.0.6478.127\"' \\\n", - " -H 'sec-ch-ua-mobile: ?0' \\\n", - " -H 'sec-ch-ua-model: \"\"' \\\n", - " -H 'sec-ch-ua-platform: \"Windows\"' \\\n", - " -H 'sec-ch-ua-platform-version: \"15.0.0\"' \\\n", - " -H 'sec-ch-ua-wow64: ?0' \\\n", - " -H 'sec-fetch-dest: document' \\\n", - " -H 'sec-fetch-mode: navigate' \\\n", - " -H 'sec-fetch-site: cross-site' \\\n", - " -H 'sec-fetch-user: ?1' \\\n", - " -H 
'upgrade-insecure-requests: 1' \\\n", - " -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36' \\\n", - " -H 'x-client-data: CK61yQEIlbbJAQimtskBCKmdygEIsvXKAQiWocsBCJz+zAEI7ZjNAQiFoM0BCKaizgEIg6jOAQ==' -o ./npy_dataset_id_to2090.tar.gz" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "tZRuUMtu4Cji" - }, - "outputs": [], - "source": [ - "tar_path = './npy_dataset_id_to2090.tar.gz' # 压缩文件路径\n", - "extract_dir = './Dataset/Extra_Trainset_track_B' # 解压目录\n", - "!tar -xzf tar_path --strip-components=1 -C extract_dir\n", - "!rm tar_path" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "###################### 第二部分:npy_dataset_last_part ######################\n", - "!curl 'https://drive.usercontent.google.com/download?id=13LAHqAnjYpqcL33_PdBv71YiVTRi6Bgh&export=download&authuser=0&confirm=t&uuid=ecc5c29b-d551-4cae-955b-8bb3f5162f3f&at=APZUnTXV6vdX6nTQxVdPMOk0foy5%3A1721008953098' \\\n", - " -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n", - " -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,de;q=0.7' \\\n", - " -H 'cookie: SID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_P1oOKYaH1BLrHCo3iS0ORAACgYKARkSARESFQHGX2MiWPVwYo8_viuWERHrHR1NdhoVAUF8yKruIZ_4e1ei2Ekggpa8y8tn0076; __Secure-1PSID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_N4Ewz7Dr3VAo_wx2LK2r8gACgYKATwSARESFQHGX2MiQ8tfWf2JaqKvrnUyQ45v3BoVAUF8yKq8rQ4q5pVZQVPFSWPsFpAL0076; __Secure-3PSID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_2Uk70D_VaOy1CAOn_-q02wACgYKAdMSARESFQHGX2Milt8gLaNlTE2LPd0GCHBY8RoVAUF8yKpwkzBbaFeOpamEruSyO4KB0076; HSID=Aze2C-18CaajQB5ZY; SSID=AcSGa_zv1UHXYWXjD; APISID=63spz5_adFMvKs3X/AnRG94TS2UMLY3-Jn; SAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; __Secure-1PAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; __Secure-3PAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; AEC=AVYB7cqCjRuyMKwjdDYgIZrykK3GrPvfMCHPw6aftmy-i3jUaK-D4PahKUY; NID=515=qmBvly9f2yPayL5i0BHfVfz668yIlBHAnFQ5N3qoP2gVxqUWPJSsLoUDxXX25G2CDW_FvJW3NOTZDjiCnxDnUQhYDsqIfwa82Zh6xWwzC43u0L25cZKfNGerS4-eyAuiQbBgUl8Rf3PYriRkKJoCMs25jhZ_9z0Wuvch5zkc5zJoL1w9NBRjbk-F6HB9GTuSTBEX-uBWQFQDH5sWuQxwsXtSGRZPQOkKPUNF-LWGa26P1eP2syJlBSOLXIEj2J30p-ahuTAwSgkisPo5YTPFh6gX9iBztpmEyyx9CF4OESec830Sxcu-DgkeNBIwGeKapNLRdg-hyUA6HE2T-tVWQEyhv0k-B5aq95ig81bpWRwDsfUCW8f22q1eZM4Js5nw6nND2hQwwfJ9RlA6orbvTm8DIZZf1ZvwDf1ooTLMA3TZeOv2PEvtlYIhBLX_JxcY_hIc_qvgYrRsJkwOKkykafiRw6F58W9lIQ; __Secure-1PSIDTS=sidts-CjEB4E2dkSE6NJDghNHP1m15TJhjhK_3gk6CbiZHkVbttFbJCYIB2BwTObN5Q77lMAC4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkSE6NJDghNHP1m15TJhjhK_3gk6CbiZHkVbttFbJCYIB2BwTObN5Q77lMAC4EAA; SIDCC=AKEyXzXxsM9syaIZNUoIGb2zN2lXB7rG8BpEsm9SJz87gq5gsNxwFe-lG63U6c74LYoXvbopHG0Z; __Secure-1PSIDCC=AKEyXzVyhAwFNnt6SkT2HO83gFjVIK8IxNxnondGM0u1COHMa-GBI0MSe0gjD1pG0cphSFwVl9s; __Secure-3PSIDCC=AKEyXzURcwg9KPPeDlIcZSKmqW6O9S42naV7FjBARhx6Zv3vCYTg-EdHEYgQl6tri4LgM_w-DFc' \\\n", - " -H 'priority: u=0, i' \\\n", - " -H 'sec-ch-ua: \"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"' \\\n", - " -H 'sec-ch-ua-arch: \"x86\"' \\\n", - " -H 'sec-ch-ua-bitness: \"64\"' \\\n", - " -H 'sec-ch-ua-form-factors: \"Desktop\"' \\\n", - " -H 'sec-ch-ua-full-version: \"126.0.6478.127\"' \\\n", - " -H 'sec-ch-ua-full-version-list: \"Not/A)Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"126.0.6478.127\", \"Google 
Chrome\";v=\"126.0.6478.127\"' \\\n", - " -H 'sec-ch-ua-mobile: ?0' \\\n", - " -H 'sec-ch-ua-model: \"\"' \\\n", - " -H 'sec-ch-ua-platform: \"Windows\"' \\\n", - " -H 'sec-ch-ua-platform-version: \"15.0.0\"' \\\n", - " -H 'sec-ch-ua-wow64: ?0' \\\n", - " -H 'sec-fetch-dest: document' \\\n", - " -H 'sec-fetch-mode: navigate' \\\n", - " -H 'sec-fetch-site: cross-site' \\\n", - " -H 'sec-fetch-user: ?1' \\\n", - " -H 'upgrade-insecure-requests: 1' \\\n", - " -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36' \\\n", - " -H 'x-client-data: CK61yQEIlbbJAQimtskBCKmdygEIsvXKAQiWocsBCJz+zAEI7ZjNAQiFoM0BCKaizgEIg6jOAQ==' -o ./npy_dataset_last_part.tar.gz" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3fu-nayY3YZ-" - }, - "outputs": [], - "source": [ - "tar_path = './npy_dataset_last_part.tar.gz' # 压缩文件路径\n", - "extract_dir = './Dataset/Extra_Trainset_track_B' # 解压目录\n", - "!tar -xzf tar_path --strip-components=1 -C extract_dir\n", - "!rm tar_path" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ml0rN3NHvT8t" - }, - "source": [ - "导入PointBERT预训练模型\n", - "\n", - "数据来源:https://github.com/salesforce/ULIP\n", - "\n", - "论文:Xue, Le, et al. \"Ulip-2: Towards scalable multimodal pre-training for 3d understanding.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!wget https://paddle-org.bj.bcebos.com/paddlescience/models/contrib/IJCAI_2024_ckpts.tar.gz\n", - "!tar -zxvf IJCAI_2024_ckpts.tar.gz\n", - "! mv ckpts/bju/geom/ckpt/checkpoint_pointbert.pdparams ./geom/ckpt/checkpoint_pointbert.pdparams" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "gpuType": "T4", - "machine_shape": "hm", - "name": "", - "version": "" - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "SqRGNIxYlTr7" + }, + "source": [ + "# **官方版本数据导入**" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yHcQ9wurwwFX" + }, + "source": [ + "赛道一" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "collapsed": true, + "executionInfo": { + "elapsed": 82228, + "status": "ok", + "timestamp": 1720765083865, + "user": { + "displayName": "Yuanwei Bin", + "userId": "04820485600131748919" + }, + "user_tz": -480 + }, + "id": "oL_v8aw8lZ72", + "outputId": "2fd464ba-e35f-423a-f157-5b20a5f71e3f" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2024-07-12 06:16:43-- https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=dc3aa13c-c3a9-458f-983a-8586798cb635&at=APZUnTX25XMxi-z-3wBcgR93IGsL%3A1719235792953\n", + "Resolving drive.usercontent.google.com (drive.usercontent.google.com)... 142.251.12.132, 2404:6800:4003:c11::84\n", + "Connecting to drive.usercontent.google.com (drive.usercontent.google.com)|142.251.12.132|:443... connected.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 1084182095 (1.0G) [application/octet-stream]\n", + "Saving to: ‘Dataset.zip’\n", + "\n", + "Dataset.zip 100%[===================>] 1.01G 27.5MB/s in 45s \n", + "\n", + "2024-07-12 06:17:30 (23.0 MB/s) - ‘Dataset.zip’ saved [1084182095/1084182095]\n", + "\n", + "Archive: Dataset.zip\n", + " creating: Dataset/\n", + " creating: Dataset/Testset_track_A/\n", + " creating: Dataset/Testset_track_A/Inference/\n", + " inflating: Dataset/Testset_track_A/Inference/mesh_658.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_659.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_660.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_662.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_663.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_664.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_665.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_666.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_667.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_668.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_672.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_673.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_674.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_675.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_676.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_677.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_678.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_679.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_681.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_683.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_684.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_686.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_687.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_688.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_689.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_690.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_691.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_692.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_693.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_695.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_696.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_697.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_700.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_701.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_702.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_703.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_704.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_705.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_708.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_709.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_710.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_711.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_712.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_713.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_715.ply \n", + " inflating: Dataset/Testset_track_A/Inference/mesh_717.ply \n", + " inflating: 
Dataset/Testset_track_A/Inference/mesh_718.ply \n",
+ [... unzip log condensed: the remaining Dataset.zip entries for Testset_track_A/Inference, all of Testset_track_B (Auxiliary area_*/info_*/normal_* files and bounds files, IJCAI_data_doc_v1.pdf, Inference centroid_*/mesh_* files, track_B_data_dict.xlsx) and the Training_data/Feature mesh listing up to mesh_142 omitted; the log resumes below ...]
+ " inflating: 
Dataset/Training_data/Feature/mesh_143.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_144.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_145.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_146.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_147.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_148.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_149.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_150.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_151.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_152.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_153.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_155.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_156.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_157.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_158.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_159.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_160.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_161.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_162.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_163.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_165.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_166.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_170.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_172.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_173.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_175.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_176.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_177.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_178.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_179.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_180.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_181.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_182.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_183.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_184.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_186.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_190.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_191.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_192.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_193.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_195.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_196.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_198.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_199.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_200.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_201.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_202.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_203.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_205.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_207.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_210.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_211.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_212.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_213.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_214.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_215.ply \n", + " inflating: 
Dataset/Training_data/Feature/mesh_217.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_219.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_220.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_221.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_222.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_223.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_224.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_225.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_227.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_228.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_229.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_230.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_231.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_232.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_233.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_234.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_235.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_236.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_237.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_241.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_243.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_244.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_245.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_246.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_247.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_248.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_249.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_251.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_252.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_253.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_255.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_257.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_258.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_259.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_260.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_261.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_262.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_263.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_264.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_266.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_267.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_268.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_269.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_271.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_272.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_273.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_274.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_275.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_276.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_277.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_278.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_279.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_280.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_281.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_282.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_283.ply \n", + " inflating: 
Dataset/Training_data/Feature/mesh_285.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_286.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_289.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_290.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_291.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_292.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_293.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_294.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_295.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_296.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_297.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_298.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_299.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_300.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_301.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_302.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_304.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_305.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_306.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_308.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_309.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_310.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_311.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_312.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_313.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_314.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_315.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_319.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_320.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_321.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_322.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_323.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_324.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_325.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_327.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_328.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_329.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_331.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_332.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_333.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_334.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_335.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_337.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_338.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_339.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_340.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_341.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_344.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_345.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_347.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_348.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_349.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_350.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_352.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_353.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_354.ply \n", + " inflating: 
Dataset/Training_data/Feature/mesh_355.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_356.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_357.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_358.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_360.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_362.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_364.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_365.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_366.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_367.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_369.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_371.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_372.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_373.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_374.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_375.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_376.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_378.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_379.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_380.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_381.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_384.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_385.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_389.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_392.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_393.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_397.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_398.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_399.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_401.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_402.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_403.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_404.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_405.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_407.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_408.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_410.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_412.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_413.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_414.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_415.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_417.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_418.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_419.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_420.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_422.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_424.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_425.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_427.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_430.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_431.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_433.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_435.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_436.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_437.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_439.ply \n", + " inflating: 
Dataset/Training_data/Feature/mesh_440.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_443.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_444.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_446.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_447.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_448.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_449.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_450.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_451.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_452.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_453.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_454.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_455.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_456.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_457.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_459.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_460.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_462.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_463.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_464.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_465.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_466.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_467.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_468.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_469.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_470.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_472.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_473.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_474.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_475.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_476.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_478.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_479.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_480.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_482.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_483.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_486.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_487.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_488.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_490.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_493.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_494.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_495.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_496.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_497.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_498.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_499.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_501.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_502.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_503.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_504.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_505.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_507.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_508.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_509.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_511.ply \n", + " inflating: 
Dataset/Training_data/Feature/mesh_512.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_513.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_514.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_515.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_516.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_518.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_519.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_521.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_522.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_523.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_524.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_525.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_527.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_529.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_530.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_532.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_533.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_536.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_538.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_539.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_540.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_542.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_543.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_545.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_547.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_548.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_549.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_550.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_551.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_552.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_553.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_554.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_555.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_560.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_561.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_562.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_564.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_565.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_566.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_567.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_568.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_569.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_572.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_573.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_574.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_576.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_577.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_579.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_581.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_582.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_583.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_584.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_587.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_588.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_589.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_591.ply \n", + " inflating: 
Dataset/Training_data/Feature/mesh_593.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_594.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_595.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_596.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_597.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_598.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_600.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_602.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_604.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_608.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_610.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_611.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_612.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_613.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_615.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_616.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_617.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_618.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_620.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_621.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_622.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_623.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_625.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_626.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_627.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_628.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_629.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_630.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_631.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_632.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_633.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_634.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_635.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_636.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_638.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_639.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_640.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_641.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_642.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_643.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_644.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_645.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_646.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_647.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_648.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_649.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_651.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_652.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_654.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_655.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_656.ply \n", + " inflating: Dataset/Training_data/Feature/mesh_657.ply \n", + " creating: Dataset/Training_data/Label/\n", + " inflating: Dataset/Training_data/Label/press_001.npy \n", + " inflating: Dataset/Training_data/Label/press_002.npy \n", + " inflating: Dataset/Training_data/Label/press_004.npy \n", + " inflating: Dataset/Training_data/Label/press_005.npy 
\n", + " inflating: Dataset/Training_data/Label/press_006.npy \n", + " inflating: Dataset/Training_data/Label/press_007.npy \n", + " inflating: Dataset/Training_data/Label/press_008.npy \n", + " inflating: Dataset/Training_data/Label/press_010.npy \n", + " inflating: Dataset/Training_data/Label/press_012.npy \n", + " inflating: Dataset/Training_data/Label/press_013.npy \n", + " inflating: Dataset/Training_data/Label/press_017.npy \n", + " inflating: Dataset/Training_data/Label/press_018.npy \n", + " inflating: Dataset/Training_data/Label/press_021.npy \n", + " inflating: Dataset/Training_data/Label/press_022.npy \n", + " inflating: Dataset/Training_data/Label/press_023.npy \n", + " inflating: Dataset/Training_data/Label/press_025.npy \n", + " inflating: Dataset/Training_data/Label/press_026.npy \n", + " inflating: Dataset/Training_data/Label/press_027.npy \n", + " inflating: Dataset/Training_data/Label/press_028.npy \n", + " inflating: Dataset/Training_data/Label/press_029.npy \n", + " inflating: Dataset/Training_data/Label/press_030.npy \n", + " inflating: Dataset/Training_data/Label/press_031.npy \n", + " inflating: Dataset/Training_data/Label/press_032.npy \n", + " inflating: Dataset/Training_data/Label/press_034.npy \n", + " inflating: Dataset/Training_data/Label/press_035.npy \n", + " inflating: Dataset/Training_data/Label/press_039.npy \n", + " inflating: Dataset/Training_data/Label/press_040.npy \n", + " inflating: Dataset/Training_data/Label/press_043.npy \n", + " inflating: Dataset/Training_data/Label/press_044.npy \n", + " inflating: Dataset/Training_data/Label/press_045.npy \n", + " inflating: Dataset/Training_data/Label/press_046.npy \n", + " inflating: Dataset/Training_data/Label/press_047.npy \n", + " inflating: Dataset/Training_data/Label/press_048.npy \n", + " inflating: Dataset/Training_data/Label/press_049.npy \n", + " inflating: Dataset/Training_data/Label/press_050.npy \n", + " inflating: Dataset/Training_data/Label/press_051.npy \n", + " inflating: Dataset/Training_data/Label/press_052.npy \n", + " inflating: Dataset/Training_data/Label/press_054.npy \n", + " inflating: Dataset/Training_data/Label/press_055.npy \n", + " inflating: Dataset/Training_data/Label/press_056.npy \n", + " inflating: Dataset/Training_data/Label/press_058.npy \n", + " inflating: Dataset/Training_data/Label/press_059.npy \n", + " inflating: Dataset/Training_data/Label/press_060.npy \n", + " inflating: Dataset/Training_data/Label/press_061.npy \n", + " inflating: Dataset/Training_data/Label/press_062.npy \n", + " inflating: Dataset/Training_data/Label/press_063.npy \n", + " inflating: Dataset/Training_data/Label/press_064.npy \n", + " inflating: Dataset/Training_data/Label/press_065.npy \n", + " inflating: Dataset/Training_data/Label/press_067.npy \n", + " inflating: Dataset/Training_data/Label/press_069.npy \n", + " inflating: Dataset/Training_data/Label/press_070.npy \n", + " inflating: Dataset/Training_data/Label/press_071.npy \n", + " inflating: Dataset/Training_data/Label/press_072.npy \n", + " inflating: Dataset/Training_data/Label/press_073.npy \n", + " inflating: Dataset/Training_data/Label/press_074.npy \n", + " inflating: Dataset/Training_data/Label/press_075.npy \n", + " inflating: Dataset/Training_data/Label/press_076.npy \n", + " inflating: Dataset/Training_data/Label/press_077.npy \n", + " inflating: Dataset/Training_data/Label/press_078.npy \n", + " inflating: Dataset/Training_data/Label/press_079.npy \n", + " inflating: Dataset/Training_data/Label/press_080.npy \n", + " inflating: 
Dataset/Training_data/Label/press_081.npy \n", + " inflating: Dataset/Training_data/Label/press_083.npy \n", + " inflating: Dataset/Training_data/Label/press_084.npy \n", + " inflating: Dataset/Training_data/Label/press_085.npy \n", + " inflating: Dataset/Training_data/Label/press_086.npy \n", + " inflating: Dataset/Training_data/Label/press_087.npy \n", + " inflating: Dataset/Training_data/Label/press_088.npy \n", + " inflating: Dataset/Training_data/Label/press_090.npy \n", + " inflating: Dataset/Training_data/Label/press_091.npy \n", + " inflating: Dataset/Training_data/Label/press_092.npy \n", + " inflating: Dataset/Training_data/Label/press_094.npy \n", + " inflating: Dataset/Training_data/Label/press_095.npy \n", + " inflating: Dataset/Training_data/Label/press_096.npy \n", + " inflating: Dataset/Training_data/Label/press_097.npy \n", + " inflating: Dataset/Training_data/Label/press_100.npy \n", + " inflating: Dataset/Training_data/Label/press_101.npy \n", + " inflating: Dataset/Training_data/Label/press_102.npy \n", + " inflating: Dataset/Training_data/Label/press_105.npy \n", + " inflating: Dataset/Training_data/Label/press_106.npy \n", + " inflating: Dataset/Training_data/Label/press_107.npy \n", + " inflating: Dataset/Training_data/Label/press_109.npy \n", + " inflating: Dataset/Training_data/Label/press_110.npy \n", + " inflating: Dataset/Training_data/Label/press_111.npy \n", + " inflating: Dataset/Training_data/Label/press_112.npy \n", + " inflating: Dataset/Training_data/Label/press_113.npy \n", + " inflating: Dataset/Training_data/Label/press_114.npy \n", + " inflating: Dataset/Training_data/Label/press_115.npy \n", + " inflating: Dataset/Training_data/Label/press_116.npy \n", + " inflating: Dataset/Training_data/Label/press_117.npy \n", + " inflating: Dataset/Training_data/Label/press_118.npy \n", + " inflating: Dataset/Training_data/Label/press_119.npy \n", + " inflating: Dataset/Training_data/Label/press_120.npy \n", + " inflating: Dataset/Training_data/Label/press_121.npy \n", + " inflating: Dataset/Training_data/Label/press_123.npy \n", + " inflating: Dataset/Training_data/Label/press_124.npy \n", + " inflating: Dataset/Training_data/Label/press_125.npy \n", + " inflating: Dataset/Training_data/Label/press_126.npy \n", + " inflating: Dataset/Training_data/Label/press_127.npy \n", + " inflating: Dataset/Training_data/Label/press_128.npy \n", + " inflating: Dataset/Training_data/Label/press_129.npy \n", + " inflating: Dataset/Training_data/Label/press_130.npy \n", + " inflating: Dataset/Training_data/Label/press_131.npy \n", + " inflating: Dataset/Training_data/Label/press_133.npy \n", + " inflating: Dataset/Training_data/Label/press_134.npy \n", + " inflating: Dataset/Training_data/Label/press_136.npy \n", + " inflating: Dataset/Training_data/Label/press_137.npy \n", + " inflating: Dataset/Training_data/Label/press_138.npy \n", + " inflating: Dataset/Training_data/Label/press_139.npy \n", + " inflating: Dataset/Training_data/Label/press_140.npy \n", + " inflating: Dataset/Training_data/Label/press_141.npy \n", + " inflating: Dataset/Training_data/Label/press_142.npy \n", + " inflating: Dataset/Training_data/Label/press_143.npy \n", + " inflating: Dataset/Training_data/Label/press_144.npy \n", + " inflating: Dataset/Training_data/Label/press_145.npy \n", + " inflating: Dataset/Training_data/Label/press_146.npy \n", + " inflating: Dataset/Training_data/Label/press_147.npy \n", + " inflating: Dataset/Training_data/Label/press_148.npy \n", + " inflating: 
Dataset/Training_data/Label/press_149.npy \n", + " inflating: Dataset/Training_data/Label/press_150.npy \n", + " inflating: Dataset/Training_data/Label/press_151.npy \n", + " inflating: Dataset/Training_data/Label/press_152.npy \n", + " inflating: Dataset/Training_data/Label/press_153.npy \n", + " inflating: Dataset/Training_data/Label/press_155.npy \n", + " inflating: Dataset/Training_data/Label/press_156.npy \n", + " inflating: Dataset/Training_data/Label/press_157.npy \n", + " inflating: Dataset/Training_data/Label/press_158.npy \n", + " inflating: Dataset/Training_data/Label/press_159.npy \n", + " inflating: Dataset/Training_data/Label/press_160.npy \n", + " inflating: Dataset/Training_data/Label/press_161.npy \n", + " inflating: Dataset/Training_data/Label/press_162.npy \n", + " inflating: Dataset/Training_data/Label/press_163.npy \n", + " inflating: Dataset/Training_data/Label/press_165.npy \n", + " inflating: Dataset/Training_data/Label/press_166.npy \n", + " inflating: Dataset/Training_data/Label/press_170.npy \n", + " inflating: Dataset/Training_data/Label/press_172.npy \n", + " inflating: Dataset/Training_data/Label/press_173.npy \n", + " inflating: Dataset/Training_data/Label/press_175.npy \n", + " inflating: Dataset/Training_data/Label/press_176.npy \n", + " inflating: Dataset/Training_data/Label/press_177.npy \n", + " inflating: Dataset/Training_data/Label/press_178.npy \n", + " inflating: Dataset/Training_data/Label/press_179.npy \n", + " inflating: Dataset/Training_data/Label/press_180.npy \n", + " inflating: Dataset/Training_data/Label/press_181.npy \n", + " inflating: Dataset/Training_data/Label/press_182.npy \n", + " inflating: Dataset/Training_data/Label/press_183.npy \n", + " inflating: Dataset/Training_data/Label/press_184.npy \n", + " inflating: Dataset/Training_data/Label/press_186.npy \n", + " inflating: Dataset/Training_data/Label/press_190.npy \n", + " inflating: Dataset/Training_data/Label/press_191.npy \n", + " inflating: Dataset/Training_data/Label/press_192.npy \n", + " inflating: Dataset/Training_data/Label/press_193.npy \n", + " inflating: Dataset/Training_data/Label/press_195.npy \n", + " inflating: Dataset/Training_data/Label/press_196.npy \n", + " inflating: Dataset/Training_data/Label/press_198.npy \n", + " inflating: Dataset/Training_data/Label/press_199.npy \n", + " inflating: Dataset/Training_data/Label/press_200.npy \n", + " inflating: Dataset/Training_data/Label/press_201.npy \n", + " inflating: Dataset/Training_data/Label/press_202.npy \n", + " inflating: Dataset/Training_data/Label/press_203.npy \n", + " inflating: Dataset/Training_data/Label/press_205.npy \n", + " inflating: Dataset/Training_data/Label/press_207.npy \n", + " inflating: Dataset/Training_data/Label/press_210.npy \n", + " inflating: Dataset/Training_data/Label/press_211.npy \n", + " inflating: Dataset/Training_data/Label/press_212.npy \n", + " inflating: Dataset/Training_data/Label/press_213.npy \n", + " inflating: Dataset/Training_data/Label/press_214.npy \n", + " inflating: Dataset/Training_data/Label/press_215.npy \n", + " inflating: Dataset/Training_data/Label/press_217.npy \n", + " inflating: Dataset/Training_data/Label/press_219.npy \n", + " inflating: Dataset/Training_data/Label/press_220.npy \n", + " inflating: Dataset/Training_data/Label/press_221.npy \n", + " inflating: Dataset/Training_data/Label/press_222.npy \n", + " inflating: Dataset/Training_data/Label/press_223.npy \n", + " inflating: Dataset/Training_data/Label/press_224.npy \n", + " inflating: 
Dataset/Training_data/Label/press_225.npy \n", + " inflating: Dataset/Training_data/Label/press_227.npy \n", + " inflating: Dataset/Training_data/Label/press_228.npy \n", + " inflating: Dataset/Training_data/Label/press_229.npy \n", + " inflating: Dataset/Training_data/Label/press_230.npy \n", + " inflating: Dataset/Training_data/Label/press_231.npy \n", + " inflating: Dataset/Training_data/Label/press_232.npy \n", + " inflating: Dataset/Training_data/Label/press_233.npy \n", + " inflating: Dataset/Training_data/Label/press_234.npy \n", + " inflating: Dataset/Training_data/Label/press_235.npy \n", + " inflating: Dataset/Training_data/Label/press_236.npy \n", + " inflating: Dataset/Training_data/Label/press_237.npy \n", + " inflating: Dataset/Training_data/Label/press_241.npy \n", + " inflating: Dataset/Training_data/Label/press_243.npy \n", + " inflating: Dataset/Training_data/Label/press_244.npy \n", + " inflating: Dataset/Training_data/Label/press_245.npy \n", + " inflating: Dataset/Training_data/Label/press_246.npy \n", + " inflating: Dataset/Training_data/Label/press_247.npy \n", + " inflating: Dataset/Training_data/Label/press_248.npy \n", + " inflating: Dataset/Training_data/Label/press_249.npy \n", + " inflating: Dataset/Training_data/Label/press_251.npy \n", + " inflating: Dataset/Training_data/Label/press_252.npy \n", + " inflating: Dataset/Training_data/Label/press_253.npy \n", + " inflating: Dataset/Training_data/Label/press_255.npy \n", + " inflating: Dataset/Training_data/Label/press_257.npy \n", + " inflating: Dataset/Training_data/Label/press_258.npy \n", + " inflating: Dataset/Training_data/Label/press_259.npy \n", + " inflating: Dataset/Training_data/Label/press_260.npy \n", + " inflating: Dataset/Training_data/Label/press_261.npy \n", + " inflating: Dataset/Training_data/Label/press_262.npy \n", + " inflating: Dataset/Training_data/Label/press_263.npy \n", + " inflating: Dataset/Training_data/Label/press_264.npy \n", + " inflating: Dataset/Training_data/Label/press_266.npy \n", + " inflating: Dataset/Training_data/Label/press_267.npy \n", + " inflating: Dataset/Training_data/Label/press_268.npy \n", + " inflating: Dataset/Training_data/Label/press_269.npy \n", + " inflating: Dataset/Training_data/Label/press_271.npy \n", + " inflating: Dataset/Training_data/Label/press_272.npy \n", + " inflating: Dataset/Training_data/Label/press_273.npy \n", + " inflating: Dataset/Training_data/Label/press_274.npy \n", + " inflating: Dataset/Training_data/Label/press_275.npy \n", + " inflating: Dataset/Training_data/Label/press_276.npy \n", + " inflating: Dataset/Training_data/Label/press_277.npy \n", + " inflating: Dataset/Training_data/Label/press_278.npy \n", + " inflating: Dataset/Training_data/Label/press_279.npy \n", + " inflating: Dataset/Training_data/Label/press_280.npy \n", + " inflating: Dataset/Training_data/Label/press_281.npy \n", + " inflating: Dataset/Training_data/Label/press_282.npy \n", + " inflating: Dataset/Training_data/Label/press_283.npy \n", + " inflating: Dataset/Training_data/Label/press_285.npy \n", + " inflating: Dataset/Training_data/Label/press_286.npy \n", + " inflating: Dataset/Training_data/Label/press_289.npy \n", + " inflating: Dataset/Training_data/Label/press_290.npy \n", + " inflating: Dataset/Training_data/Label/press_291.npy \n", + " inflating: Dataset/Training_data/Label/press_292.npy \n", + " inflating: Dataset/Training_data/Label/press_293.npy \n", + " inflating: Dataset/Training_data/Label/press_294.npy \n", + " inflating: 
Dataset/Training_data/Label/press_295.npy \n", + " inflating: Dataset/Training_data/Label/press_296.npy \n", + " inflating: Dataset/Training_data/Label/press_297.npy \n", + " inflating: Dataset/Training_data/Label/press_298.npy \n", + " inflating: Dataset/Training_data/Label/press_299.npy \n", + " inflating: Dataset/Training_data/Label/press_300.npy \n", + " inflating: Dataset/Training_data/Label/press_301.npy \n", + " inflating: Dataset/Training_data/Label/press_302.npy \n", + " inflating: Dataset/Training_data/Label/press_304.npy \n", + " inflating: Dataset/Training_data/Label/press_305.npy \n", + " inflating: Dataset/Training_data/Label/press_306.npy \n", + " inflating: Dataset/Training_data/Label/press_308.npy \n", + " inflating: Dataset/Training_data/Label/press_309.npy \n", + " inflating: Dataset/Training_data/Label/press_310.npy \n", + " inflating: Dataset/Training_data/Label/press_311.npy \n", + " inflating: Dataset/Training_data/Label/press_312.npy \n", + " inflating: Dataset/Training_data/Label/press_313.npy \n", + " inflating: Dataset/Training_data/Label/press_314.npy \n", + " inflating: Dataset/Training_data/Label/press_315.npy \n", + " inflating: Dataset/Training_data/Label/press_319.npy \n", + " inflating: Dataset/Training_data/Label/press_320.npy \n", + " inflating: Dataset/Training_data/Label/press_321.npy \n", + " inflating: Dataset/Training_data/Label/press_322.npy \n", + " inflating: Dataset/Training_data/Label/press_323.npy \n", + " inflating: Dataset/Training_data/Label/press_324.npy \n", + " inflating: Dataset/Training_data/Label/press_325.npy \n", + " inflating: Dataset/Training_data/Label/press_327.npy \n", + " inflating: Dataset/Training_data/Label/press_328.npy \n", + " inflating: Dataset/Training_data/Label/press_329.npy \n", + " inflating: Dataset/Training_data/Label/press_331.npy \n", + " inflating: Dataset/Training_data/Label/press_332.npy \n", + " inflating: Dataset/Training_data/Label/press_333.npy \n", + " inflating: Dataset/Training_data/Label/press_334.npy \n", + " inflating: Dataset/Training_data/Label/press_335.npy \n", + " inflating: Dataset/Training_data/Label/press_337.npy \n", + " inflating: Dataset/Training_data/Label/press_338.npy \n", + " inflating: Dataset/Training_data/Label/press_339.npy \n", + " inflating: Dataset/Training_data/Label/press_340.npy \n", + " inflating: Dataset/Training_data/Label/press_341.npy \n", + " inflating: Dataset/Training_data/Label/press_344.npy \n", + " inflating: Dataset/Training_data/Label/press_345.npy \n", + " inflating: Dataset/Training_data/Label/press_347.npy \n", + " inflating: Dataset/Training_data/Label/press_348.npy \n", + " inflating: Dataset/Training_data/Label/press_349.npy \n", + " inflating: Dataset/Training_data/Label/press_350.npy \n", + " inflating: Dataset/Training_data/Label/press_352.npy \n", + " inflating: Dataset/Training_data/Label/press_353.npy \n", + " inflating: Dataset/Training_data/Label/press_354.npy \n", + " inflating: Dataset/Training_data/Label/press_355.npy \n", + " inflating: Dataset/Training_data/Label/press_356.npy \n", + " inflating: Dataset/Training_data/Label/press_357.npy \n", + " inflating: Dataset/Training_data/Label/press_358.npy \n", + " inflating: Dataset/Training_data/Label/press_360.npy \n", + " inflating: Dataset/Training_data/Label/press_362.npy \n", + " inflating: Dataset/Training_data/Label/press_364.npy \n", + " inflating: Dataset/Training_data/Label/press_365.npy \n", + " inflating: Dataset/Training_data/Label/press_366.npy \n", + " inflating: 
Dataset/Training_data/Label/press_367.npy \n", + " inflating: Dataset/Training_data/Label/press_369.npy \n", + " inflating: Dataset/Training_data/Label/press_371.npy \n", + " inflating: Dataset/Training_data/Label/press_372.npy \n", + " inflating: Dataset/Training_data/Label/press_373.npy \n", + " inflating: Dataset/Training_data/Label/press_374.npy \n", + " inflating: Dataset/Training_data/Label/press_375.npy \n", + " inflating: Dataset/Training_data/Label/press_376.npy \n", + " inflating: Dataset/Training_data/Label/press_378.npy \n", + " inflating: Dataset/Training_data/Label/press_379.npy \n", + " inflating: Dataset/Training_data/Label/press_380.npy \n", + " inflating: Dataset/Training_data/Label/press_381.npy \n", + " inflating: Dataset/Training_data/Label/press_384.npy \n", + " inflating: Dataset/Training_data/Label/press_385.npy \n", + " inflating: Dataset/Training_data/Label/press_389.npy \n", + " inflating: Dataset/Training_data/Label/press_392.npy \n", + " inflating: Dataset/Training_data/Label/press_393.npy \n", + " inflating: Dataset/Training_data/Label/press_397.npy \n", + " inflating: Dataset/Training_data/Label/press_398.npy \n", + " inflating: Dataset/Training_data/Label/press_399.npy \n", + " inflating: Dataset/Training_data/Label/press_401.npy \n", + " inflating: Dataset/Training_data/Label/press_402.npy \n", + " inflating: Dataset/Training_data/Label/press_403.npy \n", + " inflating: Dataset/Training_data/Label/press_404.npy \n", + " inflating: Dataset/Training_data/Label/press_405.npy \n", + " inflating: Dataset/Training_data/Label/press_407.npy \n", + " inflating: Dataset/Training_data/Label/press_408.npy \n", + " inflating: Dataset/Training_data/Label/press_410.npy \n", + " inflating: Dataset/Training_data/Label/press_412.npy \n", + " inflating: Dataset/Training_data/Label/press_413.npy \n", + " inflating: Dataset/Training_data/Label/press_414.npy \n", + " inflating: Dataset/Training_data/Label/press_415.npy \n", + " inflating: Dataset/Training_data/Label/press_417.npy \n", + " inflating: Dataset/Training_data/Label/press_418.npy \n", + " inflating: Dataset/Training_data/Label/press_419.npy \n", + " inflating: Dataset/Training_data/Label/press_420.npy \n", + " inflating: Dataset/Training_data/Label/press_422.npy \n", + " inflating: Dataset/Training_data/Label/press_424.npy \n", + " inflating: Dataset/Training_data/Label/press_425.npy \n", + " inflating: Dataset/Training_data/Label/press_427.npy \n", + " inflating: Dataset/Training_data/Label/press_430.npy \n", + " inflating: Dataset/Training_data/Label/press_431.npy \n", + " inflating: Dataset/Training_data/Label/press_433.npy \n", + " inflating: Dataset/Training_data/Label/press_435.npy \n", + " inflating: Dataset/Training_data/Label/press_436.npy \n", + " inflating: Dataset/Training_data/Label/press_437.npy \n", + " inflating: Dataset/Training_data/Label/press_439.npy \n", + " inflating: Dataset/Training_data/Label/press_440.npy \n", + " inflating: Dataset/Training_data/Label/press_443.npy \n", + " inflating: Dataset/Training_data/Label/press_444.npy \n", + " inflating: Dataset/Training_data/Label/press_446.npy \n", + " inflating: Dataset/Training_data/Label/press_447.npy \n", + " inflating: Dataset/Training_data/Label/press_448.npy \n", + " inflating: Dataset/Training_data/Label/press_449.npy \n", + " inflating: Dataset/Training_data/Label/press_450.npy \n", + " inflating: Dataset/Training_data/Label/press_451.npy \n", + " inflating: Dataset/Training_data/Label/press_452.npy \n", + " inflating: 
Dataset/Training_data/Label/press_453.npy \n", + " inflating: Dataset/Training_data/Label/press_454.npy \n", + " inflating: Dataset/Training_data/Label/press_455.npy \n", + " inflating: Dataset/Training_data/Label/press_456.npy \n", + " inflating: Dataset/Training_data/Label/press_457.npy \n", + " inflating: Dataset/Training_data/Label/press_459.npy \n", + " inflating: Dataset/Training_data/Label/press_460.npy \n", + " inflating: Dataset/Training_data/Label/press_462.npy \n", + " inflating: Dataset/Training_data/Label/press_463.npy \n", + " inflating: Dataset/Training_data/Label/press_464.npy \n", + " inflating: Dataset/Training_data/Label/press_465.npy \n", + " inflating: Dataset/Training_data/Label/press_466.npy \n", + " inflating: Dataset/Training_data/Label/press_467.npy \n", + " inflating: Dataset/Training_data/Label/press_468.npy \n", + " inflating: Dataset/Training_data/Label/press_469.npy \n", + " inflating: Dataset/Training_data/Label/press_470.npy \n", + " inflating: Dataset/Training_data/Label/press_472.npy \n", + " inflating: Dataset/Training_data/Label/press_473.npy \n", + " inflating: Dataset/Training_data/Label/press_474.npy \n", + " inflating: Dataset/Training_data/Label/press_475.npy \n", + " inflating: Dataset/Training_data/Label/press_476.npy \n", + " inflating: Dataset/Training_data/Label/press_478.npy \n", + " inflating: Dataset/Training_data/Label/press_479.npy \n", + " inflating: Dataset/Training_data/Label/press_480.npy \n", + " inflating: Dataset/Training_data/Label/press_482.npy \n", + " inflating: Dataset/Training_data/Label/press_483.npy \n", + " inflating: Dataset/Training_data/Label/press_486.npy \n", + " inflating: Dataset/Training_data/Label/press_487.npy \n", + " inflating: Dataset/Training_data/Label/press_488.npy \n", + " inflating: Dataset/Training_data/Label/press_490.npy \n", + " inflating: Dataset/Training_data/Label/press_493.npy \n", + " inflating: Dataset/Training_data/Label/press_494.npy \n", + " inflating: Dataset/Training_data/Label/press_495.npy \n", + " inflating: Dataset/Training_data/Label/press_496.npy \n", + " inflating: Dataset/Training_data/Label/press_497.npy \n", + " inflating: Dataset/Training_data/Label/press_498.npy \n", + " inflating: Dataset/Training_data/Label/press_499.npy \n", + " inflating: Dataset/Training_data/Label/press_501.npy \n", + " inflating: Dataset/Training_data/Label/press_502.npy \n", + " inflating: Dataset/Training_data/Label/press_503.npy \n", + " inflating: Dataset/Training_data/Label/press_504.npy \n", + " inflating: Dataset/Training_data/Label/press_505.npy \n", + " inflating: Dataset/Training_data/Label/press_507.npy \n", + " inflating: Dataset/Training_data/Label/press_508.npy \n", + " inflating: Dataset/Training_data/Label/press_509.npy \n", + " inflating: Dataset/Training_data/Label/press_511.npy \n", + " inflating: Dataset/Training_data/Label/press_512.npy \n", + " inflating: Dataset/Training_data/Label/press_513.npy \n", + " inflating: Dataset/Training_data/Label/press_514.npy \n", + " inflating: Dataset/Training_data/Label/press_515.npy \n", + " inflating: Dataset/Training_data/Label/press_516.npy \n", + " inflating: Dataset/Training_data/Label/press_518.npy \n", + " inflating: Dataset/Training_data/Label/press_519.npy \n", + " inflating: Dataset/Training_data/Label/press_521.npy \n", + " inflating: Dataset/Training_data/Label/press_522.npy \n", + " inflating: Dataset/Training_data/Label/press_523.npy \n", + " inflating: Dataset/Training_data/Label/press_524.npy \n", + " inflating: 
Dataset/Training_data/Label/press_525.npy \n", + " inflating: Dataset/Training_data/Label/press_527.npy \n", + " inflating: Dataset/Training_data/Label/press_529.npy \n", + " inflating: Dataset/Training_data/Label/press_530.npy \n", + " inflating: Dataset/Training_data/Label/press_532.npy \n", + " inflating: Dataset/Training_data/Label/press_533.npy \n", + " inflating: Dataset/Training_data/Label/press_536.npy \n", + " inflating: Dataset/Training_data/Label/press_538.npy \n", + " inflating: Dataset/Training_data/Label/press_539.npy \n", + " inflating: Dataset/Training_data/Label/press_540.npy \n", + " inflating: Dataset/Training_data/Label/press_542.npy \n", + " inflating: Dataset/Training_data/Label/press_543.npy \n", + " inflating: Dataset/Training_data/Label/press_545.npy \n", + " inflating: Dataset/Training_data/Label/press_547.npy \n", + " inflating: Dataset/Training_data/Label/press_548.npy \n", + " inflating: Dataset/Training_data/Label/press_549.npy \n", + " inflating: Dataset/Training_data/Label/press_550.npy \n", + " inflating: Dataset/Training_data/Label/press_551.npy \n", + " inflating: Dataset/Training_data/Label/press_552.npy \n", + " inflating: Dataset/Training_data/Label/press_553.npy \n", + " inflating: Dataset/Training_data/Label/press_554.npy \n", + " inflating: Dataset/Training_data/Label/press_555.npy \n", + " inflating: Dataset/Training_data/Label/press_560.npy \n", + " inflating: Dataset/Training_data/Label/press_561.npy \n", + " inflating: Dataset/Training_data/Label/press_562.npy \n", + " inflating: Dataset/Training_data/Label/press_564.npy \n", + " inflating: Dataset/Training_data/Label/press_565.npy \n", + " inflating: Dataset/Training_data/Label/press_566.npy \n", + " inflating: Dataset/Training_data/Label/press_567.npy \n", + " inflating: Dataset/Training_data/Label/press_568.npy \n", + " inflating: Dataset/Training_data/Label/press_569.npy \n", + " inflating: Dataset/Training_data/Label/press_572.npy \n", + " inflating: Dataset/Training_data/Label/press_573.npy \n", + " inflating: Dataset/Training_data/Label/press_574.npy \n", + " inflating: Dataset/Training_data/Label/press_576.npy \n", + " inflating: Dataset/Training_data/Label/press_577.npy \n", + " inflating: Dataset/Training_data/Label/press_579.npy \n", + " inflating: Dataset/Training_data/Label/press_581.npy \n", + " inflating: Dataset/Training_data/Label/press_582.npy \n", + " inflating: Dataset/Training_data/Label/press_583.npy \n", + " inflating: Dataset/Training_data/Label/press_584.npy \n", + " inflating: Dataset/Training_data/Label/press_587.npy \n", + " inflating: Dataset/Training_data/Label/press_588.npy \n", + " inflating: Dataset/Training_data/Label/press_589.npy \n", + " inflating: Dataset/Training_data/Label/press_591.npy \n", + " inflating: Dataset/Training_data/Label/press_593.npy \n", + " inflating: Dataset/Training_data/Label/press_594.npy \n", + " inflating: Dataset/Training_data/Label/press_595.npy \n", + " inflating: Dataset/Training_data/Label/press_596.npy \n", + " inflating: Dataset/Training_data/Label/press_597.npy \n", + " inflating: Dataset/Training_data/Label/press_598.npy \n", + " inflating: Dataset/Training_data/Label/press_600.npy \n", + " inflating: Dataset/Training_data/Label/press_602.npy \n", + " inflating: Dataset/Training_data/Label/press_604.npy \n", + " inflating: Dataset/Training_data/Label/press_608.npy \n", + " inflating: Dataset/Training_data/Label/press_610.npy \n", + " inflating: Dataset/Training_data/Label/press_611.npy \n", + " inflating: 
Dataset/Training_data/Label/press_612.npy  \n", + "      inflating: Dataset/Training_data/Label/press_613.npy  \n", + "      inflating: Dataset/Training_data/Label/press_615.npy  \n", + "      inflating: Dataset/Training_data/Label/press_616.npy  \n", + "      inflating: Dataset/Training_data/Label/press_617.npy  \n", + "      inflating: Dataset/Training_data/Label/press_618.npy  \n", + "      inflating: Dataset/Training_data/Label/press_620.npy  \n", + "      inflating: Dataset/Training_data/Label/press_621.npy  \n", + "      inflating: Dataset/Training_data/Label/press_622.npy  \n", + "      inflating: Dataset/Training_data/Label/press_623.npy  \n", + "      inflating: Dataset/Training_data/Label/press_625.npy  \n", + "      inflating: Dataset/Training_data/Label/press_626.npy  \n", + "      inflating: Dataset/Training_data/Label/press_627.npy  \n", + "      inflating: Dataset/Training_data/Label/press_628.npy  \n", + "      inflating: Dataset/Training_data/Label/press_629.npy  \n", + "      inflating: Dataset/Training_data/Label/press_630.npy  \n", + "      inflating: Dataset/Training_data/Label/press_631.npy  \n", + "      inflating: Dataset/Training_data/Label/press_632.npy  \n", + "      inflating: Dataset/Training_data/Label/press_633.npy  \n", + "      inflating: Dataset/Training_data/Label/press_634.npy  \n", + "      inflating: Dataset/Training_data/Label/press_635.npy  \n", + "      inflating: Dataset/Training_data/Label/press_636.npy  \n", + "      inflating: Dataset/Training_data/Label/press_638.npy  \n", + "      inflating: Dataset/Training_data/Label/press_639.npy  \n", + "      inflating: Dataset/Training_data/Label/press_640.npy  \n", + "      inflating: Dataset/Training_data/Label/press_641.npy  \n", + "      inflating: Dataset/Training_data/Label/press_642.npy  \n", + "      inflating: Dataset/Training_data/Label/press_643.npy  \n", + "      inflating: Dataset/Training_data/Label/press_644.npy  \n", + "      inflating: Dataset/Training_data/Label/press_645.npy  \n", + "      inflating: Dataset/Training_data/Label/press_646.npy  \n", + "      inflating: Dataset/Training_data/Label/press_647.npy  \n", + "      inflating: Dataset/Training_data/Label/press_648.npy  \n", + "      inflating: Dataset/Training_data/Label/press_649.npy  \n", + "      inflating: Dataset/Training_data/Label/press_651.npy  \n", + "      inflating: Dataset/Training_data/Label/press_652.npy  \n", + "      inflating: Dataset/Training_data/Label/press_654.npy  \n", + "      inflating: Dataset/Training_data/Label/press_655.npy  \n", + "      inflating: Dataset/Training_data/Label/press_656.npy  \n", + "      inflating: Dataset/Training_data/Label/press_657.npy  \n", + "      inflating: Dataset/Training_data/train_pressure_min_std.txt  \n", + "      inflating: Dataset/Training_data/watertight_global_bounds.txt  \n", + "      inflating: Dataset/Training_data/watertight_meshes.txt  \n" + ] + } + ], + "source": [ + "#### Download Dataset.zip\n", + "!wget --header=\"Host: drive.usercontent.google.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Cookie: __Secure-ENID=12.SE=Yd0Bj-CLJ14fnd4qzdJHmwUs4B5zz46UaPC1cPJigNqqFV9PtM2CYyBpSbCkOyzUwzlEdZ1nZFf-igtGi7wSdJ_gqQSfQfh84r9egqFQAy9-GKayCRbdQKdera-2mkpuIT-c64CyR9vfNojM3hxZ9Dej-dGvtxlGjal9ttEHybw; __gsas=ID=ae0421b9a34b478c:T=1710758437:RT=1710758437:S=ALNI_MZP13R9ZOHbCzC0rgHSMrGXj6GCsg; HSID=A-4I-ZudDNUIB6EKH; SSID=A7v_1v9un6xAwVNku; APISID=ctK8IbLjeuDUmgys/AFnMSLWt9KddceDI6; 
SAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-1PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-3PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; SID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_kzuBV1TvOhAIC8VF1e9fpgACgYKATQSARQSFQHGX2Mi8LXUwWoIwNCEPU8Sy3mXUxoVAUF8yKqGXVfjTGz9gQal7nwGr4Pl0076; __Secure-1PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_PDa-DzVmbdGFPyxMQpk9_QACgYKAewSARQSFQHGX2MiAeee4fn0OWglWZfAygqkyBoVAUF8yKp-Sfmtnueimxc-0QbJRF9I0076; __Secure-3PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_g9IrMeU98APBo9Stp6wEnAACgYKAQASARQSFQHGX2MiFWtc9ucONXnpxBzlRdudEhoVAUF8yKoeZwCpJDnjfAFjGssHSUGm0076; NID=515=GQhY9nKKFCx3qFDjE0MA4ubjWNdef6xCIY_RfWOPWKEtyfBN3nAUl8WHI2VczjNQ4rVkj1XBAY8WNWHXyqSK10CfT4FxsFlPzrHIJpeTtm1nWRNBd9AAfBKJHz4XpESszntVUTE_59RklZuKo0vk1poReVi2da1PZKC3CTKH2Ll3gB5xuB9wf4bmq8ylVUuIROPJczr0XnCuUHV3qLdBvgy9_870b6UwOq1iOlIxFQFm01EZ4pqF4q1Ub3QRSWpEMLh4LSZFpJ5O255R5OV7krmEdDvH_sHoTEPZAg2PoEpwAyGK6Xp9qcLIlldgx5-5V86N8Wtb93uTlQuA_CFXb5_2eP3bgeX8txwlJ5SrldVjg9ctzYtBU2RwJKTSvdHfIG7lpOkg6XlkvDOcJpR3DihT_OlqnPn7drCAJpvVDv29hZn5XPMXaSrNdbG64OJ9urJEw5odEwsLYkkpC1vmlUcuoo52S5f6RQu0Z8kZiV8iRW6XIqHsSmQHunVaxk6xWCStUg; __Secure-1PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; __Secure-3PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; SIDCC=AKEyXzVI6aMX8lSDja86Yts3FBAtBzPCzVNgaX5BCz78NWsWzlT3yFWKUV7ZE46SFzE1GiBI-cHdTw; __Secure-1PSIDCC=AKEyXzUo4NQAwqqPMxP2eye-MFEbZmBIm_sZqRU1amttg0YoQkc8ZKSNXdHl5jNCMEbhrUHhS9-K; __Secure-3PSIDCC=AKEyXzWf2lIdmDLeZKpXSi9GytVQb6XudrYiNUBA5gW952YuLh8kL6T3IbBlu8zOTfGEcdUp5O1R\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=dc3aa13c-c3a9-458f-983a-8586798cb635&at=APZUnTX25XMxi-z-3wBcgR93IGsL%3A1719235792953\" -c -O 'Dataset.zip'\n", + "####解压Dataset.zip\n", + "!unzip Dataset.zip\n", + "####删除Dataset.zip\n", + "!rm Dataset.zip\n", + "####重命名Training_data文件名\n", + "!mv Dataset/Training_data Dataset/Trainset_track_A" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hjHXYCzgw2v5" + }, + "source": [ + "赛道二" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "collapsed": true, + "executionInfo": { + "elapsed": 231508, + "status": "ok", + "timestamp": 1720765946210, + "user": { + "displayName": "Yuanwei Bin", + "userId": "04820485600131748919" + }, + "user_tz": -480 + }, + "id": "B88H3zRrnfil", + "outputId": "b9243c63-5134-40a3-846b-910ab4a657b5" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2024-07-12 06:28:36-- https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\n", + "Resolving ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)... 103.235.47.176, 2409:8c04:1001:1203:0:ff:b0bb:4f27\n", + "Connecting to ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)|103.235.47.176|:443... connected.\n", + "HTTP request sent, awaiting response... 
200 OK\n", + "Length: 4740031429 (4.4G) [application/octet-stream]\n", + "Saving to: ‘train_track_B.zip’\n", + "\n", + "train_track_B.zip 100%[===================>] 4.41G 27.9MB/s in 2m 48s \n", + "\n", + "2024-07-12 06:31:25 (26.9 MB/s) - ‘train_track_B.zip’ saved [4740031429/4740031429]\n", + "\n", + "Archive: train_track_B.zip\n", + " inflating: area_0002.npy \n", + " inflating: area_0003.npy \n", + " inflating: area_0004.npy \n", + " inflating: area_0005.npy \n", + " inflating: area_0006.npy \n", + " inflating: area_0011.npy \n", + " inflating: area_0012.npy \n", + " inflating: area_0013.npy \n", + " inflating: area_0015.npy \n", + " inflating: area_0017.npy \n", + " inflating: area_0018.npy \n", + " inflating: area_0020.npy \n", + " inflating: area_0021.npy \n", + " inflating: area_0022.npy \n", + " inflating: area_0023.npy \n", + " inflating: area_0024.npy \n", + " inflating: area_0026.npy \n", + " inflating: area_0029.npy \n", + " inflating: area_0030.npy \n", + " inflating: area_0036.npy \n", + " inflating: area_0037.npy \n", + " inflating: area_0038.npy \n", + " inflating: area_0039.npy \n", + " inflating: area_0040.npy \n", + " inflating: area_0041.npy \n", + " inflating: area_0042.npy \n", + " inflating: area_0043.npy \n", + " inflating: area_0044.npy \n", + " inflating: area_0048.npy \n", + " inflating: area_0049.npy \n", + " inflating: area_0051.npy \n", + " inflating: area_0052.npy \n", + " inflating: area_0055.npy \n", + " inflating: area_0056.npy \n", + " inflating: area_0057.npy \n", + " inflating: area_0059.npy \n", + " inflating: area_0062.npy \n", + " inflating: area_0064.npy \n", + " inflating: area_0066.npy \n", + " inflating: area_0067.npy \n", + " inflating: area_0068.npy \n", + " inflating: area_0071.npy \n", + " inflating: area_0074.npy \n", + " inflating: area_0075.npy \n", + " inflating: area_0077.npy \n", + " inflating: area_0078.npy \n", + " inflating: area_0080.npy \n", + " inflating: area_0081.npy \n", + " inflating: area_0082.npy \n", + " inflating: area_0084.npy \n", + " inflating: area_0085.npy \n", + " inflating: area_0086.npy \n", + " inflating: area_0087.npy \n", + " inflating: area_0088.npy \n", + " inflating: area_0089.npy \n", + " inflating: area_0090.npy \n", + " inflating: area_0092.npy \n", + " inflating: area_0093.npy \n", + " inflating: area_0094.npy \n", + " inflating: area_0095.npy \n", + " inflating: area_0097.npy \n", + " inflating: area_0098.npy \n", + " inflating: area_0100.npy \n", + " inflating: area_0101.npy \n", + " inflating: area_0102.npy \n", + " inflating: area_0103.npy \n", + " inflating: area_0104.npy \n", + " inflating: area_0106.npy \n", + " inflating: area_0107.npy \n", + " inflating: area_0108.npy \n", + " inflating: area_0109.npy \n", + " inflating: area_0110.npy \n", + " inflating: area_0113.npy \n", + " inflating: area_0114.npy \n", + " inflating: area_0115.npy \n", + " inflating: area_0116.npy \n", + " inflating: area_0117.npy \n", + " inflating: area_0118.npy \n", + " inflating: area_0119.npy \n", + " inflating: area_0120.npy \n", + " inflating: area_0121.npy \n", + " inflating: area_0122.npy \n", + " inflating: area_0124.npy \n", + " inflating: area_0125.npy \n", + " inflating: area_0126.npy \n", + " inflating: area_0128.npy \n", + " inflating: area_0129.npy \n", + " inflating: area_0130.npy \n", + " inflating: area_0131.npy \n", + " inflating: area_0132.npy \n", + " inflating: area_0133.npy \n", + " inflating: area_0134.npy \n", + " inflating: area_0135.npy \n", + " inflating: area_0136.npy \n", + " inflating: 
area_0138.npy \n", + " inflating: area_0139.npy \n", + " inflating: area_0140.npy \n", + " inflating: area_0141.npy \n", + " inflating: area_0143.npy \n", + " inflating: area_0145.npy \n", + " inflating: area_0146.npy \n", + " inflating: area_0148.npy \n", + " inflating: area_0149.npy \n", + " inflating: area_0150.npy \n", + " inflating: area_0151.npy \n", + " inflating: area_0153.npy \n", + " inflating: area_0154.npy \n", + " inflating: area_0156.npy \n", + " inflating: area_0157.npy \n", + " inflating: area_0158.npy \n", + " inflating: area_0161.npy \n", + " inflating: area_0162.npy \n", + " inflating: area_0163.npy \n", + " inflating: area_0164.npy \n", + " inflating: area_0166.npy \n", + " inflating: area_0167.npy \n", + " inflating: area_0168.npy \n", + " inflating: area_0170.npy \n", + " inflating: area_0171.npy \n", + " inflating: area_0172.npy \n", + " inflating: area_0174.npy \n", + " inflating: area_0175.npy \n", + " inflating: area_0183.npy \n", + " inflating: area_0184.npy \n", + " inflating: area_0185.npy \n", + " inflating: area_0189.npy \n", + " inflating: area_0190.npy \n", + " inflating: area_0193.npy \n", + " inflating: area_0194.npy \n", + " inflating: area_0195.npy \n", + " inflating: area_0197.npy \n", + " inflating: area_0201.npy \n", + " inflating: area_0203.npy \n", + " inflating: area_0204.npy \n", + " inflating: area_0205.npy \n", + " inflating: area_0206.npy \n", + " inflating: area_0208.npy \n", + " inflating: area_0210.npy \n", + " inflating: area_0211.npy \n", + " inflating: area_0216.npy \n", + " inflating: area_0217.npy \n", + " inflating: area_0219.npy \n", + " inflating: area_0220.npy \n", + " inflating: area_0227.npy \n", + " inflating: area_0228.npy \n", + " inflating: area_0229.npy \n", + " inflating: area_0232.npy \n", + " inflating: area_0234.npy \n", + " inflating: area_0235.npy \n", + " inflating: area_0236.npy \n", + " inflating: area_0238.npy \n", + " inflating: area_0239.npy \n", + " inflating: area_0240.npy \n", + " inflating: area_0241.npy \n", + " inflating: area_0245.npy \n", + " inflating: area_0246.npy \n", + " inflating: area_0247.npy \n", + " inflating: area_0248.npy \n", + " inflating: area_0249.npy \n", + " inflating: area_0252.npy \n", + " inflating: area_0253.npy \n", + " inflating: area_0254.npy \n", + " inflating: area_0256.npy \n", + " inflating: area_0257.npy \n", + " inflating: area_0259.npy \n", + " inflating: area_0264.npy \n", + " inflating: area_0265.npy \n", + " inflating: area_0266.npy \n", + " inflating: area_0268.npy \n", + " inflating: area_0269.npy \n", + " inflating: area_0271.npy \n", + " inflating: area_0272.npy \n", + " inflating: area_0273.npy \n", + " inflating: area_0275.npy \n", + " inflating: area_0276.npy \n", + " inflating: area_0277.npy \n", + " inflating: area_0279.npy \n", + " inflating: area_0280.npy \n", + " inflating: area_0281.npy \n", + " inflating: area_0284.npy \n", + " inflating: area_0285.npy \n", + " inflating: area_0286.npy \n", + " inflating: area_0288.npy \n", + " inflating: area_0289.npy \n", + " inflating: area_0290.npy \n", + " inflating: area_0291.npy \n", + " inflating: area_0294.npy \n", + " inflating: area_0296.npy \n", + " inflating: area_0297.npy \n", + " inflating: area_0298.npy \n", + " inflating: area_0301.npy \n", + " inflating: area_0304.npy \n", + " inflating: area_0305.npy \n", + " inflating: area_0306.npy \n", + " inflating: area_0307.npy \n", + " inflating: area_0308.npy \n", + " inflating: area_0310.npy \n", + " inflating: area_0311.npy \n", + " inflating: area_0314.npy \n", 
+ " inflating: area_0315.npy \n", + " inflating: area_0316.npy \n", + " inflating: area_0320.npy \n", + " inflating: area_0321.npy \n", + " inflating: area_0323.npy \n", + " inflating: area_0324.npy \n", + " inflating: area_0327.npy \n", + " inflating: area_0330.npy \n", + " inflating: area_0331.npy \n", + " inflating: area_0332.npy \n", + " inflating: area_0333.npy \n", + " inflating: area_0334.npy \n", + " inflating: area_0337.npy \n", + " inflating: area_0338.npy \n", + " inflating: area_0339.npy \n", + " inflating: area_0340.npy \n", + " inflating: area_0341.npy \n", + " inflating: area_0342.npy \n", + " inflating: area_0343.npy \n", + " inflating: area_0344.npy \n", + " inflating: area_0345.npy \n", + " inflating: area_0346.npy \n", + " inflating: area_0348.npy \n", + " inflating: area_0349.npy \n", + " inflating: area_0351.npy \n", + " inflating: area_0352.npy \n", + " inflating: area_0353.npy \n", + " inflating: area_0354.npy \n", + " inflating: area_0356.npy \n", + " inflating: area_0357.npy \n", + " inflating: area_0359.npy \n", + " inflating: area_0360.npy \n", + " inflating: area_0361.npy \n", + " inflating: area_0363.npy \n", + " inflating: area_0364.npy \n", + " inflating: area_0365.npy \n", + " inflating: area_0366.npy \n", + " inflating: area_0367.npy \n", + " inflating: area_0368.npy \n", + " inflating: area_0369.npy \n", + " inflating: area_0371.npy \n", + " inflating: area_0373.npy \n", + " inflating: area_0376.npy \n", + " inflating: area_0377.npy \n", + " inflating: area_0378.npy \n", + " inflating: area_0379.npy \n", + " inflating: area_0381.npy \n", + " inflating: area_0382.npy \n", + " inflating: area_0383.npy \n", + " inflating: area_0384.npy \n", + " inflating: area_0385.npy \n", + " inflating: area_0387.npy \n", + " inflating: area_0388.npy \n", + " inflating: area_0389.npy \n", + " inflating: area_0392.npy \n", + " inflating: area_0393.npy \n", + " inflating: area_0394.npy \n", + " inflating: area_0395.npy \n", + " inflating: area_0396.npy \n", + " inflating: area_0398.npy \n", + " inflating: area_0399.npy \n", + " inflating: area_0400.npy \n", + " inflating: area_0401.npy \n", + " inflating: area_0402.npy \n", + " inflating: area_0403.npy \n", + " inflating: area_0404.npy \n", + " inflating: area_0405.npy \n", + " inflating: area_0407.npy \n", + " inflating: area_0408.npy \n", + " inflating: area_0409.npy \n", + " inflating: area_0410.npy \n", + " inflating: area_0411.npy \n", + " inflating: area_0413.npy \n", + " inflating: area_0416.npy \n", + " inflating: area_0417.npy \n", + " inflating: area_0421.npy \n", + " inflating: area_0422.npy \n", + " inflating: area_0423.npy \n", + " inflating: area_0424.npy \n", + " inflating: area_0425.npy \n", + " inflating: area_0428.npy \n", + " inflating: area_0429.npy \n", + " inflating: area_0430.npy \n", + " inflating: area_0431.npy \n", + " inflating: area_0432.npy \n", + " inflating: area_0435.npy \n", + " inflating: area_0438.npy \n", + " inflating: area_0439.npy \n", + " inflating: area_0441.npy \n", + " inflating: area_0444.npy \n", + " inflating: area_0445.npy \n", + " inflating: area_0449.npy \n", + " inflating: area_0450.npy \n", + " inflating: area_0451.npy \n", + " inflating: area_0452.npy \n", + " inflating: area_0453.npy \n", + " inflating: area_0456.npy \n", + " inflating: area_0457.npy \n", + " inflating: area_0458.npy \n", + " inflating: area_0459.npy \n", + " inflating: area_0460.npy \n", + " inflating: area_0461.npy \n", + " inflating: area_0463.npy \n", + " inflating: area_0464.npy \n", + " inflating: 
area_0465.npy \n", + " inflating: area_0467.npy \n", + " inflating: area_0469.npy \n", + " inflating: area_0471.npy \n", + " inflating: area_0472.npy \n", + " inflating: area_0474.npy \n", + " inflating: area_0475.npy \n", + " inflating: area_0477.npy \n", + " inflating: area_0478.npy \n", + " inflating: area_0479.npy \n", + " inflating: area_0480.npy \n", + " inflating: area_0481.npy \n", + " inflating: area_0482.npy \n", + " inflating: area_0485.npy \n", + " inflating: area_0486.npy \n", + " inflating: area_0487.npy \n", + " inflating: area_0488.npy \n", + " inflating: area_0489.npy \n", + " inflating: area_0492.npy \n", + " inflating: area_0493.npy \n", + " inflating: area_0494.npy \n", + " inflating: area_0497.npy \n", + " inflating: area_0498.npy \n", + " inflating: area_0499.npy \n", + " inflating: area_0501.npy \n", + " inflating: area_0502.npy \n", + " inflating: area_0503.npy \n", + " inflating: area_0504.npy \n", + " inflating: area_0507.npy \n", + " inflating: area_0508.npy \n", + " inflating: area_0509.npy \n", + " inflating: area_0513.npy \n", + " inflating: area_0514.npy \n", + " inflating: area_0515.npy \n", + " inflating: area_0517.npy \n", + " inflating: area_0518.npy \n", + " inflating: area_0519.npy \n", + " inflating: area_0520.npy \n", + " inflating: area_0521.npy \n", + " inflating: area_0522.npy \n", + " inflating: area_0523.npy \n", + " inflating: area_0524.npy \n", + " inflating: area_0525.npy \n", + " inflating: area_0526.npy \n", + " inflating: area_0527.npy \n", + " inflating: area_0528.npy \n", + " inflating: area_0529.npy \n", + " inflating: area_0530.npy \n", + " inflating: area_0531.npy \n", + " inflating: area_0534.npy \n", + " inflating: area_0535.npy \n", + " inflating: area_0536.npy \n", + " inflating: area_0538.npy \n", + " inflating: area_0541.npy \n", + " inflating: area_0542.npy \n", + " inflating: area_0544.npy \n", + " inflating: area_0545.npy \n", + " inflating: area_0546.npy \n", + " inflating: area_0547.npy \n", + " inflating: area_0550.npy \n", + " inflating: area_0551.npy \n", + " inflating: area_0553.npy \n", + " inflating: area_0555.npy \n", + " inflating: area_0557.npy \n", + " inflating: area_0558.npy \n", + " inflating: area_0561.npy \n", + " inflating: area_0563.npy \n", + " inflating: area_0564.npy \n", + " inflating: area_0565.npy \n", + " inflating: area_0567.npy \n", + " inflating: area_0568.npy \n", + " inflating: area_0571.npy \n", + " inflating: area_0574.npy \n", + " inflating: area_0576.npy \n", + " inflating: area_0579.npy \n", + " inflating: area_0580.npy \n", + " inflating: area_0582.npy \n", + " inflating: area_0584.npy \n", + " inflating: area_0585.npy \n", + " inflating: area_0588.npy \n", + " inflating: area_0589.npy \n", + " inflating: area_0590.npy \n", + " inflating: area_0591.npy \n", + " inflating: area_0592.npy \n", + " inflating: area_0593.npy \n", + " inflating: area_0594.npy \n", + " inflating: area_0595.npy \n", + " inflating: area_0596.npy \n", + " inflating: area_0597.npy \n", + " inflating: area_0598.npy \n", + " inflating: area_0600.npy \n", + " inflating: area_0602.npy \n", + " inflating: area_0605.npy \n", + " inflating: area_0608.npy \n", + " inflating: area_0609.npy \n", + " inflating: area_0611.npy \n", + " inflating: area_0612.npy \n", + " inflating: area_0613.npy \n", + " inflating: area_0614.npy \n", + " inflating: area_0618.npy \n", + " inflating: area_0619.npy \n", + " inflating: area_0620.npy \n", + " inflating: area_0621.npy \n", + " inflating: area_0622.npy \n", + " inflating: area_0623.npy \n", 
+ " inflating: area_0624.npy \n", + " inflating: area_0625.npy \n", + " inflating: area_0627.npy \n", + " inflating: area_0628.npy \n", + " inflating: area_0629.npy \n", + " inflating: area_0630.npy \n", + " inflating: area_0631.npy \n", + " inflating: area_0632.npy \n", + " inflating: area_0633.npy \n", + " inflating: area_0634.npy \n", + " inflating: area_0635.npy \n", + " inflating: area_0637.npy \n", + " inflating: area_0638.npy \n", + " inflating: area_0639.npy \n", + " inflating: area_0640.npy \n", + " inflating: area_0641.npy \n", + " inflating: area_0643.npy \n", + " inflating: area_0644.npy \n", + " inflating: area_0645.npy \n", + " inflating: area_0646.npy \n", + " inflating: area_0648.npy \n", + " inflating: area_0650.npy \n", + " inflating: area_0651.npy \n", + " inflating: area_0652.npy \n", + " inflating: area_0653.npy \n", + " inflating: area_0654.npy \n", + " inflating: area_0656.npy \n", + " inflating: area_0657.npy \n", + " inflating: area_0658.npy \n", + " inflating: area_0661.npy \n", + " inflating: area_0663.npy \n", + " inflating: area_0664.npy \n", + " inflating: area_0665.npy \n", + " inflating: area_0666.npy \n", + " inflating: area_0667.npy \n", + " inflating: area_0668.npy \n", + " inflating: area_0669.npy \n", + " inflating: area_0671.npy \n", + " inflating: area_0672.npy \n", + " inflating: area_0673.npy \n", + " inflating: area_0674.npy \n", + " inflating: area_0676.npy \n", + " inflating: area_0677.npy \n", + " inflating: area_0678.npy \n", + " inflating: area_0679.npy \n", + " inflating: area_0680.npy \n", + " inflating: area_0682.npy \n", + " inflating: area_0686.npy \n", + " inflating: area_0688.npy \n", + " inflating: area_0689.npy \n", + " inflating: area_0690.npy \n", + " inflating: area_0691.npy \n", + " inflating: area_0692.npy \n", + " inflating: area_0693.npy \n", + " inflating: area_0694.npy \n", + " inflating: area_0695.npy \n", + " inflating: area_0697.npy \n", + " inflating: area_0699.npy \n", + " inflating: area_0700.npy \n", + " inflating: area_0701.npy \n", + " inflating: area_0703.npy \n", + " inflating: area_0704.npy \n", + " inflating: area_0706.npy \n", + " inflating: area_0707.npy \n", + " inflating: area_0708.npy \n", + " inflating: area_0709.npy \n", + " inflating: area_0711.npy \n", + " inflating: area_0712.npy \n", + " inflating: area_0713.npy \n", + " inflating: area_0714.npy \n", + " inflating: area_0715.npy \n", + " inflating: area_0716.npy \n", + " inflating: area_0718.npy \n", + " inflating: area_0719.npy \n", + " inflating: area_0720.npy \n", + " inflating: area_0721.npy \n", + " inflating: area_0722.npy \n", + " inflating: area_0724.npy \n", + " inflating: area_0727.npy \n", + " inflating: area_0728.npy \n", + " inflating: area_0729.npy \n", + " inflating: area_0730.npy \n", + " inflating: area_0731.npy \n", + " inflating: area_0733.npy \n", + " inflating: area_0735.npy \n", + " inflating: area_0736.npy \n", + " inflating: area_0737.npy \n", + " inflating: area_0740.npy \n", + " inflating: area_0742.npy \n", + " inflating: area_0743.npy \n", + " inflating: area_0744.npy \n", + " inflating: area_0745.npy \n", + " inflating: centroid_0002.npy \n", + " inflating: centroid_0003.npy \n", + " inflating: centroid_0004.npy \n", + " inflating: centroid_0005.npy \n", + " inflating: centroid_0006.npy \n", + " inflating: centroid_0011.npy \n", + " inflating: centroid_0012.npy \n", + " inflating: centroid_0013.npy \n", + " inflating: centroid_0015.npy \n", + " inflating: centroid_0017.npy \n", + " inflating: centroid_0018.npy \n", + " 
inflating: centroid_0020.npy \n", + " inflating: centroid_0021.npy \n", + " inflating: centroid_0022.npy \n", + " inflating: centroid_0023.npy \n", + " inflating: centroid_0024.npy \n", + " inflating: centroid_0026.npy \n", + " inflating: centroid_0029.npy \n", + " inflating: centroid_0030.npy \n", + " inflating: centroid_0036.npy \n", + " inflating: centroid_0037.npy \n", + " inflating: centroid_0038.npy \n", + " inflating: centroid_0039.npy \n", + " inflating: centroid_0040.npy \n", + " inflating: centroid_0041.npy \n", + " inflating: centroid_0042.npy \n", + " inflating: centroid_0043.npy \n", + " inflating: centroid_0044.npy \n", + " inflating: centroid_0048.npy \n", + " inflating: centroid_0049.npy \n", + " inflating: centroid_0051.npy \n", + " inflating: centroid_0052.npy \n", + " inflating: centroid_0055.npy \n", + " inflating: centroid_0056.npy \n", + " inflating: centroid_0057.npy \n", + " inflating: centroid_0059.npy \n", + " inflating: centroid_0062.npy \n", + " inflating: centroid_0064.npy \n", + " inflating: centroid_0066.npy \n", + " inflating: centroid_0067.npy \n", + " inflating: centroid_0068.npy \n", + " inflating: centroid_0071.npy \n", + " inflating: centroid_0074.npy \n", + " inflating: centroid_0075.npy \n", + " inflating: centroid_0077.npy \n", + " inflating: centroid_0078.npy \n", + " inflating: centroid_0080.npy \n", + " inflating: centroid_0081.npy \n", + " inflating: centroid_0082.npy \n", + " inflating: centroid_0084.npy \n", + " inflating: centroid_0085.npy \n", + " inflating: centroid_0086.npy \n", + " inflating: centroid_0087.npy \n", + " inflating: centroid_0088.npy \n", + " inflating: centroid_0089.npy \n", + " inflating: centroid_0090.npy \n", + " inflating: centroid_0092.npy \n", + " inflating: centroid_0093.npy \n", + " inflating: centroid_0094.npy \n", + " inflating: centroid_0095.npy \n", + " inflating: centroid_0097.npy \n", + " inflating: centroid_0098.npy \n", + " inflating: centroid_0100.npy \n", + " inflating: centroid_0101.npy \n", + " inflating: centroid_0102.npy \n", + " inflating: centroid_0103.npy \n", + " inflating: centroid_0104.npy \n", + " inflating: centroid_0106.npy \n", + " inflating: centroid_0107.npy \n", + " inflating: centroid_0108.npy \n", + " inflating: centroid_0109.npy \n", + " inflating: centroid_0110.npy \n", + " inflating: centroid_0113.npy \n", + " inflating: centroid_0114.npy \n", + " inflating: centroid_0115.npy \n", + " inflating: centroid_0116.npy \n", + " inflating: centroid_0117.npy \n", + " inflating: centroid_0118.npy \n", + " inflating: centroid_0119.npy \n", + " inflating: centroid_0120.npy \n", + " inflating: centroid_0121.npy \n", + " inflating: centroid_0122.npy \n", + " inflating: centroid_0124.npy \n", + " inflating: centroid_0125.npy \n", + " inflating: centroid_0126.npy \n", + " inflating: centroid_0128.npy \n", + " inflating: centroid_0129.npy \n", + " inflating: centroid_0130.npy \n", + " inflating: centroid_0131.npy \n", + " inflating: centroid_0132.npy \n", + " inflating: centroid_0133.npy \n", + " inflating: centroid_0134.npy \n", + " inflating: centroid_0135.npy \n", + " inflating: centroid_0136.npy \n", + " inflating: centroid_0138.npy \n", + " inflating: centroid_0139.npy \n", + " inflating: centroid_0140.npy \n", + " inflating: centroid_0141.npy \n", + " inflating: centroid_0143.npy \n", + " inflating: centroid_0145.npy \n", + " inflating: centroid_0146.npy \n", + " inflating: centroid_0148.npy \n", + " inflating: centroid_0149.npy \n", + " inflating: centroid_0150.npy \n", + " inflating: 
centroid_0151.npy \n", + " inflating: centroid_0153.npy \n", + " inflating: centroid_0154.npy \n", + " inflating: centroid_0156.npy \n", + " inflating: centroid_0157.npy \n", + " inflating: centroid_0158.npy \n", + " inflating: centroid_0161.npy \n", + " inflating: centroid_0162.npy \n", + " inflating: centroid_0163.npy \n", + " inflating: centroid_0164.npy \n", + " inflating: centroid_0166.npy \n", + " inflating: centroid_0167.npy \n", + " inflating: centroid_0168.npy \n", + " inflating: centroid_0170.npy \n", + " inflating: centroid_0171.npy \n", + " inflating: centroid_0172.npy \n", + " inflating: centroid_0174.npy \n", + " inflating: centroid_0175.npy \n", + " inflating: centroid_0183.npy \n", + " inflating: centroid_0184.npy \n", + " inflating: centroid_0185.npy \n", + " inflating: centroid_0189.npy \n", + " inflating: centroid_0190.npy \n", + " inflating: centroid_0193.npy \n", + " inflating: centroid_0194.npy \n", + " inflating: centroid_0195.npy \n", + " inflating: centroid_0197.npy \n", + " inflating: centroid_0201.npy \n", + " inflating: centroid_0203.npy \n", + " inflating: centroid_0204.npy \n", + " inflating: centroid_0205.npy \n", + " inflating: centroid_0206.npy \n", + " inflating: centroid_0208.npy \n", + " inflating: centroid_0210.npy \n", + " inflating: centroid_0211.npy \n", + " inflating: centroid_0216.npy \n", + " inflating: centroid_0217.npy \n", + " inflating: centroid_0219.npy \n", + " inflating: centroid_0220.npy \n", + " inflating: centroid_0227.npy \n", + " inflating: centroid_0228.npy \n", + " inflating: centroid_0229.npy \n", + " inflating: centroid_0232.npy \n", + " inflating: centroid_0234.npy \n", + " inflating: centroid_0235.npy \n", + " inflating: centroid_0236.npy \n", + " inflating: centroid_0238.npy \n", + " inflating: centroid_0239.npy \n", + " inflating: centroid_0240.npy \n", + " inflating: centroid_0241.npy \n", + " inflating: centroid_0245.npy \n", + " inflating: centroid_0246.npy \n", + " inflating: centroid_0247.npy \n", + " inflating: centroid_0248.npy \n", + " inflating: centroid_0249.npy \n", + " inflating: centroid_0252.npy \n", + " inflating: centroid_0253.npy \n", + " inflating: centroid_0254.npy \n", + " inflating: centroid_0256.npy \n", + " inflating: centroid_0257.npy \n", + " inflating: centroid_0259.npy \n", + " inflating: centroid_0264.npy \n", + " inflating: centroid_0265.npy \n", + " inflating: centroid_0266.npy \n", + " inflating: centroid_0268.npy \n", + " inflating: centroid_0269.npy \n", + " inflating: centroid_0271.npy \n", + " inflating: centroid_0272.npy \n", + " inflating: centroid_0273.npy \n", + " inflating: centroid_0275.npy \n", + " inflating: centroid_0276.npy \n", + " inflating: centroid_0277.npy \n", + " inflating: centroid_0279.npy \n", + " inflating: centroid_0280.npy \n", + " inflating: centroid_0281.npy \n", + " inflating: centroid_0284.npy \n", + " inflating: centroid_0285.npy \n", + " inflating: centroid_0286.npy \n", + " inflating: centroid_0288.npy \n", + " inflating: centroid_0289.npy \n", + " inflating: centroid_0290.npy \n", + " inflating: centroid_0291.npy \n", + " inflating: centroid_0294.npy \n", + " inflating: centroid_0296.npy \n", + " inflating: centroid_0297.npy \n", + " inflating: centroid_0298.npy \n", + " inflating: centroid_0301.npy \n", + " inflating: centroid_0304.npy \n", + " inflating: centroid_0305.npy \n", + " inflating: centroid_0306.npy \n", + " inflating: centroid_0307.npy \n", + " inflating: centroid_0308.npy \n", + " inflating: centroid_0310.npy \n", + " inflating: centroid_0311.npy 
\n", + " inflating: centroid_0314.npy \n", + " inflating: centroid_0315.npy \n", + " inflating: centroid_0316.npy \n", + " inflating: centroid_0320.npy \n", + " inflating: centroid_0321.npy \n", + " inflating: centroid_0323.npy \n", + " inflating: centroid_0324.npy \n", + " inflating: centroid_0327.npy \n", + " inflating: centroid_0330.npy \n", + " inflating: centroid_0331.npy \n", + " inflating: centroid_0332.npy \n", + " inflating: centroid_0333.npy \n", + " inflating: centroid_0334.npy \n", + " inflating: centroid_0337.npy \n", + " inflating: centroid_0338.npy \n", + " inflating: centroid_0339.npy \n", + " inflating: centroid_0340.npy \n", + " inflating: centroid_0341.npy \n", + " inflating: centroid_0342.npy \n", + " inflating: centroid_0343.npy \n", + " inflating: centroid_0344.npy \n", + " inflating: centroid_0345.npy \n", + " inflating: centroid_0346.npy \n", + " inflating: centroid_0348.npy \n", + " inflating: centroid_0349.npy \n", + " inflating: centroid_0351.npy \n", + " inflating: centroid_0352.npy \n", + " inflating: centroid_0353.npy \n", + " inflating: centroid_0354.npy \n", + " inflating: centroid_0356.npy \n", + " inflating: centroid_0357.npy \n", + " inflating: centroid_0359.npy \n", + " inflating: centroid_0360.npy \n", + " inflating: centroid_0361.npy \n", + " inflating: centroid_0363.npy \n", + " inflating: centroid_0364.npy \n", + " inflating: centroid_0365.npy \n", + " inflating: centroid_0366.npy \n", + " inflating: centroid_0367.npy \n", + " inflating: centroid_0368.npy \n", + " inflating: centroid_0369.npy \n", + " inflating: centroid_0371.npy \n", + " inflating: centroid_0373.npy \n", + " inflating: centroid_0376.npy \n", + " inflating: centroid_0377.npy \n", + " inflating: centroid_0378.npy \n", + " inflating: centroid_0379.npy \n", + " inflating: centroid_0381.npy \n", + " inflating: centroid_0382.npy \n", + " inflating: centroid_0383.npy \n", + " inflating: centroid_0384.npy \n", + " inflating: centroid_0385.npy \n", + " inflating: centroid_0387.npy \n", + " inflating: centroid_0388.npy \n", + " inflating: centroid_0389.npy \n", + " inflating: centroid_0392.npy \n", + " inflating: centroid_0393.npy \n", + " inflating: centroid_0394.npy \n", + " inflating: centroid_0395.npy \n", + " inflating: centroid_0396.npy \n", + " inflating: centroid_0398.npy \n", + " inflating: centroid_0399.npy \n", + " inflating: centroid_0400.npy \n", + " inflating: centroid_0401.npy \n", + " inflating: centroid_0402.npy \n", + " inflating: centroid_0403.npy \n", + " inflating: centroid_0404.npy \n", + " inflating: centroid_0405.npy \n", + " inflating: centroid_0407.npy \n", + " inflating: centroid_0408.npy \n", + " inflating: centroid_0409.npy \n", + " inflating: centroid_0410.npy \n", + " inflating: centroid_0411.npy \n", + " inflating: centroid_0413.npy \n", + " inflating: centroid_0416.npy \n", + " inflating: centroid_0417.npy \n", + " inflating: centroid_0421.npy \n", + " inflating: centroid_0422.npy \n", + " inflating: centroid_0423.npy \n", + " inflating: centroid_0424.npy \n", + " inflating: centroid_0425.npy \n", + " inflating: centroid_0428.npy \n", + " inflating: centroid_0429.npy \n", + " inflating: centroid_0430.npy " + ] + } + ], + "source": [ + "####下载train_track_B.zip\n", + "!wget --header=\"Host: ai-studio-online.bj.bcebos.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0\" --header=\"Accept: 
text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Referer: https://aistudio.baidu.com/\" \"https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\" -c -O 'train_track_B.zip'\n", + "####解压到train_track_B\n", + "!mkdir -p train_track_B && unzip -o train_track_B.zip -d train_track_B/\n", + "####将train_track_B移到Dataset下\n", + "!mv train_track_B Dataset/Trainset_track_B\n", + "####删除train_track_B.zip\n", + "!rm train_track_B.zip" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "J6DqAplSlOd1" + }, + "source": [ + "# **额外数据导入**" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2RNGoSyovQpc" + }, + "source": [ + "导入赛道二额外数据\n", + "\n", + "数据来源:https://github.com/Mohamedelrefaie/DrivAerNet\n", + "\n", + "论文:Elrefaie, Mohamed, Angela Dai, and Faez Ahmed. \"Drivaernet: A parametric car dataset for data-driven aerodynamic design and graph-based drag prediction.\" arXiv preprint arXiv:2403.08055 (2024).\n", + "\n", + "特别说明:额外数据中仅使用了id>745的数据,未踩到比赛测试数据,数据预处理见“centroidPressureFromDrivAerNet.py”" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "###################### 第一部分:npy_dataset_id_to2090 ######################\n", + "!curl 'https://drive.usercontent.google.com/download?id=1HrlAFfxmvidh4nmo5OA97hn7T1w2D3W2&export=download&authuser=0&confirm=t&uuid=fa8ce3e5-8e5e-47f0-bc88-a5255bb1d205&at=APZUnTX5vfaU7XGNnWVJO-3d3NnS%3A1721008623935' \\\n", + " -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n", + " -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,de;q=0.7' \\\n", + " -H 'cookie: SID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_P1oOKYaH1BLrHCo3iS0ORAACgYKARkSARESFQHGX2MiWPVwYo8_viuWERHrHR1NdhoVAUF8yKruIZ_4e1ei2Ekggpa8y8tn0076; __Secure-1PSID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_N4Ewz7Dr3VAo_wx2LK2r8gACgYKATwSARESFQHGX2MiQ8tfWf2JaqKvrnUyQ45v3BoVAUF8yKq8rQ4q5pVZQVPFSWPsFpAL0076; __Secure-3PSID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_2Uk70D_VaOy1CAOn_-q02wACgYKAdMSARESFQHGX2Milt8gLaNlTE2LPd0GCHBY8RoVAUF8yKpwkzBbaFeOpamEruSyO4KB0076; HSID=Aze2C-18CaajQB5ZY; SSID=AcSGa_zv1UHXYWXjD; APISID=63spz5_adFMvKs3X/AnRG94TS2UMLY3-Jn; SAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; __Secure-1PAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; __Secure-3PAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; AEC=AVYB7cqCjRuyMKwjdDYgIZrykK3GrPvfMCHPw6aftmy-i3jUaK-D4PahKUY; NID=515=qmBvly9f2yPayL5i0BHfVfz668yIlBHAnFQ5N3qoP2gVxqUWPJSsLoUDxXX25G2CDW_FvJW3NOTZDjiCnxDnUQhYDsqIfwa82Zh6xWwzC43u0L25cZKfNGerS4-eyAuiQbBgUl8Rf3PYriRkKJoCMs25jhZ_9z0Wuvch5zkc5zJoL1w9NBRjbk-F6HB9GTuSTBEX-uBWQFQDH5sWuQxwsXtSGRZPQOkKPUNF-LWGa26P1eP2syJlBSOLXIEj2J30p-ahuTAwSgkisPo5YTPFh6gX9iBztpmEyyx9CF4OESec830Sxcu-DgkeNBIwGeKapNLRdg-hyUA6HE2T-tVWQEyhv0k-B5aq95ig81bpWRwDsfUCW8f22q1eZM4Js5nw6nND2hQwwfJ9RlA6orbvTm8DIZZf1ZvwDf1ooTLMA3TZeOv2PEvtlYIhBLX_JxcY_hIc_qvgYrRsJkwOKkykafiRw6F58W9lIQ; __Secure-1PSIDTS=sidts-CjEB4E2dkSE6NJDghNHP1m15TJhjhK_3gk6CbiZHkVbttFbJCYIB2BwTObN5Q77lMAC4EAA; 
__Secure-3PSIDTS=sidts-CjEB4E2dkSE6NJDghNHP1m15TJhjhK_3gk6CbiZHkVbttFbJCYIB2BwTObN5Q77lMAC4EAA; SIDCC=AKEyXzXx3xq69yhLhL6faYN-CslMkFWFN9k2VGCpxUBix4n1USKcT0K9k1kr9z4WWKxOSWsLX9pf; __Secure-1PSIDCC=AKEyXzVJ1otwaPYrh7P23QePj9Uci7oNGp8LoptrV4SuIpcFNgvHA4BhonL7WRlE7TTsE-FeNO0; __Secure-3PSIDCC=AKEyXzV2hkupH10qZ9QKzyDBrGprWcq6ovH37ctI_OU7s25hBteNzXDK3IHj0Im1nsm4DhqdYKY' \\\n", + " -H 'priority: u=0, i' \\\n", + " -H 'sec-ch-ua: \"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"' \\\n", + " -H 'sec-ch-ua-arch: \"x86\"' \\\n", + " -H 'sec-ch-ua-bitness: \"64\"' \\\n", + " -H 'sec-ch-ua-form-factors: \"Desktop\"' \\\n", + " -H 'sec-ch-ua-full-version: \"126.0.6478.127\"' \\\n", + " -H 'sec-ch-ua-full-version-list: \"Not/A)Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"126.0.6478.127\", \"Google Chrome\";v=\"126.0.6478.127\"' \\\n", + " -H 'sec-ch-ua-mobile: ?0' \\\n", + " -H 'sec-ch-ua-model: \"\"' \\\n", + " -H 'sec-ch-ua-platform: \"Windows\"' \\\n", + " -H 'sec-ch-ua-platform-version: \"15.0.0\"' \\\n", + " -H 'sec-ch-ua-wow64: ?0' \\\n", + " -H 'sec-fetch-dest: document' \\\n", + " -H 'sec-fetch-mode: navigate' \\\n", + " -H 'sec-fetch-site: cross-site' \\\n", + " -H 'sec-fetch-user: ?1' \\\n", + " -H 'upgrade-insecure-requests: 1' \\\n", + " -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36' \\\n", + " -H 'x-client-data: CK61yQEIlbbJAQimtskBCKmdygEIsvXKAQiWocsBCJz+zAEI7ZjNAQiFoM0BCKaizgEIg6jOAQ==' -o ./npy_dataset_id_to2090.tar.gz" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tZRuUMtu4Cji" + }, + "outputs": [], + "source": [ + "tar_path = './npy_dataset_id_to2090.tar.gz' # 压缩文件路径\n", + "extract_dir = './Dataset/Extra_Trainset_track_B' # 解压目录\n", + "!tar -xzf tar_path --strip-components=1 -C extract_dir\n", + "!rm tar_path" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "###################### 第二部分:npy_dataset_last_part ######################\n", + "!curl 'https://drive.usercontent.google.com/download?id=13LAHqAnjYpqcL33_PdBv71YiVTRi6Bgh&export=download&authuser=0&confirm=t&uuid=ecc5c29b-d551-4cae-955b-8bb3f5162f3f&at=APZUnTXV6vdX6nTQxVdPMOk0foy5%3A1721008953098' \\\n", + " -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n", + " -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,de;q=0.7' \\\n", + " -H 'cookie: SID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_P1oOKYaH1BLrHCo3iS0ORAACgYKARkSARESFQHGX2MiWPVwYo8_viuWERHrHR1NdhoVAUF8yKruIZ_4e1ei2Ekggpa8y8tn0076; __Secure-1PSID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_N4Ewz7Dr3VAo_wx2LK2r8gACgYKATwSARESFQHGX2MiQ8tfWf2JaqKvrnUyQ45v3BoVAUF8yKq8rQ4q5pVZQVPFSWPsFpAL0076; __Secure-3PSID=g.a000lgi8bRQnt3R6LXRzzKoIKj5y34h1bzpwUabi2WXA1TVd6wJ_2Uk70D_VaOy1CAOn_-q02wACgYKAdMSARESFQHGX2Milt8gLaNlTE2LPd0GCHBY8RoVAUF8yKpwkzBbaFeOpamEruSyO4KB0076; HSID=Aze2C-18CaajQB5ZY; SSID=AcSGa_zv1UHXYWXjD; APISID=63spz5_adFMvKs3X/AnRG94TS2UMLY3-Jn; SAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; __Secure-1PAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; __Secure-3PAPISID=lv3zXMc-MpsJeRZr/ARn4Lxk4FrjjMbdSd; AEC=AVYB7cqCjRuyMKwjdDYgIZrykK3GrPvfMCHPw6aftmy-i3jUaK-D4PahKUY; 
NID=515=qmBvly9f2yPayL5i0BHfVfz668yIlBHAnFQ5N3qoP2gVxqUWPJSsLoUDxXX25G2CDW_FvJW3NOTZDjiCnxDnUQhYDsqIfwa82Zh6xWwzC43u0L25cZKfNGerS4-eyAuiQbBgUl8Rf3PYriRkKJoCMs25jhZ_9z0Wuvch5zkc5zJoL1w9NBRjbk-F6HB9GTuSTBEX-uBWQFQDH5sWuQxwsXtSGRZPQOkKPUNF-LWGa26P1eP2syJlBSOLXIEj2J30p-ahuTAwSgkisPo5YTPFh6gX9iBztpmEyyx9CF4OESec830Sxcu-DgkeNBIwGeKapNLRdg-hyUA6HE2T-tVWQEyhv0k-B5aq95ig81bpWRwDsfUCW8f22q1eZM4Js5nw6nND2hQwwfJ9RlA6orbvTm8DIZZf1ZvwDf1ooTLMA3TZeOv2PEvtlYIhBLX_JxcY_hIc_qvgYrRsJkwOKkykafiRw6F58W9lIQ; __Secure-1PSIDTS=sidts-CjEB4E2dkSE6NJDghNHP1m15TJhjhK_3gk6CbiZHkVbttFbJCYIB2BwTObN5Q77lMAC4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkSE6NJDghNHP1m15TJhjhK_3gk6CbiZHkVbttFbJCYIB2BwTObN5Q77lMAC4EAA; SIDCC=AKEyXzXxsM9syaIZNUoIGb2zN2lXB7rG8BpEsm9SJz87gq5gsNxwFe-lG63U6c74LYoXvbopHG0Z; __Secure-1PSIDCC=AKEyXzVyhAwFNnt6SkT2HO83gFjVIK8IxNxnondGM0u1COHMa-GBI0MSe0gjD1pG0cphSFwVl9s; __Secure-3PSIDCC=AKEyXzURcwg9KPPeDlIcZSKmqW6O9S42naV7FjBARhx6Zv3vCYTg-EdHEYgQl6tri4LgM_w-DFc' \\\n", + " -H 'priority: u=0, i' \\\n", + " -H 'sec-ch-ua: \"Not/A)Brand\";v=\"8\", \"Chromium\";v=\"126\", \"Google Chrome\";v=\"126\"' \\\n", + " -H 'sec-ch-ua-arch: \"x86\"' \\\n", + " -H 'sec-ch-ua-bitness: \"64\"' \\\n", + " -H 'sec-ch-ua-form-factors: \"Desktop\"' \\\n", + " -H 'sec-ch-ua-full-version: \"126.0.6478.127\"' \\\n", + " -H 'sec-ch-ua-full-version-list: \"Not/A)Brand\";v=\"8.0.0.0\", \"Chromium\";v=\"126.0.6478.127\", \"Google Chrome\";v=\"126.0.6478.127\"' \\\n", + " -H 'sec-ch-ua-mobile: ?0' \\\n", + " -H 'sec-ch-ua-model: \"\"' \\\n", + " -H 'sec-ch-ua-platform: \"Windows\"' \\\n", + " -H 'sec-ch-ua-platform-version: \"15.0.0\"' \\\n", + " -H 'sec-ch-ua-wow64: ?0' \\\n", + " -H 'sec-fetch-dest: document' \\\n", + " -H 'sec-fetch-mode: navigate' \\\n", + " -H 'sec-fetch-site: cross-site' \\\n", + " -H 'sec-fetch-user: ?1' \\\n", + " -H 'upgrade-insecure-requests: 1' \\\n", + " -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36' \\\n", + " -H 'x-client-data: CK61yQEIlbbJAQimtskBCKmdygEIsvXKAQiWocsBCJz+zAEI7ZjNAQiFoM0BCKaizgEIg6jOAQ==' -o ./npy_dataset_last_part.tar.gz" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3fu-nayY3YZ-" + }, + "outputs": [], + "source": [ + "tar_path = './npy_dataset_last_part.tar.gz' # 压缩文件路径\n", + "extract_dir = './Dataset/Extra_Trainset_track_B' # 解压目录\n", + "!tar -xzf tar_path --strip-components=1 -C extract_dir\n", + "!rm tar_path" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ml0rN3NHvT8t" + }, + "source": [ + "导入PointBERT预训练模型\n", + "\n", + "数据来源:https://github.com/salesforce/ULIP\n", + "\n", + "论文:Xue, Le, et al. \"Ulip-2: Towards scalable multimodal pre-training for 3d understanding.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!wget https://paddle-org.bj.bcebos.com/paddlescience/models/contrib/IJCAI_2024_ckpts.tar.gz\n", + "!tar -zxvf IJCAI_2024_ckpts.tar.gz\n", + "! 
mv ckpts/bju/geom/ckpt/checkpoint_pointbert.pdparams ./geom/ckpt/checkpoint_pointbert.pdparams" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "machine_shape": "hm", + "name": "", + "version": "" + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/jointContribution/IJCAI_2024/bju/geom/models/ULIP_models.py b/jointContribution/IJCAI_2024/bju/geom/models/ULIP_models.py index de5557f091..39f8442df2 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/ULIP_models.py +++ b/jointContribution/IJCAI_2024/bju/geom/models/ULIP_models.py @@ -1,302 +1,302 @@ -import numpy as np -import paddle -import yaml -from easydict import EasyDict - -# from paddle.vision.models import vision_transformer -from paddleclas import ViT_base_patch16_224 - - -class LayerNorm(paddle.nn.LayerNorm): - """Subclass paddle's LayerNorm to handle fp16.""" - - def forward(self, x: paddle.Tensor): - orig_type = x.dtype - ret = super().forward(x.astype("float32")) - return ret.astype(orig_type) - - -class QuickGELU(paddle.nn.Layer): - def forward(self, x: paddle.Tensor): - return x * paddle.nn.functional.sigmoid(x=1.702 * x) - - -class ResidualAttentionBlock(paddle.nn.Layer): - def __init__(self, d_model: int, n_head: int, attn_mask: paddle.Tensor = None): - super().__init__() - self.attn = paddle.nn.MultiHeadAttention(d_model, n_head) - self.ln_1 = LayerNorm(d_model) - self.mlp = paddle.nn.Sequential( - *[ - ( - "c_fc", - paddle.nn.Linear(in_features=d_model, out_features=d_model * 4), - ), - ("gelu", QuickGELU()), - ( - "c_proj", - paddle.nn.Linear(in_features=d_model * 4, out_features=d_model), - ), - ] - ) - self.ln_2 = LayerNorm(d_model) - self.attn_mask = attn_mask - - def attention(self, x: paddle.Tensor): - self.attn_mask = ( - self.attn_mask.to(dtype=x.dtype, device=x.place) - if self.attn_mask is not None - else None - ) - return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] - - def forward(self, x: paddle.Tensor): - x = x + self.attention(self.ln_1(x)) - x = x + self.mlp(self.ln_2(x)) - return x - - -class Transformer(paddle.nn.Layer): - def __init__( - self, width: int, layers: int, heads: int, attn_mask: paddle.Tensor = None - ): - super().__init__() - self.width = width - self.layers = layers - self.resblocks = paddle.nn.Sequential( - *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)] - ) - - def forward(self, x: paddle.Tensor): - return self.resblocks(x) - - -class ULIP_WITH_IMAGE(paddle.nn.Layer): - def __init__(self, point_encoder, **kwargs): - super().__init__() - kwargs = EasyDict(kwargs) - self.context_length = kwargs.context_length - self.vision_width = kwargs.vision_width - self.visual = kwargs.vision_model - self.transformer = Transformer( - width=kwargs.transformer_width, - layers=kwargs.transformer_layers, - heads=kwargs.transformer_heads, - attn_mask=self.build_attention_mask(), - ) - self.vocab_size = kwargs.vocab_size - self.token_embedding = paddle.nn.Embedding( - num_embeddings=kwargs.vocab_size, embedding_dim=kwargs.transformer_width - ) - out_0 = paddle.create_parameter( - shape=paddle.empty( - shape=[self.context_length, kwargs.transformer_width] - ).shape, - dtype=paddle.empty(shape=[self.context_length, kwargs.transformer_width]) - .numpy() - .dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.empty(shape=[self.context_length, kwargs.transformer_width]) - ), - ) - 
out_0.stop_gradient = not True - self.positional_embedding = out_0 - self.ln_final = LayerNorm(kwargs.transformer_width) - out_1 = paddle.create_parameter( - shape=paddle.empty(shape=[kwargs.vision_width, kwargs.embed_dim]).shape, - dtype=paddle.empty(shape=[kwargs.vision_width, kwargs.embed_dim]) - .numpy() - .dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.empty(shape=[kwargs.vision_width, kwargs.embed_dim]) - ), - ) - out_1.stop_gradient = not True - self.image_projection = out_1 - out_2 = paddle.create_parameter( - shape=paddle.empty( - shape=[kwargs.transformer_width, kwargs.embed_dim] - ).shape, - dtype=paddle.empty(shape=[kwargs.transformer_width, kwargs.embed_dim]) - .numpy() - .dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.empty(shape=[kwargs.transformer_width, kwargs.embed_dim]) - ), - ) - out_2.stop_gradient = not True - self.text_projection = out_2 - out_3 = paddle.create_parameter( - shape=(paddle.ones(shape=[]) * np.log(1 / 0.07)).shape, - dtype=(paddle.ones(shape=[]) * np.log(1 / 0.07)).numpy().dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.ones(shape=[]) * np.log(1 / 0.07) - ), - ) - out_3.stop_gradient = not True - self.logit_scale = out_3 - self.initialize_parameters() - self.point_encoder = point_encoder - out_4 = paddle.create_parameter( - shape=paddle.empty(shape=[kwargs.pc_feat_dims, 512]).shape, - dtype=paddle.empty(shape=[kwargs.pc_feat_dims, 512]).numpy().dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.empty(shape=[kwargs.pc_feat_dims, 512]) - ), - ) - out_4.stop_gradient = not True - self.pc_projection = out_4 - init_Normal = paddle.nn.initializer.Normal(std=512**-0.5) - init_Normal(self.pc_projection) - - def encode_image(self, image): - x = self.visual(image) - x = x @ self.image_projection - return x - - def encode_text(self, text): - x = self.token_embedding(text) - x = x + self.positional_embedding - x = x.transpose(perm=[1, 0, 2]) - x = self.transformer(x) - x = x.transpose(perm=[1, 0, 2]) - x = self.ln_final(x) - x = ( - x[paddle.arange(end=tuple(x.shape)[0]), text.argmax(axis=-1)] - @ self.text_projection - ) - return x - - def build_attention_mask(self): - mask = paddle.empty(shape=[self.context_length, self.context_length]) - mask.fill_(value=float("-inf")) - mask.triu_(diagonal=1) - return mask - - def initialize_parameters(self): - init_Normal = paddle.nn.initializer.Normal(std=0.02) - init_Normal(self.token_embedding.weight) - init_Normal = paddle.nn.initializer.Normal(std=0.01) - init_Normal(self.positional_embedding) - proj_std = ( - self.transformer.width**-0.5 * (2 * self.transformer.layers) ** -0.5 - ) - attn_std = self.transformer.width**-0.5 - fc_std = (2 * self.transformer.width) ** -0.5 - for block in self.transformer.resblocks: - init_Normal = paddle.nn.initializer.Normal(std=attn_std) - init_Normal(block.attn.q_proj.weight) - init_Normal = paddle.nn.initializer.Normal(std=proj_std) - init_Normal(block.attn.out_proj.weight) - init_Normal = paddle.nn.initializer.Normal(std=fc_std) - init_Normal(block.mlp.c_fc.weight) - init_Normal = paddle.nn.initializer.Normal(std=proj_std) - init_Normal(block.mlp.c_proj.weight) - init_Normal = paddle.nn.initializer.Normal(std=self.vision_width**-0.5) - init_Normal(self.image_projection) - init_Normal = paddle.nn.initializer.Normal(std=self.transformer.width**-0.5) - init_Normal(self.text_projection) - - def encode_pc(self, pc): - pc_feat = self.point_encoder(pc) - pc_embed = pc_feat @ self.pc_projection - return pc_embed - - 
def forward(self, pc, text, image=None): - text_embed_all = [] - for i in range(tuple(text.shape)[0]): - text_for_one_sample = text[i] - text_embed = self.encode_text(text_for_one_sample) - text_embed = text_embed / text_embed.norm(axis=-1, keepdim=True) - text_embed = text_embed.mean(axis=0) - text_embed = text_embed / text_embed.norm(axis=-1, keepdim=True) - text_embed_all.append(text_embed) - text_embed_all = paddle.stack(x=text_embed_all) - pc_embed = self.encode_pc(pc) - if image is not None: - image_embed = self.encode_image(image) - return { - "text_embed": text_embed_all, - "pc_embed": pc_embed, - "image_embed": image_embed, - "logit_scale": self.logit_scale.exp(), - } - else: - return { - "text_embed": text_embed_all, - "pc_embed": pc_embed, - "logit_scale": self.logit_scale.exp(), - } - - -def ULIP_PointBERT(args): - # vision_model = timm.create_model('vit_base_patch16_224', num_classes=0) - vision_model = ViT_base_patch16_224(pretrained=True, num_classes=0) - from geom.models.pointbert.point_encoder import PointTransformer - - config_addr = "./geom/models/pointbert/PointTransformer_8192point.yaml" - - def merge_new_config(config, new_config): - for key, val in new_config.items(): - if not isinstance(val, dict): - if key == "_base_": - with open(new_config["_base_"], "r") as f: - try: - val = yaml.load(f, Loader=yaml.FullLoader) - except Exception: - val = yaml.load(f) - config[key] = EasyDict() - merge_new_config(config[key], val) - else: - config[key] = val - continue - if key not in config: - config[key] = EasyDict() - merge_new_config(config[key], val) - return config - - def cfg_from_yaml_file(cfg_file): - config = EasyDict() - with open(cfg_file, "r") as f: - new_config = yaml.load(f, Loader=yaml.FullLoader) - merge_new_config(config=config, new_config=new_config) - return config - - config = cfg_from_yaml_file(config_addr) - point_encoder = PointTransformer(config.model, args=args) - pc_feat_dims = 768 - model = ULIP_WITH_IMAGE( - embed_dim=512, - vision_width=768, - point_encoder=point_encoder, - vision_model=vision_model, - context_length=77, - vocab_size=49408, - transformer_width=512, - transformer_heads=8, - transformer_layers=12, - pc_feat_dims=pc_feat_dims, - ) - return model - - -def ULIP_PN_NEXT(args): - # vision_model = timm.create_model('vit_base_patch16_224', num_classes=0) - vision_model = ViT_base_patch16_224(pretrained=True, num_classes=0) - from geom.models.pointnext.pointnext import PointNEXT - - point_encoder = PointNEXT() - pc_feat_dims = 256 - model = ULIP_WITH_IMAGE( - embed_dim=512, - vision_width=768, - point_encoder=point_encoder, - vision_model=vision_model, - context_length=77, - vocab_size=49408, - transformer_width=512, - transformer_heads=8, - transformer_layers=12, - pc_feat_dims=pc_feat_dims, - ) - return model +import numpy as np +import paddle +import yaml +from easydict import EasyDict + +# from paddle.vision.models import vision_transformer +from paddleclas import ViT_base_patch16_224 + + +class LayerNorm(paddle.nn.LayerNorm): + """Subclass paddle's LayerNorm to handle fp16.""" + + def forward(self, x: paddle.Tensor): + orig_type = x.dtype + ret = super().forward(x.astype("float32")) + return ret.astype(orig_type) + + +class QuickGELU(paddle.nn.Layer): + def forward(self, x: paddle.Tensor): + return x * paddle.nn.functional.sigmoid(x=1.702 * x) + + +class ResidualAttentionBlock(paddle.nn.Layer): + def __init__(self, d_model: int, n_head: int, attn_mask: paddle.Tensor = None): + super().__init__() + self.attn = 
paddle.nn.MultiHeadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = paddle.nn.Sequential( + *[ + ( + "c_fc", + paddle.nn.Linear(in_features=d_model, out_features=d_model * 4), + ), + ("gelu", QuickGELU()), + ( + "c_proj", + paddle.nn.Linear(in_features=d_model * 4, out_features=d_model), + ), + ] + ) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + def attention(self, x: paddle.Tensor): + self.attn_mask = ( + self.attn_mask.to(dtype=x.dtype, device=x.place) + if self.attn_mask is not None + else None + ) + return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] + + def forward(self, x: paddle.Tensor): + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(paddle.nn.Layer): + def __init__( + self, width: int, layers: int, heads: int, attn_mask: paddle.Tensor = None + ): + super().__init__() + self.width = width + self.layers = layers + self.resblocks = paddle.nn.Sequential( + *[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)] + ) + + def forward(self, x: paddle.Tensor): + return self.resblocks(x) + + +class ULIP_WITH_IMAGE(paddle.nn.Layer): + def __init__(self, point_encoder, **kwargs): + super().__init__() + kwargs = EasyDict(kwargs) + self.context_length = kwargs.context_length + self.vision_width = kwargs.vision_width + self.visual = kwargs.vision_model + self.transformer = Transformer( + width=kwargs.transformer_width, + layers=kwargs.transformer_layers, + heads=kwargs.transformer_heads, + attn_mask=self.build_attention_mask(), + ) + self.vocab_size = kwargs.vocab_size + self.token_embedding = paddle.nn.Embedding( + num_embeddings=kwargs.vocab_size, embedding_dim=kwargs.transformer_width + ) + out_0 = paddle.create_parameter( + shape=paddle.empty( + shape=[self.context_length, kwargs.transformer_width] + ).shape, + dtype=paddle.empty(shape=[self.context_length, kwargs.transformer_width]) + .numpy() + .dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.empty(shape=[self.context_length, kwargs.transformer_width]) + ), + ) + out_0.stop_gradient = not True + self.positional_embedding = out_0 + self.ln_final = LayerNorm(kwargs.transformer_width) + out_1 = paddle.create_parameter( + shape=paddle.empty(shape=[kwargs.vision_width, kwargs.embed_dim]).shape, + dtype=paddle.empty(shape=[kwargs.vision_width, kwargs.embed_dim]) + .numpy() + .dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.empty(shape=[kwargs.vision_width, kwargs.embed_dim]) + ), + ) + out_1.stop_gradient = not True + self.image_projection = out_1 + out_2 = paddle.create_parameter( + shape=paddle.empty( + shape=[kwargs.transformer_width, kwargs.embed_dim] + ).shape, + dtype=paddle.empty(shape=[kwargs.transformer_width, kwargs.embed_dim]) + .numpy() + .dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.empty(shape=[kwargs.transformer_width, kwargs.embed_dim]) + ), + ) + out_2.stop_gradient = not True + self.text_projection = out_2 + out_3 = paddle.create_parameter( + shape=(paddle.ones(shape=[]) * np.log(1 / 0.07)).shape, + dtype=(paddle.ones(shape=[]) * np.log(1 / 0.07)).numpy().dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.ones(shape=[]) * np.log(1 / 0.07) + ), + ) + out_3.stop_gradient = not True + self.logit_scale = out_3 + self.initialize_parameters() + self.point_encoder = point_encoder + out_4 = paddle.create_parameter( + shape=paddle.empty(shape=[kwargs.pc_feat_dims, 512]).shape, + 
dtype=paddle.empty(shape=[kwargs.pc_feat_dims, 512]).numpy().dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.empty(shape=[kwargs.pc_feat_dims, 512]) + ), + ) + out_4.stop_gradient = not True + self.pc_projection = out_4 + init_Normal = paddle.nn.initializer.Normal(std=512**-0.5) + init_Normal(self.pc_projection) + + def encode_image(self, image): + x = self.visual(image) + x = x @ self.image_projection + return x + + def encode_text(self, text): + x = self.token_embedding(text) + x = x + self.positional_embedding + x = x.transpose(perm=[1, 0, 2]) + x = self.transformer(x) + x = x.transpose(perm=[1, 0, 2]) + x = self.ln_final(x) + x = ( + x[paddle.arange(end=tuple(x.shape)[0]), text.argmax(axis=-1)] + @ self.text_projection + ) + return x + + def build_attention_mask(self): + mask = paddle.empty(shape=[self.context_length, self.context_length]) + mask.fill_(value=float("-inf")) + mask.triu_(diagonal=1) + return mask + + def initialize_parameters(self): + init_Normal = paddle.nn.initializer.Normal(std=0.02) + init_Normal(self.token_embedding.weight) + init_Normal = paddle.nn.initializer.Normal(std=0.01) + init_Normal(self.positional_embedding) + proj_std = ( + self.transformer.width**-0.5 * (2 * self.transformer.layers) ** -0.5 + ) + attn_std = self.transformer.width**-0.5 + fc_std = (2 * self.transformer.width) ** -0.5 + for block in self.transformer.resblocks: + init_Normal = paddle.nn.initializer.Normal(std=attn_std) + init_Normal(block.attn.q_proj.weight) + init_Normal = paddle.nn.initializer.Normal(std=proj_std) + init_Normal(block.attn.out_proj.weight) + init_Normal = paddle.nn.initializer.Normal(std=fc_std) + init_Normal(block.mlp.c_fc.weight) + init_Normal = paddle.nn.initializer.Normal(std=proj_std) + init_Normal(block.mlp.c_proj.weight) + init_Normal = paddle.nn.initializer.Normal(std=self.vision_width**-0.5) + init_Normal(self.image_projection) + init_Normal = paddle.nn.initializer.Normal(std=self.transformer.width**-0.5) + init_Normal(self.text_projection) + + def encode_pc(self, pc): + pc_feat = self.point_encoder(pc) + pc_embed = pc_feat @ self.pc_projection + return pc_embed + + def forward(self, pc, text, image=None): + text_embed_all = [] + for i in range(tuple(text.shape)[0]): + text_for_one_sample = text[i] + text_embed = self.encode_text(text_for_one_sample) + text_embed = text_embed / text_embed.norm(axis=-1, keepdim=True) + text_embed = text_embed.mean(axis=0) + text_embed = text_embed / text_embed.norm(axis=-1, keepdim=True) + text_embed_all.append(text_embed) + text_embed_all = paddle.stack(x=text_embed_all) + pc_embed = self.encode_pc(pc) + if image is not None: + image_embed = self.encode_image(image) + return { + "text_embed": text_embed_all, + "pc_embed": pc_embed, + "image_embed": image_embed, + "logit_scale": self.logit_scale.exp(), + } + else: + return { + "text_embed": text_embed_all, + "pc_embed": pc_embed, + "logit_scale": self.logit_scale.exp(), + } + + +def ULIP_PointBERT(args): + # vision_model = timm.create_model('vit_base_patch16_224', num_classes=0) + vision_model = ViT_base_patch16_224(pretrained=True, num_classes=0) + from geom.models.pointbert.point_encoder import PointTransformer + + config_addr = "./geom/models/pointbert/PointTransformer_8192point.yaml" + + def merge_new_config(config, new_config): + for key, val in new_config.items(): + if not isinstance(val, dict): + if key == "_base_": + with open(new_config["_base_"], "r") as f: + try: + val = yaml.load(f, Loader=yaml.FullLoader) + except Exception: + val = yaml.load(f) + 
config[key] = EasyDict() + merge_new_config(config[key], val) + else: + config[key] = val + continue + if key not in config: + config[key] = EasyDict() + merge_new_config(config[key], val) + return config + + def cfg_from_yaml_file(cfg_file): + config = EasyDict() + with open(cfg_file, "r") as f: + new_config = yaml.load(f, Loader=yaml.FullLoader) + merge_new_config(config=config, new_config=new_config) + return config + + config = cfg_from_yaml_file(config_addr) + point_encoder = PointTransformer(config.model, args=args) + pc_feat_dims = 768 + model = ULIP_WITH_IMAGE( + embed_dim=512, + vision_width=768, + point_encoder=point_encoder, + vision_model=vision_model, + context_length=77, + vocab_size=49408, + transformer_width=512, + transformer_heads=8, + transformer_layers=12, + pc_feat_dims=pc_feat_dims, + ) + return model + + +def ULIP_PN_NEXT(args): + # vision_model = timm.create_model('vit_base_patch16_224', num_classes=0) + vision_model = ViT_base_patch16_224(pretrained=True, num_classes=0) + from geom.models.pointnext.pointnext import PointNEXT + + point_encoder = PointNEXT() + pc_feat_dims = 256 + model = ULIP_WITH_IMAGE( + embed_dim=512, + vision_width=768, + point_encoder=point_encoder, + vision_model=vision_model, + context_length=77, + vocab_size=49408, + transformer_width=512, + transformer_heads=8, + transformer_layers=12, + pc_feat_dims=pc_feat_dims, + ) + return model diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/PointTransformer_8192point.yaml b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/PointTransformer_8192point.yaml index a48bf61a6c..48bfd7900b 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/PointTransformer_8192point.yaml +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/PointTransformer_8192point.yaml @@ -1,32 +1,32 @@ -optimizer : { - type: AdamW, - kwargs: { - lr : 0.0005, - weight_decay : 0.05 -}} - -scheduler: { - type: CosLR, - kwargs: { - epochs: 300, - initial_epochs : 10 -}} - -model : { - NAME: PointTransformer, - trans_dim: 384, - depth: 12, - drop_path_rate: 0.1, - cls_dim: 40, - num_heads: 6, - group_size: 32, - num_group: 512, - encoder_dims: 256, -} -npoints: 8192 -total_bs : 32 -step_per_update : 1 -max_epoch : 300 -grad_norm_clip : 10 - -consider_metric: CDL1 +optimizer : { + type: AdamW, + kwargs: { + lr : 0.0005, + weight_decay : 0.05 +}} + +scheduler: { + type: CosLR, + kwargs: { + epochs: 300, + initial_epochs : 10 +}} + +model : { + NAME: PointTransformer, + trans_dim: 384, + depth: 12, + drop_path_rate: 0.1, + cls_dim: 40, + num_heads: 6, + group_size: 32, + num_group: 512, + encoder_dims: 256, +} +npoints: 8192 +total_bs : 32 +step_per_update : 1 +max_epoch : 300 +grad_norm_clip : 10 + +consider_metric: CDL1 diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/checkpoint.py b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/checkpoint.py index 72e70a78a6..a8a80056b0 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/checkpoint.py +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/checkpoint.py @@ -1,119 +1,119 @@ -from collections import defaultdict -from typing import Any -from typing import Dict -from typing import Iterable -from typing import List -from typing import Tuple - -import paddle -from termcolor import colored - - -def get_missing_parameters_message(keys: List[str]) -> str: - """ - Get a logging-friendly message to report parameter names (keys) that are in - the model but not found in a checkpoint. 
- Args: - keys (list[str]): List of keys that were not found in the checkpoint. - Returns: - str: message. - """ - groups = _group_checkpoint_keys(keys) - msg = "Some model parameters or buffers are not found in the checkpoint:\n" - msg += "\n".join( - " " + colored(k + _group_to_str(v), "blue") for k, v in groups.items() - ) - return msg - - -def get_unexpected_parameters_message(keys: List[str]) -> str: - """ - Get a logging-friendly message to report parameter names (keys) that are in - the checkpoint but not found in the model. - Args: - keys (list[str]): List of keys that were not found in the model. - Returns: - str: message. - """ - groups = _group_checkpoint_keys(keys) - msg = "The checkpoint state_dict contains keys that are not used by the model:\n" - msg += "\n".join( - " " + colored(k + _group_to_str(v), "magenta") for k, v in groups.items() - ) - return msg - - -def _strip_prefix_if_present(state_dict: Dict[str, Any], prefix: str) -> None: - """ - Strip the prefix in metadata, if any. - Args: - state_dict (OrderedDict): a state-dict to be loaded to the model. - prefix (str): prefix. - """ - keys = sorted(state_dict.keys()) - if not all(len(key) == 0 or key.startswith(prefix) for key in keys): - return - for key in keys: - newkey = key[len(prefix) :] - state_dict[newkey] = state_dict.pop(key) - try: - metadata = state_dict._metadata - except AttributeError: - pass - else: - for key in list(metadata.keys()): - if len(key) == 0: - continue - newkey = key[len(prefix) :] - metadata[newkey] = metadata.pop(key) - - -def _group_checkpoint_keys(keys: List[str]) -> Dict[str, List[str]]: - """ - Group keys based on common prefixes. A prefix is the string up to the final - "." in each key. - Args: - keys (list[str]): list of parameter names, i.e. keys in the model - checkpoint dict. - Returns: - dict[list]: keys with common prefixes are grouped into lists. - """ - groups = defaultdict(list) - for key in keys: - pos = key.rfind(".") - if pos >= 0: - head, tail = key[:pos], [key[pos + 1 :]] - else: - head, tail = key, [] - groups[head].extend(tail) - return groups - - -def _group_to_str(group: List[str]) -> str: - """ - Format a group of parameter name suffixes into a loggable string. - Args: - group (list[str]): list of parameter name suffixes. - Returns: - str: formated string. - """ - if len(group) == 0: - return "" - if len(group) == 1: - return "." + group[0] - return ".{" + ", ".join(group) + "}" - - -def _named_modules_with_dup( - model: paddle.nn.Layer, prefix: str = "" -) -> Iterable[Tuple[str, paddle.nn.Layer]]: - """ - The same as `model.named_modules()`, except that it includes - duplicated modules that have more than one name. - """ - yield prefix, model - for name, module in model._modules.items(): - if module is None: - continue - submodule_prefix = prefix + ("." if prefix else "") + name - yield from _named_modules_with_dup(module, submodule_prefix) +from collections import defaultdict +from typing import Any +from typing import Dict +from typing import Iterable +from typing import List +from typing import Tuple + +import paddle +from termcolor import colored + + +def get_missing_parameters_message(keys: List[str]) -> str: + """ + Get a logging-friendly message to report parameter names (keys) that are in + the model but not found in a checkpoint. + Args: + keys (list[str]): List of keys that were not found in the checkpoint. + Returns: + str: message. 
+ """ + groups = _group_checkpoint_keys(keys) + msg = "Some model parameters or buffers are not found in the checkpoint:\n" + msg += "\n".join( + " " + colored(k + _group_to_str(v), "blue") for k, v in groups.items() + ) + return msg + + +def get_unexpected_parameters_message(keys: List[str]) -> str: + """ + Get a logging-friendly message to report parameter names (keys) that are in + the checkpoint but not found in the model. + Args: + keys (list[str]): List of keys that were not found in the model. + Returns: + str: message. + """ + groups = _group_checkpoint_keys(keys) + msg = "The checkpoint state_dict contains keys that are not used by the model:\n" + msg += "\n".join( + " " + colored(k + _group_to_str(v), "magenta") for k, v in groups.items() + ) + return msg + + +def _strip_prefix_if_present(state_dict: Dict[str, Any], prefix: str) -> None: + """ + Strip the prefix in metadata, if any. + Args: + state_dict (OrderedDict): a state-dict to be loaded to the model. + prefix (str): prefix. + """ + keys = sorted(state_dict.keys()) + if not all(len(key) == 0 or key.startswith(prefix) for key in keys): + return + for key in keys: + newkey = key[len(prefix) :] + state_dict[newkey] = state_dict.pop(key) + try: + metadata = state_dict._metadata + except AttributeError: + pass + else: + for key in list(metadata.keys()): + if len(key) == 0: + continue + newkey = key[len(prefix) :] + metadata[newkey] = metadata.pop(key) + + +def _group_checkpoint_keys(keys: List[str]) -> Dict[str, List[str]]: + """ + Group keys based on common prefixes. A prefix is the string up to the final + "." in each key. + Args: + keys (list[str]): list of parameter names, i.e. keys in the model + checkpoint dict. + Returns: + dict[list]: keys with common prefixes are grouped into lists. + """ + groups = defaultdict(list) + for key in keys: + pos = key.rfind(".") + if pos >= 0: + head, tail = key[:pos], [key[pos + 1 :]] + else: + head, tail = key, [] + groups[head].extend(tail) + return groups + + +def _group_to_str(group: List[str]) -> str: + """ + Format a group of parameter name suffixes into a loggable string. + Args: + group (list[str]): list of parameter name suffixes. + Returns: + str: formated string. + """ + if len(group) == 0: + return "" + if len(group) == 1: + return "." + group[0] + return ".{" + ", ".join(group) + "}" + + +def _named_modules_with_dup( + model: paddle.nn.Layer, prefix: str = "" +) -> Iterable[Tuple[str, paddle.nn.Layer]]: + """ + The same as `model.named_modules()`, except that it includes + duplicated modules that have more than one name. + """ + yield prefix, model + for name, module in model._modules.items(): + if module is None: + continue + submodule_prefix = prefix + ("." 
if prefix else "") + name + yield from _named_modules_with_dup(module, submodule_prefix) diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/dvae.py b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/dvae.py index 2431150163..2dacfeaf29 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/dvae.py +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/dvae.py @@ -1,399 +1,399 @@ -import paddle -import utils.paddle_aux # NOQA -from geom.models.pointbert import misc -from geom.models.pointbert.knn import knn - - -class DGCNN(paddle.nn.Layer): - def __init__(self, encoder_channel, output_channel): - super().__init__() - """ - K has to be 16 - """ - self.input_trans = paddle.nn.Conv1D( - in_channels=encoder_channel, out_channels=128, kernel_size=1 - ) - self.layer1 = paddle.nn.Sequential( - paddle.nn.Conv2D( - in_channels=256, out_channels=256, kernel_size=1, bias_attr=False - ), - paddle.nn.GroupNorm(num_groups=4, num_channels=256), - paddle.nn.LeakyReLU(negative_slope=0.2), - ) - self.layer2 = paddle.nn.Sequential( - paddle.nn.Conv2D( - in_channels=512, out_channels=512, kernel_size=1, bias_attr=False - ), - paddle.nn.GroupNorm(num_groups=4, num_channels=512), - paddle.nn.LeakyReLU(negative_slope=0.2), - ) - self.layer3 = paddle.nn.Sequential( - paddle.nn.Conv2D( - in_channels=1024, out_channels=512, kernel_size=1, bias_attr=False - ), - paddle.nn.GroupNorm(num_groups=4, num_channels=512), - paddle.nn.LeakyReLU(negative_slope=0.2), - ) - self.layer4 = paddle.nn.Sequential( - paddle.nn.Conv2D( - in_channels=1024, out_channels=1024, kernel_size=1, bias_attr=False - ), - paddle.nn.GroupNorm(num_groups=4, num_channels=1024), - paddle.nn.LeakyReLU(negative_slope=0.2), - ) - self.layer5 = paddle.nn.Sequential( - paddle.nn.Conv1D( - in_channels=2304, - out_channels=output_channel, - kernel_size=1, - bias_attr=False, - ), - paddle.nn.GroupNorm(num_groups=4, num_channels=output_channel), - paddle.nn.LeakyReLU(negative_slope=0.2), - ) - - @staticmethod - def get_graph_feature(coor_q, x_q, coor_k, x_k): - k = 4 - batch_size = x_k.shape[0] - num_points_k = x_k.shape[2] - num_points_q = x_q.shape[2] - with paddle.no_grad(): - _, idx = knn(coor_k, coor_q, k=4) - assert tuple(idx.shape)[1] == k - idx_base = ( - paddle.arange(start=0, end=batch_size).view(-1, 1, 1) * num_points_k - ) - idx = idx + idx_base - idx = idx.view(-1) - num_dims = x_k.shape[1] - x = x_k - perm_0 = list(range(x.ndim)) - perm_0[2] = 1 - perm_0[1] = 2 - x_k = x.transpose(perm=perm_0) - feature = x_k.view(batch_size * num_points_k, -1)[idx, :] - feature = feature.view(batch_size, k, num_points_q, num_dims).transpose( - perm=[0, 3, 2, 1] - ) - x_q = x_q.view(batch_size, num_dims, num_points_q, 1).expand( - shape=[-1, -1, -1, k] - ) - feature = paddle.concat(x=(feature - x_q, x_q), axis=1) - return feature - - def forward(self, f, coor): - feature_list = [] - x = coor - perm_1 = list(range(x.ndim)) - perm_1[1] = 2 - perm_1[2] = 1 - coor = x.transpose(perm=perm_1) - x = f - perm_2 = list(range(x.ndim)) - perm_2[1] = 2 - perm_2[2] = 1 - f = x.transpose(perm=perm_2) - f = self.input_trans(f) - f = self.get_graph_feature(coor, f, coor, f) - f = self.layer1(f) - f = f.max(dim=-1, keepdim=False)[0] - feature_list.append(f) - f = self.get_graph_feature(coor, f, coor, f) - f = self.layer2(f) - f = f.max(dim=-1, keepdim=False)[0] - feature_list.append(f) - f = self.get_graph_feature(coor, f, coor, f) - f = self.layer3(f) - f = f.max(dim=-1, keepdim=False)[0] - feature_list.append(f) - f = 
self.get_graph_feature(coor, f, coor, f) - f = self.layer4(f) - f = f.max(dim=-1, keepdim=False)[0] - feature_list.append(f) - f = paddle.concat(x=feature_list, axis=1) - f = self.layer5(f) - x = f - perm_3 = list(range(x.ndim)) - perm_3[-1] = -2 - perm_3[-2] = -1 - f = x.transpose(perm=perm_3) - return f - - -def knn_point(nsample, xyz, new_xyz): - """ - Input: - nsample: max sample number in local region - xyz: all points, [B, N, C] - new_xyz: query points, [B, S, C] - Return: - group_idx: grouped points index, [B, S, nsample] - """ - sqrdists = square_distance(new_xyz, xyz) - _, group_idx = paddle.topk( - k=nsample, largest=False, sorted=False, x=sqrdists, axis=-1 - ) - return group_idx - - -def square_distance(src, dst): - """ - Calculate Euclid distance between each two points. - src^T * dst = xn * xm + yn * ym + zn * zm; - sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn; - sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm; - dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2 - = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst - Input: - src: source points, [B, N, C] - dst: target points, [B, M, C] - Output: - dist: per-point square distance, [B, N, M] - """ - B, N, _ = tuple(src.shape) - _, M, _ = tuple(dst.shape) - dist = -2 * paddle.matmul(x=src, y=dst.transpose(perm=[0, 2, 1])) - dist += paddle.sum(x=src**2, axis=-1).view(B, N, 1) - dist += paddle.sum(x=dst**2, axis=-1).view(B, 1, M) - return dist - - -class Group(paddle.nn.Layer): - def __init__(self, num_group, group_size): - super().__init__() - self.num_group = num_group - self.group_size = group_size - - def forward(self, xyz): - """ - input: B N 3 - --------------------------- - output: B G M 3 - center : B G 3 - """ - batch_size, num_points, _ = tuple(xyz.shape) - center = misc.fps(xyz, self.num_group) - idx = knn_point(self.group_size, xyz, center) - assert idx.shape[1] == self.num_group - assert idx.shape[2] == self.group_size - idx_base = paddle.arange(start=0, end=batch_size).view(-1, 1, 1) * num_points - idx = idx + idx_base - idx = idx.view(-1) - neighborhood = xyz.view(batch_size * num_points, -1)[idx, :] - neighborhood = neighborhood.view(batch_size, self.num_group, self.group_size, 3) - neighborhood = neighborhood - center.unsqueeze(axis=2) - return neighborhood, center - - -class Encoder(paddle.nn.Layer): - def __init__(self, encoder_channel): - super().__init__() - self.encoder_channel = encoder_channel - self.first_conv = paddle.nn.Sequential( - paddle.nn.Conv1D(in_channels=3, out_channels=128, kernel_size=1), - paddle.nn.BatchNorm1D(num_features=128), - paddle.nn.ReLU(), - paddle.nn.Conv1D(in_channels=128, out_channels=256, kernel_size=1), - ) - self.second_conv = paddle.nn.Sequential( - paddle.nn.Conv1D(in_channels=512, out_channels=512, kernel_size=1), - paddle.nn.BatchNorm1D(num_features=512), - paddle.nn.ReLU(), - paddle.nn.Conv1D( - in_channels=512, out_channels=self.encoder_channel, kernel_size=1 - ), - ) - - def forward(self, point_groups): - """ - point_groups : B G N 3 - ----------------- - feature_global : B G C - """ - bs, g, n, _ = tuple(point_groups.shape) - point_groups = point_groups.reshape(bs * g, n, 3) - x = point_groups - perm_4 = list(range(x.ndim)) - perm_4[2] = 1 - perm_4[1] = 2 - feature = self.first_conv(x.transpose(perm=perm_4)) - feature_global = ( - paddle.max(x=feature, axis=2, keepdim=True), - paddle.argmax(x=feature, axis=2, keepdim=True), - )[0] - feature = paddle.concat( - x=[feature_global.expand(shape=[-1, -1, n]), feature], axis=1 - ) - feature = self.second_conv(feature) - feature_global = 
( - paddle.max(x=feature, axis=2, keepdim=False), - paddle.argmax(x=feature, axis=2, keepdim=False), - )[0] - return feature_global.reshape(bs, g, self.encoder_channel) - - -class Decoder(paddle.nn.Layer): - def __init__(self, encoder_channel, num_fine): - super().__init__() - self.num_fine = num_fine - self.grid_size = 2 - self.num_coarse = self.num_fine // 4 - assert num_fine % 4 == 0 - self.mlp = paddle.nn.Sequential( - paddle.nn.Linear(in_features=encoder_channel, out_features=1024), - paddle.nn.ReLU(), - paddle.nn.Linear(in_features=1024, out_features=1024), - paddle.nn.ReLU(), - paddle.nn.Linear(in_features=1024, out_features=3 * self.num_coarse), - ) - self.final_conv = paddle.nn.Sequential( - paddle.nn.Conv1D( - in_channels=encoder_channel + 3 + 2, out_channels=512, kernel_size=1 - ), - paddle.nn.BatchNorm1D(num_features=512), - paddle.nn.ReLU(), - paddle.nn.Conv1D(in_channels=512, out_channels=512, kernel_size=1), - paddle.nn.BatchNorm1D(num_features=512), - paddle.nn.ReLU(), - paddle.nn.Conv1D(in_channels=512, out_channels=3, kernel_size=1), - ) - a = ( - paddle.linspace(start=-0.05, stop=0.05, num=self.grid_size, dtype="float32") - .view(1, self.grid_size) - .expand(shape=[self.grid_size, self.grid_size]) - .reshape(1, -1) - ) - b = ( - paddle.linspace(start=-0.05, stop=0.05, num=self.grid_size, dtype="float32") - .view(self.grid_size, 1) - .expand(shape=[self.grid_size, self.grid_size]) - .reshape(1, -1) - ) - self.folding_seed = paddle.concat(x=[a, b], axis=0).view( - 1, 2, self.grid_size**2 - ) - - def forward(self, feature_global): - """ - feature_global : B G C - ------- - coarse : B G M 3 - fine : B G N 3 - - """ - bs, g, c = tuple(feature_global.shape) - feature_global = feature_global.reshape(bs * g, c) - coarse = self.mlp(feature_global).reshape(bs * g, self.num_coarse, 3) - point_feat = coarse.unsqueeze(axis=2).expand( - shape=[-1, -1, self.grid_size**2, -1] - ) - x = point_feat.reshape(bs * g, self.num_fine, 3) - perm_5 = list(range(x.ndim)) - perm_5[2] = 1 - perm_5[1] = 2 - point_feat = x.transpose(perm=perm_5) - seed = self.folding_seed.unsqueeze(axis=2).expand( - shape=[bs * g, -1, self.num_coarse, -1] - ) - seed = seed.reshape(bs * g, -1, self.num_fine).to(feature_global.place) - feature_global = feature_global.unsqueeze(axis=2).expand( - shape=[-1, -1, self.num_fine] - ) - feat = paddle.concat(x=[feature_global, seed, point_feat], axis=1) - center = coarse.unsqueeze(axis=2).expand( - shape=[-1, -1, self.grid_size**2, -1] - ) - x = center.reshape(bs * g, self.num_fine, 3) - perm_6 = list(range(x.ndim)) - perm_6[2] = 1 - perm_6[1] = 2 - center = x.transpose(perm=perm_6) - fine = self.final_conv(feat) + center - x = fine.reshape(bs, g, 3, self.num_fine) - perm_7 = list(range(x.ndim)) - perm_7[-1] = -2 - perm_7[-2] = -1 - fine = x.transpose(perm=perm_7) - coarse = coarse.reshape(bs, g, self.num_coarse, 3) - return coarse, fine - - -class DiscreteVAE(paddle.nn.Layer): - def __init__(self, config, **kwargs): - super().__init__() - self.group_size = config.group_size - self.num_group = config.num_group - self.encoder_dims = config.encoder_dims - self.tokens_dims = config.tokens_dims - self.decoder_dims = config.decoder_dims - self.num_tokens = config.num_tokens - self.group_divider = Group(num_group=self.num_group, group_size=self.group_size) - self.encoder = Encoder(encoder_channel=self.encoder_dims) - self.dgcnn_1 = DGCNN( - encoder_channel=self.encoder_dims, output_channel=self.num_tokens - ) - out_5 = paddle.create_parameter( - 
shape=paddle.randn(shape=[self.num_tokens, self.tokens_dims]).shape, - dtype=paddle.randn(shape=[self.num_tokens, self.tokens_dims]).numpy().dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.randn(shape=[self.num_tokens, self.tokens_dims]) - ), - ) - out_5.stop_gradient = not True - self.codebook = out_5 - self.dgcnn_2 = DGCNN( - encoder_channel=self.tokens_dims, output_channel=self.decoder_dims - ) - self.decoder = Decoder( - encoder_channel=self.decoder_dims, num_fine=self.group_size - ) - - def recon_loss(self, ret, gt): - whole_coarse, whole_fine, coarse, fine, group_gt, _ = ret - bs, g, _, _ = tuple(coarse.shape) - coarse = coarse.reshape(bs * g, -1, 3) - fine = fine.reshape(bs * g, -1, 3) - group_gt = group_gt.reshape(bs * g, -1, 3) - loss_coarse_block = self.loss_func_cdl1(coarse, group_gt) - loss_fine_block = self.loss_func_cdl1(fine, group_gt) - loss_recon = loss_coarse_block + loss_fine_block - return loss_recon - - def get_loss(self, ret, gt): - loss_recon = self.recon_loss(ret, gt) - logits = ret[-1] - softmax = paddle.nn.functional.softmax(x=logits, axis=-1) - mean_softmax = softmax.mean(axis=1) - log_qy = paddle.log(x=mean_softmax) - log_uniform = paddle.log( - x=paddle.to_tensor(data=[1.0 / self.num_tokens], place=gt.place) - ) - loss_klv = paddle.nn.functional.kl_div( - log_qy, - log_uniform.expand(shape=[log_qy.shape[0], log_qy.shape[1]]), - None, - None, - "batchmean", - log_target=True, - ) - return loss_recon, loss_klv - - def forward(self, inp, temperature=1.0, hard=False, **kwargs): - neighborhood, center = self.group_divider(inp) - logits = self.encoder(neighborhood) - logits = self.dgcnn_1(logits, center) - soft_one_hot = paddle.nn.functional.gumbel_softmax( - x=logits, temperature=temperature, axis=2, hard=hard - ) - sampled = paddle.einsum("b g n, n c -> b g c", soft_one_hot, self.codebook) - feature = self.dgcnn_2(sampled, center) - coarse, fine = self.decoder(feature) - with paddle.no_grad(): - whole_fine = (fine + center.unsqueeze(axis=2)).reshape(inp.shape[0], -1, 3) - whole_coarse = (coarse + center.unsqueeze(axis=2)).reshape( - inp.shape[0], -1, 3 - ) - assert fine.shape[2] == self.group_size - ret = whole_coarse, whole_fine, coarse, fine, neighborhood, logits - return ret +import paddle +import utils.paddle_aux # NOQA +from geom.models.pointbert import misc +from geom.models.pointbert.knn import knn + + +class DGCNN(paddle.nn.Layer): + def __init__(self, encoder_channel, output_channel): + super().__init__() + """ + K has to be 16 + """ + self.input_trans = paddle.nn.Conv1D( + in_channels=encoder_channel, out_channels=128, kernel_size=1 + ) + self.layer1 = paddle.nn.Sequential( + paddle.nn.Conv2D( + in_channels=256, out_channels=256, kernel_size=1, bias_attr=False + ), + paddle.nn.GroupNorm(num_groups=4, num_channels=256), + paddle.nn.LeakyReLU(negative_slope=0.2), + ) + self.layer2 = paddle.nn.Sequential( + paddle.nn.Conv2D( + in_channels=512, out_channels=512, kernel_size=1, bias_attr=False + ), + paddle.nn.GroupNorm(num_groups=4, num_channels=512), + paddle.nn.LeakyReLU(negative_slope=0.2), + ) + self.layer3 = paddle.nn.Sequential( + paddle.nn.Conv2D( + in_channels=1024, out_channels=512, kernel_size=1, bias_attr=False + ), + paddle.nn.GroupNorm(num_groups=4, num_channels=512), + paddle.nn.LeakyReLU(negative_slope=0.2), + ) + self.layer4 = paddle.nn.Sequential( + paddle.nn.Conv2D( + in_channels=1024, out_channels=1024, kernel_size=1, bias_attr=False + ), + paddle.nn.GroupNorm(num_groups=4, num_channels=1024), + 
paddle.nn.LeakyReLU(negative_slope=0.2), + ) + self.layer5 = paddle.nn.Sequential( + paddle.nn.Conv1D( + in_channels=2304, + out_channels=output_channel, + kernel_size=1, + bias_attr=False, + ), + paddle.nn.GroupNorm(num_groups=4, num_channels=output_channel), + paddle.nn.LeakyReLU(negative_slope=0.2), + ) + + @staticmethod + def get_graph_feature(coor_q, x_q, coor_k, x_k): + k = 4 + batch_size = x_k.shape[0] + num_points_k = x_k.shape[2] + num_points_q = x_q.shape[2] + with paddle.no_grad(): + _, idx = knn(coor_k, coor_q, k=4) + assert tuple(idx.shape)[1] == k + idx_base = ( + paddle.arange(start=0, end=batch_size).view(-1, 1, 1) * num_points_k + ) + idx = idx + idx_base + idx = idx.view(-1) + num_dims = x_k.shape[1] + x = x_k + perm_0 = list(range(x.ndim)) + perm_0[2] = 1 + perm_0[1] = 2 + x_k = x.transpose(perm=perm_0) + feature = x_k.view(batch_size * num_points_k, -1)[idx, :] + feature = feature.view(batch_size, k, num_points_q, num_dims).transpose( + perm=[0, 3, 2, 1] + ) + x_q = x_q.view(batch_size, num_dims, num_points_q, 1).expand( + shape=[-1, -1, -1, k] + ) + feature = paddle.concat(x=(feature - x_q, x_q), axis=1) + return feature + + def forward(self, f, coor): + feature_list = [] + x = coor + perm_1 = list(range(x.ndim)) + perm_1[1] = 2 + perm_1[2] = 1 + coor = x.transpose(perm=perm_1) + x = f + perm_2 = list(range(x.ndim)) + perm_2[1] = 2 + perm_2[2] = 1 + f = x.transpose(perm=perm_2) + f = self.input_trans(f) + f = self.get_graph_feature(coor, f, coor, f) + f = self.layer1(f) + f = f.max(dim=-1, keepdim=False)[0] + feature_list.append(f) + f = self.get_graph_feature(coor, f, coor, f) + f = self.layer2(f) + f = f.max(dim=-1, keepdim=False)[0] + feature_list.append(f) + f = self.get_graph_feature(coor, f, coor, f) + f = self.layer3(f) + f = f.max(dim=-1, keepdim=False)[0] + feature_list.append(f) + f = self.get_graph_feature(coor, f, coor, f) + f = self.layer4(f) + f = f.max(dim=-1, keepdim=False)[0] + feature_list.append(f) + f = paddle.concat(x=feature_list, axis=1) + f = self.layer5(f) + x = f + perm_3 = list(range(x.ndim)) + perm_3[-1] = -2 + perm_3[-2] = -1 + f = x.transpose(perm=perm_3) + return f + + +def knn_point(nsample, xyz, new_xyz): + """ + Input: + nsample: max sample number in local region + xyz: all points, [B, N, C] + new_xyz: query points, [B, S, C] + Return: + group_idx: grouped points index, [B, S, nsample] + """ + sqrdists = square_distance(new_xyz, xyz) + _, group_idx = paddle.topk( + k=nsample, largest=False, sorted=False, x=sqrdists, axis=-1 + ) + return group_idx + + +def square_distance(src, dst): + """ + Calculate Euclid distance between each two points. 
+ src^T * dst = xn * xm + yn * ym + zn * zm; + sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn; + sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm; + dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2 + = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst + Input: + src: source points, [B, N, C] + dst: target points, [B, M, C] + Output: + dist: per-point square distance, [B, N, M] + """ + B, N, _ = tuple(src.shape) + _, M, _ = tuple(dst.shape) + dist = -2 * paddle.matmul(x=src, y=dst.transpose(perm=[0, 2, 1])) + dist += paddle.sum(x=src**2, axis=-1).view(B, N, 1) + dist += paddle.sum(x=dst**2, axis=-1).view(B, 1, M) + return dist + + +class Group(paddle.nn.Layer): + def __init__(self, num_group, group_size): + super().__init__() + self.num_group = num_group + self.group_size = group_size + + def forward(self, xyz): + """ + input: B N 3 + --------------------------- + output: B G M 3 + center : B G 3 + """ + batch_size, num_points, _ = tuple(xyz.shape) + center = misc.fps(xyz, self.num_group) + idx = knn_point(self.group_size, xyz, center) + assert idx.shape[1] == self.num_group + assert idx.shape[2] == self.group_size + idx_base = paddle.arange(start=0, end=batch_size).view(-1, 1, 1) * num_points + idx = idx + idx_base + idx = idx.view(-1) + neighborhood = xyz.view(batch_size * num_points, -1)[idx, :] + neighborhood = neighborhood.view(batch_size, self.num_group, self.group_size, 3) + neighborhood = neighborhood - center.unsqueeze(axis=2) + return neighborhood, center + + +class Encoder(paddle.nn.Layer): + def __init__(self, encoder_channel): + super().__init__() + self.encoder_channel = encoder_channel + self.first_conv = paddle.nn.Sequential( + paddle.nn.Conv1D(in_channels=3, out_channels=128, kernel_size=1), + paddle.nn.BatchNorm1D(num_features=128), + paddle.nn.ReLU(), + paddle.nn.Conv1D(in_channels=128, out_channels=256, kernel_size=1), + ) + self.second_conv = paddle.nn.Sequential( + paddle.nn.Conv1D(in_channels=512, out_channels=512, kernel_size=1), + paddle.nn.BatchNorm1D(num_features=512), + paddle.nn.ReLU(), + paddle.nn.Conv1D( + in_channels=512, out_channels=self.encoder_channel, kernel_size=1 + ), + ) + + def forward(self, point_groups): + """ + point_groups : B G N 3 + ----------------- + feature_global : B G C + """ + bs, g, n, _ = tuple(point_groups.shape) + point_groups = point_groups.reshape(bs * g, n, 3) + x = point_groups + perm_4 = list(range(x.ndim)) + perm_4[2] = 1 + perm_4[1] = 2 + feature = self.first_conv(x.transpose(perm=perm_4)) + feature_global = ( + paddle.max(x=feature, axis=2, keepdim=True), + paddle.argmax(x=feature, axis=2, keepdim=True), + )[0] + feature = paddle.concat( + x=[feature_global.expand(shape=[-1, -1, n]), feature], axis=1 + ) + feature = self.second_conv(feature) + feature_global = ( + paddle.max(x=feature, axis=2, keepdim=False), + paddle.argmax(x=feature, axis=2, keepdim=False), + )[0] + return feature_global.reshape(bs, g, self.encoder_channel) + + +class Decoder(paddle.nn.Layer): + def __init__(self, encoder_channel, num_fine): + super().__init__() + self.num_fine = num_fine + self.grid_size = 2 + self.num_coarse = self.num_fine // 4 + assert num_fine % 4 == 0 + self.mlp = paddle.nn.Sequential( + paddle.nn.Linear(in_features=encoder_channel, out_features=1024), + paddle.nn.ReLU(), + paddle.nn.Linear(in_features=1024, out_features=1024), + paddle.nn.ReLU(), + paddle.nn.Linear(in_features=1024, out_features=3 * self.num_coarse), + ) + self.final_conv = paddle.nn.Sequential( + paddle.nn.Conv1D( + in_channels=encoder_channel + 3 + 2, out_channels=512, 
kernel_size=1 + ), + paddle.nn.BatchNorm1D(num_features=512), + paddle.nn.ReLU(), + paddle.nn.Conv1D(in_channels=512, out_channels=512, kernel_size=1), + paddle.nn.BatchNorm1D(num_features=512), + paddle.nn.ReLU(), + paddle.nn.Conv1D(in_channels=512, out_channels=3, kernel_size=1), + ) + a = ( + paddle.linspace(start=-0.05, stop=0.05, num=self.grid_size, dtype="float32") + .view(1, self.grid_size) + .expand(shape=[self.grid_size, self.grid_size]) + .reshape(1, -1) + ) + b = ( + paddle.linspace(start=-0.05, stop=0.05, num=self.grid_size, dtype="float32") + .view(self.grid_size, 1) + .expand(shape=[self.grid_size, self.grid_size]) + .reshape(1, -1) + ) + self.folding_seed = paddle.concat(x=[a, b], axis=0).view( + 1, 2, self.grid_size**2 + ) + + def forward(self, feature_global): + """ + feature_global : B G C + ------- + coarse : B G M 3 + fine : B G N 3 + + """ + bs, g, c = tuple(feature_global.shape) + feature_global = feature_global.reshape(bs * g, c) + coarse = self.mlp(feature_global).reshape(bs * g, self.num_coarse, 3) + point_feat = coarse.unsqueeze(axis=2).expand( + shape=[-1, -1, self.grid_size**2, -1] + ) + x = point_feat.reshape(bs * g, self.num_fine, 3) + perm_5 = list(range(x.ndim)) + perm_5[2] = 1 + perm_5[1] = 2 + point_feat = x.transpose(perm=perm_5) + seed = self.folding_seed.unsqueeze(axis=2).expand( + shape=[bs * g, -1, self.num_coarse, -1] + ) + seed = seed.reshape(bs * g, -1, self.num_fine).to(feature_global.place) + feature_global = feature_global.unsqueeze(axis=2).expand( + shape=[-1, -1, self.num_fine] + ) + feat = paddle.concat(x=[feature_global, seed, point_feat], axis=1) + center = coarse.unsqueeze(axis=2).expand( + shape=[-1, -1, self.grid_size**2, -1] + ) + x = center.reshape(bs * g, self.num_fine, 3) + perm_6 = list(range(x.ndim)) + perm_6[2] = 1 + perm_6[1] = 2 + center = x.transpose(perm=perm_6) + fine = self.final_conv(feat) + center + x = fine.reshape(bs, g, 3, self.num_fine) + perm_7 = list(range(x.ndim)) + perm_7[-1] = -2 + perm_7[-2] = -1 + fine = x.transpose(perm=perm_7) + coarse = coarse.reshape(bs, g, self.num_coarse, 3) + return coarse, fine + + +class DiscreteVAE(paddle.nn.Layer): + def __init__(self, config, **kwargs): + super().__init__() + self.group_size = config.group_size + self.num_group = config.num_group + self.encoder_dims = config.encoder_dims + self.tokens_dims = config.tokens_dims + self.decoder_dims = config.decoder_dims + self.num_tokens = config.num_tokens + self.group_divider = Group(num_group=self.num_group, group_size=self.group_size) + self.encoder = Encoder(encoder_channel=self.encoder_dims) + self.dgcnn_1 = DGCNN( + encoder_channel=self.encoder_dims, output_channel=self.num_tokens + ) + out_5 = paddle.create_parameter( + shape=paddle.randn(shape=[self.num_tokens, self.tokens_dims]).shape, + dtype=paddle.randn(shape=[self.num_tokens, self.tokens_dims]).numpy().dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.randn(shape=[self.num_tokens, self.tokens_dims]) + ), + ) + out_5.stop_gradient = not True + self.codebook = out_5 + self.dgcnn_2 = DGCNN( + encoder_channel=self.tokens_dims, output_channel=self.decoder_dims + ) + self.decoder = Decoder( + encoder_channel=self.decoder_dims, num_fine=self.group_size + ) + + def recon_loss(self, ret, gt): + whole_coarse, whole_fine, coarse, fine, group_gt, _ = ret + bs, g, _, _ = tuple(coarse.shape) + coarse = coarse.reshape(bs * g, -1, 3) + fine = fine.reshape(bs * g, -1, 3) + group_gt = group_gt.reshape(bs * g, -1, 3) + loss_coarse_block = self.loss_func_cdl1(coarse, 
group_gt) + loss_fine_block = self.loss_func_cdl1(fine, group_gt) + loss_recon = loss_coarse_block + loss_fine_block + return loss_recon + + def get_loss(self, ret, gt): + loss_recon = self.recon_loss(ret, gt) + logits = ret[-1] + softmax = paddle.nn.functional.softmax(x=logits, axis=-1) + mean_softmax = softmax.mean(axis=1) + log_qy = paddle.log(x=mean_softmax) + log_uniform = paddle.log( + x=paddle.to_tensor(data=[1.0 / self.num_tokens], place=gt.place) + ) + loss_klv = paddle.nn.functional.kl_div( + log_qy, + log_uniform.expand(shape=[log_qy.shape[0], log_qy.shape[1]]), + None, + None, + "batchmean", + log_target=True, + ) + return loss_recon, loss_klv + + def forward(self, inp, temperature=1.0, hard=False, **kwargs): + neighborhood, center = self.group_divider(inp) + logits = self.encoder(neighborhood) + logits = self.dgcnn_1(logits, center) + soft_one_hot = paddle.nn.functional.gumbel_softmax( + x=logits, temperature=temperature, axis=2, hard=hard + ) + sampled = paddle.einsum("b g n, n c -> b g c", soft_one_hot, self.codebook) + feature = self.dgcnn_2(sampled, center) + coarse, fine = self.decoder(feature) + with paddle.no_grad(): + whole_fine = (fine + center.unsqueeze(axis=2)).reshape(inp.shape[0], -1, 3) + whole_coarse = (coarse + center.unsqueeze(axis=2)).reshape( + inp.shape[0], -1, 3 + ) + assert fine.shape[2] == self.group_size + ret = whole_coarse, whole_fine, coarse, fine, neighborhood, logits + return ret diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/knn.py b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/knn.py index d5560b2e64..7e80532c98 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/knn.py +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/knn.py @@ -1,11 +1,11 @@ -import paddle - - -def knn(ref_points, query_points, k=4): - dists = paddle.norm( - paddle.unsqueeze(ref_points, axis=1) - paddle.unsqueeze(query_points, axis=0), - p=2, - axis=-1, - ) - _, indices = paddle.topk(dists, k=k, axis=-1, largest=False) - return indices +import paddle + + +def knn(ref_points, query_points, k=4): + dists = paddle.norm( + paddle.unsqueeze(ref_points, axis=1) - paddle.unsqueeze(query_points, axis=0), + p=2, + axis=-1, + ) + _, indices = paddle.topk(dists, k=k, axis=-1, largest=False) + return indices diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/logger.py b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/logger.py index 4b008c7ddc..503d85bbf4 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/logger.py +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/logger.py @@ -1,105 +1,105 @@ -import logging - -import paddle - -logger_initialized = {} - - -def get_root_logger(log_file=None, log_level=logging.INFO, name="main"): - """Get root logger and add a keyword filter to it. - The logger will be initialized if it has not been initialized. By default a - StreamHandler will be added. If `log_file` is specified, a FileHandler will - also be added. The name of the root logger is the top-level package name, - e.g., "mmdet3d". - Args: - log_file (str, optional): File path of log. Defaults to None. - log_level (int, optional): The level of logger. - Defaults to logging.INFO. - name (str, optional): The name of the root logger, also used as a - filter keyword. Defaults to 'mmdet3d'. 
- Returns: - :obj:`logging.Logger`: The obtained logger - """ - logger = get_logger(name=name, log_file=log_file, log_level=log_level) - logging_filter = logging.Filter(name) - logging_filter.filter = lambda record: record.find(name) != -1 - return logger - - -def get_logger(name, log_file=None, log_level=logging.INFO, file_mode="w"): - """Initialize and get a logger by name. - If the logger has not been initialized, this method will initialize the - logger by adding one or two handlers, otherwise the initialized logger will - be directly returned. During initialization, a StreamHandler will always be - added. If `log_file` is specified and the process rank is 0, a FileHandler - will also be added. - Args: - name (str): Logger name. - log_file (str | None): The log filename. If specified, a FileHandler - will be added to the logger. - log_level (int): The logger level. Note that only the process of - rank 0 is affected, and other processes will set the level to - "Error" thus be silent most of the time. - file_mode (str): The file mode used in opening log file. - Defaults to 'w'. - Returns: - logging.Logger: The expected logger. - """ - logger = logging.getLogger(name) - if name in logger_initialized: - return logger - for logger_name in logger_initialized: - if name.startswith(logger_name): - return logger - for handler in logger.root.handlers: - if type(handler) is logging.StreamHandler: - handler.setLevel(logging.ERROR) - stream_handler = logging.StreamHandler() - handlers = [stream_handler] - if paddle.distributed.is_available() and paddle.distributed.is_initialized(): - rank = paddle.distributed.get_rank() - else: - rank = 0 - if rank == 0 and log_file is not None: - file_handler = logging.FileHandler(log_file, file_mode) - handlers.append(file_handler) - formatter = logging.Formatter( - "%(asctime)s - %(name)s - %(levelname)s - %(message)s" - ) - for handler in handlers: - handler.setFormatter(formatter) - handler.setLevel(log_level) - logger.addHandler(handler) - if rank == 0: - logger.setLevel(log_level) - else: - logger.setLevel(logging.ERROR) - logger_initialized[name] = True - return logger - - -def print_log(msg, logger=None, level=logging.INFO): - """Print a log message. - Args: - msg (str): The message to be logged. - logger (logging.Logger | str | None): The logger to be used. - Some special loggers are: - - "silent": no message will be printed. - - other str: the logger obtained with `get_root_logger(logger)`. - - None: The `print()` method will be used to print log messages. - level (int): Logging level. Only available when `logger` is a Logger - object or "root". - """ - if logger is None: - print(msg) - elif isinstance(logger, logging.Logger): - logger.log(level, msg) - elif logger == "silent": - pass - elif isinstance(logger, str): - _logger = get_logger(logger) - _logger.log(level, msg) - else: - raise TypeError( - f'logger should be either a logging.Logger object, str, "silent" or None, but got {type(logger)}' - ) +import logging + +import paddle + +logger_initialized = {} + + +def get_root_logger(log_file=None, log_level=logging.INFO, name="main"): + """Get root logger and add a keyword filter to it. + The logger will be initialized if it has not been initialized. By default a + StreamHandler will be added. If `log_file` is specified, a FileHandler will + also be added. The name of the root logger is the top-level package name, + e.g., "mmdet3d". + Args: + log_file (str, optional): File path of log. Defaults to None. + log_level (int, optional): The level of logger. 
+ Defaults to logging.INFO. + name (str, optional): The name of the root logger, also used as a + filter keyword. Defaults to 'mmdet3d'. + Returns: + :obj:`logging.Logger`: The obtained logger + """ + logger = get_logger(name=name, log_file=log_file, log_level=log_level) + logging_filter = logging.Filter(name) + logging_filter.filter = lambda record: record.find(name) != -1 + return logger + + +def get_logger(name, log_file=None, log_level=logging.INFO, file_mode="w"): + """Initialize and get a logger by name. + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified and the process rank is 0, a FileHandler + will also be added. + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + file_mode (str): The file mode used in opening log file. + Defaults to 'w'. + Returns: + logging.Logger: The expected logger. + """ + logger = logging.getLogger(name) + if name in logger_initialized: + return logger + for logger_name in logger_initialized: + if name.startswith(logger_name): + return logger + for handler in logger.root.handlers: + if type(handler) is logging.StreamHandler: + handler.setLevel(logging.ERROR) + stream_handler = logging.StreamHandler() + handlers = [stream_handler] + if paddle.distributed.is_available() and paddle.distributed.is_initialized(): + rank = paddle.distributed.get_rank() + else: + rank = 0 + if rank == 0 and log_file is not None: + file_handler = logging.FileHandler(log_file, file_mode) + handlers.append(file_handler) + formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) + for handler in handlers: + handler.setFormatter(formatter) + handler.setLevel(log_level) + logger.addHandler(handler) + if rank == 0: + logger.setLevel(log_level) + else: + logger.setLevel(logging.ERROR) + logger_initialized[name] = True + return logger + + +def print_log(msg, logger=None, level=logging.INFO): + """Print a log message. + Args: + msg (str): The message to be logged. + logger (logging.Logger | str | None): The logger to be used. + Some special loggers are: + - "silent": no message will be printed. + - other str: the logger obtained with `get_root_logger(logger)`. + - None: The `print()` method will be used to print log messages. + level (int): Logging level. Only available when `logger` is a Logger + object or "root". 
+ """ + if logger is None: + print(msg) + elif isinstance(logger, logging.Logger): + logger.log(level, msg) + elif logger == "silent": + pass + elif isinstance(logger, str): + _logger = get_logger(logger) + _logger.log(level, msg) + else: + raise TypeError( + f'logger should be either a logging.Logger object, str, "silent" or None, but got {type(logger)}' + ) diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/misc.py b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/misc.py index 62518f23ea..6d8f2f18b4 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/misc.py +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/misc.py @@ -1,273 +1,273 @@ -import os -import random -from collections import abc - -import matplotlib.pyplot as plt -import numpy as np -import paddle -import utils.paddle_aux as paddle_aux # NOQA -from mpl_toolkits.mplot3d import Axes3D - - -def index_points(points, idx): - """ - Input: - points: input points data, [B, N, C] - idx: sample index data, [B, S] - Return: - new_points:, indexed points data, [B, S, C] - """ - device = points.place - B = tuple(points.shape)[0] - view_shape = list(tuple(idx.shape)) - view_shape[1:] = [1] * (len(view_shape) - 1) - repeat_shape = list(tuple(idx.shape)) - repeat_shape[0] = 1 - batch_indices = ( - paddle.arange(dtype="int64", end=B) - .to(device) - .view(view_shape) - .repeat(repeat_shape) - ) - new_points = points[batch_indices, idx, :] - return new_points - - -def fps(xyz, npoint): - """ - Input: - xyz: pointcloud data, [B, N, 3] - npoint: number of samples - Return: - centroids: sampled pointcloud index, [B, npoint] - """ - device = xyz.place - B, N, C = tuple(xyz.shape) - centroids = paddle.zeros(shape=[B, npoint], dtype="int64").to(device) - distance = paddle.ones(shape=[B, N]).to(device) * 10000000000.0 - farthest = paddle.randint(low=0, high=N, shape=(B,), dtype="int64").to(device) - batch_indices = paddle.arange(dtype="int64", end=B).to(device) - for i in range(npoint): - centroids[:, i] = farthest - centroid = xyz[batch_indices, farthest, :].view(B, 1, 3) - dist = paddle.sum(x=(xyz - centroid) ** 2, axis=-1) - distance = paddle_aux.min(distance, dist) - farthest = paddle_aux.max(distance, -1)[1] - return index_points(xyz, centroids) - - -def worker_init_fn(worker_id): - np.random.seed(np.random.get_state()[1][0] + worker_id) - - -def build_lambda_sche(opti, config): - if config.get("decay_step") is not None: - - def lr_lbmd(e): - return max(config.lr_decay ** (e / config.decay_step), config.lowest_decay) - - tmp_lr = paddle.optimizer.lr.LambdaDecay( - lr_lambda=lr_lbmd, learning_rate=opti.get_lr() - ) - opti.set_lr_scheduler(tmp_lr) - scheduler = tmp_lr - else: - raise NotImplementedError() - return scheduler - - -def build_lambda_bnsche(model, config): - if config.get("decay_step") is not None: - - def bnm_lmbd(e): - return max( - config.bn_momentum * config.bn_decay ** (e / config.decay_step), - config.lowest_decay, - ) - - bnm_scheduler = BNMomentumScheduler(model, bnm_lmbd) - else: - raise NotImplementedError() - return bnm_scheduler - - -def set_random_seed(seed): - """Set random seed. - Args: - seed (int): Seed to be used. - """ - random.seed(seed) - np.random.seed(seed) - paddle.seed(seed=seed) - - -def is_seq_of(seq, expected_type, seq_type=None): - """Check whether it is a sequence of some type. - Args: - seq (Sequence): The sequence to be checked. - expected_type (type): Expected type of sequence items. - seq_type (type, optional): Expected sequence type. 
- Returns: - bool: Whether the sequence is valid. - """ - if seq_type is None: - exp_seq_type = abc.Sequence - else: - assert isinstance(seq_type, type) - exp_seq_type = seq_type - if not isinstance(seq, exp_seq_type): - return False - for item in seq: - if not isinstance(item, expected_type): - return False - return True - - -def set_bn_momentum_default(bn_momentum): - def fn(m): - if isinstance( - m, (paddle.nn.BatchNorm1D, paddle.nn.BatchNorm2D, paddle.nn.BatchNorm3D) - ): - m.momentum = bn_momentum - - return fn - - -class BNMomentumScheduler(object): - def __init__(self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default): - if not isinstance(model, paddle.nn.Layer): - raise RuntimeError( - "Class '{}' is not a Paddle nn Layer".format(type(model).__name__) - ) - self.model = model - self.setter = setter - self.lmbd = bn_lambda - self.step(last_epoch + 1) - self.last_epoch = last_epoch - - def step(self, epoch=None): - if epoch is None: - epoch = self.last_epoch + 1 - self.last_epoch = epoch - self.model.apply(self.setter(self.lmbd(epoch))) - - def get_momentum(self, epoch=None): - if epoch is None: - epoch = self.last_epoch + 1 - return self.lmbd(epoch) - - -def seprate_point_cloud(xyz, num_points, crop, fixed_points=None, padding_zeros=False): - """ - seprate point cloud: usage : using to generate the incomplete point cloud with a setted number. - """ - _, n, c = tuple(xyz.shape) - assert n == num_points - assert c == 3 - if crop == num_points: - return xyz, None - INPUT = [] - CROP = [] - for points in xyz: - if isinstance(crop, list): - num_crop = random.randint(crop[0], crop[1]) - else: - num_crop = crop - points = points.unsqueeze(axis=0) - if fixed_points is None: - center = paddle.nn.functional.normalize( - x=paddle.randn(shape=[1, 1, 3]), p=2, axis=-1 - ).cuda(blocking=True) - else: - if isinstance(fixed_points, list): - fixed_point = random.sample(fixed_points, 1)[0] - else: - fixed_point = fixed_points - center = fixed_point.reshape(1, 1, 3).cuda(blocking=True) - distance_matrix = paddle.linalg.norm( - x=center.unsqueeze(axis=2) - points.unsqueeze(axis=1), p=2, axis=-1 - ) - idx = paddle.argsort(x=distance_matrix, axis=-1, descending=False)[0, 0] - if padding_zeros: - input_data = points.clone() - input_data[0, idx[:num_crop]] = input_data[0, idx[:num_crop]] * 0 - else: - input_data = points.clone()[0, idx[num_crop:]].unsqueeze(axis=0) - crop_data = points.clone()[0, idx[:num_crop]].unsqueeze(axis=0) - if isinstance(crop, list): - INPUT.append(fps(input_data, 2048)) - CROP.append(fps(crop_data, 2048)) - else: - INPUT.append(input_data) - CROP.append(crop_data) - input_data = paddle.concat(x=INPUT, axis=0) - crop_data = paddle.concat(x=CROP, axis=0) - return input_data, crop_data - - -def get_ptcloud_img(ptcloud): - fig = plt.figure(figsize=(8, 8)) - x = ptcloud - perm_10 = list(range(x.ndim)) - perm_10[1] = 0 - perm_10[0] = 1 - x, z, y = x.transpose(perm=perm_10) - ax = fig.gca(projection=Axes3D.name, adjustable="box") - ax.axis("off") - ax.view_init(30, 45) - max, min = np.max(ptcloud), np.min(ptcloud) - ax.set_xbound(min, max) - ax.set_ybound(min, max) - ax.set_zbound(min, max) - ax.scatter(x, y, z, zdir="z", c=x, cmap="jet") - fig.canvas.draw() - img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - return img - - -def visualize_KITTI( - path, - data_list, - titles=["input", "pred"], - cmap=["bwr", "autumn"], - zdir="y", - xlim=(-1, 1), - ylim=(-1, 1), - zlim=(-1, 1), -): - fig 
= plt.figure(figsize=(6 * len(data_list), 6)) - for i in range(len(data_list)): - ax = fig.add_subplot(1, len(data_list), i + 1, projection="3d") - ax.view_init(30, -120) - ax.set_title(titles[i]) - ax.set_axis_off() - ax.set_xlim(xlim) - ax.set_ylim(ylim) - ax.set_zlim(zlim) - plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0.2, hspace=0) - if not os.path.exists(path): - os.makedirs(path) - pic_path = path + ".png" - fig.savefig(pic_path) - np.save(os.path.join(path, "input.npy"), data_list[0].numpy()) - np.save(os.path.join(path, "pred.npy"), data_list[1].numpy()) - plt.close(fig) - - -def random_dropping(pc, e): - up_num = max(64, 768 // (e // 50 + 1)) - pc = pc - random_num = paddle.randint(low=1, high=up_num, shape=(1, 1))[0, 0] - pc = fps(pc, random_num) - padding = paddle.zeros(shape=[pc.shape[0], 2048 - pc.shape[1], 3]).to(pc.place) - pc = paddle.concat(x=[pc, padding], axis=1) - return pc - - -def random_scale(partial, scale_range=[0.8, 1.2]): - scale = ( - paddle.rand(shape=[1]).cuda(blocking=True) * (scale_range[1] - scale_range[0]) - + scale_range[0] - ) - return partial * scale +import os +import random +from collections import abc + +import matplotlib.pyplot as plt +import numpy as np +import paddle +import utils.paddle_aux as paddle_aux # NOQA +from mpl_toolkits.mplot3d import Axes3D + + +def index_points(points, idx): + """ + Input: + points: input points data, [B, N, C] + idx: sample index data, [B, S] + Return: + new_points:, indexed points data, [B, S, C] + """ + device = points.place + B = tuple(points.shape)[0] + view_shape = list(tuple(idx.shape)) + view_shape[1:] = [1] * (len(view_shape) - 1) + repeat_shape = list(tuple(idx.shape)) + repeat_shape[0] = 1 + batch_indices = ( + paddle.arange(dtype="int64", end=B) + .to(device) + .view(view_shape) + .repeat(repeat_shape) + ) + new_points = points[batch_indices, idx, :] + return new_points + + +def fps(xyz, npoint): + """ + Input: + xyz: pointcloud data, [B, N, 3] + npoint: number of samples + Return: + centroids: sampled pointcloud index, [B, npoint] + """ + device = xyz.place + B, N, C = tuple(xyz.shape) + centroids = paddle.zeros(shape=[B, npoint], dtype="int64").to(device) + distance = paddle.ones(shape=[B, N]).to(device) * 10000000000.0 + farthest = paddle.randint(low=0, high=N, shape=(B,), dtype="int64").to(device) + batch_indices = paddle.arange(dtype="int64", end=B).to(device) + for i in range(npoint): + centroids[:, i] = farthest + centroid = xyz[batch_indices, farthest, :].view(B, 1, 3) + dist = paddle.sum(x=(xyz - centroid) ** 2, axis=-1) + distance = paddle_aux.min(distance, dist) + farthest = paddle_aux.max(distance, -1)[1] + return index_points(xyz, centroids) + + +def worker_init_fn(worker_id): + np.random.seed(np.random.get_state()[1][0] + worker_id) + + +def build_lambda_sche(opti, config): + if config.get("decay_step") is not None: + + def lr_lbmd(e): + return max(config.lr_decay ** (e / config.decay_step), config.lowest_decay) + + tmp_lr = paddle.optimizer.lr.LambdaDecay( + lr_lambda=lr_lbmd, learning_rate=opti.get_lr() + ) + opti.set_lr_scheduler(tmp_lr) + scheduler = tmp_lr + else: + raise NotImplementedError() + return scheduler + + +def build_lambda_bnsche(model, config): + if config.get("decay_step") is not None: + + def bnm_lmbd(e): + return max( + config.bn_momentum * config.bn_decay ** (e / config.decay_step), + config.lowest_decay, + ) + + bnm_scheduler = BNMomentumScheduler(model, bnm_lmbd) + else: + raise NotImplementedError() + return bnm_scheduler + + +def 
set_random_seed(seed): + """Set random seed. + Args: + seed (int): Seed to be used. + """ + random.seed(seed) + np.random.seed(seed) + paddle.seed(seed=seed) + + +def is_seq_of(seq, expected_type, seq_type=None): + """Check whether it is a sequence of some type. + Args: + seq (Sequence): The sequence to be checked. + expected_type (type): Expected type of sequence items. + seq_type (type, optional): Expected sequence type. + Returns: + bool: Whether the sequence is valid. + """ + if seq_type is None: + exp_seq_type = abc.Sequence + else: + assert isinstance(seq_type, type) + exp_seq_type = seq_type + if not isinstance(seq, exp_seq_type): + return False + for item in seq: + if not isinstance(item, expected_type): + return False + return True + + +def set_bn_momentum_default(bn_momentum): + def fn(m): + if isinstance( + m, (paddle.nn.BatchNorm1D, paddle.nn.BatchNorm2D, paddle.nn.BatchNorm3D) + ): + m.momentum = bn_momentum + + return fn + + +class BNMomentumScheduler(object): + def __init__(self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default): + if not isinstance(model, paddle.nn.Layer): + raise RuntimeError( + "Class '{}' is not a Paddle nn Layer".format(type(model).__name__) + ) + self.model = model + self.setter = setter + self.lmbd = bn_lambda + self.step(last_epoch + 1) + self.last_epoch = last_epoch + + def step(self, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + self.last_epoch = epoch + self.model.apply(self.setter(self.lmbd(epoch))) + + def get_momentum(self, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + return self.lmbd(epoch) + + +def seprate_point_cloud(xyz, num_points, crop, fixed_points=None, padding_zeros=False): + """ + seprate point cloud: usage : using to generate the incomplete point cloud with a setted number. 
+ """ + _, n, c = tuple(xyz.shape) + assert n == num_points + assert c == 3 + if crop == num_points: + return xyz, None + INPUT = [] + CROP = [] + for points in xyz: + if isinstance(crop, list): + num_crop = random.randint(crop[0], crop[1]) + else: + num_crop = crop + points = points.unsqueeze(axis=0) + if fixed_points is None: + center = paddle.nn.functional.normalize( + x=paddle.randn(shape=[1, 1, 3]), p=2, axis=-1 + ).cuda(blocking=True) + else: + if isinstance(fixed_points, list): + fixed_point = random.sample(fixed_points, 1)[0] + else: + fixed_point = fixed_points + center = fixed_point.reshape(1, 1, 3).cuda(blocking=True) + distance_matrix = paddle.linalg.norm( + x=center.unsqueeze(axis=2) - points.unsqueeze(axis=1), p=2, axis=-1 + ) + idx = paddle.argsort(x=distance_matrix, axis=-1, descending=False)[0, 0] + if padding_zeros: + input_data = points.clone() + input_data[0, idx[:num_crop]] = input_data[0, idx[:num_crop]] * 0 + else: + input_data = points.clone()[0, idx[num_crop:]].unsqueeze(axis=0) + crop_data = points.clone()[0, idx[:num_crop]].unsqueeze(axis=0) + if isinstance(crop, list): + INPUT.append(fps(input_data, 2048)) + CROP.append(fps(crop_data, 2048)) + else: + INPUT.append(input_data) + CROP.append(crop_data) + input_data = paddle.concat(x=INPUT, axis=0) + crop_data = paddle.concat(x=CROP, axis=0) + return input_data, crop_data + + +def get_ptcloud_img(ptcloud): + fig = plt.figure(figsize=(8, 8)) + x = ptcloud + perm_10 = list(range(x.ndim)) + perm_10[1] = 0 + perm_10[0] = 1 + x, z, y = x.transpose(perm=perm_10) + ax = fig.gca(projection=Axes3D.name, adjustable="box") + ax.axis("off") + ax.view_init(30, 45) + max, min = np.max(ptcloud), np.min(ptcloud) + ax.set_xbound(min, max) + ax.set_ybound(min, max) + ax.set_zbound(min, max) + ax.scatter(x, y, z, zdir="z", c=x, cmap="jet") + fig.canvas.draw() + img = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") + img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + return img + + +def visualize_KITTI( + path, + data_list, + titles=["input", "pred"], + cmap=["bwr", "autumn"], + zdir="y", + xlim=(-1, 1), + ylim=(-1, 1), + zlim=(-1, 1), +): + fig = plt.figure(figsize=(6 * len(data_list), 6)) + for i in range(len(data_list)): + ax = fig.add_subplot(1, len(data_list), i + 1, projection="3d") + ax.view_init(30, -120) + ax.set_title(titles[i]) + ax.set_axis_off() + ax.set_xlim(xlim) + ax.set_ylim(ylim) + ax.set_zlim(zlim) + plt.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0.2, hspace=0) + if not os.path.exists(path): + os.makedirs(path) + pic_path = path + ".png" + fig.savefig(pic_path) + np.save(os.path.join(path, "input.npy"), data_list[0].numpy()) + np.save(os.path.join(path, "pred.npy"), data_list[1].numpy()) + plt.close(fig) + + +def random_dropping(pc, e): + up_num = max(64, 768 // (e // 50 + 1)) + pc = pc + random_num = paddle.randint(low=1, high=up_num, shape=(1, 1))[0, 0] + pc = fps(pc, random_num) + padding = paddle.zeros(shape=[pc.shape[0], 2048 - pc.shape[1], 3]).to(pc.place) + pc = paddle.concat(x=[pc, padding], axis=1) + return pc + + +def random_scale(partial, scale_range=[0.8, 1.2]): + scale = ( + paddle.rand(shape=[1]).cuda(blocking=True) * (scale_range[1] - scale_range[0]) + + scale_range[0] + ) + return partial * scale diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/point_encoder.py b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/point_encoder.py index b014506dc2..3e80e6b44b 100644 --- 
a/jointContribution/IJCAI_2024/bju/geom/models/pointbert/point_encoder.py +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointbert/point_encoder.py @@ -1,300 +1,300 @@ -import paddle -import utils.paddle_aux # NOQA -from geom.models.pointbert.checkpoint import get_missing_parameters_message -from geom.models.pointbert.checkpoint import get_unexpected_parameters_message -from geom.models.pointbert.dvae import Encoder -from geom.models.pointbert.dvae import Group -from geom.models.pointbert.logger import print_log - - -class Mlp(paddle.nn.Layer): - def __init__( - self, - in_features, - hidden_features=None, - out_features=None, - act_layer=paddle.nn.GELU, - drop=0.0, - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = paddle.nn.Linear( - in_features=in_features, out_features=hidden_features - ) - self.act = act_layer() - self.fc2 = paddle.nn.Linear( - in_features=hidden_features, out_features=out_features - ) - self.drop = paddle.nn.Dropout(p=drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(paddle.nn.Layer): - def __init__( - self, - dim, - num_heads=8, - qkv_bias=False, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - self.qkv = paddle.nn.Linear( - in_features=dim, out_features=dim * 3, bias_attr=qkv_bias - ) - self.attn_drop = paddle.nn.Dropout(p=attn_drop) - self.proj = paddle.nn.Linear(in_features=dim, out_features=dim) - self.proj_drop = paddle.nn.Dropout(p=proj_drop) - - def forward(self, x): - B, N, C = tuple(x.shape) - qkv = ( - self.qkv(x) - .reshape(B, N, 3, self.num_heads, C // self.num_heads) - .transpose(perm=[2, 0, 3, 1, 4]) - ) - q, k, v = qkv[0], qkv[1], qkv[2] - x = k - perm_8 = list(range(x.ndim)) - perm_8[-2] = -1 - perm_8[-1] = -2 - attn = q @ x.transpose(perm=perm_8) * self.scale - attn = paddle.nn.functional.softmax(attn, axis=-1) - attn = self.attn_drop(attn) - x = attn @ v - perm_9 = list(range(x.ndim)) - perm_9[1] = 2 - perm_9[2] = 1 - x = x.transpose(perm=perm_9).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class DropPath(paddle.nn.Layer): - def __init__(self, drop_prob=None): - super(DropPath, self).__init__() - self.drop_prob = drop_prob - - def forward(self, x): - if self.drop_prob is None or self.drop_prob == 0.0 or not self.training: - return x - keep_prob = 1 - self.drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) - random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) - binary_tensor = paddle.floor(random_tensor) - output = x.divide(keep_prob) * binary_tensor - return output - - -class Block(paddle.nn.Layer): - def __init__( - self, - dim, - num_heads, - mlp_ratio=4.0, - qkv_bias=False, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - act_layer=paddle.nn.GELU, - norm_layer=paddle.nn.LayerNorm, - ): - super().__init__() - self.norm1 = norm_layer(dim) - self.drop_path = ( - DropPath(drop_path) if drop_path > 0.0 else paddle.nn.Identity() - ) - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, - hidden_features=mlp_hidden_dim, - act_layer=act_layer, - drop=drop, - ) - self.attn = Attention( - dim, - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop, - ) - - def forward(self, 
x): - x = x + self.drop_path(self.attn(self.norm1(x))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class TransformerEncoder(paddle.nn.Layer): - """Transformer Encoder without hierarchical structure""" - - def __init__( - self, - embed_dim=768, - depth=4, - num_heads=12, - mlp_ratio=4.0, - qkv_bias=False, - qk_scale=None, - drop_rate=0.0, - attn_drop_rate=0.0, - drop_path_rate=0.0, - ): - super().__init__() - self.blocks = paddle.nn.LayerList( - sublayers=[ - Block( - dim=embed_dim, - num_heads=num_heads, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=drop_path_rate[i] - if isinstance(drop_path_rate, list) - else drop_path_rate, - ) - for i in range(depth) - ] - ) - - def forward(self, x, pos): - for _, block in enumerate(self.blocks): - x = block(x + pos) - return x - - -class PointTransformer(paddle.nn.Layer): - def __init__(self, config, **kwargs): - super().__init__() - self.config = config - self.args = kwargs["args"] - self.trans_dim = config.trans_dim - self.depth = config.depth - self.drop_path_rate = config.drop_path_rate - self.cls_dim = config.cls_dim - self.num_heads = config.num_heads - self.group_size = config.group_size - self.num_group = config.num_group - self.group_divider = Group(num_group=self.num_group, group_size=self.group_size) - self.encoder_dims = config.encoder_dims - self.encoder = Encoder(encoder_channel=self.encoder_dims) - self.reduce_dim = paddle.nn.Linear( - in_features=self.encoder_dims, out_features=self.trans_dim - ) - out_6 = paddle.create_parameter( - shape=paddle.zeros(shape=[1, 1, self.trans_dim]).shape, - dtype=paddle.zeros(shape=[1, 1, self.trans_dim]).numpy().dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.zeros(shape=[1, 1, self.trans_dim]) - ), - ) - out_6.stop_gradient = not True - self.cls_token = out_6 - out_7 = paddle.create_parameter( - shape=paddle.randn(shape=[1, 1, self.trans_dim]).shape, - dtype=paddle.randn(shape=[1, 1, self.trans_dim]).numpy().dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.randn(shape=[1, 1, self.trans_dim]) - ), - ) - out_7.stop_gradient = not True - self.cls_pos = out_7 - self.pos_embed = paddle.nn.Sequential( - paddle.nn.Linear(in_features=3, out_features=128), - paddle.nn.GELU(), - paddle.nn.Linear(in_features=128, out_features=self.trans_dim), - ) - dpr = [ - x.item() - for x in paddle.linspace(start=0, stop=self.drop_path_rate, num=self.depth) - ] - self.blocks = TransformerEncoder( - embed_dim=self.trans_dim, - depth=self.depth, - drop_path_rate=dpr, - num_heads=self.num_heads, - ) - self.norm = paddle.nn.LayerNorm(normalized_shape=self.trans_dim) - - def build_loss_func(self): - self.loss_ce = paddle.nn.CrossEntropyLoss() - - def get_loss_acc(self, pred, gt, smoothing=True): - gt = gt.view(-1).astype(dtype="int64") - if smoothing: - eps = 0.2 - n_class = pred.shape[1] - one_hot = paddle.zeros_like(x=pred).put_along_axis( - axis=1, indices=gt.view(-1, 1), values=1 - ) - one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1) - log_prb = paddle.nn.functional.log_softmax(x=pred, axis=1) - loss = -(one_hot * log_prb).sum(axis=1).mean() - else: - loss = self.loss_ce(pred, gt.astype(dtype="int64")) - pred = pred.argmax(axis=-1) - acc = (pred == gt).sum() / float(gt.shape[0]) - return loss, acc * 100 - - def load_model_from_ckpt(self, bert_ckpt_path): - ckpt = paddle.load(path=bert_ckpt_path) - base_ckpt = {k.replace("module.", ""): v for k, v in 
ckpt["base_model"].items()} - for k in list(base_ckpt.keys()): - if k.startswith("transformer_q") and not k.startswith( - "transformer_q.cls_head" - ): - base_ckpt[k[len("transformer_q.") :]] = base_ckpt[k] - elif k.startswith("base_model"): - base_ckpt[k[len("base_model.") :]] = base_ckpt[k] - del base_ckpt[k] - incompatible = self.set_state_dict( - state_dict=base_ckpt, use_structured_name=False - ) - if incompatible.missing_keys: - print_log("missing_keys", logger="Transformer") - print_log( - get_missing_parameters_message(incompatible.missing_keys), - logger="Transformer", - ) - if incompatible.unexpected_keys: - print_log("unexpected_keys", logger="Transformer") - print_log( - get_unexpected_parameters_message(incompatible.unexpected_keys), - logger="Transformer", - ) - print_log( - f"[Transformer] Successful Loading the ckpt from {bert_ckpt_path}", - logger="Transformer", - ) - - def forward(self, pts): - neighborhood, center = self.group_divider(pts) - group_input_tokens = self.encoder(neighborhood) - group_input_tokens = self.reduce_dim(group_input_tokens) - cls_tokens = self.cls_token.expand(shape=[group_input_tokens.shape[0], -1, -1]) - cls_pos = self.cls_pos.expand(shape=[group_input_tokens.shape[0], -1, -1]) - pos = self.pos_embed(center) - x = paddle.concat(x=(cls_tokens, group_input_tokens), axis=1) - pos = paddle.concat(x=(cls_pos, pos), axis=1) - x = self.blocks(x, pos) - x = self.norm(x) - concat_f = paddle.concat(x=[x[:, 0], x[:, 1:].max(1)[0]], axis=-1) - return concat_f +import paddle +import utils.paddle_aux # NOQA +from geom.models.pointbert.checkpoint import get_missing_parameters_message +from geom.models.pointbert.checkpoint import get_unexpected_parameters_message +from geom.models.pointbert.dvae import Encoder +from geom.models.pointbert.dvae import Group +from geom.models.pointbert.logger import print_log + + +class Mlp(paddle.nn.Layer): + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=paddle.nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = paddle.nn.Linear( + in_features=in_features, out_features=hidden_features + ) + self.act = act_layer() + self.fc2 = paddle.nn.Linear( + in_features=hidden_features, out_features=out_features + ) + self.drop = paddle.nn.Dropout(p=drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(paddle.nn.Layer): + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + self.qkv = paddle.nn.Linear( + in_features=dim, out_features=dim * 3, bias_attr=qkv_bias + ) + self.attn_drop = paddle.nn.Dropout(p=attn_drop) + self.proj = paddle.nn.Linear(in_features=dim, out_features=dim) + self.proj_drop = paddle.nn.Dropout(p=proj_drop) + + def forward(self, x): + B, N, C = tuple(x.shape) + qkv = ( + self.qkv(x) + .reshape(B, N, 3, self.num_heads, C // self.num_heads) + .transpose(perm=[2, 0, 3, 1, 4]) + ) + q, k, v = qkv[0], qkv[1], qkv[2] + x = k + perm_8 = list(range(x.ndim)) + perm_8[-2] = -1 + perm_8[-1] = -2 + attn = q @ x.transpose(perm=perm_8) * self.scale + attn = paddle.nn.functional.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + x = attn @ v + perm_9 = list(range(x.ndim)) + perm_9[1] = 2 + perm_9[2] 
= 1 + x = x.transpose(perm=perm_9).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class DropPath(paddle.nn.Layer): + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + if self.drop_prob is None or self.drop_prob == 0.0 or not self.training: + return x + keep_prob = 1 - self.drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) + random_tensor = keep_prob + paddle.rand(shape, dtype=x.dtype) + binary_tensor = paddle.floor(random_tensor) + output = x.divide(keep_prob) * binary_tensor + return output + + +class Block(paddle.nn.Layer): + def __init__( + self, + dim, + num_heads, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + act_layer=paddle.nn.GELU, + norm_layer=paddle.nn.LayerNorm, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.drop_path = ( + DropPath(drop_path) if drop_path > 0.0 else paddle.nn.Identity() + ) + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class TransformerEncoder(paddle.nn.Layer): + """Transformer Encoder without hierarchical structure""" + + def __init__( + self, + embed_dim=768, + depth=4, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + ): + super().__init__() + self.blocks = paddle.nn.LayerList( + sublayers=[ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=drop_path_rate[i] + if isinstance(drop_path_rate, list) + else drop_path_rate, + ) + for i in range(depth) + ] + ) + + def forward(self, x, pos): + for _, block in enumerate(self.blocks): + x = block(x + pos) + return x + + +class PointTransformer(paddle.nn.Layer): + def __init__(self, config, **kwargs): + super().__init__() + self.config = config + self.args = kwargs["args"] + self.trans_dim = config.trans_dim + self.depth = config.depth + self.drop_path_rate = config.drop_path_rate + self.cls_dim = config.cls_dim + self.num_heads = config.num_heads + self.group_size = config.group_size + self.num_group = config.num_group + self.group_divider = Group(num_group=self.num_group, group_size=self.group_size) + self.encoder_dims = config.encoder_dims + self.encoder = Encoder(encoder_channel=self.encoder_dims) + self.reduce_dim = paddle.nn.Linear( + in_features=self.encoder_dims, out_features=self.trans_dim + ) + out_6 = paddle.create_parameter( + shape=paddle.zeros(shape=[1, 1, self.trans_dim]).shape, + dtype=paddle.zeros(shape=[1, 1, self.trans_dim]).numpy().dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.zeros(shape=[1, 1, self.trans_dim]) + ), + ) + out_6.stop_gradient = not True + self.cls_token = out_6 + out_7 = paddle.create_parameter( + shape=paddle.randn(shape=[1, 1, self.trans_dim]).shape, + dtype=paddle.randn(shape=[1, 1, self.trans_dim]).numpy().dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.randn(shape=[1, 1, self.trans_dim]) + ), + ) + out_7.stop_gradient = not True + self.cls_pos 
= out_7 + self.pos_embed = paddle.nn.Sequential( + paddle.nn.Linear(in_features=3, out_features=128), + paddle.nn.GELU(), + paddle.nn.Linear(in_features=128, out_features=self.trans_dim), + ) + dpr = [ + x.item() + for x in paddle.linspace(start=0, stop=self.drop_path_rate, num=self.depth) + ] + self.blocks = TransformerEncoder( + embed_dim=self.trans_dim, + depth=self.depth, + drop_path_rate=dpr, + num_heads=self.num_heads, + ) + self.norm = paddle.nn.LayerNorm(normalized_shape=self.trans_dim) + + def build_loss_func(self): + self.loss_ce = paddle.nn.CrossEntropyLoss() + + def get_loss_acc(self, pred, gt, smoothing=True): + gt = gt.view(-1).astype(dtype="int64") + if smoothing: + eps = 0.2 + n_class = pred.shape[1] + one_hot = paddle.zeros_like(x=pred).put_along_axis( + axis=1, indices=gt.view(-1, 1), values=1 + ) + one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1) + log_prb = paddle.nn.functional.log_softmax(x=pred, axis=1) + loss = -(one_hot * log_prb).sum(axis=1).mean() + else: + loss = self.loss_ce(pred, gt.astype(dtype="int64")) + pred = pred.argmax(axis=-1) + acc = (pred == gt).sum() / float(gt.shape[0]) + return loss, acc * 100 + + def load_model_from_ckpt(self, bert_ckpt_path): + ckpt = paddle.load(path=bert_ckpt_path) + base_ckpt = {k.replace("module.", ""): v for k, v in ckpt["base_model"].items()} + for k in list(base_ckpt.keys()): + if k.startswith("transformer_q") and not k.startswith( + "transformer_q.cls_head" + ): + base_ckpt[k[len("transformer_q.") :]] = base_ckpt[k] + elif k.startswith("base_model"): + base_ckpt[k[len("base_model.") :]] = base_ckpt[k] + del base_ckpt[k] + incompatible = self.set_state_dict( + state_dict=base_ckpt, use_structured_name=False + ) + if incompatible.missing_keys: + print_log("missing_keys", logger="Transformer") + print_log( + get_missing_parameters_message(incompatible.missing_keys), + logger="Transformer", + ) + if incompatible.unexpected_keys: + print_log("unexpected_keys", logger="Transformer") + print_log( + get_unexpected_parameters_message(incompatible.unexpected_keys), + logger="Transformer", + ) + print_log( + f"[Transformer] Successful Loading the ckpt from {bert_ckpt_path}", + logger="Transformer", + ) + + def forward(self, pts): + neighborhood, center = self.group_divider(pts) + group_input_tokens = self.encoder(neighborhood) + group_input_tokens = self.reduce_dim(group_input_tokens) + cls_tokens = self.cls_token.expand(shape=[group_input_tokens.shape[0], -1, -1]) + cls_pos = self.cls_pos.expand(shape=[group_input_tokens.shape[0], -1, -1]) + pos = self.pos_embed(center) + x = paddle.concat(x=(cls_tokens, group_input_tokens), axis=1) + pos = paddle.concat(x=(cls_pos, pos), axis=1) + x = self.blocks(x, pos) + x = self.norm(x) + concat_f = paddle.concat(x=[x[:, 0], x[:, 1:].max(1)[0]], axis=-1) + return concat_f diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointnext/pointnext-s.yaml b/jointContribution/IJCAI_2024/bju/geom/models/pointnext/pointnext-s.yaml index c78bc0ec5b..55dddd7583 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/pointnext/pointnext-s.yaml +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointnext/pointnext-s.yaml @@ -1,36 +1,36 @@ -# FLOPs GMACs Params.(M) -# 1.64 0.81 1.367 -# 2040.039810480711 - -model: - NAME: BaseCls - encoder_args: - NAME: PointNextEncoder - blocks: [1, 1, 1, 1, 1, 1] - strides: [1, 2, 2, 2, 2, 1] - width: 32 - in_channels: 4 - sa_layers: 2 - sa_use_res: True - radius: 0.15 - radius_scaling: 1.5 - nsample: 32 - expansion: 4 - aggr_args: - 
feature_type: 'dp_fj' - reduction: 'max' - group_args: - NAME: 'ballquery' - normalize_dp: True - conv_args: - order: conv-norm-act - act_args: - act: 'relu' - norm_args: - norm: 'bn' - cls_args: - NAME: ClsHead - num_classes: -1 - mlps: [512, 256] - norm_args: - norm: 'bn1d' +# FLOPs GMACs Params.(M) +# 1.64 0.81 1.367 +# 2040.039810480711 + +model: + NAME: BaseCls + encoder_args: + NAME: PointNextEncoder + blocks: [1, 1, 1, 1, 1, 1] + strides: [1, 2, 2, 2, 2, 1] + width: 32 + in_channels: 4 + sa_layers: 2 + sa_use_res: True + radius: 0.15 + radius_scaling: 1.5 + nsample: 32 + expansion: 4 + aggr_args: + feature_type: 'dp_fj' + reduction: 'max' + group_args: + NAME: 'ballquery' + normalize_dp: True + conv_args: + order: conv-norm-act + act_args: + act: 'relu' + norm_args: + norm: 'bn' + cls_args: + NAME: ClsHead + num_classes: -1 + mlps: [512, 256] + norm_args: + norm: 'bn1d' diff --git a/jointContribution/IJCAI_2024/bju/geom/models/pointnext/pointnext.py b/jointContribution/IJCAI_2024/bju/geom/models/pointnext/pointnext.py index f403e2492b..3d4608a473 100644 --- a/jointContribution/IJCAI_2024/bju/geom/models/pointnext/pointnext.py +++ b/jointContribution/IJCAI_2024/bju/geom/models/pointnext/pointnext.py @@ -1,29 +1,29 @@ -import os -import sys - -from openpoints.models import build_model_from_cfg -from openpoints.utils import EasyConfig -from openpoints.utils import cal_model_parm_nums - -pointnext_dir = "./geom/models/pointnext/PointNeXt" - - -def add_path_recursive(directory): - sys.path.append(directory) - for root, dirs, files in os.walk(directory): - for d in dirs: - add_path_recursive(os.path.join(root, d)) - - -add_path_recursive(pointnext_dir) - - -def PointNEXT(): - cfg_path = "./geom/models/pointnext/pointnext-s.yaml" - cfg = EasyConfig() - cfg.load(cfg_path, recursive=True) - model = build_model_from_cfg(cfg.model) - model_size = cal_model_parm_nums(model) - print("model size:") - print(model_size) - return model +import os +import sys + +from openpoints.models import build_model_from_cfg +from openpoints.utils import EasyConfig +from openpoints.utils import cal_model_parm_nums + +pointnext_dir = "./geom/models/pointnext/PointNeXt" + + +def add_path_recursive(directory): + sys.path.append(directory) + for root, dirs, files in os.walk(directory): + for d in dirs: + add_path_recursive(os.path.join(root, d)) + + +add_path_recursive(pointnext_dir) + + +def PointNEXT(): + cfg_path = "./geom/models/pointnext/pointnext-s.yaml" + cfg = EasyConfig() + cfg.load(cfg_path, recursive=True) + model = build_model_from_cfg(cfg.model) + model_size = cal_model_parm_nums(model) + print("model size:") + print(model_size) + return model diff --git a/jointContribution/IJCAI_2024/bju/geom/pc_encoder.py b/jointContribution/IJCAI_2024/bju/geom/pc_encoder.py index bcfbe3e994..4b12ef25d6 100644 --- a/jointContribution/IJCAI_2024/bju/geom/pc_encoder.py +++ b/jointContribution/IJCAI_2024/bju/geom/pc_encoder.py @@ -1,27 +1,27 @@ -import copy -from collections import OrderedDict - -import paddle -from geom.models import ULIP_models as models - - -def load_geom_encoder(args, pretrained=True, frozen=False): - ckpt = paddle.load(path=args.ulip_ckpt) - state_dict = OrderedDict() - for k, v in ckpt["state_dict"].items(): - state_dict[k.replace("module.", "")] = v - print("=> creating model: {}".format(args.ulip_model)) - model = getattr(models, args.ulip_model)(args=args) - if pretrained: - model.set_state_dict(state_dict=state_dict, use_structured_name=True) - print("=> loaded resume checkpoint 
'{}'".format(args.ulip_ckpt)) - else: - print("=> new model without pretraining") - point_encoder = copy.deepcopy(model.point_encoder) - pc_projection = copy.deepcopy(model.pc_projection) - del model - if frozen: - for params in point_encoder.parameters(): - params.stop_gradient = not False - pc_projection.stop_gradient = not False - return point_encoder, pc_projection +import copy +from collections import OrderedDict + +import paddle +from geom.models import ULIP_models as models + + +def load_geom_encoder(args, pretrained=True, frozen=False): + ckpt = paddle.load(path=args.ulip_ckpt) + state_dict = OrderedDict() + for k, v in ckpt["state_dict"].items(): + state_dict[k.replace("module.", "")] = v + print("=> creating model: {}".format(args.ulip_model)) + model = getattr(models, args.ulip_model)(args=args) + if pretrained: + model.set_state_dict(state_dict=state_dict, use_structured_name=True) + print("=> loaded resume checkpoint '{}'".format(args.ulip_ckpt)) + else: + print("=> new model without pretraining") + point_encoder = copy.deepcopy(model.point_encoder) + pc_projection = copy.deepcopy(model.pc_projection) + del model + if frozen: + for params in point_encoder.parameters(): + params.stop_gradient = not False + pc_projection.stop_gradient = not False + return point_encoder, pc_projection diff --git a/jointContribution/IJCAI_2024/bju/infer.py b/jointContribution/IJCAI_2024/bju/infer.py index 917120d318..5e7bff0a6e 100644 --- a/jointContribution/IJCAI_2024/bju/infer.py +++ b/jointContribution/IJCAI_2024/bju/infer.py @@ -1,115 +1,115 @@ -import argparse -import os - -import numpy as np -import paddle -import yaml -from dataset import GraphDataset -from dataset import read_data -from geom.pc_encoder import load_geom_encoder -from model import MLP -from model import NN -from model import GeoCA3D -from paddle.io import DataLoader - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--train_data_dir", - default="./Dataset/Trainset_track_B", - ) - parser.add_argument( - "--test_data_dir", - default="./Dataset/Testset_track_B/Inference", - ) - parser.add_argument( - "--info_dir", - default="./Dataset/Testset_track_B/Auxiliary", - ) - parser.add_argument("--extra_data_dir", default=None) - parser.add_argument("--fold_id", default=0, type=int) - parser.add_argument("--gpu", default=0, type=int) - parser.add_argument("--val_iter", default=10, type=int) - parser.add_argument("--config_dir", default="params.yaml") - parser.add_argument("--ulip_model", default="ULIP_PointBERT") - parser.add_argument( - "--ulip_ckpt", - default="./geom/ckpt/checkpoint_pointbert.pdparams", - ) - parser.add_argument("--frozen", default=True) - parser.add_argument("--cfd_config_dir", default="cfd_params.yaml") - parser.add_argument("--cfd_model", default="MLP") - parser.add_argument("--cfd_mesh", default=True) - parser.add_argument("--weight", default=0.5, type=float) - args = parser.parse_args() - return args - - -if __name__ == "__main__": - # load setting - args = parse_args() - print(args) - with open(args.cfd_config_dir, "r") as f: - cfd_hparams = yaml.safe_load(f)[args.cfd_model] - print(cfd_hparams) - with open(args.config_dir, "r") as f: - hparams = yaml.safe_load(f)["GeoCA3D"] - n_gpu = paddle.device.cuda.device_count() - use_cuda = 0 <= args.gpu < n_gpu and paddle.device.cuda.device_count() >= 1 - device = str(f"cuda:{args.gpu}" if use_cuda else "cpu").replace("cuda", "gpu") - - # load data - train_data, val_data, test_data, coef_norm, test_index = read_data(args, norm=True) 
- use_height = False - r = cfd_hparams["r"] if "r" in cfd_hparams.keys() else None - test_ds = GraphDataset( - test_data, use_height=use_height, use_cfd_mesh=args.cfd_mesh, r=r - ) - test_loader = DataLoader(test_ds, batch_size=1, collate_fn=test_ds.collate_fn) - - # load model - if args.ulip_model == "none": - print( - "inference model use ULIP_PointBERT, please set 'ulip_model' to 'ULIP_PointBERT'" - ) - else: - g_encoder, g_proj = load_geom_encoder( - args, pretrained=False, frozen=args.frozen - ) - print(hparams) - encoder = MLP(cfd_hparams["encoder"], batch_norm=False) - decoder = MLP(cfd_hparams["decoder"], batch_norm=False) - - if args.cfd_model == "MLP": - model = NN(cfd_hparams, encoder, decoder) - else: - print("inference model use mlp, please set 'cfd_model' to 'MLP'") - - model = GeoCA3D(model, geom_encoder=g_encoder, geom_proj=g_proj, **hparams).to( - device - ) - path = "./pretrained_checkpoint.pdparams" - loaded_state_dict = paddle.load(path=path) - model.set_state_dict(state_dict=loaded_state_dict) - - # infer - model.eval() - index = 0 - if not os.path.exists("./results"): - os.makedirs("./results") - - with paddle.no_grad(): - for cfd_data, geom in test_loader: - out = model((cfd_data[0], geom)) - if coef_norm is not None: - mean = paddle.to_tensor(data=coef_norm[2]).to(device) - std = paddle.to_tensor(data=coef_norm[3]).to(device) - pred_press = out * std[-1] + mean[-1] - np.save( - f"./results/press_{test_index[index]}.npy", - pred_press.detach().cpu().numpy().squeeze(), - ) - print(f"Finish save sample {index}") - index = index + 1 - npy_dir = "./results" +import argparse +import os + +import numpy as np +import paddle +import yaml +from dataset import GraphDataset +from dataset import read_data +from geom.pc_encoder import load_geom_encoder +from model import MLP +from model import NN +from model import GeoCA3D +from paddle.io import DataLoader + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--train_data_dir", + default="./Dataset/Trainset_track_B", + ) + parser.add_argument( + "--test_data_dir", + default="./Dataset/Testset_track_B/Inference", + ) + parser.add_argument( + "--info_dir", + default="./Dataset/Testset_track_B/Auxiliary", + ) + parser.add_argument("--extra_data_dir", default=None) + parser.add_argument("--fold_id", default=0, type=int) + parser.add_argument("--gpu", default=0, type=int) + parser.add_argument("--val_iter", default=10, type=int) + parser.add_argument("--config_dir", default="params.yaml") + parser.add_argument("--ulip_model", default="ULIP_PointBERT") + parser.add_argument( + "--ulip_ckpt", + default="./geom/ckpt/checkpoint_pointbert.pdparams", + ) + parser.add_argument("--frozen", default=True) + parser.add_argument("--cfd_config_dir", default="cfd_params.yaml") + parser.add_argument("--cfd_model", default="MLP") + parser.add_argument("--cfd_mesh", default=True) + parser.add_argument("--weight", default=0.5, type=float) + args = parser.parse_args() + return args + + +if __name__ == "__main__": + # load setting + args = parse_args() + print(args) + with open(args.cfd_config_dir, "r") as f: + cfd_hparams = yaml.safe_load(f)[args.cfd_model] + print(cfd_hparams) + with open(args.config_dir, "r") as f: + hparams = yaml.safe_load(f)["GeoCA3D"] + n_gpu = paddle.device.cuda.device_count() + use_cuda = 0 <= args.gpu < n_gpu and paddle.device.cuda.device_count() >= 1 + device = str(f"cuda:{args.gpu}" if use_cuda else "cpu").replace("cuda", "gpu") + + # load data + train_data, val_data, test_data, coef_norm, 
test_index = read_data(args, norm=True) + use_height = False + r = cfd_hparams["r"] if "r" in cfd_hparams.keys() else None + test_ds = GraphDataset( + test_data, use_height=use_height, use_cfd_mesh=args.cfd_mesh, r=r + ) + test_loader = DataLoader(test_ds, batch_size=1, collate_fn=test_ds.collate_fn) + + # load model + if args.ulip_model == "none": + print( + "inference model use ULIP_PointBERT, please set 'ulip_model' to 'ULIP_PointBERT'" + ) + else: + g_encoder, g_proj = load_geom_encoder( + args, pretrained=False, frozen=args.frozen + ) + print(hparams) + encoder = MLP(cfd_hparams["encoder"], batch_norm=False) + decoder = MLP(cfd_hparams["decoder"], batch_norm=False) + + if args.cfd_model == "MLP": + model = NN(cfd_hparams, encoder, decoder) + else: + print("inference model use mlp, please set 'cfd_model' to 'MLP'") + + model = GeoCA3D(model, geom_encoder=g_encoder, geom_proj=g_proj, **hparams).to( + device + ) + path = "./pretrained_checkpoint.pdparams" + loaded_state_dict = paddle.load(path=path) + model.set_state_dict(state_dict=loaded_state_dict) + + # infer + model.eval() + index = 0 + if not os.path.exists("./results"): + os.makedirs("./results") + + with paddle.no_grad(): + for cfd_data, geom in test_loader: + out = model((cfd_data[0], geom)) + if coef_norm is not None: + mean = paddle.to_tensor(data=coef_norm[2]).to(device) + std = paddle.to_tensor(data=coef_norm[3]).to(device) + pred_press = out * std[-1] + mean[-1] + np.save( + f"./results/press_{test_index[index]}.npy", + pred_press.detach().cpu().numpy().squeeze(), + ) + print(f"Finish save sample {index}") + index = index + 1 + npy_dir = "./results" diff --git a/jointContribution/IJCAI_2024/bju/model.py b/jointContribution/IJCAI_2024/bju/model.py index 7f89e21531..bcc30e68b5 100644 --- a/jointContribution/IJCAI_2024/bju/model.py +++ b/jointContribution/IJCAI_2024/bju/model.py @@ -1,328 +1,328 @@ -import math - -import paddle -from einops import rearrange -from paddle.nn import Linear -from utils.utils import default - - -class BaseModel(paddle.nn.Layer): - def __init__(self, hparams, encoder, decoder): - super(BaseModel, self).__init__() - self.nb_hidden_layers = hparams["nb_hidden_layers"] - self.size_hidden_layers = hparams["size_hidden_layers"] - self.enc_dim = hparams["encoder"][-1] - self.dec_dim = hparams["decoder"][0] - self.bn_bool = hparams["bn_bool"] - self.res_bool = hparams["res_bool"] - self.activation = paddle.nn.functional.gelu - self.encoder = encoder - self.decoder = decoder - self.in_layer = self._in_layer(hparams) - self.hidden_layers = self._hidden_layers(hparams) - self.out_layer = self._out_layer(hparams) - self.bn = self._bn(hparams) - - def _in_layer(self, hparams): - raise NotImplementedError - - def _hidden_layers(self, hparams): - raise NotImplementedError - - def _out_layer(self, hparams): - raise NotImplementedError - - def _bn(self, hparams): - bn = None - if self.bn_bool: - bn = paddle.nn.LayerList() - for n in range(self.nb_hidden_layers): - bn.append( - paddle.nn.BatchNorm1D( - num_features=self.size_hidden_layers, use_global_stats=False - ) - ) - return bn - - def forward(self, data): - z, edge_index = data.x, data.edge_index - if hasattr(self, "get_edge_attr"): - edge_attr = self.get_edge_attr(z, edge_index) - z = self.encoder(z) - if self.enc_dim == self.dec_dim: - z_in = z - if hasattr(self, "get_edge_attr"): - z = self.in_layer(z, edge_index, edge_attr) - else: - z = self.in_layer(z, edge_index) - if self.bn_bool: - z = self.bn[0](z) - z = self.activation(z) - for n in 
range(self.nb_hidden_layers - 1): - if hasattr(self, "res_bool") and self.res_bool: - z_res = z - if hasattr(self, "get_edge_attr"): - z = self.hidden_layers[n](z, edge_index, edge_attr) - else: - z = self.hidden_layers[n](z, edge_index) - if self.bn_bool: - z = self.bn[n + 1](z) - z = self.activation(z) - if hasattr(self, "res_bool") and self.res_bool: - z = z + z_res - if hasattr(self, "get_edge_attr"): - z = self.out_layer(z, edge_index, edge_attr) - else: - z = self.out_layer(z, edge_index) - if self.enc_dim == self.dec_dim: - z = z + z_in - z = self.decoder(z) - return z - - -class NN(BaseModel): - def __init__(self, hparams, encoder, decoder): - self.enc_dim = hparams["encoder"][-1] - self.dec_dim = hparams["decoder"][0] - super(NN, self).__init__(hparams, encoder, decoder) - - def _in_layer(self, hparams): - return MLP([self.enc_dim, self.size_hidden_layers]) - - def _hidden_layers(self, hparams): - hidden_layers = paddle.nn.LayerList() - for n in range(self.nb_hidden_layers - 1): - hidden_layers.append( - MLP([self.size_hidden_layers, self.size_hidden_layers]) - ) - return hidden_layers - - def _out_layer(self, hparams): - return MLP([self.size_hidden_layers, self.dec_dim]) - - -class MLP(paddle.nn.Layer): - def __init__( - self, channel_list, dropout=0.0, batch_norm=True, activation_first=False - ): - super().__init__() - assert len(channel_list) >= 2 - self.channel_list = channel_list - self.dropout = dropout - self.activation_first = activation_first - self.lins = paddle.nn.LayerList() - for dims in zip(self.channel_list[:-1], self.channel_list[1:]): - self.lins.append(Linear(*dims)) - self.norms = paddle.nn.LayerList() - for dim in zip(self.channel_list[1:-1]): - self.norms.append( - paddle.nn.BatchNorm1D(num_features=dim, use_global_stats=False) - if batch_norm - else paddle.nn.Identity() - ) - self.reset_parameters() - - def reset_parameters(self): - for lin in self.lins: - self._reset_parameters(lin) - for norm in self.norms: - if hasattr(norm, "reset_parameters"): - norm.reset_parameters() - - def calculate_fan_in_and_fan_out(self, tensor): - dimensions = tensor.ndim - if dimensions < 2: - raise ValueError( - "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions" - ) - - if dimensions == 2: # Linear - fan_in = tensor.shape[1] - fan_out = tensor.shape[0] - else: - num_input_fmaps = tensor.shape[1] - num_output_fmaps = tensor.shape[0] - receptive_field_size = 1 - if tensor.ndim > 2: - receptive_field_size = tensor[0][ - 0 - ].numel() # numel returns the number of elements in the tensor - fan_in = num_input_fmaps * receptive_field_size - fan_out = num_output_fmaps * receptive_field_size - return fan_in, fan_out - - def _reset_parameters(self, lin) -> None: - kaiming_init = paddle.nn.initializer.KaimingUniform() - kaiming_init(lin.weight) - if lin.bias is not None: - fan_in, _ = self.calculate_fan_in_and_fan_out(lin.weight) - bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 - uniform_init = paddle.nn.initializer.Uniform(-bound, bound) - uniform_init(lin.bias) - - def forward(self, x, edge_index=None): - x = self.lins[0](x) - for lin, norm in zip(self.lins[1:], self.norms): - if self.activation_first: - x = paddle.nn.functional.gelu(x=x) - x = norm(x) - if not self.activation_first: - x = paddle.nn.functional.gelu(x=x) - x = paddle.nn.functional.dropout( - x=x, p=self.dropout, training=self.training - ) - x = lin.forward(x) - return x - - def __repr__(self) -> str: - return f"{self.__class__.__name__}({str(self.channel_list)[1:-1]})" - - -class 
GEGLU(paddle.nn.Layer): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = paddle.nn.Linear(in_features=dim_in, out_features=dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(chunks=2, axis=-1) - return x * paddle.nn.functional.gelu(x=gate) - - -class FeedForward(paddle.nn.Layer): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = ( - paddle.nn.Sequential( - paddle.nn.Linear(in_features=dim, out_features=inner_dim), - paddle.nn.GELU(), - ) - if not glu - else GEGLU(dim, inner_dim) - ) - self.net = paddle.nn.Sequential( - project_in, - paddle.nn.Dropout(p=dropout), - paddle.nn.Linear(in_features=inner_dim, out_features=dim_out), - ) - - def forward(self, x): - return self.net(x) - - -class FCLayer(paddle.nn.Layer): - def __init__(self, query_dim, context_dim=None, dropout=0.0): - super().__init__() - context_dim = default(context_dim, query_dim) - self.to_out = paddle.nn.Sequential( - paddle.nn.Linear(in_features=context_dim, out_features=query_dim), - paddle.nn.Dropout(p=dropout), - ) - - def forward(self, x, context=None): - context = default(context, x) - return self.to_out(context) - - -class GeoCA3DBlock(paddle.nn.Layer): - def __init__(self, dim, dropout=0.0, context_dim=None, gated_ff=True): - super().__init__() - self.fc1 = FCLayer(query_dim=dim, dropout=dropout) - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.fc2 = FCLayer(query_dim=dim, context_dim=context_dim, dropout=dropout) - self.norm1 = paddle.nn.LayerNorm(normalized_shape=dim) - self.norm2 = paddle.nn.LayerNorm(normalized_shape=dim) - - def forward(self, x, context=None): - x = self.fc1(self.norm1(x)) + x - x = self.fc2(x, context=context) + x - x = self.ff(self.norm2(x)) + x - return x - - -class GeoCA3D(paddle.nn.Layer): - def __init__( - self, - cfd_model, - geom_encoder=None, - geom_proj=None, - in_out_dim=64, - dropout=0.0, - context_dim=512, - gated_ff=True, - ) -> None: - super().__init__() - self.geom_encoder = geom_encoder - self.geom_proj = geom_proj - self.cfd_model = cfd_model - if self.geom_encoder is not None: - self.n_blocks = self.cfd_model.nb_hidden_layers + 2 - dims = ( - [in_out_dim] - + [self.cfd_model.size_hidden_layers] * self.cfd_model.nb_hidden_layers - + [in_out_dim] - ) - self.blocks = paddle.nn.LayerList( - sublayers=[ - GeoCA3DBlock( - dim=dim, dropout=dropout, context_dim=context_dim, gated_ff=True - ) - for dim in dims - ] - ) - - def forward(self, data): - cfd_data, geom_data = data - if self.geom_encoder is None: - x = self.cfd_model(cfd_data) - return x - x, edge_index = cfd_data.x, cfd_data.edge_index - if hasattr(self.cfd_model, "get_edge_attr"): - edge_attr = self.cfd_model.get_edge_attr(x, edge_index) - x = self.cfd_model.encoder(x) - z = self.geom_encoder(geom_data) @ self.geom_proj - z = z / z.norm(axis=-1, keepdim=True) - z = z.repeat_interleave(repeats=tuple(x.shape)[0] // tuple(z.shape)[0], axis=0) - z = rearrange(z, "(b n) d -> b n d", n=1) - if self.cfd_model.enc_dim == self.cfd_model.dec_dim: - x_in = x - x = rearrange(x, "(b n) d -> b n d", n=1) - x = self.blocks[0](x, context=z) - x = rearrange(x, "b n d -> (b n) d") - if hasattr(self.cfd_model, "get_edge_attr"): - x = self.cfd_model.in_layer(x, edge_index, edge_attr) - else: - x = self.cfd_model.in_layer(x, edge_index) - if self.cfd_model.bn_bool: - x = self.cfd_model.bn[0](x) - x = self.cfd_model.activation(x) - for i in range(1, self.n_blocks - 
2): - if hasattr(self.cfd_model, "res_bool") and self.cfd_model.res_bool: - x_res = x - x = rearrange(x, "(b n) d -> b n d", n=1) - x = self.blocks[i](x, context=z) - x = rearrange(x, "b n d -> (b n) d") - if hasattr(self.cfd_model, "get_edge_attr"): - x = self.cfd_model.hidden_layers[i - 1](x, edge_index, edge_attr) - else: - x = self.cfd_model.hidden_layers[i - 1](x, edge_index) - if self.cfd_model.bn_bool: - x = self.cfd_model.bn[i](x) - x = self.cfd_model.activation(x) - if hasattr(self.cfd_model, "res_bool") and self.cfd_model.res_bool: - x = x + x_res - x = rearrange(x, "(b n) d -> b n d", n=1) - x = self.blocks[-2](x, context=z) - x = rearrange(x, "b n d -> (b n) d") - if hasattr(self.cfd_model, "get_edge_attr"): - x = self.cfd_model.out_layer(x, edge_index, edge_attr) - else: - x = self.cfd_model.out_layer(x, edge_index) - x = rearrange(x, "(b n) d -> b n d", n=1) - x = self.blocks[-1](x, context=z) - x = rearrange(x, "b n d -> (b n) d") - if self.cfd_model.enc_dim == self.cfd_model.dec_dim: - x = x + x_in - x = self.cfd_model.decoder(x) - return x +import math + +import paddle +from einops import rearrange +from paddle.nn import Linear +from utils.utils import default + + +class BaseModel(paddle.nn.Layer): + def __init__(self, hparams, encoder, decoder): + super(BaseModel, self).__init__() + self.nb_hidden_layers = hparams["nb_hidden_layers"] + self.size_hidden_layers = hparams["size_hidden_layers"] + self.enc_dim = hparams["encoder"][-1] + self.dec_dim = hparams["decoder"][0] + self.bn_bool = hparams["bn_bool"] + self.res_bool = hparams["res_bool"] + self.activation = paddle.nn.functional.gelu + self.encoder = encoder + self.decoder = decoder + self.in_layer = self._in_layer(hparams) + self.hidden_layers = self._hidden_layers(hparams) + self.out_layer = self._out_layer(hparams) + self.bn = self._bn(hparams) + + def _in_layer(self, hparams): + raise NotImplementedError + + def _hidden_layers(self, hparams): + raise NotImplementedError + + def _out_layer(self, hparams): + raise NotImplementedError + + def _bn(self, hparams): + bn = None + if self.bn_bool: + bn = paddle.nn.LayerList() + for n in range(self.nb_hidden_layers): + bn.append( + paddle.nn.BatchNorm1D( + num_features=self.size_hidden_layers, use_global_stats=False + ) + ) + return bn + + def forward(self, data): + z, edge_index = data.x, data.edge_index + if hasattr(self, "get_edge_attr"): + edge_attr = self.get_edge_attr(z, edge_index) + z = self.encoder(z) + if self.enc_dim == self.dec_dim: + z_in = z + if hasattr(self, "get_edge_attr"): + z = self.in_layer(z, edge_index, edge_attr) + else: + z = self.in_layer(z, edge_index) + if self.bn_bool: + z = self.bn[0](z) + z = self.activation(z) + for n in range(self.nb_hidden_layers - 1): + if hasattr(self, "res_bool") and self.res_bool: + z_res = z + if hasattr(self, "get_edge_attr"): + z = self.hidden_layers[n](z, edge_index, edge_attr) + else: + z = self.hidden_layers[n](z, edge_index) + if self.bn_bool: + z = self.bn[n + 1](z) + z = self.activation(z) + if hasattr(self, "res_bool") and self.res_bool: + z = z + z_res + if hasattr(self, "get_edge_attr"): + z = self.out_layer(z, edge_index, edge_attr) + else: + z = self.out_layer(z, edge_index) + if self.enc_dim == self.dec_dim: + z = z + z_in + z = self.decoder(z) + return z + + +class NN(BaseModel): + def __init__(self, hparams, encoder, decoder): + self.enc_dim = hparams["encoder"][-1] + self.dec_dim = hparams["decoder"][0] + super(NN, self).__init__(hparams, encoder, decoder) + + def _in_layer(self, hparams): + return 
MLP([self.enc_dim, self.size_hidden_layers]) + + def _hidden_layers(self, hparams): + hidden_layers = paddle.nn.LayerList() + for n in range(self.nb_hidden_layers - 1): + hidden_layers.append( + MLP([self.size_hidden_layers, self.size_hidden_layers]) + ) + return hidden_layers + + def _out_layer(self, hparams): + return MLP([self.size_hidden_layers, self.dec_dim]) + + +class MLP(paddle.nn.Layer): + def __init__( + self, channel_list, dropout=0.0, batch_norm=True, activation_first=False + ): + super().__init__() + assert len(channel_list) >= 2 + self.channel_list = channel_list + self.dropout = dropout + self.activation_first = activation_first + self.lins = paddle.nn.LayerList() + for dims in zip(self.channel_list[:-1], self.channel_list[1:]): + self.lins.append(Linear(*dims)) + self.norms = paddle.nn.LayerList() + for dim in zip(self.channel_list[1:-1]): + self.norms.append( + paddle.nn.BatchNorm1D(num_features=dim, use_global_stats=False) + if batch_norm + else paddle.nn.Identity() + ) + self.reset_parameters() + + def reset_parameters(self): + for lin in self.lins: + self._reset_parameters(lin) + for norm in self.norms: + if hasattr(norm, "reset_parameters"): + norm.reset_parameters() + + def calculate_fan_in_and_fan_out(self, tensor): + dimensions = tensor.ndim + if dimensions < 2: + raise ValueError( + "Fan in and fan out can not be computed for tensor with fewer than 2 dimensions" + ) + + if dimensions == 2: # Linear + fan_in = tensor.shape[1] + fan_out = tensor.shape[0] + else: + num_input_fmaps = tensor.shape[1] + num_output_fmaps = tensor.shape[0] + receptive_field_size = 1 + if tensor.ndim > 2: + receptive_field_size = tensor[0][ + 0 + ].numel() # numel returns the number of elements in the tensor + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps * receptive_field_size + return fan_in, fan_out + + def _reset_parameters(self, lin) -> None: + kaiming_init = paddle.nn.initializer.KaimingUniform() + kaiming_init(lin.weight) + if lin.bias is not None: + fan_in, _ = self.calculate_fan_in_and_fan_out(lin.weight) + bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 + uniform_init = paddle.nn.initializer.Uniform(-bound, bound) + uniform_init(lin.bias) + + def forward(self, x, edge_index=None): + x = self.lins[0](x) + for lin, norm in zip(self.lins[1:], self.norms): + if self.activation_first: + x = paddle.nn.functional.gelu(x=x) + x = norm(x) + if not self.activation_first: + x = paddle.nn.functional.gelu(x=x) + x = paddle.nn.functional.dropout( + x=x, p=self.dropout, training=self.training + ) + x = lin.forward(x) + return x + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({str(self.channel_list)[1:-1]})" + + +class GEGLU(paddle.nn.Layer): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = paddle.nn.Linear(in_features=dim_in, out_features=dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(chunks=2, axis=-1) + return x * paddle.nn.functional.gelu(x=gate) + + +class FeedForward(paddle.nn.Layer): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.0): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + project_in = ( + paddle.nn.Sequential( + paddle.nn.Linear(in_features=dim, out_features=inner_dim), + paddle.nn.GELU(), + ) + if not glu + else GEGLU(dim, inner_dim) + ) + self.net = paddle.nn.Sequential( + project_in, + paddle.nn.Dropout(p=dropout), + paddle.nn.Linear(in_features=inner_dim, out_features=dim_out), + ) + + def forward(self, x): + return 
self.net(x) + + +class FCLayer(paddle.nn.Layer): + def __init__(self, query_dim, context_dim=None, dropout=0.0): + super().__init__() + context_dim = default(context_dim, query_dim) + self.to_out = paddle.nn.Sequential( + paddle.nn.Linear(in_features=context_dim, out_features=query_dim), + paddle.nn.Dropout(p=dropout), + ) + + def forward(self, x, context=None): + context = default(context, x) + return self.to_out(context) + + +class GeoCA3DBlock(paddle.nn.Layer): + def __init__(self, dim, dropout=0.0, context_dim=None, gated_ff=True): + super().__init__() + self.fc1 = FCLayer(query_dim=dim, dropout=dropout) + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.fc2 = FCLayer(query_dim=dim, context_dim=context_dim, dropout=dropout) + self.norm1 = paddle.nn.LayerNorm(normalized_shape=dim) + self.norm2 = paddle.nn.LayerNorm(normalized_shape=dim) + + def forward(self, x, context=None): + x = self.fc1(self.norm1(x)) + x + x = self.fc2(x, context=context) + x + x = self.ff(self.norm2(x)) + x + return x + + +class GeoCA3D(paddle.nn.Layer): + def __init__( + self, + cfd_model, + geom_encoder=None, + geom_proj=None, + in_out_dim=64, + dropout=0.0, + context_dim=512, + gated_ff=True, + ) -> None: + super().__init__() + self.geom_encoder = geom_encoder + self.geom_proj = geom_proj + self.cfd_model = cfd_model + if self.geom_encoder is not None: + self.n_blocks = self.cfd_model.nb_hidden_layers + 2 + dims = ( + [in_out_dim] + + [self.cfd_model.size_hidden_layers] * self.cfd_model.nb_hidden_layers + + [in_out_dim] + ) + self.blocks = paddle.nn.LayerList( + sublayers=[ + GeoCA3DBlock( + dim=dim, dropout=dropout, context_dim=context_dim, gated_ff=True + ) + for dim in dims + ] + ) + + def forward(self, data): + cfd_data, geom_data = data + if self.geom_encoder is None: + x = self.cfd_model(cfd_data) + return x + x, edge_index = cfd_data.x, cfd_data.edge_index + if hasattr(self.cfd_model, "get_edge_attr"): + edge_attr = self.cfd_model.get_edge_attr(x, edge_index) + x = self.cfd_model.encoder(x) + z = self.geom_encoder(geom_data) @ self.geom_proj + z = z / z.norm(axis=-1, keepdim=True) + z = z.repeat_interleave(repeats=tuple(x.shape)[0] // tuple(z.shape)[0], axis=0) + z = rearrange(z, "(b n) d -> b n d", n=1) + if self.cfd_model.enc_dim == self.cfd_model.dec_dim: + x_in = x + x = rearrange(x, "(b n) d -> b n d", n=1) + x = self.blocks[0](x, context=z) + x = rearrange(x, "b n d -> (b n) d") + if hasattr(self.cfd_model, "get_edge_attr"): + x = self.cfd_model.in_layer(x, edge_index, edge_attr) + else: + x = self.cfd_model.in_layer(x, edge_index) + if self.cfd_model.bn_bool: + x = self.cfd_model.bn[0](x) + x = self.cfd_model.activation(x) + for i in range(1, self.n_blocks - 2): + if hasattr(self.cfd_model, "res_bool") and self.cfd_model.res_bool: + x_res = x + x = rearrange(x, "(b n) d -> b n d", n=1) + x = self.blocks[i](x, context=z) + x = rearrange(x, "b n d -> (b n) d") + if hasattr(self.cfd_model, "get_edge_attr"): + x = self.cfd_model.hidden_layers[i - 1](x, edge_index, edge_attr) + else: + x = self.cfd_model.hidden_layers[i - 1](x, edge_index) + if self.cfd_model.bn_bool: + x = self.cfd_model.bn[i](x) + x = self.cfd_model.activation(x) + if hasattr(self.cfd_model, "res_bool") and self.cfd_model.res_bool: + x = x + x_res + x = rearrange(x, "(b n) d -> b n d", n=1) + x = self.blocks[-2](x, context=z) + x = rearrange(x, "b n d -> (b n) d") + if hasattr(self.cfd_model, "get_edge_attr"): + x = self.cfd_model.out_layer(x, edge_index, edge_attr) + else: + x = self.cfd_model.out_layer(x, 
edge_index) + x = rearrange(x, "(b n) d -> b n d", n=1) + x = self.blocks[-1](x, context=z) + x = rearrange(x, "b n d -> (b n) d") + if self.cfd_model.enc_dim == self.cfd_model.dec_dim: + x = x + x_in + x = self.cfd_model.decoder(x) + return x diff --git a/jointContribution/IJCAI_2024/bju/params.yaml b/jointContribution/IJCAI_2024/bju/params.yaml index b26d3ddce1..6e2a017584 100644 --- a/jointContribution/IJCAI_2024/bju/params.yaml +++ b/jointContribution/IJCAI_2024/bju/params.yaml @@ -1,13 +1,13 @@ -GeomCFD: - in_out_dim: 64 - n_heads: 4 - d_head: 16 - dropout: 0.1 - context_dim: 512 - gated_ff: True - -GeoCA3D: - in_out_dim: 64 - dropout: 0.1 - context_dim: 512 - gated_ff: True +GeomCFD: + in_out_dim: 64 + n_heads: 4 + d_head: 16 + dropout: 0.1 + context_dim: 512 + gated_ff: True + +GeoCA3D: + in_out_dim: 64 + dropout: 0.1 + context_dim: 512 + gated_ff: True diff --git a/jointContribution/IJCAI_2024/bju/requirements.txt b/jointContribution/IJCAI_2024/bju/requirements.txt index eea7c9007e..6008c03a29 100644 --- a/jointContribution/IJCAI_2024/bju/requirements.txt +++ b/jointContribution/IJCAI_2024/bju/requirements.txt @@ -1,8 +1,8 @@ -easydict -einops -matplotlib -numpy -paddleclas -paddlepaddle_gpu -PyYAML -termcolor +easydict +einops +matplotlib +numpy +paddleclas +paddlepaddle_gpu +PyYAML +termcolor diff --git a/jointContribution/IJCAI_2024/bju/utils/paddle_aux.py b/jointContribution/IJCAI_2024/bju/utils/paddle_aux.py index d54b9de151..47697ec3ed 100644 --- a/jointContribution/IJCAI_2024/bju/utils/paddle_aux.py +++ b/jointContribution/IJCAI_2024/bju/utils/paddle_aux.py @@ -1,173 +1,173 @@ -# This file is generated by PaConvert ToolKit, please Don't edit it! -import paddle - - -def view(self, *args, **kwargs): - if args: - if len(args) == 1: - if isinstance(args[0], (tuple, list)): - return paddle.reshape(self, args[0]) # To change reshape => view - elif isinstance(args[0], str): - return paddle.view(self, args[0]) - else: - return paddle.reshape(self, list(args)) # To change reshape => view - else: - return paddle.reshape(self, list(args)) # To change reshape => view - elif kwargs: - key = [k for k in kwargs.keys()] - if "dtype" in kwargs: - return paddle.view(self, shape_or_dtype=kwargs[key[0]]) - else: - return paddle.reshape( - self, shape=kwargs[key[0]] - ) # To change reshape => view - - -setattr(paddle.Tensor, "view", view) - - -def min_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.minimum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.minimum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.min(self, *args, **kwargs), paddle.argmin( - self, *args, **kwargs - ) - else: - ret = paddle.min(self, *args, **kwargs) - - return ret - - -def max_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.maximum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.maximum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.max(self, *args, **kwargs), paddle.argmax( - self, *args, **kwargs - ) - else: - ret = paddle.max(self, *args, **kwargs) - - return ret - - -setattr(paddle.Tensor, "min", min_class_func) -setattr(paddle.Tensor, "max", max_class_func) - - -def reshape(self, *args, 
**kwargs): - if args: - if len(args) == 1 and isinstance(args[0], (tuple, list)): - return paddle.reshape(self, args[0]) - else: - return paddle.reshape(self, list(args)) - elif kwargs: - assert "shape" in kwargs - return paddle.reshape(self, shape=kwargs["shape"]) - - -setattr(paddle.Tensor, "reshape", reshape) - - -def repeat(self, *args, **kwargs): - if args: - if len(args) == 1 and isinstance(args[0], (tuple, list)): - return paddle.tile(self, args[0]) - else: - return paddle.tile(self, list(args)) - elif kwargs: - assert "repeats" in kwargs - return paddle.tile(self, repeat_times=kwargs["repeats"]) - - -setattr(paddle.Tensor, "repeat", repeat) - - -def min(*args, **kwargs): - if "input" in kwargs: - kwargs["x"] = kwargs.pop("input") - - out_v = None - if "out" in kwargs: - out_v = kwargs.pop("out") - - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.minimum(*args, **kwargs) - elif len(args) == 2 and isinstance(args[1], paddle.Tensor): - ret = paddle.minimum(*args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 2: - if out_v: - ret = paddle.min(*args, **kwargs), paddle.argmin(*args, **kwargs) - paddle.assign(ret[0], out_v[0]) - paddle.assign(ret[1], out_v[1]) - return out_v - else: - ret = paddle.min(*args, **kwargs), paddle.argmin(*args, **kwargs) - return ret - else: - ret = paddle.min(*args, **kwargs) - return ret - - if out_v: - paddle.assign(ret, out_v) - return out_v - else: - return ret - - -def max(*args, **kwargs): - if "input" in kwargs: - kwargs["x"] = kwargs.pop("input") - - out_v = None - if "out" in kwargs: - out_v = kwargs.pop("out") - - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.maximum(*args, **kwargs) - elif len(args) == 2 and isinstance(args[1], paddle.Tensor): - ret = paddle.maximum(*args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 2: - if out_v: - ret = paddle.max(*args, **kwargs), paddle.argmax(*args, **kwargs) - paddle.assign(ret[0], out_v[0]) - paddle.assign(ret[1], out_v[1]) - return out_v - else: - ret = paddle.max(*args, **kwargs), paddle.argmax(*args, **kwargs) - return ret - return out_v - else: - ret = paddle.max(*args, **kwargs) - return ret - - if out_v: - paddle.assign(ret, out_v) - return out_v - else: - return ret +# This file is generated by PaConvert ToolKit, please Don't edit it! 
+import paddle + + +def view(self, *args, **kwargs): + if args: + if len(args) == 1: + if isinstance(args[0], (tuple, list)): + return paddle.reshape(self, args[0]) # To change reshape => view + elif isinstance(args[0], str): + return paddle.view(self, args[0]) + else: + return paddle.reshape(self, list(args)) # To change reshape => view + else: + return paddle.reshape(self, list(args)) # To change reshape => view + elif kwargs: + key = [k for k in kwargs.keys()] + if "dtype" in kwargs: + return paddle.view(self, shape_or_dtype=kwargs[key[0]]) + else: + return paddle.reshape( + self, shape=kwargs[key[0]] + ) # To change reshape => view + + +setattr(paddle.Tensor, "view", view) + + +def min_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.minimum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.minimum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.min(self, *args, **kwargs), paddle.argmin( + self, *args, **kwargs + ) + else: + ret = paddle.min(self, *args, **kwargs) + + return ret + + +def max_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.maximum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.maximum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.max(self, *args, **kwargs), paddle.argmax( + self, *args, **kwargs + ) + else: + ret = paddle.max(self, *args, **kwargs) + + return ret + + +setattr(paddle.Tensor, "min", min_class_func) +setattr(paddle.Tensor, "max", max_class_func) + + +def reshape(self, *args, **kwargs): + if args: + if len(args) == 1 and isinstance(args[0], (tuple, list)): + return paddle.reshape(self, args[0]) + else: + return paddle.reshape(self, list(args)) + elif kwargs: + assert "shape" in kwargs + return paddle.reshape(self, shape=kwargs["shape"]) + + +setattr(paddle.Tensor, "reshape", reshape) + + +def repeat(self, *args, **kwargs): + if args: + if len(args) == 1 and isinstance(args[0], (tuple, list)): + return paddle.tile(self, args[0]) + else: + return paddle.tile(self, list(args)) + elif kwargs: + assert "repeats" in kwargs + return paddle.tile(self, repeat_times=kwargs["repeats"]) + + +setattr(paddle.Tensor, "repeat", repeat) + + +def min(*args, **kwargs): + if "input" in kwargs: + kwargs["x"] = kwargs.pop("input") + + out_v = None + if "out" in kwargs: + out_v = kwargs.pop("out") + + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.minimum(*args, **kwargs) + elif len(args) == 2 and isinstance(args[1], paddle.Tensor): + ret = paddle.minimum(*args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 2: + if out_v: + ret = paddle.min(*args, **kwargs), paddle.argmin(*args, **kwargs) + paddle.assign(ret[0], out_v[0]) + paddle.assign(ret[1], out_v[1]) + return out_v + else: + ret = paddle.min(*args, **kwargs), paddle.argmin(*args, **kwargs) + return ret + else: + ret = paddle.min(*args, **kwargs) + return ret + + if out_v: + paddle.assign(ret, out_v) + return out_v + else: + return ret + + +def max(*args, **kwargs): + if "input" in kwargs: + kwargs["x"] = kwargs.pop("input") + + out_v = None + if "out" in kwargs: + out_v = kwargs.pop("out") + + if "other" in 
kwargs:
+        kwargs["y"] = kwargs.pop("other")
+        ret = paddle.maximum(*args, **kwargs)
+    elif len(args) == 2 and isinstance(args[1], paddle.Tensor):
+        ret = paddle.maximum(*args, **kwargs)
+    else:
+        if "dim" in kwargs:
+            kwargs["axis"] = kwargs.pop("dim")
+
+        if "axis" in kwargs or len(args) >= 2:
+            if out_v:
+                ret = paddle.max(*args, **kwargs), paddle.argmax(*args, **kwargs)
+                paddle.assign(ret[0], out_v[0])
+                paddle.assign(ret[1], out_v[1])
+                return out_v
+            else:
+                ret = paddle.max(*args, **kwargs), paddle.argmax(*args, **kwargs)
+                return ret
+            return out_v
+        else:
+            ret = paddle.max(*args, **kwargs)
+            return ret
+
+    if out_v:
+        paddle.assign(ret, out_v)
+        return out_v
+    else:
+        return ret
diff --git a/jointContribution/IJCAI_2024/bju/utils/utils.py b/jointContribution/IJCAI_2024/bju/utils/utils.py
index ef2544b9f1..3aad709254 100644
--- a/jointContribution/IJCAI_2024/bju/utils/utils.py
+++ b/jointContribution/IJCAI_2024/bju/utils/utils.py
@@ -1,87 +1,87 @@
-from inspect import isfunction
-from typing import List
-from typing import Union
-
-import paddle
-
-
-def exists(val):
-    return val is not None
-
-
-def default(val, d):
-    if exists(val):
-        return val
-    return d() if isfunction(d) else d
-
-
-def radius_graph(x, r, batch=None, loop=False, max_num_neighbors=32):
-    num_nodes = x.shape[0]
-    if batch is None:
-        batch = paddle.zeros(shape=[num_nodes], dtype=paddle.int64)
-
-    dist_matrix = paddle.norm(x.unsqueeze(1) - x.unsqueeze(0), axis=-1, p=2)
-
-    adj_matrix = dist_matrix < r
-
-    if not loop:
-        adj_matrix = adj_matrix * (1 - paddle.eye(num_nodes, dtype=paddle.bool))
-
-    mask = batch.unsqueeze(1) == batch.unsqueeze(0)
-    adj_matrix = adj_matrix * mask
-
-    degree = adj_matrix.sum(axis=-1)
-    if max_num_neighbors < degree.max():
-        idx = degree.argsort(descending=True)
-        idx = idx[:max_num_neighbors]
-        adj_matrix = adj_matrix[:, idx]
-
-    return adj_matrix
-
-
-def k_hop_subgraph(
-    edge_index: paddle.Tensor,
-    num_hops: int,
-    node_idx: Union[int, List[int], paddle.Tensor],
-    relabel_nodes: bool = False,
-) -> paddle.Tensor:
-    if not isinstance(node_idx, paddle.Tensor):
-        node_idx = paddle.to_tensor(node_idx, dtype="int64")
-
-    visited = paddle.zeros([edge_index.max() + 1], dtype="bool")
-    queue = node_idx.tolist() if isinstance(node_idx, paddle.Tensor) else node_idx
-    visited[queue] = True
-    sub_edge_index = []
-
-    current_hop = 0
-
-    while queue and current_hop < num_hops:
-        current_hop += 1
-        next_queue = []
-
-        for node in queue:
-            neighbors = edge_index[1] == node
-            neighbors = edge_index[0][neighbors]
-            neighbors = neighbors[~visited[neighbors]]
-
-            next_queue.extend(neighbors.tolist())
-            visited[neighbors] = True
-
-            for neighbor in neighbors:
-                if relabel_nodes:
-                    original_idx = (
-                        paddle.nonzero(node_idx == node)[0].item()
-                        if isinstance(node_idx, paddle.Tensor)
-                        else node_idx.index(node)
-                    )
-                    sub_edge_index.append([original_idx, len(sub_edge_index) // 2 + 1])
-                else:
-                    sub_edge_index.append([node, neighbor])
-
-        queue = next_queue
-
-    sub_edge_index = paddle.to_tensor(sub_edge_index, dtype="int64")
-    if relabel_nodes:
-        return sub_edge_index.reshape([-1, 2])[:, 1]
-    else:
-        return sub_edge_index.reshape([-1, 2])
+from inspect import isfunction
+from typing import List
+from typing import Union
+
+import paddle
+
+
+def exists(val):
+    return val is not None
+
+
+def default(val, d):
+    if exists(val):
+        return val
+    return d() if isfunction(d) else d
+
+
+def radius_graph(x, r, batch=None, loop=False, max_num_neighbors=32):
+    num_nodes = x.shape[0]
+    if batch is None:
+        batch =
paddle.zeros(shape=[num_nodes], dtype=paddle.int64)
+
+    dist_matrix = paddle.norm(x.unsqueeze(1) - x.unsqueeze(0), axis=-1, p=2)
+
+    adj_matrix = dist_matrix < r
+
+    if not loop:
+        adj_matrix = adj_matrix * (1 - paddle.eye(num_nodes, dtype=paddle.bool))
+
+    mask = batch.unsqueeze(1) == batch.unsqueeze(0)
+    adj_matrix = adj_matrix * mask
+
+    degree = adj_matrix.sum(axis=-1)
+    if max_num_neighbors < degree.max():
+        idx = degree.argsort(descending=True)
+        idx = idx[:max_num_neighbors]
+        adj_matrix = adj_matrix[:, idx]
+
+    return adj_matrix
+
+
+def k_hop_subgraph(
+    edge_index: paddle.Tensor,
+    num_hops: int,
+    node_idx: Union[int, List[int], paddle.Tensor],
+    relabel_nodes: bool = False,
+) -> paddle.Tensor:
+    if not isinstance(node_idx, paddle.Tensor):
+        node_idx = paddle.to_tensor(node_idx, dtype="int64")
+
+    visited = paddle.zeros([edge_index.max() + 1], dtype="bool")
+    queue = node_idx.tolist() if isinstance(node_idx, paddle.Tensor) else node_idx
+    visited[queue] = True
+    sub_edge_index = []
+
+    current_hop = 0
+
+    while queue and current_hop < num_hops:
+        current_hop += 1
+        next_queue = []
+
+        for node in queue:
+            neighbors = edge_index[1] == node
+            neighbors = edge_index[0][neighbors]
+            neighbors = neighbors[~visited[neighbors]]
+
+            next_queue.extend(neighbors.tolist())
+            visited[neighbors] = True
+
+            for neighbor in neighbors:
+                if relabel_nodes:
+                    original_idx = (
+                        paddle.nonzero(node_idx == node)[0].item()
+                        if isinstance(node_idx, paddle.Tensor)
+                        else node_idx.index(node)
+                    )
+                    sub_edge_index.append([original_idx, len(sub_edge_index) // 2 + 1])
+                else:
+                    sub_edge_index.append([node, neighbor])
+
+        queue = next_queue
+
+    sub_edge_index = paddle.to_tensor(sub_edge_index, dtype="int64")
+    if relabel_nodes:
+        return sub_edge_index.reshape([-1, 2])[:, 1]
+    else:
+        return sub_edge_index.reshape([-1, 2])
diff --git a/jointContribution/IJCAI_2024/leejt/dataset.py b/jointContribution/IJCAI_2024/leejt/dataset.py
index 173023947e..9d39755fc1 100644
--- a/jointContribution/IJCAI_2024/leejt/dataset.py
+++ b/jointContribution/IJCAI_2024/leejt/dataset.py
@@ -1,46 +1,46 @@
-import paddle
-from paddle.io import Dataset
-
-
-class CustomDataset(Dataset):
-    def __init__(self, **kwargs):
-        for key, value in kwargs.items():
-            setattr(self, key, value)
-
-
-class CustomDataLoader(paddle.io.DataLoader):
-    def __init__(
-        self, dataset, batch_size=1, shuffle=False, collate_fn=None, num_workers=0
-    ):
-        super().__init__(
-            dataset,
-            batch_size=batch_size,
-            shuffle=shuffle,
-            collate_fn=self.collate_fn,
-            num_workers=num_workers,
-        )
-        self.dataset = dataset
-        self.batch_size = batch_size
-        self.index = 0
-
-    def collate_fn(self, batch):
-        return batch
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        if self.index >= len(self.dataset):
-            raise StopIteration
-
-        batch_data = [
-            self.dataset[i]
-            for i in range(
-                self.index, min(self.index + self.batch_size, len(self.dataset))
-            )
-        ]
-        self.index += self.batch_size
-
-        if len(batch_data) == 1:
-            return batch_data[0]
-        return batch_data
+import paddle
+from paddle.io import Dataset
+
+
+class CustomDataset(Dataset):
+    def __init__(self, **kwargs):
+        for key, value in kwargs.items():
+            setattr(self, key, value)
+
+
+class CustomDataLoader(paddle.io.DataLoader):
+    def __init__(
+        self, dataset, batch_size=1, shuffle=False, collate_fn=None, num_workers=0
+    ):
+        super().__init__(
+            dataset,
+            batch_size=batch_size,
+            shuffle=shuffle,
+            collate_fn=self.collate_fn,
+            num_workers=num_workers,
+        )
+        self.dataset = dataset
+        self.batch_size =
batch_size + self.index = 0 + + def collate_fn(self, batch): + return batch + + def __iter__(self): + return self + + def __next__(self): + if self.index >= len(self.dataset): + raise StopIteration + + batch_data = [ + self.dataset[i] + for i in range( + self.index, min(self.index + self.batch_size, len(self.dataset)) + ) + ] + self.index += self.batch_size + + if len(batch_data) == 1: + return batch_data[0] + return batch_data diff --git a/jointContribution/IJCAI_2024/leejt/download_dataset.ipynb b/jointContribution/IJCAI_2024/leejt/download_dataset.ipynb index 6d534f08ce..68b48d507c 100644 --- a/jointContribution/IJCAI_2024/leejt/download_dataset.ipynb +++ b/jointContribution/IJCAI_2024/leejt/download_dataset.ipynb @@ -1 +1 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"d25Ges-BIcKg"},"source":["# 下载比赛数据"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":40227,"status":"ok","timestamp":1720876228678,"user":{"displayName":"Jintang Li","userId":"11727574012572911053"},"user_tz":-480},"id":"8zLTCo4-DJUW","outputId":"83f5ac5f-74a7-4811-a72b-0976ab099a08"},"outputs":[{"name":"stdout","output_type":"stream","text":["--2024-07-13 13:09:47-- https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=dc3aa13c-c3a9-458f-983a-8586798cb635&at=APZUnTX25XMxi-z-3wBcgR93IGsL%3A1719235792953\n","Resolving drive.usercontent.google.com (drive.usercontent.google.com)... 108.177.98.132, 2607:f8b0:400e:c05::84\n","Connecting to drive.usercontent.google.com (drive.usercontent.google.com)|108.177.98.132|:443... connected.\n","HTTP request sent, awaiting response... 200 OK\n","Length: 1084182095 (1.0G) [application/octet-stream]\n","Saving to: ‘Dataset.zip’\n","\n","Dataset.zip 100%[===================>] 1.01G 47.6MB/s in 15s \n","\n","2024-07-13 13:10:03 (71.2 MB/s) - ‘Dataset.zip’ saved [1084182095/1084182095]\n","\n","Archive: Dataset.zip\n"," creating: Dataset/Dataset/\n"," creating: Dataset/Dataset/Testset_track_A/\n"," creating: Dataset/Dataset/Testset_track_A/Inference/\n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_658.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_659.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_660.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_662.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_663.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_664.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_665.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_666.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_667.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_668.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_672.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_673.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_674.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_675.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_676.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_677.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_678.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_679.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_681.ply \n"," inflating: 
Dataset/Dataset/Testset_track_A/Inference/mesh_683.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_684.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_686.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_687.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_688.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_689.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_690.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_691.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_692.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_693.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_695.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_696.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_697.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_700.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_701.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_702.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_703.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_704.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_705.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_708.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_709.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_710.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_711.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_712.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_713.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_715.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_717.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_718.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_719.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_721.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_722.ply \n"," creating: Dataset/Dataset/Testset_track_B/\n"," creating: Dataset/Dataset/Testset_track_B/Auxiliary/\n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_1.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_10.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_11.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_12.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_13.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_14.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_15.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_16.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_17.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_18.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_19.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_2.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_20.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_21.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_22.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_23.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_24.npy \n"," inflating: 
Dataset/Dataset/Testset_track_B/Auxiliary/area_25.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_26.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_27.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_28.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_29.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_3.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_30.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_31.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_32.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_33.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_34.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_35.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_36.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_37.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_38.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_39.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_4.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_40.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_41.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_42.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_43.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_44.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_45.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_46.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_47.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_48.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_49.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_5.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_50.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_6.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_7.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_8.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_9.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_bounds.txt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/global_bounds.txt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_1.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_10.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_11.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_12.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_13.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_14.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_15.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_16.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_17.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_18.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_19.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_2.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_20.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_21.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_22.pt \n"," inflating: 
Dataset/Dataset/Testset_track_B/Auxiliary/info_23.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_24.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_25.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_26.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_27.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_28.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_29.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_3.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_30.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_31.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_32.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_33.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_34.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_35.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_36.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_37.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_38.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_39.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_4.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_40.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_41.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_42.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_43.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_44.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_45.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_46.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_47.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_48.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_49.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_5.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_50.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_6.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_7.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_8.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_9.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_bounds.txt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_1.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_10.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_11.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_12.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_13.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_14.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_15.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_16.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_17.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_18.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_19.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_2.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_20.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_21.npy \n"," inflating: 
Dataset/Dataset/Testset_track_B/Auxiliary/normal_22.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_23.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_24.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_25.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_26.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_27.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_28.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_29.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_3.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_30.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_31.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_32.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_33.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_34.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_35.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_36.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_37.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_38.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_39.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_4.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_40.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_41.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_42.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_43.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_44.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_45.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_46.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_47.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_48.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_49.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_5.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_50.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_6.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_7.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_8.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_9.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/train_pressure_mean_std.txt \n"," inflating: Dataset/Dataset/Testset_track_B/IJCAI_data_doc_v1.pdf \n"," creating: Dataset/Dataset/Testset_track_B/Inference/\n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_1.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_10.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_11.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_12.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_13.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_14.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_15.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_16.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_17.npy \n"," inflating: 
Dataset/Dataset/Testset_track_B/Inference/centroid_18.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_19.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_2.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_20.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_21.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_22.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_23.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_24.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_25.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_26.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_27.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_28.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_29.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_3.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_30.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_31.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_32.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_33.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_34.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_35.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_36.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_37.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_38.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_39.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_4.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_40.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_41.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_42.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_43.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_44.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_45.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_46.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_47.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_48.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_49.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_5.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_50.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_6.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_7.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_8.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_9.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_1.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_10.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_11.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_12.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_13.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_14.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_15.ply \n"," 
inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_16.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_17.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_18.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_19.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_2.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_20.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_21.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_22.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_23.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_24.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_25.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_26.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_27.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_28.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_29.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_3.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_30.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_31.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_32.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_33.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_34.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_35.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_36.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_37.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_38.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_39.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_4.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_40.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_41.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_42.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_43.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_44.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_45.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_46.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_47.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_48.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_49.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_5.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_50.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_6.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_7.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_8.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_9.ply \n"," inflating: Dataset/Dataset/Testset_track_B/track_B_data_dict.xlsx \n"," creating: Dataset/Dataset/Training_data/\n"," creating: Dataset/Dataset/Training_data/Feature/\n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_001.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_002.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_004.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_005.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_006.ply 
\n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_007.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_008.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_010.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_012.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_013.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_017.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_018.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_021.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_022.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_023.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_025.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_026.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_027.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_028.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_029.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_030.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_031.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_032.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_034.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_035.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_039.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_040.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_043.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_044.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_045.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_046.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_047.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_048.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_049.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_050.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_051.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_052.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_054.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_055.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_056.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_058.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_059.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_060.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_061.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_062.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_063.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_064.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_065.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_067.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_069.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_070.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_071.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_072.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_073.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_074.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_075.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_076.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_077.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_078.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_079.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_080.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_081.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_083.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_084.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_085.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_086.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_087.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_088.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_090.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_091.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_092.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_094.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_095.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_096.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_097.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_100.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_101.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_102.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_105.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_106.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_107.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_109.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_110.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_111.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_112.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_113.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_114.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_115.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_116.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_117.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_118.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_119.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_120.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_121.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_123.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_124.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_125.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_126.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_127.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_128.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_129.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_130.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_131.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_133.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_134.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_136.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_137.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_138.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_139.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_140.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_141.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_142.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_143.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_144.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_145.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_146.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_147.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_148.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_149.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_150.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_151.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_152.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_153.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_155.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_156.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_157.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_158.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_159.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_160.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_161.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_162.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_163.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_165.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_166.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_170.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_172.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_173.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_175.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_176.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_177.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_178.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_179.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_180.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_181.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_182.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_183.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_184.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_186.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_190.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_191.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_192.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_193.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_195.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_196.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_198.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_199.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_200.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_201.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_202.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_203.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_205.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_207.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_210.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_211.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_212.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_213.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_214.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_215.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_217.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_219.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_220.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_221.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_222.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_223.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_224.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_225.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_227.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_228.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_229.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_230.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_231.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_232.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_233.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_234.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_235.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_236.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_237.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_241.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_243.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_244.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_245.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_246.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_247.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_248.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_249.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_251.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_252.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_253.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_255.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_257.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_258.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_259.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_260.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_261.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_262.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_263.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_264.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_266.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_267.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_268.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_269.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_271.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_272.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_273.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_274.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_275.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_276.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_277.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_278.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_279.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_280.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_281.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_282.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_283.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_285.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_286.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_289.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_290.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_291.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_292.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_293.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_294.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_295.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_296.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_297.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_298.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_299.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_300.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_301.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_302.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_304.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_305.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_306.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_308.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_309.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_310.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_311.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_312.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_313.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_314.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_315.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_319.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_320.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_321.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_322.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_323.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_324.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_325.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_327.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_328.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_329.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_331.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_332.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_333.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_334.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_335.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_337.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_338.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_339.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_340.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_341.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_344.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_345.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_347.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_348.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_349.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_350.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_352.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_353.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_354.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_355.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_356.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_357.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_358.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_360.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_362.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_364.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_365.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_366.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_367.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_369.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_371.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_372.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_373.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_374.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_375.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_376.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_378.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_379.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_380.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_381.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_384.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_385.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_389.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_392.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_393.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_397.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_398.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_399.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_401.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_402.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_403.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_404.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_405.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_407.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_408.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_410.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_412.ply  \n"," [... unzip listing truncated: remaining Dataset/Dataset/Training_data/Feature/mesh_*.ply and Dataset/Dataset/Training_data/Label/press_*.npy entries omitted ...]\n","  inflating: Dataset/Dataset/Training_data/train_pressure_min_std.txt  \n","  inflating: 
Dataset/Dataset/Training_data/watertight_global_bounds.txt \n"," inflating: Dataset/Dataset/Training_data/watertight_meshes.txt \n"]}],"source":["!wget --header=\"Host: drive.usercontent.google.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Cookie: __Secure-ENID=12.SE=Yd0Bj-CLJ14fnd4qzdJHmwUs4B5zz46UaPC1cPJigNqqFV9PtM2CYyBpSbCkOyzUwzlEdZ1nZFf-igtGi7wSdJ_gqQSfQfh84r9egqFQAy9-GKayCRbdQKdera-2mkpuIT-c64CyR9vfNojM3hxZ9Dej-dGvtxlGjal9ttEHybw; __gsas=ID=ae0421b9a34b478c:T=1710758437:RT=1710758437:S=ALNI_MZP13R9ZOHbCzC0rgHSMrGXj6GCsg; HSID=A-4I-ZudDNUIB6EKH; SSID=A7v_1v9un6xAwVNku; APISID=ctK8IbLjeuDUmgys/AFnMSLWt9KddceDI6; SAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-1PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-3PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; SID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_kzuBV1TvOhAIC8VF1e9fpgACgYKATQSARQSFQHGX2Mi8LXUwWoIwNCEPU8Sy3mXUxoVAUF8yKqGXVfjTGz9gQal7nwGr4Pl0076; __Secure-1PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_PDa-DzVmbdGFPyxMQpk9_QACgYKAewSARQSFQHGX2MiAeee4fn0OWglWZfAygqkyBoVAUF8yKp-Sfmtnueimxc-0QbJRF9I0076; __Secure-3PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_g9IrMeU98APBo9Stp6wEnAACgYKAQASARQSFQHGX2MiFWtc9ucONXnpxBzlRdudEhoVAUF8yKoeZwCpJDnjfAFjGssHSUGm0076; NID=515=GQhY9nKKFCx3qFDjE0MA4ubjWNdef6xCIY_RfWOPWKEtyfBN3nAUl8WHI2VczjNQ4rVkj1XBAY8WNWHXyqSK10CfT4FxsFlPzrHIJpeTtm1nWRNBd9AAfBKJHz4XpESszntVUTE_59RklZuKo0vk1poReVi2da1PZKC3CTKH2Ll3gB5xuB9wf4bmq8ylVUuIROPJczr0XnCuUHV3qLdBvgy9_870b6UwOq1iOlIxFQFm01EZ4pqF4q1Ub3QRSWpEMLh4LSZFpJ5O255R5OV7krmEdDvH_sHoTEPZAg2PoEpwAyGK6Xp9qcLIlldgx5-5V86N8Wtb93uTlQuA_CFXb5_2eP3bgeX8txwlJ5SrldVjg9ctzYtBU2RwJKTSvdHfIG7lpOkg6XlkvDOcJpR3DihT_OlqnPn7drCAJpvVDv29hZn5XPMXaSrNdbG64OJ9urJEw5odEwsLYkkpC1vmlUcuoo52S5f6RQu0Z8kZiV8iRW6XIqHsSmQHunVaxk6xWCStUg; __Secure-1PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; __Secure-3PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; SIDCC=AKEyXzVI6aMX8lSDja86Yts3FBAtBzPCzVNgaX5BCz78NWsWzlT3yFWKUV7ZE46SFzE1GiBI-cHdTw; __Secure-1PSIDCC=AKEyXzUo4NQAwqqPMxP2eye-MFEbZmBIm_sZqRU1amttg0YoQkc8ZKSNXdHl5jNCMEbhrUHhS9-K; __Secure-3PSIDCC=AKEyXzWf2lIdmDLeZKpXSi9GytVQb6XudrYiNUBA5gW952YuLh8kL6T3IbBlu8zOTfGEcdUp5O1R\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=dc3aa13c-c3a9-458f-983a-8586798cb635&at=APZUnTX25XMxi-z-3wBcgR93IGsL%3A1719235792953\" -c -O 'Dataset.zip'\n","!mkdir -p Dataset\n","!unzip -o Dataset.zip -d Dataset/"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":344790,"status":"ok","timestamp":1720876573464,"user":{"displayName":"Jintang Li","userId":"11727574012572911053"},"user_tz":-480},"id":"VFzvkceAIWV2","outputId":"3036bbd2-9752-4891-bbb7-1ff4bb30445e"},"outputs":[{"name":"stdout","output_type":"stream","text":["--2024-07-13 13:10:27-- 
https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\n","Resolving ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)... 103.235.47.176, 2409:8c04:1001:1203:0:ff:b0bb:4f27\n","Connecting to ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)|103.235.47.176|:443... connected.\n","HTTP request sent, awaiting response... 200 OK\n","Length: 4740031429 (4.4G) [application/octet-stream]\n","Saving to: ‘train_track_B.zip’\n","\n","train_track_B.zip 100%[===================>] 4.41G 21.8MB/s in 3m 48s \n","\n","2024-07-13 13:14:16 (19.8 MB/s) - ‘train_track_B.zip’ saved [4740031429/4740031429]\n","\n","Archive:  train_track_B.zip\n"," [... unzip listing truncated: Dataset/train_track_B/area_*.npy entries omitted ...]\n","  inflating: 
Dataset/train_track_B/area_0678.npy \n"," inflating: Dataset/train_track_B/area_0679.npy \n"," inflating: Dataset/train_track_B/area_0680.npy \n"," inflating: Dataset/train_track_B/area_0682.npy \n"," inflating: Dataset/train_track_B/area_0686.npy \n"," inflating: Dataset/train_track_B/area_0688.npy \n"," inflating: Dataset/train_track_B/area_0689.npy \n"," inflating: Dataset/train_track_B/area_0690.npy \n"," inflating: Dataset/train_track_B/area_0691.npy \n"," inflating: Dataset/train_track_B/area_0692.npy \n"," inflating: Dataset/train_track_B/area_0693.npy \n"," inflating: Dataset/train_track_B/area_0694.npy \n"," inflating: Dataset/train_track_B/area_0695.npy \n"," inflating: Dataset/train_track_B/area_0697.npy \n"," inflating: Dataset/train_track_B/area_0699.npy \n"," inflating: Dataset/train_track_B/area_0700.npy \n"," inflating: Dataset/train_track_B/area_0701.npy \n"," inflating: Dataset/train_track_B/area_0703.npy \n"," inflating: Dataset/train_track_B/area_0704.npy \n"," inflating: Dataset/train_track_B/area_0706.npy \n"," inflating: Dataset/train_track_B/area_0707.npy \n"," inflating: Dataset/train_track_B/area_0708.npy \n"," inflating: Dataset/train_track_B/area_0709.npy \n"," inflating: Dataset/train_track_B/area_0711.npy \n"," inflating: Dataset/train_track_B/area_0712.npy \n"," inflating: Dataset/train_track_B/area_0713.npy \n"," inflating: Dataset/train_track_B/area_0714.npy \n"," inflating: Dataset/train_track_B/area_0715.npy \n"," inflating: Dataset/train_track_B/area_0716.npy \n"," inflating: Dataset/train_track_B/area_0718.npy \n"," inflating: Dataset/train_track_B/area_0719.npy \n"," inflating: Dataset/train_track_B/area_0720.npy \n"," inflating: Dataset/train_track_B/area_0721.npy \n"," inflating: Dataset/train_track_B/area_0722.npy \n"," inflating: Dataset/train_track_B/area_0724.npy \n"," inflating: Dataset/train_track_B/area_0727.npy \n"," inflating: Dataset/train_track_B/area_0728.npy \n"," inflating: Dataset/train_track_B/area_0729.npy \n"," inflating: Dataset/train_track_B/area_0730.npy \n"," inflating: Dataset/train_track_B/area_0731.npy \n"," inflating: Dataset/train_track_B/area_0733.npy \n"," inflating: Dataset/train_track_B/area_0735.npy \n"," inflating: Dataset/train_track_B/area_0736.npy \n"," inflating: Dataset/train_track_B/area_0737.npy \n"," inflating: Dataset/train_track_B/area_0740.npy \n"," inflating: Dataset/train_track_B/area_0742.npy \n"," inflating: Dataset/train_track_B/area_0743.npy \n"," inflating: Dataset/train_track_B/area_0744.npy \n"," inflating: Dataset/train_track_B/area_0745.npy \n"," inflating: Dataset/train_track_B/centroid_0002.npy \n"," inflating: Dataset/train_track_B/centroid_0003.npy \n"," inflating: Dataset/train_track_B/centroid_0004.npy \n"," inflating: Dataset/train_track_B/centroid_0005.npy \n"," inflating: Dataset/train_track_B/centroid_0006.npy \n"," inflating: Dataset/train_track_B/centroid_0011.npy \n"," inflating: Dataset/train_track_B/centroid_0012.npy \n"," inflating: Dataset/train_track_B/centroid_0013.npy \n"," inflating: Dataset/train_track_B/centroid_0015.npy \n"," inflating: Dataset/train_track_B/centroid_0017.npy \n"," inflating: Dataset/train_track_B/centroid_0018.npy \n"," inflating: Dataset/train_track_B/centroid_0020.npy \n"," inflating: Dataset/train_track_B/centroid_0021.npy \n"," inflating: Dataset/train_track_B/centroid_0022.npy \n"," inflating: Dataset/train_track_B/centroid_0023.npy \n"," inflating: Dataset/train_track_B/centroid_0024.npy \n"," inflating: Dataset/train_track_B/centroid_0026.npy \n"," 
inflating: Dataset/train_track_B/centroid_0029.npy \n"," inflating: Dataset/train_track_B/centroid_0030.npy \n"," inflating: Dataset/train_track_B/centroid_0036.npy \n"," inflating: Dataset/train_track_B/centroid_0037.npy \n"," inflating: Dataset/train_track_B/centroid_0038.npy \n"," inflating: Dataset/train_track_B/centroid_0039.npy \n"," inflating: Dataset/train_track_B/centroid_0040.npy \n"," inflating: Dataset/train_track_B/centroid_0041.npy \n"," inflating: Dataset/train_track_B/centroid_0042.npy \n"," inflating: Dataset/train_track_B/centroid_0043.npy \n"," inflating: Dataset/train_track_B/centroid_0044.npy \n"," inflating: Dataset/train_track_B/centroid_0048.npy \n"," inflating: Dataset/train_track_B/centroid_0049.npy \n"," inflating: Dataset/train_track_B/centroid_0051.npy \n"," inflating: Dataset/train_track_B/centroid_0052.npy \n"," inflating: Dataset/train_track_B/centroid_0055.npy \n"," inflating: Dataset/train_track_B/centroid_0056.npy \n"," inflating: Dataset/train_track_B/centroid_0057.npy \n"," inflating: Dataset/train_track_B/centroid_0059.npy \n"," inflating: Dataset/train_track_B/centroid_0062.npy \n"," inflating: Dataset/train_track_B/centroid_0064.npy \n"," inflating: Dataset/train_track_B/centroid_0066.npy \n"," inflating: Dataset/train_track_B/centroid_0067.npy \n"," inflating: Dataset/train_track_B/centroid_0068.npy \n"," inflating: Dataset/train_track_B/centroid_0071.npy \n"," inflating: Dataset/train_track_B/centroid_0074.npy \n"," inflating: Dataset/train_track_B/centroid_0075.npy \n"," inflating: Dataset/train_track_B/centroid_0077.npy \n"," inflating: Dataset/train_track_B/centroid_0078.npy \n"," inflating: Dataset/train_track_B/centroid_0080.npy \n"," inflating: Dataset/train_track_B/centroid_0081.npy \n"," inflating: Dataset/train_track_B/centroid_0082.npy \n"," inflating: Dataset/train_track_B/centroid_0084.npy \n"," inflating: Dataset/train_track_B/centroid_0085.npy \n"," inflating: Dataset/train_track_B/centroid_0086.npy \n"," inflating: Dataset/train_track_B/centroid_0087.npy \n"," inflating: Dataset/train_track_B/centroid_0088.npy \n"," inflating: Dataset/train_track_B/centroid_0089.npy \n"," inflating: Dataset/train_track_B/centroid_0090.npy \n"," inflating: Dataset/train_track_B/centroid_0092.npy \n"," inflating: Dataset/train_track_B/centroid_0093.npy \n"," inflating: Dataset/train_track_B/centroid_0094.npy \n"," inflating: Dataset/train_track_B/centroid_0095.npy \n"," inflating: Dataset/train_track_B/centroid_0097.npy \n"," inflating: Dataset/train_track_B/centroid_0098.npy \n"," inflating: Dataset/train_track_B/centroid_0100.npy \n"," inflating: Dataset/train_track_B/centroid_0101.npy \n"," inflating: Dataset/train_track_B/centroid_0102.npy \n"," inflating: Dataset/train_track_B/centroid_0103.npy \n"," inflating: Dataset/train_track_B/centroid_0104.npy \n"," inflating: Dataset/train_track_B/centroid_0106.npy \n"," inflating: Dataset/train_track_B/centroid_0107.npy \n"," inflating: Dataset/train_track_B/centroid_0108.npy \n"," inflating: Dataset/train_track_B/centroid_0109.npy \n"," inflating: Dataset/train_track_B/centroid_0110.npy \n"," inflating: Dataset/train_track_B/centroid_0113.npy \n"," inflating: Dataset/train_track_B/centroid_0114.npy \n"," inflating: Dataset/train_track_B/centroid_0115.npy \n"," inflating: Dataset/train_track_B/centroid_0116.npy \n"," inflating: Dataset/train_track_B/centroid_0117.npy \n"," inflating: Dataset/train_track_B/centroid_0118.npy \n"," inflating: Dataset/train_track_B/centroid_0119.npy \n"," inflating: 
Dataset/train_track_B/centroid_0120.npy \n"," inflating: Dataset/train_track_B/centroid_0121.npy \n"," inflating: Dataset/train_track_B/centroid_0122.npy \n"," inflating: Dataset/train_track_B/centroid_0124.npy \n"," inflating: Dataset/train_track_B/centroid_0125.npy \n"," inflating: Dataset/train_track_B/centroid_0126.npy \n"," inflating: Dataset/train_track_B/centroid_0128.npy \n"," inflating: Dataset/train_track_B/centroid_0129.npy \n"," inflating: Dataset/train_track_B/centroid_0130.npy \n"," inflating: Dataset/train_track_B/centroid_0131.npy \n"," inflating: Dataset/train_track_B/centroid_0132.npy \n"," inflating: Dataset/train_track_B/centroid_0133.npy \n"," inflating: Dataset/train_track_B/centroid_0134.npy \n"," inflating: Dataset/train_track_B/centroid_0135.npy \n"," inflating: Dataset/train_track_B/centroid_0136.npy \n"," inflating: Dataset/train_track_B/centroid_0138.npy \n"," inflating: Dataset/train_track_B/centroid_0139.npy \n"," inflating: Dataset/train_track_B/centroid_0140.npy \n"," inflating: Dataset/train_track_B/centroid_0141.npy \n"," inflating: Dataset/train_track_B/centroid_0143.npy \n"," inflating: Dataset/train_track_B/centroid_0145.npy \n"," inflating: Dataset/train_track_B/centroid_0146.npy \n"," inflating: Dataset/train_track_B/centroid_0148.npy \n"," inflating: Dataset/train_track_B/centroid_0149.npy \n"," inflating: Dataset/train_track_B/centroid_0150.npy \n"," inflating: Dataset/train_track_B/centroid_0151.npy \n"," inflating: Dataset/train_track_B/centroid_0153.npy \n"," inflating: Dataset/train_track_B/centroid_0154.npy \n"," inflating: Dataset/train_track_B/centroid_0156.npy \n"," inflating: Dataset/train_track_B/centroid_0157.npy \n"," inflating: Dataset/train_track_B/centroid_0158.npy \n"," inflating: Dataset/train_track_B/centroid_0161.npy \n"," inflating: Dataset/train_track_B/centroid_0162.npy \n"," inflating: Dataset/train_track_B/centroid_0163.npy \n"," inflating: Dataset/train_track_B/centroid_0164.npy \n"," inflating: Dataset/train_track_B/centroid_0166.npy \n"," inflating: Dataset/train_track_B/centroid_0167.npy \n"," inflating: Dataset/train_track_B/centroid_0168.npy \n"," inflating: Dataset/train_track_B/centroid_0170.npy \n"," inflating: Dataset/train_track_B/centroid_0171.npy \n"," inflating: Dataset/train_track_B/centroid_0172.npy \n"," inflating: Dataset/train_track_B/centroid_0174.npy \n"," inflating: Dataset/train_track_B/centroid_0175.npy \n"," inflating: Dataset/train_track_B/centroid_0183.npy \n"," inflating: Dataset/train_track_B/centroid_0184.npy \n"," inflating: Dataset/train_track_B/centroid_0185.npy \n"," inflating: Dataset/train_track_B/centroid_0189.npy \n"," inflating: Dataset/train_track_B/centroid_0190.npy \n"," inflating: Dataset/train_track_B/centroid_0193.npy \n"," inflating: Dataset/train_track_B/centroid_0194.npy \n"," inflating: Dataset/train_track_B/centroid_0195.npy \n"," inflating: Dataset/train_track_B/centroid_0197.npy \n"," inflating: Dataset/train_track_B/centroid_0201.npy \n"," inflating: Dataset/train_track_B/centroid_0203.npy \n"," inflating: Dataset/train_track_B/centroid_0204.npy \n"," inflating: Dataset/train_track_B/centroid_0205.npy \n"," inflating: Dataset/train_track_B/centroid_0206.npy \n"," inflating: Dataset/train_track_B/centroid_0208.npy \n"," inflating: Dataset/train_track_B/centroid_0210.npy \n"," inflating: Dataset/train_track_B/centroid_0211.npy \n"," inflating: Dataset/train_track_B/centroid_0216.npy \n"," inflating: Dataset/train_track_B/centroid_0217.npy \n"," inflating: 
Dataset/train_track_B/centroid_0219.npy \n"," inflating: Dataset/train_track_B/centroid_0220.npy \n"," inflating: Dataset/train_track_B/centroid_0227.npy \n"," inflating: Dataset/train_track_B/centroid_0228.npy \n"," inflating: Dataset/train_track_B/centroid_0229.npy \n"," inflating: Dataset/train_track_B/centroid_0232.npy \n"," inflating: Dataset/train_track_B/centroid_0234.npy \n"," inflating: Dataset/train_track_B/centroid_0235.npy \n"," inflating: Dataset/train_track_B/centroid_0236.npy \n"," inflating: Dataset/train_track_B/centroid_0238.npy \n"," inflating: Dataset/train_track_B/centroid_0239.npy \n"," inflating: Dataset/train_track_B/centroid_0240.npy \n"," inflating: Dataset/train_track_B/centroid_0241.npy \n"," inflating: Dataset/train_track_B/centroid_0245.npy \n"," inflating: Dataset/train_track_B/centroid_0246.npy \n"," inflating: Dataset/train_track_B/centroid_0247.npy \n"," inflating: Dataset/train_track_B/centroid_0248.npy \n"," inflating: Dataset/train_track_B/centroid_0249.npy \n"," inflating: Dataset/train_track_B/centroid_0252.npy \n"," inflating: Dataset/train_track_B/centroid_0253.npy \n"," inflating: Dataset/train_track_B/centroid_0254.npy \n"," inflating: Dataset/train_track_B/centroid_0256.npy \n"," inflating: Dataset/train_track_B/centroid_0257.npy \n"," inflating: Dataset/train_track_B/centroid_0259.npy \n"," inflating: Dataset/train_track_B/centroid_0264.npy \n"," inflating: Dataset/train_track_B/centroid_0265.npy \n"," inflating: Dataset/train_track_B/centroid_0266.npy \n"," inflating: Dataset/train_track_B/centroid_0268.npy \n"," inflating: Dataset/train_track_B/centroid_0269.npy \n"," inflating: Dataset/train_track_B/centroid_0271.npy \n"," inflating: Dataset/train_track_B/centroid_0272.npy \n"," inflating: Dataset/train_track_B/centroid_0273.npy \n"," inflating: Dataset/train_track_B/centroid_0275.npy \n"," inflating: Dataset/train_track_B/centroid_0276.npy \n"," inflating: Dataset/train_track_B/centroid_0277.npy \n"," inflating: Dataset/train_track_B/centroid_0279.npy \n"," inflating: Dataset/train_track_B/centroid_0280.npy \n"," inflating: Dataset/train_track_B/centroid_0281.npy \n"," inflating: Dataset/train_track_B/centroid_0284.npy \n"," inflating: Dataset/train_track_B/centroid_0285.npy \n"," inflating: Dataset/train_track_B/centroid_0286.npy \n"," inflating: Dataset/train_track_B/centroid_0288.npy \n"," inflating: Dataset/train_track_B/centroid_0289.npy \n"," inflating: Dataset/train_track_B/centroid_0290.npy \n"," inflating: Dataset/train_track_B/centroid_0291.npy \n"," inflating: Dataset/train_track_B/centroid_0294.npy \n"," inflating: Dataset/train_track_B/centroid_0296.npy \n"," inflating: Dataset/train_track_B/centroid_0297.npy \n"," inflating: Dataset/train_track_B/centroid_0298.npy \n"," inflating: Dataset/train_track_B/centroid_0301.npy \n"," inflating: Dataset/train_track_B/centroid_0304.npy \n"," inflating: Dataset/train_track_B/centroid_0305.npy \n"," inflating: Dataset/train_track_B/centroid_0306.npy \n"," inflating: Dataset/train_track_B/centroid_0307.npy \n"," inflating: Dataset/train_track_B/centroid_0308.npy \n"," inflating: Dataset/train_track_B/centroid_0310.npy \n"," inflating: Dataset/train_track_B/centroid_0311.npy \n"," inflating: Dataset/train_track_B/centroid_0314.npy \n"," inflating: Dataset/train_track_B/centroid_0315.npy \n"," inflating: Dataset/train_track_B/centroid_0316.npy \n"," inflating: Dataset/train_track_B/centroid_0320.npy \n"," inflating: Dataset/train_track_B/centroid_0321.npy \n"," inflating: 
Dataset/train_track_B/centroid_0323.npy \n"," inflating: Dataset/train_track_B/centroid_0324.npy \n"," inflating: Dataset/train_track_B/centroid_0327.npy \n"," inflating: Dataset/train_track_B/centroid_0330.npy \n"," inflating: Dataset/train_track_B/centroid_0331.npy \n"," inflating: Dataset/train_track_B/centroid_0332.npy \n"," inflating: Dataset/train_track_B/centroid_0333.npy \n"," inflating: Dataset/train_track_B/centroid_0334.npy \n"," inflating: Dataset/train_track_B/centroid_0337.npy \n"," inflating: Dataset/train_track_B/centroid_0338.npy \n"," inflating: Dataset/train_track_B/centroid_0339.npy \n"," inflating: Dataset/train_track_B/centroid_0340.npy \n"," inflating: Dataset/train_track_B/centroid_0341.npy \n"," inflating: Dataset/train_track_B/centroid_0342.npy \n"," inflating: Dataset/train_track_B/centroid_0343.npy \n"," inflating: Dataset/train_track_B/centroid_0344.npy \n"," inflating: Dataset/train_track_B/centroid_0345.npy \n"," inflating: Dataset/train_track_B/centroid_0346.npy \n"," inflating: Dataset/train_track_B/centroid_0348.npy \n"," inflating: Dataset/train_track_B/centroid_0349.npy \n"," inflating: Dataset/train_track_B/centroid_0351.npy \n"," inflating: Dataset/train_track_B/centroid_0352.npy \n"," inflating: Dataset/train_track_B/centroid_0353.npy \n"," inflating: Dataset/train_track_B/centroid_0354.npy \n"," inflating: Dataset/train_track_B/centroid_0356.npy \n"," inflating: Dataset/train_track_B/centroid_0357.npy \n"," inflating: Dataset/train_track_B/centroid_0359.npy \n"," inflating: Dataset/train_track_B/centroid_0360.npy \n"," inflating: Dataset/train_track_B/centroid_0361.npy \n"," inflating: Dataset/train_track_B/centroid_0363.npy \n"," inflating: Dataset/train_track_B/centroid_0364.npy \n"," inflating: Dataset/train_track_B/centroid_0365.npy \n"," inflating: Dataset/train_track_B/centroid_0366.npy \n"," inflating: Dataset/train_track_B/centroid_0367.npy \n"," inflating: Dataset/train_track_B/centroid_0368.npy \n"," inflating: Dataset/train_track_B/centroid_0369.npy \n"," inflating: Dataset/train_track_B/centroid_0371.npy \n"," inflating: Dataset/train_track_B/centroid_0373.npy \n"," inflating: Dataset/train_track_B/centroid_0376.npy \n"," inflating: Dataset/train_track_B/centroid_0377.npy \n"," inflating: Dataset/train_track_B/centroid_0378.npy \n"," inflating: Dataset/train_track_B/centroid_0379.npy \n"," inflating: Dataset/train_track_B/centroid_0381.npy \n"," inflating: Dataset/train_track_B/centroid_0382.npy \n"," inflating: Dataset/train_track_B/centroid_0383.npy \n"," inflating: Dataset/train_track_B/centroid_0384.npy \n"," inflating: Dataset/train_track_B/centroid_0385.npy \n"," inflating: Dataset/train_track_B/centroid_0387.npy \n"," inflating: Dataset/train_track_B/centroid_0388.npy \n"," inflating: Dataset/train_track_B/centroid_0389.npy \n"," inflating: Dataset/train_track_B/centroid_0392.npy \n"," inflating: Dataset/train_track_B/centroid_0393.npy \n"," inflating: Dataset/train_track_B/centroid_0394.npy \n"," inflating: Dataset/train_track_B/centroid_0395.npy \n"," inflating: Dataset/train_track_B/centroid_0396.npy \n"," inflating: Dataset/train_track_B/centroid_0398.npy \n"," inflating: Dataset/train_track_B/centroid_0399.npy \n"," inflating: Dataset/train_track_B/centroid_0400.npy \n"," inflating: Dataset/train_track_B/centroid_0401.npy \n"," inflating: Dataset/train_track_B/centroid_0402.npy \n"," inflating: Dataset/train_track_B/centroid_0403.npy \n"," inflating: Dataset/train_track_B/centroid_0404.npy \n"," inflating: 
Dataset/train_track_B/centroid_0405.npy \n"," inflating: Dataset/train_track_B/centroid_0407.npy \n"," inflating: Dataset/train_track_B/centroid_0408.npy \n"," inflating: Dataset/train_track_B/centroid_0409.npy \n"," inflating: Dataset/train_track_B/centroid_0410.npy \n"," inflating: Dataset/train_track_B/centroid_0411.npy \n"," inflating: Dataset/train_track_B/centroid_0413.npy \n"," inflating: Dataset/train_track_B/centroid_0416.npy \n"," inflating: Dataset/train_track_B/centroid_0417.npy \n"," inflating: Dataset/train_track_B/centroid_0421.npy \n"," inflating: Dataset/train_track_B/centroid_0422.npy \n"," inflating: Dataset/train_track_B/centroid_0423.npy \n"," inflating: Dataset/train_track_B/centroid_0424.npy \n"," inflating: Dataset/train_track_B/centroid_0425.npy \n"," inflating: Dataset/train_track_B/centroid_0428.npy \n"," inflating: Dataset/train_track_B/centroid_0429.npy \n"," inflating: Dataset/train_track_B/centroid_0430.npy \n"," inflating: Dataset/train_track_B/centroid_0431.npy \n"," inflating: Dataset/train_track_B/centroid_0432.npy \n"," inflating: Dataset/train_track_B/centroid_0435.npy \n"," inflating: Dataset/train_track_B/centroid_0438.npy \n"," inflating: Dataset/train_track_B/centroid_0439.npy \n"," inflating: Dataset/train_track_B/centroid_0441.npy \n"," inflating: Dataset/train_track_B/centroid_0444.npy \n"," inflating: Dataset/train_track_B/centroid_0445.npy \n"," inflating: Dataset/train_track_B/centroid_0449.npy \n"," inflating: Dataset/train_track_B/centroid_0450.npy \n"," inflating: Dataset/train_track_B/centroid_0451.npy \n"," inflating: Dataset/train_track_B/centroid_0452.npy \n"," inflating: Dataset/train_track_B/centroid_0453.npy \n"," inflating: Dataset/train_track_B/centroid_0456.npy \n"," inflating: Dataset/train_track_B/centroid_0457.npy \n"," inflating: Dataset/train_track_B/centroid_0458.npy \n"," inflating: Dataset/train_track_B/centroid_0459.npy \n"," inflating: Dataset/train_track_B/centroid_0460.npy \n"," inflating: Dataset/train_track_B/centroid_0461.npy \n"," inflating: Dataset/train_track_B/centroid_0463.npy \n"," inflating: Dataset/train_track_B/centroid_0464.npy \n"," inflating: Dataset/train_track_B/centroid_0465.npy \n"," inflating: Dataset/train_track_B/centroid_0467.npy \n"," inflating: Dataset/train_track_B/centroid_0469.npy \n"," inflating: Dataset/train_track_B/centroid_0471.npy \n"," inflating: Dataset/train_track_B/centroid_0472.npy \n"," inflating: Dataset/train_track_B/centroid_0474.npy \n"," inflating: Dataset/train_track_B/centroid_0475.npy \n"," inflating: Dataset/train_track_B/centroid_0477.npy \n"," inflating: Dataset/train_track_B/centroid_0478.npy \n"," inflating: Dataset/train_track_B/centroid_0479.npy \n"," inflating: Dataset/train_track_B/centroid_0480.npy \n"," inflating: Dataset/train_track_B/centroid_0481.npy \n"," inflating: Dataset/train_track_B/centroid_0482.npy \n"," inflating: Dataset/train_track_B/centroid_0485.npy \n"," inflating: Dataset/train_track_B/centroid_0486.npy \n"," inflating: Dataset/train_track_B/centroid_0487.npy \n"," inflating: Dataset/train_track_B/centroid_0488.npy \n"," inflating: Dataset/train_track_B/centroid_0489.npy \n"," inflating: Dataset/train_track_B/centroid_0492.npy \n"," inflating: Dataset/train_track_B/centroid_0493.npy \n"," inflating: Dataset/train_track_B/centroid_0494.npy \n"," inflating: Dataset/train_track_B/centroid_0497.npy \n"," inflating: Dataset/train_track_B/centroid_0498.npy \n"," inflating: Dataset/train_track_B/centroid_0499.npy \n"," inflating: 
Dataset/train_track_B/centroid_0501.npy \n"," inflating: Dataset/train_track_B/centroid_0502.npy \n"," inflating: Dataset/train_track_B/centroid_0503.npy \n"," inflating: Dataset/train_track_B/centroid_0504.npy \n"," inflating: Dataset/train_track_B/centroid_0507.npy \n"," inflating: Dataset/train_track_B/centroid_0508.npy \n"," inflating: Dataset/train_track_B/centroid_0509.npy \n"," inflating: Dataset/train_track_B/centroid_0513.npy \n"," inflating: Dataset/train_track_B/centroid_0514.npy \n"," inflating: Dataset/train_track_B/centroid_0515.npy \n"," inflating: Dataset/train_track_B/centroid_0517.npy \n"," inflating: Dataset/train_track_B/centroid_0518.npy \n"," inflating: Dataset/train_track_B/centroid_0519.npy \n"," inflating: Dataset/train_track_B/centroid_0520.npy \n"," inflating: Dataset/train_track_B/centroid_0521.npy \n"," inflating: Dataset/train_track_B/centroid_0522.npy \n"," inflating: Dataset/train_track_B/centroid_0523.npy \n"," inflating: Dataset/train_track_B/centroid_0524.npy \n"," inflating: Dataset/train_track_B/centroid_0525.npy \n"," inflating: Dataset/train_track_B/centroid_0526.npy \n"," inflating: Dataset/train_track_B/centroid_0527.npy \n"," inflating: Dataset/train_track_B/centroid_0528.npy \n"," inflating: Dataset/train_track_B/centroid_0529.npy \n"," inflating: Dataset/train_track_B/centroid_0530.npy \n"," inflating: Dataset/train_track_B/centroid_0531.npy \n"," inflating: Dataset/train_track_B/centroid_0534.npy \n"," inflating: Dataset/train_track_B/centroid_0535.npy \n"," inflating: Dataset/train_track_B/centroid_0536.npy \n"," inflating: Dataset/train_track_B/centroid_0538.npy \n"," inflating: Dataset/train_track_B/centroid_0541.npy \n"," inflating: Dataset/train_track_B/centroid_0542.npy \n"," inflating: Dataset/train_track_B/centroid_0544.npy \n"," inflating: Dataset/train_track_B/centroid_0545.npy \n"," inflating: Dataset/train_track_B/centroid_0546.npy \n"," inflating: Dataset/train_track_B/centroid_0547.npy \n"," inflating: Dataset/train_track_B/centroid_0550.npy \n"," inflating: Dataset/train_track_B/centroid_0551.npy \n"," inflating: Dataset/train_track_B/centroid_0553.npy \n"," inflating: Dataset/train_track_B/centroid_0555.npy \n"," inflating: Dataset/train_track_B/centroid_0557.npy \n"," inflating: Dataset/train_track_B/centroid_0558.npy \n"," inflating: Dataset/train_track_B/centroid_0561.npy \n"," inflating: Dataset/train_track_B/centroid_0563.npy \n"," inflating: Dataset/train_track_B/centroid_0564.npy \n"," inflating: Dataset/train_track_B/centroid_0565.npy \n"," inflating: Dataset/train_track_B/centroid_0567.npy \n"," inflating: Dataset/train_track_B/centroid_0568.npy \n"," inflating: Dataset/train_track_B/centroid_0571.npy \n"," inflating: Dataset/train_track_B/centroid_0574.npy \n"," inflating: Dataset/train_track_B/centroid_0576.npy \n"," inflating: Dataset/train_track_B/centroid_0579.npy \n"," inflating: Dataset/train_track_B/centroid_0580.npy \n"," inflating: Dataset/train_track_B/centroid_0582.npy \n"," inflating: Dataset/train_track_B/centroid_0584.npy \n"," inflating: Dataset/train_track_B/centroid_0585.npy \n"," inflating: Dataset/train_track_B/centroid_0588.npy \n"," inflating: Dataset/train_track_B/centroid_0589.npy \n"," inflating: Dataset/train_track_B/centroid_0590.npy \n"," inflating: Dataset/train_track_B/centroid_0591.npy \n"," inflating: Dataset/train_track_B/centroid_0592.npy \n"," inflating: Dataset/train_track_B/centroid_0593.npy \n"," inflating: Dataset/train_track_B/centroid_0594.npy \n"," inflating: 
Dataset/train_track_B/centroid_0595.npy \n"," inflating: Dataset/train_track_B/centroid_0596.npy \n"," inflating: Dataset/train_track_B/centroid_0597.npy \n"," inflating: Dataset/train_track_B/centroid_0598.npy \n"," inflating: Dataset/train_track_B/centroid_0600.npy \n"," inflating: Dataset/train_track_B/centroid_0602.npy \n"," inflating: Dataset/train_track_B/centroid_0605.npy \n"," inflating: Dataset/train_track_B/centroid_0608.npy \n"," inflating: Dataset/train_track_B/centroid_0609.npy \n"," inflating: Dataset/train_track_B/centroid_0611.npy \n"," inflating: Dataset/train_track_B/centroid_0612.npy \n"," inflating: Dataset/train_track_B/centroid_0613.npy \n"," inflating: Dataset/train_track_B/centroid_0614.npy \n"," inflating: Dataset/train_track_B/centroid_0618.npy \n"," inflating: Dataset/train_track_B/centroid_0619.npy \n"," inflating: Dataset/train_track_B/centroid_0620.npy \n"," inflating: Dataset/train_track_B/centroid_0621.npy \n"," inflating: Dataset/train_track_B/centroid_0622.npy \n"," inflating: Dataset/train_track_B/centroid_0623.npy \n"," inflating: Dataset/train_track_B/centroid_0624.npy \n"," inflating: Dataset/train_track_B/centroid_0625.npy \n"," inflating: Dataset/train_track_B/centroid_0627.npy \n"," inflating: Dataset/train_track_B/centroid_0628.npy \n"," inflating: Dataset/train_track_B/centroid_0629.npy \n"," inflating: Dataset/train_track_B/centroid_0630.npy \n"," inflating: Dataset/train_track_B/centroid_0631.npy \n"," inflating: Dataset/train_track_B/centroid_0632.npy \n"," inflating: Dataset/train_track_B/centroid_0633.npy \n"," inflating: Dataset/train_track_B/centroid_0634.npy \n"," inflating: Dataset/train_track_B/centroid_0635.npy \n"," inflating: Dataset/train_track_B/centroid_0637.npy \n"," inflating: Dataset/train_track_B/centroid_0638.npy \n"," inflating: Dataset/train_track_B/centroid_0639.npy \n"," inflating: Dataset/train_track_B/centroid_0640.npy \n"," inflating: Dataset/train_track_B/centroid_0641.npy \n"," inflating: Dataset/train_track_B/centroid_0643.npy \n"," inflating: Dataset/train_track_B/centroid_0644.npy \n"," inflating: Dataset/train_track_B/centroid_0645.npy \n"," inflating: Dataset/train_track_B/centroid_0646.npy \n"," inflating: Dataset/train_track_B/centroid_0648.npy \n"," inflating: Dataset/train_track_B/centroid_0650.npy \n"," inflating: Dataset/train_track_B/centroid_0651.npy \n"," inflating: Dataset/train_track_B/centroid_0652.npy \n"," inflating: Dataset/train_track_B/centroid_0653.npy \n"," inflating: Dataset/train_track_B/centroid_0654.npy \n"," inflating: Dataset/train_track_B/centroid_0656.npy \n"," inflating: Dataset/train_track_B/centroid_0657.npy \n"," inflating: Dataset/train_track_B/centroid_0658.npy \n"," inflating: Dataset/train_track_B/centroid_0661.npy \n"," inflating: Dataset/train_track_B/centroid_0663.npy \n"," inflating: Dataset/train_track_B/centroid_0664.npy \n"," inflating: Dataset/train_track_B/centroid_0665.npy \n"," inflating: Dataset/train_track_B/centroid_0666.npy \n"," inflating: Dataset/train_track_B/centroid_0667.npy \n"," inflating: Dataset/train_track_B/centroid_0668.npy \n"," inflating: Dataset/train_track_B/centroid_0669.npy \n"," inflating: Dataset/train_track_B/centroid_0671.npy \n"," inflating: Dataset/train_track_B/centroid_0672.npy \n"," inflating: Dataset/train_track_B/centroid_0673.npy \n"," inflating: Dataset/train_track_B/centroid_0674.npy \n"," inflating: Dataset/train_track_B/centroid_0676.npy \n"," inflating: Dataset/train_track_B/centroid_0677.npy \n"," inflating: 
Dataset/train_track_B/centroid_0678.npy \n"," inflating: Dataset/train_track_B/centroid_0679.npy \n"," inflating: Dataset/train_track_B/centroid_0680.npy \n"," inflating: Dataset/train_track_B/centroid_0682.npy \n"," inflating: Dataset/train_track_B/centroid_0686.npy \n"," inflating: Dataset/train_track_B/centroid_0688.npy \n"," inflating: Dataset/train_track_B/centroid_0689.npy \n"," inflating: Dataset/train_track_B/centroid_0690.npy \n"," inflating: Dataset/train_track_B/centroid_0691.npy \n"," inflating: Dataset/train_track_B/centroid_0692.npy \n"," inflating: Dataset/train_track_B/centroid_0693.npy \n"," inflating: Dataset/train_track_B/centroid_0694.npy \n"," inflating: Dataset/train_track_B/centroid_0695.npy \n"," inflating: Dataset/train_track_B/centroid_0697.npy \n"," inflating: Dataset/train_track_B/centroid_0699.npy \n"," inflating: Dataset/train_track_B/centroid_0700.npy \n"," inflating: Dataset/train_track_B/centroid_0701.npy \n"," inflating: Dataset/train_track_B/centroid_0703.npy \n"," inflating: Dataset/train_track_B/centroid_0704.npy \n"," inflating: Dataset/train_track_B/centroid_0706.npy \n"," inflating: Dataset/train_track_B/centroid_0707.npy \n"," inflating: Dataset/train_track_B/centroid_0708.npy \n"," inflating: Dataset/train_track_B/centroid_0709.npy \n"," inflating: Dataset/train_track_B/centroid_0711.npy \n"," inflating: Dataset/train_track_B/centroid_0712.npy \n"," inflating: Dataset/train_track_B/centroid_0713.npy \n"," inflating: Dataset/train_track_B/centroid_0714.npy \n"," inflating: Dataset/train_track_B/centroid_0715.npy \n"," inflating: Dataset/train_track_B/centroid_0716.npy \n"," inflating: Dataset/train_track_B/centroid_0718.npy \n"," inflating: Dataset/train_track_B/centroid_0719.npy \n"," inflating: Dataset/train_track_B/centroid_0720.npy \n"," inflating: Dataset/train_track_B/centroid_0721.npy \n"," inflating: Dataset/train_track_B/centroid_0722.npy \n"," inflating: Dataset/train_track_B/centroid_0724.npy \n"," inflating: Dataset/train_track_B/centroid_0727.npy \n"," inflating: Dataset/train_track_B/centroid_0728.npy \n"," inflating: Dataset/train_track_B/centroid_0729.npy \n"," inflating: Dataset/train_track_B/centroid_0730.npy \n"," inflating: Dataset/train_track_B/centroid_0731.npy \n"," inflating: Dataset/train_track_B/centroid_0733.npy \n"," inflating: Dataset/train_track_B/centroid_0735.npy \n"," inflating: Dataset/train_track_B/centroid_0736.npy \n"," inflating: Dataset/train_track_B/centroid_0737.npy \n"," inflating: Dataset/train_track_B/centroid_0740.npy \n"," inflating: Dataset/train_track_B/centroid_0742.npy \n"," inflating: Dataset/train_track_B/centroid_0743.npy \n"," inflating: Dataset/train_track_B/centroid_0744.npy \n"," inflating: Dataset/train_track_B/centroid_0745.npy \n"," inflating: Dataset/train_track_B/press_0002.npy \n"," inflating: Dataset/train_track_B/press_0003.npy \n"," inflating: Dataset/train_track_B/press_0004.npy \n"," inflating: Dataset/train_track_B/press_0005.npy \n"," inflating: Dataset/train_track_B/press_0006.npy \n"," inflating: Dataset/train_track_B/press_0011.npy \n"," inflating: Dataset/train_track_B/press_0012.npy \n"," inflating: Dataset/train_track_B/press_0013.npy \n"," inflating: Dataset/train_track_B/press_0015.npy \n"," inflating: Dataset/train_track_B/press_0017.npy \n"," inflating: Dataset/train_track_B/press_0018.npy \n"," inflating: Dataset/train_track_B/press_0020.npy \n"," inflating: Dataset/train_track_B/press_0021.npy \n"," inflating: Dataset/train_track_B/press_0022.npy \n"," inflating: 
Dataset/train_track_B/press_0023.npy \n"," inflating: Dataset/train_track_B/press_0024.npy \n"," inflating: Dataset/train_track_B/press_0026.npy \n"," inflating: Dataset/train_track_B/press_0029.npy \n"," inflating: Dataset/train_track_B/press_0030.npy \n"," inflating: Dataset/train_track_B/press_0036.npy \n"," inflating: Dataset/train_track_B/press_0037.npy \n"," inflating: Dataset/train_track_B/press_0038.npy \n"," inflating: Dataset/train_track_B/press_0039.npy \n"," inflating: Dataset/train_track_B/press_0040.npy \n"," inflating: Dataset/train_track_B/press_0041.npy \n"," inflating: Dataset/train_track_B/press_0042.npy \n"," inflating: Dataset/train_track_B/press_0043.npy \n"," inflating: Dataset/train_track_B/press_0044.npy \n"," inflating: Dataset/train_track_B/press_0048.npy \n"," inflating: Dataset/train_track_B/press_0049.npy \n"," inflating: Dataset/train_track_B/press_0051.npy \n"," inflating: Dataset/train_track_B/press_0052.npy \n"," inflating: Dataset/train_track_B/press_0055.npy \n"," inflating: Dataset/train_track_B/press_0056.npy \n"," inflating: Dataset/train_track_B/press_0057.npy \n"," inflating: Dataset/train_track_B/press_0059.npy \n"," inflating: Dataset/train_track_B/press_0062.npy \n"," inflating: Dataset/train_track_B/press_0064.npy \n"," inflating: Dataset/train_track_B/press_0066.npy \n"," inflating: Dataset/train_track_B/press_0067.npy \n"," inflating: Dataset/train_track_B/press_0068.npy \n"," inflating: Dataset/train_track_B/press_0071.npy \n"," inflating: Dataset/train_track_B/press_0074.npy \n"," inflating: Dataset/train_track_B/press_0075.npy \n"," inflating: Dataset/train_track_B/press_0077.npy \n"," inflating: Dataset/train_track_B/press_0078.npy \n"," inflating: Dataset/train_track_B/press_0080.npy \n"," inflating: Dataset/train_track_B/press_0081.npy \n"," inflating: Dataset/train_track_B/press_0082.npy \n"," inflating: Dataset/train_track_B/press_0084.npy \n"," inflating: Dataset/train_track_B/press_0085.npy \n"," inflating: Dataset/train_track_B/press_0086.npy \n"," inflating: Dataset/train_track_B/press_0087.npy \n"," inflating: Dataset/train_track_B/press_0088.npy \n"," inflating: Dataset/train_track_B/press_0089.npy \n"," inflating: Dataset/train_track_B/press_0090.npy \n"," inflating: Dataset/train_track_B/press_0092.npy \n"," inflating: Dataset/train_track_B/press_0093.npy \n"," inflating: Dataset/train_track_B/press_0094.npy \n"," inflating: Dataset/train_track_B/press_0095.npy \n"," inflating: Dataset/train_track_B/press_0097.npy \n"," inflating: Dataset/train_track_B/press_0098.npy \n"," inflating: Dataset/train_track_B/press_0100.npy \n"," inflating: Dataset/train_track_B/press_0101.npy \n"," inflating: Dataset/train_track_B/press_0102.npy \n"," inflating: Dataset/train_track_B/press_0103.npy \n"," inflating: Dataset/train_track_B/press_0104.npy \n"," inflating: Dataset/train_track_B/press_0106.npy \n"," inflating: Dataset/train_track_B/press_0107.npy \n"," inflating: Dataset/train_track_B/press_0108.npy \n"," inflating: Dataset/train_track_B/press_0109.npy \n"," inflating: Dataset/train_track_B/press_0110.npy \n"," inflating: Dataset/train_track_B/press_0113.npy \n"," inflating: Dataset/train_track_B/press_0114.npy \n"," inflating: Dataset/train_track_B/press_0115.npy \n"," inflating: Dataset/train_track_B/press_0116.npy \n"," inflating: Dataset/train_track_B/press_0117.npy \n"," inflating: Dataset/train_track_B/press_0118.npy \n"," inflating: Dataset/train_track_B/press_0119.npy \n"," inflating: Dataset/train_track_B/press_0120.npy \n"," 
inflating: Dataset/train_track_B/press_0121.npy \n"," inflating: Dataset/train_track_B/press_0122.npy \n"," inflating: Dataset/train_track_B/press_0124.npy \n"," inflating: Dataset/train_track_B/press_0125.npy \n"," inflating: Dataset/train_track_B/press_0126.npy \n"," inflating: Dataset/train_track_B/press_0128.npy \n"," inflating: Dataset/train_track_B/press_0129.npy \n"," inflating: Dataset/train_track_B/press_0130.npy \n"," inflating: Dataset/train_track_B/press_0131.npy \n"," inflating: Dataset/train_track_B/press_0132.npy \n"," inflating: Dataset/train_track_B/press_0133.npy \n"," inflating: Dataset/train_track_B/press_0134.npy \n"," inflating: Dataset/train_track_B/press_0135.npy \n"," inflating: Dataset/train_track_B/press_0136.npy \n"," inflating: Dataset/train_track_B/press_0138.npy \n"," inflating: Dataset/train_track_B/press_0139.npy \n"," inflating: Dataset/train_track_B/press_0140.npy \n"," inflating: Dataset/train_track_B/press_0141.npy \n"," inflating: Dataset/train_track_B/press_0143.npy \n"," inflating: Dataset/train_track_B/press_0145.npy \n"," inflating: Dataset/train_track_B/press_0146.npy \n"," inflating: Dataset/train_track_B/press_0148.npy \n"," inflating: Dataset/train_track_B/press_0149.npy \n"," inflating: Dataset/train_track_B/press_0150.npy \n"," inflating: Dataset/train_track_B/press_0151.npy \n"," inflating: Dataset/train_track_B/press_0153.npy \n"," inflating: Dataset/train_track_B/press_0154.npy \n"," inflating: Dataset/train_track_B/press_0156.npy \n"," inflating: Dataset/train_track_B/press_0157.npy \n"," inflating: Dataset/train_track_B/press_0158.npy \n"," inflating: Dataset/train_track_B/press_0161.npy \n"," inflating: Dataset/train_track_B/press_0162.npy \n"," inflating: Dataset/train_track_B/press_0163.npy \n"," inflating: Dataset/train_track_B/press_0164.npy \n"," inflating: Dataset/train_track_B/press_0166.npy \n"," inflating: Dataset/train_track_B/press_0167.npy \n"," inflating: Dataset/train_track_B/press_0168.npy \n"," inflating: Dataset/train_track_B/press_0170.npy \n"," inflating: Dataset/train_track_B/press_0171.npy \n"," inflating: Dataset/train_track_B/press_0172.npy \n"," inflating: Dataset/train_track_B/press_0174.npy \n"," inflating: Dataset/train_track_B/press_0175.npy \n"," inflating: Dataset/train_track_B/press_0183.npy \n"," inflating: Dataset/train_track_B/press_0184.npy \n"," inflating: Dataset/train_track_B/press_0185.npy \n"," inflating: Dataset/train_track_B/press_0189.npy \n"," inflating: Dataset/train_track_B/press_0190.npy \n"," inflating: Dataset/train_track_B/press_0193.npy \n"," inflating: Dataset/train_track_B/press_0194.npy \n"," inflating: Dataset/train_track_B/press_0195.npy \n"," inflating: Dataset/train_track_B/press_0197.npy \n"," inflating: Dataset/train_track_B/press_0201.npy \n"," inflating: Dataset/train_track_B/press_0203.npy \n"," inflating: Dataset/train_track_B/press_0204.npy \n"," inflating: Dataset/train_track_B/press_0205.npy \n"," inflating: Dataset/train_track_B/press_0206.npy \n"," inflating: Dataset/train_track_B/press_0208.npy \n"," inflating: Dataset/train_track_B/press_0210.npy \n"," inflating: Dataset/train_track_B/press_0211.npy \n"," inflating: Dataset/train_track_B/press_0216.npy \n"," inflating: Dataset/train_track_B/press_0217.npy \n"," inflating: Dataset/train_track_B/press_0219.npy \n"," inflating: Dataset/train_track_B/press_0220.npy \n"," inflating: Dataset/train_track_B/press_0227.npy \n"," inflating: Dataset/train_track_B/press_0228.npy \n"," inflating: 
Dataset/train_track_B/press_0229.npy \n"," inflating: Dataset/train_track_B/press_0232.npy \n"," inflating: Dataset/train_track_B/press_0234.npy \n"," inflating: Dataset/train_track_B/press_0235.npy \n"," inflating: Dataset/train_track_B/press_0236.npy \n"," inflating: Dataset/train_track_B/press_0238.npy \n"," inflating: Dataset/train_track_B/press_0239.npy \n"," inflating: Dataset/train_track_B/press_0240.npy \n"," inflating: Dataset/train_track_B/press_0241.npy \n"," inflating: Dataset/train_track_B/press_0245.npy \n"," inflating: Dataset/train_track_B/press_0246.npy \n"," inflating: Dataset/train_track_B/press_0247.npy \n"," inflating: Dataset/train_track_B/press_0248.npy \n"," inflating: Dataset/train_track_B/press_0249.npy \n"," inflating: Dataset/train_track_B/press_0252.npy \n"," inflating: Dataset/train_track_B/press_0253.npy \n"," inflating: Dataset/train_track_B/press_0254.npy \n"," inflating: Dataset/train_track_B/press_0256.npy \n"," inflating: Dataset/train_track_B/press_0257.npy \n"," inflating: Dataset/train_track_B/press_0259.npy \n"," inflating: Dataset/train_track_B/press_0264.npy \n"," inflating: Dataset/train_track_B/press_0265.npy \n"," inflating: Dataset/train_track_B/press_0266.npy \n"," inflating: Dataset/train_track_B/press_0268.npy \n"," inflating: Dataset/train_track_B/press_0269.npy \n"," inflating: Dataset/train_track_B/press_0271.npy \n"," inflating: Dataset/train_track_B/press_0272.npy \n"," inflating: Dataset/train_track_B/press_0273.npy \n"," inflating: Dataset/train_track_B/press_0275.npy \n"," inflating: Dataset/train_track_B/press_0276.npy \n"," inflating: Dataset/train_track_B/press_0277.npy \n"," inflating: Dataset/train_track_B/press_0279.npy \n"," inflating: Dataset/train_track_B/press_0280.npy \n"," inflating: Dataset/train_track_B/press_0281.npy \n"," inflating: Dataset/train_track_B/press_0284.npy \n"," inflating: Dataset/train_track_B/press_0285.npy \n"," inflating: Dataset/train_track_B/press_0286.npy \n"," inflating: Dataset/train_track_B/press_0288.npy \n"," inflating: Dataset/train_track_B/press_0289.npy \n"," inflating: Dataset/train_track_B/press_0290.npy \n"," inflating: Dataset/train_track_B/press_0291.npy \n"," inflating: Dataset/train_track_B/press_0294.npy \n"," inflating: Dataset/train_track_B/press_0296.npy \n"," inflating: Dataset/train_track_B/press_0297.npy \n"," inflating: Dataset/train_track_B/press_0298.npy \n"," inflating: Dataset/train_track_B/press_0301.npy \n"," inflating: Dataset/train_track_B/press_0304.npy \n"," inflating: Dataset/train_track_B/press_0305.npy \n"," inflating: Dataset/train_track_B/press_0306.npy \n"," inflating: Dataset/train_track_B/press_0307.npy \n"," inflating: Dataset/train_track_B/press_0308.npy \n"," inflating: Dataset/train_track_B/press_0310.npy \n"," inflating: Dataset/train_track_B/press_0311.npy \n"," inflating: Dataset/train_track_B/press_0314.npy \n"," inflating: Dataset/train_track_B/press_0315.npy \n"," inflating: Dataset/train_track_B/press_0316.npy \n"," inflating: Dataset/train_track_B/press_0320.npy \n"," inflating: Dataset/train_track_B/press_0321.npy \n"," inflating: Dataset/train_track_B/press_0323.npy \n"," inflating: Dataset/train_track_B/press_0324.npy \n"," inflating: Dataset/train_track_B/press_0327.npy \n"," inflating: Dataset/train_track_B/press_0330.npy \n"," inflating: Dataset/train_track_B/press_0331.npy \n"," inflating: Dataset/train_track_B/press_0332.npy \n"," inflating: Dataset/train_track_B/press_0333.npy \n"," inflating: Dataset/train_track_B/press_0334.npy \n"," 
inflating: Dataset/train_track_B/press_0337.npy \n"," inflating: Dataset/train_track_B/press_0338.npy \n"," inflating: Dataset/train_track_B/press_0339.npy \n"," inflating: Dataset/train_track_B/press_0340.npy \n"," inflating: Dataset/train_track_B/press_0341.npy \n"," inflating: Dataset/train_track_B/press_0342.npy \n"," inflating: Dataset/train_track_B/press_0343.npy \n"," inflating: Dataset/train_track_B/press_0344.npy \n"," inflating: Dataset/train_track_B/press_0345.npy \n"," inflating: Dataset/train_track_B/press_0346.npy \n"," inflating: Dataset/train_track_B/press_0348.npy \n"," inflating: Dataset/train_track_B/press_0349.npy \n"," inflating: Dataset/train_track_B/press_0351.npy \n"," inflating: Dataset/train_track_B/press_0352.npy \n"," inflating: Dataset/train_track_B/press_0353.npy \n"," inflating: Dataset/train_track_B/press_0354.npy \n"," inflating: Dataset/train_track_B/press_0356.npy \n"," inflating: Dataset/train_track_B/press_0357.npy \n"," inflating: Dataset/train_track_B/press_0359.npy \n"," inflating: Dataset/train_track_B/press_0360.npy \n"," inflating: Dataset/train_track_B/press_0361.npy \n"," inflating: Dataset/train_track_B/press_0363.npy \n"," inflating: Dataset/train_track_B/press_0364.npy \n"," inflating: Dataset/train_track_B/press_0365.npy \n"," inflating: Dataset/train_track_B/press_0366.npy \n"," inflating: Dataset/train_track_B/press_0367.npy \n"," inflating: Dataset/train_track_B/press_0368.npy \n"," inflating: Dataset/train_track_B/press_0369.npy \n"," inflating: Dataset/train_track_B/press_0371.npy \n"," inflating: Dataset/train_track_B/press_0373.npy \n"," inflating: Dataset/train_track_B/press_0376.npy \n"," inflating: Dataset/train_track_B/press_0377.npy \n"," inflating: Dataset/train_track_B/press_0378.npy \n"," inflating: Dataset/train_track_B/press_0379.npy \n"," inflating: Dataset/train_track_B/press_0381.npy \n"," inflating: Dataset/train_track_B/press_0382.npy \n"," inflating: Dataset/train_track_B/press_0383.npy \n"," inflating: Dataset/train_track_B/press_0384.npy \n"," inflating: Dataset/train_track_B/press_0385.npy \n"," inflating: Dataset/train_track_B/press_0387.npy \n"," inflating: Dataset/train_track_B/press_0388.npy \n"," inflating: Dataset/train_track_B/press_0389.npy \n"," inflating: Dataset/train_track_B/press_0392.npy \n"," inflating: Dataset/train_track_B/press_0393.npy \n"," inflating: Dataset/train_track_B/press_0394.npy \n"," inflating: Dataset/train_track_B/press_0395.npy \n"," inflating: Dataset/train_track_B/press_0396.npy \n"," inflating: Dataset/train_track_B/press_0398.npy \n"," inflating: Dataset/train_track_B/press_0399.npy \n"," inflating: Dataset/train_track_B/press_0400.npy \n"," inflating: Dataset/train_track_B/press_0401.npy \n"," inflating: Dataset/train_track_B/press_0402.npy \n"," inflating: Dataset/train_track_B/press_0403.npy \n"," inflating: Dataset/train_track_B/press_0404.npy \n"," inflating: Dataset/train_track_B/press_0405.npy \n"," inflating: Dataset/train_track_B/press_0407.npy \n"," inflating: Dataset/train_track_B/press_0408.npy \n"," inflating: Dataset/train_track_B/press_0409.npy \n"," inflating: Dataset/train_track_B/press_0410.npy \n"," inflating: Dataset/train_track_B/press_0411.npy \n"," inflating: Dataset/train_track_B/press_0413.npy \n"," inflating: Dataset/train_track_B/press_0416.npy \n"," inflating: Dataset/train_track_B/press_0417.npy \n"," inflating: Dataset/train_track_B/press_0421.npy \n"," inflating: Dataset/train_track_B/press_0422.npy \n"," inflating: 
Dataset/train_track_B/press_0423.npy \n"," inflating: Dataset/train_track_B/press_0424.npy \n"," inflating: Dataset/train_track_B/press_0425.npy \n"," inflating: Dataset/train_track_B/press_0428.npy \n"," inflating: Dataset/train_track_B/press_0429.npy \n"," inflating: Dataset/train_track_B/press_0430.npy \n"," inflating: Dataset/train_track_B/press_0431.npy \n"," inflating: Dataset/train_track_B/press_0432.npy \n"," inflating: Dataset/train_track_B/press_0435.npy \n"," inflating: Dataset/train_track_B/press_0438.npy \n"," inflating: Dataset/train_track_B/press_0439.npy \n"," inflating: Dataset/train_track_B/press_0441.npy \n"," inflating: Dataset/train_track_B/press_0444.npy \n"," inflating: Dataset/train_track_B/press_0445.npy \n"," inflating: Dataset/train_track_B/press_0449.npy \n"," inflating: Dataset/train_track_B/press_0450.npy \n"," inflating: Dataset/train_track_B/press_0451.npy \n"," inflating: Dataset/train_track_B/press_0452.npy \n"," inflating: Dataset/train_track_B/press_0453.npy \n"," inflating: Dataset/train_track_B/press_0456.npy \n"," inflating: Dataset/train_track_B/press_0457.npy \n"," inflating: Dataset/train_track_B/press_0458.npy \n"," inflating: Dataset/train_track_B/press_0459.npy \n"," inflating: Dataset/train_track_B/press_0460.npy \n"," inflating: Dataset/train_track_B/press_0461.npy \n"," inflating: Dataset/train_track_B/press_0463.npy \n"," inflating: Dataset/train_track_B/press_0464.npy \n"," inflating: Dataset/train_track_B/press_0465.npy \n"," inflating: Dataset/train_track_B/press_0467.npy \n"," inflating: Dataset/train_track_B/press_0469.npy \n"," inflating: Dataset/train_track_B/press_0471.npy \n"," inflating: Dataset/train_track_B/press_0472.npy \n"," inflating: Dataset/train_track_B/press_0474.npy \n"," inflating: Dataset/train_track_B/press_0475.npy \n"," inflating: Dataset/train_track_B/press_0477.npy \n"," inflating: Dataset/train_track_B/press_0478.npy \n"," inflating: Dataset/train_track_B/press_0479.npy \n"," inflating: Dataset/train_track_B/press_0480.npy \n"," inflating: Dataset/train_track_B/press_0481.npy \n"," inflating: Dataset/train_track_B/press_0482.npy \n"," inflating: Dataset/train_track_B/press_0485.npy \n"," inflating: Dataset/train_track_B/press_0486.npy \n"," inflating: Dataset/train_track_B/press_0487.npy \n"," inflating: Dataset/train_track_B/press_0488.npy \n"," inflating: Dataset/train_track_B/press_0489.npy \n"," inflating: Dataset/train_track_B/press_0492.npy \n"," inflating: Dataset/train_track_B/press_0493.npy \n"," inflating: Dataset/train_track_B/press_0494.npy \n"," inflating: Dataset/train_track_B/press_0497.npy \n"," inflating: Dataset/train_track_B/press_0498.npy \n"," inflating: Dataset/train_track_B/press_0499.npy \n"," inflating: Dataset/train_track_B/press_0501.npy \n"," inflating: Dataset/train_track_B/press_0502.npy \n"," inflating: Dataset/train_track_B/press_0503.npy \n"," inflating: Dataset/train_track_B/press_0504.npy \n"," inflating: Dataset/train_track_B/press_0507.npy \n"," inflating: Dataset/train_track_B/press_0508.npy \n"," inflating: Dataset/train_track_B/press_0509.npy \n"," inflating: Dataset/train_track_B/press_0513.npy \n"," inflating: Dataset/train_track_B/press_0514.npy \n"," inflating: Dataset/train_track_B/press_0515.npy \n"," inflating: Dataset/train_track_B/press_0517.npy \n"," inflating: Dataset/train_track_B/press_0518.npy \n"," inflating: Dataset/train_track_B/press_0519.npy \n"," inflating: Dataset/train_track_B/press_0520.npy \n"," inflating: Dataset/train_track_B/press_0521.npy \n"," 
inflating: Dataset/train_track_B/press_0522.npy \n"," inflating: Dataset/train_track_B/press_0523.npy \n"," inflating: Dataset/train_track_B/press_0524.npy \n"," inflating: Dataset/train_track_B/press_0525.npy \n"," inflating: Dataset/train_track_B/press_0526.npy \n"," inflating: Dataset/train_track_B/press_0527.npy \n"," inflating: Dataset/train_track_B/press_0528.npy \n"," inflating: Dataset/train_track_B/press_0529.npy \n"," inflating: Dataset/train_track_B/press_0530.npy \n"," inflating: Dataset/train_track_B/press_0531.npy \n"," inflating: Dataset/train_track_B/press_0534.npy \n"," inflating: Dataset/train_track_B/press_0535.npy \n"," inflating: Dataset/train_track_B/press_0536.npy \n"," inflating: Dataset/train_track_B/press_0538.npy \n"," inflating: Dataset/train_track_B/press_0541.npy \n"," inflating: Dataset/train_track_B/press_0542.npy \n"," inflating: Dataset/train_track_B/press_0544.npy \n"," inflating: Dataset/train_track_B/press_0545.npy \n"," inflating: Dataset/train_track_B/press_0546.npy \n"," inflating: Dataset/train_track_B/press_0547.npy \n"," inflating: Dataset/train_track_B/press_0550.npy \n"," inflating: Dataset/train_track_B/press_0551.npy \n"," inflating: Dataset/train_track_B/press_0553.npy \n"," inflating: Dataset/train_track_B/press_0555.npy \n"," inflating: Dataset/train_track_B/press_0557.npy \n"," inflating: Dataset/train_track_B/press_0558.npy \n"," inflating: Dataset/train_track_B/press_0561.npy \n"," inflating: Dataset/train_track_B/press_0563.npy \n"," inflating: Dataset/train_track_B/press_0564.npy \n"," inflating: Dataset/train_track_B/press_0565.npy \n"," inflating: Dataset/train_track_B/press_0567.npy \n"," inflating: Dataset/train_track_B/press_0568.npy \n"," inflating: Dataset/train_track_B/press_0571.npy \n"," inflating: Dataset/train_track_B/press_0574.npy \n"," inflating: Dataset/train_track_B/press_0576.npy \n"," inflating: Dataset/train_track_B/press_0579.npy \n"," inflating: Dataset/train_track_B/press_0580.npy \n"," inflating: Dataset/train_track_B/press_0582.npy \n"," inflating: Dataset/train_track_B/press_0584.npy \n"," inflating: Dataset/train_track_B/press_0585.npy \n"," inflating: Dataset/train_track_B/press_0588.npy \n"," inflating: Dataset/train_track_B/press_0589.npy \n"," inflating: Dataset/train_track_B/press_0590.npy \n"," inflating: Dataset/train_track_B/press_0591.npy \n"," inflating: Dataset/train_track_B/press_0592.npy \n"," inflating: Dataset/train_track_B/press_0593.npy \n"," inflating: Dataset/train_track_B/press_0594.npy \n"," inflating: Dataset/train_track_B/press_0595.npy \n"," inflating: Dataset/train_track_B/press_0596.npy \n"," inflating: Dataset/train_track_B/press_0597.npy \n"," inflating: Dataset/train_track_B/press_0598.npy \n"," inflating: Dataset/train_track_B/press_0600.npy \n"," inflating: Dataset/train_track_B/press_0602.npy \n"," inflating: Dataset/train_track_B/press_0605.npy \n"," inflating: Dataset/train_track_B/press_0608.npy \n"," inflating: Dataset/train_track_B/press_0609.npy \n"," inflating: Dataset/train_track_B/press_0611.npy \n"," inflating: Dataset/train_track_B/press_0612.npy \n"," inflating: Dataset/train_track_B/press_0613.npy \n"," inflating: Dataset/train_track_B/press_0614.npy \n"," inflating: Dataset/train_track_B/press_0618.npy \n"," inflating: Dataset/train_track_B/press_0619.npy \n"," inflating: Dataset/train_track_B/press_0620.npy \n"," inflating: Dataset/train_track_B/press_0621.npy \n"," inflating: Dataset/train_track_B/press_0622.npy \n"," inflating: 
Dataset/train_track_B/press_0623.npy \n"," inflating: Dataset/train_track_B/press_0624.npy \n"," inflating: Dataset/train_track_B/press_0625.npy \n"," inflating: Dataset/train_track_B/press_0627.npy \n"," inflating: Dataset/train_track_B/press_0628.npy \n"," inflating: Dataset/train_track_B/press_0629.npy \n"," inflating: Dataset/train_track_B/press_0630.npy \n"," inflating: Dataset/train_track_B/press_0631.npy \n"," inflating: Dataset/train_track_B/press_0632.npy \n"," inflating: Dataset/train_track_B/press_0633.npy \n"," inflating: Dataset/train_track_B/press_0634.npy \n"," inflating: Dataset/train_track_B/press_0635.npy \n"," inflating: Dataset/train_track_B/press_0637.npy \n"," inflating: Dataset/train_track_B/press_0638.npy \n"," inflating: Dataset/train_track_B/press_0639.npy \n"," inflating: Dataset/train_track_B/press_0640.npy \n"," inflating: Dataset/train_track_B/press_0641.npy \n"," inflating: Dataset/train_track_B/press_0643.npy \n"," inflating: Dataset/train_track_B/press_0644.npy \n"," inflating: Dataset/train_track_B/press_0645.npy \n"," inflating: Dataset/train_track_B/press_0646.npy \n"," inflating: Dataset/train_track_B/press_0648.npy \n"," inflating: Dataset/train_track_B/press_0650.npy \n"," inflating: Dataset/train_track_B/press_0651.npy \n"," inflating: Dataset/train_track_B/press_0652.npy \n"," inflating: Dataset/train_track_B/press_0653.npy \n"," inflating: Dataset/train_track_B/press_0654.npy \n"," inflating: Dataset/train_track_B/press_0656.npy \n"," inflating: Dataset/train_track_B/press_0657.npy \n"," inflating: Dataset/train_track_B/press_0658.npy \n"," inflating: Dataset/train_track_B/press_0661.npy \n"," inflating: Dataset/train_track_B/press_0663.npy \n"," inflating: Dataset/train_track_B/press_0664.npy \n"," inflating: Dataset/train_track_B/press_0665.npy \n"," inflating: Dataset/train_track_B/press_0666.npy \n"," inflating: Dataset/train_track_B/press_0667.npy \n"," inflating: Dataset/train_track_B/press_0668.npy \n"," inflating: Dataset/train_track_B/press_0669.npy \n"," inflating: Dataset/train_track_B/press_0671.npy \n"," inflating: Dataset/train_track_B/press_0672.npy \n"," inflating: Dataset/train_track_B/press_0673.npy \n"," inflating: Dataset/train_track_B/press_0674.npy \n"," inflating: Dataset/train_track_B/press_0676.npy \n"," inflating: Dataset/train_track_B/press_0677.npy \n"," inflating: Dataset/train_track_B/press_0678.npy \n"," inflating: Dataset/train_track_B/press_0679.npy \n"," inflating: Dataset/train_track_B/press_0680.npy \n"," inflating: Dataset/train_track_B/press_0682.npy \n"," inflating: Dataset/train_track_B/press_0686.npy \n"," inflating: Dataset/train_track_B/press_0688.npy \n"," inflating: Dataset/train_track_B/press_0689.npy \n"," inflating: Dataset/train_track_B/press_0690.npy \n"," inflating: Dataset/train_track_B/press_0691.npy \n"," inflating: Dataset/train_track_B/press_0692.npy \n"," inflating: Dataset/train_track_B/press_0693.npy \n"," inflating: Dataset/train_track_B/press_0694.npy \n"," inflating: Dataset/train_track_B/press_0695.npy \n"," inflating: Dataset/train_track_B/press_0697.npy \n"," inflating: Dataset/train_track_B/press_0699.npy \n"," inflating: Dataset/train_track_B/press_0700.npy \n"," inflating: Dataset/train_track_B/press_0701.npy \n"," inflating: Dataset/train_track_B/press_0703.npy \n"," inflating: Dataset/train_track_B/press_0704.npy \n"," inflating: Dataset/train_track_B/press_0706.npy \n"," inflating: Dataset/train_track_B/press_0707.npy \n"," inflating: Dataset/train_track_B/press_0708.npy \n"," 
inflating: Dataset/train_track_B/press_0709.npy \n"," inflating: Dataset/train_track_B/press_0711.npy \n"," inflating: Dataset/train_track_B/press_0712.npy \n"," inflating: Dataset/train_track_B/press_0713.npy \n"," inflating: Dataset/train_track_B/press_0714.npy \n"," inflating: Dataset/train_track_B/press_0715.npy \n"," inflating: Dataset/train_track_B/press_0716.npy \n"," inflating: Dataset/train_track_B/press_0718.npy \n"," inflating: Dataset/train_track_B/press_0719.npy \n"," inflating: Dataset/train_track_B/press_0720.npy \n"," inflating: Dataset/train_track_B/press_0721.npy \n"," inflating: Dataset/train_track_B/press_0722.npy \n"," inflating: Dataset/train_track_B/press_0724.npy \n"," inflating: Dataset/train_track_B/press_0727.npy \n"," inflating: Dataset/train_track_B/press_0728.npy \n"," inflating: Dataset/train_track_B/press_0729.npy \n"," inflating: Dataset/train_track_B/press_0730.npy \n"," inflating: Dataset/train_track_B/press_0731.npy \n"," inflating: Dataset/train_track_B/press_0733.npy \n"," inflating: Dataset/train_track_B/press_0735.npy \n"," inflating: Dataset/train_track_B/press_0736.npy \n"," inflating: Dataset/train_track_B/press_0737.npy \n"," inflating: Dataset/train_track_B/press_0740.npy \n"," inflating: Dataset/train_track_B/press_0742.npy \n"," inflating: Dataset/train_track_B/press_0743.npy \n"," inflating: Dataset/train_track_B/press_0744.npy \n"," inflating: Dataset/train_track_B/press_0745.npy \n"]}],"source":["!wget --header=\"Host: ai-studio-online.bj.bcebos.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Referer: https://aistudio.baidu.com/\" \"https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\" -c -O 'train_track_B.zip'\n","!mkdir -p Dataset/train_track_B && unzip -o train_track_B.zip -d Dataset/train_track_B/\n"]}],"metadata":{"colab":{"authorship_tag":"ABX9TyNUH9p5eYZb0k/VZfG/jPGH","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0} +{"cells":[{"cell_type":"markdown","metadata":{"id":"d25Ges-BIcKg"},"source":["# 下载比赛数据"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":40227,"status":"ok","timestamp":1720876228678,"user":{"displayName":"Jintang Li","userId":"11727574012572911053"},"user_tz":-480},"id":"8zLTCo4-DJUW","outputId":"83f5ac5f-74a7-4811-a72b-0976ab099a08"},"outputs":[{"name":"stdout","output_type":"stream","text":["--2024-07-13 13:09:47-- https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=dc3aa13c-c3a9-458f-983a-8586798cb635&at=APZUnTX25XMxi-z-3wBcgR93IGsL%3A1719235792953\n","Resolving drive.usercontent.google.com (drive.usercontent.google.com)... 
108.177.98.132, 2607:f8b0:400e:c05::84\n","Connecting to drive.usercontent.google.com (drive.usercontent.google.com)|108.177.98.132|:443... connected.\n","HTTP request sent, awaiting response... 200 OK\n","Length: 1084182095 (1.0G) [application/octet-stream]\n","Saving to: ‘Dataset.zip’\n","\n","Dataset.zip 100%[===================>] 1.01G 47.6MB/s in 15s \n","\n","2024-07-13 13:10:03 (71.2 MB/s) - ‘Dataset.zip’ saved [1084182095/1084182095]\n","\n","Archive: Dataset.zip\n"," creating: Dataset/Dataset/\n"," creating: Dataset/Dataset/Testset_track_A/\n"," creating: Dataset/Dataset/Testset_track_A/Inference/\n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_658.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_659.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_660.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_662.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_663.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_664.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_665.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_666.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_667.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_668.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_672.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_673.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_674.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_675.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_676.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_677.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_678.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_679.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_681.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_683.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_684.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_686.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_687.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_688.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_689.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_690.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_691.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_692.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_693.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_695.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_696.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_697.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_700.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_701.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_702.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_703.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_704.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_705.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_708.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_709.ply \n"," inflating: 
Dataset/Dataset/Testset_track_A/Inference/mesh_710.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_711.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_712.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_713.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_715.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_717.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_718.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_719.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_721.ply \n"," inflating: Dataset/Dataset/Testset_track_A/Inference/mesh_722.ply \n"," creating: Dataset/Dataset/Testset_track_B/\n"," creating: Dataset/Dataset/Testset_track_B/Auxiliary/\n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_1.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_10.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_11.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_12.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_13.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_14.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_15.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_16.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_17.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_18.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_19.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_2.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_20.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_21.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_22.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_23.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_24.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_25.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_26.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_27.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_28.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_29.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_3.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_30.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_31.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_32.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_33.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_34.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_35.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_36.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_37.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_38.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_39.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_4.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_40.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_41.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_42.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_43.npy \n"," inflating: 
Dataset/Dataset/Testset_track_B/Auxiliary/area_44.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_45.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_46.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_47.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_48.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_49.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_5.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_50.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_6.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_7.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_8.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_9.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/area_bounds.txt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/global_bounds.txt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_1.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_10.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_11.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_12.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_13.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_14.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_15.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_16.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_17.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_18.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_19.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_2.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_20.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_21.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_22.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_23.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_24.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_25.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_26.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_27.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_28.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_29.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_3.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_30.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_31.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_32.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_33.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_34.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_35.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_36.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_37.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_38.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_39.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_4.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_40.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_41.pt \n"," inflating: 
Dataset/Dataset/Testset_track_B/Auxiliary/info_42.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_43.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_44.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_45.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_46.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_47.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_48.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_49.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_5.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_50.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_6.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_7.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_8.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_9.pt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/info_bounds.txt \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_1.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_10.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_11.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_12.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_13.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_14.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_15.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_16.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_17.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_18.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_19.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_2.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_20.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_21.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_22.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_23.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_24.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_25.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_26.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_27.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_28.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_29.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_3.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_30.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_31.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_32.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_33.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_34.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_35.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_36.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_37.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_38.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_39.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_4.npy \n"," inflating: 
Dataset/Dataset/Testset_track_B/Auxiliary/normal_40.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_41.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_42.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_43.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_44.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_45.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_46.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_47.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_48.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_49.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_5.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_50.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_6.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_7.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_8.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/normal_9.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Auxiliary/train_pressure_mean_std.txt \n"," inflating: Dataset/Dataset/Testset_track_B/IJCAI_data_doc_v1.pdf \n"," creating: Dataset/Dataset/Testset_track_B/Inference/\n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_1.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_10.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_11.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_12.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_13.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_14.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_15.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_16.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_17.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_18.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_19.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_2.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_20.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_21.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_22.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_23.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_24.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_25.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_26.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_27.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_28.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_29.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_3.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_30.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_31.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_32.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_33.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_34.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_35.npy \n"," inflating: 
Dataset/Dataset/Testset_track_B/Inference/centroid_36.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_37.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_38.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_39.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_4.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_40.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_41.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_42.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_43.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_44.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_45.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_46.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_47.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_48.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_49.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_5.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_50.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_6.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_7.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_8.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/centroid_9.npy \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_1.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_10.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_11.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_12.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_13.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_14.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_15.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_16.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_17.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_18.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_19.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_2.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_20.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_21.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_22.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_23.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_24.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_25.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_26.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_27.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_28.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_29.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_3.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_30.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_31.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_32.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_33.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_34.ply \n"," inflating: 
Dataset/Dataset/Testset_track_B/Inference/mesh_35.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_36.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_37.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_38.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_39.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_4.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_40.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_41.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_42.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_43.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_44.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_45.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_46.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_47.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_48.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_49.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_5.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_50.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_6.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_7.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_8.ply \n"," inflating: Dataset/Dataset/Testset_track_B/Inference/mesh_9.ply \n"," inflating: Dataset/Dataset/Testset_track_B/track_B_data_dict.xlsx \n"," creating: Dataset/Dataset/Training_data/\n"," creating: Dataset/Dataset/Training_data/Feature/\n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_001.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_002.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_004.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_005.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_006.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_007.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_008.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_010.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_012.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_013.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_017.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_018.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_021.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_022.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_023.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_025.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_026.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_027.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_028.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_029.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_030.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_031.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_032.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_034.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_035.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_039.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_040.ply \n"," 
inflating: Dataset/Dataset/Training_data/Feature/mesh_043.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_044.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_045.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_046.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_047.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_048.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_049.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_050.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_051.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_052.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_054.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_055.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_056.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_058.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_059.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_060.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_061.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_062.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_063.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_064.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_065.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_067.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_069.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_070.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_071.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_072.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_073.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_074.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_075.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_076.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_077.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_078.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_079.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_080.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_081.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_083.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_084.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_085.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_086.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_087.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_088.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_090.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_091.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_092.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_094.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_095.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_096.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_097.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_100.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_101.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_102.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_105.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_106.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_107.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_109.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_110.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_111.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_112.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_113.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_114.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_115.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_116.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_117.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_118.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_119.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_120.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_121.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_123.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_124.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_125.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_126.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_127.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_128.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_129.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_130.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_131.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_133.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_134.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_136.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_137.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_138.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_139.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_140.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_141.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_142.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_143.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_144.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_145.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_146.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_147.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_148.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_149.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_150.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_151.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_152.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_153.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_155.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_156.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_157.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_158.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_159.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_160.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_161.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_162.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_163.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_165.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_166.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_170.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_172.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_173.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_175.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_176.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_177.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_178.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_179.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_180.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_181.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_182.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_183.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_184.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_186.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_190.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_191.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_192.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_193.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_195.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_196.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_198.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_199.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_200.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_201.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_202.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_203.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_205.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_207.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_210.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_211.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_212.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_213.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_214.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_215.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_217.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_219.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_220.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_221.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_222.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_223.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_224.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_225.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_227.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_228.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_229.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_230.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_231.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_232.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_233.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_234.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_235.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_236.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_237.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_241.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_243.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_244.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_245.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_246.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_247.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_248.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_249.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_251.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_252.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_253.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_255.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_257.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_258.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_259.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_260.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_261.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_262.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_263.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_264.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_266.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_267.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_268.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_269.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_271.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_272.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_273.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_274.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_275.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_276.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_277.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_278.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_279.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_280.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_281.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_282.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_283.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_285.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_286.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_289.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_290.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_291.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_292.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_293.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_294.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_295.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_296.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_297.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_298.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_299.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_300.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_301.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_302.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_304.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_305.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_306.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_308.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_309.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_310.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_311.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_312.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_313.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_314.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_315.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_319.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_320.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_321.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_322.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_323.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_324.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_325.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_327.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_328.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_329.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_331.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_332.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_333.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_334.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_335.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_337.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_338.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_339.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_340.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_341.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_344.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_345.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_347.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_348.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_349.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_350.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_352.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_353.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_354.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_355.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_356.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_357.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_358.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_360.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_362.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_364.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_365.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_366.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_367.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_369.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_371.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_372.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_373.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_374.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_375.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_376.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_378.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_379.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_380.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_381.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_384.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_385.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_389.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_392.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_393.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_397.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_398.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_399.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_401.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_402.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_403.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_404.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_405.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_407.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_408.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_410.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_412.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_413.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_414.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_415.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_417.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_418.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_419.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_420.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_422.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_424.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_425.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_427.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_430.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_431.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_433.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_435.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_436.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_437.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_439.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_440.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_443.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_444.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_446.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_447.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_448.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_449.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_450.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_451.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_452.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_453.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_454.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_455.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_456.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_457.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_459.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_460.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_462.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_463.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_464.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_465.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_466.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_467.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_468.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_469.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_470.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_472.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_473.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_474.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_475.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_476.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_478.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_479.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_480.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_482.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_483.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_486.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_487.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_488.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_490.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_493.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_494.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_495.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_496.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_497.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_498.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_499.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_501.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_502.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_503.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_504.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_505.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_507.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_508.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_509.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_511.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_512.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_513.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_514.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_515.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_516.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_518.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_519.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_521.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_522.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_523.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_524.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_525.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_527.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_529.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_530.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_532.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_533.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_536.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_538.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_539.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_540.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_542.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_543.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_545.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_547.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_548.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_549.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_550.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_551.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_552.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_553.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_554.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_555.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_560.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_561.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_562.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_564.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_565.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_566.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_567.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_568.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_569.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_572.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_573.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_574.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_576.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_577.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_579.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_581.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_582.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_583.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_584.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_587.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_588.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_589.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_591.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_593.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_594.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_595.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_596.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_597.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_598.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_600.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_602.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_604.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_608.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_610.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_611.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_612.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_613.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_615.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_616.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_617.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_618.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_620.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_621.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_622.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_623.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_625.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_626.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_627.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_628.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_629.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_630.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_631.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_632.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_633.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_634.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_635.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_636.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_638.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_639.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_640.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_641.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_642.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_643.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_644.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_645.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_646.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_647.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_648.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_649.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_651.ply \n"," inflating: 
Dataset/Dataset/Training_data/Feature/mesh_652.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_654.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_655.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_656.ply \n"," inflating: Dataset/Dataset/Training_data/Feature/mesh_657.ply \n"," creating: Dataset/Dataset/Training_data/Label/\n"," inflating: Dataset/Dataset/Training_data/Label/press_001.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_002.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_004.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_005.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_006.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_007.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_008.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_010.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_012.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_013.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_017.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_018.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_021.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_022.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_023.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_025.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_026.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_027.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_028.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_029.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_030.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_031.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_032.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_034.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_035.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_039.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_040.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_043.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_044.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_045.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_046.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_047.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_048.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_049.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_050.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_051.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_052.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_054.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_055.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_056.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_058.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_059.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_060.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_061.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_062.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_063.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_064.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_065.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_067.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_069.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_070.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_071.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_072.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_073.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_074.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_075.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_076.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_077.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_078.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_079.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_080.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_081.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_083.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_084.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_085.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_086.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_087.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_088.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_090.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_091.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_092.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_094.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_095.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_096.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_097.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_100.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_101.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_102.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_105.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_106.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_107.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_109.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_110.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_111.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_112.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_113.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_114.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_115.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_116.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_117.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_118.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_119.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_120.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_121.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_123.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_124.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_125.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_126.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_127.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_128.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_129.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_130.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_131.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_133.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_134.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_136.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_137.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_138.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_139.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_140.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_141.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_142.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_143.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_144.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_145.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_146.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_147.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_148.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_149.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_150.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_151.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_152.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_153.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_155.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_156.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_157.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_158.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_159.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_160.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_161.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_162.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_163.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_165.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_166.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_170.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_172.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_173.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_175.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_176.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_177.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_178.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_179.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_180.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_181.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_182.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_183.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_184.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_186.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_190.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_191.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_192.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_193.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_195.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_196.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_198.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_199.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_200.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_201.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_202.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_203.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_205.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_207.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_210.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_211.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_212.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_213.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_214.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_215.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_217.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_219.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_220.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_221.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_222.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_223.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_224.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_225.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_227.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_228.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_229.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_230.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_231.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_232.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_233.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_234.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_235.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_236.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_237.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_241.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_243.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_244.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_245.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_246.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_247.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_248.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_249.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_251.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_252.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_253.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_255.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_257.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_258.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_259.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_260.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_261.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_262.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_263.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_264.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_266.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_267.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_268.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_269.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_271.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_272.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_273.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_274.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_275.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_276.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_277.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_278.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_279.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_280.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_281.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_282.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_283.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_285.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_286.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_289.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_290.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_291.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_292.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_293.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_294.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_295.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_296.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_297.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_298.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_299.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_300.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_301.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_302.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_304.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_305.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_306.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_308.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_309.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_310.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_311.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_312.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_313.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_314.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_315.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_319.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_320.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_321.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_322.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_323.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_324.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_325.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_327.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_328.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_329.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_331.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_332.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_333.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_334.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_335.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_337.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_338.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_339.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_340.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_341.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_344.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_345.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_347.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_348.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_349.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_350.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_352.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_353.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_354.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_355.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_356.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_357.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_358.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_360.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_362.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_364.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_365.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_366.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_367.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_369.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_371.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_372.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_373.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_374.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_375.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_376.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_378.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_379.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_380.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_381.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_384.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_385.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_389.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_392.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_393.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_397.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_398.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_399.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_401.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_402.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_403.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_404.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_405.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_407.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_408.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_410.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_412.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_413.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_414.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_415.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_417.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_418.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_419.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_420.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_422.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_424.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_425.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_427.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_430.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_431.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_433.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_435.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_436.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_437.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_439.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_440.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_443.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_444.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_446.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_447.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_448.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_449.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_450.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_451.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_452.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_453.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_454.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_455.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_456.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_457.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_459.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_460.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_462.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_463.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_464.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_465.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_466.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_467.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_468.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_469.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_470.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_472.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_473.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_474.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_475.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_476.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_478.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_479.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_480.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_482.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_483.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_486.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_487.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_488.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_490.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_493.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_494.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_495.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_496.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_497.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_498.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_499.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_501.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_502.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_503.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_504.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_505.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_507.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_508.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_509.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_511.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_512.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_513.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_514.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_515.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_516.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_518.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_519.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_521.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_522.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_523.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_524.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_525.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_527.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_529.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_530.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_532.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_533.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_536.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_538.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_539.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_540.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_542.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_543.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_545.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_547.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_548.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_549.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_550.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_551.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_552.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_553.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_554.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_555.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_560.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_561.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_562.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_564.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_565.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_566.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_567.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_568.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_569.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_572.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_573.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_574.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_576.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_577.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_579.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_581.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_582.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_583.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_584.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_587.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_588.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_589.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_591.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_593.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_594.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_595.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_596.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_597.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_598.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_600.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_602.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_604.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_608.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_610.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_611.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_612.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_613.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_615.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_616.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_617.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_618.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_620.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_621.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_622.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_623.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_625.npy \n"," inflating: 
Dataset/Dataset/Training_data/Label/press_626.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_627.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_628.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_629.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_630.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_631.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_632.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_633.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_634.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_635.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_636.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_638.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_639.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_640.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_641.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_642.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_643.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_644.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_645.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_646.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_647.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_648.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_649.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_651.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_652.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_654.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_655.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_656.npy \n"," inflating: Dataset/Dataset/Training_data/Label/press_657.npy \n"," inflating: Dataset/Dataset/Training_data/train_pressure_min_std.txt \n"," inflating: Dataset/Dataset/Training_data/watertight_global_bounds.txt \n"," inflating: Dataset/Dataset/Training_data/watertight_meshes.txt \n"]}],"source":["!wget --header=\"Host: drive.usercontent.google.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Cookie: __Secure-ENID=12.SE=Yd0Bj-CLJ14fnd4qzdJHmwUs4B5zz46UaPC1cPJigNqqFV9PtM2CYyBpSbCkOyzUwzlEdZ1nZFf-igtGi7wSdJ_gqQSfQfh84r9egqFQAy9-GKayCRbdQKdera-2mkpuIT-c64CyR9vfNojM3hxZ9Dej-dGvtxlGjal9ttEHybw; __gsas=ID=ae0421b9a34b478c:T=1710758437:RT=1710758437:S=ALNI_MZP13R9ZOHbCzC0rgHSMrGXj6GCsg; HSID=A-4I-ZudDNUIB6EKH; SSID=A7v_1v9un6xAwVNku; APISID=ctK8IbLjeuDUmgys/AFnMSLWt9KddceDI6; SAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-1PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-3PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; SID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_kzuBV1TvOhAIC8VF1e9fpgACgYKATQSARQSFQHGX2Mi8LXUwWoIwNCEPU8Sy3mXUxoVAUF8yKqGXVfjTGz9gQal7nwGr4Pl0076; __Secure-1PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_PDa-DzVmbdGFPyxMQpk9_QACgYKAewSARQSFQHGX2MiAeee4fn0OWglWZfAygqkyBoVAUF8yKp-Sfmtnueimxc-0QbJRF9I0076; 
__Secure-3PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_g9IrMeU98APBo9Stp6wEnAACgYKAQASARQSFQHGX2MiFWtc9ucONXnpxBzlRdudEhoVAUF8yKoeZwCpJDnjfAFjGssHSUGm0076; NID=515=GQhY9nKKFCx3qFDjE0MA4ubjWNdef6xCIY_RfWOPWKEtyfBN3nAUl8WHI2VczjNQ4rVkj1XBAY8WNWHXyqSK10CfT4FxsFlPzrHIJpeTtm1nWRNBd9AAfBKJHz4XpESszntVUTE_59RklZuKo0vk1poReVi2da1PZKC3CTKH2Ll3gB5xuB9wf4bmq8ylVUuIROPJczr0XnCuUHV3qLdBvgy9_870b6UwOq1iOlIxFQFm01EZ4pqF4q1Ub3QRSWpEMLh4LSZFpJ5O255R5OV7krmEdDvH_sHoTEPZAg2PoEpwAyGK6Xp9qcLIlldgx5-5V86N8Wtb93uTlQuA_CFXb5_2eP3bgeX8txwlJ5SrldVjg9ctzYtBU2RwJKTSvdHfIG7lpOkg6XlkvDOcJpR3DihT_OlqnPn7drCAJpvVDv29hZn5XPMXaSrNdbG64OJ9urJEw5odEwsLYkkpC1vmlUcuoo52S5f6RQu0Z8kZiV8iRW6XIqHsSmQHunVaxk6xWCStUg; __Secure-1PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; __Secure-3PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; SIDCC=AKEyXzVI6aMX8lSDja86Yts3FBAtBzPCzVNgaX5BCz78NWsWzlT3yFWKUV7ZE46SFzE1GiBI-cHdTw; __Secure-1PSIDCC=AKEyXzUo4NQAwqqPMxP2eye-MFEbZmBIm_sZqRU1amttg0YoQkc8ZKSNXdHl5jNCMEbhrUHhS9-K; __Secure-3PSIDCC=AKEyXzWf2lIdmDLeZKpXSi9GytVQb6XudrYiNUBA5gW952YuLh8kL6T3IbBlu8zOTfGEcdUp5O1R\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1JwR0Q1ArTg6c47EF2ZuIBpQwCPgXKrO2&export=download&authuser=0&confirm=t&uuid=dc3aa13c-c3a9-458f-983a-8586798cb635&at=APZUnTX25XMxi-z-3wBcgR93IGsL%3A1719235792953\" -c -O 'Dataset.zip'\n","!mkdir -p Dataset\n","!unzip -o Dataset.zip -d Dataset/"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":344790,"status":"ok","timestamp":1720876573464,"user":{"displayName":"Jintang Li","userId":"11727574012572911053"},"user_tz":-480},"id":"VFzvkceAIWV2","outputId":"3036bbd2-9752-4891-bbb7-1ff4bb30445e"},"outputs":[{"name":"stdout","output_type":"stream","text":["--2024-07-13 13:10:27-- https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\n","Resolving ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)... 103.235.47.176, 2409:8c04:1001:1203:0:ff:b0bb:4f27\n","Connecting to ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)|103.235.47.176|:443... connected.\n","HTTP request sent, awaiting response... 
200 OK\n","Length: 4740031429 (4.4G) [application/octet-stream]\n","Saving to: ‘train_track_B.zip’\n","\n","train_track_B.zip 100%[===================>] 4.41G 21.8MB/s in 3m 48s \n","\n","2024-07-13 13:14:16 (19.8 MB/s) - ‘train_track_B.zip’ saved [4740031429/4740031429]\n","\n","Archive: train_track_B.zip\n"," inflating: Dataset/train_track_B/area_0002.npy \n"," inflating: Dataset/train_track_B/area_0003.npy \n"," inflating: Dataset/train_track_B/area_0004.npy \n"," inflating: Dataset/train_track_B/area_0005.npy \n"," inflating: Dataset/train_track_B/area_0006.npy \n"," inflating: Dataset/train_track_B/area_0011.npy \n"," inflating: Dataset/train_track_B/area_0012.npy \n"," inflating: Dataset/train_track_B/area_0013.npy \n"," inflating: Dataset/train_track_B/area_0015.npy \n"," inflating: Dataset/train_track_B/area_0017.npy \n"," inflating: Dataset/train_track_B/area_0018.npy \n"," inflating: Dataset/train_track_B/area_0020.npy \n"," inflating: Dataset/train_track_B/area_0021.npy \n"," inflating: Dataset/train_track_B/area_0022.npy \n"," inflating: Dataset/train_track_B/area_0023.npy \n"," inflating: Dataset/train_track_B/area_0024.npy \n"," inflating: Dataset/train_track_B/area_0026.npy \n"," inflating: Dataset/train_track_B/area_0029.npy \n"," inflating: Dataset/train_track_B/area_0030.npy \n"," inflating: Dataset/train_track_B/area_0036.npy \n"," inflating: Dataset/train_track_B/area_0037.npy \n"," inflating: Dataset/train_track_B/area_0038.npy \n"," inflating: Dataset/train_track_B/area_0039.npy \n"," inflating: Dataset/train_track_B/area_0040.npy \n"," inflating: Dataset/train_track_B/area_0041.npy \n"," inflating: Dataset/train_track_B/area_0042.npy \n"," inflating: Dataset/train_track_B/area_0043.npy \n"," inflating: Dataset/train_track_B/area_0044.npy \n"," inflating: Dataset/train_track_B/area_0048.npy \n"," inflating: Dataset/train_track_B/area_0049.npy \n"," inflating: Dataset/train_track_B/area_0051.npy \n"," inflating: Dataset/train_track_B/area_0052.npy \n"," inflating: Dataset/train_track_B/area_0055.npy \n"," inflating: Dataset/train_track_B/area_0056.npy \n"," inflating: Dataset/train_track_B/area_0057.npy \n"," inflating: Dataset/train_track_B/area_0059.npy \n"," inflating: Dataset/train_track_B/area_0062.npy \n"," inflating: Dataset/train_track_B/area_0064.npy \n"," inflating: Dataset/train_track_B/area_0066.npy \n"," inflating: Dataset/train_track_B/area_0067.npy \n"," inflating: Dataset/train_track_B/area_0068.npy \n"," inflating: Dataset/train_track_B/area_0071.npy \n"," inflating: Dataset/train_track_B/area_0074.npy \n"," inflating: Dataset/train_track_B/area_0075.npy \n"," inflating: Dataset/train_track_B/area_0077.npy \n"," inflating: Dataset/train_track_B/area_0078.npy \n"," inflating: Dataset/train_track_B/area_0080.npy \n"," inflating: Dataset/train_track_B/area_0081.npy \n"," inflating: Dataset/train_track_B/area_0082.npy \n"," inflating: Dataset/train_track_B/area_0084.npy \n"," inflating: Dataset/train_track_B/area_0085.npy \n"," inflating: Dataset/train_track_B/area_0086.npy \n"," inflating: Dataset/train_track_B/area_0087.npy \n"," inflating: Dataset/train_track_B/area_0088.npy \n"," inflating: Dataset/train_track_B/area_0089.npy \n"," inflating: Dataset/train_track_B/area_0090.npy \n"," inflating: Dataset/train_track_B/area_0092.npy \n"," inflating: Dataset/train_track_B/area_0093.npy \n"," inflating: Dataset/train_track_B/area_0094.npy \n"," inflating: Dataset/train_track_B/area_0095.npy \n"," inflating: Dataset/train_track_B/area_0097.npy \n"," 
inflating: Dataset/train_track_B/area_0098.npy \n"," inflating: Dataset/train_track_B/area_0100.npy \n"," inflating: Dataset/train_track_B/area_0101.npy \n"," inflating: Dataset/train_track_B/area_0102.npy \n"," inflating: Dataset/train_track_B/area_0103.npy \n"," inflating: Dataset/train_track_B/area_0104.npy \n"," inflating: Dataset/train_track_B/area_0106.npy \n"," inflating: Dataset/train_track_B/area_0107.npy \n"," inflating: Dataset/train_track_B/area_0108.npy \n"," inflating: Dataset/train_track_B/area_0109.npy \n"," inflating: Dataset/train_track_B/area_0110.npy \n"," inflating: Dataset/train_track_B/area_0113.npy \n"," inflating: Dataset/train_track_B/area_0114.npy \n"," inflating: Dataset/train_track_B/area_0115.npy \n"," inflating: Dataset/train_track_B/area_0116.npy \n"," inflating: Dataset/train_track_B/area_0117.npy \n"," inflating: Dataset/train_track_B/area_0118.npy \n"," inflating: Dataset/train_track_B/area_0119.npy \n"," inflating: Dataset/train_track_B/area_0120.npy \n"," inflating: Dataset/train_track_B/area_0121.npy \n"," inflating: Dataset/train_track_B/area_0122.npy \n"," inflating: Dataset/train_track_B/area_0124.npy \n"," inflating: Dataset/train_track_B/area_0125.npy \n"," inflating: Dataset/train_track_B/area_0126.npy \n"," inflating: Dataset/train_track_B/area_0128.npy \n"," inflating: Dataset/train_track_B/area_0129.npy \n"," inflating: Dataset/train_track_B/area_0130.npy \n"," inflating: Dataset/train_track_B/area_0131.npy \n"," inflating: Dataset/train_track_B/area_0132.npy \n"," inflating: Dataset/train_track_B/area_0133.npy \n"," inflating: Dataset/train_track_B/area_0134.npy \n"," inflating: Dataset/train_track_B/area_0135.npy \n"," inflating: Dataset/train_track_B/area_0136.npy \n"," inflating: Dataset/train_track_B/area_0138.npy \n"," inflating: Dataset/train_track_B/area_0139.npy \n"," inflating: Dataset/train_track_B/area_0140.npy \n"," inflating: Dataset/train_track_B/area_0141.npy \n"," inflating: Dataset/train_track_B/area_0143.npy \n"," inflating: Dataset/train_track_B/area_0145.npy \n"," inflating: Dataset/train_track_B/area_0146.npy \n"," inflating: Dataset/train_track_B/area_0148.npy \n"," inflating: Dataset/train_track_B/area_0149.npy \n"," inflating: Dataset/train_track_B/area_0150.npy \n"," inflating: Dataset/train_track_B/area_0151.npy \n"," inflating: Dataset/train_track_B/area_0153.npy \n"," inflating: Dataset/train_track_B/area_0154.npy \n"," inflating: Dataset/train_track_B/area_0156.npy \n"," inflating: Dataset/train_track_B/area_0157.npy \n"," inflating: Dataset/train_track_B/area_0158.npy \n"," inflating: Dataset/train_track_B/area_0161.npy \n"," inflating: Dataset/train_track_B/area_0162.npy \n"," inflating: Dataset/train_track_B/area_0163.npy \n"," inflating: Dataset/train_track_B/area_0164.npy \n"," inflating: Dataset/train_track_B/area_0166.npy \n"," inflating: Dataset/train_track_B/area_0167.npy \n"," inflating: Dataset/train_track_B/area_0168.npy \n"," inflating: Dataset/train_track_B/area_0170.npy \n"," inflating: Dataset/train_track_B/area_0171.npy \n"," inflating: Dataset/train_track_B/area_0172.npy \n"," inflating: Dataset/train_track_B/area_0174.npy \n"," inflating: Dataset/train_track_B/area_0175.npy \n"," inflating: Dataset/train_track_B/area_0183.npy \n"," inflating: Dataset/train_track_B/area_0184.npy \n"," inflating: Dataset/train_track_B/area_0185.npy \n"," inflating: Dataset/train_track_B/area_0189.npy \n"," inflating: Dataset/train_track_B/area_0190.npy \n"," inflating: Dataset/train_track_B/area_0193.npy \n"," 
inflating: Dataset/train_track_B/area_0194.npy \n"," inflating: Dataset/train_track_B/area_0195.npy \n"," inflating: Dataset/train_track_B/area_0197.npy \n"," inflating: Dataset/train_track_B/area_0201.npy \n"," inflating: Dataset/train_track_B/area_0203.npy \n"," inflating: Dataset/train_track_B/area_0204.npy \n"," inflating: Dataset/train_track_B/area_0205.npy \n"," inflating: Dataset/train_track_B/area_0206.npy \n"," inflating: Dataset/train_track_B/area_0208.npy \n"," inflating: Dataset/train_track_B/area_0210.npy \n"," inflating: Dataset/train_track_B/area_0211.npy \n"," inflating: Dataset/train_track_B/area_0216.npy \n"," inflating: Dataset/train_track_B/area_0217.npy \n"," inflating: Dataset/train_track_B/area_0219.npy \n"," inflating: Dataset/train_track_B/area_0220.npy \n"," inflating: Dataset/train_track_B/area_0227.npy \n"," inflating: Dataset/train_track_B/area_0228.npy \n"," inflating: Dataset/train_track_B/area_0229.npy \n"," inflating: Dataset/train_track_B/area_0232.npy \n"," inflating: Dataset/train_track_B/area_0234.npy \n"," inflating: Dataset/train_track_B/area_0235.npy \n"," inflating: Dataset/train_track_B/area_0236.npy \n"," inflating: Dataset/train_track_B/area_0238.npy \n"," inflating: Dataset/train_track_B/area_0239.npy \n"," inflating: Dataset/train_track_B/area_0240.npy \n"," inflating: Dataset/train_track_B/area_0241.npy \n"," inflating: Dataset/train_track_B/area_0245.npy \n"," inflating: Dataset/train_track_B/area_0246.npy \n"," inflating: Dataset/train_track_B/area_0247.npy \n"," inflating: Dataset/train_track_B/area_0248.npy \n"," inflating: Dataset/train_track_B/area_0249.npy \n"," inflating: Dataset/train_track_B/area_0252.npy \n"," inflating: Dataset/train_track_B/area_0253.npy \n"," inflating: Dataset/train_track_B/area_0254.npy \n"," inflating: Dataset/train_track_B/area_0256.npy \n"," inflating: Dataset/train_track_B/area_0257.npy \n"," inflating: Dataset/train_track_B/area_0259.npy \n"," inflating: Dataset/train_track_B/area_0264.npy \n"," inflating: Dataset/train_track_B/area_0265.npy \n"," inflating: Dataset/train_track_B/area_0266.npy \n"," inflating: Dataset/train_track_B/area_0268.npy \n"," inflating: Dataset/train_track_B/area_0269.npy \n"," inflating: Dataset/train_track_B/area_0271.npy \n"," inflating: Dataset/train_track_B/area_0272.npy \n"," inflating: Dataset/train_track_B/area_0273.npy \n"," inflating: Dataset/train_track_B/area_0275.npy \n"," inflating: Dataset/train_track_B/area_0276.npy \n"," inflating: Dataset/train_track_B/area_0277.npy \n"," inflating: Dataset/train_track_B/area_0279.npy \n"," inflating: Dataset/train_track_B/area_0280.npy \n"," inflating: Dataset/train_track_B/area_0281.npy \n"," inflating: Dataset/train_track_B/area_0284.npy \n"," inflating: Dataset/train_track_B/area_0285.npy \n"," inflating: Dataset/train_track_B/area_0286.npy \n"," inflating: Dataset/train_track_B/area_0288.npy \n"," inflating: Dataset/train_track_B/area_0289.npy \n"," inflating: Dataset/train_track_B/area_0290.npy \n"," inflating: Dataset/train_track_B/area_0291.npy \n"," inflating: Dataset/train_track_B/area_0294.npy \n"," inflating: Dataset/train_track_B/area_0296.npy \n"," inflating: Dataset/train_track_B/area_0297.npy \n"," inflating: Dataset/train_track_B/area_0298.npy \n"," inflating: Dataset/train_track_B/area_0301.npy \n"," inflating: Dataset/train_track_B/area_0304.npy \n"," inflating: Dataset/train_track_B/area_0305.npy \n"," inflating: Dataset/train_track_B/area_0306.npy \n"," inflating: Dataset/train_track_B/area_0307.npy \n"," 
inflating: Dataset/train_track_B/area_0308.npy \n"," inflating: Dataset/train_track_B/area_0310.npy \n"," inflating: Dataset/train_track_B/area_0311.npy \n"," inflating: Dataset/train_track_B/area_0314.npy \n"," inflating: Dataset/train_track_B/area_0315.npy \n"," inflating: Dataset/train_track_B/area_0316.npy \n"," inflating: Dataset/train_track_B/area_0320.npy \n"," inflating: Dataset/train_track_B/area_0321.npy \n"," inflating: Dataset/train_track_B/area_0323.npy \n"," inflating: Dataset/train_track_B/area_0324.npy \n"," inflating: Dataset/train_track_B/area_0327.npy \n"," inflating: Dataset/train_track_B/area_0330.npy \n"," inflating: Dataset/train_track_B/area_0331.npy \n"," inflating: Dataset/train_track_B/area_0332.npy \n"," inflating: Dataset/train_track_B/area_0333.npy \n"," inflating: Dataset/train_track_B/area_0334.npy \n"," inflating: Dataset/train_track_B/area_0337.npy \n"," inflating: Dataset/train_track_B/area_0338.npy \n"," inflating: Dataset/train_track_B/area_0339.npy \n"," inflating: Dataset/train_track_B/area_0340.npy \n"," inflating: Dataset/train_track_B/area_0341.npy \n"," inflating: Dataset/train_track_B/area_0342.npy \n"," inflating: Dataset/train_track_B/area_0343.npy \n"," inflating: Dataset/train_track_B/area_0344.npy \n"," inflating: Dataset/train_track_B/area_0345.npy \n"," inflating: Dataset/train_track_B/area_0346.npy \n"," inflating: Dataset/train_track_B/area_0348.npy \n"," inflating: Dataset/train_track_B/area_0349.npy \n"," inflating: Dataset/train_track_B/area_0351.npy \n"," inflating: Dataset/train_track_B/area_0352.npy \n"," inflating: Dataset/train_track_B/area_0353.npy \n"," inflating: Dataset/train_track_B/area_0354.npy \n"," inflating: Dataset/train_track_B/area_0356.npy \n"," inflating: Dataset/train_track_B/area_0357.npy \n"," inflating: Dataset/train_track_B/area_0359.npy \n"," inflating: Dataset/train_track_B/area_0360.npy \n"," inflating: Dataset/train_track_B/area_0361.npy \n"," inflating: Dataset/train_track_B/area_0363.npy \n"," inflating: Dataset/train_track_B/area_0364.npy \n"," inflating: Dataset/train_track_B/area_0365.npy \n"," inflating: Dataset/train_track_B/area_0366.npy \n"," inflating: Dataset/train_track_B/area_0367.npy \n"," inflating: Dataset/train_track_B/area_0368.npy \n"," inflating: Dataset/train_track_B/area_0369.npy \n"," inflating: Dataset/train_track_B/area_0371.npy \n"," inflating: Dataset/train_track_B/area_0373.npy \n"," inflating: Dataset/train_track_B/area_0376.npy \n"," inflating: Dataset/train_track_B/area_0377.npy \n"," inflating: Dataset/train_track_B/area_0378.npy \n"," inflating: Dataset/train_track_B/area_0379.npy \n"," inflating: Dataset/train_track_B/area_0381.npy \n"," inflating: Dataset/train_track_B/area_0382.npy \n"," inflating: Dataset/train_track_B/area_0383.npy \n"," inflating: Dataset/train_track_B/area_0384.npy \n"," inflating: Dataset/train_track_B/area_0385.npy \n"," inflating: Dataset/train_track_B/area_0387.npy \n"," inflating: Dataset/train_track_B/area_0388.npy \n"," inflating: Dataset/train_track_B/area_0389.npy \n"," inflating: Dataset/train_track_B/area_0392.npy \n"," inflating: Dataset/train_track_B/area_0393.npy \n"," inflating: Dataset/train_track_B/area_0394.npy \n"," inflating: Dataset/train_track_B/area_0395.npy \n"," inflating: Dataset/train_track_B/area_0396.npy \n"," inflating: Dataset/train_track_B/area_0398.npy \n"," inflating: Dataset/train_track_B/area_0399.npy \n"," inflating: Dataset/train_track_B/area_0400.npy \n"," inflating: Dataset/train_track_B/area_0401.npy \n"," 
inflating: Dataset/train_track_B/area_0402.npy \n"," inflating: Dataset/train_track_B/area_0403.npy \n"," inflating: Dataset/train_track_B/area_0404.npy \n"," inflating: Dataset/train_track_B/area_0405.npy \n"," inflating: Dataset/train_track_B/area_0407.npy \n"," inflating: Dataset/train_track_B/area_0408.npy \n"," inflating: Dataset/train_track_B/area_0409.npy \n"," inflating: Dataset/train_track_B/area_0410.npy \n"," inflating: Dataset/train_track_B/area_0411.npy \n"," inflating: Dataset/train_track_B/area_0413.npy \n"," inflating: Dataset/train_track_B/area_0416.npy \n"," inflating: Dataset/train_track_B/area_0417.npy \n"," inflating: Dataset/train_track_B/area_0421.npy \n"," inflating: Dataset/train_track_B/area_0422.npy \n"," inflating: Dataset/train_track_B/area_0423.npy \n"," inflating: Dataset/train_track_B/area_0424.npy \n"," inflating: Dataset/train_track_B/area_0425.npy \n"," inflating: Dataset/train_track_B/area_0428.npy \n"," inflating: Dataset/train_track_B/area_0429.npy \n"," inflating: Dataset/train_track_B/area_0430.npy \n"," inflating: Dataset/train_track_B/area_0431.npy \n"," inflating: Dataset/train_track_B/area_0432.npy \n"," inflating: Dataset/train_track_B/area_0435.npy \n"," inflating: Dataset/train_track_B/area_0438.npy \n"," inflating: Dataset/train_track_B/area_0439.npy \n"," inflating: Dataset/train_track_B/area_0441.npy \n"," inflating: Dataset/train_track_B/area_0444.npy \n"," inflating: Dataset/train_track_B/area_0445.npy \n"," inflating: Dataset/train_track_B/area_0449.npy \n"," inflating: Dataset/train_track_B/area_0450.npy \n"," inflating: Dataset/train_track_B/area_0451.npy \n"," inflating: Dataset/train_track_B/area_0452.npy \n"," inflating: Dataset/train_track_B/area_0453.npy \n"," inflating: Dataset/train_track_B/area_0456.npy \n"," inflating: Dataset/train_track_B/area_0457.npy \n"," inflating: Dataset/train_track_B/area_0458.npy \n"," inflating: Dataset/train_track_B/area_0459.npy \n"," inflating: Dataset/train_track_B/area_0460.npy \n"," inflating: Dataset/train_track_B/area_0461.npy \n"," inflating: Dataset/train_track_B/area_0463.npy \n"," inflating: Dataset/train_track_B/area_0464.npy \n"," inflating: Dataset/train_track_B/area_0465.npy \n"," inflating: Dataset/train_track_B/area_0467.npy \n"," inflating: Dataset/train_track_B/area_0469.npy \n"," inflating: Dataset/train_track_B/area_0471.npy \n"," inflating: Dataset/train_track_B/area_0472.npy \n"," inflating: Dataset/train_track_B/area_0474.npy \n"," inflating: Dataset/train_track_B/area_0475.npy \n"," inflating: Dataset/train_track_B/area_0477.npy \n"," inflating: Dataset/train_track_B/area_0478.npy \n"," inflating: Dataset/train_track_B/area_0479.npy \n"," inflating: Dataset/train_track_B/area_0480.npy \n"," inflating: Dataset/train_track_B/area_0481.npy \n"," inflating: Dataset/train_track_B/area_0482.npy \n"," inflating: Dataset/train_track_B/area_0485.npy \n"," inflating: Dataset/train_track_B/area_0486.npy \n"," inflating: Dataset/train_track_B/area_0487.npy \n"," inflating: Dataset/train_track_B/area_0488.npy \n"," inflating: Dataset/train_track_B/area_0489.npy \n"," inflating: Dataset/train_track_B/area_0492.npy \n"," inflating: Dataset/train_track_B/area_0493.npy \n"," inflating: Dataset/train_track_B/area_0494.npy \n"," inflating: Dataset/train_track_B/area_0497.npy \n"," inflating: Dataset/train_track_B/area_0498.npy \n"," inflating: Dataset/train_track_B/area_0499.npy \n"," inflating: Dataset/train_track_B/area_0501.npy \n"," inflating: Dataset/train_track_B/area_0502.npy \n"," 
inflating: Dataset/train_track_B/area_0503.npy \n"," inflating: Dataset/train_track_B/area_0504.npy \n"," inflating: Dataset/train_track_B/area_0507.npy \n"," inflating: Dataset/train_track_B/area_0508.npy \n"," inflating: Dataset/train_track_B/area_0509.npy \n"," inflating: Dataset/train_track_B/area_0513.npy \n"," inflating: Dataset/train_track_B/area_0514.npy \n"," inflating: Dataset/train_track_B/area_0515.npy \n"," inflating: Dataset/train_track_B/area_0517.npy \n"," inflating: Dataset/train_track_B/area_0518.npy \n"," inflating: Dataset/train_track_B/area_0519.npy \n"," inflating: Dataset/train_track_B/area_0520.npy \n"," inflating: Dataset/train_track_B/area_0521.npy \n"," inflating: Dataset/train_track_B/area_0522.npy \n"," inflating: Dataset/train_track_B/area_0523.npy \n"," inflating: Dataset/train_track_B/area_0524.npy \n"," inflating: Dataset/train_track_B/area_0525.npy \n"," inflating: Dataset/train_track_B/area_0526.npy \n"," inflating: Dataset/train_track_B/area_0527.npy \n"," inflating: Dataset/train_track_B/area_0528.npy \n"," inflating: Dataset/train_track_B/area_0529.npy \n"," inflating: Dataset/train_track_B/area_0530.npy \n"," inflating: Dataset/train_track_B/area_0531.npy \n"," inflating: Dataset/train_track_B/area_0534.npy \n"," inflating: Dataset/train_track_B/area_0535.npy \n"," inflating: Dataset/train_track_B/area_0536.npy \n"," inflating: Dataset/train_track_B/area_0538.npy \n"," inflating: Dataset/train_track_B/area_0541.npy \n"," inflating: Dataset/train_track_B/area_0542.npy \n"," inflating: Dataset/train_track_B/area_0544.npy \n"," inflating: Dataset/train_track_B/area_0545.npy \n"," inflating: Dataset/train_track_B/area_0546.npy \n"," inflating: Dataset/train_track_B/area_0547.npy \n"," inflating: Dataset/train_track_B/area_0550.npy \n"," inflating: Dataset/train_track_B/area_0551.npy \n"," inflating: Dataset/train_track_B/area_0553.npy \n"," inflating: Dataset/train_track_B/area_0555.npy \n"," inflating: Dataset/train_track_B/area_0557.npy \n"," inflating: Dataset/train_track_B/area_0558.npy \n"," inflating: Dataset/train_track_B/area_0561.npy \n"," inflating: Dataset/train_track_B/area_0563.npy \n"," inflating: Dataset/train_track_B/area_0564.npy \n"," inflating: Dataset/train_track_B/area_0565.npy \n"," inflating: Dataset/train_track_B/area_0567.npy \n"," inflating: Dataset/train_track_B/area_0568.npy \n"," inflating: Dataset/train_track_B/area_0571.npy \n"," inflating: Dataset/train_track_B/area_0574.npy \n"," inflating: Dataset/train_track_B/area_0576.npy \n"," inflating: Dataset/train_track_B/area_0579.npy \n"," inflating: Dataset/train_track_B/area_0580.npy \n"," inflating: Dataset/train_track_B/area_0582.npy \n"," inflating: Dataset/train_track_B/area_0584.npy \n"," inflating: Dataset/train_track_B/area_0585.npy \n"," inflating: Dataset/train_track_B/area_0588.npy \n"," inflating: Dataset/train_track_B/area_0589.npy \n"," inflating: Dataset/train_track_B/area_0590.npy \n"," inflating: Dataset/train_track_B/area_0591.npy \n"," inflating: Dataset/train_track_B/area_0592.npy \n"," inflating: Dataset/train_track_B/area_0593.npy \n"," inflating: Dataset/train_track_B/area_0594.npy \n"," inflating: Dataset/train_track_B/area_0595.npy \n"," inflating: Dataset/train_track_B/area_0596.npy \n"," inflating: Dataset/train_track_B/area_0597.npy \n"," inflating: Dataset/train_track_B/area_0598.npy \n"," inflating: Dataset/train_track_B/area_0600.npy \n"," inflating: Dataset/train_track_B/area_0602.npy \n"," inflating: Dataset/train_track_B/area_0605.npy \n"," 
inflating: Dataset/train_track_B/area_0608.npy \n"," inflating: Dataset/train_track_B/area_0609.npy \n"," inflating: Dataset/train_track_B/area_0611.npy \n"," inflating: Dataset/train_track_B/area_0612.npy \n"," inflating: Dataset/train_track_B/area_0613.npy \n"," inflating: Dataset/train_track_B/area_0614.npy \n"," inflating: Dataset/train_track_B/area_0618.npy \n"," inflating: Dataset/train_track_B/area_0619.npy \n"," inflating: Dataset/train_track_B/area_0620.npy \n"," inflating: Dataset/train_track_B/area_0621.npy \n"," inflating: Dataset/train_track_B/area_0622.npy \n"," inflating: Dataset/train_track_B/area_0623.npy \n"," inflating: Dataset/train_track_B/area_0624.npy \n"," inflating: Dataset/train_track_B/area_0625.npy \n"," inflating: Dataset/train_track_B/area_0627.npy \n"," inflating: Dataset/train_track_B/area_0628.npy \n"," inflating: Dataset/train_track_B/area_0629.npy \n"," inflating: Dataset/train_track_B/area_0630.npy \n"," inflating: Dataset/train_track_B/area_0631.npy \n"," inflating: Dataset/train_track_B/area_0632.npy \n"," inflating: Dataset/train_track_B/area_0633.npy \n"," inflating: Dataset/train_track_B/area_0634.npy \n"," inflating: Dataset/train_track_B/area_0635.npy \n"," inflating: Dataset/train_track_B/area_0637.npy \n"," inflating: Dataset/train_track_B/area_0638.npy \n"," inflating: Dataset/train_track_B/area_0639.npy \n"," inflating: Dataset/train_track_B/area_0640.npy \n"," inflating: Dataset/train_track_B/area_0641.npy \n"," inflating: Dataset/train_track_B/area_0643.npy \n"," inflating: Dataset/train_track_B/area_0644.npy \n"," inflating: Dataset/train_track_B/area_0645.npy \n"," inflating: Dataset/train_track_B/area_0646.npy \n"," inflating: Dataset/train_track_B/area_0648.npy \n"," inflating: Dataset/train_track_B/area_0650.npy \n"," inflating: Dataset/train_track_B/area_0651.npy \n"," inflating: Dataset/train_track_B/area_0652.npy \n"," inflating: Dataset/train_track_B/area_0653.npy \n"," inflating: Dataset/train_track_B/area_0654.npy \n"," inflating: Dataset/train_track_B/area_0656.npy \n"," inflating: Dataset/train_track_B/area_0657.npy \n"," inflating: Dataset/train_track_B/area_0658.npy \n"," inflating: Dataset/train_track_B/area_0661.npy \n"," inflating: Dataset/train_track_B/area_0663.npy \n"," inflating: Dataset/train_track_B/area_0664.npy \n"," inflating: Dataset/train_track_B/area_0665.npy \n"," inflating: Dataset/train_track_B/area_0666.npy \n"," inflating: Dataset/train_track_B/area_0667.npy \n"," inflating: Dataset/train_track_B/area_0668.npy \n"," inflating: Dataset/train_track_B/area_0669.npy \n"," inflating: Dataset/train_track_B/area_0671.npy \n"," inflating: Dataset/train_track_B/area_0672.npy \n"," inflating: Dataset/train_track_B/area_0673.npy \n"," inflating: Dataset/train_track_B/area_0674.npy \n"," inflating: Dataset/train_track_B/area_0676.npy \n"," inflating: Dataset/train_track_B/area_0677.npy \n"," inflating: Dataset/train_track_B/area_0678.npy \n"," inflating: Dataset/train_track_B/area_0679.npy \n"," inflating: Dataset/train_track_B/area_0680.npy \n"," inflating: Dataset/train_track_B/area_0682.npy \n"," inflating: Dataset/train_track_B/area_0686.npy \n"," inflating: Dataset/train_track_B/area_0688.npy \n"," inflating: Dataset/train_track_B/area_0689.npy \n"," inflating: Dataset/train_track_B/area_0690.npy \n"," inflating: Dataset/train_track_B/area_0691.npy \n"," inflating: Dataset/train_track_B/area_0692.npy \n"," inflating: Dataset/train_track_B/area_0693.npy \n"," inflating: Dataset/train_track_B/area_0694.npy \n"," 
inflating: Dataset/train_track_B/area_0695.npy \n"," inflating: Dataset/train_track_B/area_0697.npy \n"," inflating: Dataset/train_track_B/area_0699.npy \n"," inflating: Dataset/train_track_B/area_0700.npy \n"," inflating: Dataset/train_track_B/area_0701.npy \n"," inflating: Dataset/train_track_B/area_0703.npy \n"," inflating: Dataset/train_track_B/area_0704.npy \n"," inflating: Dataset/train_track_B/area_0706.npy \n"," inflating: Dataset/train_track_B/area_0707.npy \n"," inflating: Dataset/train_track_B/area_0708.npy \n"," inflating: Dataset/train_track_B/area_0709.npy \n"," inflating: Dataset/train_track_B/area_0711.npy \n"," inflating: Dataset/train_track_B/area_0712.npy \n"," inflating: Dataset/train_track_B/area_0713.npy \n"," inflating: Dataset/train_track_B/area_0714.npy \n"," inflating: Dataset/train_track_B/area_0715.npy \n"," inflating: Dataset/train_track_B/area_0716.npy \n"," inflating: Dataset/train_track_B/area_0718.npy \n"," inflating: Dataset/train_track_B/area_0719.npy \n"," inflating: Dataset/train_track_B/area_0720.npy \n"," inflating: Dataset/train_track_B/area_0721.npy \n"," inflating: Dataset/train_track_B/area_0722.npy \n"," inflating: Dataset/train_track_B/area_0724.npy \n"," inflating: Dataset/train_track_B/area_0727.npy \n"," inflating: Dataset/train_track_B/area_0728.npy \n"," inflating: Dataset/train_track_B/area_0729.npy \n"," inflating: Dataset/train_track_B/area_0730.npy \n"," inflating: Dataset/train_track_B/area_0731.npy \n"," inflating: Dataset/train_track_B/area_0733.npy \n"," inflating: Dataset/train_track_B/area_0735.npy \n"," inflating: Dataset/train_track_B/area_0736.npy \n"," inflating: Dataset/train_track_B/area_0737.npy \n"," inflating: Dataset/train_track_B/area_0740.npy \n"," inflating: Dataset/train_track_B/area_0742.npy \n"," inflating: Dataset/train_track_B/area_0743.npy \n"," inflating: Dataset/train_track_B/area_0744.npy \n"," inflating: Dataset/train_track_B/area_0745.npy \n"," inflating: Dataset/train_track_B/centroid_0002.npy \n"," inflating: Dataset/train_track_B/centroid_0003.npy \n"," inflating: Dataset/train_track_B/centroid_0004.npy \n"," inflating: Dataset/train_track_B/centroid_0005.npy \n"," inflating: Dataset/train_track_B/centroid_0006.npy \n"," inflating: Dataset/train_track_B/centroid_0011.npy \n"," inflating: Dataset/train_track_B/centroid_0012.npy \n"," inflating: Dataset/train_track_B/centroid_0013.npy \n"," inflating: Dataset/train_track_B/centroid_0015.npy \n"," inflating: Dataset/train_track_B/centroid_0017.npy \n"," inflating: Dataset/train_track_B/centroid_0018.npy \n"," inflating: Dataset/train_track_B/centroid_0020.npy \n"," inflating: Dataset/train_track_B/centroid_0021.npy \n"," inflating: Dataset/train_track_B/centroid_0022.npy \n"," inflating: Dataset/train_track_B/centroid_0023.npy \n"," inflating: Dataset/train_track_B/centroid_0024.npy \n"," inflating: Dataset/train_track_B/centroid_0026.npy \n"," inflating: Dataset/train_track_B/centroid_0029.npy \n"," inflating: Dataset/train_track_B/centroid_0030.npy \n"," inflating: Dataset/train_track_B/centroid_0036.npy \n"," inflating: Dataset/train_track_B/centroid_0037.npy \n"," inflating: Dataset/train_track_B/centroid_0038.npy \n"," inflating: Dataset/train_track_B/centroid_0039.npy \n"," inflating: Dataset/train_track_B/centroid_0040.npy \n"," inflating: Dataset/train_track_B/centroid_0041.npy \n"," inflating: Dataset/train_track_B/centroid_0042.npy \n"," inflating: Dataset/train_track_B/centroid_0043.npy \n"," inflating: Dataset/train_track_B/centroid_0044.npy 
\n"," inflating: Dataset/train_track_B/centroid_0048.npy \n"," inflating: Dataset/train_track_B/centroid_0049.npy \n"," inflating: Dataset/train_track_B/centroid_0051.npy \n"," inflating: Dataset/train_track_B/centroid_0052.npy \n"," inflating: Dataset/train_track_B/centroid_0055.npy \n"," inflating: Dataset/train_track_B/centroid_0056.npy \n"," inflating: Dataset/train_track_B/centroid_0057.npy \n"," inflating: Dataset/train_track_B/centroid_0059.npy \n"," inflating: Dataset/train_track_B/centroid_0062.npy \n"," inflating: Dataset/train_track_B/centroid_0064.npy \n"," inflating: Dataset/train_track_B/centroid_0066.npy \n"," inflating: Dataset/train_track_B/centroid_0067.npy \n"," inflating: Dataset/train_track_B/centroid_0068.npy \n"," inflating: Dataset/train_track_B/centroid_0071.npy \n"," inflating: Dataset/train_track_B/centroid_0074.npy \n"," inflating: Dataset/train_track_B/centroid_0075.npy \n"," inflating: Dataset/train_track_B/centroid_0077.npy \n"," inflating: Dataset/train_track_B/centroid_0078.npy \n"," inflating: Dataset/train_track_B/centroid_0080.npy \n"," inflating: Dataset/train_track_B/centroid_0081.npy \n"," inflating: Dataset/train_track_B/centroid_0082.npy \n"," inflating: Dataset/train_track_B/centroid_0084.npy \n"," inflating: Dataset/train_track_B/centroid_0085.npy \n"," inflating: Dataset/train_track_B/centroid_0086.npy \n"," inflating: Dataset/train_track_B/centroid_0087.npy \n"," inflating: Dataset/train_track_B/centroid_0088.npy \n"," inflating: Dataset/train_track_B/centroid_0089.npy \n"," inflating: Dataset/train_track_B/centroid_0090.npy \n"," inflating: Dataset/train_track_B/centroid_0092.npy \n"," inflating: Dataset/train_track_B/centroid_0093.npy \n"," inflating: Dataset/train_track_B/centroid_0094.npy \n"," inflating: Dataset/train_track_B/centroid_0095.npy \n"," inflating: Dataset/train_track_B/centroid_0097.npy \n"," inflating: Dataset/train_track_B/centroid_0098.npy \n"," inflating: Dataset/train_track_B/centroid_0100.npy \n"," inflating: Dataset/train_track_B/centroid_0101.npy \n"," inflating: Dataset/train_track_B/centroid_0102.npy \n"," inflating: Dataset/train_track_B/centroid_0103.npy \n"," inflating: Dataset/train_track_B/centroid_0104.npy \n"," inflating: Dataset/train_track_B/centroid_0106.npy \n"," inflating: Dataset/train_track_B/centroid_0107.npy \n"," inflating: Dataset/train_track_B/centroid_0108.npy \n"," inflating: Dataset/train_track_B/centroid_0109.npy \n"," inflating: Dataset/train_track_B/centroid_0110.npy \n"," inflating: Dataset/train_track_B/centroid_0113.npy \n"," inflating: Dataset/train_track_B/centroid_0114.npy \n"," inflating: Dataset/train_track_B/centroid_0115.npy \n"," inflating: Dataset/train_track_B/centroid_0116.npy \n"," inflating: Dataset/train_track_B/centroid_0117.npy \n"," inflating: Dataset/train_track_B/centroid_0118.npy \n"," inflating: Dataset/train_track_B/centroid_0119.npy \n"," inflating: Dataset/train_track_B/centroid_0120.npy \n"," inflating: Dataset/train_track_B/centroid_0121.npy \n"," inflating: Dataset/train_track_B/centroid_0122.npy \n"," inflating: Dataset/train_track_B/centroid_0124.npy \n"," inflating: Dataset/train_track_B/centroid_0125.npy \n"," inflating: Dataset/train_track_B/centroid_0126.npy \n"," inflating: Dataset/train_track_B/centroid_0128.npy \n"," inflating: Dataset/train_track_B/centroid_0129.npy \n"," inflating: Dataset/train_track_B/centroid_0130.npy \n"," inflating: Dataset/train_track_B/centroid_0131.npy \n"," inflating: Dataset/train_track_B/centroid_0132.npy \n"," inflating: 
Dataset/train_track_B/centroid_0133.npy \n"," inflating: Dataset/train_track_B/centroid_0134.npy \n"," inflating: Dataset/train_track_B/centroid_0135.npy \n"," inflating: Dataset/train_track_B/centroid_0136.npy \n"," inflating: Dataset/train_track_B/centroid_0138.npy \n"," inflating: Dataset/train_track_B/centroid_0139.npy \n"," inflating: Dataset/train_track_B/centroid_0140.npy \n"," inflating: Dataset/train_track_B/centroid_0141.npy \n"," inflating: Dataset/train_track_B/centroid_0143.npy \n"," inflating: Dataset/train_track_B/centroid_0145.npy \n"," inflating: Dataset/train_track_B/centroid_0146.npy \n"," inflating: Dataset/train_track_B/centroid_0148.npy \n"," inflating: Dataset/train_track_B/centroid_0149.npy \n"," inflating: Dataset/train_track_B/centroid_0150.npy \n"," inflating: Dataset/train_track_B/centroid_0151.npy \n"," inflating: Dataset/train_track_B/centroid_0153.npy \n"," inflating: Dataset/train_track_B/centroid_0154.npy \n"," inflating: Dataset/train_track_B/centroid_0156.npy \n"," inflating: Dataset/train_track_B/centroid_0157.npy \n"," inflating: Dataset/train_track_B/centroid_0158.npy \n"," inflating: Dataset/train_track_B/centroid_0161.npy \n"," inflating: Dataset/train_track_B/centroid_0162.npy \n"," inflating: Dataset/train_track_B/centroid_0163.npy \n"," inflating: Dataset/train_track_B/centroid_0164.npy \n"," inflating: Dataset/train_track_B/centroid_0166.npy \n"," inflating: Dataset/train_track_B/centroid_0167.npy \n"," inflating: Dataset/train_track_B/centroid_0168.npy \n"," inflating: Dataset/train_track_B/centroid_0170.npy \n"," inflating: Dataset/train_track_B/centroid_0171.npy \n"," inflating: Dataset/train_track_B/centroid_0172.npy \n"," inflating: Dataset/train_track_B/centroid_0174.npy \n"," inflating: Dataset/train_track_B/centroid_0175.npy \n"," inflating: Dataset/train_track_B/centroid_0183.npy \n"," inflating: Dataset/train_track_B/centroid_0184.npy \n"," inflating: Dataset/train_track_B/centroid_0185.npy \n"," inflating: Dataset/train_track_B/centroid_0189.npy \n"," inflating: Dataset/train_track_B/centroid_0190.npy \n"," inflating: Dataset/train_track_B/centroid_0193.npy \n"," inflating: Dataset/train_track_B/centroid_0194.npy \n"," inflating: Dataset/train_track_B/centroid_0195.npy \n"," inflating: Dataset/train_track_B/centroid_0197.npy \n"," inflating: Dataset/train_track_B/centroid_0201.npy \n"," inflating: Dataset/train_track_B/centroid_0203.npy \n"," inflating: Dataset/train_track_B/centroid_0204.npy \n"," inflating: Dataset/train_track_B/centroid_0205.npy \n"," inflating: Dataset/train_track_B/centroid_0206.npy \n"," inflating: Dataset/train_track_B/centroid_0208.npy \n"," inflating: Dataset/train_track_B/centroid_0210.npy \n"," inflating: Dataset/train_track_B/centroid_0211.npy \n"," inflating: Dataset/train_track_B/centroid_0216.npy \n"," inflating: Dataset/train_track_B/centroid_0217.npy \n"," inflating: Dataset/train_track_B/centroid_0219.npy \n"," inflating: Dataset/train_track_B/centroid_0220.npy \n"," inflating: Dataset/train_track_B/centroid_0227.npy \n"," inflating: Dataset/train_track_B/centroid_0228.npy \n"," inflating: Dataset/train_track_B/centroid_0229.npy \n"," inflating: Dataset/train_track_B/centroid_0232.npy \n"," inflating: Dataset/train_track_B/centroid_0234.npy \n"," inflating: Dataset/train_track_B/centroid_0235.npy \n"," inflating: Dataset/train_track_B/centroid_0236.npy \n"," inflating: Dataset/train_track_B/centroid_0238.npy \n"," inflating: Dataset/train_track_B/centroid_0239.npy \n"," inflating: 
Dataset/train_track_B/centroid_0240.npy \n"," inflating: Dataset/train_track_B/centroid_0241.npy \n"," inflating: Dataset/train_track_B/centroid_0245.npy \n"," inflating: Dataset/train_track_B/centroid_0246.npy \n"," inflating: Dataset/train_track_B/centroid_0247.npy \n"," inflating: Dataset/train_track_B/centroid_0248.npy \n"," inflating: Dataset/train_track_B/centroid_0249.npy \n"," inflating: Dataset/train_track_B/centroid_0252.npy \n"," inflating: Dataset/train_track_B/centroid_0253.npy \n"," inflating: Dataset/train_track_B/centroid_0254.npy \n"," inflating: Dataset/train_track_B/centroid_0256.npy \n"," inflating: Dataset/train_track_B/centroid_0257.npy \n"," inflating: Dataset/train_track_B/centroid_0259.npy \n"," inflating: Dataset/train_track_B/centroid_0264.npy \n"," inflating: Dataset/train_track_B/centroid_0265.npy \n"," inflating: Dataset/train_track_B/centroid_0266.npy \n"," inflating: Dataset/train_track_B/centroid_0268.npy \n"," inflating: Dataset/train_track_B/centroid_0269.npy \n"," inflating: Dataset/train_track_B/centroid_0271.npy \n"," inflating: Dataset/train_track_B/centroid_0272.npy \n"," inflating: Dataset/train_track_B/centroid_0273.npy \n"," inflating: Dataset/train_track_B/centroid_0275.npy \n"," inflating: Dataset/train_track_B/centroid_0276.npy \n"," inflating: Dataset/train_track_B/centroid_0277.npy \n"," inflating: Dataset/train_track_B/centroid_0279.npy \n"," inflating: Dataset/train_track_B/centroid_0280.npy \n"," inflating: Dataset/train_track_B/centroid_0281.npy \n"," inflating: Dataset/train_track_B/centroid_0284.npy \n"," inflating: Dataset/train_track_B/centroid_0285.npy \n"," inflating: Dataset/train_track_B/centroid_0286.npy \n"," inflating: Dataset/train_track_B/centroid_0288.npy \n"," inflating: Dataset/train_track_B/centroid_0289.npy \n"," inflating: Dataset/train_track_B/centroid_0290.npy \n"," inflating: Dataset/train_track_B/centroid_0291.npy \n"," inflating: Dataset/train_track_B/centroid_0294.npy \n"," inflating: Dataset/train_track_B/centroid_0296.npy \n"," inflating: Dataset/train_track_B/centroid_0297.npy \n"," inflating: Dataset/train_track_B/centroid_0298.npy \n"," inflating: Dataset/train_track_B/centroid_0301.npy \n"," inflating: Dataset/train_track_B/centroid_0304.npy \n"," inflating: Dataset/train_track_B/centroid_0305.npy \n"," inflating: Dataset/train_track_B/centroid_0306.npy \n"," inflating: Dataset/train_track_B/centroid_0307.npy \n"," inflating: Dataset/train_track_B/centroid_0308.npy \n"," inflating: Dataset/train_track_B/centroid_0310.npy \n"," inflating: Dataset/train_track_B/centroid_0311.npy \n"," inflating: Dataset/train_track_B/centroid_0314.npy \n"," inflating: Dataset/train_track_B/centroid_0315.npy \n"," inflating: Dataset/train_track_B/centroid_0316.npy \n"," inflating: Dataset/train_track_B/centroid_0320.npy \n"," inflating: Dataset/train_track_B/centroid_0321.npy \n"," inflating: Dataset/train_track_B/centroid_0323.npy \n"," inflating: Dataset/train_track_B/centroid_0324.npy \n"," inflating: Dataset/train_track_B/centroid_0327.npy \n"," inflating: Dataset/train_track_B/centroid_0330.npy \n"," inflating: Dataset/train_track_B/centroid_0331.npy \n"," inflating: Dataset/train_track_B/centroid_0332.npy \n"," inflating: Dataset/train_track_B/centroid_0333.npy \n"," inflating: Dataset/train_track_B/centroid_0334.npy \n"," inflating: Dataset/train_track_B/centroid_0337.npy \n"," inflating: Dataset/train_track_B/centroid_0338.npy \n"," inflating: Dataset/train_track_B/centroid_0339.npy \n"," inflating: 
Dataset/train_track_B/centroid_0340.npy \n"," inflating: Dataset/train_track_B/centroid_0341.npy \n"," inflating: Dataset/train_track_B/centroid_0342.npy \n"," inflating: Dataset/train_track_B/centroid_0343.npy \n"," inflating: Dataset/train_track_B/centroid_0344.npy \n"," inflating: Dataset/train_track_B/centroid_0345.npy \n"," inflating: Dataset/train_track_B/centroid_0346.npy \n"," inflating: Dataset/train_track_B/centroid_0348.npy \n"," inflating: Dataset/train_track_B/centroid_0349.npy \n"," inflating: Dataset/train_track_B/centroid_0351.npy \n"," inflating: Dataset/train_track_B/centroid_0352.npy \n"," inflating: Dataset/train_track_B/centroid_0353.npy \n"," inflating: Dataset/train_track_B/centroid_0354.npy \n"," inflating: Dataset/train_track_B/centroid_0356.npy \n"," inflating: Dataset/train_track_B/centroid_0357.npy \n"," inflating: Dataset/train_track_B/centroid_0359.npy \n"," inflating: Dataset/train_track_B/centroid_0360.npy \n"," inflating: Dataset/train_track_B/centroid_0361.npy \n"," inflating: Dataset/train_track_B/centroid_0363.npy \n"," inflating: Dataset/train_track_B/centroid_0364.npy \n"," inflating: Dataset/train_track_B/centroid_0365.npy \n"," inflating: Dataset/train_track_B/centroid_0366.npy \n"," inflating: Dataset/train_track_B/centroid_0367.npy \n"," inflating: Dataset/train_track_B/centroid_0368.npy \n"," inflating: Dataset/train_track_B/centroid_0369.npy \n"," inflating: Dataset/train_track_B/centroid_0371.npy \n"," inflating: Dataset/train_track_B/centroid_0373.npy \n"," inflating: Dataset/train_track_B/centroid_0376.npy \n"," inflating: Dataset/train_track_B/centroid_0377.npy \n"," inflating: Dataset/train_track_B/centroid_0378.npy \n"," inflating: Dataset/train_track_B/centroid_0379.npy \n"," inflating: Dataset/train_track_B/centroid_0381.npy \n"," inflating: Dataset/train_track_B/centroid_0382.npy \n"," inflating: Dataset/train_track_B/centroid_0383.npy \n"," inflating: Dataset/train_track_B/centroid_0384.npy \n"," inflating: Dataset/train_track_B/centroid_0385.npy \n"," inflating: Dataset/train_track_B/centroid_0387.npy \n"," inflating: Dataset/train_track_B/centroid_0388.npy \n"," inflating: Dataset/train_track_B/centroid_0389.npy \n"," inflating: Dataset/train_track_B/centroid_0392.npy \n"," inflating: Dataset/train_track_B/centroid_0393.npy \n"," inflating: Dataset/train_track_B/centroid_0394.npy \n"," inflating: Dataset/train_track_B/centroid_0395.npy \n"," inflating: Dataset/train_track_B/centroid_0396.npy \n"," inflating: Dataset/train_track_B/centroid_0398.npy \n"," inflating: Dataset/train_track_B/centroid_0399.npy \n"," inflating: Dataset/train_track_B/centroid_0400.npy \n"," inflating: Dataset/train_track_B/centroid_0401.npy \n"," inflating: Dataset/train_track_B/centroid_0402.npy \n"," inflating: Dataset/train_track_B/centroid_0403.npy \n"," inflating: Dataset/train_track_B/centroid_0404.npy \n"," inflating: Dataset/train_track_B/centroid_0405.npy \n"," inflating: Dataset/train_track_B/centroid_0407.npy \n"," inflating: Dataset/train_track_B/centroid_0408.npy \n"," inflating: Dataset/train_track_B/centroid_0409.npy \n"," inflating: Dataset/train_track_B/centroid_0410.npy \n"," inflating: Dataset/train_track_B/centroid_0411.npy \n"," inflating: Dataset/train_track_B/centroid_0413.npy \n"," inflating: Dataset/train_track_B/centroid_0416.npy \n"," inflating: Dataset/train_track_B/centroid_0417.npy \n"," inflating: Dataset/train_track_B/centroid_0421.npy \n"," inflating: Dataset/train_track_B/centroid_0422.npy \n"," inflating: 
Dataset/train_track_B/centroid_0423.npy \n"," inflating: Dataset/train_track_B/centroid_0424.npy \n"," inflating: Dataset/train_track_B/centroid_0425.npy \n"," inflating: Dataset/train_track_B/centroid_0428.npy \n"," inflating: Dataset/train_track_B/centroid_0429.npy \n"," inflating: Dataset/train_track_B/centroid_0430.npy \n"," inflating: Dataset/train_track_B/centroid_0431.npy \n"," inflating: Dataset/train_track_B/centroid_0432.npy \n"," inflating: Dataset/train_track_B/centroid_0435.npy \n"," inflating: Dataset/train_track_B/centroid_0438.npy \n"," inflating: Dataset/train_track_B/centroid_0439.npy \n"," inflating: Dataset/train_track_B/centroid_0441.npy \n"," inflating: Dataset/train_track_B/centroid_0444.npy \n"," inflating: Dataset/train_track_B/centroid_0445.npy \n"," inflating: Dataset/train_track_B/centroid_0449.npy \n"," inflating: Dataset/train_track_B/centroid_0450.npy \n"," inflating: Dataset/train_track_B/centroid_0451.npy \n"," inflating: Dataset/train_track_B/centroid_0452.npy \n"," inflating: Dataset/train_track_B/centroid_0453.npy \n"," inflating: Dataset/train_track_B/centroid_0456.npy \n"," inflating: Dataset/train_track_B/centroid_0457.npy \n"," inflating: Dataset/train_track_B/centroid_0458.npy \n"," inflating: Dataset/train_track_B/centroid_0459.npy \n"," inflating: Dataset/train_track_B/centroid_0460.npy \n"," inflating: Dataset/train_track_B/centroid_0461.npy \n"," inflating: Dataset/train_track_B/centroid_0463.npy \n"," inflating: Dataset/train_track_B/centroid_0464.npy \n"," inflating: Dataset/train_track_B/centroid_0465.npy \n"," inflating: Dataset/train_track_B/centroid_0467.npy \n"," inflating: Dataset/train_track_B/centroid_0469.npy \n"," inflating: Dataset/train_track_B/centroid_0471.npy \n"," inflating: Dataset/train_track_B/centroid_0472.npy \n"," inflating: Dataset/train_track_B/centroid_0474.npy \n"," inflating: Dataset/train_track_B/centroid_0475.npy \n"," inflating: Dataset/train_track_B/centroid_0477.npy \n"," inflating: Dataset/train_track_B/centroid_0478.npy \n"," inflating: Dataset/train_track_B/centroid_0479.npy \n"," inflating: Dataset/train_track_B/centroid_0480.npy \n"," inflating: Dataset/train_track_B/centroid_0481.npy \n"," inflating: Dataset/train_track_B/centroid_0482.npy \n"," inflating: Dataset/train_track_B/centroid_0485.npy \n"," inflating: Dataset/train_track_B/centroid_0486.npy \n"," inflating: Dataset/train_track_B/centroid_0487.npy \n"," inflating: Dataset/train_track_B/centroid_0488.npy \n"," inflating: Dataset/train_track_B/centroid_0489.npy \n"," inflating: Dataset/train_track_B/centroid_0492.npy \n"," inflating: Dataset/train_track_B/centroid_0493.npy \n"," inflating: Dataset/train_track_B/centroid_0494.npy \n"," inflating: Dataset/train_track_B/centroid_0497.npy \n"," inflating: Dataset/train_track_B/centroid_0498.npy \n"," inflating: Dataset/train_track_B/centroid_0499.npy \n"," inflating: Dataset/train_track_B/centroid_0501.npy \n"," inflating: Dataset/train_track_B/centroid_0502.npy \n"," inflating: Dataset/train_track_B/centroid_0503.npy \n"," inflating: Dataset/train_track_B/centroid_0504.npy \n"," inflating: Dataset/train_track_B/centroid_0507.npy \n"," inflating: Dataset/train_track_B/centroid_0508.npy \n"," inflating: Dataset/train_track_B/centroid_0509.npy \n"," inflating: Dataset/train_track_B/centroid_0513.npy \n"," inflating: Dataset/train_track_B/centroid_0514.npy \n"," inflating: Dataset/train_track_B/centroid_0515.npy \n"," inflating: Dataset/train_track_B/centroid_0517.npy \n"," inflating: 
Dataset/train_track_B/centroid_0518.npy \n"," inflating: Dataset/train_track_B/centroid_0519.npy \n"," inflating: Dataset/train_track_B/centroid_0520.npy \n"," inflating: Dataset/train_track_B/centroid_0521.npy \n"," inflating: Dataset/train_track_B/centroid_0522.npy \n"," inflating: Dataset/train_track_B/centroid_0523.npy \n"," inflating: Dataset/train_track_B/centroid_0524.npy \n"," inflating: Dataset/train_track_B/centroid_0525.npy \n"," inflating: Dataset/train_track_B/centroid_0526.npy \n"," inflating: Dataset/train_track_B/centroid_0527.npy \n"," inflating: Dataset/train_track_B/centroid_0528.npy \n"," inflating: Dataset/train_track_B/centroid_0529.npy \n"," inflating: Dataset/train_track_B/centroid_0530.npy \n"," inflating: Dataset/train_track_B/centroid_0531.npy \n"," inflating: Dataset/train_track_B/centroid_0534.npy \n"," inflating: Dataset/train_track_B/centroid_0535.npy \n"," inflating: Dataset/train_track_B/centroid_0536.npy \n"," inflating: Dataset/train_track_B/centroid_0538.npy \n"," inflating: Dataset/train_track_B/centroid_0541.npy \n"," inflating: Dataset/train_track_B/centroid_0542.npy \n"," inflating: Dataset/train_track_B/centroid_0544.npy \n"," inflating: Dataset/train_track_B/centroid_0545.npy \n"," inflating: Dataset/train_track_B/centroid_0546.npy \n"," inflating: Dataset/train_track_B/centroid_0547.npy \n"," inflating: Dataset/train_track_B/centroid_0550.npy \n"," inflating: Dataset/train_track_B/centroid_0551.npy \n"," inflating: Dataset/train_track_B/centroid_0553.npy \n"," inflating: Dataset/train_track_B/centroid_0555.npy \n"," inflating: Dataset/train_track_B/centroid_0557.npy \n"," inflating: Dataset/train_track_B/centroid_0558.npy \n"," inflating: Dataset/train_track_B/centroid_0561.npy \n"," inflating: Dataset/train_track_B/centroid_0563.npy \n"," inflating: Dataset/train_track_B/centroid_0564.npy \n"," inflating: Dataset/train_track_B/centroid_0565.npy \n"," inflating: Dataset/train_track_B/centroid_0567.npy \n"," inflating: Dataset/train_track_B/centroid_0568.npy \n"," inflating: Dataset/train_track_B/centroid_0571.npy \n"," inflating: Dataset/train_track_B/centroid_0574.npy \n"," inflating: Dataset/train_track_B/centroid_0576.npy \n"," inflating: Dataset/train_track_B/centroid_0579.npy \n"," inflating: Dataset/train_track_B/centroid_0580.npy \n"," inflating: Dataset/train_track_B/centroid_0582.npy \n"," inflating: Dataset/train_track_B/centroid_0584.npy \n"," inflating: Dataset/train_track_B/centroid_0585.npy \n"," inflating: Dataset/train_track_B/centroid_0588.npy \n"," inflating: Dataset/train_track_B/centroid_0589.npy \n"," inflating: Dataset/train_track_B/centroid_0590.npy \n"," inflating: Dataset/train_track_B/centroid_0591.npy \n"," inflating: Dataset/train_track_B/centroid_0592.npy \n"," inflating: Dataset/train_track_B/centroid_0593.npy \n"," inflating: Dataset/train_track_B/centroid_0594.npy \n"," inflating: Dataset/train_track_B/centroid_0595.npy \n"," inflating: Dataset/train_track_B/centroid_0596.npy \n"," inflating: Dataset/train_track_B/centroid_0597.npy \n"," inflating: Dataset/train_track_B/centroid_0598.npy \n"," inflating: Dataset/train_track_B/centroid_0600.npy \n"," inflating: Dataset/train_track_B/centroid_0602.npy \n"," inflating: Dataset/train_track_B/centroid_0605.npy \n"," inflating: Dataset/train_track_B/centroid_0608.npy \n"," inflating: Dataset/train_track_B/centroid_0609.npy \n"," inflating: Dataset/train_track_B/centroid_0611.npy \n"," inflating: Dataset/train_track_B/centroid_0612.npy \n"," inflating: 
Dataset/train_track_B/centroid_0613.npy \n"," inflating: Dataset/train_track_B/centroid_0614.npy \n"," inflating: Dataset/train_track_B/centroid_0618.npy \n"," inflating: Dataset/train_track_B/centroid_0619.npy \n"," inflating: Dataset/train_track_B/centroid_0620.npy \n"," inflating: Dataset/train_track_B/centroid_0621.npy \n"," inflating: Dataset/train_track_B/centroid_0622.npy \n"," inflating: Dataset/train_track_B/centroid_0623.npy \n"," inflating: Dataset/train_track_B/centroid_0624.npy \n"," inflating: Dataset/train_track_B/centroid_0625.npy \n"," inflating: Dataset/train_track_B/centroid_0627.npy \n"," inflating: Dataset/train_track_B/centroid_0628.npy \n"," inflating: Dataset/train_track_B/centroid_0629.npy \n"," inflating: Dataset/train_track_B/centroid_0630.npy \n"," inflating: Dataset/train_track_B/centroid_0631.npy \n"," inflating: Dataset/train_track_B/centroid_0632.npy \n"," inflating: Dataset/train_track_B/centroid_0633.npy \n"," inflating: Dataset/train_track_B/centroid_0634.npy \n"," inflating: Dataset/train_track_B/centroid_0635.npy \n"," inflating: Dataset/train_track_B/centroid_0637.npy \n"," inflating: Dataset/train_track_B/centroid_0638.npy \n"," inflating: Dataset/train_track_B/centroid_0639.npy \n"," inflating: Dataset/train_track_B/centroid_0640.npy \n"," inflating: Dataset/train_track_B/centroid_0641.npy \n"," inflating: Dataset/train_track_B/centroid_0643.npy \n"," inflating: Dataset/train_track_B/centroid_0644.npy \n"," inflating: Dataset/train_track_B/centroid_0645.npy \n"," inflating: Dataset/train_track_B/centroid_0646.npy \n"," inflating: Dataset/train_track_B/centroid_0648.npy \n"," inflating: Dataset/train_track_B/centroid_0650.npy \n"," inflating: Dataset/train_track_B/centroid_0651.npy \n"," inflating: Dataset/train_track_B/centroid_0652.npy \n"," inflating: Dataset/train_track_B/centroid_0653.npy \n"," inflating: Dataset/train_track_B/centroid_0654.npy \n"," inflating: Dataset/train_track_B/centroid_0656.npy \n"," inflating: Dataset/train_track_B/centroid_0657.npy \n"," inflating: Dataset/train_track_B/centroid_0658.npy \n"," inflating: Dataset/train_track_B/centroid_0661.npy \n"," inflating: Dataset/train_track_B/centroid_0663.npy \n"," inflating: Dataset/train_track_B/centroid_0664.npy \n"," inflating: Dataset/train_track_B/centroid_0665.npy \n"," inflating: Dataset/train_track_B/centroid_0666.npy \n"," inflating: Dataset/train_track_B/centroid_0667.npy \n"," inflating: Dataset/train_track_B/centroid_0668.npy \n"," inflating: Dataset/train_track_B/centroid_0669.npy \n"," inflating: Dataset/train_track_B/centroid_0671.npy \n"," inflating: Dataset/train_track_B/centroid_0672.npy \n"," inflating: Dataset/train_track_B/centroid_0673.npy \n"," inflating: Dataset/train_track_B/centroid_0674.npy \n"," inflating: Dataset/train_track_B/centroid_0676.npy \n"," inflating: Dataset/train_track_B/centroid_0677.npy \n"," inflating: Dataset/train_track_B/centroid_0678.npy \n"," inflating: Dataset/train_track_B/centroid_0679.npy \n"," inflating: Dataset/train_track_B/centroid_0680.npy \n"," inflating: Dataset/train_track_B/centroid_0682.npy \n"," inflating: Dataset/train_track_B/centroid_0686.npy \n"," inflating: Dataset/train_track_B/centroid_0688.npy \n"," inflating: Dataset/train_track_B/centroid_0689.npy \n"," inflating: Dataset/train_track_B/centroid_0690.npy \n"," inflating: Dataset/train_track_B/centroid_0691.npy \n"," inflating: Dataset/train_track_B/centroid_0692.npy \n"," inflating: Dataset/train_track_B/centroid_0693.npy \n"," inflating: 
Dataset/train_track_B/centroid_0694.npy \n"," inflating: Dataset/train_track_B/centroid_0695.npy \n"," inflating: Dataset/train_track_B/centroid_0697.npy \n"," inflating: Dataset/train_track_B/centroid_0699.npy \n"," inflating: Dataset/train_track_B/centroid_0700.npy \n"," inflating: Dataset/train_track_B/centroid_0701.npy \n"," inflating: Dataset/train_track_B/centroid_0703.npy \n"," inflating: Dataset/train_track_B/centroid_0704.npy \n"," inflating: Dataset/train_track_B/centroid_0706.npy \n"," inflating: Dataset/train_track_B/centroid_0707.npy \n"," inflating: Dataset/train_track_B/centroid_0708.npy \n"," inflating: Dataset/train_track_B/centroid_0709.npy \n"," inflating: Dataset/train_track_B/centroid_0711.npy \n"," inflating: Dataset/train_track_B/centroid_0712.npy \n"," inflating: Dataset/train_track_B/centroid_0713.npy \n"," inflating: Dataset/train_track_B/centroid_0714.npy \n"," inflating: Dataset/train_track_B/centroid_0715.npy \n"," inflating: Dataset/train_track_B/centroid_0716.npy \n"," inflating: Dataset/train_track_B/centroid_0718.npy \n"," inflating: Dataset/train_track_B/centroid_0719.npy \n"," inflating: Dataset/train_track_B/centroid_0720.npy \n"," inflating: Dataset/train_track_B/centroid_0721.npy \n"," inflating: Dataset/train_track_B/centroid_0722.npy \n"," inflating: Dataset/train_track_B/centroid_0724.npy \n"," inflating: Dataset/train_track_B/centroid_0727.npy \n"," inflating: Dataset/train_track_B/centroid_0728.npy \n"," inflating: Dataset/train_track_B/centroid_0729.npy \n"," inflating: Dataset/train_track_B/centroid_0730.npy \n"," inflating: Dataset/train_track_B/centroid_0731.npy \n"," inflating: Dataset/train_track_B/centroid_0733.npy \n"," inflating: Dataset/train_track_B/centroid_0735.npy \n"," inflating: Dataset/train_track_B/centroid_0736.npy \n"," inflating: Dataset/train_track_B/centroid_0737.npy \n"," inflating: Dataset/train_track_B/centroid_0740.npy \n"," inflating: Dataset/train_track_B/centroid_0742.npy \n"," inflating: Dataset/train_track_B/centroid_0743.npy \n"," inflating: Dataset/train_track_B/centroid_0744.npy \n"," inflating: Dataset/train_track_B/centroid_0745.npy \n"," inflating: Dataset/train_track_B/press_0002.npy \n"," inflating: Dataset/train_track_B/press_0003.npy \n"," inflating: Dataset/train_track_B/press_0004.npy \n"," inflating: Dataset/train_track_B/press_0005.npy \n"," inflating: Dataset/train_track_B/press_0006.npy \n"," inflating: Dataset/train_track_B/press_0011.npy \n"," inflating: Dataset/train_track_B/press_0012.npy \n"," inflating: Dataset/train_track_B/press_0013.npy \n"," inflating: Dataset/train_track_B/press_0015.npy \n"," inflating: Dataset/train_track_B/press_0017.npy \n"," inflating: Dataset/train_track_B/press_0018.npy \n"," inflating: Dataset/train_track_B/press_0020.npy \n"," inflating: Dataset/train_track_B/press_0021.npy \n"," inflating: Dataset/train_track_B/press_0022.npy \n"," inflating: Dataset/train_track_B/press_0023.npy \n"," inflating: Dataset/train_track_B/press_0024.npy \n"," inflating: Dataset/train_track_B/press_0026.npy \n"," inflating: Dataset/train_track_B/press_0029.npy \n"," inflating: Dataset/train_track_B/press_0030.npy \n"," inflating: Dataset/train_track_B/press_0036.npy \n"," inflating: Dataset/train_track_B/press_0037.npy \n"," inflating: Dataset/train_track_B/press_0038.npy \n"," inflating: Dataset/train_track_B/press_0039.npy \n"," inflating: Dataset/train_track_B/press_0040.npy \n"," inflating: Dataset/train_track_B/press_0041.npy \n"," inflating: Dataset/train_track_B/press_0042.npy 
\n"," inflating: Dataset/train_track_B/press_0043.npy \n"," inflating: Dataset/train_track_B/press_0044.npy \n"," inflating: Dataset/train_track_B/press_0048.npy \n"," inflating: Dataset/train_track_B/press_0049.npy \n"," inflating: Dataset/train_track_B/press_0051.npy \n"," inflating: Dataset/train_track_B/press_0052.npy \n"," inflating: Dataset/train_track_B/press_0055.npy \n"," inflating: Dataset/train_track_B/press_0056.npy \n"," inflating: Dataset/train_track_B/press_0057.npy \n"," inflating: Dataset/train_track_B/press_0059.npy \n"," inflating: Dataset/train_track_B/press_0062.npy \n"," inflating: Dataset/train_track_B/press_0064.npy \n"," inflating: Dataset/train_track_B/press_0066.npy \n"," inflating: Dataset/train_track_B/press_0067.npy \n"," inflating: Dataset/train_track_B/press_0068.npy \n"," inflating: Dataset/train_track_B/press_0071.npy \n"," inflating: Dataset/train_track_B/press_0074.npy \n"," inflating: Dataset/train_track_B/press_0075.npy \n"," inflating: Dataset/train_track_B/press_0077.npy \n"," inflating: Dataset/train_track_B/press_0078.npy \n"," inflating: Dataset/train_track_B/press_0080.npy \n"," inflating: Dataset/train_track_B/press_0081.npy \n"," inflating: Dataset/train_track_B/press_0082.npy \n"," inflating: Dataset/train_track_B/press_0084.npy \n"," inflating: Dataset/train_track_B/press_0085.npy \n"," inflating: Dataset/train_track_B/press_0086.npy \n"," inflating: Dataset/train_track_B/press_0087.npy \n"," inflating: Dataset/train_track_B/press_0088.npy \n"," inflating: Dataset/train_track_B/press_0089.npy \n"," inflating: Dataset/train_track_B/press_0090.npy \n"," inflating: Dataset/train_track_B/press_0092.npy \n"," inflating: Dataset/train_track_B/press_0093.npy \n"," inflating: Dataset/train_track_B/press_0094.npy \n"," inflating: Dataset/train_track_B/press_0095.npy \n"," inflating: Dataset/train_track_B/press_0097.npy \n"," inflating: Dataset/train_track_B/press_0098.npy \n"," inflating: Dataset/train_track_B/press_0100.npy \n"," inflating: Dataset/train_track_B/press_0101.npy \n"," inflating: Dataset/train_track_B/press_0102.npy \n"," inflating: Dataset/train_track_B/press_0103.npy \n"," inflating: Dataset/train_track_B/press_0104.npy \n"," inflating: Dataset/train_track_B/press_0106.npy \n"," inflating: Dataset/train_track_B/press_0107.npy \n"," inflating: Dataset/train_track_B/press_0108.npy \n"," inflating: Dataset/train_track_B/press_0109.npy \n"," inflating: Dataset/train_track_B/press_0110.npy \n"," inflating: Dataset/train_track_B/press_0113.npy \n"," inflating: Dataset/train_track_B/press_0114.npy \n"," inflating: Dataset/train_track_B/press_0115.npy \n"," inflating: Dataset/train_track_B/press_0116.npy \n"," inflating: Dataset/train_track_B/press_0117.npy \n"," inflating: Dataset/train_track_B/press_0118.npy \n"," inflating: Dataset/train_track_B/press_0119.npy \n"," inflating: Dataset/train_track_B/press_0120.npy \n"," inflating: Dataset/train_track_B/press_0121.npy \n"," inflating: Dataset/train_track_B/press_0122.npy \n"," inflating: Dataset/train_track_B/press_0124.npy \n"," inflating: Dataset/train_track_B/press_0125.npy \n"," inflating: Dataset/train_track_B/press_0126.npy \n"," inflating: Dataset/train_track_B/press_0128.npy \n"," inflating: Dataset/train_track_B/press_0129.npy \n"," inflating: Dataset/train_track_B/press_0130.npy \n"," inflating: Dataset/train_track_B/press_0131.npy \n"," inflating: Dataset/train_track_B/press_0132.npy \n"," inflating: Dataset/train_track_B/press_0133.npy \n"," inflating: 
Dataset/train_track_B/press_0134.npy \n"," inflating: Dataset/train_track_B/press_0135.npy \n"," inflating: Dataset/train_track_B/press_0136.npy \n"," inflating: Dataset/train_track_B/press_0138.npy \n"," inflating: Dataset/train_track_B/press_0139.npy \n"," inflating: Dataset/train_track_B/press_0140.npy \n"," inflating: Dataset/train_track_B/press_0141.npy \n"," inflating: Dataset/train_track_B/press_0143.npy \n"," inflating: Dataset/train_track_B/press_0145.npy \n"," inflating: Dataset/train_track_B/press_0146.npy \n"," inflating: Dataset/train_track_B/press_0148.npy \n"," inflating: Dataset/train_track_B/press_0149.npy \n"," inflating: Dataset/train_track_B/press_0150.npy \n"," inflating: Dataset/train_track_B/press_0151.npy \n"," inflating: Dataset/train_track_B/press_0153.npy \n"," inflating: Dataset/train_track_B/press_0154.npy \n"," inflating: Dataset/train_track_B/press_0156.npy \n"," inflating: Dataset/train_track_B/press_0157.npy \n"," inflating: Dataset/train_track_B/press_0158.npy \n"," inflating: Dataset/train_track_B/press_0161.npy \n"," inflating: Dataset/train_track_B/press_0162.npy \n"," inflating: Dataset/train_track_B/press_0163.npy \n"," inflating: Dataset/train_track_B/press_0164.npy \n"," inflating: Dataset/train_track_B/press_0166.npy \n"," inflating: Dataset/train_track_B/press_0167.npy \n"," inflating: Dataset/train_track_B/press_0168.npy \n"," inflating: Dataset/train_track_B/press_0170.npy \n"," inflating: Dataset/train_track_B/press_0171.npy \n"," inflating: Dataset/train_track_B/press_0172.npy \n"," inflating: Dataset/train_track_B/press_0174.npy \n"," inflating: Dataset/train_track_B/press_0175.npy \n"," inflating: Dataset/train_track_B/press_0183.npy \n"," inflating: Dataset/train_track_B/press_0184.npy \n"," inflating: Dataset/train_track_B/press_0185.npy \n"," inflating: Dataset/train_track_B/press_0189.npy \n"," inflating: Dataset/train_track_B/press_0190.npy \n"," inflating: Dataset/train_track_B/press_0193.npy \n"," inflating: Dataset/train_track_B/press_0194.npy \n"," inflating: Dataset/train_track_B/press_0195.npy \n"," inflating: Dataset/train_track_B/press_0197.npy \n"," inflating: Dataset/train_track_B/press_0201.npy \n"," inflating: Dataset/train_track_B/press_0203.npy \n"," inflating: Dataset/train_track_B/press_0204.npy \n"," inflating: Dataset/train_track_B/press_0205.npy \n"," inflating: Dataset/train_track_B/press_0206.npy \n"," inflating: Dataset/train_track_B/press_0208.npy \n"," inflating: Dataset/train_track_B/press_0210.npy \n"," inflating: Dataset/train_track_B/press_0211.npy \n"," inflating: Dataset/train_track_B/press_0216.npy \n"," inflating: Dataset/train_track_B/press_0217.npy \n"," inflating: Dataset/train_track_B/press_0219.npy \n"," inflating: Dataset/train_track_B/press_0220.npy \n"," inflating: Dataset/train_track_B/press_0227.npy \n"," inflating: Dataset/train_track_B/press_0228.npy \n"," inflating: Dataset/train_track_B/press_0229.npy \n"," inflating: Dataset/train_track_B/press_0232.npy \n"," inflating: Dataset/train_track_B/press_0234.npy \n"," inflating: Dataset/train_track_B/press_0235.npy \n"," inflating: Dataset/train_track_B/press_0236.npy \n"," inflating: Dataset/train_track_B/press_0238.npy \n"," inflating: Dataset/train_track_B/press_0239.npy \n"," inflating: Dataset/train_track_B/press_0240.npy \n"," inflating: Dataset/train_track_B/press_0241.npy \n"," inflating: Dataset/train_track_B/press_0245.npy \n"," inflating: Dataset/train_track_B/press_0246.npy \n"," inflating: Dataset/train_track_B/press_0247.npy \n"," 
inflating: Dataset/train_track_B/press_0248.npy \n"," inflating: Dataset/train_track_B/press_0249.npy \n"," inflating: Dataset/train_track_B/press_0252.npy \n"," inflating: Dataset/train_track_B/press_0253.npy \n"," inflating: Dataset/train_track_B/press_0254.npy \n"," inflating: Dataset/train_track_B/press_0256.npy \n"," inflating: Dataset/train_track_B/press_0257.npy \n"," inflating: Dataset/train_track_B/press_0259.npy \n"," inflating: Dataset/train_track_B/press_0264.npy \n"," inflating: Dataset/train_track_B/press_0265.npy \n"," inflating: Dataset/train_track_B/press_0266.npy \n"," inflating: Dataset/train_track_B/press_0268.npy \n"," inflating: Dataset/train_track_B/press_0269.npy \n"," inflating: Dataset/train_track_B/press_0271.npy \n"," inflating: Dataset/train_track_B/press_0272.npy \n"," inflating: Dataset/train_track_B/press_0273.npy \n"," inflating: Dataset/train_track_B/press_0275.npy \n"," inflating: Dataset/train_track_B/press_0276.npy \n"," inflating: Dataset/train_track_B/press_0277.npy \n"," inflating: Dataset/train_track_B/press_0279.npy \n"," inflating: Dataset/train_track_B/press_0280.npy \n"," inflating: Dataset/train_track_B/press_0281.npy \n"," inflating: Dataset/train_track_B/press_0284.npy \n"," inflating: Dataset/train_track_B/press_0285.npy \n"," inflating: Dataset/train_track_B/press_0286.npy \n"," inflating: Dataset/train_track_B/press_0288.npy \n"," inflating: Dataset/train_track_B/press_0289.npy \n"," inflating: Dataset/train_track_B/press_0290.npy \n"," inflating: Dataset/train_track_B/press_0291.npy \n"," inflating: Dataset/train_track_B/press_0294.npy \n"," inflating: Dataset/train_track_B/press_0296.npy \n"," inflating: Dataset/train_track_B/press_0297.npy \n"," inflating: Dataset/train_track_B/press_0298.npy \n"," inflating: Dataset/train_track_B/press_0301.npy \n"," inflating: Dataset/train_track_B/press_0304.npy \n"," inflating: Dataset/train_track_B/press_0305.npy \n"," inflating: Dataset/train_track_B/press_0306.npy \n"," inflating: Dataset/train_track_B/press_0307.npy \n"," inflating: Dataset/train_track_B/press_0308.npy \n"," inflating: Dataset/train_track_B/press_0310.npy \n"," inflating: Dataset/train_track_B/press_0311.npy \n"," inflating: Dataset/train_track_B/press_0314.npy \n"," inflating: Dataset/train_track_B/press_0315.npy \n"," inflating: Dataset/train_track_B/press_0316.npy \n"," inflating: Dataset/train_track_B/press_0320.npy \n"," inflating: Dataset/train_track_B/press_0321.npy \n"," inflating: Dataset/train_track_B/press_0323.npy \n"," inflating: Dataset/train_track_B/press_0324.npy \n"," inflating: Dataset/train_track_B/press_0327.npy \n"," inflating: Dataset/train_track_B/press_0330.npy \n"," inflating: Dataset/train_track_B/press_0331.npy \n"," inflating: Dataset/train_track_B/press_0332.npy \n"," inflating: Dataset/train_track_B/press_0333.npy \n"," inflating: Dataset/train_track_B/press_0334.npy \n"," inflating: Dataset/train_track_B/press_0337.npy \n"," inflating: Dataset/train_track_B/press_0338.npy \n"," inflating: Dataset/train_track_B/press_0339.npy \n"," inflating: Dataset/train_track_B/press_0340.npy \n"," inflating: Dataset/train_track_B/press_0341.npy \n"," inflating: Dataset/train_track_B/press_0342.npy \n"," inflating: Dataset/train_track_B/press_0343.npy \n"," inflating: Dataset/train_track_B/press_0344.npy \n"," inflating: Dataset/train_track_B/press_0345.npy \n"," inflating: Dataset/train_track_B/press_0346.npy \n"," inflating: Dataset/train_track_B/press_0348.npy \n"," inflating: 
Dataset/train_track_B/press_0349.npy \n"," inflating: Dataset/train_track_B/press_0351.npy \n"," inflating: Dataset/train_track_B/press_0352.npy \n"," inflating: Dataset/train_track_B/press_0353.npy \n"," inflating: Dataset/train_track_B/press_0354.npy \n"," inflating: Dataset/train_track_B/press_0356.npy \n"," inflating: Dataset/train_track_B/press_0357.npy \n"," inflating: Dataset/train_track_B/press_0359.npy \n"," inflating: Dataset/train_track_B/press_0360.npy \n"," inflating: Dataset/train_track_B/press_0361.npy \n"," inflating: Dataset/train_track_B/press_0363.npy \n"," inflating: Dataset/train_track_B/press_0364.npy \n"," inflating: Dataset/train_track_B/press_0365.npy \n"," inflating: Dataset/train_track_B/press_0366.npy \n"," inflating: Dataset/train_track_B/press_0367.npy \n"," inflating: Dataset/train_track_B/press_0368.npy \n"," inflating: Dataset/train_track_B/press_0369.npy \n"," inflating: Dataset/train_track_B/press_0371.npy \n"," inflating: Dataset/train_track_B/press_0373.npy \n"," inflating: Dataset/train_track_B/press_0376.npy \n"," inflating: Dataset/train_track_B/press_0377.npy \n"," inflating: Dataset/train_track_B/press_0378.npy \n"," inflating: Dataset/train_track_B/press_0379.npy \n"," inflating: Dataset/train_track_B/press_0381.npy \n"," inflating: Dataset/train_track_B/press_0382.npy \n"," inflating: Dataset/train_track_B/press_0383.npy \n"," inflating: Dataset/train_track_B/press_0384.npy \n"," inflating: Dataset/train_track_B/press_0385.npy \n"," inflating: Dataset/train_track_B/press_0387.npy \n"," inflating: Dataset/train_track_B/press_0388.npy \n"," inflating: Dataset/train_track_B/press_0389.npy \n"," inflating: Dataset/train_track_B/press_0392.npy \n"," inflating: Dataset/train_track_B/press_0393.npy \n"," inflating: Dataset/train_track_B/press_0394.npy \n"," inflating: Dataset/train_track_B/press_0395.npy \n"," inflating: Dataset/train_track_B/press_0396.npy \n"," inflating: Dataset/train_track_B/press_0398.npy \n"," inflating: Dataset/train_track_B/press_0399.npy \n"," inflating: Dataset/train_track_B/press_0400.npy \n"," inflating: Dataset/train_track_B/press_0401.npy \n"," inflating: Dataset/train_track_B/press_0402.npy \n"," inflating: Dataset/train_track_B/press_0403.npy \n"," inflating: Dataset/train_track_B/press_0404.npy \n"," inflating: Dataset/train_track_B/press_0405.npy \n"," inflating: Dataset/train_track_B/press_0407.npy \n"," inflating: Dataset/train_track_B/press_0408.npy \n"," inflating: Dataset/train_track_B/press_0409.npy \n"," inflating: Dataset/train_track_B/press_0410.npy \n"," inflating: Dataset/train_track_B/press_0411.npy \n"," inflating: Dataset/train_track_B/press_0413.npy \n"," inflating: Dataset/train_track_B/press_0416.npy \n"," inflating: Dataset/train_track_B/press_0417.npy \n"," inflating: Dataset/train_track_B/press_0421.npy \n"," inflating: Dataset/train_track_B/press_0422.npy \n"," inflating: Dataset/train_track_B/press_0423.npy \n"," inflating: Dataset/train_track_B/press_0424.npy \n"," inflating: Dataset/train_track_B/press_0425.npy \n"," inflating: Dataset/train_track_B/press_0428.npy \n"," inflating: Dataset/train_track_B/press_0429.npy \n"," inflating: Dataset/train_track_B/press_0430.npy \n"," inflating: Dataset/train_track_B/press_0431.npy \n"," inflating: Dataset/train_track_B/press_0432.npy \n"," inflating: Dataset/train_track_B/press_0435.npy \n"," inflating: Dataset/train_track_B/press_0438.npy \n"," inflating: Dataset/train_track_B/press_0439.npy \n"," inflating: Dataset/train_track_B/press_0441.npy \n"," 
[... unzip output truncated: repeated "inflating: Dataset/train_track_B/press_NNNN.npy" entries for press_0444.npy through press_0721.npy omitted; the listing resumes with press_0722.npy below ...]
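Once the archive fetched by the wget cell below has been unpacked into Dataset/train_track_B/, the extracted press_*.npy files can be sanity-checked with NumPy. A minimal sketch (paths follow the unzip listing above; the array shapes depend on the competition data and are not assumed here):

import glob

import numpy as np

# List the extracted track-B pressure arrays and peek at the first one.
files = sorted(glob.glob("Dataset/train_track_B/press_*.npy"))
print(f"found {len(files)} pressure files")
if files:
    sample = np.load(files[0])
    print(files[0], sample.shape, sample.dtype)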
Dataset/train_track_B/press_0722.npy \n"," inflating: Dataset/train_track_B/press_0724.npy \n"," inflating: Dataset/train_track_B/press_0727.npy \n"," inflating: Dataset/train_track_B/press_0728.npy \n"," inflating: Dataset/train_track_B/press_0729.npy \n"," inflating: Dataset/train_track_B/press_0730.npy \n"," inflating: Dataset/train_track_B/press_0731.npy \n"," inflating: Dataset/train_track_B/press_0733.npy \n"," inflating: Dataset/train_track_B/press_0735.npy \n"," inflating: Dataset/train_track_B/press_0736.npy \n"," inflating: Dataset/train_track_B/press_0737.npy \n"," inflating: Dataset/train_track_B/press_0740.npy \n"," inflating: Dataset/train_track_B/press_0742.npy \n"," inflating: Dataset/train_track_B/press_0743.npy \n"," inflating: Dataset/train_track_B/press_0744.npy \n"," inflating: Dataset/train_track_B/press_0745.npy \n"]}],"source":["!wget --header=\"Host: ai-studio-online.bj.bcebos.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Referer: https://aistudio.baidu.com/\" \"https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\" -c -O 'train_track_B.zip'\n","!mkdir -p Dataset/train_track_B && unzip -o train_track_B.zip -d Dataset/train_track_B/\n"]}],"metadata":{"colab":{"authorship_tag":"ABX9TyNUH9p5eYZb0k/VZfG/jPGH","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0} diff --git a/jointContribution/IJCAI_2024/leejt/flashbert.py b/jointContribution/IJCAI_2024/leejt/flashbert.py index 26d9df469b..a40abb4b7a 100644 --- a/jointContribution/IJCAI_2024/leejt/flashbert.py +++ b/jointContribution/IJCAI_2024/leejt/flashbert.py @@ -1,785 +1,785 @@ -import logging -from typing import List -from typing import Optional -from typing import Tuple -from typing import Union - -import paddle -import paddlenlp -import utils.paddle_aux # NOQA -from paddlenlp.transformers.activations import ACT2FN -from paddlenlp.transformers.bert.configuration import BertConfig - -# fmt: off -from paddlenlp.transformers.model_outputs import BaseModelOutputWithPastAndCrossAttentions # NOQA - -# fmt: on -from paddlenlp.transformers.model_utils import PretrainedModel -from paddlenlp.transformers.model_utils import apply_chunking_to_forward -from paddlenlp.transformers.model_utils import find_pruneable_heads_and_indices -from paddlenlp.transformers.model_utils import prune_linear_layer - -logger = logging.getLogger(name=__name__) - - -class BertEmbeddings(paddle.nn.Layer): - """Construct the embeddings from word, position and token_type embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = paddle.nn.Embedding( - num_embeddings=config.vocab_size, - embedding_dim=config.hidden_size, - padding_idx=config.pad_token_id, - ) - self.position_embeddings = paddle.nn.Embedding( - num_embeddings=config.max_position_embeddings, - embedding_dim=config.hidden_size, - ) 
- self.token_type_embeddings = paddle.nn.Embedding( - num_embeddings=config.type_vocab_size, embedding_dim=config.hidden_size - ) - self.LayerNorm = paddle.nn.LayerNorm( - normalized_shape=config.hidden_size, epsilon=config.layer_norm_eps - ) - self.dropout = paddle.nn.Dropout(p=config.hidden_dropout_prob) - self.position_embedding_type = getattr( - config, "position_embedding_type", "absolute" - ) - self.register_buffer( - name="position_ids", - tensor=paddle.arange(end=config.max_position_embeddings).expand( - shape=(1, -1) - ), - persistable=False, - ) - self.register_buffer( - name="token_type_ids", - tensor=paddle.zeros(shape=tuple(self.position_ids.shape), dtype="int64"), - persistable=False, - ) - - def forward( - self, - input_ids: Optional[paddle.Tensor] = None, - token_type_ids: Optional[paddle.Tensor] = None, - position_ids: Optional[paddle.Tensor] = None, - inputs_embeds: Optional[paddle.Tensor] = None, - past_key_values_length: int = 0, - ) -> paddle.Tensor: - if input_ids is not None: - input_shape = tuple(input_ids.shape) - else: - input_shape = tuple(inputs_embeds.shape)[:-1] - seq_length = input_shape[1] - if position_ids is None: - position_ids = self.position_ids[ - :, past_key_values_length : seq_length + past_key_values_length - ] - if token_type_ids is None: - if hasattr(self, "token_type_ids"): - buffered_token_type_ids = self.token_type_ids[:, :seq_length] - buffered_token_type_ids_expanded = buffered_token_type_ids.expand( - shape=[input_shape[0], seq_length] - ) - token_type_ids = buffered_token_type_ids_expanded - else: - token_type_ids = paddle.zeros(shape=input_shape, dtype="int64") - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - token_type_embeddings = self.token_type_embeddings(token_type_ids) - embeddings = inputs_embeds + token_type_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class BertSelfAttention(paddle.nn.Layer): - def __init__(self, config, position_embedding_type=None): - super().__init__() - if config.hidden_size % config.num_attention_heads != 0 and not hasattr( - config, "embedding_size" - ): - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple", - f" of the number of attention heads ({config.num_attention_heads})", - ) - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.query = paddle.nn.Linear( - in_features=config.hidden_size, out_features=self.all_head_size - ) - self.key = paddle.nn.Linear( - in_features=config.hidden_size, out_features=self.all_head_size - ) - self.value = paddle.nn.Linear( - in_features=config.hidden_size, out_features=self.all_head_size - ) - self.dropout = paddle.nn.Dropout(p=config.attention_probs_dropout_prob) - self.position_embedding_type = position_embedding_type or getattr( - config, "position_embedding_type", "absolute" - ) - if ( - self.position_embedding_type == "relative_key" - or self.position_embedding_type == "relative_key_query" - ): - self.max_position_embeddings = config.max_position_embeddings - self.distance_embedding = paddle.nn.Embedding( - num_embeddings=2 * config.max_position_embeddings - 1, - embedding_dim=self.attention_head_size, - ) - self.is_decoder = config.is_decoder - - def transpose_for_scores(self, x: paddle.Tensor) -> paddle.Tensor: - new_x_shape = tuple(x.shape)[:-1] + ( - 
self.num_attention_heads, - self.attention_head_size, - ) - x = x.view(new_x_shape) - return x - - def forward( - self, - hidden_states: paddle.Tensor, - attention_mask: Optional[paddle.Tensor] = None, - head_mask: Optional[paddle.Tensor] = None, - encoder_hidden_states: Optional[paddle.Tensor] = None, - encoder_attention_mask: Optional[paddle.Tensor] = None, - past_key_value: Optional[Tuple[Tuple[paddle.Tensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[paddle.Tensor]: - mixed_query_layer = self.query(hidden_states) - is_cross_attention = encoder_hidden_states is not None - if is_cross_attention and past_key_value is not None: - key_layer = past_key_value[0] - value_layer = past_key_value[1] - # attention_mask = encoder_attention_mask - elif is_cross_attention: - key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) - value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) - # attention_mask = encoder_attention_mask - elif past_key_value is not None: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - key_layer = paddle.concat(x=[past_key_value[0], key_layer], axis=2) - value_layer = paddle.concat(x=[past_key_value[1], value_layer], axis=2) - else: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - query_layer = self.transpose_for_scores(mixed_query_layer) - if self.training: - drop_rate = self.dropout.p - else: - drop_rate = 0 - query_layer = query_layer.astype(paddle.float16) - key_layer = key_layer.astype(paddle.float16) - value_layer = value_layer.astype(paddle.float16) - context_layer = paddle.nn.functional.scaled_dot_product_attention( - query_layer, - key_layer, - value_layer, - dropout_p=drop_rate, - is_causal=False, - ) - context_layer = context_layer.astype(paddle.float32) - # use_cache = past_key_value is not None - new_context_layer_shape = tuple(context_layer.shape)[:-2] + ( - self.all_head_size, - ) - context_layer = context_layer.view(new_context_layer_shape) - outputs = (context_layer,) - if self.is_decoder: - outputs = outputs + (past_key_value,) - return outputs - - -class BertSelfOutput(paddle.nn.Layer): - def __init__(self, config): - super().__init__() - self.dense = paddle.nn.Linear( - in_features=config.hidden_size, out_features=config.hidden_size - ) - self.LayerNorm = paddle.nn.LayerNorm( - normalized_shape=config.hidden_size, epsilon=config.layer_norm_eps - ) - self.dropout = paddle.nn.Dropout(p=config.hidden_dropout_prob) - - def forward( - self, hidden_states: paddle.Tensor, input_tensor: paddle.Tensor - ) -> paddle.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertAttention(paddle.nn.Layer): - def __init__(self, config, position_embedding_type=None): - super().__init__() - self.self = BertSelfAttention( - config, position_embedding_type=position_embedding_type - ) - self.output = BertSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, - self.self.num_attention_heads, - self.self.attention_head_size, - self.pruned_heads, - ) - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = 
prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = ( - self.self.attention_head_size * self.self.num_attention_heads - ) - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states: paddle.Tensor, - attention_mask: Optional[paddle.Tensor] = None, - head_mask: Optional[paddle.Tensor] = None, - encoder_hidden_states: Optional[paddle.Tensor] = None, - encoder_attention_mask: Optional[paddle.Tensor] = None, - past_key_value: Optional[Tuple[Tuple[paddle.Tensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[paddle.Tensor]: - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] - return outputs - - -class BertIntermediate(paddle.nn.Layer): - def __init__(self, config): - super().__init__() - self.dense = paddle.nn.Linear( - in_features=config.hidden_size, out_features=config.intermediate_size - ) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: paddle.Tensor) -> paddle.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -class BertOutput(paddle.nn.Layer): - def __init__(self, config): - super().__init__() - self.dense = paddle.nn.Linear( - in_features=config.intermediate_size, out_features=config.hidden_size - ) - self.LayerNorm = paddle.nn.LayerNorm( - normalized_shape=config.hidden_size, epsilon=config.layer_norm_eps - ) - self.dropout = paddle.nn.Dropout(p=config.hidden_dropout_prob) - - def forward( - self, hidden_states: paddle.Tensor, input_tensor: paddle.Tensor - ) -> paddle.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertLayer(paddle.nn.Layer): - def __init__(self, config): - super().__init__() - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - self.attention = BertAttention(config) - self.is_decoder = config.is_decoder - self.add_cross_attention = config.add_cross_attention - if self.add_cross_attention: - if not self.is_decoder: - raise ValueError( - f"{self} should be used as a decoder model if cross attention is added" - ) - self.crossattention = BertAttention( - config, position_embedding_type="absolute" - ) - self.intermediate = BertIntermediate(config) - self.output = BertOutput(config) - - def forward( - self, - hidden_states: paddle.Tensor, - attention_mask: Optional[paddle.Tensor] = None, - head_mask: Optional[paddle.Tensor] = None, - encoder_hidden_states: Optional[paddle.Tensor] = None, - encoder_attention_mask: Optional[paddle.Tensor] = None, - past_key_value: Optional[Tuple[Tuple[paddle.Tensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[paddle.Tensor]: - self_attn_past_key_value = ( - past_key_value[:2] if past_key_value is not None else None - ) - self_attention_outputs = self.attention( - hidden_states, - attention_mask, - head_mask, - output_attentions=output_attentions, - 
past_key_value=self_attn_past_key_value, - ) - attention_output = self_attention_outputs[0] - if self.is_decoder: - outputs = self_attention_outputs[1:-1] - present_key_value = self_attention_outputs[-1] - else: - outputs = self_attention_outputs[1:] - cross_attn_present_key_value = None - if self.is_decoder and encoder_hidden_states is not None: - if not hasattr(self, "crossattention"): - raise ValueError( - f"If `encoder_hidden_states` are passed, {self} has to be instantiated", - " with cross-attention layers by setting `config.add_cross_attention=True`", - ) - cross_attn_past_key_value = ( - past_key_value[-2:] if past_key_value is not None else None - ) - cross_attention_outputs = self.crossattention( - attention_output, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - cross_attn_past_key_value, - output_attentions, - ) - attention_output = cross_attention_outputs[0] - outputs = outputs + cross_attention_outputs[1:-1] - cross_attn_present_key_value = cross_attention_outputs[-1] - present_key_value = present_key_value + cross_attn_present_key_value - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, - self.chunk_size_feed_forward, - self.seq_len_dim, - attention_output, - ) - outputs = (layer_output,) + outputs - if self.is_decoder: - outputs = outputs + (present_key_value,) - return outputs - - def feed_forward_chunk(self, attention_output): - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - - -class BertEncoder(paddle.nn.Layer): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = paddle.nn.LayerList( - sublayers=[BertLayer(config) for _ in range(config.num_hidden_layers)] - ) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: paddle.Tensor, - attention_mask: Optional[paddle.Tensor] = None, - head_mask: Optional[paddle.Tensor] = None, - encoder_hidden_states: Optional[paddle.Tensor] = None, - encoder_attention_mask: Optional[paddle.Tensor] = None, - past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = False, - output_hidden_states: Optional[bool] = False, - return_dict: Optional[bool] = True, - ) -> Union[Tuple[paddle.Tensor], BaseModelOutputWithPastAndCrossAttentions,]: - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = ( - () if output_attentions and self.config.add_cross_attention else None - ) - if self.gradient_checkpointing and self.training: - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - next_decoder_cache = () if use_cache else None - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - layer_head_mask = head_mask[i] if head_mask is not None else None - past_key_value = past_key_values[i] if past_key_values is not None else None - if self.gradient_checkpointing and self.training: - layer_outputs = self._gradient_checkpointing_func( - layer_module.__call__, - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - hidden_states = layer_outputs[0] - if use_cache: - next_decoder_cache += (layer_outputs[-1],) - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - if self.config.add_cross_attention: - all_cross_attentions = all_cross_attentions + (layer_outputs[2],) - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, - ) - - -class BertPooler(paddle.nn.Layer): - def __init__(self, config): - super().__init__() - self.dense = paddle.nn.Linear( - in_features=config.hidden_size, out_features=config.hidden_size - ) - self.activation = paddle.nn.Tanh() - - def forward(self, hidden_states: paddle.Tensor) -> paddle.Tensor: - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class BertPreTrainedModel(PretrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = BertConfig - base_model_prefix = "bert" - supports_gradient_checkpointing = True - - def _init_weights(self, module): - """Initialize the weights""" - if isinstance(module, paddle.nn.Linear): - normal_tensor = paddle.normal( - mean=0.0, std=self.config.initializer_range, shape=module.weight.shape - ) - module.weight.set_value(normal_tensor) - if module.bias is not None: - module.bias.set_value(paddle.zeros_like(module.bias)) - elif isinstance(module, paddle.nn.Embedding): - normal_tensor = paddle.normal( - mean=0.0, std=self.config.initializer_range, shape=module.weight.shape - ) - module.weight.set_value(normal_tensor) - if module._padding_idx is not None: - normal_tensor[module._padding_idx] = 0.0 - module.weight.set_value(normal_tensor) - elif isinstance(module, paddle.nn.LayerNorm): - module.bias.set_value(paddle.zeros_like(module.bias)) - module.weight.set_value(paddle.ones_like(module.weight)) - - def post_init(self): - """ - A method executed at the end of each Transformer model initialization, to execute code that needs the model's - modules properly initialized (such as weight initialization). 
- """ - self.init_weights() - self._backward_compatibility_gradient_checkpointing() - - def _backward_compatibility_gradient_checkpointing(self): - if self.supports_gradient_checkpointing and getattr( - self.config, "gradient_checkpointing", False - ): - self.gradient_checkpointing_enable() - # Remove the attribute now that is has been consumed, so it's no saved in the config. - delattr(self.config, "gradient_checkpointing") - - def get_extended_attention_mask( - self, - attention_mask: paddle.Tensor, - input_shape: Tuple[int], - has_query: bool = False, - ) -> paddle.Tensor: - """ - Makes broadcastable attention and causal masks so that future and masked tokens are ignored. - Arguments: - attention_mask (`paddle.Tensor`): - Mask with ones indicating tokens to attend to, zeros for tokens to ignore. - input_shape (`Tuple[int]`): - The shape of the input to the model. - Returns: - `paddle.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. - """ - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. - if attention_mask.dim() == 3: - extended_attention_mask = attention_mask[:, None, :, :] - elif attention_mask.dim() == 2: - # Provided a padding mask of dimensions [batch_size, seq_length] - # - the model is an encoder, so make the mask broadcastable - # to [batch_size, num_heads, seq_length, seq_length] - extended_attention_mask = attention_mask[:, None, None, :] - else: - raise ValueError( - "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( - input_shape, attention_mask.shape - ) - ) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - extended_attention_mask = extended_attention_mask.cast( - dtype=self.config.dtype - ) # fp16 compatibility - extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 - return extended_attention_mask - - def get_head_mask( - self, - head_mask: Optional[paddle.Tensor], - num_hidden_layers: int, - is_attention_chunked: bool = False, - ) -> paddle.Tensor: - """ - Prepare the head mask if needed. - Args: - head_mask (`paddle.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): - The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). - num_hidden_layers (`int`): - The number of hidden layers in the model. - is_attention_chunked: (`bool`, *optional*, defaults to `False`): - Whether or not the attentions scores are computed by chunks or not. - Returns: - `paddle.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with - `[None]` for each layer. 
- """ - if head_mask is not None: - head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) - if is_attention_chunked is True: - head_mask = head_mask.unsqueeze(-1) - else: - head_mask = [None] * num_hidden_layers - - return head_mask - - -class BertModel(BertPreTrainedModel): - """ - - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in [Attention is - all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, - Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. - - To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set - to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and - `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. - """ - - def __init__(self, config, add_pooling_layer=True): - super().__init__(config) - self.config = config - self.config.type_vocab_size = 2 - self.embeddings = BertEmbeddings(config) - self.encoder = BertEncoder(config) - self.pooler = BertPooler(config) if add_pooling_layer else None - self.post_init() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PretrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - def forward( - self, - input_ids: Optional[paddle.Tensor] = None, - attention_mask: Optional[paddle.Tensor] = None, - token_type_ids: Optional[paddle.Tensor] = None, - position_ids: Optional[paddle.Tensor] = None, - head_mask: Optional[paddle.Tensor] = None, - inputs_embeds: Optional[paddle.Tensor] = None, - encoder_hidden_states: Optional[paddle.Tensor] = None, - encoder_attention_mask: Optional[paddle.Tensor] = None, - past_key_values: Optional[List[paddle.Tensor]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[ - Tuple[paddle.Tensor], - paddlenlp.transformers.model_outputs.BaseModelOutputWithPoolingAndCrossAttentions, - ]: - """ - encoder_hidden_states (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (`paddle.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - past_key_values (`tuple(tuple(paddle.Tensor))` of length `config.n_layers` with each tuple having 4 tensors - of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
- - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `decoder_input_ids` of shape `(batch_size, sequence_length)`. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). - """ - output_attentions = ( - output_attentions - if output_attentions is not None - else self.config.output_attentions - ) - output_hidden_states = ( - output_hidden_states - if output_hidden_states is not None - else self.config.output_hidden_states - ) - return_dict = ( - return_dict if return_dict is not None else self.config.use_return_dict - ) - if self.config.is_decoder: - use_cache = use_cache if use_cache is not None else self.config.use_cache - else: - use_cache = False - if input_ids is not None and inputs_embeds is not None: - raise ValueError( - "You cannot specify both input_ids and inputs_embeds at the same time" - ) - elif input_ids is not None: - self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) - input_shape = tuple(input_ids.shape) - elif inputs_embeds is not None: - input_shape = tuple(inputs_embeds.shape)[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - batch_size, seq_length = input_shape - # device = input_ids.place if input_ids is not None else inputs_embeds.place - past_key_values_length = ( - tuple(past_key_values[0][0].shape)[2] if past_key_values is not None else 0 - ) - if attention_mask is None: - attention_mask = paddle.ones( - shape=(batch_size, seq_length + past_key_values_length) - ) - if token_type_ids is None: - if hasattr(self.embeddings, "token_type_ids"): - buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] - buffered_token_type_ids_expanded = buffered_token_type_ids.expand( - shape=[batch_size, seq_length] - ) - token_type_ids = buffered_token_type_ids_expanded - else: - token_type_ids = paddle.zeros(shape=input_shape, dtype="int64") - extended_attention_mask: paddle.Tensor = self.get_extended_attention_mask( - attention_mask, input_shape - ) - if self.config.is_decoder and encoder_hidden_states is not None: - encoder_batch_size, encoder_sequence_length, _ = tuple( - encoder_hidden_states.shape - ) - encoder_hidden_shape = encoder_batch_size, encoder_sequence_length - if encoder_attention_mask is None: - encoder_attention_mask = paddle.ones(shape=encoder_hidden_shape) - encoder_extended_attention_mask = self.invert_attention_mask( - encoder_attention_mask - ) - else: - encoder_extended_attention_mask = None - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - token_type_ids=token_type_ids, - inputs_embeds=inputs_embeds, - past_key_values_length=past_key_values_length, - ) - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = encoder_outputs[0] - pooled_output = ( - self.pooler(sequence_output) if self.pooler is not None else None - ) - if not return_dict: - 
return (sequence_output, pooled_output) + encoder_outputs[1:] - return paddlenlp.transformers.model_outputs.BaseModelOutputWithPoolingAndCrossAttentions( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, - ) +import logging +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle +import paddlenlp +import utils.paddle_aux # NOQA +from paddlenlp.transformers.activations import ACT2FN +from paddlenlp.transformers.bert.configuration import BertConfig + +# fmt: off +from paddlenlp.transformers.model_outputs import BaseModelOutputWithPastAndCrossAttentions # NOQA + +# fmt: on +from paddlenlp.transformers.model_utils import PretrainedModel +from paddlenlp.transformers.model_utils import apply_chunking_to_forward +from paddlenlp.transformers.model_utils import find_pruneable_heads_and_indices +from paddlenlp.transformers.model_utils import prune_linear_layer + +logger = logging.getLogger(name=__name__) + + +class BertEmbeddings(paddle.nn.Layer): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = paddle.nn.Embedding( + num_embeddings=config.vocab_size, + embedding_dim=config.hidden_size, + padding_idx=config.pad_token_id, + ) + self.position_embeddings = paddle.nn.Embedding( + num_embeddings=config.max_position_embeddings, + embedding_dim=config.hidden_size, + ) + self.token_type_embeddings = paddle.nn.Embedding( + num_embeddings=config.type_vocab_size, embedding_dim=config.hidden_size + ) + self.LayerNorm = paddle.nn.LayerNorm( + normalized_shape=config.hidden_size, epsilon=config.layer_norm_eps + ) + self.dropout = paddle.nn.Dropout(p=config.hidden_dropout_prob) + self.position_embedding_type = getattr( + config, "position_embedding_type", "absolute" + ) + self.register_buffer( + name="position_ids", + tensor=paddle.arange(end=config.max_position_embeddings).expand( + shape=(1, -1) + ), + persistable=False, + ) + self.register_buffer( + name="token_type_ids", + tensor=paddle.zeros(shape=tuple(self.position_ids.shape), dtype="int64"), + persistable=False, + ) + + def forward( + self, + input_ids: Optional[paddle.Tensor] = None, + token_type_ids: Optional[paddle.Tensor] = None, + position_ids: Optional[paddle.Tensor] = None, + inputs_embeds: Optional[paddle.Tensor] = None, + past_key_values_length: int = 0, + ) -> paddle.Tensor: + if input_ids is not None: + input_shape = tuple(input_ids.shape) + else: + input_shape = tuple(inputs_embeds.shape)[:-1] + seq_length = input_shape[1] + if position_ids is None: + position_ids = self.position_ids[ + :, past_key_values_length : seq_length + past_key_values_length + ] + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand( + shape=[input_shape[0], seq_length] + ) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = paddle.zeros(shape=input_shape, dtype="int64") + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + embeddings = inputs_embeds + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = 
self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(paddle.nn.Layer): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, "embedding_size" + ): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple", + f" of the number of attention heads ({config.num_attention_heads})", + ) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.query = paddle.nn.Linear( + in_features=config.hidden_size, out_features=self.all_head_size + ) + self.key = paddle.nn.Linear( + in_features=config.hidden_size, out_features=self.all_head_size + ) + self.value = paddle.nn.Linear( + in_features=config.hidden_size, out_features=self.all_head_size + ) + self.dropout = paddle.nn.Dropout(p=config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if ( + self.position_embedding_type == "relative_key" + or self.position_embedding_type == "relative_key_query" + ): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = paddle.nn.Embedding( + num_embeddings=2 * config.max_position_embeddings - 1, + embedding_dim=self.attention_head_size, + ) + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: paddle.Tensor) -> paddle.Tensor: + new_x_shape = tuple(x.shape)[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(new_x_shape) + return x + + def forward( + self, + hidden_states: paddle.Tensor, + attention_mask: Optional[paddle.Tensor] = None, + head_mask: Optional[paddle.Tensor] = None, + encoder_hidden_states: Optional[paddle.Tensor] = None, + encoder_attention_mask: Optional[paddle.Tensor] = None, + past_key_value: Optional[Tuple[Tuple[paddle.Tensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[paddle.Tensor]: + mixed_query_layer = self.query(hidden_states) + is_cross_attention = encoder_hidden_states is not None + if is_cross_attention and past_key_value is not None: + key_layer = past_key_value[0] + value_layer = past_key_value[1] + # attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + # attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = paddle.concat(x=[past_key_value[0], key_layer], axis=2) + value_layer = paddle.concat(x=[past_key_value[1], value_layer], axis=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + if self.training: + drop_rate = self.dropout.p + else: + drop_rate = 0 + query_layer = query_layer.astype(paddle.float16) + key_layer = key_layer.astype(paddle.float16) + value_layer = value_layer.astype(paddle.float16) + context_layer = paddle.nn.functional.scaled_dot_product_attention( + query_layer, + key_layer, + value_layer, + dropout_p=drop_rate, + is_causal=False, + ) + context_layer = 
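BertSelfAttention.forward above casts the projected query/key/value to float16 and calls paddle.nn.functional.scaled_dot_product_attention instead of forming the full attention matrix; note that transpose_for_scores only reshapes to [batch, seq_len, num_heads, head_dim] without permuting axes, which is the layout this Paddle API consumes. A standalone sketch with toy shapes (the fused/flash kernel generally requires a GPU build of Paddle):

import paddle
import paddle.nn.functional as F

# Toy tensors in the [batch, seq_len, num_heads, head_dim] layout produced by
# transpose_for_scores (reshape only, no transpose).
B, L, H, D = 2, 128, 8, 64
q = paddle.randn([B, L, H, D]).astype(paddle.float16)
k = paddle.randn([B, L, H, D]).astype(paddle.float16)
v = paddle.randn([B, L, H, D]).astype(paddle.float16)

# Same call as in BertSelfAttention.forward: dropout_p is the attention dropout
# probability during training and 0 at eval time.
ctx = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)
ctx = ctx.astype(paddle.float32)
print(ctx.shape)  # [2, 128, 8, 64]; flattening the last two dims gives all_head_size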
context_layer.astype(paddle.float32) + # use_cache = past_key_value is not None + new_context_layer_shape = tuple(context_layer.shape)[:-2] + ( + self.all_head_size, + ) + context_layer = context_layer.view(new_context_layer_shape) + outputs = (context_layer,) + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(paddle.nn.Layer): + def __init__(self, config): + super().__init__() + self.dense = paddle.nn.Linear( + in_features=config.hidden_size, out_features=config.hidden_size + ) + self.LayerNorm = paddle.nn.LayerNorm( + normalized_shape=config.hidden_size, epsilon=config.layer_norm_eps + ) + self.dropout = paddle.nn.Dropout(p=config.hidden_dropout_prob) + + def forward( + self, hidden_states: paddle.Tensor, input_tensor: paddle.Tensor + ) -> paddle.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(paddle.nn.Layer): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = BertSelfAttention( + config, position_embedding_type=position_embedding_type + ) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = ( + self.self.attention_head_size * self.self.num_attention_heads + ) + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: paddle.Tensor, + attention_mask: Optional[paddle.Tensor] = None, + head_mask: Optional[paddle.Tensor] = None, + encoder_hidden_states: Optional[paddle.Tensor] = None, + encoder_attention_mask: Optional[paddle.Tensor] = None, + past_key_value: Optional[Tuple[Tuple[paddle.Tensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[paddle.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] + return outputs + + +class BertIntermediate(paddle.nn.Layer): + def __init__(self, config): + super().__init__() + self.dense = paddle.nn.Linear( + in_features=config.hidden_size, out_features=config.intermediate_size + ) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: paddle.Tensor) -> paddle.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(paddle.nn.Layer): + def __init__(self, config): + super().__init__() + self.dense = paddle.nn.Linear( + in_features=config.intermediate_size, out_features=config.hidden_size + ) + self.LayerNorm = paddle.nn.LayerNorm( + normalized_shape=config.hidden_size, epsilon=config.layer_norm_eps 
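BertAttention.prune_heads above removes whole attention heads by slicing the query/key/value and output projections with prune_linear_layer. A small usage sketch, assuming it is executed in the same module as the classes defined in this file (the config values are illustrative only):

from paddlenlp.transformers.bert.configuration import BertConfig

# 4 heads of size 16 over a 64-dim hidden state (toy numbers).
cfg = BertConfig(hidden_size=64, num_attention_heads=4)
attn = BertAttention(cfg)

attn.prune_heads([0, 2])  # drop heads 0 and 2
print(attn.self.num_attention_heads)  # 2
print(attn.self.all_head_size)        # 32 (= 2 remaining heads * head size 16)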
+ ) + self.dropout = paddle.nn.Dropout(p=config.hidden_dropout_prob) + + def forward( + self, hidden_states: paddle.Tensor, input_tensor: paddle.Tensor + ) -> paddle.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(paddle.nn.Layer): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError( + f"{self} should be used as a decoder model if cross attention is added" + ) + self.crossattention = BertAttention( + config, position_embedding_type="absolute" + ) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states: paddle.Tensor, + attention_mask: Optional[paddle.Tensor] = None, + head_mask: Optional[paddle.Tensor] = None, + encoder_hidden_states: Optional[paddle.Tensor] = None, + encoder_attention_mask: Optional[paddle.Tensor] = None, + past_key_value: Optional[Tuple[Tuple[paddle.Tensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[paddle.Tensor]: + self_attn_past_key_value = ( + past_key_value[:2] if past_key_value is not None else None + ) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated", + " with cross-attention layers by setting `config.add_cross_attention=True`", + ) + cross_attn_past_key_value = ( + past_key_value[-2:] if past_key_value is not None else None + ) + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output,) + outputs + if self.is_decoder: + outputs = outputs + (present_key_value,) + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(paddle.nn.Layer): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = paddle.nn.LayerList( + sublayers=[BertLayer(config) for _ in range(config.num_hidden_layers)] + ) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: paddle.Tensor, + attention_mask: 
Optional[paddle.Tensor] = None, + head_mask: Optional[paddle.Tensor] = None, + encoder_hidden_states: Optional[paddle.Tensor] = None, + encoder_attention_mask: Optional[paddle.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[paddle.Tensor], BaseModelOutputWithPastAndCrossAttentions,]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + () if output_attentions and self.config.add_cross_attention else None + ) + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(paddle.nn.Layer): + def __init__(self, config): + super().__init__() + self.dense = paddle.nn.Linear( + in_features=config.hidden_size, out_features=config.hidden_size + ) + self.activation = paddle.nn.Tanh() + + def forward(self, hidden_states: paddle.Tensor) -> paddle.Tensor: + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPreTrainedModel(PretrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
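BertLayer.forward above hands the feed-forward block to apply_chunking_to_forward: when config.chunk_size_feed_forward is non-zero, the sequence dimension is processed in slices of that size and the results are concatenated, trading a little scheduling overhead for lower peak activation memory. A small equivalence check, assuming the paddlenlp utility follows the usual Transformers semantics (the chunk size must divide the sequence length):

import paddle
from paddlenlp.transformers.model_utils import apply_chunking_to_forward

linear = paddle.nn.Linear(16, 16)

def ffn(x):
    # Stand-in for BertLayer.feed_forward_chunk.
    return linear(x)

x = paddle.randn([2, 12, 16])                      # [batch, seq_len, hidden]
direct = ffn(x)
chunked = apply_chunking_to_forward(ffn, 4, 1, x)  # chunk_size=4 along seq dim 1
print(bool(paddle.allclose(direct, chunked, atol=1e-6)))  # expected: True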
+ """ + + config_class = BertConfig + base_model_prefix = "bert" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, paddle.nn.Linear): + normal_tensor = paddle.normal( + mean=0.0, std=self.config.initializer_range, shape=module.weight.shape + ) + module.weight.set_value(normal_tensor) + if module.bias is not None: + module.bias.set_value(paddle.zeros_like(module.bias)) + elif isinstance(module, paddle.nn.Embedding): + normal_tensor = paddle.normal( + mean=0.0, std=self.config.initializer_range, shape=module.weight.shape + ) + module.weight.set_value(normal_tensor) + if module._padding_idx is not None: + normal_tensor[module._padding_idx] = 0.0 + module.weight.set_value(normal_tensor) + elif isinstance(module, paddle.nn.LayerNorm): + module.bias.set_value(paddle.zeros_like(module.bias)) + module.weight.set_value(paddle.ones_like(module.weight)) + + def post_init(self): + """ + A method executed at the end of each Transformer model initialization, to execute code that needs the model's + modules properly initialized (such as weight initialization). + """ + self.init_weights() + self._backward_compatibility_gradient_checkpointing() + + def _backward_compatibility_gradient_checkpointing(self): + if self.supports_gradient_checkpointing and getattr( + self.config, "gradient_checkpointing", False + ): + self.gradient_checkpointing_enable() + # Remove the attribute now that is has been consumed, so it's no saved in the config. + delattr(self.config, "gradient_checkpointing") + + def get_extended_attention_mask( + self, + attention_mask: paddle.Tensor, + input_shape: Tuple[int], + has_query: bool = False, + ) -> paddle.Tensor: + """ + Makes broadcastable attention and causal masks so that future and masked tokens are ignored. + Arguments: + attention_mask (`paddle.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (`Tuple[int]`): + The shape of the input to the model. + Returns: + `paddle.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - the model is an encoder, so make the mask broadcastable + # to [batch_size, num_heads, seq_length, seq_length] + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( + input_shape, attention_mask.shape + ) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
+ extended_attention_mask = extended_attention_mask.cast( + dtype=self.config.dtype + ) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def get_head_mask( + self, + head_mask: Optional[paddle.Tensor], + num_hidden_layers: int, + is_attention_chunked: bool = False, + ) -> paddle.Tensor: + """ + Prepare the head mask if needed. + Args: + head_mask (`paddle.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*): + The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). + num_hidden_layers (`int`): + The number of hidden layers in the model. + is_attention_chunked: (`bool`, *optional*, defaults to `False`): + Whether or not the attentions scores are computed by chunks or not. + Returns: + `paddle.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with + `[None]` for each layer. + """ + if head_mask is not None: + head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) + if is_attention_chunked is True: + head_mask = head_mask.unsqueeze(-1) + else: + head_mask = [None] * num_hidden_layers + + return head_mask + + +class BertModel(BertPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + self.config.type_vocab_size = 2 + self.embeddings = BertEmbeddings(config) + self.encoder = BertEncoder(config) + self.pooler = BertPooler(config) if add_pooling_layer else None + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PretrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def forward( + self, + input_ids: Optional[paddle.Tensor] = None, + attention_mask: Optional[paddle.Tensor] = None, + token_type_ids: Optional[paddle.Tensor] = None, + position_ids: Optional[paddle.Tensor] = None, + head_mask: Optional[paddle.Tensor] = None, + inputs_embeds: Optional[paddle.Tensor] = None, + encoder_hidden_states: Optional[paddle.Tensor] = None, + encoder_attention_mask: Optional[paddle.Tensor] = None, + past_key_values: Optional[List[paddle.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[ + Tuple[paddle.Tensor], + paddlenlp.transformers.model_outputs.BaseModelOutputWithPoolingAndCrossAttentions, + ]: + """ + encoder_hidden_states (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`paddle.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(paddle.Tensor))` of length `config.n_layers` with each tuple having 4 tensors + of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). 
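+
+        Example (editorial sketch, not part of the original file): a minimal
+        encoder-only call that mirrors how the `Bert` wrapper in `model.py` drives
+        this model through `inputs_embeds`; the batch size and sequence length
+        below are arbitrary placeholders.
+
+        ```python
+        config = BertConfig(
+            hidden_size=512,
+            num_attention_heads=4,
+            num_hidden_layers=10,
+            max_position_embeddings=1,
+            intermediate_size=2048,
+            vocab_size=1,
+        )
+        model = BertModel(config)
+        x = paddle.randn([1, 1024, 512])  # [batch_size, seq_length, hidden_size]
+        sequence_output = model(inputs_embeds=x)[0]  # last hidden state
+        ```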
+ """ + output_attentions = ( + output_attentions + if output_attentions is not None + else self.config.output_attentions + ) + output_hidden_states = ( + output_hidden_states + if output_hidden_states is not None + else self.config.output_hidden_states + ) + return_dict = ( + return_dict if return_dict is not None else self.config.use_return_dict + ) + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time" + ) + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = tuple(input_ids.shape) + elif inputs_embeds is not None: + input_shape = tuple(inputs_embeds.shape)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + batch_size, seq_length = input_shape + # device = input_ids.place if input_ids is not None else inputs_embeds.place + past_key_values_length = ( + tuple(past_key_values[0][0].shape)[2] if past_key_values is not None else 0 + ) + if attention_mask is None: + attention_mask = paddle.ones( + shape=(batch_size, seq_length + past_key_values_length) + ) + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand( + shape=[batch_size, seq_length] + ) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = paddle.zeros(shape=input_shape, dtype="int64") + extended_attention_mask: paddle.Tensor = self.get_extended_attention_mask( + attention_mask, input_shape + ) + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = tuple( + encoder_hidden_states.shape + ) + encoder_hidden_shape = encoder_batch_size, encoder_sequence_length + if encoder_attention_mask is None: + encoder_attention_mask = paddle.ones(shape=encoder_hidden_shape) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask + ) + else: + encoder_extended_attention_mask = None + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = ( + self.pooler(sequence_output) if self.pooler is not None else None + ) + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + return paddlenlp.transformers.model_outputs.BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) diff --git 
a/jointContribution/IJCAI_2024/leejt/infer.py b/jointContribution/IJCAI_2024/leejt/infer.py index 3463cccca9..07ee9836fb 100644 --- a/jointContribution/IJCAI_2024/leejt/infer.py +++ b/jointContribution/IJCAI_2024/leejt/infer.py @@ -1,130 +1,130 @@ -import os -import random - -import numpy as np -import paddle -import utils.paddle_aux # NOQA -from dataset import CustomDataLoader -from dataset import CustomDataset -from model import ConvBert -from tqdm import tqdm - -root = "./" -mean, std = np.loadtxt( - f"{root}/Dataset/Testset_track_B/Auxiliary/train_pressure_mean_std.txt" -) -area_min_bounds, area_max_bounds = np.loadtxt( - f"{root}/Dataset/Testset_track_B/Auxiliary/area_bounds.txt" -) -bounds = np.loadtxt(f"{root}/Dataset/Testset_track_B/Auxiliary/global_bounds.txt") -global_min_bounds = paddle.to_tensor(data=bounds[0]).view(1, -1).astype(dtype="float32") -global_max_bounds = paddle.to_tensor(data=bounds[1]).view(1, -1).astype(dtype="float32") - - -def seed_everything(seed): - paddle.seed(seed) - np.random.seed(seed) - random.seed(seed) - - -def read_npy_as_data(ids, training=True): - data_list = [] - for file_id in tqdm(ids): - press = None - if training: - pos = np.load(f"{root}/train_track_B/centroid_{file_id}.npy") - press = np.load(f"{root}/train_track_B/press_{file_id}.npy") - area = np.load(f"{root}/train_track_B/area_{file_id}.npy") - pos = paddle.to_tensor(data=pos).to("float32") - press = paddle.to_tensor(data=press).to("float32") - area = paddle.to_tensor(data=area).to("float32") - press = (press - mean) / std - else: - pos = np.load( - f"{root}/Dataset/Testset_track_B/Inference/centroid_{file_id}.npy" - ) - area = np.load( - f"{root}/Dataset/Testset_track_B/Auxiliary/area_{file_id}.npy" - ) - pos = paddle.to_tensor(data=pos).to("float32") - area = paddle.to_tensor(data=area).to("float32") - pos = (pos - global_min_bounds) / (global_max_bounds - global_min_bounds) - pos = 2 * pos - 1 - area = (area - area_min_bounds) / (area_max_bounds - area_min_bounds) - data = CustomDataset(pos=pos, y=press, area=area) - data_list.append(data) - return data_list - - -class LpLoss(paddle.nn.Layer): - def __init__(self, dim=None): - super().__init__() - self.dim = dim - - def forward(self, x, y): - a = paddle.linalg.norm(x=x - y, p=2, axis=self.dim) - b = paddle.linalg.norm(x=y, p=2, axis=self.dim) - return (a / b).mean() - - -def uneven_chunk(tensor, chunks): - total_size = tensor.shape[0] - chunk_size = total_size // chunks - remainder = total_size % chunks - chunks_indices = [] - start = 0 - for i in range(chunks): - end = start + chunk_size + (1 if i < remainder else 0) - chunks_indices.append((start, end)) - start = end - return [tensor[start:end] for start, end in chunks_indices] - - -@paddle.no_grad() -def get_preds(data, num_trials=10): - pos = data.pos - area = data.area - final_outs = 0.0 - for i in range(num_trials): - idx = paddle.randperm(n=pos.shape[0]) - outs = [] - with paddle.amp.auto_cast(): - for p, a in zip(pos[idx].chunk(chunks=100), area[idx].chunk(chunks=100)): - data = CustomDataset(pos=p, area=a) - out = model(data) - outs.append(out) - outs = paddle.concat(x=outs).astype(dtype="float32") - tmp = paddle.zeros_like(x=outs) - tmp[idx] = outs - final_outs += tmp - final_outs /= num_trials - return final_outs - - -if __name__ == "__main__": - seed_everything(2024) - - # load data - test_ids = os.listdir(f"{root}/Dataset/Testset_track_B/Inference") - test_ids = sorted( - [i[i.find("_") + 1 : i.find(".")] for i in test_ids if "centroid_" in i] - ) - test_ids = 
np.array(test_ids) - print(f"Finish loading {len(test_ids)} test samples") - test_data = read_npy_as_data(test_ids, training=False) - test_loader = CustomDataLoader(test_data, batch_size=1, shuffle=False) - - # load model - device = paddle.set_device("gpu") - model = ConvBert().to(device) - model.eval() - model.set_state_dict(state_dict=paddle.load(path="model.pdparams")) - - track = "gen_answer_B" - submit_path = f"results/{track}" - os.makedirs(submit_path, exist_ok=True) - for idx, data in enumerate(tqdm(test_loader)): - out = get_preds(data).astype(dtype="float32") - out = out.cpu().numpy() * std + mean - file_id = int(test_ids[idx]) - np.save(f"{submit_path}/press_{file_id}.npy", out) +import os +import random + +import numpy as np +import paddle +import utils.paddle_aux # NOQA +from dataset import CustomDataLoader +from dataset import CustomDataset +from model import ConvBert +from tqdm import tqdm + +root = "./" +mean, std = np.loadtxt( + f"{root}/Dataset/Testset_track_B/Auxiliary/train_pressure_mean_std.txt" +) +area_min_bounds, area_max_bounds = np.loadtxt( + f"{root}/Dataset/Testset_track_B/Auxiliary/area_bounds.txt" +) +bounds = np.loadtxt(f"{root}/Dataset/Testset_track_B/Auxiliary/global_bounds.txt") +global_min_bounds = paddle.to_tensor(data=bounds[0]).view(1, -1).astype(dtype="float32") +global_max_bounds = paddle.to_tensor(data=bounds[1]).view(1, -1).astype(dtype="float32") + + +def seed_everything(seed): + paddle.seed(seed) + np.random.seed(seed) + random.seed(seed) + + +def read_npy_as_data(ids, training=True): + data_list = [] + for file_id in tqdm(ids): + press = None + if training: + pos = np.load(f"{root}/train_track_B/centroid_{file_id}.npy") + press = np.load(f"{root}/train_track_B/press_{file_id}.npy") + area = np.load(f"{root}/train_track_B/area_{file_id}.npy") + pos = paddle.to_tensor(data=pos).to("float32") + press = paddle.to_tensor(data=press).to("float32") + area = paddle.to_tensor(data=area).to("float32") + press = (press - mean) / std + else: + pos = np.load( + f"{root}/Dataset/Testset_track_B/Inference/centroid_{file_id}.npy" + ) + area = np.load( + f"{root}/Dataset/Testset_track_B/Auxiliary/area_{file_id}.npy" + ) + pos = paddle.to_tensor(data=pos).to("float32") + area = paddle.to_tensor(data=area).to("float32") + pos = (pos - global_min_bounds) / (global_max_bounds - global_min_bounds) + pos = 2 * pos - 1 + area = (area - area_min_bounds) / (area_max_bounds - area_min_bounds) + data = CustomDataset(pos=pos, y=press, area=area) + data_list.append(data) + return data_list + + +class LpLoss(paddle.nn.Layer): + def __init__(self, dim=None): + super().__init__() + self.dim = dim + + def forward(self, x, y): + a = paddle.linalg.norm(x=x - y, p=2, axis=self.dim) + b = paddle.linalg.norm(x=y, p=2, axis=self.dim) + return (a / b).mean() + + +def uneven_chunk(tensor, chunks): + total_size = tensor.shape[0] + chunk_size = total_size // chunks + remainder = total_size % chunks + chunks_indices = [] + start = 0 + for i in range(chunks): + end = start + chunk_size + (1 if i < remainder else 0) + chunks_indices.append((start, end)) + start = end + return [tensor[start:end] for start, end in chunks_indices] + + +@paddle.no_grad() +def get_preds(data, num_trials=10): + pos = data.pos + area = data.area + final_outs = 0.0 + for i in range(num_trials): + idx = paddle.randperm(n=pos.shape[0]) + outs = [] + with paddle.amp.auto_cast(): + for p, a in zip(pos[idx].chunk(chunks=100), area[idx].chunk(chunks=100)): + data = CustomDataset(pos=p, area=a) + out = model(data) + 
outs.append(out) + outs = paddle.concat(x=outs).astype(dtype="float32") + tmp = paddle.zeros_like(x=outs) + tmp[idx] = outs + final_outs += tmp + final_outs /= num_trials + return final_outs + + +if __name__ == "__main__": + seed_everything(2024) + + # load data + test_ids = os.listdir(f"{root}/Dataset/Testset_track_B/Inference") + test_ids = sorted( + [i[i.find("_") + 1 : i.find(".")] for i in test_ids if "centroid_" in i] + ) + test_ids = np.array(test_ids) + print(f"Finish loading {len(test_ids)} test samples") + test_data = read_npy_as_data(test_ids, training=False) + test_loader = CustomDataLoader(test_data, batch_size=1, shuffle=False) + + # load model + device = paddle.set_device("gpu") + model = ConvBert().to(device) + model.eval() + model.set_state_dict(state_dict=paddle.load(path="model.pdparams")) + + track = "gen_answer_B" + submit_path = f"results/{track}" + os.makedirs(submit_path, exist_ok=True) + for idx, data in enumerate(tqdm(test_loader)): + out = get_preds(data).astype(dtype="float32") + out = out.cpu().numpy() * std + mean + file_id = int(test_ids[idx]) + np.save(f"{submit_path}/press_{file_id}.npy", out) diff --git a/jointContribution/IJCAI_2024/leejt/model.py b/jointContribution/IJCAI_2024/leejt/model.py index 8a03636965..6c2341347b 100644 --- a/jointContribution/IJCAI_2024/leejt/model.py +++ b/jointContribution/IJCAI_2024/leejt/model.py @@ -1,74 +1,74 @@ -import paddle -from flashbert import BertConfig -from flashbert import BertModel -from paddle.nn import Linear - - -class Bert(paddle.nn.Layer): - def __init__(self, hidden=512): - super().__init__() - self.config = BertConfig( - hidden_dropout_prob=0.0, - attention_probs_dropout_prob=0.0, - hidden_size=hidden, - num_attention_heads=4, - num_hidden_layers=10, - max_position_embeddings=1, - intermediate_size=2048, - vocab_size=1, - ) - self.bert = BertModel(self.config) - - def forward(self, x): - x = x.unsqueeze(axis=0) - x = self.bert(inputs_embeds=x)[0] - x = x.view(-1, self.config.hidden_size) - return x - - -class ConvNet(paddle.nn.Layer): - def __init__(self, num_layers=5, hidden=512, out=None, residual=False): - super().__init__() - convs = [] - for i in range(num_layers): - convs.append( - paddle.nn.Conv1D(in_channels=hidden, out_channels=hidden, kernel_size=1) - ) - convs.append(paddle.nn.ReLU()) - if out is not None: - convs.append( - paddle.nn.Conv1D(in_channels=hidden, out_channels=out, kernel_size=1) - ) - self.convs = paddle.nn.Sequential(*convs) - self.residual = residual - - def forward(self, x): - x_0 = x - x = x.t().unsqueeze(axis=0) - x = self.convs(x) - x = x.transpose(perm=[0, 2, 1]).squeeze() - if self.residual: - x = x + x_0 - return x - - -class ConvBert(paddle.nn.Layer): - def __init__(self): - super().__init__() - hidden = 512 - self.mlp_in = Linear(4, hidden) - self.cnn_in = ConvNet(2, hidden, residual=False) - self.ln = paddle.nn.LayerNorm(normalized_shape=hidden) - self.bert = Bert(hidden) - self.cnn_out = ConvNet(2, hidden, out=1) - - def forward(self, data): - pos = data.pos - area = data.area - x = paddle.concat(x=[pos, area.unsqueeze(axis=1)], axis=1) - x = self.mlp_in(x) - x = self.cnn_in(x) - x = self.ln(x) - x = self.bert(x) - x = self.cnn_out(x) - return x.squeeze() +import paddle +from flashbert import BertConfig +from flashbert import BertModel +from paddle.nn import Linear + + +class Bert(paddle.nn.Layer): + def __init__(self, hidden=512): + super().__init__() + self.config = BertConfig( + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + hidden_size=hidden, + 
num_attention_heads=4, + num_hidden_layers=10, + max_position_embeddings=1, + intermediate_size=2048, + vocab_size=1, + ) + self.bert = BertModel(self.config) + + def forward(self, x): + x = x.unsqueeze(axis=0) + x = self.bert(inputs_embeds=x)[0] + x = x.view(-1, self.config.hidden_size) + return x + + +class ConvNet(paddle.nn.Layer): + def __init__(self, num_layers=5, hidden=512, out=None, residual=False): + super().__init__() + convs = [] + for i in range(num_layers): + convs.append( + paddle.nn.Conv1D(in_channels=hidden, out_channels=hidden, kernel_size=1) + ) + convs.append(paddle.nn.ReLU()) + if out is not None: + convs.append( + paddle.nn.Conv1D(in_channels=hidden, out_channels=out, kernel_size=1) + ) + self.convs = paddle.nn.Sequential(*convs) + self.residual = residual + + def forward(self, x): + x_0 = x + x = x.t().unsqueeze(axis=0) + x = self.convs(x) + x = x.transpose(perm=[0, 2, 1]).squeeze() + if self.residual: + x = x + x_0 + return x + + +class ConvBert(paddle.nn.Layer): + def __init__(self): + super().__init__() + hidden = 512 + self.mlp_in = Linear(4, hidden) + self.cnn_in = ConvNet(2, hidden, residual=False) + self.ln = paddle.nn.LayerNorm(normalized_shape=hidden) + self.bert = Bert(hidden) + self.cnn_out = ConvNet(2, hidden, out=1) + + def forward(self, data): + pos = data.pos + area = data.area + x = paddle.concat(x=[pos, area.unsqueeze(axis=1)], axis=1) + x = self.mlp_in(x) + x = self.cnn_in(x) + x = self.ln(x) + x = self.bert(x) + x = self.cnn_out(x) + return x.squeeze() diff --git a/jointContribution/IJCAI_2024/leejt/requirements.txt b/jointContribution/IJCAI_2024/leejt/requirements.txt index 78fa4feeb0..d5c0e8cdca 100644 --- a/jointContribution/IJCAI_2024/leejt/requirements.txt +++ b/jointContribution/IJCAI_2024/leejt/requirements.txt @@ -1,4 +1,4 @@ -numpy -paddlenlp -paddlepaddle_gpu -tqdm +numpy +paddlenlp +paddlepaddle_gpu +tqdm diff --git a/jointContribution/IJCAI_2024/leejt/utils/paddle_aux.py b/jointContribution/IJCAI_2024/leejt/utils/paddle_aux.py index 5ad1075b73..ccf753feba 100644 --- a/jointContribution/IJCAI_2024/leejt/utils/paddle_aux.py +++ b/jointContribution/IJCAI_2024/leejt/utils/paddle_aux.py @@ -1,59 +1,59 @@ -# This file is generated by PaConvert ToolKit, please Don't edit it! 
-import paddle - - -def view(self, *args, **kwargs): - if args: - if len(args) == 1: - if isinstance(args[0], (tuple, list)): - return paddle.reshape(self, args[0]) # To change reshape => view - elif isinstance(args[0], str): - return paddle.view(self, args[0]) - else: - return paddle.reshape(self, list(args)) # To change reshape => view - else: - return paddle.reshape(self, list(args)) # To change reshape => view - elif kwargs: - key = [k for k in kwargs.keys()] - if "dtype" in kwargs: - return paddle.view(self, shape_or_dtype=kwargs[key[0]]) - else: - return paddle.reshape( - self, shape=kwargs[key[0]] - ) # To change reshape => view - - -setattr(paddle.Tensor, "view", view) - - -def split_tensor_func(self, split_size, dim=0): - if isinstance(split_size, int): - return paddle.split(self, self.shape[dim] // split_size, dim) - else: - return paddle.split(self, split_size, dim) - - -setattr(paddle.Tensor, "split", split_tensor_func) - -# def uneven_chunk(self, chunks): -# tensor = self -# total_size = tensor.shape[0] -# chunk_size = total_size // chunks -# remainder = total_size % chunks -# chunks_indices = [] -# start = 0 -# for i in range(chunks): -# end = start + chunk_size + (1 if i < remainder else 0) -# chunks_indices.append((start, end)) -# start = end -# return [tensor[start:end] for start, end in chunks_indices] - - -def uneven_chunk(self, chunks): - remainder = self.shape[0] % chunks - chunks = paddle.chunk(self[:-remainder], chunks) - chunks.append(self[-remainder:]) - return chunks - - -setattr(paddle.Tensor, "chunk", uneven_chunk) +# This file is generated by PaConvert ToolKit, please Don't edit it! +import paddle + + +def view(self, *args, **kwargs): + if args: + if len(args) == 1: + if isinstance(args[0], (tuple, list)): + return paddle.reshape(self, args[0]) # To change reshape => view + elif isinstance(args[0], str): + return paddle.view(self, args[0]) + else: + return paddle.reshape(self, list(args)) # To change reshape => view + else: + return paddle.reshape(self, list(args)) # To change reshape => view + elif kwargs: + key = [k for k in kwargs.keys()] + if "dtype" in kwargs: + return paddle.view(self, shape_or_dtype=kwargs[key[0]]) + else: + return paddle.reshape( + self, shape=kwargs[key[0]] + ) # To change reshape => view + + +setattr(paddle.Tensor, "view", view) + + +def split_tensor_func(self, split_size, dim=0): + if isinstance(split_size, int): + return paddle.split(self, self.shape[dim] // split_size, dim) + else: + return paddle.split(self, split_size, dim) + + +setattr(paddle.Tensor, "split", split_tensor_func) + +# def uneven_chunk(self, chunks): +# tensor = self +# total_size = tensor.shape[0] +# chunk_size = total_size // chunks +# remainder = total_size % chunks +# chunks_indices = [] +# start = 0 +# for i in range(chunks): +# end = start + chunk_size + (1 if i < remainder else 0) +# chunks_indices.append((start, end)) +# start = end +# return [tensor[start:end] for start, end in chunks_indices] + + +def uneven_chunk(self, chunks): + remainder = self.shape[0] % chunks + chunks = paddle.chunk(self[:-remainder], chunks) + chunks.append(self[-remainder:]) + return chunks + + +setattr(paddle.Tensor, "chunk", uneven_chunk) diff --git a/jointContribution/IJCAI_2024/tenfeng/Transolver.py b/jointContribution/IJCAI_2024/tenfeng/Transolver.py index a38b51b3bf..0c7c0f124a 100644 --- a/jointContribution/IJCAI_2024/tenfeng/Transolver.py +++ b/jointContribution/IJCAI_2024/tenfeng/Transolver.py @@ -1,292 +1,292 @@ -import numpy as np -import paddle -import utils.paddle_aux 
# noqa: F401 -from einops import rearrange - -import ppsci - -ACTIVATION = { - "gelu": paddle.nn.GELU, - "tanh": paddle.nn.Tanh, - "sigmoid": paddle.nn.Sigmoid, - "relu": paddle.nn.ReLU, - "leaky_relu": paddle.nn.LeakyReLU(negative_slope=0.1), - "softplus": paddle.nn.Softplus, - "ELU": paddle.nn.ELU, - "silu": paddle.nn.Silu, -} - - -class Physics_Attention_1D(paddle.nn.Layer): - def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, slice_num=64): - super().__init__() - inner_dim = dim_head * heads - self.dim_head = dim_head - self.heads = heads - self.scale = dim_head**-0.5 - self.softmax = paddle.nn.Softmax(axis=-1) - self.dropout = paddle.nn.Dropout(p=dropout) - out_0 = paddle.create_parameter( - shape=(paddle.ones(shape=[1, heads, 1, 1]) * 0.5).shape, - dtype=(paddle.ones(shape=[1, heads, 1, 1]) * 0.5).numpy().dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.ones(shape=[1, heads, 1, 1]) * 0.5 - ), - ) - out_0.stop_gradient = not True - self.temperature = out_0 - self.in_project_x = paddle.nn.Linear(in_features=dim, out_features=inner_dim) - self.in_project_fx = paddle.nn.Linear(in_features=dim, out_features=inner_dim) - self.in_project_slice = paddle.nn.Linear( - in_features=dim_head, out_features=slice_num - ) - for la in [self.in_project_slice]: - init_Orthogonal = paddle.nn.initializer.Orthogonal() - init_Orthogonal(la.weight) - self.to_q = paddle.nn.Linear( - in_features=dim_head, out_features=dim_head, bias_attr=False - ) - self.to_k = paddle.nn.Linear( - in_features=dim_head, out_features=dim_head, bias_attr=False - ) - self.to_v = paddle.nn.Linear( - in_features=dim_head, out_features=dim_head, bias_attr=False - ) - self.to_out = paddle.nn.Sequential( - paddle.nn.Linear(in_features=inner_dim, out_features=dim), - paddle.nn.Dropout(p=dropout), - ) - - def forward(self, x): - B, N, C = tuple(x.shape) - fx_mid = ( - self.in_project_fx(x) - .reshape(B, N, self.heads, self.dim_head) - .transpose(perm=[0, 2, 1, 3]) - ) - x_mid = ( - self.in_project_x(x) - .reshape(B, N, self.heads, self.dim_head) - .transpose(perm=[0, 2, 1, 3]) - ) - slice_weights = self.softmax(self.in_project_slice(x_mid) / self.temperature) - slice_norm = slice_weights.sum(axis=2) - slice_token = paddle.einsum("bhnc,bhng->bhgc", fx_mid, slice_weights) - slice_token = slice_token / (slice_norm + 1e-05)[:, :, :, None].repeat( - 1, 1, 1, self.dim_head - ) - q_slice_token = self.to_q(slice_token) - k_slice_token = self.to_k(slice_token) - v_slice_token = self.to_v(slice_token) - x = k_slice_token - perm_2 = list(range(x.ndim)) - perm_2[-1] = -2 - perm_2[-2] = -1 - dots = paddle.matmul(x=q_slice_token, y=x.transpose(perm=perm_2)) * self.scale - attn = self.softmax(dots) - attn = self.dropout(attn) - out_slice_token = paddle.matmul(x=attn, y=v_slice_token) - out_x = paddle.einsum("bhgc,bhng->bhnc", out_slice_token, slice_weights) - out_x = rearrange(out_x, "b h n d -> b n (h d)") - return self.to_out(out_x) - - -class MLP(paddle.nn.Layer): - def __init__(self, n_input, n_hidden, n_output, n_layers=1, act="gelu", res=True): - super(MLP, self).__init__() - if act in ACTIVATION.keys(): - act = ACTIVATION[act] - else: - raise NotImplementedError - self.n_input = n_input - self.n_hidden = n_hidden - self.n_output = n_output - self.n_layers = n_layers - self.res = res - self.linear_pre = paddle.nn.Sequential( - paddle.nn.Linear(in_features=n_input, out_features=n_hidden), act() - ) - self.linear_post = paddle.nn.Linear(in_features=n_hidden, out_features=n_output) - self.linears = paddle.nn.LayerList( - 
sublayers=[ - paddle.nn.Sequential( - paddle.nn.Linear(in_features=n_hidden, out_features=n_hidden), act() - ) - for _ in range(n_layers) - ] - ) - - def forward(self, x): - x = self.linear_pre(x) - for i in range(self.n_layers): - if self.res: - x = self.linears[i](x) + x - else: - x = self.linears[i](x) - x = self.linear_post(x) - return x - - -class Transolver_block(paddle.nn.Layer): - """Transformer encoder block.""" - - def __init__( - self, - num_heads: int, - hidden_dim: int, - dropout: float, - act="gelu", - mlp_ratio=4, - last_layer=False, - out_dim=1, - slice_num=32, - ): - super().__init__() - self.last_layer = last_layer - self.ln_1 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) - self.Attn = Physics_Attention_1D( - hidden_dim, - heads=num_heads, - dim_head=hidden_dim // num_heads, - dropout=dropout, - slice_num=slice_num, - ) - self.ln_2 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) - self.mlp = MLP( - hidden_dim, - hidden_dim * mlp_ratio, - hidden_dim, - n_layers=0, - res=False, - act=act, - ) - if self.last_layer: - self.ln_3 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) - self.mlp2 = paddle.nn.Linear(in_features=hidden_dim, out_features=out_dim) - - def forward(self, fx): - fx = self.Attn(self.ln_1(fx)) + fx - fx = self.mlp(self.ln_2(fx)) + fx - if self.last_layer: - return self.mlp2(self.ln_3(fx)) - else: - return fx - - -class Model(paddle.nn.Layer): - def __init__( - self, - space_dim=1, - n_layers=5, - n_hidden=256, - dropout=0, - n_head=8, - act="gelu", - mlp_ratio=1, - fun_dim=1, - out_dim=1, - slice_num=32, - ref=8, - n_iter=1, - unified_pos=False, - ): - super(Model, self).__init__() - self.__name__ = "UniPDE_3D" - self.ref = ref - self.n_layers = n_layers - self.n_iter = n_iter - self.unified_pos = unified_pos - if self.unified_pos: - self.preprocess = MLP( - fun_dim + self.ref * self.ref * self.ref, - n_hidden * 2, - n_hidden, - n_layers=0, - res=False, - act=act, - ) - else: - self.preprocess = MLP( - fun_dim + space_dim, - n_hidden * 2, - n_hidden, - n_layers=0, - res=False, - act=act, - ) - self.n_hidden = n_hidden - self.space_dim = space_dim - self.blocks = paddle.nn.LayerList( - sublayers=[ - Transolver_block( - num_heads=n_head, - hidden_dim=n_hidden, - dropout=dropout, - act=act, - mlp_ratio=mlp_ratio, - out_dim=out_dim, - slice_num=slice_num, - last_layer=_ == n_layers - 1, - ) - for _ in range(n_layers) - ] - ) - self.initialize_weights() - param = 1 / n_hidden * paddle.rand(shape=(n_hidden,), dtype="float32") - out_1 = paddle.create_parameter( - shape=param.shape, - dtype=param.numpy().dtype, - default_initializer=paddle.nn.initializer.Assign(param), - ) - out_1.stop_gradient = not True - self.placeholder = out_1 - - def initialize_weights(self): - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, paddle.nn.Linear): - m.weight = ppsci.utils.initializer.trunc_normal_(m.weight, std=0.02) - if isinstance(m, paddle.nn.Linear) and m.bias is not None: - init_Constant = paddle.nn.initializer.Constant(value=0) - init_Constant(m.bias) - elif isinstance(m, (paddle.nn.LayerNorm, paddle.nn.BatchNorm1D)): - init_Constant = paddle.nn.initializer.Constant(value=0) - init_Constant(m.bias) - init_Constant = paddle.nn.initializer.Constant(value=1.0) - init_Constant(m.weight) - - def get_grid(self, my_pos): - batchsize = tuple(my_pos.shape)[0] - gridx = paddle.to_tensor(data=np.linspace(-1.5, 1.5, self.ref), dtype="float32") - gridx = gridx.reshape(1, self.ref, 1, 1, 1).repeat( - [batchsize, 1, self.ref, self.ref, 1] - ) - 
gridy = paddle.to_tensor(data=np.linspace(0, 2, self.ref), dtype="float32") - gridy = gridy.reshape(1, 1, self.ref, 1, 1).repeat( - [batchsize, self.ref, 1, self.ref, 1] - ) - gridz = paddle.to_tensor(data=np.linspace(-4, 4, self.ref), dtype="float32") - gridz = gridz.reshape(1, 1, 1, self.ref, 1).repeat( - [batchsize, self.ref, self.ref, 1, 1] - ) - grid_ref = ( - paddle.concat(x=(gridx, gridy, gridz), axis=-1) - .cuda(blocking=True) - .reshape(batchsize, self.ref**3, 3) - ) - pos = paddle.sqrt( - x=paddle.sum( - x=(my_pos[:, :, None, :] - grid_ref[:, None, :, :]) ** 2, axis=-1 - ) - ).reshape(batchsize, tuple(my_pos.shape)[1], self.ref * self.ref * self.ref) - return pos - - def forward(self, x): - fx = self.preprocess(x) - fx = fx + self.placeholder[None, None, :] - for _ in range(self.n_iter): - for i in range(self.n_layers - 1): - fx = self.blocks[i](fx) - fx = self.blocks[-1](fx) - return fx +import numpy as np +import paddle +import utils.paddle_aux # noqa: F401 +from einops import rearrange + +import ppsci + +ACTIVATION = { + "gelu": paddle.nn.GELU, + "tanh": paddle.nn.Tanh, + "sigmoid": paddle.nn.Sigmoid, + "relu": paddle.nn.ReLU, + "leaky_relu": paddle.nn.LeakyReLU(negative_slope=0.1), + "softplus": paddle.nn.Softplus, + "ELU": paddle.nn.ELU, + "silu": paddle.nn.Silu, +} + + +class Physics_Attention_1D(paddle.nn.Layer): + def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, slice_num=64): + super().__init__() + inner_dim = dim_head * heads + self.dim_head = dim_head + self.heads = heads + self.scale = dim_head**-0.5 + self.softmax = paddle.nn.Softmax(axis=-1) + self.dropout = paddle.nn.Dropout(p=dropout) + out_0 = paddle.create_parameter( + shape=(paddle.ones(shape=[1, heads, 1, 1]) * 0.5).shape, + dtype=(paddle.ones(shape=[1, heads, 1, 1]) * 0.5).numpy().dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.ones(shape=[1, heads, 1, 1]) * 0.5 + ), + ) + out_0.stop_gradient = not True + self.temperature = out_0 + self.in_project_x = paddle.nn.Linear(in_features=dim, out_features=inner_dim) + self.in_project_fx = paddle.nn.Linear(in_features=dim, out_features=inner_dim) + self.in_project_slice = paddle.nn.Linear( + in_features=dim_head, out_features=slice_num + ) + for la in [self.in_project_slice]: + init_Orthogonal = paddle.nn.initializer.Orthogonal() + init_Orthogonal(la.weight) + self.to_q = paddle.nn.Linear( + in_features=dim_head, out_features=dim_head, bias_attr=False + ) + self.to_k = paddle.nn.Linear( + in_features=dim_head, out_features=dim_head, bias_attr=False + ) + self.to_v = paddle.nn.Linear( + in_features=dim_head, out_features=dim_head, bias_attr=False + ) + self.to_out = paddle.nn.Sequential( + paddle.nn.Linear(in_features=inner_dim, out_features=dim), + paddle.nn.Dropout(p=dropout), + ) + + def forward(self, x): + B, N, C = tuple(x.shape) + fx_mid = ( + self.in_project_fx(x) + .reshape(B, N, self.heads, self.dim_head) + .transpose(perm=[0, 2, 1, 3]) + ) + x_mid = ( + self.in_project_x(x) + .reshape(B, N, self.heads, self.dim_head) + .transpose(perm=[0, 2, 1, 3]) + ) + slice_weights = self.softmax(self.in_project_slice(x_mid) / self.temperature) + slice_norm = slice_weights.sum(axis=2) + slice_token = paddle.einsum("bhnc,bhng->bhgc", fx_mid, slice_weights) + slice_token = slice_token / (slice_norm + 1e-05)[:, :, :, None].repeat( + 1, 1, 1, self.dim_head + ) + q_slice_token = self.to_q(slice_token) + k_slice_token = self.to_k(slice_token) + v_slice_token = self.to_v(slice_token) + x = k_slice_token + perm_2 = list(range(x.ndim)) + perm_2[-1] = 
-2 + perm_2[-2] = -1 + dots = paddle.matmul(x=q_slice_token, y=x.transpose(perm=perm_2)) * self.scale + attn = self.softmax(dots) + attn = self.dropout(attn) + out_slice_token = paddle.matmul(x=attn, y=v_slice_token) + out_x = paddle.einsum("bhgc,bhng->bhnc", out_slice_token, slice_weights) + out_x = rearrange(out_x, "b h n d -> b n (h d)") + return self.to_out(out_x) + + +class MLP(paddle.nn.Layer): + def __init__(self, n_input, n_hidden, n_output, n_layers=1, act="gelu", res=True): + super(MLP, self).__init__() + if act in ACTIVATION.keys(): + act = ACTIVATION[act] + else: + raise NotImplementedError + self.n_input = n_input + self.n_hidden = n_hidden + self.n_output = n_output + self.n_layers = n_layers + self.res = res + self.linear_pre = paddle.nn.Sequential( + paddle.nn.Linear(in_features=n_input, out_features=n_hidden), act() + ) + self.linear_post = paddle.nn.Linear(in_features=n_hidden, out_features=n_output) + self.linears = paddle.nn.LayerList( + sublayers=[ + paddle.nn.Sequential( + paddle.nn.Linear(in_features=n_hidden, out_features=n_hidden), act() + ) + for _ in range(n_layers) + ] + ) + + def forward(self, x): + x = self.linear_pre(x) + for i in range(self.n_layers): + if self.res: + x = self.linears[i](x) + x + else: + x = self.linears[i](x) + x = self.linear_post(x) + return x + + +class Transolver_block(paddle.nn.Layer): + """Transformer encoder block.""" + + def __init__( + self, + num_heads: int, + hidden_dim: int, + dropout: float, + act="gelu", + mlp_ratio=4, + last_layer=False, + out_dim=1, + slice_num=32, + ): + super().__init__() + self.last_layer = last_layer + self.ln_1 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) + self.Attn = Physics_Attention_1D( + hidden_dim, + heads=num_heads, + dim_head=hidden_dim // num_heads, + dropout=dropout, + slice_num=slice_num, + ) + self.ln_2 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) + self.mlp = MLP( + hidden_dim, + hidden_dim * mlp_ratio, + hidden_dim, + n_layers=0, + res=False, + act=act, + ) + if self.last_layer: + self.ln_3 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) + self.mlp2 = paddle.nn.Linear(in_features=hidden_dim, out_features=out_dim) + + def forward(self, fx): + fx = self.Attn(self.ln_1(fx)) + fx + fx = self.mlp(self.ln_2(fx)) + fx + if self.last_layer: + return self.mlp2(self.ln_3(fx)) + else: + return fx + + +class Model(paddle.nn.Layer): + def __init__( + self, + space_dim=1, + n_layers=5, + n_hidden=256, + dropout=0, + n_head=8, + act="gelu", + mlp_ratio=1, + fun_dim=1, + out_dim=1, + slice_num=32, + ref=8, + n_iter=1, + unified_pos=False, + ): + super(Model, self).__init__() + self.__name__ = "UniPDE_3D" + self.ref = ref + self.n_layers = n_layers + self.n_iter = n_iter + self.unified_pos = unified_pos + if self.unified_pos: + self.preprocess = MLP( + fun_dim + self.ref * self.ref * self.ref, + n_hidden * 2, + n_hidden, + n_layers=0, + res=False, + act=act, + ) + else: + self.preprocess = MLP( + fun_dim + space_dim, + n_hidden * 2, + n_hidden, + n_layers=0, + res=False, + act=act, + ) + self.n_hidden = n_hidden + self.space_dim = space_dim + self.blocks = paddle.nn.LayerList( + sublayers=[ + Transolver_block( + num_heads=n_head, + hidden_dim=n_hidden, + dropout=dropout, + act=act, + mlp_ratio=mlp_ratio, + out_dim=out_dim, + slice_num=slice_num, + last_layer=_ == n_layers - 1, + ) + for _ in range(n_layers) + ] + ) + self.initialize_weights() + param = 1 / n_hidden * paddle.rand(shape=(n_hidden,), dtype="float32") + out_1 = paddle.create_parameter( + shape=param.shape, + 
dtype=param.numpy().dtype, + default_initializer=paddle.nn.initializer.Assign(param), + ) + out_1.stop_gradient = not True + self.placeholder = out_1 + + def initialize_weights(self): + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, paddle.nn.Linear): + m.weight = ppsci.utils.initializer.trunc_normal_(m.weight, std=0.02) + if isinstance(m, paddle.nn.Linear) and m.bias is not None: + init_Constant = paddle.nn.initializer.Constant(value=0) + init_Constant(m.bias) + elif isinstance(m, (paddle.nn.LayerNorm, paddle.nn.BatchNorm1D)): + init_Constant = paddle.nn.initializer.Constant(value=0) + init_Constant(m.bias) + init_Constant = paddle.nn.initializer.Constant(value=1.0) + init_Constant(m.weight) + + def get_grid(self, my_pos): + batchsize = tuple(my_pos.shape)[0] + gridx = paddle.to_tensor(data=np.linspace(-1.5, 1.5, self.ref), dtype="float32") + gridx = gridx.reshape(1, self.ref, 1, 1, 1).repeat( + [batchsize, 1, self.ref, self.ref, 1] + ) + gridy = paddle.to_tensor(data=np.linspace(0, 2, self.ref), dtype="float32") + gridy = gridy.reshape(1, 1, self.ref, 1, 1).repeat( + [batchsize, self.ref, 1, self.ref, 1] + ) + gridz = paddle.to_tensor(data=np.linspace(-4, 4, self.ref), dtype="float32") + gridz = gridz.reshape(1, 1, 1, self.ref, 1).repeat( + [batchsize, self.ref, self.ref, 1, 1] + ) + grid_ref = ( + paddle.concat(x=(gridx, gridy, gridz), axis=-1) + .cuda(blocking=True) + .reshape(batchsize, self.ref**3, 3) + ) + pos = paddle.sqrt( + x=paddle.sum( + x=(my_pos[:, :, None, :] - grid_ref[:, None, :, :]) ** 2, axis=-1 + ) + ).reshape(batchsize, tuple(my_pos.shape)[1], self.ref * self.ref * self.ref) + return pos + + def forward(self, x): + fx = self.preprocess(x) + fx = fx + self.placeholder[None, None, :] + for _ in range(self.n_iter): + for i in range(self.n_layers - 1): + fx = self.blocks[i](fx) + fx = self.blocks[-1](fx) + return fx diff --git a/jointContribution/IJCAI_2024/tenfeng/download_dataset.ipynb b/jointContribution/IJCAI_2024/tenfeng/download_dataset.ipynb index bfe5fa6aef..32a550ffc7 100644 --- a/jointContribution/IJCAI_2024/tenfeng/download_dataset.ipynb +++ b/jointContribution/IJCAI_2024/tenfeng/download_dataset.ipynb @@ -1 +1 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"kBRw5QHhBkax"},"source":["# 数据集导入(预制链接)"]},{"cell_type":"markdown","metadata":{"id":"DUxmPjWWV1sr"},"source":["# 额外数据导入\n","(此处导入权重文件和额外数据集,在此之外的导入将有被判违规的风险,这里以导入随机生成的Track C的A榜样例提交的zip为例子)"]},{"cell_type":"code","execution_count":null,"metadata":{"collapsed":true,"id":"H8YjMlYcYmei"},"outputs":[],"source":["# !pip install gdown\n","# import gdown\n","# !wget -P /content/train_track_B_e/press/ --user-agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\" https://drive.google.com/uc?export=download&id=1fED8VAaC8QyL2AUCCAaq39WFLh7PanNH\n","# !wget --quiet --save-cookies cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1fED8VAaC8QyL2AUCCAaq39WFLh7PanNH' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p'\n","# !wget --load-cookies cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1fED8VAaC8QyL2AUCCAaq39WFLh7PanNH' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1fED8VAaC8QyL2AUCCAaq39WFLh7PanNH\" -O press.zip && rm -rf /tmp/cookies.txt\n","# --header=\"Host: 
drive.usercontent.google.com\"\n","# --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36\"\n","# --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh,zh-CN;q=0.9,en;q=0.8\" --header=\"Cookie: AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL:billing-ui-v3-efe=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL; __Secure-1PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; NID=515=NM4LfLVBoYJ9i81Kc-NBiwctEbhMRSl5wmR6n307UZGSzQGEMmfjGt37rGvWmr0kpME0asDPCVyOzRzWOQAE6tAeXOUlI-t456WBIvImfRgmWWMOsHrakcWQkh6tJpukjL302wlmwg58aKKzGf0A9FLjP_8ICtkhzJyPGNP9-UrXbObciHjanJe9QQ7HUQWK6o-ykNOyEMQdYRUyxuAiUNGs-J8CYs_FMGoehXTFhvCDRwk6BJc5fAFm7UJP6rqztlNPRsJwt04Zd3Gvhd_flNhUeLCSR8ZHFzeAbO4805TE9uuSCcny3CLGv9zsEsfbDtZEAGfDY8EzT_xm7jr7jzOts146k2NvdI2RvHJww81i_fz7uI494PwzXUgAKH4yM1ja9fZjwApQVPs0y17j-DzJoToC50bYt7DR4HpQK6on2AOu4q16ve9wyP0RDDNHfP9USVLb36gOCqfNU9At6EqTpXSJntuJOdCrF0IkO324uDeByiXpRDeDWTuFOuaD48lp0Gj91REUikYIEgsSL1De2Zrml_UCEYJrYUaRYvwZBkGpBJeb9rCVp-oipGOkSZ3ATKaIMOpviqPQEefRiK5xPf91m3eFcT4u-kRUuCIYmlZaP_fXXFPh459B6eYmhRwSAPwSZe1BczwQy9fAo6R_ZxrZKR-QHQvnJ9BOsuIvLNdygZCVWAdJji1wQp4BPxyRpHI1zwblH7vT7MDk_nQ6dcFnK-JNQM-ss4v8LQ_t7zzwg-cAOkNpek9wGSaLxAx9un2nY9bJpP9xkT5FrJjQfpQ7631XXdZP9YrTfJeM30_Cwk8HjeCa0A14Y7e3sDlCQFSEWKjsgvVKtZQPVU-dT4qy2hZku2m6xn6bqDCJ4S51qavae8ciYHJ5u06-kxoH9H2sPBkGHmNxl4uqa4E4LNG2cmK0OP0hSC_lIvPL1jMlTN9NPoI17U6Pk6z4uEa-rwZGaBc3tw0xCgZ5aNw9LHniHxefjXWFyEYuOXbtItfKYwe3KlBYHyPjX1WFvOifr99tnvKwwR-LFmTJ0_J6HzsUfXCQ6hBXfr7inmrRn8BBin-4NLpRKKUJTrE615Ltf7Rt_duO7XtInAeASNce9hyN3aB2sFD5gmEiHlXAxO1oEiuYbmBCBE3p-5pqJ994EzbTJd3_QQ; SIDCC=AKEyXzWUXvaLxcEWoUr7xDsHNqI9YlS2B0CD4n3sQW20AEbXjfwUamDojGOHRk_EUx-eeHbEAw; __Secure-1PSIDCC=AKEyXzWnzBM9s4vVnoJOnyQ6Am5ICp50PfclBvvvp2ftfri0PlYsvEF8X56Y-xoGbSgl-p7lIA; __Secure-3PSIDCC=AKEyXzWnlf6Wa8I0Hck8U3WeLdJ1nbz2W9cNUupMH0BW6wH57dNls4LLysU7cghw22IvvZKdIG8\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Cookie: __Secure-ENID=12.SE=Yd0Bj-CLJ14fnd4qzdJHmwUs4B5zz46UaPC1cPJigNqqFV9PtM2CYyBpSbCkOyzUwzlEdZ1nZFf-igtGi7wSdJ_gqQSfQfh84r9egqFQAy9-GKayCRbdQKdera-2mkpuIT-c64CyR9vfNojM3hxZ9Dej-dGvtxlGjal9ttEHybw; __gsas=ID=ae0421b9a34b478c:T=1710758437:RT=1710758437:S=ALNI_MZP13R9ZOHbCzC0rgHSMrGXj6GCsg; HSID=A-4I-ZudDNUIB6EKH; SSID=A7v_1v9un6xAwVNku; 
APISID=ctK8IbLjeuDUmgys/AFnMSLWt9KddceDI6; SAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-1PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-3PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; SID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_kzuBV1TvOhAIC8VF1e9fpgACgYKATQSARQSFQHGX2Mi8LXUwWoIwNCEPU8Sy3mXUxoVAUF8yKqGXVfjTGz9gQal7nwGr4Pl0076; __Secure-1PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_PDa-DzVmbdGFPyxMQpk9_QACgYKAewSARQSFQHGX2MiAeee4fn0OWglWZfAygqkyBoVAUF8yKp-Sfmtnueimxc-0QbJRF9I0076; __Secure-3PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_g9IrMeU98APBo9Stp6wEnAACgYKAQASARQSFQHGX2MiFWtc9ucONXnpxBzlRdudEhoVAUF8yKoeZwCpJDnjfAFjGssHSUGm0076; NID=515=GQhY9nKKFCx3qFDjE0MA4ubjWNdef6xCIY_RfWOPWKEtyfBN3nAUl8WHI2VczjNQ4rVkj1XBAY8WNWHXyqSK10CfT4FxsFlPzrHIJpeTtm1nWRNBd9AAfBKJHz4XpESszntVUTE_59RklZuKo0vk1poReVi2da1PZKC3CTKH2Ll3gB5xuB9wf4bmq8ylVUuIROPJczr0XnCuUHV3qLdBvgy9_870b6UwOq1iOlIxFQFm01EZ4pqF4q1Ub3QRSWpEMLh4LSZFpJ5O255R5OV7krmEdDvH_sHoTEPZAg2PoEpwAyGK6Xp9qcLIlldgx5-5V86N8Wtb93uTlQuA_CFXb5_2eP3bgeX8txwlJ5SrldVjg9ctzYtBU2RwJKTSvdHfIG7lpOkg6XlkvDOcJpR3DihT_OlqnPn7drCAJpvVDv29hZn5XPMXaSrNdbG64OJ9urJEw5odEwsLYkkpC1vmlUcuoo52S5f6RQu0Z8kZiV8iRW6XIqHsSmQHunVaxk6xWCStUg; __Secure-1PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; __Secure-3PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; SIDCC=AKEyXzVI6aMX8lSDja86Yts3FBAtBzPCzVNgaX5BCz78NWsWzlT3yFWKUV7ZE46SFzE1GiBI-cHdTw; __Secure-1PSIDCC=AKEyXzUo4NQAwqqPMxP2eye-MFEbZmBIm_sZqRU1amttg0YoQkc8ZKSNXdHl5jNCMEbhrUHhS9-K; __Secure-3PSIDCC=AKEyXzWf2lIdmDLeZKpXSi9GytVQb6XudrYiNUBA5gW952YuLh8kL6T3IbBlu8zOTfGEcdUp5O1R\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1W0jmrRX11DnRqU4QvGMlWFSlcItyeh0R&export=download&authuser=0&confirm=t&uuid=615334ea-9569-476f-8bb4-727516c76591&at=APZUnTWDW32examprS0NvVx3_v7o%3A1720881303406\"\n","# !wget --header=\"Host: drive.usercontent.google.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36\" --header=\"Cookie: AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL:billing-ui-v3-efe=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL; __Secure-1PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; 
NID=515=NM4LfLVBoYJ9i81Kc-NBiwctEbhMRSl5wmR6n307UZGSzQGEMmfjGt37rGvWmr0kpME0asDPCVyOzRzWOQAE6tAeXOUlI-t456WBIvImfRgmWWMOsHrakcWQkh6tJpukjL302wlmwg58aKKzGf0A9FLjP_8ICtkhzJyPGNP9-UrXbObciHjanJe9QQ7HUQWK6o-ykNOyEMQdYRUyxuAiUNGs-J8CYs_FMGoehXTFhvCDRwk6BJc5fAFm7UJP6rqztlNPRsJwt04Zd3Gvhd_flNhUeLCSR8ZHFzeAbO4805TE9uuSCcny3CLGv9zsEsfbDtZEAGfDY8EzT_xm7jr7jzOts146k2NvdI2RvHJww81i_fz7uI494PwzXUgAKH4yM1ja9fZjwApQVPs0y17j-DzJoToC50bYt7DR4HpQK6on2AOu4q16ve9wyP0RDDNHfP9USVLb36gOCqfNU9At6EqTpXSJntuJOdCrF0IkO324uDeByiXpRDeDWTuFOuaD48lp0Gj91REUikYIEgsSL1De2Zrml_UCEYJrYUaRYvwZBkGpBJeb9rCVp-oipGOkSZ3ATKaIMOpviqPQEefRiK5xPf91m3eFcT4u-kRUuCIYmlZaP_fXXFPh459B6eYmhRwSAPwSZe1BczwQy9fAo6R_ZxrZKR-QHQvnJ9BOsuIvLNdygZCVWAdJji1wQp4BPxyRpHI1zwblH7vT7MDk_nQ6dcFnK-JNQM-ss4v8LQ_t7zzwg-cAOkNpek9wGSaLxAx9un2nY9bJpP9xkT5FrJjQfpQ7631XXdZP9YrTfJeM30_Cwk8HjeCa0A14Y7e3sDlCQFSEWKjsgvVKtZQPVU-dT4qy2hZku2m6xn6bqDCJ4S51qavae8ciYHJ5u06-kxoH9H2sPBkGHmNxl4uqa4E4LNG2cmK0OP0hSC_lIvPL1jMlTN9NPoI17U6Pk6z4uEa-rwZGaBc3tw0xCgZ5aNw9LHniHxefjXWFyEYuOXbtItfKYwe3KlBYHyPjX1WFvOifr99tnvKwwR-LFmTJ0_J6HzsUfXCQ6hBXfr7inmrRn8BBin-4NLpRKKUJTrE615Ltf7Rt_duO7XtInAeASNce9hyN3aB2sFD5gmEiHlXAxO1oEiuYbmBCBE3p-5pqJ994EzbTJd3_QQ; SIDCC=AKEyXzWUXvaLxcEWoUr7xDsHNqI9YlS2B0CD4n3sQW20AEbXjfwUamDojGOHRk_EUx-eeHbEAw; __Secure-1PSIDCC=AKEyXzWnzBM9s4vVnoJOnyQ6Am5ICp50PfclBvvvp2ftfri0PlYsvEF8X56Y-xoGbSgl-p7lIA; __Secure-3PSIDCC=AKEyXzWnlf6Wa8I0Hck8U3WeLdJ1nbz2W9cNUupMH0BW6wH57dNls4LLysU7cghw22IvvZKdIG8\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1W0jmrRX11DnRqU4QvGMlWFSlcItyeh0R&export=download&authuser=0&confirm=t&uuid=23a03d1a-abc8-42c8-b2e9-ac15544ce09b&at=APZUnTUcKjp9S2N1MeUHjjEMQv8Z%3A1720882064537\" -c -O 'centroid_3.zip'\n","\n","# !wget --header=\"Host: drive.usercontent.google.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh,zh-CN;q=0.9,en;q=0.8\" --header=\"Cookie: AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL:billing-ui-v3-efe=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL; __Secure-1PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; 
NID=515=NM4LfLVBoYJ9i81Kc-NBiwctEbhMRSl5wmR6n307UZGSzQGEMmfjGt37rGvWmr0kpME0asDPCVyOzRzWOQAE6tAeXOUlI-t456WBIvImfRgmWWMOsHrakcWQkh6tJpukjL302wlmwg58aKKzGf0A9FLjP_8ICtkhzJyPGNP9-UrXbObciHjanJe9QQ7HUQWK6o-ykNOyEMQdYRUyxuAiUNGs-J8CYs_FMGoehXTFhvCDRwk6BJc5fAFm7UJP6rqztlNPRsJwt04Zd3Gvhd_flNhUeLCSR8ZHFzeAbO4805TE9uuSCcny3CLGv9zsEsfbDtZEAGfDY8EzT_xm7jr7jzOts146k2NvdI2RvHJww81i_fz7uI494PwzXUgAKH4yM1ja9fZjwApQVPs0y17j-DzJoToC50bYt7DR4HpQK6on2AOu4q16ve9wyP0RDDNHfP9USVLb36gOCqfNU9At6EqTpXSJntuJOdCrF0IkO324uDeByiXpRDeDWTuFOuaD48lp0Gj91REUikYIEgsSL1De2Zrml_UCEYJrYUaRYvwZBkGpBJeb9rCVp-oipGOkSZ3ATKaIMOpviqPQEefRiK5xPf91m3eFcT4u-kRUuCIYmlZaP_fXXFPh459B6eYmhRwSAPwSZe1BczwQy9fAo6R_ZxrZKR-QHQvnJ9BOsuIvLNdygZCVWAdJji1wQp4BPxyRpHI1zwblH7vT7MDk_nQ6dcFnK-JNQM-ss4v8LQ_t7zzwg-cAOkNpek9wGSaLxAx9un2nY9bJpP9xkT5FrJjQfpQ7631XXdZP9YrTfJeM30_Cwk8HjeCa0A14Y7e3sDlCQFSEWKjsgvVKtZQPVU-dT4qy2hZku2m6xn6bqDCJ4S51qavae8ciYHJ5u06-kxoH9H2sPBkGHmNxl4uqa4E4LNG2cmK0OP0hSC_lIvPL1jMlTN9NPoI17U6Pk6z4uEa-rwZGaBc3tw0xCgZ5aNw9LHniHxefjXWFyEYuOXbtItfKYwe3KlBYHyPjX1WFvOifr99tnvKwwR-LFmTJ0_J6HzsUfXCQ6hBXfr7inmrRn8BBin-4NLpRKKUJTrE615Ltf7Rt_duO7XtInAeASNce9hyN3aB2sFD5gmEiHlXAxO1oEiuYbmBCBE3p-5pqJ994EzbTJd3_QQ; SIDCC=AKEyXzWUXvaLxcEWoUr7xDsHNqI9YlS2B0CD4n3sQW20AEbXjfwUamDojGOHRk_EUx-eeHbEAw; __Secure-1PSIDCC=AKEyXzWnzBM9s4vVnoJOnyQ6Am5ICp50PfclBvvvp2ftfri0PlYsvEF8X56Y-xoGbSgl-p7lIA; __Secure-3PSIDCC=AKEyXzWnlf6Wa8I0Hck8U3WeLdJ1nbz2W9cNUupMH0BW6wH57dNls4LLysU7cghw22IvvZKdIG8\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Cookie: __Secure-ENID=12.SE=Yd0Bj-CLJ14fnd4qzdJHmwUs4B5zz46UaPC1cPJigNqqFV9PtM2CYyBpSbCkOyzUwzlEdZ1nZFf-igtGi7wSdJ_gqQSfQfh84r9egqFQAy9-GKayCRbdQKdera-2mkpuIT-c64CyR9vfNojM3hxZ9Dej-dGvtxlGjal9ttEHybw; __gsas=ID=ae0421b9a34b478c:T=1710758437:RT=1710758437:S=ALNI_MZP13R9ZOHbCzC0rgHSMrGXj6GCsg; HSID=A-4I-ZudDNUIB6EKH; SSID=A7v_1v9un6xAwVNku; APISID=ctK8IbLjeuDUmgys/AFnMSLWt9KddceDI6; SAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-1PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-3PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; SID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_kzuBV1TvOhAIC8VF1e9fpgACgYKATQSARQSFQHGX2Mi8LXUwWoIwNCEPU8Sy3mXUxoVAUF8yKqGXVfjTGz9gQal7nwGr4Pl0076; __Secure-1PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_PDa-DzVmbdGFPyxMQpk9_QACgYKAewSARQSFQHGX2MiAeee4fn0OWglWZfAygqkyBoVAUF8yKp-Sfmtnueimxc-0QbJRF9I0076; __Secure-3PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_g9IrMeU98APBo9Stp6wEnAACgYKAQASARQSFQHGX2MiFWtc9ucONXnpxBzlRdudEhoVAUF8yKoeZwCpJDnjfAFjGssHSUGm0076; NID=515=GQhY9nKKFCx3qFDjE0MA4ubjWNdef6xCIY_RfWOPWKEtyfBN3nAUl8WHI2VczjNQ4rVkj1XBAY8WNWHXyqSK10CfT4FxsFlPzrHIJpeTtm1nWRNBd9AAfBKJHz4XpESszntVUTE_59RklZuKo0vk1poReVi2da1PZKC3CTKH2Ll3gB5xuB9wf4bmq8ylVUuIROPJczr0XnCuUHV3qLdBvgy9_870b6UwOq1iOlIxFQFm01EZ4pqF4q1Ub3QRSWpEMLh4LSZFpJ5O255R5OV7krmEdDvH_sHoTEPZAg2PoEpwAyGK6Xp9qcLIlldgx5-5V86N8Wtb93uTlQuA_CFXb5_2eP3bgeX8txwlJ5SrldVjg9ctzYtBU2RwJKTSvdHfIG7lpOkg6XlkvDOcJpR3DihT_OlqnPn7drCAJpvVDv29hZn5XPMXaSrNdbG64OJ9urJEw5odEwsLYkkpC1vmlUcuoo52S5f6RQu0Z8kZiV8iRW6XIqHsSmQHunVaxk6xWCStUg; __Secure-1PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; __Secure-3PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; SIDCC=AKEyXzVI6aMX8lSDja86Yts3FBAtBzPCzVNgaX5BCz78NWsWzlT3yFWKUV7ZE46SFzE1GiBI-cHdTw; 
__Secure-1PSIDCC=AKEyXzUo4NQAwqqPMxP2eye-MFEbZmBIm_sZqRU1amttg0YoQkc8ZKSNXdHl5jNCMEbhrUHhS9-K; __Secure-3PSIDCC=AKEyXzWf2lIdmDLeZKpXSi9GytVQb6XudrYiNUBA5gW952YuLh8kL6T3IbBlu8zOTfGEcdUp5O1R\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1W0jmrRX11DnRqU4QvGMlWFSlcItyeh0R&export=download&authuser=0&confirm=t&uuid=615334ea-9569-476f-8bb4-727516c76591&at=APZUnTWDW32examprS0NvVx3_v7o%3A1720881303406\" -c -O 'centroid_3.zip'"]},{"cell_type":"markdown","metadata":{"id":"wDM42uay7X8A"},"source":["赛道二额外数据来源于:https://github.com/Mohamedelrefaie/DrivAerNet\n","\n","赛道二额外数据的论文:Elrefaie, Mohamed, Angela Dai, and Faez Ahmed. \"Drivaernet: A parametric car dataset for data-driven aerodynamic design and graph-based drag prediction.\" arXiv preprint arXiv:2403.08055 (2024).\n","\n","**额外数据排除测试集:额外数据中仅使用了id>745的数据,未使用比赛测试数据,共计3711条数据。**"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":159215,"status":"ok","timestamp":1721011960293,"user":{"displayName":"刘野","userId":"06353679841549898578"},"user_tz":-480},"id":"qrLUfXxxKrTW","outputId":"18f9e7db-1f98-4ef7-98e8-536902231445"},"outputs":[{"name":"stdout","output_type":"stream","text":[" % Total % Received % Xferd Average Speed Time Time Time Current\n"," Dload Upload Total Spent Left Speed\n"," 78 15.7G 78 12.2G 0 0 79.6M 0 0:03:22 0:02:37 0:00:45 54.1M^C\n"]}],"source":["!curl 'https://drive.usercontent.google.com/download?id=1zEL0a_o0DBuSzRncgtxtkrFCPiegZqJd&export=download&authuser=1&confirm=t&uuid=f4cd6774-28c7-4899-beb1-e9271a130f19&at=APZUnTWt_RC0KHds95ggRJczRF9J:1721011736859' \\\n"," -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n"," -H 'accept-language: zh,zh-CN;q=0.9,en;q=0.8' \\\n"," -H 'cookie: SEARCH_SAMESITE=CgQIzJsB; AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL:billing-ui-v3-efe=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL; 
NID=515=f7DxlXMHGv5snCa0o2Rtf5PaclKJPFPfs4bzQYWoP6-12yc61N_aaDD1dxrg_RZ9gAo4sd2DtQX3LjE_zIJmfycvHEAT8y_NDwj97mJ9RoEpHPflVyjkLedO5HixxuVPESgUY6RJSJagWgWEz4I3z_u4YHnGNh97LoYHEhIPEaWKiJPOu_LbcJH0W0JypqoUI-Wq3fnXMbkdQDYn8jYfmvwX1E64Tm4J_0TL2p9SontzJU7zHyD0AqMmsy_8bGF_tPSIzWDaoUQe_jhJBEfZTMObdKKsWH1tONlvFtvXOH9PR2K9ELhsj76I2rxRIkyj9a2J68rvfn2pWFl87DttRagRQo4OB3iBbEW5E73M3NdqHhDNy-GA9Gxee0gDTcXrESwdv_GzNC8fzH-4z5-aHCT3iAhIndphEwLJb-hXmjt4TG2yHdptS4WeE1cHT3QYuclxlMU_CF6-jK3FmwZDjuwTliqzqlOw9TdAbJuG66v_PdRLwgp3CYPAQjyMoPQRS2XXXZj-JclCNcB7cRX445gUTm9IojyQjy5GJHxxXB6jr-QNNWggOf8Ox-EtjMLKzsFqTkcJD3YQ6Bm-KPwGRIrgYi4MXAfDBDWQ4LkU1N5GcdY5dFYa7n5AaQQK98QlIfj08iluZtuntxfAHFA4RnOCPhM_ve3CYtmVa6ZeMcSOgAepO-oYLB_IgVzmWcPJIaAMBcwaeN4TSA5-zbgOjdSGb5QX9Oitq8pNFTDgXSlu_yQUQCZueGbkCuh0G1D8Tdxm1U_1x5bG44VQzp5dO1csLR9YJtShx_VDaQi2Oo1znXNzq-cRNIOIW1tnEEsaHSBjZmRNKLFK92xE93upumeeAqFg8IW3dCT42IHkBA7U8mZw5zjz35suCSX8opjaVG3h776NSj4-hq3217g1jGMPDcHkd1Ruzi-8-yfh5slwmkxyO4a-6dlU77smmhjZRhZYFVdkvzkld6D4yFr1ZwCujT3sJjS1YKwBzgGYwp_L-n5-fb8wKD5GwVrgelLMoMq4ZW_iWg8wbLXCW_CnBgsD0GDp97_y1BhjQ0ISYAefkgRudMnxPwGhLvvByzC_2TAKfOHpDscOnIQZlk9vAImBJQ_BwkjV2qvQW3xWNrDskRyQ7sp5zgZV8ZBOKUvs0RzQuk-9acLl9Q; __Secure-1PSIDTS=sidts-CjEB4E2dkSar0V5K52eCTBzJJ_9xOGD9L_GO2fYUQ-D53J-yUCJkpPUipO1K2pk8KDmYEAA; __Secure-3PSIDTS=sidts-CjEB4E2dkSar0V5K52eCTBzJJ_9xOGD9L_GO2fYUQ-D53J-yUCJkpPUipO1K2pk8KDmYEAA; SIDCC=AKEyXzVVd4bq-8eK0Ht4kPhv4m8AlVIgPDd2FxW7cenVq0oPzrD2ov4oNHOWlvosWvN-QLt4VQ; __Secure-1PSIDCC=AKEyXzXh9XzrRW3khhZ7nNG7s_-FttVPwYk5oVse3vTT2fCoSQkJgDrY7vazg1j8O1omAKPbSw; __Secure-3PSIDCC=AKEyXzVEYSAsIpUVMfnPqWxZnGDSvp-dEK5Dep35xn6oogrNNN_IskFB3yPDmaHFyfP2aMl247E' \\\n"," -H 'priority: u=0, i' \\\n"," -H 'sec-ch-ua: \"Google Chrome\";v=\"125\", \"Chromium\";v=\"125\", \"Not.A/Brand\";v=\"24\"' \\\n"," -H 'sec-ch-ua-mobile: ?0' \\\n"," -H 'sec-ch-ua-platform: \"Windows\"' \\\n"," -H 'sec-fetch-dest: iframe' \\\n"," -H 'sec-fetch-mode: navigate' \\\n"," -H 'sec-fetch-site: same-site' \\\n"," -H 'upgrade-insecure-requests: 1' \\\n"," -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36' \\\n"," -H 'x-client-data: CKy1yQEIlbbJAQiitskBCKmdygEItoHLAQiSocsBCIWgzQEIuMjNAQimis4BCOKTzgEI6JPOAQjum84BCJWdzgEIxZ3OAQiyn84BGPXJzQEY1+vNARihnc4B' -o centroid.zip\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":74051,"status":"ok","timestamp":1720931143410,"user":{"displayName":"刘野","userId":"06353679841549898578"},"user_tz":-480},"id":"cQJI_BcgNccz","outputId":"92c47a16-9c33-43a7-d466-03ee07949baf"},"outputs":[{"name":"stdout","output_type":"stream","text":[" % Total % Received % Xferd Average Speed Time Time Time Current\n"," Dload Upload Total Spent Left Speed\n","100 5534M 100 5534M 0 0 74.6M 0 0:01:14 0:01:14 --:--:-- 83.3M\n"]}],"source":["\n","!curl 'https://drive.usercontent.google.com/download?id=1kplQ1qGMI8toknT74vQZGdmdWxA71SiL&export=download&authuser=1&confirm=t&uuid=5ad4d34a-c765-4ab9-bc70-0696d277d61e&at=APZUnTUmDANwNPtvdqkZOT1dXioc:1721011901290' \\\n"," -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n"," -H 'accept-language: zh,zh-CN;q=0.9,en;q=0.8' \\\n"," -H 'cookie: SEARCH_SAMESITE=CgQIzJsB; AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; 
SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL:billing-ui-v3-efe=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL; NID=515=f7DxlXMHGv5snCa0o2Rtf5PaclKJPFPfs4bzQYWoP6-12yc61N_aaDD1dxrg_RZ9gAo4sd2DtQX3LjE_zIJmfycvHEAT8y_NDwj97mJ9RoEpHPflVyjkLedO5HixxuVPESgUY6RJSJagWgWEz4I3z_u4YHnGNh97LoYHEhIPEaWKiJPOu_LbcJH0W0JypqoUI-Wq3fnXMbkdQDYn8jYfmvwX1E64Tm4J_0TL2p9SontzJU7zHyD0AqMmsy_8bGF_tPSIzWDaoUQe_jhJBEfZTMObdKKsWH1tONlvFtvXOH9PR2K9ELhsj76I2rxRIkyj9a2J68rvfn2pWFl87DttRagRQo4OB3iBbEW5E73M3NdqHhDNy-GA9Gxee0gDTcXrESwdv_GzNC8fzH-4z5-aHCT3iAhIndphEwLJb-hXmjt4TG2yHdptS4WeE1cHT3QYuclxlMU_CF6-jK3FmwZDjuwTliqzqlOw9TdAbJuG66v_PdRLwgp3CYPAQjyMoPQRS2XXXZj-JclCNcB7cRX445gUTm9IojyQjy5GJHxxXB6jr-QNNWggOf8Ox-EtjMLKzsFqTkcJD3YQ6Bm-KPwGRIrgYi4MXAfDBDWQ4LkU1N5GcdY5dFYa7n5AaQQK98QlIfj08iluZtuntxfAHFA4RnOCPhM_ve3CYtmVa6ZeMcSOgAepO-oYLB_IgVzmWcPJIaAMBcwaeN4TSA5-zbgOjdSGb5QX9Oitq8pNFTDgXSlu_yQUQCZueGbkCuh0G1D8Tdxm1U_1x5bG44VQzp5dO1csLR9YJtShx_VDaQi2Oo1znXNzq-cRNIOIW1tnEEsaHSBjZmRNKLFK92xE93upumeeAqFg8IW3dCT42IHkBA7U8mZw5zjz35suCSX8opjaVG3h776NSj4-hq3217g1jGMPDcHkd1Ruzi-8-yfh5slwmkxyO4a-6dlU77smmhjZRhZYFVdkvzkld6D4yFr1ZwCujT3sJjS1YKwBzgGYwp_L-n5-fb8wKD5GwVrgelLMoMq4ZW_iWg8wbLXCW_CnBgsD0GDp97_y1BhjQ0ISYAefkgRudMnxPwGhLvvByzC_2TAKfOHpDscOnIQZlk9vAImBJQ_BwkjV2qvQW3xWNrDskRyQ7sp5zgZV8ZBOKUvs0RzQuk-9acLl9Q; __Secure-1PSIDTS=sidts-CjEB4E2dkcyeG-706DIirRL4ZT6d1Ey7zOxcpjH0gIMP-LGrv_2vD6ZYpCNB-zbi6FD1EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkcyeG-706DIirRL4ZT6d1Ey7zOxcpjH0gIMP-LGrv_2vD6ZYpCNB-zbi6FD1EAA; SIDCC=AKEyXzUO6ATMbyLDKf0JONH_EZ9q18dNAh4W2BnM-J4vWJXZIWn62ug9d3TdYBYHq_wZGYaQ9g; __Secure-1PSIDCC=AKEyXzVaBb1Cde3SKjz4O9dj3_TJYqgA6V0wzhjeI7tzYynz8DfGJqCyAlTI5wfk3BSO5jeg3g; __Secure-3PSIDCC=AKEyXzVhNQN1oY0uCd7QSXfCDNSJCAv920LjcKWTt-641d2O0ITi6ng5t_AaspWvj3D8kv_cG-8' \\\n"," -H 'priority: u=0, i' \\\n"," -H 'sec-ch-ua: \"Google Chrome\";v=\"125\", \"Chromium\";v=\"125\", \"Not.A/Brand\";v=\"24\"' \\\n"," -H 'sec-ch-ua-mobile: ?0' \\\n"," -H 'sec-ch-ua-platform: \"Windows\"' \\\n"," -H 'sec-fetch-dest: iframe' \\\n"," -H 'sec-fetch-mode: navigate' \\\n"," -H 'sec-fetch-site: same-site' \\\n"," -H 'upgrade-insecure-requests: 1' \\\n"," -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36' \\\n"," -H 'x-client-data: CKy1yQEIlbbJAQiitskBCKmdygEItoHLAQiSocsBCIWgzQEIuMjNAQimis4BCOKTzgEI6JPOAQjum84BCJWdzgEIxZ3OAQiyn84BGPXJzQEY1+vNARihnc4B' -o press.zip\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4302,"status":"ok","timestamp":1720931148267,"user":{"displayName":"刘野","userId":"06353679841549898578"},"user_tz":-480},"id":"zSkqKjB4NeQi","outputId":"56d831df-b746-42d4-ce2b-ba509c66a8d6"},"outputs":[{"name":"stdout","output_type":"stream","text":[" % Total % Received % 
Xferd Average Speed Time Time Time Current\n"," Dload Upload Total Spent Left Speed\n","100 312M 100 312M 0 0 72.0M 0 0:00:04 0:00:04 --:--:-- 100M\n"]}],"source":["\n","!curl 'https://drive.usercontent.google.com/download?id=1Pu9XSyRj47SFrmdmxLNxZMb8f3LK6oi2&export=download&authuser=1&confirm=t&uuid=03fc13af-efab-4e23-8505-b60212c68185&at=APZUnTUBVyyVybTwVQg3uylUsD5F:1721008361811' \\\n"," -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n"," -H 'accept-language: zh,zh-CN;q=0.9,en;q=0.8' \\\n"," -H 'cookie: SEARCH_SAMESITE=CgQIzJsB; AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL:billing-ui-v3-efe=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL; NID=515=f7DxlXMHGv5snCa0o2Rtf5PaclKJPFPfs4bzQYWoP6-12yc61N_aaDD1dxrg_RZ9gAo4sd2DtQX3LjE_zIJmfycvHEAT8y_NDwj97mJ9RoEpHPflVyjkLedO5HixxuVPESgUY6RJSJagWgWEz4I3z_u4YHnGNh97LoYHEhIPEaWKiJPOu_LbcJH0W0JypqoUI-Wq3fnXMbkdQDYn8jYfmvwX1E64Tm4J_0TL2p9SontzJU7zHyD0AqMmsy_8bGF_tPSIzWDaoUQe_jhJBEfZTMObdKKsWH1tONlvFtvXOH9PR2K9ELhsj76I2rxRIkyj9a2J68rvfn2pWFl87DttRagRQo4OB3iBbEW5E73M3NdqHhDNy-GA9Gxee0gDTcXrESwdv_GzNC8fzH-4z5-aHCT3iAhIndphEwLJb-hXmjt4TG2yHdptS4WeE1cHT3QYuclxlMU_CF6-jK3FmwZDjuwTliqzqlOw9TdAbJuG66v_PdRLwgp3CYPAQjyMoPQRS2XXXZj-JclCNcB7cRX445gUTm9IojyQjy5GJHxxXB6jr-QNNWggOf8Ox-EtjMLKzsFqTkcJD3YQ6Bm-KPwGRIrgYi4MXAfDBDWQ4LkU1N5GcdY5dFYa7n5AaQQK98QlIfj08iluZtuntxfAHFA4RnOCPhM_ve3CYtmVa6ZeMcSOgAepO-oYLB_IgVzmWcPJIaAMBcwaeN4TSA5-zbgOjdSGb5QX9Oitq8pNFTDgXSlu_yQUQCZueGbkCuh0G1D8Tdxm1U_1x5bG44VQzp5dO1csLR9YJtShx_VDaQi2Oo1znXNzq-cRNIOIW1tnEEsaHSBjZmRNKLFK92xE93upumeeAqFg8IW3dCT42IHkBA7U8mZw5zjz35suCSX8opjaVG3h776NSj4-hq3217g1jGMPDcHkd1Ruzi-8-yfh5slwmkxyO4a-6dlU77smmhjZRhZYFVdkvzkld6D4yFr1ZwCujT3sJjS1YKwBzgGYwp_L-n5-fb8wKD5GwVrgelLMoMq4ZW_iWg8wbLXCW_CnBgsD0GDp97_y1BhjQ0ISYAefkgRudMnxPwGhLvvByzC_2TAKfOHpDscOnIQZlk9vAImBJQ_BwkjV2qvQW3xWNrDskRyQ7sp5zgZV8ZBOKUvs0RzQuk-9acLl9Q; __Secure-1PSIDTS=sidts-CjEB4E2dkRl-e2cLhhwXDzDk-rzgd2xbWfhFsDhTMeF5dTlXdEZtgRRv9KxOg3aKbw94EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkRl-e2cLhhwXDzDk-rzgd2xbWfhFsDhTMeF5dTlXdEZtgRRv9KxOg3aKbw94EAA; SIDCC=AKEyXzUQGR2IEvc_8XXe0ihqfc_i1mjVc2ver1o3_qABcV9RkiYy9oq63ESbdypx118N_ffR3w; __Secure-1PSIDCC=AKEyXzUbVzm8IfocDFc9TkyP7h23tZ-QdwdtDuRlCuwZFq8GH-fcpnzNFi2mhTZJbGbiHzw26w; __Secure-3PSIDCC=AKEyXzX2kWbD1r-wN6CXPXeroB7juxTpokNrOsycIfomuT-E3grtkX0ktBuHs8ut788dbPuNMlg' \\\n"," -H 'priority: u=0, i' \\\n"," -H 'sec-ch-ua: \"Google Chrome\";v=\"125\", \"Chromium\";v=\"125\", \"Not.A/Brand\";v=\"24\"' \\\n"," -H 'sec-ch-ua-mobile: ?0' \\\n"," -H 'sec-ch-ua-platform: \"Windows\"' \\\n"," -H 'sec-fetch-dest: iframe' \\\n"," -H 'sec-fetch-mode: navigate' \\\n"," -H 'sec-fetch-site: same-site' \\\n"," -H 'upgrade-insecure-requests: 1' 
\\\n"," -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36' \\\n"," -H 'x-client-data: CKy1yQEIlbbJAQiitskBCKmdygEItoHLAQiSocsBCIWgzQEIuMjNAQimis4BCOKTzgEI6JPOAQjum84BCJWdzgEIxZ3OAQiyn84BGPXJzQEY1+vNARihnc4B' -o Testset_track_B.zip\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"KAa3QqklR_fu"},"outputs":[],"source":["\n","import zipfile\n","import os\n","\n","def list_and_unzip(zip_folder, extract_folder):\n"," # 确保解压目录存在\n"," if not os.path.exists(extract_folder):\n"," os.makedirs(extract_folder)\n","\n"," # 列出指定目录下所有文件和文件夹\n"," files = os.listdir(zip_folder)\n","\n"," # 过滤出.zip文件\n"," zip_files = [file for file in files if file.endswith('.zip')]\n","\n"," # 输出.zip文件列表\n"," print(\"Zip files found:\", zip_files)\n","\n"," # 逐一解压每个.zip文件\n"," for zip_file in zip_files:\n"," zip_path = os.path.join(zip_folder, zip_file)\n"," with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_folder)\n"," print(f\"Extracted {zip_file} to {extract_folder}\")\n","\n","\n","def unzip_file(zip_path, extract_to):\n"," \"\"\"\n"," 解压 ZIP 文件到指定目录。\n"," Args:\n"," zip_path (str): ZIP 文件的路径。\n"," extract_to (str): 文件解压的目标目录。\n"," \"\"\"\n"," # 确保解压目标目录存在\n"," if not os.path.exists(extract_to):\n"," os.makedirs(extract_to)\n","\n"," with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_to)\n"," print(f\"Files extracted to: {extract_to}\")\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":126225,"status":"ok","timestamp":1720931623482,"user":{"displayName":"刘野","userId":"06353679841549898578"},"user_tz":-480},"id":"YrH8j7XOSBsp","outputId":"c355cf90-a49e-470f-ed3a-a06b4a41fb44"},"outputs":[],"source":["\n","# centroid\n","# centroid_zip_folder = '/content/train_track_B_e/' # 替换为你的.zip文件所在目录\n","# centroid_folder = '/content/train_track_B_e/' # 替换为你希望解压到的目录\n","# list_and_unzip(centroid_zip_folder, centroid_folder)\n","\n","unzip_file('/centroid.zip','/Dataset/train_track_B_e/')\n","os.remove('/centroid.zip')\n","unzip_file('/press.zip','/Dataset/train_track_B_e/')\n","os.remove('/press.zip')\n","unzip_file('/Testset_track_B.zip','/Dataset/')\n","os.remove('/Testset_track_B.zip')\n","# os.remove('/content/train_track_B_e/centroid.zip')\n","# os.remove('/content/train_track_B_e/press.zip')"]}],"metadata":{"colab":{"collapsed_sections":["z0Sek0wtEs5n","kY81z-fCgPfK","PmlOGK6yPVGu"],"gpuType":"T4","provenance":[{"file_id":"1cqAJrxi3BXDeizZAYTIu0GJxkb-1HrNZ","timestamp":1721015572352}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0} +{"cells":[{"cell_type":"markdown","metadata":{"id":"kBRw5QHhBkax"},"source":["# 数据集导入(预制链接)"]},{"cell_type":"markdown","metadata":{"id":"DUxmPjWWV1sr"},"source":["# 额外数据导入\n","(此处导入权重文件和额外数据集,在此之外的导入将有被判违规的风险,这里以导入随机生成的Track C的A榜样例提交的zip为例子)"]},{"cell_type":"code","execution_count":null,"metadata":{"collapsed":true,"id":"H8YjMlYcYmei"},"outputs":[],"source":["# !pip install gdown\n","# import gdown\n","# !wget -P /content/train_track_B_e/press/ --user-agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36\" https://drive.google.com/uc?export=download&id=1fED8VAaC8QyL2AUCCAaq39WFLh7PanNH\n","# !wget --quiet --save-cookies cookies.txt --keep-session-cookies --no-check-certificate 
'https://docs.google.com/uc?export=download&id=1fED8VAaC8QyL2AUCCAaq39WFLh7PanNH' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p'\n","# !wget --load-cookies cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1fED8VAaC8QyL2AUCCAaq39WFLh7PanNH' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1fED8VAaC8QyL2AUCCAaq39WFLh7PanNH\" -O press.zip && rm -rf /tmp/cookies.txt\n","# --header=\"Host: drive.usercontent.google.com\"\n","# --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36\"\n","# --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh,zh-CN;q=0.9,en;q=0.8\" --header=\"Cookie: AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL:billing-ui-v3-efe=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL; __Secure-1PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; NID=515=NM4LfLVBoYJ9i81Kc-NBiwctEbhMRSl5wmR6n307UZGSzQGEMmfjGt37rGvWmr0kpME0asDPCVyOzRzWOQAE6tAeXOUlI-t456WBIvImfRgmWWMOsHrakcWQkh6tJpukjL302wlmwg58aKKzGf0A9FLjP_8ICtkhzJyPGNP9-UrXbObciHjanJe9QQ7HUQWK6o-ykNOyEMQdYRUyxuAiUNGs-J8CYs_FMGoehXTFhvCDRwk6BJc5fAFm7UJP6rqztlNPRsJwt04Zd3Gvhd_flNhUeLCSR8ZHFzeAbO4805TE9uuSCcny3CLGv9zsEsfbDtZEAGfDY8EzT_xm7jr7jzOts146k2NvdI2RvHJww81i_fz7uI494PwzXUgAKH4yM1ja9fZjwApQVPs0y17j-DzJoToC50bYt7DR4HpQK6on2AOu4q16ve9wyP0RDDNHfP9USVLb36gOCqfNU9At6EqTpXSJntuJOdCrF0IkO324uDeByiXpRDeDWTuFOuaD48lp0Gj91REUikYIEgsSL1De2Zrml_UCEYJrYUaRYvwZBkGpBJeb9rCVp-oipGOkSZ3ATKaIMOpviqPQEefRiK5xPf91m3eFcT4u-kRUuCIYmlZaP_fXXFPh459B6eYmhRwSAPwSZe1BczwQy9fAo6R_ZxrZKR-QHQvnJ9BOsuIvLNdygZCVWAdJji1wQp4BPxyRpHI1zwblH7vT7MDk_nQ6dcFnK-JNQM-ss4v8LQ_t7zzwg-cAOkNpek9wGSaLxAx9un2nY9bJpP9xkT5FrJjQfpQ7631XXdZP9YrTfJeM30_Cwk8HjeCa0A14Y7e3sDlCQFSEWKjsgvVKtZQPVU-dT4qy2hZku2m6xn6bqDCJ4S51qavae8ciYHJ5u06-kxoH9H2sPBkGHmNxl4uqa4E4LNG2cmK0OP0hSC_lIvPL1jMlTN9NPoI17U6Pk6z4uEa-rwZGaBc3tw0xCgZ5aNw9LHniHxefjXWFyEYuOXbtItfKYwe3KlBYHyPjX1WFvOifr99tnvKwwR-LFmTJ0_J6HzsUfXCQ6hBXfr7inmrRn8BBin-4NLpRKKUJTrE615Ltf7Rt_duO7XtInAeASNce9hyN3aB2sFD5gmEiHlXAxO1oEiuYbmBCBE3p-5pqJ994EzbTJd3_QQ; SIDCC=AKEyXzWUXvaLxcEWoUr7xDsHNqI9YlS2B0CD4n3sQW20AEbXjfwUamDojGOHRk_EUx-eeHbEAw; __Secure-1PSIDCC=AKEyXzWnzBM9s4vVnoJOnyQ6Am5ICp50PfclBvvvp2ftfri0PlYsvEF8X56Y-xoGbSgl-p7lIA; __Secure-3PSIDCC=AKEyXzWnlf6Wa8I0Hck8U3WeLdJ1nbz2W9cNUupMH0BW6wH57dNls4LLysU7cghw22IvvZKdIG8\" --header=\"Accept: 
text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Cookie: __Secure-ENID=12.SE=Yd0Bj-CLJ14fnd4qzdJHmwUs4B5zz46UaPC1cPJigNqqFV9PtM2CYyBpSbCkOyzUwzlEdZ1nZFf-igtGi7wSdJ_gqQSfQfh84r9egqFQAy9-GKayCRbdQKdera-2mkpuIT-c64CyR9vfNojM3hxZ9Dej-dGvtxlGjal9ttEHybw; __gsas=ID=ae0421b9a34b478c:T=1710758437:RT=1710758437:S=ALNI_MZP13R9ZOHbCzC0rgHSMrGXj6GCsg; HSID=A-4I-ZudDNUIB6EKH; SSID=A7v_1v9un6xAwVNku; APISID=ctK8IbLjeuDUmgys/AFnMSLWt9KddceDI6; SAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-1PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-3PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; SID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_kzuBV1TvOhAIC8VF1e9fpgACgYKATQSARQSFQHGX2Mi8LXUwWoIwNCEPU8Sy3mXUxoVAUF8yKqGXVfjTGz9gQal7nwGr4Pl0076; __Secure-1PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_PDa-DzVmbdGFPyxMQpk9_QACgYKAewSARQSFQHGX2MiAeee4fn0OWglWZfAygqkyBoVAUF8yKp-Sfmtnueimxc-0QbJRF9I0076; __Secure-3PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_g9IrMeU98APBo9Stp6wEnAACgYKAQASARQSFQHGX2MiFWtc9ucONXnpxBzlRdudEhoVAUF8yKoeZwCpJDnjfAFjGssHSUGm0076; NID=515=GQhY9nKKFCx3qFDjE0MA4ubjWNdef6xCIY_RfWOPWKEtyfBN3nAUl8WHI2VczjNQ4rVkj1XBAY8WNWHXyqSK10CfT4FxsFlPzrHIJpeTtm1nWRNBd9AAfBKJHz4XpESszntVUTE_59RklZuKo0vk1poReVi2da1PZKC3CTKH2Ll3gB5xuB9wf4bmq8ylVUuIROPJczr0XnCuUHV3qLdBvgy9_870b6UwOq1iOlIxFQFm01EZ4pqF4q1Ub3QRSWpEMLh4LSZFpJ5O255R5OV7krmEdDvH_sHoTEPZAg2PoEpwAyGK6Xp9qcLIlldgx5-5V86N8Wtb93uTlQuA_CFXb5_2eP3bgeX8txwlJ5SrldVjg9ctzYtBU2RwJKTSvdHfIG7lpOkg6XlkvDOcJpR3DihT_OlqnPn7drCAJpvVDv29hZn5XPMXaSrNdbG64OJ9urJEw5odEwsLYkkpC1vmlUcuoo52S5f6RQu0Z8kZiV8iRW6XIqHsSmQHunVaxk6xWCStUg; __Secure-1PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; __Secure-3PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; SIDCC=AKEyXzVI6aMX8lSDja86Yts3FBAtBzPCzVNgaX5BCz78NWsWzlT3yFWKUV7ZE46SFzE1GiBI-cHdTw; __Secure-1PSIDCC=AKEyXzUo4NQAwqqPMxP2eye-MFEbZmBIm_sZqRU1amttg0YoQkc8ZKSNXdHl5jNCMEbhrUHhS9-K; __Secure-3PSIDCC=AKEyXzWf2lIdmDLeZKpXSi9GytVQb6XudrYiNUBA5gW952YuLh8kL6T3IbBlu8zOTfGEcdUp5O1R\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1W0jmrRX11DnRqU4QvGMlWFSlcItyeh0R&export=download&authuser=0&confirm=t&uuid=615334ea-9569-476f-8bb4-727516c76591&at=APZUnTWDW32examprS0NvVx3_v7o%3A1720881303406\"\n","# !wget --header=\"Host: drive.usercontent.google.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36\" --header=\"Cookie: AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; 
S=billing-ui-v3=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL:billing-ui-v3-efe=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL; __Secure-1PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; NID=515=NM4LfLVBoYJ9i81Kc-NBiwctEbhMRSl5wmR6n307UZGSzQGEMmfjGt37rGvWmr0kpME0asDPCVyOzRzWOQAE6tAeXOUlI-t456WBIvImfRgmWWMOsHrakcWQkh6tJpukjL302wlmwg58aKKzGf0A9FLjP_8ICtkhzJyPGNP9-UrXbObciHjanJe9QQ7HUQWK6o-ykNOyEMQdYRUyxuAiUNGs-J8CYs_FMGoehXTFhvCDRwk6BJc5fAFm7UJP6rqztlNPRsJwt04Zd3Gvhd_flNhUeLCSR8ZHFzeAbO4805TE9uuSCcny3CLGv9zsEsfbDtZEAGfDY8EzT_xm7jr7jzOts146k2NvdI2RvHJww81i_fz7uI494PwzXUgAKH4yM1ja9fZjwApQVPs0y17j-DzJoToC50bYt7DR4HpQK6on2AOu4q16ve9wyP0RDDNHfP9USVLb36gOCqfNU9At6EqTpXSJntuJOdCrF0IkO324uDeByiXpRDeDWTuFOuaD48lp0Gj91REUikYIEgsSL1De2Zrml_UCEYJrYUaRYvwZBkGpBJeb9rCVp-oipGOkSZ3ATKaIMOpviqPQEefRiK5xPf91m3eFcT4u-kRUuCIYmlZaP_fXXFPh459B6eYmhRwSAPwSZe1BczwQy9fAo6R_ZxrZKR-QHQvnJ9BOsuIvLNdygZCVWAdJji1wQp4BPxyRpHI1zwblH7vT7MDk_nQ6dcFnK-JNQM-ss4v8LQ_t7zzwg-cAOkNpek9wGSaLxAx9un2nY9bJpP9xkT5FrJjQfpQ7631XXdZP9YrTfJeM30_Cwk8HjeCa0A14Y7e3sDlCQFSEWKjsgvVKtZQPVU-dT4qy2hZku2m6xn6bqDCJ4S51qavae8ciYHJ5u06-kxoH9H2sPBkGHmNxl4uqa4E4LNG2cmK0OP0hSC_lIvPL1jMlTN9NPoI17U6Pk6z4uEa-rwZGaBc3tw0xCgZ5aNw9LHniHxefjXWFyEYuOXbtItfKYwe3KlBYHyPjX1WFvOifr99tnvKwwR-LFmTJ0_J6HzsUfXCQ6hBXfr7inmrRn8BBin-4NLpRKKUJTrE615Ltf7Rt_duO7XtInAeASNce9hyN3aB2sFD5gmEiHlXAxO1oEiuYbmBCBE3p-5pqJ994EzbTJd3_QQ; SIDCC=AKEyXzWUXvaLxcEWoUr7xDsHNqI9YlS2B0CD4n3sQW20AEbXjfwUamDojGOHRk_EUx-eeHbEAw; __Secure-1PSIDCC=AKEyXzWnzBM9s4vVnoJOnyQ6Am5ICp50PfclBvvvp2ftfri0PlYsvEF8X56Y-xoGbSgl-p7lIA; __Secure-3PSIDCC=AKEyXzWnlf6Wa8I0Hck8U3WeLdJ1nbz2W9cNUupMH0BW6wH57dNls4LLysU7cghw22IvvZKdIG8\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1W0jmrRX11DnRqU4QvGMlWFSlcItyeh0R&export=download&authuser=0&confirm=t&uuid=23a03d1a-abc8-42c8-b2e9-ac15544ce09b&at=APZUnTUcKjp9S2N1MeUHjjEMQv8Z%3A1720882064537\" -c -O 'centroid_3.zip'\n","\n","# !wget --header=\"Host: drive.usercontent.google.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh,zh-CN;q=0.9,en;q=0.8\" --header=\"Cookie: AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; 
S=billing-ui-v3=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL:billing-ui-v3-efe=bUPMkFOb_-XdG1QunQWLPvT97nV4ggJL; __Secure-1PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkTePrvGZ34Uqd-fiwJVuKQfsKn-m0FkRIk67VXXobf9pe45X49TD9mxOTck4EAA; NID=515=NM4LfLVBoYJ9i81Kc-NBiwctEbhMRSl5wmR6n307UZGSzQGEMmfjGt37rGvWmr0kpME0asDPCVyOzRzWOQAE6tAeXOUlI-t456WBIvImfRgmWWMOsHrakcWQkh6tJpukjL302wlmwg58aKKzGf0A9FLjP_8ICtkhzJyPGNP9-UrXbObciHjanJe9QQ7HUQWK6o-ykNOyEMQdYRUyxuAiUNGs-J8CYs_FMGoehXTFhvCDRwk6BJc5fAFm7UJP6rqztlNPRsJwt04Zd3Gvhd_flNhUeLCSR8ZHFzeAbO4805TE9uuSCcny3CLGv9zsEsfbDtZEAGfDY8EzT_xm7jr7jzOts146k2NvdI2RvHJww81i_fz7uI494PwzXUgAKH4yM1ja9fZjwApQVPs0y17j-DzJoToC50bYt7DR4HpQK6on2AOu4q16ve9wyP0RDDNHfP9USVLb36gOCqfNU9At6EqTpXSJntuJOdCrF0IkO324uDeByiXpRDeDWTuFOuaD48lp0Gj91REUikYIEgsSL1De2Zrml_UCEYJrYUaRYvwZBkGpBJeb9rCVp-oipGOkSZ3ATKaIMOpviqPQEefRiK5xPf91m3eFcT4u-kRUuCIYmlZaP_fXXFPh459B6eYmhRwSAPwSZe1BczwQy9fAo6R_ZxrZKR-QHQvnJ9BOsuIvLNdygZCVWAdJji1wQp4BPxyRpHI1zwblH7vT7MDk_nQ6dcFnK-JNQM-ss4v8LQ_t7zzwg-cAOkNpek9wGSaLxAx9un2nY9bJpP9xkT5FrJjQfpQ7631XXdZP9YrTfJeM30_Cwk8HjeCa0A14Y7e3sDlCQFSEWKjsgvVKtZQPVU-dT4qy2hZku2m6xn6bqDCJ4S51qavae8ciYHJ5u06-kxoH9H2sPBkGHmNxl4uqa4E4LNG2cmK0OP0hSC_lIvPL1jMlTN9NPoI17U6Pk6z4uEa-rwZGaBc3tw0xCgZ5aNw9LHniHxefjXWFyEYuOXbtItfKYwe3KlBYHyPjX1WFvOifr99tnvKwwR-LFmTJ0_J6HzsUfXCQ6hBXfr7inmrRn8BBin-4NLpRKKUJTrE615Ltf7Rt_duO7XtInAeASNce9hyN3aB2sFD5gmEiHlXAxO1oEiuYbmBCBE3p-5pqJ994EzbTJd3_QQ; SIDCC=AKEyXzWUXvaLxcEWoUr7xDsHNqI9YlS2B0CD4n3sQW20AEbXjfwUamDojGOHRk_EUx-eeHbEAw; __Secure-1PSIDCC=AKEyXzWnzBM9s4vVnoJOnyQ6Am5ICp50PfclBvvvp2ftfri0PlYsvEF8X56Y-xoGbSgl-p7lIA; __Secure-3PSIDCC=AKEyXzWnlf6Wa8I0Hck8U3WeLdJ1nbz2W9cNUupMH0BW6wH57dNls4LLysU7cghw22IvvZKdIG8\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Cookie: __Secure-ENID=12.SE=Yd0Bj-CLJ14fnd4qzdJHmwUs4B5zz46UaPC1cPJigNqqFV9PtM2CYyBpSbCkOyzUwzlEdZ1nZFf-igtGi7wSdJ_gqQSfQfh84r9egqFQAy9-GKayCRbdQKdera-2mkpuIT-c64CyR9vfNojM3hxZ9Dej-dGvtxlGjal9ttEHybw; __gsas=ID=ae0421b9a34b478c:T=1710758437:RT=1710758437:S=ALNI_MZP13R9ZOHbCzC0rgHSMrGXj6GCsg; HSID=A-4I-ZudDNUIB6EKH; SSID=A7v_1v9un6xAwVNku; APISID=ctK8IbLjeuDUmgys/AFnMSLWt9KddceDI6; SAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-1PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; __Secure-3PAPISID=J7GhTwED67EBqJJT/A9nwK7mr0ijGPw08r; SID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_kzuBV1TvOhAIC8VF1e9fpgACgYKATQSARQSFQHGX2Mi8LXUwWoIwNCEPU8Sy3mXUxoVAUF8yKqGXVfjTGz9gQal7nwGr4Pl0076; __Secure-1PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_PDa-DzVmbdGFPyxMQpk9_QACgYKAewSARQSFQHGX2MiAeee4fn0OWglWZfAygqkyBoVAUF8yKp-Sfmtnueimxc-0QbJRF9I0076; __Secure-3PSID=g.a000kgiBabgKCiCYKve9zfoWVgz9eu8sBA6N4XDPPpP5pcW16_C_g9IrMeU98APBo9Stp6wEnAACgYKAQASARQSFQHGX2MiFWtc9ucONXnpxBzlRdudEhoVAUF8yKoeZwCpJDnjfAFjGssHSUGm0076; NID=515=GQhY9nKKFCx3qFDjE0MA4ubjWNdef6xCIY_RfWOPWKEtyfBN3nAUl8WHI2VczjNQ4rVkj1XBAY8WNWHXyqSK10CfT4FxsFlPzrHIJpeTtm1nWRNBd9AAfBKJHz4XpESszntVUTE_59RklZuKo0vk1poReVi2da1PZKC3CTKH2Ll3gB5xuB9wf4bmq8ylVUuIROPJczr0XnCuUHV3qLdBvgy9_870b6UwOq1iOlIxFQFm01EZ4pqF4q1Ub3QRSWpEMLh4LSZFpJ5O255R5OV7krmEdDvH_sHoTEPZAg2PoEpwAyGK6Xp9qcLIlldgx5-5V86N8Wtb93uTlQuA_CFXb5_2eP3bgeX8txwlJ5SrldVjg9ctzYtBU2RwJKTSvdHfIG7lpOkg6XlkvDOcJpR3DihT_OlqnPn7drCAJpvVDv29hZn5XPMXaSrNdbG64OJ9urJEw5odEwsLYkkpC1vmlUcuoo52S5f6RQu0Z8kZiV8iRW6XIqHsSmQHunVaxk6xWCStUg; 
__Secure-1PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; __Secure-3PSIDTS=sidts-CjEB3EgAEtTS0OazynCofIH4RCBstiRP5flEcvYW3z4Fg9oGd5QOESDOZt1wO2iqUYHjEAA; SIDCC=AKEyXzVI6aMX8lSDja86Yts3FBAtBzPCzVNgaX5BCz78NWsWzlT3yFWKUV7ZE46SFzE1GiBI-cHdTw; __Secure-1PSIDCC=AKEyXzUo4NQAwqqPMxP2eye-MFEbZmBIm_sZqRU1amttg0YoQkc8ZKSNXdHl5jNCMEbhrUHhS9-K; __Secure-3PSIDCC=AKEyXzWf2lIdmDLeZKpXSi9GytVQb6XudrYiNUBA5gW952YuLh8kL6T3IbBlu8zOTfGEcdUp5O1R\" --header=\"Connection: keep-alive\" \"https://drive.usercontent.google.com/download?id=1W0jmrRX11DnRqU4QvGMlWFSlcItyeh0R&export=download&authuser=0&confirm=t&uuid=615334ea-9569-476f-8bb4-727516c76591&at=APZUnTWDW32examprS0NvVx3_v7o%3A1720881303406\" -c -O 'centroid_3.zip'"]},{"cell_type":"markdown","metadata":{"id":"wDM42uay7X8A"},"source":["赛道二额外数据来源于:https://github.com/Mohamedelrefaie/DrivAerNet\n","\n","赛道二额外数据的论文:Elrefaie, Mohamed, Angela Dai, and Faez Ahmed. \"Drivaernet: A parametric car dataset for data-driven aerodynamic design and graph-based drag prediction.\" arXiv preprint arXiv:2403.08055 (2024).\n","\n","**额外数据排除测试集:额外数据中仅使用了id>745的数据,未使用比赛测试数据,共计3711条数据。**"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":159215,"status":"ok","timestamp":1721011960293,"user":{"displayName":"刘野","userId":"06353679841549898578"},"user_tz":-480},"id":"qrLUfXxxKrTW","outputId":"18f9e7db-1f98-4ef7-98e8-536902231445"},"outputs":[{"name":"stdout","output_type":"stream","text":[" % Total % Received % Xferd Average Speed Time Time Time Current\n"," Dload Upload Total Spent Left Speed\n"," 78 15.7G 78 12.2G 0 0 79.6M 0 0:03:22 0:02:37 0:00:45 54.1M^C\n"]}],"source":["!curl 'https://drive.usercontent.google.com/download?id=1zEL0a_o0DBuSzRncgtxtkrFCPiegZqJd&export=download&authuser=1&confirm=t&uuid=f4cd6774-28c7-4899-beb1-e9271a130f19&at=APZUnTWt_RC0KHds95ggRJczRF9J:1721011736859' \\\n"," -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n"," -H 'accept-language: zh,zh-CN;q=0.9,en;q=0.8' \\\n"," -H 'cookie: SEARCH_SAMESITE=CgQIzJsB; AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL:billing-ui-v3-efe=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL; 
NID=515=f7DxlXMHGv5snCa0o2Rtf5PaclKJPFPfs4bzQYWoP6-12yc61N_aaDD1dxrg_RZ9gAo4sd2DtQX3LjE_zIJmfycvHEAT8y_NDwj97mJ9RoEpHPflVyjkLedO5HixxuVPESgUY6RJSJagWgWEz4I3z_u4YHnGNh97LoYHEhIPEaWKiJPOu_LbcJH0W0JypqoUI-Wq3fnXMbkdQDYn8jYfmvwX1E64Tm4J_0TL2p9SontzJU7zHyD0AqMmsy_8bGF_tPSIzWDaoUQe_jhJBEfZTMObdKKsWH1tONlvFtvXOH9PR2K9ELhsj76I2rxRIkyj9a2J68rvfn2pWFl87DttRagRQo4OB3iBbEW5E73M3NdqHhDNy-GA9Gxee0gDTcXrESwdv_GzNC8fzH-4z5-aHCT3iAhIndphEwLJb-hXmjt4TG2yHdptS4WeE1cHT3QYuclxlMU_CF6-jK3FmwZDjuwTliqzqlOw9TdAbJuG66v_PdRLwgp3CYPAQjyMoPQRS2XXXZj-JclCNcB7cRX445gUTm9IojyQjy5GJHxxXB6jr-QNNWggOf8Ox-EtjMLKzsFqTkcJD3YQ6Bm-KPwGRIrgYi4MXAfDBDWQ4LkU1N5GcdY5dFYa7n5AaQQK98QlIfj08iluZtuntxfAHFA4RnOCPhM_ve3CYtmVa6ZeMcSOgAepO-oYLB_IgVzmWcPJIaAMBcwaeN4TSA5-zbgOjdSGb5QX9Oitq8pNFTDgXSlu_yQUQCZueGbkCuh0G1D8Tdxm1U_1x5bG44VQzp5dO1csLR9YJtShx_VDaQi2Oo1znXNzq-cRNIOIW1tnEEsaHSBjZmRNKLFK92xE93upumeeAqFg8IW3dCT42IHkBA7U8mZw5zjz35suCSX8opjaVG3h776NSj4-hq3217g1jGMPDcHkd1Ruzi-8-yfh5slwmkxyO4a-6dlU77smmhjZRhZYFVdkvzkld6D4yFr1ZwCujT3sJjS1YKwBzgGYwp_L-n5-fb8wKD5GwVrgelLMoMq4ZW_iWg8wbLXCW_CnBgsD0GDp97_y1BhjQ0ISYAefkgRudMnxPwGhLvvByzC_2TAKfOHpDscOnIQZlk9vAImBJQ_BwkjV2qvQW3xWNrDskRyQ7sp5zgZV8ZBOKUvs0RzQuk-9acLl9Q; __Secure-1PSIDTS=sidts-CjEB4E2dkSar0V5K52eCTBzJJ_9xOGD9L_GO2fYUQ-D53J-yUCJkpPUipO1K2pk8KDmYEAA; __Secure-3PSIDTS=sidts-CjEB4E2dkSar0V5K52eCTBzJJ_9xOGD9L_GO2fYUQ-D53J-yUCJkpPUipO1K2pk8KDmYEAA; SIDCC=AKEyXzVVd4bq-8eK0Ht4kPhv4m8AlVIgPDd2FxW7cenVq0oPzrD2ov4oNHOWlvosWvN-QLt4VQ; __Secure-1PSIDCC=AKEyXzXh9XzrRW3khhZ7nNG7s_-FttVPwYk5oVse3vTT2fCoSQkJgDrY7vazg1j8O1omAKPbSw; __Secure-3PSIDCC=AKEyXzVEYSAsIpUVMfnPqWxZnGDSvp-dEK5Dep35xn6oogrNNN_IskFB3yPDmaHFyfP2aMl247E' \\\n"," -H 'priority: u=0, i' \\\n"," -H 'sec-ch-ua: \"Google Chrome\";v=\"125\", \"Chromium\";v=\"125\", \"Not.A/Brand\";v=\"24\"' \\\n"," -H 'sec-ch-ua-mobile: ?0' \\\n"," -H 'sec-ch-ua-platform: \"Windows\"' \\\n"," -H 'sec-fetch-dest: iframe' \\\n"," -H 'sec-fetch-mode: navigate' \\\n"," -H 'sec-fetch-site: same-site' \\\n"," -H 'upgrade-insecure-requests: 1' \\\n"," -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36' \\\n"," -H 'x-client-data: CKy1yQEIlbbJAQiitskBCKmdygEItoHLAQiSocsBCIWgzQEIuMjNAQimis4BCOKTzgEI6JPOAQjum84BCJWdzgEIxZ3OAQiyn84BGPXJzQEY1+vNARihnc4B' -o centroid.zip\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":74051,"status":"ok","timestamp":1720931143410,"user":{"displayName":"刘野","userId":"06353679841549898578"},"user_tz":-480},"id":"cQJI_BcgNccz","outputId":"92c47a16-9c33-43a7-d466-03ee07949baf"},"outputs":[{"name":"stdout","output_type":"stream","text":[" % Total % Received % Xferd Average Speed Time Time Time Current\n"," Dload Upload Total Spent Left Speed\n","100 5534M 100 5534M 0 0 74.6M 0 0:01:14 0:01:14 --:--:-- 83.3M\n"]}],"source":["\n","!curl 'https://drive.usercontent.google.com/download?id=1kplQ1qGMI8toknT74vQZGdmdWxA71SiL&export=download&authuser=1&confirm=t&uuid=5ad4d34a-c765-4ab9-bc70-0696d277d61e&at=APZUnTUmDANwNPtvdqkZOT1dXioc:1721011901290' \\\n"," -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n"," -H 'accept-language: zh,zh-CN;q=0.9,en;q=0.8' \\\n"," -H 'cookie: SEARCH_SAMESITE=CgQIzJsB; AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; 
SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL:billing-ui-v3-efe=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL; NID=515=f7DxlXMHGv5snCa0o2Rtf5PaclKJPFPfs4bzQYWoP6-12yc61N_aaDD1dxrg_RZ9gAo4sd2DtQX3LjE_zIJmfycvHEAT8y_NDwj97mJ9RoEpHPflVyjkLedO5HixxuVPESgUY6RJSJagWgWEz4I3z_u4YHnGNh97LoYHEhIPEaWKiJPOu_LbcJH0W0JypqoUI-Wq3fnXMbkdQDYn8jYfmvwX1E64Tm4J_0TL2p9SontzJU7zHyD0AqMmsy_8bGF_tPSIzWDaoUQe_jhJBEfZTMObdKKsWH1tONlvFtvXOH9PR2K9ELhsj76I2rxRIkyj9a2J68rvfn2pWFl87DttRagRQo4OB3iBbEW5E73M3NdqHhDNy-GA9Gxee0gDTcXrESwdv_GzNC8fzH-4z5-aHCT3iAhIndphEwLJb-hXmjt4TG2yHdptS4WeE1cHT3QYuclxlMU_CF6-jK3FmwZDjuwTliqzqlOw9TdAbJuG66v_PdRLwgp3CYPAQjyMoPQRS2XXXZj-JclCNcB7cRX445gUTm9IojyQjy5GJHxxXB6jr-QNNWggOf8Ox-EtjMLKzsFqTkcJD3YQ6Bm-KPwGRIrgYi4MXAfDBDWQ4LkU1N5GcdY5dFYa7n5AaQQK98QlIfj08iluZtuntxfAHFA4RnOCPhM_ve3CYtmVa6ZeMcSOgAepO-oYLB_IgVzmWcPJIaAMBcwaeN4TSA5-zbgOjdSGb5QX9Oitq8pNFTDgXSlu_yQUQCZueGbkCuh0G1D8Tdxm1U_1x5bG44VQzp5dO1csLR9YJtShx_VDaQi2Oo1znXNzq-cRNIOIW1tnEEsaHSBjZmRNKLFK92xE93upumeeAqFg8IW3dCT42IHkBA7U8mZw5zjz35suCSX8opjaVG3h776NSj4-hq3217g1jGMPDcHkd1Ruzi-8-yfh5slwmkxyO4a-6dlU77smmhjZRhZYFVdkvzkld6D4yFr1ZwCujT3sJjS1YKwBzgGYwp_L-n5-fb8wKD5GwVrgelLMoMq4ZW_iWg8wbLXCW_CnBgsD0GDp97_y1BhjQ0ISYAefkgRudMnxPwGhLvvByzC_2TAKfOHpDscOnIQZlk9vAImBJQ_BwkjV2qvQW3xWNrDskRyQ7sp5zgZV8ZBOKUvs0RzQuk-9acLl9Q; __Secure-1PSIDTS=sidts-CjEB4E2dkcyeG-706DIirRL4ZT6d1Ey7zOxcpjH0gIMP-LGrv_2vD6ZYpCNB-zbi6FD1EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkcyeG-706DIirRL4ZT6d1Ey7zOxcpjH0gIMP-LGrv_2vD6ZYpCNB-zbi6FD1EAA; SIDCC=AKEyXzUO6ATMbyLDKf0JONH_EZ9q18dNAh4W2BnM-J4vWJXZIWn62ug9d3TdYBYHq_wZGYaQ9g; __Secure-1PSIDCC=AKEyXzVaBb1Cde3SKjz4O9dj3_TJYqgA6V0wzhjeI7tzYynz8DfGJqCyAlTI5wfk3BSO5jeg3g; __Secure-3PSIDCC=AKEyXzVhNQN1oY0uCd7QSXfCDNSJCAv920LjcKWTt-641d2O0ITi6ng5t_AaspWvj3D8kv_cG-8' \\\n"," -H 'priority: u=0, i' \\\n"," -H 'sec-ch-ua: \"Google Chrome\";v=\"125\", \"Chromium\";v=\"125\", \"Not.A/Brand\";v=\"24\"' \\\n"," -H 'sec-ch-ua-mobile: ?0' \\\n"," -H 'sec-ch-ua-platform: \"Windows\"' \\\n"," -H 'sec-fetch-dest: iframe' \\\n"," -H 'sec-fetch-mode: navigate' \\\n"," -H 'sec-fetch-site: same-site' \\\n"," -H 'upgrade-insecure-requests: 1' \\\n"," -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36' \\\n"," -H 'x-client-data: CKy1yQEIlbbJAQiitskBCKmdygEItoHLAQiSocsBCIWgzQEIuMjNAQimis4BCOKTzgEI6JPOAQjum84BCJWdzgEIxZ3OAQiyn84BGPXJzQEY1+vNARihnc4B' -o press.zip\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":4302,"status":"ok","timestamp":1720931148267,"user":{"displayName":"刘野","userId":"06353679841549898578"},"user_tz":-480},"id":"zSkqKjB4NeQi","outputId":"56d831df-b746-42d4-ce2b-ba509c66a8d6"},"outputs":[{"name":"stdout","output_type":"stream","text":[" % Total % Received % 
Xferd Average Speed Time Time Time Current\n"," Dload Upload Total Spent Left Speed\n","100 312M 100 312M 0 0 72.0M 0 0:00:04 0:00:04 --:--:-- 100M\n"]}],"source":["\n","!curl 'https://drive.usercontent.google.com/download?id=1Pu9XSyRj47SFrmdmxLNxZMb8f3LK6oi2&export=download&authuser=1&confirm=t&uuid=03fc13af-efab-4e23-8505-b60212c68185&at=APZUnTUBVyyVybTwVQg3uylUsD5F:1721008361811' \\\n"," -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \\\n"," -H 'accept-language: zh,zh-CN;q=0.9,en;q=0.8' \\\n"," -H 'cookie: SEARCH_SAMESITE=CgQIzJsB; AEC=AVYB7coqqJpbAt5TWooVNVp_aDFiZNB57t_gUQNwIT4IG1KVhaFStFKcVRM; SID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7sEcXEqMo-cDulaOAV9QiRAACgYKATASARYSFQHGX2MicthHYExwzkuJRXpDLmC_XxoVAUF8yKpkSuSLMm7wN8kPiklDLWpD0076; __Secure-1PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7eTxKEPCbtKJP-WpB0sS7IwACgYKAQ0SARYSFQHGX2MimRYfxLjZZnsSbRUyLnXj-xoVAUF8yKpK6mH60avbvj7dWu_Wtvny0076; __Secure-3PSID=g.a000lwhzPIqBzg0W97B8nl4OMFLevLbxH1Qlfz-Q-ETz1Zt9KdI7uXG5Vbh6OrUmdFmX1D1k3gACgYKAWMSARYSFQHGX2MiFsS_t8MOTqmRAcbHsxqG8RoVAUF8yKp0GCuI4xnwYrlT9L1pS6Ol0076; HSID=Ak0_J4cMdWxYYLsNT; SSID=AIMT0qtQWmO9_ImAP; APISID=h9-r_ylvYL8SAAMr/A-iHpJeO9Apsuge6w; SAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-1PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; __Secure-3PAPISID=GiGcvVVJAk8U30rD/ARLtq0ckd5r4NZAku; S=billing-ui-v3=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL:billing-ui-v3-efe=myiOE0YlM8lN8iYqqDFVV1DzcE9ILBKL; NID=515=f7DxlXMHGv5snCa0o2Rtf5PaclKJPFPfs4bzQYWoP6-12yc61N_aaDD1dxrg_RZ9gAo4sd2DtQX3LjE_zIJmfycvHEAT8y_NDwj97mJ9RoEpHPflVyjkLedO5HixxuVPESgUY6RJSJagWgWEz4I3z_u4YHnGNh97LoYHEhIPEaWKiJPOu_LbcJH0W0JypqoUI-Wq3fnXMbkdQDYn8jYfmvwX1E64Tm4J_0TL2p9SontzJU7zHyD0AqMmsy_8bGF_tPSIzWDaoUQe_jhJBEfZTMObdKKsWH1tONlvFtvXOH9PR2K9ELhsj76I2rxRIkyj9a2J68rvfn2pWFl87DttRagRQo4OB3iBbEW5E73M3NdqHhDNy-GA9Gxee0gDTcXrESwdv_GzNC8fzH-4z5-aHCT3iAhIndphEwLJb-hXmjt4TG2yHdptS4WeE1cHT3QYuclxlMU_CF6-jK3FmwZDjuwTliqzqlOw9TdAbJuG66v_PdRLwgp3CYPAQjyMoPQRS2XXXZj-JclCNcB7cRX445gUTm9IojyQjy5GJHxxXB6jr-QNNWggOf8Ox-EtjMLKzsFqTkcJD3YQ6Bm-KPwGRIrgYi4MXAfDBDWQ4LkU1N5GcdY5dFYa7n5AaQQK98QlIfj08iluZtuntxfAHFA4RnOCPhM_ve3CYtmVa6ZeMcSOgAepO-oYLB_IgVzmWcPJIaAMBcwaeN4TSA5-zbgOjdSGb5QX9Oitq8pNFTDgXSlu_yQUQCZueGbkCuh0G1D8Tdxm1U_1x5bG44VQzp5dO1csLR9YJtShx_VDaQi2Oo1znXNzq-cRNIOIW1tnEEsaHSBjZmRNKLFK92xE93upumeeAqFg8IW3dCT42IHkBA7U8mZw5zjz35suCSX8opjaVG3h776NSj4-hq3217g1jGMPDcHkd1Ruzi-8-yfh5slwmkxyO4a-6dlU77smmhjZRhZYFVdkvzkld6D4yFr1ZwCujT3sJjS1YKwBzgGYwp_L-n5-fb8wKD5GwVrgelLMoMq4ZW_iWg8wbLXCW_CnBgsD0GDp97_y1BhjQ0ISYAefkgRudMnxPwGhLvvByzC_2TAKfOHpDscOnIQZlk9vAImBJQ_BwkjV2qvQW3xWNrDskRyQ7sp5zgZV8ZBOKUvs0RzQuk-9acLl9Q; __Secure-1PSIDTS=sidts-CjEB4E2dkRl-e2cLhhwXDzDk-rzgd2xbWfhFsDhTMeF5dTlXdEZtgRRv9KxOg3aKbw94EAA; __Secure-3PSIDTS=sidts-CjEB4E2dkRl-e2cLhhwXDzDk-rzgd2xbWfhFsDhTMeF5dTlXdEZtgRRv9KxOg3aKbw94EAA; SIDCC=AKEyXzUQGR2IEvc_8XXe0ihqfc_i1mjVc2ver1o3_qABcV9RkiYy9oq63ESbdypx118N_ffR3w; __Secure-1PSIDCC=AKEyXzUbVzm8IfocDFc9TkyP7h23tZ-QdwdtDuRlCuwZFq8GH-fcpnzNFi2mhTZJbGbiHzw26w; __Secure-3PSIDCC=AKEyXzX2kWbD1r-wN6CXPXeroB7juxTpokNrOsycIfomuT-E3grtkX0ktBuHs8ut788dbPuNMlg' \\\n"," -H 'priority: u=0, i' \\\n"," -H 'sec-ch-ua: \"Google Chrome\";v=\"125\", \"Chromium\";v=\"125\", \"Not.A/Brand\";v=\"24\"' \\\n"," -H 'sec-ch-ua-mobile: ?0' \\\n"," -H 'sec-ch-ua-platform: \"Windows\"' \\\n"," -H 'sec-fetch-dest: iframe' \\\n"," -H 'sec-fetch-mode: navigate' \\\n"," -H 'sec-fetch-site: same-site' \\\n"," -H 'upgrade-insecure-requests: 1' 
\\\n"," -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36' \\\n"," -H 'x-client-data: CKy1yQEIlbbJAQiitskBCKmdygEItoHLAQiSocsBCIWgzQEIuMjNAQimis4BCOKTzgEI6JPOAQjum84BCJWdzgEIxZ3OAQiyn84BGPXJzQEY1+vNARihnc4B' -o Testset_track_B.zip\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"KAa3QqklR_fu"},"outputs":[],"source":["\n","import zipfile\n","import os\n","\n","def list_and_unzip(zip_folder, extract_folder):\n"," # 确保解压目录存在\n"," if not os.path.exists(extract_folder):\n"," os.makedirs(extract_folder)\n","\n"," # 列出指定目录下所有文件和文件夹\n"," files = os.listdir(zip_folder)\n","\n"," # 过滤出.zip文件\n"," zip_files = [file for file in files if file.endswith('.zip')]\n","\n"," # 输出.zip文件列表\n"," print(\"Zip files found:\", zip_files)\n","\n"," # 逐一解压每个.zip文件\n"," for zip_file in zip_files:\n"," zip_path = os.path.join(zip_folder, zip_file)\n"," with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_folder)\n"," print(f\"Extracted {zip_file} to {extract_folder}\")\n","\n","\n","def unzip_file(zip_path, extract_to):\n"," \"\"\"\n"," 解压 ZIP 文件到指定目录。\n"," Args:\n"," zip_path (str): ZIP 文件的路径。\n"," extract_to (str): 文件解压的目标目录。\n"," \"\"\"\n"," # 确保解压目标目录存在\n"," if not os.path.exists(extract_to):\n"," os.makedirs(extract_to)\n","\n"," with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_to)\n"," print(f\"Files extracted to: {extract_to}\")\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":126225,"status":"ok","timestamp":1720931623482,"user":{"displayName":"刘野","userId":"06353679841549898578"},"user_tz":-480},"id":"YrH8j7XOSBsp","outputId":"c355cf90-a49e-470f-ed3a-a06b4a41fb44"},"outputs":[],"source":["\n","# centroid\n","# centroid_zip_folder = '/content/train_track_B_e/' # 替换为你的.zip文件所在目录\n","# centroid_folder = '/content/train_track_B_e/' # 替换为你希望解压到的目录\n","# list_and_unzip(centroid_zip_folder, centroid_folder)\n","\n","unzip_file('/centroid.zip','/Dataset/train_track_B_e/')\n","os.remove('/centroid.zip')\n","unzip_file('/press.zip','/Dataset/train_track_B_e/')\n","os.remove('/press.zip')\n","unzip_file('/Testset_track_B.zip','/Dataset/')\n","os.remove('/Testset_track_B.zip')\n","# os.remove('/content/train_track_B_e/centroid.zip')\n","# os.remove('/content/train_track_B_e/press.zip')"]}],"metadata":{"colab":{"collapsed_sections":["z0Sek0wtEs5n","kY81z-fCgPfK","PmlOGK6yPVGu"],"gpuType":"T4","provenance":[{"file_id":"1cqAJrxi3BXDeizZAYTIu0GJxkb-1HrNZ","timestamp":1721015572352}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0} diff --git a/jointContribution/IJCAI_2024/tenfeng/infer.py b/jointContribution/IJCAI_2024/tenfeng/infer.py index 4b9576a0f4..f135ad7a3e 100644 --- a/jointContribution/IJCAI_2024/tenfeng/infer.py +++ b/jointContribution/IJCAI_2024/tenfeng/infer.py @@ -1,375 +1,375 @@ -import argparse -import logging -import os - -import numpy as np -import paddle -from Transolver import Model -from utils.utils import LpLoss - - -def setup_logging(log_dir): - if not os.path.exists(log_dir): - os.makedirs(log_dir) - logging.basicConfig( - filename=os.path.join(log_dir, "training.log"), - level=logging.INFO, - format="%(asctime)s - %(levelname)s - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - ) - console = logging.StreamHandler() - console.setLevel(logging.INFO) - formatter = logging.Formatter("%(asctime)s - 
%(levelname)s - %(message)s") - console.setFormatter(formatter) - logging.getLogger().addHandler(console) - - -def log_initial_configuration(args): - for arg, value in vars(args).items(): - logging.info(f"{arg}: {value}") - - -def set_random_seed(seed): - np.random.seed(seed) - paddle.seed(seed=seed) - - -def set_device(gpu_id): - os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) - device = str("cuda" if paddle.device.cuda.device_count() >= 1 else "cpu").replace( - "cuda", "gpu" - ) - return device - - -def load_tensor(file_path, mask=None): - if not os.path.exists(file_path): - raise FileNotFoundError(f"File not found: {file_path}") - data = np.load(file_path) - if mask is not None: - data = data[mask] - return paddle.to_tensor(data=data).astype(dtype="float32") - - -def custom_collate_fn(batch): - return batch - - -class PointCloudDataset(paddle.io.Dataset): - def __init__( - self, root_dir, transform=None, train=True, translate=True, submit=False - ): - """ - Args: - root_dir (string): Directory with all the point cloud files. - transform (callable, optional): Optional transform to be applied on a sample. - """ - self.root_dir = root_dir - self.file_list = [ - f - for f in os.listdir(os.path.join(root_dir, "centroid")) - if f.endswith(".npy") - ] - self.transform = transform - self.train = train - self.translate = translate - self.submit = submit - - def __len__(self): - return len(self.file_list) - - def __getitem__(self, idx): - # start_time = time.time() - file_name = self.file_list[idx] - file_path = os.path.join(self.root_dir, "centroid", file_name) - points = np.load(file_path) - points_min = np.min(points, axis=0, keepdims=True) - points_max = np.max(points, axis=0, keepdims=True) - mean_std_dict = load_mean_std("mean_std.txt") - if self.train: - sample_rate = 0.1 - else: - sample_rate = 0.4 - if self.submit: - sampled_indices = np.arange(tuple(points.shape)[0]) - else: - sampled_indices = np.random.choice( - np.arange(tuple(points.shape)[0]), - int(len(points) * sample_rate), - replace=False, - ) - sampled_points = points[sampled_indices].astype(np.float32) - local_sampled_points = (sampled_points - points_min) / ( - points_max - points_min - ).astype(np.float32) - press_sample = np.load( - os.path.join(self.root_dir, "press", file_name.replace("centroid", "press")) - )[sampled_indices].astype(np.float32) - if self.translate and self.train: - translation_vector = np.random.rand(3) * 0.01 - 0.005 - sampled_points += translation_vector - Normal = True - if Normal: - sampled_points = ( - sampled_points - mean_std_dict["centroid"][0] - ) / mean_std_dict["centroid"][1] - sample = { - "centroid": sampled_points.astype(np.float32), - "local_centroid": local_sampled_points.astype(np.float32), - "press": press_sample.astype(np.float32), - "file_name": file_name, - "mean_std": mean_std_dict, - } - return sample - - -def load_mean_std(input_file): - """ - Load mean and standard deviations from a text file. - Args: - input_file (str): The path to the text file containing the saved mean and std values. - - Returns: - dict: A dictionary with keys as the data categories and values as tuples of mean and std. 
- """ - results = {} - with open(input_file, "r") as f: - lines = f.readlines() - for line in lines: - parts = line.split() - category = parts[0] - if category == "centroid": - mean_val_1 = float(parts[3].strip(",")) - mean_val_2 = float(parts[4].strip(",")) - mean_val_3 = float(parts[5].strip(",")) - std_val_1 = float(parts[9].strip(",")) - std_val_2 = float(parts[10].strip(",")) - std_val_3 = float(parts[11].strip(",")) - mean_val = [mean_val_1, mean_val_2, mean_val_3] - std_val = [std_val_1, std_val_2, std_val_3] - else: - mean_val = [float(parts[3].strip(","))] - std_val = [float(parts[7])] - results[category] = mean_val, std_val - return results - - -class modified_log_transformed_l2_relative_error_loss(object): - def __init__(self, epsison=1e-08): - super(modified_log_transformed_l2_relative_error_loss, self).__init__() - self.epsilon = epsison - - def rel(self, x, y): - num_examples = tuple(x.shape)[0] - sign_x = paddle.sign(x=x.reshape(num_examples, -1)) - sign_y = paddle.sign(x=y.reshape(num_examples, -1)) - log_abs_x = paddle.log( - x=paddle.abs(x=x.reshape(num_examples, -1)) - + paddle.to_tensor(data=self.epsilon, dtype=x.dtype, place=x.place) - ) - log_abs_y = paddle.log( - x=paddle.abs(x=y.reshape(num_examples, -1)) - + paddle.to_tensor(data=self.epsilon, dtype=x.dtype, place=x.place) - ) - signed_log_x = sign_x * log_abs_x - signed_log_y = sign_y * log_abs_y - diff = signed_log_x - signed_log_y - diff_norm = paddle.linalg.norm(x=diff, p=2, axis=1) - y_norm = paddle.linalg.norm(x=signed_log_y, p=2, axis=1) - relative_error = diff_norm / y_norm - return paddle.mean(x=relative_error) - - def __call__(self, x, y): - return self.rel(x, y) - - -def parse_args(): - parser = argparse.ArgumentParser( - description="Train a model for Automobile Aerodynamic Drag Prediction." 
- ) - parser.add_argument( - "--training_data_dir", - type=str, - default="./Dataset/train_track_B_e", - help="Directory for the training data", - ) - parser.add_argument( - "--testing_data_dir", - type=str, - default="./Dataset/Testset_track_B_e", - help="Directory for the testing data", - ) - parser.add_argument( - "--log_dir", - type=str, - default="./results", - help="Directory for saving logs and results", - ) - parser.add_argument("--gpu_id", default=0, type=int) - parser.add_argument( - "--num_segments", type=int, default=10, help="Number of segments to split" - ) - parser.add_argument( - "--segments_id", type=int, default=0, help="the id_th of segments to split" - ) - parser.add_argument( - "--overlap_ratio", type=float, default=0.5, help="Overlap ratio for segments" - ) - parser.add_argument( - "--global_normal", - type=bool, - default=True, - help="wheter use global normal or not", - ) - parser.add_argument( - "--normalization", type=bool, default=True, help="Flag to normalize data or not" - ) - parser.add_argument( - "--translate", action="store_true", help="Translate data or not" - ) - parser.add_argument( - "--loss_type", - type=str, - default="rl2", - choices=["l2", "rl2", "log_rl2", "huber"], - help="The type of loss function to use.", - ) - parser.add_argument( - "--submit", action="store_true", help="if generate submitted data" - ) - parser.add_argument("--batch_size", type=int, default=5) - parser.add_argument( - "--input_dim", type=int, default=6, help="Dimension of model input" - ) - parser.add_argument( - "--output_dim", type=int, default=1, help="Dimension of model output" - ) - parser.add_argument("--depth", type=int, default=5, help="Depth of the model") - parser.add_argument( - "--hidden_dim", type=int, default=256, help="Dimension of hidden features" - ) - parser.add_argument( - "--num_slices", - type=int, - default=32, - help="Number of slices for slicing the input", - ) - parser.add_argument( - "--num_heads", type=int, default=8, help="Number of attention heads" - ) - parser.add_argument( - "--mlp_ratio", - type=int, - default=2, - help="Ratio of mlp hidden dim to embedding dim", - ) - parser.add_argument("--patch_size", type=int, default=20, help="Size of each patch") - parser.add_argument( - "--shift", type=int, default=4, help="Shift size for shifting the patches" - ) - parser.add_argument("--n_layer", type=int, default=1, help="Number of layers") - parser.add_argument( - "--epochs", type=int, default=69, help="Number of epochs to train" - ) - parser.add_argument( - "--lr", type=float, default=0.001, help="Learning rate for the optimizer" - ) - parser.add_argument( - "--scheduler_step", - type=int, - default=30, - help="Number of steps after which to reduce learning rate", - ) - parser.add_argument( - "--milestones", nargs="+", type=int, default=[40, 50, 60, 65, 68] - ) - parser.add_argument( - "--scheduler_gamma", - type=float, - default=0.5, - help="Gamma factor for reducing the learning rate", - ) - return parser.parse_args() - - -if __name__ == "__main__": - set_random_seed(123) - args = parse_args() - print(args) - save_dir = args.log_dir - setup_logging(save_dir) - log_initial_configuration(args) - device = set_device(args.gpu_id) - model = Model( - n_hidden=args.hidden_dim, - n_layers=args.depth, - space_dim=args.input_dim, - fun_dim=0, - n_head=args.num_heads, - mlp_ratio=args.mlp_ratio, - out_dim=args.output_dim, - slice_num=args.num_slices, - n_iter=args.n_layer, - unified_pos=0, - ).to(device) - L2_fn = LpLoss(reduction=False) - if args.submit: - 
submit_dataset = PointCloudDataset( - root_dir=args.testing_data_dir, train=False, translate=False, submit=True - ) - submitloader = paddle.io.DataLoader( - dataset=submit_dataset, - batch_size=1, - shuffle=False, - collate_fn=custom_collate_fn, - ) - output_dirs = os.path.join(save_dir, "output") - if os.path.exists(output_dirs) is False: - os.makedirs(output_dirs) - model = Model( - n_hidden=args.hidden_dim, - n_layers=args.depth, - space_dim=args.input_dim, - fun_dim=0, - n_head=args.num_heads, - mlp_ratio=args.mlp_ratio, - out_dim=args.output_dim, - slice_num=args.num_slices, - n_iter=args.n_layer, - unified_pos=0, - ).to(device) - model.set_state_dict( - state_dict=paddle.load(path=f"{save_dir}/checkpoint.pdparams") - ) - y_list = [] - y_hat_list = [] - L2 = [] - model.eval() - with paddle.no_grad(): - for batch in submitloader: - for i in range(len(batch)): - x_centroid = batch[i]["centroid"].cuda().unsqueeze(axis=0) - x_local_centroid = ( - batch[i]["local_centroid"].cuda().unsqueeze(axis=0) - ) - y = batch[i]["press"].cuda().unsqueeze(axis=0) - features = paddle.concat(x=[x_centroid, x_local_centroid], axis=-1) - y_hat = model(features) - y_hat = ( - y_hat * batch[i]["mean_std"]["press"][1][0] - + batch[i]["mean_std"]["press"][0][0] - ).cuda() - test_L2 = L2_fn(y_hat, y) - L2.append(test_L2.cpu()) - np.save( - f"{output_dirs}/{batch[i]['file_name'].replace('centroid', 'press')}", - y_hat.cpu().numpy().squeeze(), - ) - print( - f"{batch[i]['file_name'].replace('centroid', 'press')} score: {test_L2.cpu().item():.5f}" - ) - L2 = paddle.mean(x=paddle.concat(x=L2, axis=0), axis=0) - y_list.append(y.cpu().numpy().squeeze()) - y_hat_list.append(y_hat.cpu().numpy().squeeze()) - print(float(L2)) - print("########################## submit sucessfully #################") +import argparse +import logging +import os + +import numpy as np +import paddle +from Transolver import Model +from utils.utils import LpLoss + + +def setup_logging(log_dir): + if not os.path.exists(log_dir): + os.makedirs(log_dir) + logging.basicConfig( + filename=os.path.join(log_dir, "training.log"), + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + console = logging.StreamHandler() + console.setLevel(logging.INFO) + formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") + console.setFormatter(formatter) + logging.getLogger().addHandler(console) + + +def log_initial_configuration(args): + for arg, value in vars(args).items(): + logging.info(f"{arg}: {value}") + + +def set_random_seed(seed): + np.random.seed(seed) + paddle.seed(seed=seed) + + +def set_device(gpu_id): + os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id) + device = str("cuda" if paddle.device.cuda.device_count() >= 1 else "cpu").replace( + "cuda", "gpu" + ) + return device + + +def load_tensor(file_path, mask=None): + if not os.path.exists(file_path): + raise FileNotFoundError(f"File not found: {file_path}") + data = np.load(file_path) + if mask is not None: + data = data[mask] + return paddle.to_tensor(data=data).astype(dtype="float32") + + +def custom_collate_fn(batch): + return batch + + +class PointCloudDataset(paddle.io.Dataset): + def __init__( + self, root_dir, transform=None, train=True, translate=True, submit=False + ): + """ + Args: + root_dir (string): Directory with all the point cloud files. + transform (callable, optional): Optional transform to be applied on a sample. 
+ """ + self.root_dir = root_dir + self.file_list = [ + f + for f in os.listdir(os.path.join(root_dir, "centroid")) + if f.endswith(".npy") + ] + self.transform = transform + self.train = train + self.translate = translate + self.submit = submit + + def __len__(self): + return len(self.file_list) + + def __getitem__(self, idx): + # start_time = time.time() + file_name = self.file_list[idx] + file_path = os.path.join(self.root_dir, "centroid", file_name) + points = np.load(file_path) + points_min = np.min(points, axis=0, keepdims=True) + points_max = np.max(points, axis=0, keepdims=True) + mean_std_dict = load_mean_std("mean_std.txt") + if self.train: + sample_rate = 0.1 + else: + sample_rate = 0.4 + if self.submit: + sampled_indices = np.arange(tuple(points.shape)[0]) + else: + sampled_indices = np.random.choice( + np.arange(tuple(points.shape)[0]), + int(len(points) * sample_rate), + replace=False, + ) + sampled_points = points[sampled_indices].astype(np.float32) + local_sampled_points = (sampled_points - points_min) / ( + points_max - points_min + ).astype(np.float32) + press_sample = np.load( + os.path.join(self.root_dir, "press", file_name.replace("centroid", "press")) + )[sampled_indices].astype(np.float32) + if self.translate and self.train: + translation_vector = np.random.rand(3) * 0.01 - 0.005 + sampled_points += translation_vector + Normal = True + if Normal: + sampled_points = ( + sampled_points - mean_std_dict["centroid"][0] + ) / mean_std_dict["centroid"][1] + sample = { + "centroid": sampled_points.astype(np.float32), + "local_centroid": local_sampled_points.astype(np.float32), + "press": press_sample.astype(np.float32), + "file_name": file_name, + "mean_std": mean_std_dict, + } + return sample + + +def load_mean_std(input_file): + """ + Load mean and standard deviations from a text file. + Args: + input_file (str): The path to the text file containing the saved mean and std values. + + Returns: + dict: A dictionary with keys as the data categories and values as tuples of mean and std. 
+ """ + results = {} + with open(input_file, "r") as f: + lines = f.readlines() + for line in lines: + parts = line.split() + category = parts[0] + if category == "centroid": + mean_val_1 = float(parts[3].strip(",")) + mean_val_2 = float(parts[4].strip(",")) + mean_val_3 = float(parts[5].strip(",")) + std_val_1 = float(parts[9].strip(",")) + std_val_2 = float(parts[10].strip(",")) + std_val_3 = float(parts[11].strip(",")) + mean_val = [mean_val_1, mean_val_2, mean_val_3] + std_val = [std_val_1, std_val_2, std_val_3] + else: + mean_val = [float(parts[3].strip(","))] + std_val = [float(parts[7])] + results[category] = mean_val, std_val + return results + + +class modified_log_transformed_l2_relative_error_loss(object): + def __init__(self, epsilon=1e-08): + super(modified_log_transformed_l2_relative_error_loss, self).__init__() + self.epsilon = epsilon + + def rel(self, x, y): + num_examples = tuple(x.shape)[0] + sign_x = paddle.sign(x=x.reshape(num_examples, -1)) + sign_y = paddle.sign(x=y.reshape(num_examples, -1)) + log_abs_x = paddle.log( + x=paddle.abs(x=x.reshape(num_examples, -1)) + + paddle.to_tensor(data=self.epsilon, dtype=x.dtype, place=x.place) + ) + log_abs_y = paddle.log( + x=paddle.abs(x=y.reshape(num_examples, -1)) + + paddle.to_tensor(data=self.epsilon, dtype=x.dtype, place=x.place) + ) + signed_log_x = sign_x * log_abs_x + signed_log_y = sign_y * log_abs_y + diff = signed_log_x - signed_log_y + diff_norm = paddle.linalg.norm(x=diff, p=2, axis=1) + y_norm = paddle.linalg.norm(x=signed_log_y, p=2, axis=1) + relative_error = diff_norm / y_norm + return paddle.mean(x=relative_error) + + def __call__(self, x, y): + return self.rel(x, y) + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Train a model for Automobile Aerodynamic Drag Prediction."
+    )
+    parser.add_argument(
+        "--training_data_dir",
+        type=str,
+        default="./Dataset/train_track_B_e",
+        help="Directory for the training data",
+    )
+    parser.add_argument(
+        "--testing_data_dir",
+        type=str,
+        default="./Dataset/Testset_track_B_e",
+        help="Directory for the testing data",
+    )
+    parser.add_argument(
+        "--log_dir",
+        type=str,
+        default="./results",
+        help="Directory for saving logs and results",
+    )
+    parser.add_argument("--gpu_id", default=0, type=int)
+    parser.add_argument(
+        "--num_segments", type=int, default=10, help="Number of segments to split"
+    )
+    parser.add_argument(
+        "--segments_id", type=int, default=0, help="Index of the segment to process"
+    )
+    parser.add_argument(
+        "--overlap_ratio", type=float, default=0.5, help="Overlap ratio for segments"
+    )
+    parser.add_argument(
+        "--global_normal",
+        type=bool,
+        default=True,
+        help="Whether to use global normal or not",
+    )
+    parser.add_argument(
+        "--normalization", type=bool, default=True, help="Flag to normalize data or not"
+    )
+    parser.add_argument(
+        "--translate", action="store_true", help="Translate data or not"
+    )
+    parser.add_argument(
+        "--loss_type",
+        type=str,
+        default="rl2",
+        choices=["l2", "rl2", "log_rl2", "huber"],
+        help="The type of loss function to use.",
+    )
+    parser.add_argument(
+        "--submit", action="store_true", help="Whether to generate submission data"
+    )
+    parser.add_argument("--batch_size", type=int, default=5)
+    parser.add_argument(
+        "--input_dim", type=int, default=6, help="Dimension of model input"
+    )
+    parser.add_argument(
+        "--output_dim", type=int, default=1, help="Dimension of model output"
+    )
+    parser.add_argument("--depth", type=int, default=5, help="Depth of the model")
+    parser.add_argument(
+        "--hidden_dim", type=int, default=256, help="Dimension of hidden features"
+    )
+    parser.add_argument(
+        "--num_slices",
+        type=int,
+        default=32,
+        help="Number of slices for slicing the input",
+    )
+    parser.add_argument(
+        "--num_heads", type=int, default=8, help="Number of attention heads"
+    )
+    parser.add_argument(
+        "--mlp_ratio",
+        type=int,
+        default=2,
+        help="Ratio of mlp hidden dim to embedding dim",
+    )
+    parser.add_argument("--patch_size", type=int, default=20, help="Size of each patch")
+    parser.add_argument(
+        "--shift", type=int, default=4, help="Shift size for shifting the patches"
+    )
+    parser.add_argument("--n_layer", type=int, default=1, help="Number of layers")
+    parser.add_argument(
+        "--epochs", type=int, default=69, help="Number of epochs to train"
+    )
+    parser.add_argument(
+        "--lr", type=float, default=0.001, help="Learning rate for the optimizer"
+    )
+    parser.add_argument(
+        "--scheduler_step",
+        type=int,
+        default=30,
+        help="Number of steps after which to reduce learning rate",
+    )
+    parser.add_argument(
+        "--milestones", nargs="+", type=int, default=[40, 50, 60, 65, 68]
+    )
+    parser.add_argument(
+        "--scheduler_gamma",
+        type=float,
+        default=0.5,
+        help="Gamma factor for reducing the learning rate",
+    )
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    set_random_seed(123)
+    args = parse_args()
+    print(args)
+    save_dir = args.log_dir
+    setup_logging(save_dir)
+    log_initial_configuration(args)
+    device = set_device(args.gpu_id)
+    model = Model(
+        n_hidden=args.hidden_dim,
+        n_layers=args.depth,
+        space_dim=args.input_dim,
+        fun_dim=0,
+        n_head=args.num_heads,
+        mlp_ratio=args.mlp_ratio,
+        out_dim=args.output_dim,
+        slice_num=args.num_slices,
+        n_iter=args.n_layer,
+        unified_pos=0,
+    ).to(device)
+    L2_fn = LpLoss(reduction=False)
+    if args.submit:
+        submit_dataset = PointCloudDataset(
+            root_dir=args.testing_data_dir, train=False, translate=False, submit=True
+        )
+        submitloader = paddle.io.DataLoader(
+            dataset=submit_dataset,
+            batch_size=1,
+            shuffle=False,
+            collate_fn=custom_collate_fn,
+        )
+        output_dirs = os.path.join(save_dir, "output")
+        if os.path.exists(output_dirs) is False:
+            os.makedirs(output_dirs)
+        model = Model(
+            n_hidden=args.hidden_dim,
+            n_layers=args.depth,
+            space_dim=args.input_dim,
+            fun_dim=0,
+            n_head=args.num_heads,
+            mlp_ratio=args.mlp_ratio,
+            out_dim=args.output_dim,
+            slice_num=args.num_slices,
+            n_iter=args.n_layer,
+            unified_pos=0,
+        ).to(device)
+        model.set_state_dict(
+            state_dict=paddle.load(path=f"{save_dir}/checkpoint.pdparams")
+        )
+        y_list = []
+        y_hat_list = []
+        L2 = []
+        model.eval()
+        with paddle.no_grad():
+            for batch in submitloader:
+                for i in range(len(batch)):
+                    x_centroid = batch[i]["centroid"].cuda().unsqueeze(axis=0)
+                    x_local_centroid = (
+                        batch[i]["local_centroid"].cuda().unsqueeze(axis=0)
+                    )
+                    y = batch[i]["press"].cuda().unsqueeze(axis=0)
+                    features = paddle.concat(x=[x_centroid, x_local_centroid], axis=-1)
+                    y_hat = model(features)
+                    y_hat = (
+                        y_hat * batch[i]["mean_std"]["press"][1][0]
+                        + batch[i]["mean_std"]["press"][0][0]
+                    ).cuda()
+                    test_L2 = L2_fn(y_hat, y)
+                    L2.append(test_L2.cpu())
+                    np.save(
+                        f"{output_dirs}/{batch[i]['file_name'].replace('centroid', 'press')}",
+                        y_hat.cpu().numpy().squeeze(),
+                    )
+                    print(
+                        f"{batch[i]['file_name'].replace('centroid', 'press')} score: {test_L2.cpu().item():.5f}"
+                    )
+        L2 = paddle.mean(x=paddle.concat(x=L2, axis=0), axis=0)
+        y_list.append(y.cpu().numpy().squeeze())
+        y_hat_list.append(y_hat.cpu().numpy().squeeze())
+        print(float(L2))
+        print("########################## submit successfully #################")
diff --git a/jointContribution/IJCAI_2024/tenfeng/mean_std.txt b/jointContribution/IJCAI_2024/tenfeng/mean_std.txt
index c1480589f2..9c1be9a19f 100644
--- a/jointContribution/IJCAI_2024/tenfeng/mean_std.txt
+++ b/jointContribution/IJCAI_2024/tenfeng/mean_std.txt
@@ -1,8 +1,8 @@
-mfpfh mean: [ 18.1818181818182 ] std: [ 39.81273955272964 ]
-centroid mean: [ 1.5760514612539709 -0.01965541469322948 0.597544295937334 ] std: [ 1.354381732849656 0.6233992628798983 0.39729898654716606 ]
-curvature mean: [ 0.026065537375220396 ] std: [ 0.04991963191398231 ]
-normalcosine mean: [ 0.0034148012723603342 ] std: [ 0.4473087555293666 ]
-roughness mean: [ 0.04039116896153989 ] std: [ 0.0014707461952125748 ]
-slope mean: [ 90.18651593156174 ] std: [ 31.94540241672869 ]
-press mean: [ -93.1135960724897 ] std: [ 113.59230112800168 ]
-weightcosine mean: [ -1.0923285238749563 ] std: [ 65.97032086985001 ]
+mfpfh mean: [ 18.1818181818182 ] std: [ 39.81273955272964 ]
+centroid mean: [ 1.5760514612539709 -0.01965541469322948 0.597544295937334 ] std: [ 1.354381732849656 0.6233992628798983 0.39729898654716606 ]
+curvature mean: [ 0.026065537375220396 ] std: [ 0.04991963191398231 ]
+normalcosine mean: [ 0.0034148012723603342 ] std: [ 0.4473087555293666 ]
+roughness mean: [ 0.04039116896153989 ] std: [ 0.0014707461952125748 ]
+slope mean: [ 90.18651593156174 ] std: [ 31.94540241672869 ]
+press mean: [ -93.1135960724897 ] std: [ 113.59230112800168 ]
+weightcosine mean: [ -1.0923285238749563 ] std: [ 65.97032086985001 ]
diff --git a/jointContribution/IJCAI_2024/tenfeng/requirements.txt b/jointContribution/IJCAI_2024/tenfeng/requirements.txt
index 4dec2dbb35..df6b2e2a89 100644
--- a/jointContribution/IJCAI_2024/tenfeng/requirements.txt
+++
b/jointContribution/IJCAI_2024/tenfeng/requirements.txt @@ -1,4 +1,4 @@ -einops -numpy -paddlepaddle_gpu -scipy +einops +numpy +paddlepaddle_gpu +scipy diff --git a/jointContribution/IJCAI_2024/tenfeng/utils/paddle_aux.py b/jointContribution/IJCAI_2024/tenfeng/utils/paddle_aux.py index bb1c901379..d0750abb56 100644 --- a/jointContribution/IJCAI_2024/tenfeng/utils/paddle_aux.py +++ b/jointContribution/IJCAI_2024/tenfeng/utils/paddle_aux.py @@ -1,110 +1,110 @@ -# This file is generated by PaConvert ToolKit, please Don't edit it! -import paddle - - -def view(self, *args, **kwargs): - if args: - if len(args) == 1: - if isinstance(args[0], (tuple, list)): - return paddle.reshape(self, args[0]) # To change reshape => view - elif isinstance(args[0], str): - return paddle.view(self, args[0]) - else: - return paddle.reshape(self, list(args)) # To change reshape => view - else: - return paddle.reshape(self, list(args)) # To change reshape => view - elif kwargs: - key = [k for k in kwargs.keys()] - if "dtype" in kwargs: - return paddle.view(self, shape_or_dtype=kwargs[key[0]]) - else: - return paddle.reshape( - self, shape=kwargs[key[0]] - ) # To change reshape => view - - -setattr(paddle.Tensor, "view", view) - - -def reshape(self, *args, **kwargs): - if args: - if len(args) == 1 and isinstance(args[0], (tuple, list)): - return paddle.reshape(self, args[0]) - else: - return paddle.reshape(self, list(args)) - elif kwargs: - assert "shape" in kwargs - return paddle.reshape(self, shape=kwargs["shape"]) - - -setattr(paddle.Tensor, "reshape", reshape) - - -def min_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.minimum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.minimum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.min(self, *args, **kwargs), paddle.argmin( - self, *args, **kwargs - ) - else: - ret = paddle.min(self, *args, **kwargs) - - return ret - - -def max_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.maximum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.maximum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.max(self, *args, **kwargs), paddle.argmax( - self, *args, **kwargs - ) - else: - ret = paddle.max(self, *args, **kwargs) - - return ret - - -setattr(paddle.Tensor, "min", min_class_func) -setattr(paddle.Tensor, "max", max_class_func) - - -def _FUNCTIONAL_PAD(x, pad, mode="constant", value=0.0, data_format="NCHW"): - if len(x.shape) * 2 == len(pad) and mode == "constant": - pad = ( - paddle.to_tensor(pad, dtype="int32") - .reshape((-1, 2)) - .flip([0]) - .flatten() - .tolist() - ) - return paddle.nn.functional.pad(x, pad, mode, value, data_format) - - -def repeat(self, *args, **kwargs): - if args: - if len(args) == 1 and isinstance(args[0], (tuple, list)): - return paddle.tile(self, args[0]) - else: - return paddle.tile(self, list(args)) - elif kwargs: - assert "repeats" in kwargs - return paddle.tile(self, repeat_times=kwargs["repeats"]) - - -setattr(paddle.Tensor, "repeat", repeat) +# This file is generated by PaConvert ToolKit, please Don't edit it! 
+import paddle + + +def view(self, *args, **kwargs): + if args: + if len(args) == 1: + if isinstance(args[0], (tuple, list)): + return paddle.reshape(self, args[0]) # To change reshape => view + elif isinstance(args[0], str): + return paddle.view(self, args[0]) + else: + return paddle.reshape(self, list(args)) # To change reshape => view + else: + return paddle.reshape(self, list(args)) # To change reshape => view + elif kwargs: + key = [k for k in kwargs.keys()] + if "dtype" in kwargs: + return paddle.view(self, shape_or_dtype=kwargs[key[0]]) + else: + return paddle.reshape( + self, shape=kwargs[key[0]] + ) # To change reshape => view + + +setattr(paddle.Tensor, "view", view) + + +def reshape(self, *args, **kwargs): + if args: + if len(args) == 1 and isinstance(args[0], (tuple, list)): + return paddle.reshape(self, args[0]) + else: + return paddle.reshape(self, list(args)) + elif kwargs: + assert "shape" in kwargs + return paddle.reshape(self, shape=kwargs["shape"]) + + +setattr(paddle.Tensor, "reshape", reshape) + + +def min_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.minimum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.minimum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.min(self, *args, **kwargs), paddle.argmin( + self, *args, **kwargs + ) + else: + ret = paddle.min(self, *args, **kwargs) + + return ret + + +def max_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.maximum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.maximum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.max(self, *args, **kwargs), paddle.argmax( + self, *args, **kwargs + ) + else: + ret = paddle.max(self, *args, **kwargs) + + return ret + + +setattr(paddle.Tensor, "min", min_class_func) +setattr(paddle.Tensor, "max", max_class_func) + + +def _FUNCTIONAL_PAD(x, pad, mode="constant", value=0.0, data_format="NCHW"): + if len(x.shape) * 2 == len(pad) and mode == "constant": + pad = ( + paddle.to_tensor(pad, dtype="int32") + .reshape((-1, 2)) + .flip([0]) + .flatten() + .tolist() + ) + return paddle.nn.functional.pad(x, pad, mode, value, data_format) + + +def repeat(self, *args, **kwargs): + if args: + if len(args) == 1 and isinstance(args[0], (tuple, list)): + return paddle.tile(self, args[0]) + else: + return paddle.tile(self, list(args)) + elif kwargs: + assert "repeats" in kwargs + return paddle.tile(self, repeat_times=kwargs["repeats"]) + + +setattr(paddle.Tensor, "repeat", repeat) diff --git a/jointContribution/IJCAI_2024/tenfeng/utils/utils.py b/jointContribution/IJCAI_2024/tenfeng/utils/utils.py index 3c8520b028..4c33ef2776 100644 --- a/jointContribution/IJCAI_2024/tenfeng/utils/utils.py +++ b/jointContribution/IJCAI_2024/tenfeng/utils/utils.py @@ -1,64 +1,64 @@ -import argparse -import operator -from functools import reduce - -import paddle - - -def dict2namespace(config): - namespace = argparse.Namespace() - for key, value in config.items(): - if isinstance(value, dict): - new_value = dict2namespace(value) - else: - new_value = value - setattr(namespace, key, new_value) - return namespace - - -class LpLoss(object): - def __init__(self, d=2, p=2, size_average=True, reduction=True): 
- super(LpLoss, self).__init__() - assert d > 0 and p > 0 - self.d = d - self.p = p - self.reduction = reduction - self.size_average = size_average - - def abs(self, x, y): - num_examples = tuple(x.shape)[0] - h = 1.0 / (tuple(x.shape)[1] - 1.0) - all_norms = h ** (self.d / self.p) * paddle.linalg.norm( - x=x.view(num_examples, -1) - y.view(num_examples, -1), p=self.p, axis=1 - ) - if self.reduction: - if self.size_average: - return paddle.mean(x=all_norms) - else: - return paddle.sum(x=all_norms) - return all_norms - - def rel(self, x, y): - num_examples = tuple(x.shape)[0] - diff_norms = paddle.linalg.norm( - x=x.reshape(num_examples, -1) - y.reshape(num_examples, -1), - p=self.p, - axis=1, - ) - y_norms = paddle.linalg.norm(x=y.reshape(num_examples, -1), p=self.p, axis=1) - if self.reduction: - if self.size_average: - return paddle.mean(x=diff_norms / y_norms) - else: - return paddle.sum(x=diff_norms / y_norms) - return diff_norms / y_norms - - def __call__(self, x, y): - return self.rel(x, y) - - -def count_params(model): - c = 0 - for p in list(model.parameters()): - c += reduce(operator.mul, list(tuple(p.shape))) - return c +import argparse +import operator +from functools import reduce + +import paddle + + +def dict2namespace(config): + namespace = argparse.Namespace() + for key, value in config.items(): + if isinstance(value, dict): + new_value = dict2namespace(value) + else: + new_value = value + setattr(namespace, key, new_value) + return namespace + + +class LpLoss(object): + def __init__(self, d=2, p=2, size_average=True, reduction=True): + super(LpLoss, self).__init__() + assert d > 0 and p > 0 + self.d = d + self.p = p + self.reduction = reduction + self.size_average = size_average + + def abs(self, x, y): + num_examples = tuple(x.shape)[0] + h = 1.0 / (tuple(x.shape)[1] - 1.0) + all_norms = h ** (self.d / self.p) * paddle.linalg.norm( + x=x.view(num_examples, -1) - y.view(num_examples, -1), p=self.p, axis=1 + ) + if self.reduction: + if self.size_average: + return paddle.mean(x=all_norms) + else: + return paddle.sum(x=all_norms) + return all_norms + + def rel(self, x, y): + num_examples = tuple(x.shape)[0] + diff_norms = paddle.linalg.norm( + x=x.reshape(num_examples, -1) - y.reshape(num_examples, -1), + p=self.p, + axis=1, + ) + y_norms = paddle.linalg.norm(x=y.reshape(num_examples, -1), p=self.p, axis=1) + if self.reduction: + if self.size_average: + return paddle.mean(x=diff_norms / y_norms) + else: + return paddle.sum(x=diff_norms / y_norms) + return diff_norms / y_norms + + def __call__(self, x, y): + return self.rel(x, y) + + +def count_params(model): + c = 0 + for p in list(model.parameters()): + c += reduce(operator.mul, list(tuple(p.shape))) + return c diff --git a/jointContribution/IJCAI_2024/zhongzaicanyu/data_process.py b/jointContribution/IJCAI_2024/zhongzaicanyu/data_process.py index 99574f01b5..90ac66d770 100644 --- a/jointContribution/IJCAI_2024/zhongzaicanyu/data_process.py +++ b/jointContribution/IJCAI_2024/zhongzaicanyu/data_process.py @@ -1,113 +1,113 @@ -import os - -import numpy as np -import utils.paddle_aux # NOQA -import vtk - - -def load_centroid(file_path): - centroid = np.load(file_path).reshape((-1, 3)).astype(np.float32) - return centroid - - -def load_pressure(file_path): - press = np.load(file_path).reshape((-1,)).astype(np.float32) - return press - - -def write_vtk(vertices, pressure_data, output_path): - points = vtk.vtkPoints() - for idx, vertex in enumerate(vertices): - points.InsertNextPoint(vertex) - cells = vtk.vtkCellArray() - for idx 
in range(len(vertices)): - cells.InsertNextCell(1) - cells.InsertCellPoint(idx) - unstructured_grid = vtk.vtkUnstructuredGrid() - unstructured_grid.SetPoints(points) - unstructured_grid.SetCells(vtk.VTK_VERTEX, cells) - pressure = vtk.vtkFloatArray() - pressure.SetName("Pressure") - for idx, value in enumerate(pressure_data): - pressure.InsertNextValue(value) - unstructured_grid.GetPointData().AddArray(pressure) - writer = vtk.vtkUnstructuredGridWriter() - writer.SetFileName(output_path) - writer.SetInputData(unstructured_grid) - writer.Write() - - -def process_directory(input_centroid_dir, input_pressure_dir, output_dir): - if not os.path.exists(output_dir): - os.makedirs(output_dir) - for file_name in os.listdir(input_centroid_dir): - if file_name.endswith(".npy") and file_name.startswith("centroid_"): - mesh_index = file_name.replace("centroid_", "").replace(".npy", "") - centroid_file_path = os.path.join(input_centroid_dir, file_name) - pressure_file_name = f"press_{mesh_index}.npy" - pressure_file_path = os.path.join(input_pressure_dir, pressure_file_name) - vtk_file_path = os.path.join(output_dir, f"mesh_{mesh_index}.vtk") - if os.path.exists(pressure_file_path): - vertices = load_centroid(centroid_file_path) - pressure_data = load_pressure(pressure_file_path) - num_vertices = tuple(vertices.shape)[0] - num_pressure = tuple(pressure_data.shape)[0] - if num_pressure > num_vertices: - print( - f"Warning: Pressure data for {file_name} is larger than the number of points. ", - "Trimming extra data.", - ) - pressure_data = pressure_data[:num_vertices] - elif num_pressure < num_vertices: - print( - f"Warning: Pressure data for {file_name} is smaller than the number of points. ", - "Trimming extra points.", - ) - vertices = vertices[:num_pressure] - write_vtk(vertices, pressure_data, vtk_file_path) - print(f"Processed {file_name} to {vtk_file_path}") - else: - print(f"Pressure file for {file_name} not found.") - - -def write_vtk2(vertices, output_path): - points = vtk.vtkPoints() - for idx, vertex in enumerate(vertices): - points.InsertNextPoint(vertex) - cells = vtk.vtkCellArray() - for idx in range(len(vertices)): - cells.InsertNextCell(1) - cells.InsertCellPoint(idx) - unstructured_grid = vtk.vtkUnstructuredGrid() - unstructured_grid.SetPoints(points) - unstructured_grid.SetCells(vtk.VTK_VERTEX, cells) - writer = vtk.vtkUnstructuredGridWriter() - writer.SetFileName(output_path) - writer.SetInputData(unstructured_grid) - writer.Write() - - -def process_directory2(input_centroid_dir, output_dir): - if not os.path.exists(output_dir): - os.makedirs(output_dir) - for file_name in os.listdir(input_centroid_dir): - if file_name.endswith(".npy") and file_name.startswith("centroid_"): - mesh_index = file_name.replace("centroid_", "").replace(".npy", "") - centroid_file_path = os.path.join(input_centroid_dir, file_name) - vtk_file_path = os.path.join(output_dir, f"mesh_{mesh_index}.vtk") - vertices = load_centroid(centroid_file_path) - write_vtk2(vertices, vtk_file_path) - print(f"Processed {file_name} to {vtk_file_path}") - - -def data_process(): - input_centroid_directory = "./Dataset/data_track_B" - input_pressure_directory = "./Dataset/data_track_B" - output_directory = "./Dataset/data_centroid_track_B_vtk" - process_directory( - input_centroid_directory, input_pressure_directory, output_directory - ) - - input_centroid_directory = "./Dataset/track_B" - output_directory = "./Dataset/track_B_vtk" - process_directory2(input_centroid_directory, output_directory) +import os + +import numpy as np 
+import utils.paddle_aux # NOQA +import vtk + + +def load_centroid(file_path): + centroid = np.load(file_path).reshape((-1, 3)).astype(np.float32) + return centroid + + +def load_pressure(file_path): + press = np.load(file_path).reshape((-1,)).astype(np.float32) + return press + + +def write_vtk(vertices, pressure_data, output_path): + points = vtk.vtkPoints() + for idx, vertex in enumerate(vertices): + points.InsertNextPoint(vertex) + cells = vtk.vtkCellArray() + for idx in range(len(vertices)): + cells.InsertNextCell(1) + cells.InsertCellPoint(idx) + unstructured_grid = vtk.vtkUnstructuredGrid() + unstructured_grid.SetPoints(points) + unstructured_grid.SetCells(vtk.VTK_VERTEX, cells) + pressure = vtk.vtkFloatArray() + pressure.SetName("Pressure") + for idx, value in enumerate(pressure_data): + pressure.InsertNextValue(value) + unstructured_grid.GetPointData().AddArray(pressure) + writer = vtk.vtkUnstructuredGridWriter() + writer.SetFileName(output_path) + writer.SetInputData(unstructured_grid) + writer.Write() + + +def process_directory(input_centroid_dir, input_pressure_dir, output_dir): + if not os.path.exists(output_dir): + os.makedirs(output_dir) + for file_name in os.listdir(input_centroid_dir): + if file_name.endswith(".npy") and file_name.startswith("centroid_"): + mesh_index = file_name.replace("centroid_", "").replace(".npy", "") + centroid_file_path = os.path.join(input_centroid_dir, file_name) + pressure_file_name = f"press_{mesh_index}.npy" + pressure_file_path = os.path.join(input_pressure_dir, pressure_file_name) + vtk_file_path = os.path.join(output_dir, f"mesh_{mesh_index}.vtk") + if os.path.exists(pressure_file_path): + vertices = load_centroid(centroid_file_path) + pressure_data = load_pressure(pressure_file_path) + num_vertices = tuple(vertices.shape)[0] + num_pressure = tuple(pressure_data.shape)[0] + if num_pressure > num_vertices: + print( + f"Warning: Pressure data for {file_name} is larger than the number of points. ", + "Trimming extra data.", + ) + pressure_data = pressure_data[:num_vertices] + elif num_pressure < num_vertices: + print( + f"Warning: Pressure data for {file_name} is smaller than the number of points. 
", + "Trimming extra points.", + ) + vertices = vertices[:num_pressure] + write_vtk(vertices, pressure_data, vtk_file_path) + print(f"Processed {file_name} to {vtk_file_path}") + else: + print(f"Pressure file for {file_name} not found.") + + +def write_vtk2(vertices, output_path): + points = vtk.vtkPoints() + for idx, vertex in enumerate(vertices): + points.InsertNextPoint(vertex) + cells = vtk.vtkCellArray() + for idx in range(len(vertices)): + cells.InsertNextCell(1) + cells.InsertCellPoint(idx) + unstructured_grid = vtk.vtkUnstructuredGrid() + unstructured_grid.SetPoints(points) + unstructured_grid.SetCells(vtk.VTK_VERTEX, cells) + writer = vtk.vtkUnstructuredGridWriter() + writer.SetFileName(output_path) + writer.SetInputData(unstructured_grid) + writer.Write() + + +def process_directory2(input_centroid_dir, output_dir): + if not os.path.exists(output_dir): + os.makedirs(output_dir) + for file_name in os.listdir(input_centroid_dir): + if file_name.endswith(".npy") and file_name.startswith("centroid_"): + mesh_index = file_name.replace("centroid_", "").replace(".npy", "") + centroid_file_path = os.path.join(input_centroid_dir, file_name) + vtk_file_path = os.path.join(output_dir, f"mesh_{mesh_index}.vtk") + vertices = load_centroid(centroid_file_path) + write_vtk2(vertices, vtk_file_path) + print(f"Processed {file_name} to {vtk_file_path}") + + +def data_process(): + input_centroid_directory = "./Dataset/data_track_B" + input_pressure_directory = "./Dataset/data_track_B" + output_directory = "./Dataset/data_centroid_track_B_vtk" + process_directory( + input_centroid_directory, input_pressure_directory, output_directory + ) + + input_centroid_directory = "./Dataset/track_B" + output_directory = "./Dataset/track_B_vtk" + process_directory2(input_centroid_directory, output_directory) diff --git a/jointContribution/IJCAI_2024/zhongzaicanyu/dataset.py b/jointContribution/IJCAI_2024/zhongzaicanyu/dataset.py index f827779b9a..8caa3b92d9 100644 --- a/jointContribution/IJCAI_2024/zhongzaicanyu/dataset.py +++ b/jointContribution/IJCAI_2024/zhongzaicanyu/dataset.py @@ -1,595 +1,595 @@ -import itertools -import os -import random -from typing import List -from typing import Union - -import numpy as np -import paddle -import vtk -from sklearn.neighbors import NearestNeighbors -from tqdm import tqdm -from vtk.util.numpy_support import vtk_to_numpy - - -def load_unstructured_grid_data(file_name): - reader = vtk.vtkUnstructuredGridReader() - reader.SetFileName(file_name) - reader.Update() - output = reader.GetOutput() - return output - - -def unstructured_grid_data_to_poly_data(unstructured_grid_data): - filter = vtk.vtkDataSetSurfaceFilter() - filter.SetInputData(unstructured_grid_data) - filter.Update() - poly_data = filter.GetOutput() - return poly_data, filter - - -def get_sdf(target, boundary): - nbrs = NearestNeighbors(n_neighbors=1).fit(boundary) - dists, indices = nbrs.kneighbors(target) - neis = np.array([boundary[i[0]] for i in indices]) - dirs = (target - neis) / (dists + 1e-08) - return dists.reshape(-1), dirs - - -def get_normal(unstructured_grid_data): - poly_data, surface_filter = unstructured_grid_data_to_poly_data( - unstructured_grid_data - ) - normal_filter = vtk.vtkPolyDataNormals() - normal_filter.SetInputData(poly_data) - normal_filter.SetAutoOrientNormals(1) - normal_filter.SetConsistency(1) - normal_filter.SetComputeCellNormals(1) - normal_filter.SetComputePointNormals(0) - normal_filter.Update() - unstructured_grid_data.GetCellData().SetNormals( - 
normal_filter.GetOutput().GetCellData().GetNormals() - ) - c2p = vtk.vtkCellDataToPointData() - c2p.SetInputData(unstructured_grid_data) - c2p.Update() - unstructured_grid_data = c2p.GetOutput() - normal = vtk_to_numpy(c2p.GetOutput().GetPointData().GetNormals()).astype(np.double) - normal /= np.max(np.abs(normal), axis=1, keepdims=True) + 1e-08 - normal /= np.linalg.norm(normal, axis=1, keepdims=True) + 1e-08 - if np.isnan(normal).sum() > 0: - print(np.isnan(normal).sum()) - print("recalculate") - return get_normal(unstructured_grid_data) - return normal - - -def visualize_poly_data(poly_data, surface_filter, normal_filter=None): - if normal_filter is not None: - mask = vtk.vtkMaskPoints() - mask.SetInputData(normal_filter.GetOutput()) - mask.Update() - arrow = vtk.vtkArrowSource() - arrow.Update() - glyph = vtk.vtkGlyph3D() - glyph.SetInputData(mask.GetOutput()) - glyph.SetSourceData(arrow.GetOutput()) - glyph.SetVectorModeToUseNormal() - glyph.SetScaleFactor(0.1) - glyph.Update() - norm_mapper = vtk.vtkPolyDataMapper() - norm_mapper.SetInputData(normal_filter.GetOutput()) - glyph_mapper = vtk.vtkPolyDataMapper() - glyph_mapper.SetInputData(glyph.GetOutput()) - norm_actor = vtk.vtkActor() - norm_actor.SetMapper(norm_mapper) - glyph_actor = vtk.vtkActor() - glyph_actor.SetMapper(glyph_mapper) - glyph_actor.GetProperty().SetColor(1, 0, 0) - norm_render = vtk.vtkRenderer() - norm_render.AddActor(norm_actor) - norm_render.SetBackground(0, 1, 0) - glyph_render = vtk.vtkRenderer() - glyph_render.AddActor(glyph_actor) - glyph_render.AddActor(norm_actor) - glyph_render.SetBackground(0, 0, 1) - scalar_range = poly_data.GetScalarRange() - mapper = vtk.vtkDataSetMapper() - mapper.SetInputConnection(surface_filter.GetOutputPort()) - mapper.SetScalarRange(scalar_range) - actor = vtk.vtkActor() - actor.SetMapper(mapper) - renderer = vtk.vtkRenderer() - renderer.AddActor(actor) - renderer.SetBackground(1, 1, 1) - renderer_window = vtk.vtkRenderWindow() - renderer_window.AddRenderer(renderer) - if normal_filter is not None: - renderer_window.AddRenderer(norm_render) - renderer_window.AddRenderer(glyph_render) - renderer_window.Render() - interactor = vtk.vtkRenderWindowInteractor() - interactor.SetRenderWindow(renderer_window) - interactor.Initialize() - interactor.Start() - - -def get_scalar_data(unstructured_grid, scalar_name): - point_data = unstructured_grid.GetPointData() - if point_data: - scalar_array = point_data.GetArray(scalar_name) - if scalar_array: - return vtk_to_numpy(scalar_array) - return None - - -def bget_datalist( - root, samples, norm=False, coef_norm=None, savedir=None, preprocessed=False -): - dataset = [] - mean_in, mean_out = 0, 0 - std_in, std_out = 0, 0 - for k, s in enumerate(tqdm(samples, desc="Processing samples")): - if preprocessed and savedir is not None: - save_path = os.path.join(savedir, s) - if not os.path.exists(save_path): - continue - init = np.load(os.path.join(save_path, "x.npy")) - target = np.load(os.path.join(save_path, "y.npy")) - pos = np.load(os.path.join(save_path, "pos.npy")) - surf = np.load(os.path.join(save_path, "surf.npy")) - area = np.load(os.path.join(save_path, "area.npy")) - else: - file_name_press = os.path.join(root, s) - if not os.path.exists(file_name_press): - continue - unstructured_grid_data_press = load_unstructured_grid_data(file_name_press) - scalar_names = ["Pressure", "point_scalars"] - for scalar_name in scalar_names: - press = get_scalar_data(unstructured_grid_data_press, scalar_name) - if press is not None: - break - points_press 
= vtk_to_numpy( - unstructured_grid_data_press.GetPoints().GetData() - ) - sdf_press = np.zeros(tuple(points_press.shape)[0]) - pos_surf = points_press - sdf_surf = sdf_press - press_surf = press - mesh_number = s[-8:-4] - area_file_name = os.path.join("data_track_B", f"area_{mesh_number}.npy") - if os.path.exists(area_file_name): - area = np.load(area_file_name) - else: - area = np.zeros(len(pos_surf)) - info = np.full((len(pos_surf), 1), 30.0) - init_surf = np.c_[pos_surf, sdf_surf, area, info] - target_surf = np.c_[np.zeros((len(pos_surf), 3)), press_surf] - surf = np.ones(len(pos_surf)) - pos = pos_surf - init = init_surf - target = target_surf - if savedir is not None: - save_path = os.path.join(savedir, s) - if not os.path.exists(save_path): - os.makedirs(save_path) - np.save(os.path.join(save_path, "x.npy"), init) - np.save(os.path.join(save_path, "y.npy"), target) - np.save(os.path.join(save_path, "pos.npy"), pos) - np.save(os.path.join(save_path, "surf.npy"), surf) - np.save(os.path.join(save_path, "area.npy"), area) - surf = paddle.to_tensor(data=surf) - pos = paddle.to_tensor(data=pos) - x = paddle.to_tensor(data=init) - y = paddle.to_tensor(data=target) - if norm and coef_norm is None: - if k == 0: - old_length = tuple(init.shape)[0] - mean_in = init.mean(axis=0) - mean_out = target.mean(axis=0) - else: - new_length = old_length + tuple(init.shape)[0] - mean_in += ( - init.sum(axis=0) - tuple(init.shape)[0] * mean_in - ) / new_length - mean_out += ( - target.sum(axis=0) - tuple(init.shape)[0] * mean_out - ) / new_length - old_length = new_length - data = CustomData(pos=pos, x=x, y=y, surf=surf.astype(dtype="bool")) - dataset.append(data) - if norm and coef_norm is None: - for k, data in enumerate(dataset): - if k == 0: - old_length = tuple(data.x.numpy().shape)[0] - std_in = ((data.x.numpy() - mean_in) ** 2).sum(axis=0) / old_length - std_out = ((data.y.numpy() - mean_out) ** 2).sum(axis=0) / old_length - else: - new_length = old_length + tuple(data.x.numpy().shape)[0] - std_in += ( - ((data.x.numpy() - mean_in) ** 2).sum(axis=0) - - tuple(data.x.numpy().shape)[0] * std_in - ) / new_length - std_out += ( - ((data.y.numpy() - mean_out) ** 2).sum(axis=0) - - tuple(data.x.numpy().shape)[0] * std_out - ) / new_length - old_length = new_length - std_in = np.sqrt(std_in) - std_out = np.sqrt(std_out) - for data in dataset: - data.x = ((data.x - mean_in) / (std_in + 1e-08)).astype(dtype="float32") - data.y = ((data.y - mean_out) / (std_out + 1e-08)).astype(dtype="float32") - coef_norm = mean_in, std_in, mean_out, std_out - dataset = dataset, coef_norm - elif coef_norm is not None: - for data in dataset: - data.x = ((data.x - coef_norm[0]) / (coef_norm[1] + 1e-08)).astype( - dtype="float32" - ) - data.y = ((data.y - coef_norm[2]) / (coef_norm[3] + 1e-08)).astype( - dtype="float32" - ) - return dataset - - -def bget_datalist_for_prediction( - root, samples, norm=False, coef_norm=None, savedir=None, preprocessed=False -): - dataset = [] - mean_in, std_in = 0, 0 - for k, s in enumerate(tqdm(samples, desc="Processing samples")): - if preprocessed and savedir is not None: - save_path = os.path.join(savedir, s) - if not os.path.exists(save_path): - continue - init = np.load(os.path.join(save_path, "x.npy")) - pos = np.load(os.path.join(save_path, "pos.npy")) - surf = np.load(os.path.join(save_path, "surf.npy")) - area = np.load(os.path.join(save_path, "area.npy")) - else: - file_name = os.path.join(root, s) - if not os.path.exists(file_name): - continue - unstructured_grid_data = 
load_unstructured_grid_data(file_name) - points = vtk_to_numpy(unstructured_grid_data.GetPoints().GetData()) - sdf = np.zeros(tuple(points.shape)[0]) - pos_surf = points - sdf_surf = sdf - mesh_number = int(s.split("_")[-1].split(".")[0]) - area_file_name = os.path.join( - "../data/IJCAI_Car/track_B", f"area_{mesh_number}.npy" - ) - if os.path.exists(area_file_name): - area = np.load(area_file_name) - else: - area = np.zeros(len(pos_surf)) - info = np.full((len(pos_surf), 1), 30.0) - init_surf = np.c_[pos_surf, sdf_surf, area, info] - surf = np.ones(len(pos_surf)) - pos = pos_surf - init = init_surf - if savedir is not None: - save_path = os.path.join(savedir, s) - if not os.path.exists(save_path): - os.makedirs(save_path) - np.save(os.path.join(save_path, "x.npy"), init) - np.save(os.path.join(save_path, "pos.npy"), pos) - np.save(os.path.join(save_path, "surf.npy"), surf) - np.save(os.path.join(save_path, "area.npy"), area) - surf = paddle.to_tensor(data=surf) - pos = paddle.to_tensor(data=pos) - x = paddle.to_tensor(data=init) - y = paddle.zeros(shape=(tuple(x.shape)[0], 4)) - if norm and coef_norm is None: - if k == 0: - old_length = tuple(init.shape)[0] - mean_in = init.mean(axis=0) - else: - new_length = old_length + tuple(init.shape)[0] - mean_in += ( - init.sum(axis=0) - tuple(init.shape)[0] * mean_in - ) / new_length - old_length = new_length - data = CustomData(pos=pos, x=x, y=y, surf=surf.astype(dtype="bool")) - dataset.append(data) - if norm and coef_norm is None: - for k, data in enumerate(dataset): - if k == 0: - old_length = tuple(data.x.numpy().shape)[0] - std_in = ((data.x.numpy() - mean_in) ** 2).sum(axis=0) / old_length - else: - new_length = old_length + tuple(data.x.numpy().shape)[0] - std_in += ( - ((data.x.numpy() - mean_in) ** 2).sum(axis=0) - - tuple(data.x.numpy().shape)[0] * std_in - ) / new_length - old_length = new_length - std_in = np.sqrt(std_in) - for data in dataset: - data.x = ((data.x - mean_in) / (std_in + 1e-08)).astype(dtype="float32") - coef_norm = mean_in, std_in - elif coef_norm is not None: - for data in dataset: - data.x = ((data.x - coef_norm[0]) / (coef_norm[1] + 1e-08)).astype( - dtype="float32" - ) - return dataset - - -def bget_data_for_prediction(file_name, norm=False, coef_norm=None): - if not os.path.exists(file_name): - return - unstructured_grid_data = load_unstructured_grid_data(file_name) - points = vtk_to_numpy(unstructured_grid_data.GetPoints().GetData()) - sdf = np.zeros(tuple(points.shape)[0]) - pos_surf = points - sdf_surf = sdf - mesh_number = int(file_name.split("_")[-1].split(".")[0]) - area_file_name = os.path.join( - "../data/IJCAI_Car/track_B", f"area_{mesh_number}.npy" - ) - if os.path.exists(area_file_name): - area = np.load(area_file_name) - else: - area = np.zeros(len(pos_surf)) - info = np.full((len(pos_surf), 1), 30.0) - init_surf = np.c_[pos_surf, sdf_surf, area, info] - surf = np.ones(len(pos_surf)) - pos = pos_surf - init = init_surf - - surf = paddle.to_tensor(data=surf) - pos = paddle.to_tensor(data=pos) - x = paddle.to_tensor(data=init) - y = paddle.zeros(shape=(tuple(x.shape)[0], 4)) - - data = CustomData(pos=pos, x=x, y=y, surf=surf.astype(dtype="bool")) - - if coef_norm is not None: - data.x = ((data.x - coef_norm[0]) / (coef_norm[1] + 1e-08)).astype( - dtype="float32" - ) - return data - - -def get_edges(unstructured_grid_data, points, cell_size=4): - edge_indeces = set() - cells = vtk_to_numpy(unstructured_grid_data.GetCells().GetData()).reshape( - -1, cell_size + 1 - ) - for i in range(len(cells)): - for j, 
k in itertools.product(range(1, cell_size + 1), repeat=2): - edge_indeces.add((cells[i][j], cells[i][k])) - edge_indeces.add((cells[i][k], cells[i][j])) - edges = [[], []] - for u, v in edge_indeces: - edges[0].append(tuple(points[u])) - edges[1].append(tuple(points[v])) - return edges - - -def get_edge_index(pos, edges_press, edges_velo): - indices = {tuple(pos[i]): i for i in range(len(pos))} - edges = set() - for i in range(len(edges_press[0])): - edges.add((indices[edges_press[0][i]], indices[edges_press[1][i]])) - for i in range(len(edges_velo[0])): - edges.add((indices[edges_velo[0][i]], indices[edges_velo[1][i]])) - edge_index = np.array(list(edges)).T - return edge_index - - -def get_induced_graph(data, idx, num_hops): - subset, sub_edge_index, _, _ = k_hop_subgraph( - node_idx=idx, num_hops=num_hops, edge_index=data.edge_index, relabel_nodes=True - ) - return CustomData(x=data.x[subset], y=data.y[idx], edge_index=sub_edge_index) - - -def pc_normalize(pc): - centroid = paddle.mean(pc, axis=0) - pc = pc - centroid - m = paddle.max(x=paddle.sqrt(x=paddle.sum(pc**2, axis=1))) - pc = pc / m - return pc - - -def get_shape(data, max_n_point=8192, normalize=True, use_height=False): - surf_indices = paddle.where(data.surf)[0].tolist() - if len(surf_indices) > max_n_point: - surf_indices = np.array(random.sample(range(len(surf_indices)), max_n_point)) - shape_pc = data.pos[surf_indices].clone() - if normalize: - shape_pc = pc_normalize(shape_pc) - if use_height: - gravity_dim = 1 - height_array = ( - shape_pc[:, gravity_dim : gravity_dim + 1] - - shape_pc[:, gravity_dim : gravity_dim + 1].min() - ) - shape_pc = paddle.cat((shape_pc, height_array), axis=1) - return shape_pc - - -def create_edge_index_radius(data, r, max_neighbors=32): - if isinstance(data, list): - print("Error: 'data' is a list, expected 'CustomData' object.") - print("CustomData content:", data) - return None - data.edge_index = radius_graph( - x=data.pos, r=r, loop=True, max_num_neighbors=max_neighbors - ) - return data - - -class GraphDataset(paddle.io.Dataset): - def __init__( - self, - datalist, - use_height=False, - use_cfd_mesh=True, - r=None, - root=None, - norm=False, - coef_norm=None, - ): - super().__init__() - self.datalist = datalist - self.use_height = use_height - self.use_cfd_mesh = use_cfd_mesh - self.r = r - self.root = root - self.norm = norm - self.coef_norm = coef_norm - - def __len__(self): - return len(self.datalist) - - def __getitem__(self, idx): - file_name = os.path.join(self.root, self.datalist[idx]) - data = bget_data_for_prediction(file_name, self.norm, self.coef_norm) - if not self.use_cfd_mesh: - data = create_edge_index_radius(data, self.r) - shape = get_shape(data, use_height=self.use_height) - return data, shape - - def collate_fn(self, batch): - batch_data = [data for (data, _) in batch] - batch_shape = paddle.stack([shape for (_, shape) in batch], axis=0) - if len(batch_data) == 1: - return batch_data[0], batch_shape - return batch_data, batch_shape - - -def get_samples(root): - samples = [] - files = os.listdir(root) - for file in files: - if file.endswith(".vtk"): - samples.append(file) - return samples - - -def B_load_train_val_fold(args, preprocessed): - samples = get_samples(args.data_dir) - np.random.shuffle(samples) - trainlst = samples[: args.train_split] - vallst = samples[args.train_split : args.val_split] - if preprocessed: - print("use preprocessed data") - print("loading data") - train_dataset, coef_norm = bget_datalist( - args.data_dir, - trainlst, - norm=True, - 
savedir=args.save_dir, - preprocessed=preprocessed, - ) - val_dataset = bget_datalist( - args.data_dir, - vallst, - coef_norm=coef_norm, - savedir=args.save_dir, - preprocessed=preprocessed, - ) - print("load data finish") - return train_dataset, val_dataset, coef_norm - - -def Bload_train_val_fold_file(args, preprocessed, coef_norm): - samples = get_samples(args.test_data_dir) - np.random.shuffle(samples) - vallst = samples[:50] - if preprocessed: - print("use preprocessed data") - print("loading data") - val_dataset = bget_datalist_for_prediction( - args.test_data_dir, - vallst, - norm=True, - savedir=args.save_dir, - preprocessed=preprocessed, - coef_norm=coef_norm, - ) - print("load data finish") - return val_dataset, vallst - - -def radius_graph(x, r, batch=None, loop=False, max_num_neighbors=32): - num_nodes = x.shape[0] - if batch is None: - batch = paddle.zeros(shape=[num_nodes], dtype=paddle.int64) - - dist_matrix = paddle.norm(x.unsqueeze(1) - x.unsqueeze(0), axis=-1, p=2) - - adj_matrix = dist_matrix < r - - if not loop: - adj_matrix = adj_matrix * (1 - paddle.eye(num_nodes, dtype=paddle.bool)) - - mask = batch.unsqueeze(1) == batch.unsqueeze(0) - adj_matrix = adj_matrix * mask - - degree = adj_matrix.sum(axis=-1) - if max_num_neighbors < degree.max(): - idx = degree.argsort(descending=True) - idx = idx[:max_num_neighbors] - adj_matrix = adj_matrix[:, idx] - - return adj_matrix - - -def k_hop_subgraph( - edge_index: paddle.Tensor, - num_hops: int, - node_idx: Union[int, List[int], paddle.Tensor], - relabel_nodes: bool = False, -) -> paddle.Tensor: - if not isinstance(node_idx, paddle.Tensor): - node_idx = paddle.to_tensor(node_idx, dtype="int64") - - visited = paddle.zeros([edge_index.max() + 1], dtype="bool") - queue = node_idx.tolist() if isinstance(node_idx, paddle.Tensor) else node_idx - visited[queue] = True - sub_edge_index = [] - - current_hop = 0 - - while queue and current_hop < num_hops: - current_hop += 1 - next_queue = [] - - for node in queue: - neighbors = edge_index[1] == node - neighbors = edge_index[0][neighbors] - neighbors = neighbors[~visited[neighbors]] - - next_queue.extend(neighbors.tolist()) - visited[neighbors] = True - - for neighbor in neighbors: - if relabel_nodes: - original_idx = ( - paddle.nonzero(node_idx == node)[0].item() - if isinstance(node_idx, paddle.Tensor) - else node_idx.index(node) - ) - sub_edge_index.append([original_idx, len(sub_edge_index) // 2 + 1]) - else: - sub_edge_index.append([node, neighbor]) - - queue = next_queue - - sub_edge_index = paddle.to_tensor(sub_edge_index, dtype="int64") - if relabel_nodes: - return sub_edge_index.reshape([-1, 2])[:, 1] - else: - return sub_edge_index.reshape([-1, 2]) - - -class CustomData: - def __init__(self, **kwargs): - self.edge_index = None - for key, value in kwargs.items(): - setattr(self, key, value) +import itertools +import os +import random +from typing import List +from typing import Union + +import numpy as np +import paddle +import vtk +from sklearn.neighbors import NearestNeighbors +from tqdm import tqdm +from vtk.util.numpy_support import vtk_to_numpy + + +def load_unstructured_grid_data(file_name): + reader = vtk.vtkUnstructuredGridReader() + reader.SetFileName(file_name) + reader.Update() + output = reader.GetOutput() + return output + + +def unstructured_grid_data_to_poly_data(unstructured_grid_data): + filter = vtk.vtkDataSetSurfaceFilter() + filter.SetInputData(unstructured_grid_data) + filter.Update() + poly_data = filter.GetOutput() + return poly_data, filter + + +def 
get_sdf(target, boundary): + nbrs = NearestNeighbors(n_neighbors=1).fit(boundary) + dists, indices = nbrs.kneighbors(target) + neis = np.array([boundary[i[0]] for i in indices]) + dirs = (target - neis) / (dists + 1e-08) + return dists.reshape(-1), dirs + + +def get_normal(unstructured_grid_data): + poly_data, surface_filter = unstructured_grid_data_to_poly_data( + unstructured_grid_data + ) + normal_filter = vtk.vtkPolyDataNormals() + normal_filter.SetInputData(poly_data) + normal_filter.SetAutoOrientNormals(1) + normal_filter.SetConsistency(1) + normal_filter.SetComputeCellNormals(1) + normal_filter.SetComputePointNormals(0) + normal_filter.Update() + unstructured_grid_data.GetCellData().SetNormals( + normal_filter.GetOutput().GetCellData().GetNormals() + ) + c2p = vtk.vtkCellDataToPointData() + c2p.SetInputData(unstructured_grid_data) + c2p.Update() + unstructured_grid_data = c2p.GetOutput() + normal = vtk_to_numpy(c2p.GetOutput().GetPointData().GetNormals()).astype(np.double) + normal /= np.max(np.abs(normal), axis=1, keepdims=True) + 1e-08 + normal /= np.linalg.norm(normal, axis=1, keepdims=True) + 1e-08 + if np.isnan(normal).sum() > 0: + print(np.isnan(normal).sum()) + print("recalculate") + return get_normal(unstructured_grid_data) + return normal + + +def visualize_poly_data(poly_data, surface_filter, normal_filter=None): + if normal_filter is not None: + mask = vtk.vtkMaskPoints() + mask.SetInputData(normal_filter.GetOutput()) + mask.Update() + arrow = vtk.vtkArrowSource() + arrow.Update() + glyph = vtk.vtkGlyph3D() + glyph.SetInputData(mask.GetOutput()) + glyph.SetSourceData(arrow.GetOutput()) + glyph.SetVectorModeToUseNormal() + glyph.SetScaleFactor(0.1) + glyph.Update() + norm_mapper = vtk.vtkPolyDataMapper() + norm_mapper.SetInputData(normal_filter.GetOutput()) + glyph_mapper = vtk.vtkPolyDataMapper() + glyph_mapper.SetInputData(glyph.GetOutput()) + norm_actor = vtk.vtkActor() + norm_actor.SetMapper(norm_mapper) + glyph_actor = vtk.vtkActor() + glyph_actor.SetMapper(glyph_mapper) + glyph_actor.GetProperty().SetColor(1, 0, 0) + norm_render = vtk.vtkRenderer() + norm_render.AddActor(norm_actor) + norm_render.SetBackground(0, 1, 0) + glyph_render = vtk.vtkRenderer() + glyph_render.AddActor(glyph_actor) + glyph_render.AddActor(norm_actor) + glyph_render.SetBackground(0, 0, 1) + scalar_range = poly_data.GetScalarRange() + mapper = vtk.vtkDataSetMapper() + mapper.SetInputConnection(surface_filter.GetOutputPort()) + mapper.SetScalarRange(scalar_range) + actor = vtk.vtkActor() + actor.SetMapper(mapper) + renderer = vtk.vtkRenderer() + renderer.AddActor(actor) + renderer.SetBackground(1, 1, 1) + renderer_window = vtk.vtkRenderWindow() + renderer_window.AddRenderer(renderer) + if normal_filter is not None: + renderer_window.AddRenderer(norm_render) + renderer_window.AddRenderer(glyph_render) + renderer_window.Render() + interactor = vtk.vtkRenderWindowInteractor() + interactor.SetRenderWindow(renderer_window) + interactor.Initialize() + interactor.Start() + + +def get_scalar_data(unstructured_grid, scalar_name): + point_data = unstructured_grid.GetPointData() + if point_data: + scalar_array = point_data.GetArray(scalar_name) + if scalar_array: + return vtk_to_numpy(scalar_array) + return None + + +def bget_datalist( + root, samples, norm=False, coef_norm=None, savedir=None, preprocessed=False +): + dataset = [] + mean_in, mean_out = 0, 0 + std_in, std_out = 0, 0 + for k, s in enumerate(tqdm(samples, desc="Processing samples")): + if preprocessed and savedir is not None: + save_path = 
os.path.join(savedir, s) + if not os.path.exists(save_path): + continue + init = np.load(os.path.join(save_path, "x.npy")) + target = np.load(os.path.join(save_path, "y.npy")) + pos = np.load(os.path.join(save_path, "pos.npy")) + surf = np.load(os.path.join(save_path, "surf.npy")) + area = np.load(os.path.join(save_path, "area.npy")) + else: + file_name_press = os.path.join(root, s) + if not os.path.exists(file_name_press): + continue + unstructured_grid_data_press = load_unstructured_grid_data(file_name_press) + scalar_names = ["Pressure", "point_scalars"] + for scalar_name in scalar_names: + press = get_scalar_data(unstructured_grid_data_press, scalar_name) + if press is not None: + break + points_press = vtk_to_numpy( + unstructured_grid_data_press.GetPoints().GetData() + ) + sdf_press = np.zeros(tuple(points_press.shape)[0]) + pos_surf = points_press + sdf_surf = sdf_press + press_surf = press + mesh_number = s[-8:-4] + area_file_name = os.path.join("data_track_B", f"area_{mesh_number}.npy") + if os.path.exists(area_file_name): + area = np.load(area_file_name) + else: + area = np.zeros(len(pos_surf)) + info = np.full((len(pos_surf), 1), 30.0) + init_surf = np.c_[pos_surf, sdf_surf, area, info] + target_surf = np.c_[np.zeros((len(pos_surf), 3)), press_surf] + surf = np.ones(len(pos_surf)) + pos = pos_surf + init = init_surf + target = target_surf + if savedir is not None: + save_path = os.path.join(savedir, s) + if not os.path.exists(save_path): + os.makedirs(save_path) + np.save(os.path.join(save_path, "x.npy"), init) + np.save(os.path.join(save_path, "y.npy"), target) + np.save(os.path.join(save_path, "pos.npy"), pos) + np.save(os.path.join(save_path, "surf.npy"), surf) + np.save(os.path.join(save_path, "area.npy"), area) + surf = paddle.to_tensor(data=surf) + pos = paddle.to_tensor(data=pos) + x = paddle.to_tensor(data=init) + y = paddle.to_tensor(data=target) + if norm and coef_norm is None: + if k == 0: + old_length = tuple(init.shape)[0] + mean_in = init.mean(axis=0) + mean_out = target.mean(axis=0) + else: + new_length = old_length + tuple(init.shape)[0] + mean_in += ( + init.sum(axis=0) - tuple(init.shape)[0] * mean_in + ) / new_length + mean_out += ( + target.sum(axis=0) - tuple(init.shape)[0] * mean_out + ) / new_length + old_length = new_length + data = CustomData(pos=pos, x=x, y=y, surf=surf.astype(dtype="bool")) + dataset.append(data) + if norm and coef_norm is None: + for k, data in enumerate(dataset): + if k == 0: + old_length = tuple(data.x.numpy().shape)[0] + std_in = ((data.x.numpy() - mean_in) ** 2).sum(axis=0) / old_length + std_out = ((data.y.numpy() - mean_out) ** 2).sum(axis=0) / old_length + else: + new_length = old_length + tuple(data.x.numpy().shape)[0] + std_in += ( + ((data.x.numpy() - mean_in) ** 2).sum(axis=0) + - tuple(data.x.numpy().shape)[0] * std_in + ) / new_length + std_out += ( + ((data.y.numpy() - mean_out) ** 2).sum(axis=0) + - tuple(data.x.numpy().shape)[0] * std_out + ) / new_length + old_length = new_length + std_in = np.sqrt(std_in) + std_out = np.sqrt(std_out) + for data in dataset: + data.x = ((data.x - mean_in) / (std_in + 1e-08)).astype(dtype="float32") + data.y = ((data.y - mean_out) / (std_out + 1e-08)).astype(dtype="float32") + coef_norm = mean_in, std_in, mean_out, std_out + dataset = dataset, coef_norm + elif coef_norm is not None: + for data in dataset: + data.x = ((data.x - coef_norm[0]) / (coef_norm[1] + 1e-08)).astype( + dtype="float32" + ) + data.y = ((data.y - coef_norm[2]) / (coef_norm[3] + 1e-08)).astype( + dtype="float32" + ) 
+ return dataset + + +def bget_datalist_for_prediction( + root, samples, norm=False, coef_norm=None, savedir=None, preprocessed=False +): + dataset = [] + mean_in, std_in = 0, 0 + for k, s in enumerate(tqdm(samples, desc="Processing samples")): + if preprocessed and savedir is not None: + save_path = os.path.join(savedir, s) + if not os.path.exists(save_path): + continue + init = np.load(os.path.join(save_path, "x.npy")) + pos = np.load(os.path.join(save_path, "pos.npy")) + surf = np.load(os.path.join(save_path, "surf.npy")) + area = np.load(os.path.join(save_path, "area.npy")) + else: + file_name = os.path.join(root, s) + if not os.path.exists(file_name): + continue + unstructured_grid_data = load_unstructured_grid_data(file_name) + points = vtk_to_numpy(unstructured_grid_data.GetPoints().GetData()) + sdf = np.zeros(tuple(points.shape)[0]) + pos_surf = points + sdf_surf = sdf + mesh_number = int(s.split("_")[-1].split(".")[0]) + area_file_name = os.path.join( + "../data/IJCAI_Car/track_B", f"area_{mesh_number}.npy" + ) + if os.path.exists(area_file_name): + area = np.load(area_file_name) + else: + area = np.zeros(len(pos_surf)) + info = np.full((len(pos_surf), 1), 30.0) + init_surf = np.c_[pos_surf, sdf_surf, area, info] + surf = np.ones(len(pos_surf)) + pos = pos_surf + init = init_surf + if savedir is not None: + save_path = os.path.join(savedir, s) + if not os.path.exists(save_path): + os.makedirs(save_path) + np.save(os.path.join(save_path, "x.npy"), init) + np.save(os.path.join(save_path, "pos.npy"), pos) + np.save(os.path.join(save_path, "surf.npy"), surf) + np.save(os.path.join(save_path, "area.npy"), area) + surf = paddle.to_tensor(data=surf) + pos = paddle.to_tensor(data=pos) + x = paddle.to_tensor(data=init) + y = paddle.zeros(shape=(tuple(x.shape)[0], 4)) + if norm and coef_norm is None: + if k == 0: + old_length = tuple(init.shape)[0] + mean_in = init.mean(axis=0) + else: + new_length = old_length + tuple(init.shape)[0] + mean_in += ( + init.sum(axis=0) - tuple(init.shape)[0] * mean_in + ) / new_length + old_length = new_length + data = CustomData(pos=pos, x=x, y=y, surf=surf.astype(dtype="bool")) + dataset.append(data) + if norm and coef_norm is None: + for k, data in enumerate(dataset): + if k == 0: + old_length = tuple(data.x.numpy().shape)[0] + std_in = ((data.x.numpy() - mean_in) ** 2).sum(axis=0) / old_length + else: + new_length = old_length + tuple(data.x.numpy().shape)[0] + std_in += ( + ((data.x.numpy() - mean_in) ** 2).sum(axis=0) + - tuple(data.x.numpy().shape)[0] * std_in + ) / new_length + old_length = new_length + std_in = np.sqrt(std_in) + for data in dataset: + data.x = ((data.x - mean_in) / (std_in + 1e-08)).astype(dtype="float32") + coef_norm = mean_in, std_in + elif coef_norm is not None: + for data in dataset: + data.x = ((data.x - coef_norm[0]) / (coef_norm[1] + 1e-08)).astype( + dtype="float32" + ) + return dataset + + +def bget_data_for_prediction(file_name, norm=False, coef_norm=None): + if not os.path.exists(file_name): + return + unstructured_grid_data = load_unstructured_grid_data(file_name) + points = vtk_to_numpy(unstructured_grid_data.GetPoints().GetData()) + sdf = np.zeros(tuple(points.shape)[0]) + pos_surf = points + sdf_surf = sdf + mesh_number = int(file_name.split("_")[-1].split(".")[0]) + area_file_name = os.path.join( + "../data/IJCAI_Car/track_B", f"area_{mesh_number}.npy" + ) + if os.path.exists(area_file_name): + area = np.load(area_file_name) + else: + area = np.zeros(len(pos_surf)) + info = np.full((len(pos_surf), 1), 30.0) + 
    init_surf = np.c_[pos_surf, sdf_surf, area, info]
+    surf = np.ones(len(pos_surf))
+    pos = pos_surf
+    init = init_surf
+
+    surf = paddle.to_tensor(data=surf)
+    pos = paddle.to_tensor(data=pos)
+    x = paddle.to_tensor(data=init)
+    y = paddle.zeros(shape=(tuple(x.shape)[0], 4))
+
+    data = CustomData(pos=pos, x=x, y=y, surf=surf.astype(dtype="bool"))
+
+    if coef_norm is not None:
+        data.x = ((data.x - coef_norm[0]) / (coef_norm[1] + 1e-08)).astype(
+            dtype="float32"
+        )
+    return data
+
+
+def get_edges(unstructured_grid_data, points, cell_size=4):
+    edge_indeces = set()
+    cells = vtk_to_numpy(unstructured_grid_data.GetCells().GetData()).reshape(
+        -1, cell_size + 1
+    )
+    for i in range(len(cells)):
+        for j, k in itertools.product(range(1, cell_size + 1), repeat=2):
+            edge_indeces.add((cells[i][j], cells[i][k]))
+            edge_indeces.add((cells[i][k], cells[i][j]))
+    edges = [[], []]
+    for u, v in edge_indeces:
+        edges[0].append(tuple(points[u]))
+        edges[1].append(tuple(points[v]))
+    return edges
+
+
+def get_edge_index(pos, edges_press, edges_velo):
+    indices = {tuple(pos[i]): i for i in range(len(pos))}
+    edges = set()
+    for i in range(len(edges_press[0])):
+        edges.add((indices[edges_press[0][i]], indices[edges_press[1][i]]))
+    for i in range(len(edges_velo[0])):
+        edges.add((indices[edges_velo[0][i]], indices[edges_velo[1][i]]))
+    edge_index = np.array(list(edges)).T
+    return edge_index
+
+
+def get_induced_graph(data, idx, num_hops):
+    subset, sub_edge_index, _, _ = k_hop_subgraph(
+        node_idx=idx, num_hops=num_hops, edge_index=data.edge_index, relabel_nodes=True
+    )
+    return CustomData(x=data.x[subset], y=data.y[idx], edge_index=sub_edge_index)
+
+
+def pc_normalize(pc):
+    centroid = paddle.mean(pc, axis=0)
+    pc = pc - centroid
+    m = paddle.max(x=paddle.sqrt(x=paddle.sum(pc**2, axis=1)))
+    pc = pc / m
+    return pc
+
+
+def get_shape(data, max_n_point=8192, normalize=True, use_height=False):
+    surf_indices = paddle.where(data.surf)[0].tolist()
+    if len(surf_indices) > max_n_point:
+        surf_indices = np.array(random.sample(range(len(surf_indices)), max_n_point))
+    shape_pc = data.pos[surf_indices].clone()
+    if normalize:
+        shape_pc = pc_normalize(shape_pc)
+    if use_height:
+        gravity_dim = 1
+        height_array = (
+            shape_pc[:, gravity_dim : gravity_dim + 1]
+            - shape_pc[:, gravity_dim : gravity_dim + 1].min()
+        )
+        shape_pc = paddle.cat((shape_pc, height_array), axis=1)
+    return shape_pc
+
+
+def create_edge_index_radius(data, r, max_neighbors=32):
+    if isinstance(data, list):
+        print("Error: 'data' is a list, expected 'CustomData' object.")
+        print("CustomData content:", data)
+        return None
+    data.edge_index = radius_graph(
+        x=data.pos, r=r, loop=True, max_num_neighbors=max_neighbors
+    )
+    return data
+
+
+class GraphDataset(paddle.io.Dataset):
+    def __init__(
+        self,
+        datalist,
+        use_height=False,
+        use_cfd_mesh=True,
+        r=None,
+        root=None,
+        norm=False,
+        coef_norm=None,
+    ):
+        super().__init__()
+        self.datalist = datalist
+        self.use_height = use_height
+        self.use_cfd_mesh = use_cfd_mesh
+        self.r = r
+        self.root = root
+        self.norm = norm
+        self.coef_norm = coef_norm
+
+    def __len__(self):
+        return len(self.datalist)
+
+    def __getitem__(self, idx):
+        file_name = os.path.join(self.root, self.datalist[idx])
+        data = bget_data_for_prediction(file_name, self.norm, self.coef_norm)
+        if not self.use_cfd_mesh:
+            data = create_edge_index_radius(data, self.r)
+        shape = get_shape(data, use_height=self.use_height)
+        return data, shape
+
+    def collate_fn(self, batch):
+        batch_data = [data for (data, _) in batch]
+        batch_shape = paddle.stack([shape for (_, shape) in batch], axis=0)
+        if len(batch_data) == 1:
+            return batch_data[0], batch_shape
+        return batch_data, batch_shape
+
+
+def get_samples(root):
+    samples = []
+    files = os.listdir(root)
+    for file in files:
+        if file.endswith(".vtk"):
+            samples.append(file)
+    return samples
+
+
+def B_load_train_val_fold(args, preprocessed):
+    samples = get_samples(args.data_dir)
+    np.random.shuffle(samples)
+    trainlst = samples[: args.train_split]
+    vallst = samples[args.train_split : args.val_split]
+    if preprocessed:
+        print("use preprocessed data")
+    print("loading data")
+    train_dataset, coef_norm = bget_datalist(
+        args.data_dir,
+        trainlst,
+        norm=True,
+        savedir=args.save_dir,
+        preprocessed=preprocessed,
+    )
+    val_dataset = bget_datalist(
+        args.data_dir,
+        vallst,
+        coef_norm=coef_norm,
+        savedir=args.save_dir,
+        preprocessed=preprocessed,
+    )
+    print("load data finish")
+    return train_dataset, val_dataset, coef_norm
+
+
+def Bload_train_val_fold_file(args, preprocessed, coef_norm):
+    samples = get_samples(args.test_data_dir)
+    np.random.shuffle(samples)
+    vallst = samples[:50]
+    if preprocessed:
+        print("use preprocessed data")
+    print("loading data")
+    val_dataset = bget_datalist_for_prediction(
+        args.test_data_dir,
+        vallst,
+        norm=True,
+        savedir=args.save_dir,
+        preprocessed=preprocessed,
+        coef_norm=coef_norm,
+    )
+    print("load data finish")
+    return val_dataset, vallst
+
+
+def radius_graph(x, r, batch=None, loop=False, max_num_neighbors=32):
+    num_nodes = x.shape[0]
+    if batch is None:
+        batch = paddle.zeros(shape=[num_nodes], dtype=paddle.int64)
+
+    dist_matrix = paddle.norm(x.unsqueeze(1) - x.unsqueeze(0), axis=-1, p=2)
+
+    adj_matrix = dist_matrix < r
+
+    if not loop:
+        adj_matrix = adj_matrix * (1 - paddle.eye(num_nodes, dtype=paddle.bool))
+
+    mask = batch.unsqueeze(1) == batch.unsqueeze(0)
+    adj_matrix = adj_matrix * mask
+
+    degree = adj_matrix.sum(axis=-1)
+    if max_num_neighbors < degree.max():
+        idx = degree.argsort(descending=True)
+        idx = idx[:max_num_neighbors]
+        adj_matrix = adj_matrix[:, idx]
+
+    return adj_matrix
+
+
+def k_hop_subgraph(
+    edge_index: paddle.Tensor,
+    num_hops: int,
+    node_idx: Union[int, List[int], paddle.Tensor],
+    relabel_nodes: bool = False,
+) -> paddle.Tensor:
+    if not isinstance(node_idx, paddle.Tensor):
+        node_idx = paddle.to_tensor(node_idx, dtype="int64")
+
+    visited = paddle.zeros([edge_index.max() + 1], dtype="bool")
+    queue = node_idx.tolist() if isinstance(node_idx, paddle.Tensor) else node_idx
+    visited[queue] = True
+    sub_edge_index = []
+
+    current_hop = 0
+
+    while queue and current_hop < num_hops:
+        current_hop += 1
+        next_queue = []
+
+        for node in queue:
+            neighbors = edge_index[1] == node
+            neighbors = edge_index[0][neighbors]
+            neighbors = neighbors[~visited[neighbors]]
+
+            next_queue.extend(neighbors.tolist())
+            visited[neighbors] = True
+
+            for neighbor in neighbors:
+                if relabel_nodes:
+                    original_idx = (
+                        paddle.nonzero(node_idx == node)[0].item()
+                        if isinstance(node_idx, paddle.Tensor)
+                        else node_idx.index(node)
+                    )
+                    sub_edge_index.append([original_idx, len(sub_edge_index) // 2 + 1])
+                else:
+                    sub_edge_index.append([node, neighbor])
+
+        queue = next_queue
+
+    sub_edge_index = paddle.to_tensor(sub_edge_index, dtype="int64")
+    if relabel_nodes:
+        return sub_edge_index.reshape([-1, 2])[:, 1]
+    else:
+        return sub_edge_index.reshape([-1, 2])
+
+
+class CustomData:
+    def __init__(self, **kwargs):
+        self.edge_index = None
+        for key, value in kwargs.items():
+            setattr(self, key, value)
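
Note (not part of the patch): the helpers added above center on two ideas, building a radius graph over the surface points (radius_graph / create_edge_index_radius) and collecting a k-hop neighbourhood around a node (k_hop_subgraph / get_induced_graph). The following self-contained NumPy sketch illustrates those two ideas in their conventional edge-index form; radius_edges and k_hop_nodes are hypothetical names introduced here, the random points merely stand in for a surface point cloud, and the patch's own radius_graph differs in that it returns a (possibly column-truncated) adjacency matrix rather than an edge list.

import numpy as np

def radius_edges(points: np.ndarray, r: float, loop: bool = False) -> np.ndarray:
    """All directed edges (src, dst) between points closer than r, as a (2, E) array."""
    # pairwise Euclidean distances, shape (N, N)
    d = np.linalg.norm(points[:, None, :] - points[None, :, :], axis=-1)
    adj = d < r
    if not loop:
        np.fill_diagonal(adj, False)  # drop self-loops
    src, dst = np.nonzero(adj)
    return np.stack([src, dst])

def k_hop_nodes(edge_index: np.ndarray, seed: int, num_hops: int) -> set:
    """Nodes reachable from `seed` in at most `num_hops` steps (simple BFS)."""
    frontier, visited = {seed}, {seed}
    for _ in range(num_hops):
        # follow every edge whose source lies in the current frontier
        mask = np.isin(edge_index[0], list(frontier))
        frontier = set(edge_index[1][mask].tolist()) - visited
        visited |= frontier
    return visited

pts = np.random.rand(200, 3).astype("float32")
ei = radius_edges(pts, r=0.2)
print(ei.shape, len(k_hop_nodes(ei, seed=0, num_hops=2)))

In this formulation the (2, E) edge index is exactly what the downstream code consumes as data.edge_index, which is why the sketch is written that way even though the Paddle helper above stops at an adjacency matrix.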
diff --git a/jointContribution/IJCAI_2024/zhongzaicanyu/download_dataset.ipynb b/jointContribution/IJCAI_2024/zhongzaicanyu/download_dataset.ipynb index 1386056b7f..3d3f189b2f 100644 --- a/jointContribution/IJCAI_2024/zhongzaicanyu/download_dataset.ipynb +++ b/jointContribution/IJCAI_2024/zhongzaicanyu/download_dataset.ipynb @@ -1 +1 @@ -{"cells":[{"cell_type":"markdown","metadata":{"id":"z0Sek0wtEs5n"},"source":["## 百度Baseline版本数据导入"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["!mkdir Dataset\n","!cd Dataset"]},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":255341,"status":"ok","timestamp":1720679943973,"user":{"displayName":"pei jian zeng","userId":"06013928868849686113"},"user_tz":-480},"id":"GTV_YDaxEsd3","outputId":"8554a8d9-ac54-49a7-c5d8-f56cd72953ba"},"outputs":[{"name":"stdout","output_type":"stream","text":["--2024-07-11 06:34:48-- https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\n","Resolving ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)... 36.110.192.178, 2409:8c04:1001:1203:0:ff:b0bb:4f27\n","Connecting to ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)|36.110.192.178|:443... connected.\n","HTTP request sent, awaiting response... 206 Partial Content\n","Length: 4740031429 (4.4G), 2358336404 (2.2G) remaining [application/octet-stream]\n","Saving to: ‘train_track_B.zip’\n","\n","train_track_B.zip 100%[++++++++++=========>] 4.41G 11.7MB/s in 3m 23s \n","\n","2024-07-11 06:38:12 (11.1 MB/s) - ‘train_track_B.zip’ saved [4740031429/4740031429]\n","\n","--2024-07-11 06:38:13-- https://ai-studio-online.bj.bcebos.com/v1/1638f9c292b9437bb46885186407a63e584856c91f9f4c18908b87abd46471e0?responseContentDisposition=attachment%3B%20filename%3Dtrack_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-05-05T03%3A02%3A25Z%2F-1%2F%2Fcfdfd6b6a9e096c761ee8e7d863d586741c69a9e6de89f9c3696706d35f8b265\n","Resolving ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)... 36.110.192.178, 2409:8c04:1001:1203:0:ff:b0bb:4f27\n","Connecting to ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)|36.110.192.178|:443... connected.\n","HTTP request sent, awaiting response... 
200 OK\n","Length: 1012191818 (965M) [application/octet-stream]\n","Saving to: ‘track_B.zip’\n","\n","track_B.zip 100%[===================>] 965.30M 21.0MB/s in 50s \n","\n","2024-07-11 06:39:04 (19.5 MB/s) - ‘track_B.zip’ saved [1012191818/1012191818]\n","\n"]}],"source":["!wget --header=\"Host: ai-studio-online.bj.bcebos.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Referer: https://aistudio.baidu.com/\" \"https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\" -c -O 'train_track_B.zip'\n","!wget --header=\"Host: ai-studio-online.bj.bcebos.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Referer: https://aistudio.baidu.com/\" \"https://ai-studio-online.bj.bcebos.com/v1/1638f9c292b9437bb46885186407a63e584856c91f9f4c18908b87abd46471e0?responseContentDisposition=attachment%3B%20filename%3Dtrack_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-05-05T03%3A02%3A25Z%2F-1%2F%2Fcfdfd6b6a9e096c761ee8e7d863d586741c69a9e6de89f9c3696706d35f8b265\" -c -O 'track_B.zip'"]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":153639,"status":"ok","timestamp":1720680617913,"user":{"displayName":"pei jian zeng","userId":"06013928868849686113"},"user_tz":-480},"id":"OS4r3PcokLdA","outputId":"525b6316-634c-410d-d582-528aa5698819"},"outputs":[{"name":"stdout","output_type":"stream","text":["Archive: train_track_B.zip\n"," inflating: data_track_B/area_0002.npy \n"," inflating: data_track_B/area_0003.npy \n"," inflating: data_track_B/area_0004.npy \n"," inflating: data_track_B/area_0005.npy \n"," inflating: data_track_B/area_0006.npy \n"," inflating: data_track_B/area_0011.npy \n"," inflating: data_track_B/area_0012.npy \n"," inflating: data_track_B/area_0013.npy \n"," inflating: data_track_B/area_0015.npy \n"," inflating: data_track_B/area_0017.npy \n"," inflating: data_track_B/area_0018.npy \n"," inflating: data_track_B/area_0020.npy \n"," inflating: data_track_B/area_0021.npy \n"," inflating: data_track_B/area_0022.npy \n"," inflating: data_track_B/area_0023.npy \n"," inflating: data_track_B/area_0024.npy \n"," inflating: data_track_B/area_0026.npy \n"," inflating: data_track_B/area_0029.npy \n"," inflating: data_track_B/area_0030.npy \n"," inflating: data_track_B/area_0036.npy \n"," inflating: data_track_B/area_0037.npy \n"," inflating: data_track_B/area_0038.npy \n"," inflating: data_track_B/area_0039.npy \n"," inflating: data_track_B/area_0040.npy \n"," inflating: data_track_B/area_0041.npy \n"," inflating: 
data_track_B/area_0042.npy \n"," inflating: data_track_B/area_0043.npy \n"," inflating: data_track_B/area_0044.npy \n"," inflating: data_track_B/area_0048.npy \n"," inflating: data_track_B/area_0049.npy \n"," inflating: data_track_B/area_0051.npy \n"," inflating: data_track_B/area_0052.npy \n"," inflating: data_track_B/area_0055.npy \n"," inflating: data_track_B/area_0056.npy \n"," inflating: data_track_B/area_0057.npy \n"," inflating: data_track_B/area_0059.npy \n"," inflating: data_track_B/area_0062.npy \n"," inflating: data_track_B/area_0064.npy \n"," inflating: data_track_B/area_0066.npy \n"," inflating: data_track_B/area_0067.npy \n"," inflating: data_track_B/area_0068.npy \n"," inflating: data_track_B/area_0071.npy \n"," inflating: data_track_B/area_0074.npy \n"," inflating: data_track_B/area_0075.npy \n"," inflating: data_track_B/area_0077.npy \n"," inflating: data_track_B/area_0078.npy \n"," inflating: data_track_B/area_0080.npy \n"," inflating: data_track_B/area_0081.npy \n"," inflating: data_track_B/area_0082.npy \n"," inflating: data_track_B/area_0084.npy \n"," inflating: data_track_B/area_0085.npy \n"," inflating: data_track_B/area_0086.npy \n"," inflating: data_track_B/area_0087.npy \n"," inflating: data_track_B/area_0088.npy \n"," inflating: data_track_B/area_0089.npy \n"," inflating: data_track_B/area_0090.npy \n"," inflating: data_track_B/area_0092.npy \n"," inflating: data_track_B/area_0093.npy \n"," inflating: data_track_B/area_0094.npy \n"," inflating: data_track_B/area_0095.npy \n"," inflating: data_track_B/area_0097.npy \n"," inflating: data_track_B/area_0098.npy \n"," inflating: data_track_B/area_0100.npy \n"," inflating: data_track_B/area_0101.npy \n"," inflating: data_track_B/area_0102.npy \n"," inflating: data_track_B/area_0103.npy \n"," inflating: data_track_B/area_0104.npy \n"," inflating: data_track_B/area_0106.npy \n"," inflating: data_track_B/area_0107.npy \n"," inflating: data_track_B/area_0108.npy \n"," inflating: data_track_B/area_0109.npy \n"," inflating: data_track_B/area_0110.npy \n"," inflating: data_track_B/area_0113.npy \n"," inflating: data_track_B/area_0114.npy \n"," inflating: data_track_B/area_0115.npy \n"," inflating: data_track_B/area_0116.npy \n"," inflating: data_track_B/area_0117.npy \n"," inflating: data_track_B/area_0118.npy \n"," inflating: data_track_B/area_0119.npy \n"," inflating: data_track_B/area_0120.npy \n"," inflating: data_track_B/area_0121.npy \n"," inflating: data_track_B/area_0122.npy \n"," inflating: data_track_B/area_0124.npy \n"," inflating: data_track_B/area_0125.npy \n"," inflating: data_track_B/area_0126.npy \n"," inflating: data_track_B/area_0128.npy \n"," inflating: data_track_B/area_0129.npy \n"," inflating: data_track_B/area_0130.npy \n"," inflating: data_track_B/area_0131.npy \n"," inflating: data_track_B/area_0132.npy \n"," inflating: data_track_B/area_0133.npy \n"," inflating: data_track_B/area_0134.npy \n"," inflating: data_track_B/area_0135.npy \n"," inflating: data_track_B/area_0136.npy \n"," inflating: data_track_B/area_0138.npy \n"," inflating: data_track_B/area_0139.npy \n"," inflating: data_track_B/area_0140.npy \n"," inflating: data_track_B/area_0141.npy \n"," inflating: data_track_B/area_0143.npy \n"," inflating: data_track_B/area_0145.npy \n"," inflating: data_track_B/area_0146.npy \n"," inflating: data_track_B/area_0148.npy \n"," inflating: data_track_B/area_0149.npy \n"," inflating: data_track_B/area_0150.npy \n"," inflating: data_track_B/area_0151.npy \n"," inflating: data_track_B/area_0153.npy \n"," 
inflating: data_track_B/area_0154.npy \n"," inflating: data_track_B/area_0156.npy \n"," inflating: data_track_B/area_0157.npy \n"," inflating: data_track_B/area_0158.npy \n"," inflating: data_track_B/area_0161.npy \n"," inflating: data_track_B/area_0162.npy \n"," inflating: data_track_B/area_0163.npy \n"," inflating: data_track_B/area_0164.npy \n"," inflating: data_track_B/area_0166.npy \n"," inflating: data_track_B/area_0167.npy \n"," inflating: data_track_B/area_0168.npy \n"," inflating: data_track_B/area_0170.npy \n"," inflating: data_track_B/area_0171.npy \n"," inflating: data_track_B/area_0172.npy \n"," inflating: data_track_B/area_0174.npy \n"," inflating: data_track_B/area_0175.npy \n"," inflating: data_track_B/area_0183.npy \n"," inflating: data_track_B/area_0184.npy \n"," inflating: data_track_B/area_0185.npy \n"," inflating: data_track_B/area_0189.npy \n"," inflating: data_track_B/area_0190.npy \n"," inflating: data_track_B/area_0193.npy \n"," inflating: data_track_B/area_0194.npy \n"," inflating: data_track_B/area_0195.npy \n"," inflating: data_track_B/area_0197.npy \n"," inflating: data_track_B/area_0201.npy \n"," inflating: data_track_B/area_0203.npy \n"," inflating: data_track_B/area_0204.npy \n"," inflating: data_track_B/area_0205.npy \n"," inflating: data_track_B/area_0206.npy \n"," inflating: data_track_B/area_0208.npy \n"," inflating: data_track_B/area_0210.npy \n"," inflating: data_track_B/area_0211.npy \n"," inflating: data_track_B/area_0216.npy \n"," inflating: data_track_B/area_0217.npy \n"," inflating: data_track_B/area_0219.npy \n"," inflating: data_track_B/area_0220.npy \n"," inflating: data_track_B/area_0227.npy \n"," inflating: data_track_B/area_0228.npy \n"," inflating: data_track_B/area_0229.npy \n"," inflating: data_track_B/area_0232.npy \n"," inflating: data_track_B/area_0234.npy \n"," inflating: data_track_B/area_0235.npy \n"," inflating: data_track_B/area_0236.npy \n"," inflating: data_track_B/area_0238.npy \n"," inflating: data_track_B/area_0239.npy \n"," inflating: data_track_B/area_0240.npy \n"," inflating: data_track_B/area_0241.npy \n"," inflating: data_track_B/area_0245.npy \n"," inflating: data_track_B/area_0246.npy \n"," inflating: data_track_B/area_0247.npy \n"," inflating: data_track_B/area_0248.npy \n"," inflating: data_track_B/area_0249.npy \n"," inflating: data_track_B/area_0252.npy \n"," inflating: data_track_B/area_0253.npy \n"," inflating: data_track_B/area_0254.npy \n"," inflating: data_track_B/area_0256.npy \n"," inflating: data_track_B/area_0257.npy \n"," inflating: data_track_B/area_0259.npy \n"," inflating: data_track_B/area_0264.npy \n"," inflating: data_track_B/area_0265.npy \n"," inflating: data_track_B/area_0266.npy \n"," inflating: data_track_B/area_0268.npy \n"," inflating: data_track_B/area_0269.npy \n"," inflating: data_track_B/area_0271.npy \n"," inflating: data_track_B/area_0272.npy \n"," inflating: data_track_B/area_0273.npy \n"," inflating: data_track_B/area_0275.npy \n"," inflating: data_track_B/area_0276.npy \n"," inflating: data_track_B/area_0277.npy \n"," inflating: data_track_B/area_0279.npy \n"," inflating: data_track_B/area_0280.npy \n"," inflating: data_track_B/area_0281.npy \n"," inflating: data_track_B/area_0284.npy \n"," inflating: data_track_B/area_0285.npy \n"," inflating: data_track_B/area_0286.npy \n"," inflating: data_track_B/area_0288.npy \n"," inflating: data_track_B/area_0289.npy \n"," inflating: data_track_B/area_0290.npy \n"," inflating: data_track_B/area_0291.npy \n"," inflating: 
data_track_B/area_0294.npy \n"," inflating: data_track_B/area_0296.npy \n"," inflating: data_track_B/area_0297.npy \n"," inflating: data_track_B/area_0298.npy \n"," inflating: data_track_B/area_0301.npy \n"," inflating: data_track_B/area_0304.npy \n"," inflating: data_track_B/area_0305.npy \n"," inflating: data_track_B/area_0306.npy \n"," inflating: data_track_B/area_0307.npy \n"," inflating: data_track_B/area_0308.npy \n"," inflating: data_track_B/area_0310.npy \n"," inflating: data_track_B/area_0311.npy \n"," inflating: data_track_B/area_0314.npy \n"," inflating: data_track_B/area_0315.npy \n"," inflating: data_track_B/area_0316.npy \n"," inflating: data_track_B/area_0320.npy \n"," inflating: data_track_B/area_0321.npy \n"," inflating: data_track_B/area_0323.npy \n"," inflating: data_track_B/area_0324.npy \n"," inflating: data_track_B/area_0327.npy \n"," inflating: data_track_B/area_0330.npy \n"," inflating: data_track_B/area_0331.npy \n"," inflating: data_track_B/area_0332.npy \n"," inflating: data_track_B/area_0333.npy \n"," inflating: data_track_B/area_0334.npy \n"," inflating: data_track_B/area_0337.npy \n"," inflating: data_track_B/area_0338.npy \n"," inflating: data_track_B/area_0339.npy \n"," inflating: data_track_B/area_0340.npy \n"," inflating: data_track_B/area_0341.npy \n"," inflating: data_track_B/area_0342.npy \n"," inflating: data_track_B/area_0343.npy \n"," inflating: data_track_B/area_0344.npy \n"," inflating: data_track_B/area_0345.npy \n"," inflating: data_track_B/area_0346.npy \n"," inflating: data_track_B/area_0348.npy \n"," inflating: data_track_B/area_0349.npy \n"," inflating: data_track_B/area_0351.npy \n"," inflating: data_track_B/area_0352.npy \n"," inflating: data_track_B/area_0353.npy \n"," inflating: data_track_B/area_0354.npy \n"," inflating: data_track_B/area_0356.npy \n"," inflating: data_track_B/area_0357.npy \n"," inflating: data_track_B/area_0359.npy \n"," inflating: data_track_B/area_0360.npy \n"," inflating: data_track_B/area_0361.npy \n"," inflating: data_track_B/area_0363.npy \n"," inflating: data_track_B/area_0364.npy \n"," inflating: data_track_B/area_0365.npy \n"," inflating: data_track_B/area_0366.npy \n"," inflating: data_track_B/area_0367.npy \n"," inflating: data_track_B/area_0368.npy \n"," inflating: data_track_B/area_0369.npy \n"," inflating: data_track_B/area_0371.npy \n"," inflating: data_track_B/area_0373.npy \n"," inflating: data_track_B/area_0376.npy \n"," inflating: data_track_B/area_0377.npy \n"," inflating: data_track_B/area_0378.npy \n"," inflating: data_track_B/area_0379.npy \n"," inflating: data_track_B/area_0381.npy \n"," inflating: data_track_B/area_0382.npy \n"," inflating: data_track_B/area_0383.npy \n"," inflating: data_track_B/area_0384.npy \n"," inflating: data_track_B/area_0385.npy \n"," inflating: data_track_B/area_0387.npy \n"," inflating: data_track_B/area_0388.npy \n"," inflating: data_track_B/area_0389.npy \n"," inflating: data_track_B/area_0392.npy \n"," inflating: data_track_B/area_0393.npy \n"," inflating: data_track_B/area_0394.npy \n"," inflating: data_track_B/area_0395.npy \n"," inflating: data_track_B/area_0396.npy \n"," inflating: data_track_B/area_0398.npy \n"," inflating: data_track_B/area_0399.npy \n"," inflating: data_track_B/area_0400.npy \n"," inflating: data_track_B/area_0401.npy \n"," inflating: data_track_B/area_0402.npy \n"," inflating: data_track_B/area_0403.npy \n"," inflating: data_track_B/area_0404.npy \n"," inflating: data_track_B/area_0405.npy \n"," inflating: data_track_B/area_0407.npy \n"," 
inflating: data_track_B/area_0408.npy \n"," inflating: data_track_B/area_0409.npy \n"," inflating: data_track_B/area_0410.npy \n"," inflating: data_track_B/area_0411.npy \n"," inflating: data_track_B/area_0413.npy \n"," inflating: data_track_B/area_0416.npy \n"," inflating: data_track_B/area_0417.npy \n"," inflating: data_track_B/area_0421.npy \n"," inflating: data_track_B/area_0422.npy \n"," inflating: data_track_B/area_0423.npy \n"," inflating: data_track_B/area_0424.npy \n"," inflating: data_track_B/area_0425.npy \n"," inflating: data_track_B/area_0428.npy \n"," inflating: data_track_B/area_0429.npy \n"," inflating: data_track_B/area_0430.npy \n"," inflating: data_track_B/area_0431.npy \n"," inflating: data_track_B/area_0432.npy \n"," inflating: data_track_B/area_0435.npy \n"," inflating: data_track_B/area_0438.npy \n"," inflating: data_track_B/area_0439.npy \n"," inflating: data_track_B/area_0441.npy \n"," inflating: data_track_B/area_0444.npy \n"," inflating: data_track_B/area_0445.npy \n"," inflating: data_track_B/area_0449.npy \n"," inflating: data_track_B/area_0450.npy \n"," inflating: data_track_B/area_0451.npy \n"," inflating: data_track_B/area_0452.npy \n"," inflating: data_track_B/area_0453.npy \n"," inflating: data_track_B/area_0456.npy \n"," inflating: data_track_B/area_0457.npy \n"," inflating: data_track_B/area_0458.npy \n"," inflating: data_track_B/area_0459.npy \n"," inflating: data_track_B/area_0460.npy \n"," inflating: data_track_B/area_0461.npy \n"," inflating: data_track_B/area_0463.npy \n"," inflating: data_track_B/area_0464.npy \n"," inflating: data_track_B/area_0465.npy \n"," inflating: data_track_B/area_0467.npy \n"," inflating: data_track_B/area_0469.npy \n"," inflating: data_track_B/area_0471.npy \n"," inflating: data_track_B/area_0472.npy \n"," inflating: data_track_B/area_0474.npy \n"," inflating: data_track_B/area_0475.npy \n"," inflating: data_track_B/area_0477.npy \n"," inflating: data_track_B/area_0478.npy \n"," inflating: data_track_B/area_0479.npy \n"," inflating: data_track_B/area_0480.npy \n"," inflating: data_track_B/area_0481.npy \n"," inflating: data_track_B/area_0482.npy \n"," inflating: data_track_B/area_0485.npy \n"," inflating: data_track_B/area_0486.npy \n"," inflating: data_track_B/area_0487.npy \n"," inflating: data_track_B/area_0488.npy \n"," inflating: data_track_B/area_0489.npy \n"," inflating: data_track_B/area_0492.npy \n"," inflating: data_track_B/area_0493.npy \n"," inflating: data_track_B/area_0494.npy \n"," inflating: data_track_B/area_0497.npy \n"," inflating: data_track_B/area_0498.npy \n"," inflating: data_track_B/area_0499.npy \n"," inflating: data_track_B/area_0501.npy \n"," inflating: data_track_B/area_0502.npy \n"," inflating: data_track_B/area_0503.npy \n"," inflating: data_track_B/area_0504.npy \n"," inflating: data_track_B/area_0507.npy \n"," inflating: data_track_B/area_0508.npy \n"," inflating: data_track_B/area_0509.npy \n"," inflating: data_track_B/area_0513.npy \n"," inflating: data_track_B/area_0514.npy \n"," inflating: data_track_B/area_0515.npy \n"," inflating: data_track_B/area_0517.npy \n"," inflating: data_track_B/area_0518.npy \n"," inflating: data_track_B/area_0519.npy \n"," inflating: data_track_B/area_0520.npy \n"," inflating: data_track_B/area_0521.npy \n"," inflating: data_track_B/area_0522.npy \n"," inflating: data_track_B/area_0523.npy \n"," inflating: data_track_B/area_0524.npy \n"," inflating: data_track_B/area_0525.npy \n"," inflating: data_track_B/area_0526.npy \n"," inflating: 
data_track_B/area_0527.npy \n"," inflating: data_track_B/area_0528.npy \n"," inflating: data_track_B/area_0529.npy \n"," inflating: data_track_B/area_0530.npy \n"," inflating: data_track_B/area_0531.npy \n"," inflating: data_track_B/area_0534.npy \n"," inflating: data_track_B/area_0535.npy \n"," inflating: data_track_B/area_0536.npy \n"," inflating: data_track_B/area_0538.npy \n"," inflating: data_track_B/area_0541.npy \n"," inflating: data_track_B/area_0542.npy \n"," inflating: data_track_B/area_0544.npy \n"," inflating: data_track_B/area_0545.npy \n"," inflating: data_track_B/area_0546.npy \n"," inflating: data_track_B/area_0547.npy \n"," inflating: data_track_B/area_0550.npy \n"," inflating: data_track_B/area_0551.npy \n"," inflating: data_track_B/area_0553.npy \n"," inflating: data_track_B/area_0555.npy \n"," inflating: data_track_B/area_0557.npy \n"," inflating: data_track_B/area_0558.npy \n"," inflating: data_track_B/area_0561.npy \n"," inflating: data_track_B/area_0563.npy \n"," inflating: data_track_B/area_0564.npy \n"," inflating: data_track_B/area_0565.npy \n"," inflating: data_track_B/area_0567.npy \n"," inflating: data_track_B/area_0568.npy \n"," inflating: data_track_B/area_0571.npy \n"," inflating: data_track_B/area_0574.npy \n"," inflating: data_track_B/area_0576.npy \n"," inflating: data_track_B/area_0579.npy \n"," inflating: data_track_B/area_0580.npy \n"," inflating: data_track_B/area_0582.npy \n"," inflating: data_track_B/area_0584.npy \n"," inflating: data_track_B/area_0585.npy \n"," inflating: data_track_B/area_0588.npy \n"," inflating: data_track_B/area_0589.npy \n"," inflating: data_track_B/area_0590.npy \n"," inflating: data_track_B/area_0591.npy \n"," inflating: data_track_B/area_0592.npy \n"," inflating: data_track_B/area_0593.npy \n"," inflating: data_track_B/area_0594.npy \n"," inflating: data_track_B/area_0595.npy \n"," inflating: data_track_B/area_0596.npy \n"," inflating: data_track_B/area_0597.npy \n"," inflating: data_track_B/area_0598.npy \n"," inflating: data_track_B/area_0600.npy \n"," inflating: data_track_B/area_0602.npy \n"," inflating: data_track_B/area_0605.npy \n"," inflating: data_track_B/area_0608.npy \n"," inflating: data_track_B/area_0609.npy \n"," inflating: data_track_B/area_0611.npy \n"," inflating: data_track_B/area_0612.npy \n"," inflating: data_track_B/area_0613.npy \n"," inflating: data_track_B/area_0614.npy \n"," inflating: data_track_B/area_0618.npy \n"," inflating: data_track_B/area_0619.npy \n"," inflating: data_track_B/area_0620.npy \n"," inflating: data_track_B/area_0621.npy \n"," inflating: data_track_B/area_0622.npy \n"," inflating: data_track_B/area_0623.npy \n"," inflating: data_track_B/area_0624.npy \n"," inflating: data_track_B/area_0625.npy \n"," inflating: data_track_B/area_0627.npy \n"," inflating: data_track_B/area_0628.npy \n"," inflating: data_track_B/area_0629.npy \n"," inflating: data_track_B/area_0630.npy \n"," inflating: data_track_B/area_0631.npy \n"," inflating: data_track_B/area_0632.npy \n"," inflating: data_track_B/area_0633.npy \n"," inflating: data_track_B/area_0634.npy \n"," inflating: data_track_B/area_0635.npy \n"," inflating: data_track_B/area_0637.npy \n"," inflating: data_track_B/area_0638.npy \n"," inflating: data_track_B/area_0639.npy \n"," inflating: data_track_B/area_0640.npy \n"," inflating: data_track_B/area_0641.npy \n"," inflating: data_track_B/area_0643.npy \n"," inflating: data_track_B/area_0644.npy \n"," inflating: data_track_B/area_0645.npy \n"," inflating: data_track_B/area_0646.npy \n"," 
inflating: data_track_B/area_0648.npy \n"," inflating: data_track_B/area_0650.npy \n"," inflating: data_track_B/area_0651.npy \n"," inflating: data_track_B/area_0652.npy \n"," inflating: data_track_B/area_0653.npy \n"," inflating: data_track_B/area_0654.npy \n"," inflating: data_track_B/area_0656.npy \n"," inflating: data_track_B/area_0657.npy \n"," inflating: data_track_B/area_0658.npy \n"," inflating: data_track_B/area_0661.npy \n"," inflating: data_track_B/area_0663.npy \n"," inflating: data_track_B/area_0664.npy \n"," inflating: data_track_B/area_0665.npy \n"," inflating: data_track_B/area_0666.npy \n"," inflating: data_track_B/area_0667.npy \n"," inflating: data_track_B/area_0668.npy \n"," inflating: data_track_B/area_0669.npy \n"," inflating: data_track_B/area_0671.npy \n"," inflating: data_track_B/area_0672.npy \n"," inflating: data_track_B/area_0673.npy \n"," inflating: data_track_B/area_0674.npy \n"," inflating: data_track_B/area_0676.npy \n"," inflating: data_track_B/area_0677.npy \n"," inflating: data_track_B/area_0678.npy \n"," inflating: data_track_B/area_0679.npy \n"," inflating: data_track_B/area_0680.npy \n"," inflating: data_track_B/area_0682.npy \n"," inflating: data_track_B/area_0686.npy \n"," inflating: data_track_B/area_0688.npy \n"," inflating: data_track_B/area_0689.npy \n"," inflating: data_track_B/area_0690.npy \n"," inflating: data_track_B/area_0691.npy \n"," inflating: data_track_B/area_0692.npy \n"," inflating: data_track_B/area_0693.npy \n"," inflating: data_track_B/area_0694.npy \n"," inflating: data_track_B/area_0695.npy \n"," inflating: data_track_B/area_0697.npy \n"," inflating: data_track_B/area_0699.npy \n"," inflating: data_track_B/area_0700.npy \n"," inflating: data_track_B/area_0701.npy \n"," inflating: data_track_B/area_0703.npy \n"," inflating: data_track_B/area_0704.npy \n"," inflating: data_track_B/area_0706.npy \n"," inflating: data_track_B/area_0707.npy \n"," inflating: data_track_B/area_0708.npy \n"," inflating: data_track_B/area_0709.npy \n"," inflating: data_track_B/area_0711.npy \n"," inflating: data_track_B/area_0712.npy \n"," inflating: data_track_B/area_0713.npy \n"," inflating: data_track_B/area_0714.npy \n"," inflating: data_track_B/area_0715.npy \n"," inflating: data_track_B/area_0716.npy \n"," inflating: data_track_B/area_0718.npy \n"," inflating: data_track_B/area_0719.npy \n"," inflating: data_track_B/area_0720.npy \n"," inflating: data_track_B/area_0721.npy \n"," inflating: data_track_B/area_0722.npy \n"," inflating: data_track_B/area_0724.npy \n"," inflating: data_track_B/area_0727.npy \n"," inflating: data_track_B/area_0728.npy \n"," inflating: data_track_B/area_0729.npy \n"," inflating: data_track_B/area_0730.npy \n"," inflating: data_track_B/area_0731.npy \n"," inflating: data_track_B/area_0733.npy \n"," inflating: data_track_B/area_0735.npy \n"," inflating: data_track_B/area_0736.npy \n"," inflating: data_track_B/area_0737.npy \n"," inflating: data_track_B/area_0740.npy \n"," inflating: data_track_B/area_0742.npy \n"," inflating: data_track_B/area_0743.npy \n"," inflating: data_track_B/area_0744.npy \n"," inflating: data_track_B/area_0745.npy \n"," inflating: data_track_B/centroid_0002.npy \n"," inflating: data_track_B/centroid_0003.npy \n"," inflating: data_track_B/centroid_0004.npy \n"," inflating: data_track_B/centroid_0005.npy \n"," inflating: data_track_B/centroid_0006.npy \n"," inflating: data_track_B/centroid_0011.npy \n"," inflating: data_track_B/centroid_0012.npy \n"," inflating: data_track_B/centroid_0013.npy \n"," 
inflating: data_track_B/centroid_0015.npy \n"," inflating: data_track_B/centroid_0017.npy \n"," inflating: data_track_B/centroid_0018.npy \n"," inflating: data_track_B/centroid_0020.npy \n"," inflating: data_track_B/centroid_0021.npy \n"," inflating: data_track_B/centroid_0022.npy \n"," inflating: data_track_B/centroid_0023.npy \n"," inflating: data_track_B/centroid_0024.npy \n"," inflating: data_track_B/centroid_0026.npy \n"," inflating: data_track_B/centroid_0029.npy \n"," inflating: data_track_B/centroid_0030.npy \n"," inflating: data_track_B/centroid_0036.npy \n"," inflating: data_track_B/centroid_0037.npy \n"," inflating: data_track_B/centroid_0038.npy \n"," inflating: data_track_B/centroid_0039.npy \n"," inflating: data_track_B/centroid_0040.npy \n"," inflating: data_track_B/centroid_0041.npy \n"," inflating: data_track_B/centroid_0042.npy \n"," inflating: data_track_B/centroid_0043.npy \n"," inflating: data_track_B/centroid_0044.npy \n"," inflating: data_track_B/centroid_0048.npy \n"," inflating: data_track_B/centroid_0049.npy \n"," inflating: data_track_B/centroid_0051.npy \n"," inflating: data_track_B/centroid_0052.npy \n"," inflating: data_track_B/centroid_0055.npy \n"," inflating: data_track_B/centroid_0056.npy \n"," inflating: data_track_B/centroid_0057.npy \n"," inflating: data_track_B/centroid_0059.npy \n"," inflating: data_track_B/centroid_0062.npy \n"," inflating: data_track_B/centroid_0064.npy \n"," inflating: data_track_B/centroid_0066.npy \n"," inflating: data_track_B/centroid_0067.npy \n"," inflating: data_track_B/centroid_0068.npy \n"," inflating: data_track_B/centroid_0071.npy \n"," inflating: data_track_B/centroid_0074.npy \n"," inflating: data_track_B/centroid_0075.npy \n"," inflating: data_track_B/centroid_0077.npy \n"," inflating: data_track_B/centroid_0078.npy \n"," inflating: data_track_B/centroid_0080.npy \n"," inflating: data_track_B/centroid_0081.npy \n"," inflating: data_track_B/centroid_0082.npy \n"," inflating: data_track_B/centroid_0084.npy \n"," inflating: data_track_B/centroid_0085.npy \n"," inflating: data_track_B/centroid_0086.npy \n"," inflating: data_track_B/centroid_0087.npy \n"," inflating: data_track_B/centroid_0088.npy \n"," inflating: data_track_B/centroid_0089.npy \n"," inflating: data_track_B/centroid_0090.npy \n"," inflating: data_track_B/centroid_0092.npy \n"," inflating: data_track_B/centroid_0093.npy \n"," inflating: data_track_B/centroid_0094.npy \n"," inflating: data_track_B/centroid_0095.npy \n"," inflating: data_track_B/centroid_0097.npy \n"," inflating: data_track_B/centroid_0098.npy \n"," inflating: data_track_B/centroid_0100.npy \n"," inflating: data_track_B/centroid_0101.npy \n"," inflating: data_track_B/centroid_0102.npy \n"," inflating: data_track_B/centroid_0103.npy \n"," inflating: data_track_B/centroid_0104.npy \n"," inflating: data_track_B/centroid_0106.npy \n"," inflating: data_track_B/centroid_0107.npy \n"," inflating: data_track_B/centroid_0108.npy \n"," inflating: data_track_B/centroid_0109.npy \n"," inflating: data_track_B/centroid_0110.npy \n"," inflating: data_track_B/centroid_0113.npy \n"," inflating: data_track_B/centroid_0114.npy \n"," inflating: data_track_B/centroid_0115.npy \n"," inflating: data_track_B/centroid_0116.npy \n"," inflating: data_track_B/centroid_0117.npy \n"," inflating: data_track_B/centroid_0118.npy \n"," inflating: data_track_B/centroid_0119.npy \n"," inflating: data_track_B/centroid_0120.npy \n"," inflating: data_track_B/centroid_0121.npy \n"," inflating: data_track_B/centroid_0122.npy \n"," 
inflating: data_track_B/centroid_0124.npy \n"," inflating: data_track_B/centroid_0125.npy \n"," inflating: data_track_B/centroid_0126.npy \n"," inflating: data_track_B/centroid_0128.npy \n"," inflating: data_track_B/centroid_0129.npy \n"," inflating: data_track_B/centroid_0130.npy \n"," inflating: data_track_B/centroid_0131.npy \n"," inflating: data_track_B/centroid_0132.npy \n"," inflating: data_track_B/centroid_0133.npy \n"," inflating: data_track_B/centroid_0134.npy \n"," inflating: data_track_B/centroid_0135.npy \n"," inflating: data_track_B/centroid_0136.npy \n"," inflating: data_track_B/centroid_0138.npy \n"," inflating: data_track_B/centroid_0139.npy \n"," inflating: data_track_B/centroid_0140.npy \n"," inflating: data_track_B/centroid_0141.npy \n"," inflating: data_track_B/centroid_0143.npy \n"," inflating: data_track_B/centroid_0145.npy \n"," inflating: data_track_B/centroid_0146.npy \n"," inflating: data_track_B/centroid_0148.npy \n"," inflating: data_track_B/centroid_0149.npy \n"," inflating: data_track_B/centroid_0150.npy \n"," inflating: data_track_B/centroid_0151.npy \n"," inflating: data_track_B/centroid_0153.npy \n"," inflating: data_track_B/centroid_0154.npy \n"," inflating: data_track_B/centroid_0156.npy \n"," inflating: data_track_B/centroid_0157.npy \n"," inflating: data_track_B/centroid_0158.npy \n"," inflating: data_track_B/centroid_0161.npy \n"," inflating: data_track_B/centroid_0162.npy \n"," inflating: data_track_B/centroid_0163.npy \n"," inflating: data_track_B/centroid_0164.npy \n"," inflating: data_track_B/centroid_0166.npy \n"," inflating: data_track_B/centroid_0167.npy \n"," inflating: data_track_B/centroid_0168.npy \n"," inflating: data_track_B/centroid_0170.npy \n"," inflating: data_track_B/centroid_0171.npy \n"," inflating: data_track_B/centroid_0172.npy \n"," inflating: data_track_B/centroid_0174.npy \n"," inflating: data_track_B/centroid_0175.npy \n"," inflating: data_track_B/centroid_0183.npy \n"," inflating: data_track_B/centroid_0184.npy \n"," inflating: data_track_B/centroid_0185.npy \n"," inflating: data_track_B/centroid_0189.npy \n"," inflating: data_track_B/centroid_0190.npy \n"," inflating: data_track_B/centroid_0193.npy \n"," inflating: data_track_B/centroid_0194.npy \n"," inflating: data_track_B/centroid_0195.npy \n"," inflating: data_track_B/centroid_0197.npy \n"," inflating: data_track_B/centroid_0201.npy \n"," inflating: data_track_B/centroid_0203.npy \n"," inflating: data_track_B/centroid_0204.npy \n"," inflating: data_track_B/centroid_0205.npy \n"," inflating: data_track_B/centroid_0206.npy \n"," inflating: data_track_B/centroid_0208.npy \n"," inflating: data_track_B/centroid_0210.npy \n"," inflating: data_track_B/centroid_0211.npy \n"," inflating: data_track_B/centroid_0216.npy \n"," inflating: data_track_B/centroid_0217.npy \n"," inflating: data_track_B/centroid_0219.npy \n"," inflating: data_track_B/centroid_0220.npy \n"," inflating: data_track_B/centroid_0227.npy \n"," inflating: data_track_B/centroid_0228.npy \n"," inflating: data_track_B/centroid_0229.npy \n"," inflating: data_track_B/centroid_0232.npy \n"," inflating: data_track_B/centroid_0234.npy \n"," inflating: data_track_B/centroid_0235.npy \n"," inflating: data_track_B/centroid_0236.npy \n"," inflating: data_track_B/centroid_0238.npy \n"," inflating: data_track_B/centroid_0239.npy \n"," inflating: data_track_B/centroid_0240.npy \n"," inflating: data_track_B/centroid_0241.npy \n"," inflating: data_track_B/centroid_0245.npy \n"," inflating: data_track_B/centroid_0246.npy \n"," 
inflating: data_track_B/centroid_0247.npy \n"," inflating: data_track_B/centroid_0248.npy \n"," inflating: data_track_B/centroid_0249.npy \n"," inflating: data_track_B/centroid_0252.npy \n"," inflating: data_track_B/centroid_0253.npy \n"," inflating: data_track_B/centroid_0254.npy \n"," inflating: data_track_B/centroid_0256.npy \n"," inflating: data_track_B/centroid_0257.npy \n"," inflating: data_track_B/centroid_0259.npy \n"," inflating: data_track_B/centroid_0264.npy \n"," inflating: data_track_B/centroid_0265.npy \n"," inflating: data_track_B/centroid_0266.npy \n"," inflating: data_track_B/centroid_0268.npy \n"," inflating: data_track_B/centroid_0269.npy \n"," inflating: data_track_B/centroid_0271.npy \n"," inflating: data_track_B/centroid_0272.npy \n"," inflating: data_track_B/centroid_0273.npy \n"," inflating: data_track_B/centroid_0275.npy \n"," inflating: data_track_B/centroid_0276.npy \n"," inflating: data_track_B/centroid_0277.npy \n"," inflating: data_track_B/centroid_0279.npy \n"," inflating: data_track_B/centroid_0280.npy \n"," inflating: data_track_B/centroid_0281.npy \n"," inflating: data_track_B/centroid_0284.npy \n"," inflating: data_track_B/centroid_0285.npy \n"," inflating: data_track_B/centroid_0286.npy \n"," inflating: data_track_B/centroid_0288.npy \n"," inflating: data_track_B/centroid_0289.npy \n"," inflating: data_track_B/centroid_0290.npy \n"," inflating: data_track_B/centroid_0291.npy \n"," inflating: data_track_B/centroid_0294.npy \n"," inflating: data_track_B/centroid_0296.npy \n"," inflating: data_track_B/centroid_0297.npy \n"," inflating: data_track_B/centroid_0298.npy \n"," inflating: data_track_B/centroid_0301.npy \n"," inflating: data_track_B/centroid_0304.npy \n"," inflating: data_track_B/centroid_0305.npy \n"," inflating: data_track_B/centroid_0306.npy \n"," inflating: data_track_B/centroid_0307.npy \n"," inflating: data_track_B/centroid_0308.npy \n"," inflating: data_track_B/centroid_0310.npy \n"," inflating: data_track_B/centroid_0311.npy \n"," inflating: data_track_B/centroid_0314.npy \n"," inflating: data_track_B/centroid_0315.npy \n"," inflating: data_track_B/centroid_0316.npy \n"," inflating: data_track_B/centroid_0320.npy \n"," inflating: data_track_B/centroid_0321.npy \n"," inflating: data_track_B/centroid_0323.npy \n"," inflating: data_track_B/centroid_0324.npy \n"," inflating: data_track_B/centroid_0327.npy \n"," inflating: data_track_B/centroid_0330.npy \n"," inflating: data_track_B/centroid_0331.npy \n"," inflating: data_track_B/centroid_0332.npy \n"," inflating: data_track_B/centroid_0333.npy \n"," inflating: data_track_B/centroid_0334.npy \n"," inflating: data_track_B/centroid_0337.npy \n"," inflating: data_track_B/centroid_0338.npy \n"," inflating: data_track_B/centroid_0339.npy \n"," inflating: data_track_B/centroid_0340.npy \n"," inflating: data_track_B/centroid_0341.npy \n"," inflating: data_track_B/centroid_0342.npy \n"," inflating: data_track_B/centroid_0343.npy \n"," inflating: data_track_B/centroid_0344.npy \n"," inflating: data_track_B/centroid_0345.npy \n"," inflating: data_track_B/centroid_0346.npy \n"," inflating: data_track_B/centroid_0348.npy \n"," inflating: data_track_B/centroid_0349.npy \n"," inflating: data_track_B/centroid_0351.npy \n"," inflating: data_track_B/centroid_0352.npy \n"," inflating: data_track_B/centroid_0353.npy \n"," inflating: data_track_B/centroid_0354.npy \n"," inflating: data_track_B/centroid_0356.npy \n"," inflating: data_track_B/centroid_0357.npy \n"," inflating: data_track_B/centroid_0359.npy \n"," 
inflating: data_track_B/centroid_0360.npy \n"," inflating: data_track_B/centroid_0361.npy \n"," inflating: data_track_B/centroid_0363.npy \n"," inflating: data_track_B/centroid_0364.npy \n"," inflating: data_track_B/centroid_0365.npy \n"," inflating: data_track_B/centroid_0366.npy \n"," inflating: data_track_B/centroid_0367.npy \n"," inflating: data_track_B/centroid_0368.npy \n"," inflating: data_track_B/centroid_0369.npy \n"," inflating: data_track_B/centroid_0371.npy \n"," inflating: data_track_B/centroid_0373.npy \n"," inflating: data_track_B/centroid_0376.npy \n"," inflating: data_track_B/centroid_0377.npy \n"," inflating: data_track_B/centroid_0378.npy \n"," inflating: data_track_B/centroid_0379.npy \n"," inflating: data_track_B/centroid_0381.npy \n"," inflating: data_track_B/centroid_0382.npy \n"," inflating: data_track_B/centroid_0383.npy \n"," inflating: data_track_B/centroid_0384.npy \n"," inflating: data_track_B/centroid_0385.npy \n"," inflating: data_track_B/centroid_0387.npy \n"," inflating: data_track_B/centroid_0388.npy \n"," inflating: data_track_B/centroid_0389.npy \n"," inflating: data_track_B/centroid_0392.npy \n"," inflating: data_track_B/centroid_0393.npy \n"," inflating: data_track_B/centroid_0394.npy \n"," inflating: data_track_B/centroid_0395.npy \n"," inflating: data_track_B/centroid_0396.npy \n"," inflating: data_track_B/centroid_0398.npy \n"," inflating: data_track_B/centroid_0399.npy \n"," inflating: data_track_B/centroid_0400.npy \n"," inflating: data_track_B/centroid_0401.npy \n"," inflating: data_track_B/centroid_0402.npy \n"," inflating: data_track_B/centroid_0403.npy \n"," inflating: data_track_B/centroid_0404.npy \n"," inflating: data_track_B/centroid_0405.npy \n"," inflating: data_track_B/centroid_0407.npy \n"," inflating: data_track_B/centroid_0408.npy \n"," inflating: data_track_B/centroid_0409.npy \n"," inflating: data_track_B/centroid_0410.npy \n"," inflating: data_track_B/centroid_0411.npy \n"," inflating: data_track_B/centroid_0413.npy \n"," inflating: data_track_B/centroid_0416.npy \n"," inflating: data_track_B/centroid_0417.npy \n"," inflating: data_track_B/centroid_0421.npy \n"," inflating: data_track_B/centroid_0422.npy \n"," inflating: data_track_B/centroid_0423.npy \n"," inflating: data_track_B/centroid_0424.npy \n"," inflating: data_track_B/centroid_0425.npy \n"," inflating: data_track_B/centroid_0428.npy \n"," inflating: data_track_B/centroid_0429.npy \n"," inflating: data_track_B/centroid_0430.npy \n"," inflating: data_track_B/centroid_0431.npy \n"," inflating: data_track_B/centroid_0432.npy \n"," inflating: data_track_B/centroid_0435.npy \n"," inflating: data_track_B/centroid_0438.npy \n"," inflating: data_track_B/centroid_0439.npy \n"," inflating: data_track_B/centroid_0441.npy \n"," inflating: data_track_B/centroid_0444.npy \n"," inflating: data_track_B/centroid_0445.npy \n"," inflating: data_track_B/centroid_0449.npy \n"," inflating: data_track_B/centroid_0450.npy \n"," inflating: data_track_B/centroid_0451.npy \n"," inflating: data_track_B/centroid_0452.npy \n"," inflating: data_track_B/centroid_0453.npy \n"," inflating: data_track_B/centroid_0456.npy \n"," inflating: data_track_B/centroid_0457.npy \n"," inflating: data_track_B/centroid_0458.npy \n"," inflating: data_track_B/centroid_0459.npy \n"," inflating: data_track_B/centroid_0460.npy \n"," inflating: data_track_B/centroid_0461.npy \n"," inflating: data_track_B/centroid_0463.npy \n"," inflating: data_track_B/centroid_0464.npy \n"," inflating: data_track_B/centroid_0465.npy \n"," 
inflating: data_track_B/centroid_0467.npy \n"," inflating: data_track_B/centroid_0469.npy \n"," inflating: data_track_B/centroid_0471.npy \n"," inflating: data_track_B/centroid_0472.npy \n"," inflating: data_track_B/centroid_0474.npy \n"," inflating: data_track_B/centroid_0475.npy \n"," inflating: data_track_B/centroid_0477.npy \n"," inflating: data_track_B/centroid_0478.npy \n"," inflating: data_track_B/centroid_0479.npy \n"," inflating: data_track_B/centroid_0480.npy \n"," inflating: data_track_B/centroid_0481.npy \n"," inflating: data_track_B/centroid_0482.npy \n"," inflating: data_track_B/centroid_0485.npy \n"," inflating: data_track_B/centroid_0486.npy \n"," inflating: data_track_B/centroid_0487.npy \n"," inflating: data_track_B/centroid_0488.npy \n"," inflating: data_track_B/centroid_0489.npy \n"," inflating: data_track_B/centroid_0492.npy \n"," inflating: data_track_B/centroid_0493.npy \n"," inflating: data_track_B/centroid_0494.npy \n"," inflating: data_track_B/centroid_0497.npy \n"," inflating: data_track_B/centroid_0498.npy \n"," inflating: data_track_B/centroid_0499.npy \n"," inflating: data_track_B/centroid_0501.npy \n"," inflating: data_track_B/centroid_0502.npy \n"," inflating: data_track_B/centroid_0503.npy \n"," inflating: data_track_B/centroid_0504.npy \n"," inflating: data_track_B/centroid_0507.npy \n"," inflating: data_track_B/centroid_0508.npy \n"," inflating: data_track_B/centroid_0509.npy \n"," inflating: data_track_B/centroid_0513.npy \n"," inflating: data_track_B/centroid_0514.npy \n"," inflating: data_track_B/centroid_0515.npy \n"," inflating: data_track_B/centroid_0517.npy \n"," inflating: data_track_B/centroid_0518.npy \n"," inflating: data_track_B/centroid_0519.npy \n"," inflating: data_track_B/centroid_0520.npy \n"," inflating: data_track_B/centroid_0521.npy \n"," inflating: data_track_B/centroid_0522.npy \n"," inflating: data_track_B/centroid_0523.npy \n"," inflating: data_track_B/centroid_0524.npy \n"," inflating: data_track_B/centroid_0525.npy \n"," inflating: data_track_B/centroid_0526.npy \n"," inflating: data_track_B/centroid_0527.npy \n"," inflating: data_track_B/centroid_0528.npy \n"," inflating: data_track_B/centroid_0529.npy \n"," inflating: data_track_B/centroid_0530.npy \n"," inflating: data_track_B/centroid_0531.npy \n"," inflating: data_track_B/centroid_0534.npy \n"," inflating: data_track_B/centroid_0535.npy \n"," inflating: data_track_B/centroid_0536.npy \n"," inflating: data_track_B/centroid_0538.npy \n"," inflating: data_track_B/centroid_0541.npy \n"," inflating: data_track_B/centroid_0542.npy \n"," inflating: data_track_B/centroid_0544.npy \n"," inflating: data_track_B/centroid_0545.npy \n"," inflating: data_track_B/centroid_0546.npy \n"," inflating: data_track_B/centroid_0547.npy \n"," inflating: data_track_B/centroid_0550.npy \n"," inflating: data_track_B/centroid_0551.npy \n"," inflating: data_track_B/centroid_0553.npy \n"," inflating: data_track_B/centroid_0555.npy \n"," inflating: data_track_B/centroid_0557.npy \n"," inflating: data_track_B/centroid_0558.npy \n"," inflating: data_track_B/centroid_0561.npy \n"," inflating: data_track_B/centroid_0563.npy \n"," inflating: data_track_B/centroid_0564.npy \n"," inflating: data_track_B/centroid_0565.npy \n"," inflating: data_track_B/centroid_0567.npy \n"," inflating: data_track_B/centroid_0568.npy \n"," inflating: data_track_B/centroid_0571.npy \n"," inflating: data_track_B/centroid_0574.npy \n"," inflating: data_track_B/centroid_0576.npy \n"," inflating: data_track_B/centroid_0579.npy \n"," 
inflating: data_track_B/centroid_0580.npy \n"," inflating: data_track_B/centroid_0582.npy \n"," inflating: data_track_B/centroid_0584.npy \n"," inflating: data_track_B/centroid_0585.npy \n"," inflating: data_track_B/centroid_0588.npy \n"," inflating: data_track_B/centroid_0589.npy \n"," inflating: data_track_B/centroid_0590.npy \n"," inflating: data_track_B/centroid_0591.npy \n"," inflating: data_track_B/centroid_0592.npy \n"," inflating: data_track_B/centroid_0593.npy \n"," inflating: data_track_B/centroid_0594.npy \n"," inflating: data_track_B/centroid_0595.npy \n"," inflating: data_track_B/centroid_0596.npy \n"," inflating: data_track_B/centroid_0597.npy \n"," inflating: data_track_B/centroid_0598.npy \n"," inflating: data_track_B/centroid_0600.npy \n"," inflating: data_track_B/centroid_0602.npy \n"," inflating: data_track_B/centroid_0605.npy \n"," inflating: data_track_B/centroid_0608.npy \n"," inflating: data_track_B/centroid_0609.npy \n"," inflating: data_track_B/centroid_0611.npy \n"," inflating: data_track_B/centroid_0612.npy \n"," inflating: data_track_B/centroid_0613.npy \n"," inflating: data_track_B/centroid_0614.npy \n"," inflating: data_track_B/centroid_0618.npy \n"," inflating: data_track_B/centroid_0619.npy \n"," inflating: data_track_B/centroid_0620.npy \n"," inflating: data_track_B/centroid_0621.npy \n"," inflating: data_track_B/centroid_0622.npy \n"," inflating: data_track_B/centroid_0623.npy \n"," inflating: data_track_B/centroid_0624.npy \n"," inflating: data_track_B/centroid_0625.npy \n"," inflating: data_track_B/centroid_0627.npy \n"," inflating: data_track_B/centroid_0628.npy \n"," inflating: data_track_B/centroid_0629.npy \n"," inflating: data_track_B/centroid_0630.npy \n"," inflating: data_track_B/centroid_0631.npy \n"," inflating: data_track_B/centroid_0632.npy \n"," inflating: data_track_B/centroid_0633.npy \n"," inflating: data_track_B/centroid_0634.npy \n"," inflating: data_track_B/centroid_0635.npy \n"," inflating: data_track_B/centroid_0637.npy \n"," inflating: data_track_B/centroid_0638.npy \n"," inflating: data_track_B/centroid_0639.npy \n"," inflating: data_track_B/centroid_0640.npy \n"," inflating: data_track_B/centroid_0641.npy \n"," inflating: data_track_B/centroid_0643.npy \n"," inflating: data_track_B/centroid_0644.npy \n"," inflating: data_track_B/centroid_0645.npy \n"," inflating: data_track_B/centroid_0646.npy \n"," inflating: data_track_B/centroid_0648.npy \n"," inflating: data_track_B/centroid_0650.npy \n"," inflating: data_track_B/centroid_0651.npy \n"," inflating: data_track_B/centroid_0652.npy \n"," inflating: data_track_B/centroid_0653.npy \n"," inflating: data_track_B/centroid_0654.npy \n"," inflating: data_track_B/centroid_0656.npy \n"," inflating: data_track_B/centroid_0657.npy \n"," inflating: data_track_B/centroid_0658.npy \n"," inflating: data_track_B/centroid_0661.npy \n"," inflating: data_track_B/centroid_0663.npy \n"," inflating: data_track_B/centroid_0664.npy \n"," inflating: data_track_B/centroid_0665.npy \n"," inflating: data_track_B/centroid_0666.npy \n"," inflating: data_track_B/centroid_0667.npy \n"," inflating: data_track_B/centroid_0668.npy \n"," inflating: data_track_B/centroid_0669.npy \n"," inflating: data_track_B/centroid_0671.npy \n"," inflating: data_track_B/centroid_0672.npy \n"," inflating: data_track_B/centroid_0673.npy \n"," inflating: data_track_B/centroid_0674.npy \n"," inflating: data_track_B/centroid_0676.npy \n"," inflating: data_track_B/centroid_0677.npy \n"," inflating: data_track_B/centroid_0678.npy \n"," 
inflating: data_track_B/centroid_0679.npy \n"," inflating: data_track_B/centroid_0680.npy \n"," inflating: data_track_B/centroid_0682.npy \n"," inflating: data_track_B/centroid_0686.npy \n"," inflating: data_track_B/centroid_0688.npy \n"," inflating: data_track_B/centroid_0689.npy \n"," inflating: data_track_B/centroid_0690.npy \n"," inflating: data_track_B/centroid_0691.npy \n"," inflating: data_track_B/centroid_0692.npy \n"," inflating: data_track_B/centroid_0693.npy \n"," inflating: data_track_B/centroid_0694.npy \n"," inflating: data_track_B/centroid_0695.npy \n"," inflating: data_track_B/centroid_0697.npy \n"," inflating: data_track_B/centroid_0699.npy \n"," inflating: data_track_B/centroid_0700.npy \n"," inflating: data_track_B/centroid_0701.npy \n"," inflating: data_track_B/centroid_0703.npy \n"," inflating: data_track_B/centroid_0704.npy \n"," inflating: data_track_B/centroid_0706.npy \n"," inflating: data_track_B/centroid_0707.npy \n"," inflating: data_track_B/centroid_0708.npy \n"," inflating: data_track_B/centroid_0709.npy \n"," inflating: data_track_B/centroid_0711.npy \n"," inflating: data_track_B/centroid_0712.npy \n"," inflating: data_track_B/centroid_0713.npy \n"," inflating: data_track_B/centroid_0714.npy \n"," inflating: data_track_B/centroid_0715.npy \n"," inflating: data_track_B/centroid_0716.npy \n"," inflating: data_track_B/centroid_0718.npy \n"," inflating: data_track_B/centroid_0719.npy \n"," inflating: data_track_B/centroid_0720.npy \n"," inflating: data_track_B/centroid_0721.npy \n"," inflating: data_track_B/centroid_0722.npy \n"," inflating: data_track_B/centroid_0724.npy \n"," inflating: data_track_B/centroid_0727.npy \n"," inflating: data_track_B/centroid_0728.npy \n"," inflating: data_track_B/centroid_0729.npy \n"," inflating: data_track_B/centroid_0730.npy \n"," inflating: data_track_B/centroid_0731.npy \n"," inflating: data_track_B/centroid_0733.npy \n"," inflating: data_track_B/centroid_0735.npy \n"," inflating: data_track_B/centroid_0736.npy \n"," inflating: data_track_B/centroid_0737.npy \n"," inflating: data_track_B/centroid_0740.npy \n"," inflating: data_track_B/centroid_0742.npy \n"," inflating: data_track_B/centroid_0743.npy \n"," inflating: data_track_B/centroid_0744.npy \n"," inflating: data_track_B/centroid_0745.npy \n"," inflating: data_track_B/press_0002.npy \n"," inflating: data_track_B/press_0003.npy \n"," inflating: data_track_B/press_0004.npy \n"," inflating: data_track_B/press_0005.npy \n"," inflating: data_track_B/press_0006.npy \n"," inflating: data_track_B/press_0011.npy \n"," inflating: data_track_B/press_0012.npy \n"," inflating: data_track_B/press_0013.npy \n"," inflating: data_track_B/press_0015.npy \n"," inflating: data_track_B/press_0017.npy \n"," inflating: data_track_B/press_0018.npy \n"," inflating: data_track_B/press_0020.npy \n"," inflating: data_track_B/press_0021.npy \n"," inflating: data_track_B/press_0022.npy \n"," inflating: data_track_B/press_0023.npy \n"," inflating: data_track_B/press_0024.npy \n"," inflating: data_track_B/press_0026.npy \n"," inflating: data_track_B/press_0029.npy \n"," inflating: data_track_B/press_0030.npy \n"," inflating: data_track_B/press_0036.npy \n"," inflating: data_track_B/press_0037.npy \n"," inflating: data_track_B/press_0038.npy \n"," inflating: data_track_B/press_0039.npy \n"," inflating: data_track_B/press_0040.npy \n"," inflating: data_track_B/press_0041.npy \n"," inflating: data_track_B/press_0042.npy \n"," inflating: data_track_B/press_0043.npy \n"," inflating: 
data_track_B/press_0044.npy \n"," inflating: data_track_B/press_0048.npy \n"," inflating: data_track_B/press_0049.npy \n"," inflating: data_track_B/press_0051.npy \n"," inflating: data_track_B/press_0052.npy \n"," inflating: data_track_B/press_0055.npy \n"," inflating: data_track_B/press_0056.npy \n"," inflating: data_track_B/press_0057.npy \n"," inflating: data_track_B/press_0059.npy \n"," inflating: data_track_B/press_0062.npy \n"," inflating: data_track_B/press_0064.npy \n"," inflating: data_track_B/press_0066.npy \n"," inflating: data_track_B/press_0067.npy \n"," inflating: data_track_B/press_0068.npy \n"," inflating: data_track_B/press_0071.npy \n"," inflating: data_track_B/press_0074.npy \n"," inflating: data_track_B/press_0075.npy \n"," inflating: data_track_B/press_0077.npy \n"," inflating: data_track_B/press_0078.npy \n"," inflating: data_track_B/press_0080.npy \n"," inflating: data_track_B/press_0081.npy \n"," inflating: data_track_B/press_0082.npy \n"," inflating: data_track_B/press_0084.npy \n"," inflating: data_track_B/press_0085.npy \n"," inflating: data_track_B/press_0086.npy \n"," inflating: data_track_B/press_0087.npy \n"," inflating: data_track_B/press_0088.npy \n"," inflating: data_track_B/press_0089.npy \n"," inflating: data_track_B/press_0090.npy \n"," inflating: data_track_B/press_0092.npy \n"," inflating: data_track_B/press_0093.npy \n"," inflating: data_track_B/press_0094.npy \n"," inflating: data_track_B/press_0095.npy \n"," inflating: data_track_B/press_0097.npy \n"," inflating: data_track_B/press_0098.npy \n"," inflating: data_track_B/press_0100.npy \n"," inflating: data_track_B/press_0101.npy \n"," inflating: data_track_B/press_0102.npy \n"," inflating: data_track_B/press_0103.npy \n"," inflating: data_track_B/press_0104.npy \n"," inflating: data_track_B/press_0106.npy \n"," inflating: data_track_B/press_0107.npy \n"," inflating: data_track_B/press_0108.npy \n"," inflating: data_track_B/press_0109.npy \n"," inflating: data_track_B/press_0110.npy \n"," inflating: data_track_B/press_0113.npy \n"," inflating: data_track_B/press_0114.npy \n"," inflating: data_track_B/press_0115.npy \n"," inflating: data_track_B/press_0116.npy \n"," inflating: data_track_B/press_0117.npy \n"," inflating: data_track_B/press_0118.npy \n"," inflating: data_track_B/press_0119.npy \n"," inflating: data_track_B/press_0120.npy \n"," inflating: data_track_B/press_0121.npy \n"," inflating: data_track_B/press_0122.npy \n"," inflating: data_track_B/press_0124.npy \n"," inflating: data_track_B/press_0125.npy \n"," inflating: data_track_B/press_0126.npy \n"," inflating: data_track_B/press_0128.npy \n"," inflating: data_track_B/press_0129.npy \n"," inflating: data_track_B/press_0130.npy \n"," inflating: data_track_B/press_0131.npy \n"," inflating: data_track_B/press_0132.npy \n"," inflating: data_track_B/press_0133.npy \n"," inflating: data_track_B/press_0134.npy \n"," inflating: data_track_B/press_0135.npy \n"," inflating: data_track_B/press_0136.npy \n"," inflating: data_track_B/press_0138.npy \n"," inflating: data_track_B/press_0139.npy \n"," inflating: data_track_B/press_0140.npy \n"," inflating: data_track_B/press_0141.npy \n"," inflating: data_track_B/press_0143.npy \n"," inflating: data_track_B/press_0145.npy \n"," inflating: data_track_B/press_0146.npy \n"," inflating: data_track_B/press_0148.npy \n"," inflating: data_track_B/press_0149.npy \n"," inflating: data_track_B/press_0150.npy \n"," inflating: data_track_B/press_0151.npy \n"," inflating: data_track_B/press_0153.npy \n"," inflating: 
data_track_B/press_0154.npy \n"," inflating: data_track_B/press_0156.npy \n"," inflating: data_track_B/press_0157.npy \n"," inflating: data_track_B/press_0158.npy \n"," inflating: data_track_B/press_0161.npy \n"," inflating: data_track_B/press_0162.npy \n"," inflating: data_track_B/press_0163.npy \n"," inflating: data_track_B/press_0164.npy \n"," inflating: data_track_B/press_0166.npy \n"," inflating: data_track_B/press_0167.npy \n"," inflating: data_track_B/press_0168.npy \n"," inflating: data_track_B/press_0170.npy \n"," inflating: data_track_B/press_0171.npy \n"," inflating: data_track_B/press_0172.npy \n"," inflating: data_track_B/press_0174.npy \n"," inflating: data_track_B/press_0175.npy \n"," inflating: data_track_B/press_0183.npy \n"," inflating: data_track_B/press_0184.npy \n"," inflating: data_track_B/press_0185.npy \n"," inflating: data_track_B/press_0189.npy \n"," inflating: data_track_B/press_0190.npy \n"," inflating: data_track_B/press_0193.npy \n"," inflating: data_track_B/press_0194.npy \n"," inflating: data_track_B/press_0195.npy \n"," inflating: data_track_B/press_0197.npy \n"," inflating: data_track_B/press_0201.npy \n"," inflating: data_track_B/press_0203.npy \n"," inflating: data_track_B/press_0204.npy \n"," inflating: data_track_B/press_0205.npy \n"," inflating: data_track_B/press_0206.npy \n"," inflating: data_track_B/press_0208.npy \n"," inflating: data_track_B/press_0210.npy \n"," inflating: data_track_B/press_0211.npy \n"," inflating: data_track_B/press_0216.npy \n"," inflating: data_track_B/press_0217.npy \n"," inflating: data_track_B/press_0219.npy \n"," inflating: data_track_B/press_0220.npy \n"," inflating: data_track_B/press_0227.npy \n"," inflating: data_track_B/press_0228.npy \n"," inflating: data_track_B/press_0229.npy \n"," inflating: data_track_B/press_0232.npy \n"," inflating: data_track_B/press_0234.npy \n"," inflating: data_track_B/press_0235.npy \n"," inflating: data_track_B/press_0236.npy \n"," inflating: data_track_B/press_0238.npy \n"," inflating: data_track_B/press_0239.npy \n"," inflating: data_track_B/press_0240.npy \n"," inflating: data_track_B/press_0241.npy \n"," inflating: data_track_B/press_0245.npy \n"," inflating: data_track_B/press_0246.npy \n"," inflating: data_track_B/press_0247.npy \n"," inflating: data_track_B/press_0248.npy \n"," inflating: data_track_B/press_0249.npy \n"," inflating: data_track_B/press_0252.npy \n"," inflating: data_track_B/press_0253.npy \n"," inflating: data_track_B/press_0254.npy \n"," inflating: data_track_B/press_0256.npy \n"," inflating: data_track_B/press_0257.npy \n"," inflating: data_track_B/press_0259.npy \n"," inflating: data_track_B/press_0264.npy \n"," inflating: data_track_B/press_0265.npy \n"," inflating: data_track_B/press_0266.npy \n"," inflating: data_track_B/press_0268.npy \n"," inflating: data_track_B/press_0269.npy \n"," inflating: data_track_B/press_0271.npy \n"," inflating: data_track_B/press_0272.npy \n"," inflating: data_track_B/press_0273.npy \n"," inflating: data_track_B/press_0275.npy \n"," inflating: data_track_B/press_0276.npy \n"," inflating: data_track_B/press_0277.npy \n"," inflating: data_track_B/press_0279.npy \n"," inflating: data_track_B/press_0280.npy \n"," inflating: data_track_B/press_0281.npy \n"," inflating: data_track_B/press_0284.npy \n"," inflating: data_track_B/press_0285.npy \n"," inflating: data_track_B/press_0286.npy \n"," inflating: data_track_B/press_0288.npy \n"," inflating: data_track_B/press_0289.npy \n"," inflating: data_track_B/press_0290.npy \n"," inflating: 
data_track_B/press_0291.npy \n"," inflating: data_track_B/press_0294.npy \n"," inflating: data_track_B/press_0296.npy \n"," inflating: data_track_B/press_0297.npy \n"," inflating: data_track_B/press_0298.npy \n"," inflating: data_track_B/press_0301.npy \n"," inflating: data_track_B/press_0304.npy \n"," inflating: data_track_B/press_0305.npy \n"," inflating: data_track_B/press_0306.npy \n"," inflating: data_track_B/press_0307.npy \n"," inflating: data_track_B/press_0308.npy \n"," inflating: data_track_B/press_0310.npy \n"," inflating: data_track_B/press_0311.npy \n"," inflating: data_track_B/press_0314.npy \n"," inflating: data_track_B/press_0315.npy \n"," inflating: data_track_B/press_0316.npy \n"," inflating: data_track_B/press_0320.npy \n"," inflating: data_track_B/press_0321.npy \n"," inflating: data_track_B/press_0323.npy \n"," inflating: data_track_B/press_0324.npy \n"," inflating: data_track_B/press_0327.npy \n"," inflating: data_track_B/press_0330.npy \n"," inflating: data_track_B/press_0331.npy \n"," inflating: data_track_B/press_0332.npy \n"," inflating: data_track_B/press_0333.npy \n"," inflating: data_track_B/press_0334.npy \n"," inflating: data_track_B/press_0337.npy \n"," inflating: data_track_B/press_0338.npy \n"," inflating: data_track_B/press_0339.npy \n"," inflating: data_track_B/press_0340.npy \n"," inflating: data_track_B/press_0341.npy \n"," inflating: data_track_B/press_0342.npy \n"," inflating: data_track_B/press_0343.npy \n"," inflating: data_track_B/press_0344.npy \n"," inflating: data_track_B/press_0345.npy \n"," inflating: data_track_B/press_0346.npy \n"," inflating: data_track_B/press_0348.npy \n"," inflating: data_track_B/press_0349.npy \n"," inflating: data_track_B/press_0351.npy \n"," inflating: data_track_B/press_0352.npy \n"," inflating: data_track_B/press_0353.npy \n"," inflating: data_track_B/press_0354.npy \n"," inflating: data_track_B/press_0356.npy \n"," inflating: data_track_B/press_0357.npy \n"," inflating: data_track_B/press_0359.npy \n"," inflating: data_track_B/press_0360.npy \n"," inflating: data_track_B/press_0361.npy \n"," inflating: data_track_B/press_0363.npy \n"," inflating: data_track_B/press_0364.npy \n"," inflating: data_track_B/press_0365.npy \n"," inflating: data_track_B/press_0366.npy \n"," inflating: data_track_B/press_0367.npy \n"," inflating: data_track_B/press_0368.npy \n"," inflating: data_track_B/press_0369.npy \n"," inflating: data_track_B/press_0371.npy \n"," inflating: data_track_B/press_0373.npy \n"," inflating: data_track_B/press_0376.npy \n"," inflating: data_track_B/press_0377.npy \n"," inflating: data_track_B/press_0378.npy \n"," inflating: data_track_B/press_0379.npy \n"," inflating: data_track_B/press_0381.npy \n"," inflating: data_track_B/press_0382.npy \n"," inflating: data_track_B/press_0383.npy \n"," inflating: data_track_B/press_0384.npy \n"," inflating: data_track_B/press_0385.npy \n"," inflating: data_track_B/press_0387.npy \n"," inflating: data_track_B/press_0388.npy \n"," inflating: data_track_B/press_0389.npy \n"," inflating: data_track_B/press_0392.npy \n"," inflating: data_track_B/press_0393.npy \n"," inflating: data_track_B/press_0394.npy \n"," inflating: data_track_B/press_0395.npy \n"," inflating: data_track_B/press_0396.npy \n"," inflating: data_track_B/press_0398.npy \n"," inflating: data_track_B/press_0399.npy \n"," inflating: data_track_B/press_0400.npy \n"," inflating: data_track_B/press_0401.npy \n"," inflating: data_track_B/press_0402.npy \n"," inflating: data_track_B/press_0403.npy \n"," inflating: 
data_track_B/press_0404.npy \n"," inflating: data_track_B/press_0405.npy \n"," inflating: data_track_B/press_0407.npy \n"," inflating: data_track_B/press_0408.npy \n"," inflating: data_track_B/press_0409.npy \n"," inflating: data_track_B/press_0410.npy \n"," inflating: data_track_B/press_0411.npy \n"," inflating: data_track_B/press_0413.npy \n"," inflating: data_track_B/press_0416.npy \n"," inflating: data_track_B/press_0417.npy \n"," inflating: data_track_B/press_0421.npy \n"," inflating: data_track_B/press_0422.npy \n"," inflating: data_track_B/press_0423.npy \n"," inflating: data_track_B/press_0424.npy \n"," inflating: data_track_B/press_0425.npy \n"," inflating: data_track_B/press_0428.npy \n"," inflating: data_track_B/press_0429.npy \n"," inflating: data_track_B/press_0430.npy \n"," inflating: data_track_B/press_0431.npy \n"," inflating: data_track_B/press_0432.npy \n"," inflating: data_track_B/press_0435.npy \n"," inflating: data_track_B/press_0438.npy \n"," inflating: data_track_B/press_0439.npy \n"," inflating: data_track_B/press_0441.npy \n"," inflating: data_track_B/press_0444.npy \n"," inflating: data_track_B/press_0445.npy \n"," inflating: data_track_B/press_0449.npy \n"," inflating: data_track_B/press_0450.npy \n"," inflating: data_track_B/press_0451.npy \n"," inflating: data_track_B/press_0452.npy \n"," inflating: data_track_B/press_0453.npy \n"," inflating: data_track_B/press_0456.npy \n"," inflating: data_track_B/press_0457.npy \n"," inflating: data_track_B/press_0458.npy \n"," inflating: data_track_B/press_0459.npy \n"," inflating: data_track_B/press_0460.npy \n"," inflating: data_track_B/press_0461.npy \n"," inflating: data_track_B/press_0463.npy \n"," inflating: data_track_B/press_0464.npy \n"," inflating: data_track_B/press_0465.npy \n"," inflating: data_track_B/press_0467.npy \n"," inflating: data_track_B/press_0469.npy \n"," inflating: data_track_B/press_0471.npy \n"," inflating: data_track_B/press_0472.npy \n"," inflating: data_track_B/press_0474.npy \n"," inflating: data_track_B/press_0475.npy \n"," inflating: data_track_B/press_0477.npy \n"," inflating: data_track_B/press_0478.npy \n"," inflating: data_track_B/press_0479.npy \n"," inflating: data_track_B/press_0480.npy \n"," inflating: data_track_B/press_0481.npy \n"," inflating: data_track_B/press_0482.npy \n"," inflating: data_track_B/press_0485.npy \n"," inflating: data_track_B/press_0486.npy \n"," inflating: data_track_B/press_0487.npy \n"," inflating: data_track_B/press_0488.npy \n"," inflating: data_track_B/press_0489.npy \n"," inflating: data_track_B/press_0492.npy \n"," inflating: data_track_B/press_0493.npy \n"," inflating: data_track_B/press_0494.npy \n"," inflating: data_track_B/press_0497.npy \n"," inflating: data_track_B/press_0498.npy \n"," inflating: data_track_B/press_0499.npy \n"," inflating: data_track_B/press_0501.npy \n"," inflating: data_track_B/press_0502.npy \n"," inflating: data_track_B/press_0503.npy \n"," inflating: data_track_B/press_0504.npy \n"," inflating: data_track_B/press_0507.npy \n"," inflating: data_track_B/press_0508.npy \n"," inflating: data_track_B/press_0509.npy \n"," inflating: data_track_B/press_0513.npy \n"," inflating: data_track_B/press_0514.npy \n"," inflating: data_track_B/press_0515.npy \n"," inflating: data_track_B/press_0517.npy \n"," inflating: data_track_B/press_0518.npy \n"," inflating: data_track_B/press_0519.npy \n"," inflating: data_track_B/press_0520.npy \n"," inflating: data_track_B/press_0521.npy \n"," inflating: data_track_B/press_0522.npy \n"," inflating: 
data_track_B/press_0523.npy \n"," inflating: data_track_B/press_0524.npy \n"," inflating: data_track_B/press_0525.npy \n"," inflating: data_track_B/press_0526.npy \n"," inflating: data_track_B/press_0527.npy \n"," inflating: data_track_B/press_0528.npy \n"," inflating: data_track_B/press_0529.npy \n"," inflating: data_track_B/press_0530.npy \n"," inflating: data_track_B/press_0531.npy \n"," inflating: data_track_B/press_0534.npy \n"," inflating: data_track_B/press_0535.npy \n"," inflating: data_track_B/press_0536.npy \n"," inflating: data_track_B/press_0538.npy \n"," inflating: data_track_B/press_0541.npy \n"," inflating: data_track_B/press_0542.npy \n"," inflating: data_track_B/press_0544.npy \n"," inflating: data_track_B/press_0545.npy \n"," inflating: data_track_B/press_0546.npy \n"," inflating: data_track_B/press_0547.npy \n"," inflating: data_track_B/press_0550.npy \n"," inflating: data_track_B/press_0551.npy \n"," inflating: data_track_B/press_0553.npy \n"," inflating: data_track_B/press_0555.npy \n"," inflating: data_track_B/press_0557.npy \n"," inflating: data_track_B/press_0558.npy \n"," inflating: data_track_B/press_0561.npy \n"," inflating: data_track_B/press_0563.npy \n"," inflating: data_track_B/press_0564.npy \n"," inflating: data_track_B/press_0565.npy \n"," inflating: data_track_B/press_0567.npy \n"," inflating: data_track_B/press_0568.npy \n"," inflating: data_track_B/press_0571.npy \n"," inflating: data_track_B/press_0574.npy \n"," inflating: data_track_B/press_0576.npy \n"," inflating: data_track_B/press_0579.npy \n"," inflating: data_track_B/press_0580.npy \n"," inflating: data_track_B/press_0582.npy \n"," inflating: data_track_B/press_0584.npy \n"," inflating: data_track_B/press_0585.npy \n"," inflating: data_track_B/press_0588.npy \n"," inflating: data_track_B/press_0589.npy \n"," inflating: data_track_B/press_0590.npy \n"," inflating: data_track_B/press_0591.npy \n"," inflating: data_track_B/press_0592.npy \n"," inflating: data_track_B/press_0593.npy \n"," inflating: data_track_B/press_0594.npy \n"," inflating: data_track_B/press_0595.npy \n"," inflating: data_track_B/press_0596.npy \n"," inflating: data_track_B/press_0597.npy \n"," inflating: data_track_B/press_0598.npy \n"," inflating: data_track_B/press_0600.npy \n"," inflating: data_track_B/press_0602.npy \n"," inflating: data_track_B/press_0605.npy \n"," inflating: data_track_B/press_0608.npy \n"," inflating: data_track_B/press_0609.npy \n"," inflating: data_track_B/press_0611.npy \n"," inflating: data_track_B/press_0612.npy \n"," inflating: data_track_B/press_0613.npy \n"," inflating: data_track_B/press_0614.npy \n"," inflating: data_track_B/press_0618.npy \n"," inflating: data_track_B/press_0619.npy \n"," inflating: data_track_B/press_0620.npy \n"," inflating: data_track_B/press_0621.npy \n"," inflating: data_track_B/press_0622.npy \n"," inflating: data_track_B/press_0623.npy \n"," inflating: data_track_B/press_0624.npy \n"," inflating: data_track_B/press_0625.npy \n"," inflating: data_track_B/press_0627.npy \n"," inflating: data_track_B/press_0628.npy \n"," inflating: data_track_B/press_0629.npy \n"," inflating: data_track_B/press_0630.npy \n"," inflating: data_track_B/press_0631.npy \n"," inflating: data_track_B/press_0632.npy \n"," inflating: data_track_B/press_0633.npy \n"," inflating: data_track_B/press_0634.npy \n"," inflating: data_track_B/press_0635.npy \n"," inflating: data_track_B/press_0637.npy \n"," inflating: data_track_B/press_0638.npy \n"," inflating: data_track_B/press_0639.npy \n"," inflating: 
data_track_B/press_0640.npy \n"," inflating: data_track_B/press_0641.npy \n"," inflating: data_track_B/press_0643.npy \n"," inflating: data_track_B/press_0644.npy \n"," inflating: data_track_B/press_0645.npy \n"," inflating: data_track_B/press_0646.npy \n"," inflating: data_track_B/press_0648.npy \n"," inflating: data_track_B/press_0650.npy \n"," inflating: data_track_B/press_0651.npy \n"," inflating: data_track_B/press_0652.npy \n"," inflating: data_track_B/press_0653.npy \n"," inflating: data_track_B/press_0654.npy \n"," inflating: data_track_B/press_0656.npy \n"," inflating: data_track_B/press_0657.npy \n"," inflating: data_track_B/press_0658.npy \n"," inflating: data_track_B/press_0661.npy \n"," inflating: data_track_B/press_0663.npy \n"," inflating: data_track_B/press_0664.npy \n"," inflating: data_track_B/press_0665.npy \n"," inflating: data_track_B/press_0666.npy \n"," inflating: data_track_B/press_0667.npy \n"," inflating: data_track_B/press_0668.npy \n"," inflating: data_track_B/press_0669.npy \n"," inflating: data_track_B/press_0671.npy \n"," inflating: data_track_B/press_0672.npy \n"," inflating: data_track_B/press_0673.npy \n"," inflating: data_track_B/press_0674.npy \n"," inflating: data_track_B/press_0676.npy \n"," inflating: data_track_B/press_0677.npy \n"," inflating: data_track_B/press_0678.npy \n"," inflating: data_track_B/press_0679.npy \n"," inflating: data_track_B/press_0680.npy \n"," inflating: data_track_B/press_0682.npy \n"," inflating: data_track_B/press_0686.npy \n"," inflating: data_track_B/press_0688.npy \n"," inflating: data_track_B/press_0689.npy \n"," inflating: data_track_B/press_0690.npy \n"," inflating: data_track_B/press_0691.npy \n"," inflating: data_track_B/press_0692.npy \n"," inflating: data_track_B/press_0693.npy \n"," inflating: data_track_B/press_0694.npy \n"," inflating: data_track_B/press_0695.npy \n"," inflating: data_track_B/press_0697.npy \n"," inflating: data_track_B/press_0699.npy \n"," inflating: data_track_B/press_0700.npy \n"," inflating: data_track_B/press_0701.npy \n"," inflating: data_track_B/press_0703.npy \n"," inflating: data_track_B/press_0704.npy \n"," inflating: data_track_B/press_0706.npy \n"," inflating: data_track_B/press_0707.npy \n"," inflating: data_track_B/press_0708.npy \n"," inflating: data_track_B/press_0709.npy \n"," inflating: data_track_B/press_0711.npy \n"," inflating: data_track_B/press_0712.npy \n"," inflating: data_track_B/press_0713.npy \n"," inflating: data_track_B/press_0714.npy \n"," inflating: data_track_B/press_0715.npy \n"," inflating: data_track_B/press_0716.npy \n"," inflating: data_track_B/press_0718.npy \n"," inflating: data_track_B/press_0719.npy \n"," inflating: data_track_B/press_0720.npy \n"," inflating: data_track_B/press_0721.npy \n"," inflating: data_track_B/press_0722.npy \n"," inflating: data_track_B/press_0724.npy \n"," inflating: data_track_B/press_0727.npy \n"," inflating: data_track_B/press_0728.npy \n"," inflating: data_track_B/press_0729.npy \n"," inflating: data_track_B/press_0730.npy \n"," inflating: data_track_B/press_0731.npy \n"," inflating: data_track_B/press_0733.npy \n"," inflating: data_track_B/press_0735.npy \n"," inflating: data_track_B/press_0736.npy \n"," inflating: data_track_B/press_0737.npy \n"," inflating: data_track_B/press_0740.npy \n"," inflating: data_track_B/press_0742.npy \n"," inflating: data_track_B/press_0743.npy \n"," inflating: data_track_B/press_0744.npy \n"," inflating: data_track_B/press_0745.npy \n","Archive: track_B.zip\n"," inflating: track_B/area_1.npy 
\n"," inflating: track_B/area_10.npy \n"," inflating: track_B/area_11.npy \n"," inflating: track_B/area_12.npy \n"," inflating: track_B/area_13.npy \n"," inflating: track_B/area_14.npy \n"," inflating: track_B/area_15.npy \n"," inflating: track_B/area_16.npy \n"," inflating: track_B/area_17.npy \n"," inflating: track_B/area_18.npy \n"," inflating: track_B/area_19.npy \n"," inflating: track_B/area_2.npy \n"," inflating: track_B/area_20.npy \n"," inflating: track_B/area_21.npy \n"," inflating: track_B/area_22.npy \n"," inflating: track_B/area_23.npy \n"," inflating: track_B/area_24.npy \n"," inflating: track_B/area_25.npy \n"," inflating: track_B/area_26.npy \n"," inflating: track_B/area_27.npy \n"," inflating: track_B/area_28.npy \n"," inflating: track_B/area_29.npy \n"," inflating: track_B/area_3.npy \n"," inflating: track_B/area_30.npy \n"," inflating: track_B/area_31.npy \n"," inflating: track_B/area_32.npy \n"," inflating: track_B/area_33.npy \n"," inflating: track_B/area_34.npy \n"," inflating: track_B/area_35.npy \n"," inflating: track_B/area_36.npy \n"," inflating: track_B/area_37.npy \n"," inflating: track_B/area_38.npy \n"," inflating: track_B/area_39.npy \n"," inflating: track_B/area_4.npy \n"," inflating: track_B/area_40.npy \n"," inflating: track_B/area_41.npy \n"," inflating: track_B/area_42.npy \n"," inflating: track_B/area_43.npy \n"," inflating: track_B/area_44.npy \n"," inflating: track_B/area_45.npy \n"," inflating: track_B/area_46.npy \n"," inflating: track_B/area_47.npy \n"," inflating: track_B/area_48.npy \n"," inflating: track_B/area_49.npy \n"," inflating: track_B/area_5.npy \n"," inflating: track_B/area_50.npy \n"," inflating: track_B/area_6.npy \n"," inflating: track_B/area_7.npy \n"," inflating: track_B/area_8.npy \n"," inflating: track_B/area_9.npy \n"," inflating: track_B/area_bounds.txt \n"," inflating: track_B/centroid_1.npy \n"," inflating: track_B/centroid_10.npy \n"," inflating: track_B/centroid_11.npy \n"," inflating: track_B/centroid_12.npy \n"," inflating: track_B/centroid_13.npy \n"," inflating: track_B/centroid_14.npy \n"," inflating: track_B/centroid_15.npy \n"," inflating: track_B/centroid_16.npy \n"," inflating: track_B/centroid_17.npy \n"," inflating: track_B/centroid_18.npy \n"," inflating: track_B/centroid_19.npy \n"," inflating: track_B/centroid_2.npy \n"," inflating: track_B/centroid_20.npy \n"," inflating: track_B/centroid_21.npy \n"," inflating: track_B/centroid_22.npy \n"," inflating: track_B/centroid_23.npy \n"," inflating: track_B/centroid_24.npy \n"," inflating: track_B/centroid_25.npy \n"," inflating: track_B/centroid_26.npy \n"," inflating: track_B/centroid_27.npy \n"," inflating: track_B/centroid_28.npy \n"," inflating: track_B/centroid_29.npy \n"," inflating: track_B/centroid_3.npy \n"," inflating: track_B/centroid_30.npy \n"," inflating: track_B/centroid_31.npy \n"," inflating: track_B/centroid_32.npy \n"," inflating: track_B/centroid_33.npy \n"," inflating: track_B/centroid_34.npy \n"," inflating: track_B/centroid_35.npy \n"," inflating: track_B/centroid_36.npy \n"," inflating: track_B/centroid_37.npy \n"," inflating: track_B/centroid_38.npy \n"," inflating: track_B/centroid_39.npy \n"," inflating: track_B/centroid_4.npy \n"," inflating: track_B/centroid_40.npy \n"," inflating: track_B/centroid_41.npy \n"," inflating: track_B/centroid_42.npy \n"," inflating: track_B/centroid_43.npy \n"," inflating: track_B/centroid_44.npy \n"," inflating: track_B/centroid_45.npy \n"," inflating: track_B/centroid_46.npy \n"," inflating: 
track_B/centroid_47.npy \n"," inflating: track_B/centroid_48.npy \n"," inflating: track_B/centroid_49.npy \n"," inflating: track_B/centroid_5.npy \n"," inflating: track_B/centroid_50.npy \n"," inflating: track_B/centroid_6.npy \n"," inflating: track_B/centroid_7.npy \n"," inflating: track_B/centroid_8.npy \n"," inflating: track_B/centroid_9.npy \n"," inflating: track_B/global_bounds.txt \n"," inflating: track_B/info_1.npy \n"," inflating: track_B/info_10.npy \n"," inflating: track_B/info_11.npy \n"," inflating: track_B/info_12.npy \n"," inflating: track_B/info_13.npy \n"," inflating: track_B/info_14.npy \n"," inflating: track_B/info_15.npy \n"," inflating: track_B/info_16.npy \n"," inflating: track_B/info_17.npy \n"," inflating: track_B/info_18.npy \n"," inflating: track_B/info_19.npy \n"," inflating: track_B/info_2.npy \n"," inflating: track_B/info_20.npy \n"," inflating: track_B/info_21.npy \n"," inflating: track_B/info_22.npy \n"," inflating: track_B/info_23.npy \n"," inflating: track_B/info_24.npy \n"," inflating: track_B/info_25.npy \n"," inflating: track_B/info_26.npy \n"," inflating: track_B/info_27.npy \n"," inflating: track_B/info_28.npy \n"," inflating: track_B/info_29.npy \n"," inflating: track_B/info_3.npy \n"," inflating: track_B/info_30.npy \n"," inflating: track_B/info_31.npy \n"," inflating: track_B/info_32.npy \n"," inflating: track_B/info_33.npy \n"," inflating: track_B/info_34.npy \n"," inflating: track_B/info_35.npy \n"," inflating: track_B/info_36.npy \n"," inflating: track_B/info_37.npy \n"," inflating: track_B/info_38.npy \n"," inflating: track_B/info_39.npy \n"," inflating: track_B/info_4.npy \n"," inflating: track_B/info_40.npy \n"," inflating: track_B/info_41.npy \n"," inflating: track_B/info_42.npy \n"," inflating: track_B/info_43.npy \n"," inflating: track_B/info_44.npy \n"," inflating: track_B/info_45.npy \n"," inflating: track_B/info_46.npy \n"," inflating: track_B/info_47.npy \n"," inflating: track_B/info_48.npy \n"," inflating: track_B/info_49.npy \n"," inflating: track_B/info_5.npy \n"," inflating: track_B/info_50.npy \n"," inflating: track_B/info_6.npy \n"," inflating: track_B/info_7.npy \n"," inflating: track_B/info_8.npy \n"," inflating: track_B/info_9.npy \n"," inflating: track_B/info_bounds.txt \n"," inflating: track_B/mesh_1.ply \n"," inflating: track_B/mesh_10.ply \n"," inflating: track_B/mesh_11.ply \n"," inflating: track_B/mesh_12.ply \n"," inflating: track_B/mesh_13.ply \n"," inflating: track_B/mesh_14.ply \n"," inflating: track_B/mesh_15.ply \n"," inflating: track_B/mesh_16.ply \n"," inflating: track_B/mesh_17.ply \n"," inflating: track_B/mesh_18.ply \n"," inflating: track_B/mesh_19.ply \n"," inflating: track_B/mesh_2.ply \n"," inflating: track_B/mesh_20.ply \n"," inflating: track_B/mesh_21.ply \n"," inflating: track_B/mesh_22.ply \n"," inflating: track_B/mesh_23.ply \n"," inflating: track_B/mesh_24.ply \n"," inflating: track_B/mesh_25.ply \n"," inflating: track_B/mesh_26.ply \n"," inflating: track_B/mesh_27.ply \n"," inflating: track_B/mesh_28.ply \n"," inflating: track_B/mesh_29.ply \n"," inflating: track_B/mesh_3.ply \n"," inflating: track_B/mesh_30.ply \n"," inflating: track_B/mesh_31.ply \n"," inflating: track_B/mesh_32.ply \n"," inflating: track_B/mesh_33.ply \n"," inflating: track_B/mesh_34.ply \n"," inflating: track_B/mesh_35.ply \n"," inflating: track_B/mesh_36.ply \n"," inflating: track_B/mesh_37.ply \n"," inflating: track_B/mesh_38.ply \n"," inflating: track_B/mesh_39.ply \n"," inflating: track_B/mesh_4.ply \n"," inflating: 
track_B/mesh_40.ply \n"," inflating: track_B/mesh_41.ply \n"," inflating: track_B/mesh_42.ply \n"," inflating: track_B/mesh_43.ply \n"," inflating: track_B/mesh_44.ply \n"," inflating: track_B/mesh_45.ply \n"," inflating: track_B/mesh_46.ply \n"," inflating: track_B/mesh_47.ply \n"," inflating: track_B/mesh_48.ply \n"," inflating: track_B/mesh_49.ply \n"," inflating: track_B/mesh_5.ply \n"," inflating: track_B/mesh_50.ply \n"," inflating: track_B/mesh_6.ply \n"," inflating: track_B/mesh_7.ply \n"," inflating: track_B/mesh_8.ply \n"," inflating: track_B/mesh_9.ply \n"," inflating: track_B/normal_1.npy \n"," inflating: track_B/normal_10.npy \n"," inflating: track_B/normal_11.npy \n"," inflating: track_B/normal_12.npy \n"," inflating: track_B/normal_13.npy \n"," inflating: track_B/normal_14.npy \n"," inflating: track_B/normal_15.npy \n"," inflating: track_B/normal_16.npy \n"," inflating: track_B/normal_17.npy \n"," inflating: track_B/normal_18.npy \n"," inflating: track_B/normal_19.npy \n"," inflating: track_B/normal_2.npy \n"," inflating: track_B/normal_20.npy \n"," inflating: track_B/normal_21.npy \n"," inflating: track_B/normal_22.npy \n"," inflating: track_B/normal_23.npy \n"," inflating: track_B/normal_24.npy \n"," inflating: track_B/normal_25.npy \n"," inflating: track_B/normal_26.npy \n"," inflating: track_B/normal_27.npy \n"," inflating: track_B/normal_28.npy \n"," inflating: track_B/normal_29.npy \n"," inflating: track_B/normal_3.npy \n"," inflating: track_B/normal_30.npy \n"," inflating: track_B/normal_31.npy \n"," inflating: track_B/normal_32.npy \n"," inflating: track_B/normal_33.npy \n"," inflating: track_B/normal_34.npy \n"," inflating: track_B/normal_35.npy \n"," inflating: track_B/normal_36.npy \n"," inflating: track_B/normal_37.npy \n"," inflating: track_B/normal_38.npy \n"," inflating: track_B/normal_39.npy \n"," inflating: track_B/normal_4.npy \n"," inflating: track_B/normal_40.npy \n"," inflating: track_B/normal_41.npy \n"," inflating: track_B/normal_42.npy \n"," inflating: track_B/normal_43.npy \n"," inflating: track_B/normal_44.npy \n"," inflating: track_B/normal_45.npy \n"," inflating: track_B/normal_46.npy \n"," inflating: track_B/normal_47.npy \n"," inflating: track_B/normal_48.npy \n"," inflating: track_B/normal_49.npy \n"," inflating: track_B/normal_5.npy \n"," inflating: track_B/normal_50.npy \n"," inflating: track_B/normal_6.npy \n"," inflating: track_B/normal_7.npy \n"," inflating: track_B/normal_8.npy \n"," inflating: track_B/normal_9.npy \n"," inflating: track_B/train_pressure_mean_std.txt \n","mkdir: cannot create directory ‘track_B_vtk’: File exists\n","mkdir: cannot create directory ‘data_centroid_track_B_vtk’: File exists\n","mkdir: cannot create directory ‘data_centroid_track_B_vtk_preprocessed_data’: File exists\n"]}],"source":["!mkdir -p train_track_B && unzip -o train_track_B.zip -d data_track_B/\n","!mkdir -p track_B && unzip -o track_B.zip\n","!mkdir track_B_vtk\n","!mkdir data_centroid_track_B_vtk\n","!mkdir data_centroid_track_B_vtk_preprocessed_data"]}],"metadata":{"colab":{"provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0} +{"cells":[{"cell_type":"markdown","metadata":{"id":"z0Sek0wtEs5n"},"source":["## 百度Baseline版本数据导入"]},{"cell_type":"code","execution_count":null,"metadata":{},"outputs":[],"source":["!mkdir Dataset\n","!cd 
Dataset"]},{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":255341,"status":"ok","timestamp":1720679943973,"user":{"displayName":"pei jian zeng","userId":"06013928868849686113"},"user_tz":-480},"id":"GTV_YDaxEsd3","outputId":"8554a8d9-ac54-49a7-c5d8-f56cd72953ba"},"outputs":[{"name":"stdout","output_type":"stream","text":["--2024-07-11 06:34:48-- https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\n","Resolving ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)... 36.110.192.178, 2409:8c04:1001:1203:0:ff:b0bb:4f27\n","Connecting to ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)|36.110.192.178|:443... connected.\n","HTTP request sent, awaiting response... 206 Partial Content\n","Length: 4740031429 (4.4G), 2358336404 (2.2G) remaining [application/octet-stream]\n","Saving to: ‘train_track_B.zip’\n","\n","train_track_B.zip 100%[++++++++++=========>] 4.41G 11.7MB/s in 3m 23s \n","\n","2024-07-11 06:38:12 (11.1 MB/s) - ‘train_track_B.zip’ saved [4740031429/4740031429]\n","\n","--2024-07-11 06:38:13-- https://ai-studio-online.bj.bcebos.com/v1/1638f9c292b9437bb46885186407a63e584856c91f9f4c18908b87abd46471e0?responseContentDisposition=attachment%3B%20filename%3Dtrack_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-05-05T03%3A02%3A25Z%2F-1%2F%2Fcfdfd6b6a9e096c761ee8e7d863d586741c69a9e6de89f9c3696706d35f8b265\n","Resolving ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)... 36.110.192.178, 2409:8c04:1001:1203:0:ff:b0bb:4f27\n","Connecting to ai-studio-online.bj.bcebos.com (ai-studio-online.bj.bcebos.com)|36.110.192.178|:443... connected.\n","HTTP request sent, awaiting response... 
200 OK\n","Length: 1012191818 (965M) [application/octet-stream]\n","Saving to: ‘track_B.zip’\n","\n","track_B.zip 100%[===================>] 965.30M 21.0MB/s in 50s \n","\n","2024-07-11 06:39:04 (19.5 MB/s) - ‘track_B.zip’ saved [1012191818/1012191818]\n","\n"]}],"source":["!wget --header=\"Host: ai-studio-online.bj.bcebos.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Referer: https://aistudio.baidu.com/\" \"https://ai-studio-online.bj.bcebos.com/v1/38e9adf0fce84527aad3558cc3e82d0e9a251aac4c934297afae9b74d9b3d1e9?responseContentDisposition=attachment%3B%20filename%3Dtrain_track_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-06-04T03%3A21%3A02Z%2F-1%2F%2Facd359add161bace603a52c7a268467406cb3c1889a7114bbb687de8002b55f6\" -c -O 'train_track_B.zip'\n","!wget --header=\"Host: ai-studio-online.bj.bcebos.com\" --header=\"User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0\" --header=\"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\" --header=\"Accept-Language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\" --header=\"Referer: https://aistudio.baidu.com/\" \"https://ai-studio-online.bj.bcebos.com/v1/1638f9c292b9437bb46885186407a63e584856c91f9f4c18908b87abd46471e0?responseContentDisposition=attachment%3B%20filename%3Dtrack_B.zip&authorization=bce-auth-v1%2F5cfe9a5e1454405eb2a975c43eace6ec%2F2024-05-05T03%3A02%3A25Z%2F-1%2F%2Fcfdfd6b6a9e096c761ee8e7d863d586741c69a9e6de89f9c3696706d35f8b265\" -c -O 'track_B.zip'"]},{"cell_type":"code","execution_count":2,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"collapsed":true,"executionInfo":{"elapsed":153639,"status":"ok","timestamp":1720680617913,"user":{"displayName":"pei jian zeng","userId":"06013928868849686113"},"user_tz":-480},"id":"OS4r3PcokLdA","outputId":"525b6316-634c-410d-d582-528aa5698819"},"outputs":[{"name":"stdout","output_type":"stream","text":["Archive: train_track_B.zip\n"," inflating: data_track_B/area_0002.npy \n"," inflating: data_track_B/area_0003.npy \n"," inflating: data_track_B/area_0004.npy \n"," inflating: data_track_B/area_0005.npy \n"," inflating: data_track_B/area_0006.npy \n"," inflating: data_track_B/area_0011.npy \n"," inflating: data_track_B/area_0012.npy \n"," inflating: data_track_B/area_0013.npy \n"," inflating: data_track_B/area_0015.npy \n"," inflating: data_track_B/area_0017.npy \n"," inflating: data_track_B/area_0018.npy \n"," inflating: data_track_B/area_0020.npy \n"," inflating: data_track_B/area_0021.npy \n"," inflating: data_track_B/area_0022.npy \n"," inflating: data_track_B/area_0023.npy \n"," inflating: data_track_B/area_0024.npy \n"," inflating: data_track_B/area_0026.npy \n"," inflating: data_track_B/area_0029.npy \n"," inflating: data_track_B/area_0030.npy \n"," inflating: data_track_B/area_0036.npy \n"," inflating: data_track_B/area_0037.npy \n"," inflating: data_track_B/area_0038.npy \n"," inflating: data_track_B/area_0039.npy \n"," inflating: data_track_B/area_0040.npy \n"," inflating: data_track_B/area_0041.npy \n"," inflating: 
data_track_B/area_0042.npy \n"," inflating: data_track_B/area_0043.npy \n"," inflating: data_track_B/area_0044.npy \n"," inflating: data_track_B/area_0048.npy \n"," inflating: data_track_B/area_0049.npy \n"," inflating: data_track_B/area_0051.npy \n"," inflating: data_track_B/area_0052.npy \n"," inflating: data_track_B/area_0055.npy \n"," inflating: data_track_B/area_0056.npy \n"," inflating: data_track_B/area_0057.npy \n"," inflating: data_track_B/area_0059.npy \n"," inflating: data_track_B/area_0062.npy \n"," inflating: data_track_B/area_0064.npy \n"," inflating: data_track_B/area_0066.npy \n"," inflating: data_track_B/area_0067.npy \n"," inflating: data_track_B/area_0068.npy \n"," inflating: data_track_B/area_0071.npy \n"," inflating: data_track_B/area_0074.npy \n"," inflating: data_track_B/area_0075.npy \n"," inflating: data_track_B/area_0077.npy \n"," inflating: data_track_B/area_0078.npy \n"," inflating: data_track_B/area_0080.npy \n"," inflating: data_track_B/area_0081.npy \n"," inflating: data_track_B/area_0082.npy \n"," inflating: data_track_B/area_0084.npy \n"," inflating: data_track_B/area_0085.npy \n"," inflating: data_track_B/area_0086.npy \n"," inflating: data_track_B/area_0087.npy \n"," inflating: data_track_B/area_0088.npy \n"," inflating: data_track_B/area_0089.npy \n"," inflating: data_track_B/area_0090.npy \n"," inflating: data_track_B/area_0092.npy \n"," inflating: data_track_B/area_0093.npy \n"," inflating: data_track_B/area_0094.npy \n"," inflating: data_track_B/area_0095.npy \n"," inflating: data_track_B/area_0097.npy \n"," inflating: data_track_B/area_0098.npy \n"," inflating: data_track_B/area_0100.npy \n"," inflating: data_track_B/area_0101.npy \n"," inflating: data_track_B/area_0102.npy \n"," inflating: data_track_B/area_0103.npy \n"," inflating: data_track_B/area_0104.npy \n"," inflating: data_track_B/area_0106.npy \n"," inflating: data_track_B/area_0107.npy \n"," inflating: data_track_B/area_0108.npy \n"," inflating: data_track_B/area_0109.npy \n"," inflating: data_track_B/area_0110.npy \n"," inflating: data_track_B/area_0113.npy \n"," inflating: data_track_B/area_0114.npy \n"," inflating: data_track_B/area_0115.npy \n"," inflating: data_track_B/area_0116.npy \n"," inflating: data_track_B/area_0117.npy \n"," inflating: data_track_B/area_0118.npy \n"," inflating: data_track_B/area_0119.npy \n"," inflating: data_track_B/area_0120.npy \n"," inflating: data_track_B/area_0121.npy \n"," inflating: data_track_B/area_0122.npy \n"," inflating: data_track_B/area_0124.npy \n"," inflating: data_track_B/area_0125.npy \n"," inflating: data_track_B/area_0126.npy \n"," inflating: data_track_B/area_0128.npy \n"," inflating: data_track_B/area_0129.npy \n"," inflating: data_track_B/area_0130.npy \n"," inflating: data_track_B/area_0131.npy \n"," inflating: data_track_B/area_0132.npy \n"," inflating: data_track_B/area_0133.npy \n"," inflating: data_track_B/area_0134.npy \n"," inflating: data_track_B/area_0135.npy \n"," inflating: data_track_B/area_0136.npy \n"," inflating: data_track_B/area_0138.npy \n"," inflating: data_track_B/area_0139.npy \n"," inflating: data_track_B/area_0140.npy \n"," inflating: data_track_B/area_0141.npy \n"," inflating: data_track_B/area_0143.npy \n"," inflating: data_track_B/area_0145.npy \n"," inflating: data_track_B/area_0146.npy \n"," inflating: data_track_B/area_0148.npy \n"," inflating: data_track_B/area_0149.npy \n"," inflating: data_track_B/area_0150.npy \n"," inflating: data_track_B/area_0151.npy \n"," inflating: data_track_B/area_0153.npy \n"," 
inflating: data_track_B/area_0154.npy \n"," inflating: data_track_B/area_0156.npy \n"," inflating: data_track_B/area_0157.npy \n"," inflating: data_track_B/area_0158.npy \n"," inflating: data_track_B/area_0161.npy \n"," inflating: data_track_B/area_0162.npy \n"," inflating: data_track_B/area_0163.npy \n"," inflating: data_track_B/area_0164.npy \n"," inflating: data_track_B/area_0166.npy \n"," inflating: data_track_B/area_0167.npy \n"," inflating: data_track_B/area_0168.npy \n"," inflating: data_track_B/area_0170.npy \n"," inflating: data_track_B/area_0171.npy \n"," inflating: data_track_B/area_0172.npy \n"," inflating: data_track_B/area_0174.npy \n"," inflating: data_track_B/area_0175.npy \n"," inflating: data_track_B/area_0183.npy \n"," inflating: data_track_B/area_0184.npy \n"," inflating: data_track_B/area_0185.npy \n"," inflating: data_track_B/area_0189.npy \n"," inflating: data_track_B/area_0190.npy \n"," inflating: data_track_B/area_0193.npy \n"," inflating: data_track_B/area_0194.npy \n"," inflating: data_track_B/area_0195.npy \n"," inflating: data_track_B/area_0197.npy \n"," inflating: data_track_B/area_0201.npy \n"," inflating: data_track_B/area_0203.npy \n"," inflating: data_track_B/area_0204.npy \n"," inflating: data_track_B/area_0205.npy \n"," inflating: data_track_B/area_0206.npy \n"," inflating: data_track_B/area_0208.npy \n"," inflating: data_track_B/area_0210.npy \n"," inflating: data_track_B/area_0211.npy \n"," inflating: data_track_B/area_0216.npy \n"," inflating: data_track_B/area_0217.npy \n"," inflating: data_track_B/area_0219.npy \n"," inflating: data_track_B/area_0220.npy \n"," inflating: data_track_B/area_0227.npy \n"," inflating: data_track_B/area_0228.npy \n"," inflating: data_track_B/area_0229.npy \n"," inflating: data_track_B/area_0232.npy \n"," inflating: data_track_B/area_0234.npy \n"," inflating: data_track_B/area_0235.npy \n"," inflating: data_track_B/area_0236.npy \n"," inflating: data_track_B/area_0238.npy \n"," inflating: data_track_B/area_0239.npy \n"," inflating: data_track_B/area_0240.npy \n"," inflating: data_track_B/area_0241.npy \n"," inflating: data_track_B/area_0245.npy \n"," inflating: data_track_B/area_0246.npy \n"," inflating: data_track_B/area_0247.npy \n"," inflating: data_track_B/area_0248.npy \n"," inflating: data_track_B/area_0249.npy \n"," inflating: data_track_B/area_0252.npy \n"," inflating: data_track_B/area_0253.npy \n"," inflating: data_track_B/area_0254.npy \n"," inflating: data_track_B/area_0256.npy \n"," inflating: data_track_B/area_0257.npy \n"," inflating: data_track_B/area_0259.npy \n"," inflating: data_track_B/area_0264.npy \n"," inflating: data_track_B/area_0265.npy \n"," inflating: data_track_B/area_0266.npy \n"," inflating: data_track_B/area_0268.npy \n"," inflating: data_track_B/area_0269.npy \n"," inflating: data_track_B/area_0271.npy \n"," inflating: data_track_B/area_0272.npy \n"," inflating: data_track_B/area_0273.npy \n"," inflating: data_track_B/area_0275.npy \n"," inflating: data_track_B/area_0276.npy \n"," inflating: data_track_B/area_0277.npy \n"," inflating: data_track_B/area_0279.npy \n"," inflating: data_track_B/area_0280.npy \n"," inflating: data_track_B/area_0281.npy \n"," inflating: data_track_B/area_0284.npy \n"," inflating: data_track_B/area_0285.npy \n"," inflating: data_track_B/area_0286.npy \n"," inflating: data_track_B/area_0288.npy \n"," inflating: data_track_B/area_0289.npy \n"," inflating: data_track_B/area_0290.npy \n"," inflating: data_track_B/area_0291.npy \n"," inflating: 
data_track_B/area_0294.npy \n"," inflating: data_track_B/area_0296.npy \n"," inflating: data_track_B/area_0297.npy \n"," inflating: data_track_B/area_0298.npy \n"," inflating: data_track_B/area_0301.npy \n"," inflating: data_track_B/area_0304.npy \n"," inflating: data_track_B/area_0305.npy \n"," inflating: data_track_B/area_0306.npy \n"," inflating: data_track_B/area_0307.npy \n"," inflating: data_track_B/area_0308.npy \n"," inflating: data_track_B/area_0310.npy \n"," inflating: data_track_B/area_0311.npy \n"," inflating: data_track_B/area_0314.npy \n"," inflating: data_track_B/area_0315.npy \n"," inflating: data_track_B/area_0316.npy \n"," inflating: data_track_B/area_0320.npy \n"," inflating: data_track_B/area_0321.npy \n"," inflating: data_track_B/area_0323.npy \n"," inflating: data_track_B/area_0324.npy \n"," inflating: data_track_B/area_0327.npy \n"," inflating: data_track_B/area_0330.npy \n"," inflating: data_track_B/area_0331.npy \n"," inflating: data_track_B/area_0332.npy \n"," inflating: data_track_B/area_0333.npy \n"," inflating: data_track_B/area_0334.npy \n"," inflating: data_track_B/area_0337.npy \n"," inflating: data_track_B/area_0338.npy \n"," inflating: data_track_B/area_0339.npy \n"," inflating: data_track_B/area_0340.npy \n"," inflating: data_track_B/area_0341.npy \n"," inflating: data_track_B/area_0342.npy \n"," inflating: data_track_B/area_0343.npy \n"," inflating: data_track_B/area_0344.npy \n"," inflating: data_track_B/area_0345.npy \n"," inflating: data_track_B/area_0346.npy \n"," inflating: data_track_B/area_0348.npy \n"," inflating: data_track_B/area_0349.npy \n"," inflating: data_track_B/area_0351.npy \n"," inflating: data_track_B/area_0352.npy \n"," inflating: data_track_B/area_0353.npy \n"," inflating: data_track_B/area_0354.npy \n"," inflating: data_track_B/area_0356.npy \n"," inflating: data_track_B/area_0357.npy \n"," inflating: data_track_B/area_0359.npy \n"," inflating: data_track_B/area_0360.npy \n"," inflating: data_track_B/area_0361.npy \n"," inflating: data_track_B/area_0363.npy \n"," inflating: data_track_B/area_0364.npy \n"," inflating: data_track_B/area_0365.npy \n"," inflating: data_track_B/area_0366.npy \n"," inflating: data_track_B/area_0367.npy \n"," inflating: data_track_B/area_0368.npy \n"," inflating: data_track_B/area_0369.npy \n"," inflating: data_track_B/area_0371.npy \n"," inflating: data_track_B/area_0373.npy \n"," inflating: data_track_B/area_0376.npy \n"," inflating: data_track_B/area_0377.npy \n"," inflating: data_track_B/area_0378.npy \n"," inflating: data_track_B/area_0379.npy \n"," inflating: data_track_B/area_0381.npy \n"," inflating: data_track_B/area_0382.npy \n"," inflating: data_track_B/area_0383.npy \n"," inflating: data_track_B/area_0384.npy \n"," inflating: data_track_B/area_0385.npy \n"," inflating: data_track_B/area_0387.npy \n"," inflating: data_track_B/area_0388.npy \n"," inflating: data_track_B/area_0389.npy \n"," inflating: data_track_B/area_0392.npy \n"," inflating: data_track_B/area_0393.npy \n"," inflating: data_track_B/area_0394.npy \n"," inflating: data_track_B/area_0395.npy \n"," inflating: data_track_B/area_0396.npy \n"," inflating: data_track_B/area_0398.npy \n"," inflating: data_track_B/area_0399.npy \n"," inflating: data_track_B/area_0400.npy \n"," inflating: data_track_B/area_0401.npy \n"," inflating: data_track_B/area_0402.npy \n"," inflating: data_track_B/area_0403.npy \n"," inflating: data_track_B/area_0404.npy \n"," inflating: data_track_B/area_0405.npy \n"," inflating: data_track_B/area_0407.npy \n"," 
inflating: data_track_B/area_0408.npy \n"," inflating: data_track_B/area_0409.npy \n"," inflating: data_track_B/area_0410.npy \n"," inflating: data_track_B/area_0411.npy \n"," inflating: data_track_B/area_0413.npy \n"," inflating: data_track_B/area_0416.npy \n"," inflating: data_track_B/area_0417.npy \n"," inflating: data_track_B/area_0421.npy \n"," inflating: data_track_B/area_0422.npy \n"," inflating: data_track_B/area_0423.npy \n"," inflating: data_track_B/area_0424.npy \n"," inflating: data_track_B/area_0425.npy \n"," inflating: data_track_B/area_0428.npy \n"," inflating: data_track_B/area_0429.npy \n"," inflating: data_track_B/area_0430.npy \n"," inflating: data_track_B/area_0431.npy \n"," inflating: data_track_B/area_0432.npy \n"," inflating: data_track_B/area_0435.npy \n"," inflating: data_track_B/area_0438.npy \n"," inflating: data_track_B/area_0439.npy \n"," inflating: data_track_B/area_0441.npy \n"," inflating: data_track_B/area_0444.npy \n"," inflating: data_track_B/area_0445.npy \n"," inflating: data_track_B/area_0449.npy \n"," inflating: data_track_B/area_0450.npy \n"," inflating: data_track_B/area_0451.npy \n"," inflating: data_track_B/area_0452.npy \n"," inflating: data_track_B/area_0453.npy \n"," inflating: data_track_B/area_0456.npy \n"," inflating: data_track_B/area_0457.npy \n"," inflating: data_track_B/area_0458.npy \n"," inflating: data_track_B/area_0459.npy \n"," inflating: data_track_B/area_0460.npy \n"," inflating: data_track_B/area_0461.npy \n"," inflating: data_track_B/area_0463.npy \n"," inflating: data_track_B/area_0464.npy \n"," inflating: data_track_B/area_0465.npy \n"," inflating: data_track_B/area_0467.npy \n"," inflating: data_track_B/area_0469.npy \n"," inflating: data_track_B/area_0471.npy \n"," inflating: data_track_B/area_0472.npy \n"," inflating: data_track_B/area_0474.npy \n"," inflating: data_track_B/area_0475.npy \n"," inflating: data_track_B/area_0477.npy \n"," inflating: data_track_B/area_0478.npy \n"," inflating: data_track_B/area_0479.npy \n"," inflating: data_track_B/area_0480.npy \n"," inflating: data_track_B/area_0481.npy \n"," inflating: data_track_B/area_0482.npy \n"," inflating: data_track_B/area_0485.npy \n"," inflating: data_track_B/area_0486.npy \n"," inflating: data_track_B/area_0487.npy \n"," inflating: data_track_B/area_0488.npy \n"," inflating: data_track_B/area_0489.npy \n"," inflating: data_track_B/area_0492.npy \n"," inflating: data_track_B/area_0493.npy \n"," inflating: data_track_B/area_0494.npy \n"," inflating: data_track_B/area_0497.npy \n"," inflating: data_track_B/area_0498.npy \n"," inflating: data_track_B/area_0499.npy \n"," inflating: data_track_B/area_0501.npy \n"," inflating: data_track_B/area_0502.npy \n"," inflating: data_track_B/area_0503.npy \n"," inflating: data_track_B/area_0504.npy \n"," inflating: data_track_B/area_0507.npy \n"," inflating: data_track_B/area_0508.npy \n"," inflating: data_track_B/area_0509.npy \n"," inflating: data_track_B/area_0513.npy \n"," inflating: data_track_B/area_0514.npy \n"," inflating: data_track_B/area_0515.npy \n"," inflating: data_track_B/area_0517.npy \n"," inflating: data_track_B/area_0518.npy \n"," inflating: data_track_B/area_0519.npy \n"," inflating: data_track_B/area_0520.npy \n"," inflating: data_track_B/area_0521.npy \n"," inflating: data_track_B/area_0522.npy \n"," inflating: data_track_B/area_0523.npy \n"," inflating: data_track_B/area_0524.npy \n"," inflating: data_track_B/area_0525.npy \n"," inflating: data_track_B/area_0526.npy \n"," inflating: 
data_track_B/area_0527.npy \n"," inflating: data_track_B/area_0528.npy \n"," inflating: data_track_B/area_0529.npy \n"," inflating: data_track_B/area_0530.npy \n"," inflating: data_track_B/area_0531.npy \n"," inflating: data_track_B/area_0534.npy \n"," inflating: data_track_B/area_0535.npy \n"," inflating: data_track_B/area_0536.npy \n"," inflating: data_track_B/area_0538.npy \n"," inflating: data_track_B/area_0541.npy \n"," inflating: data_track_B/area_0542.npy \n"," inflating: data_track_B/area_0544.npy \n"," inflating: data_track_B/area_0545.npy \n"," inflating: data_track_B/area_0546.npy \n"," inflating: data_track_B/area_0547.npy \n"," inflating: data_track_B/area_0550.npy \n"," inflating: data_track_B/area_0551.npy \n"," inflating: data_track_B/area_0553.npy \n"," inflating: data_track_B/area_0555.npy \n"," inflating: data_track_B/area_0557.npy \n"," inflating: data_track_B/area_0558.npy \n"," inflating: data_track_B/area_0561.npy \n"," inflating: data_track_B/area_0563.npy \n"," inflating: data_track_B/area_0564.npy \n"," inflating: data_track_B/area_0565.npy \n"," inflating: data_track_B/area_0567.npy \n"," inflating: data_track_B/area_0568.npy \n"," inflating: data_track_B/area_0571.npy \n"," inflating: data_track_B/area_0574.npy \n"," inflating: data_track_B/area_0576.npy \n"," inflating: data_track_B/area_0579.npy \n"," inflating: data_track_B/area_0580.npy \n"," inflating: data_track_B/area_0582.npy \n"," inflating: data_track_B/area_0584.npy \n"," inflating: data_track_B/area_0585.npy \n"," inflating: data_track_B/area_0588.npy \n"," inflating: data_track_B/area_0589.npy \n"," inflating: data_track_B/area_0590.npy \n"," inflating: data_track_B/area_0591.npy \n"," inflating: data_track_B/area_0592.npy \n"," inflating: data_track_B/area_0593.npy \n"," inflating: data_track_B/area_0594.npy \n"," inflating: data_track_B/area_0595.npy \n"," inflating: data_track_B/area_0596.npy \n"," inflating: data_track_B/area_0597.npy \n"," inflating: data_track_B/area_0598.npy \n"," inflating: data_track_B/area_0600.npy \n"," inflating: data_track_B/area_0602.npy \n"," inflating: data_track_B/area_0605.npy \n"," inflating: data_track_B/area_0608.npy \n"," inflating: data_track_B/area_0609.npy \n"," inflating: data_track_B/area_0611.npy \n"," inflating: data_track_B/area_0612.npy \n"," inflating: data_track_B/area_0613.npy \n"," inflating: data_track_B/area_0614.npy \n"," inflating: data_track_B/area_0618.npy \n"," inflating: data_track_B/area_0619.npy \n"," inflating: data_track_B/area_0620.npy \n"," inflating: data_track_B/area_0621.npy \n"," inflating: data_track_B/area_0622.npy \n"," inflating: data_track_B/area_0623.npy \n"," inflating: data_track_B/area_0624.npy \n"," inflating: data_track_B/area_0625.npy \n"," inflating: data_track_B/area_0627.npy \n"," inflating: data_track_B/area_0628.npy \n"," inflating: data_track_B/area_0629.npy \n"," inflating: data_track_B/area_0630.npy \n"," inflating: data_track_B/area_0631.npy \n"," inflating: data_track_B/area_0632.npy \n"," inflating: data_track_B/area_0633.npy \n"," inflating: data_track_B/area_0634.npy \n"," inflating: data_track_B/area_0635.npy \n"," inflating: data_track_B/area_0637.npy \n"," inflating: data_track_B/area_0638.npy \n"," inflating: data_track_B/area_0639.npy \n"," inflating: data_track_B/area_0640.npy \n"," inflating: data_track_B/area_0641.npy \n"," inflating: data_track_B/area_0643.npy \n"," inflating: data_track_B/area_0644.npy \n"," inflating: data_track_B/area_0645.npy \n"," inflating: data_track_B/area_0646.npy \n"," 
[... unzip output truncated: remaining data_track_B/area_*.npy, data_track_B/centroid_*.npy and data_track_B/press_*.npy entries (sample indices through 0745) ...]
[... unzip output for track_B.zip truncated: track_B/area_*, centroid_*, info_*, normal_* (.npy, indices 1-50), mesh_1-50.ply, plus area_bounds.txt, global_bounds.txt, info_bounds.txt and train_pressure_mean_std.txt ...]
"mkdir: cannot create directory ‘track_B_vtk’: File exists\n","mkdir: cannot create directory ‘data_centroid_track_B_vtk’: File exists\n","mkdir: cannot create directory ‘data_centroid_track_B_vtk_preprocessed_data’: File exists\n"]}],"source":["!mkdir -p train_track_B && unzip -o train_track_B.zip -d data_track_B/\n","!mkdir -p track_B && unzip -o track_B.zip\n","!mkdir track_B_vtk\n","!mkdir data_centroid_track_B_vtk\n","!mkdir data_centroid_track_B_vtk_preprocessed_data"]}],"metadata":{"colab":{"provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
diff --git a/jointContribution/IJCAI_2024/zhongzaicanyu/infer.py b/jointContribution/IJCAI_2024/zhongzaicanyu/infer.py
index 47cb1a9df1..0dbbb8224a 100644
--- a/jointContribution/IJCAI_2024/zhongzaicanyu/infer.py
+++ b/jointContribution/IJCAI_2024/zhongzaicanyu/infer.py
@@ -1,147 +1,147 @@
-import argparse
-import os
-import random
-import re
-import time
-
-import numpy as np
-import paddle
-from data_process import data_process
-from dataset import B_load_train_val_fold
-from dataset import GraphDataset
-from dataset import get_samples
-from model import Model
-
-
-def set_seed(seed):
-    np.random.seed(seed)
-    paddle.seed(seed=seed)
-    random.seed(seed)
-
-
-def parse_args():
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--data_dir",
-        default="./Dataset/data_centroid_track_B_vtk",
-    )
-    parser.add_argument(
-        "--test_data_dir",
-        default="./Dataset/track_B_vtk",
-    )
-    parser.add_argument(
-        "--save_dir",
-        default="./Dataset/data_centroid_track_B_vtk_preprocessed_data",
-    )
-    parser.add_argument("--fold_id", default=1, type=int)
-    parser.add_argument("--gpu", default=0, type=int)
-    parser.add_argument("--cfd_model", default="Transolver", type=str)
-    parser.add_argument("--cfd_mesh", default=True)
-    parser.add_argument("--r", default=0.2, type=float)
-    parser.add_argument("--val_iter", default=1, type=int)
-    parser.add_argument("--lr", default=1e-05, type=float)
-    parser.add_argument("--batch_size", default=1, type=int)
-    parser.add_argument("--weight", default=0.5, type=float)
-    parser.add_argument("--nb_epochs", default=400, type=int)
-    parser.add_argument("--preprocessed", default=0, type=int)
-    parser.add_argument("--num_workers", default=4, type=int)
-    parser.add_argument("--train_split", default=2, type=int)
-    parser.add_argument("--val_split", default=3, type=int)
-    parser.add_argument("-f", help="a dummy argument to fool ipython", default="1")
-    args = parser.parse_args()
-    print(args)
-    return args
-
-
-if __name__ == "__main__":
-    print(
-        "Attention: Please run and only run `data_process()` at first time in `infer.py`. "
-        "And change path in the file before run it."
-    )
-    data_process()
-
-    # load setting
-    set_seed(0)
-    args = parse_args()
-    print(args)
-    hparams = {
-        "lr": args.lr,
-        "batch_size": args.batch_size,
-        "nb_epochs": args.nb_epochs,
-        "num_workers": args.num_workers,
-    }
-    n_gpu = paddle.device.cuda.device_count()
-    use_cuda = 0 <= args.gpu < n_gpu and paddle.device.cuda.device_count() >= 1
-    device = str(f"cuda:{args.gpu}" if use_cuda else "cpu").replace("cuda", "gpu")
-
-    # load data
-    _, _, coef_norm = B_load_train_val_fold(args, preprocessed=args.preprocessed)
-    samples = get_samples(args.test_data_dir)
-    total_samples = len(samples)
-    np.random.shuffle(samples)
-    testlst = samples[:50]
-    test_ds = GraphDataset(
-        testlst,
-        use_cfd_mesh=args.cfd_mesh,
-        r=args.r,
-        root=args.test_data_dir,
-        norm=True,
-        coef_norm=coef_norm,
-    )
-    test_loader = paddle.io.DataLoader(
-        test_ds, batch_size=1, collate_fn=test_ds.collate_fn
-    )
-
-    # load model
-    if args.cfd_model == "Transolver":
-        model = Model(
-            n_hidden=256,
-            n_layers=8,
-            space_dim=6,
-            fun_dim=0,
-            n_head=8,
-            act="gelu",
-            mlp_ratio=2,
-            out_dim=4,
-            slice_num=32,
-            unified_pos=False,
-        ).to(device)
-    else:
-        print("inference model use Transolver, please set 'cfd_model' to 'Transolver'")
-
-    model_path = "./pretrained_checkpoint.pdparams"
-    model.set_state_dict(state_dict=paddle.load(path=model_path))
-    model.to(device)
-
-    if not os.path.exists("./results"):
-        os.makedirs("./results")
-
-    # infer
-    with paddle.no_grad():
-        model.eval()
-        times = []
-        index = 0
-        for cfd_data, geom in test_loader:
-            mesh_file = testlst[index]
-            match = re.search("mesh_(\\d+)\\.vtk", mesh_file)
-            if match:
-                mesh_index = match.group(1)
-                print(f"Processing mesh index: {mesh_index}")
-            else:
-                raise ValueError(f"Invalid mesh file format: {mesh_file}")
-            tic = time.time()
-            out = model((cfd_data, geom))
-            toc = time.time()
-            press_output = out[cfd_data.surf, -1]
-            if coef_norm is not None:
-                mean_out = paddle.to_tensor(data=coef_norm[2]).to(device)
-                std_out = paddle.to_tensor(data=coef_norm[3]).to(device)
-                press_output = press_output * std_out[-1] + mean_out[-1]
-            press_output = press_output.detach().cpu().numpy()
-            np.save(
-                "./results/" + "press" + "_" + f"{mesh_index}.npy",
-                press_output,
-            )
-            times.append(toc - tic)
-            index += 1
-        print("time:", np.mean(times))
+import argparse
+import os
+import random
+import re
+import time
+
+import numpy as np
+import paddle
+from data_process import data_process
+from dataset import B_load_train_val_fold
+from dataset import GraphDataset
+from dataset import get_samples
+from model import Model
+
+
+def set_seed(seed):
+    np.random.seed(seed)
+    paddle.seed(seed=seed)
+    random.seed(seed)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--data_dir",
+        default="./Dataset/data_centroid_track_B_vtk",
+    )
+    parser.add_argument(
+        "--test_data_dir",
+        default="./Dataset/track_B_vtk",
+    )
+    parser.add_argument(
+        "--save_dir",
+        default="./Dataset/data_centroid_track_B_vtk_preprocessed_data",
+    )
+    parser.add_argument("--fold_id", default=1, type=int)
+    parser.add_argument("--gpu", default=0, type=int)
+    parser.add_argument("--cfd_model", default="Transolver", type=str)
+    parser.add_argument("--cfd_mesh", default=True)
+    parser.add_argument("--r", default=0.2, type=float)
+    parser.add_argument("--val_iter", default=1, type=int)
+    parser.add_argument("--lr", default=1e-05, type=float)
+    parser.add_argument("--batch_size", default=1, type=int)
+    parser.add_argument("--weight", default=0.5, type=float)
+    parser.add_argument("--nb_epochs", default=400, type=int)
+    parser.add_argument("--preprocessed", default=0, type=int)
+    parser.add_argument("--num_workers", default=4, type=int)
+    parser.add_argument("--train_split", default=2, type=int)
+    parser.add_argument("--val_split", default=3, type=int)
+    parser.add_argument("-f", help="a dummy argument to fool ipython", default="1")
+    args = parser.parse_args()
+    print(args)
+    return args
+
+
+if __name__ == "__main__":
+    print(
+        "Attention: Please run and only run `data_process()` at first time in `infer.py`. "
+        "And change path in the file before run it."
+    )
+    data_process()
+
+    # load setting
+    set_seed(0)
+    args = parse_args()
+    print(args)
+    hparams = {
+        "lr": args.lr,
+        "batch_size": args.batch_size,
+        "nb_epochs": args.nb_epochs,
+        "num_workers": args.num_workers,
+    }
+    n_gpu = paddle.device.cuda.device_count()
+    use_cuda = 0 <= args.gpu < n_gpu and paddle.device.cuda.device_count() >= 1
+    device = str(f"cuda:{args.gpu}" if use_cuda else "cpu").replace("cuda", "gpu")
+
+    # load data
+    _, _, coef_norm = B_load_train_val_fold(args, preprocessed=args.preprocessed)
+    samples = get_samples(args.test_data_dir)
+    total_samples = len(samples)
+    np.random.shuffle(samples)
+    testlst = samples[:50]
+    test_ds = GraphDataset(
+        testlst,
+        use_cfd_mesh=args.cfd_mesh,
+        r=args.r,
+        root=args.test_data_dir,
+        norm=True,
+        coef_norm=coef_norm,
+    )
+    test_loader = paddle.io.DataLoader(
+        test_ds, batch_size=1, collate_fn=test_ds.collate_fn
+    )
+
+    # load model
+    if args.cfd_model == "Transolver":
+        model = Model(
+            n_hidden=256,
+            n_layers=8,
+            space_dim=6,
+            fun_dim=0,
+            n_head=8,
+            act="gelu",
+            mlp_ratio=2,
+            out_dim=4,
+            slice_num=32,
+            unified_pos=False,
+        ).to(device)
+    else:
+        print("inference model use Transolver, please set 'cfd_model' to 'Transolver'")
+
+    model_path = "./pretrained_checkpoint.pdparams"
+    model.set_state_dict(state_dict=paddle.load(path=model_path))
+    model.to(device)
+
+    if not os.path.exists("./results"):
+        os.makedirs("./results")
+
+    # infer
+    with paddle.no_grad():
+        model.eval()
+        times = []
+        index = 0
+        for cfd_data, geom in test_loader:
+            mesh_file = testlst[index]
+            match = re.search("mesh_(\\d+)\\.vtk", mesh_file)
+            if match:
+                mesh_index = match.group(1)
+                print(f"Processing mesh index: {mesh_index}")
+            else:
+                raise ValueError(f"Invalid mesh file format: {mesh_file}")
+            tic = time.time()
+            out = model((cfd_data, geom))
+            toc = time.time()
+            press_output = out[cfd_data.surf, -1]
+            if coef_norm is not None:
+                mean_out = paddle.to_tensor(data=coef_norm[2]).to(device)
+                std_out = paddle.to_tensor(data=coef_norm[3]).to(device)
+                press_output = press_output * std_out[-1] + mean_out[-1]
+            press_output = press_output.detach().cpu().numpy()
+            np.save(
+                "./results/" + "press" + "_" + f"{mesh_index}.npy",
+                press_output,
+            )
+            times.append(toc - tic)
+            index += 1
+        print("time:", np.mean(times))
diff --git a/jointContribution/IJCAI_2024/zhongzaicanyu/model.py b/jointContribution/IJCAI_2024/zhongzaicanyu/model.py
index 54fc72f1d5..4dacfeb695 100644
--- a/jointContribution/IJCAI_2024/zhongzaicanyu/model.py
+++ b/jointContribution/IJCAI_2024/zhongzaicanyu/model.py
@@ -1,296 +1,296 @@
-import numpy as np
-import paddle
-import utils.paddle_aux  # NOQA
-from einops import rearrange
-
-import ppsci
-
-ACTIVATION = {
-    "gelu": paddle.nn.GELU,
-    "tanh": paddle.nn.Tanh,
-    "sigmoid": paddle.nn.Sigmoid,
-    "relu": paddle.nn.ReLU,
-    "leaky_relu": paddle.nn.LeakyReLU(negative_slope=0.1),
-    "softplus": paddle.nn.Softplus,
-    "ELU": paddle.nn.ELU,
-    "silu": paddle.nn.Silu,
-}
-
-
-class Physics_Attention_1D(paddle.nn.Layer):
-    def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, slice_num=64):
-        super().__init__()
-        inner_dim = dim_head * heads
-        self.dim_head = dim_head
-        self.heads = heads
-        self.scale = dim_head**-0.5
-        self.softmax = paddle.nn.Softmax(axis=-1)
-        self.dropout = paddle.nn.Dropout(p=dropout)
-        out_0 = paddle.create_parameter(
-            shape=(paddle.ones(shape=[1, heads, 1, 1]) * 0.5).shape,
-            dtype=(paddle.ones(shape=[1, heads, 1, 1]) * 0.5).numpy().dtype,
-            default_initializer=paddle.nn.initializer.Assign(
-                paddle.ones(shape=[1, heads, 1, 1]) * 0.5
-            ),
-        )
-        out_0.stop_gradient = not True
-        self.temperature = out_0
-        self.in_project_x = paddle.nn.Linear(in_features=dim, out_features=inner_dim)
-        self.in_project_fx = paddle.nn.Linear(in_features=dim, out_features=inner_dim)
-        self.in_project_slice = paddle.nn.Linear(
-            in_features=dim_head, out_features=slice_num
-        )
-        for ly in [self.in_project_slice]:
-            init_Orthogonal = paddle.nn.initializer.Orthogonal()
-            init_Orthogonal(ly.weight)
-        self.to_q = paddle.nn.Linear(
-            in_features=dim_head, out_features=dim_head, bias_attr=False
-        )
-        self.to_k = paddle.nn.Linear(
-            in_features=dim_head, out_features=dim_head, bias_attr=False
-        )
-        self.to_v = paddle.nn.Linear(
-            in_features=dim_head, out_features=dim_head, bias_attr=False
-        )
-        self.to_out = paddle.nn.Sequential(
-            paddle.nn.Linear(in_features=inner_dim, out_features=dim),
-            paddle.nn.Dropout(p=dropout),
-        )
-
-    def forward(self, x):
-        B, N, C = tuple(x.shape)
-        fx_mid = (
-            self.in_project_fx(x)
-            .reshape(B, N, self.heads, self.dim_head)
-            .transpose(perm=[0, 2, 1, 3])
-        )
-        x_mid = (
-            self.in_project_x(x)
-            .reshape(B, N, self.heads, self.dim_head)
-            .transpose(perm=[0, 2, 1, 3])
-        )
-        slice_weights = self.softmax(self.in_project_slice(x_mid) / self.temperature)
-        slice_norm = slice_weights.sum(axis=2)
-        slice_token = paddle.einsum("bhnc,bhng->bhgc", fx_mid, slice_weights)
-        slice_token = slice_token / (slice_norm + 1e-05)[:, :, :, None].repeat(
-            1, 1, 1, self.dim_head
-        )
-        q_slice_token = self.to_q(slice_token)
-        k_slice_token = self.to_k(slice_token)
-        v_slice_token = self.to_v(slice_token)
-        x = k_slice_token
-        perm_0 = list(range(x.ndim))
-        perm_0[-1] = -2
-        perm_0[-2] = -1
-        dots = paddle.matmul(x=q_slice_token, y=x.transpose(perm=perm_0)) * self.scale
-        attn = self.softmax(dots)
-        attn = self.dropout(attn)
-        out_slice_token = paddle.matmul(x=attn, y=v_slice_token)
-        out_x = paddle.einsum("bhgc,bhng->bhnc", out_slice_token, slice_weights)
-        out_x = rearrange(out_x, "b h n d -> b n (h d)")
-        return self.to_out(out_x)
-
-
-class MLP(paddle.nn.Layer):
-    def __init__(self, n_input, n_hidden, n_output, n_layers=1, act="gelu", res=True):
-        super(MLP, self).__init__()
-        if act in ACTIVATION.keys():
-            act = ACTIVATION[act]
-        else:
-            raise NotImplementedError
-        self.n_input = n_input
-        self.n_hidden = n_hidden
-        self.n_output = n_output
-        self.n_layers = n_layers
-        self.res = res
-        self.linear_pre = paddle.nn.Sequential(
-            paddle.nn.Linear(in_features=n_input, out_features=n_hidden), act()
-        )
-        self.linear_post = paddle.nn.Linear(in_features=n_hidden, out_features=n_output)
-        self.linears = paddle.nn.LayerList(
-            sublayers=[
-                paddle.nn.Sequential(
-                    paddle.nn.Linear(in_features=n_hidden, out_features=n_hidden), act()
-                )
-                for _ in range(n_layers)
-            ]
-        )
-
-    def forward(self, x):
-        x = self.linear_pre(x)
-        for i in range(self.n_layers):
-            if self.res:
-                x = self.linears[i](x) + x
-            else:
-                x =
self.linears[i](x) - x = self.linear_post(x) - return x - - -class Transolver_block(paddle.nn.Layer): - """Transformer encoder block.""" - - def __init__( - self, - num_heads: int, - hidden_dim: int, - dropout: float, - act="gelu", - mlp_ratio=4, - last_layer=False, - out_dim=1, - slice_num=32, - ): - super().__init__() - self.last_layer = last_layer - self.ln_1 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) - self.Attn = Physics_Attention_1D( - hidden_dim, - heads=num_heads, - dim_head=hidden_dim // num_heads, - dropout=dropout, - slice_num=slice_num, - ) - self.ln_2 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) - self.mlp = MLP( - hidden_dim, - hidden_dim * mlp_ratio, - hidden_dim, - n_layers=0, - res=False, - act=act, - ) - if self.last_layer: - self.ln_3 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) - self.mlp2 = paddle.nn.Linear(in_features=hidden_dim, out_features=out_dim) - - def forward(self, fx): - fx = self.Attn(self.ln_1(fx)) + fx - fx = self.mlp(self.ln_2(fx)) + fx - if self.last_layer: - fx = self.mlp2(self.ln_3(fx)) - return fx - - -class Model(paddle.nn.Layer): - def __init__( - self, - space_dim=1, - n_layers=5, - n_hidden=256, - dropout=0, - n_head=8, - act="gelu", - mlp_ratio=1, - fun_dim=1, - out_dim=1, - slice_num=32, - ref=8, - unified_pos=False, - ): - super(Model, self).__init__() - self.__name__ = "UniPDE_3D" - self.ref = ref - self.unified_pos = unified_pos - if self.unified_pos: - self.preprocess = MLP( - fun_dim + self.ref * self.ref * self.ref, - n_hidden * 2, - n_hidden, - n_layers=0, - res=False, - act=act, - ) - else: - self.preprocess = MLP( - fun_dim + space_dim, - n_hidden * 2, - n_hidden, - n_layers=0, - res=False, - act=act, - ) - self.n_hidden = n_hidden - self.space_dim = space_dim - self.blocks = paddle.nn.LayerList( - sublayers=[ - Transolver_block( - num_heads=n_head, - hidden_dim=n_hidden, - dropout=dropout, - act=act, - mlp_ratio=mlp_ratio, - out_dim=out_dim, - slice_num=slice_num, - last_layer=_ == n_layers - 1, - ) - for _ in range(n_layers) - ] - ) - self.initialize_weights() - param = 1 / n_hidden * paddle.rand(shape=(n_hidden,), dtype="float32") - out_1 = paddle.create_parameter( - shape=param.shape, - dtype=param.numpy().dtype, - default_initializer=paddle.nn.initializer.Assign(param), - ) - out_1.stop_gradient = not True - self.placeholder = out_1 - - def initialize_weights(self): - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, paddle.nn.Linear): - m.weight = ppsci.utils.initializer.trunc_normal_(m.weight, std=0.02) - if isinstance(m, paddle.nn.Linear) and m.bias is not None: - init_Constant = paddle.nn.initializer.Constant(value=0) - init_Constant(m.bias) - elif isinstance(m, (paddle.nn.LayerNorm, paddle.nn.BatchNorm1D)): - init_Constant = paddle.nn.initializer.Constant(value=0) - init_Constant(m.bias) - init_Constant = paddle.nn.initializer.Constant(value=1.0) - init_Constant(m.weight) - - def get_grid(self, my_pos): - batchsize = tuple(my_pos.shape)[0] - gridx = paddle.to_tensor(data=np.linspace(-1.5, 1.5, self.ref), dtype="float32") - gridx = gridx.reshape(1, self.ref, 1, 1, 1).repeat( - [batchsize, 1, self.ref, self.ref, 1] - ) - gridy = paddle.to_tensor(data=np.linspace(0, 2, self.ref), dtype="float32") - gridy = gridy.reshape(1, 1, self.ref, 1, 1).repeat( - [batchsize, self.ref, 1, self.ref, 1] - ) - gridz = paddle.to_tensor(data=np.linspace(-4, 4, self.ref), dtype="float32") - gridz = gridz.reshape(1, 1, 1, self.ref, 1).repeat( - [batchsize, self.ref, self.ref, 1, 1] - ) - grid_ref = 
( - paddle.concat(x=(gridx, gridy, gridz), axis=-1) - .cuda(blocking=True) - .reshape(batchsize, self.ref**3, 3) - ) - pos = paddle.sqrt( - x=paddle.sum( - x=(my_pos[:, :, None, :] - grid_ref[:, None, :, :]) ** 2, axis=-1 - ) - ).reshape(batchsize, tuple(my_pos.shape)[1], self.ref * self.ref * self.ref) - return pos - - def forward(self, data): - cfd_data, geom_data = data - x, fx, _ = cfd_data.x, None, None - x = x[None, :, :] - if self.unified_pos: - new_pos = self.get_grid(cfd_data.pos[None, :, :]) - x = paddle.concat(x=(x, new_pos), axis=-1) - if fx is not None: - fx = paddle.concat(x=(x, fx), axis=-1) - fx = self.preprocess(fx) - else: - fx = self.preprocess(x) - fx = fx + self.placeholder[None, None, :] - for block in self.blocks: - fx = block(fx) - return fx[0] +import numpy as np +import paddle +import utils.paddle_aux # NOQA +from einops import rearrange + +import ppsci + +ACTIVATION = { + "gelu": paddle.nn.GELU, + "tanh": paddle.nn.Tanh, + "sigmoid": paddle.nn.Sigmoid, + "relu": paddle.nn.ReLU, + "leaky_relu": paddle.nn.LeakyReLU(negative_slope=0.1), + "softplus": paddle.nn.Softplus, + "ELU": paddle.nn.ELU, + "silu": paddle.nn.Silu, +} + + +class Physics_Attention_1D(paddle.nn.Layer): + def __init__(self, dim, heads=8, dim_head=64, dropout=0.0, slice_num=64): + super().__init__() + inner_dim = dim_head * heads + self.dim_head = dim_head + self.heads = heads + self.scale = dim_head**-0.5 + self.softmax = paddle.nn.Softmax(axis=-1) + self.dropout = paddle.nn.Dropout(p=dropout) + out_0 = paddle.create_parameter( + shape=(paddle.ones(shape=[1, heads, 1, 1]) * 0.5).shape, + dtype=(paddle.ones(shape=[1, heads, 1, 1]) * 0.5).numpy().dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.ones(shape=[1, heads, 1, 1]) * 0.5 + ), + ) + out_0.stop_gradient = not True + self.temperature = out_0 + self.in_project_x = paddle.nn.Linear(in_features=dim, out_features=inner_dim) + self.in_project_fx = paddle.nn.Linear(in_features=dim, out_features=inner_dim) + self.in_project_slice = paddle.nn.Linear( + in_features=dim_head, out_features=slice_num + ) + for ly in [self.in_project_slice]: + init_Orthogonal = paddle.nn.initializer.Orthogonal() + init_Orthogonal(ly.weight) + self.to_q = paddle.nn.Linear( + in_features=dim_head, out_features=dim_head, bias_attr=False + ) + self.to_k = paddle.nn.Linear( + in_features=dim_head, out_features=dim_head, bias_attr=False + ) + self.to_v = paddle.nn.Linear( + in_features=dim_head, out_features=dim_head, bias_attr=False + ) + self.to_out = paddle.nn.Sequential( + paddle.nn.Linear(in_features=inner_dim, out_features=dim), + paddle.nn.Dropout(p=dropout), + ) + + def forward(self, x): + B, N, C = tuple(x.shape) + fx_mid = ( + self.in_project_fx(x) + .reshape(B, N, self.heads, self.dim_head) + .transpose(perm=[0, 2, 1, 3]) + ) + x_mid = ( + self.in_project_x(x) + .reshape(B, N, self.heads, self.dim_head) + .transpose(perm=[0, 2, 1, 3]) + ) + slice_weights = self.softmax(self.in_project_slice(x_mid) / self.temperature) + slice_norm = slice_weights.sum(axis=2) + slice_token = paddle.einsum("bhnc,bhng->bhgc", fx_mid, slice_weights) + slice_token = slice_token / (slice_norm + 1e-05)[:, :, :, None].repeat( + 1, 1, 1, self.dim_head + ) + q_slice_token = self.to_q(slice_token) + k_slice_token = self.to_k(slice_token) + v_slice_token = self.to_v(slice_token) + x = k_slice_token + perm_0 = list(range(x.ndim)) + perm_0[-1] = -2 + perm_0[-2] = -1 + dots = paddle.matmul(x=q_slice_token, y=x.transpose(perm=perm_0)) * self.scale + attn = self.softmax(dots) + attn = 
self.dropout(attn) + out_slice_token = paddle.matmul(x=attn, y=v_slice_token) + out_x = paddle.einsum("bhgc,bhng->bhnc", out_slice_token, slice_weights) + out_x = rearrange(out_x, "b h n d -> b n (h d)") + return self.to_out(out_x) + + +class MLP(paddle.nn.Layer): + def __init__(self, n_input, n_hidden, n_output, n_layers=1, act="gelu", res=True): + super(MLP, self).__init__() + if act in ACTIVATION.keys(): + act = ACTIVATION[act] + else: + raise NotImplementedError + self.n_input = n_input + self.n_hidden = n_hidden + self.n_output = n_output + self.n_layers = n_layers + self.res = res + self.linear_pre = paddle.nn.Sequential( + paddle.nn.Linear(in_features=n_input, out_features=n_hidden), act() + ) + self.linear_post = paddle.nn.Linear(in_features=n_hidden, out_features=n_output) + self.linears = paddle.nn.LayerList( + sublayers=[ + paddle.nn.Sequential( + paddle.nn.Linear(in_features=n_hidden, out_features=n_hidden), act() + ) + for _ in range(n_layers) + ] + ) + + def forward(self, x): + x = self.linear_pre(x) + for i in range(self.n_layers): + if self.res: + x = self.linears[i](x) + x + else: + x = self.linears[i](x) + x = self.linear_post(x) + return x + + +class Transolver_block(paddle.nn.Layer): + """Transformer encoder block.""" + + def __init__( + self, + num_heads: int, + hidden_dim: int, + dropout: float, + act="gelu", + mlp_ratio=4, + last_layer=False, + out_dim=1, + slice_num=32, + ): + super().__init__() + self.last_layer = last_layer + self.ln_1 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) + self.Attn = Physics_Attention_1D( + hidden_dim, + heads=num_heads, + dim_head=hidden_dim // num_heads, + dropout=dropout, + slice_num=slice_num, + ) + self.ln_2 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) + self.mlp = MLP( + hidden_dim, + hidden_dim * mlp_ratio, + hidden_dim, + n_layers=0, + res=False, + act=act, + ) + if self.last_layer: + self.ln_3 = paddle.nn.LayerNorm(normalized_shape=hidden_dim) + self.mlp2 = paddle.nn.Linear(in_features=hidden_dim, out_features=out_dim) + + def forward(self, fx): + fx = self.Attn(self.ln_1(fx)) + fx + fx = self.mlp(self.ln_2(fx)) + fx + if self.last_layer: + fx = self.mlp2(self.ln_3(fx)) + return fx + + +class Model(paddle.nn.Layer): + def __init__( + self, + space_dim=1, + n_layers=5, + n_hidden=256, + dropout=0, + n_head=8, + act="gelu", + mlp_ratio=1, + fun_dim=1, + out_dim=1, + slice_num=32, + ref=8, + unified_pos=False, + ): + super(Model, self).__init__() + self.__name__ = "UniPDE_3D" + self.ref = ref + self.unified_pos = unified_pos + if self.unified_pos: + self.preprocess = MLP( + fun_dim + self.ref * self.ref * self.ref, + n_hidden * 2, + n_hidden, + n_layers=0, + res=False, + act=act, + ) + else: + self.preprocess = MLP( + fun_dim + space_dim, + n_hidden * 2, + n_hidden, + n_layers=0, + res=False, + act=act, + ) + self.n_hidden = n_hidden + self.space_dim = space_dim + self.blocks = paddle.nn.LayerList( + sublayers=[ + Transolver_block( + num_heads=n_head, + hidden_dim=n_hidden, + dropout=dropout, + act=act, + mlp_ratio=mlp_ratio, + out_dim=out_dim, + slice_num=slice_num, + last_layer=_ == n_layers - 1, + ) + for _ in range(n_layers) + ] + ) + self.initialize_weights() + param = 1 / n_hidden * paddle.rand(shape=(n_hidden,), dtype="float32") + out_1 = paddle.create_parameter( + shape=param.shape, + dtype=param.numpy().dtype, + default_initializer=paddle.nn.initializer.Assign(param), + ) + out_1.stop_gradient = not True + self.placeholder = out_1 + + def initialize_weights(self): + self.apply(self._init_weights) + + def 
_init_weights(self, m): + if isinstance(m, paddle.nn.Linear): + m.weight = ppsci.utils.initializer.trunc_normal_(m.weight, std=0.02) + if isinstance(m, paddle.nn.Linear) and m.bias is not None: + init_Constant = paddle.nn.initializer.Constant(value=0) + init_Constant(m.bias) + elif isinstance(m, (paddle.nn.LayerNorm, paddle.nn.BatchNorm1D)): + init_Constant = paddle.nn.initializer.Constant(value=0) + init_Constant(m.bias) + init_Constant = paddle.nn.initializer.Constant(value=1.0) + init_Constant(m.weight) + + def get_grid(self, my_pos): + batchsize = tuple(my_pos.shape)[0] + gridx = paddle.to_tensor(data=np.linspace(-1.5, 1.5, self.ref), dtype="float32") + gridx = gridx.reshape(1, self.ref, 1, 1, 1).repeat( + [batchsize, 1, self.ref, self.ref, 1] + ) + gridy = paddle.to_tensor(data=np.linspace(0, 2, self.ref), dtype="float32") + gridy = gridy.reshape(1, 1, self.ref, 1, 1).repeat( + [batchsize, self.ref, 1, self.ref, 1] + ) + gridz = paddle.to_tensor(data=np.linspace(-4, 4, self.ref), dtype="float32") + gridz = gridz.reshape(1, 1, 1, self.ref, 1).repeat( + [batchsize, self.ref, self.ref, 1, 1] + ) + grid_ref = ( + paddle.concat(x=(gridx, gridy, gridz), axis=-1) + .cuda(blocking=True) + .reshape(batchsize, self.ref**3, 3) + ) + pos = paddle.sqrt( + x=paddle.sum( + x=(my_pos[:, :, None, :] - grid_ref[:, None, :, :]) ** 2, axis=-1 + ) + ).reshape(batchsize, tuple(my_pos.shape)[1], self.ref * self.ref * self.ref) + return pos + + def forward(self, data): + cfd_data, geom_data = data + x, fx, _ = cfd_data.x, None, None + x = x[None, :, :] + if self.unified_pos: + new_pos = self.get_grid(cfd_data.pos[None, :, :]) + x = paddle.concat(x=(x, new_pos), axis=-1) + if fx is not None: + fx = paddle.concat(x=(x, fx), axis=-1) + fx = self.preprocess(fx) + else: + fx = self.preprocess(x) + fx = fx + self.placeholder[None, None, :] + for block in self.blocks: + fx = block(fx) + return fx[0] diff --git a/jointContribution/IJCAI_2024/zhongzaicanyu/requirements.txt b/jointContribution/IJCAI_2024/zhongzaicanyu/requirements.txt index 35a192268f..b45ed9db8c 100644 --- a/jointContribution/IJCAI_2024/zhongzaicanyu/requirements.txt +++ b/jointContribution/IJCAI_2024/zhongzaicanyu/requirements.txt @@ -1,6 +1,6 @@ -einops -numpy -paddlepaddle_gpu -scikit_learn -tqdm -vtk +einops +numpy +paddlepaddle_gpu +scikit_learn +tqdm +vtk diff --git a/jointContribution/IJCAI_2024/zhongzaicanyu/utils/paddle_aux.py b/jointContribution/IJCAI_2024/zhongzaicanyu/utils/paddle_aux.py index acc1a9ff14..39f1e2d37d 100644 --- a/jointContribution/IJCAI_2024/zhongzaicanyu/utils/paddle_aux.py +++ b/jointContribution/IJCAI_2024/zhongzaicanyu/utils/paddle_aux.py @@ -1,99 +1,99 @@ -# This file is generated by PaConvert ToolKit, please Don't edit it! 
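The `utils/paddle_aux.py` module whose diff follows is a PaConvert-generated shim that monkey-patches torch-style methods onto `paddle.Tensor`: `reshape` forwards unpacked dimensions to `paddle.reshape`, and `repeat` maps to `paddle.tile`. A small usage sketch of the patched methods is shown below; the example shapes are arbitrary and the import path assumes the script is run from this contribution's directory, as `model.py` does.

```python
import paddle
import utils.paddle_aux  # noqa: F401  (importing applies the monkey patches)

x = paddle.arange(6, dtype="float32")

# torch-style reshape with unpacked dims -> paddle.reshape(x, [2, 3])
y = x.reshape(2, 3)

# torch-style repeat -> paddle.tile: tile 2x along dim 0, 1x along dim 1
z = y.repeat(2, 1)

print(y.shape, z.shape)  # [2, 3] [4, 3]
```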
-import paddle - - -def reshape(self, *args, **kwargs): - if args: - if len(args) == 1 and isinstance(args[0], (tuple, list)): - return paddle.reshape(self, args[0]) - else: - return paddle.reshape(self, list(args)) - elif kwargs: - assert "shape" in kwargs - return paddle.reshape(self, shape=kwargs["shape"]) - - -setattr(paddle.Tensor, "reshape", reshape) - - -def repeat(self, *args, **kwargs): - if args: - if len(args) == 1 and isinstance(args[0], (tuple, list)): - return paddle.tile(self, args[0]) - else: - return paddle.tile(self, list(args)) - elif kwargs: - assert "repeats" in kwargs - return paddle.tile(self, repeat_times=kwargs["repeats"]) - - -setattr(paddle.Tensor, "repeat", repeat) - - -def add(self, *args, **kwargs): - if "other" in kwargs: - y = kwargs["other"] - elif "y" in kwargs: - y = kwargs["y"] - else: - y = args[0] - - if "alpha" in kwargs: - alpha = kwargs["alpha"] - if alpha != 1: - if not isinstance(y, paddle.Tensor): - y = paddle.to_tensor(alpha * y) - else: - y = alpha * y - else: - if not isinstance(y, paddle.Tensor): - y = paddle.to_tensor(y) - - return paddle.add(self, y) - - -setattr(paddle.Tensor, "add", add) - - -def min_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.minimum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.minimum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.min(self, *args, **kwargs), paddle.argmin( - self, *args, **kwargs - ) - else: - ret = paddle.min(self, *args, **kwargs) - - return ret - - -def max_class_func(self, *args, **kwargs): - if "other" in kwargs: - kwargs["y"] = kwargs.pop("other") - ret = paddle.maximum(self, *args, **kwargs) - elif len(args) == 1 and isinstance(args[0], paddle.Tensor): - ret = paddle.maximum(self, *args, **kwargs) - else: - if "dim" in kwargs: - kwargs["axis"] = kwargs.pop("dim") - - if "axis" in kwargs or len(args) >= 1: - ret = paddle.max(self, *args, **kwargs), paddle.argmax( - self, *args, **kwargs - ) - else: - ret = paddle.max(self, *args, **kwargs) - - return ret - - -setattr(paddle.Tensor, "min", min_class_func) -setattr(paddle.Tensor, "max", max_class_func) +# This file is generated by PaConvert ToolKit, please Don't edit it! 
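The re-added copy of the shim below (the change appears to be whitespace/line-ending only) also emulates two behaviours that differ between torch and Paddle: `Tensor.add(y, alpha=...)` scales the addend before calling `paddle.add`, and `Tensor.max(dim=...)` / `Tensor.min(dim=...)` return a `(values, indices)` pair by combining `paddle.max` with `paddle.argmax`. A brief sketch of how the patched calls behave, with arbitrary example tensors:

```python
import paddle
import utils.paddle_aux  # noqa: F401  (importing applies the monkey patches)

a = paddle.to_tensor([[1.0, 5.0], [3.0, 2.0]])
b = paddle.to_tensor([[1.0, 1.0], [1.0, 1.0]])

# torch-style alpha argument: computes a + 2 * b
print(a.add(b, alpha=2))

# reduction form: (values, indices) along dim=1, like torch
values, indices = a.max(dim=1)
print(values, indices)  # [5., 3.] and [1, 0]

# elementwise form: a Tensor argument falls through to paddle.maximum
print(a.max(b))
```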
+import paddle + + +def reshape(self, *args, **kwargs): + if args: + if len(args) == 1 and isinstance(args[0], (tuple, list)): + return paddle.reshape(self, args[0]) + else: + return paddle.reshape(self, list(args)) + elif kwargs: + assert "shape" in kwargs + return paddle.reshape(self, shape=kwargs["shape"]) + + +setattr(paddle.Tensor, "reshape", reshape) + + +def repeat(self, *args, **kwargs): + if args: + if len(args) == 1 and isinstance(args[0], (tuple, list)): + return paddle.tile(self, args[0]) + else: + return paddle.tile(self, list(args)) + elif kwargs: + assert "repeats" in kwargs + return paddle.tile(self, repeat_times=kwargs["repeats"]) + + +setattr(paddle.Tensor, "repeat", repeat) + + +def add(self, *args, **kwargs): + if "other" in kwargs: + y = kwargs["other"] + elif "y" in kwargs: + y = kwargs["y"] + else: + y = args[0] + + if "alpha" in kwargs: + alpha = kwargs["alpha"] + if alpha != 1: + if not isinstance(y, paddle.Tensor): + y = paddle.to_tensor(alpha * y) + else: + y = alpha * y + else: + if not isinstance(y, paddle.Tensor): + y = paddle.to_tensor(y) + + return paddle.add(self, y) + + +setattr(paddle.Tensor, "add", add) + + +def min_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.minimum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.minimum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.min(self, *args, **kwargs), paddle.argmin( + self, *args, **kwargs + ) + else: + ret = paddle.min(self, *args, **kwargs) + + return ret + + +def max_class_func(self, *args, **kwargs): + if "other" in kwargs: + kwargs["y"] = kwargs.pop("other") + ret = paddle.maximum(self, *args, **kwargs) + elif len(args) == 1 and isinstance(args[0], paddle.Tensor): + ret = paddle.maximum(self, *args, **kwargs) + else: + if "dim" in kwargs: + kwargs["axis"] = kwargs.pop("dim") + + if "axis" in kwargs or len(args) >= 1: + ret = paddle.max(self, *args, **kwargs), paddle.argmax( + self, *args, **kwargs + ) + else: + ret = paddle.max(self, *args, **kwargs) + + return ret + + +setattr(paddle.Tensor, "min", min_class_func) +setattr(paddle.Tensor, "max", max_class_func) diff --git a/jointContribution/PIDeepONet-LBM/task1-cavity-flow/LBM/cavity.c b/jointContribution/PIDeepONet-LBM/task1-cavity-flow/LBM/cavity.c index e45906817f..e6a7bfe020 100644 --- a/jointContribution/PIDeepONet-LBM/task1-cavity-flow/LBM/cavity.c +++ b/jointContribution/PIDeepONet-LBM/task1-cavity-flow/LBM/cavity.c @@ -1,588 +1,588 @@ -// modified Ladd's method: direct force -#include -#include -#include -#include -#define M 200 -#define N 200 -#define M2 (M / 2) -#define N2 (N / 2) -#define M1 (M + 1) -#define N1 (N + 1) -#define Ly (1.0) - -#define tau0 (0.8) -#define A0 (0.1) -#define dp (0.0) - -#define n_PL 1.0 -//#define mu_PL 0.0018 -#define Re 100.0 -#define rho0 1.0 -#define U0 0.1 - -#define Q 9 - -void lbini(void); -void analy(void); -void data_read(void); -void Evol(void); -void geo(void); -double feq(int k, int y, int x); -double force(int k, int y, int x); -void datadeal(void); -void Mass_velo_error(void); -double f[M1][N1][Q], g[M1][N1][Q], u[M1][N1], v[M1][N1], p[M1][N1], A[M1][N1], - tau[M1][N1], Sh[M1][N1][3], uans[M1]; -double umax, utem0, utem1, utem2, u0, mu_PL; -int m, e[Q][2], re[Q]; -int flag[M1][N1]; -double tp[Q], s[Q], diag[Q]; -double c, rc, rcc, dx, dt, cs2; -double w, w1, rho_in, rho_out; -double 
Err_mass, Err_vel; - - -void main() { - int readdata, mmax, TEND = 0; - double drho, err; - - e[0][0] = e[0][1] = 0; - e[1][0] = 1; - e[1][1] = 0; - e[2][0] = 0; - e[2][1] = 1; - e[3][0] = -1; - e[3][1] = 0; - e[4][0] = 0; - e[4][1] = -1; - e[5][0] = 1; - e[5][1] = 1; - e[6][0] = -1; - e[6][1] = 1; - e[7][0] = -1; - e[7][1] = -1; - e[8][0] = 1; - e[8][1] = -1; - tp[0] = 4.0 / 9; - tp[1] = tp[2] = tp[3] = tp[4] = 1.0 / 9; - tp[5] = tp[6] = tp[7] = tp[8] = 1.0 / 36; - - re[0] = 0; - re[1] = 3; - re[2] = 4; - re[3] = 1; - re[4] = 2; - re[5] = 7; - re[6] = 8; - re[7] = 5; - re[8] = 6; - - c = 1.0; - dx = Ly / (M); - dt = dx / c; - rc = 1.0 / c; - rcc = rc * rc; - cs2 = c * c / 3; - - // drho=N*dx*dp/cs2; rho_in=rho0+0.5*drho; rho_out=rho0-0.5*drho; - - mu_PL = rho0 * pow(U0, 2. - n_PL) / Re; - w = 1.0 / tau0; - w1 = 1.0 - 0.5 * w; - // A0=tau-0.5-mu_PL/rho0/(cs2*dt); - - utem0 = 1.0 / mu_PL * dp; - utem1 = n_PL / (n_PL + 1) * pow(utem0, 1.0 / n_PL); - utem2 = pow(0.5 * Ly, 1.0 + 1.0 / n_PL); - umax = utem1 * utem2; - - diag[0] = 1.0 / 9; - diag[1] = diag[2] = 1.0 / 36; - diag[3] = diag[5] = 1.0 / 6; - diag[4] = diag[6] = 1.0 / 12; - diag[7] = diag[8] = 1.0 / 4; - - printf( - "mu_ori=%e, A0=%e, pindex=%f, dx=%e, dt=%e\n", mu_PL, A0, n_PL, dx, dt); - - // geo(); - lbini(); - analy(); - - // printf("Read Data? (yes=1 no=0)\n"); - // scanf("%d",&readdata); - // if(readdata) data_read(); - - m = 0; - err = 1.0; - -AA: - printf("input mmax:\n"); - scanf("%d", &mmax); - // mmax=1000000; - TEND += mmax; - - u0 = u[M2][N2]; - while (m < TEND && err > 1.0e-9) { - m++; - Evol(); - - if (m % 500 == 0) { - err = fabs(u[M2][N2] - u0) / (fabs(u[M2][N2]) + 1.0e-10); - u0 = u[M2][N2]; - printf("err=%e ucenter=%e m=%d\n", err, u[M2][N2], m); - } - - if (m % 2000 == 0) { - Mass_velo_error(); - printf("err_mass=%e, err_velo=%e\n", Err_mass, Err_vel); - datadeal(); - } - } - - Mass_velo_error(); - printf("err_mass=%e, err_velo=%e\n", Err_mass, Err_vel); - datadeal(); - - - printf("Continue? 
(yes=1 no=0)\n"); - scanf("%d", &readdata); - if (readdata) goto AA; -} - -void lbini() { - int i, j, k; - - for (j = 0; j < M1; j++) - for (i = 0; i < N1; i++) { - u[j][i] = 0.0; - v[j][i] = 0.0; - p[j][i] = rho0; - A[j][i] = A0; - tau[j][i] = tau0; - Sh[j][i][0] = Sh[j][i][1] = Sh[j][i][2] = 0.0; - } - - for (j = 0; j <= M; j++) - for (i = 0; i <= N; i++) - for (k = 0; k < Q; k++) f[j][i][k] = feq(k, j, i); -} - -void geo() { - int i, j; - for (j = 1; j < M; j++) { - for (i = 1; i < N; i++) { - flag[j][i] = 1; - } - } -} - -void analy() { - int j; - double yd, yabs; - FILE *fp; - - for (j = 0; j < M1; j++) { - yd = (j - 0.5) * dx - 0.5 * Ly; - yabs = fabs(yd); - uans[j] = umax - utem1 * pow(yabs, 1.0 + 1.0 / n_PL); - } - //--save data--- - if ((fp = fopen("uans.dat", "w")) == NULL) { - printf(" File Open Error\n"); - exit(1); - } - - for (j = 0; j < M1; j++) { - fprintf(fp, "%e", uans[j] / umax); - fprintf(fp, "\n"); - } - fclose(fp); - - printf("datasave completed!\n"); -} - -void Mass_velo_error() { - int i, j, nt = 0; - double p_sum = 0.0, p_nu_sum = 0.0, u_ana_sum = 0.0, u_ana_nu_sum = 0.0; - - for (j = 0; j < M1; j++) - for (i = 0; i < N1; i++) { - p_sum += rho0; - p_nu_sum += fabs(p[j][i] - rho0); - } - - Err_mass = p_nu_sum / p_sum; - - for (j = 0; j <= M; j++) - for (i = 0; i < N1; i++) { - u_ana_sum += fabs(uans[j]); - u_ana_nu_sum += fabs(u[j][i] - uans[j]); - } - - Err_vel = u_ana_nu_sum / u_ana_sum; -} - -/* -double feq(int k, int y, int x) -{ - double RHO, U, V, At; - double uv, eu, f1eq, eqf; - double sheq,seq00,seq01,seq02,seq11,f2eq; - - RHO=rho[y][x]; U=u[y][x]; V=v[y][x]; At=A[y][x]; - eu=(e[k][0]*U+e[k][1]*V)*rc; - uv=(U*U+V*V)*rcc; - f1eq= 1.0+3*eu+4.5*eu*eu-1.5*uv; - - seq00=Sh[y][x][0]*(e[k][0]*e[k][0]-1.0/3); - seq01=Sh[y][x][1]* e[k][0]*e[k][1] ; - seq11=Sh[y][x][2]*(e[k][1]*e[k][1]-1.0/3); - sheq=seq00+2.0*seq01+seq11; - f2eq=1.5*At*dt*sheq; - - eqf=tp[k]*RHO*(f1eq+f2eq); - - return eqf; -} -*/ - -double feq(int k, int y, int x) { - double PHO, U, V, At; - double uv, eu, f1eq, eqf; - double sheq, seq00, seq01, seq02, seq11, f2eq; - - PHO = p[y][x]; - U = u[y][x]; - V = v[y][x]; - At = A[y][x]; - eu = (e[k][0] * U + e[k][1] * V) * rc; - uv = (U * U + V * V) * rcc; - f1eq = 3 * eu + 4.5 * eu * eu - 1.5 * uv; - - // eqf = tp[k]*(PHO + rho0*f1eq); - - seq00 = Sh[y][x][0] * (e[k][0] * e[k][0] - 1.0 / 3); - seq01 = Sh[y][x][1] * e[k][0] * e[k][1]; - seq11 = Sh[y][x][2] * (e[k][1] * e[k][1] - 1.0 / 3); - sheq = seq00 + 2.0 * seq01 + seq11; - f2eq = 1.5 * At * dt * sheq; - - eqf = tp[k] * PHO + rho0 * tp[k] * (f1eq + f2eq); - /* - if(k==0) eqf=rho0-(1-tp[k])*PHO/cs2+rho0*tp[k]*(f1eq+f2eq); - else eqf=tp[k]*PHO/cs2+rho0*tp[k]*(f1eq+f2eq); - */ - - return eqf; -} - -double force(int k, int y, int x) { - double F1, F2, Fc, U, V, rtau; - - U = u[y][x]; - V = v[y][x]; - rtau = 1.0 / tau[y][x]; - - F1 = 3.0 * dp * e[k][0]; - F2 = 9.0 * (U * e[k][0] + V * e[k][1]) * (dp * e[k][0]) - 3.0 * U * dp; - Fc = tp[k] * w1 * (F1 * rc + F2 * rcc); - - return Fc; -} - -void Evol() { - int i, j, k, id, jd; - double FM, FCM, r = 10., wc, fneq; - double cst, miut, at00, at01, at10, at11, att; - double sum_rho_in_0 = 0.0, sum_rho_out_0 = 0.0; - double alpha_in, alpha_out, FMu, FMb; - - // relaxation - for (j = 0; j < M1; j++) - for (i = 0; i < N1; i++) { - for (k = 0; k < Q; k++) { - FM = feq(k, j, i); - FCM = force(k, j, i); - g[j][i][k] = f[j][i][k] - w * (f[j][i][k] - FM) + dt * FCM; - } - } - - /* - // Halfway bounce-back - j=0; - for(i=0;i<=N;i++) - { - for(k=0;k=0) 
f[j][i][k]=g[jd][id][k]; - else f[j][i][k]=g[j][i][re[k]]; - } - - } - - j=M; - for(i=0;i<=N;i++) - { - for(k=0;k +#include +#include +#include +#define M 200 +#define N 200 +#define M2 (M / 2) +#define N2 (N / 2) +#define M1 (M + 1) +#define N1 (N + 1) +#define Ly (1.0) + +#define tau0 (0.8) +#define A0 (0.1) +#define dp (0.0) + +#define n_PL 1.0 +//#define mu_PL 0.0018 +#define Re 100.0 +#define rho0 1.0 +#define U0 0.1 + +#define Q 9 + +void lbini(void); +void analy(void); +void data_read(void); +void Evol(void); +void geo(void); +double feq(int k, int y, int x); +double force(int k, int y, int x); +void datadeal(void); +void Mass_velo_error(void); +double f[M1][N1][Q], g[M1][N1][Q], u[M1][N1], v[M1][N1], p[M1][N1], A[M1][N1], + tau[M1][N1], Sh[M1][N1][3], uans[M1]; +double umax, utem0, utem1, utem2, u0, mu_PL; +int m, e[Q][2], re[Q]; +int flag[M1][N1]; +double tp[Q], s[Q], diag[Q]; +double c, rc, rcc, dx, dt, cs2; +double w, w1, rho_in, rho_out; +double Err_mass, Err_vel; + + +void main() { + int readdata, mmax, TEND = 0; + double drho, err; + + e[0][0] = e[0][1] = 0; + e[1][0] = 1; + e[1][1] = 0; + e[2][0] = 0; + e[2][1] = 1; + e[3][0] = -1; + e[3][1] = 0; + e[4][0] = 0; + e[4][1] = -1; + e[5][0] = 1; + e[5][1] = 1; + e[6][0] = -1; + e[6][1] = 1; + e[7][0] = -1; + e[7][1] = -1; + e[8][0] = 1; + e[8][1] = -1; + tp[0] = 4.0 / 9; + tp[1] = tp[2] = tp[3] = tp[4] = 1.0 / 9; + tp[5] = tp[6] = tp[7] = tp[8] = 1.0 / 36; + + re[0] = 0; + re[1] = 3; + re[2] = 4; + re[3] = 1; + re[4] = 2; + re[5] = 7; + re[6] = 8; + re[7] = 5; + re[8] = 6; + + c = 1.0; + dx = Ly / (M); + dt = dx / c; + rc = 1.0 / c; + rcc = rc * rc; + cs2 = c * c / 3; + + // drho=N*dx*dp/cs2; rho_in=rho0+0.5*drho; rho_out=rho0-0.5*drho; + + mu_PL = rho0 * pow(U0, 2. - n_PL) / Re; + w = 1.0 / tau0; + w1 = 1.0 - 0.5 * w; + // A0=tau-0.5-mu_PL/rho0/(cs2*dt); + + utem0 = 1.0 / mu_PL * dp; + utem1 = n_PL / (n_PL + 1) * pow(utem0, 1.0 / n_PL); + utem2 = pow(0.5 * Ly, 1.0 + 1.0 / n_PL); + umax = utem1 * utem2; + + diag[0] = 1.0 / 9; + diag[1] = diag[2] = 1.0 / 36; + diag[3] = diag[5] = 1.0 / 6; + diag[4] = diag[6] = 1.0 / 12; + diag[7] = diag[8] = 1.0 / 4; + + printf( + "mu_ori=%e, A0=%e, pindex=%f, dx=%e, dt=%e\n", mu_PL, A0, n_PL, dx, dt); + + // geo(); + lbini(); + analy(); + + // printf("Read Data? (yes=1 no=0)\n"); + // scanf("%d",&readdata); + // if(readdata) data_read(); + + m = 0; + err = 1.0; + +AA: + printf("input mmax:\n"); + scanf("%d", &mmax); + // mmax=1000000; + TEND += mmax; + + u0 = u[M2][N2]; + while (m < TEND && err > 1.0e-9) { + m++; + Evol(); + + if (m % 500 == 0) { + err = fabs(u[M2][N2] - u0) / (fabs(u[M2][N2]) + 1.0e-10); + u0 = u[M2][N2]; + printf("err=%e ucenter=%e m=%d\n", err, u[M2][N2], m); + } + + if (m % 2000 == 0) { + Mass_velo_error(); + printf("err_mass=%e, err_velo=%e\n", Err_mass, Err_vel); + datadeal(); + } + } + + Mass_velo_error(); + printf("err_mass=%e, err_velo=%e\n", Err_mass, Err_vel); + datadeal(); + + + printf("Continue? 
(yes=1 no=0)\n"); + scanf("%d", &readdata); + if (readdata) goto AA; +} + +void lbini() { + int i, j, k; + + for (j = 0; j < M1; j++) + for (i = 0; i < N1; i++) { + u[j][i] = 0.0; + v[j][i] = 0.0; + p[j][i] = rho0; + A[j][i] = A0; + tau[j][i] = tau0; + Sh[j][i][0] = Sh[j][i][1] = Sh[j][i][2] = 0.0; + } + + for (j = 0; j <= M; j++) + for (i = 0; i <= N; i++) + for (k = 0; k < Q; k++) f[j][i][k] = feq(k, j, i); +} + +void geo() { + int i, j; + for (j = 1; j < M; j++) { + for (i = 1; i < N; i++) { + flag[j][i] = 1; + } + } +} + +void analy() { + int j; + double yd, yabs; + FILE *fp; + + for (j = 0; j < M1; j++) { + yd = (j - 0.5) * dx - 0.5 * Ly; + yabs = fabs(yd); + uans[j] = umax - utem1 * pow(yabs, 1.0 + 1.0 / n_PL); + } + //--save data--- + if ((fp = fopen("uans.dat", "w")) == NULL) { + printf(" File Open Error\n"); + exit(1); + } + + for (j = 0; j < M1; j++) { + fprintf(fp, "%e", uans[j] / umax); + fprintf(fp, "\n"); + } + fclose(fp); + + printf("datasave completed!\n"); +} + +void Mass_velo_error() { + int i, j, nt = 0; + double p_sum = 0.0, p_nu_sum = 0.0, u_ana_sum = 0.0, u_ana_nu_sum = 0.0; + + for (j = 0; j < M1; j++) + for (i = 0; i < N1; i++) { + p_sum += rho0; + p_nu_sum += fabs(p[j][i] - rho0); + } + + Err_mass = p_nu_sum / p_sum; + + for (j = 0; j <= M; j++) + for (i = 0; i < N1; i++) { + u_ana_sum += fabs(uans[j]); + u_ana_nu_sum += fabs(u[j][i] - uans[j]); + } + + Err_vel = u_ana_nu_sum / u_ana_sum; +} + +/* +double feq(int k, int y, int x) +{ + double RHO, U, V, At; + double uv, eu, f1eq, eqf; + double sheq,seq00,seq01,seq02,seq11,f2eq; + + RHO=rho[y][x]; U=u[y][x]; V=v[y][x]; At=A[y][x]; + eu=(e[k][0]*U+e[k][1]*V)*rc; + uv=(U*U+V*V)*rcc; + f1eq= 1.0+3*eu+4.5*eu*eu-1.5*uv; + + seq00=Sh[y][x][0]*(e[k][0]*e[k][0]-1.0/3); + seq01=Sh[y][x][1]* e[k][0]*e[k][1] ; + seq11=Sh[y][x][2]*(e[k][1]*e[k][1]-1.0/3); + sheq=seq00+2.0*seq01+seq11; + f2eq=1.5*At*dt*sheq; + + eqf=tp[k]*RHO*(f1eq+f2eq); + + return eqf; +} +*/ + +double feq(int k, int y, int x) { + double PHO, U, V, At; + double uv, eu, f1eq, eqf; + double sheq, seq00, seq01, seq02, seq11, f2eq; + + PHO = p[y][x]; + U = u[y][x]; + V = v[y][x]; + At = A[y][x]; + eu = (e[k][0] * U + e[k][1] * V) * rc; + uv = (U * U + V * V) * rcc; + f1eq = 3 * eu + 4.5 * eu * eu - 1.5 * uv; + + // eqf = tp[k]*(PHO + rho0*f1eq); + + seq00 = Sh[y][x][0] * (e[k][0] * e[k][0] - 1.0 / 3); + seq01 = Sh[y][x][1] * e[k][0] * e[k][1]; + seq11 = Sh[y][x][2] * (e[k][1] * e[k][1] - 1.0 / 3); + sheq = seq00 + 2.0 * seq01 + seq11; + f2eq = 1.5 * At * dt * sheq; + + eqf = tp[k] * PHO + rho0 * tp[k] * (f1eq + f2eq); + /* + if(k==0) eqf=rho0-(1-tp[k])*PHO/cs2+rho0*tp[k]*(f1eq+f2eq); + else eqf=tp[k]*PHO/cs2+rho0*tp[k]*(f1eq+f2eq); + */ + + return eqf; +} + +double force(int k, int y, int x) { + double F1, F2, Fc, U, V, rtau; + + U = u[y][x]; + V = v[y][x]; + rtau = 1.0 / tau[y][x]; + + F1 = 3.0 * dp * e[k][0]; + F2 = 9.0 * (U * e[k][0] + V * e[k][1]) * (dp * e[k][0]) - 3.0 * U * dp; + Fc = tp[k] * w1 * (F1 * rc + F2 * rcc); + + return Fc; +} + +void Evol() { + int i, j, k, id, jd; + double FM, FCM, r = 10., wc, fneq; + double cst, miut, at00, at01, at10, at11, att; + double sum_rho_in_0 = 0.0, sum_rho_out_0 = 0.0; + double alpha_in, alpha_out, FMu, FMb; + + // relaxation + for (j = 0; j < M1; j++) + for (i = 0; i < N1; i++) { + for (k = 0; k < Q; k++) { + FM = feq(k, j, i); + FCM = force(k, j, i); + g[j][i][k] = f[j][i][k] - w * (f[j][i][k] - FM) + dt * FCM; + } + } + + /* + // Halfway bounce-back + j=0; + for(i=0;i<=N;i++) + { + for(k=0;k=0) 
f[j][i][k]=g[jd][id][k]; + else f[j][i][k]=g[j][i][re[k]]; + } + + } + + j=M; + for(i=0;i<=N;i++) + { + for(k=0;kbn", out_B_u, u_basis) - v_pred = pd.einsum("bi,ni->bn", out_B_v, v_basis) - u_pred, v_pred = u_pred.numpy(), v_pred.numpy() - u_temp, v_temp = np.tile(u_pred[:, :, None], [1, 1, 1]), np.tile( - v_pred[:, :, None], [1, 1, 1] - ) - u_pred, v_pred = data.decoder(u_temp, v_temp) - err_u = np.mean( - np.linalg.norm(u_pred - u_test, 2, axis=1) / np.linalg.norm(u_test, 2, axis=1) - ) - err_v = np.mean( - np.linalg.norm(v_pred - v_test, 2, axis=1) / np.linalg.norm(v_test, 2, axis=1) - ) - return err_u, err_v - - -def main(): - data = DataSet(nx, bs, modes) - u_basis, v_basis = data.PODbasis() - u_basis = pd.to_tensor(u_basis) - v_basis = pd.to_tensor(v_basis) - - ##paddle-Branch net - model = pd.nn.Sequential( - pd.nn.Linear(h, 64), - pd.nn.Tanh(), - pd.nn.Linear(64, 64), - pd.nn.Tanh(), - pd.nn.Linear(64, out_dims), - ) - # optimizer - opt = pd.optimizer.Adam(learning_rate=1.0e-3, parameters=model.parameters()) - - model.train() - - x_test, f_test, u_test, v_test = data.testbatch() - f_test = pd.to_tensor(f_test) - n = 0 - nmax = 20000 - start_time = time.perf_counter() - time_step_0 = time.perf_counter() - while n <= nmax: - - x_train, f_train, u_train, v_train, _, _ = data.minibatch() - f_train, u_train, v_train = ( - pd.to_tensor(f_train), - pd.to_tensor(u_train), - pd.to_tensor(v_train), - ) - out_B = model(f_train) - out_B_u, out_B_v = out_B[:, :modes], out_B[:, modes:] - u_pred = pd.einsum("bi,ni->bn", out_B_u, u_basis) - v_pred = pd.einsum("bi,ni->bn", out_B_v, v_basis) - loss = F.mse_loss(u_pred, u_train[:, :, 0]) + F.mse_loss( - v_pred, v_train[:, :, 0] - ) - loss.backward() - opt.step() - opt.clear_grad() - - if n % 100 == 0: - time_step_1000 = time.perf_counter() - T = time_step_1000 - time_step_0 - err_u, err_v = prediction( - model, data, u_basis, v_basis, f_test, u_test, v_test - ) - # err_u, err_v = data_save.save(sess, x_pos, f_ph, u_ph, v_ph, u_pred, v_pred, data, num_test, h) - print( - "Step: %d, Loss: %.3e, err_u: %.3e, err_v: %.3e, Time (secs): %.3f" - % (n, float(loss), err_u, err_v, T) - ) - # print('Step: %d, Loss: %.3e, Time (secs): %.3f'%(n, float(loss), T)) - time_step_0 = time.perf_counter() - - n += 1 - - stop_time = time.perf_counter() - print("Training time (secs): %.3f" % (stop_time - start_time)) - - start_time = time.perf_counter() - err_u, err_v = prediction(model, data, u_basis, v_basis, f_test, u_test, v_test) - stop_time = time.perf_counter() - T = stop_time - start_time - print("err_u: %.3e, err_v: %.3e, Inference time (secs): %.3f" % (err_u, err_v, T)) - - -if __name__ == "__main__": - main() +import time + +import numpy as np +import paddle as pd +import paddle.nn.functional as F +from dataset import DataSet + +pd.set_default_dtype("float32") + +pd.seed(1234) +np.random.seed(1234) + +# resolution +h = 65 +w = 65 + +# output dimension of Branch/Trunk +p = 100 +p1 = p // 2 + +# batch_size +bs = 32 + +# size of input for Trunk net +nx = h +x_num = nx * nx + +# POD modes +modes = 5 +out_dims = 2 * modes +# coeffs for POD +# layer_pod = [h, 64, 64, 2*modes] + + +def prediction(model, data, u_basis, v_basis, f_test, u_test, v_test): + out_B = model(f_test) + out_B_u, out_B_v = out_B[:, :modes], out_B[:, modes:] + u_pred = pd.einsum("bi,ni->bn", out_B_u, u_basis) + v_pred = pd.einsum("bi,ni->bn", out_B_v, v_basis) + u_pred, v_pred = u_pred.numpy(), v_pred.numpy() + u_temp, v_temp = np.tile(u_pred[:, :, None], [1, 1, 1]), np.tile( + v_pred[:, :, None], 
[1, 1, 1] + ) + u_pred, v_pred = data.decoder(u_temp, v_temp) + err_u = np.mean( + np.linalg.norm(u_pred - u_test, 2, axis=1) / np.linalg.norm(u_test, 2, axis=1) + ) + err_v = np.mean( + np.linalg.norm(v_pred - v_test, 2, axis=1) / np.linalg.norm(v_test, 2, axis=1) + ) + return err_u, err_v + + +def main(): + data = DataSet(nx, bs, modes) + u_basis, v_basis = data.PODbasis() + u_basis = pd.to_tensor(u_basis) + v_basis = pd.to_tensor(v_basis) + + ##paddle-Branch net + model = pd.nn.Sequential( + pd.nn.Linear(h, 64), + pd.nn.Tanh(), + pd.nn.Linear(64, 64), + pd.nn.Tanh(), + pd.nn.Linear(64, out_dims), + ) + # optimizer + opt = pd.optimizer.Adam(learning_rate=1.0e-3, parameters=model.parameters()) + + model.train() + + x_test, f_test, u_test, v_test = data.testbatch() + f_test = pd.to_tensor(f_test) + n = 0 + nmax = 20000 + start_time = time.perf_counter() + time_step_0 = time.perf_counter() + while n <= nmax: + + x_train, f_train, u_train, v_train, _, _ = data.minibatch() + f_train, u_train, v_train = ( + pd.to_tensor(f_train), + pd.to_tensor(u_train), + pd.to_tensor(v_train), + ) + out_B = model(f_train) + out_B_u, out_B_v = out_B[:, :modes], out_B[:, modes:] + u_pred = pd.einsum("bi,ni->bn", out_B_u, u_basis) + v_pred = pd.einsum("bi,ni->bn", out_B_v, v_basis) + loss = F.mse_loss(u_pred, u_train[:, :, 0]) + F.mse_loss( + v_pred, v_train[:, :, 0] + ) + loss.backward() + opt.step() + opt.clear_grad() + + if n % 100 == 0: + time_step_1000 = time.perf_counter() + T = time_step_1000 - time_step_0 + err_u, err_v = prediction( + model, data, u_basis, v_basis, f_test, u_test, v_test + ) + # err_u, err_v = data_save.save(sess, x_pos, f_ph, u_ph, v_ph, u_pred, v_pred, data, num_test, h) + print( + "Step: %d, Loss: %.3e, err_u: %.3e, err_v: %.3e, Time (secs): %.3f" + % (n, float(loss), err_u, err_v, T) + ) + # print('Step: %d, Loss: %.3e, Time (secs): %.3f'%(n, float(loss), T)) + time_step_0 = time.perf_counter() + + n += 1 + + stop_time = time.perf_counter() + print("Training time (secs): %.3f" % (stop_time - start_time)) + + start_time = time.perf_counter() + err_u, err_v = prediction(model, data, u_basis, v_basis, f_test, u_test, v_test) + stop_time = time.perf_counter() + T = stop_time - start_time + print("err_u: %.3e, err_v: %.3e, Inference time (secs): %.3f" % (err_u, err_v, T)) + + +if __name__ == "__main__": + main() diff --git a/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/LBM_two_phase/rayleigh_gpu.cu b/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/LBM_two_phase/rayleigh_gpu.cu index 7d3f481e44..f0b65267f8 100644 --- a/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/LBM_two_phase/rayleigh_gpu.cu +++ b/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/LBM_two_phase/rayleigh_gpu.cu @@ -1,1303 +1,1303 @@ -#include -#include -#include -#include -#include -//#include - -#define M 1024 // grid in y-direction -#define N 256 // grid in x-direction -#define M1 (M + 1) // number of grid in y-direction -#define N1 (N + 1) // number of grid in x-direction - -#define At 0.1 -#define rhol 1.0 -#define rhog (rhol * (1 - At) / (1 + At)) -#define rhom (0.5 * (rhol + rhog)) - -#define phil 1.0 -#define phig (-1.0) -#define phim (0.5 * (phil + phig)) - -#define D 4.0 -//#define sigma 0.0000526 -#define sigma (5.0e-5) - -#define pi 3.1415926535897932 -#define Max 80000 - -#define Q 9 // 9 velocities in LBM - -#define BX 128 //ÿһblockĴС -#define BY 1 - -#define dx 1.0 // c=1.0 -#define dt 1.0 -#define rdt 1.0 - -const int N16 = 16; -const int NX = (N1 + N16 + N16) / 16 * 16; //16չ 
N16N16+N1-1 -const int NY = M1 + 2; // 1չ 1M1 -const int NYNX = NY * NX; - -void init(); //ʼ -void datadeal(int step); // -double error1(double phi[NY][NX], double phi0[NY][NX]); //fܶ -// double A_spike( ); -// double A_bulble( ); - -double f[Q][NY][NX] = {0.0}, - g[Q][NY][NX] = {0.0}; //CPUֲ f(Һ) g()Ŀռ -double phi[NY][NX] = {0.0}, rho[NY][NX] = {0.0}, p[NY][NX] = {0.0}, - mu[NY][NX] = {0.0}; -double u[NY][NX] = {0.0}, v[NY][NX] = {0.0}; -double phi0[NY][NX] = {0.0}, u0[NY][NX] = {0.0}, v0[NY][NX] = {0.0}; - -int e[Q][2], w[Q]; - -double Re; -double ww_f, ww_g; // w=1/tau -double beta, Kappa, MM, A, ggy; - -////////////////////////////////////////////////////////// -#define FEQ_0(phi, vx, vy, mu, A) \ - (phi - 5.0 / 9.0 * A * mu) //ٶvx,vyûгc, vv=1.5*(vx*vx+vy*vy) -#define FEQ_1(phi, vx, vy, mu, A) (1.0 / 9.0 * (A * mu + 3.0 * phi * vx)) -#define FEQ_2(phi, vx, vy, mu, A) (1.0 / 9.0 * (A * mu + 3.0 * phi * vy)) -#define FEQ_3(phi, vx, vy, mu, A) (1.0 / 9.0 * (A * mu - 3.0 * phi * vx)) -#define FEQ_4(phi, vx, vy, mu, A) (1.0 / 9.0 * (A * mu - 3.0 * phi * vy)) -#define FEQ_5(phi, vx, vy, mu, A) \ - (1.0 / 36.0 * (A * mu + 3.0 * phi * (vx + vy))) -#define FEQ_6(phi, vx, vy, mu, A) \ - (1.0 / 36.0 * (A * mu + 3.0 * phi * (-vx + vy))) -#define FEQ_7(phi, vx, vy, mu, A) \ - (1.0 / 36.0 * (A * mu + 3.0 * phi * (-vx - vy))) -#define FEQ_8(phi, vx, vy, mu, A) \ - (1.0 / 36.0 * (A * mu + 3.0 * phi * (vx - vy))) -////////////////////////////////////////////////////////// -#define GEQ_0(rho, p, vx, vy, vv) \ - (4.0 / 9.0 * (3.0 * p - rho * vv) - \ - 3 * p) //ٶvx,vyûгc, vv=1.5*(vx*vx+vy*vy) -#define GEQ_1(rho, p, vx, vy, vv) \ - (1.0 / 9.0 * (3.0 * p + rho * (3.0 * vx + 4.5 * vx * vx - vv))) -#define GEQ_2(rho, p, vx, vy, vv) \ - (1.0 / 9.0 * (3.0 * p + rho * (3.0 * vy + 4.5 * vy * vy - vv))) -#define GEQ_3(rho, p, vx, vy, vv) \ - (1.0 / 9.0 * (3.0 * p + rho * (-3.0 * vx + 4.5 * vx * vx - vv))) -#define GEQ_4(rho, p, vx, vy, vv) \ - (1.0 / 9.0 * (3.0 * p + rho * (-3.0 * vy + 4.5 * vy * vy - vv))) -#define GEQ_5(rho, p, vx, vy, vv) \ - (1.0 / 36.0 * \ - (3.0 * p + rho * (3.0 * (vx + vy) + 4.5 * (vx + vy) * (vx + vy) - vv))) -#define GEQ_6(rho, p, vx, vy, vv) \ - (1.0 / 36.0 * \ - (3.0 * p + rho * (3.0 * (-vx + vy) + 4.5 * (-vx + vy) * (-vx + vy) - vv))) -#define GEQ_7(rho, p, vx, vy, vv) \ - (1.0 / 36.0 * \ - (3.0 * p + rho * (3.0 * (-vx - vy) + 4.5 * (-vx - vy) * (-vx - vy) - vv))) -#define GEQ_8(rho, p, vx, vy, vv) \ - (1.0 / 36.0 * \ - (3.0 * p + rho * (3.0 * (vx - vy) + 4.5 * (vx - vy) * (vx - vy) - vv))) -////////////////////////////////////////////////////////// -#define WEQ_0(vx, vy, vv) (4.0 / 9.0 * (1.0 - vv)) -#define WEQ_1(vx, vy, vv) (1.0 / 9.0 * (1.0 + 3.0 * vx + 4.5 * vx * vx - vv)) -#define WEQ_2(vx, vy, vv) (1.0 / 9.0 * (1.0 + 3.0 * vy + 4.5 * vy * vy - vv)) -#define WEQ_3(vx, vy, vv) (1.0 / 9.0 * (1.0 - 3.0 * vx + 4.5 * vx * vx - vv)) -#define WEQ_4(vx, vy, vv) (1.0 / 9.0 * (1.0 - 3.0 * vy + 4.5 * vy * vy - vv)) -#define WEQ_5(vx, vy, vv) \ - (1.0 / 36.0 * (1.0 + 3.0 * (vx + vy) + 4.5 * (vx + vy) * (vx + vy) - vv)) -#define WEQ_6(vx, vy, vv) \ - (1.0 / 36.0 * (1.0 + 3.0 * (-vx + vy) + 4.5 * (-vx + vy) * (-vx + vy) - vv)) -#define WEQ_7(vx, vy, vv) \ - (1.0 / 36.0 * (1.0 + 3.0 * (-vx - vy) + 4.5 * (-vx - vy) * (-vx - vy) - vv)) -#define WEQ_8(vx, vy, vv) \ - (1.0 / 36.0 * (1.0 + 3.0 * (vx - vy) + 4.5 * (vx - vy) * (vx - vy) - vv)) -////////////////////////////////////////////////////////// -#define MFEQ_0(phi, vx, vy, mu, A) (phi) -#define MFEQ_1(phi, vx, vy, mu, A) (-4.0 * phi + 2.0 * A 
* mu) -#define MFEQ_2(phi, vx, vy, mu, A) (4.0 * phi - 3.0 * A * mu) -#define MFEQ_3(phi, vx, vy, mu, A) (phi * vx) -#define MFEQ_4(phi, vx, vy, mu, A) (-phi * vx) -#define MFEQ_5(phi, vx, vy, mu, A) (phi * vy) -#define MFEQ_6(phi, vx, vy, mu, A) (-phi * vy) -#define MFEQ_7(phi, vx, vy, mu, A) (0.) -#define MFEQ_8(phi, vx, vy, mu, A) (0.) -////////////////////////////////////////////////////////// -#define MGEQ_0(rho, p, vx, vy) \ - (0.) //ٶvx,vyûгc, vv=1.5*(vx*vx+vy*vy) -#define MGEQ_1(rho, p, vx, vy) (6.0 * p + 3.0 * rho * (vx * vx + vy * vy)) -#define MGEQ_2(rho, p, vx, vy) (-9.0 * p - 3.0 * rho * (vx * vx + vy * vy)) -#define MGEQ_3(rho, p, vx, vy) (rho * vx) -#define MGEQ_4(rho, p, vx, vy) (-rho * vx) -#define MGEQ_5(rho, p, vx, vy) (rho * vy) -#define MGEQ_6(rho, p, vx, vy) (-rho * vy) -#define MGEQ_7(rho, p, vx, vy) (rho * (vx * vx - vy * vy)) -#define MGEQ_8(rho, p, vx, vy) (rho * vx * vy) - -////////////////////////////////////////////////////////// -#define MFF_0(phi, vx, vy, phi0, vx0, vy0) \ - (0.) //ٶvx,vyûгc, vv=1.5*(vx*vx+vy*vy) -#define MFF_1(phi, vx, vy, phi0, vx0, vy0) (0.) -#define MFF_2(phi, vx, vy, phi0, vx0, vy0) (0.) -#define MFF_3(phi, vx, vy, phi0, vx0, vy0) (phi * vx - phi0 * vx0) -#define MFF_4(phi, vx, vy, phi0, vx0, vy0) (-phi * vx + phi0 * vx0) -#define MFF_5(phi, vx, vy, phi0, vx0, vy0) (phi * vy - phi0 * vy0) -#define MFF_6(phi, vx, vy, phi0, vx0, vy0) (-phi * vy + phi0 * vy0) -#define MFF_7(phi, vx, vy, phi0, vx0, vy0) (0.) -#define MFF_8(phi, vx, vy, phi0, vx0, vy0) (0.) -////////////////////////////////////////////////////////// -#define diag0 (1.0 / 9.0) -#define diag1 (1.0 / 36.0) -#define diag2 (1.0 / 36.0) -#define diag3 (1.0 / 6.0) -#define diag4 (1.0 / 12.0) -#define diag5 (1.0 / 6.0) -#define diag6 (1.0 / 12.0) -#define diag7 (1.0 / 4.0) -#define diag8 (1.0 / 4.0) -//////////////////////////////////////////////////////////////////////////// -#define F_d(k, y, x) \ - F_d[(k)*NYNX + (y)*NX + (x)] //ֲFyxkoffset -#define G_d(k, y, x) \ - G_d[(k)*NYNX + (y)*NX + (x)] //ֲFyxkoffset -#define phi_d(y, x) phi_d[(y)*NX + (x)] -#define rho_d(y, x) rho_d[(y)*NX + (x)] -#define mu_d(y, x) mu_d[(y)*NX + (x)] -#define p_d(y, x) p_d[(y)*NX + (x)] -#define u_d(y, x) u_d[(y)*NX + (x)] -#define v_d(y, x) v_d[(y)*NX + (x)] -#define u0_d(y, x) u0_d[(y)*NX + (x)] -#define v0_d(y, x) v0_d[(y)*NX + (x)] -#define phi0_d(y, x) phi0_d[(y)*NX + (x)] - -/////////////////////////////////////////////////////////////////////////////////// -double *f_dev, *F_dev, *g_dev, *G_dev; //ֲf,gGPUڴ -double *phi_dev, *rho_dev, *p_dev, *phi0_dev, *mu_dev; -double *u_dev, *v_dev, *u0_dev, *v0_dev; -/////////////////////////////////////////////////////////////////////////////////// -__global__ void collision_propagation(double A, - double MM, - double w_f, - double w_g, - double ggy, - double *phi_d, - double *mu_d, - double *rho_d, - double *p_d, - double *u_d, - double *v_d, - double *phi0_d, - double *u0_d, - double *v0_d, - double *f_d, - double *F_d, - double *g_d, - double *G_d); -__global__ void Macro_rho(double *phi_d, - double *phi0_d, - double *rho_d, - double *f_d); -__global__ void Macro_mu(double Kappa, - double beta, - double *phi_d, - double *mu_d); -__global__ void Macro_u(double ggy, - double MM, - double *phi_d, - double *rho_d, - double *p_d, - double *mu_d, - double *u_d, - double *v_d, - double *u0_d, - double *v0_d, - double *g_d); - -//////////////////////////////////////////////////////////////////// -int main(int argc, char **argv) { - int m, k; - double err1; - - 
e[0][0] = e[0][1] = 0; - e[1][0] = 1; - e[1][1] = 0; - e[2][0] = 0; - e[2][1] = 1; - e[3][0] = -1; - e[3][1] = 0; - e[4][0] = 0; - e[4][1] = -1; - e[5][0] = 1; - e[5][1] = 1; - e[6][0] = -1; - e[6][1] = 1; - e[7][0] = -1; - e[7][1] = -1; - e[8][0] = 1; - e[8][1] = -1; - - w[0] = 4.0 / 9.0; - w[1] = w[2] = w[3] = w[4] = 1.0 / 9.0; - w[5] = w[6] = w[7] = w[8] = 1.0 / 36.0; - - for (k = 0; k < 50; k++) { - Re = 20.0 + k * 20.; - - init(); //ʼ - - err1 = 1.0; - m = 0; - - datadeal(m); //ʼu,v,rho - - int device = 0; - cudaSetDevice(device); - cudaDeviceProp properties; - cudaGetDeviceProperties(&properties, device); - printf("Simulation running on %s\n", properties.name); - - //////////////////////////////////////////// - dim3 threads(BX, BY); //ÿblockάС - dim3 grid((N1 + BX - 1) / BX, (M1 + BY - 1) / BY); // gridάС - - // GPUԴ: f_dev[], F_dev[] - cudaMalloc((void **)&f_dev, sizeof(double) * Q * NY * NX); - cudaMalloc((void **)&F_dev, sizeof(double) * Q * NY * NX); - cudaMalloc((void **)&g_dev, sizeof(double) * Q * NY * NX); - cudaMalloc((void **)&G_dev, sizeof(double) * Q * NY * NX); - cudaMalloc((void **)&phi_dev, sizeof(double) * NY * NX); - cudaMalloc((void **)&rho_dev, sizeof(double) * NY * NX); - cudaMalloc((void **)&p_dev, sizeof(double) * NY * NX); - cudaMalloc((void **)&phi0_dev, sizeof(double) * NY * NX); - cudaMalloc((void **)&mu_dev, sizeof(double) * NY * NX); - cudaMalloc((void **)&u_dev, sizeof(double) * NY * NX); - cudaMalloc((void **)&v_dev, sizeof(double) * NY * NX); - cudaMalloc((void **)&u0_dev, sizeof(double) * NY * NX); - cudaMalloc((void **)&v0_dev, sizeof(double) * NY * NX); - - // (GPU <= CPU): f_dev <= f - cudaMemcpy(f_dev, - &f[0][0][0], - sizeof(double) * Q * NY * NX, - cudaMemcpyHostToDevice); - cudaMemcpy(g_dev, - &g[0][0][0], - sizeof(double) * Q * NY * NX, - cudaMemcpyHostToDevice); - cudaMemcpy( - phi_dev, &phi[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); - cudaMemcpy( - rho_dev, &rho[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); - cudaMemcpy( - p_dev, &p[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); - cudaMemcpy(phi0_dev, - &phi0[0][0], - sizeof(double) * NY * NX, - cudaMemcpyHostToDevice); - cudaMemcpy( - mu_dev, &mu[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); - cudaMemcpy( - u_dev, &u[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); - cudaMemcpy( - v_dev, &v[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); - cudaMemcpy( - u0_dev, &u0[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); - cudaMemcpy( - v0_dev, &v0[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); - - - // time_begin=clock(); - while (m < Max) { - collision_propagation<<>>(A, - MM, - ww_f, - ww_g, - ggy, - phi_dev, - mu_dev, - rho_dev, - p_dev, - u_dev, - v_dev, - phi0_dev, - u0_dev, - v0_dev, - f_dev, - F_dev, - g_dev, - G_dev); - Macro_rho<<>>(phi_dev, phi0_dev, rho_dev, F_dev); - Macro_mu<<>>(Kappa, beta, phi_dev, mu_dev); - Macro_u<<>>(ggy, - MM, - phi_dev, - rho_dev, - p_dev, - mu_dev, - u_dev, - v_dev, - u0_dev, - v0_dev, - G_dev); - - collision_propagation<<>>(A, - MM, - ww_f, - ww_g, - ggy, - phi_dev, - mu_dev, - rho_dev, - p_dev, - u_dev, - v_dev, - phi0_dev, - u0_dev, - v0_dev, - F_dev, - f_dev, - G_dev, - g_dev); - Macro_rho<<>>(phi_dev, phi0_dev, rho_dev, f_dev); - Macro_mu<<>>(Kappa, beta, phi_dev, mu_dev); - Macro_u<<>>(ggy, - MM, - phi_dev, - rho_dev, - p_dev, - mu_dev, - u_dev, - v_dev, - u0_dev, - v0_dev, - g_dev); - - m = m + 2; - - if (m % 10000 == 0) { - cudaMemcpy(&f[0][0][0], - f_dev, - Q * NY 
* NX * sizeof(double), - cudaMemcpyDeviceToHost); - cudaMemcpy( - &u[0][0], u_dev, NY * NX * sizeof(double), cudaMemcpyDeviceToHost); - cudaMemcpy( - &v[0][0], v_dev, NY * NX * sizeof(double), cudaMemcpyDeviceToHost); - cudaMemcpy(&phi[0][0], - phi_dev, - NY * NX * sizeof(double), - cudaMemcpyDeviceToHost); - cudaMemcpy(&phi0[0][0], - phi0_dev, - NY * NX * sizeof(double), - cudaMemcpyDeviceToHost); - cudaMemcpy(&rho[0][0], - rho_dev, - NY * NX * sizeof(double), - cudaMemcpyDeviceToHost); - cudaMemcpy( - &p[0][0], p_dev, NY * NX * sizeof(double), cudaMemcpyDeviceToHost); - - datadeal(m); // - // X_s=A_spike( ); - // X_b=A_bulble( ); - err1 = error1(phi, phi0); - printf("t=%d err1=%e\n", m, err1); - } - } - // time_end=clock(); - // printf("The time is: %f seconds\n", - //(float)(time_end-time_begin)/CLOCKS_PER_SEC); - cudaFree(f_dev); - cudaFree(F_dev); - cudaFree(g_dev); - cudaFree(G_dev); - cudaFree(phi_dev); - cudaFree(rho_dev); - cudaFree(p_dev); - cudaFree(phi0_dev); - cudaFree(mu_dev); - cudaFree(u_dev); - cudaFree(v_dev); - cudaFree(u0_dev); - cudaFree(v0_dev); - } - - return 0; -} - - -void init() //ֲijʼflagijʼ -{ - double DDphi, mu0, uv, Ban, t_s; // u_ffƽ̬ٶ rho_ffܶ - double rhotal_1; //еܶȺ - int j, k, i, jp, kp; - double Pe, x, h, lamda, wl, uu, niu, tau_f, tau_g; - - beta = 12.0 * sigma / - (D * (phil - phig) * (phil - phig) * (phil - phig) * (phil - phig)); - Kappa = 1.5 * D * sigma / ((phil - phig) * (phil - phig)); - - lamda = 256.0; - wl = 256.0; - uu = 0.04; - ggy = -uu * uu / lamda; - - niu = uu * lamda / Re; - tau_g = 3.0 * niu / (dx) + 0.5; - tau_f = 0.8; - ww_f = 1.0 / tau_f; - ww_g = 1.0 / tau_g; - - Pe = 50.0; - MM = uu * D / (beta * Pe * (phil - phig) * (phil - phig)); - A = 3.0 * MM / ((tau_f - 0.5) * dt); - - Ban = sigma * (2. * pi / lamda) * (2. * pi / lamda) / (-(rhol - rhog) * ggy); - t_s = sqrt(-At * ggy / lamda); - - rhotal_1 = 0.; - for (j = 1; j <= M + 1; j++) { - for (k = N16; k <= N16 + N; k++) { - // h=0.6*NY+0.05*wl*cos(2.0*pi*k/wl); - h = 0.5 * NY + 0.05 * wl * cos(2.0 * pi * (k - N16) / wl); - x = 2. * (j - h) / D; - // phi[j][k]=0.5*(phil+phig)+0.5*(phil-phig)*tanh(x); - // h=865+5.36*cos(2.0*pi*(k-N16)/lamda+3.0); - // x=2.0*(j-h)/D; - phi[j][k] = 0.5 * (phil + phig) + 0.5 * (phil - phig) * tanh(x); - /* if(j>=h&&j<=NY) - { - phi[j][k]=phil; - } - else - { - phi[j][k]=phig; - }*/ - - rho[j][k] = (phi[j][k] - phig) * (rhol - rhog) / (phil - phig) + rhog; - - p[j][k] = 0.0; - u[j][k] = 0.0; - v[j][k] = 0.0; - - uv = 1.5 * (u[j][k] * u[j][k] + v[j][k] * v[j][k]); - - u0[j][k] = u[j][k]; - v0[j][k] = v[j][k]; - phi0[j][k] = phi[j][k]; - - - DDphi = 0.0; - for (i = 0; i < 9; i++) { - jp = j + e[i][1]; - kp = (k + e[i][0] + N1 - N16) % N1 + N16; - - if (jp < 1 || jp > M1) { - jp = j; - kp = k; - } - - DDphi += w[i] * phi[jp][kp]; - } - mu0 = 4. 
* beta * (phi[j][k] - phil) * (phi[j][k] - phig) * - (phi[j][k] - phim); - mu[j][k] = mu0 - Kappa * (6.0 * rdt * rdt * (DDphi - phi[j][k])); - - f[0][j][k] = FEQ_0(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - f[1][j][k] = FEQ_1(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - f[3][j][k] = FEQ_2(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - f[2][j][k] = FEQ_3(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - f[4][j][k] = FEQ_4(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - f[5][j][k] = FEQ_5(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - f[6][j][k] = FEQ_6(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - f[7][j][k] = FEQ_7(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - f[8][j][k] = FEQ_8(phi[j][k], u[j][k], v[j][k], mu[j][k], A); - - g[0][j][k] = GEQ_0(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - g[1][j][k] = GEQ_1(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - g[3][j][k] = GEQ_2(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - g[2][j][k] = GEQ_3(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - g[4][j][k] = GEQ_4(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - g[5][j][k] = GEQ_5(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - g[6][j][k] = GEQ_6(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - g[7][j][k] = GEQ_7(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - g[8][j][k] = GEQ_8(rho[j][k], p[j][k], u[j][k], v[j][k], uv); - - rhotal_1 += phi[j][k]; - } - } - printf( - "rhotal_1=%lf Re=%lf ggy=%e Pe=%lf tau_f=%lf tau_g=%lf niu=%e MM=%lf " - "A=%lf D=%lf sigma=%lf beta=%lf Kappa=%lf Ban=%lf t_s=%e\n", - rhotal_1, - Re, - ggy, - Pe, - tau_f, - tau_g, - niu, - MM, - A, - D, - sigma, - beta, - Kappa, - Ban, - t_s); -} - -/////////////////////////////////////////// - -__global__ void collision_propagation(double A, - double MM, - double w_f, - double w_g, - double ggy, - double *phi_d, - double *mu_d, - double *rho_d, - double *p_d, - double *u_d, - double *v_d, - double *phi0_d, - double *u0_d, - double *v0_d, - double *f_d, - double *F_d, - double *g_d, - double *G_d) { - int x, y, k; - - double f0, f1, f2, f3, f4, f5, f6, f7, f8; //ǰķֲ - double g0, g1, g2, g3, g4, g5, g6, g7, g8; - double mf0, mf1, mf2, mf3, mf4, mf5, mf6, mf7, mf8; - double mg0, mg1, mg2, mg3, mg4, mg5, mg6, mg7, mg8; - double GG0, GG1, GG2, GG3, GG4, GG5, GG6, GG7, GG8; - double mGG0, mGG1, mGG2, mGG3, mGG4, mGG5, mGG6, mGG7, mGG8; - double s_f0, s_f1, s_f2, s_f3, s_f4, s_f5, s_f6, s_f7, s_f8; - double s_g0, s_g1, s_g2, s_g3, s_g4, s_g5, s_g6, s_g7, s_g8; - double wx0, wy0, wx1, wy1, wx2, wy2, wx3, wy3, wx4, wy4, wx5, wy5, wx6, wy6, - wx7, wy7, wx8, wy8; - double vv; - double Drhox, Drhoy, Dphix, Dphiy, DDmu; - double Fx, Fy; - - x = N16 + blockIdx.x * BX + threadIdx.x; // f_d, F_d ± - y = 1 + blockIdx.y * BY; // f_d, F_d ± - k = NX * y + x; // f_d, F_dһάoffset - - if (x <= N + N16) { - f0 = f_d[k + 0 * NYNX]; - f1 = f_d[k + 1 * NYNX]; - f2 = f_d[k + 2 * NYNX]; - f3 = f_d[k + 3 * NYNX]; - f4 = f_d[k + 4 * NYNX]; - f5 = f_d[k + 5 * NYNX]; - f6 = f_d[k + 6 * NYNX]; - f7 = f_d[k + 7 * NYNX]; - f8 = f_d[k + 8 * NYNX]; - - g0 = g_d[k + 0 * NYNX]; - g1 = g_d[k + 1 * NYNX]; - g2 = g_d[k + 2 * NYNX]; - g3 = g_d[k + 3 * NYNX]; - g4 = g_d[k + 4 * NYNX]; - g5 = g_d[k + 5 * NYNX]; - g6 = g_d[k + 6 * NYNX]; - g7 = g_d[k + 7 * NYNX]; - g8 = g_d[k + 8 * NYNX]; - - mf0 = f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8; - mf1 = -4 * f0 - f1 - f2 - f3 - f4 + 2 * (f5 + f6 + f7 + f8); - mf2 = 4 * f0 - 2 * (f1 + f2 + f3 + f4) + f5 + f6 + f7 + f8; - mf3 = f1 - f3 + f5 - f6 - f7 + f8; - mf4 = -2 * (f1 - f3) + f5 - f6 - f7 + f8; - mf5 = f2 - f4 + f5 + f6 - f7 - f8; - mf6 = -2 * (f2 - f4) + f5 + f6 
- f7 - f8; - mf7 = f1 - f2 + f3 - f4; - mf8 = f5 - f6 + f7 - f8; - - mg0 = g0 + g1 + g2 + g3 + g4 + g5 + g6 + g7 + g8; - mg1 = -4 * g0 - g1 - g2 - g3 - g4 + 2 * (g5 + g6 + g7 + g8); - mg2 = 4 * g0 - 2 * (g1 + g2 + g3 + g4) + g5 + g6 + g7 + g8; - mg3 = g1 - g3 + g5 - g6 - g7 + g8; - mg4 = -2 * (g1 - g3) + g5 - g6 - g7 + g8; - mg5 = g2 - g4 + g5 + g6 - g7 - g8; - mg6 = -2 * (g2 - g4) + g5 + g6 - g7 - g8; - mg7 = g1 - g2 + g3 - g4; - mg8 = g5 - g6 + g7 - g8; - - s_f3 = s_f5 = w_f; - s_f0 = 1.0; - s_f7 = s_f8 = 1.0; - s_f1 = s_f2 = 1.3; - s_f4 = s_f6 = 1.3; - /* - s_f3=s_f5=w_f; - s_f0=w_f; s_f7=s_f8=w_f; - s_f1=s_f2=w_f; - s_f4=s_f6=w_f;*/ - - s_g0 = s_g3 = s_g5 = 1.0; - s_g1 = 1.0; - s_g2 = 1.0; - s_g4 = s_g6 = 1.7; - s_g7 = s_g8 = w_g; - - Drhox = Drhoy = 0.0; - Dphix = Dphiy = 0.0; - DDmu = 0.0; - - int xd = (x + 1 - N16 + N1) % N1 + N16; - int xs = (x - 1 - N16 + N1) % N1 + N16; - // 1 - Drhox += 1.0 / 9.0 * rho_d(y, xd); - Dphix += 1.0 / 9.0 * phi_d(y, xd); - DDmu += 1.0 / 9.0 * (mu_d(y, xd) - mu_d(y, x)); - - // 3 - Drhox -= 1.0 / 9.0 * rho_d(y, xs); - Dphix -= 1.0 / 9.0 * phi_d(y, xs); - DDmu += 1.0 / 9.0 * (mu_d(y, xs) - mu_d(y, x)); - // 478 - if ((y - 1) >= 1) { - Drhoy -= 1.0 / 9.0 * rho_d(y - 1, x); // 4 - Dphiy -= 1.0 / 9.0 * phi_d(y - 1, x); - DDmu += 1.0 / 9.0 * (mu_d(y - 1, x) - mu_d(y, x)); - - Drhox -= 1.0 / 36.0 * rho_d(y - 1, xs); // 7 - Drhoy -= 1.0 / 36.0 * rho_d(y - 1, xs); - Dphix -= 1.0 / 36.0 * phi_d(y - 1, xs); - Dphiy -= 1.0 / 36.0 * phi_d(y - 1, xs); - - DDmu += 1.0 / 36.0 * (mu_d(y - 1, xs) - mu_d(y, x)); - - Drhox += 1.0 / 36.0 * rho_d(y - 1, xd); // 8 - Drhoy -= 1.0 / 36.0 * rho_d(y - 1, xd); - Dphix += 1.0 / 36.0 * phi_d(y - 1, xd); - Dphiy -= 1.0 / 36.0 * phi_d(y - 1, xd); - - DDmu += 1.0 / 36.0 * (mu_d(y - 1, xd) - mu_d(y, x)); - } else { - Drhoy -= 1.0 / 9.0 * rho_d(y, x); // 4 - Dphiy -= 1.0 / 9.0 * phi_d(y, x); - - Drhox -= 1.0 / 36.0 * rho_d(y, x); // 7 - Drhoy -= 1.0 / 36.0 * rho_d(y, x); - Dphix -= 1.0 / 36.0 * phi_d(y, x); - Dphiy -= 1.0 / 36.0 * phi_d(y, x); - - Drhox += 1.0 / 36.0 * rho_d(y, x); // 8 - Drhoy -= 1.0 / 36.0 * rho_d(y, x); - Dphix += 1.0 / 36.0 * phi_d(y, x); - Dphiy -= 1.0 / 36.0 * phi_d(y, x); - } - // 256 - if ((y + 1) <= M1) { - Drhoy += 1.0 / 9.0 * rho_d(y + 1, x); // 2 - Dphiy += 1.0 / 9.0 * phi_d(y + 1, x); - DDmu += 1.0 / 9.0 * (mu_d(y + 1, x) - mu_d(y, x)); - - Drhox += 1.0 / 36.0 * rho_d(y + 1, xd); // 5 - Drhoy += 1.0 / 36.0 * rho_d(y + 1, xd); - Dphix += 1.0 / 36.0 * phi_d(y + 1, xd); - Dphiy += 1.0 / 36.0 * phi_d(y + 1, xd); - - DDmu += 1.0 / 36.0 * (mu_d(y + 1, xd) - mu_d(y, x)); - - Drhox -= 1.0 / 36.0 * rho_d(y + 1, xs); // 6 - Drhoy += 1.0 / 36.0 * rho_d(y + 1, xs); - Dphix -= 1.0 / 36.0 * phi_d(y + 1, xs); - Dphiy += 1.0 / 36.0 * phi_d(y + 1, xs); - - DDmu += 1.0 / 36.0 * (mu_d(y + 1, xs) - mu_d(y, x)); - } else { - Drhoy += 1.0 / 9.0 * rho_d(y, x); // 2 - Dphiy += 1.0 / 9.0 * phi_d(y, x); - - Drhox += 1.0 / 36.0 * rho_d(y, x); // 5 - Drhoy += 1.0 / 36.0 * rho_d(y, x); - Dphix += 1.0 / 36.0 * phi_d(y, x); - Dphiy += 1.0 / 36.0 * phi_d(y, x); - - Drhox -= 1.0 / 36.0 * rho_d(y, x); // 6 - Drhoy += 1.0 / 36.0 * rho_d(y, x); - Dphix -= 1.0 / 36.0 * phi_d(y, x); - Dphiy += 1.0 / 36.0 * phi_d(y, x); - } - - Fx = 3.0 * mu_d(y, x) * Dphix * rdt + - 3.0 * u_d(y, x) * (rhol - rhog) * MM * DDmu * rdt * rdt; // Fx=Fsx+Fax - Fy = 3.0 * mu_d(y, x) * Dphiy * rdt + - 3.0 * v_d(y, x) * (rhol - rhog) * MM * DDmu * rdt * rdt; - - vv = 1.5 * (u_d(y, x) * u_d(y, x) + v_d(y, x) * v_d(y, x)); - - wx0 = (WEQ_0(u_d(y, x), v_d(y, x), vv) * Fx 
+ - (WEQ_0(u_d(y, x), v_d(y, x), vv) - 4.0 / 9.0) * rdt * Drhox); - wy0 = (WEQ_0(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_0(u_d(y, x), v_d(y, x), vv) - 4.0 / 9.0) * rdt * Drhoy); - wx1 = (WEQ_1(u_d(y, x), v_d(y, x), vv) * Fx + - (WEQ_1(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhox); - wy1 = (WEQ_1(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_1(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhoy); - wx2 = (WEQ_2(u_d(y, x), v_d(y, x), vv) * Fx + - (WEQ_2(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhox); - wy2 = (WEQ_2(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_2(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhoy); - wx3 = (WEQ_3(u_d(y, x), v_d(y, x), vv) * Fx + - (WEQ_3(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhox); - wy3 = (WEQ_3(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_3(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhoy); - wx4 = (WEQ_4(u_d(y, x), v_d(y, x), vv) * Fx + - (WEQ_4(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhox); - wy4 = (WEQ_4(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_4(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhoy); - wx5 = (WEQ_5(u_d(y, x), v_d(y, x), vv) * Fx + - (WEQ_5(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhox); - wy5 = (WEQ_5(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_5(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhoy); - wx6 = (WEQ_6(u_d(y, x), v_d(y, x), vv) * Fx + - (WEQ_6(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhox); - wy6 = (WEQ_6(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_6(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhoy); - wx7 = (WEQ_7(u_d(y, x), v_d(y, x), vv) * Fx + - (WEQ_7(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhox); - wy7 = (WEQ_7(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_7(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhoy); - wx8 = (WEQ_8(u_d(y, x), v_d(y, x), vv) * Fx + - (WEQ_8(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhox); - wy8 = (WEQ_8(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + - (WEQ_8(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhoy); - - GG0 = 3.0 * ((-u_d(y, x)) * wx0 + (-v_d(y, x)) * wy0); - GG1 = 3.0 * ((1 - u_d(y, x)) * wx1 + (-v_d(y, x)) * wy1); - GG2 = 3.0 * ((-u_d(y, x)) * wx2 + (1 - v_d(y, x)) * wy2); - GG3 = 3.0 * ((-1 - u_d(y, x)) * wx3 + (-v_d(y, x)) * wy3); - GG4 = 3.0 * ((-u_d(y, x)) * wx4 + (-1 - v_d(y, x)) * wy4); - GG5 = 3.0 * ((1 - u_d(y, x)) * wx5 + (1 - v_d(y, x)) * wy5); - GG6 = 3.0 * ((-1 - u_d(y, x)) * wx6 + (1 - v_d(y, x)) * wy6); - GG7 = 3.0 * ((-1 - u_d(y, x)) * wx7 + (-1 - v_d(y, x)) * wy7); - GG8 = 3.0 * ((1 - u_d(y, x)) * wx8 + (-1 - v_d(y, x)) * wy8); - - mGG0 = GG0 + GG1 + GG2 + GG3 + GG4 + GG5 + GG6 + GG7 + GG8; - mGG1 = -4 * GG0 - GG1 - GG2 - GG3 - GG4 + 2 * (GG5 + GG6 + GG7 + GG8); - mGG2 = 4 * GG0 - 2 * (GG1 + GG2 + GG3 + GG4) + GG5 + GG6 + GG7 + GG8; - mGG3 = GG1 - GG3 + GG5 - GG6 - GG7 + GG8; - mGG4 = -2 * (GG1 - GG3) + GG5 - GG6 - GG7 + GG8; - mGG5 = GG2 - GG4 + GG5 + GG6 - GG7 - GG8; - mGG6 = -2 * (GG2 - GG4) + GG5 + GG6 - GG7 - GG8; - mGG7 = GG1 - GG2 + GG3 - GG4; - mGG8 = GG5 - GG6 + GG7 - GG8; - - - mf0 = diag0 * - (mf0 - - s_f0 * (mf0 - - MFEQ_0(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f0) * MFF_0(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - mf1 = diag1 * - (mf1 - - s_f1 * (mf1 - - MFEQ_1(phi_d(y, 
x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f1) * MFF_1(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - mf2 = diag2 * - (mf2 - - s_f2 * (mf2 - - MFEQ_2(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f2) * MFF_2(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - mf3 = diag3 * - (mf3 - - s_f3 * (mf3 - - MFEQ_3(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f3) * MFF_3(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - mf4 = diag4 * - (mf4 - - s_f4 * (mf4 - - MFEQ_4(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f4) * MFF_4(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - mf5 = diag5 * - (mf5 - - s_f5 * (mf5 - - MFEQ_5(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f5) * MFF_5(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - mf6 = diag6 * - (mf6 - - s_f6 * (mf6 - - MFEQ_6(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f6) * MFF_6(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - mf7 = diag7 * - (mf7 - - s_f7 * (mf7 - - MFEQ_7(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f7) * MFF_7(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - mf8 = diag8 * - (mf8 - - s_f8 * (mf8 - - MFEQ_8(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + - (1 - 0.5 * s_f8) * MFF_8(phi_d(y, x), - u_d(y, x), - v_d(y, x), - phi0_d(y, x), - u0_d(y, x), - v0_d(y, x))); - - mg0 = diag0 * - (mg0 - - s_g0 * (mg0 - MGEQ_0(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g0) * mGG0); - mg1 = diag1 * - (mg1 - - s_g1 * (mg1 - MGEQ_1(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g1) * mGG1); - mg2 = diag2 * - (mg2 - - s_g2 * (mg2 - MGEQ_2(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g2) * mGG2); - mg3 = diag3 * - (mg3 - - s_g3 * (mg3 - MGEQ_3(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g3) * mGG3); - mg4 = diag4 * - (mg4 - - s_g4 * (mg4 - MGEQ_4(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g4) * mGG4); - mg5 = diag5 * - (mg5 - - s_g5 * (mg5 - MGEQ_5(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g5) * mGG5); - mg6 = diag6 * - (mg6 - - s_g6 * (mg6 - MGEQ_6(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g6) * mGG6); - mg7 = diag7 * - (mg7 - - s_g7 * (mg7 - MGEQ_7(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g7) * mGG7); - mg8 = diag8 * - (mg8 - - s_g8 * (mg8 - MGEQ_8(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + - dt * (1 - 0.5 * s_g8) * mGG8); - - f0 = mf0 - 4 * (mf1 - mf2); - f1 = mf0 - mf1 - 2 * (mf2 + mf4) + mf3 + mf7; - f2 = mf0 - mf1 - 2 * (mf2 + mf6) + mf5 - mf7; - f3 = mf0 - mf1 - 2 * (mf2 - mf4) - mf3 + mf7; - f4 = mf0 - mf1 - 2 * (mf2 - mf6) - mf5 - mf7; - f5 = mf0 + mf1 + mf1 + mf2 + mf3 + mf4 + mf5 + mf6 + mf8; - f6 = mf0 + mf1 + mf1 + mf2 - mf3 - mf4 + mf5 + mf6 - mf8; - f7 = mf0 + mf1 + mf1 + mf2 - mf3 - mf4 - mf5 - mf6 + mf8; - f8 = mf0 + mf1 + mf1 + mf2 + mf3 + mf4 - mf5 - mf6 - mf8; - - g0 = mg0 - 4 * (mg1 - mg2); - g1 = mg0 - mg1 - 2 * (mg2 + mg4) + mg3 + mg7; - g2 = mg0 - mg1 - 2 * (mg2 + mg6) + mg5 - mg7; - g3 = mg0 - mg1 - 2 * (mg2 - mg4) - mg3 + mg7; - g4 = mg0 - mg1 - 2 * (mg2 - mg6) - mg5 - mg7; - g5 = mg0 + mg1 + mg1 + mg2 + 
mg3 + mg4 + mg5 + mg6 + mg8; - g6 = mg0 + mg1 + mg1 + mg2 - mg3 - mg4 + mg5 + mg6 - mg8; - g7 = mg0 + mg1 + mg1 + mg2 - mg3 - mg4 - mg5 - mg6 + mg8; - g8 = mg0 + mg1 + mg1 + mg2 + mg3 + mg4 - mg5 - mg6 - mg8; - - - // 0 1 3 - F_d[k] = f0; - G_d[k] = g0; - F_d(1, y, xd) = f1; - G_d(1, y, xd) = g1; - F_d(3, y, xs) = f3; - G_d(3, y, xs) = g3; - // 2 5 6 - if ((y + 1) <= M1) { - F_d(2, y + 1, x) = f2; - G_d(2, y + 1, x) = g2; - F_d(5, y + 1, xd) = f5; - G_d(5, y + 1, xd) = g5; - F_d(6, y + 1, xs) = f6; - G_d(6, y + 1, xs) = g6; - } else { - F_d(4, y, x) = f2; - G_d(4, y, x) = g2; - F_d(7, y, x) = f5; - G_d(7, y, x) = g5; - F_d(8, y, x) = f6; - G_d(8, y, x) = g6; - } - // 4 7 8 - if ((y - 1) >= 1) { - F_d(4, y - 1, x) = f4; - G_d(4, y - 1, x) = g4; - F_d(7, y - 1, xs) = f7; - G_d(7, y - 1, xs) = g7; - F_d(8, y - 1, xd) = f8; - G_d(8, y - 1, xd) = g8; - } else { - F_d(2, y, x) = f4; - G_d(2, y, x) = g4; - F_d(5, y, x) = f7; - G_d(5, y, x) = g7; - F_d(6, y, x) = f8; - G_d(6, y, x) = g8; - } - } -} - -__global__ void Macro_rho(double *phi_d, - double *phi0_d, - double *rho_d, - double *f_d) { - int x, y, k; - double f0, f1, f2, f3, f4, f5, f6, f7, f8; - - x = N16 + blockIdx.x * BX + threadIdx.x; // f_d, F_d ±x - y = 1 + blockIdx.y * BY; // f_d, F_d ±y - k = NX * y + x; - - if (x <= N + N16) { - f0 = f_d[k + 0 * NYNX]; - f1 = f_d[k + 1 * NYNX]; - f2 = f_d[k + 2 * NYNX]; - f3 = f_d[k + 3 * NYNX]; - f4 = f_d[k + 4 * NYNX]; - f5 = f_d[k + 5 * NYNX]; - f6 = f_d[k + 6 * NYNX]; - f7 = f_d[k + 7 * NYNX]; - f8 = f_d[k + 8 * NYNX]; - - phi0_d(y, x) = phi_d(y, x); - - phi_d(y, x) = f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8; - rho_d(y, x) = 0.5 * (phi_d(y, x) - phig) * (rhol - rhog) + rhog; - } -} - -__global__ void Macro_mu(double Kappa, - double beta, - double *phi_d, - double *mu_d) { - int x, y; - - double DDphi, mu0; - - x = N16 + blockIdx.x * BX + threadIdx.x; // f_d, F_d ±x - y = 1 + blockIdx.y * BY; // f_d, F_d ±y - - - if (x <= N + N16) { - int xd = (x + 1 - N16 + N1) % N1 + N16; - int xs = (x - 1 - N16 + N1) % N1 + N16; - - DDphi = 0.0; - DDphi += 1.0 / 9.0 * (phi_d(y, xd) - phi_d(y, x)); // 1 - DDphi += 1.0 / 9.0 * (phi_d(y, xs) - phi_d(y, x)); // 3 - if ((y + 1) <= M1) { - DDphi += 1.0 / 9.0 * (phi_d(y + 1, x) - phi_d(y, x)); // 2 - DDphi += 1.0 / 36.0 * (phi_d(y + 1, xd) - phi_d(y, x)); // 5 - DDphi += 1.0 / 36.0 * (phi_d(y + 1, xs) - phi_d(y, x)); // 6 - } - if ((y - 1) >= 1) { - DDphi += 1.0 / 9.0 * (phi_d(y - 1, x) - phi_d(y, x)); // 4 - DDphi += 1.0 / 36.0 * (phi_d(y - 1, xs) - phi_d(y, x)); // 7 - DDphi += 1.0 / 36.0 * (phi_d(y - 1, xd) - phi_d(y, x)); // 8 - } - - mu0 = 4 * beta * (phi_d(y, x) - phil) * (phi_d(y, x) - phig) * - (phi_d(y, x) - phim); - mu_d(y, x) = mu0 - 6.0 * Kappa * rdt * rdt * DDphi; - } -} - -__global__ void Macro_u(double ggy, - double MM, - double *phi_d, - double *rho_d, - double *p_d, - double *mu_d, - double *u_d, - double *v_d, - double *u0_d, - double *v0_d, - double *g_d) { - int x, y, k; - - double g1, g2, g3, g4, g5, g6, g7, g8; - - double Drhox, Drhoy, Dphix, Dphiy, DDmu; - double UX, UY, rF, FFa, s0u, udrho, Fsx, Fsy; - - x = N16 + blockIdx.x * BX + threadIdx.x; // f_d, F_d ±x - y = 1 + blockIdx.y * BY; // f_d, F_d ±y - k = NX * y + x; - - if (x <= N + N16) { - // g0 = g_d[k+0*NYNX]; - g1 = g_d[k + 1 * NYNX]; - g2 = g_d[k + 2 * NYNX]; - g3 = g_d[k + 3 * NYNX]; - g4 = g_d[k + 4 * NYNX]; - g5 = g_d[k + 5 * NYNX]; - g6 = g_d[k + 6 * NYNX]; - g7 = g_d[k + 7 * NYNX]; - g8 = g_d[k + 8 * NYNX]; - - u0_d(y, x) = u_d(y, x); - v0_d(y, x) = v_d(y, x); - - int xd = (x + 
1 - N16 + N1) % N1 + N16; - int xs = (x - 1 - N16 + N1) % N1 + N16; - - Drhox = Drhoy = 0.0; - Dphix = Dphiy = DDmu = 0.0; - - Drhox += 1.0 / 9.0 * rho_d(y, xd); // 1 - Dphix += 1.0 / 9.0 * phi_d(y, xd); - DDmu += 1.0 / 9.0 * (mu_d(y, xd) - mu_d(y, x)); - - // 3 - Drhox -= 1.0 / 9.0 * rho_d(y, xs); - Dphix -= 1.0 / 9.0 * phi_d(y, xs); - DDmu += 1.0 / 9.0 * (mu_d(y, xs) - mu_d(y, x)); - // 478 - if ((y - 1) >= 1) { - Drhoy -= 1.0 / 9.0 * rho_d(y - 1, x); // 4 - Dphiy -= 1.0 / 9.0 * phi_d(y - 1, x); - DDmu += 1.0 / 9.0 * (mu_d(y - 1, x) - mu_d(y, x)); - - Drhox -= 1.0 / 36.0 * rho_d(y - 1, xs); // 7 - Drhoy -= 1.0 / 36.0 * rho_d(y - 1, xs); - Dphix -= 1.0 / 36.0 * phi_d(y - 1, xs); - Dphiy -= 1.0 / 36.0 * phi_d(y - 1, xs); - - DDmu += 1.0 / 36.0 * (mu_d(y - 1, xs) - mu_d(y, x)); - - - Drhox += 1.0 / 36.0 * rho_d(y - 1, xd); // 8 - Drhoy -= 1.0 / 36.0 * rho_d(y - 1, xd); - Dphix += 1.0 / 36.0 * phi_d(y - 1, xd); - Dphiy -= 1.0 / 36.0 * phi_d(y - 1, xd); - - DDmu += 1.0 / 36.0 * (mu_d(y - 1, xd) - mu_d(y, x)); - } else { - Drhoy -= 1.0 / 9.0 * rho_d(y, x); // 4 - Dphiy -= 1.0 / 9.0 * phi_d(y, x); - - Drhox -= 1.0 / 36.0 * rho_d(y, x); // 7 - Drhoy -= 1.0 / 36.0 * rho_d(y, x); - Dphix -= 1.0 / 36.0 * phi_d(y, x); - Dphiy -= 1.0 / 36.0 * phi_d(y, x); - - Drhox += 1.0 / 36.0 * rho_d(y, x); // 8 - Drhoy -= 1.0 / 36.0 * rho_d(y, x); - Dphix += 1.0 / 36.0 * phi_d(y, x); - Dphiy -= 1.0 / 36.0 * phi_d(y, x); - } - // 256 - if ((y + 1) <= M1) { - Drhoy += 1.0 / 9.0 * rho_d(y + 1, x); // 2 - Dphiy += 1.0 / 9.0 * phi_d(y + 1, x); - DDmu += 1.0 / 9.0 * (mu_d(y + 1, x) - mu_d(y, x)); - - Drhox += 1.0 / 36.0 * rho_d(y + 1, xd); // 5 - Drhoy += 1.0 / 36.0 * rho_d(y + 1, xd); - Dphix += 1.0 / 36.0 * phi_d(y + 1, xd); - Dphiy += 1.0 / 36.0 * phi_d(y + 1, xd); - - DDmu += 1.0 / 36.0 * (mu_d(y + 1, xd) - mu_d(y, x)); - - Drhox -= 1.0 / 36.0 * rho_d(y + 1, xs); // 6 - Drhoy += 1.0 / 36.0 * rho_d(y + 1, xs); - Dphix -= 1.0 / 36.0 * phi_d(y + 1, xs); - Dphiy += 1.0 / 36.0 * phi_d(y + 1, xs); - - DDmu += 1.0 / 36.0 * (mu_d(y + 1, xs) - mu_d(y, x)); - } else { - Drhoy += 1.0 / 9.0 * rho_d(y, x); // 2 - - Dphiy += 1.0 / 9.0 * phi_d(y, x); - Drhox += 1.0 / 36.0 * rho_d(y, x); // 5 - Drhoy += 1.0 / 36.0 * rho_d(y, x); - Dphix += 1.0 / 36.0 * phi_d(y, x); - Dphiy += 1.0 / 36.0 * phi_d(y, x); - - Drhox -= 1.0 / 36.0 * rho_d(y, x); // 6 - Drhoy += 1.0 / 36.0 * rho_d(y, x); - Dphix -= 1.0 / 36.0 * phi_d(y, x); - Dphiy += 1.0 / 36.0 * phi_d(y, x); - } - - p_d(y, x) = g1 + g2 + g3 + g4 + g5 + g6 + g7 + g8; - - UX = g1 - g3 + g5 + g8 - g6 - g7; - UY = g2 - g4 + g5 + g6 - g7 - g8; - - Fsx = 3.0 * mu_d(y, x) * rdt * Dphix; - Fsy = 3.0 * mu_d(y, x) * rdt * Dphiy; - - FFa = 1.5 * (rhol - rhog) * MM * (rdt * DDmu); - - rF = 1.0 / (rho_d(y, x) - FFa); - u_d(y, x) = (UX + 0.5 * dt * Fsx) * rF; - v_d(y, x) = (UY + 0.5 * dt * (Fsy + (rho_d(y, x) - rhom) * ggy)) * rF; - - s0u = -2.0 / 3.0 * (u_d(y, x) * u_d(y, x) + v_d(y, x) * v_d(y, x)); - udrho = 3.0 * (u_d(y, x) * Drhox + v_d(y, x) * Drhoy); - p_d(y, x) = 0.6 * (p_d(y, x) + 0.5 * udrho + rho_d(y, x) * s0u); - } -} - -double error1(double phi[NY][NX], double phi0[NY][NX]) { - int x, y; - double temp1 = 0, temp2 = 0, error; - - for (y = 1; y <= M1; y++) { - for (x = N16; x <= N16 + N; x++) { - temp1 += fabs(phi[y][x] - phi0[y][x]); - temp2 += fabs(phi[y][x]); - } - } - error = temp1 / temp2; - return (error); -} - -void datadeal(int t) //㡢 -{ - int x, y; - int Reint; - double rhotal_2; - FILE *fp; - char filename[50]; - - Reint = int(Re); - sprintf(filename, "./Output/%s%.4d%s%.8d", 
"Re", Reint, "phi", t); - - rhotal_2 = 0; - for (y = 1; y <= M1; y++) { - for (x = N16; x <= N16 + N; x++) { - rhotal_2 += phi[y][x]; - } - } - printf("rhotal_2=%lf\n", rhotal_2); - - fp = fopen(filename, "w"); - for (y = 1; y <= M1; y++) { - for (x = N16; x <= N16 + N; x++) { - fprintf(fp, "%e ", phi[y][x]); - } - fprintf(fp, "\n"); - } - fclose(fp); - /* -sprintf(filename,"%s%d","ux",t); - fp=fopen(filename,"w"); - for(y=1;y<=M+1;y++) -{ -for(x=N16;x<=N16+N;x++) -{ - fprintf(fp,"%e ",ux[y][x]); -} - fprintf(fp,"\n"); -} -fclose(fp); - -sprintf(filename,"%s%d","uy",t); - fp=fopen(filename,"w"); - for(y=1;y<=M+1;y++) -{ -for(x=N16;x<=N16+N;x++) -{ - fprintf(fp,"%e ",uy[y][x]); -} -fprintf(fp,"\n"); -} -fclose(fp);*/ -} -/* - double A_spike( ) - { - int j, k, flag; - double ls; - - for(k=N16;k<=N16+N;k++) - { - for(j=1;j<=M1;j++) - { - if(phi[j][k]>=-0.01&&phi[j][k]<=0.01) - { - ls=NY/2.0-j; - flag=1; - break; - } - - } - if(flag==1) - { - break; - } - } - - return ls; - } - -double A_bulble( ) -{ - int j, k, flag; - double lb; - for(k=N16;k<=N16+N;k++) - { - for(j=M1;j>=1;j--) - { - if(phi[j][k]>=-0.01&&phi[j][k]<=0.01) - { - lb=j-NY/2.0; - flag=1; - break; - } - } - if(flag==1) - { - break; - } - } - return lb; - }*/ +#include +#include +#include +#include +#include +//#include + +#define M 1024 // grid in y-direction +#define N 256 // grid in x-direction +#define M1 (M + 1) // number of grid in y-direction +#define N1 (N + 1) // number of grid in x-direction + +#define At 0.1 +#define rhol 1.0 +#define rhog (rhol * (1 - At) / (1 + At)) +#define rhom (0.5 * (rhol + rhog)) + +#define phil 1.0 +#define phig (-1.0) +#define phim (0.5 * (phil + phig)) + +#define D 4.0 +//#define sigma 0.0000526 +#define sigma (5.0e-5) + +#define pi 3.1415926535897932 +#define Max 80000 + +#define Q 9 // 9 velocities in LBM + +#define BX 128 //ÿһblockĴС +#define BY 1 + +#define dx 1.0 // c=1.0 +#define dt 1.0 +#define rdt 1.0 + +const int N16 = 16; +const int NX = (N1 + N16 + N16) / 16 * 16; //16չ N16N16+N1-1 +const int NY = M1 + 2; // 1չ 1M1 +const int NYNX = NY * NX; + +void init(); //ʼ +void datadeal(int step); // +double error1(double phi[NY][NX], double phi0[NY][NX]); //fܶ +// double A_spike( ); +// double A_bulble( ); + +double f[Q][NY][NX] = {0.0}, + g[Q][NY][NX] = {0.0}; //CPUֲ f(Һ) g()Ŀռ +double phi[NY][NX] = {0.0}, rho[NY][NX] = {0.0}, p[NY][NX] = {0.0}, + mu[NY][NX] = {0.0}; +double u[NY][NX] = {0.0}, v[NY][NX] = {0.0}; +double phi0[NY][NX] = {0.0}, u0[NY][NX] = {0.0}, v0[NY][NX] = {0.0}; + +int e[Q][2], w[Q]; + +double Re; +double ww_f, ww_g; // w=1/tau +double beta, Kappa, MM, A, ggy; + +////////////////////////////////////////////////////////// +#define FEQ_0(phi, vx, vy, mu, A) \ + (phi - 5.0 / 9.0 * A * mu) //ٶvx,vyûгc, vv=1.5*(vx*vx+vy*vy) +#define FEQ_1(phi, vx, vy, mu, A) (1.0 / 9.0 * (A * mu + 3.0 * phi * vx)) +#define FEQ_2(phi, vx, vy, mu, A) (1.0 / 9.0 * (A * mu + 3.0 * phi * vy)) +#define FEQ_3(phi, vx, vy, mu, A) (1.0 / 9.0 * (A * mu - 3.0 * phi * vx)) +#define FEQ_4(phi, vx, vy, mu, A) (1.0 / 9.0 * (A * mu - 3.0 * phi * vy)) +#define FEQ_5(phi, vx, vy, mu, A) \ + (1.0 / 36.0 * (A * mu + 3.0 * phi * (vx + vy))) +#define FEQ_6(phi, vx, vy, mu, A) \ + (1.0 / 36.0 * (A * mu + 3.0 * phi * (-vx + vy))) +#define FEQ_7(phi, vx, vy, mu, A) \ + (1.0 / 36.0 * (A * mu + 3.0 * phi * (-vx - vy))) +#define FEQ_8(phi, vx, vy, mu, A) \ + (1.0 / 36.0 * (A * mu + 3.0 * phi * (vx - vy))) +////////////////////////////////////////////////////////// +#define GEQ_0(rho, p, vx, vy, vv) \ + (4.0 / 9.0 * (3.0 * 
p - rho * vv) - \ + 3 * p) //ٶvx,vyûгc, vv=1.5*(vx*vx+vy*vy) +#define GEQ_1(rho, p, vx, vy, vv) \ + (1.0 / 9.0 * (3.0 * p + rho * (3.0 * vx + 4.5 * vx * vx - vv))) +#define GEQ_2(rho, p, vx, vy, vv) \ + (1.0 / 9.0 * (3.0 * p + rho * (3.0 * vy + 4.5 * vy * vy - vv))) +#define GEQ_3(rho, p, vx, vy, vv) \ + (1.0 / 9.0 * (3.0 * p + rho * (-3.0 * vx + 4.5 * vx * vx - vv))) +#define GEQ_4(rho, p, vx, vy, vv) \ + (1.0 / 9.0 * (3.0 * p + rho * (-3.0 * vy + 4.5 * vy * vy - vv))) +#define GEQ_5(rho, p, vx, vy, vv) \ + (1.0 / 36.0 * \ + (3.0 * p + rho * (3.0 * (vx + vy) + 4.5 * (vx + vy) * (vx + vy) - vv))) +#define GEQ_6(rho, p, vx, vy, vv) \ + (1.0 / 36.0 * \ + (3.0 * p + rho * (3.0 * (-vx + vy) + 4.5 * (-vx + vy) * (-vx + vy) - vv))) +#define GEQ_7(rho, p, vx, vy, vv) \ + (1.0 / 36.0 * \ + (3.0 * p + rho * (3.0 * (-vx - vy) + 4.5 * (-vx - vy) * (-vx - vy) - vv))) +#define GEQ_8(rho, p, vx, vy, vv) \ + (1.0 / 36.0 * \ + (3.0 * p + rho * (3.0 * (vx - vy) + 4.5 * (vx - vy) * (vx - vy) - vv))) +////////////////////////////////////////////////////////// +#define WEQ_0(vx, vy, vv) (4.0 / 9.0 * (1.0 - vv)) +#define WEQ_1(vx, vy, vv) (1.0 / 9.0 * (1.0 + 3.0 * vx + 4.5 * vx * vx - vv)) +#define WEQ_2(vx, vy, vv) (1.0 / 9.0 * (1.0 + 3.0 * vy + 4.5 * vy * vy - vv)) +#define WEQ_3(vx, vy, vv) (1.0 / 9.0 * (1.0 - 3.0 * vx + 4.5 * vx * vx - vv)) +#define WEQ_4(vx, vy, vv) (1.0 / 9.0 * (1.0 - 3.0 * vy + 4.5 * vy * vy - vv)) +#define WEQ_5(vx, vy, vv) \ + (1.0 / 36.0 * (1.0 + 3.0 * (vx + vy) + 4.5 * (vx + vy) * (vx + vy) - vv)) +#define WEQ_6(vx, vy, vv) \ + (1.0 / 36.0 * (1.0 + 3.0 * (-vx + vy) + 4.5 * (-vx + vy) * (-vx + vy) - vv)) +#define WEQ_7(vx, vy, vv) \ + (1.0 / 36.0 * (1.0 + 3.0 * (-vx - vy) + 4.5 * (-vx - vy) * (-vx - vy) - vv)) +#define WEQ_8(vx, vy, vv) \ + (1.0 / 36.0 * (1.0 + 3.0 * (vx - vy) + 4.5 * (vx - vy) * (vx - vy) - vv)) +////////////////////////////////////////////////////////// +#define MFEQ_0(phi, vx, vy, mu, A) (phi) +#define MFEQ_1(phi, vx, vy, mu, A) (-4.0 * phi + 2.0 * A * mu) +#define MFEQ_2(phi, vx, vy, mu, A) (4.0 * phi - 3.0 * A * mu) +#define MFEQ_3(phi, vx, vy, mu, A) (phi * vx) +#define MFEQ_4(phi, vx, vy, mu, A) (-phi * vx) +#define MFEQ_5(phi, vx, vy, mu, A) (phi * vy) +#define MFEQ_6(phi, vx, vy, mu, A) (-phi * vy) +#define MFEQ_7(phi, vx, vy, mu, A) (0.) +#define MFEQ_8(phi, vx, vy, mu, A) (0.) +////////////////////////////////////////////////////////// +#define MGEQ_0(rho, p, vx, vy) \ + (0.) //ٶvx,vyûгc, vv=1.5*(vx*vx+vy*vy) +#define MGEQ_1(rho, p, vx, vy) (6.0 * p + 3.0 * rho * (vx * vx + vy * vy)) +#define MGEQ_2(rho, p, vx, vy) (-9.0 * p - 3.0 * rho * (vx * vx + vy * vy)) +#define MGEQ_3(rho, p, vx, vy) (rho * vx) +#define MGEQ_4(rho, p, vx, vy) (-rho * vx) +#define MGEQ_5(rho, p, vx, vy) (rho * vy) +#define MGEQ_6(rho, p, vx, vy) (-rho * vy) +#define MGEQ_7(rho, p, vx, vy) (rho * (vx * vx - vy * vy)) +#define MGEQ_8(rho, p, vx, vy) (rho * vx * vy) + +////////////////////////////////////////////////////////// +#define MFF_0(phi, vx, vy, phi0, vx0, vy0) \ + (0.) //ٶvx,vyûгc, vv=1.5*(vx*vx+vy*vy) +#define MFF_1(phi, vx, vy, phi0, vx0, vy0) (0.) +#define MFF_2(phi, vx, vy, phi0, vx0, vy0) (0.) +#define MFF_3(phi, vx, vy, phi0, vx0, vy0) (phi * vx - phi0 * vx0) +#define MFF_4(phi, vx, vy, phi0, vx0, vy0) (-phi * vx + phi0 * vx0) +#define MFF_5(phi, vx, vy, phi0, vx0, vy0) (phi * vy - phi0 * vy0) +#define MFF_6(phi, vx, vy, phi0, vx0, vy0) (-phi * vy + phi0 * vy0) +#define MFF_7(phi, vx, vy, phi0, vx0, vy0) (0.) +#define MFF_8(phi, vx, vy, phi0, vx0, vy0) (0.) 
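// Illustrative aside, not part of the original patch: a minimal host-side sanity
// check for the D2Q9 equilibria defined above.  It assumes the FEQ_*/GEQ_* macros
// from this file plus <cstdio> (the header names were lost from the #include lines
// in this copy of the diff).  The zeroth moments must satisfy sum_i feq_i = phi and
// sum_i geq_i = 0, because the A*mu weights cancel (-5/9 + 4/9 + 1/9 = 0) and the
// pressure weights cancel (-5/3 + 4/3 + 1/3 = 0).
static void check_d2q9_equilibria(double phi, double vx, double vy, double mu,
                                  double Aval, double rho, double p) {
  double vv = 1.5 * (vx * vx + vy * vy);
  double sum_f = FEQ_0(phi, vx, vy, mu, Aval) + FEQ_1(phi, vx, vy, mu, Aval) +
                 FEQ_2(phi, vx, vy, mu, Aval) + FEQ_3(phi, vx, vy, mu, Aval) +
                 FEQ_4(phi, vx, vy, mu, Aval) + FEQ_5(phi, vx, vy, mu, Aval) +
                 FEQ_6(phi, vx, vy, mu, Aval) + FEQ_7(phi, vx, vy, mu, Aval) +
                 FEQ_8(phi, vx, vy, mu, Aval);
  double sum_g = GEQ_0(rho, p, vx, vy, vv) + GEQ_1(rho, p, vx, vy, vv) +
                 GEQ_2(rho, p, vx, vy, vv) + GEQ_3(rho, p, vx, vy, vv) +
                 GEQ_4(rho, p, vx, vy, vv) + GEQ_5(rho, p, vx, vy, vv) +
                 GEQ_6(rho, p, vx, vy, vv) + GEQ_7(rho, p, vx, vy, vv) +
                 GEQ_8(rho, p, vx, vy, vv);
  // Both residuals should sit at round-off level for any admissible state.
  printf("sum(feq)-phi = %e  sum(geq) = %e\n", sum_f - phi, sum_g);
}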
+////////////////////////////////////////////////////////// +#define diag0 (1.0 / 9.0) +#define diag1 (1.0 / 36.0) +#define diag2 (1.0 / 36.0) +#define diag3 (1.0 / 6.0) +#define diag4 (1.0 / 12.0) +#define diag5 (1.0 / 6.0) +#define diag6 (1.0 / 12.0) +#define diag7 (1.0 / 4.0) +#define diag8 (1.0 / 4.0) +//////////////////////////////////////////////////////////////////////////// +#define F_d(k, y, x) \ + F_d[(k)*NYNX + (y)*NX + (x)] //ֲFyxkoffset +#define G_d(k, y, x) \ + G_d[(k)*NYNX + (y)*NX + (x)] //ֲFyxkoffset +#define phi_d(y, x) phi_d[(y)*NX + (x)] +#define rho_d(y, x) rho_d[(y)*NX + (x)] +#define mu_d(y, x) mu_d[(y)*NX + (x)] +#define p_d(y, x) p_d[(y)*NX + (x)] +#define u_d(y, x) u_d[(y)*NX + (x)] +#define v_d(y, x) v_d[(y)*NX + (x)] +#define u0_d(y, x) u0_d[(y)*NX + (x)] +#define v0_d(y, x) v0_d[(y)*NX + (x)] +#define phi0_d(y, x) phi0_d[(y)*NX + (x)] + +/////////////////////////////////////////////////////////////////////////////////// +double *f_dev, *F_dev, *g_dev, *G_dev; //ֲf,gGPUڴ +double *phi_dev, *rho_dev, *p_dev, *phi0_dev, *mu_dev; +double *u_dev, *v_dev, *u0_dev, *v0_dev; +/////////////////////////////////////////////////////////////////////////////////// +__global__ void collision_propagation(double A, + double MM, + double w_f, + double w_g, + double ggy, + double *phi_d, + double *mu_d, + double *rho_d, + double *p_d, + double *u_d, + double *v_d, + double *phi0_d, + double *u0_d, + double *v0_d, + double *f_d, + double *F_d, + double *g_d, + double *G_d); +__global__ void Macro_rho(double *phi_d, + double *phi0_d, + double *rho_d, + double *f_d); +__global__ void Macro_mu(double Kappa, + double beta, + double *phi_d, + double *mu_d); +__global__ void Macro_u(double ggy, + double MM, + double *phi_d, + double *rho_d, + double *p_d, + double *mu_d, + double *u_d, + double *v_d, + double *u0_d, + double *v0_d, + double *g_d); + +//////////////////////////////////////////////////////////////////// +int main(int argc, char **argv) { + int m, k; + double err1; + + e[0][0] = e[0][1] = 0; + e[1][0] = 1; + e[1][1] = 0; + e[2][0] = 0; + e[2][1] = 1; + e[3][0] = -1; + e[3][1] = 0; + e[4][0] = 0; + e[4][1] = -1; + e[5][0] = 1; + e[5][1] = 1; + e[6][0] = -1; + e[6][1] = 1; + e[7][0] = -1; + e[7][1] = -1; + e[8][0] = 1; + e[8][1] = -1; + + w[0] = 4.0 / 9.0; + w[1] = w[2] = w[3] = w[4] = 1.0 / 9.0; + w[5] = w[6] = w[7] = w[8] = 1.0 / 36.0; + + for (k = 0; k < 50; k++) { + Re = 20.0 + k * 20.; + + init(); //ʼ + + err1 = 1.0; + m = 0; + + datadeal(m); //ʼu,v,rho + + int device = 0; + cudaSetDevice(device); + cudaDeviceProp properties; + cudaGetDeviceProperties(&properties, device); + printf("Simulation running on %s\n", properties.name); + + //////////////////////////////////////////// + dim3 threads(BX, BY); //ÿblockάС + dim3 grid((N1 + BX - 1) / BX, (M1 + BY - 1) / BY); // gridάС + + // GPUԴ: f_dev[], F_dev[] + cudaMalloc((void **)&f_dev, sizeof(double) * Q * NY * NX); + cudaMalloc((void **)&F_dev, sizeof(double) * Q * NY * NX); + cudaMalloc((void **)&g_dev, sizeof(double) * Q * NY * NX); + cudaMalloc((void **)&G_dev, sizeof(double) * Q * NY * NX); + cudaMalloc((void **)&phi_dev, sizeof(double) * NY * NX); + cudaMalloc((void **)&rho_dev, sizeof(double) * NY * NX); + cudaMalloc((void **)&p_dev, sizeof(double) * NY * NX); + cudaMalloc((void **)&phi0_dev, sizeof(double) * NY * NX); + cudaMalloc((void **)&mu_dev, sizeof(double) * NY * NX); + cudaMalloc((void **)&u_dev, sizeof(double) * NY * NX); + cudaMalloc((void **)&v_dev, sizeof(double) * NY * NX); + cudaMalloc((void **)&u0_dev, 
sizeof(double) * NY * NX); + cudaMalloc((void **)&v0_dev, sizeof(double) * NY * NX); + + // (GPU <= CPU): f_dev <= f + cudaMemcpy(f_dev, + &f[0][0][0], + sizeof(double) * Q * NY * NX, + cudaMemcpyHostToDevice); + cudaMemcpy(g_dev, + &g[0][0][0], + sizeof(double) * Q * NY * NX, + cudaMemcpyHostToDevice); + cudaMemcpy( + phi_dev, &phi[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); + cudaMemcpy( + rho_dev, &rho[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); + cudaMemcpy( + p_dev, &p[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); + cudaMemcpy(phi0_dev, + &phi0[0][0], + sizeof(double) * NY * NX, + cudaMemcpyHostToDevice); + cudaMemcpy( + mu_dev, &mu[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); + cudaMemcpy( + u_dev, &u[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); + cudaMemcpy( + v_dev, &v[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); + cudaMemcpy( + u0_dev, &u0[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); + cudaMemcpy( + v0_dev, &v0[0][0], sizeof(double) * NY * NX, cudaMemcpyHostToDevice); + + + // time_begin=clock(); + while (m < Max) { + collision_propagation<<>>(A, + MM, + ww_f, + ww_g, + ggy, + phi_dev, + mu_dev, + rho_dev, + p_dev, + u_dev, + v_dev, + phi0_dev, + u0_dev, + v0_dev, + f_dev, + F_dev, + g_dev, + G_dev); + Macro_rho<<>>(phi_dev, phi0_dev, rho_dev, F_dev); + Macro_mu<<>>(Kappa, beta, phi_dev, mu_dev); + Macro_u<<>>(ggy, + MM, + phi_dev, + rho_dev, + p_dev, + mu_dev, + u_dev, + v_dev, + u0_dev, + v0_dev, + G_dev); + + collision_propagation<<>>(A, + MM, + ww_f, + ww_g, + ggy, + phi_dev, + mu_dev, + rho_dev, + p_dev, + u_dev, + v_dev, + phi0_dev, + u0_dev, + v0_dev, + F_dev, + f_dev, + G_dev, + g_dev); + Macro_rho<<>>(phi_dev, phi0_dev, rho_dev, f_dev); + Macro_mu<<>>(Kappa, beta, phi_dev, mu_dev); + Macro_u<<>>(ggy, + MM, + phi_dev, + rho_dev, + p_dev, + mu_dev, + u_dev, + v_dev, + u0_dev, + v0_dev, + g_dev); + + m = m + 2; + + if (m % 10000 == 0) { + cudaMemcpy(&f[0][0][0], + f_dev, + Q * NY * NX * sizeof(double), + cudaMemcpyDeviceToHost); + cudaMemcpy( + &u[0][0], u_dev, NY * NX * sizeof(double), cudaMemcpyDeviceToHost); + cudaMemcpy( + &v[0][0], v_dev, NY * NX * sizeof(double), cudaMemcpyDeviceToHost); + cudaMemcpy(&phi[0][0], + phi_dev, + NY * NX * sizeof(double), + cudaMemcpyDeviceToHost); + cudaMemcpy(&phi0[0][0], + phi0_dev, + NY * NX * sizeof(double), + cudaMemcpyDeviceToHost); + cudaMemcpy(&rho[0][0], + rho_dev, + NY * NX * sizeof(double), + cudaMemcpyDeviceToHost); + cudaMemcpy( + &p[0][0], p_dev, NY * NX * sizeof(double), cudaMemcpyDeviceToHost); + + datadeal(m); // + // X_s=A_spike( ); + // X_b=A_bulble( ); + err1 = error1(phi, phi0); + printf("t=%d err1=%e\n", m, err1); + } + } + // time_end=clock(); + // printf("The time is: %f seconds\n", + //(float)(time_end-time_begin)/CLOCKS_PER_SEC); + cudaFree(f_dev); + cudaFree(F_dev); + cudaFree(g_dev); + cudaFree(G_dev); + cudaFree(phi_dev); + cudaFree(rho_dev); + cudaFree(p_dev); + cudaFree(phi0_dev); + cudaFree(mu_dev); + cudaFree(u_dev); + cudaFree(v_dev); + cudaFree(u0_dev); + cudaFree(v0_dev); + } + + return 0; +} + + +void init() //ֲijʼflagijʼ +{ + double DDphi, mu0, uv, Ban, t_s; // u_ffƽ̬ٶ rho_ffܶ + double rhotal_1; //еܶȺ + int j, k, i, jp, kp; + double Pe, x, h, lamda, wl, uu, niu, tau_f, tau_g; + + beta = 12.0 * sigma / + (D * (phil - phig) * (phil - phig) * (phil - phig) * (phil - phig)); + Kappa = 1.5 * D * sigma / ((phil - phig) * (phil - phig)); + + lamda = 256.0; + wl = 256.0; + uu = 0.04; + ggy = -uu * uu / lamda; + + niu = 
uu * lamda / Re; + tau_g = 3.0 * niu / (dx) + 0.5; + tau_f = 0.8; + ww_f = 1.0 / tau_f; + ww_g = 1.0 / tau_g; + + Pe = 50.0; + MM = uu * D / (beta * Pe * (phil - phig) * (phil - phig)); + A = 3.0 * MM / ((tau_f - 0.5) * dt); + + Ban = sigma * (2. * pi / lamda) * (2. * pi / lamda) / (-(rhol - rhog) * ggy); + t_s = sqrt(-At * ggy / lamda); + + rhotal_1 = 0.; + for (j = 1; j <= M + 1; j++) { + for (k = N16; k <= N16 + N; k++) { + // h=0.6*NY+0.05*wl*cos(2.0*pi*k/wl); + h = 0.5 * NY + 0.05 * wl * cos(2.0 * pi * (k - N16) / wl); + x = 2. * (j - h) / D; + // phi[j][k]=0.5*(phil+phig)+0.5*(phil-phig)*tanh(x); + // h=865+5.36*cos(2.0*pi*(k-N16)/lamda+3.0); + // x=2.0*(j-h)/D; + phi[j][k] = 0.5 * (phil + phig) + 0.5 * (phil - phig) * tanh(x); + /* if(j>=h&&j<=NY) + { + phi[j][k]=phil; + } + else + { + phi[j][k]=phig; + }*/ + + rho[j][k] = (phi[j][k] - phig) * (rhol - rhog) / (phil - phig) + rhog; + + p[j][k] = 0.0; + u[j][k] = 0.0; + v[j][k] = 0.0; + + uv = 1.5 * (u[j][k] * u[j][k] + v[j][k] * v[j][k]); + + u0[j][k] = u[j][k]; + v0[j][k] = v[j][k]; + phi0[j][k] = phi[j][k]; + + + DDphi = 0.0; + for (i = 0; i < 9; i++) { + jp = j + e[i][1]; + kp = (k + e[i][0] + N1 - N16) % N1 + N16; + + if (jp < 1 || jp > M1) { + jp = j; + kp = k; + } + + DDphi += w[i] * phi[jp][kp]; + } + mu0 = 4. * beta * (phi[j][k] - phil) * (phi[j][k] - phig) * + (phi[j][k] - phim); + mu[j][k] = mu0 - Kappa * (6.0 * rdt * rdt * (DDphi - phi[j][k])); + + f[0][j][k] = FEQ_0(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + f[1][j][k] = FEQ_1(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + f[3][j][k] = FEQ_2(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + f[2][j][k] = FEQ_3(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + f[4][j][k] = FEQ_4(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + f[5][j][k] = FEQ_5(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + f[6][j][k] = FEQ_6(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + f[7][j][k] = FEQ_7(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + f[8][j][k] = FEQ_8(phi[j][k], u[j][k], v[j][k], mu[j][k], A); + + g[0][j][k] = GEQ_0(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + g[1][j][k] = GEQ_1(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + g[3][j][k] = GEQ_2(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + g[2][j][k] = GEQ_3(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + g[4][j][k] = GEQ_4(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + g[5][j][k] = GEQ_5(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + g[6][j][k] = GEQ_6(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + g[7][j][k] = GEQ_7(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + g[8][j][k] = GEQ_8(rho[j][k], p[j][k], u[j][k], v[j][k], uv); + + rhotal_1 += phi[j][k]; + } + } + printf( + "rhotal_1=%lf Re=%lf ggy=%e Pe=%lf tau_f=%lf tau_g=%lf niu=%e MM=%lf " + "A=%lf D=%lf sigma=%lf beta=%lf Kappa=%lf Ban=%lf t_s=%e\n", + rhotal_1, + Re, + ggy, + Pe, + tau_f, + tau_g, + niu, + MM, + A, + D, + sigma, + beta, + Kappa, + Ban, + t_s); +} + +/////////////////////////////////////////// + +__global__ void collision_propagation(double A, + double MM, + double w_f, + double w_g, + double ggy, + double *phi_d, + double *mu_d, + double *rho_d, + double *p_d, + double *u_d, + double *v_d, + double *phi0_d, + double *u0_d, + double *v0_d, + double *f_d, + double *F_d, + double *g_d, + double *G_d) { + int x, y, k; + + double f0, f1, f2, f3, f4, f5, f6, f7, f8; //ǰķֲ + double g0, g1, g2, g3, g4, g5, g6, g7, g8; + double mf0, mf1, mf2, mf3, mf4, mf5, mf6, mf7, mf8; + double mg0, mg1, mg2, mg3, mg4, mg5, mg6, mg7, mg8; + double GG0, GG1, GG2, GG3, GG4, GG5, GG6, GG7, GG8; + double mGG0, mGG1, mGG2, 
mGG3, mGG4, mGG5, mGG6, mGG7, mGG8; + double s_f0, s_f1, s_f2, s_f3, s_f4, s_f5, s_f6, s_f7, s_f8; + double s_g0, s_g1, s_g2, s_g3, s_g4, s_g5, s_g6, s_g7, s_g8; + double wx0, wy0, wx1, wy1, wx2, wy2, wx3, wy3, wx4, wy4, wx5, wy5, wx6, wy6, + wx7, wy7, wx8, wy8; + double vv; + double Drhox, Drhoy, Dphix, Dphiy, DDmu; + double Fx, Fy; + + x = N16 + blockIdx.x * BX + threadIdx.x; // f_d, F_d ± + y = 1 + blockIdx.y * BY; // f_d, F_d ± + k = NX * y + x; // f_d, F_dһάoffset + + if (x <= N + N16) { + f0 = f_d[k + 0 * NYNX]; + f1 = f_d[k + 1 * NYNX]; + f2 = f_d[k + 2 * NYNX]; + f3 = f_d[k + 3 * NYNX]; + f4 = f_d[k + 4 * NYNX]; + f5 = f_d[k + 5 * NYNX]; + f6 = f_d[k + 6 * NYNX]; + f7 = f_d[k + 7 * NYNX]; + f8 = f_d[k + 8 * NYNX]; + + g0 = g_d[k + 0 * NYNX]; + g1 = g_d[k + 1 * NYNX]; + g2 = g_d[k + 2 * NYNX]; + g3 = g_d[k + 3 * NYNX]; + g4 = g_d[k + 4 * NYNX]; + g5 = g_d[k + 5 * NYNX]; + g6 = g_d[k + 6 * NYNX]; + g7 = g_d[k + 7 * NYNX]; + g8 = g_d[k + 8 * NYNX]; + + mf0 = f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8; + mf1 = -4 * f0 - f1 - f2 - f3 - f4 + 2 * (f5 + f6 + f7 + f8); + mf2 = 4 * f0 - 2 * (f1 + f2 + f3 + f4) + f5 + f6 + f7 + f8; + mf3 = f1 - f3 + f5 - f6 - f7 + f8; + mf4 = -2 * (f1 - f3) + f5 - f6 - f7 + f8; + mf5 = f2 - f4 + f5 + f6 - f7 - f8; + mf6 = -2 * (f2 - f4) + f5 + f6 - f7 - f8; + mf7 = f1 - f2 + f3 - f4; + mf8 = f5 - f6 + f7 - f8; + + mg0 = g0 + g1 + g2 + g3 + g4 + g5 + g6 + g7 + g8; + mg1 = -4 * g0 - g1 - g2 - g3 - g4 + 2 * (g5 + g6 + g7 + g8); + mg2 = 4 * g0 - 2 * (g1 + g2 + g3 + g4) + g5 + g6 + g7 + g8; + mg3 = g1 - g3 + g5 - g6 - g7 + g8; + mg4 = -2 * (g1 - g3) + g5 - g6 - g7 + g8; + mg5 = g2 - g4 + g5 + g6 - g7 - g8; + mg6 = -2 * (g2 - g4) + g5 + g6 - g7 - g8; + mg7 = g1 - g2 + g3 - g4; + mg8 = g5 - g6 + g7 - g8; + + s_f3 = s_f5 = w_f; + s_f0 = 1.0; + s_f7 = s_f8 = 1.0; + s_f1 = s_f2 = 1.3; + s_f4 = s_f6 = 1.3; + /* + s_f3=s_f5=w_f; + s_f0=w_f; s_f7=s_f8=w_f; + s_f1=s_f2=w_f; + s_f4=s_f6=w_f;*/ + + s_g0 = s_g3 = s_g5 = 1.0; + s_g1 = 1.0; + s_g2 = 1.0; + s_g4 = s_g6 = 1.7; + s_g7 = s_g8 = w_g; + + Drhox = Drhoy = 0.0; + Dphix = Dphiy = 0.0; + DDmu = 0.0; + + int xd = (x + 1 - N16 + N1) % N1 + N16; + int xs = (x - 1 - N16 + N1) % N1 + N16; + // 1 + Drhox += 1.0 / 9.0 * rho_d(y, xd); + Dphix += 1.0 / 9.0 * phi_d(y, xd); + DDmu += 1.0 / 9.0 * (mu_d(y, xd) - mu_d(y, x)); + + // 3 + Drhox -= 1.0 / 9.0 * rho_d(y, xs); + Dphix -= 1.0 / 9.0 * phi_d(y, xs); + DDmu += 1.0 / 9.0 * (mu_d(y, xs) - mu_d(y, x)); + // 478 + if ((y - 1) >= 1) { + Drhoy -= 1.0 / 9.0 * rho_d(y - 1, x); // 4 + Dphiy -= 1.0 / 9.0 * phi_d(y - 1, x); + DDmu += 1.0 / 9.0 * (mu_d(y - 1, x) - mu_d(y, x)); + + Drhox -= 1.0 / 36.0 * rho_d(y - 1, xs); // 7 + Drhoy -= 1.0 / 36.0 * rho_d(y - 1, xs); + Dphix -= 1.0 / 36.0 * phi_d(y - 1, xs); + Dphiy -= 1.0 / 36.0 * phi_d(y - 1, xs); + + DDmu += 1.0 / 36.0 * (mu_d(y - 1, xs) - mu_d(y, x)); + + Drhox += 1.0 / 36.0 * rho_d(y - 1, xd); // 8 + Drhoy -= 1.0 / 36.0 * rho_d(y - 1, xd); + Dphix += 1.0 / 36.0 * phi_d(y - 1, xd); + Dphiy -= 1.0 / 36.0 * phi_d(y - 1, xd); + + DDmu += 1.0 / 36.0 * (mu_d(y - 1, xd) - mu_d(y, x)); + } else { + Drhoy -= 1.0 / 9.0 * rho_d(y, x); // 4 + Dphiy -= 1.0 / 9.0 * phi_d(y, x); + + Drhox -= 1.0 / 36.0 * rho_d(y, x); // 7 + Drhoy -= 1.0 / 36.0 * rho_d(y, x); + Dphix -= 1.0 / 36.0 * phi_d(y, x); + Dphiy -= 1.0 / 36.0 * phi_d(y, x); + + Drhox += 1.0 / 36.0 * rho_d(y, x); // 8 + Drhoy -= 1.0 / 36.0 * rho_d(y, x); + Dphix += 1.0 / 36.0 * phi_d(y, x); + Dphiy -= 1.0 / 36.0 * phi_d(y, x); + } + // 256 + if ((y + 1) <= M1) { + Drhoy += 1.0 / 9.0 * rho_d(y + 
1, x); // 2 + Dphiy += 1.0 / 9.0 * phi_d(y + 1, x); + DDmu += 1.0 / 9.0 * (mu_d(y + 1, x) - mu_d(y, x)); + + Drhox += 1.0 / 36.0 * rho_d(y + 1, xd); // 5 + Drhoy += 1.0 / 36.0 * rho_d(y + 1, xd); + Dphix += 1.0 / 36.0 * phi_d(y + 1, xd); + Dphiy += 1.0 / 36.0 * phi_d(y + 1, xd); + + DDmu += 1.0 / 36.0 * (mu_d(y + 1, xd) - mu_d(y, x)); + + Drhox -= 1.0 / 36.0 * rho_d(y + 1, xs); // 6 + Drhoy += 1.0 / 36.0 * rho_d(y + 1, xs); + Dphix -= 1.0 / 36.0 * phi_d(y + 1, xs); + Dphiy += 1.0 / 36.0 * phi_d(y + 1, xs); + + DDmu += 1.0 / 36.0 * (mu_d(y + 1, xs) - mu_d(y, x)); + } else { + Drhoy += 1.0 / 9.0 * rho_d(y, x); // 2 + Dphiy += 1.0 / 9.0 * phi_d(y, x); + + Drhox += 1.0 / 36.0 * rho_d(y, x); // 5 + Drhoy += 1.0 / 36.0 * rho_d(y, x); + Dphix += 1.0 / 36.0 * phi_d(y, x); + Dphiy += 1.0 / 36.0 * phi_d(y, x); + + Drhox -= 1.0 / 36.0 * rho_d(y, x); // 6 + Drhoy += 1.0 / 36.0 * rho_d(y, x); + Dphix -= 1.0 / 36.0 * phi_d(y, x); + Dphiy += 1.0 / 36.0 * phi_d(y, x); + } + + Fx = 3.0 * mu_d(y, x) * Dphix * rdt + + 3.0 * u_d(y, x) * (rhol - rhog) * MM * DDmu * rdt * rdt; // Fx=Fsx+Fax + Fy = 3.0 * mu_d(y, x) * Dphiy * rdt + + 3.0 * v_d(y, x) * (rhol - rhog) * MM * DDmu * rdt * rdt; + + vv = 1.5 * (u_d(y, x) * u_d(y, x) + v_d(y, x) * v_d(y, x)); + + wx0 = (WEQ_0(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_0(u_d(y, x), v_d(y, x), vv) - 4.0 / 9.0) * rdt * Drhox); + wy0 = (WEQ_0(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_0(u_d(y, x), v_d(y, x), vv) - 4.0 / 9.0) * rdt * Drhoy); + wx1 = (WEQ_1(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_1(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhox); + wy1 = (WEQ_1(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_1(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhoy); + wx2 = (WEQ_2(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_2(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhox); + wy2 = (WEQ_2(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_2(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhoy); + wx3 = (WEQ_3(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_3(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhox); + wy3 = (WEQ_3(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_3(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhoy); + wx4 = (WEQ_4(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_4(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhox); + wy4 = (WEQ_4(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_4(u_d(y, x), v_d(y, x), vv) - 1.0 / 9.0) * rdt * Drhoy); + wx5 = (WEQ_5(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_5(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhox); + wy5 = (WEQ_5(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_5(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhoy); + wx6 = (WEQ_6(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_6(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhox); + wy6 = (WEQ_6(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_6(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhoy); + wx7 = (WEQ_7(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_7(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhox); + wy7 = (WEQ_7(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_7(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhoy); + wx8 = (WEQ_8(u_d(y, x), v_d(y, x), vv) * Fx + + (WEQ_8(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhox); + wy8 = (WEQ_8(u_d(y, x), v_d(y, x), vv) * (Fy + (rho_d(y, x) - rhom) * ggy) + + (WEQ_8(u_d(y, x), v_d(y, x), vv) - 1.0 / 36.0) * rdt * Drhoy); 
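// Reading note (comments only, nothing executed; not part of the original patch):
// the unrolled wx*/wy*/GG* expressions around this point implement a forcing term of
// the form  GG_i = 3 * (e_i - u) . [ W_i(u) * F + (W_i(u) - w_i) * grad(rho) / dt ],
// with F = (Fx, Fy + (rho - rhom) * ggy) and W_i(u) given by the WEQ_0..WEQ_8 macros.
// An equivalent loop form, kept here purely as a reading aid (ex/ey/wgt would be the
// D2Q9 velocity components and weights; GG[] would replace GG0..GG8):
//   const int    ex[9]  = {0, 1, 0, -1,  0, 1, -1, -1,  1};
//   const int    ey[9]  = {0, 0, 1,  0, -1, 1,  1, -1, -1};
//   const double wgt[9] = {4.0 / 9.0,  1.0 / 9.0,  1.0 / 9.0,  1.0 / 9.0,  1.0 / 9.0,
//                          1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0};
//   for (int i = 0; i < 9; i++) {
//     double eu  = ex[i] * u_d(y, x) + ey[i] * v_d(y, x);
//     double Wi  = wgt[i] * (1.0 + 3.0 * eu + 4.5 * eu * eu - vv);
//     double wxi = Wi * Fx + (Wi - wgt[i]) * rdt * Drhox;
//     double wyi = Wi * (Fy + (rho_d(y, x) - rhom) * ggy)
//                  + (Wi - wgt[i]) * rdt * Drhoy;
//     GG[i] = 3.0 * ((ex[i] - u_d(y, x)) * wxi + (ey[i] - v_d(y, x)) * wyi);
//   }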
+ + GG0 = 3.0 * ((-u_d(y, x)) * wx0 + (-v_d(y, x)) * wy0); + GG1 = 3.0 * ((1 - u_d(y, x)) * wx1 + (-v_d(y, x)) * wy1); + GG2 = 3.0 * ((-u_d(y, x)) * wx2 + (1 - v_d(y, x)) * wy2); + GG3 = 3.0 * ((-1 - u_d(y, x)) * wx3 + (-v_d(y, x)) * wy3); + GG4 = 3.0 * ((-u_d(y, x)) * wx4 + (-1 - v_d(y, x)) * wy4); + GG5 = 3.0 * ((1 - u_d(y, x)) * wx5 + (1 - v_d(y, x)) * wy5); + GG6 = 3.0 * ((-1 - u_d(y, x)) * wx6 + (1 - v_d(y, x)) * wy6); + GG7 = 3.0 * ((-1 - u_d(y, x)) * wx7 + (-1 - v_d(y, x)) * wy7); + GG8 = 3.0 * ((1 - u_d(y, x)) * wx8 + (-1 - v_d(y, x)) * wy8); + + mGG0 = GG0 + GG1 + GG2 + GG3 + GG4 + GG5 + GG6 + GG7 + GG8; + mGG1 = -4 * GG0 - GG1 - GG2 - GG3 - GG4 + 2 * (GG5 + GG6 + GG7 + GG8); + mGG2 = 4 * GG0 - 2 * (GG1 + GG2 + GG3 + GG4) + GG5 + GG6 + GG7 + GG8; + mGG3 = GG1 - GG3 + GG5 - GG6 - GG7 + GG8; + mGG4 = -2 * (GG1 - GG3) + GG5 - GG6 - GG7 + GG8; + mGG5 = GG2 - GG4 + GG5 + GG6 - GG7 - GG8; + mGG6 = -2 * (GG2 - GG4) + GG5 + GG6 - GG7 - GG8; + mGG7 = GG1 - GG2 + GG3 - GG4; + mGG8 = GG5 - GG6 + GG7 - GG8; + + + mf0 = diag0 * + (mf0 - + s_f0 * (mf0 - + MFEQ_0(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f0) * MFF_0(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + mf1 = diag1 * + (mf1 - + s_f1 * (mf1 - + MFEQ_1(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f1) * MFF_1(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + mf2 = diag2 * + (mf2 - + s_f2 * (mf2 - + MFEQ_2(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f2) * MFF_2(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + mf3 = diag3 * + (mf3 - + s_f3 * (mf3 - + MFEQ_3(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f3) * MFF_3(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + mf4 = diag4 * + (mf4 - + s_f4 * (mf4 - + MFEQ_4(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f4) * MFF_4(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + mf5 = diag5 * + (mf5 - + s_f5 * (mf5 - + MFEQ_5(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f5) * MFF_5(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + mf6 = diag6 * + (mf6 - + s_f6 * (mf6 - + MFEQ_6(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f6) * MFF_6(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + mf7 = diag7 * + (mf7 - + s_f7 * (mf7 - + MFEQ_7(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f7) * MFF_7(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + mf8 = diag8 * + (mf8 - + s_f8 * (mf8 - + MFEQ_8(phi_d(y, x), u_d(y, x), v_d(y, x), mu_d(y, x), A)) + + (1 - 0.5 * s_f8) * MFF_8(phi_d(y, x), + u_d(y, x), + v_d(y, x), + phi0_d(y, x), + u0_d(y, x), + v0_d(y, x))); + + mg0 = diag0 * + (mg0 - + s_g0 * (mg0 - MGEQ_0(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g0) * mGG0); + mg1 = diag1 * + (mg1 - + s_g1 * (mg1 - MGEQ_1(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g1) * mGG1); + mg2 = diag2 * + (mg2 - + s_g2 * (mg2 - MGEQ_2(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g2) * mGG2); + mg3 = diag3 * + (mg3 - + s_g3 * (mg3 - MGEQ_3(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g3) * mGG3); + mg4 = diag4 * + (mg4 - + s_g4 * (mg4 - MGEQ_4(rho_d(y, x), p_d(y, x), 
u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g4) * mGG4); + mg5 = diag5 * + (mg5 - + s_g5 * (mg5 - MGEQ_5(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g5) * mGG5); + mg6 = diag6 * + (mg6 - + s_g6 * (mg6 - MGEQ_6(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g6) * mGG6); + mg7 = diag7 * + (mg7 - + s_g7 * (mg7 - MGEQ_7(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g7) * mGG7); + mg8 = diag8 * + (mg8 - + s_g8 * (mg8 - MGEQ_8(rho_d(y, x), p_d(y, x), u_d(y, x), v_d(y, x))) + + dt * (1 - 0.5 * s_g8) * mGG8); + + f0 = mf0 - 4 * (mf1 - mf2); + f1 = mf0 - mf1 - 2 * (mf2 + mf4) + mf3 + mf7; + f2 = mf0 - mf1 - 2 * (mf2 + mf6) + mf5 - mf7; + f3 = mf0 - mf1 - 2 * (mf2 - mf4) - mf3 + mf7; + f4 = mf0 - mf1 - 2 * (mf2 - mf6) - mf5 - mf7; + f5 = mf0 + mf1 + mf1 + mf2 + mf3 + mf4 + mf5 + mf6 + mf8; + f6 = mf0 + mf1 + mf1 + mf2 - mf3 - mf4 + mf5 + mf6 - mf8; + f7 = mf0 + mf1 + mf1 + mf2 - mf3 - mf4 - mf5 - mf6 + mf8; + f8 = mf0 + mf1 + mf1 + mf2 + mf3 + mf4 - mf5 - mf6 - mf8; + + g0 = mg0 - 4 * (mg1 - mg2); + g1 = mg0 - mg1 - 2 * (mg2 + mg4) + mg3 + mg7; + g2 = mg0 - mg1 - 2 * (mg2 + mg6) + mg5 - mg7; + g3 = mg0 - mg1 - 2 * (mg2 - mg4) - mg3 + mg7; + g4 = mg0 - mg1 - 2 * (mg2 - mg6) - mg5 - mg7; + g5 = mg0 + mg1 + mg1 + mg2 + mg3 + mg4 + mg5 + mg6 + mg8; + g6 = mg0 + mg1 + mg1 + mg2 - mg3 - mg4 + mg5 + mg6 - mg8; + g7 = mg0 + mg1 + mg1 + mg2 - mg3 - mg4 - mg5 - mg6 + mg8; + g8 = mg0 + mg1 + mg1 + mg2 + mg3 + mg4 - mg5 - mg6 - mg8; + + + // 0 1 3 + F_d[k] = f0; + G_d[k] = g0; + F_d(1, y, xd) = f1; + G_d(1, y, xd) = g1; + F_d(3, y, xs) = f3; + G_d(3, y, xs) = g3; + // 2 5 6 + if ((y + 1) <= M1) { + F_d(2, y + 1, x) = f2; + G_d(2, y + 1, x) = g2; + F_d(5, y + 1, xd) = f5; + G_d(5, y + 1, xd) = g5; + F_d(6, y + 1, xs) = f6; + G_d(6, y + 1, xs) = g6; + } else { + F_d(4, y, x) = f2; + G_d(4, y, x) = g2; + F_d(7, y, x) = f5; + G_d(7, y, x) = g5; + F_d(8, y, x) = f6; + G_d(8, y, x) = g6; + } + // 4 7 8 + if ((y - 1) >= 1) { + F_d(4, y - 1, x) = f4; + G_d(4, y - 1, x) = g4; + F_d(7, y - 1, xs) = f7; + G_d(7, y - 1, xs) = g7; + F_d(8, y - 1, xd) = f8; + G_d(8, y - 1, xd) = g8; + } else { + F_d(2, y, x) = f4; + G_d(2, y, x) = g4; + F_d(5, y, x) = f7; + G_d(5, y, x) = g7; + F_d(6, y, x) = f8; + G_d(6, y, x) = g8; + } + } +} + +__global__ void Macro_rho(double *phi_d, + double *phi0_d, + double *rho_d, + double *f_d) { + int x, y, k; + double f0, f1, f2, f3, f4, f5, f6, f7, f8; + + x = N16 + blockIdx.x * BX + threadIdx.x; // f_d, F_d ±x + y = 1 + blockIdx.y * BY; // f_d, F_d ±y + k = NX * y + x; + + if (x <= N + N16) { + f0 = f_d[k + 0 * NYNX]; + f1 = f_d[k + 1 * NYNX]; + f2 = f_d[k + 2 * NYNX]; + f3 = f_d[k + 3 * NYNX]; + f4 = f_d[k + 4 * NYNX]; + f5 = f_d[k + 5 * NYNX]; + f6 = f_d[k + 6 * NYNX]; + f7 = f_d[k + 7 * NYNX]; + f8 = f_d[k + 8 * NYNX]; + + phi0_d(y, x) = phi_d(y, x); + + phi_d(y, x) = f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8; + rho_d(y, x) = 0.5 * (phi_d(y, x) - phig) * (rhol - rhog) + rhog; + } +} + +__global__ void Macro_mu(double Kappa, + double beta, + double *phi_d, + double *mu_d) { + int x, y; + + double DDphi, mu0; + + x = N16 + blockIdx.x * BX + threadIdx.x; // f_d, F_d ±x + y = 1 + blockIdx.y * BY; // f_d, F_d ±y + + + if (x <= N + N16) { + int xd = (x + 1 - N16 + N1) % N1 + N16; + int xs = (x - 1 - N16 + N1) % N1 + N16; + + DDphi = 0.0; + DDphi += 1.0 / 9.0 * (phi_d(y, xd) - phi_d(y, x)); // 1 + DDphi += 1.0 / 9.0 * (phi_d(y, xs) - phi_d(y, x)); // 3 + if ((y + 1) <= M1) { + DDphi += 1.0 / 9.0 * (phi_d(y + 1, x) - phi_d(y, 
x)); // 2 + DDphi += 1.0 / 36.0 * (phi_d(y + 1, xd) - phi_d(y, x)); // 5 + DDphi += 1.0 / 36.0 * (phi_d(y + 1, xs) - phi_d(y, x)); // 6 + } + if ((y - 1) >= 1) { + DDphi += 1.0 / 9.0 * (phi_d(y - 1, x) - phi_d(y, x)); // 4 + DDphi += 1.0 / 36.0 * (phi_d(y - 1, xs) - phi_d(y, x)); // 7 + DDphi += 1.0 / 36.0 * (phi_d(y - 1, xd) - phi_d(y, x)); // 8 + } + + mu0 = 4 * beta * (phi_d(y, x) - phil) * (phi_d(y, x) - phig) * + (phi_d(y, x) - phim); + mu_d(y, x) = mu0 - 6.0 * Kappa * rdt * rdt * DDphi; + } +} + +__global__ void Macro_u(double ggy, + double MM, + double *phi_d, + double *rho_d, + double *p_d, + double *mu_d, + double *u_d, + double *v_d, + double *u0_d, + double *v0_d, + double *g_d) { + int x, y, k; + + double g1, g2, g3, g4, g5, g6, g7, g8; + + double Drhox, Drhoy, Dphix, Dphiy, DDmu; + double UX, UY, rF, FFa, s0u, udrho, Fsx, Fsy; + + x = N16 + blockIdx.x * BX + threadIdx.x; // f_d, F_d ±x + y = 1 + blockIdx.y * BY; // f_d, F_d ±y + k = NX * y + x; + + if (x <= N + N16) { + // g0 = g_d[k+0*NYNX]; + g1 = g_d[k + 1 * NYNX]; + g2 = g_d[k + 2 * NYNX]; + g3 = g_d[k + 3 * NYNX]; + g4 = g_d[k + 4 * NYNX]; + g5 = g_d[k + 5 * NYNX]; + g6 = g_d[k + 6 * NYNX]; + g7 = g_d[k + 7 * NYNX]; + g8 = g_d[k + 8 * NYNX]; + + u0_d(y, x) = u_d(y, x); + v0_d(y, x) = v_d(y, x); + + int xd = (x + 1 - N16 + N1) % N1 + N16; + int xs = (x - 1 - N16 + N1) % N1 + N16; + + Drhox = Drhoy = 0.0; + Dphix = Dphiy = DDmu = 0.0; + + Drhox += 1.0 / 9.0 * rho_d(y, xd); // 1 + Dphix += 1.0 / 9.0 * phi_d(y, xd); + DDmu += 1.0 / 9.0 * (mu_d(y, xd) - mu_d(y, x)); + + // 3 + Drhox -= 1.0 / 9.0 * rho_d(y, xs); + Dphix -= 1.0 / 9.0 * phi_d(y, xs); + DDmu += 1.0 / 9.0 * (mu_d(y, xs) - mu_d(y, x)); + // 478 + if ((y - 1) >= 1) { + Drhoy -= 1.0 / 9.0 * rho_d(y - 1, x); // 4 + Dphiy -= 1.0 / 9.0 * phi_d(y - 1, x); + DDmu += 1.0 / 9.0 * (mu_d(y - 1, x) - mu_d(y, x)); + + Drhox -= 1.0 / 36.0 * rho_d(y - 1, xs); // 7 + Drhoy -= 1.0 / 36.0 * rho_d(y - 1, xs); + Dphix -= 1.0 / 36.0 * phi_d(y - 1, xs); + Dphiy -= 1.0 / 36.0 * phi_d(y - 1, xs); + + DDmu += 1.0 / 36.0 * (mu_d(y - 1, xs) - mu_d(y, x)); + + + Drhox += 1.0 / 36.0 * rho_d(y - 1, xd); // 8 + Drhoy -= 1.0 / 36.0 * rho_d(y - 1, xd); + Dphix += 1.0 / 36.0 * phi_d(y - 1, xd); + Dphiy -= 1.0 / 36.0 * phi_d(y - 1, xd); + + DDmu += 1.0 / 36.0 * (mu_d(y - 1, xd) - mu_d(y, x)); + } else { + Drhoy -= 1.0 / 9.0 * rho_d(y, x); // 4 + Dphiy -= 1.0 / 9.0 * phi_d(y, x); + + Drhox -= 1.0 / 36.0 * rho_d(y, x); // 7 + Drhoy -= 1.0 / 36.0 * rho_d(y, x); + Dphix -= 1.0 / 36.0 * phi_d(y, x); + Dphiy -= 1.0 / 36.0 * phi_d(y, x); + + Drhox += 1.0 / 36.0 * rho_d(y, x); // 8 + Drhoy -= 1.0 / 36.0 * rho_d(y, x); + Dphix += 1.0 / 36.0 * phi_d(y, x); + Dphiy -= 1.0 / 36.0 * phi_d(y, x); + } + // 256 + if ((y + 1) <= M1) { + Drhoy += 1.0 / 9.0 * rho_d(y + 1, x); // 2 + Dphiy += 1.0 / 9.0 * phi_d(y + 1, x); + DDmu += 1.0 / 9.0 * (mu_d(y + 1, x) - mu_d(y, x)); + + Drhox += 1.0 / 36.0 * rho_d(y + 1, xd); // 5 + Drhoy += 1.0 / 36.0 * rho_d(y + 1, xd); + Dphix += 1.0 / 36.0 * phi_d(y + 1, xd); + Dphiy += 1.0 / 36.0 * phi_d(y + 1, xd); + + DDmu += 1.0 / 36.0 * (mu_d(y + 1, xd) - mu_d(y, x)); + + Drhox -= 1.0 / 36.0 * rho_d(y + 1, xs); // 6 + Drhoy += 1.0 / 36.0 * rho_d(y + 1, xs); + Dphix -= 1.0 / 36.0 * phi_d(y + 1, xs); + Dphiy += 1.0 / 36.0 * phi_d(y + 1, xs); + + DDmu += 1.0 / 36.0 * (mu_d(y + 1, xs) - mu_d(y, x)); + } else { + Drhoy += 1.0 / 9.0 * rho_d(y, x); // 2 + + Dphiy += 1.0 / 9.0 * phi_d(y, x); + Drhox += 1.0 / 36.0 * rho_d(y, x); // 5 + Drhoy += 1.0 / 36.0 * rho_d(y, x); + Dphix += 1.0 / 36.0 * 
phi_d(y, x); + Dphiy += 1.0 / 36.0 * phi_d(y, x); + + Drhox -= 1.0 / 36.0 * rho_d(y, x); // 6 + Drhoy += 1.0 / 36.0 * rho_d(y, x); + Dphix -= 1.0 / 36.0 * phi_d(y, x); + Dphiy += 1.0 / 36.0 * phi_d(y, x); + } + + p_d(y, x) = g1 + g2 + g3 + g4 + g5 + g6 + g7 + g8; + + UX = g1 - g3 + g5 + g8 - g6 - g7; + UY = g2 - g4 + g5 + g6 - g7 - g8; + + Fsx = 3.0 * mu_d(y, x) * rdt * Dphix; + Fsy = 3.0 * mu_d(y, x) * rdt * Dphiy; + + FFa = 1.5 * (rhol - rhog) * MM * (rdt * DDmu); + + rF = 1.0 / (rho_d(y, x) - FFa); + u_d(y, x) = (UX + 0.5 * dt * Fsx) * rF; + v_d(y, x) = (UY + 0.5 * dt * (Fsy + (rho_d(y, x) - rhom) * ggy)) * rF; + + s0u = -2.0 / 3.0 * (u_d(y, x) * u_d(y, x) + v_d(y, x) * v_d(y, x)); + udrho = 3.0 * (u_d(y, x) * Drhox + v_d(y, x) * Drhoy); + p_d(y, x) = 0.6 * (p_d(y, x) + 0.5 * udrho + rho_d(y, x) * s0u); + } +} + +double error1(double phi[NY][NX], double phi0[NY][NX]) { + int x, y; + double temp1 = 0, temp2 = 0, error; + + for (y = 1; y <= M1; y++) { + for (x = N16; x <= N16 + N; x++) { + temp1 += fabs(phi[y][x] - phi0[y][x]); + temp2 += fabs(phi[y][x]); + } + } + error = temp1 / temp2; + return (error); +} + +void datadeal(int t) //㡢 +{ + int x, y; + int Reint; + double rhotal_2; + FILE *fp; + char filename[50]; + + Reint = int(Re); + sprintf(filename, "./Output/%s%.4d%s%.8d", "Re", Reint, "phi", t); + + rhotal_2 = 0; + for (y = 1; y <= M1; y++) { + for (x = N16; x <= N16 + N; x++) { + rhotal_2 += phi[y][x]; + } + } + printf("rhotal_2=%lf\n", rhotal_2); + + fp = fopen(filename, "w"); + for (y = 1; y <= M1; y++) { + for (x = N16; x <= N16 + N; x++) { + fprintf(fp, "%e ", phi[y][x]); + } + fprintf(fp, "\n"); + } + fclose(fp); + /* +sprintf(filename,"%s%d","ux",t); + fp=fopen(filename,"w"); + for(y=1;y<=M+1;y++) +{ +for(x=N16;x<=N16+N;x++) +{ + fprintf(fp,"%e ",ux[y][x]); +} + fprintf(fp,"\n"); +} +fclose(fp); + +sprintf(filename,"%s%d","uy",t); + fp=fopen(filename,"w"); + for(y=1;y<=M+1;y++) +{ +for(x=N16;x<=N16+N;x++) +{ + fprintf(fp,"%e ",uy[y][x]); +} +fprintf(fp,"\n"); +} +fclose(fp);*/ +} +/* + double A_spike( ) + { + int j, k, flag; + double ls; + + for(k=N16;k<=N16+N;k++) + { + for(j=1;j<=M1;j++) + { + if(phi[j][k]>=-0.01&&phi[j][k]<=0.01) + { + ls=NY/2.0-j; + flag=1; + break; + } + + } + if(flag==1) + { + break; + } + } + + return ls; + } + +double A_bulble( ) +{ + int j, k, flag; + double lb; + for(k=N16;k<=N16+N;k++) + { + for(j=M1;j>=1;j--) + { + if(phi[j][k]>=-0.01&&phi[j][k]<=0.01) + { + lb=j-NY/2.0; + flag=1; + break; + } + } + if(flag==1) + { + break; + } + } + return lb; + }*/ diff --git a/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/paddle_two_phase/dataset.py b/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/paddle_two_phase/dataset.py index dde0702791..5dab4dd1e7 100644 --- a/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/paddle_two_phase/dataset.py +++ b/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/paddle_two_phase/dataset.py @@ -1,289 +1,289 @@ -import numpy as np -import scipy.io as io - -np.random.seed(1234) - - -class DataSet: - def __init__(self, num, bs, modes): - self.num = num - self.bs = bs - self.modes = modes - ( - self.F_train, - self.U_train, - self.F_test, - self.U_test, - self.x_train, - self.u_basis, - self.lam_u, - ) = self.load_data() - """ - self.F_train, self.U_train, self.V_train, \ - self.F_test, self.U_test, self.V_test, \ - self.x_train, \ - self.u_mean, self.u_std, \ - self.v_mean, self.v_std, \ - self.u_basis, self.v_basis, \ - self.lam_u, self.lam_v = self.load_data() - """ - - def PODbasis(self): - s = 65 - 
num_res = s * s - u_basis_out = np.reshape(self.u_basis.T, (-1, num_res, 1)) - v_basis_out = np.reshape(self.v_basis.T, (-1, num_res, 1)) - u_basis_out, v_basis_out = self.decoder(u_basis_out, v_basis_out) - u_basis_out = u_basis_out - self.u_mean - v_basis_out = v_basis_out - self.v_mean - u_basis_out = np.reshape(u_basis_out, (-1, s, s)) - v_basis_out = np.reshape(v_basis_out, (-1, s, s)) - save_dict = { - "u_basis": u_basis_out, - "v_basis": v_basis_out, - "lam_u": self.lam_u, - "lam_v": self.lam_v, - } - io.savemat("./Output/basis.mat", save_dict) - return self.u_basis, self.v_basis - - def samples(self): - """ - num_train = 40000 - num_test = 10000 - data = io.loadmat('./Data/Data') - F = data['F'] - U = data['U'] - """ - - num_train = 1 - x = np.linspace(-1, 1, self.num) - y = np.linspace(-1, 1, self.num) - xx, yy = np.meshgrid(x, y) - xx = np.reshape(xx, (-1, 1)) - yy = np.reshape(yy, (-1, 1)) - x_train = np.hstack((xx, yy)) - F, U = self.func(x_train) - - Num = self.num * self.num - - F = np.reshape(F, (-1, self.num, self.num, 1)) - U = np.reshape(U, (-1, Num, 1)) - F_train = F[:num_train, :, :] - U_train = U[:num_train, :, :] - F_test = F[num_train:, :, :] - U_test = U[num_train:, :, :] - return F_train, U_train, F_test, U_test - - def decoder(self, u, v): - u = u * self.u_std + self.u_mean - v = v * self.v_std + self.v_mean - return u, v - - def load_data(self): - data_train = io.loadmat("./Data/Two_Phase_Flow_Training") - data_test = io.loadmat("./Data/Two_Phase_Flow_Test") - - step = 4 - num_yb = 100 - num_ye = 900 - num_xb = 0 - num_xe = 256 - - """ - a_train = data_train['u_bc'].astype(np.float32) - u_train = data_train['u_data'].astype(np.float32) - v_train = data_train['v_data'].astype(np.float32) - - a_test = data_test['u_bc'].astype(np.float32) - u_test = data_test['u_data'].astype(np.float32) - v_test = data_test['v_data'].astype(np.float32) - """ - num = 30 - in_dim = 1 - x = np.linspace(0, 1, num).reshape((1, -1)) - x = x.astype(np.float32) - a_train = np.arange(20.0, 1000.0, 20.0, dtype=np.float32).reshape((-1, 1)) - # a_train = np.matmul(0.01*a_train, loc) - u_train = data_train["phi_data"] - u_train = u_train[:, num_yb:num_ye:step, num_xb:num_xe:step] - u_train = u_train.astype(np.float32) - print(u_train.shape) - - # a_test = np.array([110.], dtype=np.float32).reshape((-1, 1)) - a_test = np.arange(55.0, 1000.0, 100.0, dtype=np.float32).reshape((-1, 1)) - print(a_test) - # a_test = np.matmul(0.01*a_test, loc) - u_test = data_test["phi_data"] - # a_test = np.array([100.], dtype=np.float32).reshape((-1, 1)) - # u_test = u_train[4, :, :] - u_test = u_test[:, num_yb:num_ye:step, num_xb:num_xe:step] - u_test = u_test.astype(np.float32) - print(u_test.shape) - - xx = data_train["x_2d"] - yy = data_train["y_2d"] - - xx = np.reshape(xx, (-1, 1)) - yy = np.reshape(yy, (-1, 1)) - x_train = np.hstack((xx, yy)) - - """ - perm = np.random.permutation(a.shape[0]) - a = a[perm, :] - u = u[perm, :, :] - v = v[perm, :, :] - """ - - """ - num_train = 100 - num_test = 10 - s = 65 - """ - num_res = u_train.shape[1] * u_train.shape[2] - - F_train = np.reshape(a_train, (-1, in_dim)) - U_train = np.reshape(u_train, (-1, num_res, 1)) - - F_test = np.reshape(a_test, (-1, in_dim)) - U_test = np.reshape(u_test, (-1, num_res, 1)) - - """ - U = np.reshape(U_train, (-1, num_res)) - C_u = 1./(num_train-1)*np.matmul(U.T, U) - lam_u, phi_u = np.linalg.eigh(C_u) - - lam_u = np.flip(lam_u) - phi_u = np.fliplr(phi_u) - - - u_cumsum = np.cumsum(lam_u) - u_per = u_cumsum[self.modes-1]/u_cumsum[-1] - """ 
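# Editor's sketch (not part of the original script): load_data() below reads a
# precomputed POD basis from ./Output/basis ("phi_basis" and "lam_u"). A
# hypothetical offline step that could produce that file, mirroring the
# commented-out covariance/eigendecomposition above and assuming snapshots
# `u_train` of shape (num_train, ny, nx) as loaded in load_data():
import numpy as np
import scipy.io as io

def build_pod_basis(u_train, out_path="./Output/basis.mat"):
    num_train = u_train.shape[0]
    U = np.reshape(u_train, (num_train, -1))         # one flattened snapshot per row
    C_u = np.matmul(U.T, U) / (num_train - 1)        # spatial covariance of snapshots
    lam_u, phi_u = np.linalg.eigh(C_u)               # eigenpairs in ascending order
    lam_u, phi_u = np.flip(lam_u), np.fliplr(phi_u)  # reorder to descending energy
    io.savemat(out_path, {"phi_basis": phi_u, "lam_u": lam_u})
    return phi_u, lam_u
# e.g. build_pod_basis(u_train) would write the file that load_data() then
# truncates to its first `modes` columns.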
- - data_basis = io.loadmat("./Output/basis") - phi_u = data_basis["phi_basis"] - u_basis = phi_u[:, : self.modes] - lam_u = data_basis["lam_u"] - u_cumsum = np.cumsum(lam_u) - u_per = u_cumsum[self.modes - 1] / u_cumsum[-1] - - print("Kept Energy: u: %.5f" % (u_per)) - - save_dict = {"phi_basis": phi_u, "lam_u": lam_u} - io.savemat("./Output/basis.mat", save_dict) - """ - f_train_mean = np.mean(np.reshape(a_train, (-1, s)), 0) - f_train_std = np.std(np.reshape(a_train, (-1, s)), 0) - u_train_mean = np.mean(np.reshape(u_train, (-1, s, s)), 0) - u_train_std = np.std(np.reshape(u_train, (-1, s, s)), 0) - v_train_mean = np.mean(np.reshape(v_train, (-1, s, s)), 0) - v_train_std = np.std(np.reshape(v_train, (-1, s, s)), 0) - - u_train_mean = np.reshape(u_train_mean, (-1, num_res, 1)) - u_train_std = np.reshape(u_train_std, (-1, num_res, 1)) - v_train_mean = np.reshape(v_train_mean, (-1, num_res, 1)) - v_train_std = np.reshape(v_train_std, (-1, num_res, 1)) - - F_train = np.reshape(a_train, (-1, s)) - U_train = np.reshape(u_train, (-1, num_res, 1)) - V_train = np.reshape(v_train, (-1, num_res, 1)) - - F_train = (F_train - f_train_mean)/(f_train_std + 1.0e-9) - U_train = (U_train - u_train_mean)/(u_train_std + 1.0e-9) - V_train = (V_train - v_train_mean)/(v_train_std + 1.0e-9) - - F_test = np.reshape(a_test, (-1, s)) - U_test = np.reshape(u_test, (-1, num_res, 1)) - V_test = np.reshape(v_test, (-1, num_res, 1)) - - F_test = (F_test - f_train_mean)/(f_train_std + 1.0e-9) - - U = np.reshape(U_train, (-1, num_res)) - V = np.reshape(V_train, (-1, num_res)) - C_u = 1./(num_train-1)*np.matmul(U.T, U) - C_v = 1./(num_train-1)*np.matmul(V.T, V) - lam_u, phi_u = np.linalg.eigh(C_u) - lam_v, phi_v = np.linalg.eigh(C_v) - - lam_u = np.flip(lam_u) - phi_u = np.fliplr(phi_u) - lam_v = np.flip(lam_v) - phi_v = np.fliplr(phi_v) - - u_cumsum = np.cumsum(lam_u) - v_cumsum = np.cumsum(lam_v) - u_per = u_cumsum[self.modes-1]/u_cumsum[-1] - v_per = v_cumsum[self.modes-1]/v_cumsum[-1] - - u_basis = phi_u[:, :self.modes] - v_basis = phi_v[:, :self.modes] - - print("Kept Energy: u: %.3f, v: %.3f"%(u_per, v_per)) - """ - - """ - plt.plot(lam_u[:self.modes], 'k-') - plt.plot(lam_v[:self.modes], 'r--') - plt.show() - """ - - """ - F_train = np.reshape(f_train, (-1, s)) - U_train = np.reshape(u_train, (-1, num_res, 1)) - V_train = np.reshape(v_train, (-1, num_res, 1)) - - F_test = np.reshape(f_test, (-1, s)) - U_test = np.reshape(u_test, (-1, num_res, 1)) - V_test = np.reshape(v_test, (-1, num_res, 1)) - """ - - """ - U_ref = np.reshape(U_test, (U_test.shape[0], U_test.shape[1])) - np.savetxt('./Output/u_ref', U_ref, fmt='%e') - """ - - return F_train, U_train, F_test, U_test, x_train, u_basis, lam_u - - def minibatch(self): - batch_id = np.random.choice(self.F_train.shape[0], self.bs, replace=False) - f_train = [self.F_train[i : i + 1] for i in batch_id] - f_train = np.concatenate(f_train, axis=0) - u_train = [self.U_train[i : i + 1] for i in batch_id] - u_train = np.concatenate(u_train, axis=0) - """ - v_train = [self.V_train[i:i+1] for i in batch_id] - v_train = np.concatenate(v_train, axis=0) - """ - - """ - x = np.linspace(0., 1, self.num) - y = np.linspace(0., 1, self.num) - xx, yy = np.meshgrid(x, y) - xx = np.reshape(xx, (-1, 1)) - yy = np.reshape(yy, (-1, 1)) - x_train = np.hstack((xx, yy)) - """ - - Xmin = np.array([0.0, 0.0]).reshape((-1, 2)) - Xmax = np.array([1.0, 1.0]).reshape((-1, 2)) - # x_train = np.linspace(-1, 1, self.N).reshape((-1, 1)) - - return self.x_train, f_train, u_train, Xmin, Xmax - - def 
testbatch(self): - """ - batch_id = np.random.choice(self.F_test.shape[0], num_test, replace=False) - f_test = [self.F_test[i:i+1] for i in batch_id] - f_test = np.concatenate(f_test, axis=0) - u_test = [self.U_test[i:i+1] for i in batch_id] - u_test = np.concatenate(u_test, axis=0) - v_test = [self.V_test[i:i+1] for i in batch_id] - v_test = np.concatenate(v_test, axis=0) - batch_id = np.reshape(batch_id, (-1, 1)) - """ - - x_test = self.x_train - f_test, u_test = self.F_test, self.U_test - - return x_test, f_test, u_test +import numpy as np +import scipy.io as io + +np.random.seed(1234) + + +class DataSet: + def __init__(self, num, bs, modes): + self.num = num + self.bs = bs + self.modes = modes + ( + self.F_train, + self.U_train, + self.F_test, + self.U_test, + self.x_train, + self.u_basis, + self.lam_u, + ) = self.load_data() + """ + self.F_train, self.U_train, self.V_train, \ + self.F_test, self.U_test, self.V_test, \ + self.x_train, \ + self.u_mean, self.u_std, \ + self.v_mean, self.v_std, \ + self.u_basis, self.v_basis, \ + self.lam_u, self.lam_v = self.load_data() + """ + + def PODbasis(self): + s = 65 + num_res = s * s + u_basis_out = np.reshape(self.u_basis.T, (-1, num_res, 1)) + v_basis_out = np.reshape(self.v_basis.T, (-1, num_res, 1)) + u_basis_out, v_basis_out = self.decoder(u_basis_out, v_basis_out) + u_basis_out = u_basis_out - self.u_mean + v_basis_out = v_basis_out - self.v_mean + u_basis_out = np.reshape(u_basis_out, (-1, s, s)) + v_basis_out = np.reshape(v_basis_out, (-1, s, s)) + save_dict = { + "u_basis": u_basis_out, + "v_basis": v_basis_out, + "lam_u": self.lam_u, + "lam_v": self.lam_v, + } + io.savemat("./Output/basis.mat", save_dict) + return self.u_basis, self.v_basis + + def samples(self): + """ + num_train = 40000 + num_test = 10000 + data = io.loadmat('./Data/Data') + F = data['F'] + U = data['U'] + """ + + num_train = 1 + x = np.linspace(-1, 1, self.num) + y = np.linspace(-1, 1, self.num) + xx, yy = np.meshgrid(x, y) + xx = np.reshape(xx, (-1, 1)) + yy = np.reshape(yy, (-1, 1)) + x_train = np.hstack((xx, yy)) + F, U = self.func(x_train) + + Num = self.num * self.num + + F = np.reshape(F, (-1, self.num, self.num, 1)) + U = np.reshape(U, (-1, Num, 1)) + F_train = F[:num_train, :, :] + U_train = U[:num_train, :, :] + F_test = F[num_train:, :, :] + U_test = U[num_train:, :, :] + return F_train, U_train, F_test, U_test + + def decoder(self, u, v): + u = u * self.u_std + self.u_mean + v = v * self.v_std + self.v_mean + return u, v + + def load_data(self): + data_train = io.loadmat("./Data/Two_Phase_Flow_Training") + data_test = io.loadmat("./Data/Two_Phase_Flow_Test") + + step = 4 + num_yb = 100 + num_ye = 900 + num_xb = 0 + num_xe = 256 + + """ + a_train = data_train['u_bc'].astype(np.float32) + u_train = data_train['u_data'].astype(np.float32) + v_train = data_train['v_data'].astype(np.float32) + + a_test = data_test['u_bc'].astype(np.float32) + u_test = data_test['u_data'].astype(np.float32) + v_test = data_test['v_data'].astype(np.float32) + """ + num = 30 + in_dim = 1 + x = np.linspace(0, 1, num).reshape((1, -1)) + x = x.astype(np.float32) + a_train = np.arange(20.0, 1000.0, 20.0, dtype=np.float32).reshape((-1, 1)) + # a_train = np.matmul(0.01*a_train, loc) + u_train = data_train["phi_data"] + u_train = u_train[:, num_yb:num_ye:step, num_xb:num_xe:step] + u_train = u_train.astype(np.float32) + print(u_train.shape) + + # a_test = np.array([110.], dtype=np.float32).reshape((-1, 1)) + a_test = np.arange(55.0, 1000.0, 100.0, dtype=np.float32).reshape((-1, 1)) + 
print(a_test) + # a_test = np.matmul(0.01*a_test, loc) + u_test = data_test["phi_data"] + # a_test = np.array([100.], dtype=np.float32).reshape((-1, 1)) + # u_test = u_train[4, :, :] + u_test = u_test[:, num_yb:num_ye:step, num_xb:num_xe:step] + u_test = u_test.astype(np.float32) + print(u_test.shape) + + xx = data_train["x_2d"] + yy = data_train["y_2d"] + + xx = np.reshape(xx, (-1, 1)) + yy = np.reshape(yy, (-1, 1)) + x_train = np.hstack((xx, yy)) + + """ + perm = np.random.permutation(a.shape[0]) + a = a[perm, :] + u = u[perm, :, :] + v = v[perm, :, :] + """ + + """ + num_train = 100 + num_test = 10 + s = 65 + """ + num_res = u_train.shape[1] * u_train.shape[2] + + F_train = np.reshape(a_train, (-1, in_dim)) + U_train = np.reshape(u_train, (-1, num_res, 1)) + + F_test = np.reshape(a_test, (-1, in_dim)) + U_test = np.reshape(u_test, (-1, num_res, 1)) + + """ + U = np.reshape(U_train, (-1, num_res)) + C_u = 1./(num_train-1)*np.matmul(U.T, U) + lam_u, phi_u = np.linalg.eigh(C_u) + + lam_u = np.flip(lam_u) + phi_u = np.fliplr(phi_u) + + + u_cumsum = np.cumsum(lam_u) + u_per = u_cumsum[self.modes-1]/u_cumsum[-1] + """ + + data_basis = io.loadmat("./Output/basis") + phi_u = data_basis["phi_basis"] + u_basis = phi_u[:, : self.modes] + lam_u = data_basis["lam_u"] + u_cumsum = np.cumsum(lam_u) + u_per = u_cumsum[self.modes - 1] / u_cumsum[-1] + + print("Kept Energy: u: %.5f" % (u_per)) + + save_dict = {"phi_basis": phi_u, "lam_u": lam_u} + io.savemat("./Output/basis.mat", save_dict) + """ + f_train_mean = np.mean(np.reshape(a_train, (-1, s)), 0) + f_train_std = np.std(np.reshape(a_train, (-1, s)), 0) + u_train_mean = np.mean(np.reshape(u_train, (-1, s, s)), 0) + u_train_std = np.std(np.reshape(u_train, (-1, s, s)), 0) + v_train_mean = np.mean(np.reshape(v_train, (-1, s, s)), 0) + v_train_std = np.std(np.reshape(v_train, (-1, s, s)), 0) + + u_train_mean = np.reshape(u_train_mean, (-1, num_res, 1)) + u_train_std = np.reshape(u_train_std, (-1, num_res, 1)) + v_train_mean = np.reshape(v_train_mean, (-1, num_res, 1)) + v_train_std = np.reshape(v_train_std, (-1, num_res, 1)) + + F_train = np.reshape(a_train, (-1, s)) + U_train = np.reshape(u_train, (-1, num_res, 1)) + V_train = np.reshape(v_train, (-1, num_res, 1)) + + F_train = (F_train - f_train_mean)/(f_train_std + 1.0e-9) + U_train = (U_train - u_train_mean)/(u_train_std + 1.0e-9) + V_train = (V_train - v_train_mean)/(v_train_std + 1.0e-9) + + F_test = np.reshape(a_test, (-1, s)) + U_test = np.reshape(u_test, (-1, num_res, 1)) + V_test = np.reshape(v_test, (-1, num_res, 1)) + + F_test = (F_test - f_train_mean)/(f_train_std + 1.0e-9) + + U = np.reshape(U_train, (-1, num_res)) + V = np.reshape(V_train, (-1, num_res)) + C_u = 1./(num_train-1)*np.matmul(U.T, U) + C_v = 1./(num_train-1)*np.matmul(V.T, V) + lam_u, phi_u = np.linalg.eigh(C_u) + lam_v, phi_v = np.linalg.eigh(C_v) + + lam_u = np.flip(lam_u) + phi_u = np.fliplr(phi_u) + lam_v = np.flip(lam_v) + phi_v = np.fliplr(phi_v) + + u_cumsum = np.cumsum(lam_u) + v_cumsum = np.cumsum(lam_v) + u_per = u_cumsum[self.modes-1]/u_cumsum[-1] + v_per = v_cumsum[self.modes-1]/v_cumsum[-1] + + u_basis = phi_u[:, :self.modes] + v_basis = phi_v[:, :self.modes] + + print("Kept Energy: u: %.3f, v: %.3f"%(u_per, v_per)) + """ + + """ + plt.plot(lam_u[:self.modes], 'k-') + plt.plot(lam_v[:self.modes], 'r--') + plt.show() + """ + + """ + F_train = np.reshape(f_train, (-1, s)) + U_train = np.reshape(u_train, (-1, num_res, 1)) + V_train = np.reshape(v_train, (-1, num_res, 1)) + + F_test = np.reshape(f_test, (-1, s)) + 
U_test = np.reshape(u_test, (-1, num_res, 1)) + V_test = np.reshape(v_test, (-1, num_res, 1)) + """ + + """ + U_ref = np.reshape(U_test, (U_test.shape[0], U_test.shape[1])) + np.savetxt('./Output/u_ref', U_ref, fmt='%e') + """ + + return F_train, U_train, F_test, U_test, x_train, u_basis, lam_u + + def minibatch(self): + batch_id = np.random.choice(self.F_train.shape[0], self.bs, replace=False) + f_train = [self.F_train[i : i + 1] for i in batch_id] + f_train = np.concatenate(f_train, axis=0) + u_train = [self.U_train[i : i + 1] for i in batch_id] + u_train = np.concatenate(u_train, axis=0) + """ + v_train = [self.V_train[i:i+1] for i in batch_id] + v_train = np.concatenate(v_train, axis=0) + """ + + """ + x = np.linspace(0., 1, self.num) + y = np.linspace(0., 1, self.num) + xx, yy = np.meshgrid(x, y) + xx = np.reshape(xx, (-1, 1)) + yy = np.reshape(yy, (-1, 1)) + x_train = np.hstack((xx, yy)) + """ + + Xmin = np.array([0.0, 0.0]).reshape((-1, 2)) + Xmax = np.array([1.0, 1.0]).reshape((-1, 2)) + # x_train = np.linspace(-1, 1, self.N).reshape((-1, 1)) + + return self.x_train, f_train, u_train, Xmin, Xmax + + def testbatch(self): + """ + batch_id = np.random.choice(self.F_test.shape[0], num_test, replace=False) + f_test = [self.F_test[i:i+1] for i in batch_id] + f_test = np.concatenate(f_test, axis=0) + u_test = [self.U_test[i:i+1] for i in batch_id] + u_test = np.concatenate(u_test, axis=0) + v_test = [self.V_test[i:i+1] for i in batch_id] + v_test = np.concatenate(v_test, axis=0) + batch_id = np.reshape(batch_id, (-1, 1)) + """ + + x_test = self.x_train + f_test, u_test = self.F_test, self.U_test + + return x_test, f_test, u_test diff --git a/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/paddle_two_phase/main.py b/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/paddle_two_phase/main.py index 19a08478e8..cb9a2e89ed 100644 --- a/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/paddle_two_phase/main.py +++ b/jointContribution/PIDeepONet-LBM/task2-two-phase-flow/paddle_two_phase/main.py @@ -1,128 +1,128 @@ -import argparse -import os -import time - -import numpy as np -import paddle as pd -import paddle.nn.functional as F -import scipy.io as io -from dataset import DataSet - -parser = argparse.ArgumentParser() -parser.add_argument("--local_rank", default="0", type=str) -args = parser.parse_args() -device = args.local_rank - -pd.set_default_dtype("float32") -os.environ["CUDA_VISIBLE_DEVICES"] = device - -pd.seed(1234) -np.random.seed(1234) - -in_dim = 1 -# resolution -h = 200 -s = 64 - -# output dimension of Branch/Trunk -p = 100 -p1 = p // 2 - -# batch_size -bs = 32 - -# size of input for Trunk net -nx = in_dim -x_num = nx * nx - -# POD modes -modes = 40 -out_dims = modes -# coeffs for POD -# layer_pod = [h, 64, 64, 2*modes] - - -def prediction(model, data, u_basis, f_test, u_test): - out_B = model(f_test) - u_pred = pd.einsum("bi,ni->bn", out_B, u_basis) - u_pred = pd.tanh(3.0 * u_pred) - u_pred = u_pred.numpy() - # u_temp = np.tile(u_pred[:, :, None], [1, 1, 1]) - u_test = u_test.reshape((-1, u_test.shape[1])) - """ - u_temp, v_temp = np.tile(u_pred[:, :, None], [1, 1, 1]), np.tile(v_pred[:, :, None], [1, 1, 1]) - u_pred, v_pred = data.decoder(u_temp, v_temp) - """ - err_u = np.mean( - np.linalg.norm(u_pred - u_test, axis=1) / np.linalg.norm(u_test, axis=1) - ) - u_pred, u_test = u_pred.reshape((-1, h, s)), u_test.reshape((-1, h, s)) - save_dict = {"u_pred": u_pred, "u_test": u_test} - io.savemat("./Output/pred.mat", save_dict) - # err_v = np.mean(np.linalg.norm(v_pred - 
v_test, 2, axis=1)/np.linalg.norm(v_test, 2, axis=1)) - return err_u, u_pred - - -def main(): - data = DataSet(nx, bs, modes) - # _, _,_, _, _, u_basis, _ = data.load_data() - """ - u_basis, v_basis = data.PODbasis() - """ - u_basis = pd.to_tensor(data.u_basis) - - ##paddle-Branch net - num_nodes = 64 - model = pd.nn.Sequential( - pd.nn.Linear(in_dim, num_nodes), - pd.nn.Tanh(), - pd.nn.Linear(num_nodes, num_nodes), - pd.nn.Tanh(), - pd.nn.Linear(num_nodes, out_dims), - ) - # optimizer - opt = pd.optimizer.Adam(learning_rate=1.0e-3, parameters=model.parameters()) - - model.train() - - x_test, f_test, u_test = data.testbatch() - f_test = pd.to_tensor(f_test) - n = 0 - nmax = 20000 - start_time = time.perf_counter() - time_step_0 = time.perf_counter() - while n <= nmax: - - x_train, f_train, u_train, _, _ = data.minibatch() - f_train, u_train = pd.to_tensor(f_train), pd.to_tensor(u_train) - out_B = model(f_train) - out_B_u = out_B - u_pred = pd.einsum("bi,ni->bn", out_B_u, u_basis) - loss = F.mse_loss(u_pred, u_train[:, :, 0]) - loss.backward() - opt.step() - opt.clear_grad() - - if n % 100 == 0: - time_step_1000 = time.perf_counter() - T = time_step_1000 - time_step_0 - # err_u, _ = prediction(model, data, u_basis, f_test, u_test) - # err_u, err_v = data_save.save(sess, x_pos, f_ph, u_ph, v_ph, u_pred, v_pred, data, num_test, h) - # print('Step: %d, Loss: %.3e, err_u: %.3e, Time (secs): %.3f'%(n, float(loss), err_u, T)) - print("Step: %d, Loss: %.3e, Time (secs): %.3f" % (n, float(loss), T)) - time_step_0 = time.perf_counter() - - n += 1 - - stop_time = time.perf_counter() - print("Training time (secs): %.3f" % (stop_time - start_time)) - - start_time = time.perf_counter() - err_u, u_pred = prediction(model, data, u_basis, f_test, u_test) - stop_time = time.perf_counter() - T = stop_time - start_time - print("err_u: %.3e, Inference time (secs): %.5f" % (err_u, T)) - - -if __name__ == "__main__": - main() +import argparse +import os +import time + +import numpy as np +import paddle as pd +import paddle.nn.functional as F +import scipy.io as io +from dataset import DataSet + +parser = argparse.ArgumentParser() +parser.add_argument("--local_rank", default="0", type=str) +args = parser.parse_args() +device = args.local_rank + +pd.set_default_dtype("float32") +os.environ["CUDA_VISIBLE_DEVICES"] = device + +pd.seed(1234) +np.random.seed(1234) + +in_dim = 1 +# resolution +h = 200 +s = 64 + +# output dimension of Branch/Trunk +p = 100 +p1 = p // 2 + +# batch_size +bs = 32 + +# size of input for Trunk net +nx = in_dim +x_num = nx * nx + +# POD modes +modes = 40 +out_dims = modes +# coeffs for POD +# layer_pod = [h, 64, 64, 2*modes] + + +def prediction(model, data, u_basis, f_test, u_test): + out_B = model(f_test) + u_pred = pd.einsum("bi,ni->bn", out_B, u_basis) + u_pred = pd.tanh(3.0 * u_pred) + u_pred = u_pred.numpy() + # u_temp = np.tile(u_pred[:, :, None], [1, 1, 1]) + u_test = u_test.reshape((-1, u_test.shape[1])) + """ + u_temp, v_temp = np.tile(u_pred[:, :, None], [1, 1, 1]), np.tile(v_pred[:, :, None], [1, 1, 1]) + u_pred, v_pred = data.decoder(u_temp, v_temp) + """ + err_u = np.mean( + np.linalg.norm(u_pred - u_test, axis=1) / np.linalg.norm(u_test, axis=1) + ) + u_pred, u_test = u_pred.reshape((-1, h, s)), u_test.reshape((-1, h, s)) + save_dict = {"u_pred": u_pred, "u_test": u_test} + io.savemat("./Output/pred.mat", save_dict) + # err_v = np.mean(np.linalg.norm(v_pred - v_test, 2, axis=1)/np.linalg.norm(v_test, 2, axis=1)) + return err_u, u_pred + + +def main(): + data = DataSet(nx, bs, 
modes) + # _, _,_, _, _, u_basis, _ = data.load_data() + """ + u_basis, v_basis = data.PODbasis() + """ + u_basis = pd.to_tensor(data.u_basis) + + ##paddle-Branch net + num_nodes = 64 + model = pd.nn.Sequential( + pd.nn.Linear(in_dim, num_nodes), + pd.nn.Tanh(), + pd.nn.Linear(num_nodes, num_nodes), + pd.nn.Tanh(), + pd.nn.Linear(num_nodes, out_dims), + ) + # optimizer + opt = pd.optimizer.Adam(learning_rate=1.0e-3, parameters=model.parameters()) + + model.train() + + x_test, f_test, u_test = data.testbatch() + f_test = pd.to_tensor(f_test) + n = 0 + nmax = 20000 + start_time = time.perf_counter() + time_step_0 = time.perf_counter() + while n <= nmax: + + x_train, f_train, u_train, _, _ = data.minibatch() + f_train, u_train = pd.to_tensor(f_train), pd.to_tensor(u_train) + out_B = model(f_train) + out_B_u = out_B + u_pred = pd.einsum("bi,ni->bn", out_B_u, u_basis) + loss = F.mse_loss(u_pred, u_train[:, :, 0]) + loss.backward() + opt.step() + opt.clear_grad() + + if n % 100 == 0: + time_step_1000 = time.perf_counter() + T = time_step_1000 - time_step_0 + # err_u, _ = prediction(model, data, u_basis, f_test, u_test) + # err_u, err_v = data_save.save(sess, x_pos, f_ph, u_ph, v_ph, u_pred, v_pred, data, num_test, h) + # print('Step: %d, Loss: %.3e, err_u: %.3e, Time (secs): %.3f'%(n, float(loss), err_u, T)) + print("Step: %d, Loss: %.3e, Time (secs): %.3f" % (n, float(loss), T)) + time_step_0 = time.perf_counter() + + n += 1 + + stop_time = time.perf_counter() + print("Training time (secs): %.3f" % (stop_time - start_time)) + + start_time = time.perf_counter() + err_u, u_pred = prediction(model, data, u_basis, f_test, u_test) + stop_time = time.perf_counter() + T = stop_time - start_time + print("err_u: %.3e, Inference time (secs): %.5f" % (err_u, T)) + + +if __name__ == "__main__": + main() diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/common.c b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/common.c index b4b6498018..82e5905d49 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/common.c +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/common.c @@ -1,26 +1,26 @@ -#include "common.h" -#include -#include - -// parameters used in physical field -double nu, U0; -double rho0 = 1.0; -double rho_in, rho_out; -double Fx, Fy; -// dimensionless parameters -double Re = 1000.0; -// parameters used in LBE simulation -double tau_f, wf, ci, rcc; -// parameters used in computation -int n = 0, nmax; -double dx, dt, dn; -double sum_u_o = 0.0; - -// CPU -double f[Q][NY2][NX2]; -double g[Q][NY2][NX2]; -double sf[Q]; -// device adress -double *f_dev; -double *F_dev; -double *sf_dev; +#include "common.h" +#include +#include + +// parameters used in physical field +double nu, U0; +double rho0 = 1.0; +double rho_in, rho_out; +double Fx, Fy; +// dimensionless parameters +double Re = 1000.0; +// parameters used in LBE simulation +double tau_f, wf, ci, rcc; +// parameters used in computation +int n = 0, nmax; +double dx, dt, dn; +double sum_u_o = 0.0; + +// CPU +double f[Q][NY2][NX2]; +double g[Q][NY2][NX2]; +double sf[Q]; +// device adress +double *f_dev; +double *F_dev; +double *sf_dev; diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/common.h b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/common.h index 72df58a5a4..4dcf77bfc1 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/common.h +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/common.h @@ -1,55 +1,55 @@ -#ifndef __COMMON_H_ -#define __COMMON_H_ - 
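/*
 * Editor's sketch (not part of the original header): the geometry macros
 * below define a padded layout in which the Q = 9 distribution functions
 * are stored as contiguous planes of NY2*NX2 doubles, and the kernels in
 * flow.cu address them as f_d[NX2*y + x + q*NYNX2] with x offset by N16.
 * A hypothetical helper that mirrors that indexing (pitch values passed in
 * explicitly so it stands alone) could look like:
 */
static inline int lbm_index(int q, int y, int x, int nx2, int nynx2) {
  /* flat offset of the q-th distribution at padded node (y, x) */
  return q * nynx2 + nx2 * y + x;
}
/* e.g. f_dev[lbm_index(5, y, x, NX2, NYNX2)] addresses f5 at node (y, x). */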
-#define XL 1 -#define M (256 * XL) // Grid in y- direciton -#define N (256 * XL) // Grid in x- direciton -#define M1 (M + 1) // Number of the grid in y- direction -#define N1 (N + 1) // Number of the grid in x- direction -#define Ly 1.f // Length of the domain in y- direction -#define Lx (Ly * N / M) // Length of the domain in x- direction -//------------------------------------------------------------------------------------------GPU -#define N16 16 -#define NX2 ((N1 / 16 + 1) * 16 + N16) -#define NY2 (M + 3) -#define NYNX2 (NY2 * NX2) -#define Mc (NY2 / 2) -#define Nc (NX2 / 2) -#define Mb 1 -#define Me M1 -//////////////////////////////////////////////////////////////////////////////////////////////////// -#define BX 128 -#define BY 1 -#define NT 64 // used for boundary grid -#define BCX 64 -#define BCY 1 -//////////////////////////////////////////////////////////////////////////////////////////////////// -#define Q 9 -#define T 1000 -#define PI (4.0 * atan(1.0)) - -// parameters used in physical field -extern double nu, U0; -extern double rho0; -extern double rho_in, rho_out; -extern double Fx, Fy; -// dimensionless parameters -extern double Re; -// parameters used in LBE simulation -extern double tau_f, wf, ci, rcc; -// parameters used in computation -extern int n, nmax; -extern double dx, dt, dn; -extern double sum_u_o; - -///////////////////////////////////////////////////////////////////////////////////////////////// -// CPU -extern double f[Q][NY2][NX2]; -extern double sf[Q]; -//--------------------------------------------------------------------------------------------- -// device adress -extern double *f_dev; -extern double *F_dev; -extern double *sf_dev; -//----------------------------------------------------------------------------------------------------------------- -#endif +#ifndef __COMMON_H_ +#define __COMMON_H_ + +#define XL 1 +#define M (256 * XL) // Grid in y- direciton +#define N (256 * XL) // Grid in x- direciton +#define M1 (M + 1) // Number of the grid in y- direction +#define N1 (N + 1) // Number of the grid in x- direction +#define Ly 1.f // Length of the domain in y- direction +#define Lx (Ly * N / M) // Length of the domain in x- direction +//------------------------------------------------------------------------------------------GPU +#define N16 16 +#define NX2 ((N1 / 16 + 1) * 16 + N16) +#define NY2 (M + 3) +#define NYNX2 (NY2 * NX2) +#define Mc (NY2 / 2) +#define Nc (NX2 / 2) +#define Mb 1 +#define Me M1 +//////////////////////////////////////////////////////////////////////////////////////////////////// +#define BX 128 +#define BY 1 +#define NT 64 // used for boundary grid +#define BCX 64 +#define BCY 1 +//////////////////////////////////////////////////////////////////////////////////////////////////// +#define Q 9 +#define T 1000 +#define PI (4.0 * atan(1.0)) + +// parameters used in physical field +extern double nu, U0; +extern double rho0; +extern double rho_in, rho_out; +extern double Fx, Fy; +// dimensionless parameters +extern double Re; +// parameters used in LBE simulation +extern double tau_f, wf, ci, rcc; +// parameters used in computation +extern int n, nmax; +extern double dx, dt, dn; +extern double sum_u_o; + +///////////////////////////////////////////////////////////////////////////////////////////////// +// CPU +extern double f[Q][NY2][NX2]; +extern double sf[Q]; +//--------------------------------------------------------------------------------------------- +// device adress +extern double *f_dev; +extern double *F_dev; +extern double 
*sf_dev; +//----------------------------------------------------------------------------------------------------------------- +#endif diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/datasave.cu b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/datasave.cu index 011c664514..9b6f1a81a0 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/datasave.cu +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/datasave.cu @@ -1,41 +1,41 @@ -#include "common.h" -#include "lb.h" - -// save the results -void datasave() { - int x, y; - double ut, vt, rhot; - char u_name[20], v_name[20]; - FILE *fp1, *fp2, *fp3; - - sprintf(u_name, "%s%.8d", "./Output/u", n); - sprintf(v_name, "%s%.8d", "./Output/v", n); - if ((fp1 = fopen(u_name, "w")) == NULL) return; - if ((fp2 = fopen(v_name, "w")) == NULL) return; - if ((fp3 = fopen("./Output/rho", "w")) == NULL) return; - /* -if((fp1=fopen("./Output/u","w")) == NULL) return; -if((fp2=fopen("./Output/v","w")) == NULL) return; -if((fp3=fopen("./Output/rho","w")) == NULL) return; - */ - - for (y = 1; y <= M1; y++) { - for (x = N16; x < N16 + N1; x++) { - ut = UX(y, x) + 0.5 * dt * Fx; - vt = VY(y, x); - rhot = RHO(y, x); - - fprintf(fp1, "%e ", ut); - fprintf(fp2, "%e ", vt); - fprintf(fp3, "%e ", rhot); - } - fprintf(fp1, "\n\n"); - fprintf(fp2, "\n\n"); - fprintf(fp3, "\n\n"); - } - - fclose(fp1); - fclose(fp2); - fclose(fp3); -} -//-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +#include "common.h" +#include "lb.h" + +// save the results +void datasave() { + int x, y; + double ut, vt, rhot; + char u_name[20], v_name[20]; + FILE *fp1, *fp2, *fp3; + + sprintf(u_name, "%s%.8d", "./Output/u", n); + sprintf(v_name, "%s%.8d", "./Output/v", n); + if ((fp1 = fopen(u_name, "w")) == NULL) return; + if ((fp2 = fopen(v_name, "w")) == NULL) return; + if ((fp3 = fopen("./Output/rho", "w")) == NULL) return; + /* +if((fp1=fopen("./Output/u","w")) == NULL) return; +if((fp2=fopen("./Output/v","w")) == NULL) return; +if((fp3=fopen("./Output/rho","w")) == NULL) return; + */ + + for (y = 1; y <= M1; y++) { + for (x = N16; x < N16 + N1; x++) { + ut = UX(y, x) + 0.5 * dt * Fx; + vt = VY(y, x); + rhot = RHO(y, x); + + fprintf(fp1, "%e ", ut); + fprintf(fp2, "%e ", vt); + fprintf(fp3, "%e ", rhot); + } + fprintf(fp1, "\n\n"); + fprintf(fp2, "\n\n"); + fprintf(fp3, "\n\n"); + } + + fclose(fp1); + fclose(fp2); + fclose(fp3); +} +//-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/error.cu b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/error.cu index d39273a2fd..868363aca2 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/error.cu +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/error.cu @@ -1,18 +1,18 @@ -#include "common.h" -#include "lb.h" - -double error() { - int x, y; - double sum_u_c = 0.0; - double Err; - - for (y = 1; y <= M1; y++) - for (x = N16; x < N16 + N1; x++) { - sum_u_c += UX(y, x); - } - - Err = fabs(sum_u_c - sum_u_o) / fabs(sum_u_c); - sum_u_o = sum_u_c; - - return Err; -} +#include "common.h" +#include "lb.h" + +double error() { + int x, y; + double sum_u_c = 0.0; + double Err; + + for (y = 1; y <= M1; y++) + for (x = N16; x < N16 + N1; x++) { + sum_u_c += UX(y, x); + } + + Err = 
fabs(sum_u_c - sum_u_o) / fabs(sum_u_c); + sum_u_o = sum_u_c; + + return Err; +} diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/flow.cu b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/flow.cu index c5ef6ea158..acadf44c60 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/flow.cu +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/flow.cu @@ -1,341 +1,341 @@ -#include "common.h" -#include "lb.h" - -__constant__ double diag[Q] = {1.f / 9, - 1.f / 36, - 1.f / 36, - 1.f / 6, - 1.f / 12, - 1.0 / 6, - 1.f / 12, - 1.f / 4, - 1.f / 4}; -__constant__ int e_d[Q][2] = {{0, 0}, - - {1, 0}, - {0, 1}, - {-1, 0}, - {0, -1}, - - {1, 1}, - {-1, 1}, - {-1, -1}, - {1, -1}}; - -__constant__ int re_d[Q] = {0, - 3, - 4, - 1, - 2, - - 7, - 8, - 5, - 6}; - -//-------------------------------------------------------------------------------------------------------------------------- -//------------------------------------------------------------------------------------------------------------------------------ -__global__ void Evol_flow( - double *s_d, double dt, double Fx, double Fy, double *f_d, double *F_d) { - double RHO, U, V, UV; - int tx, ty, k, x, y; - double mf0, mf1, mf2, mf3, mf4, mf5, mf6, mf7, mf8; - __shared__ double f[Q][BY][BX]; - - tx = threadIdx.x; - ty = threadIdx.y; - x = N16 + blockIdx.x * BX + tx; - y = (1 + blockIdx.y * BY + ty); - k = NX2 * y + x; - - if (x < N16 + N1) { - f[0][ty][tx] = f_d[k + 0 * NYNX2]; - f[1][ty][tx] = f_d[k + 1 * NYNX2]; - f[2][ty][tx] = f_d[k + 2 * NYNX2]; - f[3][ty][tx] = f_d[k + 3 * NYNX2]; - f[4][ty][tx] = f_d[k + 4 * NYNX2]; - f[5][ty][tx] = f_d[k + 5 * NYNX2]; - f[6][ty][tx] = f_d[k + 6 * NYNX2]; - f[7][ty][tx] = f_d[k + 7 * NYNX2]; - f[8][ty][tx] = f_d[k + 8 * NYNX2]; - - // f-mf/////////////////////////// - mf0 = f[0][ty][tx] + f[1][ty][tx] + f[2][ty][tx] + f[3][ty][tx] + - f[4][ty][tx] + f[5][ty][tx] + f[6][ty][tx] + f[7][ty][tx] + - f[8][ty][tx]; - mf1 = -4 * f[0][ty][tx] - f[1][ty][tx] - f[2][ty][tx] - f[3][ty][tx] - - f[4][ty][tx] + - 2 * (f[5][ty][tx] + f[6][ty][tx] + f[7][ty][tx] + f[8][ty][tx]); - mf2 = 4 * f[0][ty][tx] - - 2 * (f[1][ty][tx] + f[2][ty][tx] + f[3][ty][tx] + f[4][ty][tx]) + - f[5][ty][tx] + f[6][ty][tx] + f[7][ty][tx] + f[8][ty][tx]; - mf3 = f[1][ty][tx] - f[3][ty][tx] + f[5][ty][tx] - f[6][ty][tx] - - f[7][ty][tx] + f[8][ty][tx]; - mf4 = -2 * (f[1][ty][tx] - f[3][ty][tx]) + f[5][ty][tx] - f[6][ty][tx] - - f[7][ty][tx] + f[8][ty][tx]; - mf5 = f[2][ty][tx] - f[4][ty][tx] + f[5][ty][tx] + f[6][ty][tx] - - f[7][ty][tx] - f[8][ty][tx]; - mf6 = -2 * (f[2][ty][tx] - f[4][ty][tx]) + f[5][ty][tx] + f[6][ty][tx] - - f[7][ty][tx] - f[8][ty][tx]; - mf7 = f[1][ty][tx] - f[2][ty][tx] + f[3][ty][tx] - f[4][ty][tx]; - mf8 = f[5][ty][tx] - f[6][ty][tx] + f[7][ty][tx] - f[8][ty][tx]; - - // macroscopic - // variables///////////////////////////////////////////////////////////////////////////// - RHO = RHO(ty, tx); - U = UX(ty, tx) + 0.5 * dt * Fx; - V = VY(ty, tx) + 0.5 * dt * Fy; - UV = U * U + V * V; - - // collision//------------------------------------------------------------------------------------------------------------------------------- - mf0 = (mf0 - s_d[0] * (mf0 - MEQ_0(RHO))) + - dt * (1.f - 0.5 * s_d[0]) * F_0(U, V, Fx, Fy); - mf1 = (mf1 - s_d[1] * (mf1 - MEQ_1(RHO, UV))) + - dt * (1.f - 0.5 * s_d[1]) * F_1(U, V, Fx, Fy) * 6.f; - mf2 = (mf2 - s_d[2] * (mf2 - MEQ_2(RHO, UV))) - - dt * (1.f - 0.5 * s_d[2]) * F_2(U, V, Fx, Fy) * 6.f; - mf3 = (mf3 - s_d[3] * (mf3 - MEQ_3(U))) + - dt * (1.f - 0.5 * 
s_d[3]) * F_3(U, V, Fx, Fy); - mf4 = (mf4 - s_d[4] * (mf4 - MEQ_4(U))) - - dt * (1.f - 0.5 * s_d[4]) * F_4(U, V, Fx, Fy); - mf5 = (mf5 - s_d[5] * (mf5 - MEQ_5(V))) + - dt * (1.f - 0.5 * s_d[5]) * F_5(U, V, Fx, Fy); - mf6 = (mf6 - s_d[6] * (mf6 - MEQ_6(V))) - - dt * (1.f - 0.5 * s_d[6]) * F_6(U, V, Fx, Fy); - mf7 = (mf7 - s_d[7] * (mf7 - MEQ_7(U, V))) + - dt * (1.f - 0.5 * s_d[7]) * F_7(U, V, Fx, Fy) * 2.f; - mf8 = (mf8 - s_d[8] * (mf8 - MEQ_8(U, V))) + - dt * (1.f - 0.5 * s_d[8]) * F_8(U, V, Fx, Fy); - - //---------------------------------------------------------------------------------------------------------------------------------------- - mf0 = mf0 * diag[0]; - mf1 = mf1 * diag[1]; - mf2 = mf2 * diag[2]; - mf3 = mf3 * diag[3]; - mf4 = mf4 * diag[4]; - mf5 = mf5 * diag[5]; - mf6 = mf6 * diag[6]; - mf7 = mf7 * diag[7]; - mf8 = mf8 * diag[8]; - //--mf - f - //--////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - f[0][ty][tx] = (mf0 - 4.f * (mf1 - mf2)); - f[1][ty][tx] = (mf0 - mf1 - 2.f * (mf2 + mf4) + mf3 + mf7); - f[2][ty][tx] = (mf0 - mf1 - 2.f * (mf2 + mf6) + mf5 - mf7); - f[3][ty][tx] = (mf0 - mf1 - 2.f * (mf2 - mf4) - mf3 + mf7); - f[4][ty][tx] = (mf0 - mf1 - 2.f * (mf2 - mf6) - mf5 - mf7); - f[5][ty][tx] = (mf0 + mf1 + mf1 + mf2 + mf3 + mf4 + mf5 + mf6 + mf8); - f[6][ty][tx] = (mf0 + mf1 + mf1 + mf2 - mf3 - mf4 + mf5 + mf6 - mf8); - f[7][ty][tx] = (mf0 + mf1 + mf1 + mf2 - mf3 - mf4 - mf5 - mf6 + mf8); - f[8][ty][tx] = (mf0 + mf1 + mf1 + mf2 + mf3 + mf4 - mf5 - mf6 - mf8); - - __syncthreads(); - - // streaming - // /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - F_d[k + 0 * NYNX2] = f[0][ty][tx]; - F_d[k + NX2 + 2 * NYNX2] = f[2][ty][tx]; - F_d[k - NX2 + 4 * NYNX2] = f[4][ty][tx]; - - if (tx != 0) { - F_d[k + 1 * NYNX2] = f[1][ty][tx - 1]; - F_d[k + NX2 + 5 * NYNX2] = f[5][ty][tx - 1]; - F_d[k - NX2 + 8 * NYNX2] = f[8][ty][tx - 1]; - } - - if (tx == BX - 1) { - F_d[k + 1 + 1 * NYNX2] = f[1][ty][tx]; - F_d[k + NX2 + 1 + 5 * NYNX2] = f[5][ty][tx]; - F_d[k - NX2 + 1 + 8 * NYNX2] = f[8][ty][tx]; - } - - if (tx != BX - 1) { - F_d[k + 3 * NYNX2] = f[3][ty][tx + 1]; - F_d[k + NX2 + 6 * NYNX2] = f[6][ty][tx + 1]; - F_d[k - NX2 + 7 * NYNX2] = f[7][ty][tx + 1]; - } - - if (tx == 0) { - F_d[k - 1 + 3 * NYNX2] = f[3][ty][tx]; - F_d[k + NX2 - 1 + 6 * NYNX2] = f[6][ty][tx]; - F_d[k - NX2 - 1 + 7 * NYNX2] = f[7][ty][tx]; - } - } -} -//-------------------------------------------------------------------------------------------------------------------------------------------------------- -/* -__global__ void Bc_flow_BB(int *flag_d, double *f_d) -{ - int tx, ty, k, x, y; - int kq, xp, yp, kp; - - tx = threadIdx.x; ty = threadIdx.y; - x = N16+blockIdx.x*BX+tx; y = (1+blockIdx.y*BY+ty); - k = NX2*y+x; - - if (x < N16+N1) - { - if(flag_d[k] == 0) - { - for(kq = 1; kq < Q; kq++) - { - xp = x + e_d[kq][0]; yp = y + e_d[kq][1]; - kp = NX2*yp+xp; - if(flag_d[kp] == 1) - { - f_d[kp + NYNX2*kq] = f_d[k + NYNX2*re_d[kq]]; - } - - } - } - } - -} -*/ - -__global__ void Bc_flow_X( - double dt, double Fx, double Fy, double U, double *f_d) { - int x, y, k, k1; - double f0, f1, f2, f3, f4, f5, f6, f7, f8; - double vx1, vy1, vv1, vx, vy, vv, rho1, rho; - - /////////////////////////////////////////////////////////////// - // boundary points 0/1 left/right - y = 1 + blockIdx.x * NT + threadIdx.x; - - if (y <= M1) { - if (blockIdx.y == 0) { - x = N16; - k = y * NX2 
+ x; - k1 = k + 1; - } else { - x = N16 + N1 - 1; - k = y * NX2 + x; - k1 = k - 1; - } - - f0 = f_d[k1 + 0 * NYNX2]; - f1 = f_d[k1 + 1 * NYNX2]; - f2 = f_d[k1 + 2 * NYNX2]; - f3 = f_d[k1 + 3 * NYNX2]; - f4 = f_d[k1 + 4 * NYNX2]; - f5 = f_d[k1 + 5 * NYNX2]; - f6 = f_d[k1 + 6 * NYNX2]; - f7 = f_d[k1 + 7 * NYNX2]; - f8 = f_d[k1 + 8 * NYNX2]; - - vx1 = (f1 - f3 + f5 + f8 - f6 - f7) + 0.5 * dt * Fx; - vy1 = (f2 - f4 + f5 + f6 - f7 - f8) + 0.5 * dt * Fy; - vv1 = (vx1 * vx1 + vy1 * vy1); - rho1 = f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8; - - if (blockIdx.y == 0) { - vx = 0.0; - vy = 0.0; - rho = rho1; - } else { - vx = 0.0; - vy = 0.0; - rho = rho1; - } - - - vv = (vx * vx + vy * vy); - - f_d[k + 0 * NYNX2] = - FEQ_0(rho, vx, vy, vv) + f0 - FEQ_0(rho1, vx1, vy1, vv1); - f_d[k + 1 * NYNX2] = - FEQ_1(rho, vx, vy, vv) + f1 - FEQ_1(rho1, vx1, vy1, vv1); - f_d[k + 2 * NYNX2] = - FEQ_2(rho, vx, vy, vv) + f2 - FEQ_2(rho1, vx1, vy1, vv1); - f_d[k + 3 * NYNX2] = - FEQ_3(rho, vx, vy, vv) + f3 - FEQ_3(rho1, vx1, vy1, vv1); - f_d[k + 4 * NYNX2] = - FEQ_4(rho, vx, vy, vv) + f4 - FEQ_4(rho1, vx1, vy1, vv1); - f_d[k + 5 * NYNX2] = - FEQ_5(rho, vx, vy, vv) + f5 - FEQ_5(rho1, vx1, vy1, vv1); - f_d[k + 6 * NYNX2] = - FEQ_6(rho, vx, vy, vv) + f6 - FEQ_6(rho1, vx1, vy1, vv1); - f_d[k + 7 * NYNX2] = - FEQ_7(rho, vx, vy, vv) + f7 - FEQ_7(rho1, vx1, vy1, vv1); - f_d[k + 8 * NYNX2] = - FEQ_8(rho, vx, vy, vv) + f8 - FEQ_8(rho1, vx1, vy1, vv1); - } -} -//------------------------------------------------------------------------------------------------------------------------------------------------------ -__global__ void Bc_flow_Y( - double dt, double Fx, double Fy, double U, int n, double dn, double *f_d) { - int x, y, k, k1; - double f0, f1, f2, f3, f4, f5, f6, f7, f8; - double vx1, vy1, vv1, rho1; - double vx, vy, vv, rho; - double xx; - double dx = dt; - double omega = 10.0; // 1.0 + dn*n; - double A = 0.5 * U; - - /////////////////////////////////////////////////////////////// - // boundary points 0/1 upper/bottom - x = N16 + blockIdx.x * NT + threadIdx.x; - - if (x < N16 + N1) { - if (blockIdx.y == 0) { - y = M1; - k = y * NX2 + x; - k1 = k - NX2; - } else { - y = 1; - k = y * NX2 + x; - k1 = k + NX2; - } - - f0 = f_d[k1 + 0 * NYNX2]; - f1 = f_d[k1 + 1 * NYNX2]; - f2 = f_d[k1 + 2 * NYNX2]; - f3 = f_d[k1 + 3 * NYNX2]; - f4 = f_d[k1 + 4 * NYNX2]; - f5 = f_d[k1 + 5 * NYNX2]; - f6 = f_d[k1 + 6 * NYNX2]; - f7 = f_d[k1 + 7 * NYNX2]; - f8 = f_d[k1 + 8 * NYNX2]; - - vx1 = (f1 - f3 + f5 + f8 - f6 - f7) + 0.5 * dt * Fx; - vy1 = (f2 - f4 + f5 + f6 - f7 - f8) + 0.5 * dt * Fy; - vv1 = (vx1 * vx1 + vy1 * vy1); - rho1 = f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8; - - if (blockIdx.y == 0) { - xx = (x - N16) * dx; - vx = U * (1. - cosh(10. 
* (xx - 0.5)) / cosh(5.)) + - A * sin(2 * PI * xx) * sin(omega * n * dt); - vy = 0.0; - rho = rho1; - } else { - vx = 0.0; - vy = 0.0; - rho = rho1; - } - - vv = (vx * vx + vy * vy); - - f_d[k + 0 * NYNX2] = - FEQ_0(rho, vx, vy, vv) + f0 - FEQ_0(rho1, vx1, vy1, vv1); - f_d[k + 1 * NYNX2] = - FEQ_1(rho, vx, vy, vv) + f1 - FEQ_1(rho1, vx1, vy1, vv1); - f_d[k + 2 * NYNX2] = - FEQ_2(rho, vx, vy, vv) + f2 - FEQ_2(rho1, vx1, vy1, vv1); - f_d[k + 3 * NYNX2] = - FEQ_3(rho, vx, vy, vv) + f3 - FEQ_3(rho1, vx1, vy1, vv1); - f_d[k + 4 * NYNX2] = - FEQ_4(rho, vx, vy, vv) + f4 - FEQ_4(rho1, vx1, vy1, vv1); - f_d[k + 5 * NYNX2] = - FEQ_5(rho, vx, vy, vv) + f5 - FEQ_5(rho1, vx1, vy1, vv1); - f_d[k + 6 * NYNX2] = - FEQ_6(rho, vx, vy, vv) + f6 - FEQ_6(rho1, vx1, vy1, vv1); - f_d[k + 7 * NYNX2] = - FEQ_7(rho, vx, vy, vv) + f7 - FEQ_7(rho1, vx1, vy1, vv1); - f_d[k + 8 * NYNX2] = - FEQ_8(rho, vx, vy, vv) + f8 - FEQ_8(rho1, vx1, vy1, vv1); - } -} -//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ +#include "common.h" +#include "lb.h" + +__constant__ double diag[Q] = {1.f / 9, + 1.f / 36, + 1.f / 36, + 1.f / 6, + 1.f / 12, + 1.0 / 6, + 1.f / 12, + 1.f / 4, + 1.f / 4}; +__constant__ int e_d[Q][2] = {{0, 0}, + + {1, 0}, + {0, 1}, + {-1, 0}, + {0, -1}, + + {1, 1}, + {-1, 1}, + {-1, -1}, + {1, -1}}; + +__constant__ int re_d[Q] = {0, + 3, + 4, + 1, + 2, + + 7, + 8, + 5, + 6}; + +//-------------------------------------------------------------------------------------------------------------------------- +//------------------------------------------------------------------------------------------------------------------------------ +__global__ void Evol_flow( + double *s_d, double dt, double Fx, double Fy, double *f_d, double *F_d) { + double RHO, U, V, UV; + int tx, ty, k, x, y; + double mf0, mf1, mf2, mf3, mf4, mf5, mf6, mf7, mf8; + __shared__ double f[Q][BY][BX]; + + tx = threadIdx.x; + ty = threadIdx.y; + x = N16 + blockIdx.x * BX + tx; + y = (1 + blockIdx.y * BY + ty); + k = NX2 * y + x; + + if (x < N16 + N1) { + f[0][ty][tx] = f_d[k + 0 * NYNX2]; + f[1][ty][tx] = f_d[k + 1 * NYNX2]; + f[2][ty][tx] = f_d[k + 2 * NYNX2]; + f[3][ty][tx] = f_d[k + 3 * NYNX2]; + f[4][ty][tx] = f_d[k + 4 * NYNX2]; + f[5][ty][tx] = f_d[k + 5 * NYNX2]; + f[6][ty][tx] = f_d[k + 6 * NYNX2]; + f[7][ty][tx] = f_d[k + 7 * NYNX2]; + f[8][ty][tx] = f_d[k + 8 * NYNX2]; + + // f-mf/////////////////////////// + mf0 = f[0][ty][tx] + f[1][ty][tx] + f[2][ty][tx] + f[3][ty][tx] + + f[4][ty][tx] + f[5][ty][tx] + f[6][ty][tx] + f[7][ty][tx] + + f[8][ty][tx]; + mf1 = -4 * f[0][ty][tx] - f[1][ty][tx] - f[2][ty][tx] - f[3][ty][tx] - + f[4][ty][tx] + + 2 * (f[5][ty][tx] + f[6][ty][tx] + f[7][ty][tx] + f[8][ty][tx]); + mf2 = 4 * f[0][ty][tx] - + 2 * (f[1][ty][tx] + f[2][ty][tx] + f[3][ty][tx] + f[4][ty][tx]) + + f[5][ty][tx] + f[6][ty][tx] + f[7][ty][tx] + f[8][ty][tx]; + mf3 = f[1][ty][tx] - f[3][ty][tx] + f[5][ty][tx] - f[6][ty][tx] - + f[7][ty][tx] + f[8][ty][tx]; + mf4 = -2 * (f[1][ty][tx] - f[3][ty][tx]) + f[5][ty][tx] - f[6][ty][tx] - + f[7][ty][tx] + f[8][ty][tx]; + mf5 = f[2][ty][tx] - f[4][ty][tx] + f[5][ty][tx] + f[6][ty][tx] - + f[7][ty][tx] - f[8][ty][tx]; + mf6 = -2 * (f[2][ty][tx] - f[4][ty][tx]) + f[5][ty][tx] + f[6][ty][tx] - + f[7][ty][tx] - f[8][ty][tx]; + mf7 = f[1][ty][tx] - f[2][ty][tx] + f[3][ty][tx] - f[4][ty][tx]; + mf8 = f[5][ty][tx] - f[6][ty][tx] + f[7][ty][tx] - f[8][ty][tx]; + + // macroscopic + // 
variables///////////////////////////////////////////////////////////////////////////// + RHO = RHO(ty, tx); + U = UX(ty, tx) + 0.5 * dt * Fx; + V = VY(ty, tx) + 0.5 * dt * Fy; + UV = U * U + V * V; + + // collision//------------------------------------------------------------------------------------------------------------------------------- + mf0 = (mf0 - s_d[0] * (mf0 - MEQ_0(RHO))) + + dt * (1.f - 0.5 * s_d[0]) * F_0(U, V, Fx, Fy); + mf1 = (mf1 - s_d[1] * (mf1 - MEQ_1(RHO, UV))) + + dt * (1.f - 0.5 * s_d[1]) * F_1(U, V, Fx, Fy) * 6.f; + mf2 = (mf2 - s_d[2] * (mf2 - MEQ_2(RHO, UV))) - + dt * (1.f - 0.5 * s_d[2]) * F_2(U, V, Fx, Fy) * 6.f; + mf3 = (mf3 - s_d[3] * (mf3 - MEQ_3(U))) + + dt * (1.f - 0.5 * s_d[3]) * F_3(U, V, Fx, Fy); + mf4 = (mf4 - s_d[4] * (mf4 - MEQ_4(U))) - + dt * (1.f - 0.5 * s_d[4]) * F_4(U, V, Fx, Fy); + mf5 = (mf5 - s_d[5] * (mf5 - MEQ_5(V))) + + dt * (1.f - 0.5 * s_d[5]) * F_5(U, V, Fx, Fy); + mf6 = (mf6 - s_d[6] * (mf6 - MEQ_6(V))) - + dt * (1.f - 0.5 * s_d[6]) * F_6(U, V, Fx, Fy); + mf7 = (mf7 - s_d[7] * (mf7 - MEQ_7(U, V))) + + dt * (1.f - 0.5 * s_d[7]) * F_7(U, V, Fx, Fy) * 2.f; + mf8 = (mf8 - s_d[8] * (mf8 - MEQ_8(U, V))) + + dt * (1.f - 0.5 * s_d[8]) * F_8(U, V, Fx, Fy); + + //---------------------------------------------------------------------------------------------------------------------------------------- + mf0 = mf0 * diag[0]; + mf1 = mf1 * diag[1]; + mf2 = mf2 * diag[2]; + mf3 = mf3 * diag[3]; + mf4 = mf4 * diag[4]; + mf5 = mf5 * diag[5]; + mf6 = mf6 * diag[6]; + mf7 = mf7 * diag[7]; + mf8 = mf8 * diag[8]; + //--mf - f + //--////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + f[0][ty][tx] = (mf0 - 4.f * (mf1 - mf2)); + f[1][ty][tx] = (mf0 - mf1 - 2.f * (mf2 + mf4) + mf3 + mf7); + f[2][ty][tx] = (mf0 - mf1 - 2.f * (mf2 + mf6) + mf5 - mf7); + f[3][ty][tx] = (mf0 - mf1 - 2.f * (mf2 - mf4) - mf3 + mf7); + f[4][ty][tx] = (mf0 - mf1 - 2.f * (mf2 - mf6) - mf5 - mf7); + f[5][ty][tx] = (mf0 + mf1 + mf1 + mf2 + mf3 + mf4 + mf5 + mf6 + mf8); + f[6][ty][tx] = (mf0 + mf1 + mf1 + mf2 - mf3 - mf4 + mf5 + mf6 - mf8); + f[7][ty][tx] = (mf0 + mf1 + mf1 + mf2 - mf3 - mf4 - mf5 - mf6 + mf8); + f[8][ty][tx] = (mf0 + mf1 + mf1 + mf2 + mf3 + mf4 - mf5 - mf6 - mf8); + + __syncthreads(); + + // streaming + // /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + F_d[k + 0 * NYNX2] = f[0][ty][tx]; + F_d[k + NX2 + 2 * NYNX2] = f[2][ty][tx]; + F_d[k - NX2 + 4 * NYNX2] = f[4][ty][tx]; + + if (tx != 0) { + F_d[k + 1 * NYNX2] = f[1][ty][tx - 1]; + F_d[k + NX2 + 5 * NYNX2] = f[5][ty][tx - 1]; + F_d[k - NX2 + 8 * NYNX2] = f[8][ty][tx - 1]; + } + + if (tx == BX - 1) { + F_d[k + 1 + 1 * NYNX2] = f[1][ty][tx]; + F_d[k + NX2 + 1 + 5 * NYNX2] = f[5][ty][tx]; + F_d[k - NX2 + 1 + 8 * NYNX2] = f[8][ty][tx]; + } + + if (tx != BX - 1) { + F_d[k + 3 * NYNX2] = f[3][ty][tx + 1]; + F_d[k + NX2 + 6 * NYNX2] = f[6][ty][tx + 1]; + F_d[k - NX2 + 7 * NYNX2] = f[7][ty][tx + 1]; + } + + if (tx == 0) { + F_d[k - 1 + 3 * NYNX2] = f[3][ty][tx]; + F_d[k + NX2 - 1 + 6 * NYNX2] = f[6][ty][tx]; + F_d[k - NX2 - 1 + 7 * NYNX2] = f[7][ty][tx]; + } + } +} +//-------------------------------------------------------------------------------------------------------------------------------------------------------- +/* +__global__ void Bc_flow_BB(int *flag_d, double *f_d) +{ + int tx, ty, k, x, y; + int kq, xp, yp, kp; + + tx = threadIdx.x; ty = threadIdx.y; + x = 
N16+blockIdx.x*BX+tx; y = (1+blockIdx.y*BY+ty); + k = NX2*y+x; + + if (x < N16+N1) + { + if(flag_d[k] == 0) + { + for(kq = 1; kq < Q; kq++) + { + xp = x + e_d[kq][0]; yp = y + e_d[kq][1]; + kp = NX2*yp+xp; + if(flag_d[kp] == 1) + { + f_d[kp + NYNX2*kq] = f_d[k + NYNX2*re_d[kq]]; + } + + } + } + } + +} +*/ + +__global__ void Bc_flow_X( + double dt, double Fx, double Fy, double U, double *f_d) { + int x, y, k, k1; + double f0, f1, f2, f3, f4, f5, f6, f7, f8; + double vx1, vy1, vv1, vx, vy, vv, rho1, rho; + + /////////////////////////////////////////////////////////////// + // boundary points 0/1 left/right + y = 1 + blockIdx.x * NT + threadIdx.x; + + if (y <= M1) { + if (blockIdx.y == 0) { + x = N16; + k = y * NX2 + x; + k1 = k + 1; + } else { + x = N16 + N1 - 1; + k = y * NX2 + x; + k1 = k - 1; + } + + f0 = f_d[k1 + 0 * NYNX2]; + f1 = f_d[k1 + 1 * NYNX2]; + f2 = f_d[k1 + 2 * NYNX2]; + f3 = f_d[k1 + 3 * NYNX2]; + f4 = f_d[k1 + 4 * NYNX2]; + f5 = f_d[k1 + 5 * NYNX2]; + f6 = f_d[k1 + 6 * NYNX2]; + f7 = f_d[k1 + 7 * NYNX2]; + f8 = f_d[k1 + 8 * NYNX2]; + + vx1 = (f1 - f3 + f5 + f8 - f6 - f7) + 0.5 * dt * Fx; + vy1 = (f2 - f4 + f5 + f6 - f7 - f8) + 0.5 * dt * Fy; + vv1 = (vx1 * vx1 + vy1 * vy1); + rho1 = f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8; + + if (blockIdx.y == 0) { + vx = 0.0; + vy = 0.0; + rho = rho1; + } else { + vx = 0.0; + vy = 0.0; + rho = rho1; + } + + + vv = (vx * vx + vy * vy); + + f_d[k + 0 * NYNX2] = + FEQ_0(rho, vx, vy, vv) + f0 - FEQ_0(rho1, vx1, vy1, vv1); + f_d[k + 1 * NYNX2] = + FEQ_1(rho, vx, vy, vv) + f1 - FEQ_1(rho1, vx1, vy1, vv1); + f_d[k + 2 * NYNX2] = + FEQ_2(rho, vx, vy, vv) + f2 - FEQ_2(rho1, vx1, vy1, vv1); + f_d[k + 3 * NYNX2] = + FEQ_3(rho, vx, vy, vv) + f3 - FEQ_3(rho1, vx1, vy1, vv1); + f_d[k + 4 * NYNX2] = + FEQ_4(rho, vx, vy, vv) + f4 - FEQ_4(rho1, vx1, vy1, vv1); + f_d[k + 5 * NYNX2] = + FEQ_5(rho, vx, vy, vv) + f5 - FEQ_5(rho1, vx1, vy1, vv1); + f_d[k + 6 * NYNX2] = + FEQ_6(rho, vx, vy, vv) + f6 - FEQ_6(rho1, vx1, vy1, vv1); + f_d[k + 7 * NYNX2] = + FEQ_7(rho, vx, vy, vv) + f7 - FEQ_7(rho1, vx1, vy1, vv1); + f_d[k + 8 * NYNX2] = + FEQ_8(rho, vx, vy, vv) + f8 - FEQ_8(rho1, vx1, vy1, vv1); + } +} +//------------------------------------------------------------------------------------------------------------------------------------------------------ +__global__ void Bc_flow_Y( + double dt, double Fx, double Fy, double U, int n, double dn, double *f_d) { + int x, y, k, k1; + double f0, f1, f2, f3, f4, f5, f6, f7, f8; + double vx1, vy1, vv1, rho1; + double vx, vy, vv, rho; + double xx; + double dx = dt; + double omega = 10.0; // 1.0 + dn*n; + double A = 0.5 * U; + + /////////////////////////////////////////////////////////////// + // boundary points 0/1 upper/bottom + x = N16 + blockIdx.x * NT + threadIdx.x; + + if (x < N16 + N1) { + if (blockIdx.y == 0) { + y = M1; + k = y * NX2 + x; + k1 = k - NX2; + } else { + y = 1; + k = y * NX2 + x; + k1 = k + NX2; + } + + f0 = f_d[k1 + 0 * NYNX2]; + f1 = f_d[k1 + 1 * NYNX2]; + f2 = f_d[k1 + 2 * NYNX2]; + f3 = f_d[k1 + 3 * NYNX2]; + f4 = f_d[k1 + 4 * NYNX2]; + f5 = f_d[k1 + 5 * NYNX2]; + f6 = f_d[k1 + 6 * NYNX2]; + f7 = f_d[k1 + 7 * NYNX2]; + f8 = f_d[k1 + 8 * NYNX2]; + + vx1 = (f1 - f3 + f5 + f8 - f6 - f7) + 0.5 * dt * Fx; + vy1 = (f2 - f4 + f5 + f6 - f7 - f8) + 0.5 * dt * Fy; + vv1 = (vx1 * vx1 + vy1 * vy1); + rho1 = f0 + f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8; + + if (blockIdx.y == 0) { + xx = (x - N16) * dx; + vx = U * (1. - cosh(10. 
* (xx - 0.5)) / cosh(5.)) + + A * sin(2 * PI * xx) * sin(omega * n * dt); + vy = 0.0; + rho = rho1; + } else { + vx = 0.0; + vy = 0.0; + rho = rho1; + } + + vv = (vx * vx + vy * vy); + + f_d[k + 0 * NYNX2] = + FEQ_0(rho, vx, vy, vv) + f0 - FEQ_0(rho1, vx1, vy1, vv1); + f_d[k + 1 * NYNX2] = + FEQ_1(rho, vx, vy, vv) + f1 - FEQ_1(rho1, vx1, vy1, vv1); + f_d[k + 2 * NYNX2] = + FEQ_2(rho, vx, vy, vv) + f2 - FEQ_2(rho1, vx1, vy1, vv1); + f_d[k + 3 * NYNX2] = + FEQ_3(rho, vx, vy, vv) + f3 - FEQ_3(rho1, vx1, vy1, vv1); + f_d[k + 4 * NYNX2] = + FEQ_4(rho, vx, vy, vv) + f4 - FEQ_4(rho1, vx1, vy1, vv1); + f_d[k + 5 * NYNX2] = + FEQ_5(rho, vx, vy, vv) + f5 - FEQ_5(rho1, vx1, vy1, vv1); + f_d[k + 6 * NYNX2] = + FEQ_6(rho, vx, vy, vv) + f6 - FEQ_6(rho1, vx1, vy1, vv1); + f_d[k + 7 * NYNX2] = + FEQ_7(rho, vx, vy, vv) + f7 - FEQ_7(rho1, vx1, vy1, vv1); + f_d[k + 8 * NYNX2] = + FEQ_8(rho, vx, vy, vv) + f8 - FEQ_8(rho1, vx1, vy1, vv1); + } +} +//------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/flowmain.cu b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/flowmain.cu index 7f994c06e9..d28d01eaaa 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/flowmain.cu +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/flowmain.cu @@ -1,123 +1,123 @@ -//---LBE for 2D cavity -// flow---------------------------------------------------------- -//---@Author: Xuhui -// Meng------------------------------------------------------------- -#include -#include -#include -#include -#include -#include -#include -#include "common.h" -#include "datasave.cu" -#include "error.cu" -#include "flow.cu" -#include "init.cu" -#include "lb.h" -//---------------------------------------------------------------------------------------------------------------------------- -int main(int argc, char **argv) { - clock_t time_begin, time_end; - double uc, vc; - int new_step, goon; - double err = 1.0; - - int device = 1; - cudaSetDevice(device); - cudaDeviceProp properties; - cudaGetDeviceProperties(&properties, device); - printf("Lattice Boltzmann Simulation running on: %s\n", properties.name); - - dim3 threads(BX, 1); - dim3 grid((N1 + BX - 1) / BX, M1); - dim3 gridBlr((M1 + NT - 1) / NT, 2); - dim3 gridBub((N1 + NT - 1) / NT, 2); - - // parameters - init(); - datasave(); - /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - // GPU memory: f_dev[], F_dev[] - cudaMalloc((void **)&f_dev, sizeof(double) * Q * NY2 * NX2); - cudaMalloc((void **)&F_dev, sizeof(double) * Q * NY2 * NX2); - cudaMalloc((void **)&sf_dev, sizeof(double) * Q); - - // from CPU to GPU (GPU <= CPU): f_dev <= f - cudaMemcpy(f_dev, - &f[0][0][0], - sizeof(double) * Q * NY2 * NX2, - cudaMemcpyHostToDevice); - cudaMemcpy(sf_dev, &sf, sizeof(double) * Q, cudaMemcpyHostToDevice); -///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -loop: - printf("Enter the num of steps:"); - scanf("%d", &new_step); - nmax += new_step; - printf("nmax = %d\n", nmax); - - dn = 9. 
/ nmax; - - time_begin = clock(); - while (n < nmax) //&&(err > 1.0e-5)) - { - // Excute kernel collision_propagation : f => F F => f - Evol_flow<<>>(sf_dev, dt, Fx, Fy, f_dev, F_dev); - Bc_flow_X<<>>(dt, Fx, Fy, U0, F_dev); - Bc_flow_Y<<>>(dt, Fx, Fy, U0, n, dn, F_dev); - - n += 1; - - Evol_flow<<>>(sf_dev, dt, Fx, Fy, F_dev, f_dev); - Bc_flow_X<<>>(dt, Fx, Fy, U0, f_dev); - Bc_flow_Y<<>>(dt, Fx, Fy, U0, n, dn, f_dev); - - n += 1; - - if (n % T == 0) { - cudaMemcpy(&f[0][0][0], - f_dev, - Q * NY2 * NX2 * sizeof(double), - cudaMemcpyDeviceToHost); - uc = UX(Mc, Nc); - vc = VY(Mc, Nc); - err = error(); - printf("n=%d: err = %.3e, uc = %.3e, vc = %.3e\n", n, err, uc, vc); - datasave(); - } - } - - time_end = clock(); - printf("\nThe computing time is: %f seconds ", - (double)(time_end - time_begin) / CLOCKS_PER_SEC); - ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - - if (n % 2 == 0) { - cudaMemcpy(&f[0][0][0], - f_dev, - Q * NY2 * NX2 * sizeof(double), - cudaMemcpyDeviceToHost); - printf("this is from f !\n"); - } else { - cudaMemcpy(&f[0][0][0], - F_dev, - Q * NY2 * NX2 * sizeof(double), - cudaMemcpyDeviceToHost); - printf("this is from F !\n"); - } - - // save data - datasave(); - - printf("goon? yes(1) no(0):"); - scanf("%d", &goon); - if (goon) goto loop; - - - // free GPU memory - cudaFree(f_dev); - cudaFree(F_dev); - cudaFree(sf_dev); - ////////////////////////////////////////////////////////////////////////////////////////////////_GPU - return 0; -} +//---LBE for 2D cavity +// flow---------------------------------------------------------- +//---@Author: Xuhui +// Meng------------------------------------------------------------- +#include +#include +#include +#include +#include +#include +#include +#include "common.h" +#include "datasave.cu" +#include "error.cu" +#include "flow.cu" +#include "init.cu" +#include "lb.h" +//---------------------------------------------------------------------------------------------------------------------------- +int main(int argc, char **argv) { + clock_t time_begin, time_end; + double uc, vc; + int new_step, goon; + double err = 1.0; + + int device = 1; + cudaSetDevice(device); + cudaDeviceProp properties; + cudaGetDeviceProperties(&properties, device); + printf("Lattice Boltzmann Simulation running on: %s\n", properties.name); + + dim3 threads(BX, 1); + dim3 grid((N1 + BX - 1) / BX, M1); + dim3 gridBlr((M1 + NT - 1) / NT, 2); + dim3 gridBub((N1 + NT - 1) / NT, 2); + + // parameters + init(); + datasave(); + /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // GPU memory: f_dev[], F_dev[] + cudaMalloc((void **)&f_dev, sizeof(double) * Q * NY2 * NX2); + cudaMalloc((void **)&F_dev, sizeof(double) * Q * NY2 * NX2); + cudaMalloc((void **)&sf_dev, sizeof(double) * Q); + + // from CPU to GPU (GPU <= CPU): f_dev <= f + cudaMemcpy(f_dev, + &f[0][0][0], + sizeof(double) * Q * NY2 * NX2, + cudaMemcpyHostToDevice); + cudaMemcpy(sf_dev, &sf, sizeof(double) * Q, cudaMemcpyHostToDevice); +///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +loop: + printf("Enter the num of steps:"); + scanf("%d", &new_step); + nmax += new_step; + printf("nmax = %d\n", nmax); + + dn = 9. 
/ nmax; + + time_begin = clock(); + while (n < nmax) //&&(err > 1.0e-5)) + { + // Excute kernel collision_propagation : f => F F => f + Evol_flow<<>>(sf_dev, dt, Fx, Fy, f_dev, F_dev); + Bc_flow_X<<>>(dt, Fx, Fy, U0, F_dev); + Bc_flow_Y<<>>(dt, Fx, Fy, U0, n, dn, F_dev); + + n += 1; + + Evol_flow<<>>(sf_dev, dt, Fx, Fy, F_dev, f_dev); + Bc_flow_X<<>>(dt, Fx, Fy, U0, f_dev); + Bc_flow_Y<<>>(dt, Fx, Fy, U0, n, dn, f_dev); + + n += 1; + + if (n % T == 0) { + cudaMemcpy(&f[0][0][0], + f_dev, + Q * NY2 * NX2 * sizeof(double), + cudaMemcpyDeviceToHost); + uc = UX(Mc, Nc); + vc = VY(Mc, Nc); + err = error(); + printf("n=%d: err = %.3e, uc = %.3e, vc = %.3e\n", n, err, uc, vc); + datasave(); + } + } + + time_end = clock(); + printf("\nThe computing time is: %f seconds ", + (double)(time_end - time_begin) / CLOCKS_PER_SEC); + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + + if (n % 2 == 0) { + cudaMemcpy(&f[0][0][0], + f_dev, + Q * NY2 * NX2 * sizeof(double), + cudaMemcpyDeviceToHost); + printf("this is from f !\n"); + } else { + cudaMemcpy(&f[0][0][0], + F_dev, + Q * NY2 * NX2 * sizeof(double), + cudaMemcpyDeviceToHost); + printf("this is from F !\n"); + } + + // save data + datasave(); + + printf("goon? yes(1) no(0):"); + scanf("%d", &goon); + if (goon) goto loop; + + + // free GPU memory + cudaFree(f_dev); + cudaFree(F_dev); + cudaFree(sf_dev); + ////////////////////////////////////////////////////////////////////////////////////////////////_GPU + return 0; +} diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/init.cu b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/init.cu index b61a04cf7f..13064c1d67 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/init.cu +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/init.cu @@ -1,51 +1,51 @@ -#include "common.h" -#include "lb.h" - -void init() { - int y, x; - double ut, vt, rhot, uv; - double G; - - dx = Ly / M; - dt = dx; - ci = dx / dt; - rcc = 3.0 / ci / ci; - tau_f = 0.51; - nu = (tau_f - 0.5) * dt / 3.0; - U0 = Re * nu / Ly; - wf = 1.0 / tau_f; - G = 8.0 * U0 * nu / Ly / Ly; - rho_in = rho0 + 1.5 * Lx * G; - rho_out = rho0 - 1.5 * Lx * G; - Fx = 0.0; - Fy = 0.0; - - sf[0] = sf[3] = sf[5] = 0.f; - sf[7] = sf[8] = 1.f / tau_f; - sf[4] = sf[6] = (16.f * tau_f - 8.f) / (8.f * tau_f - 1.f); - sf[1] = 1.1; - sf[2] = 1.2; - - printf("tau_f = %.3f, rho_in = %.3f, rho_out = %.3f, Ma = %.3f\n", - tau_f, - rho_in, - rho_out, - U0 / ci); - - for (y = 1; y < M1 + 1; y++) - for (x = N16; x < N1 + N16; x++) { - rhot = rho0; - ut = 0.f; - vt = 0.f; - uv = ut * ut + vt * vt; - f[0][y][x] = FEQ_0(rhot, ut, vt, uv); - f[1][y][x] = FEQ_1(rhot, ut, vt, uv); - f[2][y][x] = FEQ_2(rhot, ut, vt, uv); - f[3][y][x] = FEQ_3(rhot, ut, vt, uv); - f[4][y][x] = FEQ_4(rhot, ut, vt, uv); - f[5][y][x] = FEQ_5(rhot, ut, vt, uv); - f[6][y][x] = FEQ_6(rhot, ut, vt, uv); - f[7][y][x] = FEQ_7(rhot, ut, vt, uv); - f[8][y][x] = FEQ_8(rhot, ut, vt, uv); - } -} +#include "common.h" +#include "lb.h" + +void init() { + int y, x; + double ut, vt, rhot, uv; + double G; + + dx = Ly / M; + dt = dx; + ci = dx / dt; + rcc = 3.0 / ci / ci; + tau_f = 0.51; + nu = (tau_f - 0.5) * dt / 3.0; + U0 = Re * nu / Ly; + wf = 1.0 / tau_f; + G = 8.0 * U0 * nu / Ly / Ly; + rho_in = rho0 + 1.5 * Lx * G; + rho_out = rho0 - 1.5 * Lx * G; + Fx = 0.0; + Fy = 0.0; + + sf[0] = sf[3] = sf[5] = 0.f; + sf[7] = sf[8] = 1.f / tau_f; + sf[4] = sf[6] = (16.f * tau_f - 8.f) / (8.f * tau_f - 1.f); + 
sf[1] = 1.1; + sf[2] = 1.2; + + printf("tau_f = %.3f, rho_in = %.3f, rho_out = %.3f, Ma = %.3f\n", + tau_f, + rho_in, + rho_out, + U0 / ci); + + for (y = 1; y < M1 + 1; y++) + for (x = N16; x < N1 + N16; x++) { + rhot = rho0; + ut = 0.f; + vt = 0.f; + uv = ut * ut + vt * vt; + f[0][y][x] = FEQ_0(rhot, ut, vt, uv); + f[1][y][x] = FEQ_1(rhot, ut, vt, uv); + f[2][y][x] = FEQ_2(rhot, ut, vt, uv); + f[3][y][x] = FEQ_3(rhot, ut, vt, uv); + f[4][y][x] = FEQ_4(rhot, ut, vt, uv); + f[5][y][x] = FEQ_5(rhot, ut, vt, uv); + f[6][y][x] = FEQ_6(rhot, ut, vt, uv); + f[7][y][x] = FEQ_7(rhot, ut, vt, uv); + f[8][y][x] = FEQ_8(rhot, ut, vt, uv); + } +} diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/lb.h b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/lb.h index 383a3b90fc..120d678c46 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/lb.h +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/LBM/lb.h @@ -1,109 +1,109 @@ -#ifndef __LB_H_ -#define __LB_H_ - -#define FEQ_0(rho, u, v, uv) ((4.0 / 9.0 * (rho - 1.5 * uv))) -#define FEQ_1(rho, u, v, uv) \ - ((1.0 / 9.0 * (rho + 3.0 * u + 4.5 * u * u - 1.5 * uv))) -#define FEQ_2(rho, u, v, uv) \ - ((1.0 / 9.0 * (rho + 3.0 * v + 4.5 * v * v - 1.5 * uv))) -#define FEQ_3(rho, u, v, uv) \ - ((1.0 / 9.0 * (rho - 3.0 * u + 4.5 * u * u - 1.5 * uv))) -#define FEQ_4(rho, u, v, uv) \ - ((1.0 / 9.0 * (rho - 3.0 * v + 4.5 * v * v - 1.5 * uv))) -#define FEQ_5(rho, u, v, uv) \ - ((1.0 / 36.0 * (rho + 3.0 * (u + v) + 4.5 * (u + v) * (u + v) - 1.5 * uv))) -#define FEQ_6(rho, u, v, uv) \ - ((1.0 / 36.0 * (rho + 3.0 * (-u + v) + 4.5 * (-u + v) * (-u + v) - 1.5 * uv))) -#define FEQ_7(rho, u, v, uv) \ - ((1.0 / 36.0 * (rho + 3.0 * (-u - v) + 4.5 * (-u - v) * (-u - v) - 1.5 * uv))) -#define FEQ_8(rho, u, v, uv) \ - ((1.0 / 36.0 * (rho + 3.0 * (u - v) + 4.5 * (u - v) * (u - v) - 1.5 * uv))) -//------------------------------------------------------------------------------------------------------------------------- -#define MEQ_0(rho) (rho) -#define MEQ_1(rho, uv) (-2.0f * rho + 3.0f * uv) -#define MEQ_2(rho, uv) (rho - 3.0f * uv) -#define MEQ_3(u) (u) -#define MEQ_4(u) (-u) -#define MEQ_5(v) (v) -#define MEQ_6(v) (-v) -#define MEQ_7(u, v) (u * u - v * v) -#define MEQ_8(u, v) (u * v) -//-------------------------------------------------------------------------------------------------------------------------- -#define F_0(u, v, Fx, Fy) (0.f) -#define F_1(u, v, Fx, Fy) (u * Fx + v * Fy) -#define F_2(u, v, Fx, Fy) (u * Fx + v * Fy) -#define F_3(u, v, Fx, Fy) (Fx) -#define F_4(u, v, Fx, Fy) (Fx) -#define F_5(u, v, Fx, Fy) (Fy) -#define F_6(u, v, Fx, Fy) (Fy) -#define F_7(u, v, Fx, Fy) (u * Fx - v * Fy) -#define F_8(u, v, Fx, Fy) (u * Fy + v * Fx) -//-------------------------------------------------------------------------------------------------------------------------- -#define GEQ_0(C, u, v, uv) ((4.0 / 9.0 * C * (1.f - 1.5 * uv))) -#define GEQ_1(C, u, v, uv) \ - ((1.0 / 9.0 * C * (1.f + 3.0 * u + 4.5 * u * u - 1.5 * uv))) -#define GEQ_2(C, u, v, uv) \ - ((1.0 / 9.0 * C * (1.f + 3.0 * v + 4.5 * v * v - 1.5 * uv))) -#define GEQ_3(C, u, v, uv) \ - ((1.0 / 9.0 * C * (1.f - 3.0 * u + 4.5 * u * u - 1.5 * uv))) -#define GEQ_4(C, u, v, uv) \ - ((1.0 / 9.0 * C * (1.f - 3.0 * v + 4.5 * v * v - 1.5 * uv))) -#define GEQ_5(C, u, v, uv) \ - ((1.0 / 36.0 * C * \ - (1.f + 3.0 * (u + v) + 4.5 * (u + v) * (u + v) - 1.5 * uv))) -#define GEQ_6(C, u, v, uv) \ - ((1.0 / 36.0 * C * \ - (1.f + 3.0 * (-u + v) + 4.5 * (-u + v) * (-u + v) - 1.5 * uv))) -#define GEQ_7(C, 
u, v, uv) \ - ((1.0 / 36.0 * C * \ - (1.f + 3.0 * (-u - v) + 4.5 * (-u - v) * (-u - v) - 1.5 * uv))) -#define GEQ_8(C, u, v, uv) \ - ((1.0 / 36.0 * C * \ - (1.f + 3.0 * (u - v) + 4.5 * (u - v) * (u - v) - 1.5 * uv))) -//-------------------------------------------------------------------------------------------------------------------------- -#define MGEQ_0(C) (C) -#define MGEQ_1(C, uv) C *(-2.0f + 3.0f * uv) -#define MGEQ_2(C, uv) C *(1.f - 3.0f * uv) -#define MGEQ_3(C, u) C *(u) -#define MGEQ_4(C, u) C *(-u) -#define MGEQ_5(C, v) C *(v) -#define MGEQ_6(C, v) C *(-v) -#define MGEQ_7(C, u, v) C *(u * u - v * v) -#define MGEQ_8(C, u, v) C *(u * v) -//-------------------------------------------------------------------------------------------------------------------------- -#define RHO(y, x) \ - (f[0][y][x] + f[1][y][x] + f[2][y][x] + f[3][y][x] + f[4][y][x] + \ - f[5][y][x] + f[6][y][x] + f[7][y][x] + f[8][y][x]) -#define UX(y, x) \ - (f[1][y][x] + f[5][y][x] + f[8][y][x] - f[3][y][x] - f[6][y][x] - f[7][y][x]) -#define VY(y, x) \ - (f[2][y][x] + f[5][y][x] + f[6][y][x] - f[4][y][x] - f[7][y][x] - f[8][y][x]) -#define C(y, x) \ - (g[0][y][x] + g[1][y][x] + g[2][y][x] + g[3][y][x] + g[4][y][x] + \ - g[5][y][x] + g[6][y][x] + g[7][y][x] + g[8][y][x]) - -//-------------------------------------------------------------------------------------------------------------------------- -void geo(); -void init(); -__global__ void Evol_flow( - double *s_d, double dt, double Fx, double Fy, double *f_d, double *F_d); -__global__ void Bc_flow_X( - double dt, double Fx, double Fy, double U, double *f_d); -__global__ void Bc_flow_Y(double dt, double Fx, double Fy, double *f_d); -__global__ void Bc_flow_BB(int *flag_d, double *f_d); -__global__ void Evol_solute(double *s_d, - double dt, - double Fx, - double Fy, - double *g_d, - double *G_d, - double *f_d); -__global__ void Bc_solute_X( - double dt, double Fx, double Fy, double *g_d, double *f_d); -__global__ void Bc_solute_Y( - double dt, double Fx, double Fy, double *g_d, double *f_d); -__global__ void Bc_solute_BB(); -double error(); -void datasave(); - -#endif +#ifndef __LB_H_ +#define __LB_H_ + +#define FEQ_0(rho, u, v, uv) ((4.0 / 9.0 * (rho - 1.5 * uv))) +#define FEQ_1(rho, u, v, uv) \ + ((1.0 / 9.0 * (rho + 3.0 * u + 4.5 * u * u - 1.5 * uv))) +#define FEQ_2(rho, u, v, uv) \ + ((1.0 / 9.0 * (rho + 3.0 * v + 4.5 * v * v - 1.5 * uv))) +#define FEQ_3(rho, u, v, uv) \ + ((1.0 / 9.0 * (rho - 3.0 * u + 4.5 * u * u - 1.5 * uv))) +#define FEQ_4(rho, u, v, uv) \ + ((1.0 / 9.0 * (rho - 3.0 * v + 4.5 * v * v - 1.5 * uv))) +#define FEQ_5(rho, u, v, uv) \ + ((1.0 / 36.0 * (rho + 3.0 * (u + v) + 4.5 * (u + v) * (u + v) - 1.5 * uv))) +#define FEQ_6(rho, u, v, uv) \ + ((1.0 / 36.0 * (rho + 3.0 * (-u + v) + 4.5 * (-u + v) * (-u + v) - 1.5 * uv))) +#define FEQ_7(rho, u, v, uv) \ + ((1.0 / 36.0 * (rho + 3.0 * (-u - v) + 4.5 * (-u - v) * (-u - v) - 1.5 * uv))) +#define FEQ_8(rho, u, v, uv) \ + ((1.0 / 36.0 * (rho + 3.0 * (u - v) + 4.5 * (u - v) * (u - v) - 1.5 * uv))) +//------------------------------------------------------------------------------------------------------------------------- +#define MEQ_0(rho) (rho) +#define MEQ_1(rho, uv) (-2.0f * rho + 3.0f * uv) +#define MEQ_2(rho, uv) (rho - 3.0f * uv) +#define MEQ_3(u) (u) +#define MEQ_4(u) (-u) +#define MEQ_5(v) (v) +#define MEQ_6(v) (-v) +#define MEQ_7(u, v) (u * u - v * v) +#define MEQ_8(u, v) (u * v) 
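For reference, the FEQ_0..FEQ_8 macros above encode the standard D2Q9 equilibrium in its incompressible form, f_i^eq = w_i * (rho + 3 (e_i . u) + 4.5 (e_i . u)^2 - 1.5 |u|^2), with weight 4/9 for the rest velocity, 1/9 for the four axis directions and 1/36 for the four diagonals; the fourth macro argument uv is |u|^2. The short NumPy sketch below restates the same formula for a single lattice node, assuming lattice units (c = 1); the function name feq and the array layout are illustrative only and are not part of the repository code.

import numpy as np

# D2Q9 lattice velocities and weights, listed in the same order as FEQ_0..FEQ_8
E = np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1],
              [1, 1], [-1, 1], [-1, -1], [1, -1]], dtype=float)
W = np.array([4.0 / 9.0] + [1.0 / 9.0] * 4 + [1.0 / 36.0] * 4)

def feq(rho, u, v):
    """Equilibrium populations f_i^eq at one node (incompressible D2Q9 form)."""
    uv = u * u + v * v                  # |u|^2, the 'uv' argument of the macros
    eu = E[:, 0] * u + E[:, 1] * v      # e_i . u for each of the 9 directions
    return W * (rho + 3.0 * eu + 4.5 * eu * eu - 1.5 * uv)

# Example: feq(1.0, 0.05, 0.0)[1] equals FEQ_1(1.0, 0.05, 0.0, 0.0025).

Because rho enters only the linear term, this is the so-called incompressible equilibrium; the MEQ_* moment equilibria that follow are its projection onto the D2Q9 MRT moment basis.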
+//-------------------------------------------------------------------------------------------------------------------------- +#define F_0(u, v, Fx, Fy) (0.f) +#define F_1(u, v, Fx, Fy) (u * Fx + v * Fy) +#define F_2(u, v, Fx, Fy) (u * Fx + v * Fy) +#define F_3(u, v, Fx, Fy) (Fx) +#define F_4(u, v, Fx, Fy) (Fx) +#define F_5(u, v, Fx, Fy) (Fy) +#define F_6(u, v, Fx, Fy) (Fy) +#define F_7(u, v, Fx, Fy) (u * Fx - v * Fy) +#define F_8(u, v, Fx, Fy) (u * Fy + v * Fx) +//-------------------------------------------------------------------------------------------------------------------------- +#define GEQ_0(C, u, v, uv) ((4.0 / 9.0 * C * (1.f - 1.5 * uv))) +#define GEQ_1(C, u, v, uv) \ + ((1.0 / 9.0 * C * (1.f + 3.0 * u + 4.5 * u * u - 1.5 * uv))) +#define GEQ_2(C, u, v, uv) \ + ((1.0 / 9.0 * C * (1.f + 3.0 * v + 4.5 * v * v - 1.5 * uv))) +#define GEQ_3(C, u, v, uv) \ + ((1.0 / 9.0 * C * (1.f - 3.0 * u + 4.5 * u * u - 1.5 * uv))) +#define GEQ_4(C, u, v, uv) \ + ((1.0 / 9.0 * C * (1.f - 3.0 * v + 4.5 * v * v - 1.5 * uv))) +#define GEQ_5(C, u, v, uv) \ + ((1.0 / 36.0 * C * \ + (1.f + 3.0 * (u + v) + 4.5 * (u + v) * (u + v) - 1.5 * uv))) +#define GEQ_6(C, u, v, uv) \ + ((1.0 / 36.0 * C * \ + (1.f + 3.0 * (-u + v) + 4.5 * (-u + v) * (-u + v) - 1.5 * uv))) +#define GEQ_7(C, u, v, uv) \ + ((1.0 / 36.0 * C * \ + (1.f + 3.0 * (-u - v) + 4.5 * (-u - v) * (-u - v) - 1.5 * uv))) +#define GEQ_8(C, u, v, uv) \ + ((1.0 / 36.0 * C * \ + (1.f + 3.0 * (u - v) + 4.5 * (u - v) * (u - v) - 1.5 * uv))) +//-------------------------------------------------------------------------------------------------------------------------- +#define MGEQ_0(C) (C) +#define MGEQ_1(C, uv) C *(-2.0f + 3.0f * uv) +#define MGEQ_2(C, uv) C *(1.f - 3.0f * uv) +#define MGEQ_3(C, u) C *(u) +#define MGEQ_4(C, u) C *(-u) +#define MGEQ_5(C, v) C *(v) +#define MGEQ_6(C, v) C *(-v) +#define MGEQ_7(C, u, v) C *(u * u - v * v) +#define MGEQ_8(C, u, v) C *(u * v) +//-------------------------------------------------------------------------------------------------------------------------- +#define RHO(y, x) \ + (f[0][y][x] + f[1][y][x] + f[2][y][x] + f[3][y][x] + f[4][y][x] + \ + f[5][y][x] + f[6][y][x] + f[7][y][x] + f[8][y][x]) +#define UX(y, x) \ + (f[1][y][x] + f[5][y][x] + f[8][y][x] - f[3][y][x] - f[6][y][x] - f[7][y][x]) +#define VY(y, x) \ + (f[2][y][x] + f[5][y][x] + f[6][y][x] - f[4][y][x] - f[7][y][x] - f[8][y][x]) +#define C(y, x) \ + (g[0][y][x] + g[1][y][x] + g[2][y][x] + g[3][y][x] + g[4][y][x] + \ + g[5][y][x] + g[6][y][x] + g[7][y][x] + g[8][y][x]) + +//-------------------------------------------------------------------------------------------------------------------------- +void geo(); +void init(); +__global__ void Evol_flow( + double *s_d, double dt, double Fx, double Fy, double *f_d, double *F_d); +__global__ void Bc_flow_X( + double dt, double Fx, double Fy, double U, double *f_d); +__global__ void Bc_flow_Y(double dt, double Fx, double Fy, double *f_d); +__global__ void Bc_flow_BB(int *flag_d, double *f_d); +__global__ void Evol_solute(double *s_d, + double dt, + double Fx, + double Fy, + double *g_d, + double *G_d, + double *f_d); +__global__ void Bc_solute_X( + double dt, double Fx, double Fy, double *g_d, double *f_d); +__global__ void Bc_solute_Y( + double dt, double Fx, double Fy, double *g_d, double *f_d); +__global__ void Bc_solute_BB(); +double error(); +void datasave(); + +#endif diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/dataset.py b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/dataset.py index 
bc6a023e8d..437ed7acdc 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/dataset.py +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/dataset.py @@ -1,275 +1,275 @@ -import numpy as np -import scipy.io as io - -np.random.seed(1234) - - -class DataSet: - def __init__(self, num, bs, modes): - self.num = num - self.bs = bs - self.modes = modes - ( - self.F_train, - self.U_train, - self.V_train, - self.rRe, - self.F_test, - self.U_test, - self.V_test, - self.x_train, - self.x_eq_train, - self.x_test, - self.u_mean, - self.u_std, - self.v_mean, - self.v_std, - self.u_basis, - self.v_basis, - self.lam_u, - self.lam_v, - ) = self.load_data() - - def PODbasis(self): - s = 65 - num_res = s * s - u_basis_out = np.reshape(self.u_basis.T, (-1, num_res, 1)) - v_basis_out = np.reshape(self.v_basis.T, (-1, num_res, 1)) - u_basis_out, v_basis_out = self.decoder(u_basis_out, v_basis_out) - u_basis_out = u_basis_out - self.u_mean - v_basis_out = v_basis_out - self.v_mean - u_basis_out = np.reshape(u_basis_out, (-1, s, s)) - v_basis_out = np.reshape(v_basis_out, (-1, s, s)) - save_dict = { - "u_basis": u_basis_out, - "v_basis": v_basis_out, - "lam_u": self.lam_u, - "lam_v": self.lam_v, - } - io.savemat("./Output/basis.mat", save_dict) - return self.u_basis, self.v_basis - - def samples(self): - """ - num_train = 40000 - num_test = 10000 - data = io.loadmat('./Data/Data') - F = data['F'] - U = data['U'] - """ - - num_train = 1 - x = np.linspace(-1, 1, self.num) - y = np.linspace(-1, 1, self.num) - xx, yy = np.meshgrid(x, y) - xx = np.reshape(xx, (-1, 1)) - yy = np.reshape(yy, (-1, 1)) - x_train = np.hstack((xx, yy)) - F, U = self.func(x_train) - - Num = self.num * self.num - - F = np.reshape(F, (-1, self.num, self.num, 1)) - U = np.reshape(U, (-1, Num, 1)) - F_train = F[:num_train, :, :] - U_train = U[:num_train, :, :] - F_test = F[num_train:, :, :] - U_test = U[num_train:, :, :] - return F_train, U_train, F_test, U_test - - def decoder(self, u, v): - u = u * self.u_std + self.u_mean - v = v * self.v_std + self.v_mean - return u, v - - def load_data(self): - data_train = io.loadmat("./Data/Cavity_Flow") - data_test = io.loadmat("./Data/Cavity_Flow_Test") - - num_Re = 100 - Re = np.linspace(100, 2080, num_Re).reshape((-1, 1, 1)) - Re = Re.astype(np.float32) - rRe = 1.0 / Re - - a_train = data_train["u_bc"].astype(np.float32) - u_train = data_train["u_data"].astype(np.float32) - v_train = data_train["v_data"].astype(np.float32) - - a_test = data_test["u_bc"].astype(np.float32) - u_test = data_test["u_data"].astype(np.float32) - v_test = data_test["v_data"].astype(np.float32) - - xx = data_train["x_2d"].astype(np.float32) - yy = data_train["y_2d"].astype(np.float32) - - res_step = 4 - u_train = u_train[:, ::res_step, ::res_step] - v_train = v_train[:, ::res_step, ::res_step] - xx_train = xx[::res_step, ::res_step] - yy_train = yy[::res_step, ::res_step] - - xx_eq = xx[1:64, 1:64] - yy_eq = yy[1:64, 1:64] - xx_eq = np.reshape(xx_eq, (-1, 1)) - yy_eq = np.reshape(yy_eq, (-1, 1)) - x_eq_train = np.hstack((xx_eq, yy_eq)) - - xx_train = np.reshape(xx_train, (-1, 1)) - yy_train = np.reshape(yy_train, (-1, 1)) - x_train = np.hstack((xx_train, yy_train)) - - xx_test = np.reshape(xx, (-1, 1)) - yy_test = np.reshape(yy, (-1, 1)) - x_test = np.hstack((xx_test, yy_test)) - - """ - perm = np.random.permutation(a.shape[0]) - a = a[perm, :] - u = u[perm, :, :] - v = v[perm, :, :] - """ - - num_train = 100 - s_f = 65 - s = u_train.shape[1] - num_res = s * s - - u_train_mean = np.mean(np.reshape(u_train, (-1, 
s, s)), 0) - u_train_std = np.std(np.reshape(u_train, (-1, s, s)), 0) - v_train_mean = np.mean(np.reshape(v_train, (-1, s, s)), 0) - v_train_std = np.std(np.reshape(v_train, (-1, s, s)), 0) - - u_train_mean = np.reshape(u_train_mean, (-1, num_res, 1)) - u_train_std = np.reshape(u_train_std, (-1, num_res, 1)) - v_train_mean = np.reshape(v_train_mean, (-1, num_res, 1)) - v_train_std = np.reshape(v_train_std, (-1, num_res, 1)) - - F_train = np.reshape(a_train, (-1, s_f)) - U_train = np.reshape(u_train, (-1, num_res, 1)) - V_train = np.reshape(v_train, (-1, num_res, 1)) - - # F_train = (F_train - f_train_mean)/(f_train_std + 1.0e-9) - # U_train = (U_train - u_train_mean)/(u_train_std + 1.0e-9) - # V_train = (V_train - v_train_mean)/(v_train_std + 1.0e-9) - - # F_test = (F_test - f_train_mean)/(f_train_std + 1.0e-9) - - U = np.reshape(U_train, (-1, num_res)) - V = np.reshape(V_train, (-1, num_res)) - C_u = 1.0 / (num_train - 1) * np.matmul(U.T, U) - C_v = 1.0 / (num_train - 1) * np.matmul(V.T, V) - lam_u, phi_u = np.linalg.eigh(C_u) - lam_v, phi_v = np.linalg.eigh(C_v) - - lam_u = np.flip(lam_u) - phi_u = np.fliplr(phi_u) - lam_v = np.flip(lam_v) - phi_v = np.fliplr(phi_v) - - u_cumsum = np.cumsum(lam_u) - v_cumsum = np.cumsum(lam_v) - u_per = u_cumsum[self.modes - 1] / u_cumsum[-1] - v_per = v_cumsum[self.modes - 1] / v_cumsum[-1] - - u_basis = phi_u[:, : self.modes] - v_basis = phi_v[:, : self.modes] - - print("Kept Energy: u: %.3f, v: %.3f" % (u_per, v_per)) - - num_res = s_f * s_f - F_test = np.reshape(a_test, (-1, s_f)) - U_test = np.reshape(u_test, (-1, num_res, 1)) - V_test = np.reshape(v_test, (-1, num_res, 1)) - """ - plt.plot(lam_u[:self.modes], 'k-') - plt.plot(lam_v[:self.modes], 'r--') - plt.show() - """ - - """ - F_train = np.reshape(f_train, (-1, s)) - U_train = np.reshape(u_train, (-1, num_res, 1)) - V_train = np.reshape(v_train, (-1, num_res, 1)) - - F_test = np.reshape(f_test, (-1, s)) - U_test = np.reshape(u_test, (-1, num_res, 1)) - V_test = np.reshape(v_test, (-1, num_res, 1)) - """ - - """ - U_ref = np.reshape(U_test, (U_test.shape[0], U_test.shape[1])) - np.savetxt('./Output/u_ref', U_ref, fmt='%e') - """ - - return ( - F_train, - U_train, - V_train, - rRe, - F_test, - U_test, - V_test, - x_train, - x_eq_train, - x_test, - u_train_mean, - u_train_std, - v_train_mean, - v_train_std, - u_basis, - v_basis, - lam_u, - lam_v, - ) - - def minibatch(self): - batch_id = np.random.choice(self.F_train.shape[0], self.bs, replace=False) - f_train = [self.F_train[i : i + 1] for i in batch_id] - f_train = np.concatenate(f_train, axis=0) - u_train = [self.U_train[i : i + 1] for i in batch_id] - u_train = np.concatenate(u_train, axis=0) - v_train = [self.V_train[i : i + 1] for i in batch_id] - v_train = np.concatenate(v_train, axis=0) - rRe_train = [self.rRe[i : i + 1] for i in batch_id] - rRe_train = np.concatenate(rRe_train, axis=0) - - """ - x = np.linspace(0., 1, self.num) - y = np.linspace(0., 1, self.num) - xx, yy = np.meshgrid(x, y) - xx = np.reshape(xx, (-1, 1)) - yy = np.reshape(yy, (-1, 1)) - x_train = np.hstack((xx, yy)) - """ - - Xmin = np.array([0.0, 0.0]).reshape((-1, 2)) - Xmax = np.array([1.0, 1.0]).reshape((-1, 2)) - # x_train = np.linspace(-1, 1, self.N).reshape((-1, 1)) - - return ( - self.x_train, - self.x_eq_train, - f_train, - rRe_train, - u_train, - v_train, - Xmin, - Xmax, - ) - - def testbatch(self): - """ - batch_id = np.random.choice(self.F_test.shape[0], num_test, replace=False) - f_test = [self.F_test[i:i+1] for i in batch_id] - f_test = 
np.concatenate(f_test, axis=0) - u_test = [self.U_test[i:i+1] for i in batch_id] - u_test = np.concatenate(u_test, axis=0) - v_test = [self.V_test[i:i+1] for i in batch_id] - v_test = np.concatenate(v_test, axis=0) - batch_id = np.reshape(batch_id, (-1, 1)) - """ - - x_test = self.x_test - f_test, u_test, v_test = self.F_test, self.U_test, self.V_test - - return x_test, f_test, u_test, v_test +import numpy as np +import scipy.io as io + +np.random.seed(1234) + + +class DataSet: + def __init__(self, num, bs, modes): + self.num = num + self.bs = bs + self.modes = modes + ( + self.F_train, + self.U_train, + self.V_train, + self.rRe, + self.F_test, + self.U_test, + self.V_test, + self.x_train, + self.x_eq_train, + self.x_test, + self.u_mean, + self.u_std, + self.v_mean, + self.v_std, + self.u_basis, + self.v_basis, + self.lam_u, + self.lam_v, + ) = self.load_data() + + def PODbasis(self): + s = 65 + num_res = s * s + u_basis_out = np.reshape(self.u_basis.T, (-1, num_res, 1)) + v_basis_out = np.reshape(self.v_basis.T, (-1, num_res, 1)) + u_basis_out, v_basis_out = self.decoder(u_basis_out, v_basis_out) + u_basis_out = u_basis_out - self.u_mean + v_basis_out = v_basis_out - self.v_mean + u_basis_out = np.reshape(u_basis_out, (-1, s, s)) + v_basis_out = np.reshape(v_basis_out, (-1, s, s)) + save_dict = { + "u_basis": u_basis_out, + "v_basis": v_basis_out, + "lam_u": self.lam_u, + "lam_v": self.lam_v, + } + io.savemat("./Output/basis.mat", save_dict) + return self.u_basis, self.v_basis + + def samples(self): + """ + num_train = 40000 + num_test = 10000 + data = io.loadmat('./Data/Data') + F = data['F'] + U = data['U'] + """ + + num_train = 1 + x = np.linspace(-1, 1, self.num) + y = np.linspace(-1, 1, self.num) + xx, yy = np.meshgrid(x, y) + xx = np.reshape(xx, (-1, 1)) + yy = np.reshape(yy, (-1, 1)) + x_train = np.hstack((xx, yy)) + F, U = self.func(x_train) + + Num = self.num * self.num + + F = np.reshape(F, (-1, self.num, self.num, 1)) + U = np.reshape(U, (-1, Num, 1)) + F_train = F[:num_train, :, :] + U_train = U[:num_train, :, :] + F_test = F[num_train:, :, :] + U_test = U[num_train:, :, :] + return F_train, U_train, F_test, U_test + + def decoder(self, u, v): + u = u * self.u_std + self.u_mean + v = v * self.v_std + self.v_mean + return u, v + + def load_data(self): + data_train = io.loadmat("./Data/Cavity_Flow") + data_test = io.loadmat("./Data/Cavity_Flow_Test") + + num_Re = 100 + Re = np.linspace(100, 2080, num_Re).reshape((-1, 1, 1)) + Re = Re.astype(np.float32) + rRe = 1.0 / Re + + a_train = data_train["u_bc"].astype(np.float32) + u_train = data_train["u_data"].astype(np.float32) + v_train = data_train["v_data"].astype(np.float32) + + a_test = data_test["u_bc"].astype(np.float32) + u_test = data_test["u_data"].astype(np.float32) + v_test = data_test["v_data"].astype(np.float32) + + xx = data_train["x_2d"].astype(np.float32) + yy = data_train["y_2d"].astype(np.float32) + + res_step = 4 + u_train = u_train[:, ::res_step, ::res_step] + v_train = v_train[:, ::res_step, ::res_step] + xx_train = xx[::res_step, ::res_step] + yy_train = yy[::res_step, ::res_step] + + xx_eq = xx[1:64, 1:64] + yy_eq = yy[1:64, 1:64] + xx_eq = np.reshape(xx_eq, (-1, 1)) + yy_eq = np.reshape(yy_eq, (-1, 1)) + x_eq_train = np.hstack((xx_eq, yy_eq)) + + xx_train = np.reshape(xx_train, (-1, 1)) + yy_train = np.reshape(yy_train, (-1, 1)) + x_train = np.hstack((xx_train, yy_train)) + + xx_test = np.reshape(xx, (-1, 1)) + yy_test = np.reshape(yy, (-1, 1)) + x_test = np.hstack((xx_test, yy_test)) + + """ + perm = 
np.random.permutation(a.shape[0]) + a = a[perm, :] + u = u[perm, :, :] + v = v[perm, :, :] + """ + + num_train = 100 + s_f = 65 + s = u_train.shape[1] + num_res = s * s + + u_train_mean = np.mean(np.reshape(u_train, (-1, s, s)), 0) + u_train_std = np.std(np.reshape(u_train, (-1, s, s)), 0) + v_train_mean = np.mean(np.reshape(v_train, (-1, s, s)), 0) + v_train_std = np.std(np.reshape(v_train, (-1, s, s)), 0) + + u_train_mean = np.reshape(u_train_mean, (-1, num_res, 1)) + u_train_std = np.reshape(u_train_std, (-1, num_res, 1)) + v_train_mean = np.reshape(v_train_mean, (-1, num_res, 1)) + v_train_std = np.reshape(v_train_std, (-1, num_res, 1)) + + F_train = np.reshape(a_train, (-1, s_f)) + U_train = np.reshape(u_train, (-1, num_res, 1)) + V_train = np.reshape(v_train, (-1, num_res, 1)) + + # F_train = (F_train - f_train_mean)/(f_train_std + 1.0e-9) + # U_train = (U_train - u_train_mean)/(u_train_std + 1.0e-9) + # V_train = (V_train - v_train_mean)/(v_train_std + 1.0e-9) + + # F_test = (F_test - f_train_mean)/(f_train_std + 1.0e-9) + + U = np.reshape(U_train, (-1, num_res)) + V = np.reshape(V_train, (-1, num_res)) + C_u = 1.0 / (num_train - 1) * np.matmul(U.T, U) + C_v = 1.0 / (num_train - 1) * np.matmul(V.T, V) + lam_u, phi_u = np.linalg.eigh(C_u) + lam_v, phi_v = np.linalg.eigh(C_v) + + lam_u = np.flip(lam_u) + phi_u = np.fliplr(phi_u) + lam_v = np.flip(lam_v) + phi_v = np.fliplr(phi_v) + + u_cumsum = np.cumsum(lam_u) + v_cumsum = np.cumsum(lam_v) + u_per = u_cumsum[self.modes - 1] / u_cumsum[-1] + v_per = v_cumsum[self.modes - 1] / v_cumsum[-1] + + u_basis = phi_u[:, : self.modes] + v_basis = phi_v[:, : self.modes] + + print("Kept Energy: u: %.3f, v: %.3f" % (u_per, v_per)) + + num_res = s_f * s_f + F_test = np.reshape(a_test, (-1, s_f)) + U_test = np.reshape(u_test, (-1, num_res, 1)) + V_test = np.reshape(v_test, (-1, num_res, 1)) + """ + plt.plot(lam_u[:self.modes], 'k-') + plt.plot(lam_v[:self.modes], 'r--') + plt.show() + """ + + """ + F_train = np.reshape(f_train, (-1, s)) + U_train = np.reshape(u_train, (-1, num_res, 1)) + V_train = np.reshape(v_train, (-1, num_res, 1)) + + F_test = np.reshape(f_test, (-1, s)) + U_test = np.reshape(u_test, (-1, num_res, 1)) + V_test = np.reshape(v_test, (-1, num_res, 1)) + """ + + """ + U_ref = np.reshape(U_test, (U_test.shape[0], U_test.shape[1])) + np.savetxt('./Output/u_ref', U_ref, fmt='%e') + """ + + return ( + F_train, + U_train, + V_train, + rRe, + F_test, + U_test, + V_test, + x_train, + x_eq_train, + x_test, + u_train_mean, + u_train_std, + v_train_mean, + v_train_std, + u_basis, + v_basis, + lam_u, + lam_v, + ) + + def minibatch(self): + batch_id = np.random.choice(self.F_train.shape[0], self.bs, replace=False) + f_train = [self.F_train[i : i + 1] for i in batch_id] + f_train = np.concatenate(f_train, axis=0) + u_train = [self.U_train[i : i + 1] for i in batch_id] + u_train = np.concatenate(u_train, axis=0) + v_train = [self.V_train[i : i + 1] for i in batch_id] + v_train = np.concatenate(v_train, axis=0) + rRe_train = [self.rRe[i : i + 1] for i in batch_id] + rRe_train = np.concatenate(rRe_train, axis=0) + + """ + x = np.linspace(0., 1, self.num) + y = np.linspace(0., 1, self.num) + xx, yy = np.meshgrid(x, y) + xx = np.reshape(xx, (-1, 1)) + yy = np.reshape(yy, (-1, 1)) + x_train = np.hstack((xx, yy)) + """ + + Xmin = np.array([0.0, 0.0]).reshape((-1, 2)) + Xmax = np.array([1.0, 1.0]).reshape((-1, 2)) + # x_train = np.linspace(-1, 1, self.N).reshape((-1, 1)) + + return ( + self.x_train, + self.x_eq_train, + f_train, + rRe_train, + u_train, 
+ v_train, + Xmin, + Xmax, + ) + + def testbatch(self): + """ + batch_id = np.random.choice(self.F_test.shape[0], num_test, replace=False) + f_test = [self.F_test[i:i+1] for i in batch_id] + f_test = np.concatenate(f_test, axis=0) + u_test = [self.U_test[i:i+1] for i in batch_id] + u_test = np.concatenate(u_test, axis=0) + v_test = [self.V_test[i:i+1] for i in batch_id] + v_test = np.concatenate(v_test, axis=0) + batch_id = np.reshape(batch_id, (-1, 1)) + """ + + x_test = self.x_test + f_test, u_test, v_test = self.F_test, self.U_test, self.V_test + + return x_test, f_test, u_test, v_test diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/main.py b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/main.py index a8873f6a0b..c77eb1209b 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/main.py +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/main.py @@ -1,241 +1,241 @@ -import time - -import numpy as np -import paddle as pd -import paddle.nn.functional as F -from dataset import DataSet - -pd.set_default_dtype("float32") - -# from nn import DeepONet - -pd.seed(1234) -np.random.seed(1234) - -pd.device.set_device("gpu:0") - -# resolution -h = 65 -w = 65 - -# output dimension of Branch/Trunk -p = 100 -p1 = p // 2 - -# batch_size -bs = 10 - -# size of input for Trunk net -nx = h -x_num = nx * nx - -# POD modes -modes = 20 -out_dims = 50 -wh = 100 -# coeffs for POD -# layer_pod = [h, 64, 64, 2*modes] - - -class DeepONet(pd.nn.Layer): - def __init__(self): - super(DeepONet, self).__init__() - - ##paddle-Branch net - self.fnn_B = pd.nn.Sequential( - pd.nn.Linear(h, wh), - pd.nn.Tanh(), - pd.nn.Linear(wh, wh), - pd.nn.Tanh(), - pd.nn.Linear(wh, out_dims), - ) - - ##paddle-Trunk net - self.fnn_T = pd.nn.Sequential( - pd.nn.Linear(2, wh), - pd.nn.Tanh(), - pd.nn.Linear(wh, wh), - pd.nn.Tanh(), - pd.nn.Linear(wh, wh), - pd.nn.Tanh(), - pd.nn.Linear(wh, out_dims), - ) - - def forward(self, Bin, Tin): - out_B, out_T = self.fnn_B(Bin), self.fnn_T(Tin) - out_B = pd.tile(out_B[:, None, :], repeat_times=[1, Tin.shape[1], 1]) - out_B_u, out_B_v, out_B_p = ( - out_B[:, :, :modes], - out_B[:, :, modes : 2 * modes], - out_B[:, :, 2 * modes :], - ) - out_T_u, out_T_v, out_T_p = ( - out_T[:, :, :modes], - out_T[:, :, modes : 2 * modes], - out_T[:, :, 2 * modes :], - ) - u_pred = pd.sum(out_B_u * out_T_u, axis=-1, keepdim=True) - v_pred = pd.sum(out_B_v * out_T_v, axis=-1, keepdim=True) - p_pred = pd.sum(out_B_p * out_T_p, axis=-1, keepdim=True) - """ - u_pred = pd.einsum('bi,ni->bn', out_B_u, out_T_u) - v_pred = pd.einsum('bi,ni->bn', out_B_v, out_T_v) - """ - return u_pred, v_pred, p_pred - - def eq(self, Bin, Tin, rRe, data): - u_pred, v_pred, p_pred = self.forward(Bin, Tin) - # u_pred, v_pred = data.decoder(u_pred, v_pred) - du = pd.grad(u_pred, Tin, create_graph=True, retain_graph=True)[0] - dv = pd.grad(v_pred, Tin, create_graph=True, retain_graph=True)[0] - dp = pd.grad(p_pred, Tin, create_graph=True, retain_graph=True)[0] - u_x, u_y = du[:, :, 0:1], du[:, :, 1:2] - v_x, v_y = dv[:, :, 0:1], dv[:, :, 1:2] - p_x, p_y = dp[:, :, 0:1], dp[:, :, 1:2] - ddux = pd.grad(u_x, Tin, create_graph=True, retain_graph=True)[0] - dduy = pd.grad(u_y, Tin, create_graph=True, retain_graph=True)[0] - ddvx = pd.grad(v_x, Tin, create_graph=True, retain_graph=True)[0] - ddvy = pd.grad(v_y, Tin, create_graph=True, retain_graph=True)[0] - u_xx, u_yy = ddux[:, :, 0:1], dduy[:, :, 1:2] - v_xx, v_yy = ddvx[:, :, 0:1], ddvy[:, :, 1:2] - eq1 = u_x + v_y - eq2 = u_pred * u_x + v_pred * u_y - rRe * (u_xx + 
u_yy) + p_x - eq3 = u_pred * v_x + v_pred * v_y - rRe * (v_xx + v_yy) + p_y - return eq1, eq2, eq3 - - -def prediction(model, data, x_test, f_test, u_test, v_test): - """ - out_B, out_T = model.forward(f_test, x_test) - out_B_u, out_B_v = out_B[:, :modes], out_B[:, modes:] - out_T_u, out_T_v = out_T[:, :modes], out_T[:, modes:] - u_pred = pd.einsum('bi,ni->bn', out_B_u, out_T_u) - v_pred = pd.einsum('bi,ni->bn', out_B_v, out_T_v) - """ - u_pred, v_pred, _ = model.forward(f_test, x_test) - u_pred, v_pred = u_pred.numpy(), v_pred.numpy() - # u_temp, v_temp = np.tile(u_pred[:, :, None], [1, 1, 1]), np.tile(v_pred[:, :, None], [1, 1, 1]) - # u_pred, v_pred = data.decoder(u_temp, v_temp) - # u_pred, v_pred = data.decoder(u_pred, v_pred) - err_u = np.mean( - np.linalg.norm(u_pred - u_test, 2, axis=1) / np.linalg.norm(u_test, 2, axis=1) - ) - err_v = np.mean( - np.linalg.norm(v_pred - v_test, 2, axis=1) / np.linalg.norm(v_test, 2, axis=1) - ) - return err_u, err_v - - -def main(): - data = DataSet(nx, bs, modes) - """ - u_basis, v_basis = data.PODbasis() - u_basis = pd.to_tensor(u_basis) - v_basis = pd.to_tensor(v_basis) - """ - model = DeepONet() - x_train, x_eq_train, f_train, rRe_train, u_train, v_train, _, _ = data.minibatch() - x_train, x_eq_train, f_train, rRe_train, u_train, v_train = ( - pd.to_tensor(x_train), - pd.to_tensor(x_eq_train), - pd.to_tensor(f_train), - pd.to_tensor(rRe_train), - pd.to_tensor(u_train), - pd.to_tensor(v_train), - ) - x_train = pd.tile(x_train[None, :, :], repeat_times=[bs, 1, 1]) - x_eq_train = pd.tile(x_eq_train[None, :, :], repeat_times=[bs, 1, 1]) - """ - x_num = x_train.shape[0] - x_train = pd.tile(x_train[None, :, :], repeat_times=[bs, 1, 1]) - f_train = pd.tile(f_train[:, None, :], repeat_times=[1, x_num, 1]) - """ - x_train.stop_gradient = False - x_eq_train.stop_gradient = False - - # optimizer - opt = pd.optimizer.Adam(learning_rate=5.0e-4, parameters=model.parameters()) - - model.train() - - x_test, f_test, u_test, v_test = data.testbatch() - x_test, f_test = pd.to_tensor(x_test), pd.to_tensor(f_test) - x_test = pd.tile(x_test[None, :, :], repeat_times=[f_test.shape[0], 1, 1]) - n = 0 - - nmax = 150000 - start_time = time.perf_counter() - time_step_0 = time.perf_counter() - while n <= nmax: - - _, _, f_train, rRe_train, u_train, v_train, _, _ = data.minibatch() - f_train, rRe_train, u_train, v_train = ( - pd.to_tensor(f_train), - pd.to_tensor(rRe_train), - pd.to_tensor(u_train), - pd.to_tensor(v_train), - ) - """ - out_B = fnn_B(f_train) - out_T = fnn_T(x_train) - out_B, out_T = model.forward(f_train, x_train) - """ - """ - out_B = pd.tile(out_B[:. 
None, :], repeat_time=[1, x_num, 1]) - out_T = pd.tile(out_T[None, :, :], repeat_time=[bs, 1, 1]) - """ - """ - out_B_u, out_B_v = out_B[:, :modes], out_B[:, modes:] - out_T_u, out_T_v = out_T[:, :modes], out_T[:, modes:] - """ - - # u_pred = pd.einsum('bi,ni->bn', out_B_u, out_T_u) - # v_pred = pd.einsum('bi,ni->bn', out_B_v, out_T_v) - u_pred, v_pred, _ = model.forward(f_train, x_train) - eq1, eq2, eq3 = model.eq(f_train, x_eq_train, rRe_train, data) - data_loss = F.mse_loss(u_pred, u_train) + F.mse_loss(v_pred, v_train) - eq1_loss = F.mse_loss(eq1, pd.zeros(shape=eq1.shape)) - eq_loss = eq1_loss + 0.1 * ( - F.mse_loss(eq2, pd.zeros(shape=eq2.shape)) - + F.mse_loss(eq3, pd.zeros(eq3.shape)) - ) - loss = data_loss + eq_loss - - loss.backward() - opt.step() - opt.clear_grad() - - if n % 100 == 0: - time_step_1000 = time.perf_counter() - T = time_step_1000 - time_step_0 - err_u, err_v = prediction(model, data, x_test, f_test, u_test, v_test) - # err_u, err_v = data_save.save(sess, x_pos, f_ph, u_ph, v_ph, u_pred, v_pred, data, num_test, h) - print( - "Step: %d, Loss: %.3e, err_u: %.3e, err_v: %.3e, Time (secs): %.3f" - % (n, float(loss), err_u, err_v, T) - ) - """ - print('Step: %d, Loss: %.3e, Loss(eq): %.3e, Loss(eq1): %.3e, err_u: %.3e, err_v: %.3e, Time (secs): %.3f'\ - %(n, float(loss), float(eq_loss), float(eq1_loss), err_u, err_v, T)) - """ - # print('Step: %d, Loss: %.3e, Time (secs): %.3f'%(n, float(loss), T)) - time_step_0 = time.perf_counter() - - if n % 1000 == 0: - pd.save(model.state_dict(), "./checkpoint/DeepONet.pdparams") - pd.save(opt.state_dict(), "./checkpoint/opt.pdopt") - - n += 1 - - stop_time = time.perf_counter() - print("Training time (secs): %.3f" % (stop_time - start_time)) - - start_time = time.perf_counter() - err_u, err_v = prediction(model, data, x_test, f_test, u_test, v_test) - stop_time = time.perf_counter() - T = stop_time - start_time - print("err_u: %.3e, err_v: %.3e, Inference time (secs): %.5f" % (err_u, err_v, T)) - - -if __name__ == "__main__": - main() +import time + +import numpy as np +import paddle as pd +import paddle.nn.functional as F +from dataset import DataSet + +pd.set_default_dtype("float32") + +# from nn import DeepONet + +pd.seed(1234) +np.random.seed(1234) + +pd.device.set_device("gpu:0") + +# resolution +h = 65 +w = 65 + +# output dimension of Branch/Trunk +p = 100 +p1 = p // 2 + +# batch_size +bs = 10 + +# size of input for Trunk net +nx = h +x_num = nx * nx + +# POD modes +modes = 20 +out_dims = 50 +wh = 100 +# coeffs for POD +# layer_pod = [h, 64, 64, 2*modes] + + +class DeepONet(pd.nn.Layer): + def __init__(self): + super(DeepONet, self).__init__() + + ##paddle-Branch net + self.fnn_B = pd.nn.Sequential( + pd.nn.Linear(h, wh), + pd.nn.Tanh(), + pd.nn.Linear(wh, wh), + pd.nn.Tanh(), + pd.nn.Linear(wh, out_dims), + ) + + ##paddle-Trunk net + self.fnn_T = pd.nn.Sequential( + pd.nn.Linear(2, wh), + pd.nn.Tanh(), + pd.nn.Linear(wh, wh), + pd.nn.Tanh(), + pd.nn.Linear(wh, wh), + pd.nn.Tanh(), + pd.nn.Linear(wh, out_dims), + ) + + def forward(self, Bin, Tin): + out_B, out_T = self.fnn_B(Bin), self.fnn_T(Tin) + out_B = pd.tile(out_B[:, None, :], repeat_times=[1, Tin.shape[1], 1]) + out_B_u, out_B_v, out_B_p = ( + out_B[:, :, :modes], + out_B[:, :, modes : 2 * modes], + out_B[:, :, 2 * modes :], + ) + out_T_u, out_T_v, out_T_p = ( + out_T[:, :, :modes], + out_T[:, :, modes : 2 * modes], + out_T[:, :, 2 * modes :], + ) + u_pred = pd.sum(out_B_u * out_T_u, axis=-1, keepdim=True) + v_pred = pd.sum(out_B_v * out_T_v, axis=-1, keepdim=True) + 
p_pred = pd.sum(out_B_p * out_T_p, axis=-1, keepdim=True) + """ + u_pred = pd.einsum('bi,ni->bn', out_B_u, out_T_u) + v_pred = pd.einsum('bi,ni->bn', out_B_v, out_T_v) + """ + return u_pred, v_pred, p_pred + + def eq(self, Bin, Tin, rRe, data): + u_pred, v_pred, p_pred = self.forward(Bin, Tin) + # u_pred, v_pred = data.decoder(u_pred, v_pred) + du = pd.grad(u_pred, Tin, create_graph=True, retain_graph=True)[0] + dv = pd.grad(v_pred, Tin, create_graph=True, retain_graph=True)[0] + dp = pd.grad(p_pred, Tin, create_graph=True, retain_graph=True)[0] + u_x, u_y = du[:, :, 0:1], du[:, :, 1:2] + v_x, v_y = dv[:, :, 0:1], dv[:, :, 1:2] + p_x, p_y = dp[:, :, 0:1], dp[:, :, 1:2] + ddux = pd.grad(u_x, Tin, create_graph=True, retain_graph=True)[0] + dduy = pd.grad(u_y, Tin, create_graph=True, retain_graph=True)[0] + ddvx = pd.grad(v_x, Tin, create_graph=True, retain_graph=True)[0] + ddvy = pd.grad(v_y, Tin, create_graph=True, retain_graph=True)[0] + u_xx, u_yy = ddux[:, :, 0:1], dduy[:, :, 1:2] + v_xx, v_yy = ddvx[:, :, 0:1], ddvy[:, :, 1:2] + eq1 = u_x + v_y + eq2 = u_pred * u_x + v_pred * u_y - rRe * (u_xx + u_yy) + p_x + eq3 = u_pred * v_x + v_pred * v_y - rRe * (v_xx + v_yy) + p_y + return eq1, eq2, eq3 + + +def prediction(model, data, x_test, f_test, u_test, v_test): + """ + out_B, out_T = model.forward(f_test, x_test) + out_B_u, out_B_v = out_B[:, :modes], out_B[:, modes:] + out_T_u, out_T_v = out_T[:, :modes], out_T[:, modes:] + u_pred = pd.einsum('bi,ni->bn', out_B_u, out_T_u) + v_pred = pd.einsum('bi,ni->bn', out_B_v, out_T_v) + """ + u_pred, v_pred, _ = model.forward(f_test, x_test) + u_pred, v_pred = u_pred.numpy(), v_pred.numpy() + # u_temp, v_temp = np.tile(u_pred[:, :, None], [1, 1, 1]), np.tile(v_pred[:, :, None], [1, 1, 1]) + # u_pred, v_pred = data.decoder(u_temp, v_temp) + # u_pred, v_pred = data.decoder(u_pred, v_pred) + err_u = np.mean( + np.linalg.norm(u_pred - u_test, 2, axis=1) / np.linalg.norm(u_test, 2, axis=1) + ) + err_v = np.mean( + np.linalg.norm(v_pred - v_test, 2, axis=1) / np.linalg.norm(v_test, 2, axis=1) + ) + return err_u, err_v + + +def main(): + data = DataSet(nx, bs, modes) + """ + u_basis, v_basis = data.PODbasis() + u_basis = pd.to_tensor(u_basis) + v_basis = pd.to_tensor(v_basis) + """ + model = DeepONet() + x_train, x_eq_train, f_train, rRe_train, u_train, v_train, _, _ = data.minibatch() + x_train, x_eq_train, f_train, rRe_train, u_train, v_train = ( + pd.to_tensor(x_train), + pd.to_tensor(x_eq_train), + pd.to_tensor(f_train), + pd.to_tensor(rRe_train), + pd.to_tensor(u_train), + pd.to_tensor(v_train), + ) + x_train = pd.tile(x_train[None, :, :], repeat_times=[bs, 1, 1]) + x_eq_train = pd.tile(x_eq_train[None, :, :], repeat_times=[bs, 1, 1]) + """ + x_num = x_train.shape[0] + x_train = pd.tile(x_train[None, :, :], repeat_times=[bs, 1, 1]) + f_train = pd.tile(f_train[:, None, :], repeat_times=[1, x_num, 1]) + """ + x_train.stop_gradient = False + x_eq_train.stop_gradient = False + + # optimizer + opt = pd.optimizer.Adam(learning_rate=5.0e-4, parameters=model.parameters()) + + model.train() + + x_test, f_test, u_test, v_test = data.testbatch() + x_test, f_test = pd.to_tensor(x_test), pd.to_tensor(f_test) + x_test = pd.tile(x_test[None, :, :], repeat_times=[f_test.shape[0], 1, 1]) + n = 0 + + nmax = 150000 + start_time = time.perf_counter() + time_step_0 = time.perf_counter() + while n <= nmax: + + _, _, f_train, rRe_train, u_train, v_train, _, _ = data.minibatch() + f_train, rRe_train, u_train, v_train = ( + pd.to_tensor(f_train), + pd.to_tensor(rRe_train), + 
pd.to_tensor(u_train), + pd.to_tensor(v_train), + ) + """ + out_B = fnn_B(f_train) + out_T = fnn_T(x_train) + out_B, out_T = model.forward(f_train, x_train) + """ + """ + out_B = pd.tile(out_B[:. None, :], repeat_time=[1, x_num, 1]) + out_T = pd.tile(out_T[None, :, :], repeat_time=[bs, 1, 1]) + """ + """ + out_B_u, out_B_v = out_B[:, :modes], out_B[:, modes:] + out_T_u, out_T_v = out_T[:, :modes], out_T[:, modes:] + """ + + # u_pred = pd.einsum('bi,ni->bn', out_B_u, out_T_u) + # v_pred = pd.einsum('bi,ni->bn', out_B_v, out_T_v) + u_pred, v_pred, _ = model.forward(f_train, x_train) + eq1, eq2, eq3 = model.eq(f_train, x_eq_train, rRe_train, data) + data_loss = F.mse_loss(u_pred, u_train) + F.mse_loss(v_pred, v_train) + eq1_loss = F.mse_loss(eq1, pd.zeros(shape=eq1.shape)) + eq_loss = eq1_loss + 0.1 * ( + F.mse_loss(eq2, pd.zeros(shape=eq2.shape)) + + F.mse_loss(eq3, pd.zeros(eq3.shape)) + ) + loss = data_loss + eq_loss + + loss.backward() + opt.step() + opt.clear_grad() + + if n % 100 == 0: + time_step_1000 = time.perf_counter() + T = time_step_1000 - time_step_0 + err_u, err_v = prediction(model, data, x_test, f_test, u_test, v_test) + # err_u, err_v = data_save.save(sess, x_pos, f_ph, u_ph, v_ph, u_pred, v_pred, data, num_test, h) + print( + "Step: %d, Loss: %.3e, err_u: %.3e, err_v: %.3e, Time (secs): %.3f" + % (n, float(loss), err_u, err_v, T) + ) + """ + print('Step: %d, Loss: %.3e, Loss(eq): %.3e, Loss(eq1): %.3e, err_u: %.3e, err_v: %.3e, Time (secs): %.3f'\ + %(n, float(loss), float(eq_loss), float(eq1_loss), err_u, err_v, T)) + """ + # print('Step: %d, Loss: %.3e, Time (secs): %.3f'%(n, float(loss), T)) + time_step_0 = time.perf_counter() + + if n % 1000 == 0: + pd.save(model.state_dict(), "./checkpoint/DeepONet.pdparams") + pd.save(opt.state_dict(), "./checkpoint/opt.pdopt") + + n += 1 + + stop_time = time.perf_counter() + print("Training time (secs): %.3f" % (stop_time - start_time)) + + start_time = time.perf_counter() + err_u, err_v = prediction(model, data, x_test, f_test, u_test, v_test) + stop_time = time.perf_counter() + T = stop_time - start_time + print("err_u: %.3e, err_v: %.3e, Inference time (secs): %.5f" % (err_u, err_v, T)) + + +if __name__ == "__main__": + main() diff --git a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/nn.py b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/nn.py index 813beceadb..3db32293a8 100644 --- a/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/nn.py +++ b/jointContribution/PIDeepONet-LBM/task3-PIDeepONet/nn.py @@ -1,27 +1,27 @@ -import paddle as pd - - -class DeepONet(pd.nn.Layer): - def __init__(self, h, out_dims): - super(DeepONet, self).__init__() - - ##paddle-Branch net - self.fnn_B = pd.nn.Sequential( - pd.nn.Linear(h, 128), - pd.nn.Tanh(), - pd.nn.Linear(128, 128), - pd.nn.Tanh(), - pd.nn.Linear(128, out_dims), - ) - - ##paddle-Trunk net - self.fnn_T = pd.nn.Sequential( - pd.nn.Linear(2, 128), - pd.nn.Tanh(), - pd.nn.Linear(128, 128), - pd.nn.Tanh(), - pd.nn.Linear(128, out_dims), - ) - - def forward(self, Bin, Tin): - return self.fnn_B(Bin), self.fnn_T(Tin) +import paddle as pd + + +class DeepONet(pd.nn.Layer): + def __init__(self, h, out_dims): + super(DeepONet, self).__init__() + + ##paddle-Branch net + self.fnn_B = pd.nn.Sequential( + pd.nn.Linear(h, 128), + pd.nn.Tanh(), + pd.nn.Linear(128, 128), + pd.nn.Tanh(), + pd.nn.Linear(128, out_dims), + ) + + ##paddle-Trunk net + self.fnn_T = pd.nn.Sequential( + pd.nn.Linear(2, 128), + pd.nn.Tanh(), + pd.nn.Linear(128, 128), + pd.nn.Tanh(), + pd.nn.Linear(128, out_dims), + ) + + 
def forward(self, Bin, Tin): + return self.fnn_B(Bin), self.fnn_T(Tin) diff --git a/jointContribution/PINO/.gitmodules b/jointContribution/PINO/.gitmodules index dd9afc3943..d730c67073 100644 --- a/jointContribution/PINO/.gitmodules +++ b/jointContribution/PINO/.gitmodules @@ -1,3 +1,3 @@ -[submodule "jointContribution/graphGalerkin/pycamotk"] - path = jointContribution/graphGalerkin/pycamotk - url = https://github.com/zlynna/pycamotk.git +[submodule "jointContribution/graphGalerkin/pycamotk"] + path = jointContribution/graphGalerkin/pycamotk + url = https://github.com/zlynna/pycamotk.git diff --git a/jointContribution/PINO/PINO_paddle/configs/operator/Re500-1_8-800-PINO-s.yaml b/jointContribution/PINO/PINO_paddle/configs/operator/Re500-1_8-800-PINO-s.yaml index 219df3fdec..14daa5ea13 100644 --- a/jointContribution/PINO/PINO_paddle/configs/operator/Re500-1_8-800-PINO-s.yaml +++ b/jointContribution/PINO/PINO_paddle/configs/operator/Re500-1_8-800-PINO-s.yaml @@ -1,48 +1,48 @@ -data: - name: KF - paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy'] - Re: 500 - offset: 0 - total_num: 300 - raw_res: [256, 256, 513] - n_data_samples: 100 - data_res: [64, 64, 257] # resolution in 1 second - pde_res: [256, 256, 513] # resolution in 1 second - a_offset: 0 - n_a_samples: 275 - testoffset: 275 - n_test_samples: 25 - t_duration: 0.125 - shuffle: True - -model: - layers: [64, 64, 64, 64, 64] - modes1: [12, 12, 12, 12] - modes2: [12, 12, 12, 12] - modes3: [12, 12, 12, 12] - fc_dim: 128 - act: gelu - pad_ratio: [0.0, 0.125] - -train: - batchsize: 2 - start_iter: 0 - num_iter: 200_001 - milestones: [20_000, 60_000, 120_000] - base_lr: 0.001 - scheduler_gamma: 0.5 - ic_loss: 10.0 - f_loss: 1.0 - xy_loss: 10.0 - save_step: 5000 - eval_step: 5000 - -test: - batchsize: 1 - data_res: [256, 256, 513] - -log: - logdir: Re500-1_8s-800-PINO-s - entity: hzzheng-pino - project: PINO-KF-Re500 - group: Re500-1_8s-800-PINO-s +data: + name: KF + paths: ['/raid/hongkai/NS-Re500_T300_id0-shuffle.npy'] + Re: 500 + offset: 0 + total_num: 300 + raw_res: [256, 256, 513] + n_data_samples: 100 + data_res: [64, 64, 257] # resolution in 1 second + pde_res: [256, 256, 513] # resolution in 1 second + a_offset: 0 + n_a_samples: 275 + testoffset: 275 + n_test_samples: 25 + t_duration: 0.125 + shuffle: True + +model: + layers: [64, 64, 64, 64, 64] + modes1: [12, 12, 12, 12] + modes2: [12, 12, 12, 12] + modes3: [12, 12, 12, 12] + fc_dim: 128 + act: gelu + pad_ratio: [0.0, 0.125] + +train: + batchsize: 2 + start_iter: 0 + num_iter: 200_001 + milestones: [20_000, 60_000, 120_000] + base_lr: 0.001 + scheduler_gamma: 0.5 + ic_loss: 10.0 + f_loss: 1.0 + xy_loss: 10.0 + save_step: 5000 + eval_step: 5000 + +test: + batchsize: 1 + data_res: [256, 256, 513] + +log: + logdir: Re500-1_8s-800-PINO-s + entity: hzzheng-pino + project: PINO-KF-Re500 + group: Re500-1_8s-800-PINO-s diff --git a/jointContribution/PINO/PINO_paddle/configs/pretrain/Darcy-pretrain.yaml b/jointContribution/PINO/PINO_paddle/configs/pretrain/Darcy-pretrain.yaml index 7ebe16071f..b0a4e1f17d 100644 --- a/jointContribution/PINO/PINO_paddle/configs/pretrain/Darcy-pretrain.yaml +++ b/jointContribution/PINO/PINO_paddle/configs/pretrain/Darcy-pretrain.yaml @@ -1,34 +1,34 @@ -data: - name: 'Darcy' - datapath: '../data/piececonst_r421_N1024_smooth1.mat' - total_num: 1024 - offset: 0 - n_sample: 1000 - nx: 421 - sub: 7 - pde_sub: 2 - -model: - layers: [64, 64, 64, 64, 64] - modes1: [20, 20, 20, 20] - modes2: [20, 20, 20, 20] - fc_dim: 128 - act: gelu - -train: - batchsize: 20 - 
epochs: 300 - milestones: [100, 150, 200] - base_lr: 0.001 - scheduler_gamma: 0.5 - f_loss: 1.0 - xy_loss: 5.0 - save_dir: 'darcy-FDM' - save_name: 'darcy-pretrain-pino.pt' - -log: - project: 'PINO-Darcy-pretrain' - group: 'gelu-pino' - entity: hzzheng-pino - - +data: + name: 'Darcy' + datapath: '../data/piececonst_r421_N1024_smooth1.mat' + total_num: 1024 + offset: 0 + n_sample: 1000 + nx: 421 + sub: 7 + pde_sub: 2 + +model: + layers: [64, 64, 64, 64, 64] + modes1: [20, 20, 20, 20] + modes2: [20, 20, 20, 20] + fc_dim: 128 + act: gelu + +train: + batchsize: 20 + epochs: 300 + milestones: [100, 150, 200] + base_lr: 0.001 + scheduler_gamma: 0.5 + f_loss: 1.0 + xy_loss: 5.0 + save_dir: 'darcy-FDM' + save_name: 'darcy-pretrain-pino.pt' + +log: + project: 'PINO-Darcy-pretrain' + group: 'gelu-pino' + entity: hzzheng-pino + + diff --git a/jointContribution/PINO/PINO_paddle/configs/pretrain/burgers-pretrain.yaml b/jointContribution/PINO/PINO_paddle/configs/pretrain/burgers-pretrain.yaml index 7870982f5f..4b3b1af2fd 100644 --- a/jointContribution/PINO/PINO_paddle/configs/pretrain/burgers-pretrain.yaml +++ b/jointContribution/PINO/PINO_paddle/configs/pretrain/burgers-pretrain.yaml @@ -1,36 +1,36 @@ -data: - name: Burgers - datapath: '../data/burgers_pino.mat' - total_num: 1000 - offset: 0 - n_sample: 800 - nx: 128 - nt: 100 - sub: 1 - sub_t: 1 - -model: - layers: [16, 24, 24, 32, 32] - modes1: [15, 12, 9, 9] - modes2: [15, 12, 9, 9] - fc_dim: 128 - act: gelu - num_pad: 4 - -train: - batchsize: 20 - epochs: 500 - milestones: [150, 300, 450] - base_lr: 0.001 - scheduler_gamma: 0.5 - ic_loss: 10.0 - f_loss: 1.0 - xy_loss: 0.0 - save_dir: 'burgers-FDM' - save_name: 'burgers-pretrain-eqn.pt' - -log: - project: PINO-burgers-pretrain - group: gelu-eqn - entity: hzzheng-pino - +data: + name: Burgers + datapath: '../data/burgers_pino.mat' + total_num: 1000 + offset: 0 + n_sample: 800 + nx: 128 + nt: 100 + sub: 1 + sub_t: 1 + +model: + layers: [16, 24, 24, 32, 32] + modes1: [15, 12, 9, 9] + modes2: [15, 12, 9, 9] + fc_dim: 128 + act: gelu + num_pad: 4 + +train: + batchsize: 20 + epochs: 500 + milestones: [150, 300, 450] + base_lr: 0.001 + scheduler_gamma: 0.5 + ic_loss: 10.0 + f_loss: 1.0 + xy_loss: 0.0 + save_dir: 'burgers-FDM' + save_name: 'burgers-pretrain-eqn.pt' + +log: + project: PINO-burgers-pretrain + group: gelu-eqn + entity: hzzheng-pino + diff --git a/jointContribution/PINO/PINO_paddle/configs/test/burgers.yaml b/jointContribution/PINO/PINO_paddle/configs/test/burgers.yaml index 716c8847fe..8e7a280feb 100644 --- a/jointContribution/PINO/PINO_paddle/configs/test/burgers.yaml +++ b/jointContribution/PINO/PINO_paddle/configs/test/burgers.yaml @@ -1,27 +1,27 @@ -data: - name: 'Darcy' - datapath: '../data/burgers.mat' - total_num: 1000 - offset: 800 - n_sample: 200 - nx: 128 - nt: 100 - sub: 1 - sub_t: 1 - -model: - layers: [16, 24, 24, 32, 32] - modes1: [15, 12, 9, 9] - modes2: [15, 12, 9, 9] - fc_dim: 128 - act: gelu - -test: - batchsize: 1 - ckpt: 'checkpoints/burgers-FDM/burgers-pretrain-eqn.pt' - -log: - project: 'PINO-burgers-test' - group: 'gelu-test' - - +data: + name: 'Darcy' + datapath: '../data/burgers.mat' + total_num: 1000 + offset: 800 + n_sample: 200 + nx: 128 + nt: 100 + sub: 1 + sub_t: 1 + +model: + layers: [16, 24, 24, 32, 32] + modes1: [15, 12, 9, 9] + modes2: [15, 12, 9, 9] + fc_dim: 128 + act: gelu + +test: + batchsize: 1 + ckpt: 'checkpoints/burgers-FDM/burgers-pretrain-eqn.pt' + +log: + project: 'PINO-burgers-test' + group: 'gelu-test' + + diff --git 
a/jointContribution/PINO/PINO_paddle/configs/test/darcy.yaml b/jointContribution/PINO/PINO_paddle/configs/test/darcy.yaml index 9822de6122..922031dc02 100644 --- a/jointContribution/PINO/PINO_paddle/configs/test/darcy.yaml +++ b/jointContribution/PINO/PINO_paddle/configs/test/darcy.yaml @@ -1,26 +1,26 @@ -data: - name: 'Darcy' - datapath: '../data/piececonst_r421_N1024_smooth2.mat' - total_num: 1000 - offset: 0 - n_sample: 500 - nx: 421 - sub: 7 - shuffle: False - -model: - layers: [64, 64, 64, 64, 64] - modes1: [20, 20, 20, 20] - modes2: [20, 20, 20, 20] - fc_dim: 128 - act: gelu - -test: - batchsize: 1 - ckpt: 'checkpoints/darcy-FDM/darcy-pretrain-eqn.pt' - -log: - project: 'PINO-Darcy' - group: 'default' - - +data: + name: 'Darcy' + datapath: '../data/piececonst_r421_N1024_smooth2.mat' + total_num: 1000 + offset: 0 + n_sample: 500 + nx: 421 + sub: 7 + shuffle: False + +model: + layers: [64, 64, 64, 64, 64] + modes1: [20, 20, 20, 20] + modes2: [20, 20, 20, 20] + fc_dim: 128 + act: gelu + +test: + batchsize: 1 + ckpt: 'checkpoints/darcy-FDM/darcy-pretrain-eqn.pt' + +log: + project: 'PINO-Darcy' + group: 'default' + + diff --git a/jointContribution/PINO/PINO_paddle/download_data.py b/jointContribution/PINO/PINO_paddle/download_data.py index 5651617381..32ea798b15 100644 --- a/jointContribution/PINO/PINO_paddle/download_data.py +++ b/jointContribution/PINO/PINO_paddle/download_data.py @@ -1,40 +1,44 @@ -import os -from argparse import ArgumentParser -import requests -from tqdm import tqdm - -_url_dict = { - 'NS-T4000': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fft_Re500_T4000.npy', - 'NS-Re500Part0': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part0.npy', - 'NS-Re500Part1': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part1.npy', - 'NS-Re500Part2': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part2.npy', - 'NS-Re100Part0': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re100_T128_part0.npy', - 'burgers': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/burgers_pino.mat', - 'NS-Re500_T300_id0': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/NS-Re500_T300_id0.npy', - 'NS-Re500_T300_id0-shuffle': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS-Re500_T300_id0-shuffle.npy', - 'darcy-train': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/piececonst_r421_N1024_smooth1.mat', - 'Re500-1_8s-800-pino-140k': 'https://hkzdata.s3.us-west-2.amazonaws.com/PINO/checkpoints/Re500-1_8s-800-PINO-140000.pt', -} - -def download_file(url, file_path): - print('Start downloading...') - with requests.get(url, stream=True) as r: - r.raise_for_status() - with open(file_path, 'wb') as f: - for chunk in tqdm(r.iter_content(chunk_size=256 * 1024 * 1024)): - f.write(chunk) - print('Complete') - -def main(args): - url = _url_dict[args.name] - file_name = url.split('/')[-1] - os.makedirs(args.outdir, exist_ok=True) - file_path = os.path.join(args.outdir, file_name) - download_file(url, file_path) - -if __name__ == '__main__': - parser = ArgumentParser(description='Parser for downloading assets') - parser.add_argument('--name', type=str, default='NS-T4000') - parser.add_argument('--outdir', type=str, default='../data') - args = parser.parse_args() - main(args) \ No newline at end of file +import os +from argparse import ArgumentParser + +import requests +from tqdm import tqdm + +_url_dict = { + "NS-T4000": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fft_Re500_T4000.npy", + 
"NS-Re500Part0": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part0.npy", + "NS-Re500Part1": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part1.npy", + "NS-Re500Part2": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re500_T128_part2.npy", + "NS-Re100Part0": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS_fine_Re100_T128_part0.npy", + "burgers": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/burgers_pino.mat", + "NS-Re500_T300_id0": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/NS-Re500_T300_id0.npy", + "NS-Re500_T300_id0-shuffle": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/NS-Re500_T300_id0-shuffle.npy", + "darcy-train": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/data/piececonst_r421_N1024_smooth1.mat", + "Re500-1_8s-800-pino-140k": "https://hkzdata.s3.us-west-2.amazonaws.com/PINO/checkpoints/Re500-1_8s-800-PINO-140000.pt", +} + + +def download_file(url, file_path): + print("Start downloading...") + with requests.get(url, stream=True) as r: + r.raise_for_status() + with open(file_path, "wb") as f: + for chunk in tqdm(r.iter_content(chunk_size=256 * 1024 * 1024)): + f.write(chunk) + print("Complete") + + +def main(args): + url = _url_dict[args.name] + file_name = url.split("/")[-1] + os.makedirs(args.outdir, exist_ok=True) + file_path = os.path.join(args.outdir, file_name) + download_file(url, file_path) + + +if __name__ == "__main__": + parser = ArgumentParser(description="Parser for downloading assets") + parser.add_argument("--name", type=str, default="NS-T4000") + parser.add_argument("--outdir", type=str, default="../data") + args = parser.parse_args() + main(args) diff --git a/jointContribution/PINO/PINO_paddle/eval_operator.py b/jointContribution/PINO/PINO_paddle/eval_operator.py index 40991f86c9..e545babcf9 100644 --- a/jointContribution/PINO/PINO_paddle/eval_operator.py +++ b/jointContribution/PINO/PINO_paddle/eval_operator.py @@ -1,78 +1,94 @@ -import yaml - -import paddle -from paddle.io import DataLoader -from models import FNO3d, FNO2d -from train_utils import NSLoader, get_forcing, DarcyFlow - -from train_utils.eval_3d import eval_ns -from train_utils.eval_2d import eval_darcy - -from argparse import ArgumentParser - -def test_3d(config): - device = 0 if paddle.cuda.is_available() else 'cpu' - data_config = config['data'] - loader = NSLoader(datapath1=data_config['datapath'], - nx=data_config['nx'], nt=data_config['nt'], - sub=data_config['sub'], sub_t=data_config['sub_t'], - N=data_config['total_num'], - t_interval=data_config['time_interval']) - - eval_loader = loader.make_loader(n_sample=data_config['n_sample'], - batch_size=config['test']['batchsize'], - start=data_config['offset'], - train=data_config['shuffle']) - model = FNO3d(modes1=config['model']['modes1'], - modes2=config['model']['modes2'], - modes3=config['model']['modes3'], - fc_dim=config['model']['fc_dim'], - layers=config['model']['layers']).to(device) - - if 'ckpt' in config['test']: - ckpt_path = config['test']['ckpt'] - ckpt = paddle.load(ckpt_path) - model.load_state_dict(ckpt['model']) - print('Weights loaded from %s' % ckpt_path) - print(f'Resolution : {loader.S}x{loader.S}x{loader.T}') - forcing = get_forcing(loader.S).to(device) - eval_ns(model, - loader, - eval_loader, - forcing, - config, - device=device) - -def test_2d(config): - data_config = config['data'] - dataset = DarcyFlow(data_config['datapath'], - nx=data_config['nx'], sub=data_config['sub'], - offset=data_config['offset'], 
num=data_config['n_sample']) - dataloader = DataLoader(dataset, batch_size=config['test']['batchsize'], shuffle=False) - model = FNO2d(modes1=config['model']['modes1'], - modes2=config['model']['modes2'], - fc_dim=config['model']['fc_dim'], - layers=config['model']['layers'], - act=config['model']['act']) - # Load from checkpoint - if 'ckpt' in config['test']: - ckpt_path = config['test']['ckpt'] - ckpt = paddle.load(ckpt_path) - model.set_dict(ckpt['model']) - print('Weights loaded from %s' % ckpt_path) - eval_darcy(model, dataloader, config) - -if __name__ == '__main__': - parser = ArgumentParser(description='Basic paser') - parser.add_argument('--config_path', type=str, help='Path to the configuration file') - parser.add_argument('--log', action='store_true', help='Turn on the wandb') - options = parser.parse_args() - config_file = options.config_path - with open(config_file, 'r') as stream: - config = yaml.load(stream, yaml.FullLoader) - - if 'name' in config['data'] and config['data']['name'] == 'Darcy': - test_2d(config) - else: - test_3d(config) - +from argparse import ArgumentParser + +import paddle +import yaml +from models import FNO2d +from models import FNO3d +from paddle.io import DataLoader +from train_utils import DarcyFlow +from train_utils import NSLoader +from train_utils import get_forcing +from train_utils.eval_2d import eval_darcy +from train_utils.eval_3d import eval_ns + + +def test_3d(config): + device = 0 if paddle.cuda.is_available() else "cpu" + data_config = config["data"] + loader = NSLoader( + datapath1=data_config["datapath"], + nx=data_config["nx"], + nt=data_config["nt"], + sub=data_config["sub"], + sub_t=data_config["sub_t"], + N=data_config["total_num"], + t_interval=data_config["time_interval"], + ) + + eval_loader = loader.make_loader( + n_sample=data_config["n_sample"], + batch_size=config["test"]["batchsize"], + start=data_config["offset"], + train=data_config["shuffle"], + ) + model = FNO3d( + modes1=config["model"]["modes1"], + modes2=config["model"]["modes2"], + modes3=config["model"]["modes3"], + fc_dim=config["model"]["fc_dim"], + layers=config["model"]["layers"], + ).to(device) + + if "ckpt" in config["test"]: + ckpt_path = config["test"]["ckpt"] + ckpt = paddle.load(ckpt_path) + model.load_state_dict(ckpt["model"]) + print("Weights loaded from %s" % ckpt_path) + print(f"Resolution : {loader.S}x{loader.S}x{loader.T}") + forcing = get_forcing(loader.S).to(device) + eval_ns(model, loader, eval_loader, forcing, config, device=device) + + +def test_2d(config): + data_config = config["data"] + dataset = DarcyFlow( + data_config["datapath"], + nx=data_config["nx"], + sub=data_config["sub"], + offset=data_config["offset"], + num=data_config["n_sample"], + ) + dataloader = DataLoader( + dataset, batch_size=config["test"]["batchsize"], shuffle=False + ) + model = FNO2d( + modes1=config["model"]["modes1"], + modes2=config["model"]["modes2"], + fc_dim=config["model"]["fc_dim"], + layers=config["model"]["layers"], + act=config["model"]["act"], + ) + # Load from checkpoint + if "ckpt" in config["test"]: + ckpt_path = config["test"]["ckpt"] + ckpt = paddle.load(ckpt_path) + model.set_dict(ckpt["model"]) + print("Weights loaded from %s" % ckpt_path) + eval_darcy(model, dataloader, config) + + +if __name__ == "__main__": + parser = ArgumentParser(description="Basic paser") + parser.add_argument( + "--config_path", type=str, help="Path to the configuration file" + ) + parser.add_argument("--log", action="store_true", help="Turn on the wandb") + options = 
parser.parse_args() + config_file = options.config_path + with open(config_file, "r") as stream: + config = yaml.load(stream, yaml.FullLoader) + + if "name" in config["data"] and config["data"]["name"] == "Darcy": + test_2d(config) + else: + test_3d(config) diff --git a/jointContribution/PINO/PINO_paddle/models/FCN.py b/jointContribution/PINO/PINO_paddle/models/FCN.py index d429d0f43f..bd6e58ad42 100644 --- a/jointContribution/PINO/PINO_paddle/models/FCN.py +++ b/jointContribution/PINO/PINO_paddle/models/FCN.py @@ -1,59 +1,60 @@ -import paddle.nn as nn - -def linear_block(in_channel, out_channel): - block = nn.Sequential( - nn.Linear(in_channel, out_channel), - nn.Tanh() - ) - return block - -class FCNet(nn.Layer): - ''' - Fully connected layers with Tanh as nonlinearity - Reproduced from PINNs Burger equation - ''' - - def __init__(self, layers=[2, 10, 1]): - super(FCNet, self).__init__() - - fc_list = [linear_block(in_size, out_size) - for in_size, out_size in zip(layers, layers[1:-1])] - fc_list.append(nn.Linear(layers[-2], layers[-1])) - self.fc = nn.Sequential(*fc_list) - - def forward(self, x): - return self.fc(x) - -class DenseNet(nn.Layer): - def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False): - super(DenseNet, self).__init__() - - self.n_layers = len(layers) - 1 - assert self.n_layers >= 1 - if isinstance(nonlinearity, str): - if nonlinearity == 'tanh': - nonlinearity = nn.Tanh - elif nonlinearity == 'relu': - nonlinearity == nn.ReLU - else: - raise ValueError(f'{nonlinearity} is not supported') - self.layers = nn.ModuleList() - - for j in range(self.n_layers): - self.layers.append(nn.Linear(layers[j], layers[j+1])) - - if j != self.n_layers - 1: - if normalize: - self.layers.append(nn.BatchNorm1d(layers[j+1])) - - self.layers.append(nonlinearity()) - - if out_nonlinearity is not None: - self.layers.append(out_nonlinearity()) - - def forward(self, x): - for _, l in enumerate(self.layers): - x = l(x) - - return x - +import paddle.nn as nn + + +def linear_block(in_channel, out_channel): + block = nn.Sequential(nn.Linear(in_channel, out_channel), nn.Tanh()) + return block + + +class FCNet(nn.Layer): + """ + Fully connected layers with Tanh as nonlinearity + Reproduced from PINNs Burger equation + """ + + def __init__(self, layers=[2, 10, 1]): + super(FCNet, self).__init__() + + fc_list = [ + linear_block(in_size, out_size) + for in_size, out_size in zip(layers, layers[1:-1]) + ] + fc_list.append(nn.Linear(layers[-2], layers[-1])) + self.fc = nn.Sequential(*fc_list) + + def forward(self, x): + return self.fc(x) + + +class DenseNet(nn.Layer): + def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False): + super(DenseNet, self).__init__() + + self.n_layers = len(layers) - 1 + assert self.n_layers >= 1 + if isinstance(nonlinearity, str): + if nonlinearity == "tanh": + nonlinearity = nn.Tanh + elif nonlinearity == "relu": + nonlinearity == nn.ReLU + else: + raise ValueError(f"{nonlinearity} is not supported") + self.layers = nn.ModuleList() + + for j in range(self.n_layers): + self.layers.append(nn.Linear(layers[j], layers[j + 1])) + + if j != self.n_layers - 1: + if normalize: + self.layers.append(nn.BatchNorm1d(layers[j + 1])) + + self.layers.append(nonlinearity()) + + if out_nonlinearity is not None: + self.layers.append(out_nonlinearity()) + + def forward(self, x): + for _, l in enumerate(self.layers): + x = l(x) + + return x diff --git a/jointContribution/PINO/PINO_paddle/models/FNO_blocks.py 
b/jointContribution/PINO/PINO_paddle/models/FNO_blocks.py index c158f4f217..cdca68f194 100644 --- a/jointContribution/PINO/PINO_paddle/models/FNO_blocks.py +++ b/jointContribution/PINO/PINO_paddle/models/FNO_blocks.py @@ -1,689 +1,707 @@ -import paddle -import paddle.nn as nn -import itertools - -einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - -def _contract_dense(x, weight, separable=False): - # order = tl.ndim(x) - order = len(x.shape) - # batch-size, in_channels, x, y... - x_syms = list(einsum_symbols[:order]) - - # in_channels, out_channels, x, y... - weight_syms = list(x_syms[1:]) # no batch-size - - # batch-size, out_channels, x, y... - if separable: - out_syms = [x_syms[0]] + list(weight_syms) - else: - weight_syms.insert(1, einsum_symbols[order]) # outputs - out_syms = list(weight_syms) - out_syms[0] = x_syms[0] - - eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) - # For the darcy flow, the only einsum is abcd,becd->aecd, where x and weights are shaped [32,32,8,8] - if not isinstance(weight, paddle.Tensor): - weight = paddle.to_tensor(weight) - - return paddle.einsum(eq, x, weight) - -def _contract_dense_trick(x, weight, separable=False): - # the same as above function, but do the complex multiplication manually to avoid the einsum bug in paddle - weight_real = weight.data.real() - weight_imag = weight.data.imag() - - order = len(x.shape) - # batch-size, in_channels, x, y... - x_syms = list(einsum_symbols[:order]) - - # in_channels, out_channels, x, y... - weight_syms = list(x_syms[1:]) # no batch-size - - # batch-size, out_channels, x, y... - if separable: - out_syms = [x_syms[0]] + list(weight_syms) - else: - weight_syms.insert(1, einsum_symbols[order]) # outputs - out_syms = list(weight_syms) - out_syms[0] = x_syms[0] - - eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) - - o1_real = paddle.einsum(eq, x.real(), weight_real) - paddle.einsum( - eq, x.imag(), weight_imag - ) - o1_imag = paddle.einsum(eq, x.imag(), weight_real) + paddle.einsum( - eq, x.real(), weight_imag - ) - x = paddle.complex(o1_real, o1_imag) - return x - -def _contract_dense_separable(x, weight, separable=True): - if separable == False: - raise ValueError("This function is only for separable=True") - return x * weight - -def _contract_cp(x, cp_weight, separable=False): - # order = tl.ndim(x) - order = len(x.shape) - - x_syms = str(einsum_symbols[:order]) - rank_sym = einsum_symbols[order] - out_sym = einsum_symbols[order + 1] - out_syms = list(x_syms) - if separable: - factor_syms = [einsum_symbols[1] + rank_sym] # in only - else: - out_syms[1] = out_sym - factor_syms = [einsum_symbols[1] + rank_sym, out_sym + rank_sym] # in, out - factor_syms += [xs + rank_sym for xs in x_syms[2:]] # x, y, ... - eq = ( - x_syms + "," + rank_sym + "," + ",".join(factor_syms) + "->" + "".join(out_syms) - ) - - return paddle.einsum(eq, x, cp_weight.weights, *cp_weight.factors) - -def _contract_tucker(x, tucker_weight, separable=False): - # order = tl.ndim(x) - order = len(x.shape) - - x_syms = str(einsum_symbols[:order]) - out_sym = einsum_symbols[order] - out_syms = list(x_syms) - if separable: - core_syms = einsum_symbols[order + 1 : 2 * order] - factor_syms = [xs + rs for (xs, rs) in zip(x_syms[1:], core_syms)] # x, y, ... 
- - else: - core_syms = einsum_symbols[order + 1 : 2 * order + 1] - out_syms[1] = out_sym - factor_syms = [ - einsum_symbols[1] + core_syms[0], - out_sym + core_syms[1], - ] # out, in - factor_syms += [ - xs + rs for (xs, rs) in zip(x_syms[2:], core_syms[2:]) - ] # x, y, ... - - eq = ( - x_syms - + "," - + core_syms - + "," - + ",".join(factor_syms) - + "->" - + "".join(out_syms) - ) - print(eq) # 'abcd,fghi,bf,eg,ch,di->aecd' - return paddle.einsum(eq, x, tucker_weight.core, *tucker_weight.factors) - -def _contract_tt(x, tt_weight, separable=False): - # order = tl.ndim(x) - order = len(x.shape) - - x_syms = list(einsum_symbols[:order]) - weight_syms = list(x_syms[1:]) # no batch-size - if not separable: - weight_syms.insert(1, einsum_symbols[order]) # outputs - out_syms = list(weight_syms) - out_syms[0] = x_syms[0] - else: - out_syms = list(x_syms) - rank_syms = list(einsum_symbols[order + 1 :]) - tt_syms = [] - for i, s in enumerate(weight_syms): - tt_syms.append([rank_syms[i], s, rank_syms[i + 1]]) - eq = ( - "".join(x_syms) - + "," - + ",".join("".join(f) for f in tt_syms) - + "->" - + "".join(out_syms) - ) - - return paddle.einsum(eq, x, *tt_weight.factors) - -def get_contract_fun(weight, implementation="reconstructed", separable=False): - """Generic ND implementation of Fourier Spectral Conv contraction - - Parameters - ---------- - weight : tensorl-paddle's FactorizedTensor - implementation : {'reconstructed', 'factorized'}, default is 'reconstructed' - whether to reconstruct the weight and do a forward pass (reconstructed) - or contract directly the factors of the factorized weight with the input (factorized) - - Returns - ------- - function : (x, weight) -> x * weight in Fourier space - """ - if implementation == "reconstructed": - if separable: - print("SEPARABLE") - return _contract_dense_separable - else: - return _contract_dense - elif implementation == "factorized": - if isinstance(weight, paddle.Tensor): - return _contract_dense_trick - else: - raise ValueError( - f"Got unexpected weight type of class {weight.__class__.__name__}" - ) - else: - raise ValueError( - f'Got implementation = {implementation}, expected "reconstructed" or "factorized"' - ) - -class FactorizedTensor(nn.Layer): - def __init__(self, shape, init_scale): - super().__init__() - self.shape = shape - self.init_scale = init_scale - self.real = self.create_parameter( - shape=shape, default_initializer=nn.initializer.XavierNormal() - ) - self.imag = self.create_parameter( - shape=shape, default_initializer=nn.initializer.XavierNormal() - ) - - def __repr__(self): - return f"FactorizedTensor(shape={self.shape})" - - @property - def data(self): - return paddle.complex(self.real, self.imag) - -class FactorizedSpectralConv(nn.Layer): - """Generic N-Dimensional Fourier Neural Operator - - Parameters - ---------- - in_channels : int, optional - Number of input channels - out_channels : int, optional - Number of output channels - kept_modes : int tuple - total number of modes to keep in Fourier Layer, along each dim - separable : bool, default is True - scale : float or 'auto', default is 'auto' - scale to use for the init - n_layers : int, optional - Number of Fourier Layers, by default 4 - joint_factorization : bool, optional - Whether all the Fourier Layers should be parametrized by a single tensor (vs one per layer), by default False - rank : float or rank, optional - Rank of the tensor factorization of the Fourier weights, by default 1.0 - factorization : str, {'tucker', 'cp', 'tt'}, optional - Tensor 
factorization of the parameters weight to use, by default 'tucker' - fixed_rank_modes : bool, optional - Modes to not factorize, by default False - fft_norm : str, optional - by default 'forward' - implementation : {'factorized', 'reconstructed'}, optional, default is 'factorized' - If factorization is not None, forward mode to use:: - * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass - * `factorized` : the input is directly contracted with the factors of the decomposition - decomposition_kwargs : dict, optional, default is {} - Optionaly additional parameters to pass to the tensor decomposition - """ - - def __init__( - self, - in_channels, - out_channels, - n_modes, - n_layers=1, - scale="auto", - separable=False, - fft_norm="backward", - bias=False, - implementation="reconstructed", - joint_factorization=False, - rank=0.5, - factorization="cp", - fixed_rank_modes=False, - decomposition_kwargs=dict(), - ): - super().__init__() - - self.in_channels = in_channels - self.out_channels = out_channels - self.order = len(n_modes) - - # We index quadrands only - # n_modes is the total number of modes kept along each dimension - # half_modes is half of that except in the last mode, correponding to the number of modes to keep in *each* quadrant for each dim - if isinstance(n_modes, int): - n_modes = [n_modes] - self.n_modes = n_modes - half_modes = [m // 2 for m in n_modes] - self.half_modes = n_modes - - self.rank = rank - self.factorization = factorization - self.n_layers = n_layers - self.implementation = implementation - - if scale == "auto": - scale = 1 / (in_channels * out_channels) - - if isinstance(fixed_rank_modes, bool): - if fixed_rank_modes: - # If bool, keep the number of layers fixed - fixed_rank_modes = [0] - else: - fixed_rank_modes = None - - self.mlp = None - - self.fft_norm = fft_norm - - # Make sure we are using a Complex Factorized Tensor - if factorization is None: - factorization = "Dense" # No factorization - if not factorization.lower().startswith("complex"): - factorization = f"Complex{factorization}" - - if separable: - if in_channels != out_channels: - raise ValueError( - "To use separable Fourier Conv, in_channels must be equal to out_channels, ", - f"but got in_channels = {in_channels} and out_channels = {out_channels}", - ) - weight_shape = (in_channels, *self.half_modes) - else: - weight_shape = (in_channels, out_channels, *self.half_modes) - self.separable = separable - - if joint_factorization: - self.weight = paddle.create_parameter( - shape=((2 ** (self.order - 1)) * n_layers, *weight_shape), - dtype="float32", - ) - else: - self.weight = nn.LayerList( - [ - FactorizedTensor(weight_shape, init_scale=scale) - for _ in range((2 ** (self.order - 1)) * n_layers) - ] - ) - - self._contract = get_contract_fun( - self.weight[0].data, implementation=implementation, separable=separable - ) - - if bias: - self.bias = paddle.create_parameter( - shape=((n_layers, self.out_channels) + (1,) * self.order), - dtype="float32", - ) - else: - self.bias = None - - def forward(self, x, indices=0): - """Generic forward pass for the Factorized Spectral Conv - - Parameters - ---------- - x : paddle.Tensor - input activation of size (batch_size, channels, d1, ..., dN) - indices : int, default is 0 - if joint_factorization, index of the layers for n_layers > 1 - - Returns - ------- - tensorized_spectral_conv(x) - """ - batchsize, channels, *mode_sizes = x.shape - fft_size = list(mode_sizes) - fft_size[-1] = fft_size[-1] // 2 
+ 1 # Redundant last coefficient - - # Compute Fourier coeffcients - fft_dims = list(range(-self.order, 0)) - - # put x back in to real, as in paddle x.float() - x_float = paddle.cast(x, dtype="float32") - x = paddle.fft.rfftn(x_float, norm=self.fft_norm, axes=fft_dims) - - out_fft = paddle.zeros( - [batchsize, self.out_channels, *fft_size], dtype=paddle.complex64, - ) # [1,32,16,9], all zeros, complex - - # We contract all corners of the Fourier coefs - # Except for the last mode: there, we take all coefs as redundant modes were already removed - mode_indexing = [((None, m), (-m, None)) for m in self.half_modes[:-1]] + [ - ((None, self.half_modes[-1]),) - ] - - for i, boundaries in enumerate(itertools.product(*mode_indexing)): - # Keep all modes for first 2 modes (batch-size and channels) - idx_tuple = [slice(None), slice(None)] + [slice(*b) for b in boundaries] - - if len(idx_tuple) == 4: - out_fft[ - idx_tuple[0], idx_tuple[1], idx_tuple[2], idx_tuple[3] - ] = self._contract( - x[idx_tuple[0], idx_tuple[1], idx_tuple[2], idx_tuple[3]], - self.weight[indices + i].real, - self.weight[indices + i].imag, - separable=self.separable, - ) - elif len(idx_tuple) == 3: - out_fft[idx_tuple[0], idx_tuple[1], idx_tuple[2]] = self._contract( - x[idx_tuple[0], idx_tuple[1], idx_tuple[2]], - self.weight[indices + i].real, - self.weight[indices + i].imag, - separable=self.separable, - ) - else: - raise ValueError("Not implemented") - - x = paddle.fft.irfftn(out_fft, s=(mode_sizes), norm=self.fft_norm) - - if self.bias is not None: - x = x + self.bias[indices, ...] - - return x - - def get_conv(self, indices): - """Returns a sub-convolutional layer from the joint parametrize main-convolution - - The parametrization of sub-convolutional layers is shared with the main one. - """ - if self.n_layers == 1: - raise ValueError( - "A single convolution is parametrized, directly use the main class." - ) - - return SubConv2d(self, indices) - - def __getitem__(self, indices): - return self.get_conv(indices) - -class SubConv2d(nn.Layer): - """Class representing one of the convolutions from the mother joint factorized convolution - - Notes - ----- - This relies on the fact that nn.Parameters are not duplicated: - if the same nn.Parameter is assigned to multiple modules, they all point to the same data, - which is shared. 
- """ - - def __init__(self, main_conv, indices): - super().__init__() - self.main_conv = main_conv - self.indices = indices - - def forward(self, x): - return self.main_conv.forward(x, self.indices) - -class FactorizedSpectralConv1d(FactorizedSpectralConv): - def __init__( - self, - in_channels, - out_channels, - modes_height, - n_layers=1, - scale="auto", - separable=False, - fft_norm="backward", - bias=True, - implementation="reconstucted", - joint_factorization=False, - rank=0.5, - factorization="cp", - fixed_rank_modes=False, - decomposition_kwargs=dict(), - ): - super().__init__( - in_channels, - out_channels, - (modes_height,), - n_layers=n_layers, - scale=scale, - separable=separable, - fft_norm=fft_norm, - bias=bias, - implementation=implementation, - joint_factorization=joint_factorization, - rank=rank, - factorization=factorization, - fixed_rank_modes=fixed_rank_modes, - decomposition_kwargs=decomposition_kwargs, - ) - self.half_modes_height = self.half_modes[0] - - def forward(self, x, indices=0): - batchsize, channels, width = x.shape - - x = paddle.fft.rfft(x, norm=self.fft_norm) - - out_fft = paddle.zeros( - [batchsize, self.out_channels, width // 2 + 1], dtype=paddle.complex64, - ) - out_fft[:, :, : self.half_modes_height] = self._contract( - x[:, :, : self.half_modes_height], - self.weight[indices], - separable=self.separable, - ) - - x = paddle.fft.irfft(out_fft, n=width, norm=self.fft_norm) - - if self.bias is not None: - x = x + self.bias[indices, ...] - - return x - -class FactorizedSpectralConv2d(FactorizedSpectralConv): - def __init__( - self, - in_channels, - out_channels, - modes_height, - modes_width, - n_layers=1, - scale="auto", - separable=False, - fft_norm="backward", - bias=False, - implementation="factorized", - joint_factorization=False, - rank=0.5, - factorization="cp", - fixed_rank_modes=False, - decomposition_kwargs=dict(), - ): - super().__init__( - in_channels, - out_channels, - (modes_height, modes_width), - n_layers=n_layers, - scale=scale, - separable=separable, - fft_norm=fft_norm, - bias=bias, - implementation=implementation, - joint_factorization=joint_factorization, - rank=rank, - factorization=factorization, - fixed_rank_modes=fixed_rank_modes, - decomposition_kwargs=decomposition_kwargs, - ) - self.half_modes_height, self.half_modes_width = self.half_modes - - def forward(self, x, indices=0): - batchsize, channels, height, width = x.shape - - x_float = paddle.cast(x, dtype="float32") - x = paddle.fft.rfft2(x_float, norm=self.fft_norm) - - # The output will be of size (batch_size, self.out_channels, x.size(-2), x.size(-1)//2 + 1) - out_fft = paddle.zeros( - [batchsize, self.out_channels, height, width // 2 + 1], dtype=x.dtype, - ) - - # upper block (truncate high freq) - out_fft[ - :, :, : self.half_modes_height, : self.half_modes_width - ] = self._contract( - x[:, :, : self.half_modes_height, : self.half_modes_width], - self.weight[2 * indices], - separable=self.separable, - ) - # Lower block - out_fft[ - :, :, -self.half_modes_height :, : self.half_modes_width - ] = self._contract( - x[:, :, -self.half_modes_height :, : self.half_modes_width], - self.weight[2 * indices + 1], - separable=self.separable, - ) - - x = paddle.fft.irfft2( - out_fft, s=(height, width), axes=(-2, -1), norm=self.fft_norm - ) - - if self.bias is not None: - x = x + self.bias[indices, ...] 
- - return x - -class FactorizedSpectralConv3d(FactorizedSpectralConv): - def __init__( - self, - in_channels, - out_channels, - modes_height, - modes_width, - modes_depth, - n_layers=1, - scale="auto", - separable=False, - fft_norm="backward", - bias=True, - implementation="reconstucted", - joint_factorization=False, - rank=0.5, - factorization="cp", - fixed_rank_modes=False, - decomposition_kwargs=dict(), - ): - super().__init__( - in_channels, - out_channels, - (modes_height, modes_width, modes_depth), - n_layers=n_layers, - scale=scale, - separable=separable, - fft_norm=fft_norm, - bias=bias, - implementation=implementation, - joint_factorization=joint_factorization, - rank=rank, - factorization=factorization, - fixed_rank_modes=fixed_rank_modes, - decomposition_kwargs=decomposition_kwargs, - ) - ( - self.half_modes_height, - self.half_modes_width, - self.half_modes_depth, - ) = self.half_modes - - def forward(self, x, indices=0): - batchsize, channels, height, width, depth = x.shape - - x_float = paddle.cast(x, dtype="float32") - x = paddle.fft.rfftn(x_float, norm=self.fft_norm, dim=[-3, -2, -1]) - out_fft = paddle.zeros( - [batchsize, self.out_channels, height, width, depth // 2 + 1], - dtype="complex64", - ) - - out_fft[ - :, - :, - : self.half_modes_height, - : self.half_modes_width, - : self.half_modes_depth, - ] = self._contract( - x[ - :, - :, - : self.half_modes_height, - : self.half_modes_width, - : self.half_modes_depth, - ], - self.weight[indices + 0], - separable=self.separable, - ) - out_fft[ - :, - :, - : self.half_modes_height, - -self.half_modes_width :, - : self.half_modes_depth, - ] = self._contract( - x[ - :, - :, - : self.half_modes_height, - -self.half_modes_width :, - : self.half_modes_depth, - ], - self.weight[indices + 1], - separable=self.separable, - ) - out_fft[ - :, - :, - -self.half_modes_height :, - : self.half_modes_width, - : self.half_modes_depth, - ] = self._contract( - x[ - :, - :, - -self.half_modes_height :, - : self.half_modes_width, - : self.half_modes_depth, - ], - self.weight[indices + 2], - separable=self.separable, - ) - out_fft[ - :, - :, - -self.half_modes_height :, - -self.half_modes_width :, - : self.half_modes_depth, - ] = self._contract( - x[ - :, - :, - -self.half_modes_height :, - -self.half_modes_width :, - : self.half_modes_depth, - ], - self.weight[indices + 3], - separable=self.separable, - ) - - x = paddle.fft.irfftn(out_fft, s=(height, width, depth), norm=self.fft_norm) - - if self.bias is not None: - x = x + self.bias[indices, ...] - - return x - -if __name__ == "__main__": - # let x be a complex tensor of size (32, 32, 8, 8) - x = paddle.randn([32, 32, 8, 8]).astype("complex64") - # let weight be the same - weight = paddle.randn([32, 32, 8, 8]).astype("complex64") - separable = False - result = _contract_dense(x, weight, separable=separable) - print(result) +import itertools + +import paddle +import paddle.nn as nn + +einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + + +def _contract_dense(x, weight, separable=False): + # order = tl.ndim(x) + order = len(x.shape) + # batch-size, in_channels, x, y... + x_syms = list(einsum_symbols[:order]) + + # in_channels, out_channels, x, y... + weight_syms = list(x_syms[1:]) # no batch-size + + # batch-size, out_channels, x, y... 
+ if separable: + out_syms = [x_syms[0]] + list(weight_syms) + else: + weight_syms.insert(1, einsum_symbols[order]) # outputs + out_syms = list(weight_syms) + out_syms[0] = x_syms[0] + + eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) + # For the darcy flow, the only einsum is abcd,becd->aecd, where x and weights are shaped [32,32,8,8] + if not isinstance(weight, paddle.Tensor): + weight = paddle.to_tensor(weight) + + return paddle.einsum(eq, x, weight) + + +def _contract_dense_trick(x, weight, separable=False): + # the same as above function, but do the complex multiplication manually to avoid the einsum bug in paddle + weight_real = weight.data.real() + weight_imag = weight.data.imag() + + order = len(x.shape) + # batch-size, in_channels, x, y... + x_syms = list(einsum_symbols[:order]) + + # in_channels, out_channels, x, y... + weight_syms = list(x_syms[1:]) # no batch-size + + # batch-size, out_channels, x, y... + if separable: + out_syms = [x_syms[0]] + list(weight_syms) + else: + weight_syms.insert(1, einsum_symbols[order]) # outputs + out_syms = list(weight_syms) + out_syms[0] = x_syms[0] + + eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) + + o1_real = paddle.einsum(eq, x.real(), weight_real) - paddle.einsum( + eq, x.imag(), weight_imag + ) + o1_imag = paddle.einsum(eq, x.imag(), weight_real) + paddle.einsum( + eq, x.real(), weight_imag + ) + x = paddle.complex(o1_real, o1_imag) + return x + + +def _contract_dense_separable(x, weight, separable=True): + if separable == False: + raise ValueError("This function is only for separable=True") + return x * weight + + +def _contract_cp(x, cp_weight, separable=False): + # order = tl.ndim(x) + order = len(x.shape) + + x_syms = str(einsum_symbols[:order]) + rank_sym = einsum_symbols[order] + out_sym = einsum_symbols[order + 1] + out_syms = list(x_syms) + if separable: + factor_syms = [einsum_symbols[1] + rank_sym] # in only + else: + out_syms[1] = out_sym + factor_syms = [einsum_symbols[1] + rank_sym, out_sym + rank_sym] # in, out + factor_syms += [xs + rank_sym for xs in x_syms[2:]] # x, y, ... + eq = ( + x_syms + "," + rank_sym + "," + ",".join(factor_syms) + "->" + "".join(out_syms) + ) + + return paddle.einsum(eq, x, cp_weight.weights, *cp_weight.factors) + + +def _contract_tucker(x, tucker_weight, separable=False): + # order = tl.ndim(x) + order = len(x.shape) + + x_syms = str(einsum_symbols[:order]) + out_sym = einsum_symbols[order] + out_syms = list(x_syms) + if separable: + core_syms = einsum_symbols[order + 1 : 2 * order] + factor_syms = [xs + rs for (xs, rs) in zip(x_syms[1:], core_syms)] # x, y, ... + + else: + core_syms = einsum_symbols[order + 1 : 2 * order + 1] + out_syms[1] = out_sym + factor_syms = [ + einsum_symbols[1] + core_syms[0], + out_sym + core_syms[1], + ] # out, in + factor_syms += [ + xs + rs for (xs, rs) in zip(x_syms[2:], core_syms[2:]) + ] # x, y, ... 
+ + eq = ( + x_syms + + "," + + core_syms + + "," + + ",".join(factor_syms) + + "->" + + "".join(out_syms) + ) + print(eq) # 'abcd,fghi,bf,eg,ch,di->aecd' + return paddle.einsum(eq, x, tucker_weight.core, *tucker_weight.factors) + + +def _contract_tt(x, tt_weight, separable=False): + # order = tl.ndim(x) + order = len(x.shape) + + x_syms = list(einsum_symbols[:order]) + weight_syms = list(x_syms[1:]) # no batch-size + if not separable: + weight_syms.insert(1, einsum_symbols[order]) # outputs + out_syms = list(weight_syms) + out_syms[0] = x_syms[0] + else: + out_syms = list(x_syms) + rank_syms = list(einsum_symbols[order + 1 :]) + tt_syms = [] + for i, s in enumerate(weight_syms): + tt_syms.append([rank_syms[i], s, rank_syms[i + 1]]) + eq = ( + "".join(x_syms) + + "," + + ",".join("".join(f) for f in tt_syms) + + "->" + + "".join(out_syms) + ) + + return paddle.einsum(eq, x, *tt_weight.factors) + + +def get_contract_fun(weight, implementation="reconstructed", separable=False): + """Generic ND implementation of Fourier Spectral Conv contraction + + Parameters + ---------- + weight : tensorl-paddle's FactorizedTensor + implementation : {'reconstructed', 'factorized'}, default is 'reconstructed' + whether to reconstruct the weight and do a forward pass (reconstructed) + or contract directly the factors of the factorized weight with the input (factorized) + + Returns + ------- + function : (x, weight) -> x * weight in Fourier space + """ + if implementation == "reconstructed": + if separable: + print("SEPARABLE") + return _contract_dense_separable + else: + return _contract_dense + elif implementation == "factorized": + if isinstance(weight, paddle.Tensor): + return _contract_dense_trick + else: + raise ValueError( + f"Got unexpected weight type of class {weight.__class__.__name__}" + ) + else: + raise ValueError( + f'Got implementation = {implementation}, expected "reconstructed" or "factorized"' + ) + + +class FactorizedTensor(nn.Layer): + def __init__(self, shape, init_scale): + super().__init__() + self.shape = shape + self.init_scale = init_scale + self.real = self.create_parameter( + shape=shape, default_initializer=nn.initializer.XavierNormal() + ) + self.imag = self.create_parameter( + shape=shape, default_initializer=nn.initializer.XavierNormal() + ) + + def __repr__(self): + return f"FactorizedTensor(shape={self.shape})" + + @property + def data(self): + return paddle.complex(self.real, self.imag) + + +class FactorizedSpectralConv(nn.Layer): + """Generic N-Dimensional Fourier Neural Operator + + Parameters + ---------- + in_channels : int, optional + Number of input channels + out_channels : int, optional + Number of output channels + kept_modes : int tuple + total number of modes to keep in Fourier Layer, along each dim + separable : bool, default is True + scale : float or 'auto', default is 'auto' + scale to use for the init + n_layers : int, optional + Number of Fourier Layers, by default 4 + joint_factorization : bool, optional + Whether all the Fourier Layers should be parametrized by a single tensor (vs one per layer), by default False + rank : float or rank, optional + Rank of the tensor factorization of the Fourier weights, by default 1.0 + factorization : str, {'tucker', 'cp', 'tt'}, optional + Tensor factorization of the parameters weight to use, by default 'tucker' + fixed_rank_modes : bool, optional + Modes to not factorize, by default False + fft_norm : str, optional + by default 'forward' + implementation : {'factorized', 'reconstructed'}, optional, default is 
'factorized' + If factorization is not None, forward mode to use:: + * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass + * `factorized` : the input is directly contracted with the factors of the decomposition + decomposition_kwargs : dict, optional, default is {} + Optionaly additional parameters to pass to the tensor decomposition + """ + + def __init__( + self, + in_channels, + out_channels, + n_modes, + n_layers=1, + scale="auto", + separable=False, + fft_norm="backward", + bias=False, + implementation="reconstructed", + joint_factorization=False, + rank=0.5, + factorization="cp", + fixed_rank_modes=False, + decomposition_kwargs=dict(), + ): + super().__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.order = len(n_modes) + + # We index quadrands only + # n_modes is the total number of modes kept along each dimension + # half_modes is half of that except in the last mode, correponding to the number of modes to keep in *each* quadrant for each dim + if isinstance(n_modes, int): + n_modes = [n_modes] + self.n_modes = n_modes + half_modes = [m // 2 for m in n_modes] + self.half_modes = n_modes + + self.rank = rank + self.factorization = factorization + self.n_layers = n_layers + self.implementation = implementation + + if scale == "auto": + scale = 1 / (in_channels * out_channels) + + if isinstance(fixed_rank_modes, bool): + if fixed_rank_modes: + # If bool, keep the number of layers fixed + fixed_rank_modes = [0] + else: + fixed_rank_modes = None + + self.mlp = None + + self.fft_norm = fft_norm + + # Make sure we are using a Complex Factorized Tensor + if factorization is None: + factorization = "Dense" # No factorization + if not factorization.lower().startswith("complex"): + factorization = f"Complex{factorization}" + + if separable: + if in_channels != out_channels: + raise ValueError( + "To use separable Fourier Conv, in_channels must be equal to out_channels, ", + f"but got in_channels = {in_channels} and out_channels = {out_channels}", + ) + weight_shape = (in_channels, *self.half_modes) + else: + weight_shape = (in_channels, out_channels, *self.half_modes) + self.separable = separable + + if joint_factorization: + self.weight = paddle.create_parameter( + shape=((2 ** (self.order - 1)) * n_layers, *weight_shape), + dtype="float32", + ) + else: + self.weight = nn.LayerList( + [ + FactorizedTensor(weight_shape, init_scale=scale) + for _ in range((2 ** (self.order - 1)) * n_layers) + ] + ) + + self._contract = get_contract_fun( + self.weight[0].data, implementation=implementation, separable=separable + ) + + if bias: + self.bias = paddle.create_parameter( + shape=((n_layers, self.out_channels) + (1,) * self.order), + dtype="float32", + ) + else: + self.bias = None + + def forward(self, x, indices=0): + """Generic forward pass for the Factorized Spectral Conv + + Parameters + ---------- + x : paddle.Tensor + input activation of size (batch_size, channels, d1, ..., dN) + indices : int, default is 0 + if joint_factorization, index of the layers for n_layers > 1 + + Returns + ------- + tensorized_spectral_conv(x) + """ + batchsize, channels, *mode_sizes = x.shape + fft_size = list(mode_sizes) + fft_size[-1] = fft_size[-1] // 2 + 1 # Redundant last coefficient + + # Compute Fourier coeffcients + fft_dims = list(range(-self.order, 0)) + + # put x back in to real, as in paddle x.float() + x_float = paddle.cast(x, dtype="float32") + x = paddle.fft.rfftn(x_float, norm=self.fft_norm, 
axes=fft_dims) + + out_fft = paddle.zeros( + [batchsize, self.out_channels, *fft_size], + dtype=paddle.complex64, + ) # [1,32,16,9], all zeros, complex + + # We contract all corners of the Fourier coefs + # Except for the last mode: there, we take all coefs as redundant modes were already removed + mode_indexing = [((None, m), (-m, None)) for m in self.half_modes[:-1]] + [ + ((None, self.half_modes[-1]),) + ] + + for i, boundaries in enumerate(itertools.product(*mode_indexing)): + # Keep all modes for first 2 modes (batch-size and channels) + idx_tuple = [slice(None), slice(None)] + [slice(*b) for b in boundaries] + + if len(idx_tuple) == 4: + out_fft[ + idx_tuple[0], idx_tuple[1], idx_tuple[2], idx_tuple[3] + ] = self._contract( + x[idx_tuple[0], idx_tuple[1], idx_tuple[2], idx_tuple[3]], + self.weight[indices + i].real, + self.weight[indices + i].imag, + separable=self.separable, + ) + elif len(idx_tuple) == 3: + out_fft[idx_tuple[0], idx_tuple[1], idx_tuple[2]] = self._contract( + x[idx_tuple[0], idx_tuple[1], idx_tuple[2]], + self.weight[indices + i].real, + self.weight[indices + i].imag, + separable=self.separable, + ) + else: + raise ValueError("Not implemented") + + x = paddle.fft.irfftn(out_fft, s=(mode_sizes), norm=self.fft_norm) + + if self.bias is not None: + x = x + self.bias[indices, ...] + + return x + + def get_conv(self, indices): + """Returns a sub-convolutional layer from the joint parametrize main-convolution + + The parametrization of sub-convolutional layers is shared with the main one. + """ + if self.n_layers == 1: + raise ValueError( + "A single convolution is parametrized, directly use the main class." + ) + + return SubConv2d(self, indices) + + def __getitem__(self, indices): + return self.get_conv(indices) + + +class SubConv2d(nn.Layer): + """Class representing one of the convolutions from the mother joint factorized convolution + + Notes + ----- + This relies on the fact that nn.Parameters are not duplicated: + if the same nn.Parameter is assigned to multiple modules, they all point to the same data, + which is shared. 
+ """ + + def __init__(self, main_conv, indices): + super().__init__() + self.main_conv = main_conv + self.indices = indices + + def forward(self, x): + return self.main_conv.forward(x, self.indices) + + +class FactorizedSpectralConv1d(FactorizedSpectralConv): + def __init__( + self, + in_channels, + out_channels, + modes_height, + n_layers=1, + scale="auto", + separable=False, + fft_norm="backward", + bias=True, + implementation="reconstucted", + joint_factorization=False, + rank=0.5, + factorization="cp", + fixed_rank_modes=False, + decomposition_kwargs=dict(), + ): + super().__init__( + in_channels, + out_channels, + (modes_height,), + n_layers=n_layers, + scale=scale, + separable=separable, + fft_norm=fft_norm, + bias=bias, + implementation=implementation, + joint_factorization=joint_factorization, + rank=rank, + factorization=factorization, + fixed_rank_modes=fixed_rank_modes, + decomposition_kwargs=decomposition_kwargs, + ) + self.half_modes_height = self.half_modes[0] + + def forward(self, x, indices=0): + batchsize, channels, width = x.shape + + x = paddle.fft.rfft(x, norm=self.fft_norm) + + out_fft = paddle.zeros( + [batchsize, self.out_channels, width // 2 + 1], + dtype=paddle.complex64, + ) + out_fft[:, :, : self.half_modes_height] = self._contract( + x[:, :, : self.half_modes_height], + self.weight[indices], + separable=self.separable, + ) + + x = paddle.fft.irfft(out_fft, n=width, norm=self.fft_norm) + + if self.bias is not None: + x = x + self.bias[indices, ...] + + return x + + +class FactorizedSpectralConv2d(FactorizedSpectralConv): + def __init__( + self, + in_channels, + out_channels, + modes_height, + modes_width, + n_layers=1, + scale="auto", + separable=False, + fft_norm="backward", + bias=False, + implementation="factorized", + joint_factorization=False, + rank=0.5, + factorization="cp", + fixed_rank_modes=False, + decomposition_kwargs=dict(), + ): + super().__init__( + in_channels, + out_channels, + (modes_height, modes_width), + n_layers=n_layers, + scale=scale, + separable=separable, + fft_norm=fft_norm, + bias=bias, + implementation=implementation, + joint_factorization=joint_factorization, + rank=rank, + factorization=factorization, + fixed_rank_modes=fixed_rank_modes, + decomposition_kwargs=decomposition_kwargs, + ) + self.half_modes_height, self.half_modes_width = self.half_modes + + def forward(self, x, indices=0): + batchsize, channels, height, width = x.shape + + x_float = paddle.cast(x, dtype="float32") + x = paddle.fft.rfft2(x_float, norm=self.fft_norm) + + # The output will be of size (batch_size, self.out_channels, x.size(-2), x.size(-1)//2 + 1) + out_fft = paddle.zeros( + [batchsize, self.out_channels, height, width // 2 + 1], + dtype=x.dtype, + ) + + # upper block (truncate high freq) + out_fft[ + :, :, : self.half_modes_height, : self.half_modes_width + ] = self._contract( + x[:, :, : self.half_modes_height, : self.half_modes_width], + self.weight[2 * indices], + separable=self.separable, + ) + # Lower block + out_fft[ + :, :, -self.half_modes_height :, : self.half_modes_width + ] = self._contract( + x[:, :, -self.half_modes_height :, : self.half_modes_width], + self.weight[2 * indices + 1], + separable=self.separable, + ) + + x = paddle.fft.irfft2( + out_fft, s=(height, width), axes=(-2, -1), norm=self.fft_norm + ) + + if self.bias is not None: + x = x + self.bias[indices, ...] 
+ + return x + + +class FactorizedSpectralConv3d(FactorizedSpectralConv): + def __init__( + self, + in_channels, + out_channels, + modes_height, + modes_width, + modes_depth, + n_layers=1, + scale="auto", + separable=False, + fft_norm="backward", + bias=True, + implementation="reconstucted", + joint_factorization=False, + rank=0.5, + factorization="cp", + fixed_rank_modes=False, + decomposition_kwargs=dict(), + ): + super().__init__( + in_channels, + out_channels, + (modes_height, modes_width, modes_depth), + n_layers=n_layers, + scale=scale, + separable=separable, + fft_norm=fft_norm, + bias=bias, + implementation=implementation, + joint_factorization=joint_factorization, + rank=rank, + factorization=factorization, + fixed_rank_modes=fixed_rank_modes, + decomposition_kwargs=decomposition_kwargs, + ) + ( + self.half_modes_height, + self.half_modes_width, + self.half_modes_depth, + ) = self.half_modes + + def forward(self, x, indices=0): + batchsize, channels, height, width, depth = x.shape + + x_float = paddle.cast(x, dtype="float32") + x = paddle.fft.rfftn(x_float, norm=self.fft_norm, dim=[-3, -2, -1]) + out_fft = paddle.zeros( + [batchsize, self.out_channels, height, width, depth // 2 + 1], + dtype="complex64", + ) + + out_fft[ + :, + :, + : self.half_modes_height, + : self.half_modes_width, + : self.half_modes_depth, + ] = self._contract( + x[ + :, + :, + : self.half_modes_height, + : self.half_modes_width, + : self.half_modes_depth, + ], + self.weight[indices + 0], + separable=self.separable, + ) + out_fft[ + :, + :, + : self.half_modes_height, + -self.half_modes_width :, + : self.half_modes_depth, + ] = self._contract( + x[ + :, + :, + : self.half_modes_height, + -self.half_modes_width :, + : self.half_modes_depth, + ], + self.weight[indices + 1], + separable=self.separable, + ) + out_fft[ + :, + :, + -self.half_modes_height :, + : self.half_modes_width, + : self.half_modes_depth, + ] = self._contract( + x[ + :, + :, + -self.half_modes_height :, + : self.half_modes_width, + : self.half_modes_depth, + ], + self.weight[indices + 2], + separable=self.separable, + ) + out_fft[ + :, + :, + -self.half_modes_height :, + -self.half_modes_width :, + : self.half_modes_depth, + ] = self._contract( + x[ + :, + :, + -self.half_modes_height :, + -self.half_modes_width :, + : self.half_modes_depth, + ], + self.weight[indices + 3], + separable=self.separable, + ) + + x = paddle.fft.irfftn(out_fft, s=(height, width, depth), norm=self.fft_norm) + + if self.bias is not None: + x = x + self.bias[indices, ...] 
+ + return x + + +if __name__ == "__main__": + # let x be a complex tensor of size (32, 32, 8, 8) + x = paddle.randn([32, 32, 8, 8]).astype("complex64") + # let weight be the same + weight = paddle.randn([32, 32, 8, 8]).astype("complex64") + separable = False + result = _contract_dense(x, weight, separable=separable) + print(result) diff --git a/jointContribution/PINO/PINO_paddle/models/__init__.py b/jointContribution/PINO/PINO_paddle/models/__init__.py index 58c103ae64..5180783a8c 100644 --- a/jointContribution/PINO/PINO_paddle/models/__init__.py +++ b/jointContribution/PINO/PINO_paddle/models/__init__.py @@ -1,3 +1,3 @@ -from .FCN import FCNet -from .fourier2d import FNO2d -from .fourier3d import FNO3d +from .FCN import FCNet +from .fourier2d import FNO2d +from .fourier3d import FNO3d diff --git a/jointContribution/PINO/PINO_paddle/models/basics.py b/jointContribution/PINO/PINO_paddle/models/basics.py index 8a8fea46f3..7041e0ec18 100644 --- a/jointContribution/PINO/PINO_paddle/models/basics.py +++ b/jointContribution/PINO/PINO_paddle/models/basics.py @@ -1,165 +1,291 @@ -import numpy as np - -import paddle -import paddle.nn as nn -import paddle.nn.initializer as Initializer - -def compl_mul1d(a: paddle.Tensor, b: paddle.Tensor) -> paddle.Tensor: - # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x) - res = paddle.einsum("bix,iox->box", a, b) - return res - -def compl_mul2d(a: paddle.Tensor, b: paddle.Tensor) -> paddle.Tensor: - # (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t) - res = paddle.einsum("bixy,ioxy->boxy", a, b) - return res - -def compl_mul3d(a: paddle.Tensor, b: paddle.Tensor) -> paddle.Tensor: - res = paddle.einsum("bixyz,ioxyz->boxyz", a, b) - return res - -################################################################ -# 1d fourier layer -################################################################ - -class SpectralConv1d(nn.Layer): - def __init__(self, in_channels, out_channels, modes1): - super(SpectralConv1d, self).__init__() - - """ - 1D Fourier layer. It does FFT, linear transform, and Inverse FFT. 
- """ - - self.in_channels = in_channels - self.out_channels = out_channels - # Number of Fourier modes to multiply, at most floor(N/2) + 1 - self.modes1 = modes1 - - self.scale = (1 / (in_channels*out_channels)) - self.weights1 = paddle.create_parameter(shape=(in_channels, out_channels), dtype=paddle.complex64, attr=Initializer.Assign(self.scale * paddle.rand(in_channels, out_channels, self.modes1))) - - def forward(self, x): - batchsize = x.shape[0] - # Compute Fourier coeffcients up to factor of e^(- something constant) - x_ft = paddle.fft.rfftn(x, dim=[2]) - - # Multiply relevant Fourier modes - out_ft = paddle.zeros(batchsize, self.in_channels, x.shape[-1]//2 + 1, dtype=paddle.complex64) - out_ft[:, :, :self.modes1] = compl_mul1d(x_ft[:, :, :self.modes1], self.weights1) - - # Return to physical space - x = paddle.fft.irfftn(out_ft, s=[x.shape[-1]], axes=[2]) - return x - -################################################################ -# 2d fourier layer -################################################################ - -class SpectralConv2d(nn.Layer): - def __init__(self, in_channels, out_channels, modes1, modes2): - super(SpectralConv2d, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - # Number of Fourier modes to multiply, at most floor(N/2) + 1 - self.modes1 = modes1 - self.modes2 = modes2 - - self.scale = (1 / (in_channels * out_channels)) - weights1_attr = Initializer.Assign(self.scale * paddle.rand([in_channels, out_channels, self.modes1, self.modes2], dtype=paddle.float32)) - self.weights1_real = paddle.create_parameter(shape=(in_channels, out_channels), dtype=paddle.float32, attr=weights1_attr) - self.weights1_imag = paddle.create_parameter(shape=(in_channels, out_channels), dtype=paddle.float32, attr=weights1_attr) - self.weights1 = paddle.concat([self.weights1_real, self.weights1_imag], axis=0) - self.weights2 = paddle.create_parameter(shape=(in_channels, out_channels), dtype=paddle.float32, attr=Initializer.Assign(self.scale * paddle.rand([in_channels, out_channels, self.modes1, self.modes2], dtype=paddle.float32))) - - def forward(self, x): - batchsize = x.shape[0] - size1 = x.shape[-2] - size2 = x.shape[-1] - # Compute Fourier coeffcients up to factor of e^(- something constant) - x_ft = paddle.fft.rfftn(x, axes=[2, 3]) - - # Multiply relevant Fourier modes - out_ft = paddle.zeros([batchsize, self.out_channels, x.shape[-2], x.shape[-1] // 2 + 1], - dtype=paddle.float32) - out_ft[:, :, :self.modes1, :self.modes2] = \ - compl_mul2d(x_ft[:, :, :self.modes1, :self.modes2], self.weights1) - out_ft[:, :, -self.modes1:, :self.modes2] = \ - compl_mul2d(x_ft[:, :, -self.modes1:, :self.modes2], self.weights2) - - # Return to physical space - x = paddle.fft.irfftn(out_ft, s=(x.shape[-2], x.shape[-1]), axes=[2, 3]) - return x - -class SpectralConv3d(nn.Layer): - def __init__(self, in_channels, out_channels, modes1, modes2, modes3): - super(SpectralConv3d, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.modes1 = modes1 #Number of Fourier modes to multiply, at most floor(N/2) + 1 - self.modes2 = modes2 - self.modes3 = modes3 - - self.scale = (1 / (in_channels * out_channels)) - self.weights1 = paddle.create_parameter(shape=(in_channels, out_channels), dtype=paddle.complex64, attr=Initializer.Assign(self.scale * paddle.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3))) - self.weights2 = paddle.create_parameter(shape=(in_channels, out_channels), dtype=paddle.complex64, 
attr=Initializer.Assign(self.scale * paddle.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3))) - self.weights3 = paddle.create_parameter(shape=(in_channels, out_channels), dtype=paddle.complex64, attr=Initializer.Assign(self.scale * paddle.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3))) - self.weights4 = paddle.create_parameter(shape=(in_channels, out_channels), dtype=paddle.complex64, attr=Initializer.Assign(self.scale * paddle.rand(in_channels, out_channels, self.modes1, self.modes2, self.modes3))) - - def forward(self, x): - batchsize = x.shape[0] - # Compute Fourier coeffcients up to factor of e^(- something constant) - x_ft = paddle.fft.rfftn(x, dim=[2,3,4]) - - z_dim = min(x_ft.shape[4], self.modes3) - - # Multiply relevant Fourier modes - out_ft = paddle.zeros(batchsize, self.out_channels, x_ft.shape[2], x_ft.shape[3], self.modes3, dtype=paddle.complex64) - - # if x_ft.shape[4] > self.modes3, truncate; if x_ft.shape[4] < self.modes3, add zero padding - coeff = paddle.zeros(batchsize, self.in_channels, self.modes1, self.modes2, self.modes3, dtype=paddle.complex64) - coeff[..., :z_dim] = x_ft[:, :, :self.modes1, :self.modes2, :z_dim] - out_ft[:, :, :self.modes1, :self.modes2, :] = compl_mul3d(coeff, self.weights1) - - coeff = paddle.zeros(batchsize, self.in_channels, self.modes1, self.modes2, self.modes3, dtype=paddle.complex64) - coeff[..., :z_dim] = x_ft[:, :, -self.modes1:, :self.modes2, :z_dim] - out_ft[:, :, -self.modes1:, :self.modes2, :] = compl_mul3d(coeff, self.weights2) - - coeff = paddle.zeros(batchsize, self.in_channels, self.modes1, self.modes2, self.modes3, dtype=paddle.complex64) - coeff[..., :z_dim] = x_ft[:, :, :self.modes1, -self.modes2:, :z_dim] - out_ft[:, :, :self.modes1, -self.modes2:, :] = compl_mul3d(coeff, self.weights3) - - coeff = paddle.zeros(batchsize, self.in_channels, self.modes1, self.modes2, self.modes3, dtype=paddle.complex64) - coeff[..., :z_dim] = x_ft[:, :, -self.modes1:, -self.modes2:, :z_dim] - out_ft[:, :, -self.modes1:, -self.modes2:, :] = compl_mul3d(coeff, self.weights4) - - #Return to physical space - x = paddle.fft.irfftn(out_ft, s=(x.shape[2], x.shape[3], x.shape[4]), axes=[2,3,4]) - return x - -class FourierBlock(nn.Layer): - def __init__(self, in_channels, out_channels, modes1, modes2, modes3, act='tanh'): - super(FourierBlock, self).__init__() - self.in_channel = in_channels - self.out_channel = out_channels - self.speconv = SpectralConv3d(in_channels, out_channels, modes1, modes2, modes3) - self.linear = nn.Conv1D(in_channels, out_channels, 1) - if act == 'tanh': - self.act = paddle.tanh_ - elif act == 'gelu': - self.act = nn.GELU - elif act == 'none': - self.act = None - else: - raise ValueError(f'{act} is not supported') - - def forward(self, x): - ''' - input x: (batchsize, channel width, x_grid, y_grid, t_grid) - ''' - x1 = self.speconv(x) - x2 = self.linear(x.reshape(x.shape[0], self.in_channel, -1)) - out = x1 + x2.reshape(x.shape[0], self.out_channel, x.shape[2], x.shape[3], x.shape[4]) - if self.act is not None: - out = self.act(out) - return out - +import numpy as np +import paddle +import paddle.nn as nn +import paddle.nn.initializer as Initializer + + +def compl_mul1d(a: paddle.Tensor, b: paddle.Tensor) -> paddle.Tensor: + # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x) + res = paddle.einsum("bix,iox->box", a, b) + return res + + +def compl_mul2d(a: paddle.Tensor, b: paddle.Tensor) -> paddle.Tensor: + # (batch, in_channel, x,y,t ), (in_channel, 
out_channel, x,y,t) -> (batch, out_channel, x,y,t) + res = paddle.einsum("bixy,ioxy->boxy", a, b) + return res + + +def compl_mul3d(a: paddle.Tensor, b: paddle.Tensor) -> paddle.Tensor: + res = paddle.einsum("bixyz,ioxyz->boxyz", a, b) + return res + + +################################################################ +# 1d fourier layer +################################################################ + + +class SpectralConv1d(nn.Layer): + def __init__(self, in_channels, out_channels, modes1): + super(SpectralConv1d, self).__init__() + + """ + 1D Fourier layer. It does FFT, linear transform, and Inverse FFT. + """ + + self.in_channels = in_channels + self.out_channels = out_channels + # Number of Fourier modes to multiply, at most floor(N/2) + 1 + self.modes1 = modes1 + + self.scale = 1 / (in_channels * out_channels) + self.weights1 = paddle.create_parameter( + shape=(in_channels, out_channels), + dtype=paddle.complex64, + attr=Initializer.Assign( + self.scale * paddle.rand(in_channels, out_channels, self.modes1) + ), + ) + + def forward(self, x): + batchsize = x.shape[0] + # Compute Fourier coeffcients up to factor of e^(- something constant) + x_ft = paddle.fft.rfftn(x, dim=[2]) + + # Multiply relevant Fourier modes + out_ft = paddle.zeros( + batchsize, self.in_channels, x.shape[-1] // 2 + 1, dtype=paddle.complex64 + ) + out_ft[:, :, : self.modes1] = compl_mul1d( + x_ft[:, :, : self.modes1], self.weights1 + ) + + # Return to physical space + x = paddle.fft.irfftn(out_ft, s=[x.shape[-1]], axes=[2]) + return x + + +################################################################ +# 2d fourier layer +################################################################ + + +class SpectralConv2d(nn.Layer): + def __init__(self, in_channels, out_channels, modes1, modes2): + super(SpectralConv2d, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + # Number of Fourier modes to multiply, at most floor(N/2) + 1 + self.modes1 = modes1 + self.modes2 = modes2 + + self.scale = 1 / (in_channels * out_channels) + weights1_attr = Initializer.Assign( + self.scale + * paddle.rand( + [in_channels, out_channels, self.modes1, self.modes2], + dtype=paddle.float32, + ) + ) + self.weights1_real = paddle.create_parameter( + shape=(in_channels, out_channels), dtype=paddle.float32, attr=weights1_attr + ) + self.weights1_imag = paddle.create_parameter( + shape=(in_channels, out_channels), dtype=paddle.float32, attr=weights1_attr + ) + self.weights1 = paddle.concat([self.weights1_real, self.weights1_imag], axis=0) + self.weights2 = paddle.create_parameter( + shape=(in_channels, out_channels), + dtype=paddle.float32, + attr=Initializer.Assign( + self.scale + * paddle.rand( + [in_channels, out_channels, self.modes1, self.modes2], + dtype=paddle.float32, + ) + ), + ) + + def forward(self, x): + batchsize = x.shape[0] + size1 = x.shape[-2] + size2 = x.shape[-1] + # Compute Fourier coeffcients up to factor of e^(- something constant) + x_ft = paddle.fft.rfftn(x, axes=[2, 3]) + + # Multiply relevant Fourier modes + out_ft = paddle.zeros( + [batchsize, self.out_channels, x.shape[-2], x.shape[-1] // 2 + 1], + dtype=paddle.float32, + ) + out_ft[:, :, : self.modes1, : self.modes2] = compl_mul2d( + x_ft[:, :, : self.modes1, : self.modes2], self.weights1 + ) + out_ft[:, :, -self.modes1 :, : self.modes2] = compl_mul2d( + x_ft[:, :, -self.modes1 :, : self.modes2], self.weights2 + ) + + # Return to physical space + x = paddle.fft.irfftn(out_ft, s=(x.shape[-2], x.shape[-1]), axes=[2, 3]) + return x + + 
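The einsum string "bixy,ioxy->boxy" in compl_mul2d above is the same contraction that _contract_dense_trick in FNO_blocks.py performs by hand on real and imaginary parts, using (a + ib)(c + id) = (ac - bd) + i(ad + bc) to avoid the paddle.einsum issue with complex inputs that the diff mentions. A minimal sketch of that split, with purely illustrative shapes (batch, channel and mode sizes here are not taken from the diff):

import paddle

# Illustrative sizes: batch=2, in_channels=4, out_channels=3, 8x8 retained modes.
b, ci, co, h, w = 2, 4, 3, 8, 8
x = paddle.complex(paddle.randn([b, ci, h, w]), paddle.randn([b, ci, h, w]))
wgt = paddle.complex(paddle.randn([ci, co, h, w]), paddle.randn([ci, co, h, w]))

eq = "bixy,ioxy->boxy"
# (a + ib)(c + id) = (ac - bd) + i(ad + bc), built from real-valued einsums only.
out_real = paddle.einsum(eq, x.real(), wgt.real()) - paddle.einsum(eq, x.imag(), wgt.imag())
out_imag = paddle.einsum(eq, x.imag(), wgt.real()) + paddle.einsum(eq, x.real(), wgt.imag())
out = paddle.complex(out_real, out_imag)
print(out.shape)  # [2, 3, 8, 8]

The result has the (batch, out_channel, x, y) layout that the comment above describes, which is why the manual split can be dropped in wherever the direct complex einsum misbehaves.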
+class SpectralConv3d(nn.Layer):
+    def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
+        super(SpectralConv3d, self).__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.modes1 = (
+            modes1  # Number of Fourier modes to multiply, at most floor(N/2) + 1
+        )
+        self.modes2 = modes2
+        self.modes3 = modes3
+
+        self.scale = 1 / (in_channels * out_channels)
+        # Per-mode weights: shape (in_channels, out_channels, modes1, modes2, modes3).
+        self.weights1 = paddle.create_parameter(
+            shape=[in_channels, out_channels, self.modes1, self.modes2, self.modes3],
+            dtype=paddle.complex64,
+            attr=Initializer.Assign(
+                self.scale
+                * paddle.rand(
+                    [in_channels, out_channels, self.modes1, self.modes2, self.modes3]
+                )
+            ),
+        )
+        self.weights2 = paddle.create_parameter(
+            shape=[in_channels, out_channels, self.modes1, self.modes2, self.modes3],
+            dtype=paddle.complex64,
+            attr=Initializer.Assign(
+                self.scale
+                * paddle.rand(
+                    [in_channels, out_channels, self.modes1, self.modes2, self.modes3]
+                )
+            ),
+        )
+        self.weights3 = paddle.create_parameter(
+            shape=[in_channels, out_channels, self.modes1, self.modes2, self.modes3],
+            dtype=paddle.complex64,
+            attr=Initializer.Assign(
+                self.scale
+                * paddle.rand(
+                    [in_channels, out_channels, self.modes1, self.modes2, self.modes3]
+                )
+            ),
+        )
+        self.weights4 = paddle.create_parameter(
+            shape=[in_channels, out_channels, self.modes1, self.modes2, self.modes3],
+            dtype=paddle.complex64,
+            attr=Initializer.Assign(
+                self.scale
+                * paddle.rand(
+                    [in_channels, out_channels, self.modes1, self.modes2, self.modes3]
+                )
+            ),
+        )
+
+    def forward(self, x):
+        batchsize = x.shape[0]
+        # Compute Fourier coefficients up to factor of e^(- something constant)
+        x_ft = paddle.fft.rfftn(x, axes=[2, 3, 4])
+
+        z_dim = min(x_ft.shape[4], self.modes3)
+
+        # Multiply relevant Fourier modes
+        out_ft = paddle.zeros(
+            [batchsize, self.out_channels, x_ft.shape[2], x_ft.shape[3], self.modes3],
+            dtype=paddle.complex64,
+        )
+
+        # if x_ft.shape[4] > self.modes3, truncate; if x_ft.shape[4] < self.modes3, add zero padding
+        coeff = paddle.zeros(
+            [batchsize, self.in_channels, self.modes1, self.modes2, self.modes3],
+            dtype=paddle.complex64,
+        )
+        coeff[..., :z_dim] = x_ft[:, :, : self.modes1, : self.modes2, :z_dim]
+        out_ft[:, :, : self.modes1, : self.modes2, :] = compl_mul3d(
+            coeff, self.weights1
+        )
+
+        coeff = paddle.zeros(
+            [batchsize, self.in_channels, self.modes1, self.modes2, self.modes3],
+            dtype=paddle.complex64,
+        )
+        coeff[..., :z_dim] = x_ft[:, :, -self.modes1 :, : self.modes2, :z_dim]
+        out_ft[:, :, -self.modes1 :, : self.modes2, :] = compl_mul3d(
+            coeff, self.weights2
+        )
+
+        coeff = paddle.zeros(
+            [batchsize, self.in_channels, self.modes1, self.modes2, self.modes3],
+            dtype=paddle.complex64,
+        )
+        coeff[..., :z_dim] = x_ft[:, :, : self.modes1, -self.modes2 :, :z_dim]
+        out_ft[:, :, : self.modes1, -self.modes2 :, :] = compl_mul3d(
+            coeff, self.weights3
+        )
+
+        coeff = paddle.zeros(
+            [batchsize, self.in_channels, self.modes1, self.modes2, self.modes3],
+            dtype=paddle.complex64,
+        )
+        coeff[..., :z_dim] = x_ft[:, :, -self.modes1 :, -self.modes2 :, :z_dim]
+        out_ft[:, :, -self.modes1 :, -self.modes2 :, :] = compl_mul3d(
+            coeff, self.weights4
+        )
+
+        # Return to physical space
+        x = paddle.fft.irfftn(
+            out_ft, s=(x.shape[2], x.shape[3], x.shape[4]), axes=[2, 3, 4]
+        )
+        return x
+
+
+class FourierBlock(nn.Layer):
+    def __init__(self, in_channels, out_channels, modes1, modes2, modes3, act="tanh"):
+        super(FourierBlock, self).__init__()
+        self.in_channel = in_channels
+        self.out_channel = out_channels
+        self.speconv = SpectralConv3d(in_channels, out_channels, modes1, modes2, modes3)
+        self.linear = nn.Conv1D(in_channels, out_channels, 1)
if act == "tanh": + self.act = paddle.tanh_ + elif act == "gelu": + self.act = nn.GELU + elif act == "none": + self.act = None + else: + raise ValueError(f"{act} is not supported") + + def forward(self, x): + """ + input x: (batchsize, channel width, x_grid, y_grid, t_grid) + """ + x1 = self.speconv(x) + x2 = self.linear(x.reshape(x.shape[0], self.in_channel, -1)) + out = x1 + x2.reshape( + x.shape[0], self.out_channel, x.shape[2], x.shape[3], x.shape[4] + ) + if self.act is not None: + out = self.act(out) + return out diff --git a/jointContribution/PINO/PINO_paddle/models/fourier2d.py b/jointContribution/PINO/PINO_paddle/models/fourier2d.py index 327369aed1..d797cc3624 100644 --- a/jointContribution/PINO/PINO_paddle/models/fourier2d.py +++ b/jointContribution/PINO/PINO_paddle/models/fourier2d.py @@ -1,86 +1,106 @@ -import paddle.nn as nn -from .basics import SpectralConv2d -from .FNO_blocks import FactorizedSpectralConv2d -from .utils import _get_act, add_padding2, remove_padding2 - -class FNO2d(nn.Layer): - def __init__(self, modes1, modes2, - width=64, fc_dim=128, - layers=None, - in_dim=3, out_dim=1, - act='gelu', - pad_ratio=[0., 0.]): - super(FNO2d, self).__init__() - """ - Args: - - modes1: list of int, number of modes in first dimension in each layer - - modes2: list of int, number of modes in second dimension in each layer - - width: int, optional, if layers is None, it will be initialized as [width] * [len(modes1) + 1] - - in_dim: number of input channels - - out_dim: number of output channels - - act: activation function, {tanh, gelu, relu, leaky_relu}, default: gelu - - pad_ratio: list of float, or float; portion of domain to be extended. If float, paddings are added to the right. - If list, paddings are added to both sides. pad_ratio[0] pads left, pad_ratio[1] pads right. - """ - if isinstance(pad_ratio, float): - pad_ratio = [pad_ratio, pad_ratio] - else: - assert len(pad_ratio) == 2, 'Cannot add padding in more than 2 directions' - self.modes1 = modes1 - self.modes2 = modes2 - - self.pad_ratio = pad_ratio - # input channel is 3: (a(x, y), x, y) - if layers is None: - self.layers = [width] * (len(modes1) + 1) - else: - self.layers = layers - self.fc0 = nn.Linear(in_dim, layers[0]) - - self.sp_convs = nn.LayerList([FactorizedSpectralConv2d( - in_size, out_size, mode1_num, mode2_num) - for in_size, out_size, mode1_num, mode2_num - in zip(self.layers, self.layers[1:], self.modes1, self.modes2)]) - - self.ws = nn.LayerList([nn.Conv1D(in_size, out_size, 1) - for in_size, out_size in zip(self.layers, self.layers[1:])]) - - self.fc1 = nn.Linear(layers[-1], fc_dim) - self.fc2 = nn.Linear(fc_dim, layers[-1]) - self.fc3 = nn.Linear(layers[-1], out_dim) - self.act = _get_act(act) - - def forward(self, x): - ''' - Args: - - x : (batch size, x_grid, y_grid, 2) - Returns: - - x: (batch size, x_grid, y_grid, 1) - ''' - size_1, size_2 = x.shape[1], x.shape[2] - if max(self.pad_ratio) > 0: - num_pad1 = [round(i * size_1) for i in self.pad_ratio] - num_pad2 = [round(i * size_2) for i in self.pad_ratio] - else: - num_pad1 = num_pad2 = [0.] 
- - length = len(self.ws) - batchsize = x.shape[0] - x = self.fc0(x) - x = x.transpose([0, 3, 1, 2]) # B, C, X, Y - x = add_padding2(x, num_pad1, num_pad2) - size_x, size_y = x.shape[-2], x.shape[-1] - for i, (speconv, w) in enumerate(zip(self.sp_convs, self.ws)): - x1 = speconv(x) - x2 = w(x.reshape([batchsize, self.layers[i], -1])).reshape([batchsize, self.layers[i+1], size_x, size_y]) - x = x1 + x2 - if i != length - 1: - x = self.act(x) - x = remove_padding2(x, num_pad1, num_pad2) - x = x.transpose([0, 2, 3, 1]) - x = self.fc1(x) - x = self.act(x) - x = self.fc2(x) - x = self.act(x) - x = self.fc3(x) - return x +import paddle.nn as nn + +from .basics import SpectralConv2d +from .FNO_blocks import FactorizedSpectralConv2d +from .utils import _get_act +from .utils import add_padding2 +from .utils import remove_padding2 + + +class FNO2d(nn.Layer): + def __init__( + self, + modes1, + modes2, + width=64, + fc_dim=128, + layers=None, + in_dim=3, + out_dim=1, + act="gelu", + pad_ratio=[0.0, 0.0], + ): + super(FNO2d, self).__init__() + """ + Args: + - modes1: list of int, number of modes in first dimension in each layer + - modes2: list of int, number of modes in second dimension in each layer + - width: int, optional, if layers is None, it will be initialized as [width] * [len(modes1) + 1] + - in_dim: number of input channels + - out_dim: number of output channels + - act: activation function, {tanh, gelu, relu, leaky_relu}, default: gelu + - pad_ratio: list of float, or float; portion of domain to be extended. If float, paddings are added to the right. + If list, paddings are added to both sides. pad_ratio[0] pads left, pad_ratio[1] pads right. + """ + if isinstance(pad_ratio, float): + pad_ratio = [pad_ratio, pad_ratio] + else: + assert len(pad_ratio) == 2, "Cannot add padding in more than 2 directions" + self.modes1 = modes1 + self.modes2 = modes2 + + self.pad_ratio = pad_ratio + # input channel is 3: (a(x, y), x, y) + if layers is None: + self.layers = [width] * (len(modes1) + 1) + else: + self.layers = layers + self.fc0 = nn.Linear(in_dim, layers[0]) + + self.sp_convs = nn.LayerList( + [ + FactorizedSpectralConv2d(in_size, out_size, mode1_num, mode2_num) + for in_size, out_size, mode1_num, mode2_num in zip( + self.layers, self.layers[1:], self.modes1, self.modes2 + ) + ] + ) + + self.ws = nn.LayerList( + [ + nn.Conv1D(in_size, out_size, 1) + for in_size, out_size in zip(self.layers, self.layers[1:]) + ] + ) + + self.fc1 = nn.Linear(layers[-1], fc_dim) + self.fc2 = nn.Linear(fc_dim, layers[-1]) + self.fc3 = nn.Linear(layers[-1], out_dim) + self.act = _get_act(act) + + def forward(self, x): + """ + Args: + - x : (batch size, x_grid, y_grid, 2) + Returns: + - x: (batch size, x_grid, y_grid, 1) + """ + size_1, size_2 = x.shape[1], x.shape[2] + if max(self.pad_ratio) > 0: + num_pad1 = [round(i * size_1) for i in self.pad_ratio] + num_pad2 = [round(i * size_2) for i in self.pad_ratio] + else: + num_pad1 = num_pad2 = [0.0] + + length = len(self.ws) + batchsize = x.shape[0] + x = self.fc0(x) + x = x.transpose([0, 3, 1, 2]) # B, C, X, Y + x = add_padding2(x, num_pad1, num_pad2) + size_x, size_y = x.shape[-2], x.shape[-1] + for i, (speconv, w) in enumerate(zip(self.sp_convs, self.ws)): + x1 = speconv(x) + x2 = w(x.reshape([batchsize, self.layers[i], -1])).reshape( + [batchsize, self.layers[i + 1], size_x, size_y] + ) + x = x1 + x2 + if i != length - 1: + x = self.act(x) + x = remove_padding2(x, num_pad1, num_pad2) + x = x.transpose([0, 2, 3, 1]) + x = self.fc1(x) + x = self.act(x) + x = 
self.fc2(x) + x = self.act(x) + x = self.fc3(x) + return x diff --git a/jointContribution/PINO/PINO_paddle/models/fourier3d.py b/jointContribution/PINO/PINO_paddle/models/fourier3d.py index 077794f645..115234a42d 100644 --- a/jointContribution/PINO/PINO_paddle/models/fourier3d.py +++ b/jointContribution/PINO/PINO_paddle/models/fourier3d.py @@ -1,91 +1,112 @@ -import paddle.nn as nn -from .basics import SpectralConv3d -from .FNO_blocks import FactorizedSpectralConv3d -from .utils import add_padding, remove_padding, _get_act - -class FNO3d(nn.Layer): - def __init__(self, - modes1, modes2, modes3, - width=16, - fc_dim=128, - layers=None, - in_dim=4, out_dim=1, - act='gelu', - pad_ratio=[0., 0.]): - ''' - Args: - modes1: list of int, first dimension maximal modes for each layer - modes2: list of int, second dimension maximal modes for each layer - modes3: list of int, third dimension maximal modes for each layer - layers: list of int, channels for each layer - fc_dim: dimension of fully connected layers - in_dim: int, input dimension - out_dim: int, output dimension - act: {tanh, gelu, relu, leaky_relu}, activation function - pad_ratio: the ratio of the extended domain - ''' - super(FNO3d, self).__init__() - - if isinstance(pad_ratio, float): - pad_ratio = [pad_ratio, pad_ratio] - else: - assert len(pad_ratio) == 2, 'Cannot add padding in more than 2 directions.' - - self.pad_ratio = pad_ratio - self.modes1 = modes1 - self.modes2 = modes2 - self.modes3 = modes3 - self.pad_ratio = pad_ratio - - if layers is None: - self.layers = [width] * 4 - else: - self.layers = layers - self.fc0 = nn.Linear(in_dim, layers[0]) - - self.sp_convs = nn.LayerList([FactorizedSpectralConv3d( - in_size, out_size, mode1_num, mode2_num, mode3_num) - for in_size, out_size, mode1_num, mode2_num, mode3_num - in zip(self.layers, self.layers[1:], self.modes1, self.modes2, self.modes3)]) - - self.ws = nn.LayerList([nn.Conv1D(in_size, out_size, 1) - for in_size, out_size in zip(self.layers, self.layers[1:])]) - - self.fc1 = nn.Linear(layers[-1], fc_dim) - self.fc2 = nn.Linear(fc_dim, out_dim) - self.act = _get_act(act) - - def forward(self, x): - ''' - Args: - x: (batchsize, x_grid, y_grid, t_grid, 3) - - Returns: - u: (batchsize, x_grid, y_grid, t_grid, 1) - - ''' - size_z = x.shape[-2] - if max(self.pad_ratio) > 0: - num_pad = [round(size_z * i) for i in self.pad_ratio] - else: - num_pad = [0., 0.] 
- length = len(self.ws) - batchsize = x.shape[0] - - x = self.fc0(x) - x = x.transpose([0, 4, 1, 2, 3]) - x = add_padding(x, num_pad=num_pad) - size_x, size_y, size_z = x.shape[-3], x.shape[-2], x.shape[-1] - - for i, (speconv, w) in enumerate(zip(self.sp_convs, self.ws)): - x1 = speconv(x) - x2 = w(x.reshape([batchsize, self.layers[i], -1])).reshape([batchsize, self.layers[i+1], size_x, size_y, size_z]) - x = x1 + x2 - if i != length - 1: - x = self.act(x) - x = remove_padding(x, num_pad=num_pad) - x = x.transpose([0, 2, 3, 4, 1]) - x = self.fc1(x) - x = self.act(x) - x = self.fc2(x) - return x \ No newline at end of file +import paddle.nn as nn + +from .basics import SpectralConv3d +from .FNO_blocks import FactorizedSpectralConv3d +from .utils import _get_act +from .utils import add_padding +from .utils import remove_padding + + +class FNO3d(nn.Layer): + def __init__( + self, + modes1, + modes2, + modes3, + width=16, + fc_dim=128, + layers=None, + in_dim=4, + out_dim=1, + act="gelu", + pad_ratio=[0.0, 0.0], + ): + """ + Args: + modes1: list of int, first dimension maximal modes for each layer + modes2: list of int, second dimension maximal modes for each layer + modes3: list of int, third dimension maximal modes for each layer + layers: list of int, channels for each layer + fc_dim: dimension of fully connected layers + in_dim: int, input dimension + out_dim: int, output dimension + act: {tanh, gelu, relu, leaky_relu}, activation function + pad_ratio: the ratio of the extended domain + """ + super(FNO3d, self).__init__() + + if isinstance(pad_ratio, float): + pad_ratio = [pad_ratio, pad_ratio] + else: + assert len(pad_ratio) == 2, "Cannot add padding in more than 2 directions." + + self.pad_ratio = pad_ratio + self.modes1 = modes1 + self.modes2 = modes2 + self.modes3 = modes3 + self.pad_ratio = pad_ratio + + if layers is None: + self.layers = [width] * 4 + else: + self.layers = layers + self.fc0 = nn.Linear(in_dim, layers[0]) + + self.sp_convs = nn.LayerList( + [ + FactorizedSpectralConv3d( + in_size, out_size, mode1_num, mode2_num, mode3_num + ) + for in_size, out_size, mode1_num, mode2_num, mode3_num in zip( + self.layers, self.layers[1:], self.modes1, self.modes2, self.modes3 + ) + ] + ) + + self.ws = nn.LayerList( + [ + nn.Conv1D(in_size, out_size, 1) + for in_size, out_size in zip(self.layers, self.layers[1:]) + ] + ) + + self.fc1 = nn.Linear(layers[-1], fc_dim) + self.fc2 = nn.Linear(fc_dim, out_dim) + self.act = _get_act(act) + + def forward(self, x): + """ + Args: + x: (batchsize, x_grid, y_grid, t_grid, 3) + + Returns: + u: (batchsize, x_grid, y_grid, t_grid, 1) + + """ + size_z = x.shape[-2] + if max(self.pad_ratio) > 0: + num_pad = [round(size_z * i) for i in self.pad_ratio] + else: + num_pad = [0.0, 0.0] + length = len(self.ws) + batchsize = x.shape[0] + + x = self.fc0(x) + x = x.transpose([0, 4, 1, 2, 3]) + x = add_padding(x, num_pad=num_pad) + size_x, size_y, size_z = x.shape[-3], x.shape[-2], x.shape[-1] + + for i, (speconv, w) in enumerate(zip(self.sp_convs, self.ws)): + x1 = speconv(x) + x2 = w(x.reshape([batchsize, self.layers[i], -1])).reshape( + [batchsize, self.layers[i + 1], size_x, size_y, size_z] + ) + x = x1 + x2 + if i != length - 1: + x = self.act(x) + x = remove_padding(x, num_pad=num_pad) + x = x.transpose([0, 2, 3, 4, 1]) + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x diff --git a/jointContribution/PINO/PINO_paddle/models/utils.py b/jointContribution/PINO/PINO_paddle/models/utils.py index 0302c5ab3c..c9a00f5493 100644 --- 
a/jointContribution/PINO/PINO_paddle/models/utils.py +++ b/jointContribution/PINO/PINO_paddle/models/utils.py @@ -1,45 +1,51 @@ -import paddle.nn.functional as F - -def add_padding(x, num_pad): - if max(num_pad) > 0: - res = F.pad(x, (num_pad[0], num_pad[1]), 'constant', 0) - else: - res = x - return res - -def add_padding2(x, num_pad1, num_pad2): - if max(num_pad1) > 0 or max(num_pad2) > 0: - res = F.pad(x, (num_pad2[0], num_pad2[1], num_pad1[0], num_pad1[1]), 'constant', 0.) - else: - res = x - return res - -def remove_padding(x, num_pad): - if max(num_pad) > 0: - res = x[..., num_pad[0]:-num_pad[1]] - else: - res = x - return res - -def remove_padding2(x, num_pad1, num_pad2): - if max(num_pad1) > 0 or max(num_pad2) > 0: - res = x[..., num_pad1[0]:-num_pad1[1], num_pad2[0]:-num_pad2[1]] - else: - res = x - return res - -def _get_act(act): - if act == 'tanh': - func = F.tanh - elif act == 'gelu': - func = F.gelu - elif act == 'relu': - func = F.relu_ - elif act == 'elu': - func = F.elu_ - elif act == 'leaky_relu': - func = F.leaky_relu - else: - raise ValueError(f'{act} is not supported') - return func - +import paddle.nn.functional as F + + +def add_padding(x, num_pad): + if max(num_pad) > 0: + res = F.pad(x, (num_pad[0], num_pad[1]), "constant", 0) + else: + res = x + return res + + +def add_padding2(x, num_pad1, num_pad2): + if max(num_pad1) > 0 or max(num_pad2) > 0: + res = F.pad( + x, (num_pad2[0], num_pad2[1], num_pad1[0], num_pad1[1]), "constant", 0.0 + ) + else: + res = x + return res + + +def remove_padding(x, num_pad): + if max(num_pad) > 0: + res = x[..., num_pad[0] : -num_pad[1]] + else: + res = x + return res + + +def remove_padding2(x, num_pad1, num_pad2): + if max(num_pad1) > 0 or max(num_pad2) > 0: + res = x[..., num_pad1[0] : -num_pad1[1], num_pad2[0] : -num_pad2[1]] + else: + res = x + return res + + +def _get_act(act): + if act == "tanh": + func = F.tanh + elif act == "gelu": + func = F.gelu + elif act == "relu": + func = F.relu_ + elif act == "elu": + func = F.elu_ + elif act == "leaky_relu": + func = F.leaky_relu + else: + raise ValueError(f"{act} is not supported") + return func diff --git a/jointContribution/PINO/PINO_paddle/prepare_data.py b/jointContribution/PINO/PINO_paddle/prepare_data.py index 74a14694e1..c389f99a3c 100644 --- a/jointContribution/PINO/PINO_paddle/prepare_data.py +++ b/jointContribution/PINO/PINO_paddle/prepare_data.py @@ -1,33 +1,37 @@ -import numpy as np -import matplotlib.pyplot as plt - -def shuffle_data(datapath): - data = np.load(datapath) - rng = np.random.default_rng(123) - rng.shuffle(data, axis=0) - savepath = datapath.replace('.npy', '-shuffle.npy') - np.save(savepath, data) - -def test_data(datapath): - raw = np.load(datapath, mmap_mode='r') - print(raw[0, 0, 0, 0:10]) - newpath = datapath.replace('.npy', '-shuffle.npy') - new = np.load(newpath, mmap_mode='r') - print(new[0, 0, 0, 0:10]) - -def get_slice(datapath): - raw = np.load(datapath, mmap_mode='r') - - data = raw[-10:] - print(data.shape) - savepath = 'data/Re500-5x513x256x256.npy' - np.save(savepath, data) - -def plot_test(datapath): - duration = 0.125 - raw = np.load(datapath, mmap_mode='r') - - -if __name__ == '__main__': - datapath = '/raid/hongkai/NS-Re500_T300_id0-shuffle.npy' - get_slice(datapath) \ No newline at end of file +import matplotlib.pyplot as plt +import numpy as np + + +def shuffle_data(datapath): + data = np.load(datapath) + rng = np.random.default_rng(123) + rng.shuffle(data, axis=0) + savepath = datapath.replace(".npy", "-shuffle.npy") + np.save(savepath, 
data) + + +def test_data(datapath): + raw = np.load(datapath, mmap_mode="r") + print(raw[0, 0, 0, 0:10]) + newpath = datapath.replace(".npy", "-shuffle.npy") + new = np.load(newpath, mmap_mode="r") + print(new[0, 0, 0, 0:10]) + + +def get_slice(datapath): + raw = np.load(datapath, mmap_mode="r") + + data = raw[-10:] + print(data.shape) + savepath = "data/Re500-5x513x256x256.npy" + np.save(savepath, data) + + +def plot_test(datapath): + duration = 0.125 + raw = np.load(datapath, mmap_mode="r") + + +if __name__ == "__main__": + datapath = "/raid/hongkai/NS-Re500_T300_id0-shuffle.npy" + get_slice(datapath) diff --git a/jointContribution/PINO/PINO_paddle/solver/random_fields.py b/jointContribution/PINO/PINO_paddle/solver/random_fields.py index 448567db3e..b56e7e10a2 100644 --- a/jointContribution/PINO/PINO_paddle/solver/random_fields.py +++ b/jointContribution/PINO/PINO_paddle/solver/random_fields.py @@ -1,113 +1,179 @@ -import paddle - -import math - -class GaussianRF(object): - def __init__(self, dim, size, length=1.0, alpha=2.0, tau=3.0, sigma=None, boundary="periodic", constant_eig=False): - - self.dim = dim - - if sigma is None: - sigma = tau**(0.5*(2*alpha - self.dim)) - - k_max = size//2 - - const = (4*(math.pi**2))/(length**2) - - if dim == 1: - k = paddle.concat((paddle.arange(start=0, end=k_max, step=1), \ - paddle.arange(start=-k_max, end=0, step=1)), 0) - - self.sqrt_eig = size*math.sqrt(2.0)*sigma*((const*(k**2) + tau**2)**(-alpha/2.0)) - - if constant_eig: - self.sqrt_eig[0] = size*sigma*(tau**(-alpha)) - else: - self.sqrt_eig[0] = 0.0 - - elif dim == 2: - wavenumers = paddle.concat((paddle.arange(start=0, end=k_max, step=1), \ - paddle.arange(start=-k_max, end=0, step=1)), 0).repeat(size,1) - - k_x = wavenumers.transpose(0,1) - k_y = wavenumers - - self.sqrt_eig = (size**2)*math.sqrt(2.0)*sigma*((const*(k_x**2 + k_y**2) + tau**2)**(-alpha/2.0)) - - if constant_eig: - self.sqrt_eig[0,0] = (size**2)*sigma*(tau**(-alpha)) - else: - self.sqrt_eig[0,0] = 0.0 - - elif dim == 3: - wavenumers = paddle.concat((paddle.arange(start=0, end=k_max, step=1), \ - paddle.arange(start=-k_max, end=0, step=1)), 0).repeat(size,size,1) - - k_x = wavenumers.transpose(1,2) - k_y = wavenumers - k_z = wavenumers.transpose(0,2) - - self.sqrt_eig = (size**3)*math.sqrt(2.0)*sigma*((const*(k_x**2 + k_y**2 + k_z**2) + tau**2)**(-alpha/2.0)) - - if constant_eig: - self.sqrt_eig[0,0,0] = (size**3)*sigma*(tau**(-alpha)) - else: - self.sqrt_eig[0,0,0] = 0.0 - - self.size = [] - for j in range(self.dim): - self.size.append(size) - - self.size = tuple(self.size) - - def sample(self, N): - - coeff = paddle.randn(N, *self.size, dtype=paddle.float32) - coeff = self.sqrt_eig*coeff - - u = paddle.fft.irfftn(coeff, self.size, norm="backward") - return u - -class GaussianRF2d(object): - - def __init__(self, s1, s2, L1=2*math.pi, L2=2*math.pi, alpha=2.0, tau=3.0, sigma=None, mean=None, boundary="periodic", dtype=paddle.float64): - - self.s1 = s1 - self.s2 = s2 - - self.mean = mean - - self.dtype = dtype - - if sigma is None: - self.sigma = tau**(0.5*(2*alpha - 2.0)) - else: - self.sigma = sigma - - const1 = (4*(math.pi**2))/(L1**2) - const2 = (4*(math.pi**2))/(L2**2) - - freq_list1 = paddle.concat((paddle.arange(start=0, end=s1//2, step=1),\ - paddle.arange(start=-s1//2, end=0, step=1)), 0) - k1 = freq_list1.reshape([-1,1]).repeat([1, s2//2 + 1]).type(dtype) - - freq_list2 = paddle.arange(start=0, end=s2//2 + 1, step=1) - - k2 = freq_list2.view(1,-1).repeat(s1, 1).type(dtype) - - self.sqrt_eig = 
s1*s2*self.sigma*((const1*k1**2 + const2*k2**2 + tau**2)**(-alpha/2.0)) - self.sqrt_eig[0,0] = 0.0 - - def sample(self, N, xi=None): - if xi is None: - xi = paddle.randn(N, self.s1, self.s2//2 + 1, 2, dtype=self.dtype) - - xi[...,0] = self.sqrt_eig*xi [...,0] - xi[...,1] = self.sqrt_eig*xi [...,1] - - u = paddle.fft.irfft2(paddle.reshape(xi), s=(self.s1, self.s2)) - - if self.mean is not None: - u += self.mean - - return u \ No newline at end of file +import math + +import paddle + + +class GaussianRF(object): + def __init__( + self, + dim, + size, + length=1.0, + alpha=2.0, + tau=3.0, + sigma=None, + boundary="periodic", + constant_eig=False, + ): + + self.dim = dim + + if sigma is None: + sigma = tau ** (0.5 * (2 * alpha - self.dim)) + + k_max = size // 2 + + const = (4 * (math.pi**2)) / (length**2) + + if dim == 1: + k = paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1), + paddle.arange(start=-k_max, end=0, step=1), + ), + 0, + ) + + self.sqrt_eig = ( + size + * math.sqrt(2.0) + * sigma + * ((const * (k**2) + tau**2) ** (-alpha / 2.0)) + ) + + if constant_eig: + self.sqrt_eig[0] = size * sigma * (tau ** (-alpha)) + else: + self.sqrt_eig[0] = 0.0 + + elif dim == 2: + wavenumers = paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1), + paddle.arange(start=-k_max, end=0, step=1), + ), + 0, + ).repeat(size, 1) + + k_x = wavenumers.transpose(0, 1) + k_y = wavenumers + + self.sqrt_eig = ( + (size**2) + * math.sqrt(2.0) + * sigma + * ((const * (k_x**2 + k_y**2) + tau**2) ** (-alpha / 2.0)) + ) + + if constant_eig: + self.sqrt_eig[0, 0] = (size**2) * sigma * (tau ** (-alpha)) + else: + self.sqrt_eig[0, 0] = 0.0 + + elif dim == 3: + wavenumers = paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1), + paddle.arange(start=-k_max, end=0, step=1), + ), + 0, + ).repeat(size, size, 1) + + k_x = wavenumers.transpose(1, 2) + k_y = wavenumers + k_z = wavenumers.transpose(0, 2) + + self.sqrt_eig = ( + (size**3) + * math.sqrt(2.0) + * sigma + * ( + (const * (k_x**2 + k_y**2 + k_z**2) + tau**2) + ** (-alpha / 2.0) + ) + ) + + if constant_eig: + self.sqrt_eig[0, 0, 0] = (size**3) * sigma * (tau ** (-alpha)) + else: + self.sqrt_eig[0, 0, 0] = 0.0 + + self.size = [] + for j in range(self.dim): + self.size.append(size) + + self.size = tuple(self.size) + + def sample(self, N): + + coeff = paddle.randn(N, *self.size, dtype=paddle.float32) + coeff = self.sqrt_eig * coeff + + u = paddle.fft.irfftn(coeff, self.size, norm="backward") + return u + + +class GaussianRF2d(object): + def __init__( + self, + s1, + s2, + L1=2 * math.pi, + L2=2 * math.pi, + alpha=2.0, + tau=3.0, + sigma=None, + mean=None, + boundary="periodic", + dtype=paddle.float64, + ): + + self.s1 = s1 + self.s2 = s2 + + self.mean = mean + + self.dtype = dtype + + if sigma is None: + self.sigma = tau ** (0.5 * (2 * alpha - 2.0)) + else: + self.sigma = sigma + + const1 = (4 * (math.pi**2)) / (L1**2) + const2 = (4 * (math.pi**2)) / (L2**2) + + freq_list1 = paddle.concat( + ( + paddle.arange(start=0, end=s1 // 2, step=1), + paddle.arange(start=-s1 // 2, end=0, step=1), + ), + 0, + ) + k1 = freq_list1.reshape([-1, 1]).repeat([1, s2 // 2 + 1]).type(dtype) + + freq_list2 = paddle.arange(start=0, end=s2 // 2 + 1, step=1) + + k2 = freq_list2.view(1, -1).repeat(s1, 1).type(dtype) + + self.sqrt_eig = ( + s1 + * s2 + * self.sigma + * ((const1 * k1**2 + const2 * k2**2 + tau**2) ** (-alpha / 2.0)) + ) + self.sqrt_eig[0, 0] = 0.0 + + def sample(self, N, xi=None): + if xi is None: + xi = paddle.randn(N, self.s1, self.s2 // 2 + 1, 2, 
dtype=self.dtype) + + xi[..., 0] = self.sqrt_eig * xi[..., 0] + xi[..., 1] = self.sqrt_eig * xi[..., 1] + + u = paddle.fft.irfft2(paddle.reshape(xi), s=(self.s1, self.s2)) + + if self.mean is not None: + u += self.mean + + return u diff --git a/jointContribution/PINO/PINO_paddle/train_burgers.py b/jointContribution/PINO/PINO_paddle/train_burgers.py index 7b5d2d614b..abe78c8bb7 100644 --- a/jointContribution/PINO/PINO_paddle/train_burgers.py +++ b/jointContribution/PINO/PINO_paddle/train_burgers.py @@ -1,85 +1,113 @@ -import yaml -from argparse import ArgumentParser -import paddle -from paddle.optimizer.lr import MultiStepDecay -from paddle.optimizer import Adam -from models import FNO2d -from train_utils.datasets import BurgersLoader -from train_utils.train_2d import train_2d_burger -from train_utils.eval_2d import eval_burgers - -def run(args, config): - data_config = config['data'] - dataset = BurgersLoader(data_config['datapath'], - nx=data_config['nx'], nt=data_config['nt'], - sub=data_config['sub'], sub_t=data_config['sub_t'], new=True) - train_loader = dataset.make_loader(n_sample=data_config['n_sample'], - batch_size=config['train']['batchsize'], - start=data_config['offset']) - - model = FNO2d(modes1=config['model']['modes1'], - modes2=config['model']['modes2'], - fc_dim=config['model']['fc_dim'], - layers=config['model']['layers'], - act=config['model']['act']) - param_state_dict = paddle.load('init_param/init_burgers.pdparams') - model.set_dict(param_state_dict) - # Load from checkpoint - if 'ckpt' in config['train']: - ckpt_path = config['train']['ckpt'] - ckpt = paddle.load(ckpt_path) - model.load_state_dict(ckpt['model']) - print('Weights loaded from %s' % ckpt_path) - - scheduler = MultiStepDecay(learning_rate=config['train']['base_lr'], - milestones=config['train']['milestones'], - gamma=config['train']['scheduler_gamma']) - optimizer = Adam(learning_rate=scheduler, parameters=model.parameters()) - - train_2d_burger(model, - train_loader, - dataset.v, - optimizer, - scheduler, - config, - rank=0, - log=args.log, - project=config['log']['project'], - group=config['log']['group']) - -def test(config): - data_config = config['data'] - dataset = BurgersLoader(data_config['datapath'], - nx=data_config['nx'], nt=data_config['nt'], - sub=data_config['sub'], sub_t=data_config['sub_t'], new=True) - dataloader = dataset.make_loader(n_sample=data_config['n_sample'], - batch_size=config['test']['batchsize'], - start=data_config['offset']) - - model = FNO2d(modes1=config['model']['modes1'], - modes2=config['model']['modes2'], - fc_dim=config['model']['fc_dim'], - layers=config['model']['layers'], - act=config['model']['act']) - # Load from checkpoint - if 'ckpt' in config['test']: - ckpt_path = config['test']['ckpt'] - ckpt = paddle.load(ckpt_path) - model.load_state_dict(ckpt['model']) - print('Weights loaded from %s' % ckpt_path) - eval_burgers(model, dataloader, dataset.v, config) - -if __name__ == '__main__': - parser = ArgumentParser(description='Basic paser') - parser.add_argument('--config_path', type=str, help='Path to the configuration file') - parser.add_argument('--log', action='store_true', help='Turn on the wandb') - parser.add_argument('--mode', type=str, help='train or test') - args = parser.parse_args() - - config_file = args.config_path - with open(config_file, 'r') as stream: - config = yaml.load(stream, yaml.FullLoader) - if args.mode == 'train': - run(args, config) - else: - test(config) +from argparse import ArgumentParser + +import paddle +import yaml +from models 
import FNO2d +from paddle.optimizer import Adam +from paddle.optimizer.lr import MultiStepDecay +from train_utils.datasets import BurgersLoader +from train_utils.eval_2d import eval_burgers +from train_utils.train_2d import train_2d_burger + + +def run(args, config): + data_config = config["data"] + dataset = BurgersLoader( + data_config["datapath"], + nx=data_config["nx"], + nt=data_config["nt"], + sub=data_config["sub"], + sub_t=data_config["sub_t"], + new=True, + ) + train_loader = dataset.make_loader( + n_sample=data_config["n_sample"], + batch_size=config["train"]["batchsize"], + start=data_config["offset"], + ) + + model = FNO2d( + modes1=config["model"]["modes1"], + modes2=config["model"]["modes2"], + fc_dim=config["model"]["fc_dim"], + layers=config["model"]["layers"], + act=config["model"]["act"], + ) + param_state_dict = paddle.load("init_param/init_burgers.pdparams") + model.set_dict(param_state_dict) + # Load from checkpoint + if "ckpt" in config["train"]: + ckpt_path = config["train"]["ckpt"] + ckpt = paddle.load(ckpt_path) + model.load_state_dict(ckpt["model"]) + print("Weights loaded from %s" % ckpt_path) + + scheduler = MultiStepDecay( + learning_rate=config["train"]["base_lr"], + milestones=config["train"]["milestones"], + gamma=config["train"]["scheduler_gamma"], + ) + optimizer = Adam(learning_rate=scheduler, parameters=model.parameters()) + + train_2d_burger( + model, + train_loader, + dataset.v, + optimizer, + scheduler, + config, + rank=0, + log=args.log, + project=config["log"]["project"], + group=config["log"]["group"], + ) + + +def test(config): + data_config = config["data"] + dataset = BurgersLoader( + data_config["datapath"], + nx=data_config["nx"], + nt=data_config["nt"], + sub=data_config["sub"], + sub_t=data_config["sub_t"], + new=True, + ) + dataloader = dataset.make_loader( + n_sample=data_config["n_sample"], + batch_size=config["test"]["batchsize"], + start=data_config["offset"], + ) + + model = FNO2d( + modes1=config["model"]["modes1"], + modes2=config["model"]["modes2"], + fc_dim=config["model"]["fc_dim"], + layers=config["model"]["layers"], + act=config["model"]["act"], + ) + # Load from checkpoint + if "ckpt" in config["test"]: + ckpt_path = config["test"]["ckpt"] + ckpt = paddle.load(ckpt_path) + model.load_state_dict(ckpt["model"]) + print("Weights loaded from %s" % ckpt_path) + eval_burgers(model, dataloader, dataset.v, config) + + +if __name__ == "__main__": + parser = ArgumentParser(description="Basic paser") + parser.add_argument( + "--config_path", type=str, help="Path to the configuration file" + ) + parser.add_argument("--log", action="store_true", help="Turn on the wandb") + parser.add_argument("--mode", type=str, help="train or test") + args = parser.parse_args() + + config_file = args.config_path + with open(config_file, "r") as stream: + config = yaml.load(stream, yaml.FullLoader) + if args.mode == "train": + run(args, config) + else: + test(config) diff --git a/jointContribution/PINO/PINO_paddle/train_operator.py b/jointContribution/PINO/PINO_paddle/train_operator.py index 5f9fd6ae1c..34b7d95575 100644 --- a/jointContribution/PINO/PINO_paddle/train_operator.py +++ b/jointContribution/PINO/PINO_paddle/train_operator.py @@ -1,123 +1,165 @@ -import yaml -from argparse import ArgumentParser -import math - -import paddle -from paddle.io import DataLoader -from paddle.optimizer.lr import MultiStepDecay - -from solver.random_fields import GaussianRF -from train_utils import Adam -from train_utils.datasets import NSLoader, online_loader, 
DarcyFlow, DarcyCombo -from train_utils.train_3d import mixed_train -from train_utils.train_2d import train_2d_operator -from models import FNO3d, FNO2d - -def train_3d(args, config): - data_config = config['data'] - - # prepare dataloader for training with data - if 'datapath2' in data_config: - loader = NSLoader(datapath1=data_config['datapath'], datapath2=data_config['datapath2'], - nx=data_config['nx'], nt=data_config['nt'], - sub=data_config['sub'], sub_t=data_config['sub_t'], - N=data_config['total_num'], - t_interval=data_config['time_interval']) - else: - loader = NSLoader(datapath1=data_config['datapath'], - nx=data_config['nx'], nt=data_config['nt'], - sub=data_config['sub'], sub_t=data_config['sub_t'], - N=data_config['total_num'], - t_interval=data_config['time_interval']) - - train_loader = loader.make_loader(data_config['n_sample'], - batch_size=config['train']['batchsize'], - start=data_config['offset'], - train=data_config['shuffle']) - # prepare dataloader for training with only equations - gr_sampler = GaussianRF(2, data_config['S2'], 2 * math.pi, alpha=2.5, tau=7) - a_loader = online_loader(gr_sampler, - S=data_config['S2'], - T=data_config['T2'], - time_scale=data_config['time_interval'], - batchsize=config['train']['batchsize']) - # create model - model = FNO3d(modes1=config['model']['modes1'], - modes2=config['model']['modes2'], - modes3=config['model']['modes3'], - fc_dim=config['model']['fc_dim'], - layers=config['model']['layers'], - act=config['model']['act']) - # Load from checkpoint - if 'ckpt' in config['train']: - ckpt_path = config['train']['ckpt'] - ckpt = paddle.load(ckpt_path) - model.load_state_dict(ckpt['model']) - print('Weights loaded from %s' % ckpt_path) - # create optimizer and learning rate scheduler - scheduler = MultiStepDecay(learning_rate=config['train']['base_lr'], - milestones=config['train']['milestones'], - gamma=config['train']['scheduler_gamma']) - optimizer = Adam(learning_rate=scheduler, parameters=model.parameters()) - mixed_train(model, - train_loader, - loader.S, loader.T, - a_loader, - data_config['S2'], data_config['T2'], - optimizer, - scheduler, - config, - log=args.log, - project=config['log']['project'], - group=config['log']['group']) - -def train_2d(args, config): - data_config = config['data'] - - dataset = DarcyCombo(datapath=data_config['datapath'], - nx=data_config['nx'], - sub=data_config['sub'], - pde_sub=data_config['pde_sub'], - num=data_config['n_sample'], - offset=data_config['offset']) - train_loader = DataLoader(dataset, batch_size=config['train']['batchsize'], shuffle=True) - model = FNO2d(modes1=config['model']['modes1'], - modes2=config['model']['modes2'], - fc_dim=config['model']['fc_dim'], - layers=config['model']['layers'], - act=config['model']['act'], - pad_ratio=config['model']['pad_ratio']) - # Load from checkpoint - if 'ckpt' in config['train']: - ckpt_path = config['train']['ckpt'] - ckpt = paddle.load(ckpt_path) - model.load_state_dict(ckpt['model']) - print('Weights loaded from %s' % ckpt_path) - - scheduler = MultiStepDecay(learning_rate=config['train']['base_lr'], - milestones=config['train']['milestones'], - gamma=config['train']['scheduler_gamma']) - optimizer = Adam(learning_rate=scheduler, parameters=model.parameters()) - - train_2d_operator(model, - train_loader, - optimizer, scheduler, - config, rank=0, log=args.log, - project=config['log']['project'], - group=config['log']['group']) - -if __name__ == '__main__': - # parse options - parser = ArgumentParser(description='Basic paser') - 
parser.add_argument('--config_path', type=str, help='Path to the configuration file') - parser.add_argument('--log', action='store_true', help='Turn on the wandb') - args = parser.parse_args() - - config_file = args.config_path - with open(config_file, 'r') as stream: - config = yaml.load(stream, yaml.FullLoader) - - if 'name' in config['data'] and config['data']['name'] == 'Darcy': - train_2d(args, config) - else: - train_3d(args, config) +import math +from argparse import ArgumentParser + +import paddle +import yaml +from models import FNO2d +from models import FNO3d +from paddle.io import DataLoader +from paddle.optimizer.lr import MultiStepDecay +from solver.random_fields import GaussianRF +from train_utils import Adam +from train_utils.datasets import DarcyCombo +from train_utils.datasets import DarcyFlow +from train_utils.datasets import NSLoader +from train_utils.datasets import online_loader +from train_utils.train_2d import train_2d_operator +from train_utils.train_3d import mixed_train + + +def train_3d(args, config): + data_config = config["data"] + + # prepare dataloader for training with data + if "datapath2" in data_config: + loader = NSLoader( + datapath1=data_config["datapath"], + datapath2=data_config["datapath2"], + nx=data_config["nx"], + nt=data_config["nt"], + sub=data_config["sub"], + sub_t=data_config["sub_t"], + N=data_config["total_num"], + t_interval=data_config["time_interval"], + ) + else: + loader = NSLoader( + datapath1=data_config["datapath"], + nx=data_config["nx"], + nt=data_config["nt"], + sub=data_config["sub"], + sub_t=data_config["sub_t"], + N=data_config["total_num"], + t_interval=data_config["time_interval"], + ) + + train_loader = loader.make_loader( + data_config["n_sample"], + batch_size=config["train"]["batchsize"], + start=data_config["offset"], + train=data_config["shuffle"], + ) + # prepare dataloader for training with only equations + gr_sampler = GaussianRF(2, data_config["S2"], 2 * math.pi, alpha=2.5, tau=7) + a_loader = online_loader( + gr_sampler, + S=data_config["S2"], + T=data_config["T2"], + time_scale=data_config["time_interval"], + batchsize=config["train"]["batchsize"], + ) + # create model + model = FNO3d( + modes1=config["model"]["modes1"], + modes2=config["model"]["modes2"], + modes3=config["model"]["modes3"], + fc_dim=config["model"]["fc_dim"], + layers=config["model"]["layers"], + act=config["model"]["act"], + ) + # Load from checkpoint + if "ckpt" in config["train"]: + ckpt_path = config["train"]["ckpt"] + ckpt = paddle.load(ckpt_path) + model.load_state_dict(ckpt["model"]) + print("Weights loaded from %s" % ckpt_path) + # create optimizer and learning rate scheduler + scheduler = MultiStepDecay( + learning_rate=config["train"]["base_lr"], + milestones=config["train"]["milestones"], + gamma=config["train"]["scheduler_gamma"], + ) + optimizer = Adam(learning_rate=scheduler, parameters=model.parameters()) + mixed_train( + model, + train_loader, + loader.S, + loader.T, + a_loader, + data_config["S2"], + data_config["T2"], + optimizer, + scheduler, + config, + log=args.log, + project=config["log"]["project"], + group=config["log"]["group"], + ) + + +def train_2d(args, config): + data_config = config["data"] + + dataset = DarcyCombo( + datapath=data_config["datapath"], + nx=data_config["nx"], + sub=data_config["sub"], + pde_sub=data_config["pde_sub"], + num=data_config["n_sample"], + offset=data_config["offset"], + ) + train_loader = DataLoader( + dataset, batch_size=config["train"]["batchsize"], shuffle=True + ) + model = FNO2d( + 
modes1=config["model"]["modes1"], + modes2=config["model"]["modes2"], + fc_dim=config["model"]["fc_dim"], + layers=config["model"]["layers"], + act=config["model"]["act"], + pad_ratio=config["model"]["pad_ratio"], + ) + # Load from checkpoint + if "ckpt" in config["train"]: + ckpt_path = config["train"]["ckpt"] + ckpt = paddle.load(ckpt_path) + model.load_state_dict(ckpt["model"]) + print("Weights loaded from %s" % ckpt_path) + + scheduler = MultiStepDecay( + learning_rate=config["train"]["base_lr"], + milestones=config["train"]["milestones"], + gamma=config["train"]["scheduler_gamma"], + ) + optimizer = Adam(learning_rate=scheduler, parameters=model.parameters()) + + train_2d_operator( + model, + train_loader, + optimizer, + scheduler, + config, + rank=0, + log=args.log, + project=config["log"]["project"], + group=config["log"]["group"], + ) + + +if __name__ == "__main__": + # parse options + parser = ArgumentParser(description="Basic paser") + parser.add_argument( + "--config_path", type=str, help="Path to the configuration file" + ) + parser.add_argument("--log", action="store_true", help="Turn on the wandb") + args = parser.parse_args() + + config_file = args.config_path + with open(config_file, "r") as stream: + config = yaml.load(stream, yaml.FullLoader) + + if "name" in config["data"] and config["data"]["name"] == "Darcy": + train_2d(args, config) + else: + train_3d(args, config) diff --git a/jointContribution/PINO/PINO_paddle/train_pino.py b/jointContribution/PINO/PINO_paddle/train_pino.py index 828ae24702..e178e0bcbf 100644 --- a/jointContribution/PINO/PINO_paddle/train_pino.py +++ b/jointContribution/PINO/PINO_paddle/train_pino.py @@ -1,219 +1,237 @@ -import os -import yaml -import random -from argparse import ArgumentParser -from tqdm import tqdm -import numpy as np -import paddle -from paddle.optimizer import Adam -from paddle.io import DataLoader -from paddle.optimizer.lr import MultiStepDecay -from models import FNO3d -from train_utils.losses import LpLoss, PINO_loss3d, get_forcing -from train_utils.datasets import KFDataset, KFaDataset, sample_data -from train_utils.utils import save_ckpt, count_params, dict2str - -@paddle.no_grad() -def eval_ns(model, val_loader, criterion): - model.eval() - val_err = [] - for u, a in val_loader: - u, a = u, a - out = model(a) - val_loss = criterion(out, u) - val_err.append(val_loss.item()) - - N = len(val_loader) - - avg_err = np.mean(val_err) - std_err = np.std(val_err, ddof=1) / np.sqrt(N) - return avg_err, std_err - -def train_ns(model, - train_u_loader, # training data - train_a_loader, # initial conditions - val_loader, # validation data - optimizer, - scheduler, - config, args): - start_iter = config['train']['start_iter'] - v = 1/ config['data']['Re'] - t_duration = config['data']['t_duration'] - save_step = config['train']['save_step'] - eval_step = config['train']['eval_step'] - - ic_weight = config['train']['ic_loss'] - f_weight = config['train']['f_loss'] - xy_weight = config['train']['xy_loss'] - # set up directory - base_dir = os.path.join('exp', config['log']['logdir']) - ckpt_dir = os.path.join(base_dir, 'ckpts') - os.makedirs(ckpt_dir, exist_ok=True) - - # loss fn - lploss = LpLoss(size_average=True) - - S = config['data']['pde_res'][0] - forcing = get_forcing(S) - - pbar = range(start_iter, config['train']['num_iter']) - if args.tqdm: - pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.2) - - u_loader = sample_data(train_u_loader) - a_loader = sample_data(train_a_loader) - - for e in pbar: - log_dict = {} - - 
optimizer.clear_grad() - # data loss - if xy_weight > 0: - u, a_in = next(u_loader) - u = u - a_in = a_in - out = model(a_in) - data_loss = lploss(out, u) - else: - data_loss = paddle.zeros(1, dtype='float32') - - if f_weight != 0.0: - # pde loss - a = next(a_loader) - a = a - out = model(a) - - u0 = a[:, :, :, 0, -1] - loss_ic, loss_f = PINO_loss3d(out, u0, forcing, v, t_duration) - log_dict['IC'] = loss_ic.item() - log_dict['PDE'] = loss_f.item() - else: - loss_ic = loss_f = 0.0 - loss = data_loss * xy_weight + loss_f * f_weight + loss_ic * ic_weight - - loss.backward() - optimizer.step() - scheduler.step() - - log_dict['train loss'] = loss.item() - log_dict['data'] = data_loss.item() - if e % eval_step == 0: - eval_err, std_err = eval_ns(model, val_loader, lploss) - log_dict['val error'] = eval_err - - if args.tqdm: - logstr = dict2str(log_dict) - pbar.set_description( - ( - logstr - ) - ) - if e % save_step == 0 and e > 0: - ckpt_path = os.path.join(ckpt_dir, f'model-{e}.pt') - save_ckpt(ckpt_path, model, optimizer, scheduler) - -def subprocess(args): - with open(args.config, 'r') as f: - config = yaml.load(f, yaml.FullLoader) - - # set random seed - config['seed'] = args.seed - seed = args.seed - paddle.seed(seed) - random.seed(seed) - if paddle.cuda.is_available(): - paddle.cuda.manual_seed_all(seed) - - # create model - model = FNO3d(modes1=config['model']['modes1'], - modes2=config['model']['modes2'], - modes3=config['model']['modes3'], - fc_dim=config['model']['fc_dim'], - layers=config['model']['layers'], - act=config['model']['act'], - pad_ratio=config['model']['pad_ratio']) - num_params = count_params(model) - config['num_params'] = num_params - print(f'Number of parameters: {num_params}') - # Load from checkpoint - if args.ckpt: - ckpt_path = args.ckpt - ckpt = paddle.load(ckpt_path) - model.load_state_dict(ckpt['model']) - print('Weights loaded from %s' % ckpt_path) - - if args.test: - batchsize = config['test']['batchsize'] - testset = KFDataset(paths=config['data']['paths'], - raw_res=config['data']['raw_res'], - data_res=config['test']['data_res'], - pde_res=config['test']['data_res'], - n_samples=config['data']['n_test_samples'], - offset=config['data']['testoffset'], - t_duration=config['data']['t_duration']) - testloader = DataLoader(testset, batch_size=batchsize, num_workers=4) - criterion = LpLoss() - test_err, std_err = eval_ns(model, testloader, criterion) - print(f'Averaged test relative L2 error: {test_err}; Standard error: {std_err}') - else: - # training set - batchsize = config['train']['batchsize'] - u_set = KFDataset(paths=config['data']['paths'], - raw_res=config['data']['raw_res'], - data_res=config['data']['data_res'], - pde_res=config['data']['data_res'], - n_samples=config['data']['n_data_samples'], - offset=config['data']['offset'], - t_duration=config['data']['t_duration']) - u_loader = DataLoader(u_set, batch_size=batchsize, num_workers=4, shuffle=True) - - a_set = KFaDataset(paths=config['data']['paths'], - raw_res=config['data']['raw_res'], - pde_res=config['data']['pde_res'], - n_samples=config['data']['n_a_samples'], - offset=config['data']['a_offset'], - t_duration=config['data']['t_duration']) - a_loader = DataLoader(a_set, batch_size=batchsize, num_workers=4, shuffle=True) - # val set - valset = KFDataset(paths=config['data']['paths'], - raw_res=config['data']['raw_res'], - data_res=config['test']['data_res'], - pde_res=config['test']['data_res'], - n_samples=config['data']['n_test_samples'], - offset=config['data']['testoffset'], - 
t_duration=config['data']['t_duration']) - val_loader = DataLoader(valset, batch_size=batchsize, num_workers=4) - print(f'Train set: {len(u_set)}; Test set: {len(valset)}; IC set: {len(a_set)}') - - scheduler = MultiStepDecay(learning_rate=config['train']['base_lr'], - milestones=config['train']['milestones'], - gamma=config['train']['scheduler_gamma']) - optimizer = Adam(learning_rate=scheduler, parameters=model.parameters()) - if args.ckpt: - ckpt = paddle.load(ckpt_path) - optimizer.load_state_dict(ckpt['optim']) - scheduler.load_state_dict(ckpt['scheduler']) - config['train']['start_iter'] = scheduler.last_epoch - train_ns(model, - u_loader, a_loader, - val_loader, - optimizer, scheduler, - config, args) - print('Done!') - - - -if __name__ == '__main__': - paddle.backends.cudnn.benchmark = True - # parse options - parser = ArgumentParser(description='Basic paser') - parser.add_argument('--config', type=str, help='Path to the configuration file') - parser.add_argument('--log', action='store_true', help='Turn on the wandb') - parser.add_argument('--seed', type=int, default=None) - parser.add_argument('--ckpt', type=str, default=None) - parser.add_argument('--test', action='store_true', help='Test') - parser.add_argument('--tqdm', action='store_true', help='Turn on the tqdm') - args = parser.parse_args() - if args.seed is None: - args.seed = random.randint(0, 100000) - subprocess(args) \ No newline at end of file +import os +import random +from argparse import ArgumentParser + +import numpy as np +import paddle +import yaml +from models import FNO3d +from paddle.io import DataLoader +from paddle.optimizer import Adam +from paddle.optimizer.lr import MultiStepDecay +from tqdm import tqdm +from train_utils.datasets import KFaDataset +from train_utils.datasets import KFDataset +from train_utils.datasets import sample_data +from train_utils.losses import LpLoss +from train_utils.losses import PINO_loss3d +from train_utils.losses import get_forcing +from train_utils.utils import count_params +from train_utils.utils import dict2str +from train_utils.utils import save_ckpt + + +@paddle.no_grad() +def eval_ns(model, val_loader, criterion): + model.eval() + val_err = [] + for u, a in val_loader: + u, a = u, a + out = model(a) + val_loss = criterion(out, u) + val_err.append(val_loss.item()) + + N = len(val_loader) + + avg_err = np.mean(val_err) + std_err = np.std(val_err, ddof=1) / np.sqrt(N) + return avg_err, std_err + + +def train_ns( + model, + train_u_loader, # training data + train_a_loader, # initial conditions + val_loader, # validation data + optimizer, + scheduler, + config, + args, +): + start_iter = config["train"]["start_iter"] + v = 1 / config["data"]["Re"] + t_duration = config["data"]["t_duration"] + save_step = config["train"]["save_step"] + eval_step = config["train"]["eval_step"] + + ic_weight = config["train"]["ic_loss"] + f_weight = config["train"]["f_loss"] + xy_weight = config["train"]["xy_loss"] + # set up directory + base_dir = os.path.join("exp", config["log"]["logdir"]) + ckpt_dir = os.path.join(base_dir, "ckpts") + os.makedirs(ckpt_dir, exist_ok=True) + + # loss fn + lploss = LpLoss(size_average=True) + + S = config["data"]["pde_res"][0] + forcing = get_forcing(S) + + pbar = range(start_iter, config["train"]["num_iter"]) + if args.tqdm: + pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.2) + + u_loader = sample_data(train_u_loader) + a_loader = sample_data(train_a_loader) + + for e in pbar: + log_dict = {} + + optimizer.clear_grad() + # data loss + if xy_weight > 0: + u, 
a_in = next(u_loader)
+            u = u
+            a_in = a_in
+            out = model(a_in)
+            data_loss = lploss(out, u)
+        else:
+            data_loss = paddle.zeros([1], dtype="float32")
+
+        if f_weight != 0.0:
+            # pde loss
+            a = next(a_loader)
+            a = a
+            out = model(a)
+
+            u0 = a[:, :, :, 0, -1]
+            loss_ic, loss_f = PINO_loss3d(out, u0, forcing, v, t_duration)
+            log_dict["IC"] = loss_ic.item()
+            log_dict["PDE"] = loss_f.item()
+        else:
+            loss_ic = loss_f = 0.0
+        loss = data_loss * xy_weight + loss_f * f_weight + loss_ic * ic_weight
+
+        loss.backward()
+        optimizer.step()
+        scheduler.step()
+
+        log_dict["train loss"] = loss.item()
+        log_dict["data"] = data_loss.item()
+        if e % eval_step == 0:
+            eval_err, std_err = eval_ns(model, val_loader, lploss)
+            log_dict["val error"] = eval_err
+
+        if args.tqdm:
+            logstr = dict2str(log_dict)
+            pbar.set_description(logstr)
+        if e % save_step == 0 and e > 0:
+            ckpt_path = os.path.join(ckpt_dir, f"model-{e}.pt")
+            save_ckpt(ckpt_path, model, optimizer, scheduler)
+
+
+def subprocess(args):
+    with open(args.config, "r") as f:
+        config = yaml.load(f, yaml.FullLoader)
+
+    # set random seed
+    config["seed"] = args.seed
+    seed = args.seed
+    paddle.seed(seed)
+    random.seed(seed)
+    # Note: paddle.seed already seeds the CUDA generators as well, so no separate
+    # GPU-specific seeding call is needed here.
+
+    # create model
+    model = FNO3d(
+        modes1=config["model"]["modes1"],
+        modes2=config["model"]["modes2"],
+        modes3=config["model"]["modes3"],
+        fc_dim=config["model"]["fc_dim"],
+        layers=config["model"]["layers"],
+        act=config["model"]["act"],
+        pad_ratio=config["model"]["pad_ratio"],
+    )
+    num_params = count_params(model)
+    config["num_params"] = num_params
+    print(f"Number of parameters: {num_params}")
+    # Load from checkpoint
+    if args.ckpt:
+        ckpt_path = args.ckpt
+        ckpt = paddle.load(ckpt_path)
+        model.set_state_dict(ckpt["model"])
+        print("Weights loaded from %s" % ckpt_path)
+
+    if args.test:
+        batchsize = config["test"]["batchsize"]
+        testset = KFDataset(
+            paths=config["data"]["paths"],
+            raw_res=config["data"]["raw_res"],
+            data_res=config["test"]["data_res"],
+            pde_res=config["test"]["data_res"],
+            n_samples=config["data"]["n_test_samples"],
+            offset=config["data"]["testoffset"],
+            t_duration=config["data"]["t_duration"],
+        )
+        testloader = DataLoader(testset, batch_size=batchsize, num_workers=4)
+        criterion = LpLoss()
+        test_err, std_err = eval_ns(model, testloader, criterion)
+        print(f"Averaged test relative L2 error: {test_err}; Standard error: {std_err}")
+    else:
+        # training set
+        batchsize = config["train"]["batchsize"]
+        u_set = KFDataset(
+            paths=config["data"]["paths"],
+            raw_res=config["data"]["raw_res"],
+            data_res=config["data"]["data_res"],
+            pde_res=config["data"]["data_res"],
+            n_samples=config["data"]["n_data_samples"],
+            offset=config["data"]["offset"],
+            t_duration=config["data"]["t_duration"],
+        )
+        u_loader = DataLoader(u_set, batch_size=batchsize, num_workers=4, shuffle=True)
+
+        a_set = KFaDataset(
+            paths=config["data"]["paths"],
+            raw_res=config["data"]["raw_res"],
+            pde_res=config["data"]["pde_res"],
+            n_samples=config["data"]["n_a_samples"],
+            offset=config["data"]["a_offset"],
+            t_duration=config["data"]["t_duration"],
+        )
+        a_loader = DataLoader(a_set, batch_size=batchsize, num_workers=4, shuffle=True)
+        # val set
+        valset = KFDataset(
+            paths=config["data"]["paths"],
+            raw_res=config["data"]["raw_res"],
+            data_res=config["test"]["data_res"],
+            pde_res=config["test"]["data_res"],
+            n_samples=config["data"]["n_test_samples"],
+            offset=config["data"]["testoffset"],
+            t_duration=config["data"]["t_duration"],
+        )
+        
val_loader = DataLoader(valset, batch_size=batchsize, num_workers=4) + print(f"Train set: {len(u_set)}; Test set: {len(valset)}; IC set: {len(a_set)}") + + scheduler = MultiStepDecay( + learning_rate=config["train"]["base_lr"], + milestones=config["train"]["milestones"], + gamma=config["train"]["scheduler_gamma"], + ) + optimizer = Adam(learning_rate=scheduler, parameters=model.parameters()) + if args.ckpt: + ckpt = paddle.load(ckpt_path) + optimizer.load_state_dict(ckpt["optim"]) + scheduler.load_state_dict(ckpt["scheduler"]) + config["train"]["start_iter"] = scheduler.last_epoch + train_ns( + model, u_loader, a_loader, val_loader, optimizer, scheduler, config, args + ) + print("Done!") + + +if __name__ == "__main__": + paddle.backends.cudnn.benchmark = True + # parse options + parser = ArgumentParser(description="Basic paser") + parser.add_argument("--config", type=str, help="Path to the configuration file") + parser.add_argument("--log", action="store_true", help="Turn on the wandb") + parser.add_argument("--seed", type=int, default=None) + parser.add_argument("--ckpt", type=str, default=None) + parser.add_argument("--test", action="store_true", help="Test") + parser.add_argument("--tqdm", action="store_true", help="Turn on the tqdm") + args = parser.parse_args() + if args.seed is None: + args.seed = random.randint(0, 100000) + subprocess(args) diff --git a/jointContribution/PINO/PINO_paddle/train_utils/__init__.py b/jointContribution/PINO/PINO_paddle/train_utils/__init__.py index 6bcd02cdfa..187b43f4b4 100644 --- a/jointContribution/PINO/PINO_paddle/train_utils/__init__.py +++ b/jointContribution/PINO/PINO_paddle/train_utils/__init__.py @@ -1,2 +1,4 @@ -from .datasets import NSLoader, DarcyFlow -from .losses import get_forcing, LpLoss \ No newline at end of file +from .datasets import DarcyFlow +from .datasets import NSLoader +from .losses import LpLoss +from .losses import get_forcing diff --git a/jointContribution/PINO/PINO_paddle/train_utils/datasets.py b/jointContribution/PINO/PINO_paddle/train_utils/datasets.py index e95988b684..601c023ff5 100644 --- a/jointContribution/PINO/PINO_paddle/train_utils/datasets.py +++ b/jointContribution/PINO/PINO_paddle/train_utils/datasets.py @@ -1,497 +1,578 @@ -import scipy.io -import numpy as np - -try: - from pyDOE import lhs - # Only needed for PINN's dataset -except ImportError: - lhs = None - -import paddle -from paddle.io import Dataset -from .utils import get_grid3d, convert_ic, paddle2dgrid - -def online_loader(sampler, S, T, time_scale, batchsize=1): - while True: - u0 = sampler.sample(batchsize) - a = convert_ic(u0, batchsize, - S, T, - time_scale=time_scale) - yield a - -def sample_data(loader): - while True: - for batch in loader: - yield batch - -class MatReader(object): - def __init__(self, file_path, to_paddle=True, to_cuda=False, to_float=True): - super(MatReader, self).__init__() - - self.to_paddle = to_paddle - self.to_cuda = to_cuda - self.to_float = to_float - - self.file_path = file_path - - self.data = None - self.old_mat = None - self._load_file() - - def _load_file(self): - self.data = scipy.io.loadmat(self.file_path) - self.old_mat = True - - def load_file(self, file_path): - self.file_path = file_path - self._load_file() - - def read_field(self, field): - x = self.data[field] - - if not self.old_mat: - x = x[()] - x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1)) - - if self.to_float: - x = x.astype(np.float32) - - if self.to_paddle: - - x = paddle.to_tensor(x) - if self.to_cuda: - x = x - - return x - - def 
set_cuda(self, to_cuda): - self.to_cuda = to_cuda - - def set_paddle(self, to_paddle): - self.to_paddle = to_paddle - - def set_float(self, to_float): - self.to_float = to_float - -class BurgersLoader(object): - def __init__(self, datapath, nx=2 ** 10, nt=100, sub=8, sub_t=1, new=False): - dataloader = MatReader(datapath) - self.sub = sub - self.sub_t = sub_t - self.s = nx // sub - self.T = nt // sub_t - self.new = new - if new: - self.T += 1 - self.x_data = dataloader.read_field('input')[:, ::sub] - self.y_data = dataloader.read_field('output')[:, ::sub_t, ::sub] - self.v = dataloader.read_field('visc').item() - - def make_loader(self, n_sample, batch_size, start=0, train=True): - Xs = self.x_data[start:start + n_sample] - ys = self.y_data[start:start + n_sample] - if self.new: - gridx = paddle.to_tensor(np.linspace(0, 1, self.s + 1)[:-1], dtype='float32') - gridt = paddle.to_tensor(np.linspace(0, 1, self.T), dtype='float32') - else: - gridx = paddle.to_tensor(np.linspace(0, 1, self.s), dtype='float32') - gridt = paddle.to_tensor(np.linspace(0, 1, self.T + 1)[1:], dtype='float32') - gridx = gridx.reshape([1, 1, self.s]) - gridt = gridt.reshape([1, self.T, 1]) - - Xs = Xs.reshape([n_sample, 1, self.s]).tile([1, self.T, 1]) - Xs = paddle.stack([Xs, gridx.tile([n_sample, self.T, 1]), gridt.tile([n_sample, 1, self.s])], axis=3) - dataset = paddle.io.TensorDataset([Xs, ys]) - if train: - loader = paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=False) - else: - loader = paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=False) - return loader - -class NSLoader(object): - def __init__(self, datapath1, - nx, nt, - datapath2=None, sub=1, sub_t=1, - N=100, t_interval=1.0): - ''' - Load data from npy and reshape to (N, X, Y, T) - Args: - datapath1: path to data - nx: - nt: - datapath2: path to second part of data, default None - sub: - sub_t: - N: - t_interval: - ''' - self.S = nx // sub - self.T = int(nt * t_interval) // sub_t + 1 - self.time_scale = t_interval - data1 = np.load(datapath1) - data1 = paddle.to_tensor(data1, dtype='float32')[..., ::sub_t, ::sub, ::sub] - - if datapath2 is not None: - data2 = np.load(datapath2) - data2 = paddle.to_tensor(data2, dtype='float32')[..., ::sub_t, ::sub, ::sub] - if t_interval == 0.5: - data1 = self.extract(data1) - if datapath2 is not None: - data2 = self.extract(data2) - part1 = data1.permute(0, 2, 3, 1) - if datapath2 is not None: - part2 = data2.permute(0, 2, 3, 1) - self.data = paddle.concat((part1, part2), axis=0) - else: - self.data = part1 - - def make_loader(self, n_sample, batch_size, start=0, train=True): - if train: - a_data = self.data[start:start + n_sample, :, :, 0].reshape([n_sample, self.S, self.S]) - u_data = self.data[start:start + n_sample].reshape([n_sample, self.S, self.S, self.T]) - else: - a_data = self.data[-n_sample:, :, :, 0].reshape([n_sample, self.S, self.S]) - u_data = self.data[-n_sample:].reshape([n_sample, self.S, self.S, self.T]) - a_data = a_data.reshape([n_sample, self.S, self.S, 1, 1]).tile([1, 1, 1, self.T, 1]) - gridx, gridy, gridt = get_grid3d(self.S, self.T, time_scale=self.time_scale) - a_data = paddle.concat((gridx.tile([n_sample, 1, 1, 1, 1]), gridy.tile([n_sample, 1, 1, 1, 1]), - gridt.tile([n_sample, 1, 1, 1, 1]), a_data), axis=-1) - dataset = paddle.io.TensorDataset(a_data, u_data) - loader = paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=train) - return loader - - def make_dataset(self, n_sample, start=0, train=True): - if train: - a_data = self.data[start:start + n_sample, 
:, :, 0].reshape([n_sample, self.S, self.S]) - u_data = self.data[start:start + n_sample].reshape([n_sample, self.S, self.S, self.T]) - else: - a_data = self.data[-n_sample:, :, :, 0].reshape([n_sample, self.S, self.S]) - u_data = self.data[-n_sample:].reshape([n_sample, self.S, self.S, self.T]) - a_data = a_data.reshape([n_sample, self.S, self.S, 1, 1]).tile([1, 1, 1, self.T, 1]) - gridx, gridy, gridt = get_grid3d(self.S, self.T) - a_data = paddle.concat(( - gridx.tile([n_sample, 1, 1, 1, 1]), - gridy.tile([n_sample, 1, 1, 1, 1]), - gridt.tile([n_sample, 1, 1, 1, 1]), - a_data), axis=-1) - dataset = paddle.io.TensorDataset(a_data, u_data) - return dataset - - @staticmethod - def extract(data): - ''' - Extract data with time range 0-0.5, 0.25-0.75, 0.5-1.0, 0.75-1.25,... - Args: - data: tensor with size N x 129 x 128 x 128 - - Returns: - output: (4*N-1) x 65 x 128 x 128 - ''' - T = data.shape[1] // 2 - interval = data.shape[1] // 4 - N = data.shape[0] - new_data = paddle.zeros(4 * N - 1, T + 1, data.shape[2], data.shape[3]) - for i in range(N): - for j in range(4): - if i == N - 1 and j == 3: - # reach boundary - break - if j != 3: - new_data[i * 4 + j] = data[i, interval * j:interval * j + T + 1] - else: - new_data[i * 4 + j, 0: interval] = data[i, interval * j:interval * j + interval] - new_data[i * 4 + j, interval: T + 1] = data[i + 1, 0:interval + 1] - return new_data - -class KFDataset(Dataset): - def __init__(self, paths, - data_res, pde_res, - raw_res, - n_samples=None, - total_samples=None, - idx=0, - offset=0, - t_duration=1.0): - super().__init__() - self.data_res = data_res # data resolution - self.pde_res = pde_res # pde loss resolution - self.raw_res = raw_res # raw data resolution - self.t_duration = t_duration - self.paths = paths - self.offset = offset - self.n_samples = n_samples - if t_duration == 1.0: - self.T = self.pde_res[2] - else: - self.T = int(self.pde_res[2] * t_duration) + 1 # number of points in time dimension - - self.load() - if total_samples is not None: - print(f'Load {total_samples} samples starting from {idx}th sample') - self.data = self.data[idx:idx + total_samples] - self.a_data = self.a_data[idx:idx + total_samples] - - self.data_s_step = pde_res[0] // data_res[0] - self.data_t_step = (pde_res[2] - 1) // (data_res[2] - 1) - - def load(self): - datapath = self.paths[0] - raw_data = np.load(datapath, mmap_mode='r') - # subsample ratio - sub_x = self.raw_res[0] // self.data_res[0] - sub_t = (self.raw_res[2] - 1) // (self.data_res[2] - 1) - - a_sub_x = self.raw_res[0] // self.pde_res[0] - # load data - data = raw_data[self.offset: self.offset + self.n_samples, ::sub_t, ::sub_x, ::sub_x] - # divide data - if self.t_duration != 0.: - end_t = self.raw_res[2] - 1 - K = int(1/self.t_duration) - step = end_t // K - data = self.partition(data) - a_data = raw_data[self.offset: self.offset + self.n_samples, 0:end_t:step, ::a_sub_x, ::a_sub_x] - a_data = a_data.reshape([self.n_samples * K, 1, self.pde_res[0], self.pde_res[1]]) # 2N x 1 x S x S - else: - a_data = raw_data[self.offset: self.offset + self.n_samples, 0:1, ::a_sub_x, ::a_sub_x] - - # convert into paddle tensor - data = paddle.to_tensor(data, dtype='float32') - a_data = paddle.to_tensor(a_data, dtype='float32').transpose([0, 2, 3, 1]) - self.data = data.transpose([0, 2, 3, 1]) - - S = self.pde_res[1] - - a_data = a_data[:, :, :, :, None] # N x S x S x 1 x 1 - gridx, gridy, gridt = get_grid3d(S, self.T) - self.grid = paddle.concat((gridx[0], gridy[0], gridt[0]), axis=-1) # S x S x T x 3 - self.a_data = 
a_data - - def partition(self, data): - ''' - Args: - data: tensor with size N x T x S x S - - Returns: - output: int(1/t_duration) *N x (T//2 + 1) x 128 x 128 - ''' - N, T, S = data.shape[:3] - K = int(1 / self.t_duration) - new_data = np.zeros((K * N, T // K + 1, S, S)) - step = T // K - for i in range(N): - for j in range(K): - new_data[i * K + j] = data[i, j * step: (j+1) * step + 1] - return new_data - - def __getitem__(self, idx): - a_data = paddle.concat(( - self.grid, - self.a_data[idx].tile(1, 1, self.T, 1) - ), axis=-1) - return self.data[idx], a_data - - def __len__(self, ): - return self.data.shape[0] - -class BurgerData(Dataset): - ''' - members: - - t, x, Exact: raw data - - X, T: meshgrid - - X_star, u_star: flattened (x, t), u array - - lb, ub: lower bound and upper bound vector - - X_u, u: boundary condition data (x, t), u - ''' - - def __init__(self, datapath): - data = scipy.io.loadmat(datapath) - - # raw 2D data - self.t = data['t'].flatten()[:, None] # (100,1) - self.x = data['x'].flatten()[:, None] # (256, 1) - self.Exact = np.real(data['usol']).T # (100, 256) - - # Flattened sequence - self.get_flatten_data() - self.get_boundary_data() - - def __len__(self): - return self.Exact.shape[0] - - def __getitem__(self, idx): - return self.X_star[idx], self.u_star[idx] - - def get_flatten_data(self): - X, T = np.meshgrid(self.x, self.t) - self.X, self.T = X, T - self.X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) - self.u_star = self.Exact.flatten()[:, None] - - # lower bound of (x, t): 2-dimensional vector - self.lb = self.X_star.min(0) - # upper bound of (x, t): 2-dimensional vector - self.ub = self.X_star.max(0) - - def get_boundary_data(self): - xx1 = np.hstack((self.X[0:1, :].T, self.T[0:1, :].T)) - uu1 = self.Exact[0:1, :].T - xx2 = np.hstack((self.X[:, 0:1], self.T[:, 0:1])) - uu2 = self.Exact[:, 0:1] - xx3 = np.hstack((self.X[:, -1:], self.T[:, -1:])) - uu3 = self.Exact[:, -1:] - self.X_u = np.vstack([xx1, xx2, xx3]) - self.u = np.vstack([uu1, uu2, uu3]) - - def sample_xt(self, N=10000): - ''' - Sample (x, t) pairs within the boundary - Return: - - X_f: (N, 2) array - ''' - X_f = self.lb + (self.ub - self.lb) * lhs(2, N) - X_f = np.vstack((X_f, self.X_u)) - return X_f - - def sample_xu(self, N=100): - ''' - Sample N points from boundary data - Return: - - X_u: (N, 2) array - - u: (N, 1) array - ''' - idx = np.random.choice(self.X_u.shape[0], N, replace=False) - X_u = self.X_u[idx, :] - u = self.u[idx, :] - return X_u, u - -class DarcyFlow(Dataset): - def __init__(self, - datapath, - nx, sub, - offset=0, - num=1): - self.S = int(nx // sub) + 1 if sub > 1 else nx - data = scipy.io.loadmat(datapath) - a = data['coeff'] - u = data['sol'] - self.a = paddle.to_tensor(a[offset: offset + num, ::sub, ::sub], dtype='float32') - self.u = paddle.to_tensor(u[offset: offset + num, ::sub, ::sub], dtype='float32') - self.mesh = paddle2dgrid(self.S, self.S) - - def __len__(self): - return self.a.shape[0] - - def __getitem__(self, item): - fa = self.a[item] - return paddle.concat([fa.unsqueeze(2), self.mesh], axis=2), self.u[item] - -class DarcyIC(Dataset): - def __init__(self, - datapath, - nx, sub, - offset=0, - num=1): - self.S = int(nx // sub) + 1 if sub > 1 else nx - data = scipy.io.loadmat(datapath) - a = data['coeff'] - self.a = paddle.to_tensor(a[offset: offset + num, ::sub, ::sub], dtype='float32') - self.mesh = paddle2dgrid(self.S, self.S) - data = scipy.io.loadmat(datapath) - a = data['coeff'] - u = data['sol'] - self.a = paddle.to_tensor(a[offset: offset 
+ num, ::sub, ::sub], dtype='float32') - self.u = paddle.to_tensor(u[offset: offset + num, ::sub, ::sub], dtype='float32') - self.mesh = paddle2dgrid(self.S, self.S) - - def __len__(self): - return self.a.shape[0] - - def __getitem__(self, item): - fa = self.a[item] - return paddle.concat([fa.unsqueeze(2), self.mesh], axis=2) - -class DarcyCombo(Dataset): - def __init__(self, - datapath, - nx, - sub, pde_sub, - num=1000, offset=0) -> None: - super().__init__() - self.S = int(nx // sub) + 1 if sub > 1 else nx - self.pde_S = int(nx // pde_sub) + 1 if sub > 1 else nx - data = scipy.io.loadmat(datapath) - a = data['coeff'] - u = data['sol'] - self.a = paddle.to_tensor(a[offset: offset + num, ::sub, ::sub], dtype='float32') - self.u = paddle.to_tensor(u[offset: offset + num, ::sub, ::sub], dtype='float32') - self.mesh = paddle2dgrid(self.S, self.S) - self.pde_a = paddle.to_tensor(a[offset: offset + num, ::pde_sub, ::pde_sub], dtype='float32') - self.pde_mesh = paddle2dgrid(self.pde_S, self.pde_S) - - def __len__(self): - return self.a.shape[0] - - def __getitem__(self, item): - fa = self.a[item] - pde_a = self.pde_a[item] - data_ic = paddle.concat([fa.unsqueeze[2], self.mesh], axis=2) - pde_ic = paddle.concat([pde_a.unsqueeze[2], self.pde_mesh], axis=2) - return data_ic, self.u[item], pde_ic - -''' -dataset class for loading initial conditions for Komogrov flow -''' -class KFaDataset(Dataset): - def __init__(self, paths, - pde_res, - raw_res, - n_samples=None, - offset=0, - t_duration=1.0): - super().__init__() - self.pde_res = pde_res # pde loss resolution - self.raw_res = raw_res # raw data resolution - self.t_duration = t_duration - self.paths = paths - self.offset = offset - self.n_samples = n_samples - if t_duration == 1.0: - self.T = self.pde_res[2] - else: - self.T = int(self.pde_res[2] * t_duration) + 1 # number of points in time dimension - - self.load() - - def load(self): - datapath = self.paths[0] - raw_data = np.load(datapath, mmap_mode='r') - # subsample ratio - a_sub_x = self.raw_res[0] // self.pde_res[0] - # load data - if self.t_duration != 0.: - end_t = self.raw_res[2] - 1 - K = int(1/self.t_duration) - step = end_t // K - a_data = raw_data[self.offset: self.offset + self.n_samples, 0:end_t:step, ::a_sub_x, ::a_sub_x] - a_data = a_data.reshape([self.n_samples * K, 1, self.pde_res[0], self.pde_res[1]]) # 2N x 1 x S x S - else: - a_data = raw_data[self.offset: self.offset + self.n_samples, 0:1, ::a_sub_x, ::a_sub_x] - - # convert into tensor - a_data = paddle.to_tensor(a_data, dtype='float32').permute(0, 2, 3, 1) - S = self.pde_res[1] - a_data = a_data[:, :, :, :, None] # N x S x S x 1 x 1 - gridx, gridy, gridt = get_grid3d(S, self.T) - self.grid = paddle.concat((gridx[0], gridy[0], gridt[0]), axis=-1) # S x S x T x 3 - self.a_data = a_data - - def __getitem__(self, idx): - a_data = paddle.concat(( - self.grid, - self.a_data[idx].tile(1, 1, self.T, 1) - ), axis=-1) - return a_data - - def __len__(self, ): - return self.a_data.shape[0] \ No newline at end of file +import numpy as np +import scipy.io + +try: + from pyDOE import lhs + + # Only needed for PINN's dataset +except ImportError: + lhs = None + +import paddle +from paddle.io import Dataset + +from .utils import convert_ic +from .utils import get_grid3d +from .utils import paddle2dgrid + + +def online_loader(sampler, S, T, time_scale, batchsize=1): + while True: + u0 = sampler.sample(batchsize) + a = convert_ic(u0, batchsize, S, T, time_scale=time_scale) + yield a + + +def sample_data(loader): + while True: + for batch in 
loader: + yield batch + + +class MatReader(object): + def __init__(self, file_path, to_paddle=True, to_cuda=False, to_float=True): + super(MatReader, self).__init__() + + self.to_paddle = to_paddle + self.to_cuda = to_cuda + self.to_float = to_float + + self.file_path = file_path + + self.data = None + self.old_mat = None + self._load_file() + + def _load_file(self): + self.data = scipy.io.loadmat(self.file_path) + self.old_mat = True + + def load_file(self, file_path): + self.file_path = file_path + self._load_file() + + def read_field(self, field): + x = self.data[field] + + if not self.old_mat: + x = x[()] + x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1)) + + if self.to_float: + x = x.astype(np.float32) + + if self.to_paddle: + + x = paddle.to_tensor(x) + if self.to_cuda: + x = x + + return x + + def set_cuda(self, to_cuda): + self.to_cuda = to_cuda + + def set_paddle(self, to_paddle): + self.to_paddle = to_paddle + + def set_float(self, to_float): + self.to_float = to_float + + +class BurgersLoader(object): + def __init__(self, datapath, nx=2**10, nt=100, sub=8, sub_t=1, new=False): + dataloader = MatReader(datapath) + self.sub = sub + self.sub_t = sub_t + self.s = nx // sub + self.T = nt // sub_t + self.new = new + if new: + self.T += 1 + self.x_data = dataloader.read_field("input")[:, ::sub] + self.y_data = dataloader.read_field("output")[:, ::sub_t, ::sub] + self.v = dataloader.read_field("visc").item() + + def make_loader(self, n_sample, batch_size, start=0, train=True): + Xs = self.x_data[start : start + n_sample] + ys = self.y_data[start : start + n_sample] + if self.new: + gridx = paddle.to_tensor( + np.linspace(0, 1, self.s + 1)[:-1], dtype="float32" + ) + gridt = paddle.to_tensor(np.linspace(0, 1, self.T), dtype="float32") + else: + gridx = paddle.to_tensor(np.linspace(0, 1, self.s), dtype="float32") + gridt = paddle.to_tensor(np.linspace(0, 1, self.T + 1)[1:], dtype="float32") + gridx = gridx.reshape([1, 1, self.s]) + gridt = gridt.reshape([1, self.T, 1]) + + Xs = Xs.reshape([n_sample, 1, self.s]).tile([1, self.T, 1]) + Xs = paddle.stack( + [Xs, gridx.tile([n_sample, self.T, 1]), gridt.tile([n_sample, 1, self.s])], + axis=3, + ) + dataset = paddle.io.TensorDataset([Xs, ys]) + if train: + loader = paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=False) + else: + loader = paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=False) + return loader + + +class NSLoader(object): + def __init__( + self, datapath1, nx, nt, datapath2=None, sub=1, sub_t=1, N=100, t_interval=1.0 + ): + """ + Load data from npy and reshape to (N, X, Y, T) + Args: + datapath1: path to data + nx: + nt: + datapath2: path to second part of data, default None + sub: + sub_t: + N: + t_interval: + """ + self.S = nx // sub + self.T = int(nt * t_interval) // sub_t + 1 + self.time_scale = t_interval + data1 = np.load(datapath1) + data1 = paddle.to_tensor(data1, dtype="float32")[..., ::sub_t, ::sub, ::sub] + + if datapath2 is not None: + data2 = np.load(datapath2) + data2 = paddle.to_tensor(data2, dtype="float32")[..., ::sub_t, ::sub, ::sub] + if t_interval == 0.5: + data1 = self.extract(data1) + if datapath2 is not None: + data2 = self.extract(data2) + part1 = data1.permute(0, 2, 3, 1) + if datapath2 is not None: + part2 = data2.permute(0, 2, 3, 1) + self.data = paddle.concat((part1, part2), axis=0) + else: + self.data = part1 + + def make_loader(self, n_sample, batch_size, start=0, train=True): + if train: + a_data = self.data[start : start + n_sample, :, :, 0].reshape( + [n_sample, 
self.S, self.S] + ) + u_data = self.data[start : start + n_sample].reshape( + [n_sample, self.S, self.S, self.T] + ) + else: + a_data = self.data[-n_sample:, :, :, 0].reshape([n_sample, self.S, self.S]) + u_data = self.data[-n_sample:].reshape([n_sample, self.S, self.S, self.T]) + a_data = a_data.reshape([n_sample, self.S, self.S, 1, 1]).tile( + [1, 1, 1, self.T, 1] + ) + gridx, gridy, gridt = get_grid3d(self.S, self.T, time_scale=self.time_scale) + a_data = paddle.concat( + ( + gridx.tile([n_sample, 1, 1, 1, 1]), + gridy.tile([n_sample, 1, 1, 1, 1]), + gridt.tile([n_sample, 1, 1, 1, 1]), + a_data, + ), + axis=-1, + ) + dataset = paddle.io.TensorDataset(a_data, u_data) + loader = paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=train) + return loader + + def make_dataset(self, n_sample, start=0, train=True): + if train: + a_data = self.data[start : start + n_sample, :, :, 0].reshape( + [n_sample, self.S, self.S] + ) + u_data = self.data[start : start + n_sample].reshape( + [n_sample, self.S, self.S, self.T] + ) + else: + a_data = self.data[-n_sample:, :, :, 0].reshape([n_sample, self.S, self.S]) + u_data = self.data[-n_sample:].reshape([n_sample, self.S, self.S, self.T]) + a_data = a_data.reshape([n_sample, self.S, self.S, 1, 1]).tile( + [1, 1, 1, self.T, 1] + ) + gridx, gridy, gridt = get_grid3d(self.S, self.T) + a_data = paddle.concat( + ( + gridx.tile([n_sample, 1, 1, 1, 1]), + gridy.tile([n_sample, 1, 1, 1, 1]), + gridt.tile([n_sample, 1, 1, 1, 1]), + a_data, + ), + axis=-1, + ) + dataset = paddle.io.TensorDataset(a_data, u_data) + return dataset + + @staticmethod + def extract(data): + """ + Extract data with time range 0-0.5, 0.25-0.75, 0.5-1.0, 0.75-1.25,... + Args: + data: tensor with size N x 129 x 128 x 128 + + Returns: + output: (4*N-1) x 65 x 128 x 128 + """ + T = data.shape[1] // 2 + interval = data.shape[1] // 4 + N = data.shape[0] + new_data = paddle.zeros(4 * N - 1, T + 1, data.shape[2], data.shape[3]) + for i in range(N): + for j in range(4): + if i == N - 1 and j == 3: + # reach boundary + break + if j != 3: + new_data[i * 4 + j] = data[i, interval * j : interval * j + T + 1] + else: + new_data[i * 4 + j, 0:interval] = data[ + i, interval * j : interval * j + interval + ] + new_data[i * 4 + j, interval : T + 1] = data[ + i + 1, 0 : interval + 1 + ] + return new_data + + +class KFDataset(Dataset): + def __init__( + self, + paths, + data_res, + pde_res, + raw_res, + n_samples=None, + total_samples=None, + idx=0, + offset=0, + t_duration=1.0, + ): + super().__init__() + self.data_res = data_res # data resolution + self.pde_res = pde_res # pde loss resolution + self.raw_res = raw_res # raw data resolution + self.t_duration = t_duration + self.paths = paths + self.offset = offset + self.n_samples = n_samples + if t_duration == 1.0: + self.T = self.pde_res[2] + else: + self.T = ( + int(self.pde_res[2] * t_duration) + 1 + ) # number of points in time dimension + + self.load() + if total_samples is not None: + print(f"Load {total_samples} samples starting from {idx}th sample") + self.data = self.data[idx : idx + total_samples] + self.a_data = self.a_data[idx : idx + total_samples] + + self.data_s_step = pde_res[0] // data_res[0] + self.data_t_step = (pde_res[2] - 1) // (data_res[2] - 1) + + def load(self): + datapath = self.paths[0] + raw_data = np.load(datapath, mmap_mode="r") + # subsample ratio + sub_x = self.raw_res[0] // self.data_res[0] + sub_t = (self.raw_res[2] - 1) // (self.data_res[2] - 1) + + a_sub_x = self.raw_res[0] // self.pde_res[0] + # load data + data 
= raw_data[ + self.offset : self.offset + self.n_samples, ::sub_t, ::sub_x, ::sub_x + ] + # divide data + if self.t_duration != 0.0: + end_t = self.raw_res[2] - 1 + K = int(1 / self.t_duration) + step = end_t // K + data = self.partition(data) + a_data = raw_data[ + self.offset : self.offset + self.n_samples, + 0:end_t:step, + ::a_sub_x, + ::a_sub_x, + ] + a_data = a_data.reshape( + [self.n_samples * K, 1, self.pde_res[0], self.pde_res[1]] + ) # 2N x 1 x S x S + else: + a_data = raw_data[ + self.offset : self.offset + self.n_samples, 0:1, ::a_sub_x, ::a_sub_x + ] + + # convert into paddle tensor + data = paddle.to_tensor(data, dtype="float32") + a_data = paddle.to_tensor(a_data, dtype="float32").transpose([0, 2, 3, 1]) + self.data = data.transpose([0, 2, 3, 1]) + + S = self.pde_res[1] + + a_data = a_data[:, :, :, :, None] # N x S x S x 1 x 1 + gridx, gridy, gridt = get_grid3d(S, self.T) + self.grid = paddle.concat( + (gridx[0], gridy[0], gridt[0]), axis=-1 + ) # S x S x T x 3 + self.a_data = a_data + + def partition(self, data): + """ + Args: + data: tensor with size N x T x S x S + + Returns: + output: int(1/t_duration) *N x (T//2 + 1) x 128 x 128 + """ + N, T, S = data.shape[:3] + K = int(1 / self.t_duration) + new_data = np.zeros((K * N, T // K + 1, S, S)) + step = T // K + for i in range(N): + for j in range(K): + new_data[i * K + j] = data[i, j * step : (j + 1) * step + 1] + return new_data + + def __getitem__(self, idx): + a_data = paddle.concat( + (self.grid, self.a_data[idx].tile(1, 1, self.T, 1)), axis=-1 + ) + return self.data[idx], a_data + + def __len__( + self, + ): + return self.data.shape[0] + + +class BurgerData(Dataset): + """ + members: + - t, x, Exact: raw data + - X, T: meshgrid + - X_star, u_star: flattened (x, t), u array + - lb, ub: lower bound and upper bound vector + - X_u, u: boundary condition data (x, t), u + """ + + def __init__(self, datapath): + data = scipy.io.loadmat(datapath) + + # raw 2D data + self.t = data["t"].flatten()[:, None] # (100,1) + self.x = data["x"].flatten()[:, None] # (256, 1) + self.Exact = np.real(data["usol"]).T # (100, 256) + + # Flattened sequence + self.get_flatten_data() + self.get_boundary_data() + + def __len__(self): + return self.Exact.shape[0] + + def __getitem__(self, idx): + return self.X_star[idx], self.u_star[idx] + + def get_flatten_data(self): + X, T = np.meshgrid(self.x, self.t) + self.X, self.T = X, T + self.X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) + self.u_star = self.Exact.flatten()[:, None] + + # lower bound of (x, t): 2-dimensional vector + self.lb = self.X_star.min(0) + # upper bound of (x, t): 2-dimensional vector + self.ub = self.X_star.max(0) + + def get_boundary_data(self): + xx1 = np.hstack((self.X[0:1, :].T, self.T[0:1, :].T)) + uu1 = self.Exact[0:1, :].T + xx2 = np.hstack((self.X[:, 0:1], self.T[:, 0:1])) + uu2 = self.Exact[:, 0:1] + xx3 = np.hstack((self.X[:, -1:], self.T[:, -1:])) + uu3 = self.Exact[:, -1:] + self.X_u = np.vstack([xx1, xx2, xx3]) + self.u = np.vstack([uu1, uu2, uu3]) + + def sample_xt(self, N=10000): + """ + Sample (x, t) pairs within the boundary + Return: + - X_f: (N, 2) array + """ + X_f = self.lb + (self.ub - self.lb) * lhs(2, N) + X_f = np.vstack((X_f, self.X_u)) + return X_f + + def sample_xu(self, N=100): + """ + Sample N points from boundary data + Return: + - X_u: (N, 2) array + - u: (N, 1) array + """ + idx = np.random.choice(self.X_u.shape[0], N, replace=False) + X_u = self.X_u[idx, :] + u = self.u[idx, :] + return X_u, u + + +class DarcyFlow(Dataset): + 
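+    # Descriptive comment: DarcyFlow loads the coefficient field "coeff" and the solution "sol" from a .mat file, subsamples both by `sub`, and yields (coefficient stacked with a 2-D mesh grid, solution) pairs.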
def __init__(self, datapath, nx, sub, offset=0, num=1): + self.S = int(nx // sub) + 1 if sub > 1 else nx + data = scipy.io.loadmat(datapath) + a = data["coeff"] + u = data["sol"] + self.a = paddle.to_tensor( + a[offset : offset + num, ::sub, ::sub], dtype="float32" + ) + self.u = paddle.to_tensor( + u[offset : offset + num, ::sub, ::sub], dtype="float32" + ) + self.mesh = paddle2dgrid(self.S, self.S) + + def __len__(self): + return self.a.shape[0] + + def __getitem__(self, item): + fa = self.a[item] + return paddle.concat([fa.unsqueeze(2), self.mesh], axis=2), self.u[item] + + +class DarcyIC(Dataset): + def __init__(self, datapath, nx, sub, offset=0, num=1): + self.S = int(nx // sub) + 1 if sub > 1 else nx + data = scipy.io.loadmat(datapath) + a = data["coeff"] + self.a = paddle.to_tensor( + a[offset : offset + num, ::sub, ::sub], dtype="float32" + ) + self.mesh = paddle2dgrid(self.S, self.S) + data = scipy.io.loadmat(datapath) + a = data["coeff"] + u = data["sol"] + self.a = paddle.to_tensor( + a[offset : offset + num, ::sub, ::sub], dtype="float32" + ) + self.u = paddle.to_tensor( + u[offset : offset + num, ::sub, ::sub], dtype="float32" + ) + self.mesh = paddle2dgrid(self.S, self.S) + + def __len__(self): + return self.a.shape[0] + + def __getitem__(self, item): + fa = self.a[item] + return paddle.concat([fa.unsqueeze(2), self.mesh], axis=2) + + +class DarcyCombo(Dataset): + def __init__(self, datapath, nx, sub, pde_sub, num=1000, offset=0) -> None: + super().__init__() + self.S = int(nx // sub) + 1 if sub > 1 else nx + self.pde_S = int(nx // pde_sub) + 1 if pde_sub > 1 else nx + data = scipy.io.loadmat(datapath) + a = data["coeff"] + u = data["sol"] + self.a = paddle.to_tensor( + a[offset : offset + num, ::sub, ::sub], dtype="float32" + ) + self.u = paddle.to_tensor( + u[offset : offset + num, ::sub, ::sub], dtype="float32" + ) + self.mesh = paddle2dgrid(self.S, self.S) + self.pde_a = paddle.to_tensor( + a[offset : offset + num, ::pde_sub, ::pde_sub], dtype="float32" + ) + self.pde_mesh = paddle2dgrid(self.pde_S, self.pde_S) + + def __len__(self): + return self.a.shape[0] + + def __getitem__(self, item): + fa = self.a[item] + pde_a = self.pde_a[item] + data_ic = paddle.concat([fa.unsqueeze(2), self.mesh], axis=2) + pde_ic = paddle.concat([pde_a.unsqueeze(2), self.pde_mesh], axis=2) + return data_ic, self.u[item], pde_ic + + +""" +Dataset class for loading initial conditions for Kolmogorov flow +""" + + +class KFaDataset(Dataset): + def __init__( + self, paths, pde_res, raw_res, n_samples=None, offset=0, t_duration=1.0 + ): + super().__init__() + self.pde_res = pde_res # pde loss resolution + self.raw_res = raw_res # raw data resolution + self.t_duration = t_duration + self.paths = paths + self.offset = offset + self.n_samples = n_samples + if t_duration == 1.0: + self.T = self.pde_res[2] + else: + self.T = ( + int(self.pde_res[2] * t_duration) + 1 + ) # number of points in time dimension + + self.load() + + def load(self): + datapath = self.paths[0] + raw_data = np.load(datapath, mmap_mode="r") + # subsample ratio + a_sub_x = self.raw_res[0] // self.pde_res[0] + # load data + if self.t_duration != 0.0: + end_t = self.raw_res[2] - 1 + K = int(1 / self.t_duration) + step = end_t // K + a_data = raw_data[ + self.offset : self.offset + self.n_samples, + 0:end_t:step, + ::a_sub_x, + ::a_sub_x, + ] + a_data = a_data.reshape( + [self.n_samples * K, 1, self.pde_res[0], self.pde_res[1]] + ) # 2N x 1 x S x S + else: + a_data = raw_data[ + self.offset : self.offset + self.n_samples, 0:1, ::a_sub_x, 
::a_sub_x + ] + + # convert into tensor + a_data = paddle.to_tensor(a_data, dtype="float32").permute(0, 2, 3, 1) + S = self.pde_res[1] + a_data = a_data[:, :, :, :, None] # N x S x S x 1 x 1 + gridx, gridy, gridt = get_grid3d(S, self.T) + self.grid = paddle.concat( + (gridx[0], gridy[0], gridt[0]), axis=-1 + ) # S x S x T x 3 + self.a_data = a_data + + def __getitem__(self, idx): + a_data = paddle.concat( + (self.grid, self.a_data[idx].tile(1, 1, self.T, 1)), axis=-1 + ) + return a_data + + def __len__( + self, + ): + return self.a_data.shape[0] diff --git a/jointContribution/PINO/PINO_paddle/train_utils/eval_2d.py b/jointContribution/PINO/PINO_paddle/train_utils/eval_2d.py index 459c97d3a9..ff17859a5b 100644 --- a/jointContribution/PINO/PINO_paddle/train_utils/eval_2d.py +++ b/jointContribution/PINO/PINO_paddle/train_utils/eval_2d.py @@ -1,113 +1,113 @@ -from tqdm import tqdm -import numpy as np - -import paddle - -from .losses import LpLoss, darcy_loss, PINO_loss - -import matplotlib.pyplot as plt - -def eval_darcy(model, - dataloader, - config, - use_tqdm=True): - model.eval() - myloss = LpLoss(size_average=True) - if use_tqdm: - pbar = tqdm(dataloader, dynamic_ncols=True, smoothing=0.05) - else: - pbar = dataloader - - mesh = dataloader.dataset.mesh - mollifier = paddle.sin(np.pi * mesh[..., 0]) * paddle.sin(np.pi * mesh[..., 1]) * 0.001 - f_val = [] - test_err = [] - i = 0 - fig, ax = plt.subplots(3,3) - with paddle.no_grad(): - for x, y in pbar: - - pred = model(x).reshape(y.shape) - pred = pred * mollifier - if i < 3: - ax[2][i].imshow(pred[0, :, :]) - ax[2][i].set_title('prediction') - ax[1][i].imshow(y[0, :, :]) - ax[1][i].set_title('ground truth') - ax[0][i].imshow(x[0, :, :, 0]) - ax[0][i].set_title('input') - - for k in range(3): - ax[k][i].set_xlabel('x') - ax[k][i].set_ylabel('y') - if i==3: - plt.tight_layout() - plt.savefig('result.png') - i+=1 - data_loss = myloss(pred, y) - a = x[..., 0] - f_loss = darcy_loss(pred, a) - - test_err.append(data_loss.item()) - f_val.append(f_loss.item()) - if use_tqdm: - pbar.set_description( - ( - f'Equation error: {f_loss.item():.5f}, test l2 error: {data_loss.item()}' - ) - ) - mean_f_err = np.mean(f_val) - std_f_err = np.std(f_val, ddof=1) / np.sqrt(len(f_val)) - - mean_err = np.mean(test_err) - std_err = np.std(test_err, ddof=1) / np.sqrt(len(test_err)) - - print(f'==Averaged relative L2 error mean: {mean_err}, std error: {std_err}==\n' - f'==Averaged equation error mean: {mean_f_err}, std error: {std_f_err}==') - -def eval_burgers(model, - dataloader, - v, - config, - use_tqdm=True): - model.eval() - myloss = LpLoss(size_average=True) - if use_tqdm: - pbar = tqdm(dataloader, dynamic_ncols=True, smoothing=0.05) - else: - pbar = dataloader - - test_err = [] - f_err = [] - i = 0 - fig, ax = plt.subplots(2,3) - for x, y in pbar: - x, y = x, y - out = model(x).reshape(y.shape) - data_loss = myloss(out, y) - if i<3: - ax[0][i].imshow(out[0, :, :]) - ax[0][i].set_xlabel('x') - ax[0][i].set_ylabel('t') - ax[0][i].set_title('prediction') - ax[1][i].imshow(y[0, :, :]) - ax[1][i].set_xlabel('x') - ax[1][i].set_ylabel('t') - ax[1][i].set_title('ground truth') - if i==3: - plt.tight_layout() - plt.savefig('result.png') - i+=1 - loss_u, f_loss = PINO_loss(out, x[:, 0, :, 0], v) - test_err.append(data_loss.item()) - f_err.append(f_loss.item()) - - mean_f_err = np.mean(f_err) - std_f_err = np.std(f_err, ddof=1) / np.sqrt(len(f_err)) - - mean_err = np.mean(test_err) - std_err = np.std(test_err, ddof=1) / np.sqrt(len(test_err)) - - print(f'==Averaged 
relative L2 error mean: {mean_err}, std error: {std_err}==\n' - f'==Averaged equation error mean: {mean_f_err}, std error: {std_f_err}==') - +import matplotlib.pyplot as plt +import numpy as np +import paddle +from tqdm import tqdm + +from .losses import LpLoss +from .losses import PINO_loss +from .losses import darcy_loss + + +def eval_darcy(model, dataloader, config, use_tqdm=True): + model.eval() + myloss = LpLoss(size_average=True) + if use_tqdm: + pbar = tqdm(dataloader, dynamic_ncols=True, smoothing=0.05) + else: + pbar = dataloader + + mesh = dataloader.dataset.mesh + mollifier = ( + paddle.sin(np.pi * mesh[..., 0]) * paddle.sin(np.pi * mesh[..., 1]) * 0.001 + ) + f_val = [] + test_err = [] + i = 0 + fig, ax = plt.subplots(3, 3) + with paddle.no_grad(): + for x, y in pbar: + + pred = model(x).reshape(y.shape) + pred = pred * mollifier + if i < 3: + ax[2][i].imshow(pred[0, :, :]) + ax[2][i].set_title("prediction") + ax[1][i].imshow(y[0, :, :]) + ax[1][i].set_title("ground truth") + ax[0][i].imshow(x[0, :, :, 0]) + ax[0][i].set_title("input") + + for k in range(3): + ax[k][i].set_xlabel("x") + ax[k][i].set_ylabel("y") + if i == 3: + plt.tight_layout() + plt.savefig("result.png") + i += 1 + data_loss = myloss(pred, y) + a = x[..., 0] + f_loss = darcy_loss(pred, a) + + test_err.append(data_loss.item()) + f_val.append(f_loss.item()) + if use_tqdm: + pbar.set_description( + ( + f"Equation error: {f_loss.item():.5f}, test l2 error: {data_loss.item()}" + ) + ) + mean_f_err = np.mean(f_val) + std_f_err = np.std(f_val, ddof=1) / np.sqrt(len(f_val)) + + mean_err = np.mean(test_err) + std_err = np.std(test_err, ddof=1) / np.sqrt(len(test_err)) + + print( + f"==Averaged relative L2 error mean: {mean_err}, std error: {std_err}==\n" + f"==Averaged equation error mean: {mean_f_err}, std error: {std_f_err}==" + ) + + +def eval_burgers(model, dataloader, v, config, use_tqdm=True): + model.eval() + myloss = LpLoss(size_average=True) + if use_tqdm: + pbar = tqdm(dataloader, dynamic_ncols=True, smoothing=0.05) + else: + pbar = dataloader + + test_err = [] + f_err = [] + i = 0 + fig, ax = plt.subplots(2, 3) + for x, y in pbar: + x, y = x, y + out = model(x).reshape(y.shape) + data_loss = myloss(out, y) + if i < 3: + ax[0][i].imshow(out[0, :, :]) + ax[0][i].set_xlabel("x") + ax[0][i].set_ylabel("t") + ax[0][i].set_title("prediction") + ax[1][i].imshow(y[0, :, :]) + ax[1][i].set_xlabel("x") + ax[1][i].set_ylabel("t") + ax[1][i].set_title("ground truth") + if i == 3: + plt.tight_layout() + plt.savefig("result.png") + i += 1 + loss_u, f_loss = PINO_loss(out, x[:, 0, :, 0], v) + test_err.append(data_loss.item()) + f_err.append(f_loss.item()) + + mean_f_err = np.mean(f_err) + std_f_err = np.std(f_err, ddof=1) / np.sqrt(len(f_err)) + + mean_err = np.mean(test_err) + std_err = np.std(test_err, ddof=1) / np.sqrt(len(test_err)) + + print( + f"==Averaged relative L2 error mean: {mean_err}, std error: {std_err}==\n" + f"==Averaged equation error mean: {mean_f_err}, std error: {std_f_err}==" + ) diff --git a/jointContribution/PINO/PINO_paddle/train_utils/eval_3d.py b/jointContribution/PINO/PINO_paddle/train_utils/eval_3d.py index f387669d1e..fd6df34a5f 100644 --- a/jointContribution/PINO/PINO_paddle/train_utils/eval_3d.py +++ b/jointContribution/PINO/PINO_paddle/train_utils/eval_3d.py @@ -1,62 +1,69 @@ -import paddle -import paddle.nn.functional as F - -from tqdm import tqdm -from timeit import default_timer - -from .losses import LpLoss, PINO_loss3d - -def eval_ns(model, - loader, - dataloader, - forcing, - config, 
- device, - log=False, - project='PINO-default', - group='FDM', - tags=['Nan'], - use_tqdm=True): - ''' - Evaluate the model for Navier Stokes equation - ''' - # data parameters - v = 1 / config['data']['Re'] - S, T = loader.S, loader.T - t_interval = config['data']['time_interval'] - # eval settings - batch_size = config['test']['batchsize'] - - model.eval() - myloss = LpLoss(size_average=True) - if use_tqdm: - pbar = tqdm(dataloader, dynamic_ncols=True, smoothing=0.05) - else: - pbar = dataloader - loss_dict = {'f_error': 0.0, - 'test_l2': 0.0} - start_time = default_timer() - with paddle.no_grad(): - for x, y in pbar: - x, y = x.to(device), y.to(device) - x_in = F.pad(x, (0, 0, 0, 5), "constant", 0) - out = model(x_in).reshape(batch_size, S, S, T + 5) - out = out[..., :-5] - x = x[:, :, :, 0, -1] - loss_l2 = myloss(out.view(batch_size, S, S, T), y.view(batch_size, S, S, T)) - loss_ic, loss_f = PINO_loss3d(out.view(batch_size, S, S, T), x, forcing, v, t_interval) - - loss_dict['f_error'] += loss_f - loss_dict['test_l2'] += loss_l2 - if device == 0 and use_tqdm: - pbar.set_description( - ( - f'Train f error: {loss_f.item():.5f}; Test l2 error: {loss_l2.item():.5f}' - ) - ) - end_time = default_timer() - test_l2 = loss_dict['test_l2'].item() / len(dataloader) - loss_f = loss_dict['f_error'].item() / len(dataloader) - print(f'==Averaged relative L2 error is: {test_l2}==\n' - f'==Averaged equation error is: {loss_f}==') - print(f'Time cost: {end_time - start_time} s') +from timeit import default_timer + +import paddle +import paddle.nn.functional as F +from tqdm import tqdm + +from .losses import LpLoss +from .losses import PINO_loss3d + + +def eval_ns( + model, + loader, + dataloader, + forcing, + config, + device, + log=False, + project="PINO-default", + group="FDM", + tags=["Nan"], + use_tqdm=True, +): + """ + Evaluate the model for Navier Stokes equation + """ + # data parameters + v = 1 / config["data"]["Re"] + S, T = loader.S, loader.T + t_interval = config["data"]["time_interval"] + # eval settings + batch_size = config["test"]["batchsize"] + + model.eval() + myloss = LpLoss(size_average=True) + if use_tqdm: + pbar = tqdm(dataloader, dynamic_ncols=True, smoothing=0.05) + else: + pbar = dataloader + loss_dict = {"f_error": 0.0, "test_l2": 0.0} + start_time = default_timer() + with paddle.no_grad(): + for x, y in pbar: + x, y = x.to(device), y.to(device) + x_in = F.pad(x, (0, 0, 0, 5), "constant", 0) + out = model(x_in).reshape(batch_size, S, S, T + 5) + out = out[..., :-5] + x = x[:, :, :, 0, -1] + loss_l2 = myloss(out.view(batch_size, S, S, T), y.view(batch_size, S, S, T)) + loss_ic, loss_f = PINO_loss3d( + out.view(batch_size, S, S, T), x, forcing, v, t_interval + ) + + loss_dict["f_error"] += loss_f + loss_dict["test_l2"] += loss_l2 + if device == 0 and use_tqdm: + pbar.set_description( + ( + f"Train f error: {loss_f.item():.5f}; Test l2 error: {loss_l2.item():.5f}" + ) + ) + end_time = default_timer() + test_l2 = loss_dict["test_l2"].item() / len(dataloader) + loss_f = loss_dict["f_error"].item() / len(dataloader) + print( + f"==Averaged relative L2 error is: {test_l2}==\n" + f"==Averaged equation error is: {loss_f}==" + ) + print(f"Time cost: {end_time - start_time} s") diff --git a/jointContribution/PINO/PINO_paddle/train_utils/losses.py b/jointContribution/PINO/PINO_paddle/train_utils/losses.py index 86bc2844f2..1bc7cd695f 100644 --- a/jointContribution/PINO/PINO_paddle/train_utils/losses.py +++ b/jointContribution/PINO/PINO_paddle/train_utils/losses.py @@ -1,240 +1,300 @@ 
-import numpy as np -import paddle -import paddle.nn.functional as F - -def FDM_Darcy(u, a, D=1): - batchsize = u.size(0) - size = u.size(1) - u = u.reshape(batchsize, size, size) - a = a.reshape(batchsize, size, size) - dx = D / (size - 1) - dy = dx - - ux = (u[:, 2:, 1:-1] - u[:, :-2, 1:-1]) / (2 * dx) - uy = (u[:, 1:-1, 2:] - u[:, 1:-1, :-2]) / (2 * dy) - - a = a[:, 1:-1, 1:-1] - aux = a * ux - auy = a * uy - auxx = (aux[:, 2:, 1:-1] - aux[:, :-2, 1:-1]) / (2 * dx) - auyy = (auy[:, 1:-1, 2:] - auy[:, 1:-1, :-2]) / (2 * dy) - Du = - (auxx + auyy) - return Du - -def darcy_loss(u, a): - batchsize = u.shape[0] - size = u.shape[1] - u = u.reshape(batchsize, size, size) - a = a.reshape(batchsize, size, size) - lploss = LpLoss(size_average=True) - - Du = FDM_Darcy(u, a) - f = paddle.ones(Du.shape) - loss_f = lploss.rel(Du, f) - return loss_f - -def FDM_NS_vorticity(w, v=1/40, t_interval=1.0): - batchsize = w.shape[0] - nx = w.shape[1] - ny = w.shape[2] - nt = w.shape[3] - w = w.reshape([batchsize, nx, ny, nt]) - - w_h = paddle.fft.fft2(w, axes=[1, 2]) - # Wavenumbers in y-direction - k_max = nx//2 - N = nx - k_x = paddle.concat((paddle.arange(start=0, end=k_max, step=1), - paddle.arange(start=-k_max, end=0, step=1)), 0).reshape([N, 1]).tile([1, N]).reshape([1,N,N,1]) - k_y = paddle.concat((paddle.arange(start=0, end=k_max, step=1), - paddle.arange(start=-k_max, end=0, step=1)), 0).reshape([1, N]).tile([N, 1]).reshape([1,N,N,1]) - # Negative Laplacian in Fourier space - lap = (k_x ** 2 + k_y ** 2) - lap[0, 0, 0, 0] = 1.0 - f_h = w_h / lap - - ux_h = 1j * k_y * f_h - uy_h = -1j * k_x * f_h - wx_h = 1j * k_x * w_h - wy_h = 1j * k_y * w_h - wlap_h = -lap * w_h - - ux = paddle.fft.irfft2(ux_h[:, :, :k_max + 1], axes=[1, 2]) - uy = paddle.fft.irfft2(uy_h[:, :, :k_max + 1], axes=[1, 2]) - wx = paddle.fft.irfft2(wx_h[:, :, :k_max+1], axes=[1,2]) - wy = paddle.fft.irfft2(wy_h[:, :, :k_max+1], axes=[1,2]) - wlap = paddle.fft.irfft2(wlap_h[:, :, :k_max+1], axes=[1,2]) - - dt = t_interval / (nt-1) - wt = (w[:, :, :, 2:] - w[:, :, :, :-2]) / (2 * dt) - - Du1 = wt + (ux*wx + uy*wy - v*wlap)[...,1:-1] #- forcing - return Du1 - -def Autograd_Burgers(u, grid, v=1/100): - from paddle.autograd import grad - gridt, gridx = grid - - ut = grad(u.sum(), gridt, create_graph=True)[0] - ux = grad(u.sum(), gridx, create_graph=True)[0] - uxx = grad(ux.sum(), gridx, create_graph=True)[0] - Du = ut + ux*u - v*uxx - return Du, ux, uxx, ut - -def AD_loss(u, u0, grid, index_ic=None, p=None, q=None): - batchsize = u.size(0) - - Du, ux, uxx, ut = Autograd_Burgers(u, grid) - - if index_ic is None: - # u in on a uniform grid - nt = u.size(1) - nx = u.size(2) - u = u.reshape(batchsize, nt, nx) - - index_t = paddle.zeros(nx,).long() - index_x = paddle.tensor(range(nx)).long() - boundary_u = u[:, index_t, index_x] - else: - # u is randomly sampled, 0:p are BC, p:2p are ic, 2p:2p+q are interior - boundary_u = u[:, :p] - batch_index = paddle.tensor(range(batchsize)).reshape(batchsize, 1).repeat(1, p) - u0 = u0[batch_index, index_ic] - - loss_ic = F.mse_loss(boundary_u, u0) - f = paddle.zeros(Du.shape, device=u.device) - loss_f = F.mse_loss(Du, f) - return loss_ic, loss_f - -class LpLoss(object): - ''' - loss function with rel/abs Lp loss - ''' - def __init__(self, d=2, p=2, size_average=True, reduction=True): - super(LpLoss, self).__init__() - - #Dimension and Lp-norm type are postive - assert d > 0 and p > 0 - - self.d = d - self.p = p - self.reduction = reduction - self.size_average = size_average - - def abs(self, x, y): - 
num_examples = x.size()[0] - - #Assume uniform mesh - h = 1.0 / (x.shape[1] - 1.0) - - all_norms = (h**(self.d/self.p))*paddle.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1) - - if self.reduction: - if self.size_average: - return paddle.mean(all_norms) - else: - return paddle.sum(all_norms) - - return all_norms - - def rel(self, x, y): - num_examples = x.shape[0] - - diff_norms = paddle.norm(x.reshape([num_examples,-1]) - y.reshape([num_examples,-1]), self.p, 1) - y_norms = paddle.norm(y.reshape([num_examples,-1]), self.p, 1) - - if self.reduction: - if self.size_average: - return paddle.mean(diff_norms/y_norms) - else: - return paddle.sum(diff_norms/y_norms) - - return diff_norms/y_norms - - def __call__(self, x, y): - return self.rel(x, y) - -def FDM_Burgers(u, v, D=1): - batchsize = u.shape[0] - nt = u.shape[1] - nx = u.shape[2] - - u = u.reshape([batchsize, nt, nx]) - dt = D / (nt-1) - dx = D / (nx) - - u_h = paddle.fft.fft(u, axis=2) - # Wavenumbers in y-direction - k_max = nx//2 - k_x = paddle.concat((paddle.arange(start=0, end=k_max, step=1, dtype='float32'), - paddle.arange(start=-k_max, end=0, step=1, dtype='float32')), 0).reshape([1,1,nx]) - ux_h = 2j *np.pi*k_x*u_h - uxx_h = 2j *np.pi*k_x*ux_h - ux = paddle.fft.irfft(ux_h[:, :, :k_max+1], axis=2, n=nx) - uxx = paddle.fft.irfft(uxx_h[:, :, :k_max+1], axis=2, n=nx) - ut = (u[:, 2:, :] - u[:, :-2, :]) / (2 * dt) - Du = ut + (ux*u - v*uxx)[:,1:-1,:] - return Du - -def PINO_loss(u, u0, v): - batchsize = u.shape[0] - nt = u.shape[1] - nx = u.shape[2] - - u = u.reshape([batchsize, nt, nx]) - - index_t = paddle.zeros(1,'int32') - index_x = paddle.to_tensor(list(range(nx)),'int32') - boundary_u = paddle.index_select(u, index_t, axis=1).squeeze(1) - loss_u = F.mse_loss(boundary_u, u0) - - Du = FDM_Burgers(u, v)[:, :, :] - f = paddle.zeros(Du.shape) - loss_f = F.mse_loss(Du, f) - - return loss_u, loss_f - -def PINO_loss3d(u, u0, forcing, v=1/40, t_interval=1.0): - batchsize = u.shape[0] - nx = u.shape[1] - ny = u.shape[2] - nt = u.shape[3] - - u = u.reshape([batchsize, nx, ny, nt]) - lploss = LpLoss(size_average=True) - - u_in = u[:, :, :, 0] - loss_ic = lploss(u_in, u0) - - Du = FDM_NS_vorticity(u, v, t_interval) - f = forcing.tile([batchsize, 1, 1, nt-2]) - loss_f = lploss(Du, f) - - return loss_ic, loss_f - -def PDELoss(model, x, t, nu): - ''' - Compute the residual of PDE: - residual = u_t + u * u_x - nu * u_{xx} : (N,1) - - Params: - - model - - x, t: (x, t) pairs, (N, 2) tensor - - nu: constant of PDE - Return: - - mean of residual : scalar - ''' - u = model(paddle.cat([x, t], dim=1)) - # First backward to compute u_x (shape: N x 1), u_t (shape: N x 1) - grad_x, grad_t = paddle.autograd.grad(outputs=[u.sum()], inputs=[x, t], create_graph=True) - # Second backward to compute u_{xx} (shape N x 1) - - gradgrad_x, = paddle.autograd.grad(outputs=[grad_x.sum()], inputs=[x], create_graph=True) - - residual = grad_t + u * grad_x - nu * gradgrad_x - return residual - -def get_forcing(S): - x1 = paddle.to_tensor(np.linspace(0, 2*np.pi, S, endpoint=False), dtype=paddle.float32).reshape([S, 1]).tile([1, S]) - x2 = paddle.to_tensor(np.linspace(0, 2*np.pi, S, endpoint=False), dtype=paddle.float32).reshape([1, S]).tile([S, 1]) - return -4 * (paddle.cos(4*(x2))).reshape([1,S,S,1]) \ No newline at end of file +import numpy as np +import paddle +import paddle.nn.functional as F + + +def FDM_Darcy(u, a, D=1): + batchsize = u.size(0) + size = u.size(1) + u = u.reshape(batchsize, size, size) + a = a.reshape(batchsize, size, 
size) + dx = D / (size - 1) + dy = dx + + ux = (u[:, 2:, 1:-1] - u[:, :-2, 1:-1]) / (2 * dx) + uy = (u[:, 1:-1, 2:] - u[:, 1:-1, :-2]) / (2 * dy) + + a = a[:, 1:-1, 1:-1] + aux = a * ux + auy = a * uy + auxx = (aux[:, 2:, 1:-1] - aux[:, :-2, 1:-1]) / (2 * dx) + auyy = (auy[:, 1:-1, 2:] - auy[:, 1:-1, :-2]) / (2 * dy) + Du = -(auxx + auyy) + return Du + + +def darcy_loss(u, a): + batchsize = u.shape[0] + size = u.shape[1] + u = u.reshape(batchsize, size, size) + a = a.reshape(batchsize, size, size) + lploss = LpLoss(size_average=True) + + Du = FDM_Darcy(u, a) + f = paddle.ones(Du.shape) + loss_f = lploss.rel(Du, f) + return loss_f + + +def FDM_NS_vorticity(w, v=1 / 40, t_interval=1.0): + batchsize = w.shape[0] + nx = w.shape[1] + ny = w.shape[2] + nt = w.shape[3] + w = w.reshape([batchsize, nx, ny, nt]) + + w_h = paddle.fft.fft2(w, axes=[1, 2]) + # Wavenumbers in y-direction + k_max = nx // 2 + N = nx + k_x = ( + paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1), + paddle.arange(start=-k_max, end=0, step=1), + ), + 0, + ) + .reshape([N, 1]) + .tile([1, N]) + .reshape([1, N, N, 1]) + ) + k_y = ( + paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1), + paddle.arange(start=-k_max, end=0, step=1), + ), + 0, + ) + .reshape([1, N]) + .tile([N, 1]) + .reshape([1, N, N, 1]) + ) + # Negative Laplacian in Fourier space + lap = k_x**2 + k_y**2 + lap[0, 0, 0, 0] = 1.0 + f_h = w_h / lap + + ux_h = 1j * k_y * f_h + uy_h = -1j * k_x * f_h + wx_h = 1j * k_x * w_h + wy_h = 1j * k_y * w_h + wlap_h = -lap * w_h + + ux = paddle.fft.irfft2(ux_h[:, :, : k_max + 1], axes=[1, 2]) + uy = paddle.fft.irfft2(uy_h[:, :, : k_max + 1], axes=[1, 2]) + wx = paddle.fft.irfft2(wx_h[:, :, : k_max + 1], axes=[1, 2]) + wy = paddle.fft.irfft2(wy_h[:, :, : k_max + 1], axes=[1, 2]) + wlap = paddle.fft.irfft2(wlap_h[:, :, : k_max + 1], axes=[1, 2]) + + dt = t_interval / (nt - 1) + wt = (w[:, :, :, 2:] - w[:, :, :, :-2]) / (2 * dt) + + Du1 = wt + (ux * wx + uy * wy - v * wlap)[..., 1:-1] # - forcing + return Du1 + + +def Autograd_Burgers(u, grid, v=1 / 100): + from paddle.autograd import grad + + gridt, gridx = grid + + ut = grad(u.sum(), gridt, create_graph=True)[0] + ux = grad(u.sum(), gridx, create_graph=True)[0] + uxx = grad(ux.sum(), gridx, create_graph=True)[0] + Du = ut + ux * u - v * uxx + return Du, ux, uxx, ut + + +def AD_loss(u, u0, grid, index_ic=None, p=None, q=None): + batchsize = u.size(0) + + Du, ux, uxx, ut = Autograd_Burgers(u, grid) + + if index_ic is None: + # u in on a uniform grid + nt = u.size(1) + nx = u.size(2) + u = u.reshape(batchsize, nt, nx) + + index_t = paddle.zeros( + nx, + ).long() + index_x = paddle.tensor(range(nx)).long() + boundary_u = u[:, index_t, index_x] + else: + # u is randomly sampled, 0:p are BC, p:2p are ic, 2p:2p+q are interior + boundary_u = u[:, :p] + batch_index = paddle.tensor(range(batchsize)).reshape(batchsize, 1).repeat(1, p) + u0 = u0[batch_index, index_ic] + + loss_ic = F.mse_loss(boundary_u, u0) + f = paddle.zeros(Du.shape, device=u.device) + loss_f = F.mse_loss(Du, f) + return loss_ic, loss_f + + +class LpLoss(object): + """ + loss function with rel/abs Lp loss + """ + + def __init__(self, d=2, p=2, size_average=True, reduction=True): + super(LpLoss, self).__init__() + + # Dimension and Lp-norm type are postive + assert d > 0 and p > 0 + + self.d = d + self.p = p + self.reduction = reduction + self.size_average = size_average + + def abs(self, x, y): + num_examples = x.size()[0] + + # Assume uniform mesh + h = 1.0 / (x.shape[1] - 1.0) + + all_norms = 
(h ** (self.d / self.p)) * paddle.norm( + x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1 + ) + + if self.reduction: + if self.size_average: + return paddle.mean(all_norms) + else: + return paddle.sum(all_norms) + + return all_norms + + def rel(self, x, y): + num_examples = x.shape[0] + + diff_norms = paddle.norm( + x.reshape([num_examples, -1]) - y.reshape([num_examples, -1]), self.p, 1 + ) + y_norms = paddle.norm(y.reshape([num_examples, -1]), self.p, 1) + + if self.reduction: + if self.size_average: + return paddle.mean(diff_norms / y_norms) + else: + return paddle.sum(diff_norms / y_norms) + + return diff_norms / y_norms + + def __call__(self, x, y): + return self.rel(x, y) + + +def FDM_Burgers(u, v, D=1): + batchsize = u.shape[0] + nt = u.shape[1] + nx = u.shape[2] + + u = u.reshape([batchsize, nt, nx]) + dt = D / (nt - 1) + dx = D / (nx) + + u_h = paddle.fft.fft(u, axis=2) + # Wavenumbers in y-direction + k_max = nx // 2 + k_x = paddle.concat( + ( + paddle.arange(start=0, end=k_max, step=1, dtype="float32"), + paddle.arange(start=-k_max, end=0, step=1, dtype="float32"), + ), + 0, + ).reshape([1, 1, nx]) + ux_h = 2j * np.pi * k_x * u_h + uxx_h = 2j * np.pi * k_x * ux_h + ux = paddle.fft.irfft(ux_h[:, :, : k_max + 1], axis=2, n=nx) + uxx = paddle.fft.irfft(uxx_h[:, :, : k_max + 1], axis=2, n=nx) + ut = (u[:, 2:, :] - u[:, :-2, :]) / (2 * dt) + Du = ut + (ux * u - v * uxx)[:, 1:-1, :] + return Du + + +def PINO_loss(u, u0, v): + batchsize = u.shape[0] + nt = u.shape[1] + nx = u.shape[2] + + u = u.reshape([batchsize, nt, nx]) + + index_t = paddle.zeros(1, "int32") + index_x = paddle.to_tensor(list(range(nx)), "int32") + boundary_u = paddle.index_select(u, index_t, axis=1).squeeze(1) + loss_u = F.mse_loss(boundary_u, u0) + + Du = FDM_Burgers(u, v)[:, :, :] + f = paddle.zeros(Du.shape) + loss_f = F.mse_loss(Du, f) + + return loss_u, loss_f + + +def PINO_loss3d(u, u0, forcing, v=1 / 40, t_interval=1.0): + batchsize = u.shape[0] + nx = u.shape[1] + ny = u.shape[2] + nt = u.shape[3] + + u = u.reshape([batchsize, nx, ny, nt]) + lploss = LpLoss(size_average=True) + + u_in = u[:, :, :, 0] + loss_ic = lploss(u_in, u0) + + Du = FDM_NS_vorticity(u, v, t_interval) + f = forcing.tile([batchsize, 1, 1, nt - 2]) + loss_f = lploss(Du, f) + + return loss_ic, loss_f + + +def PDELoss(model, x, t, nu): + """ + Compute the residual of PDE: + residual = u_t + u * u_x - nu * u_{xx} : (N,1) + + Params: + - model + - x, t: (x, t) pairs, (N, 2) tensor + - nu: constant of PDE + Return: + - mean of residual : scalar + """ + u = model(paddle.cat([x, t], dim=1)) + # First backward to compute u_x (shape: N x 1), u_t (shape: N x 1) + grad_x, grad_t = paddle.autograd.grad( + outputs=[u.sum()], inputs=[x, t], create_graph=True + ) + # Second backward to compute u_{xx} (shape N x 1) + + (gradgrad_x,) = paddle.autograd.grad( + outputs=[grad_x.sum()], inputs=[x], create_graph=True + ) + + residual = grad_t + u * grad_x - nu * gradgrad_x + return residual + + +def get_forcing(S): + x1 = ( + paddle.to_tensor( + np.linspace(0, 2 * np.pi, S, endpoint=False), dtype=paddle.float32 + ) + .reshape([S, 1]) + .tile([1, S]) + ) + x2 = ( + paddle.to_tensor( + np.linspace(0, 2 * np.pi, S, endpoint=False), dtype=paddle.float32 + ) + .reshape([1, S]) + .tile([S, 1]) + ) + return -4 * (paddle.cos(4 * (x2))).reshape([1, S, S, 1]) diff --git a/jointContribution/PINO/PINO_paddle/train_utils/train_2d.py b/jointContribution/PINO/PINO_paddle/train_utils/train_2d.py index 3e41fc590a..4e7c02086b 100644 --- 
a/jointContribution/PINO/PINO_paddle/train_utils/train_2d.py +++ b/jointContribution/PINO/PINO_paddle/train_utils/train_2d.py @@ -1,135 +1,162 @@ -import numpy as np -import paddle -from tqdm import tqdm -from .utils import save_checkpoint -from .losses import LpLoss, darcy_loss, PINO_loss - -def train_2d_operator(model, - train_loader, - optimizer, scheduler, - config, - rank=0, log=False, - project='PINO-2d-default', - group='default', - tags=['default'], - use_tqdm=True, - profile=False): - - data_weight = config['train']['xy_loss'] - f_weight = config['train']['f_loss'] - model.train() - myloss = LpLoss(size_average=True) - pbar = range(config['train']['epochs']) - if use_tqdm: - pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.1) - mesh = train_loader.dataset.mesh - mollifier = paddle.sin(np.pi * mesh[..., 0]) * paddle.sin(np.pi * mesh[..., 1]) * 0.001 - pde_mesh = train_loader.dataset.pde_mesh - pde_mol = paddle.sin(np.pi * pde_mesh[..., 0]) * paddle.sin(np.pi * pde_mesh[..., 1]) * 0.001 - for e in pbar: - loss_dict = {'train_loss': 0.0, - 'data_loss': 0.0, - 'f_loss': 0.0, - 'test_error': 0.0} - for data_ic, u, pde_ic in train_loader: - data_ic, u, pde_ic = data_ic.to(rank), u.to(rank), pde_ic.to(rank) - - optimizer.zero_grad() - - # data loss - if data_weight > 0: - pred = model(data_ic).squeeze(dim=-1) - pred = pred * mollifier - data_loss = myloss(pred, y) - - a = data_ic[..., 0] - f_loss = darcy_loss(pred, a) - - loss = data_weight * data_loss + f_weight * f_loss - loss.backward() - optimizer.step() - - loss_dict['train_loss'] += loss.item() * y.shape[0] - loss_dict['f_loss'] += f_loss.item() * y.shape[0] - loss_dict['data_loss'] += data_loss.item() * y.shape[0] - - scheduler.step() - train_loss_val = loss_dict['train_loss'] / len(train_loader.dataset) - f_loss_val = loss_dict['f_loss'] / len(train_loader.dataset) - data_loss_val = loss_dict['data_loss'] / len(train_loader.dataset) - - if use_tqdm: - pbar.set_description( - ( - f'Epoch: {e}, train loss: {train_loss_val:.5f}, ' - f'f_loss: {f_loss_val:.5f}, ' - f'data loss: {data_loss_val:.5f}' - ) - ) - save_checkpoint(config['train']['save_dir'], - config['train']['save_name'], - model, optimizer) - print('Done!') - -def train_2d_burger(model, - train_loader, v, - optimizer, scheduler, - config, - rank=0, log=False, - project='PINO-2d-default', - group='default', - tags=['default'], - use_tqdm=True): - - data_weight = config['train']['xy_loss'] - f_weight = config['train']['f_loss'] - ic_weight = config['train']['ic_loss'] - model.train() - myloss = LpLoss(size_average=True) - pbar = range(config['train']['epochs']) - if use_tqdm: - pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.1) - - for e in pbar: - model.train() - train_pino = 0.0 - data_l2 = 0.0 - train_loss = 0.0 - - for i, (x, y) in enumerate(train_loader): - x, y = x, y - out = model(x).reshape(y.shape) - data_loss = myloss(out, y) - - loss_u, loss_f = PINO_loss(out, x[:, 0, :, 0], v) - total_loss = loss_u * ic_weight + loss_f * f_weight + data_loss * data_weight - - optimizer.clear_grad() - total_loss.backward() - optimizer.step() - - data_l2 += data_loss.item() - train_pino += loss_f.item() - train_loss += total_loss.item() - - scheduler.step() - data_l2 /= len(train_loader) - train_pino /= len(train_loader) - train_loss /= len(train_loader) - if use_tqdm: - pbar.set_description( - ( - f'Epoch {e}, train loss: {train_loss:.5f} ' - f'train f error: {train_pino:.5f}; ' - f'data l2 error: {data_l2:.5f}' - ) - ) - - if e % 100 == 0: - 
save_checkpoint(config['train']['save_dir'], - config['train']['save_name'].replace('.pt', f'_{e}.pt'), - model, optimizer) - save_checkpoint(config['train']['save_dir'], - config['train']['save_name'], - model, optimizer) - print('Done!') \ No newline at end of file +import numpy as np +import paddle +from tqdm import tqdm + +from .losses import LpLoss +from .losses import PINO_loss +from .losses import darcy_loss +from .utils import save_checkpoint + + +def train_2d_operator( + model, + train_loader, + optimizer, + scheduler, + config, + rank=0, + log=False, + project="PINO-2d-default", + group="default", + tags=["default"], + use_tqdm=True, + profile=False, +): + + data_weight = config["train"]["xy_loss"] + f_weight = config["train"]["f_loss"] + model.train() + myloss = LpLoss(size_average=True) + pbar = range(config["train"]["epochs"]) + if use_tqdm: + pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.1) + mesh = train_loader.dataset.mesh + mollifier = ( + paddle.sin(np.pi * mesh[..., 0]) * paddle.sin(np.pi * mesh[..., 1]) * 0.001 + ) + pde_mesh = train_loader.dataset.pde_mesh + pde_mol = ( + paddle.sin(np.pi * pde_mesh[..., 0]) + * paddle.sin(np.pi * pde_mesh[..., 1]) + * 0.001 + ) + for e in pbar: + loss_dict = { + "train_loss": 0.0, + "data_loss": 0.0, + "f_loss": 0.0, + "test_error": 0.0, + } + for data_ic, u, pde_ic in train_loader: + data_ic, u, pde_ic = data_ic.to(rank), u.to(rank), pde_ic.to(rank) + + optimizer.zero_grad() + + # data loss + if data_weight > 0: + pred = model(data_ic).squeeze(dim=-1) + pred = pred * mollifier + data_loss = myloss(pred, y) + + a = data_ic[..., 0] + f_loss = darcy_loss(pred, a) + + loss = data_weight * data_loss + f_weight * f_loss + loss.backward() + optimizer.step() + + loss_dict["train_loss"] += loss.item() * y.shape[0] + loss_dict["f_loss"] += f_loss.item() * y.shape[0] + loss_dict["data_loss"] += data_loss.item() * y.shape[0] + + scheduler.step() + train_loss_val = loss_dict["train_loss"] / len(train_loader.dataset) + f_loss_val = loss_dict["f_loss"] / len(train_loader.dataset) + data_loss_val = loss_dict["data_loss"] / len(train_loader.dataset) + + if use_tqdm: + pbar.set_description( + ( + f"Epoch: {e}, train loss: {train_loss_val:.5f}, " + f"f_loss: {f_loss_val:.5f}, " + f"data loss: {data_loss_val:.5f}" + ) + ) + save_checkpoint( + config["train"]["save_dir"], config["train"]["save_name"], model, optimizer + ) + print("Done!") + + +def train_2d_burger( + model, + train_loader, + v, + optimizer, + scheduler, + config, + rank=0, + log=False, + project="PINO-2d-default", + group="default", + tags=["default"], + use_tqdm=True, +): + + data_weight = config["train"]["xy_loss"] + f_weight = config["train"]["f_loss"] + ic_weight = config["train"]["ic_loss"] + model.train() + myloss = LpLoss(size_average=True) + pbar = range(config["train"]["epochs"]) + if use_tqdm: + pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.1) + + for e in pbar: + model.train() + train_pino = 0.0 + data_l2 = 0.0 + train_loss = 0.0 + + for i, (x, y) in enumerate(train_loader): + x, y = x, y + out = model(x).reshape(y.shape) + data_loss = myloss(out, y) + + loss_u, loss_f = PINO_loss(out, x[:, 0, :, 0], v) + total_loss = ( + loss_u * ic_weight + loss_f * f_weight + data_loss * data_weight + ) + + optimizer.clear_grad() + total_loss.backward() + optimizer.step() + + data_l2 += data_loss.item() + train_pino += loss_f.item() + train_loss += total_loss.item() + + scheduler.step() + data_l2 /= len(train_loader) + train_pino /= len(train_loader) + train_loss /= len(train_loader) 
+ if use_tqdm: + pbar.set_description( + ( + f"Epoch {e}, train loss: {train_loss:.5f} " + f"train f error: {train_pino:.5f}; " + f"data l2 error: {data_l2:.5f}" + ) + ) + + if e % 100 == 0: + save_checkpoint( + config["train"]["save_dir"], + config["train"]["save_name"].replace(".pt", f"_{e}.pt"), + model, + optimizer, + ) + save_checkpoint( + config["train"]["save_dir"], config["train"]["save_name"], model, optimizer + ) + print("Done!") diff --git a/jointContribution/PINO/PINO_paddle/train_utils/train_3d.py b/jointContribution/PINO/PINO_paddle/train_utils/train_3d.py index 87b2c6c485..e0e7d2bfdf 100644 --- a/jointContribution/PINO/PINO_paddle/train_utils/train_3d.py +++ b/jointContribution/PINO/PINO_paddle/train_utils/train_3d.py @@ -1,303 +1,327 @@ -import paddle -from tqdm import tqdm -from timeit import default_timer -import paddle.nn.functional as F -from .utils import save_checkpoint -from .losses import LpLoss, PINO_loss3d, get_forcing -from .distributed import reduce_loss_dict -from .data_utils import sample_data - - - -def train(model, - loader, train_loader, - optimizer, scheduler, - forcing, config, - rank=0, - log=False, - project='PINO-default', - group='FDM', - tags=['Nan'], - use_tqdm=True, - profile=False): - - # data parameters - v = 1 / config['data']['Re'] - S, T = loader.S, loader.T - t_interval = config['data']['time_interval'] - - # training settings - batch_size = config['train']['batchsize'] - ic_weight = config['train']['ic_loss'] - f_weight = config['train']['f_loss'] - xy_weight = config['train']['xy_loss'] - - model.train() - myloss = LpLoss(size_average=True) - pbar = range(config['train']['epochs']) - if use_tqdm: - pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.05) - zero = paddle.zeros(1, dtype='float32').to(rank) - - for ep in pbar: - loss_dict = {'train_loss': 0.0, - 'train_ic': 0.0, - 'train_f': 0.0, - 'test_l2': 0.0} - log_dict = {} - if rank == 0 and profile: - paddle.cuda.synchronize() - t1 = default_timer() - # start solving - for x, y in train_loader: - x, y = x.to(rank), y.to(rank) - - optimizer.zero_grad() - x_in = F.pad(x, (0, 0, 0, 5), "constant", 0) - out = model(x_in).reshape(batch_size, S, S, T + 5) - out = out[..., :-5] - x = x[:, :, :, 0, -1] - - loss_l2 = myloss(out.view(batch_size, S, S, T), y.view(batch_size, S, S, T)) - - if ic_weight != 0 or f_weight != 0: - loss_ic, loss_f = PINO_loss3d(out.view(batch_size, S, S, T), x, forcing, v, t_interval) - else: - loss_ic, loss_f = zero, zero - - total_loss = loss_l2 * xy_weight + loss_f * f_weight + loss_ic * ic_weight - - total_loss.backward() - - optimizer.step() - loss_dict['train_ic'] += loss_ic - loss_dict['test_l2'] += loss_l2 - loss_dict['train_loss'] += total_loss - loss_dict['train_f'] += loss_f - - if rank == 0 and profile: - paddle.cuda.synchronize() - t2 = default_timer() - log_dict['Time cost'] = t2 - t1 - scheduler.step() - loss_reduced = reduce_loss_dict(loss_dict) - train_ic = loss_reduced['train_ic'].item() / len(train_loader) - train_f = loss_reduced['train_f'].item() / len(train_loader) - train_loss = loss_reduced['train_loss'].item() / len(train_loader) - test_l2 = loss_reduced['test_l2'].item() / len(train_loader) - log_dict = { - 'Train f error': train_f, - 'Train L2 error': train_ic, - 'Train loss': train_loss, - 'Test L2 error': test_l2 - } - - if rank == 0: - if use_tqdm: - pbar.set_description( - ( - f'Train f error: {train_f:.5f}; Train ic l2 error: {train_ic:.5f}. 
' - f'Train loss: {train_loss:.5f}; Test l2 error: {test_l2:.5f}' - ) - ) - - if rank == 0: - save_checkpoint(config['train']['save_dir'], - config['train']['save_name'], - model, optimizer) - -def mixed_train(model, # model of neural operator - train_loader, # dataloader for training with data - S1, T1, # spacial and time dimension for training with data - a_loader, # generator for ICs - S2, T2, # spacial and time dimension for training with equation only - optimizer, # optimizer - scheduler, # learning rate scheduler - config, # configuration dict - log=False, # turn on the wandb - project='PINO-default', # project name - group='FDM', # group name - tags=['Nan'], # tags - use_tqdm=True): # turn on tqdm - - # data parameters - v = 1 / config['data']['Re'] - t_interval = config['data']['time_interval'] - forcing_1 = get_forcing(S1) - forcing_2 = get_forcing(S2) - # training settings - batch_size = config['train']['batchsize'] - ic_weight = config['train']['ic_loss'] - f_weight = config['train']['f_loss'] - xy_weight = config['train']['xy_loss'] - num_data_iter = config['train']['data_iter'] - num_eqn_iter = config['train']['eqn_iter'] - - model.train() - myloss = LpLoss(size_average=True) - pbar = range(config['train']['epochs']) - if use_tqdm: - pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.05) - zero = paddle.zeros(1, dtype='float32') - train_loader = sample_data(train_loader) - for ep in pbar: - model.train() - t1 = default_timer() - train_loss = 0.0 - train_ic = 0.0 - train_f = 0.0 - test_l2 = 0.0 - err_eqn = 0.0 - # train with data - for _ in range(num_data_iter): - x, y = next(train_loader) - x, y = x, y - optimizer.clear_grad() - x_in = F.pad(x, (0, 0, 0, 5), "constant", 0) - out = model(x_in).reshape([batch_size, S1, S1, T1 + 5]) - out = out[..., :-5] - x = x[:, :, :, 0, -1] - - loss_l2 = myloss(out.reshape([batch_size, S1, S1, T1]), - y.reshape([batch_size, S1, S1, T1])) - - if ic_weight != 0 or f_weight != 0: - loss_ic, loss_f = PINO_loss3d(out.reshape([batch_size, S1, S1, T1]), - x, forcing_1, - v, t_interval) - else: - loss_ic, loss_f = zero, zero - - total_loss = loss_l2 * xy_weight + loss_f * f_weight + loss_ic * ic_weight - - total_loss.backward() - optimizer.step() - - train_ic = loss_ic.item() - test_l2 += loss_l2.item() - train_loss += total_loss.item() - train_f += loss_f.item() - if num_data_iter != 0: - train_ic /= num_data_iter - train_f /= num_data_iter - train_loss /= num_data_iter - test_l2 /= num_data_iter - # train with random ICs - for _ in range(num_eqn_iter): - new_a = next(a_loader) - new_a = new_a - optimizer.clear_grad() - x_in = F.pad(new_a, (0, 0, 0, 5), "constant", 0) - out = model(x_in).reshape([batch_size, S2, S2, T2 + 5]) - out = out[..., :-5] - new_a = new_a[:, :, :, 0, -1] - loss_ic, loss_f = PINO_loss3d(out.reshape([batch_size, S2, S2, T2]), - new_a, forcing_2, - v, t_interval) - eqn_loss = loss_f * f_weight + loss_ic * ic_weight - eqn_loss.backward() - optimizer.step() - - err_eqn += eqn_loss.item() - - scheduler.step() - t2 = default_timer() - if num_eqn_iter != 0: - err_eqn /= num_eqn_iter - if use_tqdm: - pbar.set_description( - ( - f'Data f error: {train_f:.5f}; Data ic l2 error: {train_ic:.5f}. 
' - f'Data train loss: {train_loss:.5f}; Data l2 error: {test_l2:.5f}' - f'Eqn loss: {err_eqn:.5f}' - ) - ) - - save_checkpoint(config['train']['save_dir'], - config['train']['save_name'], - model, optimizer) - -def progressive_train(model, - loader, train_loader, - optimizer, scheduler, - milestones, config, - log=False, - project='PINO-default', - group='FDM', - tags=['Nan'], - use_tqdm=True): - - # data parameters - v = 1 / config['data']['Re'] - T = loader.T - t_interval = config['data']['time_interval'] - - # training settings - batch_size = config['train']['batchsize'] - ic_weight = config['train']['ic_loss'] - f_weight = config['train']['f_loss'] - xy_weight = config['train']['xy_loss'] - - model.train() - myloss = LpLoss(size_average=True) - zero = paddle.zeros(1, dtype='float32') - for milestone, epochs in zip(milestones, config['train']['epochs']): - pbar = range(epochs) - if use_tqdm: - pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.05) - S = loader.S // milestone - print(f'Resolution :{S}') - forcing = get_forcing(S) - for ep in pbar: - model.train() - t1 = default_timer() - train_loss = 0.0 - train_ic = 0.0 - train_f = 0.0 - test_l2 = 0.0 - for x, y in train_loader: - x, y = x, y - x = x[:, ::milestone, ::milestone, :, :] - y = y[:, ::milestone, ::milestone, :] - optimizer.zero_grad() - x_in = F.pad(x, (0, 0, 0, 5), "constant", 0) - out = model(x_in).reshape(batch_size, S, S, T + 5) - out = out[..., :-5] - x = x[:, :, :, 0, -1] - - loss_l2 = myloss(out.view(batch_size, S, S, T), y.view(batch_size, S, S, T)) - - if ic_weight != 0 or f_weight != 0: - loss_ic, loss_f = PINO_loss3d(out.view(batch_size, S, S, T), - x, forcing, v, t_interval) - else: - loss_ic, loss_f = zero, zero - - total_loss = loss_l2 * xy_weight + loss_f * f_weight + loss_ic * ic_weight - - total_loss.backward() - - optimizer.step() - train_ic = loss_ic.item() - test_l2 += loss_l2.item() - train_loss += total_loss.item() - train_f += loss_f.item() - scheduler.step() - - train_ic /= len(train_loader) - train_f /= len(train_loader) - train_loss /= len(train_loader) - test_l2 /= len(train_loader) - t2 = default_timer() - if use_tqdm: - pbar.set_description( - ( - f'Train f error: {train_f:.5f}; Train ic l2 error: {train_ic:.5f}. 
' - f'Train loss: {train_loss:.5f}; Test l2 error: {test_l2:.5f}' - ) - ) - - save_checkpoint(config['train']['save_dir'], - config['train']['save_name'], - model, optimizer) - +from timeit import default_timer + +import paddle +import paddle.nn.functional as F +from tqdm import tqdm + +from .data_utils import sample_data +from .distributed import reduce_loss_dict +from .losses import LpLoss +from .losses import PINO_loss3d +from .losses import get_forcing +from .utils import save_checkpoint + + +def train( + model, + loader, + train_loader, + optimizer, + scheduler, + forcing, + config, + rank=0, + log=False, + project="PINO-default", + group="FDM", + tags=["Nan"], + use_tqdm=True, + profile=False, +): + + # data parameters + v = 1 / config["data"]["Re"] + S, T = loader.S, loader.T + t_interval = config["data"]["time_interval"] + + # training settings + batch_size = config["train"]["batchsize"] + ic_weight = config["train"]["ic_loss"] + f_weight = config["train"]["f_loss"] + xy_weight = config["train"]["xy_loss"] + + model.train() + myloss = LpLoss(size_average=True) + pbar = range(config["train"]["epochs"]) + if use_tqdm: + pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.05) + zero = paddle.zeros(1, dtype="float32").to(rank) + + for ep in pbar: + loss_dict = {"train_loss": 0.0, "train_ic": 0.0, "train_f": 0.0, "test_l2": 0.0} + log_dict = {} + if rank == 0 and profile: + paddle.cuda.synchronize() + t1 = default_timer() + # start solving + for x, y in train_loader: + x, y = x.to(rank), y.to(rank) + + optimizer.zero_grad() + x_in = F.pad(x, (0, 0, 0, 5), "constant", 0) + out = model(x_in).reshape(batch_size, S, S, T + 5) + out = out[..., :-5] + x = x[:, :, :, 0, -1] + + loss_l2 = myloss(out.view(batch_size, S, S, T), y.view(batch_size, S, S, T)) + + if ic_weight != 0 or f_weight != 0: + loss_ic, loss_f = PINO_loss3d( + out.view(batch_size, S, S, T), x, forcing, v, t_interval + ) + else: + loss_ic, loss_f = zero, zero + + total_loss = loss_l2 * xy_weight + loss_f * f_weight + loss_ic * ic_weight + + total_loss.backward() + + optimizer.step() + loss_dict["train_ic"] += loss_ic + loss_dict["test_l2"] += loss_l2 + loss_dict["train_loss"] += total_loss + loss_dict["train_f"] += loss_f + + if rank == 0 and profile: + paddle.cuda.synchronize() + t2 = default_timer() + log_dict["Time cost"] = t2 - t1 + scheduler.step() + loss_reduced = reduce_loss_dict(loss_dict) + train_ic = loss_reduced["train_ic"].item() / len(train_loader) + train_f = loss_reduced["train_f"].item() / len(train_loader) + train_loss = loss_reduced["train_loss"].item() / len(train_loader) + test_l2 = loss_reduced["test_l2"].item() / len(train_loader) + log_dict = { + "Train f error": train_f, + "Train L2 error": train_ic, + "Train loss": train_loss, + "Test L2 error": test_l2, + } + + if rank == 0: + if use_tqdm: + pbar.set_description( + ( + f"Train f error: {train_f:.5f}; Train ic l2 error: {train_ic:.5f}. 
" + f"Train loss: {train_loss:.5f}; Test l2 error: {test_l2:.5f}" + ) + ) + + if rank == 0: + save_checkpoint( + config["train"]["save_dir"], config["train"]["save_name"], model, optimizer + ) + + +def mixed_train( + model, # model of neural operator + train_loader, # dataloader for training with data + S1, + T1, # spacial and time dimension for training with data + a_loader, # generator for ICs + S2, + T2, # spacial and time dimension for training with equation only + optimizer, # optimizer + scheduler, # learning rate scheduler + config, # configuration dict + log=False, # turn on the wandb + project="PINO-default", # project name + group="FDM", # group name + tags=["Nan"], # tags + use_tqdm=True, +): # turn on tqdm + + # data parameters + v = 1 / config["data"]["Re"] + t_interval = config["data"]["time_interval"] + forcing_1 = get_forcing(S1) + forcing_2 = get_forcing(S2) + # training settings + batch_size = config["train"]["batchsize"] + ic_weight = config["train"]["ic_loss"] + f_weight = config["train"]["f_loss"] + xy_weight = config["train"]["xy_loss"] + num_data_iter = config["train"]["data_iter"] + num_eqn_iter = config["train"]["eqn_iter"] + + model.train() + myloss = LpLoss(size_average=True) + pbar = range(config["train"]["epochs"]) + if use_tqdm: + pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.05) + zero = paddle.zeros(1, dtype="float32") + train_loader = sample_data(train_loader) + for ep in pbar: + model.train() + t1 = default_timer() + train_loss = 0.0 + train_ic = 0.0 + train_f = 0.0 + test_l2 = 0.0 + err_eqn = 0.0 + # train with data + for _ in range(num_data_iter): + x, y = next(train_loader) + x, y = x, y + optimizer.clear_grad() + x_in = F.pad(x, (0, 0, 0, 5), "constant", 0) + out = model(x_in).reshape([batch_size, S1, S1, T1 + 5]) + out = out[..., :-5] + x = x[:, :, :, 0, -1] + + loss_l2 = myloss( + out.reshape([batch_size, S1, S1, T1]), + y.reshape([batch_size, S1, S1, T1]), + ) + + if ic_weight != 0 or f_weight != 0: + loss_ic, loss_f = PINO_loss3d( + out.reshape([batch_size, S1, S1, T1]), x, forcing_1, v, t_interval + ) + else: + loss_ic, loss_f = zero, zero + + total_loss = loss_l2 * xy_weight + loss_f * f_weight + loss_ic * ic_weight + + total_loss.backward() + optimizer.step() + + train_ic = loss_ic.item() + test_l2 += loss_l2.item() + train_loss += total_loss.item() + train_f += loss_f.item() + if num_data_iter != 0: + train_ic /= num_data_iter + train_f /= num_data_iter + train_loss /= num_data_iter + test_l2 /= num_data_iter + # train with random ICs + for _ in range(num_eqn_iter): + new_a = next(a_loader) + new_a = new_a + optimizer.clear_grad() + x_in = F.pad(new_a, (0, 0, 0, 5), "constant", 0) + out = model(x_in).reshape([batch_size, S2, S2, T2 + 5]) + out = out[..., :-5] + new_a = new_a[:, :, :, 0, -1] + loss_ic, loss_f = PINO_loss3d( + out.reshape([batch_size, S2, S2, T2]), new_a, forcing_2, v, t_interval + ) + eqn_loss = loss_f * f_weight + loss_ic * ic_weight + eqn_loss.backward() + optimizer.step() + + err_eqn += eqn_loss.item() + + scheduler.step() + t2 = default_timer() + if num_eqn_iter != 0: + err_eqn /= num_eqn_iter + if use_tqdm: + pbar.set_description( + ( + f"Data f error: {train_f:.5f}; Data ic l2 error: {train_ic:.5f}. 
" + f"Data train loss: {train_loss:.5f}; Data l2 error: {test_l2:.5f}" + f"Eqn loss: {err_eqn:.5f}" + ) + ) + + save_checkpoint( + config["train"]["save_dir"], config["train"]["save_name"], model, optimizer + ) + + +def progressive_train( + model, + loader, + train_loader, + optimizer, + scheduler, + milestones, + config, + log=False, + project="PINO-default", + group="FDM", + tags=["Nan"], + use_tqdm=True, +): + + # data parameters + v = 1 / config["data"]["Re"] + T = loader.T + t_interval = config["data"]["time_interval"] + + # training settings + batch_size = config["train"]["batchsize"] + ic_weight = config["train"]["ic_loss"] + f_weight = config["train"]["f_loss"] + xy_weight = config["train"]["xy_loss"] + + model.train() + myloss = LpLoss(size_average=True) + zero = paddle.zeros(1, dtype="float32") + for milestone, epochs in zip(milestones, config["train"]["epochs"]): + pbar = range(epochs) + if use_tqdm: + pbar = tqdm(pbar, dynamic_ncols=True, smoothing=0.05) + S = loader.S // milestone + print(f"Resolution :{S}") + forcing = get_forcing(S) + for ep in pbar: + model.train() + t1 = default_timer() + train_loss = 0.0 + train_ic = 0.0 + train_f = 0.0 + test_l2 = 0.0 + for x, y in train_loader: + x, y = x, y + x = x[:, ::milestone, ::milestone, :, :] + y = y[:, ::milestone, ::milestone, :] + optimizer.zero_grad() + x_in = F.pad(x, (0, 0, 0, 5), "constant", 0) + out = model(x_in).reshape(batch_size, S, S, T + 5) + out = out[..., :-5] + x = x[:, :, :, 0, -1] + + loss_l2 = myloss( + out.view(batch_size, S, S, T), y.view(batch_size, S, S, T) + ) + + if ic_weight != 0 or f_weight != 0: + loss_ic, loss_f = PINO_loss3d( + out.view(batch_size, S, S, T), x, forcing, v, t_interval + ) + else: + loss_ic, loss_f = zero, zero + + total_loss = ( + loss_l2 * xy_weight + loss_f * f_weight + loss_ic * ic_weight + ) + + total_loss.backward() + + optimizer.step() + train_ic = loss_ic.item() + test_l2 += loss_l2.item() + train_loss += total_loss.item() + train_f += loss_f.item() + scheduler.step() + + train_ic /= len(train_loader) + train_f /= len(train_loader) + train_loss /= len(train_loader) + test_l2 /= len(train_loader) + t2 = default_timer() + if use_tqdm: + pbar.set_description( + ( + f"Train f error: {train_f:.5f}; Train ic l2 error: {train_ic:.5f}. 
" + f"Train loss: {train_loss:.5f}; Test l2 error: {test_l2:.5f}" + ) + ) + + save_checkpoint( + config["train"]["save_dir"], config["train"]["save_name"], model, optimizer + ) diff --git a/jointContribution/PINO/PINO_paddle/train_utils/utils.py b/jointContribution/PINO/PINO_paddle/train_utils/utils.py index 7b116aeb69..97764e8f6e 100644 --- a/jointContribution/PINO/PINO_paddle/train_utils/utils.py +++ b/jointContribution/PINO/PINO_paddle/train_utils/utils.py @@ -1,182 +1,233 @@ -import os -import numpy as np -import paddle - -def vor2vel(w, L=2 * np.pi): - ''' - Convert vorticity into velocity - Args: - w: vorticity with shape (batchsize, num_x, num_y, num_t) - - Returns: - ux, uy with the same shape - ''' - batchsize = w.size(0) - nx = w.size(1) - ny = w.size(2) - nt = w.size(3) - device = w.device - w = w.reshape(batchsize, nx, ny, nt) - - w_h = paddle.fft.fft2(w, axes=[1, 2]) - # Wavenumbers in y-direction - k_max = nx // 2 - N = nx - k_x = paddle.cat((paddle.arange(start=0, end=k_max, step=1, device=device), - paddle.arange(start=-k_max, end=0, step=1, device=device)), 0) \ - .reshape(N, 1).repeat(1, N).reshape(1, N, N, 1) - k_y = paddle.cat((paddle.arange(start=0, end=k_max, step=1, device=device), - paddle.arange(start=-k_max, end=0, step=1, device=device)), 0) \ - .reshape(1, N).repeat(N, 1).reshape(1, N, N, 1) - # Negative Laplacian in Fourier space - lap = (k_x ** 2 + k_y ** 2) - lap[0, 0, 0, 0] = 1.0 - f_h = w_h / lap - - ux_h = 2 * np.pi / L * 1j * k_y * f_h - uy_h = -2 * np.pi / L * 1j * k_x * f_h - - ux = paddle.fft.irfft2(ux_h[:, :, :k_max + 1], dim=[1, 2]) - uy = paddle.fft.irfft2(uy_h[:, :, :k_max + 1], dim=[1, 2]) - return ux, uy - -def get_sample(N, T, s, p, q): - # sample p nodes from Initial Condition, p nodes from Boundary Condition, q nodes from Interior - - # sample IC - index_ic = paddle.randint(s, size=(N, p)) - sample_ic_t = paddle.zeros(N, p) - sample_ic_x = index_ic/s - - # sample BC - sample_bc = paddle.rand(size=(N, p//2)) - sample_bc_t = paddle.cat([sample_bc, sample_bc],dim=1) - sample_bc_x = paddle.cat([paddle.zeros(N, p//2), paddle.ones(N, p//2)],dim=1) - - sample_i_t = -paddle.cos(paddle.rand(size=(N, q))*np.pi/2) + 1 - sample_i_x = paddle.rand(size=(N,q)) - - sample_t = paddle.cat([sample_ic_t, sample_bc_t, sample_i_t], dim=1).cuda() - sample_t.requires_grad = True - sample_x = paddle.cat([sample_ic_x, sample_bc_x, sample_i_x], dim=1).cuda() - sample_x.requires_grad = True - sample = paddle.stack([sample_t, sample_x], dim=-1).reshape(N, (p+p+q), 2) - return sample, sample_t, sample_x, index_ic.long() - -def get_grid(N, T, s): - gridt = paddle.tensor(np.linspace(0, 1, T), dtype=paddle.float).reshape(1, T, 1).repeat(N, 1, s).cuda() - gridt.requires_grad = True - gridx = paddle.tensor(np.linspace(0, 1, s+1)[:-1], dtype=paddle.float).reshape(1, 1, s).repeat(N, T, 1).cuda() - gridx.requires_grad = True - grid = paddle.stack([gridt, gridx], dim=-1).reshape(N, T*s, 2) - return grid, gridt, gridx - -def get_2dgrid(S): - ''' - get array of points on 2d grid in (0,1)^2 - Args: - S: resolution - - Returns: - points: flattened grid, ndarray (N, 2) - ''' - xarr = np.linspace(0, 1, S) - yarr = np.linspace(0, 1, S) - xx, yy = np.meshgrid(xarr, yarr, indexing='ij') - points = np.stack([xx.ravel(), yy.ravel()], axis=0).T - return points - -def paddle2dgrid(num_x, num_y, bot=(0,0), top=(1,1)): - x_bot, y_bot = bot - x_top, y_top = top - x_arr = paddle.linspace(x_bot, x_top, num=num_x) - y_arr = paddle.linspace(y_bot, y_top, num=num_y) - xx, yy = paddle.meshgrid(x_arr, 
y_arr, indexing='ij') - mesh = paddle.stack([xx, yy], dim=2) - return mesh - -def get_grid3d(S, T, time_scale=1.0, device='cpu'): - gridx = paddle.tensor(np.linspace(0, 1, S + 1)[:-1], dtype=paddle.float, device=device) - gridx = gridx.reshape(1, S, 1, 1, 1).repeat([1, 1, S, T, 1]) - gridy = paddle.tensor(np.linspace(0, 1, S + 1)[:-1], dtype=paddle.float, device=device) - gridy = gridy.reshape(1, 1, S, 1, 1).repeat([1, S, 1, T, 1]) - gridt = paddle.tensor(np.linspace(0, 1 * time_scale, T), dtype=paddle.float, device=device) - gridt = gridt.reshape(1, 1, 1, T, 1).repeat([1, S, S, 1, 1]) - return gridx, gridy, gridt - -def convert_ic(u0, N, S, T, time_scale=1.0): - u0 = u0.reshape(N, S, S, 1, 1).repeat([1, 1, 1, T, 1]) - gridx, gridy, gridt = get_grid3d(S, T, time_scale=time_scale, device=u0.device) - a_data = paddle.cat((gridx.repeat([N, 1, 1, 1, 1]), gridy.repeat([N, 1, 1, 1, 1]), - gridt.repeat([N, 1, 1, 1, 1]), u0), dim=-1) - return a_data - -def requires_grad(model, flag=True): - for p in model.parameters(): - p.requires_grad = flag - -def set_grad(tensors, flag=True): - for p in tensors: - p.requires_grad = flag - -def zero_grad(params): - ''' - set grad field to 0 - ''' - if isinstance(params, paddle.Tensor): - if params.grad is not None: - params.grad.zero_() - else: - for p in params: - if p.grad is not None: - p.grad.zero_() - -def count_params(net): - count = 0 - for p in net.parameters(): - count += p.numel() - return count - -def save_checkpoint(path, name, model, optimizer=None): - ckpt_dir = 'checkpoints/%s/' % path - if not os.path.exists(ckpt_dir): - os.makedirs(ckpt_dir) - try: - model_state_dict = model.module.state_dict() - except AttributeError: - model_state_dict = model.state_dict() - - if optimizer is not None: - optim_dict = optimizer.state_dict() - else: - optim_dict = 0.0 - - paddle.save({ - 'model': model_state_dict, - 'optim': optim_dict - }, ckpt_dir + name) - print('Checkpoint is saved at %s' % ckpt_dir + name) - -def save_ckpt(path, model, optimizer=None, scheduler=None): - model_state = model.state_dict() - if optimizer: - optim_state = optimizer.state_dict() - else: - optim_state = None - - if scheduler: - scheduler_state = scheduler.state_dict() - else: - scheduler_state = None - paddle.save({ - 'model': model_state, - 'optim': optim_state, - 'scheduler': scheduler_state - }, path) - print(f'Checkpoint is saved to {path}') - -def dict2str(log_dict): - res = '' - for key, value in log_dict.items(): - res += f'{key}: {value}|' - return res \ No newline at end of file +import os + +import numpy as np +import paddle + + +def vor2vel(w, L=2 * np.pi): + """ + Convert vorticity into velocity + Args: + w: vorticity with shape (batchsize, num_x, num_y, num_t) + + Returns: + ux, uy with the same shape + """ + batchsize = w.size(0) + nx = w.size(1) + ny = w.size(2) + nt = w.size(3) + device = w.device + w = w.reshape(batchsize, nx, ny, nt) + + w_h = paddle.fft.fft2(w, axes=[1, 2]) + # Wavenumbers in y-direction + k_max = nx // 2 + N = nx + k_x = ( + paddle.cat( + ( + paddle.arange(start=0, end=k_max, step=1, device=device), + paddle.arange(start=-k_max, end=0, step=1, device=device), + ), + 0, + ) + .reshape(N, 1) + .repeat(1, N) + .reshape(1, N, N, 1) + ) + k_y = ( + paddle.cat( + ( + paddle.arange(start=0, end=k_max, step=1, device=device), + paddle.arange(start=-k_max, end=0, step=1, device=device), + ), + 0, + ) + .reshape(1, N) + .repeat(N, 1) + .reshape(1, N, N, 1) + ) + # Negative Laplacian in Fourier space + lap = k_x**2 + k_y**2 + lap[0, 0, 0, 0] = 1.0 + f_h 
= w_h / lap + + ux_h = 2 * np.pi / L * 1j * k_y * f_h + uy_h = -2 * np.pi / L * 1j * k_x * f_h + + ux = paddle.fft.irfft2(ux_h[:, :, : k_max + 1], dim=[1, 2]) + uy = paddle.fft.irfft2(uy_h[:, :, : k_max + 1], dim=[1, 2]) + return ux, uy + + +def get_sample(N, T, s, p, q): + # sample p nodes from Initial Condition, p nodes from Boundary Condition, q nodes from Interior + + # sample IC + index_ic = paddle.randint(s, size=(N, p)) + sample_ic_t = paddle.zeros(N, p) + sample_ic_x = index_ic / s + + # sample BC + sample_bc = paddle.rand(size=(N, p // 2)) + sample_bc_t = paddle.cat([sample_bc, sample_bc], dim=1) + sample_bc_x = paddle.cat([paddle.zeros(N, p // 2), paddle.ones(N, p // 2)], dim=1) + + sample_i_t = -paddle.cos(paddle.rand(size=(N, q)) * np.pi / 2) + 1 + sample_i_x = paddle.rand(size=(N, q)) + + sample_t = paddle.cat([sample_ic_t, sample_bc_t, sample_i_t], dim=1).cuda() + sample_t.requires_grad = True + sample_x = paddle.cat([sample_ic_x, sample_bc_x, sample_i_x], dim=1).cuda() + sample_x.requires_grad = True + sample = paddle.stack([sample_t, sample_x], dim=-1).reshape(N, (p + p + q), 2) + return sample, sample_t, sample_x, index_ic.long() + + +def get_grid(N, T, s): + gridt = ( + paddle.tensor(np.linspace(0, 1, T), dtype=paddle.float) + .reshape(1, T, 1) + .repeat(N, 1, s) + .cuda() + ) + gridt.requires_grad = True + gridx = ( + paddle.tensor(np.linspace(0, 1, s + 1)[:-1], dtype=paddle.float) + .reshape(1, 1, s) + .repeat(N, T, 1) + .cuda() + ) + gridx.requires_grad = True + grid = paddle.stack([gridt, gridx], dim=-1).reshape(N, T * s, 2) + return grid, gridt, gridx + + +def get_2dgrid(S): + """ + get array of points on 2d grid in (0,1)^2 + Args: + S: resolution + + Returns: + points: flattened grid, ndarray (N, 2) + """ + xarr = np.linspace(0, 1, S) + yarr = np.linspace(0, 1, S) + xx, yy = np.meshgrid(xarr, yarr, indexing="ij") + points = np.stack([xx.ravel(), yy.ravel()], axis=0).T + return points + + +def paddle2dgrid(num_x, num_y, bot=(0, 0), top=(1, 1)): + x_bot, y_bot = bot + x_top, y_top = top + x_arr = paddle.linspace(x_bot, x_top, num=num_x) + y_arr = paddle.linspace(y_bot, y_top, num=num_y) + xx, yy = paddle.meshgrid(x_arr, y_arr, indexing="ij") + mesh = paddle.stack([xx, yy], dim=2) + return mesh + + +def get_grid3d(S, T, time_scale=1.0, device="cpu"): + gridx = paddle.tensor( + np.linspace(0, 1, S + 1)[:-1], dtype=paddle.float, device=device + ) + gridx = gridx.reshape(1, S, 1, 1, 1).repeat([1, 1, S, T, 1]) + gridy = paddle.tensor( + np.linspace(0, 1, S + 1)[:-1], dtype=paddle.float, device=device + ) + gridy = gridy.reshape(1, 1, S, 1, 1).repeat([1, S, 1, T, 1]) + gridt = paddle.tensor( + np.linspace(0, 1 * time_scale, T), dtype=paddle.float, device=device + ) + gridt = gridt.reshape(1, 1, 1, T, 1).repeat([1, S, S, 1, 1]) + return gridx, gridy, gridt + + +def convert_ic(u0, N, S, T, time_scale=1.0): + u0 = u0.reshape(N, S, S, 1, 1).repeat([1, 1, 1, T, 1]) + gridx, gridy, gridt = get_grid3d(S, T, time_scale=time_scale, device=u0.device) + a_data = paddle.cat( + ( + gridx.repeat([N, 1, 1, 1, 1]), + gridy.repeat([N, 1, 1, 1, 1]), + gridt.repeat([N, 1, 1, 1, 1]), + u0, + ), + dim=-1, + ) + return a_data + + +def requires_grad(model, flag=True): + for p in model.parameters(): + p.requires_grad = flag + + +def set_grad(tensors, flag=True): + for p in tensors: + p.requires_grad = flag + + +def zero_grad(params): + """ + set grad field to 0 + """ + if isinstance(params, paddle.Tensor): + if params.grad is not None: + params.grad.zero_() + else: + for p in params: + if p.grad 
is not None: + p.grad.zero_() + + +def count_params(net): + count = 0 + for p in net.parameters(): + count += p.numel() + return count + + +def save_checkpoint(path, name, model, optimizer=None): + ckpt_dir = "checkpoints/%s/" % path + if not os.path.exists(ckpt_dir): + os.makedirs(ckpt_dir) + try: + model_state_dict = model.module.state_dict() + except AttributeError: + model_state_dict = model.state_dict() + + if optimizer is not None: + optim_dict = optimizer.state_dict() + else: + optim_dict = 0.0 + + paddle.save({"model": model_state_dict, "optim": optim_dict}, ckpt_dir + name) + print("Checkpoint is saved at %s" % ckpt_dir + name) + + +def save_ckpt(path, model, optimizer=None, scheduler=None): + model_state = model.state_dict() + if optimizer: + optim_state = optimizer.state_dict() + else: + optim_state = None + + if scheduler: + scheduler_state = scheduler.state_dict() + else: + scheduler_state = None + paddle.save( + {"model": model_state, "optim": optim_state, "scheduler": scheduler_state}, path + ) + print(f"Checkpoint is saved to {path}") + + +def dict2str(log_dict): + res = "" + for key, value in log_dict.items(): + res += f"{key}: {value}|" + return res diff --git a/jointContribution/PINO/rfcs/PINO.md b/jointContribution/PINO/rfcs/PINO.md index 345896a5ef..2b13409440 100644 --- a/jointContribution/PINO/rfcs/PINO.md +++ b/jointContribution/PINO/rfcs/PINO.md @@ -56,7 +56,7 @@ class FNO2d(nn.Layer): width=64, fc_dim=128, layers=None, in_dim=3, out_dim=1, - act='gelu', + act='gelu', pad_ratio=[0., 0.]): super(FNO2d, self).__init__() @@ -127,4 +127,4 @@ def FDM_Burgers(u, v, D=1): - 202308 : 调研 - 202309 :基于Paddle API的复现 -- 202310 :整理项目产出,撰写案例文档 \ No newline at end of file +- 202310 :整理项目产出,撰写案例文档 diff --git a/jointContribution/PIRBN/analytical_solution.py b/jointContribution/PIRBN/analytical_solution.py index 76304b5ef5..3faae9a7a7 100644 --- a/jointContribution/PIRBN/analytical_solution.py +++ b/jointContribution/PIRBN/analytical_solution.py @@ -1,84 +1,84 @@ -import os - -import matplotlib.pyplot as plt -import numpy as np -import paddle - - -def output_fig(train_obj, mu, b, right_by, activation_function, output_Kgg): - plt.figure(figsize=(15, 9)) - rbn = train_obj.pirbn.rbn - - output_dir = os.path.join(os.path.dirname(__file__), "output") - if not os.path.exists(output_dir): - os.mkdir(output_dir) - - # Comparisons between the network predictions and the ground truth. - plt.subplot(2, 3, 1) - ns = 1001 - dx = 1 / (ns - 1) - xy = np.zeros((ns, 1)).astype(np.float32) - for i in range(0, ns): - xy[i, 0] = i * dx + right_by - y = rbn(paddle.to_tensor(xy)) - y = y.numpy() - y_true = np.sin(2 * mu * np.pi * xy) - plt.plot(xy, y_true) - plt.plot(xy, y, linestyle="--") - plt.legend(["ground truth", "predict"]) - plt.xlabel("x") - - # Point-wise absolute error plot. - plt.subplot(2, 3, 2) - xy_y = np.abs(y_true - y) - plt.plot(xy, xy_y) - plt.ylim(top=np.max(xy_y)) - plt.ylabel("Absolute Error") - plt.xlabel("x") - - # Loss history of the network during the training process. - plt.subplot(2, 3, 3) - loss_g = train_obj.loss_g - x = range(len(loss_g)) - plt.yscale("log") - plt.plot(x, loss_g) - plt.plot(x, train_obj.loss_b) - plt.legend(["Lg", "Lb"]) - plt.ylabel("Loss") - plt.xlabel("Iteration") - - # Visualise NTK after initialisation, The normalised Kg at 0th iteration. 
- plt.subplot(2, 3, 4) - index = str(output_Kgg[0]) - K = train_obj.ntk_list[index].numpy() - plt.imshow(K / (np.max(abs(K))), cmap="bwr", vmax=1, vmin=-1) - plt.colorbar() - plt.title(f"Kg at {index}-th iteration") - plt.xlabel("Sample point index") - - # Visualise NTK after training, The normalised Kg at 2000th iteration. - plt.subplot(2, 3, 5) - index = str(output_Kgg[1]) - K = train_obj.ntk_list[index].numpy() - plt.imshow(K / (np.max(abs(K))), cmap="bwr", vmax=1, vmin=-1) - plt.colorbar() - plt.title(f"Kg at {index}-th iteration") - plt.xlabel("Sample point index") - - # The normalised Kg at 20000th iteration. - plt.subplot(2, 3, 6) - index = str(output_Kgg[2]) - K = train_obj.ntk_list[index].numpy() - plt.imshow(K / (np.max(abs(K))), cmap="bwr", vmax=1, vmin=-1) - plt.colorbar() - plt.title(f"Kg at {index}-th iteration") - plt.xlabel("Sample point index") - - plt.savefig( - os.path.join( - output_dir, f"sine_function_{mu}_{b}_{right_by}_{activation_function}.png" - ) - ) - - # Save data - # scipy.io.savemat(os.path.join(output_dir, "out.mat"), {"NTK": a, "x": xy, "y": y}) +import os + +import matplotlib.pyplot as plt +import numpy as np +import paddle + + +def output_fig(train_obj, mu, b, right_by, activation_function, output_Kgg): + plt.figure(figsize=(15, 9)) + rbn = train_obj.pirbn.rbn + + output_dir = os.path.join(os.path.dirname(__file__), "output") + if not os.path.exists(output_dir): + os.mkdir(output_dir) + + # Comparisons between the network predictions and the ground truth. + plt.subplot(2, 3, 1) + ns = 1001 + dx = 1 / (ns - 1) + xy = np.zeros((ns, 1)).astype(np.float32) + for i in range(0, ns): + xy[i, 0] = i * dx + right_by + y = rbn(paddle.to_tensor(xy)) + y = y.numpy() + y_true = np.sin(2 * mu * np.pi * xy) + plt.plot(xy, y_true) + plt.plot(xy, y, linestyle="--") + plt.legend(["ground truth", "predict"]) + plt.xlabel("x") + + # Point-wise absolute error plot. + plt.subplot(2, 3, 2) + xy_y = np.abs(y_true - y) + plt.plot(xy, xy_y) + plt.ylim(top=np.max(xy_y)) + plt.ylabel("Absolute Error") + plt.xlabel("x") + + # Loss history of the network during the training process. + plt.subplot(2, 3, 3) + loss_g = train_obj.loss_g + x = range(len(loss_g)) + plt.yscale("log") + plt.plot(x, loss_g) + plt.plot(x, train_obj.loss_b) + plt.legend(["Lg", "Lb"]) + plt.ylabel("Loss") + plt.xlabel("Iteration") + + # Visualise NTK after initialisation, The normalised Kg at 0th iteration. + plt.subplot(2, 3, 4) + index = str(output_Kgg[0]) + K = train_obj.ntk_list[index].numpy() + plt.imshow(K / (np.max(abs(K))), cmap="bwr", vmax=1, vmin=-1) + plt.colorbar() + plt.title(f"Kg at {index}-th iteration") + plt.xlabel("Sample point index") + + # Visualise NTK after training, The normalised Kg at 2000th iteration. + plt.subplot(2, 3, 5) + index = str(output_Kgg[1]) + K = train_obj.ntk_list[index].numpy() + plt.imshow(K / (np.max(abs(K))), cmap="bwr", vmax=1, vmin=-1) + plt.colorbar() + plt.title(f"Kg at {index}-th iteration") + plt.xlabel("Sample point index") + + # The normalised Kg at 20000th iteration. 
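+    # output_Kgg[2] equals maxiter - 1, i.e. 20000 for the maxiter = 20001 set in main.py;
+    # each heatmap is scaled by its own max |Kg| so the colour map spans [-1, 1].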
+ plt.subplot(2, 3, 6) + index = str(output_Kgg[2]) + K = train_obj.ntk_list[index].numpy() + plt.imshow(K / (np.max(abs(K))), cmap="bwr", vmax=1, vmin=-1) + plt.colorbar() + plt.title(f"Kg at {index}-th iteration") + plt.xlabel("Sample point index") + + plt.savefig( + os.path.join( + output_dir, f"sine_function_{mu}_{b}_{right_by}_{activation_function}.png" + ) + ) + + # Save data + # scipy.io.savemat(os.path.join(output_dir, "out.mat"), {"NTK": a, "x": xy, "y": y}) diff --git a/jointContribution/PIRBN/jacobian_function.py b/jointContribution/PIRBN/jacobian_function.py index 544b1ea495..ffc4a49bb6 100644 --- a/jointContribution/PIRBN/jacobian_function.py +++ b/jointContribution/PIRBN/jacobian_function.py @@ -1,36 +1,36 @@ -import paddle - - -def flat(x, start_axis=0, stop_axis=None): - # TODO Error if use paddle.flatten -> The Op flatten_grad doesn't have any gradop - stop_axis = None if stop_axis is None else stop_axis + 1 - shape = x.shape - - # [3, 1] --flat--> [3] - # [2, 2] --flat--> [4] - temp = shape[start_axis:stop_axis] - temp = [0 if x == 1 else x for x in temp] # kill invalid axis - flat_sum = sum(temp) - head = shape[0:start_axis] - body = [flat_sum] - tail = [] if stop_axis is None else shape[stop_axis:] - new_shape = head + body + tail - x_flat = x.reshape(new_shape) - return x_flat - - -def jacobian(y, x): - J_shape = y.shape + x.shape - J = paddle.zeros(J_shape) - J_flat = flat( - J, start_axis=0, stop_axis=len(y.shape) - 1 - ) # partialy flatten as y_flat - for i, y_i in enumerate(y): - assert y_i.size == 1, f"y_i.size shoule be 1, but got {y_i.size}" - grad = paddle.grad(y_i, x, allow_unused=True)[ - 0 - ] # grad[i] == sum by j (dy[j] / dx[i]) - if grad is None: - grad = paddle.zeros_like(x) - J_flat[i] = grad - return J_flat.reshape(J_shape) +import paddle + + +def flat(x, start_axis=0, stop_axis=None): + # TODO Error if use paddle.flatten -> The Op flatten_grad doesn't have any gradop + stop_axis = None if stop_axis is None else stop_axis + 1 + shape = x.shape + + # [3, 1] --flat--> [3] + # [2, 2] --flat--> [4] + temp = shape[start_axis:stop_axis] + temp = [0 if x == 1 else x for x in temp] # kill invalid axis + flat_sum = sum(temp) + head = shape[0:start_axis] + body = [flat_sum] + tail = [] if stop_axis is None else shape[stop_axis:] + new_shape = head + body + tail + x_flat = x.reshape(new_shape) + return x_flat + + +def jacobian(y, x): + J_shape = y.shape + x.shape + J = paddle.zeros(J_shape) + J_flat = flat( + J, start_axis=0, stop_axis=len(y.shape) - 1 + ) # partialy flatten as y_flat + for i, y_i in enumerate(y): + assert y_i.size == 1, f"y_i.size shoule be 1, but got {y_i.size}" + grad = paddle.grad(y_i, x, allow_unused=True)[ + 0 + ] # grad[i] == sum by j (dy[j] / dx[i]) + if grad is None: + grad = paddle.zeros_like(x) + J_flat[i] = grad + return J_flat.reshape(J_shape) diff --git a/jointContribution/PIRBN/main.py b/jointContribution/PIRBN/main.py index 149dd829e4..6173012c2e 100644 --- a/jointContribution/PIRBN/main.py +++ b/jointContribution/PIRBN/main.py @@ -1,68 +1,68 @@ -import analytical_solution -import numpy as np -import pirbn -import rbn_net -import train - -import ppsci - -# set random seed for reproducibility -SEED = 2023 -ppsci.utils.misc.set_random_seed(SEED) - -# mu, Fig.1, Page5 -# right_by, Formula (15) Page5 -def sine_function_main( - mu, adaptive_weights=True, right_by=0, activation_function="gaussian" -): - # Define the number of sample points - ns = 50 - - # Define the sample points' interval - dx = 1.0 / (ns - 1) - - # Initialise sample 
points' coordinates - x_eq = np.linspace(0.0, 1.0, ns)[:, None] - - for i in range(0, ns): - x_eq[i, 0] = i * dx + right_by - x_bc = np.array([[right_by + 0.0], [right_by + 1.0]]) - x = [x_eq, x_bc] - y = -4 * mu**2 * np.pi**2 * np.sin(2 * mu * np.pi * x_eq) - - # Set up radial basis network - n_in = 1 - n_out = 1 - n_neu = 61 - b = 10.0 - c = [right_by - 0.1, right_by + 1.1] - - # Set up PIRBN - rbn = rbn_net.RBN_Net(n_in, n_out, n_neu, b, c, activation_function) - rbn_loss = pirbn.PIRBN(rbn, activation_function) - maxiter = 20001 - output_Kgg = [0, int(0.1 * maxiter), maxiter - 1] - train_obj = train.Trainer( - rbn_loss, - x, - y, - learning_rate=0.001, - maxiter=maxiter, - adaptive_weights=adaptive_weights, - ) - train_obj.fit(output_Kgg) - - # Visualise results - analytical_solution.output_fig( - train_obj, mu, b, right_by, activation_function, output_Kgg - ) - - -# Fig.1 -sine_function_main(mu=4, right_by=0, activation_function="tanh") -# Fig.2 -sine_function_main(mu=8, right_by=0, activation_function="tanh") -# Fig.3 -sine_function_main(mu=4, right_by=100, activation_function="tanh") -# Fig.6 -sine_function_main(mu=8, right_by=100, activation_function="gaussian") +import analytical_solution +import numpy as np +import pirbn +import rbn_net +import train + +import ppsci + +# set random seed for reproducibility +SEED = 2023 +ppsci.utils.misc.set_random_seed(SEED) + +# mu, Fig.1, Page5 +# right_by, Formula (15) Page5 +def sine_function_main( + mu, adaptive_weights=True, right_by=0, activation_function="gaussian" +): + # Define the number of sample points + ns = 50 + + # Define the sample points' interval + dx = 1.0 / (ns - 1) + + # Initialise sample points' coordinates + x_eq = np.linspace(0.0, 1.0, ns)[:, None] + + for i in range(0, ns): + x_eq[i, 0] = i * dx + right_by + x_bc = np.array([[right_by + 0.0], [right_by + 1.0]]) + x = [x_eq, x_bc] + y = -4 * mu**2 * np.pi**2 * np.sin(2 * mu * np.pi * x_eq) + + # Set up radial basis network + n_in = 1 + n_out = 1 + n_neu = 61 + b = 10.0 + c = [right_by - 0.1, right_by + 1.1] + + # Set up PIRBN + rbn = rbn_net.RBN_Net(n_in, n_out, n_neu, b, c, activation_function) + rbn_loss = pirbn.PIRBN(rbn, activation_function) + maxiter = 20001 + output_Kgg = [0, int(0.1 * maxiter), maxiter - 1] + train_obj = train.Trainer( + rbn_loss, + x, + y, + learning_rate=0.001, + maxiter=maxiter, + adaptive_weights=adaptive_weights, + ) + train_obj.fit(output_Kgg) + + # Visualise results + analytical_solution.output_fig( + train_obj, mu, b, right_by, activation_function, output_Kgg + ) + + +# Fig.1 +sine_function_main(mu=4, right_by=0, activation_function="tanh") +# Fig.2 +sine_function_main(mu=8, right_by=0, activation_function="tanh") +# Fig.3 +sine_function_main(mu=4, right_by=100, activation_function="tanh") +# Fig.6 +sine_function_main(mu=8, right_by=100, activation_function="gaussian") diff --git a/jointContribution/PIRBN/pirbn.py b/jointContribution/PIRBN/pirbn.py index f6179e15ef..88ca1fedb5 100644 --- a/jointContribution/PIRBN/pirbn.py +++ b/jointContribution/PIRBN/pirbn.py @@ -1,106 +1,106 @@ -import paddle -from jacobian_function import jacobian - - -class PIRBN(paddle.nn.Layer): - def __init__(self, rbn, activation_function="gaussian"): - super().__init__() - self.rbn = rbn - self.activation_function = activation_function - - def forward(self, input_data): - xy, xy_b = input_data - # initialize the differential operators - u_b = self.rbn(xy_b) - # obtain partial derivatives of u with respect to x - xy.stop_gradient = False - # Obtain the output from 
the RBN - u = self.rbn(xy) - # Obtain the first-order derivative of the output with respect to the input - u_x = paddle.grad(u, xy, retain_graph=True, create_graph=True)[0] - # Obtain the second-order derivative of the output with respect to the input - u_xx = paddle.grad(u_x, xy, retain_graph=True, create_graph=True)[0] - return u_xx, u_b, u - - def cal_K(self, x): - u_xx, _, _ = self.forward(x) - w, b = [], [] - - if self.activation_function == "gaussian": - b.append(self.rbn.activation.b) - w.append(self.rbn.last_fc_layer.weight) - elif self.activation_function == "tanh": - w.append(self.rbn.hidden_layer.weight) - b.append(self.rbn.hidden_layer.bias) - w.append(self.rbn.last_fc_layer.weight) - - J_list = [] - - for w_i in w: - J_w = jacobian(u_xx, w_i).squeeze() - J_list.append(J_w) - - for b_i in b: - J_b = jacobian(u_xx, b_i).squeeze() - J_list.append(J_b) - - n_input = x[0].shape[0] # ns in main.py - K = paddle.zeros((n_input, n_input)) - - for J in J_list: - K += J @ J.T - - return K - - def cal_ntk(self, x): - # Formula (4), Page3, \lambda variable - # Lambda represents the eigenvalues of the matrix(Kg) - lambda_g = 0.0 - lambda_b = 0.0 - n_neu = self.rbn.n_neu - - # in-domain - n1 = x[0].shape[0] - for i in range(n1): - temp_x = [x[0][i, ...].unsqueeze(0), paddle.to_tensor([[0.0]])] - y = self.forward(temp_x) - l1t = paddle.grad(y[0], self.parameters(), allow_unused=True) - for j, grad in enumerate(l1t): - if grad is None: - grad = paddle.to_tensor([0.0]).broadcast_to( - self.parameters()[j].shape - ) - l1t[j] = grad - lambda_g = lambda_g + paddle.sum(grad**2) / n1 - - # When use tanh activation function, the value may be None - if self.activation_function == "tanh": - temp = paddle.concat( - # (l1t[0], l1t[1], l1t[2].reshape((1, n_neu))), axis=1 - # Not select last_fc_bias - (l1t[1], l1t[2], l1t[3].reshape((1, n_neu))), - axis=1, - ) - else: - temp = paddle.concat((l1t[0], l1t[1].reshape((1, n_neu))), axis=1) - if i == 0: - # Fig.1, Page5, Kg variable - Kg = temp - else: - Kg = paddle.concat((Kg, temp), axis=0) - - # bound - n2 = x[1].shape[0] - for i in range(n2): - temp_x = [paddle.to_tensor([[0.0]]), x[1][i, ...].unsqueeze(0)] - y = self.forward(temp_x) - l1t = paddle.grad(y[1], self.rbn.parameters(), allow_unused=True) - for j in l1t: - lambda_b = lambda_b + paddle.sum(j**2) / n2 - - # calculate adapt factors - temp = lambda_g + lambda_b - lambda_g = temp / lambda_g - lambda_b = temp / lambda_b - - return lambda_g, lambda_b, Kg +import paddle +from jacobian_function import jacobian + + +class PIRBN(paddle.nn.Layer): + def __init__(self, rbn, activation_function="gaussian"): + super().__init__() + self.rbn = rbn + self.activation_function = activation_function + + def forward(self, input_data): + xy, xy_b = input_data + # initialize the differential operators + u_b = self.rbn(xy_b) + # obtain partial derivatives of u with respect to x + xy.stop_gradient = False + # Obtain the output from the RBN + u = self.rbn(xy) + # Obtain the first-order derivative of the output with respect to the input + u_x = paddle.grad(u, xy, retain_graph=True, create_graph=True)[0] + # Obtain the second-order derivative of the output with respect to the input + u_xx = paddle.grad(u_x, xy, retain_graph=True, create_graph=True)[0] + return u_xx, u_b, u + + def cal_K(self, x): + u_xx, _, _ = self.forward(x) + w, b = [], [] + + if self.activation_function == "gaussian": + b.append(self.rbn.activation.b) + w.append(self.rbn.last_fc_layer.weight) + elif self.activation_function == "tanh": + 
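+            # tanh network: Kg depends on the hidden-layer weight and bias as
+            # well as the last fully-connected weight, so include all three.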
w.append(self.rbn.hidden_layer.weight) + b.append(self.rbn.hidden_layer.bias) + w.append(self.rbn.last_fc_layer.weight) + + J_list = [] + + for w_i in w: + J_w = jacobian(u_xx, w_i).squeeze() + J_list.append(J_w) + + for b_i in b: + J_b = jacobian(u_xx, b_i).squeeze() + J_list.append(J_b) + + n_input = x[0].shape[0] # ns in main.py + K = paddle.zeros((n_input, n_input)) + + for J in J_list: + K += J @ J.T + + return K + + def cal_ntk(self, x): + # Formula (4), Page3, \lambda variable + # Lambda represents the eigenvalues of the matrix(Kg) + lambda_g = 0.0 + lambda_b = 0.0 + n_neu = self.rbn.n_neu + + # in-domain + n1 = x[0].shape[0] + for i in range(n1): + temp_x = [x[0][i, ...].unsqueeze(0), paddle.to_tensor([[0.0]])] + y = self.forward(temp_x) + l1t = paddle.grad(y[0], self.parameters(), allow_unused=True) + for j, grad in enumerate(l1t): + if grad is None: + grad = paddle.to_tensor([0.0]).broadcast_to( + self.parameters()[j].shape + ) + l1t[j] = grad + lambda_g = lambda_g + paddle.sum(grad**2) / n1 + + # When use tanh activation function, the value may be None + if self.activation_function == "tanh": + temp = paddle.concat( + # (l1t[0], l1t[1], l1t[2].reshape((1, n_neu))), axis=1 + # Not select last_fc_bias + (l1t[1], l1t[2], l1t[3].reshape((1, n_neu))), + axis=1, + ) + else: + temp = paddle.concat((l1t[0], l1t[1].reshape((1, n_neu))), axis=1) + if i == 0: + # Fig.1, Page5, Kg variable + Kg = temp + else: + Kg = paddle.concat((Kg, temp), axis=0) + + # bound + n2 = x[1].shape[0] + for i in range(n2): + temp_x = [paddle.to_tensor([[0.0]]), x[1][i, ...].unsqueeze(0)] + y = self.forward(temp_x) + l1t = paddle.grad(y[1], self.rbn.parameters(), allow_unused=True) + for j in l1t: + lambda_b = lambda_b + paddle.sum(j**2) / n2 + + # calculate adapt factors + temp = lambda_g + lambda_b + lambda_g = temp / lambda_g + lambda_b = temp / lambda_b + + return lambda_g, lambda_b, Kg diff --git a/jointContribution/PIRBN/rbn_net.py b/jointContribution/PIRBN/rbn_net.py index a79898a661..2b7788ee54 100644 --- a/jointContribution/PIRBN/rbn_net.py +++ b/jointContribution/PIRBN/rbn_net.py @@ -1,172 +1,172 @@ -import math - -import numpy as np -import paddle - - -class RBN_Net(paddle.nn.Layer): - """This class is to build a radial basis network (RBN). - - Args: - n_in (int): Number of input of the RBN. - n_out (int): Number of output of the RBN. - n_neu (int): Number of neurons in the hidden layer. - b (Union[List[float], float]): Initial value for hyperparameter b. - c (List[float]): Initial value for hyperparameter c. - activation_function (str, optional): The activation function, tanh or gaussian. Defaults to "gaussian". 
- """ - - def __init__(self, n_in, n_out, n_neu, b, c, activation_function="gaussian"): - super().__init__() - self.n_in = n_in - self.n_out = n_out - self.n_neu = n_neu - self.b = paddle.to_tensor(b) - self.c = paddle.to_tensor(c) - self.activation_function = activation_function - self.activation = Activation(self.n_neu, self.c, n_in, activation_function) - - # LeCun normal initialization - std = math.sqrt(1 / self.n_neu) - ini = paddle.ParamAttr( - initializer=paddle.nn.initializer.Normal(mean=0.0, std=std) - ) - - if self.activation_function == "gaussian": - # gaussian activation_function need to set self.b - self.init_ab() - self.last_fc_layer = paddle.nn.Linear( - self.n_neu, - self.n_out, - weight_attr=ini, - bias_attr=False, - ) - - elif self.activation_function == "tanh": - w, b = self.initialize_NN([self.n_in, self.n_neu, self.n_out]) - w_0 = paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(w[0])) - b_0 = paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(b[0])) - w_1 = paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(w[1])) - - self.hidden_layer = paddle.nn.Linear( - self.n_in, self.n_neu, weight_attr=w_0, bias_attr=b_0 - ) - - self.last_fc_layer = paddle.nn.Linear( - self.n_neu, - self.n_out, - weight_attr=w_1, - bias_attr=False, - ) - - self.last_fc_bias = self.create_parameter( - shape=b[1].shape, - default_initializer=paddle.nn.initializer.Assign(b[1]), - dtype=paddle.get_default_dtype(), - ) - else: - raise ("Not implemented yet") - - def NTK_init(self, size): - in_dim = size[0] - out_dim = size[1] - std = 1.0 / np.sqrt(in_dim) - return self.create_parameter( - shape=[in_dim, out_dim], - default_initializer=paddle.nn.initializer.Assign( - paddle.normal(shape=[in_dim, out_dim]) * std - ), - dtype=paddle.get_default_dtype(), - ) - - # Initialize network weights and biases using Xavier initialization - def initialize_NN(self, layers): - weights = [] - biases = [] - num_layers = len(layers) - for l in range(0, num_layers - 1): - W = self.NTK_init(size=[layers[l], layers[l + 1]]) - b = self.create_parameter( - shape=[1, layers[l + 1]], - default_initializer=paddle.nn.initializer.Assign( - paddle.normal(shape=[1, layers[l + 1]]) - ), - dtype=paddle.get_default_dtype(), - ) - weights.append(W) - biases.append(b) - return weights, biases - - def forward(self, x): - if self.activation_function == "gaussian": - y = self.activation(x) - y = self.last_fc_layer(y) - elif self.activation_function == "tanh": - # input : n x 1 - # hidden layer : 1 x 61 - # last fc layer : 61 x 1 - y = self.hidden_layer(x) - y = self.activation(y) - y = self.last_fc_layer(y) - y = paddle.add(y, self.last_fc_bias) - else: - raise ("Not implemented yet") - return y - - # gaussian activation_function need to set self.b - def init_ab(self): - b = np.ones((1, self.n_neu)) * self.b - self.activation.b = self.activation.create_parameter( - (1, self.n_neu), default_initializer=paddle.nn.initializer.Assign(b) - ) - - -class Activation(paddle.nn.Layer): - """This class is to create the hidden layer of a radial basis network. - - Args: - n_neu (int): Number of neurons in the hidden layer. - c (List[float32]): Initial value for hyperparameter b. - n_in (int): Last item of input shape. 
- """ - - def __init__(self, n_neu, c, n_in, activation_function="gaussian"): - super(Activation, self).__init__() - self.activation_function = activation_function - # PINN y = w2 * (tanh(w1 * x + b1)) + b2 w,b are trainable parameters, b is bais - # PIRBN y = w * exp(b^2 * |x-c|^2) w,b are trainable parameters, c is constant, b is not bias - - if self.activation_function == "gaussian": - self.n_neu = n_neu - self.c = c - self.b = self.create_parameter( - shape=(n_in, self.n_neu), - dtype=paddle.get_default_dtype(), - # Convert from tensorflow tf.random_normal_initializer(), default value mean=0.0, std=0.05 - default_initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.05), - ) - - def forward(self, inputs): - if self.activation_function == "gaussian": - return self.gaussian_function(inputs) - elif self.activation_function == "tanh": - return self.tanh_function(inputs) - - # Gaussian function,Formula (19), Page7 - def gaussian_function(self, inputs): - temp_x = paddle.matmul(inputs, paddle.ones((1, self.n_neu))) - x0 = ( - paddle.reshape( - paddle.arange(self.n_neu, dtype=paddle.get_default_dtype()), - (1, self.n_neu), - ) - * (self.c[1] - self.c[0]) - / (self.n_neu - 1) - + self.c[0] - ) - x_new = temp_x - x0 - s = self.b * self.b - return paddle.exp(-(x_new * x_new) * s) - - def tanh_function(self, inputs): - return paddle.tanh(inputs) +import math + +import numpy as np +import paddle + + +class RBN_Net(paddle.nn.Layer): + """This class is to build a radial basis network (RBN). + + Args: + n_in (int): Number of input of the RBN. + n_out (int): Number of output of the RBN. + n_neu (int): Number of neurons in the hidden layer. + b (Union[List[float], float]): Initial value for hyperparameter b. + c (List[float]): Initial value for hyperparameter c. + activation_function (str, optional): The activation function, tanh or gaussian. Defaults to "gaussian". 
+ """ + + def __init__(self, n_in, n_out, n_neu, b, c, activation_function="gaussian"): + super().__init__() + self.n_in = n_in + self.n_out = n_out + self.n_neu = n_neu + self.b = paddle.to_tensor(b) + self.c = paddle.to_tensor(c) + self.activation_function = activation_function + self.activation = Activation(self.n_neu, self.c, n_in, activation_function) + + # LeCun normal initialization + std = math.sqrt(1 / self.n_neu) + ini = paddle.ParamAttr( + initializer=paddle.nn.initializer.Normal(mean=0.0, std=std) + ) + + if self.activation_function == "gaussian": + # gaussian activation_function need to set self.b + self.init_ab() + self.last_fc_layer = paddle.nn.Linear( + self.n_neu, + self.n_out, + weight_attr=ini, + bias_attr=False, + ) + + elif self.activation_function == "tanh": + w, b = self.initialize_NN([self.n_in, self.n_neu, self.n_out]) + w_0 = paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(w[0])) + b_0 = paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(b[0])) + w_1 = paddle.ParamAttr(initializer=paddle.nn.initializer.Assign(w[1])) + + self.hidden_layer = paddle.nn.Linear( + self.n_in, self.n_neu, weight_attr=w_0, bias_attr=b_0 + ) + + self.last_fc_layer = paddle.nn.Linear( + self.n_neu, + self.n_out, + weight_attr=w_1, + bias_attr=False, + ) + + self.last_fc_bias = self.create_parameter( + shape=b[1].shape, + default_initializer=paddle.nn.initializer.Assign(b[1]), + dtype=paddle.get_default_dtype(), + ) + else: + raise ("Not implemented yet") + + def NTK_init(self, size): + in_dim = size[0] + out_dim = size[1] + std = 1.0 / np.sqrt(in_dim) + return self.create_parameter( + shape=[in_dim, out_dim], + default_initializer=paddle.nn.initializer.Assign( + paddle.normal(shape=[in_dim, out_dim]) * std + ), + dtype=paddle.get_default_dtype(), + ) + + # Initialize network weights and biases using Xavier initialization + def initialize_NN(self, layers): + weights = [] + biases = [] + num_layers = len(layers) + for l in range(0, num_layers - 1): + W = self.NTK_init(size=[layers[l], layers[l + 1]]) + b = self.create_parameter( + shape=[1, layers[l + 1]], + default_initializer=paddle.nn.initializer.Assign( + paddle.normal(shape=[1, layers[l + 1]]) + ), + dtype=paddle.get_default_dtype(), + ) + weights.append(W) + biases.append(b) + return weights, biases + + def forward(self, x): + if self.activation_function == "gaussian": + y = self.activation(x) + y = self.last_fc_layer(y) + elif self.activation_function == "tanh": + # input : n x 1 + # hidden layer : 1 x 61 + # last fc layer : 61 x 1 + y = self.hidden_layer(x) + y = self.activation(y) + y = self.last_fc_layer(y) + y = paddle.add(y, self.last_fc_bias) + else: + raise ("Not implemented yet") + return y + + # gaussian activation_function need to set self.b + def init_ab(self): + b = np.ones((1, self.n_neu)) * self.b + self.activation.b = self.activation.create_parameter( + (1, self.n_neu), default_initializer=paddle.nn.initializer.Assign(b) + ) + + +class Activation(paddle.nn.Layer): + """This class is to create the hidden layer of a radial basis network. + + Args: + n_neu (int): Number of neurons in the hidden layer. + c (List[float32]): Initial value for hyperparameter b. + n_in (int): Last item of input shape. 
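+        activation_function (str, optional): The activation function, tanh or gaussian. Defaults to "gaussian".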
+ """ + + def __init__(self, n_neu, c, n_in, activation_function="gaussian"): + super(Activation, self).__init__() + self.activation_function = activation_function + # PINN y = w2 * (tanh(w1 * x + b1)) + b2 w,b are trainable parameters, b is bais + # PIRBN y = w * exp(b^2 * |x-c|^2) w,b are trainable parameters, c is constant, b is not bias + + if self.activation_function == "gaussian": + self.n_neu = n_neu + self.c = c + self.b = self.create_parameter( + shape=(n_in, self.n_neu), + dtype=paddle.get_default_dtype(), + # Convert from tensorflow tf.random_normal_initializer(), default value mean=0.0, std=0.05 + default_initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.05), + ) + + def forward(self, inputs): + if self.activation_function == "gaussian": + return self.gaussian_function(inputs) + elif self.activation_function == "tanh": + return self.tanh_function(inputs) + + # Gaussian function,Formula (19), Page7 + def gaussian_function(self, inputs): + temp_x = paddle.matmul(inputs, paddle.ones((1, self.n_neu))) + x0 = ( + paddle.reshape( + paddle.arange(self.n_neu, dtype=paddle.get_default_dtype()), + (1, self.n_neu), + ) + * (self.c[1] - self.c[0]) + / (self.n_neu - 1) + + self.c[0] + ) + x_new = temp_x - x0 + s = self.b * self.b + return paddle.exp(-(x_new * x_new) * s) + + def tanh_function(self, inputs): + return paddle.tanh(inputs) diff --git a/jointContribution/PIRBN/train.py b/jointContribution/PIRBN/train.py index cce269044d..28dac91efc 100644 --- a/jointContribution/PIRBN/train.py +++ b/jointContribution/PIRBN/train.py @@ -1,99 +1,99 @@ -import paddle - -paddle.framework.core.set_prim_eager_enabled(True) - - -class Trainer: - def __init__( - self, - pirbn, - x_train, - y_train, - learning_rate=0.001, - maxiter=10000, - adaptive_weights=True, - ): - # set attributes - self.pirbn = pirbn - - self.learning_rate = learning_rate - self.x_train = [ - paddle.to_tensor(x, dtype=paddle.get_default_dtype()) for x in x_train - ] - self.y_train = paddle.to_tensor(y_train, dtype=paddle.get_default_dtype()) - - self.maxiter = maxiter - self.loss_g = [] # eq loss - self.loss_b = [] # boundary loss - self.iter = 0 - self.a_g = paddle.to_tensor(1.0) - self.a_b = paddle.to_tensor(1.0) - self.his_a_g = [] - self.his_a_b = [] - self.optimizer = paddle.optimizer.Adam( - learning_rate=0.001, parameters=self.pirbn.parameters() - ) - self.ntk_list = {} - # Update loss by calculate ntk - self.adaptive_weights = ( - adaptive_weights # Adaptive weights for physics-informed neural networks - ) - - def Loss(self, x, y, a_g, a_b): - u_xx, u_b, _ = self.pirbn(x) - loss_g = 0.5 * paddle.mean(paddle.square(u_xx - y)) - loss_b = 0.5 * paddle.mean(paddle.square(u_b)) - if self.adaptive_weights: - loss = loss_g * a_g + loss_b * a_b - else: - loss = loss_g + 100 * loss_b - return loss, loss_g, loss_b - - def evaluate(self): - # compute loss - loss, loss_g, loss_b = self.Loss(self.x_train, self.y_train, self.a_g, self.a_b) - loss_g_numpy = float(loss_g) - loss_b_numpy = float(loss_b) - # eq loss - self.loss_g.append(loss_g_numpy) - # boundary loss - self.loss_b.append(loss_b_numpy) - if self.iter % 100 == 0: - if self.adaptive_weights: - self.a_g, self.a_b, _ = self.pirbn.cal_ntk(self.x_train) - print( - "Iter : ", - self.iter, - "\tloss : ", - float(loss), - "\tboundary loss : ", - float(loss_b), - "\teq loss : ", - float(loss_g), - ) - print("\ta_g =", float(self.a_g), "\ta_b =", float(self.a_b)) - else: - print( - "Iter : ", - self.iter, - "\tloss : ", - float(loss), - "\tboundary loss : ", - float(loss_b), - 
"\teq loss : ", - float(loss_g), - ) - self.his_a_g.append(self.a_g) - self.his_a_b.append(self.a_b) - - self.iter = self.iter + 1 - return loss - - def fit(self, output_Kgg): - for i in range(0, self.maxiter): - loss = self.evaluate() - loss.backward() - if i in output_Kgg: - self.ntk_list[f"{i}"] = self.pirbn.cal_K(self.x_train) - self.optimizer.step() - self.optimizer.clear_grad() +import paddle + +paddle.framework.core.set_prim_eager_enabled(True) + + +class Trainer: + def __init__( + self, + pirbn, + x_train, + y_train, + learning_rate=0.001, + maxiter=10000, + adaptive_weights=True, + ): + # set attributes + self.pirbn = pirbn + + self.learning_rate = learning_rate + self.x_train = [ + paddle.to_tensor(x, dtype=paddle.get_default_dtype()) for x in x_train + ] + self.y_train = paddle.to_tensor(y_train, dtype=paddle.get_default_dtype()) + + self.maxiter = maxiter + self.loss_g = [] # eq loss + self.loss_b = [] # boundary loss + self.iter = 0 + self.a_g = paddle.to_tensor(1.0) + self.a_b = paddle.to_tensor(1.0) + self.his_a_g = [] + self.his_a_b = [] + self.optimizer = paddle.optimizer.Adam( + learning_rate=0.001, parameters=self.pirbn.parameters() + ) + self.ntk_list = {} + # Update loss by calculate ntk + self.adaptive_weights = ( + adaptive_weights # Adaptive weights for physics-informed neural networks + ) + + def Loss(self, x, y, a_g, a_b): + u_xx, u_b, _ = self.pirbn(x) + loss_g = 0.5 * paddle.mean(paddle.square(u_xx - y)) + loss_b = 0.5 * paddle.mean(paddle.square(u_b)) + if self.adaptive_weights: + loss = loss_g * a_g + loss_b * a_b + else: + loss = loss_g + 100 * loss_b + return loss, loss_g, loss_b + + def evaluate(self): + # compute loss + loss, loss_g, loss_b = self.Loss(self.x_train, self.y_train, self.a_g, self.a_b) + loss_g_numpy = float(loss_g) + loss_b_numpy = float(loss_b) + # eq loss + self.loss_g.append(loss_g_numpy) + # boundary loss + self.loss_b.append(loss_b_numpy) + if self.iter % 100 == 0: + if self.adaptive_weights: + self.a_g, self.a_b, _ = self.pirbn.cal_ntk(self.x_train) + print( + "Iter : ", + self.iter, + "\tloss : ", + float(loss), + "\tboundary loss : ", + float(loss_b), + "\teq loss : ", + float(loss_g), + ) + print("\ta_g =", float(self.a_g), "\ta_b =", float(self.a_b)) + else: + print( + "Iter : ", + self.iter, + "\tloss : ", + float(loss), + "\tboundary loss : ", + float(loss_b), + "\teq loss : ", + float(loss_g), + ) + self.his_a_g.append(self.a_g) + self.his_a_b.append(self.a_b) + + self.iter = self.iter + 1 + return loss + + def fit(self, output_Kgg): + for i in range(0, self.maxiter): + loss = self.evaluate() + loss.backward() + if i in output_Kgg: + self.ntk_list[f"{i}"] = self.pirbn.cal_K(self.x_train) + self.optimizer.step() + self.optimizer.clear_grad() diff --git a/jointContribution/XPINNs/XPINN_2D_PoissonsEqn.py b/jointContribution/XPINNs/XPINN_2D_PoissonsEqn.py index d50dccd9bc..b3e0cbe17d 100755 --- a/jointContribution/XPINNs/XPINN_2D_PoissonsEqn.py +++ b/jointContribution/XPINNs/XPINN_2D_PoissonsEqn.py @@ -1,673 +1,673 @@ -import os -import time - -import matplotlib.pyplot as plt -import numpy as np -import paddle -import plotting -import scipy.io -from matplotlib import gridspec -from matplotlib import patches -from matplotlib import tri -from paddle import nn - -import ppsci - -# For the use of the second derivative: paddle.cos, paddle.exp -paddle.framework.core.set_prim_eager_enabled(True) - -np.random.seed(1234) -paddle.seed(1234) - - -class XPINN(nn.Layer): - # Initialize the class - def __init__(self, layer_list): - 
super().__init__() - # Initialize NNs - self.weights1, self.biases1, self.amplitudes1 = self.initialize_nn( - layer_list[0], "layers1" - ) - self.weights2, self.biases2, self.amplitudes2 = self.initialize_nn( - layer_list[1], "layers2" - ) - self.weights3, self.biases3, self.amplitudes3 = self.initialize_nn( - layer_list[2], "layers3" - ) - - def preprocess_data(self, dataset): - x_ub, ub, x_f1, x_f2, x_f3, x_fi1, x_fi2 = dataset - self.x_ub = paddle.to_tensor(x_ub[:, 0:1], dtype=paddle.float64) - self.y_ub = paddle.to_tensor(x_ub[:, 1:2], dtype=paddle.float64) - self.ub = paddle.to_tensor(ub, dtype=paddle.float64) - self.x_f1 = paddle.to_tensor(x_f1[:, 0:1], dtype=paddle.float64) - self.y_f1 = paddle.to_tensor(x_f1[:, 1:2], dtype=paddle.float64) - self.x_f2 = paddle.to_tensor(x_f2[:, 0:1], dtype=paddle.float64) - self.y_f2 = paddle.to_tensor(x_f2[:, 1:2], dtype=paddle.float64) - self.x_f3 = paddle.to_tensor(x_f3[:, 0:1], dtype=paddle.float64) - self.y_f3 = paddle.to_tensor(x_f3[:, 1:2], dtype=paddle.float64) - self.x_fi1 = paddle.to_tensor(x_fi1[:, 0:1], dtype=paddle.float64) - self.y_fi1 = paddle.to_tensor(x_fi1[:, 1:2], dtype=paddle.float64) - self.x_fi2 = paddle.to_tensor(x_fi2[:, 0:1], dtype=paddle.float64) - self.y_fi2 = paddle.to_tensor(x_fi2[:, 1:2], dtype=paddle.float64) - - def forward(self, dataset): - self.preprocess_data(dataset) - self.ub1_pred = self.net_u1(self.x_ub, self.y_ub) - self.ub2_pred = self.net_u2(self.x_f2, self.y_f2) - self.ub3_pred = self.net_u3(self.x_f3, self.y_f3) - - ( - self.f1_pred, - self.f2_pred, - self.f3_pred, - self.fi1_pred, - self.fi2_pred, - self.uavgi1_pred, - self.uavgi2_pred, - self.u1i1_pred, - self.u1i2_pred, - self.u2i1_pred, - self.u3i2_pred, - ) = self.net_f( - self.x_f1, - self.y_f1, - self.x_f2, - self.y_f2, - self.x_f3, - self.y_f3, - self.x_fi1, - self.y_fi1, - self.x_fi2, - self.y_fi2, - ) - - self.loss1 = ( - 20 * paddle.mean(paddle.square(self.ub - self.ub1_pred)) - + paddle.mean(paddle.square(self.f1_pred)) - + 1 * paddle.mean(paddle.square(self.fi1_pred)) - + 1 * paddle.mean(paddle.square(self.fi2_pred)) - + 20 * paddle.mean(paddle.square(self.u1i1_pred - self.uavgi1_pred)) - + 20 * paddle.mean(paddle.square(self.u1i2_pred - self.uavgi2_pred)) - ) - - self.loss2 = ( - paddle.mean(paddle.square(self.f2_pred)) - + 1 * paddle.mean(paddle.square(self.fi1_pred)) - + 20 * paddle.mean(paddle.square(self.u2i1_pred - self.uavgi1_pred)) - ) - - self.loss3 = ( - paddle.mean(paddle.square(self.f3_pred)) - + 1 * paddle.mean(paddle.square(self.fi2_pred)) - + 20 * paddle.mean(paddle.square(self.u3i2_pred - self.uavgi2_pred)) - ) - return [self.loss1, self.loss2, self.loss3] - - def predict(self, x_star1, x_star2, x_star3): - x_star1 = paddle.to_tensor(x_star1, dtype=paddle.float64) - x_star2 = paddle.to_tensor(x_star2, dtype=paddle.float64) - x_star3 = paddle.to_tensor(x_star3, dtype=paddle.float64) - self.ub1_pred = self.net_u1(x_star1[:, 0:1], x_star1[:, 1:2]) - self.ub2_pred = self.net_u2(x_star2[:, 0:1], x_star2[:, 1:2]) - self.ub3_pred = self.net_u3(x_star3[:, 0:1], x_star3[:, 1:2]) - return [self.ub1_pred.numpy(), self.ub2_pred.numpy(), self.ub3_pred.numpy()] - - def initialize_nn(self, layers, name_prefix): - # The weight used in neural_net - weights = [] - # The bias used in neural_net - biases = [] - # The amplitude used in neural_net - amplitudes = [] - num_layers = len(layers) - for l in range(0, num_layers - 1): - weight = self.create_parameter( - shape=[layers[l], layers[l + 1]], - dtype="float64", - 
default_initializer=self.w_init((layers[l], layers[l + 1])), - ) - bias = self.create_parameter( - shape=[1, layers[l + 1]], - dtype="float64", - is_bias=True, - default_initializer=nn.initializer.Constant(0.0), - ) - amplitude = self.create_parameter( - shape=[1], - dtype="float64", - is_bias=True, - default_initializer=nn.initializer.Constant(0.05), - ) - - self.add_parameter(name_prefix + "_w_" + str(l), weight) - self.add_parameter(name_prefix + "_b_" + str(l), bias) - self.add_parameter(name_prefix + "_a_" + str(l), amplitude) - weights.append(weight) - biases.append(bias) - amplitudes.append(amplitude) - return weights, biases, amplitudes - - def w_init(self, size): - in_dim = size[0] - out_dim = size[1] - xavier_stddev = np.sqrt(2 / (in_dim + out_dim)) - param = paddle.empty(size, "float64") - param = ppsci.utils.initializer.trunc_normal_(param, 0.0, xavier_stddev) - return nn.initializer.Assign(param) - - def neural_net_tanh(self, x, weights, biases, amplitudes): - num_layers = len(weights) + 1 - - h = x - for l in range(0, num_layers - 2): - w = weights[l] - b = biases[l] - h = paddle.tanh(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) - w = weights[-1] - b = biases[-1] - y = paddle.add(paddle.matmul(h, w), b) - return y - - def neural_net_sin(self, x, weights, biases, amplitudes): - num_layers = len(weights) + 1 - - h = x - for l in range(0, num_layers - 2): - w = weights[l] - b = biases[l] - h = paddle.sin(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) - w = weights[-1] - b = biases[-1] - y = paddle.add(paddle.matmul(h, w), b) - return y - - def neural_net_cos(self, x, weights, biases, amplitudes): - num_layers = len(weights) + 1 - - h = x - for l in range(0, num_layers - 2): - w = weights[l] - b = biases[l] - h = paddle.cos(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) - w = weights[-1] - b = biases[-1] - y = paddle.add(paddle.matmul(h, w), b) - return y - - def net_u1(self, x, y): - return self.neural_net_tanh( - paddle.concat([x, y], 1), self.weights1, self.biases1, self.amplitudes1 - ) - - def net_u2(self, x, y): - return self.neural_net_sin( - paddle.concat([x, y], 1), self.weights2, self.biases2, self.amplitudes2 - ) - - def net_u3(self, x, y): - return self.neural_net_cos( - paddle.concat([x, y], 1), self.weights3, self.biases3, self.amplitudes3 - ) - - def get_grad(self, outputs, inputs): - grad = paddle.grad(outputs, inputs, retain_graph=True, create_graph=True) - return grad[0] - - def net_f(self, x1, y1, x2, y2, x3, y3, xi1, yi1, xi2, yi2): - # Gradients need to be calculated - x1.stop_gradient = False - y1.stop_gradient = False - x2.stop_gradient = False - y2.stop_gradient = False - x3.stop_gradient = False - y3.stop_gradient = False - xi1.stop_gradient = False - yi1.stop_gradient = False - xi2.stop_gradient = False - yi2.stop_gradient = False - - # Sub-Net1 - u1 = self.net_u1(x1, y1) - u1_x = self.get_grad(u1, x1) - u1_y = self.get_grad(u1, y1) - u1_xx = self.get_grad(u1_x, x1) - u1_yy = self.get_grad(u1_y, y1) - - # Sub-Net2 - u2 = self.net_u2(x2, y2) - u2_x = self.get_grad(u2, x2) - u2_y = self.get_grad(u2, y2) - u2_xx = self.get_grad(u2_x, x2) - u2_yy = self.get_grad(u2_y, y2) - - # Sub-Net3 - u3 = self.net_u3(x3, y3) - u3_x = self.get_grad(u3, x3) - u3_y = self.get_grad(u3, y3) - u3_xx = self.get_grad(u3_x, x3) - u3_yy = self.get_grad(u3_y, y3) - - # Sub-Net1, Interface 1 - u1i1 = self.net_u1(xi1, yi1) - u1i1_x = self.get_grad(u1i1, xi1) - u1i1_y = self.get_grad(u1i1, yi1) - u1i1_xx = self.get_grad(u1i1_x, xi1) - u1i1_yy = 
self.get_grad(u1i1_y, yi1) - - # Sub-Net2, Interface 1 - u2i1 = self.net_u2(xi1, yi1) - u2i1_x = self.get_grad(u2i1, xi1) - u2i1_y = self.get_grad(u2i1, yi1) - u2i1_xx = self.get_grad(u2i1_x, xi1) - u2i1_yy = self.get_grad(u2i1_y, yi1) - - # Sub-Net1, Interface 2 - u1i2 = self.net_u1(xi2, yi2) - u1i2_x = self.get_grad(u1i2, xi2) - u1i2_y = self.get_grad(u1i2, yi2) - u1i2_xx = self.get_grad(u1i2_x, xi2) - u1i2_yy = self.get_grad(u1i2_y, yi2) - - # Sub-Net3, Interface 2 - u3i2 = self.net_u3(xi2, yi2) - u3i2_x = self.get_grad(u3i2, xi2) - u3i2_y = self.get_grad(u3i2, yi2) - u3i2_xx = self.get_grad(u3i2_x, xi2) - u3i2_yy = self.get_grad(u3i2_y, yi2) - - # Average value (Required for enforcing the average solution along the interface) - uavgi1 = (u1i1 + u2i1) / 2 - uavgi2 = (u1i2 + u3i2) / 2 - - # Residuals - f1 = u1_xx + u1_yy - (paddle.exp(x1) + paddle.exp(y1)) - f2 = u2_xx + u2_yy - (paddle.exp(x2) + paddle.exp(y2)) - f3 = u3_xx + u3_yy - (paddle.exp(x3) + paddle.exp(y3)) - - # Residual continuity conditions on the interfaces - fi1 = (u1i1_xx + u1i1_yy - (paddle.exp(xi1) + paddle.exp(yi1))) - ( - u2i1_xx + u2i1_yy - (paddle.exp(xi1) + paddle.exp(yi1)) - ) - fi2 = (u1i2_xx + u1i2_yy - (paddle.exp(xi2) + paddle.exp(yi2))) - ( - u3i2_xx + u3i2_yy - (paddle.exp(xi2) + paddle.exp(yi2)) - ) - - return f1, f2, f3, fi1, fi2, uavgi1, uavgi2, u1i1, u1i2, u2i1, u3i2 - - -class Trainer: - def __init__(self, layer_list, dataset): - self.model = XPINN(layer_list) - self.optimizer = paddle.optimizer.Adam( - learning_rate=0.0008, parameters=self.model.parameters() - ) - self.dataset = dataset - - def train(self, n_iter, x_star1, x_star2, x_star3, u_exact2, u_exact3): - mse_history1 = [] - mse_history2 = [] - mse_history3 = [] - l2_err2 = [] - l2_err3 = [] - - for it in range(n_iter): - loss1_value, loss2_value, loss3_value = self.model(self.dataset) - loss = loss1_value + loss2_value + loss3_value - loss.backward() - self.optimizer.step() - self.optimizer.clear_grad() - - if it % 20 == 0: - # Predicted solution - _, u_pred2, u_pred3 = self.model.predict(x_star1, x_star2, x_star3) - - # Relative L2 error in subdomains 2 and 3 - l2_error2 = np.linalg.norm(u_exact2 - u_pred2, 2) / np.linalg.norm( - u_exact2, 2 - ) - l2_error3 = np.linalg.norm(u_exact3 - u_pred3, 2) / np.linalg.norm( - u_exact3, 2 - ) - - print( - "It: %d, Loss1: %.3e, Loss2: %.3e, Loss3: %.3e, L2_err2: %.3e, L2_err3: %.3e" - % (it, loss1_value, loss2_value, loss3_value, l2_error2, l2_error3) - ) - - mse_history1.append(loss1_value) - mse_history2.append(loss2_value) - mse_history3.append(loss3_value) - l2_err2.append(l2_error2) - l2_err3.append(l2_error3) - - return mse_history1, mse_history2, mse_history3, l2_err2, l2_err3 - - def predict(self, x_star1, x_star2, x_star3): - return self.model.predict(x_star1, x_star2, x_star3) - - -if __name__ == "__main__": - # Boundary points from subdomain 1 - n_ub = 200 - - # Residual points in three subdomains - n_f1 = 5000 - n_f2 = 1800 - n_f3 = 1200 - - # Interface points along the two interfaces - n_i1 = 100 - n_i2 = 100 - - # NN architecture in each subdomain - layers1 = [2, 30, 30, 1] - layers2 = [2, 20, 20, 20, 20, 1] - layers3 = [2, 25, 25, 25, 1] - - max_iter = 501 - - # Load training data (boundary points), residual and interface points from .mat file - # All points are generated in Matlab - data = scipy.io.loadmat("./data/XPINN_2D_PoissonEqn.mat") - - x_f1 = data["x_f1"].flatten()[:, None] - y_f1 = data["y_f1"].flatten()[:, None] - x_f2 = data["x_f2"].flatten()[:, None] - y_f2 = 
data["y_f2"].flatten()[:, None] - x_f3 = data["x_f3"].flatten()[:, None] - y_f3 = data["y_f3"].flatten()[:, None] - xi1 = data["xi1"].flatten()[:, None] - yi1 = data["yi1"].flatten()[:, None] - xi2 = data["xi2"].flatten()[:, None] - yi2 = data["yi2"].flatten()[:, None] - xb = data["xb"].flatten()[:, None] - yb = data["yb"].flatten()[:, None] - - ub_train = data["ub"].flatten()[:, None] - u_exact = data["u_exact"].flatten()[:, None] - u_exact2 = data["u_exact2"].flatten()[:, None] - u_exact3 = data["u_exact3"].flatten()[:, None] - - x_f1_train = np.hstack((x_f1.flatten()[:, None], y_f1.flatten()[:, None])) - x_f2_train = np.hstack((x_f2.flatten()[:, None], y_f2.flatten()[:, None])) - x_f3_train = np.hstack((x_f3.flatten()[:, None], y_f3.flatten()[:, None])) - - x_fi1_train = np.hstack((xi1.flatten()[:, None], yi1.flatten()[:, None])) - x_fi2_train = np.hstack((xi2.flatten()[:, None], yi2.flatten()[:, None])) - - x_ub_train = np.hstack((xb.flatten()[:, None], yb.flatten()[:, None])) - - # Points in the whole domain - x_total = data["x_total"].flatten()[:, None] - y_total = data["y_total"].flatten()[:, None] - - x_star1 = np.hstack((x_f1.flatten()[:, None], y_f1.flatten()[:, None])) - x_star2 = np.hstack((x_f2.flatten()[:, None], y_f2.flatten()[:, None])) - x_star3 = np.hstack((x_f3.flatten()[:, None], y_f3.flatten()[:, None])) - - # Randomly select the residual points from sub-domains - idx1 = np.random.choice(x_f1_train.shape[0], n_f1, replace=False) - x_f1_train = x_f1_train[idx1, :] - - idx2 = np.random.choice(x_f2_train.shape[0], n_f2, replace=False) - x_f2_train = x_f2_train[idx2, :] - - idx3 = np.random.choice(x_f3_train.shape[0], n_f3, replace=False) - x_f3_train = x_f3_train[idx3, :] - - # Randomly select boundary points - idx4 = np.random.choice(x_ub_train.shape[0], n_ub, replace=False) - x_ub_train = x_ub_train[idx4, :] - ub_train = ub_train[idx4, :] - - # Randomly select the interface points along two interfaces - idxi1 = np.random.choice(x_fi1_train.shape[0], n_i1, replace=False) - x_fi1_train = x_fi1_train[idxi1, :] - - idxi2 = np.random.choice(x_fi2_train.shape[0], n_i2, replace=False) - x_fi2_train = x_fi2_train[idxi2, :] - - layer_list = ( - layers1, - layers2, - layers3, - ) - dataset = ( - x_ub_train, - ub_train, - x_f1_train, - x_f2_train, - x_f3_train, - x_fi1_train, - x_fi2_train, - ) - - trainer_obj = Trainer( - layer_list, - dataset, - ) - - # Training - start_time = time.time() - mse_hist1, mse_hist2, mse_hist3, l2_err2, l2_err3 = trainer_obj.train( - max_iter, x_star1, x_star2, x_star3, u_exact2, u_exact3 - ) - elapsed = time.time() - start_time - print("Training time: %.4f" % (elapsed)) - - # Solution prediction - u_pred1, u_pred2, u_pred3 = trainer_obj.predict(x_star1, x_star2, x_star3) - - # Needed for plotting - x1, y1 = x_star1[:, 0:1], x_star1[:, 1:2] - triang_1 = tri.Triangulation(x1.flatten(), y1.flatten()) - x2, y2 = x_star2[:, 0:1], x_star2[:, 1:2] - triang_2 = tri.Triangulation(x2.flatten(), y2.flatten()) - x3, y3 = x_star3[:, 0:1], x_star3[:, 1:2] - triang_3 = tri.Triangulation(x3.flatten(), y3.flatten()) - x_tot = np.concatenate([x1, x2, x3]) - y_tot = np.concatenate([y1, y2, y3]) - triang_total = tri.Triangulation(x_tot.flatten(), y_tot.flatten()) - - # Concatenating the solution from subdomains - u_pred = np.concatenate([u_pred1, u_pred2, u_pred3]) - - error_u_total = np.linalg.norm( - np.squeeze(u_exact) - u_pred.flatten(), 2 - ) / np.linalg.norm(np.squeeze(u_exact), 2) - print("Error u_total: %e" % (error_u_total)) - - ############################# 
Plotting ############################### - os.makedirs("./target", exist_ok=True) - fig, ax = plotting.newfig(1.0, 1.1) - plt.plot(range(1, max_iter + 1, 20), mse_hist1, "r-", linewidth=1, label="Sub-Net1") - plt.plot( - range(1, max_iter + 1, 20), mse_hist2, "b-.", linewidth=1, label="Sub-Net2" - ) - plt.plot( - range(1, max_iter + 1, 20), mse_hist3, "g--", linewidth=1, label="Sub-Net3" - ) - plt.xlabel("$\#$ iterations") - plt.ylabel("Loss") - plt.yscale("log") - plt.legend(loc="upper right") - plotting.savefig("./target/XPINN_PoissonMSEhistory") - - fig, ax = plotting.newfig(1.0, 1.1) - plt.plot( - range(1, max_iter + 1, 20), l2_err2, "r-", linewidth=1, label="Subdomain 2" - ) - plt.plot( - range(1, max_iter + 1, 20), l2_err3, "b--", linewidth=1, label="Subdomain 3" - ) - plt.xlabel("$\#$ iterations") - plt.ylabel("Rel. $L_2$ error") - plt.yscale("log") - plt.legend(loc="upper right") - plotting.savefig("./target/XPINN_PoissonErrhistory") - - aa1 = np.array([[np.squeeze(xb[-1]), np.squeeze(yb[-1])]]) - aa2 = np.array( - [ - [1.8, np.squeeze(yb[-1])], - [+1.8, -1.7], - [-1.6, -1.7], - [-1.6, 1.55], - [1.8, 1.55], - [1.8, np.squeeze(yb[-1])], - ] - ) - x_domain1 = np.squeeze(xb.flatten()[:, None]) - y_domain1 = np.squeeze(yb.flatten()[:, None]) - aa3 = np.array([x_domain1, y_domain1]).T - xx = np.vstack((aa3, aa2, aa1)) - triang_total = tri.Triangulation(x_tot.flatten(), y_tot.flatten()) - - x_fi1_train_plot = np.hstack((xi1.flatten()[:, None], yi1.flatten()[:, None])) - x_fi2_train_plot = np.hstack((xi2.flatten()[:, None], yi2.flatten()[:, None])) - - fig, ax = plotting.newfig(1.0, 1.1) - gridspec.GridSpec(1, 1) - ax = plt.subplot2grid((1, 1), (0, 0)) - tcf = ax.tricontourf(triang_total, np.squeeze(u_exact), 100, cmap="jet") - ax.add_patch(patches.Polygon(xx, closed=True, fill=True, color="w", edgecolor="w")) - tcbar = fig.colorbar(tcf) - tcbar.ax.tick_params(labelsize=28) - ax.set_xlabel("$x$", fontsize=32) - ax.set_ylabel("$y$", fontsize=32) - ax.set_title("$u$ (Exact)", fontsize=34) - ax.tick_params(axis="x", labelsize=28) - ax.tick_params(axis="y", labelsize=28) - plt.plot( - x_fi1_train_plot[:, 0:1], - x_fi1_train_plot[:, 1:2], - "w-", - markersize=2, - label="Interface Pts", - ) - plt.plot( - x_fi2_train_plot[:, 0:1], - x_fi2_train_plot[:, 1:2], - "w-", - markersize=2, - label="Interface Pts", - ) - fig.set_size_inches(w=12, h=9) - plotting.savefig("./target/XPINN_PoissonEq_ExSol") - plt.show() - - fig, ax = plotting.newfig(1.0, 1.1) - gridspec.GridSpec(1, 1) - ax = plt.subplot2grid((1, 1), (0, 0)) - tcf = ax.tricontourf(triang_total, u_pred.flatten(), 100, cmap="jet") - ax.add_patch(patches.Polygon(xx, closed=True, fill=True, color="w", edgecolor="w")) - tcbar = fig.colorbar(tcf) - tcbar.ax.tick_params(labelsize=28) - ax.set_xlabel("$x$", fontsize=32) - ax.set_ylabel("$y$", fontsize=32) - ax.set_title("$u$ (Predicted)", fontsize=34) - ax.tick_params(axis="x", labelsize=28) - ax.tick_params(axis="y", labelsize=28) - plt.plot( - x_fi1_train_plot[:, 0:1], - x_fi1_train_plot[:, 1:2], - "w-", - markersize=2, - label="Interface Pts", - ) - plt.plot( - x_fi2_train_plot[:, 0:1], - x_fi2_train_plot[:, 1:2], - "w-", - markersize=2, - label="Interface Pts", - ) - fig.set_size_inches(w=12, h=9) - plotting.savefig("./target/XPINN_PoissonEq_Sol") - plt.show() - - fig, ax = plotting.newfig(1.0, 1.1) - gridspec.GridSpec(1, 1) - ax = plt.subplot2grid((1, 1), (0, 0)) - tcf = ax.tricontourf( - triang_total, abs(np.squeeze(u_exact) - u_pred.flatten()), 100, cmap="jet" - ) - 
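The residuals assembled in net_f earlier in this file need second derivatives, which is why the inputs get stop_gradient = False and every paddle.grad call passes create_graph=True: the graph of the first derivative is kept so it can be differentiated again. A minimal sketch of that nested-autodiff pattern with a stand-in 1-D function instead of the sub-networks:

import paddle

paddle.framework.core.set_prim_eager_enabled(True)  # required for the second derivative in eager mode

x = paddle.linspace(0.0, 1.0, 8).reshape([8, 1])
x.stop_gradient = False                             # mark x as differentiable input
u = paddle.sin(x) * paddle.exp(x)                   # stand-in for net_u1(x, y)
u_x = paddle.grad(u, x, retain_graph=True, create_graph=True)[0]
u_xx = paddle.grad(u_x, x, retain_graph=True, create_graph=True)[0]
residual = u_xx - paddle.exp(x)                     # analogous to f1 = u_xx + u_yy - (exp(x) + exp(y))
print(residual.shape)                               # [8, 1]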
ax.add_patch(patches.Polygon(xx, closed=True, fill=True, color="w", edgecolor="w")) - tcbar = fig.colorbar(tcf) - tcbar.ax.tick_params(labelsize=28) - ax.set_xlabel("$x$", fontsize=32) - ax.set_ylabel("$y$", fontsize=32) - ax.set_title("Point-wise Error", fontsize=34) - ax.tick_params(axis="x", labelsize=28) - ax.tick_params(axis="y", labelsize=28) - plt.plot( - x_fi1_train_plot[:, 0:1], - x_fi1_train_plot[:, 1:2], - "w-", - markersize=2, - label="Interface Pts", - ) - plt.plot( - x_fi2_train_plot[:, 0:1], - x_fi2_train_plot[:, 1:2], - "w-", - markersize=2, - label="Interface Pts", - ) - fig.set_size_inches(w=12, h=9) - plotting.savefig("./target/XPINN_PoissonEq_Err") - plt.show() - - fig, ax = plotting.newfig(1.0, 1.1) - gridspec.GridSpec(1, 1) - ax = plt.subplot2grid((1, 1), (0, 0)) - plt.plot( - x_f1_train[:, 0:1], - x_f1_train[:, 1:2], - "r*", - markersize=4, - label="Residual Pts (sub-domain 1)", - ) - plt.plot( - x_f2_train[:, 0:1], - x_f2_train[:, 1:2], - "yo", - markersize=4, - label="Residual Pts (sub-domain 2)", - ) - plt.plot( - x_f3_train[:, 0:1], - x_f3_train[:, 1:2], - "gs", - markersize=4, - label="Residual Pts (sub-domain 3)", - ) - plt.plot( - x_fi1_train[:, 0:1], - x_fi1_train[:, 1:2], - "bs", - markersize=7, - label="Interface Pts 1", - ) - plt.plot( - x_fi2_train[:, 0:1], - x_fi2_train[:, 1:2], - "bs", - markersize=7, - label="Interface Pts 1", - ) - plt.plot( - x_ub_train[:, 0:1], - x_ub_train[:, 1:2], - "kx", - markersize=9, - label="Interface Pts 1", - ) - ax.set_xlabel("$x$", fontsize=30) - ax.set_ylabel("$y$", fontsize=30) - ax.tick_params(axis="x", labelsize=26) - ax.tick_params(axis="y", labelsize=26) - fig.set_size_inches(w=12, h=12) - plotting.savefig("./target/XPINN_Poisson_dataPts") - plt.show() +import os +import time + +import matplotlib.pyplot as plt +import numpy as np +import paddle +import plotting +import scipy.io +from matplotlib import gridspec +from matplotlib import patches +from matplotlib import tri +from paddle import nn + +import ppsci + +# For the use of the second derivative: paddle.cos, paddle.exp +paddle.framework.core.set_prim_eager_enabled(True) + +np.random.seed(1234) +paddle.seed(1234) + + +class XPINN(nn.Layer): + # Initialize the class + def __init__(self, layer_list): + super().__init__() + # Initialize NNs + self.weights1, self.biases1, self.amplitudes1 = self.initialize_nn( + layer_list[0], "layers1" + ) + self.weights2, self.biases2, self.amplitudes2 = self.initialize_nn( + layer_list[1], "layers2" + ) + self.weights3, self.biases3, self.amplitudes3 = self.initialize_nn( + layer_list[2], "layers3" + ) + + def preprocess_data(self, dataset): + x_ub, ub, x_f1, x_f2, x_f3, x_fi1, x_fi2 = dataset + self.x_ub = paddle.to_tensor(x_ub[:, 0:1], dtype=paddle.float64) + self.y_ub = paddle.to_tensor(x_ub[:, 1:2], dtype=paddle.float64) + self.ub = paddle.to_tensor(ub, dtype=paddle.float64) + self.x_f1 = paddle.to_tensor(x_f1[:, 0:1], dtype=paddle.float64) + self.y_f1 = paddle.to_tensor(x_f1[:, 1:2], dtype=paddle.float64) + self.x_f2 = paddle.to_tensor(x_f2[:, 0:1], dtype=paddle.float64) + self.y_f2 = paddle.to_tensor(x_f2[:, 1:2], dtype=paddle.float64) + self.x_f3 = paddle.to_tensor(x_f3[:, 0:1], dtype=paddle.float64) + self.y_f3 = paddle.to_tensor(x_f3[:, 1:2], dtype=paddle.float64) + self.x_fi1 = paddle.to_tensor(x_fi1[:, 0:1], dtype=paddle.float64) + self.y_fi1 = paddle.to_tensor(x_fi1[:, 1:2], dtype=paddle.float64) + self.x_fi2 = paddle.to_tensor(x_fi2[:, 0:1], dtype=paddle.float64) + self.y_fi2 = paddle.to_tensor(x_fi2[:, 1:2], 
dtype=paddle.float64) + + def forward(self, dataset): + self.preprocess_data(dataset) + self.ub1_pred = self.net_u1(self.x_ub, self.y_ub) + self.ub2_pred = self.net_u2(self.x_f2, self.y_f2) + self.ub3_pred = self.net_u3(self.x_f3, self.y_f3) + + ( + self.f1_pred, + self.f2_pred, + self.f3_pred, + self.fi1_pred, + self.fi2_pred, + self.uavgi1_pred, + self.uavgi2_pred, + self.u1i1_pred, + self.u1i2_pred, + self.u2i1_pred, + self.u3i2_pred, + ) = self.net_f( + self.x_f1, + self.y_f1, + self.x_f2, + self.y_f2, + self.x_f3, + self.y_f3, + self.x_fi1, + self.y_fi1, + self.x_fi2, + self.y_fi2, + ) + + self.loss1 = ( + 20 * paddle.mean(paddle.square(self.ub - self.ub1_pred)) + + paddle.mean(paddle.square(self.f1_pred)) + + 1 * paddle.mean(paddle.square(self.fi1_pred)) + + 1 * paddle.mean(paddle.square(self.fi2_pred)) + + 20 * paddle.mean(paddle.square(self.u1i1_pred - self.uavgi1_pred)) + + 20 * paddle.mean(paddle.square(self.u1i2_pred - self.uavgi2_pred)) + ) + + self.loss2 = ( + paddle.mean(paddle.square(self.f2_pred)) + + 1 * paddle.mean(paddle.square(self.fi1_pred)) + + 20 * paddle.mean(paddle.square(self.u2i1_pred - self.uavgi1_pred)) + ) + + self.loss3 = ( + paddle.mean(paddle.square(self.f3_pred)) + + 1 * paddle.mean(paddle.square(self.fi2_pred)) + + 20 * paddle.mean(paddle.square(self.u3i2_pred - self.uavgi2_pred)) + ) + return [self.loss1, self.loss2, self.loss3] + + def predict(self, x_star1, x_star2, x_star3): + x_star1 = paddle.to_tensor(x_star1, dtype=paddle.float64) + x_star2 = paddle.to_tensor(x_star2, dtype=paddle.float64) + x_star3 = paddle.to_tensor(x_star3, dtype=paddle.float64) + self.ub1_pred = self.net_u1(x_star1[:, 0:1], x_star1[:, 1:2]) + self.ub2_pred = self.net_u2(x_star2[:, 0:1], x_star2[:, 1:2]) + self.ub3_pred = self.net_u3(x_star3[:, 0:1], x_star3[:, 1:2]) + return [self.ub1_pred.numpy(), self.ub2_pred.numpy(), self.ub3_pred.numpy()] + + def initialize_nn(self, layers, name_prefix): + # The weight used in neural_net + weights = [] + # The bias used in neural_net + biases = [] + # The amplitude used in neural_net + amplitudes = [] + num_layers = len(layers) + for l in range(0, num_layers - 1): + weight = self.create_parameter( + shape=[layers[l], layers[l + 1]], + dtype="float64", + default_initializer=self.w_init((layers[l], layers[l + 1])), + ) + bias = self.create_parameter( + shape=[1, layers[l + 1]], + dtype="float64", + is_bias=True, + default_initializer=nn.initializer.Constant(0.0), + ) + amplitude = self.create_parameter( + shape=[1], + dtype="float64", + is_bias=True, + default_initializer=nn.initializer.Constant(0.05), + ) + + self.add_parameter(name_prefix + "_w_" + str(l), weight) + self.add_parameter(name_prefix + "_b_" + str(l), bias) + self.add_parameter(name_prefix + "_a_" + str(l), amplitude) + weights.append(weight) + biases.append(bias) + amplitudes.append(amplitude) + return weights, biases, amplitudes + + def w_init(self, size): + in_dim = size[0] + out_dim = size[1] + xavier_stddev = np.sqrt(2 / (in_dim + out_dim)) + param = paddle.empty(size, "float64") + param = ppsci.utils.initializer.trunc_normal_(param, 0.0, xavier_stddev) + return nn.initializer.Assign(param) + + def neural_net_tanh(self, x, weights, biases, amplitudes): + num_layers = len(weights) + 1 + + h = x + for l in range(0, num_layers - 2): + w = weights[l] + b = biases[l] + h = paddle.tanh(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) + w = weights[-1] + b = biases[-1] + y = paddle.add(paddle.matmul(h, w), b) + return y + + def neural_net_sin(self, x, weights, biases, 
amplitudes): + num_layers = len(weights) + 1 + + h = x + for l in range(0, num_layers - 2): + w = weights[l] + b = biases[l] + h = paddle.sin(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) + w = weights[-1] + b = biases[-1] + y = paddle.add(paddle.matmul(h, w), b) + return y + + def neural_net_cos(self, x, weights, biases, amplitudes): + num_layers = len(weights) + 1 + + h = x + for l in range(0, num_layers - 2): + w = weights[l] + b = biases[l] + h = paddle.cos(20 * amplitudes[l] * paddle.add(paddle.matmul(h, w), b)) + w = weights[-1] + b = biases[-1] + y = paddle.add(paddle.matmul(h, w), b) + return y + + def net_u1(self, x, y): + return self.neural_net_tanh( + paddle.concat([x, y], 1), self.weights1, self.biases1, self.amplitudes1 + ) + + def net_u2(self, x, y): + return self.neural_net_sin( + paddle.concat([x, y], 1), self.weights2, self.biases2, self.amplitudes2 + ) + + def net_u3(self, x, y): + return self.neural_net_cos( + paddle.concat([x, y], 1), self.weights3, self.biases3, self.amplitudes3 + ) + + def get_grad(self, outputs, inputs): + grad = paddle.grad(outputs, inputs, retain_graph=True, create_graph=True) + return grad[0] + + def net_f(self, x1, y1, x2, y2, x3, y3, xi1, yi1, xi2, yi2): + # Gradients need to be calculated + x1.stop_gradient = False + y1.stop_gradient = False + x2.stop_gradient = False + y2.stop_gradient = False + x3.stop_gradient = False + y3.stop_gradient = False + xi1.stop_gradient = False + yi1.stop_gradient = False + xi2.stop_gradient = False + yi2.stop_gradient = False + + # Sub-Net1 + u1 = self.net_u1(x1, y1) + u1_x = self.get_grad(u1, x1) + u1_y = self.get_grad(u1, y1) + u1_xx = self.get_grad(u1_x, x1) + u1_yy = self.get_grad(u1_y, y1) + + # Sub-Net2 + u2 = self.net_u2(x2, y2) + u2_x = self.get_grad(u2, x2) + u2_y = self.get_grad(u2, y2) + u2_xx = self.get_grad(u2_x, x2) + u2_yy = self.get_grad(u2_y, y2) + + # Sub-Net3 + u3 = self.net_u3(x3, y3) + u3_x = self.get_grad(u3, x3) + u3_y = self.get_grad(u3, y3) + u3_xx = self.get_grad(u3_x, x3) + u3_yy = self.get_grad(u3_y, y3) + + # Sub-Net1, Interface 1 + u1i1 = self.net_u1(xi1, yi1) + u1i1_x = self.get_grad(u1i1, xi1) + u1i1_y = self.get_grad(u1i1, yi1) + u1i1_xx = self.get_grad(u1i1_x, xi1) + u1i1_yy = self.get_grad(u1i1_y, yi1) + + # Sub-Net2, Interface 1 + u2i1 = self.net_u2(xi1, yi1) + u2i1_x = self.get_grad(u2i1, xi1) + u2i1_y = self.get_grad(u2i1, yi1) + u2i1_xx = self.get_grad(u2i1_x, xi1) + u2i1_yy = self.get_grad(u2i1_y, yi1) + + # Sub-Net1, Interface 2 + u1i2 = self.net_u1(xi2, yi2) + u1i2_x = self.get_grad(u1i2, xi2) + u1i2_y = self.get_grad(u1i2, yi2) + u1i2_xx = self.get_grad(u1i2_x, xi2) + u1i2_yy = self.get_grad(u1i2_y, yi2) + + # Sub-Net3, Interface 2 + u3i2 = self.net_u3(xi2, yi2) + u3i2_x = self.get_grad(u3i2, xi2) + u3i2_y = self.get_grad(u3i2, yi2) + u3i2_xx = self.get_grad(u3i2_x, xi2) + u3i2_yy = self.get_grad(u3i2_y, yi2) + + # Average value (Required for enforcing the average solution along the interface) + uavgi1 = (u1i1 + u2i1) / 2 + uavgi2 = (u1i2 + u3i2) / 2 + + # Residuals + f1 = u1_xx + u1_yy - (paddle.exp(x1) + paddle.exp(y1)) + f2 = u2_xx + u2_yy - (paddle.exp(x2) + paddle.exp(y2)) + f3 = u3_xx + u3_yy - (paddle.exp(x3) + paddle.exp(y3)) + + # Residual continuity conditions on the interfaces + fi1 = (u1i1_xx + u1i1_yy - (paddle.exp(xi1) + paddle.exp(yi1))) - ( + u2i1_xx + u2i1_yy - (paddle.exp(xi1) + paddle.exp(yi1)) + ) + fi2 = (u1i2_xx + u1i2_yy - (paddle.exp(xi2) + paddle.exp(yi2))) - ( + u3i2_xx + u3i2_yy - (paddle.exp(xi2) + paddle.exp(yi2)) + ) + + return 
f1, f2, f3, fi1, fi2, uavgi1, uavgi2, u1i1, u1i2, u2i1, u3i2 + + +class Trainer: + def __init__(self, layer_list, dataset): + self.model = XPINN(layer_list) + self.optimizer = paddle.optimizer.Adam( + learning_rate=0.0008, parameters=self.model.parameters() + ) + self.dataset = dataset + + def train(self, n_iter, x_star1, x_star2, x_star3, u_exact2, u_exact3): + mse_history1 = [] + mse_history2 = [] + mse_history3 = [] + l2_err2 = [] + l2_err3 = [] + + for it in range(n_iter): + loss1_value, loss2_value, loss3_value = self.model(self.dataset) + loss = loss1_value + loss2_value + loss3_value + loss.backward() + self.optimizer.step() + self.optimizer.clear_grad() + + if it % 20 == 0: + # Predicted solution + _, u_pred2, u_pred3 = self.model.predict(x_star1, x_star2, x_star3) + + # Relative L2 error in subdomains 2 and 3 + l2_error2 = np.linalg.norm(u_exact2 - u_pred2, 2) / np.linalg.norm( + u_exact2, 2 + ) + l2_error3 = np.linalg.norm(u_exact3 - u_pred3, 2) / np.linalg.norm( + u_exact3, 2 + ) + + print( + "It: %d, Loss1: %.3e, Loss2: %.3e, Loss3: %.3e, L2_err2: %.3e, L2_err3: %.3e" + % (it, loss1_value, loss2_value, loss3_value, l2_error2, l2_error3) + ) + + mse_history1.append(loss1_value) + mse_history2.append(loss2_value) + mse_history3.append(loss3_value) + l2_err2.append(l2_error2) + l2_err3.append(l2_error3) + + return mse_history1, mse_history2, mse_history3, l2_err2, l2_err3 + + def predict(self, x_star1, x_star2, x_star3): + return self.model.predict(x_star1, x_star2, x_star3) + + +if __name__ == "__main__": + # Boundary points from subdomain 1 + n_ub = 200 + + # Residual points in three subdomains + n_f1 = 5000 + n_f2 = 1800 + n_f3 = 1200 + + # Interface points along the two interfaces + n_i1 = 100 + n_i2 = 100 + + # NN architecture in each subdomain + layers1 = [2, 30, 30, 1] + layers2 = [2, 20, 20, 20, 20, 1] + layers3 = [2, 25, 25, 25, 1] + + max_iter = 501 + + # Load training data (boundary points), residual and interface points from .mat file + # All points are generated in Matlab + data = scipy.io.loadmat("./data/XPINN_2D_PoissonEqn.mat") + + x_f1 = data["x_f1"].flatten()[:, None] + y_f1 = data["y_f1"].flatten()[:, None] + x_f2 = data["x_f2"].flatten()[:, None] + y_f2 = data["y_f2"].flatten()[:, None] + x_f3 = data["x_f3"].flatten()[:, None] + y_f3 = data["y_f3"].flatten()[:, None] + xi1 = data["xi1"].flatten()[:, None] + yi1 = data["yi1"].flatten()[:, None] + xi2 = data["xi2"].flatten()[:, None] + yi2 = data["yi2"].flatten()[:, None] + xb = data["xb"].flatten()[:, None] + yb = data["yb"].flatten()[:, None] + + ub_train = data["ub"].flatten()[:, None] + u_exact = data["u_exact"].flatten()[:, None] + u_exact2 = data["u_exact2"].flatten()[:, None] + u_exact3 = data["u_exact3"].flatten()[:, None] + + x_f1_train = np.hstack((x_f1.flatten()[:, None], y_f1.flatten()[:, None])) + x_f2_train = np.hstack((x_f2.flatten()[:, None], y_f2.flatten()[:, None])) + x_f3_train = np.hstack((x_f3.flatten()[:, None], y_f3.flatten()[:, None])) + + x_fi1_train = np.hstack((xi1.flatten()[:, None], yi1.flatten()[:, None])) + x_fi2_train = np.hstack((xi2.flatten()[:, None], yi2.flatten()[:, None])) + + x_ub_train = np.hstack((xb.flatten()[:, None], yb.flatten()[:, None])) + + # Points in the whole domain + x_total = data["x_total"].flatten()[:, None] + y_total = data["y_total"].flatten()[:, None] + + x_star1 = np.hstack((x_f1.flatten()[:, None], y_f1.flatten()[:, None])) + x_star2 = np.hstack((x_f2.flatten()[:, None], y_f2.flatten()[:, None])) + x_star3 = np.hstack((x_f3.flatten()[:, None], 
y_f3.flatten()[:, None])) + + # Randomly select the residual points from sub-domains + idx1 = np.random.choice(x_f1_train.shape[0], n_f1, replace=False) + x_f1_train = x_f1_train[idx1, :] + + idx2 = np.random.choice(x_f2_train.shape[0], n_f2, replace=False) + x_f2_train = x_f2_train[idx2, :] + + idx3 = np.random.choice(x_f3_train.shape[0], n_f3, replace=False) + x_f3_train = x_f3_train[idx3, :] + + # Randomly select boundary points + idx4 = np.random.choice(x_ub_train.shape[0], n_ub, replace=False) + x_ub_train = x_ub_train[idx4, :] + ub_train = ub_train[idx4, :] + + # Randomly select the interface points along two interfaces + idxi1 = np.random.choice(x_fi1_train.shape[0], n_i1, replace=False) + x_fi1_train = x_fi1_train[idxi1, :] + + idxi2 = np.random.choice(x_fi2_train.shape[0], n_i2, replace=False) + x_fi2_train = x_fi2_train[idxi2, :] + + layer_list = ( + layers1, + layers2, + layers3, + ) + dataset = ( + x_ub_train, + ub_train, + x_f1_train, + x_f2_train, + x_f3_train, + x_fi1_train, + x_fi2_train, + ) + + trainer_obj = Trainer( + layer_list, + dataset, + ) + + # Training + start_time = time.time() + mse_hist1, mse_hist2, mse_hist3, l2_err2, l2_err3 = trainer_obj.train( + max_iter, x_star1, x_star2, x_star3, u_exact2, u_exact3 + ) + elapsed = time.time() - start_time + print("Training time: %.4f" % (elapsed)) + + # Solution prediction + u_pred1, u_pred2, u_pred3 = trainer_obj.predict(x_star1, x_star2, x_star3) + + # Needed for plotting + x1, y1 = x_star1[:, 0:1], x_star1[:, 1:2] + triang_1 = tri.Triangulation(x1.flatten(), y1.flatten()) + x2, y2 = x_star2[:, 0:1], x_star2[:, 1:2] + triang_2 = tri.Triangulation(x2.flatten(), y2.flatten()) + x3, y3 = x_star3[:, 0:1], x_star3[:, 1:2] + triang_3 = tri.Triangulation(x3.flatten(), y3.flatten()) + x_tot = np.concatenate([x1, x2, x3]) + y_tot = np.concatenate([y1, y2, y3]) + triang_total = tri.Triangulation(x_tot.flatten(), y_tot.flatten()) + + # Concatenating the solution from subdomains + u_pred = np.concatenate([u_pred1, u_pred2, u_pred3]) + + error_u_total = np.linalg.norm( + np.squeeze(u_exact) - u_pred.flatten(), 2 + ) / np.linalg.norm(np.squeeze(u_exact), 2) + print("Error u_total: %e" % (error_u_total)) + + ############################# Plotting ############################### + os.makedirs("./target", exist_ok=True) + fig, ax = plotting.newfig(1.0, 1.1) + plt.plot(range(1, max_iter + 1, 20), mse_hist1, "r-", linewidth=1, label="Sub-Net1") + plt.plot( + range(1, max_iter + 1, 20), mse_hist2, "b-.", linewidth=1, label="Sub-Net2" + ) + plt.plot( + range(1, max_iter + 1, 20), mse_hist3, "g--", linewidth=1, label="Sub-Net3" + ) + plt.xlabel("$\#$ iterations") + plt.ylabel("Loss") + plt.yscale("log") + plt.legend(loc="upper right") + plotting.savefig("./target/XPINN_PoissonMSEhistory") + + fig, ax = plotting.newfig(1.0, 1.1) + plt.plot( + range(1, max_iter + 1, 20), l2_err2, "r-", linewidth=1, label="Subdomain 2" + ) + plt.plot( + range(1, max_iter + 1, 20), l2_err3, "b--", linewidth=1, label="Subdomain 3" + ) + plt.xlabel("$\#$ iterations") + plt.ylabel("Rel. 
$L_2$ error") + plt.yscale("log") + plt.legend(loc="upper right") + plotting.savefig("./target/XPINN_PoissonErrhistory") + + aa1 = np.array([[np.squeeze(xb[-1]), np.squeeze(yb[-1])]]) + aa2 = np.array( + [ + [1.8, np.squeeze(yb[-1])], + [+1.8, -1.7], + [-1.6, -1.7], + [-1.6, 1.55], + [1.8, 1.55], + [1.8, np.squeeze(yb[-1])], + ] + ) + x_domain1 = np.squeeze(xb.flatten()[:, None]) + y_domain1 = np.squeeze(yb.flatten()[:, None]) + aa3 = np.array([x_domain1, y_domain1]).T + xx = np.vstack((aa3, aa2, aa1)) + triang_total = tri.Triangulation(x_tot.flatten(), y_tot.flatten()) + + x_fi1_train_plot = np.hstack((xi1.flatten()[:, None], yi1.flatten()[:, None])) + x_fi2_train_plot = np.hstack((xi2.flatten()[:, None], yi2.flatten()[:, None])) + + fig, ax = plotting.newfig(1.0, 1.1) + gridspec.GridSpec(1, 1) + ax = plt.subplot2grid((1, 1), (0, 0)) + tcf = ax.tricontourf(triang_total, np.squeeze(u_exact), 100, cmap="jet") + ax.add_patch(patches.Polygon(xx, closed=True, fill=True, color="w", edgecolor="w")) + tcbar = fig.colorbar(tcf) + tcbar.ax.tick_params(labelsize=28) + ax.set_xlabel("$x$", fontsize=32) + ax.set_ylabel("$y$", fontsize=32) + ax.set_title("$u$ (Exact)", fontsize=34) + ax.tick_params(axis="x", labelsize=28) + ax.tick_params(axis="y", labelsize=28) + plt.plot( + x_fi1_train_plot[:, 0:1], + x_fi1_train_plot[:, 1:2], + "w-", + markersize=2, + label="Interface Pts", + ) + plt.plot( + x_fi2_train_plot[:, 0:1], + x_fi2_train_plot[:, 1:2], + "w-", + markersize=2, + label="Interface Pts", + ) + fig.set_size_inches(w=12, h=9) + plotting.savefig("./target/XPINN_PoissonEq_ExSol") + plt.show() + + fig, ax = plotting.newfig(1.0, 1.1) + gridspec.GridSpec(1, 1) + ax = plt.subplot2grid((1, 1), (0, 0)) + tcf = ax.tricontourf(triang_total, u_pred.flatten(), 100, cmap="jet") + ax.add_patch(patches.Polygon(xx, closed=True, fill=True, color="w", edgecolor="w")) + tcbar = fig.colorbar(tcf) + tcbar.ax.tick_params(labelsize=28) + ax.set_xlabel("$x$", fontsize=32) + ax.set_ylabel("$y$", fontsize=32) + ax.set_title("$u$ (Predicted)", fontsize=34) + ax.tick_params(axis="x", labelsize=28) + ax.tick_params(axis="y", labelsize=28) + plt.plot( + x_fi1_train_plot[:, 0:1], + x_fi1_train_plot[:, 1:2], + "w-", + markersize=2, + label="Interface Pts", + ) + plt.plot( + x_fi2_train_plot[:, 0:1], + x_fi2_train_plot[:, 1:2], + "w-", + markersize=2, + label="Interface Pts", + ) + fig.set_size_inches(w=12, h=9) + plotting.savefig("./target/XPINN_PoissonEq_Sol") + plt.show() + + fig, ax = plotting.newfig(1.0, 1.1) + gridspec.GridSpec(1, 1) + ax = plt.subplot2grid((1, 1), (0, 0)) + tcf = ax.tricontourf( + triang_total, abs(np.squeeze(u_exact) - u_pred.flatten()), 100, cmap="jet" + ) + ax.add_patch(patches.Polygon(xx, closed=True, fill=True, color="w", edgecolor="w")) + tcbar = fig.colorbar(tcf) + tcbar.ax.tick_params(labelsize=28) + ax.set_xlabel("$x$", fontsize=32) + ax.set_ylabel("$y$", fontsize=32) + ax.set_title("Point-wise Error", fontsize=34) + ax.tick_params(axis="x", labelsize=28) + ax.tick_params(axis="y", labelsize=28) + plt.plot( + x_fi1_train_plot[:, 0:1], + x_fi1_train_plot[:, 1:2], + "w-", + markersize=2, + label="Interface Pts", + ) + plt.plot( + x_fi2_train_plot[:, 0:1], + x_fi2_train_plot[:, 1:2], + "w-", + markersize=2, + label="Interface Pts", + ) + fig.set_size_inches(w=12, h=9) + plotting.savefig("./target/XPINN_PoissonEq_Err") + plt.show() + + fig, ax = plotting.newfig(1.0, 1.1) + gridspec.GridSpec(1, 1) + ax = plt.subplot2grid((1, 1), (0, 0)) + plt.plot( + x_f1_train[:, 0:1], + x_f1_train[:, 1:2], + "r*", + 
markersize=4, + label="Residual Pts (sub-domain 1)", + ) + plt.plot( + x_f2_train[:, 0:1], + x_f2_train[:, 1:2], + "yo", + markersize=4, + label="Residual Pts (sub-domain 2)", + ) + plt.plot( + x_f3_train[:, 0:1], + x_f3_train[:, 1:2], + "gs", + markersize=4, + label="Residual Pts (sub-domain 3)", + ) + plt.plot( + x_fi1_train[:, 0:1], + x_fi1_train[:, 1:2], + "bs", + markersize=7, + label="Interface Pts 1", + ) + plt.plot( + x_fi2_train[:, 0:1], + x_fi2_train[:, 1:2], + "bs", + markersize=7, + label="Interface Pts 1", + ) + plt.plot( + x_ub_train[:, 0:1], + x_ub_train[:, 1:2], + "kx", + markersize=9, + label="Interface Pts 1", + ) + ax.set_xlabel("$x$", fontsize=30) + ax.set_ylabel("$y$", fontsize=30) + ax.tick_params(axis="x", labelsize=26) + ax.tick_params(axis="y", labelsize=26) + fig.set_size_inches(w=12, h=12) + plotting.savefig("./target/XPINN_Poisson_dataPts") + plt.show() diff --git a/jointContribution/XPINNs/plotting.py b/jointContribution/XPINNs/plotting.py index 010e2f1f6a..41c658415a 100644 --- a/jointContribution/XPINNs/plotting.py +++ b/jointContribution/XPINNs/plotting.py @@ -1,52 +1,52 @@ -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np - - -def figsize(scale, nplots=1): - fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth - inches_per_pt = 1.0 / 72.27 # Convert pt to inch - golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this) - fig_width = fig_width_pt * inches_per_pt * scale # width in inches - fig_height = nplots * fig_width * golden_mean # height in inches - fig_size = [fig_width, fig_height] - return fig_size - - -# I make my own newfig and savefig functions -def newfig(width, nplots=1): - fig = plt.figure(figsize=figsize(width, nplots)) - ax = fig.add_subplot(111) - return fig, ax - - -def savefig(filename, crop=True): - if crop: - plt.savefig(f"{filename}.pdf", bbox_inches="tight", pad_inches=0) - plt.savefig(f"{filename}.eps", bbox_inches="tight", pad_inches=0) - else: - plt.savefig(f"{filename}.pdf") - plt.savefig(f"{filename}.eps") - - -pgf_with_latex = { # setup matplotlib to use latex for output - "pgf.texsystem": "pdflatex", # change this if using xetex or latex - "text.usetex": True, # use LaTeX to write all text - "font.family": "serif", - "font.serif": [], # blank entries should cause plots to inherit fonts from the document - "font.sans-serif": [], - "font.monospace": [], - "axes.labelsize": 10, # LaTeX default is 10pt font. 
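The figsize(), newfig() and savefig() helpers above size figures from the LaTeX text width (390 pt) with a golden-ratio height and export each figure as both .pdf and .eps. A small check of the numbers plus a sketch of typical usage; the output path is illustrative, and text.usetex means a working LaTeX installation is needed at render time:

import numpy as np

width_in = 390.0 / 72.27                                 # ~5.40 in for scale = 1.0
height_in = width_in * (np.sqrt(5.0) - 1.0) / 2.0        # ~3.34 in, golden-ratio height
print(round(width_in, 2), round(height_in, 2))           # 5.4 3.34

# fig, ax = newfig(1.0)             # figure sized to the LaTeX text width
# ax.plot(x_data, y_data)
# savefig("./target/example")       # writes example.pdf and example.eps, tightly cropped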
- "font.size": 10, - "legend.fontsize": 8, # Make the legend/label fonts a little smaller - "xtick.labelsize": 8, - "ytick.labelsize": 8, - "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth - "pgf.preamble": "\n".join( - [ - r"\usepackage[utf8x]{inputenc}", # use utf8 fonts because your computer can handle it :) - r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble - ] - ), -} -mpl.rcParams.update(pgf_with_latex) +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np + + +def figsize(scale, nplots=1): + fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth + inches_per_pt = 1.0 / 72.27 # Convert pt to inch + golden_mean = (np.sqrt(5.0) - 1.0) / 2.0 # Aesthetic ratio (you could change this) + fig_width = fig_width_pt * inches_per_pt * scale # width in inches + fig_height = nplots * fig_width * golden_mean # height in inches + fig_size = [fig_width, fig_height] + return fig_size + + +# I make my own newfig and savefig functions +def newfig(width, nplots=1): + fig = plt.figure(figsize=figsize(width, nplots)) + ax = fig.add_subplot(111) + return fig, ax + + +def savefig(filename, crop=True): + if crop: + plt.savefig(f"{filename}.pdf", bbox_inches="tight", pad_inches=0) + plt.savefig(f"{filename}.eps", bbox_inches="tight", pad_inches=0) + else: + plt.savefig(f"{filename}.pdf") + plt.savefig(f"{filename}.eps") + + +pgf_with_latex = { # setup matplotlib to use latex for output + "pgf.texsystem": "pdflatex", # change this if using xetex or latex + "text.usetex": True, # use LaTeX to write all text + "font.family": "serif", + "font.serif": [], # blank entries should cause plots to inherit fonts from the document + "font.sans-serif": [], + "font.monospace": [], + "axes.labelsize": 10, # LaTeX default is 10pt font. 
+ "font.size": 10, + "legend.fontsize": 8, # Make the legend/label fonts a little smaller + "xtick.labelsize": 8, + "ytick.labelsize": 8, + "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth + "pgf.preamble": "\n".join( + [ + r"\usepackage[utf8x]{inputenc}", # use utf8 fonts because your computer can handle it :) + r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble + ] + ), +} +mpl.rcParams.update(pgf_with_latex) diff --git a/jointContribution/XPINNs/requirements.txt b/jointContribution/XPINNs/requirements.txt index 26a2b438ff..d3efbc2ab6 100644 --- a/jointContribution/XPINNs/requirements.txt +++ b/jointContribution/XPINNs/requirements.txt @@ -1,3 +1,3 @@ -matplotlib -scipy -numpy +matplotlib +numpy +scipy diff --git a/jointContribution/graphGalerkin/LinearElasticity.py b/jointContribution/graphGalerkin/LinearElasticity.py index 333cb5f454..d359d21897 100644 --- a/jointContribution/graphGalerkin/LinearElasticity.py +++ b/jointContribution/graphGalerkin/LinearElasticity.py @@ -1,375 +1,375 @@ -import sys - -import matplotlib.pyplot as plt -import numpy as np -import paddle -from scipy.io import loadmat - -sys.path.insert(0, "pycamotk") -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.create_fem_resjac import create_fem_resjac -from pyCaMOtk.create_femsp_cg import create_femsp_cg -from pyCaMOtk.create_mesh_hcube import mesh_hcube -from pyCaMOtk.geom_mltdim import Hypercube -from pyCaMOtk.geom_mltdim import Simplex -from pyCaMOtk.LinearElasticityHandCode import * -from pyCaMOtk.mesh import Mesh -from pyCaMOtk.mesh import get_gdof_from_bndtag -from pyCaMOtk.solve_fem import solve_fem -from pyCaMOtk.visualize_fem import visualize_fem - -sys.path.insert(0, "source") -import setup_prob_eqn_handcode -import TensorFEMCore -from GCNNModel import LinearElasticityNet2D -from GCNNModel import e2vcg2connectivity -from TensorFEMCore import Double -from TensorFEMCore import ReshapeFix -from TensorFEMCore import solve_fem_GCNN - -sys.path.insert(0, "utils") -from utils import Data - -paddle.seed(0) - - -class LinearElasticity: - def __init__(self) -> None: - # GCNN model - self.model = LinearElasticityNet2D() - - def train( - self, - Ufem, - ndof, - xcg, - connectivity, - LossF, - tol, - maxit, - dbc, - ndim, - nnode, - etype, - e2vcg, - e2bnd, - ): - ii = 0 - Graph = [] - Ue = Double(Ufem.flatten().reshape(ndof, 1)) - fcn_id = Double(np.asarray([ii])) - Ue_aug = paddle.concat((fcn_id, Ue), axis=0) - xcg_gcnn = np.zeros((2, 2 * xcg.shape[1])) - for i in range(xcg.shape[1]): - xcg_gcnn[:, 2 * i] = xcg[:, i] - xcg_gcnn[:, 2 * i + 1] = xcg[:, i] - Uin = Double(xcg_gcnn.T) - graph = Data(x=Uin, y=Ue_aug, edge_index=connectivity) - Graph.append(graph) - DataList = [[Graph[0]]] - TrainDataloader = DataList - [self.model, info] = solve_fem_GCNN( - TrainDataloader, LossF, self.model, tol, maxit - ) - np.save("modelCircleDet.npy", info) - solution = self.model(Graph[0].to("cuda")) - solution = ReshapeFix(paddle.clone(solution), [len(solution.flatten()), 1], "C") - solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) - solution = solution.detach().cpu().numpy() - xcg_defGCNN = xcg + np.reshape(solution, [ndim, nnode], order="F") - msh_defGCNN = Mesh(etype, xcg_defGCNN, e2vcg, e2bnd, ndim) - uabsGCNN = np.sqrt( - solution[[i for i in range(ndof) if i % 2 == 0]] ** 2 - + solution[[i for i in range(ndof) if i % 2 != 0]] ** 2 - ) - return msh_defGCNN, uabsGCNN - - def plot_hard_way(self, msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs): - 
fig = plt.figure() - ax1 = plt.subplot(1, 2, 1) - visualize_fem( - ax1, msh_defGCNN, uabsGCNN[e2vcg], {"plot_elem": False, "nref": 1}, [] - ) - ax1.set_title("GCNN solution") - ax2 = plt.subplot(1, 2, 2) - visualize_fem(ax2, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 1}, []) - ax2.set_title("FEM solution") - fig.tight_layout(pad=3.0) - plt.savefig("GCNN.pdf", bbox_inches="tight") - - def plot_square(self, msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs): - plt.figure() - ax1 = plt.subplot(1, 1, 1) - _, cbar1 = visualize_fem( - ax1, msh_defGCNN, uabsGCNN[e2vcg], {"plot_elem": False, "nref": 4}, [] - ) - ax1.axis("off") - cbar1.remove() - plt.margins(0, 0) - plt.savefig( - "gcnn_2dlinearelasticity_square.png", - bbox_inches="tight", - pad_inches=0, - dpi=800, - ) - - plt.figure() - ax2 = plt.subplot(1, 1, 1) - _, cbar2 = visualize_fem( - ax2, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 4}, [] - ) - ax2.axis("off") - cbar2.remove() - plt.margins(0, 0) - plt.savefig( - "fem_2dlinearelasticity_square.png", - bbox_inches="tight", - pad_inches=0, - dpi=800, - ) - - def hard_way(self): - # FEM - etype = "simplex" - ndim = 2 - dat = loadmat("./msh/cylshk0a-simp-nref0p1.mat") - xcg = dat["xcg"] / 10 - e2vcg = dat["e2vcg"] - 1 - e2bnd = dat["e2bnd"] - 1 - msh = Mesh(etype, xcg, e2vcg, e2bnd, ndim) - xcg = msh.xcg - e2vcg = msh.e2vcg - e2bnd = msh.e2bnd - porder = msh.porder - [ndim, nnode] = xcg.shape - nvar = ndim - ndof = nnode * nvar - - lam = lambda x, el: 1 - mu = lambda x, el: 1 - f = lambda x, el: np.zeros([ndim, 1]) - bnd2nbc = [0.0, 1.0, 2.0, 3.0, 4.0] - tb = lambda x, n, bnd, el, fc: np.asarray([[2], [0]]) * ( - bnd == 2 or bnd == 2.0 or (bnd - 2) ** 2 < 1e-8 - ) + np.asarray([[0], [0]]) - prob = setup_linelast_base_handcode(ndim, lam, mu, f, tb, bnd2nbc) - # Create finite element space - femsp = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg) - ldof2gdof = femsp.ldof2gdof_var.ldof2gdof - geo = Simplex(ndim, porder) - f2v = geo.f2n - dbc_idx = get_gdof_from_bndtag( - [i for i in range(ndim)], [0], nvar, ldof2gdof, e2bnd, f2v - ) - dbc_idx.sort() - dbc_idx = np.asarray(dbc_idx) - dbc_val = 0 * dbc_idx - dbc = create_dbc_strct(ndof, dbc_idx, dbc_val) - femsp.dbc = dbc - tol = 1.0e-8 - maxit = 100000 - [Ufem, info] = solve_fem( - "cg", - msh.transfdatacontiguous, - femsp.elem, - femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - msh.e2e, - femsp.spmat, - dbc, - None, - tol, - maxit, - ) - - xcg_def = xcg + np.reshape(Ufem, [ndim, nnode], order="F") - msh_def = Mesh(etype, xcg_def, e2vcg, e2bnd, ndim) - uabs = np.sqrt( - Ufem[[i for i in range(ndof) if i % 2 == 0]] ** 2 - + Ufem[[i for i in range(ndof) if i % 2 != 0]] ** 2 - ) - fig = plt.figure() - ax1 = plt.subplot(1, 1, 1) - visualize_fem(ax1, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 1}, []) - ax1.set_title("FEM solution") - fig.tight_layout(pad=3.0) - - idx_xcg = [ - i - for i in range(xcg.shape[1]) - if 2 * i not in dbc_idx and 2 * i + 1 not in dbc_idx - ] - - obsidx = np.asarray([5, 11, 26, 32, 38]) # max is 9 - - idx_whole = [] - for i in obsidx: - idx_whole.append(2 * i) - idx_whole.append(2 * i + 1) - obsxcg = msh_def.xcg[:, obsidx] - ax1.plot(obsxcg[0, :], obsxcg[1, :], "o") - - dbc_idx_new = np.hstack((dbc_idx, idx_whole)) - dbc_val_new = Ufem[dbc_idx_new] - dbc = create_dbc_strct(msh.xcg.shape[1] * nvar, dbc_idx_new, dbc_val_new) - - Src_new = self.model.source - K_new = paddle.to_tensor([[0], [0]], dtype="float32").reshape((2,)) - parsfuncI = lambda x: 
paddle.concat((Src_new[0:1], Src_new[1:2], K_new), axis=0) - # GCNN - connectivity = e2vcg2connectivity(e2vcg, "ele") - prob = setup_prob_eqn_handcode.setup_linelast_base_handcode( - ndim, lam, mu, f, tb, bnd2nbc - ) - femsp_gcnn = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg, dbc) - LossF = [] - fcn = lambda u_: TensorFEMCore.create_fem_resjac( - "cg", - u_, - msh.transfdatacontiguous, - femsp_gcnn.elem, - femsp_gcnn.elem_data, - femsp_gcnn.ldof2gdof_eqn.ldof2gdof, - femsp_gcnn.ldof2gdof_var.ldof2gdof, - msh.e2e, - femsp_gcnn.spmat, - dbc, - [i for i in range(ndof) if i not in dbc_idx], - parsfuncI, - None, - ) - LossF.append(fcn) - msh_defGCNN, uabsGCNN = self.train( - Ufem, - ndof, - xcg, - connectivity, - LossF, - tol, - maxit, - dbc, - ndim, - nnode, - etype, - e2vcg, - e2bnd, - ) - self.plot_hard_way(msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs) - - def main_square(self): - # FEM - nvar = 2 - etype = "hcube" - lims = np.asarray([[0, 1], [0, 1]]) - nel = [2, 2] - porder = 2 - nf = 4 - msh = mesh_hcube(etype, lims, nel, porder).getmsh() - xcg = msh.xcg - e2vcg = msh.e2vcg - e2bnd = msh.e2bnd - porder = msh.porder - [ndim, nnode] = xcg.shape - nvar = ndim - ndof = nnode * nvar - - lam = lambda x, el: 1 - mu = lambda x, el: 1 - f = lambda x, el: np.zeros([ndim, 1]) - bnd2nbc = np.asarray([0, 1, 2, 3]) - tb = lambda x, n, bnd, el, fc: np.asarray([[0.5], [0]]) * ( - (bnd - 2) ** 2 < 1e-8 - ) + np.asarray([[0], [0]]) - prob = setup_linelast_base_handcode(ndim, lam, mu, f, tb, bnd2nbc) - # Create finite element space - femsp = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg) - ldof2gdof = femsp.ldof2gdof_var.ldof2gdof - geo = Hypercube(ndim, porder) - f2v = geo.f2n - dbc_idx = get_gdof_from_bndtag( - [i for i in range(ndim)], [0], nvar, ldof2gdof, e2bnd, f2v - ) - dbc_idx.sort() - dbc_idx = np.asarray(dbc_idx) - dbc_val = 0 * dbc_idx - dbc = create_dbc_strct(ndof, dbc_idx, dbc_val) - femsp.dbc = dbc - tol = 1.0e-8 - maxit = 4500 - - [Ufem, info] = solve_fem( - "cg", - msh.transfdatacontiguous, - femsp.elem, - femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - msh.e2e, - femsp.spmat, - dbc, - None, - tol, - maxit, - ) - - xcg_def = xcg + np.reshape(Ufem, [ndim, nnode], order="F") - msh_def = Mesh(etype, xcg_def, e2vcg, e2bnd, ndim) - uabs = np.sqrt( - Ufem[[i for i in range(ndof) if i % 2 == 0]] ** 2 - + Ufem[[i for i in range(ndof) if i % 2 != 0]] ** 2 - ) - # GCNN - connectivity = e2vcg2connectivity(e2vcg, "ele") - prob = setup_prob_eqn_handcode.setup_linelast_base_handcode( - ndim, lam, mu, f, tb, bnd2nbc - ) - femsp_gcnn = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg, dbc) - LossF = [] - fcn = lambda u_: TensorFEMCore.create_fem_resjac( - "cg", - u_, - msh.transfdatacontiguous, - femsp_gcnn.elem, - femsp_gcnn.elem_data, - femsp_gcnn.ldof2gdof_eqn.ldof2gdof, - femsp_gcnn.ldof2gdof_var.ldof2gdof, - msh.e2e, - femsp_gcnn.spmat, - dbc, - ) - fcn_fem = lambda u_: create_fem_resjac( - "cg", - u_, - msh.transfdatacontiguous, - femsp.elem, - femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - msh.e2e, - femsp.spmat, - dbc, - ) - LossF.append(fcn) - msh_defGCNN, uabsGCNN = self.train( - Ufem, - ndof, - xcg, - connectivity, - LossF, - tol, - maxit, - dbc, - ndim, - nnode, - etype, - e2vcg, - e2bnd, - ) - self.plot_square(msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs) - - -if __name__ == "__main__": - le_obj = LinearElasticity() - le_obj.hard_way() - le_obj.main_square() +import sys + +import 
matplotlib.pyplot as plt +import numpy as np +import paddle +from scipy.io import loadmat + +sys.path.insert(0, "pycamotk") +from pyCaMOtk.create_dbc_strct import create_dbc_strct +from pyCaMOtk.create_fem_resjac import create_fem_resjac +from pyCaMOtk.create_femsp_cg import create_femsp_cg +from pyCaMOtk.create_mesh_hcube import mesh_hcube +from pyCaMOtk.geom_mltdim import Hypercube +from pyCaMOtk.geom_mltdim import Simplex +from pyCaMOtk.LinearElasticityHandCode import * +from pyCaMOtk.mesh import Mesh +from pyCaMOtk.mesh import get_gdof_from_bndtag +from pyCaMOtk.solve_fem import solve_fem +from pyCaMOtk.visualize_fem import visualize_fem + +sys.path.insert(0, "source") +import setup_prob_eqn_handcode +import TensorFEMCore +from GCNNModel import LinearElasticityNet2D +from GCNNModel import e2vcg2connectivity +from TensorFEMCore import Double +from TensorFEMCore import ReshapeFix +from TensorFEMCore import solve_fem_GCNN + +sys.path.insert(0, "utils") +from utils import Data + +paddle.seed(0) + + +class LinearElasticity: + def __init__(self) -> None: + # GCNN model + self.model = LinearElasticityNet2D() + + def train( + self, + Ufem, + ndof, + xcg, + connectivity, + LossF, + tol, + maxit, + dbc, + ndim, + nnode, + etype, + e2vcg, + e2bnd, + ): + ii = 0 + Graph = [] + Ue = Double(Ufem.flatten().reshape(ndof, 1)) + fcn_id = Double(np.asarray([ii])) + Ue_aug = paddle.concat((fcn_id, Ue), axis=0) + xcg_gcnn = np.zeros((2, 2 * xcg.shape[1])) + for i in range(xcg.shape[1]): + xcg_gcnn[:, 2 * i] = xcg[:, i] + xcg_gcnn[:, 2 * i + 1] = xcg[:, i] + Uin = Double(xcg_gcnn.T) + graph = Data(x=Uin, y=Ue_aug, edge_index=connectivity) + Graph.append(graph) + DataList = [[Graph[0]]] + TrainDataloader = DataList + [self.model, info] = solve_fem_GCNN( + TrainDataloader, LossF, self.model, tol, maxit + ) + np.save("modelCircleDet.npy", info) + solution = self.model(Graph[0].to("cuda")) + solution = ReshapeFix(paddle.clone(solution), [len(solution.flatten()), 1], "C") + solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) + solution = solution.detach().cpu().numpy() + xcg_defGCNN = xcg + np.reshape(solution, [ndim, nnode], order="F") + msh_defGCNN = Mesh(etype, xcg_defGCNN, e2vcg, e2bnd, ndim) + uabsGCNN = np.sqrt( + solution[[i for i in range(ndof) if i % 2 == 0]] ** 2 + + solution[[i for i in range(ndof) if i % 2 != 0]] ** 2 + ) + return msh_defGCNN, uabsGCNN + + def plot_hard_way(self, msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs): + fig = plt.figure() + ax1 = plt.subplot(1, 2, 1) + visualize_fem( + ax1, msh_defGCNN, uabsGCNN[e2vcg], {"plot_elem": False, "nref": 1}, [] + ) + ax1.set_title("GCNN solution") + ax2 = plt.subplot(1, 2, 2) + visualize_fem(ax2, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 1}, []) + ax2.set_title("FEM solution") + fig.tight_layout(pad=3.0) + plt.savefig("GCNN.pdf", bbox_inches="tight") + + def plot_square(self, msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs): + plt.figure() + ax1 = plt.subplot(1, 1, 1) + _, cbar1 = visualize_fem( + ax1, msh_defGCNN, uabsGCNN[e2vcg], {"plot_elem": False, "nref": 4}, [] + ) + ax1.axis("off") + cbar1.remove() + plt.margins(0, 0) + plt.savefig( + "gcnn_2dlinearelasticity_square.png", + bbox_inches="tight", + pad_inches=0, + dpi=800, + ) + + plt.figure() + ax2 = plt.subplot(1, 1, 1) + _, cbar2 = visualize_fem( + ax2, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 4}, [] + ) + ax2.axis("off") + cbar2.remove() + plt.margins(0, 0) + plt.savefig( + "fem_2dlinearelasticity_square.png", + bbox_inches="tight", + pad_inches=0, + 
dpi=800, + ) + + def hard_way(self): + # FEM + etype = "simplex" + ndim = 2 + dat = loadmat("./msh/cylshk0a-simp-nref0p1.mat") + xcg = dat["xcg"] / 10 + e2vcg = dat["e2vcg"] - 1 + e2bnd = dat["e2bnd"] - 1 + msh = Mesh(etype, xcg, e2vcg, e2bnd, ndim) + xcg = msh.xcg + e2vcg = msh.e2vcg + e2bnd = msh.e2bnd + porder = msh.porder + [ndim, nnode] = xcg.shape + nvar = ndim + ndof = nnode * nvar + + lam = lambda x, el: 1 + mu = lambda x, el: 1 + f = lambda x, el: np.zeros([ndim, 1]) + bnd2nbc = [0.0, 1.0, 2.0, 3.0, 4.0] + tb = lambda x, n, bnd, el, fc: np.asarray([[2], [0]]) * ( + bnd == 2 or bnd == 2.0 or (bnd - 2) ** 2 < 1e-8 + ) + np.asarray([[0], [0]]) + prob = setup_linelast_base_handcode(ndim, lam, mu, f, tb, bnd2nbc) + # Create finite element space + femsp = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg) + ldof2gdof = femsp.ldof2gdof_var.ldof2gdof + geo = Simplex(ndim, porder) + f2v = geo.f2n + dbc_idx = get_gdof_from_bndtag( + [i for i in range(ndim)], [0], nvar, ldof2gdof, e2bnd, f2v + ) + dbc_idx.sort() + dbc_idx = np.asarray(dbc_idx) + dbc_val = 0 * dbc_idx + dbc = create_dbc_strct(ndof, dbc_idx, dbc_val) + femsp.dbc = dbc + tol = 1.0e-8 + maxit = 100000 + [Ufem, info] = solve_fem( + "cg", + msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp.spmat, + dbc, + None, + tol, + maxit, + ) + + xcg_def = xcg + np.reshape(Ufem, [ndim, nnode], order="F") + msh_def = Mesh(etype, xcg_def, e2vcg, e2bnd, ndim) + uabs = np.sqrt( + Ufem[[i for i in range(ndof) if i % 2 == 0]] ** 2 + + Ufem[[i for i in range(ndof) if i % 2 != 0]] ** 2 + ) + fig = plt.figure() + ax1 = plt.subplot(1, 1, 1) + visualize_fem(ax1, msh_def, uabs[e2vcg], {"plot_elem": False, "nref": 1}, []) + ax1.set_title("FEM solution") + fig.tight_layout(pad=3.0) + + idx_xcg = [ + i + for i in range(xcg.shape[1]) + if 2 * i not in dbc_idx and 2 * i + 1 not in dbc_idx + ] + + obsidx = np.asarray([5, 11, 26, 32, 38]) # max is 9 + + idx_whole = [] + for i in obsidx: + idx_whole.append(2 * i) + idx_whole.append(2 * i + 1) + obsxcg = msh_def.xcg[:, obsidx] + ax1.plot(obsxcg[0, :], obsxcg[1, :], "o") + + dbc_idx_new = np.hstack((dbc_idx, idx_whole)) + dbc_val_new = Ufem[dbc_idx_new] + dbc = create_dbc_strct(msh.xcg.shape[1] * nvar, dbc_idx_new, dbc_val_new) + + Src_new = self.model.source + K_new = paddle.to_tensor([[0], [0]], dtype="float32").reshape((2,)) + parsfuncI = lambda x: paddle.concat((Src_new[0:1], Src_new[1:2], K_new), axis=0) + # GCNN + connectivity = e2vcg2connectivity(e2vcg, "ele") + prob = setup_prob_eqn_handcode.setup_linelast_base_handcode( + ndim, lam, mu, f, tb, bnd2nbc + ) + femsp_gcnn = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg, dbc) + LossF = [] + fcn = lambda u_: TensorFEMCore.create_fem_resjac( + "cg", + u_, + msh.transfdatacontiguous, + femsp_gcnn.elem, + femsp_gcnn.elem_data, + femsp_gcnn.ldof2gdof_eqn.ldof2gdof, + femsp_gcnn.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp_gcnn.spmat, + dbc, + [i for i in range(ndof) if i not in dbc_idx], + parsfuncI, + None, + ) + LossF.append(fcn) + msh_defGCNN, uabsGCNN = self.train( + Ufem, + ndof, + xcg, + connectivity, + LossF, + tol, + maxit, + dbc, + ndim, + nnode, + etype, + e2vcg, + e2bnd, + ) + self.plot_hard_way(msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs) + + def main_square(self): + # FEM + nvar = 2 + etype = "hcube" + lims = np.asarray([[0, 1], [0, 1]]) + nel = [2, 2] + porder = 2 + nf = 4 + msh = mesh_hcube(etype, lims, nel, porder).getmsh() + xcg = msh.xcg + 
e2vcg = msh.e2vcg + e2bnd = msh.e2bnd + porder = msh.porder + [ndim, nnode] = xcg.shape + nvar = ndim + ndof = nnode * nvar + + lam = lambda x, el: 1 + mu = lambda x, el: 1 + f = lambda x, el: np.zeros([ndim, 1]) + bnd2nbc = np.asarray([0, 1, 2, 3]) + tb = lambda x, n, bnd, el, fc: np.asarray([[0.5], [0]]) * ( + (bnd - 2) ** 2 < 1e-8 + ) + np.asarray([[0], [0]]) + prob = setup_linelast_base_handcode(ndim, lam, mu, f, tb, bnd2nbc) + # Create finite element space + femsp = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg) + ldof2gdof = femsp.ldof2gdof_var.ldof2gdof + geo = Hypercube(ndim, porder) + f2v = geo.f2n + dbc_idx = get_gdof_from_bndtag( + [i for i in range(ndim)], [0], nvar, ldof2gdof, e2bnd, f2v + ) + dbc_idx.sort() + dbc_idx = np.asarray(dbc_idx) + dbc_val = 0 * dbc_idx + dbc = create_dbc_strct(ndof, dbc_idx, dbc_val) + femsp.dbc = dbc + tol = 1.0e-8 + maxit = 4500 + + [Ufem, info] = solve_fem( + "cg", + msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp.spmat, + dbc, + None, + tol, + maxit, + ) + + xcg_def = xcg + np.reshape(Ufem, [ndim, nnode], order="F") + msh_def = Mesh(etype, xcg_def, e2vcg, e2bnd, ndim) + uabs = np.sqrt( + Ufem[[i for i in range(ndof) if i % 2 == 0]] ** 2 + + Ufem[[i for i in range(ndof) if i % 2 != 0]] ** 2 + ) + # GCNN + connectivity = e2vcg2connectivity(e2vcg, "ele") + prob = setup_prob_eqn_handcode.setup_linelast_base_handcode( + ndim, lam, mu, f, tb, bnd2nbc + ) + femsp_gcnn = create_femsp_cg(prob, msh, porder, e2vcg, porder, e2vcg, dbc) + LossF = [] + fcn = lambda u_: TensorFEMCore.create_fem_resjac( + "cg", + u_, + msh.transfdatacontiguous, + femsp_gcnn.elem, + femsp_gcnn.elem_data, + femsp_gcnn.ldof2gdof_eqn.ldof2gdof, + femsp_gcnn.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp_gcnn.spmat, + dbc, + ) + fcn_fem = lambda u_: create_fem_resjac( + "cg", + u_, + msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp.spmat, + dbc, + ) + LossF.append(fcn) + msh_defGCNN, uabsGCNN = self.train( + Ufem, + ndof, + xcg, + connectivity, + LossF, + tol, + maxit, + dbc, + ndim, + nnode, + etype, + e2vcg, + e2bnd, + ) + self.plot_square(msh_defGCNN, uabsGCNN, e2vcg, msh_def, uabs) + + +if __name__ == "__main__": + le_obj = LinearElasticity() + le_obj.hard_way() + le_obj.main_square() diff --git a/jointContribution/graphGalerkin/NsChebnet.py b/jointContribution/graphGalerkin/NsChebnet.py index 4b2d4ecf3c..9368a4e404 100644 --- a/jointContribution/graphGalerkin/NsChebnet.py +++ b/jointContribution/graphGalerkin/NsChebnet.py @@ -1,291 +1,291 @@ -import pdb -import sys - -import matplotlib.pyplot as plt -import numpy as np -import paddle - -sys.path.insert(0, "pycamotk") -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.create_femsp_cg import create_femsp_cg_mixed2 -from pyCaMOtk.create_mesh_hcube import mesh_hcube -from pyCaMOtk.mesh import Mesh -from pyCaMOtk.setup_ins_base_handcode import setup_ins_base_handcode -from pyCaMOtk.solve_fem import solve_fem -from pyCaMOtk.visualize_fem import visualize_fem - -sys.path.insert(0, "source") -import setup_prob_eqn_handcode -import TensorFEMCore -from GCNNModel import Ns_Chebnet -from GCNNModel import e2vcg2connectivity -from TensorFEMCore import Double -from TensorFEMCore import ReshapeFix -from TensorFEMCore import solve_fem_GCNN - -sys.path.insert(0, "utils") -from utils import Data - - -def train(): - """ - Solve GCNN 
- """ - connectivity_uv = e2vcg2connectivity(e2vcg, "ele") - connectivity_p = e2vcg2connectivity(e2vcg2, "ele") - connectivity = paddle.concat( - [connectivity_uv, connectivity_uv, connectivity_p], axis=1 - ) - prob = setup_prob_eqn_handcode.setup_ins_base_handcode( - ndim, lambda x, el: rho, lambda x, el: nu, tb, bnd2nbc - ) - - femsp_gcnn = create_femsp_cg_mixed2( - prob, - msh, - neqn1, - nvar1, - porder, - porder, - e2vcg, - e2vcg, - neqn2, - nvar2, - porder - 1, - porder - 1, - e2vcg2, - e2vcg2, - ) - LossF = [] - fcn = lambda u_: TensorFEMCore.create_fem_resjac( - "cg", - u_, - msh.transfdatacontiguous, - femsp_gcnn.elem, - femsp_gcnn.elem_data, - femsp_gcnn.ldof2gdof_eqn.ldof2gdof, - femsp_gcnn.ldof2gdof_var.ldof2gdof, - msh.e2e, - femsp_gcnn.spmat, - dbc, - ) - LossF.append(fcn) - ii = 0 - Graph = [] - Ue = Double(U.flatten().reshape(-1, 1)) - fcn_id = Double(np.asarray([ii])) - Ue_aug = paddle.concat((fcn_id, Ue), axis=0) - xcg_gcnn = np.zeros((2, 2 * xcg.shape[1] + msh_.xcg.shape[1])) - for i in range(xcg.shape[1]): - xcg_gcnn[:, 2 * i] = xcg[:, i] - xcg_gcnn[:, 2 * i + 1] = xcg[:, i] - for i in range(msh_.xcg.shape[1]): - xcg_gcnn[:, 2 * xcg.shape[1] + i] = msh_.xcg[:, i] - Uin = Double(xcg_gcnn.T) - graph = Data(x=Uin, y=Ue_aug, edge_index=connectivity) - Graph.append(graph) - DataList = [[Graph[0]]] - TrainDataloader = DataList - split = [xcg.shape[1], msh_.xcg.shape[1], connectivity_uv.shape[1]] - model = Ns_Chebnet(split) - [model, info] = solve_fem_GCNN(TrainDataloader, LossF, model, tol, maxit) - paddle.save(model, "./Model.pth") - np.save("modelTrain.npy", info) - solution = model(Graph[0]) - solution = ReshapeFix(paddle.clone(solution), [len(solution.flatten()), 1], "C") - solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) - solution = solution.detach().numpy() - - uv_GCNN = np.reshape(solution[0 : ndim * nnode], [ndim, nnode], order="F") - uabs_GCNN = np.sqrt(uv_GCNN[0, :] ** 2 + uv_GCNN[1, :] ** 2) - pGCNN = solution[ndim * nnode :] - - return uabs_GCNN, pGCNN - - -def plot(uabs_GCNN, pGCNN): - fig = plt.figure() - ax1 = plt.subplot(1, 2, 1) - visualize_fem(ax1, msh, uabs[e2vcg], {"plot_elem": False, "nref": 4}, []) - ax1.set_title("FEM-Mixed2 Velocity Magnitude") - ax2 = plt.subplot(1, 2, 2) - visualize_fem(ax2, msh, uabs_GCNN[e2vcg], {"plot_elem": False, "nref": 4}, []) - ax2.set_title("GCNN Velocity Magnitude") - fig.tight_layout(pad=2) - plt.savefig("StenosisNSU.pdf", bbox_inches="tight") - - fig = plt.figure() - ax1 = plt.subplot(1, 2, 1) - visualize_fem(ax1, msh_, p[e2vcg2], {"plot_elem": False, "nref": 4}, []) - ax1.set_title("FEM-Mixed2 Pressure") - ax2 = plt.subplot(1, 2, 2) - visualize_fem(ax2, msh_, pGCNN[e2vcg2], {"plot_elem": False, "nref": 4}, []) - ax2.set_title("GCNN Pressure Magnitude") - fig.tight_layout(pad=2) - plt.savefig("StenosisNSP.pdf", bbox_inches="tight") - - -if __name__ == "__main__": - # Basic setting of the case - ReList = np.linspace(10, 100, 2) - U0 = None - for Re in ReList: - print("Re=", Re) - rho = 1 - nu = 1 / Re - L = 1 - etype = "hcube" - nelem = [10, 10] - porder = 2 - pltit = True - ndim = 2 - nvar = 2 - inletVelocity = 1 - s = 0.4 - - # Create finite element mesh - msh = mesh_hcube(etype, np.asarray([[0, L], [0, L]]), nelem, porder).getmsh() - msh_ = mesh_hcube( - etype, np.asarray([[0, L], [0, L]]), nelem, porder - 1 - ).getmsh() - e2vcg2 = msh_.e2vcg - xcg = msh.xcg - e2vcg = msh.e2vcg - nnode = xcg.shape[1] - nnode_p = msh_.xcg.shape[1] - - # Setup equation parameters and natural boundary conditions 
- tb = lambda x, n, bnd, el, fc: np.zeros([ndim + 1, 1]) - bnd2nbc = [1, 1, 1, 1] - prob = setup_ins_base_handcode( - ndim, lambda x, el: rho, lambda x, el: nu, tb, bnd2nbc - ) - - # start to impose BC - ndofU = ndim * nnode - ndofUP = ndofU + msh_.xcg.shape[1] - dbc_idx1 = [] - for i in range(nnode): - if i in dbc_idx1: - continue - if xcg[0, i] < 1e-12 or xcg[0, i] > ( - L - 1e-12 - ): # xcg[0,i]<1e-12 or xcg[1,i]<1e-12 or xcg[0,i]>(L-1e-12): - dbc_idx1.append(i) - dbc_idx2 = [i for i in range(nnode) if xcg[1, i] < 1e-12 and i not in dbc_idx1] - dbc_idx3 = [ - i - for i in range(nnode_p) - if msh_.xcg[1, i] > L - 1e-12 and i not in dbc_idx1 and i not in dbc_idx2 - ] - - dbc_val1 = [0 for i in dbc_idx1] - dbc_val2 = [0 for i in dbc_idx2] - dbc_val3 = [0 for i in dbc_idx3] - - dbc_idx = [2 * i for i in dbc_idx1] - dbc_val = [i for i in dbc_val1] - for i in range(len(dbc_val1)): - dbc_idx.append(2 * dbc_idx1[i] + 1) - dbc_val.append(dbc_val1[i]) - - for i in range(len(dbc_idx2)): - dbc_idx.append(2 * dbc_idx2[i]) - dbc_val.append(dbc_val2[i]) - for i in range(len(dbc_idx2)): - dbc_idx.append(2 * dbc_idx2[i] + 1) - dbc_val.append(inletVelocity) - - for i in range(len(dbc_idx3)): - dbc_idx.append(ndofU + dbc_idx3[i]) - dbc_val.append(dbc_val3[i]) - - dbc_idx, I = np.unique(np.asarray(dbc_idx), return_index=True) - dbc_idx = [i for i in dbc_idx] - dbc_val = np.asarray(dbc_val) - dbc_val = dbc_val[I] - dbc_val = [i for i in dbc_val] - - dbc_idx = np.asarray(dbc_idx) - dbc_val = np.asarray(dbc_val) - dbc = create_dbc_strct(ndofUP, dbc_idx, dbc_val) - - # ReDefine Mesh - xcg_ = msh_.xcg - shrinkScalar = lambda y: (1 - s * np.cos(np.pi * (y - L / 2))) - - for i in range(xcg.shape[1]): - xcg[0, i] = (xcg[0, i] - L / 2) * shrinkScalar(xcg[1, i]) + L / 2 - for i in range(xcg_.shape[1]): - xcg_[0, i] = (xcg_[0, i] - L / 2) * shrinkScalar(xcg_[1, i]) + L / 2 - - msh = Mesh(etype, xcg, e2vcg, msh.e2bnd, 2) - msh_ = Mesh(etype, xcg_, e2vcg2, msh_.e2bnd, 2) - e2vcg2 = msh_.e2vcg - xcg = msh.xcg - e2vcg = msh.e2vcg - nnode = xcg.shape[1] - - # Create finite element space - neqn1 = ndim - neqn2 = 1 - nvar1 = ndim - nvar2 = 1 - femsp = create_femsp_cg_mixed2( - prob, - msh, - neqn1, - nvar1, - porder, - porder, - e2vcg, - e2vcg, - neqn2, - nvar2, - porder - 1, - porder - 1, - e2vcg2, - e2vcg2, - ) - ldof2gdof = femsp.ldof2gdof_var.ldof2gdof - femsp.dbc = dbc - - tol = 1.0e-8 - maxit = 10000 - [U, info] = solve_fem( - "cg", - msh.transfdatacontiguous, - femsp.elem, - femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - msh.e2e, - femsp.spmat, - dbc, - U0, - tol, - maxit, - ) - - idx_free = [i for i in range(len(U)) if i not in dbc_idx] - U0 = U[idx_free].reshape([-1, 1]) - uv = np.reshape(U[0 : ndim * nnode], [ndim, nnode], order="F") - p = U[ndim * nnode :] - uabs = np.sqrt(uv[0, :] ** 2 + uv[1, :] ** 2) - if Re == ReList[-1]: - fig = plt.figure() - ax = plt.subplot(1, 1, 1) - visualize_fem(ax, msh, uabs[e2vcg], {"plot_elem": False, "nref": 4}, []) - ax.set_title("FEM-Mixed2 Velocity Magnitude") - fig.tight_layout(pad=2) - plt.savefig("FE-Mixed2V.pdf", bbox_inches="tight") - - fig = plt.figure() - ax = plt.subplot(1, 1, 1) - visualize_fem(ax, msh_, p[e2vcg2], {"plot_elem": False, "nref": 4}, []) - ax.set_title("FEM-Mixed2 Pressure") - fig.tight_layout(pad=2) - plt.savefig("FE-Mixed2P.pdf", bbox_inches="tight") - - uabs_GCNN, pGCNN = train() - plot(uabs_GCNN, pGCNN) +import pdb +import sys + +import matplotlib.pyplot as plt +import numpy as np +import paddle + 
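+# NOTE (editorial, assumption): this script appears to solve a steady
+# incompressible Navier-Stokes problem in a narrowed (stenosis-like) channel
+# with mixed velocity/pressure elements, first with the classical FEM solver
+# and then with the Chebyshev graph-convolution model (Ns_Chebnet).
+# The sys.path.insert calls below assume "pycamotk", "source" and "utils"
+# are directories located next to this file; adjust them if the layout differs.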
+sys.path.insert(0, "pycamotk") +from pyCaMOtk.create_dbc_strct import create_dbc_strct +from pyCaMOtk.create_femsp_cg import create_femsp_cg_mixed2 +from pyCaMOtk.create_mesh_hcube import mesh_hcube +from pyCaMOtk.mesh import Mesh +from pyCaMOtk.setup_ins_base_handcode import setup_ins_base_handcode +from pyCaMOtk.solve_fem import solve_fem +from pyCaMOtk.visualize_fem import visualize_fem + +sys.path.insert(0, "source") +import setup_prob_eqn_handcode +import TensorFEMCore +from GCNNModel import Ns_Chebnet +from GCNNModel import e2vcg2connectivity +from TensorFEMCore import Double +from TensorFEMCore import ReshapeFix +from TensorFEMCore import solve_fem_GCNN + +sys.path.insert(0, "utils") +from utils import Data + + +def train(): + """ + Solve GCNN + """ + connectivity_uv = e2vcg2connectivity(e2vcg, "ele") + connectivity_p = e2vcg2connectivity(e2vcg2, "ele") + connectivity = paddle.concat( + [connectivity_uv, connectivity_uv, connectivity_p], axis=1 + ) + prob = setup_prob_eqn_handcode.setup_ins_base_handcode( + ndim, lambda x, el: rho, lambda x, el: nu, tb, bnd2nbc + ) + + femsp_gcnn = create_femsp_cg_mixed2( + prob, + msh, + neqn1, + nvar1, + porder, + porder, + e2vcg, + e2vcg, + neqn2, + nvar2, + porder - 1, + porder - 1, + e2vcg2, + e2vcg2, + ) + LossF = [] + fcn = lambda u_: TensorFEMCore.create_fem_resjac( + "cg", + u_, + msh.transfdatacontiguous, + femsp_gcnn.elem, + femsp_gcnn.elem_data, + femsp_gcnn.ldof2gdof_eqn.ldof2gdof, + femsp_gcnn.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp_gcnn.spmat, + dbc, + ) + LossF.append(fcn) + ii = 0 + Graph = [] + Ue = Double(U.flatten().reshape(-1, 1)) + fcn_id = Double(np.asarray([ii])) + Ue_aug = paddle.concat((fcn_id, Ue), axis=0) + xcg_gcnn = np.zeros((2, 2 * xcg.shape[1] + msh_.xcg.shape[1])) + for i in range(xcg.shape[1]): + xcg_gcnn[:, 2 * i] = xcg[:, i] + xcg_gcnn[:, 2 * i + 1] = xcg[:, i] + for i in range(msh_.xcg.shape[1]): + xcg_gcnn[:, 2 * xcg.shape[1] + i] = msh_.xcg[:, i] + Uin = Double(xcg_gcnn.T) + graph = Data(x=Uin, y=Ue_aug, edge_index=connectivity) + Graph.append(graph) + DataList = [[Graph[0]]] + TrainDataloader = DataList + split = [xcg.shape[1], msh_.xcg.shape[1], connectivity_uv.shape[1]] + model = Ns_Chebnet(split) + [model, info] = solve_fem_GCNN(TrainDataloader, LossF, model, tol, maxit) + paddle.save(model, "./Model.pth") + np.save("modelTrain.npy", info) + solution = model(Graph[0]) + solution = ReshapeFix(paddle.clone(solution), [len(solution.flatten()), 1], "C") + solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) + solution = solution.detach().numpy() + + uv_GCNN = np.reshape(solution[0 : ndim * nnode], [ndim, nnode], order="F") + uabs_GCNN = np.sqrt(uv_GCNN[0, :] ** 2 + uv_GCNN[1, :] ** 2) + pGCNN = solution[ndim * nnode :] + + return uabs_GCNN, pGCNN + + +def plot(uabs_GCNN, pGCNN): + fig = plt.figure() + ax1 = plt.subplot(1, 2, 1) + visualize_fem(ax1, msh, uabs[e2vcg], {"plot_elem": False, "nref": 4}, []) + ax1.set_title("FEM-Mixed2 Velocity Magnitude") + ax2 = plt.subplot(1, 2, 2) + visualize_fem(ax2, msh, uabs_GCNN[e2vcg], {"plot_elem": False, "nref": 4}, []) + ax2.set_title("GCNN Velocity Magnitude") + fig.tight_layout(pad=2) + plt.savefig("StenosisNSU.pdf", bbox_inches="tight") + + fig = plt.figure() + ax1 = plt.subplot(1, 2, 1) + visualize_fem(ax1, msh_, p[e2vcg2], {"plot_elem": False, "nref": 4}, []) + ax1.set_title("FEM-Mixed2 Pressure") + ax2 = plt.subplot(1, 2, 2) + visualize_fem(ax2, msh_, pGCNN[e2vcg2], {"plot_elem": False, "nref": 4}, []) + ax2.set_title("GCNN Pressure 
Magnitude") + fig.tight_layout(pad=2) + plt.savefig("StenosisNSP.pdf", bbox_inches="tight") + + +if __name__ == "__main__": + # Basic setting of the case + ReList = np.linspace(10, 100, 2) + U0 = None + for Re in ReList: + print("Re=", Re) + rho = 1 + nu = 1 / Re + L = 1 + etype = "hcube" + nelem = [10, 10] + porder = 2 + pltit = True + ndim = 2 + nvar = 2 + inletVelocity = 1 + s = 0.4 + + # Create finite element mesh + msh = mesh_hcube(etype, np.asarray([[0, L], [0, L]]), nelem, porder).getmsh() + msh_ = mesh_hcube( + etype, np.asarray([[0, L], [0, L]]), nelem, porder - 1 + ).getmsh() + e2vcg2 = msh_.e2vcg + xcg = msh.xcg + e2vcg = msh.e2vcg + nnode = xcg.shape[1] + nnode_p = msh_.xcg.shape[1] + + # Setup equation parameters and natural boundary conditions + tb = lambda x, n, bnd, el, fc: np.zeros([ndim + 1, 1]) + bnd2nbc = [1, 1, 1, 1] + prob = setup_ins_base_handcode( + ndim, lambda x, el: rho, lambda x, el: nu, tb, bnd2nbc + ) + + # start to impose BC + ndofU = ndim * nnode + ndofUP = ndofU + msh_.xcg.shape[1] + dbc_idx1 = [] + for i in range(nnode): + if i in dbc_idx1: + continue + if xcg[0, i] < 1e-12 or xcg[0, i] > ( + L - 1e-12 + ): # xcg[0,i]<1e-12 or xcg[1,i]<1e-12 or xcg[0,i]>(L-1e-12): + dbc_idx1.append(i) + dbc_idx2 = [i for i in range(nnode) if xcg[1, i] < 1e-12 and i not in dbc_idx1] + dbc_idx3 = [ + i + for i in range(nnode_p) + if msh_.xcg[1, i] > L - 1e-12 and i not in dbc_idx1 and i not in dbc_idx2 + ] + + dbc_val1 = [0 for i in dbc_idx1] + dbc_val2 = [0 for i in dbc_idx2] + dbc_val3 = [0 for i in dbc_idx3] + + dbc_idx = [2 * i for i in dbc_idx1] + dbc_val = [i for i in dbc_val1] + for i in range(len(dbc_val1)): + dbc_idx.append(2 * dbc_idx1[i] + 1) + dbc_val.append(dbc_val1[i]) + + for i in range(len(dbc_idx2)): + dbc_idx.append(2 * dbc_idx2[i]) + dbc_val.append(dbc_val2[i]) + for i in range(len(dbc_idx2)): + dbc_idx.append(2 * dbc_idx2[i] + 1) + dbc_val.append(inletVelocity) + + for i in range(len(dbc_idx3)): + dbc_idx.append(ndofU + dbc_idx3[i]) + dbc_val.append(dbc_val3[i]) + + dbc_idx, I = np.unique(np.asarray(dbc_idx), return_index=True) + dbc_idx = [i for i in dbc_idx] + dbc_val = np.asarray(dbc_val) + dbc_val = dbc_val[I] + dbc_val = [i for i in dbc_val] + + dbc_idx = np.asarray(dbc_idx) + dbc_val = np.asarray(dbc_val) + dbc = create_dbc_strct(ndofUP, dbc_idx, dbc_val) + + # ReDefine Mesh + xcg_ = msh_.xcg + shrinkScalar = lambda y: (1 - s * np.cos(np.pi * (y - L / 2))) + + for i in range(xcg.shape[1]): + xcg[0, i] = (xcg[0, i] - L / 2) * shrinkScalar(xcg[1, i]) + L / 2 + for i in range(xcg_.shape[1]): + xcg_[0, i] = (xcg_[0, i] - L / 2) * shrinkScalar(xcg_[1, i]) + L / 2 + + msh = Mesh(etype, xcg, e2vcg, msh.e2bnd, 2) + msh_ = Mesh(etype, xcg_, e2vcg2, msh_.e2bnd, 2) + e2vcg2 = msh_.e2vcg + xcg = msh.xcg + e2vcg = msh.e2vcg + nnode = xcg.shape[1] + + # Create finite element space + neqn1 = ndim + neqn2 = 1 + nvar1 = ndim + nvar2 = 1 + femsp = create_femsp_cg_mixed2( + prob, + msh, + neqn1, + nvar1, + porder, + porder, + e2vcg, + e2vcg, + neqn2, + nvar2, + porder - 1, + porder - 1, + e2vcg2, + e2vcg2, + ) + ldof2gdof = femsp.ldof2gdof_var.ldof2gdof + femsp.dbc = dbc + + tol = 1.0e-8 + maxit = 10000 + [U, info] = solve_fem( + "cg", + msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + msh.e2e, + femsp.spmat, + dbc, + U0, + tol, + maxit, + ) + + idx_free = [i for i in range(len(U)) if i not in dbc_idx] + U0 = U[idx_free].reshape([-1, 1]) + uv = np.reshape(U[0 : ndim * nnode], [ndim, nnode], 
order="F") + p = U[ndim * nnode :] + uabs = np.sqrt(uv[0, :] ** 2 + uv[1, :] ** 2) + if Re == ReList[-1]: + fig = plt.figure() + ax = plt.subplot(1, 1, 1) + visualize_fem(ax, msh, uabs[e2vcg], {"plot_elem": False, "nref": 4}, []) + ax.set_title("FEM-Mixed2 Velocity Magnitude") + fig.tight_layout(pad=2) + plt.savefig("FE-Mixed2V.pdf", bbox_inches="tight") + + fig = plt.figure() + ax = plt.subplot(1, 1, 1) + visualize_fem(ax, msh_, p[e2vcg2], {"plot_elem": False, "nref": 4}, []) + ax.set_title("FEM-Mixed2 Pressure") + fig.tight_layout(pad=2) + plt.savefig("FE-Mixed2P.pdf", bbox_inches="tight") + + uabs_GCNN, pGCNN = train() + plot(uabs_GCNN, pGCNN) diff --git a/jointContribution/graphGalerkin/Possion.py b/jointContribution/graphGalerkin/Possion.py index 54a149529a..b6a0151de6 100644 --- a/jointContribution/graphGalerkin/Possion.py +++ b/jointContribution/graphGalerkin/Possion.py @@ -1,227 +1,227 @@ -import sys - -import matplotlib.pyplot as plt -import numpy as np -import paddle - -paddle.seed(1334) - -sys.path.insert(0, "pycamotk") -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.create_femsp_cg import create_femsp_cg -from pyCaMOtk.create_mesh_hsphere import mesh_hsphere -from pyCaMOtk.visualize_fem import visualize_fem - -sys.path.insert(0, "source") -import setup_prob_eqn_handcode -from FEM_ForwardModel import analyticalPossion -from GCNNModel import PossionNet -from GCNNModel import e2vcg2connectivity -from TensorFEMCore import Double -from TensorFEMCore import create_fem_resjac -from TensorFEMCore import solve_fem_GCNN - -sys.path.insert(0, "utils") -from utils import Data - - -class Possion: - def __init__(self) -> None: - # GCNN model - self.model = PossionNet() - - def params_possion(self): - """ - e2vcg is a 2D array (NNODE PER ELEM, NELEM): The connectivity of the - mesh. The (:, e) entries are the global node numbers of the nodes - that comprise element e. The local node numbers of each element are - defined by the columns of this matrix, e.g., e2vcg(i, e) is the - global node number of the ith local node of element e. - The flux constant Flux=[du/dx, du/dy]^T=K dot [dphi/dx,dphi/dy] - where phi is the solution polynomial function - """ - - # Set up GCNN-FEM Possion problem - self.nin = 1 # Number of input variable - self.nvar = 1 # Number of primanry variable - etype = "hcube" # Mesh type - c = [0, 0] # Domain center - r = 1 # Radius - self.porder = 2 # Polynomial order for solution and geometry basis - nel = [2, 2] # Number of element in x and y axis - self.msh = mesh_hsphere( - etype, c, r, nel, self.porder - ).getmsh() # Create mesh object - self.xcg = self.msh.xcg # Extract node coordinates - self.ndof = self.xcg.shape[1] - self.e2vcg = self.msh.e2vcg # Extract element connectivity - self.connectivity = e2vcg2connectivity(self.msh.e2vcg, "ele") - - self.bnd2nbc = np.asarray([0]) # Define the boundary tag! 
- self.K = lambda x, el: np.asarray([[1], [0], [0], [1]]) - self.Qb = ( - lambda x, n, bnd, el, fc: 0 - ) # The primary variable value on the boundary - dbc_idx = [ - i - for i in range(self.xcg.shape[1]) - if np.sum(self.xcg[:, i] ** 2) > 1 - 1e-12 - ] # The boundary node id - self.dbc_idx = np.asarray(dbc_idx) - self.dbc_val = dbc_idx * 0 # The boundary node primary variable value - - def train(self): - paddle.device.set_device("gpu:0") - dbc = create_dbc_strct( - self.xcg.shape[1] * self.nvar, self.dbc_idx, self.dbc_val - ) # Create the class of boundary condition - - Src_new = self.model.source - K_new = paddle.to_tensor([[1], [0], [0], [1]], dtype="float32").reshape((4,)) - parsfuncI = lambda x: paddle.concat((K_new, Src_new), axis=0) - S = [2] # Parametrize the source value in the pde -F_ij,j=S_i - LossF = [] - # Define the Training Data - Graph = [] - ii = 0 - - for i in S: - f = lambda x, el: i - prob = setup_prob_eqn_handcode.setup_linelptc_sclr_base_handcode( - 2, self.K, f, self.Qb, self.bnd2nbc - ) - - femsp = create_femsp_cg( - prob, self.msh, self.porder, self.e2vcg, self.porder, self.e2vcg, dbc - ) - fcn = lambda u_: create_fem_resjac( - "cg", - u_, - self.msh.transfdatacontiguous, - femsp.elem, - femsp.elem_data, - femsp.ldof2gdof_eqn.ldof2gdof, - femsp.ldof2gdof_var.ldof2gdof, - self.msh.e2e, - femsp.spmat, - dbc, - [i for i in range(self.ndof) if i not in self.dbc_idx], - parsfuncI, - None, - self.model, - ) - LossF.append(fcn) - - Ue = Double(analyticalPossion(self.xcg, i).flatten().reshape(self.ndof, 1)) - fcn_id = Double(np.asarray([ii])) - Ue_aug = paddle.concat((fcn_id, Ue), axis=0) - Uin = Double(self.xcg.T) - graph = Data(x=Uin, y=Ue_aug, edge_index=self.connectivity) - Graph.append(graph) - ii = ii + 1 - DataList = [[Graph[i]] for i in range(len(S))] - TrainDataloader = DataList - - # Training Data - [model, info] = solve_fem_GCNN( - TrainDataloader, LossF, self.model, self.tol, self.maxit - ) - print("K=", self.K) - print("Min Error=", info["Er"].min()) - print("Mean Error Last 10 iterations=", np.mean(info["Er"][-10:])) - print("Var Error Last 10 iterations=", np.var(info["Er"][-10:])) - - np.savetxt("demo0\ErFinal.txt", info["Er"]) - np.savetxt("demo0\Loss.txt", info["Loss"]) - - solution = model(Graph[0]) - solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) - solution = solution.detach().cpu().numpy() - Ue = Ue.detach().cpu().numpy() - return solution, Ue - - def plot_disk(self, solution, Ue): - ax1 = plt.subplot(1, 1, 1) - _, cbar1 = visualize_fem( - ax1, self.msh, solution[self.e2vcg], {"plot_elem": True, "nref": 6}, [] - ) - ax1.tick_params( - axis="both", - which="both", - bottom=False, - left=False, - top=False, - labelbottom=False, - labelleft=False, - ) - ax1.axis("off") - cbar1.remove() - plt.margins(0, 0) - plt.savefig( - "gcnn_possion_circle.png", bbox_inches="tight", pad_inches=-0.11, dpi=800 - ) - plt.close() - - ax2 = plt.subplot(1, 1, 1) - _, cbar2 = visualize_fem( - ax2, self.msh, Ue[self.e2vcg], {"plot_elem": True, "nref": 6}, [] - ) - ax2.tick_params( - axis="both", - which="both", - bottom=False, - left=False, - top=False, - labelbottom=False, - labelleft=False, - ) - ax2.axis("off") - cbar2.remove() - plt.margins(0, 0) - plt.savefig( - "exact_possion_circle.png", bbox_inches="tight", pad_inches=-0.11, dpi=800 - ) - - def plot_circle(self, solution, Ue): - fig = plt.figure() - ax1 = plt.subplot(1, 2, 1) - visualize_fem( - ax1, self.msh, solution[self.e2vcg], {"plot_elem": True, "nref": 6}, [] - ) - ax1.set_title("GCNN 
solution") - ax2 = plt.subplot(1, 2, 2) - visualize_fem(ax2, self.msh, Ue[self.e2vcg], {"plot_elem": True, "nref": 6}, []) - ax2.set_title("Exact solution") - fig.tight_layout(pad=3.0) - plt.savefig("demo0\Demo.pdf", bbox_inches="tight") - - def disk_possion_hard(self): - # Hyper prameters - self.tol = 1.0e-16 - self.maxit = 3000 - self.params_possion() - Ufem = analyticalPossion(self.xcg, 2).flatten().reshape(self.ndof, 1) - - obsidx = np.asarray([8]) - self.dbc_idx = np.hstack((np.asarray(self.dbc_idx), obsidx)) - self.dbc_val = Ufem[self.dbc_idx] - - solution, Ue = self.train() - self.plot_disk(solution, Ue) - - def circle(self): - # Hyper prameters - self.tol = 1.0e-16 - self.maxit = 500 - self.params_possion() - self.dbc_idx = np.asarray(self.dbc_idx) - self.dbc_val = self.dbc_idx * 0 # The boundary node primary variable value - - solution, Ue = self.train() - self.plot_circle(solution, Ue) - - -if __name__ == "__main__": - possion_obj = Possion() - possion_obj.disk_possion_hard() - possion_obj.circle() +import sys + +import matplotlib.pyplot as plt +import numpy as np +import paddle + +paddle.seed(1334) + +sys.path.insert(0, "pycamotk") +from pyCaMOtk.create_dbc_strct import create_dbc_strct +from pyCaMOtk.create_femsp_cg import create_femsp_cg +from pyCaMOtk.create_mesh_hsphere import mesh_hsphere +from pyCaMOtk.visualize_fem import visualize_fem + +sys.path.insert(0, "source") +import setup_prob_eqn_handcode +from FEM_ForwardModel import analyticalPossion +from GCNNModel import PossionNet +from GCNNModel import e2vcg2connectivity +from TensorFEMCore import Double +from TensorFEMCore import create_fem_resjac +from TensorFEMCore import solve_fem_GCNN + +sys.path.insert(0, "utils") +from utils import Data + + +class Possion: + def __init__(self) -> None: + # GCNN model + self.model = PossionNet() + + def params_possion(self): + """ + e2vcg is a 2D array (NNODE PER ELEM, NELEM): The connectivity of the + mesh. The (:, e) entries are the global node numbers of the nodes + that comprise element e. The local node numbers of each element are + defined by the columns of this matrix, e.g., e2vcg(i, e) is the + global node number of the ith local node of element e. + The flux constant Flux=[du/dx, du/dy]^T=K dot [dphi/dx,dphi/dy] + where phi is the solution polynomial function + """ + + # Set up GCNN-FEM Possion problem + self.nin = 1 # Number of input variable + self.nvar = 1 # Number of primanry variable + etype = "hcube" # Mesh type + c = [0, 0] # Domain center + r = 1 # Radius + self.porder = 2 # Polynomial order for solution and geometry basis + nel = [2, 2] # Number of element in x and y axis + self.msh = mesh_hsphere( + etype, c, r, nel, self.porder + ).getmsh() # Create mesh object + self.xcg = self.msh.xcg # Extract node coordinates + self.ndof = self.xcg.shape[1] + self.e2vcg = self.msh.e2vcg # Extract element connectivity + self.connectivity = e2vcg2connectivity(self.msh.e2vcg, "ele") + + self.bnd2nbc = np.asarray([0]) # Define the boundary tag! 
+ self.K = lambda x, el: np.asarray([[1], [0], [0], [1]]) + self.Qb = ( + lambda x, n, bnd, el, fc: 0 + ) # The primary variable value on the boundary + dbc_idx = [ + i + for i in range(self.xcg.shape[1]) + if np.sum(self.xcg[:, i] ** 2) > 1 - 1e-12 + ] # The boundary node id + self.dbc_idx = np.asarray(dbc_idx) + self.dbc_val = dbc_idx * 0 # The boundary node primary variable value + + def train(self): + paddle.device.set_device("gpu:0") + dbc = create_dbc_strct( + self.xcg.shape[1] * self.nvar, self.dbc_idx, self.dbc_val + ) # Create the class of boundary condition + + Src_new = self.model.source + K_new = paddle.to_tensor([[1], [0], [0], [1]], dtype="float32").reshape((4,)) + parsfuncI = lambda x: paddle.concat((K_new, Src_new), axis=0) + S = [2] # Parametrize the source value in the pde -F_ij,j=S_i + LossF = [] + # Define the Training Data + Graph = [] + ii = 0 + + for i in S: + f = lambda x, el: i + prob = setup_prob_eqn_handcode.setup_linelptc_sclr_base_handcode( + 2, self.K, f, self.Qb, self.bnd2nbc + ) + + femsp = create_femsp_cg( + prob, self.msh, self.porder, self.e2vcg, self.porder, self.e2vcg, dbc + ) + fcn = lambda u_: create_fem_resjac( + "cg", + u_, + self.msh.transfdatacontiguous, + femsp.elem, + femsp.elem_data, + femsp.ldof2gdof_eqn.ldof2gdof, + femsp.ldof2gdof_var.ldof2gdof, + self.msh.e2e, + femsp.spmat, + dbc, + [i for i in range(self.ndof) if i not in self.dbc_idx], + parsfuncI, + None, + self.model, + ) + LossF.append(fcn) + + Ue = Double(analyticalPossion(self.xcg, i).flatten().reshape(self.ndof, 1)) + fcn_id = Double(np.asarray([ii])) + Ue_aug = paddle.concat((fcn_id, Ue), axis=0) + Uin = Double(self.xcg.T) + graph = Data(x=Uin, y=Ue_aug, edge_index=self.connectivity) + Graph.append(graph) + ii = ii + 1 + DataList = [[Graph[i]] for i in range(len(S))] + TrainDataloader = DataList + + # Training Data + [model, info] = solve_fem_GCNN( + TrainDataloader, LossF, self.model, self.tol, self.maxit + ) + print("K=", self.K) + print("Min Error=", info["Er"].min()) + print("Mean Error Last 10 iterations=", np.mean(info["Er"][-10:])) + print("Var Error Last 10 iterations=", np.var(info["Er"][-10:])) + + np.savetxt("demo0\ErFinal.txt", info["Er"]) + np.savetxt("demo0\Loss.txt", info["Loss"]) + + solution = model(Graph[0]) + solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) + solution = solution.detach().cpu().numpy() + Ue = Ue.detach().cpu().numpy() + return solution, Ue + + def plot_disk(self, solution, Ue): + ax1 = plt.subplot(1, 1, 1) + _, cbar1 = visualize_fem( + ax1, self.msh, solution[self.e2vcg], {"plot_elem": True, "nref": 6}, [] + ) + ax1.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + top=False, + labelbottom=False, + labelleft=False, + ) + ax1.axis("off") + cbar1.remove() + plt.margins(0, 0) + plt.savefig( + "gcnn_possion_circle.png", bbox_inches="tight", pad_inches=-0.11, dpi=800 + ) + plt.close() + + ax2 = plt.subplot(1, 1, 1) + _, cbar2 = visualize_fem( + ax2, self.msh, Ue[self.e2vcg], {"plot_elem": True, "nref": 6}, [] + ) + ax2.tick_params( + axis="both", + which="both", + bottom=False, + left=False, + top=False, + labelbottom=False, + labelleft=False, + ) + ax2.axis("off") + cbar2.remove() + plt.margins(0, 0) + plt.savefig( + "exact_possion_circle.png", bbox_inches="tight", pad_inches=-0.11, dpi=800 + ) + + def plot_circle(self, solution, Ue): + fig = plt.figure() + ax1 = plt.subplot(1, 2, 1) + visualize_fem( + ax1, self.msh, solution[self.e2vcg], {"plot_elem": True, "nref": 6}, [] + ) + ax1.set_title("GCNN 
solution") + ax2 = plt.subplot(1, 2, 2) + visualize_fem(ax2, self.msh, Ue[self.e2vcg], {"plot_elem": True, "nref": 6}, []) + ax2.set_title("Exact solution") + fig.tight_layout(pad=3.0) + plt.savefig("demo0\Demo.pdf", bbox_inches="tight") + + def disk_possion_hard(self): + # Hyper prameters + self.tol = 1.0e-16 + self.maxit = 3000 + self.params_possion() + Ufem = analyticalPossion(self.xcg, 2).flatten().reshape(self.ndof, 1) + + obsidx = np.asarray([8]) + self.dbc_idx = np.hstack((np.asarray(self.dbc_idx), obsidx)) + self.dbc_val = Ufem[self.dbc_idx] + + solution, Ue = self.train() + self.plot_disk(solution, Ue) + + def circle(self): + # Hyper prameters + self.tol = 1.0e-16 + self.maxit = 500 + self.params_possion() + self.dbc_idx = np.asarray(self.dbc_idx) + self.dbc_val = self.dbc_idx * 0 # The boundary node primary variable value + + solution, Ue = self.train() + self.plot_circle(solution, Ue) + + +if __name__ == "__main__": + possion_obj = Possion() + possion_obj.disk_possion_hard() + possion_obj.circle() diff --git a/jointContribution/graphGalerkin/rfcs/graphGalerkin.md b/jointContribution/graphGalerkin/rfcs/graphGalerkin.md index c969876d56..5de2339028 100644 --- a/jointContribution/graphGalerkin/rfcs/graphGalerkin.md +++ b/jointContribution/graphGalerkin/rfcs/graphGalerkin.md @@ -1,81 +1,81 @@ -# OSPP2023--飞桨PaddleScience-PDE方程求解模型开发设计文档 -| API名称 | 新增API名称 | -| --- | --- | -| 提交作者 | zlynna | -| 提交时间 | 2023-10-12 | -| 版本号 | V1.0 | -| 依赖CINN版本 | PaddlePaddle2.5.0 | -| 文件名 | graphGalerin.md | -# 1. 概述 -## 1.1 相关背景 -尽管物理信息神经网络 (PINNs) 在解决正向和反向问题方面具有巨大的潜力,但存在一些技术挑战,阻碍了其在更复杂和现实应用中的应用。首先,大多数现有的PINNs基于点对点的表达方式,使用全连接网络来学习连续函数,但这种方法在可扩展性和硬边界实施方面存在问题。其次,无限的搜索空间使网络训练的非凸优化变得过于复杂。第三,尽管基于卷积神经网络 (CNN) 的离散学习可以显著提高训练效率,但 CNN 难以处理不规则几何形状和无结构网格。 - -为了妥善解决这些问题,文章提出了一种基于图卷积网络(GCN)和PDE的变分结构的新型离散PINN框架,以统一的方式解决正向和反向偏微分方程(PDE)。使用分段多项式基可以减小搜索空间的维度,有助于训练和收敛。在不需要调整经典PINNs中的损失函数的情况下,所提出的方法可以严格强制边界条件,并在正向和反向设置中加入稀疏数据。GCN的灵活性用于处理不规则的几何形状和无结构的网格。所提出的方法的有效性和优点在各种由线性和非线性PDEs管理的正向和反向计算力学问题上得到了证明。 -## 1.2 功能目标 -本任务中,作者依据文章[Physics-informed graph neural Galerkin networks: A unified framework for solving PDE-governed forward and inverse problems](https://www.sciencedirect.com/science/article/pii/S0045782521007076),完成以下任务: -- 根据论文代码,基于PaddlePaddle复现论文中相关结果,并与参考结果进行比较。 -## 1.3 意义 -基于PaddlePaddle实现物理信息嵌入的图神经网络(GNN)求解偏微分方程(PDE),拓展PaddlePaddle科学计算模型。 -# 2. 飞桨现状 -PaddlePaddle目前无相关模型实现。 -# 3. 业内方案调研 -## 3.1 解决的问题 -本项目基于嵌入物理信息的图神经网络求解偏微分方程,方程主要以以下形式出现: -$$ -\nabla \cdot F(u, \nabla u; \boldsymbol\mu) = S(u, \nabla u; \boldsymbol\mu) \: in \: \Omega -$$ -边界条件为: -$$ -\mathbf\R_u(U_u(\boldsymbol\mu), U_e;\boldsymbol\mu)=0 -$$ -## 3.2 解决的方法 -| 算法 : 基于图卷积网络求解PDE| -| ------| -|输入: 方程参数 $\overline \mu$ , 节点坐标 $\chi$ 和邻接矩阵 A| -|输出: 解函数 $\hat U$| -1. 在积分点上求解基函数 $\Phi$ 以得到 $\Phi(\widetilde x^v)$, $\Phi(\widetilde x^s)$, $\nabla \Phi(\widetilde x^v)$, $\nabla \Phi(\widetilde x^s)$; -2. 计算残差函数 $R(\widetilde U;\mu)$; -3. 应用静力凝聚法; -4. 分割自由度 $\hat U(\Theta)=(\hat U_u(\Theta)^T,\hat {U_e}^T)^T$ 并对关键条件实施强约束, $\hat U_e=U_e$, 并构建物理信息嵌入的损失函数; -5. 求解该优化问题得到解函数 $\hat U=(\hat U_u(\Theta^*)^T,{U^T}_e)^T$; -## 3.3 复现目标 -![result1](https://github.com/zlynna/PaddleScience/blob/develop/jointContribution/graphGalerkin/rfcs/figs/fig1.png) -## 3.4 可能存在的难点 -- PaddlePaddle中没有torch_geomitric中有关Chebconv图卷积层的相关API实现 -# 4. 
设计思路与实现方案 -参考参考[PaddleScience复现指南](https://paddlescience-docs.readthedocs.io/zh/latest/zh/reproduction/#2),复现步骤如图所示: - -![复现步骤](https://paddlescience-docs.readthedocs.io/zh/latest/images/overview/reproduction_process.png) -## 4.1 基于PaddlePaddle复现 -根据**参考文献:graphGalerkin**,基于Paddle API实现该模型的设计思路与实现步骤如下: -1. 导入依赖 -2. 生成数据 -3. 构造网络模型 -5. 定义基于物理信息的损失函数$R$ -6. 定义模型的前向传播方法forward -7. 模型训练及推理 -8. 实现graphGalerkin,进行对比分析 -9. 绘制结果 - - -完整复现代码见: -[graphGalerkin](https://aistudio.baidu.com/projectdetail/6625305) - -# 5. 测试和验收的考量 -测试与验收参考[PaddleScience模型复现流程及验收标准](https://paddlescience-docs.readthedocs.io/zh/latest/zh/reproduction/)。 - -1. 提供完整的基于Paddle API的复现方法 -2. 方程求解精度与论文对齐 -# 6. 可行性分析和排期规划 - -## 6.1 可行性分析 - -- 根据torch_geomitric中有关Chebconv图卷积层的相关API实现转换为Paddle API以实现原始论文模型结构。 - -可行 - -## 6.2 排期规划 - -- 202307 : 调研 -- 202308 :基于Paddle API的复现 -- 202309 :整理项目产出,撰写案例文档 \ No newline at end of file +# OSPP2023--飞桨PaddleScience-PDE方程求解模型开发设计文档 +| API名称 | 新增API名称 | +| --- | --- | +| 提交作者 | zlynna | +| 提交时间 | 2023-10-12 | +| 版本号 | V1.0 | +| 依赖CINN版本 | PaddlePaddle2.5.0 | +| 文件名 | graphGalerin.md | +# 1. 概述 +## 1.1 相关背景 +尽管物理信息神经网络 (PINNs) 在解决正向和反向问题方面具有巨大的潜力,但存在一些技术挑战,阻碍了其在更复杂和现实应用中的应用。首先,大多数现有的PINNs基于点对点的表达方式,使用全连接网络来学习连续函数,但这种方法在可扩展性和硬边界实施方面存在问题。其次,无限的搜索空间使网络训练的非凸优化变得过于复杂。第三,尽管基于卷积神经网络 (CNN) 的离散学习可以显著提高训练效率,但 CNN 难以处理不规则几何形状和无结构网格。 + +为了妥善解决这些问题,文章提出了一种基于图卷积网络(GCN)和PDE的变分结构的新型离散PINN框架,以统一的方式解决正向和反向偏微分方程(PDE)。使用分段多项式基可以减小搜索空间的维度,有助于训练和收敛。在不需要调整经典PINNs中的损失函数的情况下,所提出的方法可以严格强制边界条件,并在正向和反向设置中加入稀疏数据。GCN的灵活性用于处理不规则的几何形状和无结构的网格。所提出的方法的有效性和优点在各种由线性和非线性PDEs管理的正向和反向计算力学问题上得到了证明。 +## 1.2 功能目标 +本任务中,作者依据文章[Physics-informed graph neural Galerkin networks: A unified framework for solving PDE-governed forward and inverse problems](https://www.sciencedirect.com/science/article/pii/S0045782521007076),完成以下任务: +- 根据论文代码,基于PaddlePaddle复现论文中相关结果,并与参考结果进行比较。 +## 1.3 意义 +基于PaddlePaddle实现物理信息嵌入的图神经网络(GNN)求解偏微分方程(PDE),拓展PaddlePaddle科学计算模型。 +# 2. 飞桨现状 +PaddlePaddle目前无相关模型实现。 +# 3. 业内方案调研 +## 3.1 解决的问题 +本项目基于嵌入物理信息的图神经网络求解偏微分方程,方程主要以以下形式出现: +$$ +\nabla \cdot F(u, \nabla u; \boldsymbol\mu) = S(u, \nabla u; \boldsymbol\mu) \: in \: \Omega +$$ +边界条件为: +$$ +\mathbf\R_u(U_u(\boldsymbol\mu), U_e;\boldsymbol\mu)=0 +$$ +## 3.2 解决的方法 +| 算法 : 基于图卷积网络求解PDE| +| ------| +|输入: 方程参数 $\overline \mu$ , 节点坐标 $\chi$ 和邻接矩阵 A| +|输出: 解函数 $\hat U$| +1. 在积分点上求解基函数 $\Phi$ 以得到 $\Phi(\widetilde x^v)$, $\Phi(\widetilde x^s)$, $\nabla \Phi(\widetilde x^v)$, $\nabla \Phi(\widetilde x^s)$; +2. 计算残差函数 $R(\widetilde U;\mu)$; +3. 应用静力凝聚法; +4. 分割自由度 $\hat U(\Theta)=(\hat U_u(\Theta)^T,\hat {U_e}^T)^T$ 并对关键条件实施强约束, $\hat U_e=U_e$, 并构建物理信息嵌入的损失函数; +5. 求解该优化问题得到解函数 $\hat U=(\hat U_u(\Theta^*)^T,{U^T}_e)^T$; +## 3.3 复现目标 +![result1](https://github.com/zlynna/PaddleScience/blob/develop/jointContribution/graphGalerkin/rfcs/figs/fig1.png) +## 3.4 可能存在的难点 +- PaddlePaddle中没有torch_geomitric中有关Chebconv图卷积层的相关API实现 +# 4. 设计思路与实现方案 +参考参考[PaddleScience复现指南](https://paddlescience-docs.readthedocs.io/zh/latest/zh/reproduction/#2),复现步骤如图所示: + +![复现步骤](https://paddlescience-docs.readthedocs.io/zh/latest/images/overview/reproduction_process.png) +## 4.1 基于PaddlePaddle复现 +根据**参考文献:graphGalerkin**,基于Paddle API实现该模型的设计思路与实现步骤如下: +1. 导入依赖 +2. 生成数据 +3. 构造网络模型 +5. 定义基于物理信息的损失函数$R$ +6. 定义模型的前向传播方法forward +7. 模型训练及推理 +8. 实现graphGalerkin,进行对比分析 +9. 绘制结果 + + +完整复现代码见: +[graphGalerkin](https://aistudio.baidu.com/projectdetail/6625305) + +# 5. 测试和验收的考量 +测试与验收参考[PaddleScience模型复现流程及验收标准](https://paddlescience-docs.readthedocs.io/zh/latest/zh/reproduction/)。 + +1. 提供完整的基于Paddle API的复现方法 +2. 
方程求解精度与论文对齐 +# 6. 可行性分析和排期规划 + +## 6.1 可行性分析 + +- 根据torch_geomitric中有关Chebconv图卷积层的相关API实现转换为Paddle API以实现原始论文模型结构。 + +可行 + +## 6.2 排期规划 + +- 202307 : 调研 +- 202308 :基于Paddle API的复现 +- 202309 :整理项目产出,撰写案例文档 diff --git a/jointContribution/graphGalerkin/source/FEM_ForwardModel.py b/jointContribution/graphGalerkin/source/FEM_ForwardModel.py index c3cbbb7ce7..fdc3db8a18 100644 --- a/jointContribution/graphGalerkin/source/FEM_ForwardModel.py +++ b/jointContribution/graphGalerkin/source/FEM_ForwardModel.py @@ -1,18 +1,20 @@ -import numpy as np -import pdb -import matplotlib.pyplot as plt - -from pyCaMOtk.create_mesh_hsphere import mesh_hsphere -from pyCaMOtk.setup_linelptc_sclr_base_handcode import setup_linelptc_sclr_base_handcode -from pyCaMOtk.create_dbc_strct import create_dbc_strct -from pyCaMOtk.create_femsp_cg import create_femsp_cg -from pyCaMOtk.solve_fem import solve_fem -from pyCaMOtk.visualize_fem import visualize_fem - -def analyticalPossion(xcg,Tc,Tb=0): - Ue=Tc*(1-xcg[0,:]**2-xcg[1,:]**2)/4+Tb - return Ue.flatten() - -def analyticalConeInterpolation(xcg,Tc,Tb=0): - Ue=Tc*(1-np.sqrt(xcg[0,:]**2+xcg[1,:]**2))/4+Tb - return Ue.flatten() \ No newline at end of file +import pdb + +import matplotlib.pyplot as plt +import numpy as np +from pyCaMOtk.create_dbc_strct import create_dbc_strct +from pyCaMOtk.create_femsp_cg import create_femsp_cg +from pyCaMOtk.create_mesh_hsphere import mesh_hsphere +from pyCaMOtk.setup_linelptc_sclr_base_handcode import setup_linelptc_sclr_base_handcode +from pyCaMOtk.solve_fem import solve_fem +from pyCaMOtk.visualize_fem import visualize_fem + + +def analyticalPossion(xcg, Tc, Tb=0): + Ue = Tc * (1 - xcg[0, :] ** 2 - xcg[1, :] ** 2) / 4 + Tb + return Ue.flatten() + + +def analyticalConeInterpolation(xcg, Tc, Tb=0): + Ue = Tc * (1 - np.sqrt(xcg[0, :] ** 2 + xcg[1, :] ** 2)) / 4 + Tb + return Ue.flatten() diff --git a/jointContribution/graphGalerkin/source/GCNNModel.py b/jointContribution/graphGalerkin/source/GCNNModel.py index f7b8c54fd5..666de8887e 100644 --- a/jointContribution/graphGalerkin/source/GCNNModel.py +++ b/jointContribution/graphGalerkin/source/GCNNModel.py @@ -1,246 +1,248 @@ -# from pgl.nn import GCNConv -import numpy as np -import paddle -import paddle.nn.initializer as Initializer -import sys - -sys.path.insert(0, "utils") -from ChebConv import ChebConv -from paddle.nn.functional import relu -from paddle.nn import Layer, Linear - -place = paddle.CUDAPlace(0) - - -def e2vcg2connectivity(e2vcg, type="iso"): - """ - e2vcg should be in np.array - """ - NnG = np.max(e2vcg) + 1 - NnE = e2vcg.shape[1] - if type == "ele": - connectivity = [] - for i in range(NnG): - positions = np.argwhere(e2vcg == i)[:, 0] - # pdb.set_trace() - for j in positions: - for k in range(NnE): - if e2vcg[j, k] != i: - connectivity.append(np.asarray([i, e2vcg[j, k]])) - return paddle.to_tensor( - paddle.floor( - paddle.to_tensor( - np.asarray(connectivity).T, place=place, dtype=paddle.float32 - ) - ), - dtype=paddle.int64, - ) - elif type == "iso": - connectivity = [[i for i in range(NnG)], [i for i in range(NnG)]] - return paddle.to_tensor( - paddle.floor( - paddle.to_tensor( - np.asarray(connectivity), place=place, dtype=paddle.float32 - ) - ), - dtype=paddle.int64, - ) - elif type == "eletruncate": - connectivity = [] - for i in range(NnG): - positions = np.argwhere(e2vcg == i)[:, 0] - for j in positions: - for k in range(NnE): - if e2vcg[j, k] != i: - connectivity.append(np.asarray([i, e2vcg[j, k]])) - return paddle.to_tensor( - paddle.floor( - paddle.to_tensor( - 
np.asarray(connectivity).T, place=place, dtype=paddle.float32 - ) - ), - dtype=paddle.int64, - ) - - ############################################## - - -############################################## - - -def last_chance0(maru): - f91 = paddle.concat([dark.weight.T.unsqueeze(0) for dark in maru.lins], axis=0) - f91 = paddle.create_parameter( - f91.shape, - paddle.float32, - attr=Initializer.Orthogonal(Initializer.calculate_gain("relu")), - ) - for i in range(len(maru.lins)): - w_ = paddle.create_parameter( - f91[i, :, :].T.shape, - paddle.float32, - attr=Initializer.Assign(f91[i, :, :].T), - ) - maru.lins[i].weight = w_ - return maru - - -def last_chance1(maru): - weights = paddle.concat([dark.weight.T.unsqueeze(0) for dark in maru.lins], axis=0) - weights = paddle.create_parameter( - weights.shape, weights.dtype, attr=Initializer.Orthogonal() - ) - for i in range(len(maru.lins)): - w_ = paddle.create_parameter( - maru.lins[i].weight.T.shape, - paddle.float32, - attr=Initializer.Assign(weights[i, :, :].T), - ) - maru.lins[i].weight = w_ - return maru - - -class PossionNet(Layer): - def __init__(self, nci=2, nco=1, kk=10): - super(PossionNet, self).__init__() - feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] - self.conv_layers = [] - for i in range(len(feature) - 1): - conv = ChebConv(feature[i], feature[i + 1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers.append((conv, last_chance)) - for i, (conv, last_chance) in enumerate(self.conv_layers): - setattr(self, f"conv{i+1}", conv) - setattr(self, f"last_chance{i}", last_chance) - self.source = paddle.to_tensor([0.25]) - self.source = paddle.create_parameter( - self.source.shape, - dtype=paddle.float32, - attr=Initializer.Assign(self.source), - ) - - def forward(self, data): - x, edge_index = data.x, data.edge_index - for i, (conv, last_chance) in enumerate(self.conv_layers): - x = conv(x, edge_index) - if i < len(self.conv_layers) - 2: - x = relu(x) - return x - - -######### -class LinearElasticityNet2D(Layer): - def __init__(self): - super(LinearElasticityNet2D, self).__init__() - nci = 2 - nco = 1 - kk = 10 - feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] - self.conv_layers_1 = [] - self.conv_layers_2 = [] - for i in range(len(feature) - 1): - conv = ChebConv(feature[i], feature[i + 1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_1.append((conv, last_chance)) - conv = ChebConv(feature[i], feature[i + 1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_2.append((conv, last_chance)) - for i, (conv, last_chance) in enumerate(self.conv_layers_1): - setattr(self, f"conv{i+1}", conv) - setattr(self, f"last_chance{i}", last_chance) - for i, (conv, last_chance) in enumerate(self.conv_layers_2): - setattr(self, f"conv{i+9}", conv) - setattr(self, f"last_chance{i}", last_chance) - - self.source = paddle.to_tensor([0.1, 0.1]) - self.source.stop_gradient = False - - def forward(self, data): - x, edge_index = data.x, data.edge_index - n1 = int(max(x.shape) / 2) - idx1 = [2 * i for i in range(n1)] - idx2 = [2 * i + 1 for i in range(n1)] - x1 = x[idx1] - x2 = x[idx2] - edge_index1 = edge_index - edge_index2 = edge_index - for i, (conv, last_chance) in enumerate(self.conv_layers_1): - x1 = conv(x1, edge_index1) - if i < len(self.conv_layers_1) - 2: - x1 = relu(x1) - for i, (conv, last_chance) in enumerate(self.conv_layers_2): - x2 = conv(x2, edge_index2) - if i < len(self.conv_layers_2) - 2: - x2 = 
relu(x2) - - uv = [] - for i in range(n1): - uv.append(paddle.concat([x1[i : i + 1, 0:], x2[i : i + 1, 0:]], axis=0)) - uv_ = paddle.concat(uv, axis=0) - return uv_ - - -class Ns_Chebnet(Layer): - def __init__(self, split): - super(Ns_Chebnet, self).__init__() - nci = 2 - nco = 1 - kk = 10 - self.split = split - feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] - self.conv_layers_1 = [] - self.conv_layers_2 = [] - self.conv_layers_3 = [] - for i in range(len(feature) - 1): - conv = ChebConv(feature[i], feature[i + 1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_1.append((conv, last_chance)) - conv = ChebConv(feature[i], feature[i + 1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_2.append((conv, last_chance)) - conv = ChebConv(feature[i], feature[i + 1], K=kk) - last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 - self.conv_layers_3.append((conv, last_chance)) - for i, (conv, last_chance) in enumerate(self.conv_layers_1): - setattr(self, f"conv{i+1}", conv) - setattr(self, f"last_chance{i}", last_chance) - for i, (conv, last_chance) in enumerate(self.conv_layers_2): - setattr(self, f"conv{i+9}", conv) - setattr(self, f"last_chance{i}", last_chance) - for i, (conv, last_chance) in enumerate(self.conv_layers_3): - setattr(self, f"conv{i+17}", conv) - setattr(self, f"last_chance{i}", last_chance) - - def forward(self, data): - x, edge_index = data.x, data.edge_index - n1 = self.split[0] - n2 = self.split[1] - n3 = self.split[2] - idx1 = [2 * i for i in range(n1)] - idx2 = [2 * i + 1 for i in range(n1)] - idx3 = [i + n1 * 2 for i in range(n2)] - x1 = x[idx1] - x2 = x[idx2] - x3 = x[idx3] - edge_index1 = edge_index[:, 0:n3] - edge_index2 = edge_index[:, n3 : 2 * n3] - edge_index3 = edge_index[:, 2 * n3 :] - - for i, (conv, last_chance) in enumerate(self.conv_layers_1): - x1 = conv(x1, edge_index1) - if i < len(self.conv_layers_1) - 2: - x1 = relu(x1) - for i, (conv, last_chance) in enumerate(self.conv_layers_2): - x2 = conv(x2, edge_index2) - if i < len(self.conv_layers_2) - 2: - x2 = relu(x2) - for i, (conv, last_chance) in enumerate(self.conv_layers_3): - x2 = conv(x3, edge_index3) - if i < len(self.conv_layers_3) - 2: - x2 = relu(x2) - - uv = [] - for i in range(n1): - uv.append(paddle.concat([x1[i : i + 1, 0:], x2[i : i + 1, 0:]], axis=0)) - uv_ = paddle.concat(uv, axis=0) - return paddle.concat([uv_, x3], axis=0) +# from pgl.nn import GCNConv +import sys + +import numpy as np +import paddle +import paddle.nn.initializer as Initializer + +sys.path.insert(0, "utils") +from ChebConv import ChebConv +from paddle.nn import Layer +from paddle.nn import Linear +from paddle.nn.functional import relu + +place = paddle.CUDAPlace(0) + + +def e2vcg2connectivity(e2vcg, type="iso"): + """ + e2vcg should be in np.array + """ + NnG = np.max(e2vcg) + 1 + NnE = e2vcg.shape[1] + if type == "ele": + connectivity = [] + for i in range(NnG): + positions = np.argwhere(e2vcg == i)[:, 0] + # pdb.set_trace() + for j in positions: + for k in range(NnE): + if e2vcg[j, k] != i: + connectivity.append(np.asarray([i, e2vcg[j, k]])) + return paddle.to_tensor( + paddle.floor( + paddle.to_tensor( + np.asarray(connectivity).T, place=place, dtype=paddle.float32 + ) + ), + dtype=paddle.int64, + ) + elif type == "iso": + connectivity = [[i for i in range(NnG)], [i for i in range(NnG)]] + return paddle.to_tensor( + paddle.floor( + paddle.to_tensor( + np.asarray(connectivity), place=place, 
dtype=paddle.float32 + ) + ), + dtype=paddle.int64, + ) + elif type == "eletruncate": + connectivity = [] + for i in range(NnG): + positions = np.argwhere(e2vcg == i)[:, 0] + for j in positions: + for k in range(NnE): + if e2vcg[j, k] != i: + connectivity.append(np.asarray([i, e2vcg[j, k]])) + return paddle.to_tensor( + paddle.floor( + paddle.to_tensor( + np.asarray(connectivity).T, place=place, dtype=paddle.float32 + ) + ), + dtype=paddle.int64, + ) + + ############################################## + + +############################################## + + +def last_chance0(maru): + f91 = paddle.concat([dark.weight.T.unsqueeze(0) for dark in maru.lins], axis=0) + f91 = paddle.create_parameter( + f91.shape, + paddle.float32, + attr=Initializer.Orthogonal(Initializer.calculate_gain("relu")), + ) + for i in range(len(maru.lins)): + w_ = paddle.create_parameter( + f91[i, :, :].T.shape, + paddle.float32, + attr=Initializer.Assign(f91[i, :, :].T), + ) + maru.lins[i].weight = w_ + return maru + + +def last_chance1(maru): + weights = paddle.concat([dark.weight.T.unsqueeze(0) for dark in maru.lins], axis=0) + weights = paddle.create_parameter( + weights.shape, weights.dtype, attr=Initializer.Orthogonal() + ) + for i in range(len(maru.lins)): + w_ = paddle.create_parameter( + maru.lins[i].weight.T.shape, + paddle.float32, + attr=Initializer.Assign(weights[i, :, :].T), + ) + maru.lins[i].weight = w_ + return maru + + +class PossionNet(Layer): + def __init__(self, nci=2, nco=1, kk=10): + super(PossionNet, self).__init__() + feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] + self.conv_layers = [] + for i in range(len(feature) - 1): + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers.append((conv, last_chance)) + for i, (conv, last_chance) in enumerate(self.conv_layers): + setattr(self, f"conv{i+1}", conv) + setattr(self, f"last_chance{i}", last_chance) + self.source = paddle.to_tensor([0.25]) + self.source = paddle.create_parameter( + self.source.shape, + dtype=paddle.float32, + attr=Initializer.Assign(self.source), + ) + + def forward(self, data): + x, edge_index = data.x, data.edge_index + for i, (conv, last_chance) in enumerate(self.conv_layers): + x = conv(x, edge_index) + if i < len(self.conv_layers) - 2: + x = relu(x) + return x + + +######### +class LinearElasticityNet2D(Layer): + def __init__(self): + super(LinearElasticityNet2D, self).__init__() + nci = 2 + nco = 1 + kk = 10 + feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] + self.conv_layers_1 = [] + self.conv_layers_2 = [] + for i in range(len(feature) - 1): + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_1.append((conv, last_chance)) + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_2.append((conv, last_chance)) + for i, (conv, last_chance) in enumerate(self.conv_layers_1): + setattr(self, f"conv{i+1}", conv) + setattr(self, f"last_chance{i}", last_chance) + for i, (conv, last_chance) in enumerate(self.conv_layers_2): + setattr(self, f"conv{i+9}", conv) + setattr(self, f"last_chance{i}", last_chance) + + self.source = paddle.to_tensor([0.1, 0.1]) + self.source.stop_gradient = False + + def forward(self, data): + x, edge_index = data.x, data.edge_index + n1 = int(max(x.shape) / 2) + idx1 = [2 * i for i in range(n1)] + idx2 = [2 * i + 1 for i in range(n1)] + x1 = 
x[idx1] + x2 = x[idx2] + edge_index1 = edge_index + edge_index2 = edge_index + for i, (conv, last_chance) in enumerate(self.conv_layers_1): + x1 = conv(x1, edge_index1) + if i < len(self.conv_layers_1) - 2: + x1 = relu(x1) + for i, (conv, last_chance) in enumerate(self.conv_layers_2): + x2 = conv(x2, edge_index2) + if i < len(self.conv_layers_2) - 2: + x2 = relu(x2) + + uv = [] + for i in range(n1): + uv.append(paddle.concat([x1[i : i + 1, 0:], x2[i : i + 1, 0:]], axis=0)) + uv_ = paddle.concat(uv, axis=0) + return uv_ + + +class Ns_Chebnet(Layer): + def __init__(self, split): + super(Ns_Chebnet, self).__init__() + nci = 2 + nco = 1 + kk = 10 + self.split = split + feature = [nci, 32, 64, 128, 256, 128, 64, 32, nco] + self.conv_layers_1 = [] + self.conv_layers_2 = [] + self.conv_layers_3 = [] + for i in range(len(feature) - 1): + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_1.append((conv, last_chance)) + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_2.append((conv, last_chance)) + conv = ChebConv(feature[i], feature[i + 1], K=kk) + last_chance = last_chance0 if i < len(feature) - 2 else last_chance1 + self.conv_layers_3.append((conv, last_chance)) + for i, (conv, last_chance) in enumerate(self.conv_layers_1): + setattr(self, f"conv{i+1}", conv) + setattr(self, f"last_chance{i}", last_chance) + for i, (conv, last_chance) in enumerate(self.conv_layers_2): + setattr(self, f"conv{i+9}", conv) + setattr(self, f"last_chance{i}", last_chance) + for i, (conv, last_chance) in enumerate(self.conv_layers_3): + setattr(self, f"conv{i+17}", conv) + setattr(self, f"last_chance{i}", last_chance) + + def forward(self, data): + x, edge_index = data.x, data.edge_index + n1 = self.split[0] + n2 = self.split[1] + n3 = self.split[2] + idx1 = [2 * i for i in range(n1)] + idx2 = [2 * i + 1 for i in range(n1)] + idx3 = [i + n1 * 2 for i in range(n2)] + x1 = x[idx1] + x2 = x[idx2] + x3 = x[idx3] + edge_index1 = edge_index[:, 0:n3] + edge_index2 = edge_index[:, n3 : 2 * n3] + edge_index3 = edge_index[:, 2 * n3 :] + + for i, (conv, last_chance) in enumerate(self.conv_layers_1): + x1 = conv(x1, edge_index1) + if i < len(self.conv_layers_1) - 2: + x1 = relu(x1) + for i, (conv, last_chance) in enumerate(self.conv_layers_2): + x2 = conv(x2, edge_index2) + if i < len(self.conv_layers_2) - 2: + x2 = relu(x2) + for i, (conv, last_chance) in enumerate(self.conv_layers_3): + x2 = conv(x3, edge_index3) + if i < len(self.conv_layers_3) - 2: + x2 = relu(x2) + + uv = [] + for i in range(n1): + uv.append(paddle.concat([x1[i : i + 1, 0:], x2[i : i + 1, 0:]], axis=0)) + uv_ = paddle.concat(uv, axis=0) + return paddle.concat([uv_, x3], axis=0) diff --git a/jointContribution/graphGalerkin/source/TensorFEMCore.py b/jointContribution/graphGalerkin/source/TensorFEMCore.py index 9815510621..63892ddf24 100644 --- a/jointContribution/graphGalerkin/source/TensorFEMCore.py +++ b/jointContribution/graphGalerkin/source/TensorFEMCore.py @@ -1,436 +1,436 @@ -import os -import pdb -import time - -import matplotlib.pyplot as plt -import numpy as np -import paddle -from paddle.nn import MSELoss -from paddle.optimizer import Adam -from scipy import sparse - -place = paddle.CUDAPlace(0) - - -def eval_unassembled_resjac_claw_cg( - U, transf_data, elem, elem_data, ldof2gdof_var, parsfuncI, parsfuncB, model=None -): - """Evaluate elementwise residual and jacobian of 
conservation law in CG""" - nelem = elem_data.nelem - neqn_per_elem = elem.Tv_eqn_ref.shape[0] - nvar_per_elem = elem.Tv_var_ref.shape[0] - Re = np.zeros([neqn_per_elem, nelem]) - dRe = np.zeros([neqn_per_elem, nvar_per_elem, nelem]) - Re = Double(Re) - dRe = Double(dRe) - Re.stop_gradient = False - dRe.stop_gradient = False - Re_ = [] - dRe_ = [] - for e in range(nelem): - Ue = U[ldof2gdof_var[:, e]] - Re0_, dRe0_ = intg_elem_claw_vol( - Ue, transf_data, elem, elem_data, e, parsfuncI, model - ) - Re1_, dRe1_ = intg_elem_claw_extface( - Ue, transf_data, elem, elem_data, e, parsfuncB - ) - Re_.append(ReshapeFix((Re0_ + Re1_), [neqn_per_elem, 1], order="F")) - dRe_.append( - ReshapeFix((dRe0_ + dRe1_), [neqn_per_elem, nvar_per_elem, 1], order="C") - ) - Re = paddle.concat(Re_, axis=1) - dRe = paddle.concat(dRe_, axis=2) - return Re, dRe - - -def create_fem_resjac( - fespc, - Uf, - transf_data, - elem, - elem_data, - ldof2gdof_eqn, - ldof2gdof_var, - e2e, - spmat, - dbc, - enforce_idx=None, - parsfuncI=None, - parsfuncB=None, - model=None, -): - """Create global residual(loss) and jacobian of conservation law in CG""" - ndof_var = np.max(ldof2gdof_var[:]) + 1 - dbc_idx = paddle.to_tensor(dbc.dbc_idx) - dbc_val = dbc.dbc_val - free_idx = dbc.free_idx - Uf = ReshapeFix(Uf, [ndof_var, 1], "C") - U_temp = paddle.to_tensor( - paddle.zeros([ndof_var, 1]), dtype="float32", place=place, stop_gradient=False - ) - src = paddle.to_tensor( - dbc_val, dtype="float32", place=place, stop_gradient=False - ).reshape([len(dbc_val), 1]) - paddle.index_select(Uf, dbc_idx) - U_temp = paddle.scatter_nd_add( - U_temp, dbc_idx.reshape([-1, 1]), src.reshape([-1, 1]) - ) - U_temp[dbc_idx] = ( - paddle.to_tensor( - dbc_val, dtype="float32", place=place, stop_gradient=False - ).reshape([len(dbc_val), 1]) - - Uf[dbc_idx] - ) - U = U_temp + Uf - # U is the GCNN output hardimpose BC but can backPP - if fespc == "cg" or fespc == "CG": - Re, dRe = eval_unassembled_resjac_claw_cg( - U, transf_data, elem, elem_data, ldof2gdof_var, parsfuncI, parsfuncB, model - ) - dR = assemble_nobc_mat(dRe, spmat.cooidx, spmat.lmat2gmat) - else: - raise ValueError("FE space only support cg!") - R = assemble_nobc_vec(Re, ldof2gdof_eqn) - if enforce_idx == None: - Rf = R[free_idx] - else: - Rf = R[enforce_idx] - dRf = dR.tocsr()[free_idx, :] - dRf = dRf.tocsr()[:, free_idx].T - print("Max Rf ===============================", paddle.max(paddle.abs(Rf))) - return Rf, dRf, dbc - - -def intg_elem_claw_vol(Ue, transf_data, elem, elem_data, e, parsfuncI=None, model=None): - """Intergrate elementwise internal volume of element residual and jacobian of conservation law""" - [neqn_per_elem, neqn, ndimP1, nq] = elem.Tv_eqn_ref.shape - [nvar_per_elem, nvar, _, _] = elem.Tv_var_ref.shape - ndim = ndimP1 - 1 - wq = elem.wq - detG = transf_data.detG[:, e] - Tvar = elem_data.Tv_var_phys[:, :, :, :, e].reshape( - [nvar_per_elem, nvar * (ndim + 1) * nq], order="F" - ) - Re = np.zeros([neqn_per_elem, 1]) - dRe = np.zeros([neqn_per_elem, nvar_per_elem]) - Tvar_tensor = paddle.to_tensor(Tvar, place=place, dtype=paddle.float32) - UQq = ReshapeFix(paddle.matmul(Tvar_tensor.T, Ue), [nvar, ndim + 1, nq], "F") - w = wq * detG - Re = Double(Re) - dRe = Double(dRe) - Re.stop_gradient = False - dRe.stop_gradient = False - for k in range(nq): - Teqn = elem_data.Tv_eqn_phys[:, :, :, k, e].reshape( - [neqn_per_elem, neqn * (ndim + 1)], order="F" - ) - Tvar = elem_data.Tv_var_phys[:, :, :, k, e].reshape( - [nvar_per_elem, nvar * (ndim + 1)], order="F" - ) - x = 
transf_data.xq[:, k, e] - if parsfuncI == None: - pars = elem_data.vol_pars[:, k, e] - else: - pars = parsfuncI(x) - SF, dSFdU = elem.eqn.srcflux(UQq[:, :, k], pars, x) - dSFdU = ReshapeFix(dSFdU, [neqn * (ndim + 1), nvar * (ndim + 1)], order="F") - Teqn = Double(Teqn) - Tvar = Double(Tvar) - SF = ReshapeFix(SF, [len(SF.flatten()), 1]) - Re = Re - w[k] * ReshapeFix(paddle.matmul(Teqn, SF), Re.shape, order="F") - dRe = dRe - w[k] * paddle.matmul(Teqn, paddle.matmul(dSFdU, Tvar.T)) - return Re, dRe - - -def intg_elem_claw_extface(Ue, transf_data, elem, elem_data, e, parsfuncB=None): - """Intergrate elementwise the boundary face of element residual and jacobian of conservation law""" - [neqn_per_elem, neqn, ndimP1, nqf, nf] = elem.Tvf_eqn_ref.shape - [nvar_per_elem, nvar, _, _, _] = elem.Tvf_var_ref.shape - ndim = ndimP1 - 1 - wqf = elem.wqf - sigf = transf_data.sigf[:, :, e] - nbcnbr = elem_data.nbcnbr[:, e] - Re = np.zeros([neqn_per_elem, 1]) - dRe = np.zeros([neqn_per_elem, nvar_per_elem]) - wf = wqf[:].reshape([len(wqf), 1]) * sigf - Re = Double(Re) - dRe = Double(dRe) - Re.stop_gradient = False - dRe.stop_gradient = False - for f in range(nf): - if np.isnan(nbcnbr[f]): - continue - Tvar = np.reshape( - elem_data.Tvf_var_phys[:, :, :, :, f, e], - [nvar_per_elem, nvar * (ndim + 1) * nqf], - order="F", - ) - Tvar = Double(Tvar) - UQqf = ReshapeFix(paddle.matmul(Tvar.T, Ue), [nvar, ndim + 1, nqf], order="F") - for k in range(nqf): - x = transf_data.xqf[:, k, f, e] - n = transf_data.n[:, k, f, e] - Teqn = elem_data.Tvf_eqn_phys[:, :, 0, k, f, e] - Tvar = np.reshape( - elem_data.Tvf_var_phys[:, :, :, k, f, e], - [nvar_per_elem, nvar * (ndim + 1)], - order="F", - ) - Teqn = Double(Teqn) - Tvar = Double(Tvar) - if parsfuncB == None: - pars = elem_data.bnd_pars[:, k, f, e] - else: - pars = parsfuncB(x) - _, _, Fb, dFbdU = elem.eqn.bndstvcflux(nbcnbr[f], UQqf[:, :, k], pars, x, n) - dFbdU = ReshapeFix(dFbdU, [neqn, nvar * (ndim + 1)], order="F") - Re = Re + wf[k, f] * paddle.matmul(Teqn, Fb) - dRe = dRe + wf[k, f] * paddle.matmul(Teqn, paddle.matmul(dFbdU, Tvar.T)) - return Re, dRe - - -def assemble_nobc_mat(Me, cooidx, lmat2gmat): - """Assembly global jacobian of conservation law (currently no use)""" - Me = Me.detach().cpu().numpy() - nnz = cooidx.shape[0] - cooidx = cooidx.astype("int") - Mval = np.zeros(shape=[nnz, 1]) - Mval = Double(Mval) - Mval.stop_gradient = False - idx = paddle.to_tensor(lmat2gmat.reshape([-1, 1])) - src = paddle.to_tensor(Me.reshape([-1, 1])) - Mval = paddle.scatter_nd_add(Mval, idx, src).squeeze(-1) - M = sparse.coo_matrix((Mval, (cooidx[:, 0], cooidx[:, 1]))) - return M - - -def assemble_nobc_vec(Fe, ldof2gdof_eqn): - """Assembly global residual of conservation law (!!very useful!!)""" - ndof = np.max(ldof2gdof_eqn[:]) + 1 - nelem = Fe.shape[1] - F = np.zeros(shape=[ndof, 1]) - F = Double(F) - F.stop_gradient = False - idx = paddle.to_tensor(ldof2gdof_eqn.reshape([-1, 1])) - src = Fe.reshape([-1, 1]) - F = paddle.scatter_nd_add(F, idx, src) - return F - - -def solve_fem_GCNN( - DataLoader, - LossF, - model, - tol=1e-3, - maxit=2000, - qoiidx=None, - softidx=None, - penaltyConstant=None, -): - """Wrapper""" - startime = time.time() - model, info = solve_SGD( - DataLoader, LossF, model, tol, maxit, qoiidx, softidx, penaltyConstant - ) - print("wallclock time of all epochs = ", time.time() - startime) - return model, info - - -def solve_SGD( - DataLoader, - LossF, - model, - tol, - maxit, - qoiidx, - softidx, - penaltyConstant, - plotFlag="True", -): - """ - DataLoader: 
training data - fcn: loss function - model: GCNN model to be trained - tol: the trauncation of loss function - maxit: the maximum number of epoch - """ - optimizer = Adam(parameters=model.parameters(), learning_rate=0.001) - criterion = MSELoss() - Er = [] - Loss = [] - tol_e = [ - 1, - 0.1, - 0.09, - 0.08, - 0.07, - 0.06, - 0.05, - 0.04, - 0.03, - 0.02, - 0.01, - 0.005, - 0.001, - 0.0009, - 0.0008, - 0.0007, - 0.0006, - 0.0005, - 0.0004, - 0.0003, - 0.0002, - 0.0001, - 0.00001, - ] - idx_tol_e = 0 - for epoch in range(maxit): - print("epoch = ", epoch) - startime = time.time() - er, loss, model = trainmodel( - DataLoader, - LossF, - model, - optimizer, - criterion, - qoiidx, - softidx, - penaltyConstant, - ) - print("Solution er = ", er) - print("wallclock time of this epoch= ", time.time() - startime) - Er.append(er) - Loss.append(loss) - if loss < tol or er < tol_e[idx_tol_e]: - idx_tol_e = idx_tol_e + 1 - print("The training reaches the expected loss!") - pass - np.savetxt("./Er_" + str(er) + "Epoch_" + str(epoch) + ".txt", np.asarray(Er)) - np.savetxt("./Loss_" + str(loss) + "Epoch_" + str(epoch) + ".txt", np.asarray(Loss)) - if plotFlag: - fig = plt.figure() - ax = plt.subplot(1, 1, 1) - ax.plot(Er, label="Relative Error") - ax.plot(Loss, label="|Residual|") - ax.legend() - ax.set_xlabel("Epoch") - ax.set_yscale("log") - fig.savefig("./LossResidual.png", bbox_inches="tight") - plt.show() - - return model, {"Er": np.asarray(Er), "Loss": np.asarray(Loss)} - - -def trainmodel( - DataLoader, LossF, model, optimizer, criterion, qoiidx, softidx, penaltyConstant -): - model.train() - er_0 = 0 - loss_0 = 0 - erlist = [] - ReList = [] - optimizer.clear_grad() - for data in DataLoader: - input = data[0] - fcn_id = data[0].y[0, 0] - truth = data[0].y[1:, 0:] - fcn = LossF[int(fcn_id)] - assert ( - int(fcn_id) - fcn_id - ) ** 2 < 1e-12, "The loss function is selected right!" 
- tic = time.time() - output = model(input) - Re, dRe, dbc = fcn(output) - print("wallclock time of evl Res= ", time.time() - tic) - ReList.append(paddle.abs(Re)) - solution = ReshapeFix(paddle.clone(output), [len(output.flatten()), 1], "C") - solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) - er_0 = ( - er_0 - + paddle.sqrt( - criterion(solution, truth) / criterion(truth, truth * 0) - ).item() - ) - erlist.append( - paddle.sqrt(criterion(solution, truth) / criterion(truth, truth * 0)).item() - ) - loss = ReList[0] * 0 - for i in range(len(ReList)): - loss = loss + ReList[i] - print("max Res=", loss.abs().max().item()) - loss = paddle.norm(loss) - if softidx is not None and penaltyConstant is not None: - print( - "DataLoss = ", - criterion(solution[softidx], truth[softidx]) * penaltyConstant, - ) - loss = criterion(solution[softidx], truth[softidx]) * penaltyConstant + loss - else: - pass - if qoiidx is not None: - QOI_ER = paddle.sqrt( - criterion(solution[qoiidx], truth[qoiidx]) - / criterion(truth[qoiidx], truth[qoiidx] * 0) - ).item() - print("QOI Error=", QOI_ER) - os.system("touch QOIError.txt") - os.system("touch QOIValue.txt") - file1 = open("QOIError.txt", "a") - file1.writelines(str(QOI_ER) + "\n") - file2 = open("QOIValue.txt", "a") - file2.writelines( - str(solution[qoiidx].detach().cpu().numpy().reshape([1, -1])[:]) + "\n" - ) - file1.close() - file2.close() - else: - pass - tic = time.time() - loss.backward() - print("wallclock time of this BP= ", time.time() - tic) - optimizer.step() - print(">>>>>>>max error<<<<<<< ====================================", max(erlist)) - try: - print(">>>>>>>model source<<<<<<< =======================", model.source) - os.system("touch ModelSource.txt") - os.system("echo ModelSource.txt") - file3 = open("ModelSource.txt", "a") - object2write = model.source.detach().cpu().numpy().reshape([1, -1]) - for ifer in range(2): - try: - file3.writelines(str(object2write[0, ifer]) + "\n") - except: - pass - file3.close() - except: - pass - return er_0 / len(DataLoader), loss.norm().item() / len(DataLoader), model - - -def Reshape(input, Shape, order="F"): - if order == "F": - return paddle.reshape( - input, [Shape[len(Shape) - 1 - i] for i in range(len(Shape))] - ).permute([len(Shape) - 1 - i for i in range(len(Shape))]) - elif order == "C": - return paddle.reshape(input, Shape) - else: - raise ValueError("Reshape Only Support Fortran or C") - - -def ReshapeFix(input, Shape, order="F"): - if order == "F": - return paddle.reshape( - input.T, [Shape[len(Shape) - 1 - i] for i in range(len(Shape))] - ).transpose([len(Shape) - 1 - i for i in range(len(Shape))]) - elif order == "C": - return paddle.reshape(input, Shape) - else: - raise ValueError("Reshape Only Support Fortran or C") - - -def Double(A): - if len(A.shape) == 0 or (len(A.shape) == 1 and A.shape[0] == 1): - return paddle.to_tensor([A], place=place, dtype=paddle.float32).reshape([1, 1]) - else: - return paddle.to_tensor(A, place=place, dtype=paddle.float32) +import os +import pdb +import time + +import matplotlib.pyplot as plt +import numpy as np +import paddle +from paddle.nn import MSELoss +from paddle.optimizer import Adam +from scipy import sparse + +place = paddle.CUDAPlace(0) + + +def eval_unassembled_resjac_claw_cg( + U, transf_data, elem, elem_data, ldof2gdof_var, parsfuncI, parsfuncB, model=None +): + """Evaluate elementwise residual and jacobian of conservation law in CG""" + nelem = elem_data.nelem + neqn_per_elem = elem.Tv_eqn_ref.shape[0] + nvar_per_elem = 
elem.Tv_var_ref.shape[0] + Re = np.zeros([neqn_per_elem, nelem]) + dRe = np.zeros([neqn_per_elem, nvar_per_elem, nelem]) + Re = Double(Re) + dRe = Double(dRe) + Re.stop_gradient = False + dRe.stop_gradient = False + Re_ = [] + dRe_ = [] + for e in range(nelem): + Ue = U[ldof2gdof_var[:, e]] + Re0_, dRe0_ = intg_elem_claw_vol( + Ue, transf_data, elem, elem_data, e, parsfuncI, model + ) + Re1_, dRe1_ = intg_elem_claw_extface( + Ue, transf_data, elem, elem_data, e, parsfuncB + ) + Re_.append(ReshapeFix((Re0_ + Re1_), [neqn_per_elem, 1], order="F")) + dRe_.append( + ReshapeFix((dRe0_ + dRe1_), [neqn_per_elem, nvar_per_elem, 1], order="C") + ) + Re = paddle.concat(Re_, axis=1) + dRe = paddle.concat(dRe_, axis=2) + return Re, dRe + + +def create_fem_resjac( + fespc, + Uf, + transf_data, + elem, + elem_data, + ldof2gdof_eqn, + ldof2gdof_var, + e2e, + spmat, + dbc, + enforce_idx=None, + parsfuncI=None, + parsfuncB=None, + model=None, +): + """Create global residual(loss) and jacobian of conservation law in CG""" + ndof_var = np.max(ldof2gdof_var[:]) + 1 + dbc_idx = paddle.to_tensor(dbc.dbc_idx) + dbc_val = dbc.dbc_val + free_idx = dbc.free_idx + Uf = ReshapeFix(Uf, [ndof_var, 1], "C") + U_temp = paddle.to_tensor( + paddle.zeros([ndof_var, 1]), dtype="float32", place=place, stop_gradient=False + ) + src = paddle.to_tensor( + dbc_val, dtype="float32", place=place, stop_gradient=False + ).reshape([len(dbc_val), 1]) - paddle.index_select(Uf, dbc_idx) + U_temp = paddle.scatter_nd_add( + U_temp, dbc_idx.reshape([-1, 1]), src.reshape([-1, 1]) + ) + U_temp[dbc_idx] = ( + paddle.to_tensor( + dbc_val, dtype="float32", place=place, stop_gradient=False + ).reshape([len(dbc_val), 1]) + - Uf[dbc_idx] + ) + U = U_temp + Uf + # U is the GCNN output hardimpose BC but can backPP + if fespc == "cg" or fespc == "CG": + Re, dRe = eval_unassembled_resjac_claw_cg( + U, transf_data, elem, elem_data, ldof2gdof_var, parsfuncI, parsfuncB, model + ) + dR = assemble_nobc_mat(dRe, spmat.cooidx, spmat.lmat2gmat) + else: + raise ValueError("FE space only support cg!") + R = assemble_nobc_vec(Re, ldof2gdof_eqn) + if enforce_idx == None: + Rf = R[free_idx] + else: + Rf = R[enforce_idx] + dRf = dR.tocsr()[free_idx, :] + dRf = dRf.tocsr()[:, free_idx].T + print("Max Rf ===============================", paddle.max(paddle.abs(Rf))) + return Rf, dRf, dbc + + +def intg_elem_claw_vol(Ue, transf_data, elem, elem_data, e, parsfuncI=None, model=None): + """Intergrate elementwise internal volume of element residual and jacobian of conservation law""" + [neqn_per_elem, neqn, ndimP1, nq] = elem.Tv_eqn_ref.shape + [nvar_per_elem, nvar, _, _] = elem.Tv_var_ref.shape + ndim = ndimP1 - 1 + wq = elem.wq + detG = transf_data.detG[:, e] + Tvar = elem_data.Tv_var_phys[:, :, :, :, e].reshape( + [nvar_per_elem, nvar * (ndim + 1) * nq], order="F" + ) + Re = np.zeros([neqn_per_elem, 1]) + dRe = np.zeros([neqn_per_elem, nvar_per_elem]) + Tvar_tensor = paddle.to_tensor(Tvar, place=place, dtype=paddle.float32) + UQq = ReshapeFix(paddle.matmul(Tvar_tensor.T, Ue), [nvar, ndim + 1, nq], "F") + w = wq * detG + Re = Double(Re) + dRe = Double(dRe) + Re.stop_gradient = False + dRe.stop_gradient = False + for k in range(nq): + Teqn = elem_data.Tv_eqn_phys[:, :, :, k, e].reshape( + [neqn_per_elem, neqn * (ndim + 1)], order="F" + ) + Tvar = elem_data.Tv_var_phys[:, :, :, k, e].reshape( + [nvar_per_elem, nvar * (ndim + 1)], order="F" + ) + x = transf_data.xq[:, k, e] + if parsfuncI == None: + pars = elem_data.vol_pars[:, k, e] + else: + pars = parsfuncI(x) + SF, dSFdU 
= elem.eqn.srcflux(UQq[:, :, k], pars, x) + dSFdU = ReshapeFix(dSFdU, [neqn * (ndim + 1), nvar * (ndim + 1)], order="F") + Teqn = Double(Teqn) + Tvar = Double(Tvar) + SF = ReshapeFix(SF, [len(SF.flatten()), 1]) + Re = Re - w[k] * ReshapeFix(paddle.matmul(Teqn, SF), Re.shape, order="F") + dRe = dRe - w[k] * paddle.matmul(Teqn, paddle.matmul(dSFdU, Tvar.T)) + return Re, dRe + + +def intg_elem_claw_extface(Ue, transf_data, elem, elem_data, e, parsfuncB=None): + """Intergrate elementwise the boundary face of element residual and jacobian of conservation law""" + [neqn_per_elem, neqn, ndimP1, nqf, nf] = elem.Tvf_eqn_ref.shape + [nvar_per_elem, nvar, _, _, _] = elem.Tvf_var_ref.shape + ndim = ndimP1 - 1 + wqf = elem.wqf + sigf = transf_data.sigf[:, :, e] + nbcnbr = elem_data.nbcnbr[:, e] + Re = np.zeros([neqn_per_elem, 1]) + dRe = np.zeros([neqn_per_elem, nvar_per_elem]) + wf = wqf[:].reshape([len(wqf), 1]) * sigf + Re = Double(Re) + dRe = Double(dRe) + Re.stop_gradient = False + dRe.stop_gradient = False + for f in range(nf): + if np.isnan(nbcnbr[f]): + continue + Tvar = np.reshape( + elem_data.Tvf_var_phys[:, :, :, :, f, e], + [nvar_per_elem, nvar * (ndim + 1) * nqf], + order="F", + ) + Tvar = Double(Tvar) + UQqf = ReshapeFix(paddle.matmul(Tvar.T, Ue), [nvar, ndim + 1, nqf], order="F") + for k in range(nqf): + x = transf_data.xqf[:, k, f, e] + n = transf_data.n[:, k, f, e] + Teqn = elem_data.Tvf_eqn_phys[:, :, 0, k, f, e] + Tvar = np.reshape( + elem_data.Tvf_var_phys[:, :, :, k, f, e], + [nvar_per_elem, nvar * (ndim + 1)], + order="F", + ) + Teqn = Double(Teqn) + Tvar = Double(Tvar) + if parsfuncB == None: + pars = elem_data.bnd_pars[:, k, f, e] + else: + pars = parsfuncB(x) + _, _, Fb, dFbdU = elem.eqn.bndstvcflux(nbcnbr[f], UQqf[:, :, k], pars, x, n) + dFbdU = ReshapeFix(dFbdU, [neqn, nvar * (ndim + 1)], order="F") + Re = Re + wf[k, f] * paddle.matmul(Teqn, Fb) + dRe = dRe + wf[k, f] * paddle.matmul(Teqn, paddle.matmul(dFbdU, Tvar.T)) + return Re, dRe + + +def assemble_nobc_mat(Me, cooidx, lmat2gmat): + """Assembly global jacobian of conservation law (currently no use)""" + Me = Me.detach().cpu().numpy() + nnz = cooidx.shape[0] + cooidx = cooidx.astype("int") + Mval = np.zeros(shape=[nnz, 1]) + Mval = Double(Mval) + Mval.stop_gradient = False + idx = paddle.to_tensor(lmat2gmat.reshape([-1, 1])) + src = paddle.to_tensor(Me.reshape([-1, 1])) + Mval = paddle.scatter_nd_add(Mval, idx, src).squeeze(-1) + M = sparse.coo_matrix((Mval, (cooidx[:, 0], cooidx[:, 1]))) + return M + + +def assemble_nobc_vec(Fe, ldof2gdof_eqn): + """Assembly global residual of conservation law (!!very useful!!)""" + ndof = np.max(ldof2gdof_eqn[:]) + 1 + nelem = Fe.shape[1] + F = np.zeros(shape=[ndof, 1]) + F = Double(F) + F.stop_gradient = False + idx = paddle.to_tensor(ldof2gdof_eqn.reshape([-1, 1])) + src = Fe.reshape([-1, 1]) + F = paddle.scatter_nd_add(F, idx, src) + return F + + +def solve_fem_GCNN( + DataLoader, + LossF, + model, + tol=1e-3, + maxit=2000, + qoiidx=None, + softidx=None, + penaltyConstant=None, +): + """Wrapper""" + startime = time.time() + model, info = solve_SGD( + DataLoader, LossF, model, tol, maxit, qoiidx, softidx, penaltyConstant + ) + print("wallclock time of all epochs = ", time.time() - startime) + return model, info + + +def solve_SGD( + DataLoader, + LossF, + model, + tol, + maxit, + qoiidx, + softidx, + penaltyConstant, + plotFlag="True", +): + """ + DataLoader: training data + fcn: loss function + model: GCNN model to be trained + tol: the trauncation of loss function + maxit: the 
maximum number of epoch + """ + optimizer = Adam(parameters=model.parameters(), learning_rate=0.001) + criterion = MSELoss() + Er = [] + Loss = [] + tol_e = [ + 1, + 0.1, + 0.09, + 0.08, + 0.07, + 0.06, + 0.05, + 0.04, + 0.03, + 0.02, + 0.01, + 0.005, + 0.001, + 0.0009, + 0.0008, + 0.0007, + 0.0006, + 0.0005, + 0.0004, + 0.0003, + 0.0002, + 0.0001, + 0.00001, + ] + idx_tol_e = 0 + for epoch in range(maxit): + print("epoch = ", epoch) + startime = time.time() + er, loss, model = trainmodel( + DataLoader, + LossF, + model, + optimizer, + criterion, + qoiidx, + softidx, + penaltyConstant, + ) + print("Solution er = ", er) + print("wallclock time of this epoch= ", time.time() - startime) + Er.append(er) + Loss.append(loss) + if loss < tol or er < tol_e[idx_tol_e]: + idx_tol_e = idx_tol_e + 1 + print("The training reaches the expected loss!") + pass + np.savetxt("./Er_" + str(er) + "Epoch_" + str(epoch) + ".txt", np.asarray(Er)) + np.savetxt("./Loss_" + str(loss) + "Epoch_" + str(epoch) + ".txt", np.asarray(Loss)) + if plotFlag: + fig = plt.figure() + ax = plt.subplot(1, 1, 1) + ax.plot(Er, label="Relative Error") + ax.plot(Loss, label="|Residual|") + ax.legend() + ax.set_xlabel("Epoch") + ax.set_yscale("log") + fig.savefig("./LossResidual.png", bbox_inches="tight") + plt.show() + + return model, {"Er": np.asarray(Er), "Loss": np.asarray(Loss)} + + +def trainmodel( + DataLoader, LossF, model, optimizer, criterion, qoiidx, softidx, penaltyConstant +): + model.train() + er_0 = 0 + loss_0 = 0 + erlist = [] + ReList = [] + optimizer.clear_grad() + for data in DataLoader: + input = data[0] + fcn_id = data[0].y[0, 0] + truth = data[0].y[1:, 0:] + fcn = LossF[int(fcn_id)] + assert ( + int(fcn_id) - fcn_id + ) ** 2 < 1e-12, "The loss function is selected right!" 
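The assert above relies on how each training sample packs its targets: row 0 of data.y carries the loss-function index and the remaining rows carry the ground-truth field. A minimal sketch of that packing (illustrative only; the helper name is hypothetical):

import paddle

def pack_targets(fcn_id, truth):
    # Stack a scalar loss-function index on top of an (N, 1) ground-truth tensor,
    # so that y[0, 0] recovers fcn_id and y[1:, 0:] recovers truth.
    head = paddle.full([1, truth.shape[1]], float(fcn_id), dtype=truth.dtype)
    return paddle.concat([head, truth], axis=0)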
+ tic = time.time() + output = model(input) + Re, dRe, dbc = fcn(output) + print("wallclock time of evl Res= ", time.time() - tic) + ReList.append(paddle.abs(Re)) + solution = ReshapeFix(paddle.clone(output), [len(output.flatten()), 1], "C") + solution[dbc.dbc_idx] = Double(dbc.dbc_val.reshape([len(dbc.dbc_val), 1])) + er_0 = ( + er_0 + + paddle.sqrt( + criterion(solution, truth) / criterion(truth, truth * 0) + ).item() + ) + erlist.append( + paddle.sqrt(criterion(solution, truth) / criterion(truth, truth * 0)).item() + ) + loss = ReList[0] * 0 + for i in range(len(ReList)): + loss = loss + ReList[i] + print("max Res=", loss.abs().max().item()) + loss = paddle.norm(loss) + if softidx is not None and penaltyConstant is not None: + print( + "DataLoss = ", + criterion(solution[softidx], truth[softidx]) * penaltyConstant, + ) + loss = criterion(solution[softidx], truth[softidx]) * penaltyConstant + loss + else: + pass + if qoiidx is not None: + QOI_ER = paddle.sqrt( + criterion(solution[qoiidx], truth[qoiidx]) + / criterion(truth[qoiidx], truth[qoiidx] * 0) + ).item() + print("QOI Error=", QOI_ER) + os.system("touch QOIError.txt") + os.system("touch QOIValue.txt") + file1 = open("QOIError.txt", "a") + file1.writelines(str(QOI_ER) + "\n") + file2 = open("QOIValue.txt", "a") + file2.writelines( + str(solution[qoiidx].detach().cpu().numpy().reshape([1, -1])[:]) + "\n" + ) + file1.close() + file2.close() + else: + pass + tic = time.time() + loss.backward() + print("wallclock time of this BP= ", time.time() - tic) + optimizer.step() + print(">>>>>>>max error<<<<<<< ====================================", max(erlist)) + try: + print(">>>>>>>model source<<<<<<< =======================", model.source) + os.system("touch ModelSource.txt") + os.system("echo ModelSource.txt") + file3 = open("ModelSource.txt", "a") + object2write = model.source.detach().cpu().numpy().reshape([1, -1]) + for ifer in range(2): + try: + file3.writelines(str(object2write[0, ifer]) + "\n") + except: + pass + file3.close() + except: + pass + return er_0 / len(DataLoader), loss.norm().item() / len(DataLoader), model + + +def Reshape(input, Shape, order="F"): + if order == "F": + return paddle.reshape( + input, [Shape[len(Shape) - 1 - i] for i in range(len(Shape))] + ).permute([len(Shape) - 1 - i for i in range(len(Shape))]) + elif order == "C": + return paddle.reshape(input, Shape) + else: + raise ValueError("Reshape Only Support Fortran or C") + + +def ReshapeFix(input, Shape, order="F"): + if order == "F": + return paddle.reshape( + input.T, [Shape[len(Shape) - 1 - i] for i in range(len(Shape))] + ).transpose([len(Shape) - 1 - i for i in range(len(Shape))]) + elif order == "C": + return paddle.reshape(input, Shape) + else: + raise ValueError("Reshape Only Support Fortran or C") + + +def Double(A): + if len(A.shape) == 0 or (len(A.shape) == 1 and A.shape[0] == 1): + return paddle.to_tensor([A], place=place, dtype=paddle.float32).reshape([1, 1]) + else: + return paddle.to_tensor(A, place=place, dtype=paddle.float32) diff --git a/jointContribution/graphGalerkin/source/setup_prob_eqn_handcode.py b/jointContribution/graphGalerkin/source/setup_prob_eqn_handcode.py index c5c3e83a56..8cd486fcc0 100644 --- a/jointContribution/graphGalerkin/source/setup_prob_eqn_handcode.py +++ b/jointContribution/graphGalerkin/source/setup_prob_eqn_handcode.py @@ -1,228 +1,264 @@ -import paddle -import numpy as np -import pdb - -from TensorFEMCore import Double, ReshapeFix - -""" -####Possion Equation -""" -class 
setup_linelptc_sclr_base_handcode(object): - """docstring for setup_linelptc_sclr_base_handcode""" - def __init__(self,ndim,K,f,Qb,bnd2nbc): - self.ndim=ndim - self.K=K - self.f=f - self.Qb=Qb - self.bnd2nbc=bnd2nbc - - self.I=np.eye(self.ndim) - if self.K==None: - self.K=lambda x,el: self.I.reshape(self.ndim**2,1,order='F') #Fortan like - if self.f==None: - self.f=lambda x,el: 0 - if self.Qb==None: - self.Qb=lambda x,n,bnd,el,fc: 0 - - self.eqn=LinearEllipticScalarBaseHandcode() - self.vol_pars_fcn=lambda x,el:np.vstack((self.K(x, el),self.f(x, el),np.nan)) - self.bnd_pars_fcn=lambda x,n,bnd,el,fc:np.vstack((self.K(x,el), - self.f(x,el), - self.Qb(x,n,bnd,el,fc))) - - - -class LinearEllipticScalarBaseHandcode(object): - """docstring for LinearEllipticScalarBaseHandcode""" - def __init__(self): - self.neqn=1 - self.nvar=1 - self.ncomp=1 - - def srcflux(self,UQ,pars,x,model=None): - """ - eval_linelptc_base_handcode_srcflux - """ - # Extract information from input - q=UQ[0,1:] - q=ReshapeFix(q,[len(q),1]) - self.ndim=len(q) - try: - k=np.reshape(pars[0:self.ndim**2], - (self.ndim,self.ndim),order='F') - except: - k=paddle.reshape(pars[0:self.ndim**2], - (self.ndim,self.ndim)) - f=pars[self.ndim**2] - try: - temp_flag=(f.requires_grad) - f=f.reshape([1,1]) - except: - f=paddle.to_tensor(f, dtype='float32').reshape([1,1]) - - k_ml=paddle.to_tensor(k, dtype='float32') - # Define flux and source - SF=paddle.concat((f,-1*paddle.mm(k_ml,q)),axis=0) - - # Define partial derivative - dSFdU=np.zeros([self.neqn, self.ndim+1, self.ncomp,self.ndim+1]) - try: - dSFdU[:,1:,:,1:]=np.reshape(-1*k,[self.neqn, self.ndim,self.ncomp,self.ndim]) - except: - k=k.detach().cpu().numpy() - dSFdU[:,1:,:,1:]=np.reshape(-1*k,[self.neqn, self.ndim,self.ncomp,self.ndim]) - dSFdU=paddle.to_tensor(dSFdU, dtype='float32') - return SF, dSFdU - - def bndstvcflux(self,nbcnbr,UQ,pars,x,n): - nvar=UQ.shape[0] - ndim=UQ.shape[1]-1 - - Ub=UQ[:,0] - dUb=np.zeros([nvar,nvar,self.ndim+1]) - dUb[:,:,0]=np.eye(nvar) - - Fn=pars[ndim**2+1] - dFn=np.zeros([nvar,nvar,self.ndim+1]) - dUb=Double(dUb) - Fn=Double(Fn) - dFn=Double(dFn) - return Ub,dUb,Fn,dFn - -""" -####Linear Elasticity Equation -""" - -class setup_linelast_base_handcode(object): - """docstring for setup_linelast_base_handcode""" - def __init__(self,ndim,lam,mu,f,tb,bnd2nbc): - self.bnd2nbc=bnd2nbc - self.eqn=LinearElasticityBaseHandcode(ndim) - self.vol_pars_fcn=lambda x, el: np.vstack((lam(x,el), - mu(x,el), - f(x,el), - np.zeros([ndim,1])+np.nan)) - self.bnd_pars_fcn=lambda x,n,bnd,el,fc:np.vstack((lam(x, el), - mu(x, el), - f(x, el), - tb(x, n, bnd, el, fc))) - - - -class LinearElasticityBaseHandcode(object): - """docstring for LinearElasticityBaseHandcode""" - def __init__(self,ndim): - self.neqn=ndim - self.nvar=ndim - self.bndstvcflux=\ - lambda nbcnbr, UQ, pars, x, n:\ - eval_linelast_base_handcode_bndstvc_intr_bndflux_pars(UQ, pars, x, n) - self.srcflux=lambda UQ,pars,x:\ - eval_linelast_base_handcode_srcflux(UQ, pars, x) - -def eval_linelast_base_handcode_srcflux(UQ, pars, x): - q=UQ[:,1:] - ndim=q.shape[0] - # Define information regarding size of the system - neqn=ndim - ncomp=ndim - - # Extract parameters - lam=pars[0] - mu=pars[1] - f=pars[2:2+ndim] - F=-lam*paddle.trace(q)*(Double(np.eye(ndim)))-mu*(q+q.T) - try: - S=Double(f.reshape([ndim,1],order='F')) - except: - S=f.reshape([ndim,1]) - SF=paddle.concat((S,F),axis=1) - dSFdU=Double(np.zeros([neqn,ndim+1,ncomp,ndim+1])) - for i in range(ndim): - for j in range(ndim): - 
dSFdU[i,1+i,j,1+j]=dSFdU[i,1+i,j,1+j]-lam - dSFdU[i,1+j,i,1+j]=dSFdU[i,1+j,i,1+j]-mu - dSFdU[i,1+j,j,1+i]=dSFdU[i,1+j,j,1+i]-mu - return SF, dSFdU - -def eval_linelast_base_handcode_bndstvc_intr_bndflux_pars(UQ,pars,x,n): - nvar=UQ.shape[0] - ndim=UQ.shape[1]-1 - - Ub=UQ[:,0] - dUb=np.zeros([nvar,nvar,ndim+1]) - dUb[:,:,0]=np.eye(nvar) - Fn=-pars[-ndim:] - dFn=np.zeros([nvar,nvar,ndim+1]) - dUb=Double(dUb) - Fn=ReshapeFix(Double(Fn),[len(Fn),1],order='F') - dFn=Double(dFn) - return Ub,dUb,Fn,dFn - -""" -#### Inconpressible Navier Stokes Equation -""" -class setup_ins_base_handcode(object): - """docstring for setup_ins_base_handcode""" - def __init__(self,ndim,rho,nu,tb,bnd2nbc): - self.eqn=IncompressibleNavierStokes(ndim) - self.bnd2nbc=bnd2nbc - self.vol_pars_fcn=lambda x,el:np.vstack([rho(x, el), - nu(x, el), - np.zeros([ndim+1,1])+np.nan]) - self.bnd_pars_fcn=lambda x,n,bnd,el,fc:np.vstack([rho(x,el), - nu(x,el), - tb(x,n,bnd,el,fc)]) - -class IncompressibleNavierStokes(object): - """docstring for IncompressibleNavierStokes""" - def __init__(self,ndim): - self.ndim=ndim - self.nvar=ndim+1 - self.srcflux=lambda UQ,pars,x:\ - eval_ins_base_handcode_srcflux(UQ,pars,x) - self.bndstvcflux=lambda nbcnbr,UQ,pars,x,n:\ - eval_ins_base_handcode_bndstvc_intr_bndflux_pars(UQ,pars,x,n) - -def eval_ins_base_handcode_srcflux(UQ,pars,x): - u=UQ[:,0]; q=UQ[:,1:] - ndim=u.shape[0]-1 - neqn=ndim+1 - ncomp=ndim+1 - rho=pars[0] - nu=pars[1] - v=u[0:ndim] - - v=ReshapeFix(v,[len(v),1],'F') - - p=u[-1] - dv=q[0:ndim,:] - S=paddle.concat([-rho*paddle.mm(dv,v),-paddle.trace(dv).reshape([1,1])],axis=0) - - F=paddle.concat([-rho*nu*dv+p*paddle.eye(ndim, dtype='float32'), - paddle.zeros([1,ndim], dtype='float32')],axis=0) - - - SF=paddle.concat([S,F],axis=1) - - dSFdUQ=np.zeros([neqn,ndim+1,ncomp,ndim+1]) - dSFdUQ[:,0,:,0]=np.vstack([np.hstack([-rho*dv.detach().cpu().numpy(),np.zeros([ndim,1])]), np.zeros([1,ndim+1])]) - for i in range(ndim): - dSFdUQ[i,0,i,1:]=-rho*v.detach().cpu().numpy().reshape(dSFdUQ[i,0,i,1:].shape,order='F') - dSFdUQ[-1,0,0:-1,1:]=np.reshape(-np.eye(ndim),[1,ndim,ndim],order='F') - dSFdUQ[0:-1,1:,-1,0]=np.eye(ndim) - for i in range(ndim): - for j in range(ndim): - dSFdUQ[i,1+j,i,1+j]=dSFdUQ[i,1+j,i,1+j]-rho*nu - dSFdUQ=Double(dSFdUQ) - return SF,dSFdUQ - -def eval_ins_base_handcode_bndstvc_intr_bndflux_pars(UQ,pars,x,n): - nvar=UQ.shape[0] - ndim=UQ.shape[1]-1 - Ub=UQ[:,0] - dUb=np.zeros([nvar,nvar,ndim+1]) - dUb[:,:,0]=np.eye(nvar) - Fn=-pars[-ndim-1:].reshape([-1,1]) - dFn=np.zeros([nvar,nvar,ndim+1]) - return Ub,Double(dUb),Double(Fn),Double(dFn) +import pdb + +import numpy as np +import paddle +from TensorFEMCore import Double +from TensorFEMCore import ReshapeFix + +""" +####Possion Equation +""" + + +class setup_linelptc_sclr_base_handcode(object): + """docstring for setup_linelptc_sclr_base_handcode""" + + def __init__(self, ndim, K, f, Qb, bnd2nbc): + self.ndim = ndim + self.K = K + self.f = f + self.Qb = Qb + self.bnd2nbc = bnd2nbc + + self.I = np.eye(self.ndim) + if self.K == None: + self.K = lambda x, el: self.I.reshape( + self.ndim**2, 1, order="F" + ) # Fortan like + if self.f == None: + self.f = lambda x, el: 0 + if self.Qb == None: + self.Qb = lambda x, n, bnd, el, fc: 0 + + self.eqn = LinearEllipticScalarBaseHandcode() + self.vol_pars_fcn = lambda x, el: np.vstack( + (self.K(x, el), self.f(x, el), np.nan) + ) + self.bnd_pars_fcn = lambda x, n, bnd, el, fc: np.vstack( + (self.K(x, el), self.f(x, el), self.Qb(x, n, bnd, el, fc)) + ) + + +class 
LinearEllipticScalarBaseHandcode(object): + """docstring for LinearEllipticScalarBaseHandcode""" + + def __init__(self): + self.neqn = 1 + self.nvar = 1 + self.ncomp = 1 + + def srcflux(self, UQ, pars, x, model=None): + """ + eval_linelptc_base_handcode_srcflux + """ + # Extract information from input + q = UQ[0, 1:] + q = ReshapeFix(q, [len(q), 1]) + self.ndim = len(q) + try: + k = np.reshape(pars[0 : self.ndim**2], (self.ndim, self.ndim), order="F") + except: + k = paddle.reshape(pars[0 : self.ndim**2], (self.ndim, self.ndim)) + f = pars[self.ndim**2] + try: + temp_flag = f.requires_grad + f = f.reshape([1, 1]) + except: + f = paddle.to_tensor(f, dtype="float32").reshape([1, 1]) + + k_ml = paddle.to_tensor(k, dtype="float32") + # Define flux and source + SF = paddle.concat((f, -1 * paddle.mm(k_ml, q)), axis=0) + + # Define partial derivative + dSFdU = np.zeros([self.neqn, self.ndim + 1, self.ncomp, self.ndim + 1]) + try: + dSFdU[:, 1:, :, 1:] = np.reshape( + -1 * k, [self.neqn, self.ndim, self.ncomp, self.ndim] + ) + except: + k = k.detach().cpu().numpy() + dSFdU[:, 1:, :, 1:] = np.reshape( + -1 * k, [self.neqn, self.ndim, self.ncomp, self.ndim] + ) + dSFdU = paddle.to_tensor(dSFdU, dtype="float32") + return SF, dSFdU + + def bndstvcflux(self, nbcnbr, UQ, pars, x, n): + nvar = UQ.shape[0] + ndim = UQ.shape[1] - 1 + + Ub = UQ[:, 0] + dUb = np.zeros([nvar, nvar, self.ndim + 1]) + dUb[:, :, 0] = np.eye(nvar) + + Fn = pars[ndim**2 + 1] + dFn = np.zeros([nvar, nvar, self.ndim + 1]) + dUb = Double(dUb) + Fn = Double(Fn) + dFn = Double(dFn) + return Ub, dUb, Fn, dFn + + +""" +####Linear Elasticity Equation +""" + + +class setup_linelast_base_handcode(object): + """docstring for setup_linelast_base_handcode""" + + def __init__(self, ndim, lam, mu, f, tb, bnd2nbc): + self.bnd2nbc = bnd2nbc + self.eqn = LinearElasticityBaseHandcode(ndim) + self.vol_pars_fcn = lambda x, el: np.vstack( + (lam(x, el), mu(x, el), f(x, el), np.zeros([ndim, 1]) + np.nan) + ) + self.bnd_pars_fcn = lambda x, n, bnd, el, fc: np.vstack( + (lam(x, el), mu(x, el), f(x, el), tb(x, n, bnd, el, fc)) + ) + + +class LinearElasticityBaseHandcode(object): + """docstring for LinearElasticityBaseHandcode""" + + def __init__(self, ndim): + self.neqn = ndim + self.nvar = ndim + self.bndstvcflux = lambda nbcnbr, UQ, pars, x, n: eval_linelast_base_handcode_bndstvc_intr_bndflux_pars( + UQ, pars, x, n + ) + self.srcflux = lambda UQ, pars, x: eval_linelast_base_handcode_srcflux( + UQ, pars, x + ) + + +def eval_linelast_base_handcode_srcflux(UQ, pars, x): + q = UQ[:, 1:] + ndim = q.shape[0] + # Define information regarding size of the system + neqn = ndim + ncomp = ndim + + # Extract parameters + lam = pars[0] + mu = pars[1] + f = pars[2 : 2 + ndim] + F = -lam * paddle.trace(q) * (Double(np.eye(ndim))) - mu * (q + q.T) + try: + S = Double(f.reshape([ndim, 1], order="F")) + except: + S = f.reshape([ndim, 1]) + SF = paddle.concat((S, F), axis=1) + dSFdU = Double(np.zeros([neqn, ndim + 1, ncomp, ndim + 1])) + for i in range(ndim): + for j in range(ndim): + dSFdU[i, 1 + i, j, 1 + j] = dSFdU[i, 1 + i, j, 1 + j] - lam + dSFdU[i, 1 + j, i, 1 + j] = dSFdU[i, 1 + j, i, 1 + j] - mu + dSFdU[i, 1 + j, j, 1 + i] = dSFdU[i, 1 + j, j, 1 + i] - mu + return SF, dSFdU + + +def eval_linelast_base_handcode_bndstvc_intr_bndflux_pars(UQ, pars, x, n): + nvar = UQ.shape[0] + ndim = UQ.shape[1] - 1 + + Ub = UQ[:, 0] + dUb = np.zeros([nvar, nvar, ndim + 1]) + dUb[:, :, 0] = np.eye(nvar) + Fn = -pars[-ndim:] + dFn = np.zeros([nvar, nvar, ndim + 1]) + dUb = Double(dUb) 
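The flux assembled in eval_linelast_base_handcode_srcflux below, F = -lam * tr(q) * I - mu * (q + q.T) with q the displacement gradient, is minus the small-strain Cauchy stress. A minimal NumPy cross-check under hypothetical material constants:

import numpy as np

lam, mu = 1.0, 0.5                      # hypothetical Lame parameters
q = np.array([[0.1, 0.3],
              [0.0, -0.2]])             # hypothetical displacement gradient
eps = 0.5 * (q + q.T)                   # small-strain tensor
sigma = lam * np.trace(eps) * np.eye(2) + 2.0 * mu * eps
F = -lam * np.trace(q) * np.eye(2) - mu * (q + q.T)
assert np.allclose(sigma, -F)           # the two expressions agree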
+ Fn = ReshapeFix(Double(Fn), [len(Fn), 1], order="F") + dFn = Double(dFn) + return Ub, dUb, Fn, dFn + + +""" +#### Inconpressible Navier Stokes Equation +""" + + +class setup_ins_base_handcode(object): + """docstring for setup_ins_base_handcode""" + + def __init__(self, ndim, rho, nu, tb, bnd2nbc): + self.eqn = IncompressibleNavierStokes(ndim) + self.bnd2nbc = bnd2nbc + self.vol_pars_fcn = lambda x, el: np.vstack( + [rho(x, el), nu(x, el), np.zeros([ndim + 1, 1]) + np.nan] + ) + self.bnd_pars_fcn = lambda x, n, bnd, el, fc: np.vstack( + [rho(x, el), nu(x, el), tb(x, n, bnd, el, fc)] + ) + + +class IncompressibleNavierStokes(object): + """docstring for IncompressibleNavierStokes""" + + def __init__(self, ndim): + self.ndim = ndim + self.nvar = ndim + 1 + self.srcflux = lambda UQ, pars, x: eval_ins_base_handcode_srcflux(UQ, pars, x) + self.bndstvcflux = lambda nbcnbr, UQ, pars, x, n: eval_ins_base_handcode_bndstvc_intr_bndflux_pars( + UQ, pars, x, n + ) + + +def eval_ins_base_handcode_srcflux(UQ, pars, x): + u = UQ[:, 0] + q = UQ[:, 1:] + ndim = u.shape[0] - 1 + neqn = ndim + 1 + ncomp = ndim + 1 + rho = pars[0] + nu = pars[1] + v = u[0:ndim] + + v = ReshapeFix(v, [len(v), 1], "F") + + p = u[-1] + dv = q[0:ndim, :] + S = paddle.concat( + [-rho * paddle.mm(dv, v), -paddle.trace(dv).reshape([1, 1])], axis=0 + ) + + F = paddle.concat( + [ + -rho * nu * dv + p * paddle.eye(ndim, dtype="float32"), + paddle.zeros([1, ndim], dtype="float32"), + ], + axis=0, + ) + + SF = paddle.concat([S, F], axis=1) + + dSFdUQ = np.zeros([neqn, ndim + 1, ncomp, ndim + 1]) + dSFdUQ[:, 0, :, 0] = np.vstack( + [ + np.hstack([-rho * dv.detach().cpu().numpy(), np.zeros([ndim, 1])]), + np.zeros([1, ndim + 1]), + ] + ) + for i in range(ndim): + dSFdUQ[i, 0, i, 1:] = -rho * v.detach().cpu().numpy().reshape( + dSFdUQ[i, 0, i, 1:].shape, order="F" + ) + dSFdUQ[-1, 0, 0:-1, 1:] = np.reshape(-np.eye(ndim), [1, ndim, ndim], order="F") + dSFdUQ[0:-1, 1:, -1, 0] = np.eye(ndim) + for i in range(ndim): + for j in range(ndim): + dSFdUQ[i, 1 + j, i, 1 + j] = dSFdUQ[i, 1 + j, i, 1 + j] - rho * nu + dSFdUQ = Double(dSFdUQ) + return SF, dSFdUQ + + +def eval_ins_base_handcode_bndstvc_intr_bndflux_pars(UQ, pars, x, n): + nvar = UQ.shape[0] + ndim = UQ.shape[1] - 1 + Ub = UQ[:, 0] + dUb = np.zeros([nvar, nvar, ndim + 1]) + dUb[:, :, 0] = np.eye(nvar) + Fn = -pars[-ndim - 1 :].reshape([-1, 1]) + dFn = np.zeros([nvar, nvar, ndim + 1]) + return Ub, Double(dUb), Double(Fn), Double(dFn) diff --git a/jointContribution/graphGalerkin/utils/ChebConv.py b/jointContribution/graphGalerkin/utils/ChebConv.py index 20d18d503e..826f5fa290 100644 --- a/jointContribution/graphGalerkin/utils/ChebConv.py +++ b/jointContribution/graphGalerkin/utils/ChebConv.py @@ -1,202 +1,202 @@ -from typing import Optional - -import paddle -from init import zeros -from linear import Linear -from message_passing import MessagePassing -from paddle import Tensor -from paddle.nn import LayerList -from utils import add_self_loops -from utils import get_laplacian -from utils import masked_fill -from utils import remove_self_loops - -OptTensor = Optional[Tensor] - - -class ChebConv(MessagePassing): - r"""The chebyshev spectral graph convolutional operator from the - `"Convolutional Neural Networks on Graphs with Fast Localized Spectral - Filtering" `_ paper - - .. math:: - \mathbf{X}^{\prime} = \sum_{k=1}^{K} \mathbf{Z}^{(k)} \cdot - \mathbf{\Theta}^{(k)} - - where :math:`\mathbf{Z}^{(k)}` is computed recursively by - - .. 
math:: - \mathbf{Z}^{(1)} &= \mathbf{X} - - \mathbf{Z}^{(2)} &= \mathbf{\hat{L}} \cdot \mathbf{X} - - \mathbf{Z}^{(k)} &= 2 \cdot \mathbf{\hat{L}} \cdot - \mathbf{Z}^{(k-1)} - \mathbf{Z}^{(k-2)} - - and :math:`\mathbf{\hat{L}}` denotes the scaled and normalized Laplacian - :math:`\frac{2\mathbf{L}}{\lambda_{\max}} - \mathbf{I}`. - - Args: - in_channels (int): Size of each input sample, or :obj:`-1` to derive - the size from the first input(s) to the forward method. - out_channels (int): Size of each output sample. - K (int): Chebyshev filter size :math:`K`. - normalization (str, optional): The normalization scheme for the graph - Laplacian (default: :obj:`"sym"`): - - 1. :obj:`None`: No normalization - :math:`\mathbf{L} = \mathbf{D} - \mathbf{A}` - - 2. :obj:`"sym"`: Symmetric normalization - :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1/2} \mathbf{A} - \mathbf{D}^{-1/2}` - - 3. :obj:`"rw"`: Random-walk normalization - :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1} \mathbf{A}` - - You need to pass :obj:`lambda_max` to the :meth:`forward` method of - this operator in case the normalization is non-symmetric. - :obj:`\lambda_max` should be a :class:`paddle.Tensor` of size - :obj:`[num_graphs]` in a mini-batch scenario and a - scalar/zero-dimensional tensor when operating on single graphs. - bias (bool, optional): If set to :obj:`False`, the layer will not learn - an additive bias. (default: :obj:`True`) - **kwargs (optional): Additional arguments of - :class:`MessagePassing`. - - Shapes: - - **input:** - node features :math:`(|\mathcal{V}|, F_{in})`, - edge indices :math:`(2, |\mathcal{E}|)`, - edge weights :math:`(|\mathcal{E}|)` *(optional)*, - batch vector :math:`(|\mathcal{V}|)` *(optional)*, - maximum :obj:`lambda` value :math:`(|\mathcal{G}|)` *(optional)* - - **output:** node features :math:`(|\mathcal{V}|, F_{out})` - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - K: int, - normalization: Optional[str] = "sym", - bias: bool = True, - **kwargs, - ): - kwargs.setdefault("aggr", "add") - super().__init__(**kwargs) - - assert K > 0 - assert normalization in [None, "sym", "rw"], "Invalid normalization" - - self.in_channels = in_channels - self.out_channels = out_channels - self.normalization = normalization - self.lins = LayerList( - [ - Linear( - in_channels, out_channels, bias=False, weight_initializer="glorot" - ) - for _ in range(K) - ] - ) - if bias: - self.bias = paddle.create_parameter([out_channels], paddle.float32) - else: - self.register_parameter("bias", None) - - self.reset_parameters() - - def reset_parameters(self): - for lin in self.lins: - lin.reset_parameters() - zeros(self.bias) - - def __norm__( - self, - edge_index, - num_nodes: Optional[int], - edge_weight: OptTensor, - normalization: Optional[str], - lambda_max, - dtype: Optional[int] = None, - batch: OptTensor = None, - ): - - edge_index, edge_weight = remove_self_loops(edge_index, edge_weight) - edge_index, edge_weight = get_laplacian( - edge_index, edge_weight, normalization, dtype, num_nodes - ) - - if batch is not None and lambda_max.numel() > 1: - lambda_max = lambda_max[batch[edge_index[0]]] - - edge_weight = (2.0 * edge_weight) / lambda_max - edge_weight = masked_fill( - edge_weight, edge_weight == float("inf"), 0 - ) ########### - - edge_index, edge_weight = add_self_loops( - edge_index, edge_weight, fill_value=-1.0, num_nodes=num_nodes - ) - assert edge_weight is not None - return edge_index, edge_weight - - def forward( - self, - x, - edge_index, - edge_weight: OptTensor = 
None, - batch: OptTensor = None, - lambda_max: OptTensor = None, - ): - """""" - if self.normalization != "sym" and lambda_max is None: - raise ValueError( - "You need to pass `lambda_max` to `forward() in`" - "case the normalization is non-symmetric." - ) - - if lambda_max is None: - lambda_max = paddle.to_tensor(2.0, dtype=x.dtype) - if not isinstance(lambda_max, paddle.Tensor): - lambda_max = paddle.to_tensor(lambda_max, dtype=x.dtype) - assert lambda_max is not None - - edge_index, norm = self.__norm__( - edge_index, - x.shape[self.node_dim], - edge_weight, - self.normalization, - lambda_max, - dtype=x.dtype, - batch=batch, - ) - Tx_0 = x - Tx_1 = x # Dummy. - out = self.lins[0](Tx_0) - # propagate_type: (x: Tensor, norm: Tensor) - if len(self.lins) > 1: - Tx_1 = self.propagate(edge_index, x=x, norm=norm, size=None) - out = out + self.lins[1](Tx_1) - - for lin in self.lins[2:]: - Tx_2 = self.propagate(edge_index, x=Tx_1, norm=norm, size=None) - Tx_2 = 2.0 * Tx_2 - Tx_0 - out = out + lin.forward(Tx_2) - Tx_0, Tx_1 = Tx_1, Tx_2 - - if self.bias is not None: - out += self.bias - - return out - - def message(self, x_j, norm): - return norm.reshape([-1, 1]) * x_j - - def __repr__(self) -> str: - return ( - f"{self.__class__.__name__}({self.in_channels}, " - f"{self.out_channels}, K={len(self.lins)}, " - f"normalization={self.normalization})" - ) +from typing import Optional + +import paddle +from init import zeros +from linear import Linear +from message_passing import MessagePassing +from paddle import Tensor +from paddle.nn import LayerList +from utils import add_self_loops +from utils import get_laplacian +from utils import masked_fill +from utils import remove_self_loops + +OptTensor = Optional[Tensor] + + +class ChebConv(MessagePassing): + r"""The chebyshev spectral graph convolutional operator from the + `"Convolutional Neural Networks on Graphs with Fast Localized Spectral + Filtering" `_ paper + + .. math:: + \mathbf{X}^{\prime} = \sum_{k=1}^{K} \mathbf{Z}^{(k)} \cdot + \mathbf{\Theta}^{(k)} + + where :math:`\mathbf{Z}^{(k)}` is computed recursively by + + .. math:: + \mathbf{Z}^{(1)} &= \mathbf{X} + + \mathbf{Z}^{(2)} &= \mathbf{\hat{L}} \cdot \mathbf{X} + + \mathbf{Z}^{(k)} &= 2 \cdot \mathbf{\hat{L}} \cdot + \mathbf{Z}^{(k-1)} - \mathbf{Z}^{(k-2)} + + and :math:`\mathbf{\hat{L}}` denotes the scaled and normalized Laplacian + :math:`\frac{2\mathbf{L}}{\lambda_{\max}} - \mathbf{I}`. + + Args: + in_channels (int): Size of each input sample, or :obj:`-1` to derive + the size from the first input(s) to the forward method. + out_channels (int): Size of each output sample. + K (int): Chebyshev filter size :math:`K`. + normalization (str, optional): The normalization scheme for the graph + Laplacian (default: :obj:`"sym"`): + + 1. :obj:`None`: No normalization + :math:`\mathbf{L} = \mathbf{D} - \mathbf{A}` + + 2. :obj:`"sym"`: Symmetric normalization + :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1/2} \mathbf{A} + \mathbf{D}^{-1/2}` + + 3. :obj:`"rw"`: Random-walk normalization + :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1} \mathbf{A}` + + You need to pass :obj:`lambda_max` to the :meth:`forward` method of + this operator in case the normalization is non-symmetric. + :obj:`\lambda_max` should be a :class:`paddle.Tensor` of size + :obj:`[num_graphs]` in a mini-batch scenario and a + scalar/zero-dimensional tensor when operating on single graphs. + bias (bool, optional): If set to :obj:`False`, the layer will not learn + an additive bias. 
(default: :obj:`True`) + **kwargs (optional): Additional arguments of + :class:`MessagePassing`. + + Shapes: + - **input:** + node features :math:`(|\mathcal{V}|, F_{in})`, + edge indices :math:`(2, |\mathcal{E}|)`, + edge weights :math:`(|\mathcal{E}|)` *(optional)*, + batch vector :math:`(|\mathcal{V}|)` *(optional)*, + maximum :obj:`lambda` value :math:`(|\mathcal{G}|)` *(optional)* + - **output:** node features :math:`(|\mathcal{V}|, F_{out})` + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + K: int, + normalization: Optional[str] = "sym", + bias: bool = True, + **kwargs, + ): + kwargs.setdefault("aggr", "add") + super().__init__(**kwargs) + + assert K > 0 + assert normalization in [None, "sym", "rw"], "Invalid normalization" + + self.in_channels = in_channels + self.out_channels = out_channels + self.normalization = normalization + self.lins = LayerList( + [ + Linear( + in_channels, out_channels, bias=False, weight_initializer="glorot" + ) + for _ in range(K) + ] + ) + if bias: + self.bias = paddle.create_parameter([out_channels], paddle.float32) + else: + self.register_parameter("bias", None) + + self.reset_parameters() + + def reset_parameters(self): + for lin in self.lins: + lin.reset_parameters() + zeros(self.bias) + + def __norm__( + self, + edge_index, + num_nodes: Optional[int], + edge_weight: OptTensor, + normalization: Optional[str], + lambda_max, + dtype: Optional[int] = None, + batch: OptTensor = None, + ): + + edge_index, edge_weight = remove_self_loops(edge_index, edge_weight) + edge_index, edge_weight = get_laplacian( + edge_index, edge_weight, normalization, dtype, num_nodes + ) + + if batch is not None and lambda_max.numel() > 1: + lambda_max = lambda_max[batch[edge_index[0]]] + + edge_weight = (2.0 * edge_weight) / lambda_max + edge_weight = masked_fill( + edge_weight, edge_weight == float("inf"), 0 + ) ########### + + edge_index, edge_weight = add_self_loops( + edge_index, edge_weight, fill_value=-1.0, num_nodes=num_nodes + ) + assert edge_weight is not None + return edge_index, edge_weight + + def forward( + self, + x, + edge_index, + edge_weight: OptTensor = None, + batch: OptTensor = None, + lambda_max: OptTensor = None, + ): + """""" + if self.normalization != "sym" and lambda_max is None: + raise ValueError( + "You need to pass `lambda_max` to `forward() in`" + "case the normalization is non-symmetric." + ) + + if lambda_max is None: + lambda_max = paddle.to_tensor(2.0, dtype=x.dtype) + if not isinstance(lambda_max, paddle.Tensor): + lambda_max = paddle.to_tensor(lambda_max, dtype=x.dtype) + assert lambda_max is not None + + edge_index, norm = self.__norm__( + edge_index, + x.shape[self.node_dim], + edge_weight, + self.normalization, + lambda_max, + dtype=x.dtype, + batch=batch, + ) + Tx_0 = x + Tx_1 = x # Dummy. 
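The loop that follows evaluates the Chebyshev recurrence T_0(L)x = x, T_1(L)x = Lx, T_k(L)x = 2 L T_{k-1}(L)x - T_{k-2}(L)x, with propagate() acting as multiplication by the rescaled Laplacian. A dense NumPy sketch of the same recursion, assuming a precomputed rescaled Laplacian L_hat (illustrative only, not part of this patch):

import numpy as np

def cheb_basis(L_hat, x, K):
    # Return [T_0(L_hat) x, ..., T_{K-1}(L_hat) x] for a dense matrix L_hat.
    Tx = [x, L_hat @ x]
    for _ in range(2, K):
        Tx.append(2.0 * L_hat @ Tx[-1] - Tx[-2])
    return Tx[:K]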
+ out = self.lins[0](Tx_0) + # propagate_type: (x: Tensor, norm: Tensor) + if len(self.lins) > 1: + Tx_1 = self.propagate(edge_index, x=x, norm=norm, size=None) + out = out + self.lins[1](Tx_1) + + for lin in self.lins[2:]: + Tx_2 = self.propagate(edge_index, x=Tx_1, norm=norm, size=None) + Tx_2 = 2.0 * Tx_2 - Tx_0 + out = out + lin.forward(Tx_2) + Tx_0, Tx_1 = Tx_1, Tx_2 + + if self.bias is not None: + out += self.bias + + return out + + def message(self, x_j, norm): + return norm.reshape([-1, 1]) * x_j + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.in_channels}, " + f"{self.out_channels}, K={len(self.lins)}, " + f"normalization={self.normalization})" + ) diff --git a/jointContribution/graphGalerkin/utils/init.py b/jointContribution/graphGalerkin/utils/init.py index e7f7ba175d..205489cbce 100644 --- a/jointContribution/graphGalerkin/utils/init.py +++ b/jointContribution/graphGalerkin/utils/init.py @@ -1,74 +1,82 @@ -from typing import Any - -import math - -import paddle -from paddle import Tensor -from paddle.nn.initializer import Orthogonal - -def uniform(size: int, value: Any): - if isinstance(value, Tensor): - bound = 1.0 / math.sqrt(size) - value.data.uniform_(-bound, bound) - else: - for v in value.parameters() if hasattr(value, 'parameters') else []: - uniform(size, v) - for v in value.buffers() if hasattr(value, 'buffers') else []: - uniform(size, v) - -def kaiming_uniform(value: Any, fan: int, a: float): - if isinstance(value, Tensor): - bound = math.sqrt(6 / ((1 + a**2) * fan)) - value.data.uniform_(-bound, bound) - else: - for v in value.parameters() if hasattr(value, 'parameters') else []: - kaiming_uniform(v, fan, a) - for v in value.buffers() if hasattr(value, 'buffers') else []: - kaiming_uniform(v, fan, a) - -def glorot(value: Any): - if isinstance(value, Tensor): - stdv = math.sqrt(6.0 / (value.shape[-2] + value.shape[-1])) - value = paddle.uniform(value.shape, value.dtype, -stdv, stdv) - else: - for v in value.parameters() if hasattr(value, 'parameters') else []: - glorot(v) - for v in value.buffers() if hasattr(value, 'buffers') else []: - glorot(v) - -def glorot_orthogonal(tensor, scale): - if tensor is not None: - tensor = paddle.create_parameter(tensor.shape, attr=Orthogonal()) - scale /= ((tensor.size(-2) + tensor.size(-1)) * tensor.var()) - tensor.data *= scale.sqrt() - -def constant(value: Any, fill_value: float): - if isinstance(value, Tensor): - value = paddle.full(value.shape, fill_value, value.dtype) - else: - for v in value.parameters() if hasattr(value, 'parameters') else []: - constant(v, fill_value) - for v in value.buffers() if hasattr(value, 'buffers') else []: - constant(v, fill_value) - -def zeros(value: Any): - constant(value, 0.) - -def ones(tensor: Any): - constant(tensor, 1.) 
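For reference, the glorot() helper in this file draws weights uniformly from [-s, s] with s = sqrt(6 / (fan_in + fan_out)). A standalone sketch with hypothetical fan sizes:

import math

import paddle

fan_in, fan_out = 64, 32                # hypothetical fan sizes
stdv = math.sqrt(6.0 / (fan_in + fan_out))
w = paddle.uniform([fan_in, fan_out], dtype="float32", min=-stdv, max=stdv)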
- -def normal(value: Any, mean: float, std: float): - if isinstance(value, Tensor): - value.data.normal_(mean, std) - else: - for v in value.parameters() if hasattr(value, 'parameters') else []: - normal(v, mean, std) - for v in value.buffers() if hasattr(value, 'buffers') else []: - normal(v, mean, std) - -def reset(value: Any): - if hasattr(value, 'reset_parameters'): - value.reset_parameters() - else: - for child in value.children() if hasattr(value, 'children') else []: - reset(child) +import math +from typing import Any + +import paddle +from paddle import Tensor +from paddle.nn.initializer import Orthogonal + + +def uniform(size: int, value: Any): + if isinstance(value, Tensor): + bound = 1.0 / math.sqrt(size) + value.data.uniform_(-bound, bound) + else: + for v in value.parameters() if hasattr(value, "parameters") else []: + uniform(size, v) + for v in value.buffers() if hasattr(value, "buffers") else []: + uniform(size, v) + + +def kaiming_uniform(value: Any, fan: int, a: float): + if isinstance(value, Tensor): + bound = math.sqrt(6 / ((1 + a**2) * fan)) + value.data.uniform_(-bound, bound) + else: + for v in value.parameters() if hasattr(value, "parameters") else []: + kaiming_uniform(v, fan, a) + for v in value.buffers() if hasattr(value, "buffers") else []: + kaiming_uniform(v, fan, a) + + +def glorot(value: Any): + if isinstance(value, Tensor): + stdv = math.sqrt(6.0 / (value.shape[-2] + value.shape[-1])) + value = paddle.uniform(value.shape, value.dtype, -stdv, stdv) + else: + for v in value.parameters() if hasattr(value, "parameters") else []: + glorot(v) + for v in value.buffers() if hasattr(value, "buffers") else []: + glorot(v) + + +def glorot_orthogonal(tensor, scale): + if tensor is not None: + tensor = paddle.create_parameter(tensor.shape, attr=Orthogonal()) + scale /= (tensor.size(-2) + tensor.size(-1)) * tensor.var() + tensor.data *= scale.sqrt() + + +def constant(value: Any, fill_value: float): + if isinstance(value, Tensor): + value = paddle.full(value.shape, fill_value, value.dtype) + else: + for v in value.parameters() if hasattr(value, "parameters") else []: + constant(v, fill_value) + for v in value.buffers() if hasattr(value, "buffers") else []: + constant(v, fill_value) + + +def zeros(value: Any): + constant(value, 0.0) + + +def ones(tensor: Any): + constant(tensor, 1.0) + + +def normal(value: Any, mean: float, std: float): + if isinstance(value, Tensor): + value.data.normal_(mean, std) + else: + for v in value.parameters() if hasattr(value, "parameters") else []: + normal(v, mean, std) + for v in value.buffers() if hasattr(value, "buffers") else []: + normal(v, mean, std) + + +def reset(value: Any): + if hasattr(value, "reset_parameters"): + value.reset_parameters() + else: + for child in value.children() if hasattr(value, "children") else []: + reset(child) diff --git a/jointContribution/graphGalerkin/utils/inspector.py b/jointContribution/graphGalerkin/utils/inspector.py index 249187a549..042d461258 100644 --- a/jointContribution/graphGalerkin/utils/inspector.py +++ b/jointContribution/graphGalerkin/utils/inspector.py @@ -1,68 +1,76 @@ -import re -import inspect -from collections import OrderedDict -from typing import Dict, List, Any, Optional, Callable, Set, Tuple -import pyparsing as pp - -class Inspector(object): - def __init__(self, base_class: Any): - self.base_class: Any = base_class - self.params: Dict[str, Dict[str, Any]] = {} - - def inspect(self, func: Callable, - pop_first: bool = False) -> Dict[str, Any]: - params = 
inspect.signature(func).parameters - params = OrderedDict(params) - if pop_first: - params.popitem(last=False) - self.params[func.__name__] = params - - def keys(self, func_names: Optional[List[str]] = None) -> Set[str]: - keys = [] - for func in func_names or list(self.params.keys()): - keys += self.params[func].keys() - return set(keys) - - def __implements__(self, cls, func_name: str) -> bool: - if cls.__name__ == 'MessagePassing': - return False - if func_name in cls.__dict__.keys(): - return True - return any(self.__implements__(c, func_name) for c in cls.__bases__) - - def implements(self, func_name: str) -> bool: - return self.__implements__(self.base_class.__class__, func_name) - - def distribute(self, func_name, kwargs: Dict[str, Any]): - out = {} - for key, param in self.params[func_name].items(): - data = kwargs.get(key, inspect.Parameter.empty) - if data is inspect.Parameter.empty: - if param.default is inspect.Parameter.empty: - raise TypeError(f'Required parameter {key} is empty.') - data = param.default - out[key] = data - return out - -def func_header_repr(func: Callable, keep_annotation: bool = True) -> str: - source = inspect.getsource(func) - signature = inspect.signature(func) - - if keep_annotation: - return ''.join(re.split(r'(\).*?:.*?\n)', source, - maxsplit=1)[:2]).strip() - - params_repr = ['self'] - for param in signature.parameters.values(): - params_repr.append(param.name) - if param.default is not inspect.Parameter.empty: - params_repr[-1] += f'={param.default}' - - return f'def {func.__name__}({", ".join(params_repr)}):' - -def func_body_repr(func: Callable, keep_annotation: bool = True) -> str: - source = inspect.getsource(func) - body_repr = re.split(r'\).*?:.*?\n', source, maxsplit=1)[1] - if not keep_annotation: - body_repr = re.sub(r'\s*# type:.*\n', '', body_repr) - return body_repr \ No newline at end of file +import inspect +import re +from collections import OrderedDict +from typing import Any +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple + +import pyparsing as pp + + +class Inspector(object): + def __init__(self, base_class: Any): + self.base_class: Any = base_class + self.params: Dict[str, Dict[str, Any]] = {} + + def inspect(self, func: Callable, pop_first: bool = False) -> Dict[str, Any]: + params = inspect.signature(func).parameters + params = OrderedDict(params) + if pop_first: + params.popitem(last=False) + self.params[func.__name__] = params + + def keys(self, func_names: Optional[List[str]] = None) -> Set[str]: + keys = [] + for func in func_names or list(self.params.keys()): + keys += self.params[func].keys() + return set(keys) + + def __implements__(self, cls, func_name: str) -> bool: + if cls.__name__ == "MessagePassing": + return False + if func_name in cls.__dict__.keys(): + return True + return any(self.__implements__(c, func_name) for c in cls.__bases__) + + def implements(self, func_name: str) -> bool: + return self.__implements__(self.base_class.__class__, func_name) + + def distribute(self, func_name, kwargs: Dict[str, Any]): + out = {} + for key, param in self.params[func_name].items(): + data = kwargs.get(key, inspect.Parameter.empty) + if data is inspect.Parameter.empty: + if param.default is inspect.Parameter.empty: + raise TypeError(f"Required parameter {key} is empty.") + data = param.default + out[key] = data + return out + + +def func_header_repr(func: Callable, keep_annotation: bool = True) -> str: + source = 
inspect.getsource(func) + signature = inspect.signature(func) + + if keep_annotation: + return "".join(re.split(r"(\).*?:.*?\n)", source, maxsplit=1)[:2]).strip() + + params_repr = ["self"] + for param in signature.parameters.values(): + params_repr.append(param.name) + if param.default is not inspect.Parameter.empty: + params_repr[-1] += f"={param.default}" + + return f'def {func.__name__}({", ".join(params_repr)}):' + + +def func_body_repr(func: Callable, keep_annotation: bool = True) -> str: + source = inspect.getsource(func) + body_repr = re.split(r"\).*?:.*?\n", source, maxsplit=1)[1] + if not keep_annotation: + body_repr = re.sub(r"\s*# type:.*\n", "", body_repr) + return body_repr diff --git a/jointContribution/graphGalerkin/utils/linear.py b/jointContribution/graphGalerkin/utils/linear.py index b74f953c3e..254a882b00 100644 --- a/jointContribution/graphGalerkin/utils/linear.py +++ b/jointContribution/graphGalerkin/utils/linear.py @@ -1,158 +1,183 @@ -from typing import Optional -from collections import OrderedDict - -import copy -import math - -import paddle -from paddle import nn -from paddle import Tensor -import paddle.nn.functional as F -from paddle.nn.initializer import Uniform - -import init as inits - -class Linear(nn.Layer): - r"""Applies a linear tranformation to the incoming data - - .. math:: - \mathbf{x}^{\prime} = \mathbf{x} \mathbf{W}^{\top} + \mathbf{b} - - similar to :class:`torch.nn.Linear`. - It supports lazy initialization and customizable weight and bias - initialization. - - Args: - in_channels (int): Size of each input sample. Will be initialized - lazily in case it is given as :obj:`-1`. - out_channels (int): Size of each output sample. - bias (bool, optional): If set to :obj:`False`, the layer will not learn - an additive bias. (default: :obj:`True`) - weight_initializer (str, optional): The initializer for the weight - matrix (:obj:`"glorot"`, :obj:`"uniform"`, :obj:`"kaiming_uniform"` - or :obj:`None`). - If set to :obj:`None`, will match default weight initialization of - :class:`torch.nn.Linear`. (default: :obj:`None`) - bias_initializer (str, optional): The initializer for the bias vector - (:obj:`"zeros"` or :obj:`None`). - If set to :obj:`None`, will match default bias initialization of - :class:`torch.nn.Linear`. 
(default: :obj:`None`) - """ - def __init__(self, in_channels: int, out_channels: int, bias: bool = True, - weight_initializer: Optional[str] = None, - bias_initializer: Optional[str] = None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.weight_initializer = weight_initializer - self.bias_initializer = bias_initializer - self._parameters = OrderedDict() - - if in_channels > 0: - self.weight = paddle.create_parameter(shape=[in_channels, out_channels], dtype=paddle.float32) - else: - self.weight = nn.parameter.UninitializedParameter() - self._hook = self.register_forward_pre_hook( - self.initialize_parameters) - - if bias: - self.bias = paddle.create_parameter(shape=[out_channels], dtype=paddle.float32) - else: - self.register_parameter('bias', None) - - self._load_hook = self.register_state_dict_hook( - self._lazy_load_hook) - - self.reset_parameters() - - def __deepcopy__(self, memo): - out = Linear(self.in_channels, self.out_channels, self.bias - is not None, self.weight_initializer, - self.bias_initializer) - if self.in_channels > 0: - out.weight = copy.deepcopy(self.weight, memo) - if self.bias is not None: - out.bias = copy.deepcopy(self.bias, memo) - return out - - def reset_parameters(self): - # if isinstance(self.weight, nn.parameter.UninitializedParameter): - if self.in_channels<=0: - pass - elif self.weight_initializer == 'glorot': - inits.glorot(self.weight) - elif self.weight_initializer == 'uniform': - bound = 1.0 / math.sqrt(self.weight.size(-1)) - self.weight = paddle.create_parameter(shape=self.weight.shape, dtype=paddle.float32, attr=paddle.ParamAttr(initializer=Uniform(-bound, bound))) - elif self.weight_initializer == 'kaiming_uniform': - inits.kaiming_uniform(self.weight, fan=self.in_channels, - a=math.sqrt(5)) - elif self.weight_initializer is None: - inits.kaiming_uniform(self.weight, fan=self.in_channels, - a=math.sqrt(5)) - else: - raise RuntimeError(f"Linear layer weight initializer " - f"'{self.weight_initializer}' is not supported") - - # if isinstance(self.weight, nn.parameter.UninitializedParameter): - if self.in_channels<=0: - pass - elif self.bias is None: - pass - elif self.bias_initializer == 'zeros': - inits.zeros(self.bias) - elif self.bias_initializer is None: - inits.uniform(self.in_channels, self.bias) - else: - raise RuntimeError(f"Linear layer bias initializer " - f"'{self.bias_initializer}' is not supported") - - def forward(self, x: Tensor) -> Tensor: - """""" - return F.linear(x, self.weight, self.bias) - - @paddle.no_grad() - def initialize_parameters(self, module, input): - if isinstance(self.weight, nn.parameter.UninitializedParameter): - self.in_channels = input[0].size(-1) - self.weight.materialize((self.out_channels, self.in_channels)) - self.reset_parameters() - self._hook.remove() - delattr(self, '_hook') - - def _save_to_state_dict(self, destination, prefix, keep_vars): - if isinstance(self.weight, nn.parameter.UninitializedParameter): - destination[prefix + 'weight'] = self.weight - else: - destination[prefix + 'weight'] = self.weight.detach() - if self.bias is not None: - destination[prefix + 'bias'] = self.bias.detach() - - def _lazy_load_hook(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - - weight = state_dict[prefix + 'weight'] - if isinstance(weight, nn.parameter.UninitializedParameter): - self.in_channels = -1 - self.weight = nn.parameter.UninitializedParameter() - if not hasattr(self, '_hook'): - self._hook = 
self.register_forward_pre_hook( - self.initialize_parameters) - - elif isinstance(self.weight, nn.parameter.UninitializedParameter): - self.in_channels = weight.size(-1) - self.weight.materialize((self.out_channels, self.in_channels)) - if hasattr(self, '_hook'): - self._hook.remove() - delattr(self, '_hook') - - def __repr__(self) -> str: - return (f'{self.__class__.__name__}({self.in_channels}, ' - f'{self.out_channels}, bias={self.bias is not None})') - - def register_parameter(self, name: str, param) -> None: - if param is None: - self._parameters[name] = None - else: - self._parameters[name] = param - +import copy +import math +from collections import OrderedDict +from typing import Optional + +import init as inits +import paddle +import paddle.nn.functional as F +from paddle import Tensor +from paddle import nn +from paddle.nn.initializer import Uniform + + +class Linear(nn.Layer): + r"""Applies a linear tranformation to the incoming data + + .. math:: + \mathbf{x}^{\prime} = \mathbf{x} \mathbf{W}^{\top} + \mathbf{b} + + similar to :class:`torch.nn.Linear`. + It supports lazy initialization and customizable weight and bias + initialization. + + Args: + in_channels (int): Size of each input sample. Will be initialized + lazily in case it is given as :obj:`-1`. + out_channels (int): Size of each output sample. + bias (bool, optional): If set to :obj:`False`, the layer will not learn + an additive bias. (default: :obj:`True`) + weight_initializer (str, optional): The initializer for the weight + matrix (:obj:`"glorot"`, :obj:`"uniform"`, :obj:`"kaiming_uniform"` + or :obj:`None`). + If set to :obj:`None`, will match default weight initialization of + :class:`torch.nn.Linear`. (default: :obj:`None`) + bias_initializer (str, optional): The initializer for the bias vector + (:obj:`"zeros"` or :obj:`None`). + If set to :obj:`None`, will match default bias initialization of + :class:`torch.nn.Linear`. 
(default: :obj:`None`) + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + bias: bool = True, + weight_initializer: Optional[str] = None, + bias_initializer: Optional[str] = None, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.weight_initializer = weight_initializer + self.bias_initializer = bias_initializer + self._parameters = OrderedDict() + + if in_channels > 0: + self.weight = paddle.create_parameter( + shape=[in_channels, out_channels], dtype=paddle.float32 + ) + else: + self.weight = nn.parameter.UninitializedParameter() + self._hook = self.register_forward_pre_hook(self.initialize_parameters) + + if bias: + self.bias = paddle.create_parameter( + shape=[out_channels], dtype=paddle.float32 + ) + else: + self.register_parameter("bias", None) + + self._load_hook = self.register_state_dict_hook(self._lazy_load_hook) + + self.reset_parameters() + + def __deepcopy__(self, memo): + out = Linear( + self.in_channels, + self.out_channels, + self.bias is not None, + self.weight_initializer, + self.bias_initializer, + ) + if self.in_channels > 0: + out.weight = copy.deepcopy(self.weight, memo) + if self.bias is not None: + out.bias = copy.deepcopy(self.bias, memo) + return out + + def reset_parameters(self): + # if isinstance(self.weight, nn.parameter.UninitializedParameter): + if self.in_channels <= 0: + pass + elif self.weight_initializer == "glorot": + inits.glorot(self.weight) + elif self.weight_initializer == "uniform": + bound = 1.0 / math.sqrt(self.weight.size(-1)) + self.weight = paddle.create_parameter( + shape=self.weight.shape, + dtype=paddle.float32, + attr=paddle.ParamAttr(initializer=Uniform(-bound, bound)), + ) + elif self.weight_initializer == "kaiming_uniform": + inits.kaiming_uniform(self.weight, fan=self.in_channels, a=math.sqrt(5)) + elif self.weight_initializer is None: + inits.kaiming_uniform(self.weight, fan=self.in_channels, a=math.sqrt(5)) + else: + raise RuntimeError( + f"Linear layer weight initializer " + f"'{self.weight_initializer}' is not supported" + ) + + # if isinstance(self.weight, nn.parameter.UninitializedParameter): + if self.in_channels <= 0: + pass + elif self.bias is None: + pass + elif self.bias_initializer == "zeros": + inits.zeros(self.bias) + elif self.bias_initializer is None: + inits.uniform(self.in_channels, self.bias) + else: + raise RuntimeError( + f"Linear layer bias initializer " + f"'{self.bias_initializer}' is not supported" + ) + + def forward(self, x: Tensor) -> Tensor: + """""" + return F.linear(x, self.weight, self.bias) + + @paddle.no_grad() + def initialize_parameters(self, module, input): + if isinstance(self.weight, nn.parameter.UninitializedParameter): + self.in_channels = input[0].size(-1) + self.weight.materialize((self.out_channels, self.in_channels)) + self.reset_parameters() + self._hook.remove() + delattr(self, "_hook") + + def _save_to_state_dict(self, destination, prefix, keep_vars): + if isinstance(self.weight, nn.parameter.UninitializedParameter): + destination[prefix + "weight"] = self.weight + else: + destination[prefix + "weight"] = self.weight.detach() + if self.bias is not None: + destination[prefix + "bias"] = self.bias.detach() + + def _lazy_load_hook( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + + weight = state_dict[prefix + "weight"] + if isinstance(weight, nn.parameter.UninitializedParameter): + self.in_channels = -1 + self.weight = 
nn.parameter.UninitializedParameter() + if not hasattr(self, "_hook"): + self._hook = self.register_forward_pre_hook(self.initialize_parameters) + + elif isinstance(self.weight, nn.parameter.UninitializedParameter): + self.in_channels = weight.size(-1) + self.weight.materialize((self.out_channels, self.in_channels)) + if hasattr(self, "_hook"): + self._hook.remove() + delattr(self, "_hook") + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.in_channels}, " + f"{self.out_channels}, bias={self.bias is not None})" + ) + + def register_parameter(self, name: str, param) -> None: + if param is None: + self._parameters[name] = None + else: + self._parameters[name] = param diff --git a/jointContribution/graphGalerkin/utils/message_passing.py b/jointContribution/graphGalerkin/utils/message_passing.py index 98ef276f2b..f1066ec989 100644 --- a/jointContribution/graphGalerkin/utils/message_passing.py +++ b/jointContribution/graphGalerkin/utils/message_passing.py @@ -1,341 +1,370 @@ -from typing import List, Optional, Set, Tuple - -from uuid import uuid1 -from inspect import Parameter -from collections import OrderedDict - -import paddle -from paddle import Tensor - -from inspector import Inspector -from scatter import scatter -from paddle.nn import Layer - -Size = Optional[Tuple[int, int]] -class MessagePassing(Layer): - r"""Base class for creating message passing layers of the form - - .. math:: - \mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i, - \square_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}} - \left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{j,i}\right) \right), - - where :math:`\square` denotes a differentiable, permutation invariant - function, *e.g.*, sum, mean or max, and :math:`\gamma_{\mathbf{\Theta}}` - and :math:`\phi_{\mathbf{\Theta}}` denote differentiable functions such as - MLPs. - - Args: - aggr (string, optional): The aggregation scheme to use - (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"` or :obj:`None`). - (default: :obj:`"add"`) - flow (string, optional): The flow direction of message passing - (:obj:`"source_to_target"` or :obj:`"target_to_source"`). - (default: :obj:`"source_to_target"`) - node_dim (int, optional): The axis along which to propagate. - (default: :obj:`-2`) - decomposed_layers (int, optional): The number of feature decomposition - layers, as introduced in the `"Optimizing Memory Efficiency of - Graph Neural Networks on Edge Computing Platforms" - `_ paper. - Feature decomposition reduces the peak memory usage by slicing - the feature dimensions into separated feature decomposition layers - during GNN aggregation. - This method can accelerate GNN execution on CPU-based platforms - However, this method is not applicable to all GNN operators - available, in particular for operators in which message computation - can not easily be decomposed, *e.g.* in attention-based GNNs. - The selection of the optimal value of :obj:`decomposed_layers` - depends both on the specific graph dataset and available hardware - resources. - A value of :obj:`2` is suitable in most cases. - Although the peak memory usage is directly associated with the - granularity of feature decomposition, the same is not necessarily - true for execution speedups. 
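The forward pass of the ported Linear above reduces to paddle.nn.functional.linear with a [in_channels, out_channels] weight; a small sketch of that underlying call follows (shapes are hypothetical, not from the diff). Note that Paddle's F.linear computes x @ W + b without the transpose in the docstring's torch-style formula, consistent with the weight being created as [in_channels, out_channels].

# Sketch of what Linear.forward computes (hypothetical shapes).
import paddle
import paddle.nn.functional as F

x = paddle.randn([5, 16])        # batch of 5 samples, in_channels = 16
weight = paddle.randn([16, 32])  # [in_channels, out_channels]
bias = paddle.zeros([32])

out = F.linear(x, weight, bias)  # x @ weight + bias
print(out.shape)                 # [5, 32]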
(default: :obj:`1`) - """ - - special_args: Set[str] = { - 'edge_index', 'adj_t', 'edge_index_i', 'edge_index_j', 'size', - 'size_i', 'size_j', 'ptr', 'index', 'dim_size' - } - - def __init__(self, aggr: Optional[str] = "add", - flow: str = "source_to_target", node_dim: int = -2, - decomposed_layers: int = 1): - - super().__init__() - - self.aggr = aggr - assert self.aggr in ['add', 'mean', 'max', None] - - self.flow = flow - assert self.flow in ['source_to_target', 'target_to_source'] - - self.node_dim = node_dim - self.decomposed_layers = decomposed_layers - - self.inspector = Inspector(self) - self.inspector.inspect(self.message) - self.inspector.inspect(self.aggregate, pop_first=True) - self.inspector.inspect(self.update, pop_first=True) - self.inspector.inspect(self.edge_update) - self.inspector.inspect(self.message_and_aggregate, pop_first=True) - - self.__user_args__ = self.inspector.keys( - ['message', 'aggregate', 'update']).difference(self.special_args) - self.__fused_user_args__ = self.inspector.keys( - ['message_and_aggregate', 'update']).difference(self.special_args) - self.__edge_user_args__ = self.inspector.keys( - ['edge_update']).difference(self.special_args) - - # Support for "fused" message passing. - self.fuse = self.inspector.implements('message_and_aggregate') - - # Support for GNNExplainer. - self.__explain__ = False - self.__edge_mask__ = None - self.__loop_mask__ = None - - # Hooks: - self._propagate_forward_pre_hooks = OrderedDict() - self._propagate_forward_hooks = OrderedDict() - self._message_forward_pre_hooks = OrderedDict() - self._message_forward_hooks = OrderedDict() - self._aggregate_forward_pre_hooks = OrderedDict() - self._aggregate_forward_hooks = OrderedDict() - self._message_and_aggregate_forward_pre_hooks = OrderedDict() - self._message_and_aggregate_forward_hooks = OrderedDict() - self._edge_update_forward_pre_hooks = OrderedDict() - self._edge_update_forward_hooks = OrderedDict() - - def __set_size__(self, size: List[Optional[int]], dim: int, src: Tensor): - the_size = size[dim] - if the_size is None: - size[dim] = src.shape[self.node_dim] - elif the_size != src.shape[self.node_dim]: - raise ValueError( - (f'Encountered tensor with size {src.size(self.node_dim)} in ' - f'dimension {self.node_dim}, but expected size {the_size}.')) - - def __lift__(self, src, edge_index, dim): - if isinstance(edge_index, Tensor): - index = edge_index[dim] - return src.index_select(index, self.node_dim) - raise ValueError - - def __collect__(self, args, edge_index, size, kwargs): - i, j = (1, 0) if self.flow == 'source_to_target' else (0, 1) - - out = {} - for arg in args: - if arg[-2:] not in ['_i', '_j']: - out[arg] = kwargs.get(arg, Parameter.empty) - else: - dim = 0 if arg[-2:] == '_j' else 1 - data = kwargs.get(arg[:-2], Parameter.empty) - - if isinstance(data, (tuple, list)): - assert len(data) == 2 - if isinstance(data[1 - dim], Tensor): - self.__set_size__(size, 1 - dim, data[1 - dim]) - data = data[dim] - - if isinstance(data, Tensor): - self.__set_size__(size, dim, data) - data = self.__lift__(data, edge_index, - j if arg[-2:] == '_j' else i) - - out[arg] = data - - if isinstance(edge_index, Tensor): - out['adj_t'] = None - out['edge_index'] = edge_index - out['edge_index_i'] = edge_index[i] - out['edge_index_j'] = edge_index[j] - out['ptr'] = None - - out['index'] = out['edge_index_i'] - out['size'] = size - out['size_i'] = size[1] or size[0] - out['size_j'] = size[0] or size[1] - out['dim_size'] = out['size_i'] - - return out - - def __check_input__(self, 
edge_index, size): - the_size: List[Optional[int]] = [None, None] - - if isinstance(edge_index, Tensor): - assert edge_index.dtype == paddle.int64 - assert edge_index.dim() == 2 - assert edge_index.shape[0] == 2 - if size is not None: - the_size[0] = size[0] - the_size[1] = size[1] - return the_size - - raise ValueError( - ('`MessagePassing.propagate` only supports `int` of ' - 'shape `[2, num_messages]` for ' - 'argument `edge_index`.')) - - def propagate(self, edge_index: Tensor, size: Size = None, **kwargs): - r"""The initial call to start propagating messages. - - Args: - edge_index (Tensor): A :obj:`int` or a - :obj:`edge_index` holds the indices of a general (sparse) - assignment matrix of shape :obj:`[N, M]`. - If :obj:`edge_index` is of type :obj:`int`, its - shape must be defined as :obj:`[2, num_messages]`, where - messages from nodes in :obj:`edge_index[0]` are sent to - nodes in :obj:`edge_index[1]` - (in case :obj:`flow="source_to_target"`). - If :obj:`edge_index` is of type - :obj:`(row, col)` should relate to :obj:`row = edge_index[1]` - and :obj:`col = edge_index[0]`. - The major difference between both formats is that we need to - input the *transposed* sparse adjacency matrix into - :func:`propagate`. - size (tuple, optional): The size :obj:`(N, M)` of the assignment - matrix in case :obj:`edge_index` is a :obj:`LongTensor`. - If set to :obj:`None`, the size will be automatically inferred - and assumed to be quadratic. - **kwargs: Any additional data which is needed to construct and - aggregate messages, and to update node embeddings. - """ - decomposed_layers = 1 if self.__explain__ else self.decomposed_layers - - for hook in self._propagate_forward_pre_hooks.values(): - res = hook(self, (edge_index, size, kwargs)) - if res is not None: - edge_index, size, kwargs = res - - size = self.__check_input__(edge_index, size) - - if isinstance(edge_index, Tensor) or not self.fuse: - if decomposed_layers > 1: - user_args = self.__user_args__ - decomp_args = {a[:-2] for a in user_args if a[-2:] == '_j'} - decomp_kwargs = { - a: kwargs[a].chunk(decomposed_layers, -1) - for a in decomp_args - } - decomp_out = [] - - for i in range(decomposed_layers): - if decomposed_layers > 1: - for arg in decomp_args: - kwargs[arg] = decomp_kwargs[arg][i] - - coll_dict = self.__collect__(self.__user_args__, edge_index, - size, kwargs) - - msg_kwargs = self.inspector.distribute('message', coll_dict) - for hook in self._message_forward_pre_hooks.values(): - res = hook(self, (msg_kwargs, )) - if res is not None: - msg_kwargs = res[0] if isinstance(res, tuple) else res - out = self.message(**msg_kwargs) - for hook in self._message_forward_hooks.values(): - res = hook(self, (msg_kwargs, ), out) - if res is not None: - out = res - - # For `GNNExplainer`, we require a separate message and - # aggregate procedure since this allows us to inject the - # `edge_mask` into the message passing computation scheme. - if self.__explain__: - edge_mask = self.__edge_mask__.sigmoid() - # Some ops add self-loops to `edge_index`. We need to do - # the same for `edge_mask` (but do not train those). 
- if out.size(self.node_dim) != edge_mask.size(0): - edge_mask = edge_mask[self.__loop_mask__] - loop = edge_mask.new_ones(size[0]) - edge_mask = paddle.concat([edge_mask, loop], dim=0) - assert out.size(self.node_dim) == edge_mask.size(0) - out = out * edge_mask.view([-1] + [1] * (out.dim() - 1)) - - aggr_kwargs = self.inspector.distribute('aggregate', coll_dict) - for hook in self._aggregate_forward_pre_hooks.values(): - res = hook(self, (aggr_kwargs, )) - if res is not None: - aggr_kwargs = res[0] if isinstance(res, tuple) else res - - out = self.aggregate(out, **aggr_kwargs) - for hook in self._aggregate_forward_hooks.values(): - res = hook(self, (aggr_kwargs, ), out) - if res is not None: - out = res - - update_kwargs = self.inspector.distribute('update', coll_dict) - out = self.update(out, **update_kwargs) - - if decomposed_layers > 1: - decomp_out.append(out) - - if decomposed_layers > 1: - out = paddle.concat(decomp_out, dim=-1) - - for hook in self._propagate_forward_hooks.values(): - res = hook(self, (edge_index, size, kwargs), out) - if res is not None: - out = res - - return out - - def message(self, x_j: Tensor) -> Tensor: - r"""Constructs messages from node :math:`j` to node :math:`i` - in analogy to :math:`\phi_{\mathbf{\Theta}}` for each edge in - :obj:`edge_index`. - This function can take any argument as input which was initially - passed to :meth:`propagate`. - Furthermore, tensors passed to :meth:`propagate` can be mapped to the - respective nodes :math:`i` and :math:`j` by appending :obj:`_i` or - :obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`. - """ - return x_j - - def aggregate(self, inputs: Tensor, index: Tensor, - ptr: Optional[Tensor] = None, - dim_size: Optional[int] = None) -> Tensor: - r"""Aggregates messages from neighbors as - :math:`\square_{j \in \mathcal{N}(i)}`. - - Takes in the output of message computation as first argument and any - argument which was initially passed to :meth:`propagate`. - - By default, this function will delegate its call to scatter functions - that support "add", "mean" and "max" operations as specified in - :meth:`__init__` by the :obj:`aggr` argument. - """ - return scatter(inputs, index, dim=self.node_dim, dim_size=dim_size, - reduce=self.aggr) - - def update(self, inputs: Tensor) -> Tensor: - r"""Updates node embeddings in analogy to - :math:`\gamma_{\mathbf{\Theta}}` for each node - :math:`i \in \mathcal{V}`. - Takes in the output of aggregation as first argument and any argument - which was initially passed to :meth:`propagate`. - """ - return inputs - - def edge_update(self) -> Tensor: - r"""Computes or updates features for each edge in the graph. - This function can take any argument as input which was initially passed - to :meth:`edge_updater`. - Furthermore, tensors passed to :meth:`edge_updater` can be mapped to - the respective nodes :math:`i` and :math:`j` by appending :obj:`_i` or - :obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`. - """ - raise NotImplementedError - def message_and_aggregate(self, adj_t) -> Tensor: - r"""Fuses computations of :func:`message` and :func:`aggregate` into a - single function. - If applicable, this saves both time and memory since messages do not - explicitly need to be materialized. - This function will only gets called in case it is implemented and - propagation takes place based on a :obj:`torch_sparse.SparseTensor`. 
- """ - raise NotImplementedError \ No newline at end of file +from collections import OrderedDict +from inspect import Parameter +from typing import List +from typing import Optional +from typing import Set +from typing import Tuple +from uuid import uuid1 + +import paddle +from inspector import Inspector +from paddle import Tensor +from paddle.nn import Layer +from scatter import scatter + +Size = Optional[Tuple[int, int]] + + +class MessagePassing(Layer): + r"""Base class for creating message passing layers of the form + + .. math:: + \mathbf{x}_i^{\prime} = \gamma_{\mathbf{\Theta}} \left( \mathbf{x}_i, + \square_{j \in \mathcal{N}(i)} \, \phi_{\mathbf{\Theta}} + \left(\mathbf{x}_i, \mathbf{x}_j,\mathbf{e}_{j,i}\right) \right), + + where :math:`\square` denotes a differentiable, permutation invariant + function, *e.g.*, sum, mean or max, and :math:`\gamma_{\mathbf{\Theta}}` + and :math:`\phi_{\mathbf{\Theta}}` denote differentiable functions such as + MLPs. + + Args: + aggr (string, optional): The aggregation scheme to use + (:obj:`"add"`, :obj:`"mean"`, :obj:`"max"` or :obj:`None`). + (default: :obj:`"add"`) + flow (string, optional): The flow direction of message passing + (:obj:`"source_to_target"` or :obj:`"target_to_source"`). + (default: :obj:`"source_to_target"`) + node_dim (int, optional): The axis along which to propagate. + (default: :obj:`-2`) + decomposed_layers (int, optional): The number of feature decomposition + layers, as introduced in the `"Optimizing Memory Efficiency of + Graph Neural Networks on Edge Computing Platforms" + `_ paper. + Feature decomposition reduces the peak memory usage by slicing + the feature dimensions into separated feature decomposition layers + during GNN aggregation. + This method can accelerate GNN execution on CPU-based platforms + However, this method is not applicable to all GNN operators + available, in particular for operators in which message computation + can not easily be decomposed, *e.g.* in attention-based GNNs. + The selection of the optimal value of :obj:`decomposed_layers` + depends both on the specific graph dataset and available hardware + resources. + A value of :obj:`2` is suitable in most cases. + Although the peak memory usage is directly associated with the + granularity of feature decomposition, the same is not necessarily + true for execution speedups. 
(default: :obj:`1`) + """ + + special_args: Set[str] = { + "edge_index", + "adj_t", + "edge_index_i", + "edge_index_j", + "size", + "size_i", + "size_j", + "ptr", + "index", + "dim_size", + } + + def __init__( + self, + aggr: Optional[str] = "add", + flow: str = "source_to_target", + node_dim: int = -2, + decomposed_layers: int = 1, + ): + + super().__init__() + + self.aggr = aggr + assert self.aggr in ["add", "mean", "max", None] + + self.flow = flow + assert self.flow in ["source_to_target", "target_to_source"] + + self.node_dim = node_dim + self.decomposed_layers = decomposed_layers + + self.inspector = Inspector(self) + self.inspector.inspect(self.message) + self.inspector.inspect(self.aggregate, pop_first=True) + self.inspector.inspect(self.update, pop_first=True) + self.inspector.inspect(self.edge_update) + self.inspector.inspect(self.message_and_aggregate, pop_first=True) + + self.__user_args__ = self.inspector.keys( + ["message", "aggregate", "update"] + ).difference(self.special_args) + self.__fused_user_args__ = self.inspector.keys( + ["message_and_aggregate", "update"] + ).difference(self.special_args) + self.__edge_user_args__ = self.inspector.keys(["edge_update"]).difference( + self.special_args + ) + + # Support for "fused" message passing. + self.fuse = self.inspector.implements("message_and_aggregate") + + # Support for GNNExplainer. + self.__explain__ = False + self.__edge_mask__ = None + self.__loop_mask__ = None + + # Hooks: + self._propagate_forward_pre_hooks = OrderedDict() + self._propagate_forward_hooks = OrderedDict() + self._message_forward_pre_hooks = OrderedDict() + self._message_forward_hooks = OrderedDict() + self._aggregate_forward_pre_hooks = OrderedDict() + self._aggregate_forward_hooks = OrderedDict() + self._message_and_aggregate_forward_pre_hooks = OrderedDict() + self._message_and_aggregate_forward_hooks = OrderedDict() + self._edge_update_forward_pre_hooks = OrderedDict() + self._edge_update_forward_hooks = OrderedDict() + + def __set_size__(self, size: List[Optional[int]], dim: int, src: Tensor): + the_size = size[dim] + if the_size is None: + size[dim] = src.shape[self.node_dim] + elif the_size != src.shape[self.node_dim]: + raise ValueError( + ( + f"Encountered tensor with size {src.size(self.node_dim)} in " + f"dimension {self.node_dim}, but expected size {the_size}." 
+ ) + ) + + def __lift__(self, src, edge_index, dim): + if isinstance(edge_index, Tensor): + index = edge_index[dim] + return src.index_select(index, self.node_dim) + raise ValueError + + def __collect__(self, args, edge_index, size, kwargs): + i, j = (1, 0) if self.flow == "source_to_target" else (0, 1) + + out = {} + for arg in args: + if arg[-2:] not in ["_i", "_j"]: + out[arg] = kwargs.get(arg, Parameter.empty) + else: + dim = 0 if arg[-2:] == "_j" else 1 + data = kwargs.get(arg[:-2], Parameter.empty) + + if isinstance(data, (tuple, list)): + assert len(data) == 2 + if isinstance(data[1 - dim], Tensor): + self.__set_size__(size, 1 - dim, data[1 - dim]) + data = data[dim] + + if isinstance(data, Tensor): + self.__set_size__(size, dim, data) + data = self.__lift__(data, edge_index, j if arg[-2:] == "_j" else i) + + out[arg] = data + + if isinstance(edge_index, Tensor): + out["adj_t"] = None + out["edge_index"] = edge_index + out["edge_index_i"] = edge_index[i] + out["edge_index_j"] = edge_index[j] + out["ptr"] = None + + out["index"] = out["edge_index_i"] + out["size"] = size + out["size_i"] = size[1] or size[0] + out["size_j"] = size[0] or size[1] + out["dim_size"] = out["size_i"] + + return out + + def __check_input__(self, edge_index, size): + the_size: List[Optional[int]] = [None, None] + + if isinstance(edge_index, Tensor): + assert edge_index.dtype == paddle.int64 + assert edge_index.dim() == 2 + assert edge_index.shape[0] == 2 + if size is not None: + the_size[0] = size[0] + the_size[1] = size[1] + return the_size + + raise ValueError( + ( + "`MessagePassing.propagate` only supports `int` of " + "shape `[2, num_messages]` for " + "argument `edge_index`." + ) + ) + + def propagate(self, edge_index: Tensor, size: Size = None, **kwargs): + r"""The initial call to start propagating messages. + + Args: + edge_index (Tensor): A :obj:`int` or a + :obj:`edge_index` holds the indices of a general (sparse) + assignment matrix of shape :obj:`[N, M]`. + If :obj:`edge_index` is of type :obj:`int`, its + shape must be defined as :obj:`[2, num_messages]`, where + messages from nodes in :obj:`edge_index[0]` are sent to + nodes in :obj:`edge_index[1]` + (in case :obj:`flow="source_to_target"`). + If :obj:`edge_index` is of type + :obj:`(row, col)` should relate to :obj:`row = edge_index[1]` + and :obj:`col = edge_index[0]`. + The major difference between both formats is that we need to + input the *transposed* sparse adjacency matrix into + :func:`propagate`. + size (tuple, optional): The size :obj:`(N, M)` of the assignment + matrix in case :obj:`edge_index` is a :obj:`LongTensor`. + If set to :obj:`None`, the size will be automatically inferred + and assumed to be quadratic. + **kwargs: Any additional data which is needed to construct and + aggregate messages, and to update node embeddings. 
+ """ + decomposed_layers = 1 if self.__explain__ else self.decomposed_layers + + for hook in self._propagate_forward_pre_hooks.values(): + res = hook(self, (edge_index, size, kwargs)) + if res is not None: + edge_index, size, kwargs = res + + size = self.__check_input__(edge_index, size) + + if isinstance(edge_index, Tensor) or not self.fuse: + if decomposed_layers > 1: + user_args = self.__user_args__ + decomp_args = {a[:-2] for a in user_args if a[-2:] == "_j"} + decomp_kwargs = { + a: kwargs[a].chunk(decomposed_layers, -1) for a in decomp_args + } + decomp_out = [] + + for i in range(decomposed_layers): + if decomposed_layers > 1: + for arg in decomp_args: + kwargs[arg] = decomp_kwargs[arg][i] + + coll_dict = self.__collect__( + self.__user_args__, edge_index, size, kwargs + ) + + msg_kwargs = self.inspector.distribute("message", coll_dict) + for hook in self._message_forward_pre_hooks.values(): + res = hook(self, (msg_kwargs,)) + if res is not None: + msg_kwargs = res[0] if isinstance(res, tuple) else res + out = self.message(**msg_kwargs) + for hook in self._message_forward_hooks.values(): + res = hook(self, (msg_kwargs,), out) + if res is not None: + out = res + + # For `GNNExplainer`, we require a separate message and + # aggregate procedure since this allows us to inject the + # `edge_mask` into the message passing computation scheme. + if self.__explain__: + edge_mask = self.__edge_mask__.sigmoid() + # Some ops add self-loops to `edge_index`. We need to do + # the same for `edge_mask` (but do not train those). + if out.size(self.node_dim) != edge_mask.size(0): + edge_mask = edge_mask[self.__loop_mask__] + loop = edge_mask.new_ones(size[0]) + edge_mask = paddle.concat([edge_mask, loop], dim=0) + assert out.size(self.node_dim) == edge_mask.size(0) + out = out * edge_mask.view([-1] + [1] * (out.dim() - 1)) + + aggr_kwargs = self.inspector.distribute("aggregate", coll_dict) + for hook in self._aggregate_forward_pre_hooks.values(): + res = hook(self, (aggr_kwargs,)) + if res is not None: + aggr_kwargs = res[0] if isinstance(res, tuple) else res + + out = self.aggregate(out, **aggr_kwargs) + for hook in self._aggregate_forward_hooks.values(): + res = hook(self, (aggr_kwargs,), out) + if res is not None: + out = res + + update_kwargs = self.inspector.distribute("update", coll_dict) + out = self.update(out, **update_kwargs) + + if decomposed_layers > 1: + decomp_out.append(out) + + if decomposed_layers > 1: + out = paddle.concat(decomp_out, dim=-1) + + for hook in self._propagate_forward_hooks.values(): + res = hook(self, (edge_index, size, kwargs), out) + if res is not None: + out = res + + return out + + def message(self, x_j: Tensor) -> Tensor: + r"""Constructs messages from node :math:`j` to node :math:`i` + in analogy to :math:`\phi_{\mathbf{\Theta}}` for each edge in + :obj:`edge_index`. + This function can take any argument as input which was initially + passed to :meth:`propagate`. + Furthermore, tensors passed to :meth:`propagate` can be mapped to the + respective nodes :math:`i` and :math:`j` by appending :obj:`_i` or + :obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`. + """ + return x_j + + def aggregate( + self, + inputs: Tensor, + index: Tensor, + ptr: Optional[Tensor] = None, + dim_size: Optional[int] = None, + ) -> Tensor: + r"""Aggregates messages from neighbors as + :math:`\square_{j \in \mathcal{N}(i)}`. + + Takes in the output of message computation as first argument and any + argument which was initially passed to :meth:`propagate`. 
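A minimal sketch of subclassing this ported MessagePassing base class, mirroring how ChebConv drives propagate earlier in this diff; the layer, graph, and shapes below are illustrative and not part of the diff.

# Hypothetical neighbor-sum layer built on the ported base class.
import paddle
from message_passing import MessagePassing


class SumConv(MessagePassing):
    def __init__(self):
        super().__init__(aggr="add", flow="source_to_target", node_dim=-2)

    def forward(self, x, edge_index):
        # x: [num_nodes, num_features], edge_index: int64 tensor of shape [2, num_edges]
        return self.propagate(edge_index, x=x, size=None)

    def message(self, x_j):
        # x_j carries the features of the source node j of every edge.
        return x_j


x = paddle.eye(4)  # 4 nodes with one-hot features
edge_index = paddle.to_tensor([[0, 1, 2, 3], [1, 2, 3, 0]], dtype="int64")
out = SumConv()(x, edge_index)  # row i sums the features of node i's in-neighbors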
+ + By default, this function will delegate its call to scatter functions + that support "add", "mean" and "max" operations as specified in + :meth:`__init__` by the :obj:`aggr` argument. + """ + return scatter( + inputs, index, dim=self.node_dim, dim_size=dim_size, reduce=self.aggr + ) + + def update(self, inputs: Tensor) -> Tensor: + r"""Updates node embeddings in analogy to + :math:`\gamma_{\mathbf{\Theta}}` for each node + :math:`i \in \mathcal{V}`. + Takes in the output of aggregation as first argument and any argument + which was initially passed to :meth:`propagate`. + """ + return inputs + + def edge_update(self) -> Tensor: + r"""Computes or updates features for each edge in the graph. + This function can take any argument as input which was initially passed + to :meth:`edge_updater`. + Furthermore, tensors passed to :meth:`edge_updater` can be mapped to + the respective nodes :math:`i` and :math:`j` by appending :obj:`_i` or + :obj:`_j` to the variable name, *.e.g.* :obj:`x_i` and :obj:`x_j`. + """ + raise NotImplementedError + + def message_and_aggregate(self, adj_t) -> Tensor: + r"""Fuses computations of :func:`message` and :func:`aggregate` into a + single function. + If applicable, this saves both time and memory since messages do not + explicitly need to be materialized. + This function will only gets called in case it is implemented and + propagation takes place based on a :obj:`torch_sparse.SparseTensor`. + """ + raise NotImplementedError diff --git a/jointContribution/graphGalerkin/utils/scatter.py b/jointContribution/graphGalerkin/utils/scatter.py index eebea65be9..50173e465e 100644 --- a/jointContribution/graphGalerkin/utils/scatter.py +++ b/jointContribution/graphGalerkin/utils/scatter.py @@ -1,129 +1,155 @@ -from typing import Optional, Tuple - -import paddle - -def broadcast(src: paddle.Tensor, other: paddle.Tensor, dim: int): - if dim < 0: - dim = other.dim() + dim - if src.dim() == 1: - for _ in range(0, dim): - src = src.unsqueeze(0) - for _ in range(src.dim(), other.dim()): - src = src.unsqueeze(-1) - src = src.expand_as(other) - return src - -def scatter_add_(dim, index, src, x): - if x.dim()==1: - output = paddle.scatter_nd_add(x.unsqueeze(-1), index.unsqueeze(-1), src.unsqueeze(-1)).squeeze(-1) - else: - i, j = index.shape - grid_x , grid_y = paddle.meshgrid(paddle.arange(i), paddle.arange(j)) - index = paddle.stack([index.flatten(), grid_y.flatten()], axis=1) - updates_index = paddle.stack([grid_x.flatten(), grid_y.flatten()], axis=1) - updates = paddle.gather_nd(src, index=updates_index) - output = paddle.scatter_nd_add(x, index, updates) - return output - -def scatter_sum(src: paddle.Tensor, index: paddle.Tensor, dim: int = -1, - out: Optional[paddle.Tensor] = None, - dim_size: Optional[int] = None) -> paddle.Tensor: - index = broadcast(index, src, dim) - if out is None: - size = list(src.shape) - if dim_size is not None: - size[dim] = dim_size - elif index.numel() == 0: - size[dim] = 0 - else: - size[dim] = int(index.max()) + 1 - out = paddle.zeros(size, dtype=src.dtype) - return scatter_add_(0, index, src, out) - else: - return scatter_add_(0, index, src, out) - -def scatter_add(src: paddle.Tensor, index: paddle.Tensor, dim: int = -1, - out: Optional[paddle.Tensor] = None, - dim_size: Optional[int] = None) -> paddle.Tensor: - return scatter_sum(src, index, dim, out, dim_size) - -def scatter_mean(src: paddle.Tensor, index: paddle.Tensor, dim: int = -1, - out: Optional[paddle.Tensor] = None, - dim_size: Optional[int] = None) -> paddle.Tensor: - - out = 
scatter_sum(src, index, dim, out, dim_size) - dim_size = out.size(dim) - - index_dim = dim - if index_dim < 0: - index_dim = index_dim + src.dim() - if index.dim() <= index_dim: - index_dim = index.dim() - 1 - - ones = paddle.ones(index.size(), dtype=src.dtype, place=src.place) - count = scatter_sum(ones, index, index_dim, None, dim_size) - count.clamp_(1) - count = broadcast(count, out, dim) - if paddle.is_floating_point(out): - out.true_divide_(count) - else: - out.floor_divide_(count) - return out - -def scatter(src: paddle.Tensor, index: paddle.Tensor, dim: int = -1, - out: Optional[paddle.Tensor] = None, dim_size: Optional[int] = None, - reduce: str = "sum") -> paddle.Tensor: - r""" - Reduces all values from the :attr:`src` tensor into :attr:`out` at the - indices specified in the :attr:`index` tensor along a given axis - :attr:`dim`. - For each value in :attr:`src`, its output index is specified by its index - in :attr:`src` for dimensions outside of :attr:`dim` and by the - corresponding value in :attr:`index` for dimension :attr:`dim`. - The applied reduction is defined via the :attr:`reduce` argument. - - Formally, if :attr:`src` and :attr:`index` are :math:`n`-dimensional - tensors with size :math:`(x_0, ..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})` - and :attr:`dim` = `i`, then :attr:`out` must be an :math:`n`-dimensional - tensor with size :math:`(x_0, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})`. - Moreover, the values of :attr:`index` must be between :math:`0` and - :math:`y - 1` in ascending order. - The :attr:`index` tensor supports broadcasting in case its dimensions do - not match with :attr:`src`. - - For one-dimensional tensors with :obj:`reduce="sum"`, the operation - computes - - .. math:: - \mathrm{out}_i = \mathrm{out}_i + \sum_j~\mathrm{src}_j - - where :math:`\sum_j` is over :math:`j` such that - :math:`\mathrm{index}_j = i`. - - .. note:: - - This operation is implemented via atomic operations on the GPU and is - therefore **non-deterministic** since the order of parallel operations - to the same value is undetermined. - For floating-point variables, this results in a source of variance in - the result. - - :param src: The source tensor. - :param index: The indices of elements to scatter. - :param dim: The axis along which to index. (default: :obj:`-1`) - :param out: The destination tensor. - :param dim_size: If :attr:`out` is not given, automatically create output - with size :attr:`dim_size` at dimension :attr:`dim`. - If :attr:`dim_size` is not given, a minimal sized output tensor - according to :obj:`index.max() + 1` is returned. - :param reduce: The reduce operation (:obj:`"sum"`, :obj:`"mul"`, - :obj:`"mean"`, :obj:`"min"` or :obj:`"max"`). 
(default: :obj:`"sum"`) - - :rtype: :class:`Tensor` - """ - if reduce == 'sum' or reduce == 'add': - return scatter_sum(src, index, dim, out, dim_size) - elif reduce == 'mean': - return scatter_mean(src, index, dim, out, dim_size) - else: - raise ValueError +from typing import Optional +from typing import Tuple + +import paddle + + +def broadcast(src: paddle.Tensor, other: paddle.Tensor, dim: int): + if dim < 0: + dim = other.dim() + dim + if src.dim() == 1: + for _ in range(0, dim): + src = src.unsqueeze(0) + for _ in range(src.dim(), other.dim()): + src = src.unsqueeze(-1) + src = src.expand_as(other) + return src + + +def scatter_add_(dim, index, src, x): + if x.dim() == 1: + output = paddle.scatter_nd_add( + x.unsqueeze(-1), index.unsqueeze(-1), src.unsqueeze(-1) + ).squeeze(-1) + else: + i, j = index.shape + grid_x, grid_y = paddle.meshgrid(paddle.arange(i), paddle.arange(j)) + index = paddle.stack([index.flatten(), grid_y.flatten()], axis=1) + updates_index = paddle.stack([grid_x.flatten(), grid_y.flatten()], axis=1) + updates = paddle.gather_nd(src, index=updates_index) + output = paddle.scatter_nd_add(x, index, updates) + return output + + +def scatter_sum( + src: paddle.Tensor, + index: paddle.Tensor, + dim: int = -1, + out: Optional[paddle.Tensor] = None, + dim_size: Optional[int] = None, +) -> paddle.Tensor: + index = broadcast(index, src, dim) + if out is None: + size = list(src.shape) + if dim_size is not None: + size[dim] = dim_size + elif index.numel() == 0: + size[dim] = 0 + else: + size[dim] = int(index.max()) + 1 + out = paddle.zeros(size, dtype=src.dtype) + return scatter_add_(0, index, src, out) + else: + return scatter_add_(0, index, src, out) + + +def scatter_add( + src: paddle.Tensor, + index: paddle.Tensor, + dim: int = -1, + out: Optional[paddle.Tensor] = None, + dim_size: Optional[int] = None, +) -> paddle.Tensor: + return scatter_sum(src, index, dim, out, dim_size) + + +def scatter_mean( + src: paddle.Tensor, + index: paddle.Tensor, + dim: int = -1, + out: Optional[paddle.Tensor] = None, + dim_size: Optional[int] = None, +) -> paddle.Tensor: + + out = scatter_sum(src, index, dim, out, dim_size) + dim_size = out.size(dim) + + index_dim = dim + if index_dim < 0: + index_dim = index_dim + src.dim() + if index.dim() <= index_dim: + index_dim = index.dim() - 1 + + ones = paddle.ones(index.size(), dtype=src.dtype, place=src.place) + count = scatter_sum(ones, index, index_dim, None, dim_size) + count.clamp_(1) + count = broadcast(count, out, dim) + if paddle.is_floating_point(out): + out.true_divide_(count) + else: + out.floor_divide_(count) + return out + + +def scatter( + src: paddle.Tensor, + index: paddle.Tensor, + dim: int = -1, + out: Optional[paddle.Tensor] = None, + dim_size: Optional[int] = None, + reduce: str = "sum", +) -> paddle.Tensor: + r""" + Reduces all values from the :attr:`src` tensor into :attr:`out` at the + indices specified in the :attr:`index` tensor along a given axis + :attr:`dim`. + For each value in :attr:`src`, its output index is specified by its index + in :attr:`src` for dimensions outside of :attr:`dim` and by the + corresponding value in :attr:`index` for dimension :attr:`dim`. + The applied reduction is defined via the :attr:`reduce` argument. + + Formally, if :attr:`src` and :attr:`index` are :math:`n`-dimensional + tensors with size :math:`(x_0, ..., x_{i-1}, x_i, x_{i+1}, ..., x_{n-1})` + and :attr:`dim` = `i`, then :attr:`out` must be an :math:`n`-dimensional + tensor with size :math:`(x_0, ..., x_{i-1}, y, x_{i+1}, ..., x_{n-1})`. 
+ Moreover, the values of :attr:`index` must be between :math:`0` and + :math:`y - 1` in ascending order. + The :attr:`index` tensor supports broadcasting in case its dimensions do + not match with :attr:`src`. + + For one-dimensional tensors with :obj:`reduce="sum"`, the operation + computes + + .. math:: + \mathrm{out}_i = \mathrm{out}_i + \sum_j~\mathrm{src}_j + + where :math:`\sum_j` is over :math:`j` such that + :math:`\mathrm{index}_j = i`. + + .. note:: + + This operation is implemented via atomic operations on the GPU and is + therefore **non-deterministic** since the order of parallel operations + to the same value is undetermined. + For floating-point variables, this results in a source of variance in + the result. + + :param src: The source tensor. + :param index: The indices of elements to scatter. + :param dim: The axis along which to index. (default: :obj:`-1`) + :param out: The destination tensor. + :param dim_size: If :attr:`out` is not given, automatically create output + with size :attr:`dim_size` at dimension :attr:`dim`. + If :attr:`dim_size` is not given, a minimal sized output tensor + according to :obj:`index.max() + 1` is returned. + :param reduce: The reduce operation (:obj:`"sum"`, :obj:`"mul"`, + :obj:`"mean"`, :obj:`"min"` or :obj:`"max"`). (default: :obj:`"sum"`) + + :rtype: :class:`Tensor` + """ + if reduce == "sum" or reduce == "add": + return scatter_sum(src, index, dim, out, dim_size) + elif reduce == "mean": + return scatter_mean(src, index, dim, out, dim_size) + else: + raise ValueError diff --git a/jointContribution/graphGalerkin/utils/utils.py b/jointContribution/graphGalerkin/utils/utils.py index c495408bd6..14f8844376 100644 --- a/jointContribution/graphGalerkin/utils/utils.py +++ b/jointContribution/graphGalerkin/utils/utils.py @@ -1,236 +1,261 @@ -from paddle import Tensor -from typing import Optional, Tuple, Union -import paddle - -from scatter import scatter, scatter_add -OptTensor = Optional[Tensor] - -import pgl - -class Data(): - def __init__(self, x, y, edge_index): - self.y = y - self.x = x - self.edge_index = edge_index - def __call__(self): - return pgl.Graph(edges=self.edge_index, - num_nodes=self.x.shape[0], - node_feat=self.x) - -def maybe_num_nodes(edge_index, num_nodes=None): - if num_nodes is not None: - return num_nodes - elif isinstance(edge_index, Tensor): - return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0 - else: - return max(edge_index.size(0), edge_index.size(1)) - -def remove_self_loops(edge_index: Tensor, - edge_attr: OptTensor = None) -> Tuple[Tensor, OptTensor]: - r"""Removes every self-loop in the graph given by :attr:`edge_index`, so - that :math:`(i,i) \not\in \mathcal{E}` for every :math:`i \in \mathcal{V}`. - - Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): Edge weights or multi-dimensional - edge features. (default: :obj:`None`) - - :rtype: (:class:`LongTensor`, :class:`Tensor`) - """ - mask = edge_index[0] != edge_index[1] - for _ in range(edge_index.dim()): - edge_index[_] = paddle.masked_select(edge_index[_], mask) - if edge_attr is None: - return edge_index, None - else: - return edge_index, edge_attr[mask] - -def add_self_loops( - edge_index: Tensor, edge_attr: OptTensor = None, - fill_value: Union[float, Tensor, str] = None, - num_nodes: Optional[int] = None) -> Tuple[Tensor, OptTensor]: - r"""Adds a self-loop :math:`(i,i) \in \mathcal{E}` to every node - :math:`i \in \mathcal{V}` in the graph given by :attr:`edge_index`. 
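A short sketch of the ported scatter helper defined in scatter.py above, computing the segment sum its docstring describes; the values and bucket count are illustrative, not from the diff.

# Hypothetical segment sum with the ported scatter (reduce="sum").
import paddle
from scatter import scatter

src = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
index = paddle.to_tensor([0, 1, 0, 2])  # bucket id for every element of src

out = scatter(src, index, dim=-1, dim_size=3, reduce="sum")
# out[i] sums the src values whose index equals i: [4.0, 2.0, 4.0]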
- In case the graph is weighted or has multi-dimensional edge features - (:obj:`edge_attr != None`), edge features of self-loops will be added - according to :obj:`fill_value`. - - Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): Edge weights or multi-dimensional edge - features. (default: :obj:`None`) - fill_value (float or Tensor or str, optional): The way to generate - edge features of self-loops (in case :obj:`edge_attr != None`). - If given as :obj:`float` or :class:`paddle.Tensor`, edge features of - self-loops will be directly given by :obj:`fill_value`. - If given as :obj:`str`, edge features of self-loops are computed by - aggregating all features of edges that point to the specific node, - according to a reduce operation. (:obj:`"add"`, :obj:`"mean"`, - :obj:`"min"`, :obj:`"max"`, :obj:`"mul"`). (default: :obj:`1.`) - num_nodes (int, optional): The number of nodes, *i.e.* - :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) - - :rtype: (:class:`LongTensor`, :class:`Tensor`) - """ - N = maybe_num_nodes(edge_index, num_nodes) - - loop_index = paddle.arange(0, N, dtype=paddle.int64) - loop_index = paddle.tile(loop_index.unsqueeze(0), repeat_times=[2, 1]) - if edge_attr is not None: - if fill_value is None: - loop_attr = edge_attr.new_full((N, ) + edge_attr.size()[1:], 1.) - - elif isinstance(fill_value, (int, float)): - loop_attr = paddle.full((N, ), fill_value, dtype=edge_attr.dtype) - elif isinstance(fill_value, Tensor): - loop_attr = fill_value.to(edge_attr.device, edge_attr.dtype) - if edge_attr.dim() != loop_attr.dim(): - loop_attr = loop_attr.unsqueeze(0) - sizes = [N] + [1] * (loop_attr.dim() - 1) - loop_attr = loop_attr.repeat(*sizes) - - elif isinstance(fill_value, str): - loop_attr = scatter(edge_attr, edge_index[1], dim=0, dim_size=N, - reduce=fill_value) - else: - raise AttributeError("No valid 'fill_value' provided") - - edge_attr = paddle.concat([edge_attr, loop_attr], axis=0) - - edge_index = paddle.concat([edge_index, loop_index], axis=1) - return edge_index, edge_attr - -def get_laplacian(edge_index, edge_weight: Optional[paddle.Tensor] = None, - normalization: Optional[str] = None, - dtype: Optional[int] = None, - num_nodes: Optional[int] = None): - r""" Computes the graph Laplacian of the graph given by :obj:`edge_index` - and optional :obj:`edge_weight`. - - Args: - edge_index (LongTensor): The edge indices. - edge_weight (Tensor, optional): One-dimensional edge weights. - (default: :obj:`None`) - normalization (str, optional): The normalization scheme for the graph - Laplacian (default: :obj:`None`): - - 1. :obj:`None`: No normalization - :math:`\mathbf{L} = \mathbf{D} - \mathbf{A}` - - 2. :obj:`"sym"`: Symmetric normalization - :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1/2} \mathbf{A} - \mathbf{D}^{-1/2}` - - 3. :obj:`"rw"`: Random-walk normalization - :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1} \mathbf{A}` - dtype (paddle.dtype, optional): The desired data type of returned tensor - in case :obj:`edge_weight=None`. (default: :obj:`None`) - num_nodes (int, optional): The number of nodes, *i.e.* - :obj:`max_val + 1` of :attr:`edge_index`. 
(default: :obj:`None`) - """ - - if normalization is not None: - assert normalization in ['sym', 'rw'] # 'Invalid normalization' - - edge_index, edge_weight = remove_self_loops(edge_index, edge_weight) - - if edge_weight is None: - edge_weight = paddle.ones(edge_index.shape[1], dtype=dtype) - - num_nodes = maybe_num_nodes(edge_index, num_nodes) - row, col = edge_index[0], edge_index[1] - deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes) - if normalization is None: - # L = D - A. - edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes) - edge_weight = paddle.concat([-edge_weight, deg], dim=0) - elif normalization == 'sym': - # Compute A_norm = -D^{-1/2} A D^{-1/2}. - deg_inv_sqrt = deg.pow(-0.5) - deg_inv_sqrt = masked_fill(deg_inv_sqrt,deg_inv_sqrt == float('inf'), 0) - edge_weight = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col] - # L = I - A_norm. - edge_index, tmp = add_self_loops(edge_index, -edge_weight, - fill_value=1., num_nodes=num_nodes) - assert tmp is not None - edge_weight = tmp - else: - # Compute A_norm = -D^{-1} A. - deg_inv = 1.0 / deg - deg_inv.masked_fill_(deg_inv == float('inf'), 0) - edge_weight = deg_inv[row] * edge_weight - - # L = I - A_norm. - edge_index, tmp = add_self_loops(edge_index, -edge_weight, - fill_value=1., num_nodes=num_nodes) - assert tmp is not None - edge_weight = tmp - - return edge_index, edge_weight - -def masked_fill(x, mask, value): - y = paddle.full(x.shape, value, x.dtype) - return paddle.where(mask, y, x) - -def add_remaining_self_loops( - edge_index: Tensor, edge_attr: OptTensor = None, - fill_value: Union[float, Tensor, str] = None, - num_nodes: Optional[int] = None) -> Tuple[Tensor, OptTensor]: - r"""Adds remaining self-loop :math:`(i,i) \in \mathcal{E}` to every node - :math:`i \in \mathcal{V}` in the graph given by :attr:`edge_index`. - In case the graph is weighted or has multi-dimensional edge features - (:obj:`edge_attr != None`), edge features of non-existing self-loops will - be added according to :obj:`fill_value`. - - Args: - edge_index (LongTensor): The edge indices. - edge_attr (Tensor, optional): Edge weights or multi-dimensional edge - features. (default: :obj:`None`) - fill_value (float or Tensor or str, optional): The way to generate - edge features of self-loops (in case :obj:`edge_attr != None`). - If given as :obj:`float` or :class:`paddle.Tensor`, edge features of - self-loops will be directly given by :obj:`fill_value`. - If given as :obj:`str`, edge features of self-loops are computed by - aggregating all features of edges that point to the specific node, - according to a reduce operation. (:obj:`"add"`, :obj:`"mean"`, - :obj:`"min"`, :obj:`"max"`, :obj:`"mul"`). (default: :obj:`1.`) - num_nodes (int, optional): The number of nodes, *i.e.* - :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) - - :rtype: (:class:`LongTensor`, :class:`Tensor`) - """ - N = maybe_num_nodes(edge_index, num_nodes) - mask = edge_index[0] != edge_index[1] - - loop_index = paddle.arange(0, N, dtype=paddle.int32) - loop_index = paddle.tile(loop_index.unsqueeze(0), repeat_times=[2, 1]) - - if edge_attr is not None: - if fill_value is None: - loop_attr = edge_attr.new_full((N, ) + edge_attr.size()[1:], 1.) 
- - elif isinstance(fill_value, (int, float)): - loop_attr = paddle.full((N, ), fill_value, dtype=edge_attr.dtype) - elif isinstance(fill_value, Tensor): - loop_attr = fill_value.to(edge_attr.device, edge_attr.dtype) - if edge_attr.dim() != loop_attr.dim(): - loop_attr = loop_attr.unsqueeze(0) - sizes = [N] + [1] * (loop_attr.dim() - 1) - loop_attr = loop_attr.repeat(*sizes) - - elif isinstance(fill_value, str): - loop_attr = scatter(edge_attr, edge_index[1], dim=0, dim_size=N, - reduce=fill_value) - else: - raise AttributeError("No valid 'fill_value' provided") - - inv_mask = ~mask - - edge_attr = paddle.concat([edge_attr, loop_attr], axis=0) - edge_index = paddle.concat([edge_index, loop_index], axis=1) - return edge_index, edge_attr - -def expand_left(src: paddle.Tensor, dim: int, dims: int) -> paddle.Tensor: - for _ in range(dims + dim if dim < 0 else dim): - src = src.unsqueeze(0) - return src \ No newline at end of file +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle +from paddle import Tensor +from scatter import scatter +from scatter import scatter_add + +OptTensor = Optional[Tensor] + +import pgl + + +class Data: + def __init__(self, x, y, edge_index): + self.y = y + self.x = x + self.edge_index = edge_index + + def __call__(self): + return pgl.Graph( + edges=self.edge_index, num_nodes=self.x.shape[0], node_feat=self.x + ) + + +def maybe_num_nodes(edge_index, num_nodes=None): + if num_nodes is not None: + return num_nodes + elif isinstance(edge_index, Tensor): + return int(edge_index.max()) + 1 if edge_index.numel() > 0 else 0 + else: + return max(edge_index.size(0), edge_index.size(1)) + + +def remove_self_loops( + edge_index: Tensor, edge_attr: OptTensor = None +) -> Tuple[Tensor, OptTensor]: + r"""Removes every self-loop in the graph given by :attr:`edge_index`, so + that :math:`(i,i) \not\in \mathcal{E}` for every :math:`i \in \mathcal{V}`. + + Args: + edge_index (LongTensor): The edge indices. + edge_attr (Tensor, optional): Edge weights or multi-dimensional + edge features. (default: :obj:`None`) + + :rtype: (:class:`LongTensor`, :class:`Tensor`) + """ + mask = edge_index[0] != edge_index[1] + for _ in range(edge_index.dim()): + edge_index[_] = paddle.masked_select(edge_index[_], mask) + if edge_attr is None: + return edge_index, None + else: + return edge_index, edge_attr[mask] + + +def add_self_loops( + edge_index: Tensor, + edge_attr: OptTensor = None, + fill_value: Union[float, Tensor, str] = None, + num_nodes: Optional[int] = None, +) -> Tuple[Tensor, OptTensor]: + r"""Adds a self-loop :math:`(i,i) \in \mathcal{E}` to every node + :math:`i \in \mathcal{V}` in the graph given by :attr:`edge_index`. + In case the graph is weighted or has multi-dimensional edge features + (:obj:`edge_attr != None`), edge features of self-loops will be added + according to :obj:`fill_value`. + + Args: + edge_index (LongTensor): The edge indices. + edge_attr (Tensor, optional): Edge weights or multi-dimensional edge + features. (default: :obj:`None`) + fill_value (float or Tensor or str, optional): The way to generate + edge features of self-loops (in case :obj:`edge_attr != None`). + If given as :obj:`float` or :class:`paddle.Tensor`, edge features of + self-loops will be directly given by :obj:`fill_value`. + If given as :obj:`str`, edge features of self-loops are computed by + aggregating all features of edges that point to the specific node, + according to a reduce operation. 
(:obj:`"add"`, :obj:`"mean"`, + :obj:`"min"`, :obj:`"max"`, :obj:`"mul"`). (default: :obj:`1.`) + num_nodes (int, optional): The number of nodes, *i.e.* + :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) + + :rtype: (:class:`LongTensor`, :class:`Tensor`) + """ + N = maybe_num_nodes(edge_index, num_nodes) + + loop_index = paddle.arange(0, N, dtype=paddle.int64) + loop_index = paddle.tile(loop_index.unsqueeze(0), repeat_times=[2, 1]) + if edge_attr is not None: + if fill_value is None: + loop_attr = edge_attr.new_full((N,) + edge_attr.size()[1:], 1.0) + + elif isinstance(fill_value, (int, float)): + loop_attr = paddle.full((N,), fill_value, dtype=edge_attr.dtype) + elif isinstance(fill_value, Tensor): + loop_attr = fill_value.to(edge_attr.device, edge_attr.dtype) + if edge_attr.dim() != loop_attr.dim(): + loop_attr = loop_attr.unsqueeze(0) + sizes = [N] + [1] * (loop_attr.dim() - 1) + loop_attr = loop_attr.repeat(*sizes) + + elif isinstance(fill_value, str): + loop_attr = scatter( + edge_attr, edge_index[1], dim=0, dim_size=N, reduce=fill_value + ) + else: + raise AttributeError("No valid 'fill_value' provided") + + edge_attr = paddle.concat([edge_attr, loop_attr], axis=0) + + edge_index = paddle.concat([edge_index, loop_index], axis=1) + return edge_index, edge_attr + + +def get_laplacian( + edge_index, + edge_weight: Optional[paddle.Tensor] = None, + normalization: Optional[str] = None, + dtype: Optional[int] = None, + num_nodes: Optional[int] = None, +): + r"""Computes the graph Laplacian of the graph given by :obj:`edge_index` + and optional :obj:`edge_weight`. + + Args: + edge_index (LongTensor): The edge indices. + edge_weight (Tensor, optional): One-dimensional edge weights. + (default: :obj:`None`) + normalization (str, optional): The normalization scheme for the graph + Laplacian (default: :obj:`None`): + + 1. :obj:`None`: No normalization + :math:`\mathbf{L} = \mathbf{D} - \mathbf{A}` + + 2. :obj:`"sym"`: Symmetric normalization + :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1/2} \mathbf{A} + \mathbf{D}^{-1/2}` + + 3. :obj:`"rw"`: Random-walk normalization + :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1} \mathbf{A}` + dtype (paddle.dtype, optional): The desired data type of returned tensor + in case :obj:`edge_weight=None`. (default: :obj:`None`) + num_nodes (int, optional): The number of nodes, *i.e.* + :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) + """ + + if normalization is not None: + assert normalization in ["sym", "rw"] # 'Invalid normalization' + + edge_index, edge_weight = remove_self_loops(edge_index, edge_weight) + + if edge_weight is None: + edge_weight = paddle.ones(edge_index.shape[1], dtype=dtype) + + num_nodes = maybe_num_nodes(edge_index, num_nodes) + row, col = edge_index[0], edge_index[1] + deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes) + if normalization is None: + # L = D - A. + edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes) + edge_weight = paddle.concat([-edge_weight, deg], dim=0) + elif normalization == "sym": + # Compute A_norm = -D^{-1/2} A D^{-1/2}. + deg_inv_sqrt = deg.pow(-0.5) + deg_inv_sqrt = masked_fill(deg_inv_sqrt, deg_inv_sqrt == float("inf"), 0) + edge_weight = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col] + # L = I - A_norm. + edge_index, tmp = add_self_loops( + edge_index, -edge_weight, fill_value=1.0, num_nodes=num_nodes + ) + assert tmp is not None + edge_weight = tmp + else: + # Compute A_norm = -D^{-1} A. 
+ deg_inv = 1.0 / deg + deg_inv.masked_fill_(deg_inv == float("inf"), 0) + edge_weight = deg_inv[row] * edge_weight + + # L = I - A_norm. + edge_index, tmp = add_self_loops( + edge_index, -edge_weight, fill_value=1.0, num_nodes=num_nodes + ) + assert tmp is not None + edge_weight = tmp + + return edge_index, edge_weight + + +def masked_fill(x, mask, value): + y = paddle.full(x.shape, value, x.dtype) + return paddle.where(mask, y, x) + + +def add_remaining_self_loops( + edge_index: Tensor, + edge_attr: OptTensor = None, + fill_value: Union[float, Tensor, str] = None, + num_nodes: Optional[int] = None, +) -> Tuple[Tensor, OptTensor]: + r"""Adds remaining self-loop :math:`(i,i) \in \mathcal{E}` to every node + :math:`i \in \mathcal{V}` in the graph given by :attr:`edge_index`. + In case the graph is weighted or has multi-dimensional edge features + (:obj:`edge_attr != None`), edge features of non-existing self-loops will + be added according to :obj:`fill_value`. + + Args: + edge_index (LongTensor): The edge indices. + edge_attr (Tensor, optional): Edge weights or multi-dimensional edge + features. (default: :obj:`None`) + fill_value (float or Tensor or str, optional): The way to generate + edge features of self-loops (in case :obj:`edge_attr != None`). + If given as :obj:`float` or :class:`paddle.Tensor`, edge features of + self-loops will be directly given by :obj:`fill_value`. + If given as :obj:`str`, edge features of self-loops are computed by + aggregating all features of edges that point to the specific node, + according to a reduce operation. (:obj:`"add"`, :obj:`"mean"`, + :obj:`"min"`, :obj:`"max"`, :obj:`"mul"`). (default: :obj:`1.`) + num_nodes (int, optional): The number of nodes, *i.e.* + :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) + + :rtype: (:class:`LongTensor`, :class:`Tensor`) + """ + N = maybe_num_nodes(edge_index, num_nodes) + mask = edge_index[0] != edge_index[1] + + loop_index = paddle.arange(0, N, dtype=paddle.int32) + loop_index = paddle.tile(loop_index.unsqueeze(0), repeat_times=[2, 1]) + + if edge_attr is not None: + if fill_value is None: + loop_attr = edge_attr.new_full((N,) + edge_attr.size()[1:], 1.0) + + elif isinstance(fill_value, (int, float)): + loop_attr = paddle.full((N,), fill_value, dtype=edge_attr.dtype) + elif isinstance(fill_value, Tensor): + loop_attr = fill_value.to(edge_attr.device, edge_attr.dtype) + if edge_attr.dim() != loop_attr.dim(): + loop_attr = loop_attr.unsqueeze(0) + sizes = [N] + [1] * (loop_attr.dim() - 1) + loop_attr = loop_attr.repeat(*sizes) + + elif isinstance(fill_value, str): + loop_attr = scatter( + edge_attr, edge_index[1], dim=0, dim_size=N, reduce=fill_value + ) + else: + raise AttributeError("No valid 'fill_value' provided") + + inv_mask = ~mask + + edge_attr = paddle.concat([edge_attr, loop_attr], axis=0) + edge_index = paddle.concat([edge_index, loop_index], axis=1) + return edge_index, edge_attr + + +def expand_left(src: paddle.Tensor, dim: int, dims: int) -> paddle.Tensor: + for _ in range(dims + dim if dim < 0 else dim): + src = src.unsqueeze(0) + return src diff --git a/jointContribution/graphcast/args.py b/jointContribution/graphcast/args.py index f508222824..1f75c26c22 100644 --- a/jointContribution/graphcast/args.py +++ b/jointContribution/graphcast/args.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream from dataclasses import dataclass from dataclasses import field @@ -341,3 +342,327 @@ def mesh2grid_edge_emb_dim(self): with open("GraphCast_small.json", "r") as f: args = 
TrainingArguments(**json.load(f)) print(args) +======= +from dataclasses import dataclass +from dataclasses import field + +import numpy as np + +# https://www.ecmwf.int/en/forecasts/dataset/ecmwf-reanalysis-v5 +PRESSURE_LEVELS_ERA5_37 = ( + 1, + 2, + 3, + 5, + 7, + 10, + 20, + 30, + 50, + 70, + 100, + 125, + 150, + 175, + 200, + 225, + 250, + 300, + 350, + 400, + 450, + 500, + 550, + 600, + 650, + 700, + 750, + 775, + 800, + 825, + 850, + 875, + 900, + 925, + 950, + 975, + 1000, +) + +# https://www.ecmwf.int/en/forecasts/datasets/set-i +PRESSURE_LEVELS_HRES_25 = ( + 1, + 2, + 3, + 5, + 7, + 10, + 20, + 30, + 50, + 70, + 100, + 150, + 200, + 250, + 300, + 400, + 500, + 600, + 700, + 800, + 850, + 900, + 925, + 950, + 1000, +) + +# https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2020MS002203 +PRESSURE_LEVELS_WEATHERBENCH_13 = ( + 50, + 100, + 150, + 200, + 250, + 300, + 400, + 500, + 600, + 700, + 850, + 925, + 1000, +) + +PRESSURE_LEVELS = { + 13: PRESSURE_LEVELS_WEATHERBENCH_13, + 25: PRESSURE_LEVELS_HRES_25, + 37: PRESSURE_LEVELS_ERA5_37, +} + +# The list of all possible atmospheric variables. Taken from: +# https://confluence.ecmwf.int/display/CKB/ERA5%3A+data+documentation#ERA5:datadocumentation-Table9 +ALL_ATMOSPHERIC_VARS = ( + "potential_vorticity", + "specific_rain_water_content", + "specific_snow_water_content", + "geopotential", + "temperature", + "u_component_of_wind", + "v_component_of_wind", + "specific_humidity", + "vertical_velocity", + "vorticity", + "divergence", + "relative_humidity", + "ozone_mass_mixing_ratio", + "specific_cloud_liquid_water_content", + "specific_cloud_ice_water_content", + "fraction_of_cloud_cover", +) + +TARGET_SURFACE_VARS = ( + "2m_temperature", + "mean_sea_level_pressure", + "10m_v_component_of_wind", + "10m_u_component_of_wind", + "total_precipitation_6hr", +) +TARGET_SURFACE_NO_PRECIP_VARS = ( + "2m_temperature", + "mean_sea_level_pressure", + "10m_v_component_of_wind", + "10m_u_component_of_wind", +) +TARGET_ATMOSPHERIC_VARS = ( + "temperature", + "geopotential", + "u_component_of_wind", + "v_component_of_wind", + "vertical_velocity", + "specific_humidity", +) +TARGET_ATMOSPHERIC_NO_W_VARS = ( + "temperature", + "geopotential", + "u_component_of_wind", + "v_component_of_wind", + "specific_humidity", +) +EXTERNAL_FORCING_VARS = ("toa_incident_solar_radiation",) +GENERATED_FORCING_VARS = ( + "year_progress_sin", + "year_progress_cos", + "day_progress_sin", + "day_progress_cos", +) +FORCING_VARS = EXTERNAL_FORCING_VARS + GENERATED_FORCING_VARS +STATIC_VARS = ( + "geopotential_at_surface", + "land_sea_mask", +) + +TASK_input_variables = ( + TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS + STATIC_VARS +) +TASK_target_variables = TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS +TASK_forcing_variables = FORCING_VARS +TASK_pressure_levels = PRESSURE_LEVELS_ERA5_37 +TASK_input_duration = ("12h",) + +TASK_13_input_variables = ( + TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS + STATIC_VARS +) +TASK_13_target_variables = TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS +TASK_13_forcing_variables = FORCING_VARS +TASK_13_pressure_levels = PRESSURE_LEVELS_WEATHERBENCH_13 +TASK_13_input_duration = ("12h",) + + +TASK_13_PRECIP_OUT_input_variables = ( + TARGET_SURFACE_NO_PRECIP_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS + STATIC_VARS +) +TASK_13_PRECIP_OUT_target_variables = TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS +TASK_13_PRECIP_OUT_forcing_variables = FORCING_VARS +TASK_13_PRECIP_OUT_pressure_levels = 
PRESSURE_LEVELS_WEATHERBENCH_13 +TASK_13_PRECIP_OUT_input_duration = ("12h",) + + +@dataclass +class TrainingArguments: + + data_path: str = field( + default="data/dataset/source-era5_date-2022-01-01_res-0.25_levels-37_steps-01.nc", + metadata={"help": "data_path."}, + ) + param_path: str = field( + default="data/params/GraphCast---ERA5-1979-2017---resolution-0.25---pressure-levels-37---mesh-2to6---precipitation-input-and-output.pdparams", + metadata={"help": "param_path."}, + ) + stddev_path: str = field( + default="data/stats/stddev_by_level.nc", + metadata={"help": "stddev_path."}, + ) + stddev_diffs_path: str = field( + default="data/stats/diffs_stddev_by_level.nc", + metadata={"help": "stddev_diffs_path."}, + ) + mean_path: str = field( + default="data/stats/mean_by_level.nc", + metadata={"help": "mean_path."}, + ) + type: str = field( + default="graphcast", + metadata={"help": "type."}, + ) + level: int = field( + default=37, + metadata={"help": "level."}, + ) + latent_size: int = field( + default=512, + metadata={"help": "latent_size."}, + ) + hidden_layers: int = field( + default=1, + metadata={"help": "hidden_layers."}, + ) + gnn_msg_steps: int = field( + default=16, + metadata={"help": "gnn_msg_steps."}, + ) + mesh_size: int = field( + default=6, + metadata={"help": "mesh_size."}, + ) + resolution: float = field( + default=0.25, + metadata={"help": "resolution. {0.25, 1.0}"}, + ) + radius_query_fraction_edge_length: float = field( + default=0.6, + metadata={"help": "radius_query_fraction_edge_length."}, + ) + mesh2grid_edge_normalization_factor: float = field( + default=2 / (1 + np.sqrt(5)), + metadata={"help": "mesh2grid_edge_normalization_factor. 1 / phi"}, + ) + + # 输入数据 + mesh_node_dim: int = field( + default=474, + metadata={"help": "mesh_node_dim."}, + ) + grid_node_dim: int = field( + default=474, + metadata={"help": "grid_node_dim."}, + ) + mesh_edge_dim: int = field( + default=4, + metadata={"help": "mesh_edge_dim."}, + ) + grid2mesh_edge_dim: int = field( + default=4, + metadata={"help": "grid2mesh_edge_dim."}, + ) + mesh2grid_edge_dim: int = field( + default=4, + metadata={"help": "mesh2grid_edge_dim."}, + ) + + # 测试数据 + mesh_node_num: int = field( + default=2562, + metadata={"help": "mesh_node_num."}, + ) + grid_node_num: int = field( + default=32768, + metadata={"help": "grid_node_num."}, + ) + mesh_edge_num: int = field( + default=20460, + metadata={"help": "mesh_edge_num."}, + ) + mesh2grid_edge_num: int = field( + default=98304, + metadata={"help": "mesh2grid_edge_num."}, + ) + grid2mesh_edge_num: int = field( + default=50184, + metadata={"help": "grid2mesh_edge_num."}, + ) + # 输出结果 + node_output_dim: int = field( + default=227, + metadata={"help": "node_output_dim."}, + ) + + @property + def grid_node_emb_dim(self): + return self.latent_size + + @property + def mesh_node_emb_dim(self): + return self.latent_size + + @property + def mesh_edge_emb_dim(self): + return self.latent_size + + @property + def grid2mesh_edge_emb_dim(self): + return self.latent_size + + @property + def mesh2grid_edge_emb_dim(self): + return self.latent_size + + +if __name__ == "__main__": + import json + + with open("GraphCast_small.json", "r") as f: + args = TrainingArguments(**json.load(f)) + print(args) +>>>>>>> Stashed changes diff --git a/jointContribution/graphcast/config/GraphCast.json b/jointContribution/graphcast/config/GraphCast.json index 28dba9c4ca..0e8e2967d0 100644 --- a/jointContribution/graphcast/config/GraphCast.json +++ b/jointContribution/graphcast/config/GraphCast.json 
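As a minimal sketch of how the self-loop and Laplacian helpers ported earlier in this diff fit together: the snippet below is not part of the patch, `graph_utils` is a placeholder import path (the diff does not show the module name), it assumes the local `scatter`/`scatter_add` helpers are importable alongside it, and the toy graph contains no pre-existing self-loops.

import paddle

# Placeholder import path: the diff does not show the module name for the
# ported helpers, so `graph_utils` is illustrative only.
from graph_utils import add_self_loops, get_laplacian

# Toy 3-node path graph stored as directed edges in both directions,
# with no pre-existing self-loops.
edge_index = paddle.to_tensor([[0, 1, 1, 2], [1, 0, 2, 1]], dtype="int64")
edge_weight = paddle.ones([edge_index.shape[1]], dtype="float32")

# float branch of fill_value: every added self-loop gets weight 1.0.
edge_index_sl, edge_weight_sl = add_self_loops(
    edge_index, edge_weight, fill_value=1.0, num_nodes=3
)

# Symmetrically normalized Laplacian, L = I - D^{-1/2} A D^{-1/2},
# returned in the same sparse (edge_index, edge_weight) form.
lap_index, lap_weight = get_laplacian(
    edge_index, edge_weight, normalization="sym", num_nodes=3
)
print(lap_index.shape, lap_weight.shape)  # [2, 7] and [7] for this toy graph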
@@ -1,3 +1,4 @@ +<<<<<<< Updated upstream { "data_path": "data/dataset/source-era5_date-2022-01-01_res-0.25_levels-37_steps-01.nc", "gnn_msg_steps": 16, @@ -28,3 +29,32 @@ "target_lead_times": "6h", "type": "graphcast" } +======= +{ + "data_path": "data/dataset/source-era5_date-2022-01-01_res-0.25_levels-37_steps-01.nc", + "gnn_msg_steps": 16, + "grid2mesh_edge_dim": 4, + "grid2mesh_edge_num": 1618818, + "grid_node_dim": 474, + "grid_node_num": 1038240, + "hidden_layers": 1, + "latent_size": 512, + "level": 37, + "mean_path": "data/stats/mean_by_level.nc", + "mesh2grid_edge_dim": 4, + "mesh2grid_edge_normalization_factor": 0.6180338738074472, + "mesh2grid_edge_num": 3114720, + "mesh_edge_dim": 4, + "mesh_edge_num": 327660, + "mesh_node_dim": 474, + "mesh_node_num": 40962, + "mesh_size": 6, + "node_output_dim": 227, + "param_path": "data/params/GraphCast---ERA5-1979-2017---resolution-0.25---pressure-levels-37---mesh-2to6---precipitation-input-and-output.pdparams", + "radius_query_fraction_edge_length": 0.6, + "resolution": 0.25, + "stddev_diffs_path": "data/stats/diffs_stddev_by_level.nc", + "stddev_path": "data/stats/stddev_by_level.nc", + "type": "graphcast" +} +>>>>>>> Stashed changes diff --git a/jointContribution/graphcast/config/GraphCast_operational.json b/jointContribution/graphcast/config/GraphCast_operational.json index 4078f53021..5c5db67398 100644 --- a/jointContribution/graphcast/config/GraphCast_operational.json +++ b/jointContribution/graphcast/config/GraphCast_operational.json @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream { "data_path": "data/dataset/source-hres_date-2022-01-01_res-0.25_levels-13_steps-01.nc", "gnn_msg_steps": 16, @@ -28,3 +29,32 @@ "target_lead_times": "6h", "type": "graphcast_operational" } +======= +{ + "data_path": "data/dataset/source-hres_date-2022-01-01_res-0.25_levels-13_steps-01.nc", + "gnn_msg_steps": 16, + "grid2mesh_edge_dim": 4, + "grid2mesh_edge_num": 1618818, + "grid_node_dim": 184, + "grid_node_num": 1038240, + "hidden_layers": 1, + "latent_size": 512, + "level": 13, + "mean_path": "data/stats/mean_by_level.nc", + "mesh2grid_edge_dim": 4, + "mesh2grid_edge_normalization_factor": 0.6180338738074472, + "mesh2grid_edge_num": 3114720, + "mesh_edge_dim": 4, + "mesh_edge_num": 327660, + "mesh_node_dim": 184, + "mesh_node_num": 40962, + "mesh_size": 6, + "node_output_dim": 83, + "param_path": "data/params/GraphCast_operational---ERA5-HRES-1979-2021---resolution-0.25---pressure-levels-13---mesh-2to6---precipitation-output-only.pdparams", + "radius_query_fraction_edge_length": 0.6, + "resolution": 0.25, + "stddev_diffs_path": "data/stats/diffs_stddev_by_level.nc", + "stddev_path": "data/stats/stddev_by_level.nc", + "type": "graphcast_operational" +} +>>>>>>> Stashed changes diff --git a/jointContribution/graphcast/config/GraphCast_small.json b/jointContribution/graphcast/config/GraphCast_small.json index 3dc6edcb61..975cc9a355 100644 --- a/jointContribution/graphcast/config/GraphCast_small.json +++ b/jointContribution/graphcast/config/GraphCast_small.json @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream { "data_path": "data/dataset/source-era5_date-2022-01-01_res-1.0_levels-13_steps-01.nc", "gnn_msg_steps": 16, @@ -28,3 +29,32 @@ "target_lead_times": "6h", "type": "graphcast_small" } +======= +{ + "data_path": "data/dataset/source-era5_date-2022-01-01_res-1.0_levels-13_steps-01.nc", + "gnn_msg_steps": 16, + "grid2mesh_edge_dim": 4, + "grid2mesh_edge_num": 101892, + "grid_node_dim": 186, + "grid_node_num": 65160, + "hidden_layers": 1, + "latent_size": 512, + 
"level": 13, + "mean_path": "data/stats/mean_by_level.nc", + "mesh2grid_edge_dim": 4, + "mesh2grid_edge_normalization_factor": 0.6180338738074472, + "mesh2grid_edge_num": 195480, + "mesh_edge_dim": 4, + "mesh_edge_num": 81900, + "mesh_node_dim": 186, + "mesh_node_num": 10242, + "mesh_size": 5, + "node_output_dim": 83, + "param_path": "data/params/GraphCast_small---ERA5-1979-2015---resolution-1.0---pressure-levels-13---mesh-2to5---precipitation-input-and-output.pdparams", + "radius_query_fraction_edge_length": 0.6, + "resolution": 1.0, + "stddev_diffs_path": "data/stats/diffs_stddev_by_level.nc", + "stddev_path": "data/stats/stddev_by_level.nc", + "type": "graphcast_small" +} +>>>>>>> Stashed changes diff --git a/jointContribution/graphcast/data/graphcast-jax2paddle.csv b/jointContribution/graphcast/data/graphcast-jax2paddle.csv index 064b273cd3..981879d9d4 100644 --- a/jointContribution/graphcast/data/graphcast-jax2paddle.csv +++ b/jointContribution/graphcast/data/graphcast-jax2paddle.csv @@ -1,262 +1,262 @@ -params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_layer_norm:offset,graphcast.encoder.embedding.grid2mesh_edge_embedding.layer_norm.bias -params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_layer_norm:scale,graphcast.encoder.embedding.grid2mesh_edge_embedding.layer_norm.weight -params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_mlp/~/linear_0:b,graphcast.encoder.embedding.grid2mesh_edge_embedding.mlp.0.bias -params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_mlp/~/linear_0:w,graphcast.encoder.embedding.grid2mesh_edge_embedding.mlp.0.weight -params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_mlp/~/linear_1:b,graphcast.encoder.embedding.grid2mesh_edge_embedding.mlp.2.bias -params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_mlp/~/linear_1:w,graphcast.encoder.embedding.grid2mesh_edge_embedding.mlp.2.weight -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_layer_norm:offset,graphcast.encoder.embedding.grid_node_embedding.layer_norm.bias -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_layer_norm:scale,graphcast.encoder.embedding.grid_node_embedding.layer_norm.weight -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_mlp/~/linear_0:b,graphcast.encoder.embedding.grid_node_embedding.mlp.0.bias -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_mlp/~/linear_0:w,graphcast.encoder.embedding.grid_node_embedding.mlp.0.weight -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_mlp/~/linear_1:b,graphcast.encoder.embedding.grid_node_embedding.mlp.2.bias -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_mlp/~/linear_1:w,graphcast.encoder.embedding.grid_node_embedding.mlp.2.weight -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_layer_norm:offset,graphcast.encoder.embedding.mesh_node_embedding.layer_norm.bias -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_layer_norm:scale,graphcast.encoder.embedding.mesh_node_embedding.layer_norm.weight -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_mlp/~/linear_0:b,graphcast.encoder.embedding.mesh_node_embedding.mlp.0.bias -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_mlp/~/linear_0:w,graphcast.encoder.embedding.mesh_node_embedding.mlp.0.weight -params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_mlp/~/linear_1:b,graphcast.encoder.embedding.mesh_node_embedding.mlp.2.bias 
-params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_mlp/~/linear_1:w,graphcast.encoder.embedding.mesh_node_embedding.mlp.2.weight -params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_layer_norm:offset,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.layer_norm.bias -params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_layer_norm:scale,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.layer_norm.weight -params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_mlp/~/linear_0:b,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.mlp.0.bias -params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_mlp/~/linear_0:w,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.mlp.0.weight -params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_mlp/~/linear_1:b,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.mlp.2.bias -params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_mlp/~/linear_1:w,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.mlp.2.weight -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_layer_norm:offset,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.layer_norm.bias -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_layer_norm:scale,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.layer_norm.weight -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_0:b,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.mlp.0.bias -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_0:w,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.mlp.0.weight -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_1:b,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.mlp.2.bias -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_1:w,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.mlp.2.weight -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:offset,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.layer_norm.bias -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:scale,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.layer_norm.weight -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:b,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.mlp.0.bias -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:w,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.mlp.0.weight -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:b,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.mlp.2.bias -params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:w,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.mlp.2.weight -params:mesh2grid_gnn/~_networks_builder/decoder_nodes_grid_nodes_mlp/~/linear_0:b,graphcast.decoder.grid_node_layer.mlp.0.bias -params:mesh2grid_gnn/~_networks_builder/decoder_nodes_grid_nodes_mlp/~/linear_0:w,graphcast.decoder.grid_node_layer.mlp.0.weight -params:mesh2grid_gnn/~_networks_builder/decoder_nodes_grid_nodes_mlp/~/linear_1:b,graphcast.decoder.grid_node_layer.mlp.2.bias -params:mesh2grid_gnn/~_networks_builder/decoder_nodes_grid_nodes_mlp/~/linear_1:w,graphcast.decoder.grid_node_layer.mlp.2.weight 
-params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_layer_norm:offset,graphcast.encoder.embedding.mesh2grid_edge_embedding.layer_norm.bias -params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_layer_norm:scale,graphcast.encoder.embedding.mesh2grid_edge_embedding.layer_norm.weight -params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_mlp/~/linear_0:b,graphcast.encoder.embedding.mesh2grid_edge_embedding.mlp.0.bias -params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_mlp/~/linear_0:w,graphcast.encoder.embedding.mesh2grid_edge_embedding.mlp.0.weight -params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_mlp/~/linear_1:b,graphcast.encoder.embedding.mesh2grid_edge_embedding.mlp.2.bias -params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_mlp/~/linear_1:w,graphcast.encoder.embedding.mesh2grid_edge_embedding.mlp.2.weight -params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_layer_norm:offset,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.layer_norm.bias -params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_layer_norm:scale,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.layer_norm.weight -params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_mlp/~/linear_0:b,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.mlp.0.bias -params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_mlp/~/linear_0:w,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.mlp.0.weight -params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_mlp/~/linear_1:b,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.mlp.2.bias -params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_mlp/~/linear_1:w,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.mlp.2.weight -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_layer_norm:offset,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.layer_norm.bias -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_layer_norm:scale,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.layer_norm.weight -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_0:b,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.mlp.0.bias -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_0:w,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.mlp.0.weight -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_1:b,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.mlp.2.bias -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_1:w,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.mlp.2.weight -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:offset,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.layer_norm.bias -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:scale,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.layer_norm.weight -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:b,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.mlp.0.bias -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:w,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.mlp.0.weight -params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:b,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.mlp.2.bias 
-params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:w,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.mlp.2.weight -params:mesh_gnn/~_networks_builder/encoder_edges_mesh_layer_norm:offset,graphcast.encoder.embedding.mesh_edge_embedding.layer_norm.bias -params:mesh_gnn/~_networks_builder/encoder_edges_mesh_layer_norm:scale,graphcast.encoder.embedding.mesh_edge_embedding.layer_norm.weight -params:mesh_gnn/~_networks_builder/encoder_edges_mesh_mlp/~/linear_0:b,graphcast.encoder.embedding.mesh_edge_embedding.mlp.0.bias -params:mesh_gnn/~_networks_builder/encoder_edges_mesh_mlp/~/linear_0:w,graphcast.encoder.embedding.mesh_edge_embedding.mlp.0.weight -params:mesh_gnn/~_networks_builder/encoder_edges_mesh_mlp/~/linear_1:b,graphcast.encoder.embedding.mesh_edge_embedding.mlp.2.bias -params:mesh_gnn/~_networks_builder/encoder_edges_mesh_mlp/~/linear_1:w,graphcast.encoder.embedding.mesh_edge_embedding.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_layer_norm:offset,graphcast.processor.processor.0.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_layer_norm:scale,graphcast.processor.processor.0.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_mlp/~/linear_0:b,graphcast.processor.processor.0.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_mlp/~/linear_0:w,graphcast.processor.processor.0.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_mlp/~/linear_1:b,graphcast.processor.processor.0.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_mlp/~/linear_1:w,graphcast.processor.processor.0.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_layer_norm:offset,graphcast.processor.processor.1.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_layer_norm:scale,graphcast.processor.processor.1.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_mlp/~/linear_0:b,graphcast.processor.processor.1.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_mlp/~/linear_0:w,graphcast.processor.processor.1.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_mlp/~/linear_1:b,graphcast.processor.processor.1.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_mlp/~/linear_1:w,graphcast.processor.processor.1.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_layer_norm:offset,graphcast.processor.processor.2.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_layer_norm:scale,graphcast.processor.processor.2.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_mlp/~/linear_0:b,graphcast.processor.processor.2.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_mlp/~/linear_0:w,graphcast.processor.processor.2.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_mlp/~/linear_1:b,graphcast.processor.processor.2.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_mlp/~/linear_1:w,graphcast.processor.processor.2.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_layer_norm:offset,graphcast.processor.processor.3.edge_layer.layer_norm.bias 
-params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_layer_norm:scale,graphcast.processor.processor.3.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_mlp/~/linear_0:b,graphcast.processor.processor.3.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_mlp/~/linear_0:w,graphcast.processor.processor.3.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_mlp/~/linear_1:b,graphcast.processor.processor.3.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_mlp/~/linear_1:w,graphcast.processor.processor.3.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_layer_norm:offset,graphcast.processor.processor.4.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_layer_norm:scale,graphcast.processor.processor.4.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_mlp/~/linear_0:b,graphcast.processor.processor.4.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_mlp/~/linear_0:w,graphcast.processor.processor.4.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_mlp/~/linear_1:b,graphcast.processor.processor.4.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_mlp/~/linear_1:w,graphcast.processor.processor.4.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_layer_norm:offset,graphcast.processor.processor.5.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_layer_norm:scale,graphcast.processor.processor.5.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_mlp/~/linear_0:b,graphcast.processor.processor.5.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_mlp/~/linear_0:w,graphcast.processor.processor.5.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_mlp/~/linear_1:b,graphcast.processor.processor.5.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_mlp/~/linear_1:w,graphcast.processor.processor.5.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_layer_norm:offset,graphcast.processor.processor.6.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_layer_norm:scale,graphcast.processor.processor.6.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_mlp/~/linear_0:b,graphcast.processor.processor.6.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_mlp/~/linear_0:w,graphcast.processor.processor.6.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_mlp/~/linear_1:b,graphcast.processor.processor.6.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_mlp/~/linear_1:w,graphcast.processor.processor.6.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_layer_norm:offset,graphcast.processor.processor.7.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_layer_norm:scale,graphcast.processor.processor.7.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_mlp/~/linear_0:b,graphcast.processor.processor.7.edge_layer.mlp.0.bias 
-params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_mlp/~/linear_0:w,graphcast.processor.processor.7.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_mlp/~/linear_1:b,graphcast.processor.processor.7.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_mlp/~/linear_1:w,graphcast.processor.processor.7.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_layer_norm:offset,graphcast.processor.processor.8.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_layer_norm:scale,graphcast.processor.processor.8.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_mlp/~/linear_0:b,graphcast.processor.processor.8.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_mlp/~/linear_0:w,graphcast.processor.processor.8.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_mlp/~/linear_1:b,graphcast.processor.processor.8.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_mlp/~/linear_1:w,graphcast.processor.processor.8.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_layer_norm:offset,graphcast.processor.processor.9.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_layer_norm:scale,graphcast.processor.processor.9.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_mlp/~/linear_0:b,graphcast.processor.processor.9.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_mlp/~/linear_0:w,graphcast.processor.processor.9.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_mlp/~/linear_1:b,graphcast.processor.processor.9.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_mlp/~/linear_1:w,graphcast.processor.processor.9.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_layer_norm:offset,graphcast.processor.processor.10.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_layer_norm:scale,graphcast.processor.processor.10.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_mlp/~/linear_0:b,graphcast.processor.processor.10.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_mlp/~/linear_0:w,graphcast.processor.processor.10.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_mlp/~/linear_1:b,graphcast.processor.processor.10.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_mlp/~/linear_1:w,graphcast.processor.processor.10.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_layer_norm:offset,graphcast.processor.processor.11.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_layer_norm:scale,graphcast.processor.processor.11.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_mlp/~/linear_0:b,graphcast.processor.processor.11.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_mlp/~/linear_0:w,graphcast.processor.processor.11.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_mlp/~/linear_1:b,graphcast.processor.processor.11.edge_layer.mlp.2.bias 
-params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_mlp/~/linear_1:w,graphcast.processor.processor.11.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_layer_norm:offset,graphcast.processor.processor.12.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_layer_norm:scale,graphcast.processor.processor.12.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_mlp/~/linear_0:b,graphcast.processor.processor.12.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_mlp/~/linear_0:w,graphcast.processor.processor.12.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_mlp/~/linear_1:b,graphcast.processor.processor.12.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_mlp/~/linear_1:w,graphcast.processor.processor.12.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_layer_norm:offset,graphcast.processor.processor.13.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_layer_norm:scale,graphcast.processor.processor.13.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_mlp/~/linear_0:b,graphcast.processor.processor.13.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_mlp/~/linear_0:w,graphcast.processor.processor.13.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_mlp/~/linear_1:b,graphcast.processor.processor.13.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_mlp/~/linear_1:w,graphcast.processor.processor.13.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_layer_norm:offset,graphcast.processor.processor.14.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_layer_norm:scale,graphcast.processor.processor.14.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_mlp/~/linear_0:b,graphcast.processor.processor.14.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_mlp/~/linear_0:w,graphcast.processor.processor.14.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_mlp/~/linear_1:b,graphcast.processor.processor.14.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_mlp/~/linear_1:w,graphcast.processor.processor.14.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_layer_norm:offset,graphcast.processor.processor.15.edge_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_layer_norm:scale,graphcast.processor.processor.15.edge_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_mlp/~/linear_0:b,graphcast.processor.processor.15.edge_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_mlp/~/linear_0:w,graphcast.processor.processor.15.edge_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_mlp/~/linear_1:b,graphcast.processor.processor.15.edge_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_mlp/~/linear_1:w,graphcast.processor.processor.15.edge_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:offset,graphcast.processor.processor.0.node_layer.layer_norm.bias 
-params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:scale,graphcast.processor.processor.0.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.0.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.0.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.0.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.0.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_layer_norm:offset,graphcast.processor.processor.1.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_layer_norm:scale,graphcast.processor.processor.1.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.1.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.1.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.1.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.1.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_layer_norm:offset,graphcast.processor.processor.2.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_layer_norm:scale,graphcast.processor.processor.2.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.2.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.2.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.2.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.2.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_layer_norm:offset,graphcast.processor.processor.3.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_layer_norm:scale,graphcast.processor.processor.3.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.3.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.3.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.3.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.3.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_layer_norm:offset,graphcast.processor.processor.4.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_layer_norm:scale,graphcast.processor.processor.4.node_layer.layer_norm.weight 
-params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.4.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.4.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.4.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.4.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_layer_norm:offset,graphcast.processor.processor.5.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_layer_norm:scale,graphcast.processor.processor.5.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.5.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.5.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.5.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.5.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_layer_norm:offset,graphcast.processor.processor.6.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_layer_norm:scale,graphcast.processor.processor.6.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.6.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.6.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.6.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.6.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_layer_norm:offset,graphcast.processor.processor.7.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_layer_norm:scale,graphcast.processor.processor.7.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.7.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.7.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.7.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.7.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_layer_norm:offset,graphcast.processor.processor.8.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_layer_norm:scale,graphcast.processor.processor.8.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.8.node_layer.mlp.0.bias 
-params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.8.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.8.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.8.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_layer_norm:offset,graphcast.processor.processor.9.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_layer_norm:scale,graphcast.processor.processor.9.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.9.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.9.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.9.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.9.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_layer_norm:offset,graphcast.processor.processor.10.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_layer_norm:scale,graphcast.processor.processor.10.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.10.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.10.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.10.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.10.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_layer_norm:offset,graphcast.processor.processor.11.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_layer_norm:scale,graphcast.processor.processor.11.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.11.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.11.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.11.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.11.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_layer_norm:offset,graphcast.processor.processor.12.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_layer_norm:scale,graphcast.processor.processor.12.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.12.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.12.node_layer.mlp.0.weight 
-params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.12.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.12.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_layer_norm:offset,graphcast.processor.processor.13.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_layer_norm:scale,graphcast.processor.processor.13.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.13.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.13.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.13.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.13.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_layer_norm:offset,graphcast.processor.processor.14.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_layer_norm:scale,graphcast.processor.processor.14.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.14.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.14.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.14.node_layer.mlp.2.bias -params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.14.node_layer.mlp.2.weight -params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_layer_norm:offset,graphcast.processor.processor.15.node_layer.layer_norm.bias -params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_layer_norm:scale,graphcast.processor.processor.15.node_layer.layer_norm.weight -params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.15.node_layer.mlp.0.bias -params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.15.node_layer.mlp.0.weight -params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.15.node_layer.mlp.2.bias +params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_layer_norm:offset,graphcast.encoder.embedding.grid2mesh_edge_embedding.layer_norm.bias +params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_layer_norm:scale,graphcast.encoder.embedding.grid2mesh_edge_embedding.layer_norm.weight +params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_mlp/~/linear_0:b,graphcast.encoder.embedding.grid2mesh_edge_embedding.mlp.0.bias +params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_mlp/~/linear_0:w,graphcast.encoder.embedding.grid2mesh_edge_embedding.mlp.0.weight +params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_mlp/~/linear_1:b,graphcast.encoder.embedding.grid2mesh_edge_embedding.mlp.2.bias 
+params:grid2mesh_gnn/~_networks_builder/encoder_edges_grid2mesh_mlp/~/linear_1:w,graphcast.encoder.embedding.grid2mesh_edge_embedding.mlp.2.weight +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_layer_norm:offset,graphcast.encoder.embedding.grid_node_embedding.layer_norm.bias +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_layer_norm:scale,graphcast.encoder.embedding.grid_node_embedding.layer_norm.weight +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_mlp/~/linear_0:b,graphcast.encoder.embedding.grid_node_embedding.mlp.0.bias +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_mlp/~/linear_0:w,graphcast.encoder.embedding.grid_node_embedding.mlp.0.weight +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_mlp/~/linear_1:b,graphcast.encoder.embedding.grid_node_embedding.mlp.2.bias +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_grid_nodes_mlp/~/linear_1:w,graphcast.encoder.embedding.grid_node_embedding.mlp.2.weight +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_layer_norm:offset,graphcast.encoder.embedding.mesh_node_embedding.layer_norm.bias +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_layer_norm:scale,graphcast.encoder.embedding.mesh_node_embedding.layer_norm.weight +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_mlp/~/linear_0:b,graphcast.encoder.embedding.mesh_node_embedding.mlp.0.bias +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_mlp/~/linear_0:w,graphcast.encoder.embedding.mesh_node_embedding.mlp.0.weight +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_mlp/~/linear_1:b,graphcast.encoder.embedding.mesh_node_embedding.mlp.2.bias +params:grid2mesh_gnn/~_networks_builder/encoder_nodes_mesh_nodes_mlp/~/linear_1:w,graphcast.encoder.embedding.mesh_node_embedding.mlp.2.weight +params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_layer_norm:offset,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.layer_norm.bias +params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_layer_norm:scale,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.layer_norm.weight +params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_mlp/~/linear_0:b,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.mlp.0.bias +params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_mlp/~/linear_0:w,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.mlp.0.weight +params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_mlp/~/linear_1:b,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.mlp.2.bias +params:grid2mesh_gnn/~_networks_builder/processor_edges_0_grid2mesh_mlp/~/linear_1:w,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.edge_layer.mlp.2.weight +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_layer_norm:offset,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.layer_norm.bias +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_layer_norm:scale,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.layer_norm.weight +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_0:b,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.mlp.0.bias +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_0:w,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.mlp.0.weight 
+params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_1:b,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.mlp.2.bias +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_1:w,graphcast.encoder.grid2mesh_gnn.grid_node_layer.fn.mlp.2.weight +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:offset,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.layer_norm.bias +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:scale,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.layer_norm.weight +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:b,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.mlp.0.bias +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:w,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.mlp.0.weight +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:b,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.mlp.2.bias +params:grid2mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:w,graphcast.encoder.grid2mesh_gnn.grid2mesh_gnn.node_layer.mlp.2.weight +params:mesh2grid_gnn/~_networks_builder/decoder_nodes_grid_nodes_mlp/~/linear_0:b,graphcast.decoder.grid_node_layer.mlp.0.bias +params:mesh2grid_gnn/~_networks_builder/decoder_nodes_grid_nodes_mlp/~/linear_0:w,graphcast.decoder.grid_node_layer.mlp.0.weight +params:mesh2grid_gnn/~_networks_builder/decoder_nodes_grid_nodes_mlp/~/linear_1:b,graphcast.decoder.grid_node_layer.mlp.2.bias +params:mesh2grid_gnn/~_networks_builder/decoder_nodes_grid_nodes_mlp/~/linear_1:w,graphcast.decoder.grid_node_layer.mlp.2.weight +params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_layer_norm:offset,graphcast.encoder.embedding.mesh2grid_edge_embedding.layer_norm.bias +params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_layer_norm:scale,graphcast.encoder.embedding.mesh2grid_edge_embedding.layer_norm.weight +params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_mlp/~/linear_0:b,graphcast.encoder.embedding.mesh2grid_edge_embedding.mlp.0.bias +params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_mlp/~/linear_0:w,graphcast.encoder.embedding.mesh2grid_edge_embedding.mlp.0.weight +params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_mlp/~/linear_1:b,graphcast.encoder.embedding.mesh2grid_edge_embedding.mlp.2.bias +params:mesh2grid_gnn/~_networks_builder/encoder_edges_mesh2grid_mlp/~/linear_1:w,graphcast.encoder.embedding.mesh2grid_edge_embedding.mlp.2.weight +params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_layer_norm:offset,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.layer_norm.bias +params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_layer_norm:scale,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.layer_norm.weight +params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_mlp/~/linear_0:b,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.mlp.0.bias +params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_mlp/~/linear_0:w,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.mlp.0.weight +params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_mlp/~/linear_1:b,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.mlp.2.bias 
+params:mesh2grid_gnn/~_networks_builder/processor_edges_0_mesh2grid_mlp/~/linear_1:w,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.edge_layer.mlp.2.weight +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_layer_norm:offset,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.layer_norm.bias +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_layer_norm:scale,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.layer_norm.weight +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_0:b,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.mlp.0.bias +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_0:w,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.mlp.0.weight +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_1:b,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.mlp.2.bias +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_grid_nodes_mlp/~/linear_1:w,graphcast.decoder.mesh2grid_gnn.mesh2grid_gnn.node_layer.mlp.2.weight +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:offset,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.layer_norm.bias +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:scale,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.layer_norm.weight +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:b,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.mlp.0.bias +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:w,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.mlp.0.weight +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:b,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.mlp.2.bias +params:mesh2grid_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:w,graphcast.decoder.mesh2grid_gnn.mesh_node_layer.fn.mlp.2.weight +params:mesh_gnn/~_networks_builder/encoder_edges_mesh_layer_norm:offset,graphcast.encoder.embedding.mesh_edge_embedding.layer_norm.bias +params:mesh_gnn/~_networks_builder/encoder_edges_mesh_layer_norm:scale,graphcast.encoder.embedding.mesh_edge_embedding.layer_norm.weight +params:mesh_gnn/~_networks_builder/encoder_edges_mesh_mlp/~/linear_0:b,graphcast.encoder.embedding.mesh_edge_embedding.mlp.0.bias +params:mesh_gnn/~_networks_builder/encoder_edges_mesh_mlp/~/linear_0:w,graphcast.encoder.embedding.mesh_edge_embedding.mlp.0.weight +params:mesh_gnn/~_networks_builder/encoder_edges_mesh_mlp/~/linear_1:b,graphcast.encoder.embedding.mesh_edge_embedding.mlp.2.bias +params:mesh_gnn/~_networks_builder/encoder_edges_mesh_mlp/~/linear_1:w,graphcast.encoder.embedding.mesh_edge_embedding.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_layer_norm:offset,graphcast.processor.processor.0.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_layer_norm:scale,graphcast.processor.processor.0.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_mlp/~/linear_0:b,graphcast.processor.processor.0.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_mlp/~/linear_0:w,graphcast.processor.processor.0.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_mlp/~/linear_1:b,graphcast.processor.processor.0.edge_layer.mlp.2.bias 
+params:mesh_gnn/~_networks_builder/processor_edges_0_mesh_mlp/~/linear_1:w,graphcast.processor.processor.0.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_layer_norm:offset,graphcast.processor.processor.1.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_layer_norm:scale,graphcast.processor.processor.1.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_mlp/~/linear_0:b,graphcast.processor.processor.1.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_mlp/~/linear_0:w,graphcast.processor.processor.1.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_mlp/~/linear_1:b,graphcast.processor.processor.1.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_1_mesh_mlp/~/linear_1:w,graphcast.processor.processor.1.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_layer_norm:offset,graphcast.processor.processor.2.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_layer_norm:scale,graphcast.processor.processor.2.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_mlp/~/linear_0:b,graphcast.processor.processor.2.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_mlp/~/linear_0:w,graphcast.processor.processor.2.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_mlp/~/linear_1:b,graphcast.processor.processor.2.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_2_mesh_mlp/~/linear_1:w,graphcast.processor.processor.2.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_layer_norm:offset,graphcast.processor.processor.3.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_layer_norm:scale,graphcast.processor.processor.3.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_mlp/~/linear_0:b,graphcast.processor.processor.3.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_mlp/~/linear_0:w,graphcast.processor.processor.3.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_mlp/~/linear_1:b,graphcast.processor.processor.3.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_3_mesh_mlp/~/linear_1:w,graphcast.processor.processor.3.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_layer_norm:offset,graphcast.processor.processor.4.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_layer_norm:scale,graphcast.processor.processor.4.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_mlp/~/linear_0:b,graphcast.processor.processor.4.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_mlp/~/linear_0:w,graphcast.processor.processor.4.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_mlp/~/linear_1:b,graphcast.processor.processor.4.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_4_mesh_mlp/~/linear_1:w,graphcast.processor.processor.4.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_layer_norm:offset,graphcast.processor.processor.5.edge_layer.layer_norm.bias 
+params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_layer_norm:scale,graphcast.processor.processor.5.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_mlp/~/linear_0:b,graphcast.processor.processor.5.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_mlp/~/linear_0:w,graphcast.processor.processor.5.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_mlp/~/linear_1:b,graphcast.processor.processor.5.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_5_mesh_mlp/~/linear_1:w,graphcast.processor.processor.5.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_layer_norm:offset,graphcast.processor.processor.6.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_layer_norm:scale,graphcast.processor.processor.6.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_mlp/~/linear_0:b,graphcast.processor.processor.6.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_mlp/~/linear_0:w,graphcast.processor.processor.6.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_mlp/~/linear_1:b,graphcast.processor.processor.6.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_6_mesh_mlp/~/linear_1:w,graphcast.processor.processor.6.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_layer_norm:offset,graphcast.processor.processor.7.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_layer_norm:scale,graphcast.processor.processor.7.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_mlp/~/linear_0:b,graphcast.processor.processor.7.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_mlp/~/linear_0:w,graphcast.processor.processor.7.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_mlp/~/linear_1:b,graphcast.processor.processor.7.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_7_mesh_mlp/~/linear_1:w,graphcast.processor.processor.7.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_layer_norm:offset,graphcast.processor.processor.8.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_layer_norm:scale,graphcast.processor.processor.8.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_mlp/~/linear_0:b,graphcast.processor.processor.8.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_mlp/~/linear_0:w,graphcast.processor.processor.8.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_mlp/~/linear_1:b,graphcast.processor.processor.8.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_8_mesh_mlp/~/linear_1:w,graphcast.processor.processor.8.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_layer_norm:offset,graphcast.processor.processor.9.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_layer_norm:scale,graphcast.processor.processor.9.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_mlp/~/linear_0:b,graphcast.processor.processor.9.edge_layer.mlp.0.bias 
+params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_mlp/~/linear_0:w,graphcast.processor.processor.9.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_mlp/~/linear_1:b,graphcast.processor.processor.9.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_9_mesh_mlp/~/linear_1:w,graphcast.processor.processor.9.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_layer_norm:offset,graphcast.processor.processor.10.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_layer_norm:scale,graphcast.processor.processor.10.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_mlp/~/linear_0:b,graphcast.processor.processor.10.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_mlp/~/linear_0:w,graphcast.processor.processor.10.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_mlp/~/linear_1:b,graphcast.processor.processor.10.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_10_mesh_mlp/~/linear_1:w,graphcast.processor.processor.10.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_layer_norm:offset,graphcast.processor.processor.11.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_layer_norm:scale,graphcast.processor.processor.11.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_mlp/~/linear_0:b,graphcast.processor.processor.11.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_mlp/~/linear_0:w,graphcast.processor.processor.11.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_mlp/~/linear_1:b,graphcast.processor.processor.11.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_11_mesh_mlp/~/linear_1:w,graphcast.processor.processor.11.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_layer_norm:offset,graphcast.processor.processor.12.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_layer_norm:scale,graphcast.processor.processor.12.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_mlp/~/linear_0:b,graphcast.processor.processor.12.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_mlp/~/linear_0:w,graphcast.processor.processor.12.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_mlp/~/linear_1:b,graphcast.processor.processor.12.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_12_mesh_mlp/~/linear_1:w,graphcast.processor.processor.12.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_layer_norm:offset,graphcast.processor.processor.13.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_layer_norm:scale,graphcast.processor.processor.13.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_mlp/~/linear_0:b,graphcast.processor.processor.13.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_mlp/~/linear_0:w,graphcast.processor.processor.13.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_mlp/~/linear_1:b,graphcast.processor.processor.13.edge_layer.mlp.2.bias 
+params:mesh_gnn/~_networks_builder/processor_edges_13_mesh_mlp/~/linear_1:w,graphcast.processor.processor.13.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_layer_norm:offset,graphcast.processor.processor.14.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_layer_norm:scale,graphcast.processor.processor.14.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_mlp/~/linear_0:b,graphcast.processor.processor.14.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_mlp/~/linear_0:w,graphcast.processor.processor.14.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_mlp/~/linear_1:b,graphcast.processor.processor.14.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_14_mesh_mlp/~/linear_1:w,graphcast.processor.processor.14.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_layer_norm:offset,graphcast.processor.processor.15.edge_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_layer_norm:scale,graphcast.processor.processor.15.edge_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_mlp/~/linear_0:b,graphcast.processor.processor.15.edge_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_mlp/~/linear_0:w,graphcast.processor.processor.15.edge_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_mlp/~/linear_1:b,graphcast.processor.processor.15.edge_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_edges_15_mesh_mlp/~/linear_1:w,graphcast.processor.processor.15.edge_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:offset,graphcast.processor.processor.0.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_layer_norm:scale,graphcast.processor.processor.0.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.0.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.0.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.0.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_0_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.0.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_layer_norm:offset,graphcast.processor.processor.1.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_layer_norm:scale,graphcast.processor.processor.1.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.1.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.1.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.1.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_1_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.1.node_layer.mlp.2.weight 
+params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_layer_norm:offset,graphcast.processor.processor.2.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_layer_norm:scale,graphcast.processor.processor.2.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.2.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.2.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.2.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_2_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.2.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_layer_norm:offset,graphcast.processor.processor.3.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_layer_norm:scale,graphcast.processor.processor.3.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.3.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.3.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.3.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_3_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.3.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_layer_norm:offset,graphcast.processor.processor.4.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_layer_norm:scale,graphcast.processor.processor.4.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.4.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.4.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.4.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_4_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.4.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_layer_norm:offset,graphcast.processor.processor.5.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_layer_norm:scale,graphcast.processor.processor.5.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.5.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.5.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.5.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_5_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.5.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_layer_norm:offset,graphcast.processor.processor.6.node_layer.layer_norm.bias 
+params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_layer_norm:scale,graphcast.processor.processor.6.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.6.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.6.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.6.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_6_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.6.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_layer_norm:offset,graphcast.processor.processor.7.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_layer_norm:scale,graphcast.processor.processor.7.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.7.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.7.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.7.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_7_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.7.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_layer_norm:offset,graphcast.processor.processor.8.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_layer_norm:scale,graphcast.processor.processor.8.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.8.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.8.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.8.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_8_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.8.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_layer_norm:offset,graphcast.processor.processor.9.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_layer_norm:scale,graphcast.processor.processor.9.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.9.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.9.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.9.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_9_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.9.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_layer_norm:offset,graphcast.processor.processor.10.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_layer_norm:scale,graphcast.processor.processor.10.node_layer.layer_norm.weight 
+params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.10.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.10.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.10.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_10_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.10.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_layer_norm:offset,graphcast.processor.processor.11.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_layer_norm:scale,graphcast.processor.processor.11.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.11.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.11.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.11.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_11_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.11.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_layer_norm:offset,graphcast.processor.processor.12.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_layer_norm:scale,graphcast.processor.processor.12.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.12.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.12.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.12.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_12_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.12.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_layer_norm:offset,graphcast.processor.processor.13.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_layer_norm:scale,graphcast.processor.processor.13.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.13.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.13.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.13.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_13_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.13.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_layer_norm:offset,graphcast.processor.processor.14.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_layer_norm:scale,graphcast.processor.processor.14.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.14.node_layer.mlp.0.bias 
+params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.14.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.14.node_layer.mlp.2.bias +params:mesh_gnn/~_networks_builder/processor_nodes_14_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.14.node_layer.mlp.2.weight +params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_layer_norm:offset,graphcast.processor.processor.15.node_layer.layer_norm.bias +params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_layer_norm:scale,graphcast.processor.processor.15.node_layer.layer_norm.weight +params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_mlp/~/linear_0:b,graphcast.processor.processor.15.node_layer.mlp.0.bias +params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_mlp/~/linear_0:w,graphcast.processor.processor.15.node_layer.mlp.0.weight +params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_mlp/~/linear_1:b,graphcast.processor.processor.15.node_layer.mlp.2.bias params:mesh_gnn/~_networks_builder/processor_nodes_15_mesh_nodes_mlp/~/linear_1:w,graphcast.processor.processor.15.node_layer.mlp.2.weight \ No newline at end of file diff --git a/jointContribution/graphcast/datasets.py b/jointContribution/graphcast/datasets.py index 2a31c267e0..e83050c00c 100644 --- a/jointContribution/graphcast/datasets.py +++ b/jointContribution/graphcast/datasets.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream import copy import os import pickle @@ -486,3 +487,459 @@ def normalize(self, inputs_data, stddev_data, mean_data): def denormalize(self, inputs_data): return inputs_data * self.stacked_targets_stddev + self.stacked_targets_mean +======= +import copy +import os +import pickle +import typing + +import args +import graphtype +import numpy as np +import paddle +import pandas as pd +import xarray + +_SEC_PER_HOUR = 3600 +_HOUR_PER_DAY = 24 +SEC_PER_DAY = _SEC_PER_HOUR * _HOUR_PER_DAY +_AVG_DAY_PER_YEAR = 365.24219 +AVG_SEC_PER_YEAR = SEC_PER_DAY * _AVG_DAY_PER_YEAR + +DAY_PROGRESS = "day_progress" +YEAR_PROGRESS = "year_progress" + + +def get_year_progress(seconds_since_epoch: np.ndarray) -> np.ndarray: + """Computes year progress for times in seconds. + Args: + seconds_since_epoch: Times in seconds since the "epoch" (the point at which + UNIX time starts). + Returns: + Year progress normalized to be in the [0, 1) interval for each time point. + """ + # Start with the pure integer division, and then float at the very end. + # We will try to keep as much precision as possible. + years_since_epoch = ( + seconds_since_epoch / SEC_PER_DAY / np.float64(_AVG_DAY_PER_YEAR) + ) + # Note depending on how these ops are down, we may end up with a "weak_type" + # which can cause issues in subtle ways, and hard to track here. + # In any case, casting to float32 should get rid of the weak type. + # [0, 1.) Interval. + return np.mod(years_since_epoch, 1.0).astype(np.float32) + + +def get_day_progress( + seconds_since_epoch: np.ndarray, + longitude: np.ndarray, +) -> np.ndarray: + """Computes day progress for times in seconds at each longitude. + Args: + seconds_since_epoch: 1D array of times in seconds since the 'epoch' (the + point at which UNIX time starts). + longitude: 1D array of longitudes at which day progress is computed. + Returns: + 2D array of day progress values normalized to be in the [0, 1) inverval + for each time point at each longitude. + """ + # [0.0, 1.0) Interval. 
+ day_progress_greenwich = np.mod(seconds_since_epoch, SEC_PER_DAY) / SEC_PER_DAY + # Offset the day progress to the longitude of each point on Earth. + longitude_offsets = np.deg2rad(longitude) / (2 * np.pi) + day_progress = np.mod( + day_progress_greenwich[..., np.newaxis] + longitude_offsets, 1.0 + ) + return day_progress.astype(np.float32) + + +def datetime_features(seconds_since_epoch, longitude_offsets): + year_progress = get_year_progress(seconds_since_epoch) + day_progress = get_day_progress(seconds_since_epoch, longitude_offsets) + year_progress_phase = year_progress * (2 * np.pi) + day_progress_phase = day_progress * (2 * np.pi) + returned_data = { + "year_progress_sin": np.sin(year_progress_phase), + "year_progress_cos": np.cos(year_progress_phase), + "day_progress_sin": np.sin(day_progress_phase), + "day_progress_cos": np.cos(day_progress_phase), + } + return returned_data + + +def add_var_into_nc_dataset( + nc_dataset, + var_name, + var_value, + var_dims=( + "batch", + "time", + ), +): + new_var = nc_dataset.createVariable(var_name, "f8", var_dims) + new_var[:] = var_value + return nc_dataset + + +def extract_input_target_times( + dataset: xarray.Dataset, + input_duration, + target_lead_times, +): + (target_lead_times, target_duration) = _process_target_lead_times_and_get_duration( + target_lead_times + ) + + # Shift the coordinates for the time axis so that a timedelta of zero + # corresponds to the forecast reference time. That is, the final timestep + # that's available as input to the forecast, with all following timesteps + # forming the target period which needs to be predicted. + # This means the time coordinates are now forecast lead times. + time = dataset.coords["time"] + dataset = dataset.assign_coords(time=time + target_duration - time[-1]) + + # Slice out targets: + targets = dataset.sel({"time": target_lead_times}) + + input_duration = pd.Timedelta(input_duration) + # Both endpoints are inclusive with label-based slicing, so we offset by a + # small epsilon to make one of the endpoints non-inclusive: + zero = pd.Timedelta(0) + epsilon = pd.Timedelta(1, "ns") + inputs = dataset.sel({"time": slice(-input_duration + epsilon, zero)}) + return inputs, targets + + +def _process_target_lead_times_and_get_duration(target_lead_times): + """Returns the minimum duration for the target lead times.""" + if isinstance(target_lead_times, slice): + # A slice of lead times. xarray already accepts timedelta-like values for + # the begin/end/step of the slice. + if target_lead_times.start is None: + # If the start isn't specified, we assume it starts at the next timestep + # after lead time 0 (lead time 0 is the final input timestep): + target_lead_times = slice( + pd.Timedelta(1, "ns"), target_lead_times.stop, target_lead_times.step + ) + target_duration = pd.Timedelta(target_lead_times.stop) + else: + if not isinstance(target_lead_times, (list, tuple, set)): + # A single lead time, which we wrap as a length-1 array to ensure there + # still remains a time dimension (here of length 1) for consistency. + target_lead_times = [target_lead_times] + + # A list of multiple (not necessarily contiguous) lead times: + target_lead_times = [pd.Timedelta(x) for x in target_lead_times] + target_lead_times.sort() + target_duration = target_lead_times[-1] + return target_lead_times, target_duration + + +def variable_to_stacked( + variable: xarray.Variable, + sizes, + preserved_dims=("batch", "lat", "lon"), +) -> xarray.Variable: + """Converts an xarray.Variable to preserved_dims + ("channels",). 
+ + Any dimensions other than those included in preserved_dims get stacked into a + final "channels" dimension. If any of the preserved_dims are missing then they + are added, with the data broadcast/tiled to match the sizes specified in + `sizes`. + + Args: + variable: An xarray.Variable. + sizes: Mapping including sizes for any dimensions which are not present in + `variable` but are needed for the output. This may be needed for example + for a static variable with only ("lat", "lon") dims, or if you want to + encode just the latitude coordinates (a variable with dims ("lat",)). + preserved_dims: dimensions of variable to not be folded in channels. + + Returns: + An xarray.Variable with dimensions preserved_dims + ("channels",). + """ + stack_to_channels_dims = [d for d in variable.dims if d not in preserved_dims] + if stack_to_channels_dims: + variable = variable.stack(channels=stack_to_channels_dims) + dims = {dim: variable.sizes.get(dim) or sizes[dim] for dim in preserved_dims} + dims["channels"] = variable.sizes.get("channels", 1) + return variable.set_dims(dims) + + +def dataset_to_stacked( + dataset: xarray.Dataset, + sizes=None, + preserved_dims=("batch", "lat", "lon"), +) -> xarray.DataArray: + """Converts an xarray.Dataset to a single stacked array. + + This takes each consistuent data_var, converts it into BHWC layout + using `variable_to_stacked`, then concats them all along the channels axis. + + Args: + dataset: An xarray.Dataset. + sizes: Mapping including sizes for any dimensions which are not present in + the `dataset` but are needed for the output. See variable_to_stacked. + preserved_dims: dimensions from the dataset that should not be folded in + the predictions channels. + + Returns: + An xarray.DataArray with dimensions preserved_dims + ("channels",). + Existing coordinates for preserved_dims axes will be preserved, however + there will be no coordinates for "channels". + """ + data_vars = [ + variable_to_stacked( + dataset.variables[name], sizes or dataset.sizes, preserved_dims + ) + for name in sorted(dataset.data_vars.keys()) + ] + coords = { + dim: coord for dim, coord in dataset.coords.items() if dim in preserved_dims + } + return xarray.DataArray( + data=xarray.Variable.concat(data_vars, dim="channels"), coords=coords + ) + + +def stacked_to_dataset( + stacked_array: xarray.Variable, + template_dataset: xarray.Dataset, + preserved_dims: typing.Tuple[str, ...] = ("batch", "lat", "lon"), +) -> xarray.Dataset: + """The inverse of dataset_to_stacked. + + Requires a template dataset to demonstrate the variables/shapes/coordinates + required. + All variables must have preserved_dims dimensions. + + Args: + stacked_array: Data in BHWC layout, encoded the same as dataset_to_stacked + would if it was asked to encode `template_dataset`. + template_dataset: A template Dataset (or other mapping of DataArrays) + demonstrating the shape of output required (variables, shapes, + coordinates etc). + preserved_dims: dimensions from the target_template that were not folded in + the predictions channels. The preserved_dims need to be a subset of the + dims of all the variables of template_dataset. + + Returns: + An xarray.Dataset (or other mapping of DataArrays) with the same shape and + type as template_dataset. 
+ """ + unstack_from_channels_sizes = {} + var_names = sorted(template_dataset.keys()) + for name in var_names: + template_var = template_dataset[name] + if not all(dim in template_var.dims for dim in preserved_dims): + raise ValueError( + f"stacked_to_dataset requires all Variables to have {preserved_dims} " + f"dimensions, but found only {template_var.dims}." + ) + unstack_from_channels_sizes[name] = { + dim: size + for dim, size in template_var.sizes.items() + if dim not in preserved_dims + } + + channels = { + name: np.prod(list(unstack_sizes.values()), dtype=np.int64) + for name, unstack_sizes in unstack_from_channels_sizes.items() + } + total_expected_channels = sum(channels.values()) + found_channels = stacked_array.sizes["channels"] + if total_expected_channels != found_channels: + raise ValueError( + f"Expected {total_expected_channels} channels but found " + f"{found_channels}, when trying to convert a stacked array of shape " + f"{stacked_array.sizes} to a dataset of shape {template_dataset}." + ) + + data_vars = {} + index = 0 + for name in var_names: + template_var = template_dataset[name] + var = stacked_array.isel({"channels": slice(index, index + channels[name])}) + index += channels[name] + var = var.unstack({"channels": unstack_from_channels_sizes[name]}) + var = var.transpose(*template_var.dims) + data_vars[name] = xarray.DataArray( + data=var, + coords=template_var.coords, + # This might not always be the same as the name it's keyed under; it + # will refer to the original variable name, whereas the key might be + # some alias e.g. temperature_850 under which it should be logged: + name=template_var.name, + ) + return type(template_dataset)( + data_vars + ) # pytype:disable=not-callable,wrong-arg-count + + +class ERA5Data(paddle.io.Dataset): + """ + This class is used to process ERA5 re-analyze data, + and is used to generate the dataset generator supported by + MindSpore. This class inherits the Data class. + + Args: + data_params (dict): dataset-related configuration of the model. + run_mode (str, optional): whether the dataset is used for training, + evaluation or testing. Supports [“train”,“test”, “valid”]. + Default: 'train'. + + Supported Platforms: + ``Ascend`` ``GPU`` + + Examples: + >>> from mindearth.data import Era5Data + >>> data_params = { + ... 'name': 'era5', + ... 'root_dir': './dataset', + ... 'w_size': 256 + ... 
} + >>> dataset_generator = Era5Data(data_params) + """ + + # TODO: example should include all possible infos: + # data_frequency, patch/patch_size + def __init__(self, config, data_type="train"): + super().__init__() + if config.type == "graphcast": + self.input_variables = args.TASK_input_variables + self.forcing_variables = args.TASK_forcing_variables + self.target_variables = args.TASK_target_variables + self.level_variables = args.PRESSURE_LEVELS[37] + elif config.type == "graphcast_small": + self.input_variables = args.TASK_13_input_variables + self.forcing_variables = args.TASK_13_forcing_variables + self.target_variables = args.TASK_13_target_variables + self.level_variables = args.PRESSURE_LEVELS[13] + elif config.type == "graphcast_operational": + self.input_variables = args.TASK_13_PRECIP_OUT_input_variables + self.forcing_variables = args.TASK_13_PRECIP_OUT_forcing_variables + self.target_variables = args.TASK_13_PRECIP_OUT_target_variables + self.level_variables = args.PRESSURE_LEVELS[13] + + # 数据 + nc_dataset = xarray.open_dataset(config.data_path) + + longitude_offsets = nc_dataset.coords["lon"].data + second_since_epoch = ( + nc_dataset.coords["datetime"].data.astype("datetime64[s]").astype(np.int64) + ) + datetime_feats = datetime_features(second_since_epoch, longitude_offsets) + nc_dataset.update( + { + "year_progress_sin": xarray.Variable( + ("batch", "time"), datetime_feats["year_progress_sin"] + ), + "year_progress_cos": xarray.Variable( + ("batch", "time"), datetime_feats["year_progress_cos"] + ), + "day_progress_sin": xarray.Variable( + ("batch", "time", "lon"), datetime_feats["day_progress_sin"] + ), + "day_progress_cos": xarray.Variable( + ("batch", "time", "lon"), datetime_feats["day_progress_cos"] + ), + } + ) + + inputs, targets = extract_input_target_times( + nc_dataset, input_duration="12h", target_lead_times="6h" + ) + + # 统计数据 + stddev_data = xarray.open_dataset(config.stddev_path).sel( + level=list(self.level_variables) + ) + stddev_diffs_data = xarray.open_dataset(config.stddev_diffs_path).sel( + level=list(self.level_variables) + ) + mean_data = xarray.open_dataset(config.mean_path).sel( + level=list(self.level_variables) + ) + + missing_variables = set(self.target_variables) - set(self.input_variables) + exist_variables = set(self.target_variables) - missing_variables + targets_stddev = stddev_diffs_data[list(exist_variables)] + target_mean = inputs[list(exist_variables)].isel(time=-1) + if missing_variables: + targets_stddev.update({var: stddev_data[var] for var in missing_variables}) + target_mean.update( + {var: mean_data.variables[var] for var in missing_variables} + ) + + stacked_targets_stddev = dataset_to_stacked(targets_stddev, preserved_dims=()) + stacked_targets_mean = dataset_to_stacked(target_mean) + stacked_targets_mean = stacked_targets_mean.transpose("lat", "lon", ...) + + # The forcing uses the same time coordinates as the target. + inputs = inputs[list(self.input_variables)] + forcings = targets[list(self.forcing_variables)] + targets = targets[list(self.target_variables)] + inputs = self.normalize(inputs, stddev_data, mean_data) + forcings = self.normalize(forcings, stddev_data, mean_data) + + self.targets_template = targets + + stacked_inputs = dataset_to_stacked(inputs) + stacked_forcings = dataset_to_stacked(forcings) + stacked_targets = dataset_to_stacked(targets) + stacked_inputs = xarray.concat( + [stacked_inputs, stacked_forcings], dim="channels" + ) + + stacked_inputs = stacked_inputs.transpose("lat", "lon", ...) 
+        stacked_targets = stacked_targets.transpose("lat", "lon", ...)
+
+        # The inputs here cover a 12h window and the targets a 6h lead time.
+        # TODO: process the full dataset for training using the same steps as here.
+        lat_dim, lon_dim, batch_dim, feat_dim = stacked_inputs.shape
+        stacked_inputs = stacked_inputs.data.reshape(lat_dim * lon_dim, batch_dim, -1)
+        stacked_targets = stacked_targets.data.reshape(lat_dim * lon_dim, batch_dim, -1)
+        self.stacked_targets_stddev = stacked_targets_stddev.data
+        self.stacked_targets_mean = stacked_targets_mean.data.reshape(
+            lat_dim * lon_dim, batch_dim, -1
+        )
+
+        self.input_data = []
+        self.target_data = []
+
+        graph_template_path = os.path.join(
+            "data", "template_graph", f"{config.type}.pkl"
+        )
+        if os.path.exists(graph_template_path):
+            graph_template = pickle.load(open(graph_template_path, "rb"))
+        else:
+            graph_template = graphtype.GraphGridMesh(config)
+
+        graph = copy.deepcopy(graph_template)
+        graph.grid_node_feat = np.concatenate(
+            [stacked_inputs, graph.grid_node_feat], axis=-1
+        )
+        mesh_node_feat = np.zeros([graph.mesh_num_nodes, batch_dim, feat_dim])
+        graph.mesh_node_feat = np.concatenate(
+            [mesh_node_feat, graph.mesh_node_feat], axis=-1
+        )
+
+        self.input_data.append(graph)
+        self.target_data.append(stacked_targets)
+
+    def __len__(self):
+        return len(self.input_data)
+
+    def __getitem__(self, idx):
+        return self.input_data[idx], self.target_data[idx]
+
+    def normalize(self, inputs_data, stddev_data, mean_data):
+        for name in list(inputs_data.keys()):
+            inputs_data[name] = (inputs_data[name] - mean_data[name]) / stddev_data[
+                name
+            ]
+        return inputs_data
+
+    def denormalize(self, inputs_data):
+        return inputs_data * self.stacked_targets_stddev + self.stacked_targets_mean
+>>>>>>> Stashed changes
diff --git a/jointContribution/graphcast/graphcast.py b/jointContribution/graphcast/graphcast.py
index e0041d5ab9..7b4eb4fefe 100644
--- a/jointContribution/graphcast/graphcast.py
+++ b/jointContribution/graphcast/graphcast.py
@@ -1,3 +1,4 @@
+<<<<<<< Updated upstream
 import paddle
 import paddle.nn as nn
 import sparse_transformer
@@ -360,3 +361,258 @@ def __init__(self, config, **kwargs):
     def forward(self, graph: graphtype.GraphGridMesh):
         graph = self.graphcast(graph)
         return graph
+=======
+import graphtype
+import paddle
+import paddle.nn as nn
+
+
+class ResidualConnection(nn.Layer):
+    def __init__(self, fn):
+        super().__init__()
+        self.fn = fn
+
+    def forward(self, inputs):
+        return inputs + self.fn(inputs)
+
+
+class GraphCastMLP(nn.Layer):
+    def __init__(
+        self, in_features, out_features, latent_features=None, layer_norm=True
+    ):
+        super().__init__()
+
+        if latent_features is None:
+            latent_features = out_features
+
+        self.mlp = nn.Sequential(
+            nn.Linear(in_features, latent_features, bias_attr=True),
+            nn.Silu(),
+            nn.Linear(latent_features, out_features, bias_attr=True),
+        )
+        self.layer_norm = layer_norm
+        if layer_norm:
+            self.layer_norm = nn.LayerNorm(out_features)
+
+    def forward(self, feat):
+        if self.layer_norm:
+            out = self.layer_norm(self.mlp(feat))
+        else:
+            out = self.mlp(feat)
+        return out
+
+
+class GraphCastGNN(nn.Layer):
+    def __init__(self, config, src_type="mesh", dst_type="mesh"):
+        super().__init__()
+
+        self.src = src_type
+        self.dst = dst_type
+        self.config = config
+
+        self.edge_in_dim = config.grid_node_emb_dim + config.mesh_node_emb_dim
+        if src_type == "mesh" and dst_type == "mesh":
+            self.edge_in_dim += config.mesh_edge_emb_dim
+            self.edge_out_dim = config.mesh_edge_emb_dim
+            self.node_in_dim = config.mesh_node_emb_dim + config.mesh_edge_emb_dim
+            self.node_out_dim = config.mesh_node_emb_dim
+        elif src_type == "grid" and dst_type == "mesh":
+            self.edge_in_dim += config.grid2mesh_edge_emb_dim
+            self.edge_out_dim = config.grid2mesh_edge_emb_dim
+            self.node_in_dim = config.mesh_node_emb_dim + config.grid2mesh_edge_emb_dim
+            self.node_out_dim = config.mesh_node_emb_dim
+        elif src_type == "mesh" and dst_type == "grid":
+            self.edge_in_dim += config.mesh2grid_edge_emb_dim
+            self.edge_out_dim = config.mesh2grid_edge_emb_dim
+            self.node_in_dim = config.grid_node_emb_dim + config.mesh2grid_edge_emb_dim
+            self.node_out_dim = config.grid_node_emb_dim
+        else:
+            raise ValueError(f"Invalid src/dst type: ({src_type}, {dst_type})")
+
+        self.edge_layer = GraphCastMLP(self.edge_in_dim, self.edge_out_dim)
+        self.node_layer = GraphCastMLP(self.node_in_dim, self.node_out_dim)
+
+    def forward(self, graph: graphtype.GraphGridMesh):
+        if self.src == "mesh" and self.dst == "mesh":
+            edge_feats = graph.mesh_edge_feat
+            src_node_feats = graph.mesh_node_feat
+            dst_node_feats = graph.mesh_node_feat
+            src_idx = graph.mesh2mesh_src_index
+            dst_idx = graph.mesh2mesh_dst_index
+            dst_node_num = self.config.mesh_node_num
+        elif self.src == "grid" and self.dst == "mesh":
+            edge_feats = graph.grid2mesh_edge_feat
+            src_node_feats = graph.grid_node_feat
+            dst_node_feats = graph.mesh_node_feat
+            src_idx = graph.grid2mesh_src_index
+            dst_idx = graph.grid2mesh_dst_index
+            dst_node_num = self.config.mesh_node_num
+        elif self.src == "mesh" and self.dst == "grid":
+            edge_feats = graph.mesh2grid_edge_feat
+            src_node_feats = graph.mesh_node_feat
+            dst_node_feats = graph.grid_node_feat
+            src_idx = graph.mesh2grid_src_index
+            dst_idx = graph.mesh2grid_dst_index
+            dst_node_num = self.config.grid_node_num
+
+        # Update edge features.
+        edge_feats_concat = paddle.concat(
+            [
+                edge_feats,
+                paddle.gather(src_node_feats, src_idx),
+                paddle.gather(dst_node_feats, dst_idx),
+            ],
+            axis=-1,
+        )
+        edge_feats_out = self.edge_layer(edge_feats_concat)
+
+        _, batch_dim, _ = edge_feats_out.shape
+        # Update node features by aggregating the incoming edge messages.
+        edge_feats_scatter = paddle.zeros([dst_node_num, batch_dim, self.edge_out_dim])
+        node_feats_concat = paddle.concat(
+            [
+                dst_node_feats,
+                paddle.scatter(
+                    edge_feats_scatter, dst_idx, edge_feats_out, overwrite=False
+                ),
+            ],
+            axis=-1,
+        )
+        node_feats_out = self.node_layer(node_feats_concat)
+
+        if self.src == "mesh" and self.dst == "mesh":
+            graph.mesh_edge_feat += edge_feats_out
+            graph.mesh_node_feat += node_feats_out
+        elif self.src == "grid" and self.dst == "mesh":
+            graph.grid2mesh_edge_feat += edge_feats_out
+            graph.mesh_node_feat += node_feats_out
+        elif self.src == "mesh" and self.dst == "grid":
+            graph.mesh2grid_edge_feat += edge_feats_out
+            graph.grid_node_feat += node_feats_out
+
+        return graph
+
+
+class GraphCastEmbedding(nn.Layer):
+    def __init__(self, config):
+        super().__init__()
+
+        self.grid_node_embedding = GraphCastMLP(
+            config.grid_node_dim, config.grid_node_emb_dim
+        )
+        self.mesh_node_embedding = GraphCastMLP(
+            config.mesh_node_dim, config.mesh_node_emb_dim
+        )
+        self.mesh_edge_embedding = GraphCastMLP(
+            config.mesh_edge_dim, config.mesh_edge_emb_dim
+        )
+        self.grid2mesh_edge_embedding = GraphCastMLP(
+            config.grid2mesh_edge_dim, config.grid2mesh_edge_emb_dim
+        )
+        self.mesh2grid_edge_embedding = GraphCastMLP(
+            config.mesh2grid_edge_dim, config.mesh2grid_edge_emb_dim
+        )
+
+    def forward(self, graph: graphtype.GraphGridMesh):
+        grid_node_emb = self.grid_node_embedding(graph.grid_node_feat)
+        mesh_node_emb = self.mesh_node_embedding(graph.mesh_node_feat)
+        mesh_edge_emb = self.mesh_edge_embedding(graph.mesh_edge_feat)
+        grid2mesh_edge_emb = 
self.grid2mesh_edge_embedding(graph.grid2mesh_edge_feat) + mesh2grid_edge_emb = self.mesh2grid_edge_embedding(graph.mesh2grid_edge_feat) + + graph.grid_node_feat = grid_node_emb + graph.mesh_node_feat = mesh_node_emb + graph.mesh_edge_feat = mesh_edge_emb + graph.grid2mesh_edge_feat = grid2mesh_edge_emb + graph.mesh2grid_edge_feat = mesh2grid_edge_emb + + return graph + + +class GraphCastGrid2Mesh(paddle.nn.Layer): + def __init__(self, config): + super().__init__() + self.grid2mesh_gnn = GraphCastGNN(config, src_type="grid", dst_type="mesh") + self.grid_node_layer = ResidualConnection( + GraphCastMLP(config.grid_node_emb_dim, config.grid_node_emb_dim) + ) + + def forward(self, graph: graphtype.GraphGridMesh): + graph = self.grid2mesh_gnn(graph) + graph.grid_node_feat = self.grid_node_layer(graph.grid_node_feat) + return graph + + +class GraphCastMesh2Grid(paddle.nn.Layer): + def __init__(self, config): + super().__init__() + self.mesh2grid_gnn = GraphCastGNN(config, src_type="mesh", dst_type="grid") + self.mesh_node_layer = ResidualConnection( + GraphCastMLP(config.mesh_node_emb_dim, config.mesh_node_emb_dim) + ) + + def forward(self, graph: graphtype.GraphGridMesh): + graph = self.mesh2grid_gnn(graph) + graph.mesh_node_feat = self.mesh_node_layer(graph.mesh_node_feat) + return graph + + +class GraphCastEncoder(nn.Layer): + def __init__(self, config): + super().__init__() + self.embedding = GraphCastEmbedding(config) + self.grid2mesh_gnn = GraphCastGrid2Mesh(config) + + def forward(self, graph: graphtype.GraphGridMesh): + graph = self.embedding(graph) + graph = self.grid2mesh_gnn(graph) + return graph + + +class GraphCastDecoder(nn.Layer): + def __init__(self, config): + super().__init__() + self.mesh2grid_gnn = GraphCastMesh2Grid(config) + self.grid_node_layer = GraphCastMLP( + config.grid_node_emb_dim, + config.node_output_dim, + latent_features=config.grid_node_emb_dim, + layer_norm=False, + ) + + def forward(self, graph: graphtype.GraphGridMesh): + graph = self.mesh2grid_gnn(graph) + graph.grid_node_feat = self.grid_node_layer(graph.grid_node_feat) + return graph + + +class GraphCastProcessor(nn.Layer): + def __init__(self, config): + super().__init__() + + self.processor = nn.Sequential() + for idx in range(config.gnn_msg_steps): + self.processor.add_sublayer( + f"{idx}", + GraphCastGNN(config, src_type="mesh", dst_type="mesh"), + ) + + def forward(self, graph: graphtype.GraphGridMesh): + graph = self.processor(graph) + return graph + + +class GraphCastNet(nn.Layer): + def __init__(self, config): + super().__init__() + + self.graphcast = nn.Sequential( + ("encoder", GraphCastEncoder(config)), + ("processor", GraphCastProcessor(config)), + ("decoder", GraphCastDecoder(config)), + ) + + def forward(self, graph: graphtype.GraphGridMesh): + graph = self.graphcast(graph) + return graph +>>>>>>> Stashed changes diff --git a/jointContribution/graphcast/graphtype.py b/jointContribution/graphcast/graphtype.py index 9bd3d21c7f..21c4d70ab5 100644 --- a/jointContribution/graphcast/graphtype.py +++ b/jointContribution/graphcast/graphtype.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright 2023 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -691,3 +692,649 @@ def convert_np_to_tensor(graph: GraphGridMesh): graph.mesh2grid_edge_feat, dtype=paddle.get_default_dtype() ) return graph +======= +# Copyright 2023 DeepMind Technologies Limited. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS-IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import itertools
+import typing
+
+import datasets
+import numpy as np
+import paddle
+import scipy
+import trimesh
+import utils
+import xarray
+
+
+class GraphGridMesh(object):
+    def __init__(
+        self,
+        config,
+        mesh2mesh_src_index=None,
+        mesh2mesh_dst_index=None,
+        grid2mesh_src_index=None,
+        grid2mesh_dst_index=None,
+        mesh2grid_src_index=None,
+        mesh2grid_dst_index=None,
+        mesh_num_nodes=None,
+        grid_num_nodes=None,
+        mesh_num_edges=None,
+        grid2mesh_num_edges=None,
+        mesh2grid_num_edges=None,
+        grid_node_feat=None,
+        mesh_node_feat=None,
+        mesh_edge_feat=None,
+        grid2mesh_edge_feat=None,
+        mesh2grid_edge_feat=None,
+    ):
+        """Graph that couples a regular lat/lon grid with an icosahedral mesh.
+
+        If any of the optional arguments is None, the grid2mesh, mesh2mesh and
+        mesh2grid connectivity plus the static spatial features are built from
+        scratch according to `config`. Otherwise the given index and feature
+        arrays are used as-is.
+
+        Args:
+            config: Model/training configuration (see `args.TrainingArguments`).
+            mesh2mesh_src_index (optional): Sender indices of mesh-mesh edges. Defaults to None.
+            mesh2mesh_dst_index (optional): Receiver indices of mesh-mesh edges. Defaults to None.
+            grid2mesh_src_index (optional): Sender (grid) indices of grid2mesh edges. Defaults to None.
+            grid2mesh_dst_index (optional): Receiver (mesh) indices of grid2mesh edges. Defaults to None.
+            mesh2grid_src_index (optional): Sender (mesh) indices of mesh2grid edges. Defaults to None.
+            mesh2grid_dst_index (optional): Receiver (grid) indices of mesh2grid edges. Defaults to None.
+            mesh_num_nodes (optional): Number of mesh nodes. Defaults to None.
+            grid_num_nodes (optional): Number of grid nodes. Defaults to None.
+            mesh_num_edges (optional): Number of mesh-mesh edges. Defaults to None.
+            grid2mesh_num_edges (optional): Number of grid2mesh edges. Defaults to None.
+            mesh2grid_num_edges (optional): Number of mesh2grid edges. Defaults to None.
+            grid_node_feat (optional): Grid node features. Defaults to None.
+            mesh_node_feat (optional): Mesh node features. Defaults to None.
+            mesh_edge_feat (optional): Mesh-mesh edge features. Defaults to None.
+            grid2mesh_edge_feat (optional): Grid2mesh edge features. Defaults to None.
+            mesh2grid_edge_feat (optional): Mesh2grid edge features. Defaults to None.
+        """
+        self.meshes = get_hierarchy_of_triangular_meshes_for_sphere(config.mesh_size)
+
+        all_input_vars = [
+            mesh2mesh_src_index,
+            mesh2mesh_dst_index,
+            grid2mesh_src_index,
+            grid2mesh_dst_index,
+            mesh2grid_src_index,
+            mesh2grid_dst_index,
+            mesh_num_nodes,
+            grid_num_nodes,
+            mesh_num_edges,
+            grid2mesh_num_edges,
+            mesh2grid_num_edges,
+            grid_node_feat,
+            mesh_node_feat,
+            mesh_edge_feat,
+            grid2mesh_edge_feat,
+            mesh2grid_edge_feat,
+        ]
+        should_init = any(var is None for var in all_input_vars)
+
+        if should_init:
+            # Build the graph structure and static features from scratch.
+            self.query_radius = (
+                self._get_max_edge_distance(self.finest_mesh)
+                * config.radius_query_fraction_edge_length
+            )
+            self._mesh2grid_edge_normalization_factor = (
+                config.mesh2grid_edge_normalization_factor
+            )
+            self._spatial_features_kwargs = dict(
+                add_node_positions=False,
+                add_node_latitude=True,
+                add_node_longitude=True,
+                add_relative_positions=True,
+                relative_longitude_local_coordinates=True,
+                relative_latitude_local_coordinates=True,
+            )
+
+            self.init_mesh_properties()
+            self._init_grid_properties(
+                grid_lat=np.arange(-90.0, 90.0 + config.resolution, config.resolution),
+                grid_lon=np.arange(0.0, 360.0, config.resolution),
+            )
+            self._grid2mesh_graph_structure = self._init_grid2mesh_graph()
+            self._mesh_graph_structure = self._init_mesh_graph()
+            self._mesh2grid_graph_structure = self._init_mesh2grid_graph()
+        else:
+            # Build the graph directly from the given data.
+            # Graph structure (connectivity) information.
+            self.mesh2mesh_src_index = mesh2mesh_src_index
+            self.mesh2mesh_dst_index = mesh2mesh_dst_index
+            self.grid2mesh_src_index = grid2mesh_src_index
+            self.grid2mesh_dst_index = grid2mesh_dst_index
+            self.mesh2grid_src_index = mesh2grid_src_index
+            self.mesh2grid_dst_index = mesh2grid_dst_index
+
+            self.mesh_num_nodes = mesh_num_nodes
+            self.grid_num_nodes = grid_num_nodes
+
+            self.mesh_num_edges = mesh_num_edges
+            self.grid2mesh_num_edges = grid2mesh_num_edges
+            self.mesh2grid_num_edges = mesh2grid_num_edges
+
+            # Graph feature information.
+            self.grid_node_feat = grid_node_feat
+            self.mesh_node_feat = mesh_node_feat
+            self.mesh_edge_feat = mesh_edge_feat
+            self.grid2mesh_edge_feat = grid2mesh_edge_feat
+            self.mesh2grid_edge_feat = mesh2grid_edge_feat
+
+    def update(self, name, value):
+        if hasattr(self, name):
+            setattr(self, name, value)
+        else:
+            raise ValueError(f"GraphGridMesh has no attribute named '{name}'")
+
+    @property
+    def finest_mesh(self):
+        return self.meshes[-1]
+
+    def init_mesh_properties(self):
+        """Inits static properties that have to do with mesh nodes."""
+        self.mesh_num_nodes = self.finest_mesh.vertices.shape[0]
+        mesh_phi, mesh_theta = utils.cartesian_to_spherical(
+            self.finest_mesh.vertices[:, 0],
+            self.finest_mesh.vertices[:, 1],
+            self.finest_mesh.vertices[:, 2],
+        )
+        (mesh_nodes_lat, mesh_nodes_lon) = utils.spherical_to_lat_lon(
+            phi=mesh_phi,
+            theta=mesh_theta,
+        )
+        # Convert to f32 to ensure the lat/lon features aren't in f64.
+        self._mesh_nodes_lat = mesh_nodes_lat.astype(np.float32)
+        self._mesh_nodes_lon = mesh_nodes_lon.astype(np.float32)
+
+    def _init_grid_properties(self, grid_lat: np.ndarray, grid_lon: np.ndarray):
+        """Inits static properties that have to do with grid nodes."""
+        self._grid_lat = grid_lat.astype(np.float32)
+        self._grid_lon = grid_lon.astype(np.float32)
+        # Initialize the counters.
+        self.grid_num_nodes = grid_lat.shape[0] * grid_lon.shape[0]
+
+        # Initialize lat and lon for the grid.
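+        # Illustrative note (not from the original source): with the 1.0-degree
+        # "GraphCast_small" configuration, grid_lat = np.arange(-90.0, 91.0, 1.0)
+        # has 181 entries and grid_lon = np.arange(0.0, 360.0, 1.0) has 360, so
+        # grid_num_nodes = 181 * 360 = 65160 flattened [lat, lon] nodes, which
+        # matches the (181 * 360, 1, 83) reshape used in run.py.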
+ grid_nodes_lon, grid_nodes_lat = np.meshgrid(grid_lon, grid_lat) + self._grid_nodes_lon = grid_nodes_lon.reshape([-1]).astype(np.float32) + self._grid_nodes_lat = grid_nodes_lat.reshape([-1]).astype(np.float32) + + def _init_grid2mesh_graph(self): + """Build Grid2Mesh graph.""" + + # Create some edges according to distance between mesh and grid nodes. + assert self._grid_lat is not None and self._grid_lon is not None + (grid_indices, mesh_indices) = radius_query_indices( + grid_latitude=self._grid_lat, + grid_longitude=self._grid_lon, + mesh=self.finest_mesh, + radius=self.query_radius, + ) + + # Edges sending info from grid to mesh. + senders = grid_indices + receivers = mesh_indices + + # Precompute structural node and edge features according to config options. + # Structural features are those that depend on the fixed values of the + # latitude and longitudes of the nodes. + ( + senders_node_features, + _, + edge_features, + ) = utils.get_bipartite_graph_spatial_features( + senders_node_lat=self._grid_nodes_lat, + senders_node_lon=self._grid_nodes_lon, + receivers_node_lat=self._mesh_nodes_lat, + receivers_node_lon=self._mesh_nodes_lon, + senders=senders, + receivers=receivers, + edge_normalization_factor=None, + **self._spatial_features_kwargs, + ) + + self.grid_node_feat = np.expand_dims(senders_node_features, axis=1) + + self.grid2mesh_src_index = senders + self.grid2mesh_dst_index = receivers + self.grid2mesh_edge_feat = np.expand_dims(edge_features, axis=1) + self.grid2mesh_num_edges = len(edge_features) + + def _init_mesh_graph(self): + """Build Mesh graph.""" + merged_mesh = merge_meshes(self.meshes) + # Work simply on the mesh edges. + senders, receivers = faces_to_edges(merged_mesh.faces) + # Precompute structural node and edge features according to config options. + # Structural features are those that depend on the fixed values of the + # latitude and longitudes of the nodes. + assert self._mesh_nodes_lat is not None and self._mesh_nodes_lon is not None + node_features, edge_features = utils.get_graph_spatial_features( + node_lat=self._mesh_nodes_lat, + node_lon=self._mesh_nodes_lon, + senders=senders, + receivers=receivers, + **self._spatial_features_kwargs, + ) + + self.mesh_node_feat = np.expand_dims(node_features, axis=1) + self.mesh2mesh_src_index = senders + self.mesh2mesh_dst_index = receivers + self.mesh_edge_feat = np.expand_dims(edge_features, axis=1) + self.mesh_num_edges = len(edge_features) + + def _init_mesh2grid_graph(self): + """Build Mesh2Grid graph.""" + + # Create some edges according to how the grid nodes are contained by + # mesh triangles. + (grid_indices, mesh_indices) = in_mesh_triangle_indices( + grid_latitude=self._grid_lat, + grid_longitude=self._grid_lon, + mesh=self.finest_mesh, + ) + + # Edges sending info from mesh to grid. + senders = mesh_indices + receivers = grid_indices + + # Precompute structural node and edge features according to config options. 
+ assert self._mesh_nodes_lat is not None and self._mesh_nodes_lon is not None + (_, _, edge_features) = utils.get_bipartite_graph_spatial_features( + senders_node_lat=self._mesh_nodes_lat, + senders_node_lon=self._mesh_nodes_lon, + receivers_node_lat=self._grid_nodes_lat, + receivers_node_lon=self._grid_nodes_lon, + senders=senders, + receivers=receivers, + edge_normalization_factor=self._mesh2grid_edge_normalization_factor, + **self._spatial_features_kwargs, + ) + + self.mesh2grid_src_index = senders + self.mesh2grid_dst_index = receivers + self.mesh2grid_edge_feat = np.expand_dims(edge_features, axis=1) + self.mesh2grid_num_edges = len(edge_features) + + @staticmethod + def _get_max_edge_distance(mesh): + senders, receivers = faces_to_edges(mesh.faces) + edge_distances = np.linalg.norm( + mesh.vertices[senders] - mesh.vertices[receivers], axis=-1 + ) + return edge_distances.max() + + def grid_node_outputs_to_prediction( + self, + grid_node_outputs: np.ndarray, + targets_template: xarray.Dataset, + ) -> xarray.Dataset: + """[num_grid_nodes, batch, num_outputs] -> xarray.""" + # numpy array with shape [lat_lon_node, batch, channels] + # to xarray `DataArray` (batch, lat, lon, channels) + assert self._grid_lat is not None and self._grid_lon is not None + grid_shape = (self._grid_lat.shape[0], self._grid_lon.shape[0]) + grid_outputs_lat_lon_leading = grid_node_outputs.reshape( + grid_shape + grid_node_outputs.shape[1:] + ) + dims = ("lat", "lon", "batch", "channels") + grid_xarray_lat_lon_leading = xarray.DataArray( + data=grid_outputs_lat_lon_leading, dims=dims + ) + grid_xarray = utils.restore_leading_axes(grid_xarray_lat_lon_leading) + + # xarray `DataArray` (batch, lat, lon, channels) + # to xarray `Dataset` (batch, one time step, lat, lon, level, multiple vars) + return datasets.stacked_to_dataset(grid_xarray.variable, targets_template) + + +class TriangularMesh(typing.NamedTuple): + vertices: np.ndarray + faces: np.ndarray + + +def merge_meshes(mesh_list: typing.Sequence[TriangularMesh]) -> TriangularMesh: + for mesh_i, mesh_ip1 in itertools.pairwise(mesh_list): + num_nodes_mesh_i = mesh_i.vertices.shape[0] + assert np.allclose(mesh_i.vertices, mesh_ip1.vertices[:num_nodes_mesh_i]) + + return TriangularMesh( + vertices=mesh_list[-1].vertices, + faces=np.concatenate([mesh.faces for mesh in mesh_list], axis=0), + ) + + +def get_icosahedron(): + phi = (1 + np.sqrt(5)) / 2 + vertices = [] + for c1, c2 in itertools.product([1.0, -1.0], [phi, -phi]): + vertices.append((c1, c2, 0.0)) + vertices.append((0.0, c1, c2)) + vertices.append((c2, 0.0, c1)) + + vertices = np.array(vertices, dtype=np.float32) + vertices /= np.linalg.norm([1.0, phi]) + + faces = [ + (0, 1, 2), + (0, 6, 1), + (8, 0, 2), + (8, 4, 0), + (3, 8, 2), + (3, 2, 7), + (7, 2, 1), + (0, 4, 6), + (4, 11, 6), + (6, 11, 5), + (1, 5, 7), + (4, 10, 11), + (4, 8, 10), + (10, 8, 3), + (10, 3, 9), + (11, 10, 9), + (11, 9, 5), + (5, 9, 7), + (9, 3, 7), + (1, 6, 5), + ] + + angle_between_faces = 2 * np.arcsin(phi / np.sqrt(3)) + rotation_angle = (np.pi - angle_between_faces) / 2 + rotation = scipy.spatial.transform.Rotation.from_euler( + seq="y", angles=rotation_angle + ) + rotation_matrix = rotation.as_matrix() + vertices = np.dot(vertices, rotation_matrix) + + return TriangularMesh( + vertices=vertices.astype(np.float32), faces=np.array(faces, dtype=np.int32) + ) + + +def get_hierarchy_of_triangular_meshes_for_sphere( + splits: int, +) -> typing.List[TriangularMesh]: + current_mesh = get_icosahedron() + output_meshes = [current_mesh] + 
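+    # Sketch of the refinement sizes (added for clarity, not in the original
+    # code): the base icosahedron has 12 vertices and 20 faces, and each split
+    # in the loop below replaces every face with 4 smaller ones, so after
+    # `splits` rounds the finest mesh has 20 * 4**splits faces
+    # (e.g. splits=4 gives 5120 faces).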
for _ in range(splits): + current_mesh = _two_split_unit_sphere_triangle_faces(current_mesh) + output_meshes.append(current_mesh) + return output_meshes + + +def _two_split_unit_sphere_triangle_faces( + triangular_mesh: TriangularMesh, +) -> TriangularMesh: + """Splits each triangular face into 4 triangles keeping the orientation.""" + new_vertices_builder = _ChildVerticesBuilder(triangular_mesh.vertices) + + new_faces = [] + for ind1, ind2, ind3 in triangular_mesh.faces: + ind12 = new_vertices_builder.get_new_child_vertex_index((ind1, ind2)) + ind23 = new_vertices_builder.get_new_child_vertex_index((ind2, ind3)) + ind31 = new_vertices_builder.get_new_child_vertex_index((ind3, ind1)) + new_faces.extend( + [ + [ind1, ind12, ind31], # 1 + [ind12, ind2, ind23], # 2 + [ind31, ind23, ind3], # 3 + [ind12, ind23, ind31], # 4 + ] + ) + return TriangularMesh( + vertices=new_vertices_builder.get_all_vertices(), + faces=np.array(new_faces, dtype=np.int32), + ) + + +class _ChildVerticesBuilder(object): + """Bookkeeping of new child vertices added to an existing set of vertices.""" + + def __init__(self, parent_vertices): + self._child_vertices_index_mapping = {} + self._parent_vertices = parent_vertices + # We start with all previous vertices. + self._all_vertices_list = list(parent_vertices) + + def _get_child_vertex_key(self, parent_vertex_indices): + return tuple(sorted(parent_vertex_indices)) + + def _create_child_vertex(self, parent_vertex_indices): + """Creates a new vertex.""" + # Position for new vertex is the middle point, between the parent points, + # projected to unit sphere. + child_vertex_position = self._parent_vertices[list(parent_vertex_indices)].mean( + 0 + ) + child_vertex_position /= np.linalg.norm(child_vertex_position) + + # Add the vertex to the output list. The index for this new vertex will + # match the length of the list before adding it. + child_vertex_key = self._get_child_vertex_key(parent_vertex_indices) + self._child_vertices_index_mapping[child_vertex_key] = len( + self._all_vertices_list + ) + self._all_vertices_list.append(child_vertex_position) + + def get_new_child_vertex_index(self, parent_vertex_indices): + """Returns index for a child vertex, creating it if necessary.""" + # Get the key to see if we already have a new vertex in the middle. + child_vertex_key = self._get_child_vertex_key(parent_vertex_indices) + if child_vertex_key not in self._child_vertices_index_mapping: + self._create_child_vertex(parent_vertex_indices) + return self._child_vertices_index_mapping[child_vertex_key] + + def get_all_vertices(self): + """Returns an array with old vertices.""" + return np.array(self._all_vertices_list) + + +def faces_to_edges(faces: np.ndarray): + """Transforms polygonal faces to sender and receiver indices. + + It does so by transforming every face into N_i edges. Such if the triangular + face has indices [0, 1, 2], three edges are added 0->1, 1->2, and 2->0. + + If all faces have consistent orientation, and the surface represented by the + faces is closed, then every edge in a polygon with a certain orientation + is also part of another polygon with the opposite orientation. In this + situation, the edges returned by the method are always bidirectional. + + Args: + faces: Integer array of shape [num_faces, 3]. Contains node indices + adjacent to each face. + Returns: + Tuple with sender/receiver indices, each of shape [num_edges=num_faces*3]. 
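+
+    Example (added for illustration):
+        For faces = np.array([[0, 1, 2]]) this returns
+        senders == [0, 1, 2] and receivers == [1, 2, 0],
+        i.e. the directed edges 0->1, 1->2 and 2->0.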
+ """ + + assert faces.ndim == 2 + assert faces.shape[-1] == 3 + senders = np.concatenate([faces[:, 0], faces[:, 1], faces[:, 2]]) + receivers = np.concatenate([faces[:, 1], faces[:, 2], faces[:, 0]]) + return senders, receivers + + +# Copyright 2023 DeepMind Technologies Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tools for converting from regular grids on a sphere, to triangular meshes.""" + + +def _grid_lat_lon_to_coordinates( + grid_latitude: np.ndarray, grid_longitude: np.ndarray +) -> np.ndarray: + """Lat [num_lat] lon [num_lon] to 3d coordinates [num_lat, num_lon, 3].""" + # Convert to spherical coordinates phi and theta defined in the grid. + # Each [num_latitude_points, num_longitude_points] + phi_grid, theta_grid = np.meshgrid( + np.deg2rad(grid_longitude), np.deg2rad(90 - grid_latitude) + ) + + # [num_latitude_points, num_longitude_points, 3] + # Note this assumes unit radius, since for now we model the earth as a + # sphere of unit radius, and keep any vertical dimension as a regular grid. + return np.stack( + [ + np.cos(phi_grid) * np.sin(theta_grid), + np.sin(phi_grid) * np.sin(theta_grid), + np.cos(theta_grid), + ], + axis=-1, + ) + + +def radius_query_indices( + *, + grid_latitude: np.ndarray, + grid_longitude: np.ndarray, + mesh: TriangularMesh, + radius: float, +) -> tuple[np.ndarray, np.ndarray]: + """Returns mesh-grid edge indices for radius query. + + Args: + grid_latitude: Latitude values for the grid [num_lat_points] + grid_longitude: Longitude values for the grid [num_lon_points] + mesh: Mesh object. + radius: Radius of connectivity in R3. for a sphere of unit radius. + + Returns: + tuple with `grid_indices` and `mesh_indices` indicating edges between the + grid and the mesh such that the distances in a straight line (not geodesic) + are smaller than or equal to `radius`. + * grid_indices: Indices of shape [num_edges], that index into a + [num_lat_points, num_lon_points] grid, after flattening the leading axes. + * mesh_indices: Indices of shape [num_edges], that index into mesh.vertices. + """ + + # [num_grid_points=num_lat_points * num_lon_points, 3] + grid_positions = _grid_lat_lon_to_coordinates( + grid_latitude, grid_longitude + ).reshape([-1, 3]) + + # [num_mesh_points, 3] + mesh_positions = mesh.vertices + kd_tree = scipy.spatial.cKDTree(mesh_positions) + + # [num_grid_points, num_mesh_points_per_grid_point] + # Note `num_mesh_points_per_grid_point` is not constant, so this is a list + # of arrays, rather than a 2d array. 
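+    # Illustrative sketch (not part of the original code): if
+    # query_indices == [[3, 7], [5]], the loop below yields
+    # grid_edge_indices == [0, 0, 1] and mesh_edge_indices == [3, 7, 5],
+    # i.e. one (grid, mesh) index pair per edge within `radius`.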
+ query_indices = kd_tree.query_ball_point(x=grid_positions, r=radius) + + grid_edge_indices = [] + mesh_edge_indices = [] + for grid_index, mesh_neighbors in enumerate(query_indices): + grid_edge_indices.append(np.repeat(grid_index, len(mesh_neighbors))) + mesh_edge_indices.append(mesh_neighbors) + + # [num_edges] + grid_edge_indices = np.concatenate(grid_edge_indices, axis=0).astype(int) + mesh_edge_indices = np.concatenate(mesh_edge_indices, axis=0).astype(int) + + return grid_edge_indices, mesh_edge_indices + + +def in_mesh_triangle_indices( + *, grid_latitude: np.ndarray, grid_longitude: np.ndarray, mesh: TriangularMesh +) -> tuple[np.ndarray, np.ndarray]: + """Returns mesh-grid edge indices for grid points contained in mesh triangles. + + Args: + grid_latitude: Latitude values for the grid [num_lat_points] + grid_longitude: Longitude values for the grid [num_lon_points] + mesh: Mesh object. + + Returns: + tuple with `grid_indices` and `mesh_indices` indicating edges between the + grid and the mesh vertices of the triangle that contain each grid point. + The number of edges is always num_lat_points * num_lon_points * 3 + * grid_indices: Indices of shape [num_edges], that index into a + [num_lat_points, num_lon_points] grid, after flattening the leading axes. + * mesh_indices: Indices of shape [num_edges], that index into mesh.vertices. + """ + + # [num_grid_points=num_lat_points * num_lon_points, 3] + grid_positions = _grid_lat_lon_to_coordinates( + grid_latitude, grid_longitude + ).reshape([-1, 3]) + + mesh_trimesh = trimesh.Trimesh(vertices=mesh.vertices, faces=mesh.faces) + + # [num_grid_points] with mesh face indices for each grid point. + _, _, query_face_indices = trimesh.proximity.closest_point( + mesh_trimesh, grid_positions + ) + + # [num_grid_points, 3] with mesh node indices for each grid point. + mesh_edge_indices = mesh.faces[query_face_indices] + + # [num_grid_points, 3] with grid node indices, where every row simply contains + # the row (grid_point) index. + grid_indices = np.arange(grid_positions.shape[0]) + grid_edge_indices = np.tile(grid_indices.reshape([-1, 1]), [1, 3]) + + # Flatten to get a regular list. 
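+    # For example (illustrative, not from the original source): if grid point 0
+    # falls inside face [4, 9, 11], row 0 of mesh_edge_indices is [4, 9, 11] and
+    # row 0 of grid_edge_indices is [0, 0, 0], so the reshape below produces the
+    # three edges (0, 4), (0, 9) and (0, 11).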
+ # [num_edges=num_grid_points*3] + mesh_edge_indices = mesh_edge_indices.reshape([-1]) + grid_edge_indices = grid_edge_indices.reshape([-1]) + + return grid_edge_indices, mesh_edge_indices + + +def convert_np_to_tensor(graph: GraphGridMesh): + graph.mesh2mesh_src_index = paddle.to_tensor( + graph.mesh2mesh_src_index, dtype=paddle.int64 + ) + graph.mesh2mesh_dst_index = paddle.to_tensor( + graph.mesh2mesh_dst_index, dtype=paddle.int64 + ) + graph.grid2mesh_src_index = paddle.to_tensor( + graph.grid2mesh_src_index, dtype=paddle.int64 + ) + graph.grid2mesh_dst_index = paddle.to_tensor( + graph.grid2mesh_dst_index, dtype=paddle.int64 + ) + graph.mesh2grid_src_index = paddle.to_tensor( + graph.mesh2grid_src_index, dtype=paddle.int64 + ) + graph.mesh2grid_dst_index = paddle.to_tensor( + graph.mesh2grid_dst_index, dtype=paddle.int64 + ) + graph.grid_node_feat = paddle.to_tensor( + graph.grid_node_feat, dtype=paddle.get_default_dtype() + ) + graph.mesh_node_feat = paddle.to_tensor( + graph.mesh_node_feat, dtype=paddle.get_default_dtype() + ) + graph.mesh_edge_feat = paddle.to_tensor( + graph.mesh_edge_feat, dtype=paddle.get_default_dtype() + ) + graph.grid2mesh_edge_feat = paddle.to_tensor( + graph.grid2mesh_edge_feat, dtype=paddle.get_default_dtype() + ) + graph.mesh2grid_edge_feat = paddle.to_tensor( + graph.mesh2grid_edge_feat, dtype=paddle.get_default_dtype() + ) + return graph +>>>>>>> Stashed changes diff --git a/jointContribution/graphcast/run.py b/jointContribution/graphcast/run.py index df06da1e82..e02cdb32ad 100644 --- a/jointContribution/graphcast/run.py +++ b/jointContribution/graphcast/run.py @@ -1,192 +1,192 @@ -import json -import os -import pickle - -import args -import datasets -import graphcast -import graphtype -import numpy as np -import paddle -import vis - -# isort: off -from graphtype import GraphGridMesh # noqa: F401 -from graphtype import TriangularMesh # noqa: F401 - - -def convert_parameters(): - def convert( - jax_parameters_path, - paddle_parameters_path, - mapping_csv, - model, - output_size=False, - ): - model = graphcast.GraphCastNet(config) - state_dict = model.state_dict() - jax_data = np.load(jax_parameters_path) - - if output_size: - for key in state_dict.keys(): - print(key, state_dict[key].shape) - - for param_name in jax_data.files: - if jax_data[param_name].size == 1: - print(param_name, "\t", jax_data[param_name]) - else: - print(param_name, "\t", jax_data[param_name].shape) - - with open(mapping_csv, "r") as f: - mapping = [line.strip().split(",") for line in f] - for jax_key, paddle_key in mapping: - state_dict[paddle_key].set_value(jax_data[jax_key]) - paddle.save(state_dict, paddle_parameters_path) - - params_path = "data/params" - mapping_path = "data/graphcast-jax2paddle.csv" - - params_names = [p for p in os.listdir(params_path) if ".npz" in p] - config_jsons = { - "resolution 0.25 - pressure levels 37": "config/GraphCast.json", - "resolution 0.25 - pressure levels 13": "config/GraphCast_operational.json", - "resolution 1.0 - pressure levels 13": "config/GraphCast_small.json", - } - - for params_type, config_json in config_jsons.items(): - params_name = [n for n in params_names if params_type in n] - if len(params_name) > 1: - raise ValueError("More one parameter files") - params_name = params_name[0] - - print(f"Start convert '{params_type}' parameters...") - config_json = config_jsons[params_type] - jax_parameters_path = os.path.join(params_path, params_name) - paddle_parameters_path = os.path.join( - params_path, - params_name.replace(".npz", 
".pdparams").replace(" ", "-"), - ) - with open(config_json, "r") as f: - config = args.TrainingArguments(**json.load(f)) - convert(jax_parameters_path, paddle_parameters_path, mapping_path, config) - print(f"Convert {params_type} parameters finished.") - - -def make_graph_template(): - config_jsons = { - "resolution 0.25 - pressure levels 37": "config/GraphCast.json", - "resolution 0.25 - pressure levels 13": "config/GraphCast_operational.json", - "resolution 1.0 - pressure levels 13": "config/GraphCast_small.json", - } - - for model_type, config_json in config_jsons.items(): - print( - f"Make graph template for {model_type} and " - "Save into data/template_graph folder" - ) - - with open(config_json, "r") as f: - config = args.TrainingArguments(**json.load(f)) - graph = GraphGridMesh(config=config) - - graph_template_path = os.path.join( - "data/template_graph", - f"{config.type}.pkl", - ) - with open(graph_template_path, "wb") as f: - pickle.dump(graph, f) - - -def test_datasets(): - with open("config/GraphCast_small.json", "r") as f: - config = args.TrainingArguments(**json.load(f)) - era5dataset = datasets.ERA5Data(config=config, data_type="train") - print(era5dataset) - - -def eval(): - with open("config/GraphCast_small.json", "r") as f: - config = args.TrainingArguments(**json.load(f)) - dataset = datasets.ERA5Data(config=config, data_type="train") - model = graphcast.GraphCastNet(config) - model.set_state_dict(paddle.load(config.param_path)) - graph = model(graphtype.convert_np_to_tensor(dataset.input_data[0])) - pred = dataset.denormalize(graph.grid_node_feat.numpy()) - pred = graph.grid_node_outputs_to_prediction(pred, dataset.targets_template) - print(pred) - - return ( - graph.grid_node_outputs_to_prediction( - dataset.target_data[0], dataset.targets_template - ), - pred, - ) - - -def visualize(target, pred, variable_name, level, robust=True): - plot_size = 5 - plot_max_steps = pred.dims["time"] - - data = { - "Targets": vis.scale( - vis.select(target, variable_name, level, plot_max_steps), robust=robust - ), - "Predictions": vis.scale( - vis.select(pred, variable_name, level, plot_max_steps), robust=robust - ), - "Diff": vis.scale( - ( - vis.select(target, variable_name, level, plot_max_steps) - - vis.select(pred, variable_name, level, plot_max_steps) - ), - robust=robust, - center=0, - ), - } - fig_title = variable_name - if "level" in pred[variable_name].coords: - fig_title += f" at {level} hPa" - - vis.plot_data(data, fig_title, plot_size, robust) - - -def compare(paddle_pred): - with open("config/GraphCast_small.json", "r") as f: - config = args.TrainingArguments(**json.load(f)) - dataset = datasets.ERA5Data(config=config, data_type="train") - graph = graphtype.convert_np_to_tensor(dataset.input_data[0]) - - jax_graphcast_small_pred_path = "other/graphcast_small_output.npy" - jax_graphcast_small_pred = np.load(jax_graphcast_small_pred_path).reshape( - 181 * 360, 1, 83 - ) - jax_graphcast_small_pred = graph.grid_node_outputs_to_prediction( - jax_graphcast_small_pred, dataset.targets_template - ) - - paddle_graphcast_small_pred = paddle_pred - - for var_name in list(paddle_graphcast_small_pred): - diff_var = np.average( - jax_graphcast_small_pred[var_name].data - - paddle_graphcast_small_pred[var_name].data - ) - print(var_name, f"diff is {diff_var}") - - jax_graphcast_small_pred_np = datasets.dataset_to_stacked(jax_graphcast_small_pred) - paddle_graphcast_small_pred_np = datasets.dataset_to_stacked( - paddle_graphcast_small_pred - ) - diff_all = np.average( - 
jax_graphcast_small_pred_np.data - paddle_graphcast_small_pred_np.data - ) - print(f"All diff is {diff_all}") - - -if __name__ == "__main__": - convert_parameters() # step.1 - make_graph_template() # step.2 - test_datasets() # step.3 - target, pred = eval() # step.4 - visualize(target, pred, "2m_temperature", level=50) - compare(pred) +import json +import os +import pickle + +import args +import datasets +import graphcast +import graphtype +import numpy as np +import paddle +import vis + +# isort: off +from graphtype import GraphGridMesh # noqa: F401 +from graphtype import TriangularMesh # noqa: F401 + + +def convert_parameters(): + def convert( + jax_parameters_path, + paddle_parameters_path, + mapping_csv, + model, + output_size=False, + ): + model = graphcast.GraphCastNet(config) + state_dict = model.state_dict() + jax_data = np.load(jax_parameters_path) + + if output_size: + for key in state_dict.keys(): + print(key, state_dict[key].shape) + + for param_name in jax_data.files: + if jax_data[param_name].size == 1: + print(param_name, "\t", jax_data[param_name]) + else: + print(param_name, "\t", jax_data[param_name].shape) + + with open(mapping_csv, "r") as f: + mapping = [line.strip().split(",") for line in f] + for jax_key, paddle_key in mapping: + state_dict[paddle_key].set_value(jax_data[jax_key]) + paddle.save(state_dict, paddle_parameters_path) + + params_path = "data/params" + mapping_path = "data/graphcast-jax2paddle.csv" + + params_names = [p for p in os.listdir(params_path) if ".npz" in p] + config_jsons = { + "resolution 0.25 - pressure levels 37": "config/GraphCast.json", + "resolution 0.25 - pressure levels 13": "config/GraphCast_operational.json", + "resolution 1.0 - pressure levels 13": "config/GraphCast_small.json", + } + + for params_type, config_json in config_jsons.items(): + params_name = [n for n in params_names if params_type in n] + if len(params_name) > 1: + raise ValueError("More one parameter files") + params_name = params_name[0] + + print(f"Start convert '{params_type}' parameters...") + config_json = config_jsons[params_type] + jax_parameters_path = os.path.join(params_path, params_name) + paddle_parameters_path = os.path.join( + params_path, + params_name.replace(".npz", ".pdparams").replace(" ", "-"), + ) + with open(config_json, "r") as f: + config = args.TrainingArguments(**json.load(f)) + convert(jax_parameters_path, paddle_parameters_path, mapping_path, config) + print(f"Convert {params_type} parameters finished.") + + +def make_graph_template(): + config_jsons = { + "resolution 0.25 - pressure levels 37": "config/GraphCast.json", + "resolution 0.25 - pressure levels 13": "config/GraphCast_operational.json", + "resolution 1.0 - pressure levels 13": "config/GraphCast_small.json", + } + + for model_type, config_json in config_jsons.items(): + print( + f"Make graph template for {model_type} and " + "Save into data/template_graph folder" + ) + + with open(config_json, "r") as f: + config = args.TrainingArguments(**json.load(f)) + graph = GraphGridMesh(config=config) + + graph_template_path = os.path.join( + "data/template_graph", + f"{config.type}.pkl", + ) + with open(graph_template_path, "wb") as f: + pickle.dump(graph, f) + + +def test_datasets(): + with open("config/GraphCast_small.json", "r") as f: + config = args.TrainingArguments(**json.load(f)) + era5dataset = datasets.ERA5Data(config=config, data_type="train") + print(era5dataset) + + +def eval(): + with open("config/GraphCast_small.json", "r") as f: + config = 
args.TrainingArguments(**json.load(f)) + dataset = datasets.ERA5Data(config=config, data_type="train") + model = graphcast.GraphCastNet(config) + model.set_state_dict(paddle.load(config.param_path)) + graph = model(graphtype.convert_np_to_tensor(dataset.input_data[0])) + pred = dataset.denormalize(graph.grid_node_feat.numpy()) + pred = graph.grid_node_outputs_to_prediction(pred, dataset.targets_template) + print(pred) + + return ( + graph.grid_node_outputs_to_prediction( + dataset.target_data[0], dataset.targets_template + ), + pred, + ) + + +def visualize(target, pred, variable_name, level, robust=True): + plot_size = 5 + plot_max_steps = pred.dims["time"] + + data = { + "Targets": vis.scale( + vis.select(target, variable_name, level, plot_max_steps), robust=robust + ), + "Predictions": vis.scale( + vis.select(pred, variable_name, level, plot_max_steps), robust=robust + ), + "Diff": vis.scale( + ( + vis.select(target, variable_name, level, plot_max_steps) + - vis.select(pred, variable_name, level, plot_max_steps) + ), + robust=robust, + center=0, + ), + } + fig_title = variable_name + if "level" in pred[variable_name].coords: + fig_title += f" at {level} hPa" + + vis.plot_data(data, fig_title, plot_size, robust) + + +def compare(paddle_pred): + with open("config/GraphCast_small.json", "r") as f: + config = args.TrainingArguments(**json.load(f)) + dataset = datasets.ERA5Data(config=config, data_type="train") + graph = graphtype.convert_np_to_tensor(dataset.input_data[0]) + + jax_graphcast_small_pred_path = "other/graphcast_small_output.npy" + jax_graphcast_small_pred = np.load(jax_graphcast_small_pred_path).reshape( + 181 * 360, 1, 83 + ) + jax_graphcast_small_pred = graph.grid_node_outputs_to_prediction( + jax_graphcast_small_pred, dataset.targets_template + ) + + paddle_graphcast_small_pred = paddle_pred + + for var_name in list(paddle_graphcast_small_pred): + diff_var = np.average( + jax_graphcast_small_pred[var_name].data + - paddle_graphcast_small_pred[var_name].data + ) + print(var_name, f"diff is {diff_var}") + + jax_graphcast_small_pred_np = datasets.dataset_to_stacked(jax_graphcast_small_pred) + paddle_graphcast_small_pred_np = datasets.dataset_to_stacked( + paddle_graphcast_small_pred + ) + diff_all = np.average( + jax_graphcast_small_pred_np.data - paddle_graphcast_small_pred_np.data + ) + print(f"All diff is {diff_all}") + + +if __name__ == "__main__": + convert_parameters() # step.1 + make_graph_template() # step.2 + test_datasets() # step.3 + target, pred = eval() # step.4 + visualize(target, pred, "2m_temperature", level=50) + compare(pred) diff --git a/jointContribution/graphcast/utils.py b/jointContribution/graphcast/utils.py index c2fe605f11..8ad7fcb10d 100644 --- a/jointContribution/graphcast/utils.py +++ b/jointContribution/graphcast/utils.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright 2023 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -646,3 +647,617 @@ def fourier_features( ], axis=-1, ) +======= +# Copyright 2023 DeepMind Technologies Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS-IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for building models.""" + +from typing import Optional +from typing import Tuple + +import numpy as np +import scipy +import xarray + + +def get_graph_spatial_features( + *, + node_lat: np.ndarray, + node_lon: np.ndarray, + senders: np.ndarray, + receivers: np.ndarray, + add_node_positions: bool, + add_node_latitude: bool, + add_node_longitude: bool, + add_relative_positions: bool, + relative_longitude_local_coordinates: bool, + relative_latitude_local_coordinates: bool, + sine_cosine_encoding: bool = False, + encoding_num_freqs: int = 10, + encoding_multiplicative_factor: float = 1.2, +) -> Tuple[np.ndarray, np.ndarray]: + """Computes spatial features for the nodes. + + Args: + node_lat: Latitudes in the [-90, 90] interval of shape [num_nodes] + node_lon: Longitudes in the [0, 360] interval of shape [num_nodes] + senders: Sender indices of shape [num_edges] + receivers: Receiver indices of shape [num_edges] + add_node_positions: Add unit norm absolute positions. + add_node_latitude: Add a feature for latitude (cos(90 - lat)) + Note even if this is set to False, the model may be able to infer the + longitude from relative features, unless + `relative_latitude_local_coordinates` is also True, or if there is any + bias on the relative edge sizes for different longitudes. + add_node_longitude: Add features for longitude (cos(lon), sin(lon)). + Note even if this is set to False, the model may be able to infer the + longitude from relative features, unless + `relative_longitude_local_coordinates` is also True, or if there is any + bias on the relative edge sizes for different longitudes. + add_relative_positions: Whether to relative positions in R3 to the edges. + relative_longitude_local_coordinates: If True, relative positions are + computed in a local space where the receiver is at 0 longitude. + relative_latitude_local_coordinates: If True, relative positions are + computed in a local space where the receiver is at 0 latitude. + sine_cosine_encoding: If True, we will transform the node/edge features + with sine and cosine functions, similar to NERF. + encoding_num_freqs: frequency parameter + encoding_multiplicative_factor: used for calculating the frequency. + + Returns: + Arrays of shape: [num_nodes, num_features] and [num_edges, num_features]. + with node and edge features. + """ + + num_nodes = node_lat.shape[0] + num_edges = senders.shape[0] + dtype = node_lat.dtype + node_phi, node_theta = lat_lon_deg_to_spherical(node_lat, node_lon) + + # Computing some node features. + node_features = [] + if add_node_positions: + # Already in [-1, 1.] range. + node_features.extend(spherical_to_cartesian(node_phi, node_theta)) + + if add_node_latitude: + # Using the cos of theta. + # From 1. (north pole) to -1 (south pole). + node_features.append(np.cos(node_theta)) + + if add_node_longitude: + # Using the cos and sin, which is already normalized. + node_features.append(np.cos(node_phi)) + node_features.append(np.sin(node_phi)) + + if not node_features: + node_features = np.zeros([num_nodes, 0], dtype=dtype) + else: + node_features = np.stack(node_features, axis=-1) + + # Computing some edge features. 
+ edge_features = [] + + if add_relative_positions: + + relative_position = get_relative_position_in_receiver_local_coordinates( + node_phi=node_phi, + node_theta=node_theta, + senders=senders, + receivers=receivers, + latitude_local_coordinates=relative_latitude_local_coordinates, + longitude_local_coordinates=relative_longitude_local_coordinates, + ) + + # Note this is L2 distance in 3d space, rather than geodesic distance. + relative_edge_distances = np.linalg.norm( + relative_position, axis=-1, keepdims=True + ) + + # Normalize to the maximum edge distance. Note that we expect to always + # have an edge that goes in the opposite direction of any given edge + # so the distribution of relative positions should be symmetric around + # zero. So by scaling by the maximum length, we expect all relative + # positions to fall in the [-1., 1.] interval, and all relative distances + # to fall in the [0., 1.] interval. + max_edge_distance = relative_edge_distances.max() + edge_features.append(relative_edge_distances / max_edge_distance) + edge_features.append(relative_position / max_edge_distance) + + if not edge_features: + edge_features = np.zeros([num_edges, 0], dtype=dtype) + else: + edge_features = np.concatenate(edge_features, axis=-1) + + if sine_cosine_encoding: + + def sine_cosine_transform(x: np.ndarray) -> np.ndarray: + freqs = encoding_multiplicative_factor ** np.arange(encoding_num_freqs) + phases = freqs * x[..., None] + x_sin = np.sin(phases) + x_cos = np.cos(phases) + x_cat = np.concatenate([x_sin, x_cos], axis=-1) + return x_cat.reshape([x.shape[0], -1]) + + node_features = sine_cosine_transform(node_features) + edge_features = sine_cosine_transform(edge_features) + + return node_features, edge_features + + +def lat_lon_to_leading_axes(grid_xarray: xarray.DataArray) -> xarray.DataArray: + """Reorders xarray so lat/lon axes come first.""" + # leading + ["lat", "lon"] + trailing + # to + # ["lat", "lon"] + leading + trailing + return grid_xarray.transpose("lat", "lon", ...) + + +def restore_leading_axes(grid_xarray: xarray.DataArray) -> xarray.DataArray: + """Reorders xarray so batch/time/level axes come first (if present).""" + + # ["lat", "lon"] + [(batch,) (time,) (level,)] + trailing + # to + # [(batch,) (time,) (level,)] + ["lat", "lon"] + trailing + + input_dims = list(grid_xarray.dims) + output_dims = list(input_dims) + for leading_key in ["level", "time", "batch"]: # reverse order for insert + if leading_key in input_dims: + output_dims.remove(leading_key) + output_dims.insert(0, leading_key) + return grid_xarray.transpose(*output_dims) + + +def lat_lon_deg_to_spherical( + node_lat: np.ndarray, + node_lon: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + phi = np.deg2rad(node_lon) + theta = np.deg2rad(90 - node_lat) + return phi, theta + + +def spherical_to_lat_lon( + phi: np.ndarray, + theta: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + lon = np.mod(np.rad2deg(phi), 360) + lat = 90 - np.rad2deg(theta) + return lat, lon + + +def cartesian_to_spherical( + x: np.ndarray, + y: np.ndarray, + z: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + phi = np.arctan2(y, x) + with np.errstate(invalid="ignore"): # circumventing b/253179568 + theta = np.arccos(z) # Assuming unit radius. + return phi, theta + + +def spherical_to_cartesian( + phi: np.ndarray, theta: np.ndarray +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + # Assuming unit radius. 
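+    # Worked example (added for clarity): the north pole, lat=90 and lon=0, maps
+    # to phi=0, theta=0 via lat_lon_deg_to_spherical, and the expression below
+    # then gives (cos(0)*sin(0), sin(0)*sin(0), cos(0)) = (0.0, 0.0, 1.0).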
+ return (np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)) + + +def get_relative_position_in_receiver_local_coordinates( + node_phi: np.ndarray, + node_theta: np.ndarray, + senders: np.ndarray, + receivers: np.ndarray, + latitude_local_coordinates: bool, + longitude_local_coordinates: bool, +) -> np.ndarray: + """Returns relative position features for the edges. + + The relative positions will be computed in a rotated space for a local + coordinate system as defined by the receiver. The relative positions are + simply obtained by subtracting sender position minues receiver position in + that local coordinate system after the rotation in R^3. + + Args: + node_phi: [num_nodes] with polar angles. + node_theta: [num_nodes] with azimuthal angles. + senders: [num_edges] with indices. + receivers: [num_edges] with indices. + latitude_local_coordinates: Whether to rotate edges such that in the + positions are computed such that the receiver is always at latitude 0. + longitude_local_coordinates: Whether to rotate edges such that in the + positions are computed such that the receiver is always at longitude 0. + + Returns: + Array of relative positions in R3 [num_edges, 3] + """ + + node_pos = np.stack(spherical_to_cartesian(node_phi, node_theta), axis=-1) + + # No rotation in this case. + if not (latitude_local_coordinates or longitude_local_coordinates): + return node_pos[senders] - node_pos[receivers] + + # Get rotation matrices for the local space space for every node. + rotation_matrices = get_rotation_matrices_to_local_coordinates( + reference_phi=node_phi, + reference_theta=node_theta, + rotate_latitude=latitude_local_coordinates, + rotate_longitude=longitude_local_coordinates, + ) + + # Each edge will be rotated according to the rotation matrix of its receiver + # node. + edge_rotation_matrices = rotation_matrices[receivers] + + # Rotate all nodes to the rotated space of the corresponding edge. + # Note for receivers we can also do the matmul first and the gather second: + # ``` + # receiver_pos_in_rotated_space = rotate_with_matrices( + # rotation_matrices, node_pos)[receivers] + # ``` + # which is more efficient, however, we do gather first to keep it more + # symmetric with the sender computation. + receiver_pos_in_rotated_space = rotate_with_matrices( + edge_rotation_matrices, node_pos[receivers] + ) + sender_pos_in_in_rotated_space = rotate_with_matrices( + edge_rotation_matrices, node_pos[senders] + ) + # Note, here, that because the rotated space is chosen according to the + # receiver, if: + # * latitude_local_coordinates = True: latitude for the receivers will be + # 0, that is the z coordinate will always be 0. + # * longitude_local_coordinates = True: longitude for the receivers will be + # 0, that is the y coordinate will be 0. + + # Now we can just subtract. + # Note we are rotating to a local coordinate system, where the y-z axes are + # parallel to a tangent plane to the sphere, but still remain in a 3d space. + # Note that if both `latitude_local_coordinates` and + # `longitude_local_coordinates` are True, and edges are short, + # then the difference in x coordinate between sender and receiver + # should be small, so we could consider dropping the new x coordinate if + # we wanted to the tangent plane, however in doing so + # we would lose information about the curvature of the mesh, which may be + # important for very coarse meshes. 
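+    # Note (added for clarity): when both local-coordinate flags are True, every
+    # receiver is rotated onto approximately (1, 0, 0), so the difference below
+    # is simply the rotated sender position minus that constant, i.e. the sender
+    # expressed in the receiver-centred frame.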
+ return sender_pos_in_in_rotated_space - receiver_pos_in_rotated_space + + +def get_rotation_matrices_to_local_coordinates( + reference_phi: np.ndarray, + reference_theta: np.ndarray, + rotate_latitude: bool, + rotate_longitude: bool, +) -> np.ndarray: + """Returns a rotation matrix to rotate to a point based on a reference vector. + + The rotation matrix is build such that, a vector in the + same coordinate system at the reference point that points towards the pole + before the rotation, continues to point towards the pole after the rotation. + + Args: + reference_phi: [leading_axis] Polar angles of the reference. + reference_theta: [leading_axis] Azimuthal angles of the reference. + rotate_latitude: Whether to produce a rotation matrix that would rotate + R^3 vectors to zero latitude. + rotate_longitude: Whether to produce a rotation matrix that would rotate + R^3 vectors to zero longitude. + + Returns: + Matrices of shape [leading_axis] such that when applied to the reference + position with `rotate_with_matrices(rotation_matrices, reference_pos)` + + * phi goes to 0. if "rotate_longitude" is True. + + * theta goes to np.pi / 2 if "rotate_latitude" is True. + + The rotation consists of: + * rotate_latitude = False, rotate_longitude = True: + Latitude preserving rotation. + * rotate_latitude = True, rotate_longitude = True: + Latitude preserving rotation, followed by longitude preserving + rotation. + * rotate_latitude = True, rotate_longitude = False: + Latitude preserving rotation, followed by longitude preserving + rotation, and the inverse of the latitude preserving rotation. Note + this is computationally different from rotating the longitude only + and is. We do it like this, so the polar geodesic curve, continues + to be aligned with one of the axis after the rotation. + """ + + if rotate_longitude and rotate_latitude: + + # We first rotate around the z axis "minus the azimuthal angle", to get the + # point with zero longitude + azimuthal_rotation = -reference_phi + + # One then we will do a polar rotation (which can be done along the y + # axis now that we are at longitude 0.), "minus the polar angle plus 2pi" + # to get the point with zero latitude. + polar_rotation = -reference_theta + np.pi / 2 + + return scipy.spatial.transform.Rotation.from_euler( + "zy", np.stack([azimuthal_rotation, polar_rotation], axis=1) + ).as_matrix() + elif rotate_longitude: + # Just like the previous case, but applying only the azimuthal rotation. + azimuthal_rotation = -reference_phi + return scipy.spatial.transform.Rotation.from_euler( + "z", -reference_phi + ).as_matrix() + elif rotate_latitude: + # Just like the first case, but after doing the polar rotation, undoing + # the azimuthal rotation. 
+ azimuthal_rotation = -reference_phi + polar_rotation = -reference_theta + np.pi / 2 + + return scipy.spatial.transform.Rotation.from_euler( + "zyz", + np.stack([azimuthal_rotation, polar_rotation, -azimuthal_rotation], axis=1), + ).as_matrix() + else: + raise ValueError("At least one of longitude and latitude should be rotated.") + + +def rotate_with_matrices( + rotation_matrices: np.ndarray, positions: np.ndarray +) -> np.ndarray: + return np.einsum("bji,bi->bj", rotation_matrices, positions) + + +def get_bipartite_graph_spatial_features( + *, + senders_node_lat: np.ndarray, + senders_node_lon: np.ndarray, + senders: np.ndarray, + receivers_node_lat: np.ndarray, + receivers_node_lon: np.ndarray, + receivers: np.ndarray, + add_node_positions: bool, + add_node_latitude: bool, + add_node_longitude: bool, + add_relative_positions: bool, + edge_normalization_factor: Optional[float] = None, + relative_longitude_local_coordinates: bool, + relative_latitude_local_coordinates: bool, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Computes spatial features for the nodes. + + This function is almost identical to `get_graph_spatial_features`. The only + difference is that sender nodes and receiver nodes can be in different arrays. + This is necessary to enable combination with typed Graph. + + Args: + senders_node_lat: Latitudes in the [-90, 90] interval of shape + [num_sender_nodes] + senders_node_lon: Longitudes in the [0, 360] interval of shape + [num_sender_nodes] + senders: Sender indices of shape [num_edges], indices in [0, + num_sender_nodes) + receivers_node_lat: Latitudes in the [-90, 90] interval of shape + [num_receiver_nodes] + receivers_node_lon: Longitudes in the [0, 360] interval of shape + [num_receiver_nodes] + receivers: Receiver indices of shape [num_edges], indices in [0, + num_receiver_nodes) + add_node_positions: Add unit norm absolute positions. + add_node_latitude: Add a feature for latitude (cos(90 - lat)) Note even if + this is set to False, the model may be able to infer the longitude from + relative features, unless `relative_latitude_local_coordinates` is also + True, or if there is any bias on the relative edge sizes for different + longitudes. + add_node_longitude: Add features for longitude (cos(lon), sin(lon)). Note + even if this is set to False, the model may be able to infer the longitude + from relative features, unless `relative_longitude_local_coordinates` is + also True, or if there is any bias on the relative edge sizes for + different longitudes. + add_relative_positions: Whether to relative positions in R3 to the edges. + edge_normalization_factor: Allows explicitly controlling edge normalization. + If None, defaults to max edge length. This supports using pre-trained + model weights with a different graph structure to what it was trained on. + relative_longitude_local_coordinates: If True, relative positions are + computed in a local space where the receiver is at 0 longitude. + relative_latitude_local_coordinates: If True, relative positions are + computed in a local space where the receiver is at 0 latitude. + + Returns: + Arrays of shape: [num_nodes, num_features] and [num_edges, num_features]. + with node and edge features. 
+ """ + + num_senders = senders_node_lat.shape[0] + num_receivers = receivers_node_lat.shape[0] + num_edges = senders.shape[0] + dtype = senders_node_lat.dtype + assert receivers_node_lat.dtype == dtype + senders_node_phi, senders_node_theta = lat_lon_deg_to_spherical( + senders_node_lat, senders_node_lon + ) + receivers_node_phi, receivers_node_theta = lat_lon_deg_to_spherical( + receivers_node_lat, receivers_node_lon + ) + + # Computing some node features. + senders_node_features = [] + receivers_node_features = [] + if add_node_positions: + # Already in [-1, 1.] range. + senders_node_features.extend( + spherical_to_cartesian(senders_node_phi, senders_node_theta) + ) + receivers_node_features.extend( + spherical_to_cartesian(receivers_node_phi, receivers_node_theta) + ) + + if add_node_latitude: + # Using the cos of theta. + # From 1. (north pole) to -1 (south pole). + senders_node_features.append(np.cos(senders_node_theta)) + receivers_node_features.append(np.cos(receivers_node_theta)) + + if add_node_longitude: + # Using the cos and sin, which is already normalized. + senders_node_features.append(np.cos(senders_node_phi)) + senders_node_features.append(np.sin(senders_node_phi)) + + receivers_node_features.append(np.cos(receivers_node_phi)) + receivers_node_features.append(np.sin(receivers_node_phi)) + + if not senders_node_features: + senders_node_features = np.zeros([num_senders, 0], dtype=dtype) + receivers_node_features = np.zeros([num_receivers, 0], dtype=dtype) + else: + senders_node_features = np.stack(senders_node_features, axis=-1) + receivers_node_features = np.stack(receivers_node_features, axis=-1) + + # Computing some edge features. + edge_features = [] + + if add_relative_positions: + + relative_position = ( + get_bipartite_relative_position_in_receiver_local_coordinates( + senders_node_phi=senders_node_phi, + senders_node_theta=senders_node_theta, + receivers_node_phi=receivers_node_phi, + receivers_node_theta=receivers_node_theta, + senders=senders, + receivers=receivers, + latitude_local_coordinates=relative_latitude_local_coordinates, + longitude_local_coordinates=relative_longitude_local_coordinates, + ) + ) + + # Note this is L2 distance in 3d space, rather than geodesic distance. + relative_edge_distances = np.linalg.norm( + relative_position, axis=-1, keepdims=True + ) + + if edge_normalization_factor is None: + # Normalize to the maximum edge distance. Note that we expect to always + # have an edge that goes in the opposite direction of any given edge + # so the distribution of relative positions should be symmetric around + # zero. So by scaling by the maximum length, we expect all relative + # positions to fall in the [-1., 1.] interval, and all relative distances + # to fall in the [0., 1.] interval. 
+            edge_normalization_factor = relative_edge_distances.max()
+
+        edge_features.append(relative_edge_distances / edge_normalization_factor)
+        edge_features.append(relative_position / edge_normalization_factor)
+
+    if not edge_features:
+        edge_features = np.zeros([num_edges, 0], dtype=dtype)
+    else:
+        edge_features = np.concatenate(edge_features, axis=-1)
+
+    return senders_node_features, receivers_node_features, edge_features
+
+
+def get_bipartite_relative_position_in_receiver_local_coordinates(
+    senders_node_phi: np.ndarray,
+    senders_node_theta: np.ndarray,
+    senders: np.ndarray,
+    receivers_node_phi: np.ndarray,
+    receivers_node_theta: np.ndarray,
+    receivers: np.ndarray,
+    latitude_local_coordinates: bool,
+    longitude_local_coordinates: bool,
+) -> np.ndarray:
+    """Returns relative position features for the edges.
+
+    This function is equivalent to
+    `get_relative_position_in_receiver_local_coordinates`, but adapted to work
+    with bipartite typed graphs.
+
+    The relative positions will be computed in a rotated space for a local
+    coordinate system as defined by the receiver. The relative positions are
+    simply obtained as the sender position minus the receiver position in that
+    local coordinate system after the rotation in R^3.
+
+    Args:
+        senders_node_phi: [num_sender_nodes] with azimuthal angles.
+        senders_node_theta: [num_sender_nodes] with polar angles.
+        senders: [num_edges] with indices into sender nodes.
+        receivers_node_phi: [num_receiver_nodes] with azimuthal angles.
+        receivers_node_theta: [num_receiver_nodes] with polar angles.
+        receivers: [num_edges] with indices into receiver nodes.
+        latitude_local_coordinates: Whether to rotate edges such that the relative
+            positions are computed with the receiver always at latitude 0.
+        longitude_local_coordinates: Whether to rotate edges such that the relative
+            positions are computed with the receiver always at longitude 0.
+
+    Returns:
+        Array of relative positions in R3 [num_edges, 3]
+    """
+
+    senders_node_pos = np.stack(
+        spherical_to_cartesian(senders_node_phi, senders_node_theta), axis=-1
+    )
+
+    receivers_node_pos = np.stack(
+        spherical_to_cartesian(receivers_node_phi, receivers_node_theta), axis=-1
+    )
+
+    # No rotation in this case.
+    if not (latitude_local_coordinates or longitude_local_coordinates):
+        return senders_node_pos[senders] - receivers_node_pos[receivers]
+
+    # Get rotation matrices for the local space for every receiver node.
+    receiver_rotation_matrices = get_rotation_matrices_to_local_coordinates(
+        reference_phi=receivers_node_phi,
+        reference_theta=receivers_node_theta,
+        rotate_latitude=latitude_local_coordinates,
+        rotate_longitude=longitude_local_coordinates,
+    )
+
+    # Each edge will be rotated according to the rotation matrix of its receiver
+    # node.
+    edge_rotation_matrices = receiver_rotation_matrices[receivers]
+
+    # Rotate all nodes to the rotated space of the corresponding edge.
+    # Note for receivers we can also do the matmul first and the gather second:
+    # ```
+    # receiver_pos_in_rotated_space = rotate_with_matrices(
+    #     rotation_matrices, node_pos)[receivers]
+    # ```
+    # which is more efficient, however, we do gather first to keep it more
+    # symmetric with the sender computation.
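# Illustrative sketches (not part of the patch) for two remarks around this point
# in the hunk.
#
# (1) The gather/rotate order mentioned in the comment above commutes: rotating
#     the gathered positions equals gathering the rotated positions.
import numpy as np

rng = np.random.default_rng(0)
num_nodes, num_edges = 5, 12
rotation_matrices = rng.standard_normal((num_nodes, 3, 3))
node_pos = rng.standard_normal((num_nodes, 3))
receivers = rng.integers(0, num_nodes, size=num_edges)

def _rotate(m, p):  # same contraction as rotate_with_matrices above
    return np.einsum("bji,bi->bj", m, p)

assert np.allclose(
    _rotate(rotation_matrices[receivers], node_pos[receivers]),
    _rotate(rotation_matrices, node_pos)[receivers],
)

# (2) As stated a few lines further down in this hunk, rotating a receiver's own
#     position with its local rotation matrix puts it at latitude 0 (z ~ 0) when
#     only the latitude is rotated. `graphcast_utils` is a placeholder name for
#     the module this hunk belongs to; the helpers themselves are defined in it.
from graphcast_utils import (
    get_rotation_matrices_to_local_coordinates,
    lat_lon_deg_to_spherical,
    rotate_with_matrices,
    spherical_to_cartesian,
)

phi, theta = lat_lon_deg_to_spherical(np.array([37.0]), np.array([120.0]))
pos = np.stack(spherical_to_cartesian(phi, theta), axis=-1)
rot = get_rotation_matrices_to_local_coordinates(
    reference_phi=phi,
    reference_theta=theta,
    rotate_latitude=True,
    rotate_longitude=False,
)
print(rotate_with_matrices(rot, pos))  # z component is ~ 0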
+ receiver_pos_in_rotated_space = rotate_with_matrices( + edge_rotation_matrices, receivers_node_pos[receivers] + ) + sender_pos_in_in_rotated_space = rotate_with_matrices( + edge_rotation_matrices, senders_node_pos[senders] + ) + # Note, here, that because the rotated space is chosen according to the + # receiver, if: + # * latitude_local_coordinates = True: latitude for the receivers will be + # 0, that is the z coordinate will always be 0. + # * longitude_local_coordinates = True: longitude for the receivers will be + # 0, that is the y coordinate will be 0. + + # Now we can just subtract. + # Note we are rotating to a local coordinate system, where the y-z axes are + # parallel to a tangent plane to the sphere, but still remain in a 3d space. + # Note that if both `latitude_local_coordinates` and + # `longitude_local_coordinates` are True, and edges are short, + # then the difference in x coordinate between sender and receiver + # should be small, so we could consider dropping the new x coordinate if + # we wanted to the tangent plane, however in doing so + # we would lose information about the curvature of the mesh, which may be + # important for very coarse meshes. + return sender_pos_in_in_rotated_space - receiver_pos_in_rotated_space +>>>>>>> Stashed changes diff --git a/jointContribution/graphcast/vis.py b/jointContribution/graphcast/vis.py index b3080e911f..8071b7ad38 100644 --- a/jointContribution/graphcast/vis.py +++ b/jointContribution/graphcast/vis.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -162,3 +163,114 @@ def log_images( fig_title += f" at {level} hPa" plot_data(data, fig_title, plot_size, robust, file=file) +======= +import datetime +import math +from typing import Optional + +import IPython +import matplotlib +import matplotlib.animation as animation +import matplotlib.pyplot as plt +import numpy as np +import xarray + + +def select( + data: xarray.Dataset, + variable: str, + level: Optional[int] = None, + max_steps: Optional[int] = None, +) -> xarray.Dataset: + data = data[variable] + if "batch" in data.dims: + data = data.isel(batch=0) + if ( + max_steps is not None + and "time" in data.sizes + and max_steps < data.sizes["time"] + ): + data = data.isel(time=range(0, max_steps)) + if level is not None and "level" in data.coords: + data = data.sel(level=level) + return data + + +def scale( + data: xarray.Dataset, + center: Optional[float] = None, + robust: bool = False, +) -> tuple[xarray.Dataset, matplotlib.colors.Normalize, str]: + vmin = np.nanpercentile(data, (2 if robust else 0)) + vmax = np.nanpercentile(data, (98 if robust else 100)) + if center is not None: + diff = max(vmax - center, center - vmin) + vmin = center - diff + vmax = center + diff + return ( + data, + matplotlib.colors.Normalize(vmin, vmax), + ("RdBu_r" if center is not None else "viridis"), + ) + + +def plot_data( + data: dict[str, xarray.Dataset], + fig_title: str, + plot_size: float = 5, + robust: bool = False, + cols: int = 4, +) -> tuple[xarray.Dataset, matplotlib.colors.Normalize, str]: + + first_data = next(iter(data.values()))[0] + max_steps = first_data.sizes.get("time", 1) + assert all(max_steps == d.sizes.get("time", 1) for d, _, _ in data.values()) + + cols = min(cols, len(data)) + rows = math.ceil(len(data) / cols) + figure = plt.figure(figsize=(plot_size * 2 * cols, plot_size * rows)) + figure.suptitle(fig_title, fontsize=16) + 
figure.subplots_adjust(wspace=0, hspace=0) + figure.tight_layout() + + images = [] + for i, (title, (plot_data, norm, cmap)) in enumerate(data.items()): + ax = figure.add_subplot(rows, cols, i + 1) + ax.set_xticks([]) + ax.set_yticks([]) + ax.set_title(title) + im = ax.imshow( + plot_data.isel(time=0, missing_dims="ignore"), + norm=norm, + origin="lower", + cmap=cmap, + ) + plt.colorbar( + mappable=im, + ax=ax, + orientation="vertical", + pad=0.02, + aspect=16, + shrink=0.75, + cmap=cmap, + extend=("both" if robust else "neither"), + ) + images.append(im) + + def update(frame): + if "time" in first_data.dims: + td = datetime.timedelta( + microseconds=first_data["time"][frame].item() / 1000 + ) + figure.suptitle(f"{fig_title}, {td}", fontsize=16) + else: + figure.suptitle(fig_title, fontsize=16) + for im, (plot_data, norm, cmap) in zip(images, data.values()): + im.set_data(plot_data.isel(time=frame, missing_dims="ignore")) + + ani = animation.FuncAnimation( + fig=figure, func=update, frames=max_steps, interval=250 + ) + plt.close(figure.number) + return IPython.display.HTML(ani.to_jshtml()) +>>>>>>> Stashed changes diff --git a/mkdocs.yml b/mkdocs.yml index 8146ac3cf3..fe3159b7ec 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -94,6 +94,7 @@ nav: - 材料科学(AI for Material): - hPINNs: zh/examples/hpinns.md - CGCNN: zh/examples/cgcnn.md + - psc_NN: zh/examples/perovskite_solar_cells_nn.md - 地球科学(AI for Earth Science): - Extformer-MoE: zh/examples/extformer_moe.md - FourCastNet: zh/examples/fourcastnet.md @@ -207,12 +208,19 @@ plugins: - mkdocstrings: handlers: python: - setup_commands: - - import sys - - sys.path.append("../") - paths: [../ppsci] - selection: - new_path_syntax: true + options: + members: true + show_root_heading: true + show_object_full_path: false + show_category_heading: false + show_if_no_docstring: true + filters: ["!^_"] + docstring_style: google + docstring_section_style: list + show_source: true + show_bases: true + heading_level: 2 + paths: [../ppsci] - mkdocs-video - git-revision-date-localized: enable_creation_date: true diff --git a/ppsci/__init__.py b/ppsci/__init__.py index bde7aa49a5..adba5dfe5b 100644 --- a/ppsci/__init__.py +++ b/ppsci/__init__.py @@ -1,78 +1,78 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
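# Hedged usage sketch (not part of the patch) for the plotting helpers added in
# jointContribution/graphcast/vis.py in the hunk above. "era5_sample.nc" and the
# variable name are placeholders, not files shipped with this repository.
import xarray

from vis import plot_data, scale, select  # assuming the script sits next to vis.py

ds = xarray.open_dataset("era5_sample.nc")
fields = {
    "2m_temperature": scale(select(ds, "2m_temperature", max_steps=4), robust=True),
}
html_animation = plot_data(fields, "2m temperature", robust=True)
# In a notebook, `html_animation` renders the per-timestep frames as a JS animation.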
- -from ppsci import arch # isort:skip -from ppsci import autodiff # isort:skip -from ppsci import constraint # isort:skip -from ppsci import data # isort:skip -from ppsci import equation # isort:skip -from ppsci import geometry # isort:skip -from ppsci import loss # isort:skip -from ppsci import metric # isort:skip -from ppsci import optimizer # isort:skip -from ppsci import utils # isort:skip -from ppsci import visualize # isort:skip -from ppsci import validate # isort:skip -from ppsci import solver # isort:skip -from ppsci import experimental # isort:skip - -from ppsci.utils.checker import run_check # isort:skip -from ppsci.utils.checker import run_check_mesh # isort:skip -from ppsci.utils import lambdify # isort:skip - - -try: - # import auto-generated version information from '._version' file, using - # setuptools_scm via 'pip install'. Details of versioning rule can be referd to: - # https://peps.python.org/pep-0440/#public-version-identifiers - from ._version import version as __version__ -except ImportError: - __version__ = "unknown version" - -__all__ = [ - "arch", - "autodiff", - "constraint", - "data", - "equation", - "geometry", - "loss", - "metric", - "optimizer", - "utils", - "visualize", - "validate", - "solver", - "experimental", - "run_check", - "run_check_mesh", - "lambdify", -] - - -# NOTE: Register custom solvers for parsing values from omegaconf more flexible -def _register_config_solvers(): - import numpy as np - from omegaconf import OmegaConf - - # register solver for "${numpy:xxx}" item, e.g. pi: "${numpy:pi}" - if not OmegaConf.has_resolver("numpy"): - OmegaConf.register_new_resolver("numpy", lambda x: getattr(np, x)) - - # register solver for "${sum:xxx}" item, e.g. pi: "${sum:[10, 20, 30]}" - if not OmegaConf.has_resolver("sum"): - OmegaConf.register_new_resolver("sum", lambda x: sum(x)) - - -_register_config_solvers() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppsci import arch # isort:skip +from ppsci import autodiff # isort:skip +from ppsci import constraint # isort:skip +from ppsci import data # isort:skip +from ppsci import equation # isort:skip +from ppsci import geometry # isort:skip +from ppsci import loss # isort:skip +from ppsci import metric # isort:skip +from ppsci import optimizer # isort:skip +from ppsci import utils # isort:skip +from ppsci import visualize # isort:skip +from ppsci import validate # isort:skip +from ppsci import solver # isort:skip +from ppsci import experimental # isort:skip + +from ppsci.utils.checker import run_check # isort:skip +from ppsci.utils.checker import run_check_mesh # isort:skip +from ppsci.utils import lambdify # isort:skip + + +try: + # import auto-generated version information from '._version' file, using + # setuptools_scm via 'pip install'. 
Details of versioning rule can be referd to: + # https://peps.python.org/pep-0440/#public-version-identifiers + from ._version import version as __version__ +except ImportError: + __version__ = "unknown version" + +__all__ = [ + "arch", + "autodiff", + "constraint", + "data", + "equation", + "geometry", + "loss", + "metric", + "optimizer", + "utils", + "visualize", + "validate", + "solver", + "experimental", + "run_check", + "run_check_mesh", + "lambdify", +] + + +# NOTE: Register custom solvers for parsing values from omegaconf more flexible +def _register_config_solvers(): + import numpy as np + from omegaconf import OmegaConf + + # register solver for "${numpy:xxx}" item, e.g. pi: "${numpy:pi}" + if not OmegaConf.has_resolver("numpy"): + OmegaConf.register_new_resolver("numpy", lambda x: getattr(np, x)) + + # register solver for "${sum:xxx}" item, e.g. pi: "${sum:[10, 20, 30]}" + if not OmegaConf.has_resolver("sum"): + OmegaConf.register_new_resolver("sum", lambda x: sum(x)) + + +_register_config_solvers() diff --git a/ppsci/arch/__init__.py b/ppsci/arch/__init__.py index ec3f63597c..32cbf9d900 100644 --- a/ppsci/arch/__init__.py +++ b/ppsci/arch/__init__.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -132,3 +133,133 @@ def build_model(cfg): logger.debug(str(arch)) return arch +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
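# Illustrative sketch (not part of the patch): using the "numpy" and "sum"
# resolvers registered in ppsci/__init__.py above from a config value.
from omegaconf import OmegaConf

import ppsci  # noqa: F401  # importing ppsci registers the resolvers

cfg = OmegaConf.create({"pi": "${numpy:pi}", "total": "${sum:[10, 20, 30]}"})
print(cfg.pi)     # 3.141592653589793
print(cfg.total)  # 60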
+ +from __future__ import annotations + +import copy + +from ppsci.arch.afno import AFNONet # isort:skip +from ppsci.arch.afno import PrecipNet # isort:skip +from ppsci.arch.amgnet import AMGNet # isort:skip +from ppsci.arch.base import Arch # isort:skip +from ppsci.arch.cfdgcn import CFDGCN # isort:skip +from ppsci.arch.chip_deeponets import ChipDeepONets # isort:skip +from ppsci.arch.crystalgraphconvnet import CrystalGraphConvNet # isort:skip +from ppsci.arch.cuboid_transformer import CuboidTransformer # isort:skip +from ppsci.arch.cvit import CVit # isort:skip +from ppsci.arch.cvit import CVit1D # isort:skip +from ppsci.arch.deeponet import DeepONet # isort:skip +from ppsci.arch.dgmr import DGMR # isort:skip +from ppsci.arch.embedding_koopman import CylinderEmbedding # isort:skip +from ppsci.arch.embedding_koopman import LorenzEmbedding # isort:skip +from ppsci.arch.embedding_koopman import RosslerEmbedding # isort:skip +from ppsci.arch.epnn import Epnn # isort:skip +from ppsci.arch.extformer_moe_cuboid import ExtFormerMoECuboid # isort:skip +from ppsci.arch.gan import Discriminator # isort:skip +from ppsci.arch.gan import Generator # isort:skip +from ppsci.arch.geofno import FNO1d # isort:skip +from ppsci.arch.graphcast import GraphCastNet # isort:skip +from ppsci.arch.he_deeponets import HEDeepONets # isort:skip +from ppsci.arch.lno import LNO # isort:skip +from ppsci.arch.mlp import MLP # isort:skip +from ppsci.arch.mlp import ModifiedMLP # isort:skip +from ppsci.arch.mlp import PirateNet # isort:skip +from ppsci.arch.model_list import ModelList # isort:skip +from ppsci.arch.nowcastnet import NowcastNet # isort:skip +from ppsci.arch.phycrnet import PhyCRNet # isort:skip +from ppsci.arch.phylstm import DeepPhyLSTM # isort:skip +from ppsci.arch.physx_transformer import PhysformerGPT2 # isort:skip +from ppsci.arch.sfnonet import SFNONet # isort:skip +from ppsci.arch.spinn import SPINN # isort:skip +from ppsci.arch.tfnonet import TFNO1dNet, TFNO2dNet, TFNO3dNet # isort:skip +from ppsci.arch.transformer import Transformer # isort:skip +from ppsci.arch.unetex import UNetEx # isort:skip +from ppsci.arch.unonet import UNONet # isort:skip +from ppsci.arch.uscnn import USCNN # isort:skip +from ppsci.arch.vae import AutoEncoder # isort:skip +from ppsci.arch.velocitygan import VelocityDiscriminator # isort:skip +from ppsci.arch.velocitygan import VelocityGenerator # isort:skip +from ppsci.arch.moflow_net import MoFlowNet, MoFlowProp # isort:skip +from ppsci.utils import logger # isort:skip + +__all__ = [ + "MoFlowNet", + "MoFlowProp", + "AFNONet", + "AMGNet", + "Arch", + "AutoEncoder", + "build_model", + "CFDGCN", + "ChipDeepONets", + "CrystalGraphConvNet", + "CuboidTransformer", + "CVit", + "CVit1D", + "CylinderEmbedding", + "DeepONet", + "DeepPhyLSTM", + "DGMR", + "Discriminator", + "Epnn", + "ExtFormerMoECuboid", + "FNO1d", + "Generator", + "GraphCastNet", + "HEDeepONets", + "LorenzEmbedding", + "LNO", + "MLP", + "ModelList", + "ModifiedMLP", + "NowcastNet", + "PhyCRNet", + "PhysformerGPT2", + "PirateNet", + "PrecipNet", + "RosslerEmbedding", + "SFNONet", + "SPINN", + "TFNO1dNet", + "TFNO2dNet", + "TFNO3dNet", + "Transformer", + "UNetEx", + "UNONet", + "USCNN", + "VelocityDiscriminator", + "VelocityGenerator", +] + + +def build_model(cfg): + """Build model + + Args: + cfg (DictConfig): Arch config. + + Returns: + nn.Layer: Model. 
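# Hedged usage sketch (not part of the patch) for the `build_model` helper defined
# in this hunk; the MLP keyword arguments are illustrative assumptions rather than
# values taken from this repository's configs.
from omegaconf import DictConfig

import ppsci

cfg = DictConfig(
    {
        "name": "MLP",
        "input_keys": ["x", "y"],
        "output_keys": ["u"],
        "num_layers": 3,
        "hidden_size": 32,
    }
)
model = ppsci.arch.build_model(cfg)  # pops "name", forwards the rest as kwargs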
+ """ + cfg = copy.deepcopy(cfg) + arch_cls = cfg.pop("name") + arch = eval(arch_cls)(**cfg) + + logger.debug(str(arch)) + + return arch +>>>>>>> Stashed changes diff --git a/ppsci/arch/activation.py b/ppsci/arch/activation.py index 3f78eb1a61..8540266ce4 100644 --- a/ppsci/arch/activation.py +++ b/ppsci/arch/activation.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -158,3 +159,179 @@ def get_activation(act_name: str) -> Callable: return act_layer() return act_layer +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable + +import numpy as np +import paddle +import paddle.nn.functional as F +from paddle import nn + +from ppsci.utils import initializer +from ppsci.utils import misc + + +class Stan(nn.Layer): + """Self-scalable Tanh. + paper: https://arxiv.org/abs/2204.12589v1 + + Args: + out_features (int, optional): Output features. Defaults to 1. + """ + + def __init__(self, out_features: int = 1): + super().__init__() + self.beta = self.create_parameter( + shape=(out_features,), + default_initializer=nn.initializer.Constant(1), + ) + + def forward(self, x): + # TODO: manually broadcast beta to x.shape for preventing backward error yet. + return F.tanh(x) * (1 + paddle.broadcast_to(self.beta, x.shape) * x) + # return F.tanh(x) * (1 + self.beta * x) + + +class Swish(nn.Layer): + def __init__(self, beta: float = 1.0): + super().__init__() + self.beta = self.create_parameter( + shape=[], + default_initializer=nn.initializer.Constant(beta), + ) + + def forward(self, x): + return x * F.sigmoid(self.beta * x) + + +class Cos(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, x): + return paddle.cos(x) + + +class Sin(nn.Layer): + def __init__(self): + super().__init__() + + def forward(self, x): + return paddle.sin(x) + + +class Silu(nn.Layer): + """ + FIXME: This activation function is a workaround for the potential occurrence of NaNs + during the computation of the native SiLU function via using x*sigmoid(x) instead of + silu(x) + """ + + def __init__(self): + super().__init__() + + def forward(self, x): + return x * F.sigmoid(x) + + +class Siren(nn.Layer): + """Implicit Neural Representations with Periodic Activation Functions. + paper link: https://arxiv.org/abs/2006.09661 + code ref: https://github.com/vsitzmann/siren/tree/master + """ + + def __init__(self, w0: float = 30): + super().__init__() + self.w0 = w0 + + def forward(self, x): + return paddle.sin(self.w0 * x) + + @staticmethod + def init_for_first_layer(layer: nn.Linear): + """Initialization only for first hidden layer. 
+ ref: https://github.com/vsitzmann/siren/blob/master/modules.py#L630 + """ + if not isinstance(layer, nn.Linear): + raise TypeError( + "Siren initialization only support Linear layer now, " + f"but got {misc.typename(layer)}" + ) + in_features = layer.weight.shape[0] + with paddle.no_grad(): + initializer.uniform_(layer.weight, -1 / in_features, 1 / in_features) + initializer.zeros_(layer.bias) + + @staticmethod + def init_for_hidden_layer(layer: nn.Linear, w0: float = 30): + """Initialization for hidden layer except first layer. + ref: https://github.com/vsitzmann/siren/blob/master/modules.py#L622 + """ + if not isinstance(layer, nn.Linear): + raise TypeError( + "Siren initialization only support Linear layer now, " + f"but got {misc.typename(layer)}" + ) + in_features = layer.weight.shape[0] + with paddle.no_grad(): + initializer.uniform_( + layer.weight, + -np.sqrt(6 / in_features) / w0, + np.sqrt(6 / in_features) / w0, + ) + initializer.zeros_(layer.bias) + + +act_func_dict = { + "elu": nn.ELU(), + "relu": nn.ReLU(), + "selu": nn.SELU(), + "gelu": nn.GELU(), + "leaky_relu": nn.LeakyReLU(), + "sigmoid": nn.Sigmoid(), + "silu": Silu(), + "sin": Sin(), + "cos": Cos(), + "swish": Swish, + "tanh": nn.Tanh(), + "identity": nn.Identity(), + "siren": Siren(), + "stan": Stan, +} + + +def get_activation(act_name: str) -> Callable: + """Get activation function according to act_name. + + Args: + act_name (str): Name of activation, such as "tanh". + + Returns: + Callable: Paddle activation function. + """ + if act_name.lower() not in act_func_dict: + raise ValueError(f"act_name({act_name}) not found in act_func_dict") + + act_layer = act_func_dict[act_name.lower()] + if isinstance(act_layer, type) and act_name != "stan": + # Is a activation class but not a instance of it, instantiate manually(except for 'Stan') + return act_layer() + + return act_layer +>>>>>>> Stashed changes diff --git a/ppsci/arch/afno.py b/ppsci/arch/afno.py index f30cd6ede7..97e035d82b 100644 --- a/ppsci/arch/afno.py +++ b/ppsci/arch/afno.py @@ -1,687 +1,687 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [FourCastNet](https://github.com/NVlabs/FourCastNet) -""" -from __future__ import annotations - -from functools import partial -from typing import Optional -from typing import Tuple - -import paddle -import paddle.fft -import paddle.nn.functional as F -from paddle import nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base -from ppsci.utils import initializer - - -def drop_path( - x: paddle.Tensor, - drop_prob: float = 0.0, - training: bool = False, - scale_by_keep: bool = True, -) -> paddle.Tensor: - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
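# Hedged usage sketch (not part of the patch) for the activation registry added in
# ppsci/arch/activation.py above. "stan" is special-cased: get_activation returns
# the class itself so callers can pass out_features when instantiating it.
import paddle

from ppsci.arch.activation import get_activation

swish = get_activation("swish")    # stored as a class in the dict -> instantiated here
y = swish(paddle.randn([4, 16]))

stan_cls = get_activation("stan")  # returned as a class, not an instance
stan = stan_cls(out_features=16)
y2 = stan(paddle.randn([4, 16]))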
- - Args: - x (paddle.Tensor): The tensor to apply. - drop_prob (float, optional): Drop paths probability. Defaults to 0.0. - training (bool, optional): Whether at training mode. Defaults to False. - scale_by_keep (bool, optional): Whether upscale the output. Defaults to True. - - Returns: - paddle.Tensor: Output tensor after apply dropout. - """ - if drop_prob == 0.0 or not training: - return x - keep_prob = 1 - drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) - random_tensor = paddle.full(shape, keep_prob, x.dtype) - random_tensor = paddle.bernoulli(random_tensor) - if keep_prob > 0.0 and scale_by_keep: - random_tensor = random_tensor / keep_prob - return x * random_tensor - - -class DropPath(nn.Layer): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - - Args: - drop_prob (float, optional): Drop paths probability. Defaults to 0.0. - scale_by_keep (bool, optional): Whether upscale the output. Defaults to True. - """ - - def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True): - super().__init__() - self.drop_prob = drop_prob - self.scale_by_keep = scale_by_keep - - def forward(self, x): - return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) - - def extra_repr(self): - return f"drop_prob={round(self.drop_prob,3):0.3f}" - - -class PeriodicPad2d(nn.Layer): - """Pad longitudinal (left-right) circular and pad latitude (top-bottom) with zeros. - - Args: - pad (int): Number of pad. - """ - - def __init__(self, pad: int): - super(PeriodicPad2d, self).__init__() - self.pad = pad - - def forward(self, x): - # pad left and right circular - out = F.pad(x, (self.pad, self.pad, 0, 0), mode="circular") - # pad top and bottom zeros - out = F.pad( - out, - (0, 0, 0, 0, self.pad, self.pad, 0, 0), - mode="constant", - value=0, - ) - return out - - -class MLP(nn.Layer): - """Multi layer perceptron module used in Transformer. - - Args: - in_features (int): Number of the input features. - hidden_features (Optional[int]): Number of the hidden size. Defaults to None. - out_features (Optional[int]): Number of the output features. Defaults to None. - activation (str, optional): Name of activation function. Defaults to "gelu". - drop (float, optional): Probability of dropout the units. Defaults to 0.0. - """ - - def __init__( - self, - in_features: int, - hidden_features: Optional[int] = None, - out_features: Optional[int] = None, - activation: str = "gelu", - drop: float = 0.0, - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_mod.get_activation(activation) - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class AFNO2D(nn.Layer): - """2D Adaptive Fourier Neural Operators. - - Args: - hidden_size (int): Number of hidden size. - num_blocks (int, optional): Number of blocks. Defaults to 8. - sparsity_threshold (float, optional): The value of threshold for softshrink. Defaults to 0.01. - hard_thresholding_fraction (float, optional): The value of threshold for keep mode. Defaults to 1.0. - hidden_size_factor (int, optional): The factor of hidden size. Defaults to 1. - scale (float, optional): The scale factor of the parameter when initialization. Defaults to 0.02. 
- """ - - def __init__( - self, - hidden_size: int, - num_blocks: int = 8, - sparsity_threshold: float = 0.01, - hard_thresholding_fraction: float = 1.0, - hidden_size_factor: int = 1, - scale: float = 0.02, - ): - super().__init__() - if hidden_size % num_blocks != 0: - raise ValueError( - f"hidden_size({hidden_size}) should be divisble by num_blocks({num_blocks})." - ) - - self.hidden_size = hidden_size - self.sparsity_threshold = sparsity_threshold - self.num_blocks = num_blocks - self.block_size = self.hidden_size // self.num_blocks - self.hard_thresholding_fraction = hard_thresholding_fraction - self.hidden_size_factor = hidden_size_factor - self.scale = scale - - self.w1 = self.create_parameter( - shape=( - 2, - self.num_blocks, - self.block_size, - self.block_size * self.hidden_size_factor, - ), - default_initializer=nn.initializer.Normal(std=self.scale), - ) - self.b1 = self.create_parameter( - shape=(2, self.num_blocks, self.block_size * self.hidden_size_factor), - default_initializer=nn.initializer.Normal(std=self.scale), - ) - self.w2 = self.create_parameter( - shape=( - 2, - self.num_blocks, - self.block_size * self.hidden_size_factor, - self.block_size, - ), - default_initializer=nn.initializer.Normal(std=self.scale), - ) - self.b2 = self.create_parameter( - shape=(2, self.num_blocks, self.block_size), - default_initializer=nn.initializer.Normal(std=self.scale), - ) - - def forward(self, x): - bias = x - - B, H, W, C = x.shape - - x = paddle.fft.rfft2(x, axes=(1, 2), norm="ortho") - x = x.reshape((B, H, W // 2 + 1, self.num_blocks, self.block_size)) - - o1_shape = ( - B, - H, - W // 2 + 1, - self.num_blocks, - self.block_size * self.hidden_size_factor, - ) - o1_real = paddle.zeros(o1_shape) - o1_imag = paddle.zeros(o1_shape) - o2_real = paddle.zeros(x.shape) - o2_imag = paddle.zeros(x.shape) - - total_modes = H // 2 + 1 - kept_modes = int(total_modes * self.hard_thresholding_fraction) - - st, end = total_modes - kept_modes, total_modes + kept_modes - - o1_real[:, st:end, :kept_modes] = F.relu( - paddle.einsum( - "xyzbi,bio->xyzbo", - x[:, st:end, :kept_modes].real(), - self.w1[0], - ) - - paddle.einsum( - "xyzbi,bio->xyzbo", - x[:, st:end, :kept_modes].imag(), - self.w1[1], - ) - + self.b1[0] - ) - - o1_imag[:, st:end, :kept_modes] = F.relu( - paddle.einsum( - "xyzbi,bio->xyzbo", - x[:, st:end, :kept_modes].imag(), - self.w1[0], - ) - + paddle.einsum( - "xyzbi,bio->xyzbo", - x[:, st:end, :kept_modes].real(), - self.w1[1], - ) - + self.b1[1] - ) - - o2_real[:, st:end, :kept_modes] = ( - paddle.einsum( - "xyzbi,bio->xyzbo", - o1_real[:, st:end, :kept_modes], - self.w2[0], - ) - - paddle.einsum( - "xyzbi,bio->xyzbo", - o1_imag[:, st:end, :kept_modes], - self.w2[1], - ) - + self.b2[0] - ) - - o2_imag[:, st:end, :kept_modes] = ( - paddle.einsum( - "xyzbi,bio->xyzbo", - o1_imag[:, st:end, :kept_modes], - self.w2[0], - ) - + paddle.einsum( - "xyzbi,bio->xyzbo", - o1_real[:, st:end, :kept_modes], - self.w2[1], - ) - + self.b2[1] - ) - - x = paddle.stack([o2_real, o2_imag], axis=-1) - x = F.softshrink(x, threshold=self.sparsity_threshold) - x = paddle.as_complex(x) - x = x.reshape((B, H, W // 2 + 1, C)) - x = paddle.fft.irfft2(x, s=(H, W), axes=(1, 2), norm="ortho") - - return x + bias - - -class Block(nn.Layer): - """AFNO network block. - - Args: - dim (int): The input tensor dimension. - mlp_ratio (float, optional): The ratio used in MLP. Defaults to 4.0. - drop (float, optional): The drop ratio used in MLP. Defaults to 0.0. 
- drop_path (float, optional): The drop ratio used in DropPath. Defaults to 0.0. - activation (str, optional): Name of activation function. Defaults to "gelu". - norm_layer (nn.Layer, optional): Class of norm layer. Defaults to nn.LayerNorm. - double_skip (bool, optional): Whether use double skip. Defaults to True. - num_blocks (int, optional): The number of blocks. Defaults to 8. - sparsity_threshold (float, optional): The value of threshold for softshrink. Defaults to 0.01. - hard_thresholding_fraction (float, optional): The value of threshold for keep mode. Defaults to 1.0. - """ - - def __init__( - self, - dim: int, - mlp_ratio: float = 4.0, - drop: float = 0.0, - drop_path: float = 0.0, - activation: str = "gelu", - norm_layer: nn.Layer = nn.LayerNorm, - double_skip: bool = True, - num_blocks: int = 8, - sparsity_threshold: float = 0.01, - hard_thresholding_fraction: float = 1.0, - ): - super().__init__() - self.norm1 = norm_layer(dim) - self.filter = AFNO2D( - dim, num_blocks, sparsity_threshold, hard_thresholding_fraction - ) - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = MLP( - in_features=dim, - hidden_features=mlp_hidden_dim, - activation=activation, - drop=drop, - ) - self.double_skip = double_skip - - def forward(self, x): - residual = x - x = self.norm1(x) - x = self.filter(x) - - if self.double_skip: - x = x + residual - residual = x - - x = self.norm2(x) - x = self.mlp(x) - x = self.drop_path(x) - x = x + residual - return x - - -class PatchEmbed(nn.Layer): - """Patch embedding module. - - Args: - img_size (Tuple[int, ...], optional): Image size. Defaults to (224, 224). - patch_size (Tuple[int, ...], optional): Patch size. Defaults to (16, 16). - in_channels (int, optional): The input tensor channels. Defaults to 3. - embed_dim (int, optional): The output tensor channels. Defaults to 768. - """ - - def __init__( - self, - img_size: Tuple[int, ...] = (224, 224), - patch_size: Tuple[int, ...] = (16, 16), - in_channels: int = 3, - embed_dim: int = 768, - ): - super().__init__() - num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) - self.img_size = img_size - self.patch_size = patch_size - self.num_patches = num_patches - self.proj = nn.Conv2D( - in_channels, embed_dim, kernel_size=patch_size, stride=patch_size - ) - - def forward(self, x): - _, _, H, W = x.shape - if not (H == self.img_size[0] and W == self.img_size[1]): - raise ValueError( - f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." - ) - x = self.proj(x).flatten(2).transpose((0, 2, 1)) - return x - - -class AFNONet(base.Arch): - """Adaptive Fourier Neural Network. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). - img_size (Tuple[int, ...], optional): Image size. Defaults to (720, 1440). - patch_size (Tuple[int, ...], optional): Path. Defaults to (8, 8). - in_channels (int, optional): The input tensor channels. Defaults to 20. - out_channels (int, optional): The output tensor channels. Defaults to 20. - embed_dim (int, optional): The embedding dimension for PatchEmbed. Defaults to 768. - depth (int, optional): Number of transformer depth. Defaults to 12. - mlp_ratio (float, optional): Number of ratio used in MLP. Defaults to 4.0. - drop_rate (float, optional): The drop ratio used in MLP. Defaults to 0.0. 
- drop_path_rate (float, optional): The drop ratio used in DropPath. Defaults to 0.0. - num_blocks (int, optional): Number of blocks. Defaults to 8. - sparsity_threshold (float, optional): The value of threshold for softshrink. Defaults to 0.01. - hard_thresholding_fraction (float, optional): The value of threshold for keep mode. Defaults to 1.0. - num_timestamps (int, optional): Number of timestamp. Defaults to 1. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) - >>> input_data = {"input": paddle.randn([1, 20, 720, 1440])} - >>> output_data = model(input_data) - >>> for k, v in output_data.items(): - ... print(k, v.shape) - output [1, 20, 720, 1440] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - img_size: Tuple[int, ...] = (720, 1440), - patch_size: Tuple[int, ...] = (8, 8), - in_channels: int = 20, - out_channels: int = 20, - embed_dim: int = 768, - depth: int = 12, - mlp_ratio: float = 4.0, - drop_rate: float = 0.0, - drop_path_rate: float = 0.0, - num_blocks: int = 8, - sparsity_threshold: float = 0.01, - hard_thresholding_fraction: float = 1.0, - num_timestamps: int = 1, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - - self.img_size = img_size - self.patch_size = patch_size - self.in_channels = in_channels - self.out_channels = out_channels - self.embed_dim = embed_dim - self.num_blocks = num_blocks - self.num_timestamps = num_timestamps - norm_layer = partial(nn.LayerNorm, epsilon=1e-6) - - self.patch_embed = PatchEmbed( - img_size=img_size, - patch_size=self.patch_size, - in_channels=self.in_channels, - embed_dim=embed_dim, - ) - num_patches = self.patch_embed.num_patches - - data = paddle.zeros((1, num_patches, embed_dim)) - data = initializer.trunc_normal_(data, std=0.02) - self.pos_embed = paddle.create_parameter( - shape=data.shape, - dtype=data.dtype, - default_initializer=nn.initializer.Assign(data), - ) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [x.item() for x in paddle.linspace(0, drop_path_rate, depth)] - - self.h = img_size[0] // self.patch_size[0] - self.w = img_size[1] // self.patch_size[1] - - self.blocks = nn.LayerList( - [ - Block( - dim=embed_dim, - mlp_ratio=mlp_ratio, - drop=drop_rate, - drop_path=dpr[i], - norm_layer=norm_layer, - num_blocks=self.num_blocks, - sparsity_threshold=sparsity_threshold, - hard_thresholding_fraction=hard_thresholding_fraction, - ) - for i in range(depth) - ] - ) - - self.norm = norm_layer(embed_dim) - self.head = nn.Linear( - embed_dim, - self.out_channels * self.patch_size[0] * self.patch_size[1], - bias_attr=False, - ) - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - initializer.trunc_normal_(m.weight, std=0.02) - if m.bias is not None: - initializer.zeros_(m.bias) - elif isinstance(m, nn.LayerNorm): - initializer.ones_(m.weight) - initializer.zeros_(m.bias) - elif isinstance(m, nn.Conv2D): - initializer.conv_init_(m) - - def forward_tensor(self, x): - B = x.shape[0] - x = self.patch_embed(x) - x = x + self.pos_embed - x = self.pos_drop(x) - - x = x.reshape((B, self.h, self.w, self.embed_dim)) - for block in self.blocks: - x = block(x) - - x = self.head(x) - - b = x.shape[0] - p1 = self.patch_size[0] - p2 = self.patch_size[1] - h = self.img_size[0] // self.patch_size[0] - w = self.img_size[1] // self.patch_size[1] - c_out = x.shape[3] // (p1 * p2) - x = x.reshape((b, h, w, p1, p2, c_out)) - x = x.transpose((0, 5, 1, 3, 2, 4)) - x = 
x.reshape((b, c_out, h * p1, w * p2)) - - return x - - @staticmethod - def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): - return {key: data_tensors[i] for i, key in enumerate(keys)} - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - x_tensor = self.concat_to_tensor(x, self.input_keys) - - y = [] - input = x_tensor - for _ in range(self.num_timestamps): - out = self.forward_tensor(input) - y.append(out) - input = out - y = self.split_to_dict(y, self.output_keys) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y - - -class PrecipNet(base.Arch): - """Precipitation Network. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). - wind_model (base.Arch): Wind model. - img_size (Tuple[int, ...], optional): Image size. Defaults to (720, 1440). - patch_size (Tuple[int, ...], optional): Path. Defaults to (8, 8). - in_channels (int, optional): The input tensor channels. Defaults to 20. - out_channels (int, optional): The output tensor channels. Defaults to 1. - embed_dim (int, optional): The embedding dimension for PatchEmbed. Defaults to 768. - depth (int, optional): Number of transformer depth. Defaults to 12. - mlp_ratio (float, optional): Number of ratio used in MLP. Defaults to 4.0. - drop_rate (float, optional): The drop ratio used in MLP. Defaults to 0.0. - drop_path_rate (float, optional): The drop ratio used in DropPath. Defaults to 0.0. - num_blocks (int, optional): Number of blocks. Defaults to 8. - sparsity_threshold (float, optional): The value of threshold for softshrink. Defaults to 0.01. - hard_thresholding_fraction (float, optional): The value of threshold for keep mode. Defaults to 1.0. - num_timestamps (int, optional): Number of timestamp. Defaults to 1. - - Examples: - >>> import ppsci - >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) - >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) - >>> data = paddle.randn([1, 20, 720, 1440]) - >>> data_dict = {"input": data} - >>> output = model.forward(data_dict) - >>> print(output['output'].shape) - [1, 1, 720, 1440] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - wind_model: base.Arch, - img_size: Tuple[int, ...] = (720, 1440), - patch_size: Tuple[int, ...] 
= (8, 8), - in_channels: int = 20, - out_channels: int = 1, - embed_dim: int = 768, - depth: int = 12, - mlp_ratio: float = 4.0, - drop_rate: float = 0.0, - drop_path_rate: float = 0.0, - num_blocks: int = 8, - sparsity_threshold: float = 0.01, - hard_thresholding_fraction: float = 1.0, - num_timestamps=1, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - - self.img_size = img_size - self.patch_size = patch_size - self.in_channels = in_channels - self.out_channels = out_channels - self.embed_dim = embed_dim - self.num_blocks = num_blocks - self.num_timestamps = num_timestamps - self.backbone = AFNONet( - ("input",), - ("output",), - img_size=img_size, - patch_size=patch_size, - in_channels=in_channels, - out_channels=out_channels, - embed_dim=embed_dim, - depth=depth, - mlp_ratio=mlp_ratio, - drop_rate=drop_rate, - drop_path_rate=drop_path_rate, - num_blocks=num_blocks, - sparsity_threshold=sparsity_threshold, - hard_thresholding_fraction=hard_thresholding_fraction, - ) - self.ppad = PeriodicPad2d(1) - self.conv = nn.Conv2D( - self.out_channels, self.out_channels, kernel_size=3, stride=1, padding=0 - ) - self.act = nn.ReLU() - self.apply(self._init_weights) - self.wind_model = wind_model - self.wind_model.eval() - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - initializer.trunc_normal_(m.weight, std=0.02) - if m.bias is not None: - initializer.zeros_(m.bias) - elif isinstance(m, nn.LayerNorm): - initializer.ones_(m.weight) - initializer.zeros_(m.bias) - elif isinstance(m, nn.Conv2D): - initializer.conv_init_(m) - - def forward_tensor(self, x): - x = self.backbone.forward_tensor(x) - x = self.ppad(x) - x = self.conv(x) - x = self.act(x) - return x - - @staticmethod - def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): - return {key: data_tensors[i] for i, key in enumerate(keys)} - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - x_tensor = self.concat_to_tensor(x, self.input_keys) - - input_wind = x_tensor - y = [] - for _ in range(self.num_timestamps): - with paddle.no_grad(): - out_wind = self.wind_model.forward_tensor(input_wind) - out = self.forward_tensor(out_wind) - y.append(out) - input_wind = out_wind - y = self.split_to_dict(y, self.output_keys) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Code below is heavily based on [FourCastNet](https://github.com/NVlabs/FourCastNet) +""" +from __future__ import annotations + +from functools import partial +from typing import Optional +from typing import Tuple + +import paddle +import paddle.fft +import paddle.nn.functional as F +from paddle import nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base +from ppsci.utils import initializer + + +def drop_path( + x: paddle.Tensor, + drop_prob: float = 0.0, + training: bool = False, + scale_by_keep: bool = True, +) -> paddle.Tensor: + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... + + Args: + x (paddle.Tensor): The tensor to apply. + drop_prob (float, optional): Drop paths probability. Defaults to 0.0. + training (bool, optional): Whether at training mode. Defaults to False. + scale_by_keep (bool, optional): Whether upscale the output. Defaults to True. + + Returns: + paddle.Tensor: Output tensor after apply dropout. + """ + if drop_prob == 0.0 or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) + random_tensor = paddle.full(shape, keep_prob, x.dtype) + random_tensor = paddle.bernoulli(random_tensor) + if keep_prob > 0.0 and scale_by_keep: + random_tensor = random_tensor / keep_prob + return x * random_tensor + + +class DropPath(nn.Layer): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Args: + drop_prob (float, optional): Drop paths probability. Defaults to 0.0. + scale_by_keep (bool, optional): Whether upscale the output. Defaults to True. + """ + + def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True): + super().__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) + + def extra_repr(self): + return f"drop_prob={round(self.drop_prob,3):0.3f}" + + +class PeriodicPad2d(nn.Layer): + """Pad longitudinal (left-right) circular and pad latitude (top-bottom) with zeros. + + Args: + pad (int): Number of pad. + """ + + def __init__(self, pad: int): + super(PeriodicPad2d, self).__init__() + self.pad = pad + + def forward(self, x): + # pad left and right circular + out = F.pad(x, (self.pad, self.pad, 0, 0), mode="circular") + # pad top and bottom zeros + out = F.pad( + out, + (0, 0, 0, 0, self.pad, self.pad, 0, 0), + mode="constant", + value=0, + ) + return out + + +class MLP(nn.Layer): + """Multi layer perceptron module used in Transformer. + + Args: + in_features (int): Number of the input features. + hidden_features (Optional[int]): Number of the hidden size. Defaults to None. + out_features (Optional[int]): Number of the output features. Defaults to None. + activation (str, optional): Name of activation function. Defaults to "gelu". + drop (float, optional): Probability of dropout the units. Defaults to 0.0. 
+ """ + + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + activation: str = "gelu", + drop: float = 0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_mod.get_activation(activation) + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class AFNO2D(nn.Layer): + """2D Adaptive Fourier Neural Operators. + + Args: + hidden_size (int): Number of hidden size. + num_blocks (int, optional): Number of blocks. Defaults to 8. + sparsity_threshold (float, optional): The value of threshold for softshrink. Defaults to 0.01. + hard_thresholding_fraction (float, optional): The value of threshold for keep mode. Defaults to 1.0. + hidden_size_factor (int, optional): The factor of hidden size. Defaults to 1. + scale (float, optional): The scale factor of the parameter when initialization. Defaults to 0.02. + """ + + def __init__( + self, + hidden_size: int, + num_blocks: int = 8, + sparsity_threshold: float = 0.01, + hard_thresholding_fraction: float = 1.0, + hidden_size_factor: int = 1, + scale: float = 0.02, + ): + super().__init__() + if hidden_size % num_blocks != 0: + raise ValueError( + f"hidden_size({hidden_size}) should be divisble by num_blocks({num_blocks})." + ) + + self.hidden_size = hidden_size + self.sparsity_threshold = sparsity_threshold + self.num_blocks = num_blocks + self.block_size = self.hidden_size // self.num_blocks + self.hard_thresholding_fraction = hard_thresholding_fraction + self.hidden_size_factor = hidden_size_factor + self.scale = scale + + self.w1 = self.create_parameter( + shape=( + 2, + self.num_blocks, + self.block_size, + self.block_size * self.hidden_size_factor, + ), + default_initializer=nn.initializer.Normal(std=self.scale), + ) + self.b1 = self.create_parameter( + shape=(2, self.num_blocks, self.block_size * self.hidden_size_factor), + default_initializer=nn.initializer.Normal(std=self.scale), + ) + self.w2 = self.create_parameter( + shape=( + 2, + self.num_blocks, + self.block_size * self.hidden_size_factor, + self.block_size, + ), + default_initializer=nn.initializer.Normal(std=self.scale), + ) + self.b2 = self.create_parameter( + shape=(2, self.num_blocks, self.block_size), + default_initializer=nn.initializer.Normal(std=self.scale), + ) + + def forward(self, x): + bias = x + + B, H, W, C = x.shape + + x = paddle.fft.rfft2(x, axes=(1, 2), norm="ortho") + x = x.reshape((B, H, W // 2 + 1, self.num_blocks, self.block_size)) + + o1_shape = ( + B, + H, + W // 2 + 1, + self.num_blocks, + self.block_size * self.hidden_size_factor, + ) + o1_real = paddle.zeros(o1_shape) + o1_imag = paddle.zeros(o1_shape) + o2_real = paddle.zeros(x.shape) + o2_imag = paddle.zeros(x.shape) + + total_modes = H // 2 + 1 + kept_modes = int(total_modes * self.hard_thresholding_fraction) + + st, end = total_modes - kept_modes, total_modes + kept_modes + + o1_real[:, st:end, :kept_modes] = F.relu( + paddle.einsum( + "xyzbi,bio->xyzbo", + x[:, st:end, :kept_modes].real(), + self.w1[0], + ) + - paddle.einsum( + "xyzbi,bio->xyzbo", + x[:, st:end, :kept_modes].imag(), + self.w1[1], + ) + + self.b1[0] + ) + + o1_imag[:, st:end, :kept_modes] = F.relu( + paddle.einsum( + "xyzbi,bio->xyzbo", + x[:, st:end, 
:kept_modes].imag(), + self.w1[0], + ) + + paddle.einsum( + "xyzbi,bio->xyzbo", + x[:, st:end, :kept_modes].real(), + self.w1[1], + ) + + self.b1[1] + ) + + o2_real[:, st:end, :kept_modes] = ( + paddle.einsum( + "xyzbi,bio->xyzbo", + o1_real[:, st:end, :kept_modes], + self.w2[0], + ) + - paddle.einsum( + "xyzbi,bio->xyzbo", + o1_imag[:, st:end, :kept_modes], + self.w2[1], + ) + + self.b2[0] + ) + + o2_imag[:, st:end, :kept_modes] = ( + paddle.einsum( + "xyzbi,bio->xyzbo", + o1_imag[:, st:end, :kept_modes], + self.w2[0], + ) + + paddle.einsum( + "xyzbi,bio->xyzbo", + o1_real[:, st:end, :kept_modes], + self.w2[1], + ) + + self.b2[1] + ) + + x = paddle.stack([o2_real, o2_imag], axis=-1) + x = F.softshrink(x, threshold=self.sparsity_threshold) + x = paddle.as_complex(x) + x = x.reshape((B, H, W // 2 + 1, C)) + x = paddle.fft.irfft2(x, s=(H, W), axes=(1, 2), norm="ortho") + + return x + bias + + +class Block(nn.Layer): + """AFNO network block. + + Args: + dim (int): The input tensor dimension. + mlp_ratio (float, optional): The ratio used in MLP. Defaults to 4.0. + drop (float, optional): The drop ratio used in MLP. Defaults to 0.0. + drop_path (float, optional): The drop ratio used in DropPath. Defaults to 0.0. + activation (str, optional): Name of activation function. Defaults to "gelu". + norm_layer (nn.Layer, optional): Class of norm layer. Defaults to nn.LayerNorm. + double_skip (bool, optional): Whether use double skip. Defaults to True. + num_blocks (int, optional): The number of blocks. Defaults to 8. + sparsity_threshold (float, optional): The value of threshold for softshrink. Defaults to 0.01. + hard_thresholding_fraction (float, optional): The value of threshold for keep mode. Defaults to 1.0. + """ + + def __init__( + self, + dim: int, + mlp_ratio: float = 4.0, + drop: float = 0.0, + drop_path: float = 0.0, + activation: str = "gelu", + norm_layer: nn.Layer = nn.LayerNorm, + double_skip: bool = True, + num_blocks: int = 8, + sparsity_threshold: float = 0.01, + hard_thresholding_fraction: float = 1.0, + ): + super().__init__() + self.norm1 = norm_layer(dim) + self.filter = AFNO2D( + dim, num_blocks, sparsity_threshold, hard_thresholding_fraction + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = MLP( + in_features=dim, + hidden_features=mlp_hidden_dim, + activation=activation, + drop=drop, + ) + self.double_skip = double_skip + + def forward(self, x): + residual = x + x = self.norm1(x) + x = self.filter(x) + + if self.double_skip: + x = x + residual + residual = x + + x = self.norm2(x) + x = self.mlp(x) + x = self.drop_path(x) + x = x + residual + return x + + +class PatchEmbed(nn.Layer): + """Patch embedding module. + + Args: + img_size (Tuple[int, ...], optional): Image size. Defaults to (224, 224). + patch_size (Tuple[int, ...], optional): Patch size. Defaults to (16, 16). + in_channels (int, optional): The input tensor channels. Defaults to 3. + embed_dim (int, optional): The output tensor channels. Defaults to 768. + """ + + def __init__( + self, + img_size: Tuple[int, ...] = (224, 224), + patch_size: Tuple[int, ...] 
= (16, 16), + in_channels: int = 3, + embed_dim: int = 768, + ): + super().__init__() + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + self.proj = nn.Conv2D( + in_channels, embed_dim, kernel_size=patch_size, stride=patch_size + ) + + def forward(self, x): + _, _, H, W = x.shape + if not (H == self.img_size[0] and W == self.img_size[1]): + raise ValueError( + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + ) + x = self.proj(x).flatten(2).transpose((0, 2, 1)) + return x + + +class AFNONet(base.Arch): + """Adaptive Fourier Neural Network. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + img_size (Tuple[int, ...], optional): Image size. Defaults to (720, 1440). + patch_size (Tuple[int, ...], optional): Path. Defaults to (8, 8). + in_channels (int, optional): The input tensor channels. Defaults to 20. + out_channels (int, optional): The output tensor channels. Defaults to 20. + embed_dim (int, optional): The embedding dimension for PatchEmbed. Defaults to 768. + depth (int, optional): Number of transformer depth. Defaults to 12. + mlp_ratio (float, optional): Number of ratio used in MLP. Defaults to 4.0. + drop_rate (float, optional): The drop ratio used in MLP. Defaults to 0.0. + drop_path_rate (float, optional): The drop ratio used in DropPath. Defaults to 0.0. + num_blocks (int, optional): Number of blocks. Defaults to 8. + sparsity_threshold (float, optional): The value of threshold for softshrink. Defaults to 0.01. + hard_thresholding_fraction (float, optional): The value of threshold for keep mode. Defaults to 1.0. + num_timestamps (int, optional): Number of timestamp. Defaults to 1. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.AFNONet(("input", ), ("output", )) + >>> input_data = {"input": paddle.randn([1, 20, 720, 1440])} + >>> output_data = model(input_data) + >>> for k, v in output_data.items(): + ... print(k, v.shape) + output [1, 20, 720, 1440] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + img_size: Tuple[int, ...] = (720, 1440), + patch_size: Tuple[int, ...] 
= (8, 8), + in_channels: int = 20, + out_channels: int = 20, + embed_dim: int = 768, + depth: int = 12, + mlp_ratio: float = 4.0, + drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + num_blocks: int = 8, + sparsity_threshold: float = 0.01, + hard_thresholding_fraction: float = 1.0, + num_timestamps: int = 1, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + + self.img_size = img_size + self.patch_size = patch_size + self.in_channels = in_channels + self.out_channels = out_channels + self.embed_dim = embed_dim + self.num_blocks = num_blocks + self.num_timestamps = num_timestamps + norm_layer = partial(nn.LayerNorm, epsilon=1e-6) + + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=self.patch_size, + in_channels=self.in_channels, + embed_dim=embed_dim, + ) + num_patches = self.patch_embed.num_patches + + data = paddle.zeros((1, num_patches, embed_dim)) + data = initializer.trunc_normal_(data, std=0.02) + self.pos_embed = paddle.create_parameter( + shape=data.shape, + dtype=data.dtype, + default_initializer=nn.initializer.Assign(data), + ) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in paddle.linspace(0, drop_path_rate, depth)] + + self.h = img_size[0] // self.patch_size[0] + self.w = img_size[1] // self.patch_size[1] + + self.blocks = nn.LayerList( + [ + Block( + dim=embed_dim, + mlp_ratio=mlp_ratio, + drop=drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + num_blocks=self.num_blocks, + sparsity_threshold=sparsity_threshold, + hard_thresholding_fraction=hard_thresholding_fraction, + ) + for i in range(depth) + ] + ) + + self.norm = norm_layer(embed_dim) + self.head = nn.Linear( + embed_dim, + self.out_channels * self.patch_size[0] * self.patch_size[1], + bias_attr=False, + ) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + initializer.trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + initializer.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + initializer.ones_(m.weight) + initializer.zeros_(m.bias) + elif isinstance(m, nn.Conv2D): + initializer.conv_init_(m) + + def forward_tensor(self, x): + B = x.shape[0] + x = self.patch_embed(x) + x = x + self.pos_embed + x = self.pos_drop(x) + + x = x.reshape((B, self.h, self.w, self.embed_dim)) + for block in self.blocks: + x = block(x) + + x = self.head(x) + + b = x.shape[0] + p1 = self.patch_size[0] + p2 = self.patch_size[1] + h = self.img_size[0] // self.patch_size[0] + w = self.img_size[1] // self.patch_size[1] + c_out = x.shape[3] // (p1 * p2) + x = x.reshape((b, h, w, p1, p2, c_out)) + x = x.transpose((0, 5, 1, 3, 2, 4)) + x = x.reshape((b, c_out, h * p1, w * p2)) + + return x + + @staticmethod + def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): + return {key: data_tensors[i] for i, key in enumerate(keys)} + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + x_tensor = self.concat_to_tensor(x, self.input_keys) + + y = [] + input = x_tensor + for _ in range(self.num_timestamps): + out = self.forward_tensor(input) + y.append(out) + input = out + y = self.split_to_dict(y, self.output_keys) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y + + +class PrecipNet(base.Arch): + """Precipitation Network. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). 
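# A minimal, hedged shape-check (toy sizes) of the patch reassembly performed
# in AFNONet.forward_tensor above: the head output of shape
# (B, h, w, p1*p2*c_out) is folded back into an image of shape
# (B, c_out, h*p1, w*p2).
import paddle

B, h, w, p1, p2, c_out = 2, 3, 4, 8, 8, 5
x = paddle.randn([B, h, w, p1 * p2 * c_out])
x = x.reshape((B, h, w, p1, p2, c_out))
x = x.transpose((0, 5, 1, 3, 2, 4))       # (B, c_out, h, p1, w, p2)
x = x.reshape((B, c_out, h * p1, w * p2))
print(x.shape)  # [2, 5, 24, 32]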
+ wind_model (base.Arch): Wind model. + img_size (Tuple[int, ...], optional): Image size. Defaults to (720, 1440). + patch_size (Tuple[int, ...], optional): Path. Defaults to (8, 8). + in_channels (int, optional): The input tensor channels. Defaults to 20. + out_channels (int, optional): The output tensor channels. Defaults to 1. + embed_dim (int, optional): The embedding dimension for PatchEmbed. Defaults to 768. + depth (int, optional): Number of transformer depth. Defaults to 12. + mlp_ratio (float, optional): Number of ratio used in MLP. Defaults to 4.0. + drop_rate (float, optional): The drop ratio used in MLP. Defaults to 0.0. + drop_path_rate (float, optional): The drop ratio used in DropPath. Defaults to 0.0. + num_blocks (int, optional): Number of blocks. Defaults to 8. + sparsity_threshold (float, optional): The value of threshold for softshrink. Defaults to 0.01. + hard_thresholding_fraction (float, optional): The value of threshold for keep mode. Defaults to 1.0. + num_timestamps (int, optional): Number of timestamp. Defaults to 1. + + Examples: + >>> import ppsci + >>> wind_model = ppsci.arch.AFNONet(("input", ), ("output", )) + >>> model = ppsci.arch.PrecipNet(("input", ), ("output", ), wind_model) + >>> data = paddle.randn([1, 20, 720, 1440]) + >>> data_dict = {"input": data} + >>> output = model.forward(data_dict) + >>> print(output['output'].shape) + [1, 1, 720, 1440] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + wind_model: base.Arch, + img_size: Tuple[int, ...] = (720, 1440), + patch_size: Tuple[int, ...] = (8, 8), + in_channels: int = 20, + out_channels: int = 1, + embed_dim: int = 768, + depth: int = 12, + mlp_ratio: float = 4.0, + drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + num_blocks: int = 8, + sparsity_threshold: float = 0.01, + hard_thresholding_fraction: float = 1.0, + num_timestamps=1, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + + self.img_size = img_size + self.patch_size = patch_size + self.in_channels = in_channels + self.out_channels = out_channels + self.embed_dim = embed_dim + self.num_blocks = num_blocks + self.num_timestamps = num_timestamps + self.backbone = AFNONet( + ("input",), + ("output",), + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + out_channels=out_channels, + embed_dim=embed_dim, + depth=depth, + mlp_ratio=mlp_ratio, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + num_blocks=num_blocks, + sparsity_threshold=sparsity_threshold, + hard_thresholding_fraction=hard_thresholding_fraction, + ) + self.ppad = PeriodicPad2d(1) + self.conv = nn.Conv2D( + self.out_channels, self.out_channels, kernel_size=3, stride=1, padding=0 + ) + self.act = nn.ReLU() + self.apply(self._init_weights) + self.wind_model = wind_model + self.wind_model.eval() + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + initializer.trunc_normal_(m.weight, std=0.02) + if m.bias is not None: + initializer.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + initializer.ones_(m.weight) + initializer.zeros_(m.bias) + elif isinstance(m, nn.Conv2D): + initializer.conv_init_(m) + + def forward_tensor(self, x): + x = self.backbone.forward_tensor(x) + x = self.ppad(x) + x = self.conv(x) + x = self.act(x) + return x + + @staticmethod + def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): + return {key: data_tensors[i] for i, key in enumerate(keys)} + + def forward(self, x): + if self._input_transform is 
not None: + x = self._input_transform(x) + + x_tensor = self.concat_to_tensor(x, self.input_keys) + + input_wind = x_tensor + y = [] + for _ in range(self.num_timestamps): + with paddle.no_grad(): + out_wind = self.wind_model.forward_tensor(input_wind) + out = self.forward_tensor(out_wind) + y.append(out) + input_wind = out_wind + y = self.split_to_dict(y, self.output_keys) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y diff --git a/ppsci/arch/amgnet.py b/ppsci/arch/amgnet.py index ce728317d6..151558a2ab 100644 --- a/ppsci/arch/amgnet.py +++ b/ppsci/arch/amgnet.py @@ -1,649 +1,649 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import functools -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional -from typing import Tuple - -import numpy as np -import paddle -import paddle.nn as nn -from typing_extensions import Literal - -try: - import pgl -except ModuleNotFoundError: - pass - -try: - import pyamg -except ModuleNotFoundError: - pass - -from paddle import sparse as pd_sparse -from scipy import sparse as sci_sparse - - -def _knn_interpolate( - features: paddle.Tensor, coarse_nodes: paddle.Tensor, fine_nodes: paddle.Tensor -) -> paddle.Tensor: - coarse_nodes_input = paddle.repeat_interleave( - coarse_nodes.unsqueeze(0), fine_nodes.shape[0], axis=0 - ) # [6684,352,2] - fine_nodes_input = paddle.repeat_interleave( - fine_nodes.unsqueeze(1), coarse_nodes.shape[0], axis=1 - ) # [6684,352,2] - dist_w = 1.0 / ( - paddle.norm(x=coarse_nodes_input - fine_nodes_input, p=2, axis=-1) + 1e-9 - ) # [6684,352] - knn_value, knn_index = paddle.topk(dist_w, k=3, largest=True) # [6684,3],[6684,3] - weight = knn_value.unsqueeze(-2) - features_input = features[knn_index] - output = paddle.bmm(weight, features_input).squeeze(-2) / paddle.sum( - knn_value, axis=-1, keepdim=True - ) - return output - - -def _get_corse_node(latent_graph: "pgl.Graph") -> paddle.Tensor: - row = latent_graph.edge_index[0].numpy() - col = latent_graph.edge_index[1].numpy() - data = paddle.ones(shape=[row.size]).numpy() - A = sci_sparse.coo_matrix((data, (row, col))).tocsr() - splitting = pyamg.classical.split.RS(A) - index = np.array(np.nonzero(splitting)) - b = paddle.to_tensor(index) - b = paddle.squeeze(b) - return b - - -def StAS( - index_A: paddle.Tensor, - value_A: paddle.Tensor, - index_S: paddle.Tensor, - value_S: paddle.Tensor, - N: int, - kN: int, - norm_layer: nn.Layer, -) -> Tuple[paddle.Tensor, paddle.Tensor]: - """ASAP: Adaptive Structure Aware Pooling for Learning Hierarchical Graph Representations. - Ranjan, E., Sanyal, S., Talukdar, P. (2020, April). AAAI(2020) - - Args: - index_A (paddle.Tensor): Indices of sparse matrix A. - value_A (paddle.Tensor): Values of sparse matrix A. - index_S (paddle.Tensor): Indices of sparse matrix S. - value_S (paddle.Tensor): Values of sparse matrix S. - N (int): Dimension N. 
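# A minimal, hedged sketch (toy sizes) of the inverse-distance kNN
# interpolation implemented by _knn_interpolate above: every fine node takes a
# weighted average of the features of its 3 nearest coarse nodes, with weights
# proportional to 1/distance.
import paddle

coarse = paddle.rand([10, 2])   # coarse node positions
fine = paddle.rand([25, 2])     # fine node positions
feat = paddle.rand([10, 8])     # one feature vector per coarse node

dist_w = 1.0 / (paddle.norm(fine.unsqueeze(1) - coarse.unsqueeze(0), p=2, axis=-1) + 1e-9)
knn_value, knn_index = paddle.topk(dist_w, k=3, largest=True)  # both [25, 3]
out = (knn_value.unsqueeze(-1) * feat[knn_index]).sum(axis=1) / knn_value.sum(axis=-1, keepdim=True)
print(out.shape)  # [25, 8]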
- kN (int): Dimension kN. - norm_layer (nn.Layer): Normalization layer. - - Returns: - Tuple[paddle.Tensor, paddle.Tensor]: Indices and values of result matrix E. - """ - sp_x = pd_sparse.sparse_coo_tensor(index_A, value_A) - sp_x = pd_sparse.coalesce(sp_x) - index_A = sp_x.indices() - value_A = sp_x.values() - - sp_s = pd_sparse.sparse_coo_tensor(index_S, value_S) - sp_s = pd_sparse.coalesce(sp_s) - index_S = sp_s.indices() - value_S = sp_s.values() - - indices_A = index_A.numpy() - values_A = value_A.numpy() - coo_A = sci_sparse.coo_matrix( - (values_A, (indices_A[0], indices_A[1])), shape=(N, N) - ) - - indices_S = index_S.numpy() - values_S = value_S.numpy() - coo_S = sci_sparse.coo_matrix( - (values_S, (indices_S[0], indices_S[1])), shape=(N, kN) - ) - - ans = coo_A.dot(coo_S).tocoo() - row = paddle.to_tensor(ans.row) - col = paddle.to_tensor(ans.col) - index_B = paddle.stack([row, col], axis=0) - value_B = paddle.to_tensor(ans.data) - - indices_A = index_S - values_A = value_S - coo_A = pd_sparse.sparse_coo_tensor(indices_A, values_A) - out = pd_sparse.transpose(coo_A, [1, 0]) - index_St = out.indices() - value_St = out.values() - - sp_x = pd_sparse.sparse_coo_tensor(index_B, value_B) - sp_x = pd_sparse.coalesce(sp_x) - index_B = sp_x.indices() - value_B = sp_x.values() - - indices_A = index_St.numpy() - values_A = value_St.numpy() - coo_A = sci_sparse.coo_matrix( - (values_A, (indices_A[0], indices_A[1])), shape=(kN, N) - ) - - indices_S = index_B.numpy() - values_S = value_B.numpy() - coo_S = sci_sparse.coo_matrix( - (values_S, (indices_S[0], indices_S[1])), shape=(N, kN) - ) - - ans = coo_A.dot(coo_S).tocoo() - row = paddle.to_tensor(ans.row) - col = paddle.to_tensor(ans.col) - index_E = paddle.stack([row, col], axis=0) - value_E = paddle.to_tensor(ans.data) - - # index_E排序 - sp_x = pd_sparse.sparse_coo_tensor(index_E, value_E) - sp_x = pd_sparse.coalesce(sp_x) - index_E = sp_x.indices() - value_E = sp_x.values() - - return index_E.astype("int64"), value_E - - -def FillZeros( - index_E: paddle.Tensor, value_E: paddle.Tensor, standard_index, kN: int -) -> Tuple[paddle.Tensor, paddle.Tensor]: - shape = [kN, kN] - row_E = index_E[0] - col_E = index_E[1] - DenseMatrix_E = sci_sparse.coo_matrix( - (paddle.ones_like(value_E), (row_E, col_E)), shape - ).toarray() - - row_S = standard_index[0] - col_S = standard_index[1] - DenseMatrix_S = sci_sparse.coo_matrix( - (paddle.ones([row_S.shape[0]]), (row_S, col_S)), shape - ).toarray() - - diff = DenseMatrix_S - DenseMatrix_E - rows, cols = np.nonzero(diff) - rows = paddle.to_tensor(rows, dtype="int32") - cols = paddle.to_tensor(cols, dtype="int32") - index = paddle.stack([rows, cols], axis=0) - value = paddle.zeros([index.shape[1]]) - index_E = paddle.concat([index_E, index], axis=1) - value_E = paddle.concat([value_E, value], axis=-1) - - sp_x = pd_sparse.sparse_coo_tensor(index_E, value_E) - sp_x = pd_sparse.coalesce(sp_x) - index_E = sp_x.indices() - value_E = sp_x.values() - - return index_E.astype("int64"), value_E - - -def remove_self_loops( - edge_index: paddle.Tensor, edge_attr: Optional[paddle.Tensor] = None -) -> Tuple[paddle.Tensor, Optional[paddle.Tensor]]: - # remove self-loop - mask = edge_index[0] != edge_index[1] - mask = mask.tolist() - edge_index = edge_index.t() - edge_index = edge_index[mask] - edge_index = edge_index.t() - if edge_attr is None: - return edge_index, None - else: - return edge_index, edge_attr[mask] - - -def faster_graph_connectivity(perm, edge_index, edge_weight, score, pos, N, norm_layer): - """ - Adapted 
from Ranjan, E., Sanyal, S., Talukdar, P. (2020, April). Asap: Adaptive structure aware pooling - for learning hierarchical graph representations. AAAI(2020) - """ - - kN = perm.shape[0] - perm2 = perm.reshape((-1, 1)) - mask = (edge_index[0] == perm2).sum(axis=0).astype("bool") - - S0 = edge_index[1][mask].reshape((1, -1)) - S1 = edge_index[0][mask].reshape((1, -1)) - index_S = paddle.concat([S0, S1], axis=0) - value_S = score[mask].detach().squeeze() - n_idx = paddle.zeros([N], dtype=paddle.int64) - n_idx[perm] = paddle.arange(perm.shape[0]) - index_S = index_S.astype("int64") - index_S[1] = n_idx[index_S[1]] - subgraphnode_pos = pos[perm] - index_A = edge_index.clone() - if edge_weight is None: - value_A = value_S.new_ones(edge_index[0].shape[0]) - else: - value_A = edge_weight.clone() - - value_A = paddle.squeeze(value_A) - model_1 = nn.Sequential( - ("l1", nn.Linear(128, 256)), - ("act1", nn.ReLU()), - ("l2", nn.Linear(256, 256)), - ("act2", nn.ReLU()), - ("l4", nn.Linear(256, 128)), - ("act4", nn.ReLU()), - ("l5", nn.Linear(128, 1)), - ) - model_2 = nn.Sequential( - ("l1", nn.Linear(1, 64)), - ("act1", nn.ReLU()), - ("l2", nn.Linear(64, 128)), - ("act2", nn.ReLU()), - ("l4", nn.Linear(128, 128)), - ) - - val_A = model_1(value_A) - val_A = paddle.squeeze(val_A) - index_E, value_E = StAS(index_A, val_A, index_S, value_S, N, kN, norm_layer) - value_E = paddle.reshape(value_E, shape=[-1, 1]) - edge_weight = model_2(value_E) - - return index_E, edge_weight, subgraphnode_pos - - -def norm_graph_connectivity(perm, edge_index, edge_weight, score, pos, N, norm_layer): - """ - Come from Ranjan, E., Sanyal, S., Talukdar, P. (2020, April). Asap: Adaptive - structure aware pooling for learning hierarchical graph representations. AAAI(2020) - """ - - kN = perm.shape[0] - perm2 = perm.reshape((-1, 1)) - mask = (edge_index[0] == perm2).sum(axis=0).astype("bool") - S0 = edge_index[1][mask].reshape((1, -1)) - S1 = edge_index[0][mask].reshape((1, -1)) - - index_S = paddle.concat([S0, S1], axis=0) - value_S = score[mask].detach().squeeze() - n_idx = paddle.zeros([N], dtype=paddle.int64) - n_idx[perm] = paddle.arange(perm.shape[0]) - - index_S = index_S.astype("int64") - index_S[1] = n_idx[index_S[1]] - subgraphnode_pos = pos[perm] - index_A = edge_index.clone() - - if edge_weight is None: - value_A = value_S.new_ones(edge_index[0].shape[0]) - else: - value_A = edge_weight.clone() - - value_A = paddle.squeeze(value_A) - eps_mask = (value_S == 0).astype(paddle.get_default_dtype()) - value_S = paddle.full_like(value_S, 1e-4) * eps_mask + (1 - eps_mask) * value_S - attrlist = [] - standard_index, _ = StAS( - index_A, - paddle.ones_like(value_A[:, 0]), - index_S, - paddle.ones_like(value_S), - N, - kN, - norm_layer, - ) - for i in range(128): - mask = (value_A[:, i] == 0).astype(paddle.get_default_dtype()) - val_A = paddle.full_like(mask, 1e-4) * mask + (1 - mask) * value_A[:, i] - index_E, value_E = StAS(index_A, val_A, index_S, value_S, N, kN, norm_layer) - - if index_E.shape[1] != standard_index.shape[1]: - index_E, value_E = FillZeros(index_E, value_E, standard_index, kN) - - index_E, value_E = remove_self_loops(edge_index=index_E, edge_attr=value_E) - attrlist.append(value_E) - edge_weight = paddle.stack(attrlist, axis=1) - - return index_E, edge_weight, subgraphnode_pos - - -class GraphNetBlock(nn.Layer): - """Multi-Edge Interaction Network with residual connections.""" - - def __init__( - self, model_fn, output_dim, message_passing_aggregator, attention=False - ): - super().__init__() - 
self.edge_model = model_fn(output_dim, 384) - self.node_model = model_fn(output_dim, 256) - self.message_passing_aggregator = message_passing_aggregator - - def _update_edge_features(self, graph): - """Aggregates node features, and applies edge function.""" - senders = graph.edge_index[0] - receivers = graph.edge_index[1] - sender_features = paddle.index_select(x=graph.x, index=senders, axis=0) - receiver_features = paddle.index_select(x=graph.x, index=receivers, axis=0) - features = [sender_features, receiver_features, graph.edge_attr] - features = paddle.concat(features, axis=-1) - return self.edge_model(features) - - def unsorted_segment_operation(self, data, segment_ids, num_segments, operation): - """Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum. - - Args: - data (paddle.Tensor): A tensor whose segments are to be summed. - segment_ids (paddle.Tensor): The segment indices tensor. - num_segments (int): The number of segments. - operation (str): _description_ - - Returns: - paddle.Tensor: A tensor of same data type as the data argument. - """ - if not all([i in data.shape for i in segment_ids.shape]): - raise ValueError("segment_ids.shape should be a prefix of data.shape") - - if not (data.shape[0] == segment_ids.shape[0]): - raise ValueError("data.shape and segment_ids.shape should be equal") - - shape = [num_segments] + list(data.shape[1:]) - result_shape = paddle.zeros(shape) - if operation == "sum": - result = paddle.scatter(result_shape, segment_ids, data, overwrite=False) - return result - - def _update_node_features(self, node_features, edge_attr, edge_index): - """Aggregates edge features, and applies node function.""" - num_nodes = node_features.shape[0] - features = [node_features] - features.append( - self.unsorted_segment_operation( - edge_attr, - edge_index[1], - num_nodes, - operation=self.message_passing_aggregator, - ) - ) - features = paddle.concat(features, axis=-1) - return self.node_model(features) - - def forward(self, graph): - """Applies GraphNetBlock and returns updated MultiGraph.""" - new_edge_features = self._update_edge_features(graph) - new_node_features = self._update_node_features( - graph.x, graph.edge_attr, graph.edge_index - ) - - new_node_features += graph.x - new_edge_features += graph.edge_attr - latent_graph = pgl.Graph( - num_nodes=new_node_features.shape[0], edges=graph.edge_index - ) - latent_graph.x = new_node_features - latent_graph.edge_attr = new_edge_features - latent_graph.pos = graph.pos - latent_graph.edge_index = graph.edge_index - return latent_graph - - -class Processor(nn.Layer): - """This class takes the nodes with the most influential feature (sum of square) - The the chosen numbers of nodes in each ripple will establish connection(features and distances) with the most influential nodes and this connection will be learned - Then the result is add to output latent graph of encoder and the modified latent graph will be feed into original processor - - Args: - make_mlp (Callable): Function to make MLP. - output_dim (int): Number of dimension of output. - message_passing_steps (int): Message passing steps. - message_passing_aggregator (str): Message passing aggregator. - attention (bool, optional): Whether use attention. Defaults to False. - use_stochastic_message_passing (bool, optional): Whether use stochastic message passing. Defaults to False. 
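# A minimal, hedged sketch (toy sizes, a plain Linear standing in for the edge
# MLP) of the per-edge update in GraphNetBlock._update_edge_features above:
# sender features, receiver features and the current edge features are
# concatenated per edge before being passed through the edge model.
import paddle
import paddle.nn as nn

x = paddle.rand([5, 4])                                # 5 nodes, 4 features each
edge_index = paddle.to_tensor([[0, 1, 3], [2, 2, 4]])  # row 0: senders, row 1: receivers
edge_attr = paddle.rand([3, 4])                        # 3 edges, 4 features each
sender_features = paddle.index_select(x, index=edge_index[0], axis=0)
receiver_features = paddle.index_select(x, index=edge_index[1], axis=0)
features = paddle.concat([sender_features, receiver_features, edge_attr], axis=-1)  # [3, 12]
print(nn.Linear(12, 4)(features).shape)  # [3, 4]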
- """ - - # Each mesh can be coarsened to have no fewer points than this value - min_nodes = 2000 - - def __init__( - self, - make_mlp: Callable, - output_dim: int, - message_passing_steps: int, - message_passing_aggregator: str, - attention: bool = False, - use_stochastic_message_passing: bool = False, - ): - super().__init__() - self.use_stochastic_message_passing = use_stochastic_message_passing - self.graphnet_blocks = nn.LayerList() - self.cofe_edge_blocks = nn.LayerList() - self.pool_blocks = nn.LayerList() - self.latent_dim = output_dim - self.normalization = nn.LayerNorm(128) - for index in range(message_passing_steps): - self.graphnet_blocks.append( - GraphNetBlock( - model_fn=make_mlp, - output_dim=output_dim, - message_passing_aggregator=message_passing_aggregator, - attention=attention, - ) - ) - - self.pool_blocks.append( - GraphNetBlock( - model_fn=make_mlp, - output_dim=output_dim, - message_passing_aggregator=message_passing_aggregator, - attention=attention, - ) - ) - - def forward(self, latent_graph, speed, normalized_adj_mat=None): - x = [] - pos = [] - new = [] - for graphnet_block, pool in zip(self.graphnet_blocks, self.pool_blocks): - if latent_graph.x.shape[0] > self.min_nodes: - pre_matrix = graphnet_block(latent_graph) - x.append(pre_matrix) - cofe_graph = pool(pre_matrix) - coarsenodes = _get_corse_node(pre_matrix) - nodesfeatures = cofe_graph.x[coarsenodes] - if speed == "fast": - subedge_index, edge_weight, subpos = faster_graph_connectivity( - perm=coarsenodes, - edge_index=cofe_graph.edge_index, - edge_weight=cofe_graph.edge_attr, - score=cofe_graph.edge_attr[:, 0], - pos=cofe_graph.pos, - N=cofe_graph.x.shape[0], - norm_layer=self.normalization, - ) - elif speed == "norm": - subedge_index, edge_weight, subpos = norm_graph_connectivity( - perm=coarsenodes, - edge_index=cofe_graph.edge_index, - edge_weight=cofe_graph.edge_attr, - score=cofe_graph.edge_attr[:, 0], - pos=cofe_graph.pos, - N=cofe_graph.x.shape[0], - norm_layer=self.normalization, - ) - else: - raise ValueError( - f"Argument 'speed' should be 'sum' or 'fast', bot got {speed}." 
- ) - edge_weight = self.normalization(edge_weight) - pos.append(subpos) - latent_graph = pgl.Graph( - num_nodes=nodesfeatures.shape[0], edges=subedge_index - ) - latent_graph.x = nodesfeatures - latent_graph.edge_attr = edge_weight - latent_graph.pos = subpos - latent_graph.edge_index = subedge_index - else: - latent_graph = graphnet_block(latent_graph) - new.append(latent_graph) - if len(new): - x.append(new[-1]) - return x, pos - - -class FullyConnectedLayer(nn.Layer): - def __init__(self, input_dim: int, hidden_size: Tuple[int, ...]): - super(FullyConnectedLayer, self).__init__() - num_layers = len(hidden_size) - self._layers_ordered_dict = {} - self.in_dim = input_dim - for index, output_dim in enumerate(hidden_size): - self._layers_ordered_dict["linear_" + str(index)] = nn.Linear( - self.in_dim, output_dim - ) - if index < (num_layers - 1): - self._layers_ordered_dict["relu_" + str(index)] = nn.ReLU() - self.in_dim = output_dim - - self.layers = nn.LayerDict(self._layers_ordered_dict) - - def forward(self, input): - for key in self.layers: - layer = self.layers[key] - output = layer(input) - input = output - return input - - -class Encoder(nn.Layer): - """Encodes node and edge features into latent features.""" - - def __init__(self, input_dim, make_mlp, latent_dim): - super(Encoder, self).__init__() - self._make_mlp = make_mlp - self._latent_dim = latent_dim - self.node_model = self._make_mlp(latent_dim, input_dim=input_dim) - self.mesh_edge_model = self._make_mlp(latent_dim, input_dim=1) - - def forward(self, graph): - node_latents = self.node_model(graph.x) - edge_latent = self.mesh_edge_model(graph.edge_attr) - - graph.x = node_latents - graph.edge_attr = edge_latent - return graph - - -class Decoder(nn.Layer): - """Decodes node features from graph. - Encodes node and edge features into latent features. - """ - - def __init__(self, make_mlp, output_dim): - super(Decoder, self).__init__() - self.model = make_mlp(output_dim, 128) - - def forward(self, node_features): - return self.model(node_features) - - -class AMGNet(nn.Layer): - """A Multi-scale Graph neural Network model - based on Encoder-Process-Decoder structure for flow field prediction. - - https://doi.org/10.1080/09540091.2022.2131737 - - Code reference: https://github.com/baoshiaijhin/amgnet - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input", ). - output_keys (Tuple[str, ...]): Name of output keys, such as ("pred", ). - input_dim (int): Number of input dimension. - output_dim (int): Number of output dimension. - latent_dim (int): Number of hidden(feature) dimension. - num_layers (int): Number of layer(s). - message_passing_aggregator (Literal["sum"]): Message aggregator method in graph. - Only "sum" available now. - message_passing_steps (int): Message passing steps in graph. - speed (str): Whether use vanilla method or fast method for graph_connectivity - computation. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.AMGNet( - ... ("input", ), ("pred", ), 5, 3, 64, 2, "sum", 6, "norm", - ... 
) - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - input_dim: int, - output_dim: int, - latent_dim: int, - num_layers: int, - message_passing_aggregator: Literal["sum"], - message_passing_steps: int, - speed: Literal["norm", "fast"], - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self._latent_dim = latent_dim - self.speed = speed - self._output_dim = output_dim - self._num_layers = num_layers - - self.encoder = Encoder(input_dim, self._make_mlp, latent_dim=self._latent_dim) - self.processor = Processor( - make_mlp=self._make_mlp, - output_dim=self._latent_dim, - message_passing_steps=message_passing_steps, - message_passing_aggregator=message_passing_aggregator, - use_stochastic_message_passing=False, - ) - self.post_processor = self._make_mlp(self._latent_dim, 128) - self.decoder = Decoder( - make_mlp=functools.partial(self._make_mlp, layer_norm=False), - output_dim=self._output_dim, - ) - - def forward(self, x: Dict[str, "pgl.Graph"]) -> Dict[str, paddle.Tensor]: - graphs = x[self.input_keys[0]] - latent_graph = self.encoder(graphs) - x, p = self.processor(latent_graph, speed=self.speed) - node_features = self._spa_compute(x, p) - pred_field = self.decoder(node_features) - return {self.output_keys[0]: pred_field} - - def _make_mlp(self, output_dim: int, input_dim: int = 5, layer_norm: bool = True): - widths = (self._latent_dim,) * self._num_layers + (output_dim,) - network = FullyConnectedLayer(input_dim, widths) - if layer_norm: - network = nn.Sequential(network, nn.LayerNorm(normalized_shape=widths[-1])) - return network - - def _spa_compute(self, x: List["pgl.Graph"], p): - j = len(x) - 1 - node_features = x[j].x - - for k in range(1, j + 1): - pos = p[-k] - fine_nodes = x[-(k + 1)].pos - feature = _knn_interpolate(node_features, pos, fine_nodes) - node_features = x[-(k + 1)].x + feature - node_features = self.post_processor(node_features) - - return node_features +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
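# A minimal, hedged sketch of how AMGNet._make_mlp above assembles its MLP:
# with latent_dim=64, num_layers=2 and output_dim=3 the widths are (64, 64, 3),
# so FullyConnectedLayer stacks Linear(5->64), ReLU, Linear(64->64), ReLU,
# Linear(64->3), optionally followed by a LayerNorm over the last width.
# The import path assumes the module layout shown in this diff.
import paddle
from ppsci.arch.amgnet import FullyConnectedLayer

latent_dim, num_layers, output_dim, input_dim = 64, 2, 3, 5
widths = (latent_dim,) * num_layers + (output_dim,)  # (64, 64, 3)
mlp = FullyConnectedLayer(input_dim, widths)
print(mlp(paddle.rand([7, input_dim])).shape)  # [7, 3]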
+ +from __future__ import annotations + +import functools +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple + +import numpy as np +import paddle +import paddle.nn as nn +from typing_extensions import Literal + +try: + import pgl +except ModuleNotFoundError: + pass + +try: + import pyamg +except ModuleNotFoundError: + pass + +from paddle import sparse as pd_sparse +from scipy import sparse as sci_sparse + + +def _knn_interpolate( + features: paddle.Tensor, coarse_nodes: paddle.Tensor, fine_nodes: paddle.Tensor +) -> paddle.Tensor: + coarse_nodes_input = paddle.repeat_interleave( + coarse_nodes.unsqueeze(0), fine_nodes.shape[0], axis=0 + ) # [6684,352,2] + fine_nodes_input = paddle.repeat_interleave( + fine_nodes.unsqueeze(1), coarse_nodes.shape[0], axis=1 + ) # [6684,352,2] + dist_w = 1.0 / ( + paddle.norm(x=coarse_nodes_input - fine_nodes_input, p=2, axis=-1) + 1e-9 + ) # [6684,352] + knn_value, knn_index = paddle.topk(dist_w, k=3, largest=True) # [6684,3],[6684,3] + weight = knn_value.unsqueeze(-2) + features_input = features[knn_index] + output = paddle.bmm(weight, features_input).squeeze(-2) / paddle.sum( + knn_value, axis=-1, keepdim=True + ) + return output + + +def _get_corse_node(latent_graph: "pgl.Graph") -> paddle.Tensor: + row = latent_graph.edge_index[0].numpy() + col = latent_graph.edge_index[1].numpy() + data = paddle.ones(shape=[row.size]).numpy() + A = sci_sparse.coo_matrix((data, (row, col))).tocsr() + splitting = pyamg.classical.split.RS(A) + index = np.array(np.nonzero(splitting)) + b = paddle.to_tensor(index) + b = paddle.squeeze(b) + return b + + +def StAS( + index_A: paddle.Tensor, + value_A: paddle.Tensor, + index_S: paddle.Tensor, + value_S: paddle.Tensor, + N: int, + kN: int, + norm_layer: nn.Layer, +) -> Tuple[paddle.Tensor, paddle.Tensor]: + """ASAP: Adaptive Structure Aware Pooling for Learning Hierarchical Graph Representations. + Ranjan, E., Sanyal, S., Talukdar, P. (2020, April). AAAI(2020) + + Args: + index_A (paddle.Tensor): Indices of sparse matrix A. + value_A (paddle.Tensor): Values of sparse matrix A. + index_S (paddle.Tensor): Indices of sparse matrix S. + value_S (paddle.Tensor): Values of sparse matrix S. + N (int): Dimension N. + kN (int): Dimension kN. + norm_layer (nn.Layer): Normalization layer. + + Returns: + Tuple[paddle.Tensor, paddle.Tensor]: Indices and values of result matrix E. 
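# A minimal, hedged sketch (toy sizes, scipy only) of the product that the
# StAS helper defined here evaluates: given a sparse adjacency A (N x N) and a
# selection/pooling matrix S (N x kN), it returns the coarsened matrix
# E = S^T (A S) in COO form.
from scipy import sparse as sci_sparse

N, kN = 4, 2
A = sci_sparse.random(N, N, density=0.5, format="coo", random_state=0)
S = sci_sparse.random(N, kN, density=0.5, format="coo", random_state=1)
E = (S.T @ (A @ S)).tocoo()
print(E.shape, E.row, E.col, E.data)  # (2, 2) plus its COO indices and values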
+ """ + sp_x = pd_sparse.sparse_coo_tensor(index_A, value_A) + sp_x = pd_sparse.coalesce(sp_x) + index_A = sp_x.indices() + value_A = sp_x.values() + + sp_s = pd_sparse.sparse_coo_tensor(index_S, value_S) + sp_s = pd_sparse.coalesce(sp_s) + index_S = sp_s.indices() + value_S = sp_s.values() + + indices_A = index_A.numpy() + values_A = value_A.numpy() + coo_A = sci_sparse.coo_matrix( + (values_A, (indices_A[0], indices_A[1])), shape=(N, N) + ) + + indices_S = index_S.numpy() + values_S = value_S.numpy() + coo_S = sci_sparse.coo_matrix( + (values_S, (indices_S[0], indices_S[1])), shape=(N, kN) + ) + + ans = coo_A.dot(coo_S).tocoo() + row = paddle.to_tensor(ans.row) + col = paddle.to_tensor(ans.col) + index_B = paddle.stack([row, col], axis=0) + value_B = paddle.to_tensor(ans.data) + + indices_A = index_S + values_A = value_S + coo_A = pd_sparse.sparse_coo_tensor(indices_A, values_A) + out = pd_sparse.transpose(coo_A, [1, 0]) + index_St = out.indices() + value_St = out.values() + + sp_x = pd_sparse.sparse_coo_tensor(index_B, value_B) + sp_x = pd_sparse.coalesce(sp_x) + index_B = sp_x.indices() + value_B = sp_x.values() + + indices_A = index_St.numpy() + values_A = value_St.numpy() + coo_A = sci_sparse.coo_matrix( + (values_A, (indices_A[0], indices_A[1])), shape=(kN, N) + ) + + indices_S = index_B.numpy() + values_S = value_B.numpy() + coo_S = sci_sparse.coo_matrix( + (values_S, (indices_S[0], indices_S[1])), shape=(N, kN) + ) + + ans = coo_A.dot(coo_S).tocoo() + row = paddle.to_tensor(ans.row) + col = paddle.to_tensor(ans.col) + index_E = paddle.stack([row, col], axis=0) + value_E = paddle.to_tensor(ans.data) + + # index_E排序 + sp_x = pd_sparse.sparse_coo_tensor(index_E, value_E) + sp_x = pd_sparse.coalesce(sp_x) + index_E = sp_x.indices() + value_E = sp_x.values() + + return index_E.astype("int64"), value_E + + +def FillZeros( + index_E: paddle.Tensor, value_E: paddle.Tensor, standard_index, kN: int +) -> Tuple[paddle.Tensor, paddle.Tensor]: + shape = [kN, kN] + row_E = index_E[0] + col_E = index_E[1] + DenseMatrix_E = sci_sparse.coo_matrix( + (paddle.ones_like(value_E), (row_E, col_E)), shape + ).toarray() + + row_S = standard_index[0] + col_S = standard_index[1] + DenseMatrix_S = sci_sparse.coo_matrix( + (paddle.ones([row_S.shape[0]]), (row_S, col_S)), shape + ).toarray() + + diff = DenseMatrix_S - DenseMatrix_E + rows, cols = np.nonzero(diff) + rows = paddle.to_tensor(rows, dtype="int32") + cols = paddle.to_tensor(cols, dtype="int32") + index = paddle.stack([rows, cols], axis=0) + value = paddle.zeros([index.shape[1]]) + index_E = paddle.concat([index_E, index], axis=1) + value_E = paddle.concat([value_E, value], axis=-1) + + sp_x = pd_sparse.sparse_coo_tensor(index_E, value_E) + sp_x = pd_sparse.coalesce(sp_x) + index_E = sp_x.indices() + value_E = sp_x.values() + + return index_E.astype("int64"), value_E + + +def remove_self_loops( + edge_index: paddle.Tensor, edge_attr: Optional[paddle.Tensor] = None +) -> Tuple[paddle.Tensor, Optional[paddle.Tensor]]: + # remove self-loop + mask = edge_index[0] != edge_index[1] + mask = mask.tolist() + edge_index = edge_index.t() + edge_index = edge_index[mask] + edge_index = edge_index.t() + if edge_attr is None: + return edge_index, None + else: + return edge_index, edge_attr[mask] + + +def faster_graph_connectivity(perm, edge_index, edge_weight, score, pos, N, norm_layer): + """ + Adapted from Ranjan, E., Sanyal, S., Talukdar, P. (2020, April). Asap: Adaptive structure aware pooling + for learning hierarchical graph representations. 
AAAI(2020) + """ + + kN = perm.shape[0] + perm2 = perm.reshape((-1, 1)) + mask = (edge_index[0] == perm2).sum(axis=0).astype("bool") + + S0 = edge_index[1][mask].reshape((1, -1)) + S1 = edge_index[0][mask].reshape((1, -1)) + index_S = paddle.concat([S0, S1], axis=0) + value_S = score[mask].detach().squeeze() + n_idx = paddle.zeros([N], dtype=paddle.int64) + n_idx[perm] = paddle.arange(perm.shape[0]) + index_S = index_S.astype("int64") + index_S[1] = n_idx[index_S[1]] + subgraphnode_pos = pos[perm] + index_A = edge_index.clone() + if edge_weight is None: + value_A = value_S.new_ones(edge_index[0].shape[0]) + else: + value_A = edge_weight.clone() + + value_A = paddle.squeeze(value_A) + model_1 = nn.Sequential( + ("l1", nn.Linear(128, 256)), + ("act1", nn.ReLU()), + ("l2", nn.Linear(256, 256)), + ("act2", nn.ReLU()), + ("l4", nn.Linear(256, 128)), + ("act4", nn.ReLU()), + ("l5", nn.Linear(128, 1)), + ) + model_2 = nn.Sequential( + ("l1", nn.Linear(1, 64)), + ("act1", nn.ReLU()), + ("l2", nn.Linear(64, 128)), + ("act2", nn.ReLU()), + ("l4", nn.Linear(128, 128)), + ) + + val_A = model_1(value_A) + val_A = paddle.squeeze(val_A) + index_E, value_E = StAS(index_A, val_A, index_S, value_S, N, kN, norm_layer) + value_E = paddle.reshape(value_E, shape=[-1, 1]) + edge_weight = model_2(value_E) + + return index_E, edge_weight, subgraphnode_pos + + +def norm_graph_connectivity(perm, edge_index, edge_weight, score, pos, N, norm_layer): + """ + Come from Ranjan, E., Sanyal, S., Talukdar, P. (2020, April). Asap: Adaptive + structure aware pooling for learning hierarchical graph representations. AAAI(2020) + """ + + kN = perm.shape[0] + perm2 = perm.reshape((-1, 1)) + mask = (edge_index[0] == perm2).sum(axis=0).astype("bool") + S0 = edge_index[1][mask].reshape((1, -1)) + S1 = edge_index[0][mask].reshape((1, -1)) + + index_S = paddle.concat([S0, S1], axis=0) + value_S = score[mask].detach().squeeze() + n_idx = paddle.zeros([N], dtype=paddle.int64) + n_idx[perm] = paddle.arange(perm.shape[0]) + + index_S = index_S.astype("int64") + index_S[1] = n_idx[index_S[1]] + subgraphnode_pos = pos[perm] + index_A = edge_index.clone() + + if edge_weight is None: + value_A = value_S.new_ones(edge_index[0].shape[0]) + else: + value_A = edge_weight.clone() + + value_A = paddle.squeeze(value_A) + eps_mask = (value_S == 0).astype(paddle.get_default_dtype()) + value_S = paddle.full_like(value_S, 1e-4) * eps_mask + (1 - eps_mask) * value_S + attrlist = [] + standard_index, _ = StAS( + index_A, + paddle.ones_like(value_A[:, 0]), + index_S, + paddle.ones_like(value_S), + N, + kN, + norm_layer, + ) + for i in range(128): + mask = (value_A[:, i] == 0).astype(paddle.get_default_dtype()) + val_A = paddle.full_like(mask, 1e-4) * mask + (1 - mask) * value_A[:, i] + index_E, value_E = StAS(index_A, val_A, index_S, value_S, N, kN, norm_layer) + + if index_E.shape[1] != standard_index.shape[1]: + index_E, value_E = FillZeros(index_E, value_E, standard_index, kN) + + index_E, value_E = remove_self_loops(edge_index=index_E, edge_attr=value_E) + attrlist.append(value_E) + edge_weight = paddle.stack(attrlist, axis=1) + + return index_E, edge_weight, subgraphnode_pos + + +class GraphNetBlock(nn.Layer): + """Multi-Edge Interaction Network with residual connections.""" + + def __init__( + self, model_fn, output_dim, message_passing_aggregator, attention=False + ): + super().__init__() + self.edge_model = model_fn(output_dim, 384) + self.node_model = model_fn(output_dim, 256) + self.message_passing_aggregator = message_passing_aggregator + + def 
_update_edge_features(self, graph): + """Aggregates node features, and applies edge function.""" + senders = graph.edge_index[0] + receivers = graph.edge_index[1] + sender_features = paddle.index_select(x=graph.x, index=senders, axis=0) + receiver_features = paddle.index_select(x=graph.x, index=receivers, axis=0) + features = [sender_features, receiver_features, graph.edge_attr] + features = paddle.concat(features, axis=-1) + return self.edge_model(features) + + def unsorted_segment_operation(self, data, segment_ids, num_segments, operation): + """Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum. + + Args: + data (paddle.Tensor): A tensor whose segments are to be summed. + segment_ids (paddle.Tensor): The segment indices tensor. + num_segments (int): The number of segments. + operation (str): _description_ + + Returns: + paddle.Tensor: A tensor of same data type as the data argument. + """ + if not all([i in data.shape for i in segment_ids.shape]): + raise ValueError("segment_ids.shape should be a prefix of data.shape") + + if not (data.shape[0] == segment_ids.shape[0]): + raise ValueError("data.shape and segment_ids.shape should be equal") + + shape = [num_segments] + list(data.shape[1:]) + result_shape = paddle.zeros(shape) + if operation == "sum": + result = paddle.scatter(result_shape, segment_ids, data, overwrite=False) + return result + + def _update_node_features(self, node_features, edge_attr, edge_index): + """Aggregates edge features, and applies node function.""" + num_nodes = node_features.shape[0] + features = [node_features] + features.append( + self.unsorted_segment_operation( + edge_attr, + edge_index[1], + num_nodes, + operation=self.message_passing_aggregator, + ) + ) + features = paddle.concat(features, axis=-1) + return self.node_model(features) + + def forward(self, graph): + """Applies GraphNetBlock and returns updated MultiGraph.""" + new_edge_features = self._update_edge_features(graph) + new_node_features = self._update_node_features( + graph.x, graph.edge_attr, graph.edge_index + ) + + new_node_features += graph.x + new_edge_features += graph.edge_attr + latent_graph = pgl.Graph( + num_nodes=new_node_features.shape[0], edges=graph.edge_index + ) + latent_graph.x = new_node_features + latent_graph.edge_attr = new_edge_features + latent_graph.pos = graph.pos + latent_graph.edge_index = graph.edge_index + return latent_graph + + +class Processor(nn.Layer): + """This class takes the nodes with the most influential feature (sum of square) + The the chosen numbers of nodes in each ripple will establish connection(features and distances) with the most influential nodes and this connection will be learned + Then the result is add to output latent graph of encoder and the modified latent graph will be feed into original processor + + Args: + make_mlp (Callable): Function to make MLP. + output_dim (int): Number of dimension of output. + message_passing_steps (int): Message passing steps. + message_passing_aggregator (str): Message passing aggregator. + attention (bool, optional): Whether use attention. Defaults to False. + use_stochastic_message_passing (bool, optional): Whether use stochastic message passing. Defaults to False. 
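# A minimal, hedged sketch of the "sum" aggregation in
# unsorted_segment_operation above: paddle.scatter with overwrite=False
# accumulates each edge feature into its receiver node, analogous to
# tf.unsorted_segment_sum.
import paddle

edge_attr = paddle.to_tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
receivers = paddle.to_tensor([0, 2, 0], dtype="int64")
num_nodes = 3
out = paddle.scatter(paddle.zeros([num_nodes, 2]), receivers, edge_attr, overwrite=False)
print(out)  # node 0 -> [4., 4.], node 1 -> [0., 0.], node 2 -> [2., 2.]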
+ """ + + # Each mesh can be coarsened to have no fewer points than this value + min_nodes = 2000 + + def __init__( + self, + make_mlp: Callable, + output_dim: int, + message_passing_steps: int, + message_passing_aggregator: str, + attention: bool = False, + use_stochastic_message_passing: bool = False, + ): + super().__init__() + self.use_stochastic_message_passing = use_stochastic_message_passing + self.graphnet_blocks = nn.LayerList() + self.cofe_edge_blocks = nn.LayerList() + self.pool_blocks = nn.LayerList() + self.latent_dim = output_dim + self.normalization = nn.LayerNorm(128) + for index in range(message_passing_steps): + self.graphnet_blocks.append( + GraphNetBlock( + model_fn=make_mlp, + output_dim=output_dim, + message_passing_aggregator=message_passing_aggregator, + attention=attention, + ) + ) + + self.pool_blocks.append( + GraphNetBlock( + model_fn=make_mlp, + output_dim=output_dim, + message_passing_aggregator=message_passing_aggregator, + attention=attention, + ) + ) + + def forward(self, latent_graph, speed, normalized_adj_mat=None): + x = [] + pos = [] + new = [] + for graphnet_block, pool in zip(self.graphnet_blocks, self.pool_blocks): + if latent_graph.x.shape[0] > self.min_nodes: + pre_matrix = graphnet_block(latent_graph) + x.append(pre_matrix) + cofe_graph = pool(pre_matrix) + coarsenodes = _get_corse_node(pre_matrix) + nodesfeatures = cofe_graph.x[coarsenodes] + if speed == "fast": + subedge_index, edge_weight, subpos = faster_graph_connectivity( + perm=coarsenodes, + edge_index=cofe_graph.edge_index, + edge_weight=cofe_graph.edge_attr, + score=cofe_graph.edge_attr[:, 0], + pos=cofe_graph.pos, + N=cofe_graph.x.shape[0], + norm_layer=self.normalization, + ) + elif speed == "norm": + subedge_index, edge_weight, subpos = norm_graph_connectivity( + perm=coarsenodes, + edge_index=cofe_graph.edge_index, + edge_weight=cofe_graph.edge_attr, + score=cofe_graph.edge_attr[:, 0], + pos=cofe_graph.pos, + N=cofe_graph.x.shape[0], + norm_layer=self.normalization, + ) + else: + raise ValueError( + f"Argument 'speed' should be 'sum' or 'fast', bot got {speed}." 
+ ) + edge_weight = self.normalization(edge_weight) + pos.append(subpos) + latent_graph = pgl.Graph( + num_nodes=nodesfeatures.shape[0], edges=subedge_index + ) + latent_graph.x = nodesfeatures + latent_graph.edge_attr = edge_weight + latent_graph.pos = subpos + latent_graph.edge_index = subedge_index + else: + latent_graph = graphnet_block(latent_graph) + new.append(latent_graph) + if len(new): + x.append(new[-1]) + return x, pos + + +class FullyConnectedLayer(nn.Layer): + def __init__(self, input_dim: int, hidden_size: Tuple[int, ...]): + super(FullyConnectedLayer, self).__init__() + num_layers = len(hidden_size) + self._layers_ordered_dict = {} + self.in_dim = input_dim + for index, output_dim in enumerate(hidden_size): + self._layers_ordered_dict["linear_" + str(index)] = nn.Linear( + self.in_dim, output_dim + ) + if index < (num_layers - 1): + self._layers_ordered_dict["relu_" + str(index)] = nn.ReLU() + self.in_dim = output_dim + + self.layers = nn.LayerDict(self._layers_ordered_dict) + + def forward(self, input): + for key in self.layers: + layer = self.layers[key] + output = layer(input) + input = output + return input + + +class Encoder(nn.Layer): + """Encodes node and edge features into latent features.""" + + def __init__(self, input_dim, make_mlp, latent_dim): + super(Encoder, self).__init__() + self._make_mlp = make_mlp + self._latent_dim = latent_dim + self.node_model = self._make_mlp(latent_dim, input_dim=input_dim) + self.mesh_edge_model = self._make_mlp(latent_dim, input_dim=1) + + def forward(self, graph): + node_latents = self.node_model(graph.x) + edge_latent = self.mesh_edge_model(graph.edge_attr) + + graph.x = node_latents + graph.edge_attr = edge_latent + return graph + + +class Decoder(nn.Layer): + """Decodes node features from graph. + Encodes node and edge features into latent features. + """ + + def __init__(self, make_mlp, output_dim): + super(Decoder, self).__init__() + self.model = make_mlp(output_dim, 128) + + def forward(self, node_features): + return self.model(node_features) + + +class AMGNet(nn.Layer): + """A Multi-scale Graph neural Network model + based on Encoder-Process-Decoder structure for flow field prediction. + + https://doi.org/10.1080/09540091.2022.2131737 + + Code reference: https://github.com/baoshiaijhin/amgnet + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input", ). + output_keys (Tuple[str, ...]): Name of output keys, such as ("pred", ). + input_dim (int): Number of input dimension. + output_dim (int): Number of output dimension. + latent_dim (int): Number of hidden(feature) dimension. + num_layers (int): Number of layer(s). + message_passing_aggregator (Literal["sum"]): Message aggregator method in graph. + Only "sum" available now. + message_passing_steps (int): Message passing steps in graph. + speed (str): Whether use vanilla method or fast method for graph_connectivity + computation. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.AMGNet( + ... ("input", ), ("pred", ), 5, 3, 64, 2, "sum", 6, "norm", + ... 
) + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + input_dim: int, + output_dim: int, + latent_dim: int, + num_layers: int, + message_passing_aggregator: Literal["sum"], + message_passing_steps: int, + speed: Literal["norm", "fast"], + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self._latent_dim = latent_dim + self.speed = speed + self._output_dim = output_dim + self._num_layers = num_layers + + self.encoder = Encoder(input_dim, self._make_mlp, latent_dim=self._latent_dim) + self.processor = Processor( + make_mlp=self._make_mlp, + output_dim=self._latent_dim, + message_passing_steps=message_passing_steps, + message_passing_aggregator=message_passing_aggregator, + use_stochastic_message_passing=False, + ) + self.post_processor = self._make_mlp(self._latent_dim, 128) + self.decoder = Decoder( + make_mlp=functools.partial(self._make_mlp, layer_norm=False), + output_dim=self._output_dim, + ) + + def forward(self, x: Dict[str, "pgl.Graph"]) -> Dict[str, paddle.Tensor]: + graphs = x[self.input_keys[0]] + latent_graph = self.encoder(graphs) + x, p = self.processor(latent_graph, speed=self.speed) + node_features = self._spa_compute(x, p) + pred_field = self.decoder(node_features) + return {self.output_keys[0]: pred_field} + + def _make_mlp(self, output_dim: int, input_dim: int = 5, layer_norm: bool = True): + widths = (self._latent_dim,) * self._num_layers + (output_dim,) + network = FullyConnectedLayer(input_dim, widths) + if layer_norm: + network = nn.Sequential(network, nn.LayerNorm(normalized_shape=widths[-1])) + return network + + def _spa_compute(self, x: List["pgl.Graph"], p): + j = len(x) - 1 + node_features = x[j].x + + for k in range(1, j + 1): + pos = p[-k] + fine_nodes = x[-(k + 1)].pos + feature = _knn_interpolate(node_features, pos, fine_nodes) + node_features = x[-(k + 1)].x + feature + node_features = self.post_processor(node_features) + + return node_features diff --git a/ppsci/arch/base.py b/ppsci/arch/base.py index 5b51efec22..2fdf787fc7 100644 --- a/ppsci/arch/base.py +++ b/ppsci/arch/base.py @@ -1,279 +1,279 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import Tuple - -import numpy as np -import paddle -from paddle import nn - -from ppsci.utils import logger - - -class Arch(nn.Layer): - """Base class for Network.""" - - input_keys: Tuple[str, ...] - output_keys: Tuple[str, ...] 
- - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._input_transform: Callable[ - [Dict[str, paddle.Tensor]], Dict[str, paddle.Tensor] - ] = None - - self._output_transform: Callable[ - [Dict[str, paddle.Tensor], Dict[str, paddle.Tensor]], - Dict[str, paddle.Tensor], - ] = None - - def forward(self, *args, **kwargs): - raise NotImplementedError("Arch.forward is not implemented") - - @property - def num_params(self) -> int: - """Return number of parameters within network. - - Returns: - int: Number of parameters. - """ - num = 0 - for name, param in self.named_parameters(): - if hasattr(param, "shape"): - num += np.prod(list(param.shape), dtype="int") - else: - logger.warning(f"{name} has no attribute 'shape'") - return num - - @property - def num_buffers(self) -> int: - """Return number of buffers within network. - - Returns: - int: Number of buffers. - """ - num = 0 - for name, buffer in self.named_buffers(): - if hasattr(buffer, "shape"): - num += np.prod(list(buffer.shape), dtype="int") - else: - logger.warning(f"{name} has no attribute 'shape'") - return num - - @staticmethod - def concat_to_tensor( - data_dict: Dict[str, paddle.Tensor], keys: Tuple[str, ...], axis=-1 - ) -> Tuple[paddle.Tensor, ...]: - """Concatenate tensors from dict in the order of given keys. - - Args: - data_dict (Dict[str, paddle.Tensor]): Dict contains tensor. - keys (Tuple[str, ...]): Keys tensor fetched from. - axis (int, optional): Axis concatenate at. Defaults to -1. - - Returns: - Tuple[paddle.Tensor, ...]: Concatenated tensor. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.Arch() - >>> # fetch one tensor - >>> out = model.concat_to_tensor({'x':paddle.rand([64, 64, 1])}, ('x',)) - >>> print(out.dtype, out.shape) - paddle.float32 [64, 64, 1] - >>> # fetch more tensors - >>> out = model.concat_to_tensor( - ... {'x1':paddle.rand([64, 64, 1]), 'x2':paddle.rand([64, 64, 1])}, - ... ('x1', 'x2'), - ... axis=2) - >>> print(out.dtype, out.shape) - paddle.float32 [64, 64, 2] - - """ - if len(keys) == 1: - return data_dict[keys[0]] - data = [data_dict[key] for key in keys] - return paddle.concat(data, axis) - - @staticmethod - def split_to_dict( - data_tensor: paddle.Tensor, keys: Tuple[str, ...], axis=-1 - ) -> Dict[str, paddle.Tensor]: - """Split tensor and wrap into a dict by given keys. - - Args: - data_tensor (paddle.Tensor): Tensor to be split. - keys (Tuple[str, ...]): Keys tensor mapping to. - axis (int, optional): Axis split at. Defaults to -1. - - Returns: - Dict[str, paddle.Tensor]: Dict contains tensor. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.Arch() - >>> # split one tensor - >>> out = model.split_to_dict(paddle.rand([64, 64, 1]), ('x',)) - >>> for k, v in out.items(): - ... print(f"{k} {v.dtype} {v.shape}") - x paddle.float32 [64, 64, 1] - >>> # split more tensors - >>> out = model.split_to_dict(paddle.rand([64, 64, 2]), ('x1', 'x2'), axis=2) - >>> for k, v in out.items(): - ... print(f"{k} {v.dtype} {v.shape}") - x1 paddle.float32 [64, 64, 1] - x2 paddle.float32 [64, 64, 1] - - """ - if len(keys) == 1: - return {keys[0]: data_tensor} - data = paddle.split(data_tensor, len(keys), axis=axis) - return {key: data[i] for i, key in enumerate(keys)} - - def register_input_transform( - self, - transform: Callable[[Dict[str, paddle.Tensor]], Dict[str, paddle.Tensor]], - ): - """Register input transform. 
- - Args: - transform (Callable[[Dict[str, paddle.Tensor]], Dict[str, paddle.Tensor]]): - Input transform of network, receive a single tensor dict and return a single tensor dict. - - Examples: - >>> import ppsci - >>> def transform_in(in_): - ... x = in_["x"] - ... # transform input - ... x_ = 2.0 * x - ... input_trans = {"2x": x_} - ... return input_trans - >>> # `MLP` inherits from `Arch` - >>> model = ppsci.arch.MLP( - ... input_keys=("2x",), - ... output_keys=("y",), - ... num_layers=5, - ... hidden_size=32) - >>> model.register_input_transform(transform_in) - >>> out = model({"x":paddle.rand([64, 64, 1])}) - >>> for k, v in out.items(): - ... print(f"{k} {v.dtype} {v.shape}") - y paddle.float32 [64, 64, 1] - - """ - self._input_transform = transform - - def register_output_transform( - self, - transform: Callable[ - [Dict[str, paddle.Tensor], Dict[str, paddle.Tensor]], - Dict[str, paddle.Tensor], - ], - ): - """Register output transform. - - Args: - transform (Callable[[Dict[str, paddle.Tensor], Dict[str, paddle.Tensor]], Dict[str, paddle.Tensor]]): - Output transform of network, receive two single tensor dict(raw input - and raw output) and return a single tensor dict(transformed output). - - Examples: - >>> import ppsci - >>> def transform_out(in_, out): - ... x = in_["x"] - ... y = out["y"] - ... u = 2.0 * x * y - ... output_trans = {"u": u} - ... return output_trans - >>> # `MLP` inherits from `Arch` - >>> model = ppsci.arch.MLP( - ... input_keys=("x",), - ... output_keys=("y",), - ... num_layers=5, - ... hidden_size=32) - >>> model.register_output_transform(transform_out) - >>> out = model({"x":paddle.rand([64, 64, 1])}) - >>> for k, v in out.items(): - ... print(f"{k} {v.dtype} {v.shape}") - u paddle.float32 [64, 64, 1] - - """ - self._output_transform = transform - - def freeze(self): - """Freeze all parameters. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.Arch() - >>> # freeze all parameters and make model `eval` - >>> model.freeze() - >>> assert not model.training - >>> for p in model.parameters(): - ... assert p.stop_gradient - - """ - for param in self.parameters(): - param.stop_gradient = True - - self.eval() - - def unfreeze(self): - """Unfreeze all parameters. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.Arch() - >>> # unfreeze all parameters and make model `train` - >>> model.unfreeze() - >>> assert model.training - >>> for p in model.parameters(): - ... assert not p.stop_gradient - - """ - for param in self.parameters(): - param.stop_gradient = False - - self.train() - - def __str__(self): - num_fc = 0 - num_conv = 0 - num_bn = 0 - for layer in self.sublayers(include_self=True): - if isinstance(layer, nn.Linear): - num_fc += 1 - elif isinstance(layer, (nn.Conv2D, nn.Conv3D, nn.Conv1D)): - num_conv += 1 - elif isinstance(layer, (nn.BatchNorm, nn.BatchNorm2D, nn.BatchNorm3D)): - num_bn += 1 - - return ", ".join( - [ - self.__class__.__name__, - f"input_keys = {self.input_keys}", - f"output_keys = {self.output_keys}", - f"num_fc = {num_fc}", - f"num_conv = {num_conv}", - f"num_bn = {num_bn}", - f"num_params = {self.num_params}", - f"num_buffers = {self.num_buffers}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import Tuple + +import numpy as np +import paddle +from paddle import nn + +from ppsci.utils import logger + + +class Arch(nn.Layer): + """Base class for Network.""" + + input_keys: Tuple[str, ...] + output_keys: Tuple[str, ...] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._input_transform: Callable[ + [Dict[str, paddle.Tensor]], Dict[str, paddle.Tensor] + ] = None + + self._output_transform: Callable[ + [Dict[str, paddle.Tensor], Dict[str, paddle.Tensor]], + Dict[str, paddle.Tensor], + ] = None + + def forward(self, *args, **kwargs): + raise NotImplementedError("Arch.forward is not implemented") + + @property + def num_params(self) -> int: + """Return number of parameters within network. + + Returns: + int: Number of parameters. + """ + num = 0 + for name, param in self.named_parameters(): + if hasattr(param, "shape"): + num += np.prod(list(param.shape), dtype="int") + else: + logger.warning(f"{name} has no attribute 'shape'") + return num + + @property + def num_buffers(self) -> int: + """Return number of buffers within network. + + Returns: + int: Number of buffers. + """ + num = 0 + for name, buffer in self.named_buffers(): + if hasattr(buffer, "shape"): + num += np.prod(list(buffer.shape), dtype="int") + else: + logger.warning(f"{name} has no attribute 'shape'") + return num + + @staticmethod + def concat_to_tensor( + data_dict: Dict[str, paddle.Tensor], keys: Tuple[str, ...], axis=-1 + ) -> Tuple[paddle.Tensor, ...]: + """Concatenate tensors from dict in the order of given keys. + + Args: + data_dict (Dict[str, paddle.Tensor]): Dict contains tensor. + keys (Tuple[str, ...]): Keys tensor fetched from. + axis (int, optional): Axis concatenate at. Defaults to -1. + + Returns: + Tuple[paddle.Tensor, ...]: Concatenated tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.Arch() + >>> # fetch one tensor + >>> out = model.concat_to_tensor({'x':paddle.rand([64, 64, 1])}, ('x',)) + >>> print(out.dtype, out.shape) + paddle.float32 [64, 64, 1] + >>> # fetch more tensors + >>> out = model.concat_to_tensor( + ... {'x1':paddle.rand([64, 64, 1]), 'x2':paddle.rand([64, 64, 1])}, + ... ('x1', 'x2'), + ... axis=2) + >>> print(out.dtype, out.shape) + paddle.float32 [64, 64, 2] + + """ + if len(keys) == 1: + return data_dict[keys[0]] + data = [data_dict[key] for key in keys] + return paddle.concat(data, axis) + + @staticmethod + def split_to_dict( + data_tensor: paddle.Tensor, keys: Tuple[str, ...], axis=-1 + ) -> Dict[str, paddle.Tensor]: + """Split tensor and wrap into a dict by given keys. + + Args: + data_tensor (paddle.Tensor): Tensor to be split. + keys (Tuple[str, ...]): Keys tensor mapping to. + axis (int, optional): Axis split at. Defaults to -1. + + Returns: + Dict[str, paddle.Tensor]: Dict contains tensor. 
+ + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.Arch() + >>> # split one tensor + >>> out = model.split_to_dict(paddle.rand([64, 64, 1]), ('x',)) + >>> for k, v in out.items(): + ... print(f"{k} {v.dtype} {v.shape}") + x paddle.float32 [64, 64, 1] + >>> # split more tensors + >>> out = model.split_to_dict(paddle.rand([64, 64, 2]), ('x1', 'x2'), axis=2) + >>> for k, v in out.items(): + ... print(f"{k} {v.dtype} {v.shape}") + x1 paddle.float32 [64, 64, 1] + x2 paddle.float32 [64, 64, 1] + + """ + if len(keys) == 1: + return {keys[0]: data_tensor} + data = paddle.split(data_tensor, len(keys), axis=axis) + return {key: data[i] for i, key in enumerate(keys)} + + def register_input_transform( + self, + transform: Callable[[Dict[str, paddle.Tensor]], Dict[str, paddle.Tensor]], + ): + """Register input transform. + + Args: + transform (Callable[[Dict[str, paddle.Tensor]], Dict[str, paddle.Tensor]]): + Input transform of network, receive a single tensor dict and return a single tensor dict. + + Examples: + >>> import ppsci + >>> def transform_in(in_): + ... x = in_["x"] + ... # transform input + ... x_ = 2.0 * x + ... input_trans = {"2x": x_} + ... return input_trans + >>> # `MLP` inherits from `Arch` + >>> model = ppsci.arch.MLP( + ... input_keys=("2x",), + ... output_keys=("y",), + ... num_layers=5, + ... hidden_size=32) + >>> model.register_input_transform(transform_in) + >>> out = model({"x":paddle.rand([64, 64, 1])}) + >>> for k, v in out.items(): + ... print(f"{k} {v.dtype} {v.shape}") + y paddle.float32 [64, 64, 1] + + """ + self._input_transform = transform + + def register_output_transform( + self, + transform: Callable[ + [Dict[str, paddle.Tensor], Dict[str, paddle.Tensor]], + Dict[str, paddle.Tensor], + ], + ): + """Register output transform. + + Args: + transform (Callable[[Dict[str, paddle.Tensor], Dict[str, paddle.Tensor]], Dict[str, paddle.Tensor]]): + Output transform of network, receive two single tensor dict(raw input + and raw output) and return a single tensor dict(transformed output). + + Examples: + >>> import ppsci + >>> def transform_out(in_, out): + ... x = in_["x"] + ... y = out["y"] + ... u = 2.0 * x * y + ... output_trans = {"u": u} + ... return output_trans + >>> # `MLP` inherits from `Arch` + >>> model = ppsci.arch.MLP( + ... input_keys=("x",), + ... output_keys=("y",), + ... num_layers=5, + ... hidden_size=32) + >>> model.register_output_transform(transform_out) + >>> out = model({"x":paddle.rand([64, 64, 1])}) + >>> for k, v in out.items(): + ... print(f"{k} {v.dtype} {v.shape}") + u paddle.float32 [64, 64, 1] + + """ + self._output_transform = transform + + def freeze(self): + """Freeze all parameters. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.Arch() + >>> # freeze all parameters and make model `eval` + >>> model.freeze() + >>> assert not model.training + >>> for p in model.parameters(): + ... assert p.stop_gradient + + """ + for param in self.parameters(): + param.stop_gradient = True + + self.eval() + + def unfreeze(self): + """Unfreeze all parameters. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.Arch() + >>> # unfreeze all parameters and make model `train` + >>> model.unfreeze() + >>> assert model.training + >>> for p in model.parameters(): + ... 
assert not p.stop_gradient + + """ + for param in self.parameters(): + param.stop_gradient = False + + self.train() + + def __str__(self): + num_fc = 0 + num_conv = 0 + num_bn = 0 + for layer in self.sublayers(include_self=True): + if isinstance(layer, nn.Linear): + num_fc += 1 + elif isinstance(layer, (nn.Conv2D, nn.Conv3D, nn.Conv1D)): + num_conv += 1 + elif isinstance(layer, (nn.BatchNorm, nn.BatchNorm2D, nn.BatchNorm3D)): + num_bn += 1 + + return ", ".join( + [ + self.__class__.__name__, + f"input_keys = {self.input_keys}", + f"output_keys = {self.output_keys}", + f"num_fc = {num_fc}", + f"num_conv = {num_conv}", + f"num_bn = {num_bn}", + f"num_params = {self.num_params}", + f"num_buffers = {self.num_buffers}", + ] + ) diff --git a/ppsci/arch/cfdgcn.py b/ppsci/arch/cfdgcn.py index 181f9f9f99..78111bc27e 100644 --- a/ppsci/arch/cfdgcn.py +++ b/ppsci/arch/cfdgcn.py @@ -1,350 +1,350 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional -from typing import Sequence -from typing import Tuple -from typing import TypeVar -from typing import Union - -import numpy as np -import paddle -from paddle import nn -from paddle.nn import functional as F - -from ppsci.data.dataset import airfoil_dataset - -try: - import pgl -except ModuleNotFoundError: - pass - -GenTensor = TypeVar("GenTensor", paddle.Tensor, np.ndarray) - -SU2_SHAPE_IDS = { - "line": 3, - "triangle": 5, - "quad": 9, -} - - -def _knn_interpolate( - features: paddle.Tensor, coarse_nodes: paddle.Tensor, fine_nodes: paddle.Tensor -) -> paddle.Tensor: - coarse_nodes_input = paddle.repeat_interleave( - coarse_nodes.unsqueeze(0), fine_nodes.shape[0], axis=0 - ) # [6684,352,2] - fine_nodes_input = paddle.repeat_interleave( - fine_nodes.unsqueeze(1), coarse_nodes.shape[0], axis=1 - ) # [6684,352,2] - dist_w = 1.0 / ( - paddle.norm(x=coarse_nodes_input - fine_nodes_input, p=2, axis=-1) + 1e-9 - ) # [6684,352] - knn_value, knn_index = paddle.topk(dist_w, k=3, largest=True) # [6684,3],[6684,3] - weight = knn_value.unsqueeze(-2) - features_input = features[knn_index] - output = paddle.bmm(weight, features_input).squeeze(-2) / paddle.sum( - knn_value, axis=-1, keepdim=True - ) - return output - - -def is_cw( - points: paddle.Tensor, triangles: paddle.Tensor, ret_val=False -) -> Union[bool, paddle.Tensor]: - tri_pts = points[triangles] - a = tri_pts[:, 0] - tri_pts[:, 1] - b = tri_pts[:, 1] - tri_pts[:, 2] - cross = b[:, 0] * a[:, 1] - b[:, 1] * a[:, 0] - - if not ret_val: - return cross > 0 - else: - return cross - - -def left_orthogonal(v: paddle.Tensor) -> paddle.Tensor: - return paddle.stack([-v[..., 1], v[..., 0]], axis=-1) - - -def signed_dist_graph( - nodes: paddle.Tensor, marker_inds, with_sign=False -) -> paddle.Tensor: - # assumes shape is convex - # approximate signed distance by distance to closest point on surface - signed_dists = paddle.zeros([nodes.shape[0]], 
dtype=paddle.float32) - marker_nodes = nodes[marker_inds] - if type(marker_inds) is paddle.Tensor: - marker_inds = marker_inds.tolist() - marker_inds = set(marker_inds) - - if with_sign: - marker_surfaces = marker_nodes[:-1] - marker_nodes[1:] - last_surface = marker_nodes[-1] - marker_nodes[0] - marker_surfaces = paddle.concat([marker_surfaces, last_surface.unsqueeze(0)]) - normals = left_orthogonal(marker_surfaces) / marker_surfaces.norm( - axis=1 - ).unsqueeze(1) - for i, x in enumerate(nodes): - if i not in marker_inds: - vecs = marker_nodes - x - dists = paddle.linalg.norm(vecs, axis=1) - min_dist = dists.min() - - if with_sign: - # if sign is requested, check if inside marker shape - # dot product with normals to find if inside shape - surface_dists = (vecs * normals).sum(axis=1) - if (surface_dists < 0).unique().shape[0] == 1: - # if all point in same direction it is inside - min_dist *= -1 - - signed_dists[i] = min_dist - return signed_dists - - -def quad2tri(elems: np.array) -> Tuple[List[int], Union[List[int], paddle.Tensor]]: - new_elems = [] - new_edges = [] - for e in elems: - if len(e) <= 3: - new_elems.append(e) - else: - new_elems.append([e[0], e[1], e[2]]) - new_elems.append([e[0], e[2], e[3]]) - new_edges.append(paddle.to_tensor([[e[0]], [e[2]]], dtype=paddle.int64)) - new_edges = ( - paddle.concat(new_edges, axis=1) - if new_edges - else paddle.to_tensor([], dtype=paddle.int64) - ) - return new_elems, new_edges - - -def write_graph_mesh( - output_filename: str, - points: GenTensor, - elems_list: Sequence[Sequence[Sequence[int]]], - marker_dict: Dict[str, Sequence[Sequence[int]]], - dims: int = 2, -) -> None: - def seq2str(s: Sequence[int]) -> str: - return " ".join(str(x) for x in s) - - with open(output_filename, "w") as f: - f.write(f"NDIME={dims}\n") - - num_points = points.shape[0] - f.write(f"NPOIN={num_points}\n") - for i, p in enumerate(points): - f.write(f"{seq2str(p.tolist())} {i}\n") - f.write("\n") - - num_elems = sum([len(elems) for elems in elems_list]) - f.write(f"NELEM={num_elems}\n") - for elems in elems_list: - for e in elems: - if len(e) != 3 and len(e) != 4: - raise ValueError( - f"Meshes only support triangles and quadrilaterals, " - f"passed element had {len(e)} vertices." - ) - elem_id = ( - SU2_SHAPE_IDS["triangle"] if len(e) == 3 else SU2_SHAPE_IDS["quad"] - ) - f.write(f"{elem_id} {seq2str(e)}\n") - f.write("\n") - - num_markers = len(marker_dict) - f.write(f"NMARK={num_markers}\n") - for marker_tag in marker_dict: - f.write(f"MARKER_TAG={marker_tag}\n") - marker_elems = marker_dict[marker_tag] - f.write(f"MARKER_ELEMS={len(marker_elems)}\n") - for m in marker_elems: - f.write(f'{SU2_SHAPE_IDS["line"]} {seq2str(m)}\n') - f.write("\n") - - -class CFDGCN(nn.Layer): - """Graph Neural Networks for Fluid Flow Prediction. - - [Filipe De Avila Belbute-Peres, Thomas Economon, Zico Kolter Proceedings of the 37th International Conference on Machine Learning, PMLR 119:2402-2411, 2020.](https://proceedings.mlr.press/v119/de-avila-belbute-peres20a.html) - - Code reference: https://github.com/locuslab/cfd-gcn - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input", ). - output_keys (Tuple[str, ...]): Name of output keys, such as ("pred", ). - config_file (str): Name of configuration file for su2 module. - coarse_mesh (str): Path of coarse mesh file. - fine_marker_dict (Dict[str, List[List[int]]]): Dict of fine marker. - process_sim (Callable, optional): Preprocess funtion. Defaults to `lambda x, y: x`. 
- freeze_mesh (bool, optional): Whether set `stop_gradient=True` for nodes. Defaults to False. - num_convs (int, optional): Number of conv layers. Defaults to 6. - num_end_convs (int, optional): Number of end conv layers. Defaults to 3. - hidden_channel (int, optional): Number of channels of hidden layer. Defaults to 512. - out_channel (int, optional): Number of channels of output. Defaults to 3. - su2_module (Optional[Callable]): SU2Module Object. Defaults to None. - - Examples: - >>> import ppsci - >>> import su2paddle # doctest: +SKIP - >>> model = ppsci.arch.CFDGCN( - ... input_keys=("input"), - ... output_keys=("pred"), - ... config_file="/path/to/file.cfg", - ... coarse_mesh="/path/to/file.su2", - ... process_sim=None, - ... freeze_mesh=False, - ... num_convs=6, - ... num_end_convs=3, - ... hidden_channel=512, - ... out_channel=3, - ... su2_module=su2paddle.SU2Module, - ... ) # doctest: +SKIP - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - config_file: str, - coarse_mesh: str, - fine_marker_dict: Dict[str, List[List[int]]], - process_sim: Callable = lambda x, y: x, - freeze_mesh: bool = False, - num_convs: int = 6, - num_end_convs: int = 3, - hidden_channel: int = 512, - out_channel: int = 3, - su2_module: Optional[Callable] = None, - ): - - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - meshes_temp_dir = "temp_meshes" - os.makedirs(meshes_temp_dir, exist_ok=True) - self.mesh_file = os.path.join(meshes_temp_dir, f"{str(os.getpid())}_mesh.su2") - - if not coarse_mesh: - raise ValueError("Need to provide a coarse mesh for CFD-GCN.") - nodes, edges, self.elems, self.marker_dict = airfoil_dataset._get_mesh_graph( - coarse_mesh - ) - if not freeze_mesh: - self.nodes = paddle.to_tensor(nodes, stop_gradient=False) - else: - self.nodes = paddle.to_tensor(nodes, stop_gradient=True) - - self.elems, new_edges = quad2tri(sum(self.elems, [])) - self.elems = [self.elems] - - if is_cw(self.nodes, paddle.to_tensor(self.elems[0])).nonzero().shape[0] != 0: - raise ("Mesh has flipped elems.") - - self.edges = paddle.to_tensor(edges) - self.edges = paddle.concat([self.edges, new_edges], axis=1) - self.marker_inds = paddle.to_tensor(sum(self.marker_dict.values(), [])).unique() - self.fine_marker_dict = paddle.to_tensor(fine_marker_dict["airfoil"]).unique() - self.process_sim = process_sim - - self.write_mesh_file( - self.nodes, self.elems, self.marker_dict, filename=self.mesh_file - ) - self.su2 = su2_module(config_file, mesh_file=self.mesh_file) - self.sdf = None - - self.num_convs = num_end_convs - self.convs = [] - if self.num_convs > 0: - self.convs = nn.LayerList() - in_channels = out_channel + hidden_channel - for i in range(self.num_convs - 1): - self.convs.append(pgl.nn.GCNConv(in_channels, hidden_channel)) - in_channels = hidden_channel - self.convs.append(pgl.nn.GCNConv(in_channels, out_channel)) - - self.num_pre_convs = num_convs - num_end_convs - self.pre_convs = [] - if self.num_pre_convs > 0: - in_channels = 5 + 1 # one extra channel for sdf - self.pre_convs = nn.LayerList() - for i in range(self.num_pre_convs - 1): - self.pre_convs.append(pgl.nn.GCNConv(in_channels, hidden_channel)) - in_channels = hidden_channel - self.pre_convs.append(pgl.nn.GCNConv(in_channels, hidden_channel)) - - def forward(self, x: Dict[str, "pgl.Graph"]) -> Dict[str, paddle.Tensor]: - graph = x[self.input_keys[0]] - batch_size = graph.shape[0] - x_list = paddle.split(graph.x, batch_size) - fine_x_list = [] - - for idx in 
range(batch_size): - x = x_list[idx] - if self.sdf is None: - with paddle.no_grad(): - self.sdf = signed_dist_graph( - x[:, :2], self.fine_marker_dict - ).unsqueeze(1) - fine_x = paddle.concat([x, self.sdf], axis=1) - for conv in self.pre_convs: - fine_x = F.relu(conv(graph, fine_x)) - fine_x_list.append(fine_x) - nodes_input = self.get_nodes().tile([batch_size, 1, 1]) - - batch_y = self.su2( - nodes_input[..., 0], - nodes_input[..., 1], - graph.aoa[..., None], - graph.mach_or_reynolds[..., None], - ) - batch_y = self.process_sim(batch_y, False) - - pred_fields = [] - for idx in range(batch_size): - coarse_y = paddle.stack([y[idx].flatten() for y in batch_y], axis=1).astype( - "float32" - ) - nodes = self.get_nodes() - x = x_list[idx] - fine_y = _knn_interpolate( - features=coarse_y, coarse_nodes=nodes[:, :2], fine_nodes=x[:, :2] - ) - fine_y = paddle.concat([fine_y, fine_x_list[idx]], axis=1) - - for conv in self.convs[:-1]: - fine_y = F.relu(conv(graph, fine_y)) - fine_y = self.convs[-1](graph, fine_y) - pred_fields.append(fine_y) - pred_fields = paddle.concat(pred_fields, axis=0) - return {self.output_keys[0]: pred_fields} - - def get_nodes(self) -> paddle.Tensor: - return self.nodes - - @staticmethod - def write_mesh_file( - x: paddle.Tensor, - elems: paddle.Tensor, - marker_dict: Dict[str, Sequence[Sequence[int]]], - filename: str = "mesh.su2", - ) -> None: - write_graph_mesh(filename, x[:, :2], elems, marker_dict) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import TypeVar +from typing import Union + +import numpy as np +import paddle +from paddle import nn +from paddle.nn import functional as F + +from ppsci.data.dataset import airfoil_dataset + +try: + import pgl +except ModuleNotFoundError: + pass + +GenTensor = TypeVar("GenTensor", paddle.Tensor, np.ndarray) + +SU2_SHAPE_IDS = { + "line": 3, + "triangle": 5, + "quad": 9, +} + + +def _knn_interpolate( + features: paddle.Tensor, coarse_nodes: paddle.Tensor, fine_nodes: paddle.Tensor +) -> paddle.Tensor: + coarse_nodes_input = paddle.repeat_interleave( + coarse_nodes.unsqueeze(0), fine_nodes.shape[0], axis=0 + ) # [6684,352,2] + fine_nodes_input = paddle.repeat_interleave( + fine_nodes.unsqueeze(1), coarse_nodes.shape[0], axis=1 + ) # [6684,352,2] + dist_w = 1.0 / ( + paddle.norm(x=coarse_nodes_input - fine_nodes_input, p=2, axis=-1) + 1e-9 + ) # [6684,352] + knn_value, knn_index = paddle.topk(dist_w, k=3, largest=True) # [6684,3],[6684,3] + weight = knn_value.unsqueeze(-2) + features_input = features[knn_index] + output = paddle.bmm(weight, features_input).squeeze(-2) / paddle.sum( + knn_value, axis=-1, keepdim=True + ) + return output + + +def is_cw( + points: paddle.Tensor, triangles: paddle.Tensor, ret_val=False +) -> Union[bool, paddle.Tensor]: + tri_pts = points[triangles] + a = tri_pts[:, 0] - tri_pts[:, 1] + b = tri_pts[:, 1] - tri_pts[:, 2] + cross = b[:, 0] * a[:, 1] - b[:, 1] * a[:, 0] + + if not ret_val: + return cross > 0 + else: + return cross + + +def left_orthogonal(v: paddle.Tensor) -> paddle.Tensor: + return paddle.stack([-v[..., 1], v[..., 0]], axis=-1) + + +def signed_dist_graph( + nodes: paddle.Tensor, marker_inds, with_sign=False +) -> paddle.Tensor: + # assumes shape is convex + # approximate signed distance by distance to closest point on surface + signed_dists = paddle.zeros([nodes.shape[0]], dtype=paddle.float32) + marker_nodes = nodes[marker_inds] + if type(marker_inds) is paddle.Tensor: + marker_inds = marker_inds.tolist() + marker_inds = set(marker_inds) + + if with_sign: + marker_surfaces = marker_nodes[:-1] - marker_nodes[1:] + last_surface = marker_nodes[-1] - marker_nodes[0] + marker_surfaces = paddle.concat([marker_surfaces, last_surface.unsqueeze(0)]) + normals = left_orthogonal(marker_surfaces) / marker_surfaces.norm( + axis=1 + ).unsqueeze(1) + for i, x in enumerate(nodes): + if i not in marker_inds: + vecs = marker_nodes - x + dists = paddle.linalg.norm(vecs, axis=1) + min_dist = dists.min() + + if with_sign: + # if sign is requested, check if inside marker shape + # dot product with normals to find if inside shape + surface_dists = (vecs * normals).sum(axis=1) + if (surface_dists < 0).unique().shape[0] == 1: + # if all point in same direction it is inside + min_dist *= -1 + + signed_dists[i] = min_dist + return signed_dists + + +def quad2tri(elems: np.array) -> Tuple[List[int], Union[List[int], paddle.Tensor]]: + new_elems = [] + new_edges = [] + for e in elems: + if len(e) <= 3: + new_elems.append(e) + else: + new_elems.append([e[0], e[1], e[2]]) + new_elems.append([e[0], e[2], e[3]]) + new_edges.append(paddle.to_tensor([[e[0]], [e[2]]], dtype=paddle.int64)) + new_edges = ( + paddle.concat(new_edges, axis=1) + if new_edges + else paddle.to_tensor([], dtype=paddle.int64) + ) + return new_elems, new_edges + + +def write_graph_mesh( + output_filename: str, + points: 
GenTensor,
+    elems_list: Sequence[Sequence[Sequence[int]]],
+    marker_dict: Dict[str, Sequence[Sequence[int]]],
+    dims: int = 2,
+) -> None:
+    def seq2str(s: Sequence[int]) -> str:
+        return " ".join(str(x) for x in s)
+
+    with open(output_filename, "w") as f:
+        f.write(f"NDIME={dims}\n")
+
+        num_points = points.shape[0]
+        f.write(f"NPOIN={num_points}\n")
+        for i, p in enumerate(points):
+            f.write(f"{seq2str(p.tolist())} {i}\n")
+        f.write("\n")
+
+        num_elems = sum([len(elems) for elems in elems_list])
+        f.write(f"NELEM={num_elems}\n")
+        for elems in elems_list:
+            for e in elems:
+                if len(e) != 3 and len(e) != 4:
+                    raise ValueError(
+                        f"Meshes only support triangles and quadrilaterals, "
+                        f"passed element had {len(e)} vertices."
+                    )
+                elem_id = (
+                    SU2_SHAPE_IDS["triangle"] if len(e) == 3 else SU2_SHAPE_IDS["quad"]
+                )
+                f.write(f"{elem_id} {seq2str(e)}\n")
+        f.write("\n")
+
+        num_markers = len(marker_dict)
+        f.write(f"NMARK={num_markers}\n")
+        for marker_tag in marker_dict:
+            f.write(f"MARKER_TAG={marker_tag}\n")
+            marker_elems = marker_dict[marker_tag]
+            f.write(f"MARKER_ELEMS={len(marker_elems)}\n")
+            for m in marker_elems:
+                f.write(f'{SU2_SHAPE_IDS["line"]} {seq2str(m)}\n')
+        f.write("\n")
+
+
+class CFDGCN(nn.Layer):
+    """Graph Neural Networks for Fluid Flow Prediction.
+
+    [Filipe De Avila Belbute-Peres, Thomas Economon, Zico Kolter Proceedings of the 37th International Conference on Machine Learning, PMLR 119:2402-2411, 2020.](https://proceedings.mlr.press/v119/de-avila-belbute-peres20a.html)
+
+    Code reference: https://github.com/locuslab/cfd-gcn
+
+    Args:
+        input_keys (Tuple[str, ...]): Name of input keys, such as ("input", ).
+        output_keys (Tuple[str, ...]): Name of output keys, such as ("pred", ).
+        config_file (str): Name of configuration file for su2 module.
+        coarse_mesh (str): Path of coarse mesh file.
+        fine_marker_dict (Dict[str, List[List[int]]]): Dict of fine marker.
+        process_sim (Callable, optional): Preprocess function. Defaults to `lambda x, y: x`.
+        freeze_mesh (bool, optional): Whether to set `stop_gradient=True` for nodes. Defaults to False.
+        num_convs (int, optional): Number of conv layers. Defaults to 6.
+        num_end_convs (int, optional): Number of end conv layers. Defaults to 3.
+        hidden_channel (int, optional): Number of channels of hidden layer. Defaults to 512.
+        out_channel (int, optional): Number of channels of output. Defaults to 3.
+        su2_module (Optional[Callable]): SU2Module object. Defaults to None.
+
+    Examples:
+        >>> import ppsci
+        >>> import su2paddle  # doctest: +SKIP
+        >>> model = ppsci.arch.CFDGCN(
+        ...     input_keys=("input",),
+        ...     output_keys=("pred",),
+        ...     config_file="/path/to/file.cfg",
+        ...     coarse_mesh="/path/to/file.su2",
+        ...     process_sim=None,
+        ...     freeze_mesh=False,
+        ...     num_convs=6,
+        ...     num_end_convs=3,
+        ...     hidden_channel=512,
+        ...     out_channel=3,
+        ...     su2_module=su2paddle.SU2Module,
+        ...
) # doctest: +SKIP
+    """
+
+    def __init__(
+        self,
+        input_keys: Tuple[str, ...],
+        output_keys: Tuple[str, ...],
+        config_file: str,
+        coarse_mesh: str,
+        fine_marker_dict: Dict[str, List[List[int]]],
+        process_sim: Callable = lambda x, y: x,
+        freeze_mesh: bool = False,
+        num_convs: int = 6,
+        num_end_convs: int = 3,
+        hidden_channel: int = 512,
+        out_channel: int = 3,
+        su2_module: Optional[Callable] = None,
+    ):
+
+        super().__init__()
+        self.input_keys = input_keys
+        self.output_keys = output_keys
+        meshes_temp_dir = "temp_meshes"
+        os.makedirs(meshes_temp_dir, exist_ok=True)
+        self.mesh_file = os.path.join(meshes_temp_dir, f"{str(os.getpid())}_mesh.su2")
+
+        if not coarse_mesh:
+            raise ValueError("Need to provide a coarse mesh for CFD-GCN.")
+        nodes, edges, self.elems, self.marker_dict = airfoil_dataset._get_mesh_graph(
+            coarse_mesh
+        )
+        if not freeze_mesh:
+            self.nodes = paddle.to_tensor(nodes, stop_gradient=False)
+        else:
+            self.nodes = paddle.to_tensor(nodes, stop_gradient=True)
+
+        self.elems, new_edges = quad2tri(sum(self.elems, []))
+        self.elems = [self.elems]
+
+        if is_cw(self.nodes, paddle.to_tensor(self.elems[0])).nonzero().shape[0] != 0:
+            raise ValueError("Mesh has flipped elems.")
+
+        self.edges = paddle.to_tensor(edges)
+        self.edges = paddle.concat([self.edges, new_edges], axis=1)
+        self.marker_inds = paddle.to_tensor(sum(self.marker_dict.values(), [])).unique()
+        self.fine_marker_dict = paddle.to_tensor(fine_marker_dict["airfoil"]).unique()
+        self.process_sim = process_sim
+
+        self.write_mesh_file(
+            self.nodes, self.elems, self.marker_dict, filename=self.mesh_file
+        )
+        self.su2 = su2_module(config_file, mesh_file=self.mesh_file)
+        self.sdf = None
+
+        self.num_convs = num_end_convs
+        self.convs = []
+        if self.num_convs > 0:
+            self.convs = nn.LayerList()
+            in_channels = out_channel + hidden_channel
+            for i in range(self.num_convs - 1):
+                self.convs.append(pgl.nn.GCNConv(in_channels, hidden_channel))
+                in_channels = hidden_channel
+            self.convs.append(pgl.nn.GCNConv(in_channels, out_channel))
+
+        self.num_pre_convs = num_convs - num_end_convs
+        self.pre_convs = []
+        if self.num_pre_convs > 0:
+            in_channels = 5 + 1  # one extra channel for sdf
+            self.pre_convs = nn.LayerList()
+            for i in range(self.num_pre_convs - 1):
+                self.pre_convs.append(pgl.nn.GCNConv(in_channels, hidden_channel))
+                in_channels = hidden_channel
+            self.pre_convs.append(pgl.nn.GCNConv(in_channels, hidden_channel))
+
+    def forward(self, x: Dict[str, "pgl.Graph"]) -> Dict[str, paddle.Tensor]:
+        graph = x[self.input_keys[0]]
+        batch_size = graph.shape[0]
+        x_list = paddle.split(graph.x, batch_size)
+        fine_x_list = []
+
+        for idx in range(batch_size):
+            x = x_list[idx]
+            if self.sdf is None:
+                with paddle.no_grad():
+                    self.sdf = signed_dist_graph(
+                        x[:, :2], self.fine_marker_dict
+                    ).unsqueeze(1)
+            fine_x = paddle.concat([x, self.sdf], axis=1)
+            for conv in self.pre_convs:
+                fine_x = F.relu(conv(graph, fine_x))
+            fine_x_list.append(fine_x)
+        nodes_input = self.get_nodes().tile([batch_size, 1, 1])
+
+        batch_y = self.su2(
+            nodes_input[..., 0],
+            nodes_input[..., 1],
+            graph.aoa[..., None],
+            graph.mach_or_reynolds[..., None],
+        )
+        batch_y = self.process_sim(batch_y, False)
+
+        pred_fields = []
+        for idx in range(batch_size):
+            coarse_y = paddle.stack([y[idx].flatten() for y in batch_y], axis=1).astype(
+                "float32"
+            )
+            nodes = self.get_nodes()
+            x = x_list[idx]
+            fine_y = _knn_interpolate(
+                features=coarse_y, coarse_nodes=nodes[:, :2], fine_nodes=x[:, :2]
+            )
+            fine_y = paddle.concat([fine_y, fine_x_list[idx]],
axis=1) + + for conv in self.convs[:-1]: + fine_y = F.relu(conv(graph, fine_y)) + fine_y = self.convs[-1](graph, fine_y) + pred_fields.append(fine_y) + pred_fields = paddle.concat(pred_fields, axis=0) + return {self.output_keys[0]: pred_fields} + + def get_nodes(self) -> paddle.Tensor: + return self.nodes + + @staticmethod + def write_mesh_file( + x: paddle.Tensor, + elems: paddle.Tensor, + marker_dict: Dict[str, Sequence[Sequence[int]]], + filename: str = "mesh.su2", + ) -> None: + write_graph_mesh(filename, x[:, :2], elems, marker_dict) diff --git a/ppsci/arch/chip_deeponets.py b/ppsci/arch/chip_deeponets.py index 30c87c5656..c644ee5af7 100644 --- a/ppsci/arch/chip_deeponets.py +++ b/ppsci/arch/chip_deeponets.py @@ -1,214 +1,214 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Tuple -from typing import Union - -import paddle -import paddle.nn as nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base -from ppsci.arch import mlp - - -class ChipDeepONets(base.Arch): - """Multi-branch physics-informed deep operator neural network. The network consists of three branch networks: random heat source, boundary function, and boundary type, as well as a trunk network. - - Args: - branch_input_keys (Tuple[str, ...]): Name of input data for internal heat source on branch nets. - BCtype_input_keys (Tuple[str, ...]): Name of input data for boundary types on branch nets. - BC_input_keys (Tuple[str, ...]): Name of input data for boundary on branch nets. - trunk_input_keys (Tuple[str, ...]): Name of input data for trunk net. - output_keys (Tuple[str, ...]): Output name of predicted temperature. - num_loc (int): Number of sampled input data for internal heat source. - bctype_loc (int): Number of sampled input data for boundary types. - BC_num_loc (int): Number of sampled input data for boundary. - num_features (int): Number of features extracted from trunk net, same for all branch nets. - branch_num_layers (int): Number of hidden layers of internal heat source on branch nets. - BC_num_layers (int): Number of hidden layers of boundary on branch nets. - trunk_num_layers (int): Number of hidden layers of trunk net. - branch_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of internal heat source on branch nets. - An integer for all layers, or list of integer specify each layer's size. - BC_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of boundary on branch nets. - An integer for all layers, or list of integer specify each layer's size. - trunk_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of trunk net. - An integer for all layers, or list of integer specify each layer's size. - branch_skip_connection (bool, optional): Whether to use skip connection for internal heat source on branch net. Defaults to False. - BC_skip_connection (bool, optional): Whether to use skip connection for boundary on branch net. 
Defaults to False. - trunk_skip_connection (bool, optional): Whether to use skip connection for trunk net. Defaults to False. - branch_activation (str, optional): Name of activation function for internal heat source on branch net. Defaults to "tanh". - BC_activation (str, optional): Name of activation function for boundary on branch net. Defaults to "tanh". - trunk_activation (str, optional): Name of activation function for trunk net. Defaults to "tanh". - branch_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for internal heat source on branch net. Defaults to False. - BC_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for boundary on branch net. Defaults to False. - trunk_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. - use_bias (bool, optional): Whether to add bias on predicted G(u)(y). Defaults to True. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.ChipDeepONets( - ... ('u',), - ... ('bc',), - ... ('bc_data',), - ... ("x",'y'), - ... ("T",), - ... 324, - ... 1, - ... 76, - ... 400, - ... 9, - ... 9, - ... 6, - ... 256, - ... 256, - ... 128, - ... branch_activation="swish", - ... BC_activation="swish", - ... trunk_activation="swish", - ... use_bias=True, - ... ) - """ - - def __init__( - self, - branch_input_keys: Tuple[str, ...], - BCtype_input_keys: Tuple[str, ...], - BC_input_keys: Tuple[str, ...], - trunk_input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - num_loc: int, - bctype_loc: int, - BC_num_loc: int, - num_features: int, - branch_num_layers: int, - BC_num_layers: int, - trunk_num_layers: int, - branch_hidden_size: Union[int, Tuple[int, ...]], - BC_hidden_size: Union[int, Tuple[int, ...]], - trunk_hidden_size: Union[int, Tuple[int, ...]], - branch_skip_connection: bool = False, - BC_skip_connection: bool = False, - trunk_skip_connection: bool = False, - branch_activation: str = "tanh", - BC_activation: str = "tanh", - trunk_activation: str = "tanh", - branch_weight_norm: bool = False, - BC_weight_norm: bool = False, - trunk_weight_norm: bool = False, - use_bias: bool = True, - ): - super().__init__() - self.trunk_input_keys = trunk_input_keys - self.branch_input_keys = branch_input_keys - self.BCtype_input_keys = BCtype_input_keys - self.BC_input_keys = BC_input_keys - self.input_keys = ( - self.trunk_input_keys - + self.branch_input_keys - + self.BC_input_keys - + self.BCtype_input_keys - ) - self.output_keys = output_keys - - self.branch_net = mlp.MLP( - self.branch_input_keys, - ("b",), - branch_num_layers, - branch_hidden_size, - branch_activation, - branch_skip_connection, - branch_weight_norm, - input_dim=num_loc, - output_dim=num_features, - ) - - self.BCtype_net = mlp.MLP( - self.BCtype_input_keys, - ("bctype",), - BC_num_layers, - BC_hidden_size, - BC_activation, - BC_skip_connection, - BC_weight_norm, - input_dim=bctype_loc, - output_dim=num_features, - ) - - self.BC_net = mlp.MLP( - self.BC_input_keys, - ("bc",), - BC_num_layers, - BC_hidden_size, - BC_activation, - BC_skip_connection, - BC_weight_norm, - input_dim=BC_num_loc, - output_dim=num_features, - ) - - self.trunk_net = mlp.MLP( - self.trunk_input_keys, - ("t",), - trunk_num_layers, - trunk_hidden_size, - trunk_activation, - trunk_skip_connection, - trunk_weight_norm, - input_dim=len(self.trunk_input_keys), - output_dim=num_features, - ) - self.trunk_act = act_mod.get_activation(trunk_activation) - self.bc_act = act_mod.get_activation(BC_activation) - self.branch_act = 
act_mod.get_activation(branch_activation) - - self.use_bias = use_bias - if use_bias: - # register bias to parameter for updating in optimizer and storage - self.b = self.create_parameter( - shape=(1,), - attr=nn.initializer.Constant(0.0), - ) - - def forward(self, x): - - if self._input_transform is not None: - x = self._input_transform(x) - - # Branch net to encode the input function - u_features = self.branch_net(x)[self.branch_net.output_keys[0]] - bc_features = self.BC_net(x)[self.BC_net.output_keys[0]] - bctype_features = self.BCtype_net(x)[self.BCtype_net.output_keys[0]] - # Trunk net to encode the domain of the output function - y_features = self.trunk_net(x)[self.trunk_net.output_keys[0]] - y_features = self.trunk_act(y_features) - # Dot product - G_u = paddle.sum( - u_features * y_features * bc_features * bctype_features, - axis=1, - keepdim=True, - ) - # Add bias - if self.use_bias: - G_u += self.b - - result_dict = { - self.output_keys[0]: G_u, - } - if self._output_transform is not None: - result_dict = self._output_transform(x, result_dict) - - return result_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Tuple +from typing import Union + +import paddle +import paddle.nn as nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base +from ppsci.arch import mlp + + +class ChipDeepONets(base.Arch): + """Multi-branch physics-informed deep operator neural network. The network consists of three branch networks: random heat source, boundary function, and boundary type, as well as a trunk network. + + Args: + branch_input_keys (Tuple[str, ...]): Name of input data for internal heat source on branch nets. + BCtype_input_keys (Tuple[str, ...]): Name of input data for boundary types on branch nets. + BC_input_keys (Tuple[str, ...]): Name of input data for boundary on branch nets. + trunk_input_keys (Tuple[str, ...]): Name of input data for trunk net. + output_keys (Tuple[str, ...]): Output name of predicted temperature. + num_loc (int): Number of sampled input data for internal heat source. + bctype_loc (int): Number of sampled input data for boundary types. + BC_num_loc (int): Number of sampled input data for boundary. + num_features (int): Number of features extracted from trunk net, same for all branch nets. + branch_num_layers (int): Number of hidden layers of internal heat source on branch nets. + BC_num_layers (int): Number of hidden layers of boundary on branch nets. + trunk_num_layers (int): Number of hidden layers of trunk net. + branch_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of internal heat source on branch nets. + An integer for all layers, or list of integer specify each layer's size. + BC_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of boundary on branch nets. + An integer for all layers, or list of integer specify each layer's size. 
+ trunk_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of trunk net. + An integer for all layers, or list of integer specify each layer's size. + branch_skip_connection (bool, optional): Whether to use skip connection for internal heat source on branch net. Defaults to False. + BC_skip_connection (bool, optional): Whether to use skip connection for boundary on branch net. Defaults to False. + trunk_skip_connection (bool, optional): Whether to use skip connection for trunk net. Defaults to False. + branch_activation (str, optional): Name of activation function for internal heat source on branch net. Defaults to "tanh". + BC_activation (str, optional): Name of activation function for boundary on branch net. Defaults to "tanh". + trunk_activation (str, optional): Name of activation function for trunk net. Defaults to "tanh". + branch_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for internal heat source on branch net. Defaults to False. + BC_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for boundary on branch net. Defaults to False. + trunk_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. + use_bias (bool, optional): Whether to add bias on predicted G(u)(y). Defaults to True. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.ChipDeepONets( + ... ('u',), + ... ('bc',), + ... ('bc_data',), + ... ("x",'y'), + ... ("T",), + ... 324, + ... 1, + ... 76, + ... 400, + ... 9, + ... 9, + ... 6, + ... 256, + ... 256, + ... 128, + ... branch_activation="swish", + ... BC_activation="swish", + ... trunk_activation="swish", + ... use_bias=True, + ... ) + """ + + def __init__( + self, + branch_input_keys: Tuple[str, ...], + BCtype_input_keys: Tuple[str, ...], + BC_input_keys: Tuple[str, ...], + trunk_input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + num_loc: int, + bctype_loc: int, + BC_num_loc: int, + num_features: int, + branch_num_layers: int, + BC_num_layers: int, + trunk_num_layers: int, + branch_hidden_size: Union[int, Tuple[int, ...]], + BC_hidden_size: Union[int, Tuple[int, ...]], + trunk_hidden_size: Union[int, Tuple[int, ...]], + branch_skip_connection: bool = False, + BC_skip_connection: bool = False, + trunk_skip_connection: bool = False, + branch_activation: str = "tanh", + BC_activation: str = "tanh", + trunk_activation: str = "tanh", + branch_weight_norm: bool = False, + BC_weight_norm: bool = False, + trunk_weight_norm: bool = False, + use_bias: bool = True, + ): + super().__init__() + self.trunk_input_keys = trunk_input_keys + self.branch_input_keys = branch_input_keys + self.BCtype_input_keys = BCtype_input_keys + self.BC_input_keys = BC_input_keys + self.input_keys = ( + self.trunk_input_keys + + self.branch_input_keys + + self.BC_input_keys + + self.BCtype_input_keys + ) + self.output_keys = output_keys + + self.branch_net = mlp.MLP( + self.branch_input_keys, + ("b",), + branch_num_layers, + branch_hidden_size, + branch_activation, + branch_skip_connection, + branch_weight_norm, + input_dim=num_loc, + output_dim=num_features, + ) + + self.BCtype_net = mlp.MLP( + self.BCtype_input_keys, + ("bctype",), + BC_num_layers, + BC_hidden_size, + BC_activation, + BC_skip_connection, + BC_weight_norm, + input_dim=bctype_loc, + output_dim=num_features, + ) + + self.BC_net = mlp.MLP( + self.BC_input_keys, + ("bc",), + BC_num_layers, + BC_hidden_size, + BC_activation, + BC_skip_connection, + BC_weight_norm, + input_dim=BC_num_loc, + 
output_dim=num_features, + ) + + self.trunk_net = mlp.MLP( + self.trunk_input_keys, + ("t",), + trunk_num_layers, + trunk_hidden_size, + trunk_activation, + trunk_skip_connection, + trunk_weight_norm, + input_dim=len(self.trunk_input_keys), + output_dim=num_features, + ) + self.trunk_act = act_mod.get_activation(trunk_activation) + self.bc_act = act_mod.get_activation(BC_activation) + self.branch_act = act_mod.get_activation(branch_activation) + + self.use_bias = use_bias + if use_bias: + # register bias to parameter for updating in optimizer and storage + self.b = self.create_parameter( + shape=(1,), + attr=nn.initializer.Constant(0.0), + ) + + def forward(self, x): + + if self._input_transform is not None: + x = self._input_transform(x) + + # Branch net to encode the input function + u_features = self.branch_net(x)[self.branch_net.output_keys[0]] + bc_features = self.BC_net(x)[self.BC_net.output_keys[0]] + bctype_features = self.BCtype_net(x)[self.BCtype_net.output_keys[0]] + # Trunk net to encode the domain of the output function + y_features = self.trunk_net(x)[self.trunk_net.output_keys[0]] + y_features = self.trunk_act(y_features) + # Dot product + G_u = paddle.sum( + u_features * y_features * bc_features * bctype_features, + axis=1, + keepdim=True, + ) + # Add bias + if self.use_bias: + G_u += self.b + + result_dict = { + self.output_keys[0]: G_u, + } + if self._output_transform is not None: + result_dict = self._output_transform(x, result_dict) + + return result_dict diff --git a/ppsci/arch/crystalgraphconvnet.py b/ppsci/arch/crystalgraphconvnet.py index bb82aa0b81..5964ddaed2 100644 --- a/ppsci/arch/crystalgraphconvnet.py +++ b/ppsci/arch/crystalgraphconvnet.py @@ -1,167 +1,167 @@ -import paddle -import paddle.nn as nn - -from ppsci.arch import base - - -class ConvLayer(nn.Layer): - def __init__(self, atom_fea_len, nbr_fea_len): - super(ConvLayer, self).__init__() - self.atom_fea_len = atom_fea_len - self.nbr_fea_len = nbr_fea_len - self.fc_full = nn.Linear( - 2 * self.atom_fea_len + self.nbr_fea_len, 2 * self.atom_fea_len - ) - self.sigmoid = nn.Sigmoid() - self.softplus1 = nn.Softplus() - self.bn1 = nn.BatchNorm1D(2 * self.atom_fea_len) - self.bn2 = nn.BatchNorm1D(self.atom_fea_len) - self.softplus2 = nn.Softplus() - - def forward(self, atom_in_fea, nbr_fea, nbr_fea_idx): - # TODO will there be problems with the index zero padding? - N, M = nbr_fea_idx.shape - atom_nbr_fea = atom_in_fea[nbr_fea_idx, :] - total_nbr_fea = paddle.concat( - [ - paddle.expand( - atom_in_fea.unsqueeze(1), shape=[N, M, self.atom_fea_len] - ), - atom_nbr_fea, - nbr_fea, - ], - axis=2, - ) - total_gated_fea = self.fc_full(total_nbr_fea) - total_gated_fea = paddle.reshape( - self.bn1(paddle.reshape(total_gated_fea, [-1, self.atom_fea_len * 2])), - [N, M, self.atom_fea_len * 2], - ) - nbr_filter, nbr_core = paddle.chunk(total_gated_fea, chunks=2, axis=2) - nbr_filter = self.sigmoid(nbr_filter) - nbr_core = self.softplus1(nbr_core) - nbr_sumed = paddle.sum(nbr_filter * nbr_core, axis=1) - nbr_sumed = self.bn2(nbr_sumed) - out = self.softplus2(atom_in_fea + nbr_sumed) - return out - - -class CrystalGraphConvNet(base.Arch): - """ - Create a crystal graph convolutional neural network for predicting total - material properties. - - Args: - orig_atom_fea_len (int): Number of atom features in the input. - nbr_fea_len (int): Number of bond features. - atom_fea_len (int): Number of hidden atom features in the convolutional layers. - n_conv (int): Number of convolutional layers. 
- h_fea_len (int): Number of hidden features after pooling. - n_h (int): Number of hidden layers after pooling. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.CrystalGraphConvNet( - ... orig_atom_fea_len=92, - ... nbr_fea_len=41, - ... atom_fea_len=64, - ... n_conv=3, - ... h_fea_len=128, - ... n_h=1, - ... ) - >>> input_dict = { - ... "i": [ - ... paddle.rand(shape=[45, 92]), paddle.rand(shape=[45, 12, 41]), - ... paddle.randint(high=45, shape=[45, 12]), - ... [ - ... paddle.randint(high=32, shape=[32]), paddle.randint(high=8, shape=[8]), - ... paddle.randint(high=2, shape=[2]), paddle.randint(high=3, shape=[3]) - ... ] - ... ] - ... } - >>> output_dict = model(input_dict) - >>> print(output_dict["out"].shape) - [4, 1] - """ - - def __init__( - self, - orig_atom_fea_len: int, - nbr_fea_len: int, - atom_fea_len: int, - n_conv: int, - h_fea_len: int, - n_h: int, - ): - - super().__init__() - self.embedding = nn.Linear(orig_atom_fea_len, atom_fea_len) - self.convs = nn.LayerList( - [ - ConvLayer(atom_fea_len=atom_fea_len, nbr_fea_len=nbr_fea_len) - for _ in range(n_conv) - ] - ) - self.conv_to_fc = nn.Linear(atom_fea_len, h_fea_len) - self.conv_to_fc_softplus = nn.Softplus() - if n_h > 1: - self.fcs = nn.LayerList( - [nn.Linear(h_fea_len, h_fea_len) for _ in range(n_h - 1)] - ) - self.softpluses = nn.LayerList([nn.Softplus() for _ in range(n_h - 1)]) - - self.fc_out = nn.Linear(h_fea_len, 1) - - def forward(self, input) -> paddle.Tensor: - """ - Forward pass. - - N: Total number of atoms in the batch. - M: Max number of neighbors. - N0: Total number of crystals in the batch. - - Args: - input (list): List of input, which includes the following elements: - atom_fea (paddle.Tensor): Shape (N, orig_atom_fea_len). Atom features from atom type. - nbr_fea (paddle.Tensor): Shape (N, M, nbr_fea_len). Bond features of each atom's M neighbors. - nbr_fea_idx (paddle.Tensor): Shape (N, M). Indices of M neighbors of each atom. - crystal_atom_idx (list): List of paddle.Tensor of length N0. Mapping from the crystal idx to atom idx. - - Returns: - paddle.Tensor: Shape (N,). Atom hidden features after convolution. - """ - atom_fea, nbr_fea, nbr_fea_idx, crystal_atom_idx = input["i"] - atom_fea = self.embedding(atom_fea) - for conv_func in self.convs: - atom_fea = conv_func(atom_fea, nbr_fea, nbr_fea_idx) - crys_fea = self.pooling(atom_fea, crystal_atom_idx) - crys_fea = self.conv_to_fc(self.conv_to_fc_softplus(crys_fea)) - crys_fea = self.conv_to_fc_softplus(crys_fea) - if hasattr(self, "fcs") and hasattr(self, "softpluses"): - for fc, softplus in zip(self.fcs, self.softpluses): - crys_fea = softplus(fc(crys_fea)) - out = self.fc_out(crys_fea) - out_dict = {"out": out} - return out_dict - - def pooling(self, atom_fea, crystal_atom_idx): - """ - Pooling the atom features to crystal features - - N: Total number of atoms in the batch - N0: Total number of crystals in the batch - - Args: - atom_fea (paddle.Tensor): Shape (N, atom_fea_len). Atom feature vectors of the batch. - crystal_atom_idx (List[paddle.Tensor]): Length N0. 
Mapping from the crystal idx to atom idx - """ - assert ( - sum([len(idx_map) for idx_map in crystal_atom_idx]) - == atom_fea.data.shape[0] - ) - summed_fea = [ - paddle.mean(atom_fea[idx_map], axis=0, keepdim=True) - for idx_map in crystal_atom_idx - ] - return paddle.concat(summed_fea, axis=0) +import paddle +import paddle.nn as nn + +from ppsci.arch import base + + +class ConvLayer(nn.Layer): + def __init__(self, atom_fea_len, nbr_fea_len): + super(ConvLayer, self).__init__() + self.atom_fea_len = atom_fea_len + self.nbr_fea_len = nbr_fea_len + self.fc_full = nn.Linear( + 2 * self.atom_fea_len + self.nbr_fea_len, 2 * self.atom_fea_len + ) + self.sigmoid = nn.Sigmoid() + self.softplus1 = nn.Softplus() + self.bn1 = nn.BatchNorm1D(2 * self.atom_fea_len) + self.bn2 = nn.BatchNorm1D(self.atom_fea_len) + self.softplus2 = nn.Softplus() + + def forward(self, atom_in_fea, nbr_fea, nbr_fea_idx): + # TODO will there be problems with the index zero padding? + N, M = nbr_fea_idx.shape + atom_nbr_fea = atom_in_fea[nbr_fea_idx, :] + total_nbr_fea = paddle.concat( + [ + paddle.expand( + atom_in_fea.unsqueeze(1), shape=[N, M, self.atom_fea_len] + ), + atom_nbr_fea, + nbr_fea, + ], + axis=2, + ) + total_gated_fea = self.fc_full(total_nbr_fea) + total_gated_fea = paddle.reshape( + self.bn1(paddle.reshape(total_gated_fea, [-1, self.atom_fea_len * 2])), + [N, M, self.atom_fea_len * 2], + ) + nbr_filter, nbr_core = paddle.chunk(total_gated_fea, chunks=2, axis=2) + nbr_filter = self.sigmoid(nbr_filter) + nbr_core = self.softplus1(nbr_core) + nbr_sumed = paddle.sum(nbr_filter * nbr_core, axis=1) + nbr_sumed = self.bn2(nbr_sumed) + out = self.softplus2(atom_in_fea + nbr_sumed) + return out + + +class CrystalGraphConvNet(base.Arch): + """ + Create a crystal graph convolutional neural network for predicting total + material properties. + + Args: + orig_atom_fea_len (int): Number of atom features in the input. + nbr_fea_len (int): Number of bond features. + atom_fea_len (int): Number of hidden atom features in the convolutional layers. + n_conv (int): Number of convolutional layers. + h_fea_len (int): Number of hidden features after pooling. + n_h (int): Number of hidden layers after pooling. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.CrystalGraphConvNet( + ... orig_atom_fea_len=92, + ... nbr_fea_len=41, + ... atom_fea_len=64, + ... n_conv=3, + ... h_fea_len=128, + ... n_h=1, + ... ) + >>> input_dict = { + ... "i": [ + ... paddle.rand(shape=[45, 92]), paddle.rand(shape=[45, 12, 41]), + ... paddle.randint(high=45, shape=[45, 12]), + ... [ + ... paddle.randint(high=32, shape=[32]), paddle.randint(high=8, shape=[8]), + ... paddle.randint(high=2, shape=[2]), paddle.randint(high=3, shape=[3]) + ... ] + ... ] + ... 
} + >>> output_dict = model(input_dict) + >>> print(output_dict["out"].shape) + [4, 1] + """ + + def __init__( + self, + orig_atom_fea_len: int, + nbr_fea_len: int, + atom_fea_len: int, + n_conv: int, + h_fea_len: int, + n_h: int, + ): + + super().__init__() + self.embedding = nn.Linear(orig_atom_fea_len, atom_fea_len) + self.convs = nn.LayerList( + [ + ConvLayer(atom_fea_len=atom_fea_len, nbr_fea_len=nbr_fea_len) + for _ in range(n_conv) + ] + ) + self.conv_to_fc = nn.Linear(atom_fea_len, h_fea_len) + self.conv_to_fc_softplus = nn.Softplus() + if n_h > 1: + self.fcs = nn.LayerList( + [nn.Linear(h_fea_len, h_fea_len) for _ in range(n_h - 1)] + ) + self.softpluses = nn.LayerList([nn.Softplus() for _ in range(n_h - 1)]) + + self.fc_out = nn.Linear(h_fea_len, 1) + + def forward(self, input) -> paddle.Tensor: + """ + Forward pass. + + N: Total number of atoms in the batch. + M: Max number of neighbors. + N0: Total number of crystals in the batch. + + Args: + input (list): List of input, which includes the following elements: + atom_fea (paddle.Tensor): Shape (N, orig_atom_fea_len). Atom features from atom type. + nbr_fea (paddle.Tensor): Shape (N, M, nbr_fea_len). Bond features of each atom's M neighbors. + nbr_fea_idx (paddle.Tensor): Shape (N, M). Indices of M neighbors of each atom. + crystal_atom_idx (list): List of paddle.Tensor of length N0. Mapping from the crystal idx to atom idx. + + Returns: + paddle.Tensor: Shape (N,). Atom hidden features after convolution. + """ + atom_fea, nbr_fea, nbr_fea_idx, crystal_atom_idx = input["i"] + atom_fea = self.embedding(atom_fea) + for conv_func in self.convs: + atom_fea = conv_func(atom_fea, nbr_fea, nbr_fea_idx) + crys_fea = self.pooling(atom_fea, crystal_atom_idx) + crys_fea = self.conv_to_fc(self.conv_to_fc_softplus(crys_fea)) + crys_fea = self.conv_to_fc_softplus(crys_fea) + if hasattr(self, "fcs") and hasattr(self, "softpluses"): + for fc, softplus in zip(self.fcs, self.softpluses): + crys_fea = softplus(fc(crys_fea)) + out = self.fc_out(crys_fea) + out_dict = {"out": out} + return out_dict + + def pooling(self, atom_fea, crystal_atom_idx): + """ + Pooling the atom features to crystal features + + N: Total number of atoms in the batch + N0: Total number of crystals in the batch + + Args: + atom_fea (paddle.Tensor): Shape (N, atom_fea_len). Atom feature vectors of the batch. + crystal_atom_idx (List[paddle.Tensor]): Length N0. 
Mapping from the crystal idx to atom idx + """ + assert ( + sum([len(idx_map) for idx_map in crystal_atom_idx]) + == atom_fea.data.shape[0] + ) + summed_fea = [ + paddle.mean(atom_fea[idx_map], axis=0, keepdim=True) + for idx_map in crystal_atom_idx + ] + return paddle.concat(summed_fea, axis=0) diff --git a/ppsci/arch/cuboid_transformer.py b/ppsci/arch/cuboid_transformer.py index e0e6cbded6..e7ef507834 100644 --- a/ppsci/arch/cuboid_transformer.py +++ b/ppsci/arch/cuboid_transformer.py @@ -1,958 +1,958 @@ -from typing import Sequence -from typing import Tuple -from typing import Union - -import paddle -from paddle import nn - -import ppsci.arch.cuboid_transformer_decoder as cuboid_decoder -import ppsci.arch.cuboid_transformer_encoder as cuboid_encoder -import ppsci.arch.cuboid_transformer_utils as cuboid_utils -from ppsci.arch import activation as act_mod -from ppsci.arch import base -from ppsci.arch.cuboid_transformer_encoder import NEGATIVE_SLOPE -from ppsci.utils import initializer - -"""A space-time Transformer with Cuboid Attention""" - - -class InitialEncoder(nn.Layer): - def __init__( - self, - dim, - out_dim, - downsample_scale: Union[int, Sequence[int]], - num_conv_layers: int = 2, - activation: str = "leaky", - padding_type: str = "nearest", - conv_init_mode: str = "0", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(InitialEncoder, self).__init__() - self.num_conv_layers = num_conv_layers - self.conv_init_mode = conv_init_mode - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - conv_block = [] - for i in range(num_conv_layers): - if i == 0: - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=dim, - out_channels=out_dim, - ) - ) - conv_block.append(nn.GroupNorm(num_groups=16, num_channels=out_dim)) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - else: - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=out_dim, - out_channels=out_dim, - ) - ) - conv_block.append(nn.GroupNorm(num_groups=16, num_channels=out_dim)) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - self.conv_block = nn.Sequential(*conv_block) - if isinstance(downsample_scale, int): - patch_merge_downsample = (1, downsample_scale, downsample_scale) - elif len(downsample_scale) == 2: - patch_merge_downsample = (1, *downsample_scale) - elif len(downsample_scale) == 3: - patch_merge_downsample = tuple(downsample_scale) - else: - raise NotImplementedError( - f"downsample_scale {downsample_scale} format not supported!" 
- ) - self.patch_merge = cuboid_encoder.PatchMerging3D( - dim=out_dim, - out_dim=out_dim, - padding_type=padding_type, - downsample=patch_merge_downsample, - linear_init_mode=linear_init_mode, - norm_init_mode=norm_init_mode, - ) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, - conv_mode=self.conv_init_mode, - linear_mode=self.linear_init_mode, - norm_mode=self.norm_init_mode, - ) - - def forward(self, x): - """x --> [K x Conv2D] --> PatchMerge - - Args: - x: (B, T, H, W, C) - - Returns: - out: (B, T, H_new, W_new, C_out) - """ - - B, T, H, W, C = x.shape - - if self.num_conv_layers > 0: - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = self.conv_block(x).transpose(perm=[0, 2, 3, 1]) - x = self.patch_merge(x.reshape([B, T, H, W, -1])) - else: - x = self.patch_merge(x) - return x - - -class FinalDecoder(nn.Layer): - def __init__( - self, - target_thw: Tuple[int, ...], - dim: int, - num_conv_layers: int = 2, - activation: str = "leaky", - conv_init_mode: str = "0", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(FinalDecoder, self).__init__() - self.target_thw = target_thw - self.dim = dim - self.num_conv_layers = num_conv_layers - self.conv_init_mode = conv_init_mode - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - conv_block = [] - for i in range(num_conv_layers): - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=dim, - out_channels=dim, - ) - ) - conv_block.append(nn.GroupNorm(num_groups=16, num_channels=dim)) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - self.conv_block = nn.Sequential(*conv_block) - self.upsample = cuboid_decoder.Upsample3DLayer( - dim=dim, - out_dim=dim, - target_size=target_thw, - kernel_size=3, - conv_init_mode=conv_init_mode, - ) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, - conv_mode=self.conv_init_mode, - linear_mode=self.linear_init_mode, - norm_mode=self.norm_init_mode, - ) - - def forward(self, x): - """x --> Upsample --> [K x Conv2D] - - Args: - x: (B, T, H, W, C) - - Returns: - out: (B, T, H_new, W_new, C) - """ - - x = self.upsample(x) - if self.num_conv_layers > 0: - B, T, H, W, C = x.shape - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = ( - self.conv_block(x) - .transpose(perm=[0, 2, 3, 1]) - .reshape([B, T, H, W, -1]) - ) - return x - - -class InitialStackPatchMergingEncoder(nn.Layer): - def __init__( - self, - num_merge: int, - in_dim: int, - out_dim_list: Tuple[int, ...], - downsample_scale_list: Tuple[float, ...], - num_conv_per_merge_list: Tuple[int, ...] 
= None, - activation: str = "leaky", - padding_type: str = "nearest", - conv_init_mode: str = "0", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(InitialStackPatchMergingEncoder, self).__init__() - self.conv_init_mode = conv_init_mode - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - self.num_merge = num_merge - self.in_dim = in_dim - self.out_dim_list = out_dim_list[:num_merge] - self.downsample_scale_list = downsample_scale_list[:num_merge] - self.num_conv_per_merge_list = num_conv_per_merge_list - self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list] - self.conv_block_list = nn.LayerList() - self.patch_merge_list = nn.LayerList() - for i in range(num_merge): - if i == 0: - in_dim = in_dim - else: - in_dim = self.out_dim_list[i - 1] - out_dim = self.out_dim_list[i] - downsample_scale = self.downsample_scale_list[i] - conv_block = [] - for j in range(self.num_conv_per_merge_list[i]): - if j == 0: - conv_in_dim = in_dim - else: - conv_in_dim = out_dim - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=conv_in_dim, - out_channels=out_dim, - ) - ) - conv_block.append( - nn.GroupNorm( - num_groups=self.num_group_list[i], num_channels=out_dim - ) - ) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - conv_block = nn.Sequential(*conv_block) - self.conv_block_list.append(conv_block) - patch_merge = cuboid_encoder.PatchMerging3D( - dim=out_dim, - out_dim=out_dim, - padding_type=padding_type, - downsample=(1, downsample_scale, downsample_scale), - linear_init_mode=linear_init_mode, - norm_init_mode=norm_init_mode, - ) - self.patch_merge_list.append(patch_merge) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, - conv_mode=self.conv_init_mode, - linear_mode=self.linear_init_mode, - norm_mode=self.norm_init_mode, - ) - - def get_out_shape_list(self, input_shape): - out_shape_list = [] - for patch_merge in self.patch_merge_list: - input_shape = patch_merge.get_out_shape(input_shape) - out_shape_list.append(input_shape) - return out_shape_list - - def forward(self, x): - """x --> [K x Conv2D] --> PatchMerge --> ... --> [K x Conv2D] --> PatchMerge - - Args: - x: (B, T, H, W, C) - - Returns: - out: (B, T, H_new, W_new, C_out) - """ - - for i, (conv_block, patch_merge) in enumerate( - zip(self.conv_block_list, self.patch_merge_list) - ): - B, T, H, W, C = x.shape - if self.num_conv_per_merge_list[i] > 0: - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = conv_block(x).transpose(perm=[0, 2, 3, 1]).reshape([B, T, H, W, -1]) - x = patch_merge(x) - return x - - -class FinalStackUpsamplingDecoder(nn.Layer): - def __init__( - self, - target_shape_list: Tuple[Tuple[int, ...]], - in_dim: int, - num_conv_per_up_list: Tuple[int, ...] 
= None, - activation: str = "leaky", - conv_init_mode: str = "0", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(FinalStackUpsamplingDecoder, self).__init__() - self.conv_init_mode = conv_init_mode - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - self.target_shape_list = target_shape_list - self.out_dim_list = [ - target_shape[-1] for target_shape in self.target_shape_list - ] - self.num_upsample = len(target_shape_list) - self.in_dim = in_dim - self.num_conv_per_up_list = num_conv_per_up_list - self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list] - self.conv_block_list = nn.LayerList() - self.upsample_list = nn.LayerList() - for i in range(self.num_upsample): - if i == 0: - in_dim = in_dim - else: - in_dim = self.out_dim_list[i - 1] - out_dim = self.out_dim_list[i] - upsample = cuboid_decoder.Upsample3DLayer( - dim=in_dim, - out_dim=in_dim, - target_size=target_shape_list[i][:-1], - kernel_size=3, - conv_init_mode=conv_init_mode, - ) - self.upsample_list.append(upsample) - conv_block = [] - for j in range(num_conv_per_up_list[i]): - if j == 0: - conv_in_dim = in_dim - else: - conv_in_dim = out_dim - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=conv_in_dim, - out_channels=out_dim, - ) - ) - conv_block.append( - nn.GroupNorm( - num_groups=self.num_group_list[i], num_channels=out_dim - ) - ) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - conv_block = nn.Sequential(*conv_block) - self.conv_block_list.append(conv_block) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, - conv_mode=self.conv_init_mode, - linear_mode=self.linear_init_mode, - norm_mode=self.norm_init_mode, - ) - - @staticmethod - def get_init_params(enc_input_shape, enc_out_shape_list, large_channel=False): - dec_target_shape_list = list(enc_out_shape_list[:-1])[::-1] + [ - tuple(enc_input_shape) - ] - if large_channel: - dec_target_shape_list_large_channel = [] - for i, enc_out_shape in enumerate(enc_out_shape_list[::-1]): - dec_target_shape_large_channel = list(dec_target_shape_list[i]) - dec_target_shape_large_channel[-1] = enc_out_shape[-1] - dec_target_shape_list_large_channel.append( - tuple(dec_target_shape_large_channel) - ) - dec_target_shape_list = dec_target_shape_list_large_channel - dec_in_dim = enc_out_shape_list[-1][-1] - return dec_target_shape_list, dec_in_dim - - def forward(self, x): - """x --> Upsample --> [K x Conv2D] --> ... --> Upsample --> [K x Conv2D] - - Args: - x: Shape (B, T, H, W, C) - - Returns: - out: Shape (B, T, H_new, W_new, C) - """ - for i, (conv_block, upsample) in enumerate( - zip(self.conv_block_list, self.upsample_list) - ): - x = upsample(x) - if self.num_conv_per_up_list[i] > 0: - B, T, H, W, C = x.shape - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = conv_block(x).transpose(perm=[0, 2, 3, 1]).reshape([B, T, H, W, -1]) - return x - - -class CuboidTransformer(base.Arch): - """Cuboid Transformer for spatiotemporal forecasting - - We adopt the Non-autoregressive encoder-decoder architecture. - The decoder takes the multi-scale memory output from the encoder. 
- - The initial downsampling / upsampling layers will be - Downsampling: [K x Conv2D --> PatchMerge] - Upsampling: [Nearest Interpolation-based Upsample --> K x Conv2D] - - x --> downsample (optional) ---> (+pos_embed) ---> enc --> mem_l initial_z (+pos_embed) ---> FC - | | - |------------| - | - | - y <--- upsample (optional) <--- dec <---------- - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). - input_shape (Tuple[int, ...]): The shape of the input data. - target_shape (Tuple[int, ...]): The shape of the target data. - base_units (int, optional): The base units. Defaults to 128. - block_units (int, optional): The block units. Defaults to None. - scale_alpha (float, optional): We scale up the channels based on the formula: - - round_to(base_units * max(downsample_scale) ** units_alpha, 4). Defaults to 1.0. - num_heads (int, optional): The number of heads. Defaults to 4. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projection dropout. Defaults to 0.0. - ffn_drop (float, optional): The ffn dropout. Defaults to 0.0. - downsample (int, optional): The rate of downsample. Defaults to 2. - downsample_type (str, optional): The type of downsample. Defaults to "patch_merge". - upsample_type (str, optional): The rate of upsample. Defaults to "upsample". - upsample_kernel_size (int, optional): The kernel size of upsample. Defaults to 3. - enc_depth (list, optional): The depth of encoder. Defaults to [4, 4, 4]. - enc_attn_patterns (str, optional): The pattern of encoder attention. Defaults to None. - enc_cuboid_size (list, optional): The cuboid size of encoder. Defaults to [(4, 4, 4), (4, 4, 4)]. - enc_cuboid_strategy (list, optional): The cuboid strategy of encoder. Defaults to [("l", "l", "l"), ("d", "d", "d")]. - enc_shift_size (list, optional): The shift size of encoder. Defaults to [(0, 0, 0), (0, 0, 0)]. - enc_use_inter_ffn (bool, optional): Whether to use intermediate FFN for encoder. Defaults to True. - dec_depth (list, optional): The depth of decoder. Defaults to [2, 2]. - dec_cross_start (int, optional): The cross start of decoder. Defaults to 0. - dec_self_attn_patterns (str, optional): The partterns of decoder. Defaults to None. - dec_self_cuboid_size (list, optional): The cuboid size of decoder. Defaults to [(4, 4, 4), (4, 4, 4)]. - dec_self_cuboid_strategy (list, optional): The strategy of decoder. Defaults to [("l", "l", "l"), ("d", "d", "d")]. - dec_self_shift_size (list, optional): The shift size of decoder. Defaults to [(1, 1, 1), (0, 0, 0)]. - dec_cross_attn_patterns (_type_, optional): The cross attention patterns of decoder. Defaults to None. - dec_cross_cuboid_hw (list, optional): The cuboid_hw of decoder. Defaults to [(4, 4), (4, 4)]. - dec_cross_cuboid_strategy (list, optional): The cuboid strategy of decoder. Defaults to [("l", "l", "l"), ("d", "l", "l")]. - dec_cross_shift_hw (list, optional): The shift_hw of decoder. Defaults to [(0, 0), (0, 0)]. - dec_cross_n_temporal (list, optional): The cross_n_temporal of decoder. Defaults to [1, 2]. - dec_cross_last_n_frames (int, optional): The cross_last_n_frames of decoder. Defaults to None. - dec_use_inter_ffn (bool, optional): Whether to use intermediate FFN for decoder. Defaults to True. - dec_hierarchical_pos_embed (bool, optional): Whether to use hierarchical pos_embed for decoder. Defaults to False. 
- num_global_vectors (int, optional): The num of global vectors. Defaults to 4. - use_dec_self_global (bool, optional): Whether to use global vector for decoder. Defaults to True. - dec_self_update_global (bool, optional): Whether to update global vector for decoder. Defaults to True. - use_dec_cross_global (bool, optional): Whether to use cross global vector for decoder. Defaults to True. - use_global_vector_ffn (bool, optional): Whether to use global vector FFN. Defaults to True. - use_global_self_attn (bool, optional): Whether to use global attentions. Defaults to False. - separate_global_qkv (bool, optional): Whether to separate global qkv. Defaults to False. - global_dim_ratio (int, optional): The ratio of global dim. Defaults to 1. - self_pattern (str, optional): The pattern. Defaults to "axial". - cross_self_pattern (str, optional): The self cross pattern. Defaults to "axial". - cross_pattern (str, optional): The cross pattern. Defaults to "cross_1x1". - z_init_method (str, optional): How the initial input to the decoder is initialized. Defaults to "nearest_interp". - initial_downsample_type (str, optional): The downsample type of initial. Defaults to "conv". - initial_downsample_activation (str, optional): The downsample activation of initial. Defaults to "leaky". - initial_downsample_scale (int, optional): The downsample scale of initial. Defaults to 1. - initial_downsample_conv_layers (int, optional): The conv layer of downsample of initial. Defaults to 2. - final_upsample_conv_layers (int, optional): The conv layer of final upsample. Defaults to 2. - initial_downsample_stack_conv_num_layers (int, optional): The num of stack conv layer of initial downsample. Defaults to 1. - initial_downsample_stack_conv_dim_list (list, optional): The dim list of stack conv of initial downsample. Defaults to None. - initial_downsample_stack_conv_downscale_list (list, optional): The downscale list of stack conv of initial downsample. Defaults to [1]. - initial_downsample_stack_conv_num_conv_list (list, optional): The num of stack conv list of initial downsample. Defaults to [2]. - ffn_activation (str, optional): The activation of FFN. Defaults to "leaky". - gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. - norm_layer (str, optional): The type of normilize. Defaults to "layer_norm". - padding_type (str, optional): The type of padding. Defaults to "ignore". - pos_embed_type (str, optional): The type of pos embeding. Defaults to "t+hw". - checkpoint_level (bool, optional): Whether to use checkpoint. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pose. Defaults to True. - self_attn_use_final_proj (bool, optional): Whether to use final projection. Defaults to True. - dec_use_first_self_attn (bool, optional): Whether to use first self attention for decoder. Defaults to False. - attn_linear_init_mode (str, optional): The mode of attention linear init. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear init. Defaults to "0". - conv_init_mode (str, optional): The mode of conv init. Defaults to "0". - down_up_linear_init_mode (str, optional): The mode of downsample and upsample linear init. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization init. Defaults to "0". 
- """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - input_shape: Tuple[int, ...], - target_shape: Tuple[int, ...], - base_units: int = 128, - block_units: int = None, - scale_alpha: float = 1.0, - num_heads: int = 4, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - downsample: int = 2, - downsample_type: str = "patch_merge", - upsample_type: str = "upsample", - upsample_kernel_size: int = 3, - enc_depth: Tuple[int, ...] = [4, 4, 4], - enc_attn_patterns: str = None, - enc_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - enc_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "d", "d"), - ], - enc_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (0, 0, 0)], - enc_use_inter_ffn: bool = True, - dec_depth: Tuple[int, ...] = [2, 2], - dec_cross_start: int = 0, - dec_self_attn_patterns: str = None, - dec_self_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - dec_self_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "d", "d"), - ], - dec_self_shift_size: Tuple[Tuple[int, ...], ...] = [(1, 1, 1), (0, 0, 0)], - dec_cross_attn_patterns: str = None, - dec_cross_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], - dec_cross_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "l", "l"), - ], - dec_cross_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (0, 0)], - dec_cross_n_temporal: Tuple[int, ...] = [1, 2], - dec_cross_last_n_frames: int = None, - dec_use_inter_ffn: bool = True, - dec_hierarchical_pos_embed: bool = False, - num_global_vectors: int = 4, - use_dec_self_global: bool = True, - dec_self_update_global: bool = True, - use_dec_cross_global: bool = True, - use_global_vector_ffn: bool = True, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - self_pattern: str = "axial", - cross_self_pattern: str = "axial", - cross_pattern: str = "cross_1x1", - z_init_method: str = "nearest_interp", - initial_downsample_type: str = "conv", - initial_downsample_activation: str = "leaky", - initial_downsample_scale: int = 1, - initial_downsample_conv_layers: int = 2, - final_upsample_conv_layers: int = 2, - initial_downsample_stack_conv_num_layers: int = 1, - initial_downsample_stack_conv_dim_list: Tuple[int, ...] = None, - initial_downsample_stack_conv_downscale_list: Tuple[int, ...] = [1], - initial_downsample_stack_conv_num_conv_list: Tuple[int, ...] 
= [2], - ffn_activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - padding_type: str = "ignore", - pos_embed_type: str = "t+hw", - checkpoint_level: bool = True, - use_relative_pos: bool = True, - self_attn_use_final_proj: bool = True, - dec_use_first_self_attn: bool = False, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - conv_init_mode: str = "0", - down_up_linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.conv_init_mode = conv_init_mode - self.down_up_linear_init_mode = down_up_linear_init_mode - self.norm_init_mode = norm_init_mode - assert len(enc_depth) == len(dec_depth) - self.base_units = base_units - self.num_global_vectors = num_global_vectors - - num_blocks = len(enc_depth) - if isinstance(self_pattern, str): - enc_attn_patterns = [self_pattern] * num_blocks - - if isinstance(cross_self_pattern, str): - dec_self_attn_patterns = [cross_self_pattern] * num_blocks - - if isinstance(cross_pattern, str): - dec_cross_attn_patterns = [cross_pattern] * num_blocks - - if global_dim_ratio != 1: - assert ( - separate_global_qkv is True - ), "Setting global_dim_ratio != 1 requires separate_global_qkv == True." - self.global_dim_ratio = global_dim_ratio - self.z_init_method = z_init_method - assert self.z_init_method in ["zeros", "nearest_interp", "last", "mean"] - self.input_shape = input_shape - self.target_shape = target_shape - T_in, H_in, W_in, C_in = input_shape - T_out, H_out, W_out, C_out = target_shape - assert H_in == H_out and W_in == W_out - if self.num_global_vectors > 0: - init_data = paddle.zeros( - (self.num_global_vectors, global_dim_ratio * base_units) - ) - self.init_global_vectors = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(0.0), - ) - - self.init_global_vectors.stop_gradient = not True - new_input_shape = self.get_initial_encoder_final_decoder( - initial_downsample_scale=initial_downsample_scale, - initial_downsample_type=initial_downsample_type, - activation=initial_downsample_activation, - initial_downsample_conv_layers=initial_downsample_conv_layers, - final_upsample_conv_layers=final_upsample_conv_layers, - padding_type=padding_type, - initial_downsample_stack_conv_num_layers=initial_downsample_stack_conv_num_layers, - initial_downsample_stack_conv_dim_list=initial_downsample_stack_conv_dim_list, - initial_downsample_stack_conv_downscale_list=initial_downsample_stack_conv_downscale_list, - initial_downsample_stack_conv_num_conv_list=initial_downsample_stack_conv_num_conv_list, - ) - T_in, H_in, W_in, _ = new_input_shape - self.encoder = cuboid_encoder.CuboidTransformerEncoder( - input_shape=(T_in, H_in, W_in, base_units), - base_units=base_units, - block_units=block_units, - scale_alpha=scale_alpha, - depth=enc_depth, - downsample=downsample, - downsample_type=downsample_type, - block_attn_patterns=enc_attn_patterns, - block_cuboid_size=enc_cuboid_size, - block_strategy=enc_cuboid_strategy, - block_shift_size=enc_shift_size, - num_heads=num_heads, - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - gated_ffn=gated_ffn, - ffn_activation=ffn_activation, - norm_layer=norm_layer, - use_inter_ffn=enc_use_inter_ffn, - padding_type=padding_type, - use_global_vector=num_global_vectors > 0, - 
use_global_vector_ffn=use_global_vector_ffn, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - self_attn_use_final_proj=self_attn_use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - conv_init_mode=conv_init_mode, - down_linear_init_mode=down_up_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - self.enc_pos_embed = cuboid_decoder.PosEmbed( - embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in - ) - mem_shapes = self.encoder.get_mem_shapes() - self.z_proj = nn.Linear( - in_features=mem_shapes[-1][-1], out_features=mem_shapes[-1][-1] - ) - self.dec_pos_embed = cuboid_decoder.PosEmbed( - embed_dim=mem_shapes[-1][-1], - typ=pos_embed_type, - maxT=T_out, - maxH=mem_shapes[-1][1], - maxW=mem_shapes[-1][2], - ) - self.decoder = cuboid_decoder.CuboidTransformerDecoder( - target_temporal_length=T_out, - mem_shapes=mem_shapes, - cross_start=dec_cross_start, - depth=dec_depth, - upsample_type=upsample_type, - block_self_attn_patterns=dec_self_attn_patterns, - block_self_cuboid_size=dec_self_cuboid_size, - block_self_shift_size=dec_self_shift_size, - block_self_cuboid_strategy=dec_self_cuboid_strategy, - block_cross_attn_patterns=dec_cross_attn_patterns, - block_cross_cuboid_hw=dec_cross_cuboid_hw, - block_cross_shift_hw=dec_cross_shift_hw, - block_cross_cuboid_strategy=dec_cross_cuboid_strategy, - block_cross_n_temporal=dec_cross_n_temporal, - cross_last_n_frames=dec_cross_last_n_frames, - num_heads=num_heads, - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - upsample_kernel_size=upsample_kernel_size, - ffn_activation=ffn_activation, - gated_ffn=gated_ffn, - norm_layer=norm_layer, - use_inter_ffn=dec_use_inter_ffn, - max_temporal_relative=T_in + T_out, - padding_type=padding_type, - hierarchical_pos_embed=dec_hierarchical_pos_embed, - pos_embed_type=pos_embed_type, - use_self_global=num_global_vectors > 0 and use_dec_self_global, - self_update_global=dec_self_update_global, - use_cross_global=num_global_vectors > 0 and use_dec_cross_global, - use_global_vector_ffn=use_global_vector_ffn, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - self_attn_use_final_proj=self_attn_use_final_proj, - use_first_self_attn=dec_use_first_self_attn, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - conv_init_mode=conv_init_mode, - up_linear_init_mode=down_up_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - self.reset_parameters() - - def get_initial_encoder_final_decoder( - self, - initial_downsample_type, - activation, - initial_downsample_scale, - initial_downsample_conv_layers, - final_upsample_conv_layers, - padding_type, - initial_downsample_stack_conv_num_layers, - initial_downsample_stack_conv_dim_list, - initial_downsample_stack_conv_downscale_list, - initial_downsample_stack_conv_num_conv_list, - ): - T_in, H_in, W_in, C_in = self.input_shape - T_out, H_out, W_out, C_out = self.target_shape - self.initial_downsample_type = initial_downsample_type - if self.initial_downsample_type == "conv": - if isinstance(initial_downsample_scale, int): - initial_downsample_scale = ( - 1, - initial_downsample_scale, - initial_downsample_scale, - ) - elif 
len(initial_downsample_scale) == 2: - initial_downsample_scale = 1, *initial_downsample_scale - elif len(initial_downsample_scale) == 3: - initial_downsample_scale = tuple(initial_downsample_scale) - else: - raise NotImplementedError( - f"initial_downsample_scale {initial_downsample_scale} format not supported!" - ) - self.initial_encoder = InitialEncoder( - dim=C_in, - out_dim=self.base_units, - downsample_scale=initial_downsample_scale, - num_conv_layers=initial_downsample_conv_layers, - padding_type=padding_type, - activation=activation, - conv_init_mode=self.conv_init_mode, - linear_init_mode=self.down_up_linear_init_mode, - norm_init_mode=self.norm_init_mode, - ) - - self.final_decoder = FinalDecoder( - dim=self.base_units, - target_thw=(T_out, H_out, W_out), - num_conv_layers=final_upsample_conv_layers, - activation=activation, - conv_init_mode=self.conv_init_mode, - linear_init_mode=self.down_up_linear_init_mode, - norm_init_mode=self.norm_init_mode, - ) - new_input_shape = self.initial_encoder.patch_merge.get_out_shape( - self.input_shape - ) - self.dec_final_proj = nn.Linear( - in_features=self.base_units, out_features=C_out - ) - elif self.initial_downsample_type == "stack_conv": - if initial_downsample_stack_conv_dim_list is None: - initial_downsample_stack_conv_dim_list = [ - self.base_units - ] * initial_downsample_stack_conv_num_layers - self.initial_encoder = InitialStackPatchMergingEncoder( - num_merge=initial_downsample_stack_conv_num_layers, - in_dim=C_in, - out_dim_list=initial_downsample_stack_conv_dim_list, - downsample_scale_list=initial_downsample_stack_conv_downscale_list, - num_conv_per_merge_list=initial_downsample_stack_conv_num_conv_list, - padding_type=padding_type, - activation=activation, - conv_init_mode=self.conv_init_mode, - linear_init_mode=self.down_up_linear_init_mode, - norm_init_mode=self.norm_init_mode, - ) - initial_encoder_out_shape_list = self.initial_encoder.get_out_shape_list( - self.target_shape - ) - ( - dec_target_shape_list, - dec_in_dim, - ) = FinalStackUpsamplingDecoder.get_init_params( - enc_input_shape=self.target_shape, - enc_out_shape_list=initial_encoder_out_shape_list, - large_channel=True, - ) - self.final_decoder = FinalStackUpsamplingDecoder( - target_shape_list=dec_target_shape_list, - in_dim=dec_in_dim, - num_conv_per_up_list=initial_downsample_stack_conv_num_conv_list[::-1], - activation=activation, - conv_init_mode=self.conv_init_mode, - linear_init_mode=self.down_up_linear_init_mode, - norm_init_mode=self.norm_init_mode, - ) - self.dec_final_proj = nn.Linear( - in_features=dec_target_shape_list[-1][-1], out_features=C_out - ) - new_input_shape = self.initial_encoder.get_out_shape_list(self.input_shape)[ - -1 - ] - else: - raise NotImplementedError(f"{self.initial_downsample_type} is invalid.") - self.input_shape_after_initial_downsample = new_input_shape - T_in, H_in, W_in, _ = new_input_shape - return new_input_shape - - def reset_parameters(self): - if self.num_global_vectors > 0: - self.init_global_vectors = initializer.trunc_normal_( - self.init_global_vectors, std=0.02 - ) - if hasattr(self.initial_encoder, "reset_parameters"): - self.initial_encoder.reset_parameters() - else: - cuboid_utils.apply_initialization( - self.initial_encoder, - conv_mode=self.conv_init_mode, - linear_mode=self.down_up_linear_init_mode, - norm_mode=self.norm_init_mode, - ) - if hasattr(self.final_decoder, "reset_parameters"): - self.final_decoder.reset_parameters() - else: - cuboid_utils.apply_initialization( - self.final_decoder, - 
conv_mode=self.conv_init_mode, - linear_mode=self.down_up_linear_init_mode, - norm_mode=self.norm_init_mode, - ) - cuboid_utils.apply_initialization( - self.dec_final_proj, linear_mode=self.down_up_linear_init_mode - ) - self.encoder.reset_parameters() - self.enc_pos_embed.reset_parameters() - self.decoder.reset_parameters() - self.dec_pos_embed.reset_parameters() - cuboid_utils.apply_initialization(self.z_proj, linear_mode="0") - - def get_initial_z(self, final_mem, T_out): - B = final_mem.shape[0] - if self.z_init_method == "zeros": - z_shape = list((1, T_out)) + final_mem.shape[2:] - initial_z = paddle.zeros(shape=z_shape, dtype=final_mem.dtype) - initial_z = self.z_proj(self.dec_pos_embed(initial_z)).expand( - shape=[B, -1, -1, -1, -1] - ) - elif self.z_init_method == "nearest_interp": - initial_z = nn.functional.interpolate( - x=final_mem.transpose(perm=[0, 4, 1, 2, 3]), - size=(T_out, final_mem.shape[2], final_mem.shape[3]), - ).transpose(perm=[0, 2, 3, 4, 1]) - initial_z = self.z_proj(initial_z) - elif self.z_init_method == "last": - initial_z = paddle.broadcast_to( - x=final_mem[:, -1:, :, :, :], shape=(B, T_out) + final_mem.shape[2:] - ) - initial_z = self.z_proj(initial_z) - elif self.z_init_method == "mean": - initial_z = paddle.broadcast_to( - x=final_mem.mean(axis=1, keepdims=True), - shape=(B, T_out) + final_mem.shape[2:], - ) - initial_z = self.z_proj(initial_z) - else: - raise NotImplementedError - return initial_z - - def forward(self, x: "paddle.Tensor", verbose: bool = False) -> "paddle.Tensor": - """ - Args: - x (paddle.Tensor): Tensor with shape (B, T, H, W, C). - verbose (bool): If True, print intermediate shapes. - - Returns: - out (paddle.Tensor): The output Shape (B, T_out, H, W, C_out) - """ - - x = self.concat_to_tensor(x, self.input_keys) - flag_ndim = x.ndim - if flag_ndim == 6: - x = x.reshape([-1, *x.shape[2:]]) - B, _, _, _, _ = x.shape - - T_out = self.target_shape[0] - x = self.initial_encoder(x) - x = self.enc_pos_embed(x) - - if self.num_global_vectors > 0: - init_global_vectors = self.init_global_vectors.expand( - shape=[ - B, - self.num_global_vectors, - self.global_dim_ratio * self.base_units, - ] - ) - mem_l, mem_global_vector_l = self.encoder(x, init_global_vectors) - else: - mem_l = self.encoder(x) - - if verbose: - for i, mem in enumerate(mem_l): - print(f"mem[{i}].shape = {mem.shape}") - initial_z = self.get_initial_z(final_mem=mem_l[-1], T_out=T_out) - - if self.num_global_vectors > 0: - dec_out = self.decoder(initial_z, mem_l, mem_global_vector_l) - else: - dec_out = self.decoder(initial_z, mem_l) - - dec_out = self.final_decoder(dec_out) - - out = self.dec_final_proj(dec_out) - if flag_ndim == 6: - out = out.reshape([-1, *out.shape]) - return {key: out for key in self.output_keys} +from typing import Sequence +from typing import Tuple +from typing import Union + +import paddle +from paddle import nn + +import ppsci.arch.cuboid_transformer_decoder as cuboid_decoder +import ppsci.arch.cuboid_transformer_encoder as cuboid_encoder +import ppsci.arch.cuboid_transformer_utils as cuboid_utils +from ppsci.arch import activation as act_mod +from ppsci.arch import base +from ppsci.arch.cuboid_transformer_encoder import NEGATIVE_SLOPE +from ppsci.utils import initializer + +"""A space-time Transformer with Cuboid Attention""" + + +class InitialEncoder(nn.Layer): + def __init__( + self, + dim, + out_dim, + downsample_scale: Union[int, Sequence[int]], + num_conv_layers: int = 2, + activation: str = "leaky", + padding_type: str = "nearest", + 
conv_init_mode: str = "0", + linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(InitialEncoder, self).__init__() + self.num_conv_layers = num_conv_layers + self.conv_init_mode = conv_init_mode + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + conv_block = [] + for i in range(num_conv_layers): + if i == 0: + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=dim, + out_channels=out_dim, + ) + ) + conv_block.append(nn.GroupNorm(num_groups=16, num_channels=out_dim)) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + else: + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=out_dim, + out_channels=out_dim, + ) + ) + conv_block.append(nn.GroupNorm(num_groups=16, num_channels=out_dim)) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + self.conv_block = nn.Sequential(*conv_block) + if isinstance(downsample_scale, int): + patch_merge_downsample = (1, downsample_scale, downsample_scale) + elif len(downsample_scale) == 2: + patch_merge_downsample = (1, *downsample_scale) + elif len(downsample_scale) == 3: + patch_merge_downsample = tuple(downsample_scale) + else: + raise NotImplementedError( + f"downsample_scale {downsample_scale} format not supported!" + ) + self.patch_merge = cuboid_encoder.PatchMerging3D( + dim=out_dim, + out_dim=out_dim, + padding_type=padding_type, + downsample=patch_merge_downsample, + linear_init_mode=linear_init_mode, + norm_init_mode=norm_init_mode, + ) + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization( + m, + conv_mode=self.conv_init_mode, + linear_mode=self.linear_init_mode, + norm_mode=self.norm_init_mode, + ) + + def forward(self, x): + """x --> [K x Conv2D] --> PatchMerge + + Args: + x: (B, T, H, W, C) + + Returns: + out: (B, T, H_new, W_new, C_out) + """ + + B, T, H, W, C = x.shape + + if self.num_conv_layers > 0: + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = self.conv_block(x).transpose(perm=[0, 2, 3, 1]) + x = self.patch_merge(x.reshape([B, T, H, W, -1])) + else: + x = self.patch_merge(x) + return x + + +class FinalDecoder(nn.Layer): + def __init__( + self, + target_thw: Tuple[int, ...], + dim: int, + num_conv_layers: int = 2, + activation: str = "leaky", + conv_init_mode: str = "0", + linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(FinalDecoder, self).__init__() + self.target_thw = target_thw + self.dim = dim + self.num_conv_layers = num_conv_layers + self.conv_init_mode = conv_init_mode + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + conv_block = [] + for i in range(num_conv_layers): + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=dim, + out_channels=dim, + ) + ) + conv_block.append(nn.GroupNorm(num_groups=16, num_channels=dim)) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + self.conv_block = nn.Sequential(*conv_block) + self.upsample = cuboid_decoder.Upsample3DLayer( + dim=dim, + out_dim=dim, + target_size=target_thw, + kernel_size=3, + conv_init_mode=conv_init_mode, + ) + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization( + m, + 
conv_mode=self.conv_init_mode, + linear_mode=self.linear_init_mode, + norm_mode=self.norm_init_mode, + ) + + def forward(self, x): + """x --> Upsample --> [K x Conv2D] + + Args: + x: (B, T, H, W, C) + + Returns: + out: (B, T, H_new, W_new, C) + """ + + x = self.upsample(x) + if self.num_conv_layers > 0: + B, T, H, W, C = x.shape + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = ( + self.conv_block(x) + .transpose(perm=[0, 2, 3, 1]) + .reshape([B, T, H, W, -1]) + ) + return x + + +class InitialStackPatchMergingEncoder(nn.Layer): + def __init__( + self, + num_merge: int, + in_dim: int, + out_dim_list: Tuple[int, ...], + downsample_scale_list: Tuple[float, ...], + num_conv_per_merge_list: Tuple[int, ...] = None, + activation: str = "leaky", + padding_type: str = "nearest", + conv_init_mode: str = "0", + linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(InitialStackPatchMergingEncoder, self).__init__() + self.conv_init_mode = conv_init_mode + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + self.num_merge = num_merge + self.in_dim = in_dim + self.out_dim_list = out_dim_list[:num_merge] + self.downsample_scale_list = downsample_scale_list[:num_merge] + self.num_conv_per_merge_list = num_conv_per_merge_list + self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list] + self.conv_block_list = nn.LayerList() + self.patch_merge_list = nn.LayerList() + for i in range(num_merge): + if i == 0: + in_dim = in_dim + else: + in_dim = self.out_dim_list[i - 1] + out_dim = self.out_dim_list[i] + downsample_scale = self.downsample_scale_list[i] + conv_block = [] + for j in range(self.num_conv_per_merge_list[i]): + if j == 0: + conv_in_dim = in_dim + else: + conv_in_dim = out_dim + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=conv_in_dim, + out_channels=out_dim, + ) + ) + conv_block.append( + nn.GroupNorm( + num_groups=self.num_group_list[i], num_channels=out_dim + ) + ) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + conv_block = nn.Sequential(*conv_block) + self.conv_block_list.append(conv_block) + patch_merge = cuboid_encoder.PatchMerging3D( + dim=out_dim, + out_dim=out_dim, + padding_type=padding_type, + downsample=(1, downsample_scale, downsample_scale), + linear_init_mode=linear_init_mode, + norm_init_mode=norm_init_mode, + ) + self.patch_merge_list.append(patch_merge) + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization( + m, + conv_mode=self.conv_init_mode, + linear_mode=self.linear_init_mode, + norm_mode=self.norm_init_mode, + ) + + def get_out_shape_list(self, input_shape): + out_shape_list = [] + for patch_merge in self.patch_merge_list: + input_shape = patch_merge.get_out_shape(input_shape) + out_shape_list.append(input_shape) + return out_shape_list + + def forward(self, x): + """x --> [K x Conv2D] --> PatchMerge --> ... 
--> [K x Conv2D] --> PatchMerge + + Args: + x: (B, T, H, W, C) + + Returns: + out: (B, T, H_new, W_new, C_out) + """ + + for i, (conv_block, patch_merge) in enumerate( + zip(self.conv_block_list, self.patch_merge_list) + ): + B, T, H, W, C = x.shape + if self.num_conv_per_merge_list[i] > 0: + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = conv_block(x).transpose(perm=[0, 2, 3, 1]).reshape([B, T, H, W, -1]) + x = patch_merge(x) + return x + + +class FinalStackUpsamplingDecoder(nn.Layer): + def __init__( + self, + target_shape_list: Tuple[Tuple[int, ...]], + in_dim: int, + num_conv_per_up_list: Tuple[int, ...] = None, + activation: str = "leaky", + conv_init_mode: str = "0", + linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(FinalStackUpsamplingDecoder, self).__init__() + self.conv_init_mode = conv_init_mode + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + self.target_shape_list = target_shape_list + self.out_dim_list = [ + target_shape[-1] for target_shape in self.target_shape_list + ] + self.num_upsample = len(target_shape_list) + self.in_dim = in_dim + self.num_conv_per_up_list = num_conv_per_up_list + self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list] + self.conv_block_list = nn.LayerList() + self.upsample_list = nn.LayerList() + for i in range(self.num_upsample): + if i == 0: + in_dim = in_dim + else: + in_dim = self.out_dim_list[i - 1] + out_dim = self.out_dim_list[i] + upsample = cuboid_decoder.Upsample3DLayer( + dim=in_dim, + out_dim=in_dim, + target_size=target_shape_list[i][:-1], + kernel_size=3, + conv_init_mode=conv_init_mode, + ) + self.upsample_list.append(upsample) + conv_block = [] + for j in range(num_conv_per_up_list[i]): + if j == 0: + conv_in_dim = in_dim + else: + conv_in_dim = out_dim + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=conv_in_dim, + out_channels=out_dim, + ) + ) + conv_block.append( + nn.GroupNorm( + num_groups=self.num_group_list[i], num_channels=out_dim + ) + ) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + conv_block = nn.Sequential(*conv_block) + self.conv_block_list.append(conv_block) + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization( + m, + conv_mode=self.conv_init_mode, + linear_mode=self.linear_init_mode, + norm_mode=self.norm_init_mode, + ) + + @staticmethod + def get_init_params(enc_input_shape, enc_out_shape_list, large_channel=False): + dec_target_shape_list = list(enc_out_shape_list[:-1])[::-1] + [ + tuple(enc_input_shape) + ] + if large_channel: + dec_target_shape_list_large_channel = [] + for i, enc_out_shape in enumerate(enc_out_shape_list[::-1]): + dec_target_shape_large_channel = list(dec_target_shape_list[i]) + dec_target_shape_large_channel[-1] = enc_out_shape[-1] + dec_target_shape_list_large_channel.append( + tuple(dec_target_shape_large_channel) + ) + dec_target_shape_list = dec_target_shape_list_large_channel + dec_in_dim = enc_out_shape_list[-1][-1] + return dec_target_shape_list, dec_in_dim + + def forward(self, x): + """x --> Upsample --> [K x Conv2D] --> ... 
--> Upsample --> [K x Conv2D] + + Args: + x: Shape (B, T, H, W, C) + + Returns: + out: Shape (B, T, H_new, W_new, C) + """ + for i, (conv_block, upsample) in enumerate( + zip(self.conv_block_list, self.upsample_list) + ): + x = upsample(x) + if self.num_conv_per_up_list[i] > 0: + B, T, H, W, C = x.shape + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = conv_block(x).transpose(perm=[0, 2, 3, 1]).reshape([B, T, H, W, -1]) + return x + + +class CuboidTransformer(base.Arch): + """Cuboid Transformer for spatiotemporal forecasting + + We adopt the Non-autoregressive encoder-decoder architecture. + The decoder takes the multi-scale memory output from the encoder. + + The initial downsampling / upsampling layers will be + Downsampling: [K x Conv2D --> PatchMerge] + Upsampling: [Nearest Interpolation-based Upsample --> K x Conv2D] + + x --> downsample (optional) ---> (+pos_embed) ---> enc --> mem_l initial_z (+pos_embed) ---> FC + | | + |------------| + | + | + y <--- upsample (optional) <--- dec <---------- + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + input_shape (Tuple[int, ...]): The shape of the input data. + target_shape (Tuple[int, ...]): The shape of the target data. + base_units (int, optional): The base units. Defaults to 128. + block_units (int, optional): The block units. Defaults to None. + scale_alpha (float, optional): We scale up the channels based on the formula: + - round_to(base_units * max(downsample_scale) ** units_alpha, 4). Defaults to 1.0. + num_heads (int, optional): The number of heads. Defaults to 4. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projection dropout. Defaults to 0.0. + ffn_drop (float, optional): The ffn dropout. Defaults to 0.0. + downsample (int, optional): The rate of downsample. Defaults to 2. + downsample_type (str, optional): The type of downsample. Defaults to "patch_merge". + upsample_type (str, optional): The rate of upsample. Defaults to "upsample". + upsample_kernel_size (int, optional): The kernel size of upsample. Defaults to 3. + enc_depth (list, optional): The depth of encoder. Defaults to [4, 4, 4]. + enc_attn_patterns (str, optional): The pattern of encoder attention. Defaults to None. + enc_cuboid_size (list, optional): The cuboid size of encoder. Defaults to [(4, 4, 4), (4, 4, 4)]. + enc_cuboid_strategy (list, optional): The cuboid strategy of encoder. Defaults to [("l", "l", "l"), ("d", "d", "d")]. + enc_shift_size (list, optional): The shift size of encoder. Defaults to [(0, 0, 0), (0, 0, 0)]. + enc_use_inter_ffn (bool, optional): Whether to use intermediate FFN for encoder. Defaults to True. + dec_depth (list, optional): The depth of decoder. Defaults to [2, 2]. + dec_cross_start (int, optional): The cross start of decoder. Defaults to 0. + dec_self_attn_patterns (str, optional): The partterns of decoder. Defaults to None. + dec_self_cuboid_size (list, optional): The cuboid size of decoder. Defaults to [(4, 4, 4), (4, 4, 4)]. + dec_self_cuboid_strategy (list, optional): The strategy of decoder. Defaults to [("l", "l", "l"), ("d", "d", "d")]. + dec_self_shift_size (list, optional): The shift size of decoder. Defaults to [(1, 1, 1), (0, 0, 0)]. + dec_cross_attn_patterns (_type_, optional): The cross attention patterns of decoder. Defaults to None. + dec_cross_cuboid_hw (list, optional): The cuboid_hw of decoder. Defaults to [(4, 4), (4, 4)]. 
+ dec_cross_cuboid_strategy (list, optional): The cuboid strategy of decoder. Defaults to [("l", "l", "l"), ("d", "l", "l")]. + dec_cross_shift_hw (list, optional): The shift_hw of decoder. Defaults to [(0, 0), (0, 0)]. + dec_cross_n_temporal (list, optional): The cross_n_temporal of decoder. Defaults to [1, 2]. + dec_cross_last_n_frames (int, optional): The cross_last_n_frames of decoder. Defaults to None. + dec_use_inter_ffn (bool, optional): Whether to use intermediate FFN for decoder. Defaults to True. + dec_hierarchical_pos_embed (bool, optional): Whether to use hierarchical pos_embed for decoder. Defaults to False. + num_global_vectors (int, optional): The num of global vectors. Defaults to 4. + use_dec_self_global (bool, optional): Whether to use global vector for decoder. Defaults to True. + dec_self_update_global (bool, optional): Whether to update global vector for decoder. Defaults to True. + use_dec_cross_global (bool, optional): Whether to use cross global vector for decoder. Defaults to True. + use_global_vector_ffn (bool, optional): Whether to use global vector FFN. Defaults to True. + use_global_self_attn (bool, optional): Whether to use global attentions. Defaults to False. + separate_global_qkv (bool, optional): Whether to separate global qkv. Defaults to False. + global_dim_ratio (int, optional): The ratio of global dim. Defaults to 1. + self_pattern (str, optional): The pattern. Defaults to "axial". + cross_self_pattern (str, optional): The self cross pattern. Defaults to "axial". + cross_pattern (str, optional): The cross pattern. Defaults to "cross_1x1". + z_init_method (str, optional): How the initial input to the decoder is initialized. Defaults to "nearest_interp". + initial_downsample_type (str, optional): The downsample type of initial. Defaults to "conv". + initial_downsample_activation (str, optional): The downsample activation of initial. Defaults to "leaky". + initial_downsample_scale (int, optional): The downsample scale of initial. Defaults to 1. + initial_downsample_conv_layers (int, optional): The conv layer of downsample of initial. Defaults to 2. + final_upsample_conv_layers (int, optional): The conv layer of final upsample. Defaults to 2. + initial_downsample_stack_conv_num_layers (int, optional): The num of stack conv layer of initial downsample. Defaults to 1. + initial_downsample_stack_conv_dim_list (list, optional): The dim list of stack conv of initial downsample. Defaults to None. + initial_downsample_stack_conv_downscale_list (list, optional): The downscale list of stack conv of initial downsample. Defaults to [1]. + initial_downsample_stack_conv_num_conv_list (list, optional): The num of stack conv list of initial downsample. Defaults to [2]. + ffn_activation (str, optional): The activation of FFN. Defaults to "leaky". + gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. + norm_layer (str, optional): The type of normilize. Defaults to "layer_norm". + padding_type (str, optional): The type of padding. Defaults to "ignore". + pos_embed_type (str, optional): The type of pos embeding. Defaults to "t+hw". + checkpoint_level (bool, optional): Whether to use checkpoint. Defaults to True. + use_relative_pos (bool, optional): Whether to use relative pose. Defaults to True. + self_attn_use_final_proj (bool, optional): Whether to use final projection. Defaults to True. + dec_use_first_self_attn (bool, optional): Whether to use first self attention for decoder. Defaults to False. 
+ attn_linear_init_mode (str, optional): The mode of attention linear init. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear init. Defaults to "0". + conv_init_mode (str, optional): The mode of conv init. Defaults to "0". + down_up_linear_init_mode (str, optional): The mode of downsample and upsample linear init. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization init. Defaults to "0". + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + input_shape: Tuple[int, ...], + target_shape: Tuple[int, ...], + base_units: int = 128, + block_units: int = None, + scale_alpha: float = 1.0, + num_heads: int = 4, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + downsample: int = 2, + downsample_type: str = "patch_merge", + upsample_type: str = "upsample", + upsample_kernel_size: int = 3, + enc_depth: Tuple[int, ...] = [4, 4, 4], + enc_attn_patterns: str = None, + enc_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + enc_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "d", "d"), + ], + enc_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (0, 0, 0)], + enc_use_inter_ffn: bool = True, + dec_depth: Tuple[int, ...] = [2, 2], + dec_cross_start: int = 0, + dec_self_attn_patterns: str = None, + dec_self_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + dec_self_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "d", "d"), + ], + dec_self_shift_size: Tuple[Tuple[int, ...], ...] = [(1, 1, 1), (0, 0, 0)], + dec_cross_attn_patterns: str = None, + dec_cross_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], + dec_cross_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "l", "l"), + ], + dec_cross_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (0, 0)], + dec_cross_n_temporal: Tuple[int, ...] = [1, 2], + dec_cross_last_n_frames: int = None, + dec_use_inter_ffn: bool = True, + dec_hierarchical_pos_embed: bool = False, + num_global_vectors: int = 4, + use_dec_self_global: bool = True, + dec_self_update_global: bool = True, + use_dec_cross_global: bool = True, + use_global_vector_ffn: bool = True, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + self_pattern: str = "axial", + cross_self_pattern: str = "axial", + cross_pattern: str = "cross_1x1", + z_init_method: str = "nearest_interp", + initial_downsample_type: str = "conv", + initial_downsample_activation: str = "leaky", + initial_downsample_scale: int = 1, + initial_downsample_conv_layers: int = 2, + final_upsample_conv_layers: int = 2, + initial_downsample_stack_conv_num_layers: int = 1, + initial_downsample_stack_conv_dim_list: Tuple[int, ...] = None, + initial_downsample_stack_conv_downscale_list: Tuple[int, ...] = [1], + initial_downsample_stack_conv_num_conv_list: Tuple[int, ...] 
= [2], + ffn_activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + padding_type: str = "ignore", + pos_embed_type: str = "t+hw", + checkpoint_level: bool = True, + use_relative_pos: bool = True, + self_attn_use_final_proj: bool = True, + dec_use_first_self_attn: bool = False, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + conv_init_mode: str = "0", + down_up_linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.conv_init_mode = conv_init_mode + self.down_up_linear_init_mode = down_up_linear_init_mode + self.norm_init_mode = norm_init_mode + assert len(enc_depth) == len(dec_depth) + self.base_units = base_units + self.num_global_vectors = num_global_vectors + + num_blocks = len(enc_depth) + if isinstance(self_pattern, str): + enc_attn_patterns = [self_pattern] * num_blocks + + if isinstance(cross_self_pattern, str): + dec_self_attn_patterns = [cross_self_pattern] * num_blocks + + if isinstance(cross_pattern, str): + dec_cross_attn_patterns = [cross_pattern] * num_blocks + + if global_dim_ratio != 1: + assert ( + separate_global_qkv is True + ), "Setting global_dim_ratio != 1 requires separate_global_qkv == True." + self.global_dim_ratio = global_dim_ratio + self.z_init_method = z_init_method + assert self.z_init_method in ["zeros", "nearest_interp", "last", "mean"] + self.input_shape = input_shape + self.target_shape = target_shape + T_in, H_in, W_in, C_in = input_shape + T_out, H_out, W_out, C_out = target_shape + assert H_in == H_out and W_in == W_out + if self.num_global_vectors > 0: + init_data = paddle.zeros( + (self.num_global_vectors, global_dim_ratio * base_units) + ) + self.init_global_vectors = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(0.0), + ) + + self.init_global_vectors.stop_gradient = not True + new_input_shape = self.get_initial_encoder_final_decoder( + initial_downsample_scale=initial_downsample_scale, + initial_downsample_type=initial_downsample_type, + activation=initial_downsample_activation, + initial_downsample_conv_layers=initial_downsample_conv_layers, + final_upsample_conv_layers=final_upsample_conv_layers, + padding_type=padding_type, + initial_downsample_stack_conv_num_layers=initial_downsample_stack_conv_num_layers, + initial_downsample_stack_conv_dim_list=initial_downsample_stack_conv_dim_list, + initial_downsample_stack_conv_downscale_list=initial_downsample_stack_conv_downscale_list, + initial_downsample_stack_conv_num_conv_list=initial_downsample_stack_conv_num_conv_list, + ) + T_in, H_in, W_in, _ = new_input_shape + self.encoder = cuboid_encoder.CuboidTransformerEncoder( + input_shape=(T_in, H_in, W_in, base_units), + base_units=base_units, + block_units=block_units, + scale_alpha=scale_alpha, + depth=enc_depth, + downsample=downsample, + downsample_type=downsample_type, + block_attn_patterns=enc_attn_patterns, + block_cuboid_size=enc_cuboid_size, + block_strategy=enc_cuboid_strategy, + block_shift_size=enc_shift_size, + num_heads=num_heads, + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + gated_ffn=gated_ffn, + ffn_activation=ffn_activation, + norm_layer=norm_layer, + use_inter_ffn=enc_use_inter_ffn, + padding_type=padding_type, + use_global_vector=num_global_vectors > 0, + 
use_global_vector_ffn=use_global_vector_ffn, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + self_attn_use_final_proj=self_attn_use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + conv_init_mode=conv_init_mode, + down_linear_init_mode=down_up_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + self.enc_pos_embed = cuboid_decoder.PosEmbed( + embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in + ) + mem_shapes = self.encoder.get_mem_shapes() + self.z_proj = nn.Linear( + in_features=mem_shapes[-1][-1], out_features=mem_shapes[-1][-1] + ) + self.dec_pos_embed = cuboid_decoder.PosEmbed( + embed_dim=mem_shapes[-1][-1], + typ=pos_embed_type, + maxT=T_out, + maxH=mem_shapes[-1][1], + maxW=mem_shapes[-1][2], + ) + self.decoder = cuboid_decoder.CuboidTransformerDecoder( + target_temporal_length=T_out, + mem_shapes=mem_shapes, + cross_start=dec_cross_start, + depth=dec_depth, + upsample_type=upsample_type, + block_self_attn_patterns=dec_self_attn_patterns, + block_self_cuboid_size=dec_self_cuboid_size, + block_self_shift_size=dec_self_shift_size, + block_self_cuboid_strategy=dec_self_cuboid_strategy, + block_cross_attn_patterns=dec_cross_attn_patterns, + block_cross_cuboid_hw=dec_cross_cuboid_hw, + block_cross_shift_hw=dec_cross_shift_hw, + block_cross_cuboid_strategy=dec_cross_cuboid_strategy, + block_cross_n_temporal=dec_cross_n_temporal, + cross_last_n_frames=dec_cross_last_n_frames, + num_heads=num_heads, + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + upsample_kernel_size=upsample_kernel_size, + ffn_activation=ffn_activation, + gated_ffn=gated_ffn, + norm_layer=norm_layer, + use_inter_ffn=dec_use_inter_ffn, + max_temporal_relative=T_in + T_out, + padding_type=padding_type, + hierarchical_pos_embed=dec_hierarchical_pos_embed, + pos_embed_type=pos_embed_type, + use_self_global=num_global_vectors > 0 and use_dec_self_global, + self_update_global=dec_self_update_global, + use_cross_global=num_global_vectors > 0 and use_dec_cross_global, + use_global_vector_ffn=use_global_vector_ffn, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + self_attn_use_final_proj=self_attn_use_final_proj, + use_first_self_attn=dec_use_first_self_attn, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + conv_init_mode=conv_init_mode, + up_linear_init_mode=down_up_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + self.reset_parameters() + + def get_initial_encoder_final_decoder( + self, + initial_downsample_type, + activation, + initial_downsample_scale, + initial_downsample_conv_layers, + final_upsample_conv_layers, + padding_type, + initial_downsample_stack_conv_num_layers, + initial_downsample_stack_conv_dim_list, + initial_downsample_stack_conv_downscale_list, + initial_downsample_stack_conv_num_conv_list, + ): + T_in, H_in, W_in, C_in = self.input_shape + T_out, H_out, W_out, C_out = self.target_shape + self.initial_downsample_type = initial_downsample_type + if self.initial_downsample_type == "conv": + if isinstance(initial_downsample_scale, int): + initial_downsample_scale = ( + 1, + initial_downsample_scale, + initial_downsample_scale, + ) + elif 
len(initial_downsample_scale) == 2: + initial_downsample_scale = 1, *initial_downsample_scale + elif len(initial_downsample_scale) == 3: + initial_downsample_scale = tuple(initial_downsample_scale) + else: + raise NotImplementedError( + f"initial_downsample_scale {initial_downsample_scale} format not supported!" + ) + self.initial_encoder = InitialEncoder( + dim=C_in, + out_dim=self.base_units, + downsample_scale=initial_downsample_scale, + num_conv_layers=initial_downsample_conv_layers, + padding_type=padding_type, + activation=activation, + conv_init_mode=self.conv_init_mode, + linear_init_mode=self.down_up_linear_init_mode, + norm_init_mode=self.norm_init_mode, + ) + + self.final_decoder = FinalDecoder( + dim=self.base_units, + target_thw=(T_out, H_out, W_out), + num_conv_layers=final_upsample_conv_layers, + activation=activation, + conv_init_mode=self.conv_init_mode, + linear_init_mode=self.down_up_linear_init_mode, + norm_init_mode=self.norm_init_mode, + ) + new_input_shape = self.initial_encoder.patch_merge.get_out_shape( + self.input_shape + ) + self.dec_final_proj = nn.Linear( + in_features=self.base_units, out_features=C_out + ) + elif self.initial_downsample_type == "stack_conv": + if initial_downsample_stack_conv_dim_list is None: + initial_downsample_stack_conv_dim_list = [ + self.base_units + ] * initial_downsample_stack_conv_num_layers + self.initial_encoder = InitialStackPatchMergingEncoder( + num_merge=initial_downsample_stack_conv_num_layers, + in_dim=C_in, + out_dim_list=initial_downsample_stack_conv_dim_list, + downsample_scale_list=initial_downsample_stack_conv_downscale_list, + num_conv_per_merge_list=initial_downsample_stack_conv_num_conv_list, + padding_type=padding_type, + activation=activation, + conv_init_mode=self.conv_init_mode, + linear_init_mode=self.down_up_linear_init_mode, + norm_init_mode=self.norm_init_mode, + ) + initial_encoder_out_shape_list = self.initial_encoder.get_out_shape_list( + self.target_shape + ) + ( + dec_target_shape_list, + dec_in_dim, + ) = FinalStackUpsamplingDecoder.get_init_params( + enc_input_shape=self.target_shape, + enc_out_shape_list=initial_encoder_out_shape_list, + large_channel=True, + ) + self.final_decoder = FinalStackUpsamplingDecoder( + target_shape_list=dec_target_shape_list, + in_dim=dec_in_dim, + num_conv_per_up_list=initial_downsample_stack_conv_num_conv_list[::-1], + activation=activation, + conv_init_mode=self.conv_init_mode, + linear_init_mode=self.down_up_linear_init_mode, + norm_init_mode=self.norm_init_mode, + ) + self.dec_final_proj = nn.Linear( + in_features=dec_target_shape_list[-1][-1], out_features=C_out + ) + new_input_shape = self.initial_encoder.get_out_shape_list(self.input_shape)[ + -1 + ] + else: + raise NotImplementedError(f"{self.initial_downsample_type} is invalid.") + self.input_shape_after_initial_downsample = new_input_shape + T_in, H_in, W_in, _ = new_input_shape + return new_input_shape + + def reset_parameters(self): + if self.num_global_vectors > 0: + self.init_global_vectors = initializer.trunc_normal_( + self.init_global_vectors, std=0.02 + ) + if hasattr(self.initial_encoder, "reset_parameters"): + self.initial_encoder.reset_parameters() + else: + cuboid_utils.apply_initialization( + self.initial_encoder, + conv_mode=self.conv_init_mode, + linear_mode=self.down_up_linear_init_mode, + norm_mode=self.norm_init_mode, + ) + if hasattr(self.final_decoder, "reset_parameters"): + self.final_decoder.reset_parameters() + else: + cuboid_utils.apply_initialization( + self.final_decoder, + 
conv_mode=self.conv_init_mode, + linear_mode=self.down_up_linear_init_mode, + norm_mode=self.norm_init_mode, + ) + cuboid_utils.apply_initialization( + self.dec_final_proj, linear_mode=self.down_up_linear_init_mode + ) + self.encoder.reset_parameters() + self.enc_pos_embed.reset_parameters() + self.decoder.reset_parameters() + self.dec_pos_embed.reset_parameters() + cuboid_utils.apply_initialization(self.z_proj, linear_mode="0") + + def get_initial_z(self, final_mem, T_out): + B = final_mem.shape[0] + if self.z_init_method == "zeros": + z_shape = list((1, T_out)) + final_mem.shape[2:] + initial_z = paddle.zeros(shape=z_shape, dtype=final_mem.dtype) + initial_z = self.z_proj(self.dec_pos_embed(initial_z)).expand( + shape=[B, -1, -1, -1, -1] + ) + elif self.z_init_method == "nearest_interp": + initial_z = nn.functional.interpolate( + x=final_mem.transpose(perm=[0, 4, 1, 2, 3]), + size=(T_out, final_mem.shape[2], final_mem.shape[3]), + ).transpose(perm=[0, 2, 3, 4, 1]) + initial_z = self.z_proj(initial_z) + elif self.z_init_method == "last": + initial_z = paddle.broadcast_to( + x=final_mem[:, -1:, :, :, :], shape=(B, T_out) + final_mem.shape[2:] + ) + initial_z = self.z_proj(initial_z) + elif self.z_init_method == "mean": + initial_z = paddle.broadcast_to( + x=final_mem.mean(axis=1, keepdims=True), + shape=(B, T_out) + final_mem.shape[2:], + ) + initial_z = self.z_proj(initial_z) + else: + raise NotImplementedError + return initial_z + + def forward(self, x: "paddle.Tensor", verbose: bool = False) -> "paddle.Tensor": + """ + Args: + x (paddle.Tensor): Tensor with shape (B, T, H, W, C). + verbose (bool): If True, print intermediate shapes. + + Returns: + out (paddle.Tensor): The output Shape (B, T_out, H, W, C_out) + """ + + x = self.concat_to_tensor(x, self.input_keys) + flag_ndim = x.ndim + if flag_ndim == 6: + x = x.reshape([-1, *x.shape[2:]]) + B, _, _, _, _ = x.shape + + T_out = self.target_shape[0] + x = self.initial_encoder(x) + x = self.enc_pos_embed(x) + + if self.num_global_vectors > 0: + init_global_vectors = self.init_global_vectors.expand( + shape=[ + B, + self.num_global_vectors, + self.global_dim_ratio * self.base_units, + ] + ) + mem_l, mem_global_vector_l = self.encoder(x, init_global_vectors) + else: + mem_l = self.encoder(x) + + if verbose: + for i, mem in enumerate(mem_l): + print(f"mem[{i}].shape = {mem.shape}") + initial_z = self.get_initial_z(final_mem=mem_l[-1], T_out=T_out) + + if self.num_global_vectors > 0: + dec_out = self.decoder(initial_z, mem_l, mem_global_vector_l) + else: + dec_out = self.decoder(initial_z, mem_l) + + dec_out = self.final_decoder(dec_out) + + out = self.dec_final_proj(dec_out) + if flag_ndim == 6: + out = out.reshape([-1, *out.shape]) + return {key: out for key in self.output_keys} diff --git a/ppsci/arch/cuboid_transformer_decoder.py b/ppsci/arch/cuboid_transformer_decoder.py index 894363b1a8..e6114ca6f8 100644 --- a/ppsci/arch/cuboid_transformer_decoder.py +++ b/ppsci/arch/cuboid_transformer_decoder.py @@ -1,1245 +1,1245 @@ -from functools import lru_cache -from typing import Tuple - -import numpy as np -import paddle -import paddle.nn.functional as F -from paddle import nn -from paddle.distributed import fleet - -import ppsci.arch.cuboid_transformer_encoder as cuboid_encoder -import ppsci.arch.cuboid_transformer_utils as cuboid_utils -from ppsci.utils import initializer - - -class PosEmbed(nn.Layer): - """Pose embeding - - Args: - embed_dim (int): The dimension of embeding. - maxT (int): The embeding max time. 
- maxH (int): The embeding max height. - maxW (int): The embeding max width. - typ (str): - The type of the positional embedding. - - t+h+w: - Embed the spatial position to embeddings - - t+hw: - Embed the spatial position to embeddings - """ - - def __init__(self, embed_dim, maxT, maxH, maxW, typ: str = "t+h+w"): - super(PosEmbed, self).__init__() - self.typ = typ - assert self.typ in ["t+h+w", "t+hw"] - self.maxT = maxT - self.maxH = maxH - self.maxW = maxW - self.embed_dim = embed_dim - if self.typ == "t+h+w": - self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim) - self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim) - self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim) - elif self.typ == "t+hw": - self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim) - self.HW_embed = nn.Embedding( - num_embeddings=maxH * maxW, embedding_dim=embed_dim - ) - else: - raise NotImplementedError(f"{self.typ} is invalid.") - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization(m, embed_mode="0") - - def forward(self, x): - """ - Args: - x : Shape (B, T, H, W, C) - - Returns: - out : the x + positional embeddings - """ - - _, T, H, W, _ = x.shape - t_idx = paddle.arange(end=T) - h_idx = paddle.arange(end=H) - w_idx = paddle.arange(end=W) - if self.typ == "t+h+w": - return ( - x - + self.T_embed(t_idx).reshape([T, 1, 1, self.embed_dim]) - + self.H_embed(h_idx).reshape([1, H, 1, self.embed_dim]) - + self.W_embed(w_idx).reshape([1, 1, W, self.embed_dim]) - ) - elif self.typ == "t+hw": - spatial_idx = h_idx.unsqueeze(axis=-1) * self.maxW + w_idx - return ( - x - + self.T_embed(t_idx).reshape([T, 1, 1, self.embed_dim]) - + self.HW_embed(spatial_idx) - ) - else: - raise NotImplementedError(f"{self.typ} is invalid.") - - -@lru_cache() -def compute_cuboid_cross_attention_mask( - T_x, T_mem, H, W, n_temporal, cuboid_hw, shift_hw, strategy, padding_type, device -): - pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal - pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal - pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0] - pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1] - mem_cuboid_size = ((T_mem + pad_t_mem) // n_temporal,) + cuboid_hw - x_cuboid_size = ((T_x + pad_t_x) // n_temporal,) + cuboid_hw - if pad_t_mem > 0 or pad_h > 0 or pad_w > 0: - if padding_type == "ignore": - mem_mask = paddle.ones(shape=(1, T_mem, H, W, 1), dtype="bool") - mem_mask = F.pad( - mem_mask, [0, 0, 0, pad_w, 0, pad_h, pad_t_mem, 0], data_format="NDHWC" - ) - else: - mem_mask = paddle.ones( - shape=(1, T_mem + pad_t_mem, H + pad_h, W + pad_w, 1), dtype="bool" - ) - if pad_t_x > 0 or pad_h > 0 or pad_w > 0: - if padding_type == "ignore": - x_mask = paddle.ones(shape=(1, T_x, H, W, 1), dtype="bool") - x_mask = F.pad( - x_mask, [0, 0, 0, pad_w, 0, pad_h, 0, pad_t_x], data_format="NDHWC" - ) - else: - x_mask = paddle.ones( - shape=(1, T_x + pad_t_x, H + pad_h, W + pad_w, 1), dtype="bool" - ) - if any(i > 0 for i in shift_hw): - if padding_type == "ignore": - x_mask = paddle.roll( - x=x_mask, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) - ) - mem_mask = paddle.roll( - x=mem_mask, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) - ) - x_mask = cuboid_encoder.cuboid_reorder(x_mask, x_cuboid_size, strategy=strategy) - x_mask = x_mask.squeeze(axis=-1).squeeze(axis=0) - num_cuboids, x_cuboid_volume = x_mask.shape - mem_mask = cuboid_encoder.cuboid_reorder( - mem_mask, 
mem_cuboid_size, strategy=strategy - ) - mem_mask = mem_mask.squeeze(axis=-1).squeeze(axis=0) - _, mem_cuboid_volume = mem_mask.shape - shift_mask = np.zeros(shape=(1, n_temporal, H + pad_h, W + pad_w, 1)) - cnt = 0 - for h in ( - slice(-cuboid_hw[0]), - slice(-cuboid_hw[0], -shift_hw[0]), - slice(-shift_hw[0], None), - ): - for w in ( - slice(-cuboid_hw[1]), - slice(-cuboid_hw[1], -shift_hw[1]), - slice(-shift_hw[1], None), - ): - shift_mask[:, :, h, w, :] = cnt - cnt += 1 - shift_mask = paddle.to_tensor(shift_mask) - shift_mask = cuboid_encoder.cuboid_reorder( - shift_mask, (1,) + cuboid_hw, strategy=strategy - ) - shift_mask = shift_mask.squeeze(axis=-1).squeeze(axis=0) - shift_mask = shift_mask.unsqueeze(axis=1) - shift_mask.unsqueeze(axis=2) == 0 - bh_bw = cuboid_hw[0] * cuboid_hw[1] - attn_mask = ( - shift_mask.reshape((num_cuboids, 1, bh_bw, 1, bh_bw)) - * x_mask.reshape((num_cuboids, -1, bh_bw, 1, 1)) - * mem_mask.reshape([num_cuboids, 1, 1, -1, bh_bw]) - ) - attn_mask = attn_mask.reshape([num_cuboids, x_cuboid_volume, mem_cuboid_volume]) - return attn_mask - - -class CuboidCrossAttentionLayer(nn.Layer): - """Implements the cuboid cross attention. - - The idea of Cuboid Cross Attention is to extend the idea of cuboid self attention to work for the - encoder-decoder-type cross attention. - - Assume that there is a memory tensor with shape (T1, H, W, C) and another query tensor with shape (T2, H, W, C), - - Here, we decompose the query tensor and the memory tensor into the same number of cuboids and attend the cuboid in - the query tensor with the corresponding cuboid in the memory tensor. - - For the height and width axes, we reuse the grid decomposition techniques described in the cuboid self-attention. - For the temporal axis, the layer supports the "n_temporal" parameter, that controls the number of cuboids we can - get after cutting the tensors. For example, if the temporal dilation is 2, both the query and - memory will be decomposed into 2 cuboids along the temporal axis. Like in the Cuboid Self-attention, - we support "local" and "dilated" decomposition strategy. - - The complexity of the layer is O((T2 / n_t * Bh * Bw) * (T1 / n_t * Bh * Bw) * n_t (H / Bh) (W / Bw)) = O(T2 * T1 / n_t H W Bh Bw) - - Args: - dim (int): The dimention of input tensor. - num_heads (int): The number of head. - n_temporal (int, optional): The num of temporal. Defaults to 1. - cuboid_hw (tuple, optional): The height and width of cuboid. Defaults to (7, 7). - shift_hw (tuple, optional): The height and width of shift. Defaults to (0, 0). - strategy (tuple, optional): The strategy. Defaults to ("d", "l", "l"). - padding_type (str, optional): The type of padding. Defaults to "ignore". - cross_last_n_frames (int, optional): The cross_last_n_frames of decoder. Defaults to None. - qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. - qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projrction dropout. Defaults to 0.0. - max_temporal_relative (int, optional): The max temporal. Defaults to 50. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to True. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. 
- global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. - checkpoint_level (int, optional): Whether to enable gradient checkpointing. Defaults to 1. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - num_heads: int, - n_temporal: int = 1, - cuboid_hw: Tuple[int, ...] = (7, 7), - shift_hw: Tuple[int, ...] = (0, 0), - strategy: Tuple[str, ...] = ("d", "l", "l"), - padding_type: str = "ignore", - cross_last_n_frames: int = None, - qkv_bias: bool = False, - qk_scale: float = None, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - max_temporal_relative: int = 50, - norm_layer: str = "layer_norm", - use_global_vector: bool = True, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - checkpoint_level: int = 1, - use_relative_pos: bool = True, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(CuboidCrossAttentionLayer, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.norm_init_mode = norm_init_mode - self.dim = dim - self.num_heads = num_heads - self.n_temporal = n_temporal - assert n_temporal > 0 - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - shift_hw = list(shift_hw) - if strategy[1] == "d": - shift_hw[0] = 0 - if strategy[2] == "d": - shift_hw[1] = 0 - self.cuboid_hw = cuboid_hw - self.shift_hw = tuple(shift_hw) - self.strategy = strategy - self.padding_type = padding_type - self.max_temporal_relative = max_temporal_relative - self.cross_last_n_frames = cross_last_n_frames - self.use_relative_pos = use_relative_pos - self.use_global_vector = use_global_vector - self.separate_global_qkv = separate_global_qkv - if global_dim_ratio != 1 and separate_global_qkv is False: - raise ValueError( - "Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
- ) - self.global_dim_ratio = global_dim_ratio - if self.padding_type not in ["ignore", "zeros", "nearest"]: - raise ValueError('padding_type should be ["ignore", "zeros", "nearest"]') - if use_relative_pos: - init_data = paddle.zeros( - ( - (2 * max_temporal_relative - 1) - * (2 * cuboid_hw[0] - 1) - * (2 * cuboid_hw[1] - 1), - num_heads, - ) - ) - self.relative_position_bias_table = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(0.0), - ) - self.relative_position_bias_table.stop_gradient = not True - self.relative_position_bias_table = initializer.trunc_normal_( - self.relative_position_bias_table, std=0.02 - ) - - coords_t = paddle.arange(end=max_temporal_relative) - coords_h = paddle.arange(end=self.cuboid_hw[0]) - coords_w = paddle.arange(end=self.cuboid_hw[1]) - coords = paddle.stack(x=paddle.meshgrid(coords_t, coords_h, coords_w)) - coords_flatten = paddle.flatten(x=coords, start_axis=1) - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] - relative_coords = relative_coords.transpose(perm=[1, 2, 0]) - relative_coords[:, :, 0] += max_temporal_relative - 1 - relative_coords[:, :, 1] += self.cuboid_hw[0] - 1 - relative_coords[:, :, 2] += self.cuboid_hw[1] - 1 - relative_position_index = ( - relative_coords[:, :, 0] - * (2 * self.cuboid_hw[0] - 1) - * (2 * self.cuboid_hw[1] - 1) - + relative_coords[:, :, 1] * (2 * self.cuboid_hw[1] - 1) - + relative_coords[:, :, 2] - ) - self.register_buffer( - name="relative_position_index", tensor=relative_position_index - ) - self.q_proj = nn.Linear(in_features=dim, out_features=dim, bias_attr=qkv_bias) - self.kv_proj = nn.Linear( - in_features=dim, out_features=dim * 2, bias_attr=qkv_bias - ) - self.attn_drop = nn.Dropout(p=attn_drop) - self.proj = nn.Linear(in_features=dim, out_features=dim) - self.proj_drop = nn.Dropout(p=proj_drop) - if self.use_global_vector: - if self.separate_global_qkv: - self.l2g_q_net = nn.Linear( - in_features=dim, out_features=dim, bias_attr=qkv_bias - ) - self.l2g_global_kv_net = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=dim * 2, - bias_attr=qkv_bias, - ) - self.norm = cuboid_utils.get_norm_layer(norm_layer, in_channels=dim) - self._checkpoint_level = checkpoint_level - self.reset_parameters() - - def reset_parameters(self): - cuboid_utils.apply_initialization( - self.q_proj, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.kv_proj, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.proj, linear_mode=self.ffn_linear_init_mode - ) - cuboid_utils.apply_initialization(self.norm, norm_mode=self.norm_init_mode) - if self.use_global_vector: - if self.separate_global_qkv: - cuboid_utils.apply_initialization( - self.l2g_q_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.l2g_global_kv_net, linear_mode=self.attn_linear_init_mode - ) - - def forward(self, x, mem, mem_global_vectors=None): - """Calculate the forward - - Along the temporal axis, we pad the mem tensor from the left and the x tensor from the right so that the - relative position encoding can be calculated correctly. 
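# A minimal pure-Python sketch of the temporal alignment described above, assuming the
# dilated-style grouping shown in the docstring example that follows (T_mem = 5, T_x = 6):
# mem is padded on the LEFT and x on the RIGHT to a multiple of n_temporal, then each is
# split into n_temporal interleaved groups that get attended pairwise. No Paddle needed.
def split_temporal(frames, n_temporal, pad_left):
    """Pad `frames` (a list of frame labels) to a multiple of n_temporal, then take every n_temporal-th frame."""
    pad = (n_temporal - len(frames) % n_temporal) % n_temporal
    padded = (["pad"] * pad + frames) if pad_left else (frames + ["pad"] * pad)
    return [padded[i::n_temporal] for i in range(n_temporal)]

mem, x = [0, 1, 2, 3, 4], [0, 1, 2, 3, 4, 5]
for n in (1, 2, 3):
    pairs = list(zip(split_temporal(mem, n, pad_left=True),
                     split_temporal(x, n, pad_left=False)))
    print(f"n_temporal={n}: {pairs}")
# n_temporal=2 prints (['pad', 1, 3], [0, 2, 4]) and ([0, 2, 4], [1, 3, 5]),
# matching the example given in the docstring below.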
For example: - - mem: 0, 1, 2, 3, 4 - x: 0, 1, 2, 3, 4, 5 - - n_temporal = 1 - mem: 0, 1, 2, 3, 4 x: 0, 1, 2, 3, 4, 5 - - n_temporal = 2 - mem: pad, 1, 3 x: 0, 2, 4 - mem: 0, 2, 4 x: 1, 3, 5 - - n_temporal = 3 - mem: pad, 2 dec: 0, 3 - mem: 0, 3 dec: 1, 4 - mem: 1, 4 dec: 2, 5 - - Args: - x (paddle.Tensor): The input of the layer. It will have shape (B, T, H, W, C) - mem (paddle.Tensor): The memory. It will have shape (B, T_mem, H, W, C) - mem_global_vectors (paddle.Tensor): The global vectors from the memory. It will have shape (B, N, C) - - Returns: - out (paddle.Tensor): Output tensor should have shape (B, T, H, W, C_out) - """ - - if self.cross_last_n_frames is not None: - cross_last_n_frames = int(min(self.cross_last_n_frames, mem.shape[1])) - mem = mem[:, -cross_last_n_frames:, ...] - if self.use_global_vector: - _, num_global, _ = mem_global_vectors.shape - x = self.norm(x) - B, T_x, H, W, C_in = x.shape - B_mem, T_mem, H_mem, W_mem, C_mem = mem.shape - assert T_x < self.max_temporal_relative and T_mem < self.max_temporal_relative - cuboid_hw = self.cuboid_hw - n_temporal = self.n_temporal - shift_hw = self.shift_hw - assert ( - B_mem == B and H == H_mem and W == W_mem and C_in == C_mem - ), f"Shape of memory and the input tensor does not match. x.shape={x.shape}, mem.shape={mem.shape}" - pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal - pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal - pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0] - pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1] - mem = cuboid_utils.generalize_padding( - mem, pad_t_mem, pad_h, pad_w, self.padding_type, t_pad_left=True - ) - - x = cuboid_utils.generalize_padding( - x, pad_t_x, pad_h, pad_w, self.padding_type, t_pad_left=False - ) - - if any(i > 0 for i in shift_hw): - shifted_x = paddle.roll( - x=x, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) - ) - shifted_mem = paddle.roll( - x=mem, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) - ) - else: - shifted_x = x - shifted_mem = mem - mem_cuboid_size = (mem.shape[1] // n_temporal,) + cuboid_hw - x_cuboid_size = (x.shape[1] // n_temporal,) + cuboid_hw - reordered_mem = cuboid_encoder.cuboid_reorder( - shifted_mem, cuboid_size=mem_cuboid_size, strategy=self.strategy - ) - reordered_x = cuboid_encoder.cuboid_reorder( - shifted_x, cuboid_size=x_cuboid_size, strategy=self.strategy - ) - _, num_cuboids_mem, mem_cuboid_volume, _ = reordered_mem.shape - _, num_cuboids, x_cuboid_volume, _ = reordered_x.shape - assert ( - num_cuboids_mem == num_cuboids - ), f"Number of cuboids do not match. 
num_cuboids={num_cuboids}, num_cuboids_mem={num_cuboids_mem}" - attn_mask = compute_cuboid_cross_attention_mask( - T_x, - T_mem, - H, - W, - n_temporal, - cuboid_hw, - shift_hw, - strategy=self.strategy, - padding_type=self.padding_type, - device=x.place, - ) - head_C = C_in // self.num_heads - kv = ( - self.kv_proj(reordered_mem) - .reshape([B, num_cuboids, mem_cuboid_volume, 2, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - k, v = kv[0], kv[1] - q = ( - self.q_proj(reordered_x) - .reshape([B, num_cuboids, x_cuboid_volume, self.num_heads, head_C]) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - q = q * self.scale - perm_4 = list(range(k.ndim)) - perm_4[-2] = -1 - perm_4[-1] = -2 - attn_score = q @ k.transpose(perm=perm_4) - if self.use_relative_pos: - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index[ - :x_cuboid_volume, :mem_cuboid_volume - ].reshape([-1]) - ].reshape([x_cuboid_volume, mem_cuboid_volume, -1]) - relative_position_bias = relative_position_bias.transpose( - perm=[2, 0, 1] - ).unsqueeze(axis=1) - attn_score = attn_score + relative_position_bias - if self.use_global_vector: - if self.separate_global_qkv: - l2g_q = ( - self.l2g_q_net(reordered_x) - .reshape([B, num_cuboids, x_cuboid_volume, self.num_heads, head_C]) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - l2g_q = l2g_q * self.scale - l2g_global_kv = ( - self.l2g_global_kv_net(mem_global_vectors) - .reshape([B, 1, num_global, 2, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1] - else: - kv_global = ( - self.kv_proj(mem_global_vectors) - .reshape([B, 1, num_global, 2, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - l2g_global_k, l2g_global_v = kv_global[0], kv_global[1] - l2g_q = q - perm_5 = list(range(l2g_global_k.ndim)) - perm_5[-2] = -1 - perm_5[-1] = -2 - l2g_attn_score = l2g_q @ l2g_global_k.transpose(perm=perm_5) - attn_score_l2l_l2g = paddle.concat(x=(attn_score, l2g_attn_score), axis=-1) - if attn_mask.ndim == 5: - attn_mask_l2l_l2g = F.pad( - attn_mask, [0, num_global], "constant", 1, data_format="NDHWC" - ) - else: - attn_mask_l2l_l2g = F.pad(attn_mask, [0, num_global], "constant", 1) - v_l_g = paddle.concat( - x=( - v, - l2g_global_v.expand( - shape=[B, self.num_heads, num_cuboids, num_global, head_C] - ), - ), - axis=3, - ) - attn_score_l2l_l2g = cuboid_encoder.masked_softmax( - attn_score_l2l_l2g, mask=attn_mask_l2l_l2g - ) - attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) - reordered_x = ( - (attn_score_l2l_l2g @ v_l_g) - .transpose(perm=[0, 2, 3, 1, 4]) - .reshape(B, num_cuboids, x_cuboid_volume, self.dim) - ) - else: - attn_score = cuboid_encoder.masked_softmax(attn_score, mask=attn_mask) - attn_score = self.attn_drop(attn_score) - reordered_x = ( - (attn_score @ v) - .transpose(perm=[0, 2, 3, 1, 4]) - .reshape([B, num_cuboids, x_cuboid_volume, self.dim]) - ) - reordered_x = paddle.cast(reordered_x, dtype="float32") - reordered_x = self.proj_drop(self.proj(reordered_x)) - shifted_x = cuboid_encoder.cuboid_reorder_reverse( - reordered_x, - cuboid_size=x_cuboid_size, - strategy=self.strategy, - orig_data_shape=(x.shape[1], x.shape[2], x.shape[3]), - ) - if any(i > 0 for i in shift_hw): - x = paddle.roll(x=shifted_x, shifts=(shift_hw[0], shift_hw[1]), axis=(2, 3)) - else: - x = shifted_x - x = cuboid_utils.generalize_unpadding( - x, pad_t=pad_t_x, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type - ) - return x - - -class 
StackCuboidCrossAttentionBlock(nn.Layer): - """A stack of cuboid cross attention layers. - - The advantage of cuboid attention is that we can combine cuboid attention building blocks with different - hyper-parameters to mimic a broad range of space-time correlation patterns. - - - "use_inter_ffn" is True - x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out - | ^ | ^ - | | | | - |-------------|----|-------------| - - "use_inter_ffn" is False - x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem - | ^ | ^ ^ | ^ - | | | | | | | - |-------------|----|------------|-- ----------|--|-----------| - - Args: - dim (int): The dimension of the input. - num_heads (int): The number of head. - block_cuboid_hw (list, optional): The height and width of block cuboid.Defaults to [(4, 4), (4, 4)]. - block_shift_hw (list, optional): The height and width of shift cuboid . Defaults to [(0, 0), (2, 2)]. - block_n_temporal (list, optional): The length of block temporal. Defaults to [1, 2]. - block_strategy (list, optional): The strategy of block. Defaults to [("d", "d", "d"), ("l", "l", "l")]. - padding_type (str, optional): The type of paddling. Defaults to "ignore". - cross_last_n_frames (int, optional): The num of cross_last_n_frames. Defaults to None. - qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. - qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projection dropout. Defaults to 0.0. - ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. - activation (str, optional): The activation. Defaults to "leaky". - gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to True. - max_temporal_relative (int, optional): The max temporal. Defaults to 50. - checkpoint_level (int, optional): Whether to enable gradient checkpointing. Defaults to 1. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - num_heads: int, - block_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], - block_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (2, 2)], - block_n_temporal: Tuple[int, ...] = [1, 2], - block_strategy: Tuple[Tuple[str, ...], ...] 
= [ - ("d", "d", "d"), - ("l", "l", "l"), - ], - padding_type: str = "ignore", - cross_last_n_frames: int = None, - qkv_bias: bool = False, - qk_scale: float = None, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - use_inter_ffn: bool = True, - max_temporal_relative: int = 50, - checkpoint_level: int = 1, - use_relative_pos: bool = True, - use_global_vector: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(StackCuboidCrossAttentionBlock, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.norm_init_mode = norm_init_mode - if ( - len(block_cuboid_hw[0]) <= 0 - or len(block_shift_hw) <= 0 - or len(block_strategy) <= 0 - ): - raise ValueError( - "Incorrect format.The lengths of block_cuboid_hw[0], block_shift_hw, and block_strategy must be greater than zero." - ) - if len(block_cuboid_hw) != len(block_shift_hw) and len(block_shift_hw) == len( - block_strategy - ): - raise ValueError( - "The lengths of block_cuboid_size, block_shift_size, and block_strategy must be equal." - ) - - self.num_attn = len(block_cuboid_hw) - self.checkpoint_level = checkpoint_level - self.use_inter_ffn = use_inter_ffn - self.use_global_vector = use_global_vector - if self.use_inter_ffn: - self.ffn_l = nn.LayerList( - sublayers=[ - cuboid_encoder.PositionwiseFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for _ in range(self.num_attn) - ] - ) - else: - self.ffn_l = nn.LayerList( - sublayers=[ - cuboid_encoder.PositionwiseFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - ] - ) - self.attn_l = nn.LayerList( - sublayers=[ - CuboidCrossAttentionLayer( - dim=dim, - num_heads=num_heads, - cuboid_hw=ele_cuboid_hw, - shift_hw=ele_shift_hw, - strategy=ele_strategy, - n_temporal=ele_n_temporal, - cross_last_n_frames=cross_last_n_frames, - padding_type=padding_type, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=proj_drop, - norm_layer=norm_layer, - max_temporal_relative=max_temporal_relative, - use_global_vector=use_global_vector, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal in zip( - block_cuboid_hw, block_shift_hw, block_strategy, block_n_temporal - ) - ] - ) - - def reset_parameters(self): - for m in self.ffn_l: - m.reset_parameters() - for m in self.attn_l: - m.reset_parameters() - - def forward(self, x, mem, mem_global_vector=None): - """ - Args: - x (paddle.Tensor): Shape (B, T_x, H, W, C) - mem (paddle.Tensor): Shape (B, T_mem, H, W, C) - mem_global_vector (paddle.Tensor): Shape (B, N_global, C) - - Returns: - out (paddle.Tensor): (B, 
T_x, H, W, C_out) - """ - - if self.use_inter_ffn: - for attn, ffn in zip(self.attn_l, self.ffn_l): - if self.checkpoint_level >= 2 and self.training: - x = x + fleet.utils.recompute(attn, x, mem, mem_global_vector) - else: - x = x + attn(x, mem, mem_global_vector) - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(ffn, x) - else: - x = ffn(x) - return x - else: - for attn in self.attn_l: - if self.checkpoint_level >= 2 and self.training: - x = x + fleet.utils.recompute(attn, x, mem, mem_global_vector) - else: - x = x + attn(x, mem, mem_global_vector) - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(self.ffn_l[0], x) - else: - x = self.ffn_l[0](x) - return x - - -class Upsample3DLayer(nn.Layer): - """Upsampling based on nn.UpSampling and Conv3x3. - - If the temporal dimension remains the same: - x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim) - Else: - x --> interpolation-3d (nearest) --> conv3x3x3(dim, out_dim) - - Args: - dim (int): The dimension of the input tensor. - out_dim (int): The dimension of the output tensor. - target_size (Tuple[int,...]): The size of output tensor. - temporal_upsample (bool, optional): Whether the temporal axis will go through upsampling. Defaults to False. - kernel_size (int, optional): The kernel size of the Conv2D layer. Defaults to 3. - layout (str, optional): The layout of the inputs. Defaults to "THWC". - conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - out_dim: int, - target_size: Tuple[int, ...], - temporal_upsample: bool = False, - kernel_size: int = 3, - layout: str = "THWC", - conv_init_mode: str = "0", - ): - super(Upsample3DLayer, self).__init__() - self.conv_init_mode = conv_init_mode - self.target_size = target_size - self.out_dim = out_dim - self.temporal_upsample = temporal_upsample - if temporal_upsample: - self.up = nn.Upsample(size=target_size, mode="nearest") - else: - self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode="nearest") - self.conv = nn.Conv2D( - in_channels=dim, - out_channels=out_dim, - kernel_size=(kernel_size, kernel_size), - padding=(kernel_size // 2, kernel_size // 2), - ) - assert layout in ["THWC", "CTHW"] - self.layout = layout - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization(m, conv_mode=self.conv_init_mode) - - def forward(self, x): - """ - - Args: - x : (B, T, H, W, C) or (B, C, T, H, W) - - Returns: - out : (B, T, H_new, W_out, C_out) or (B, C, T, H_out, W_out) - """ - - if self.layout == "THWC": - B, T, H, W, C = x.shape - if self.temporal_upsample: - x = x.transpose(perm=[0, 4, 1, 2, 3]) - return self.conv(self.up(x)).transpose(perm=[0, 2, 3, 4, 1]) - else: - assert self.target_size[0] == T - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = self.up(x) - return ( - self.conv(x) - .transpose(perm=[0, 2, 3, 1]) - .reshape(list((B,) + self.target_size + (self.out_dim,))) - ) - elif self.layout == "CTHW": - B, C, T, H, W = x.shape - if self.temporal_upsample: - return self.conv(self.up(x)) - else: - assert self.output_size[0] == T - x = x.transpose(perm=[0, 2, 1, 3, 4]) - x = x.reshape([B * T, C, H, W]) - return ( - self.conv(self.up(x)) - .reshape( - [ - B, - self.target_size[0], - self.out_dim, - self.target_size[1], - self.target_size[2], - ] - ) - .transpose(perm=[0, 2, 1, 3, 4]) - ) - - -class CuboidTransformerDecoder(nn.Layer): - """Decoder of the 
CuboidTransformer. - - For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention - - Repeat the following structure K times - - x --> StackCuboidSelfAttention --> | - |----> StackCuboidCrossAttention (If used) --> out - mem --> | - - Args: - target_temporal_length (int): The temporal length of the target. - mem_shapes (Tuple[int,...]): The mem shapes of the decoder. - cross_start (int, optional): The block to start cross attention. Defaults to 0. - depth (list, optional): The number of layers for each block. Defaults to [2, 2]. - upsample_type (str, optional): The type of upsample. Defaults to "upsample". - upsample_kernel_size (int, optional): The kernel size of upsample. Defaults to 3. - block_self_attn_patterns (str, optional): The patterns of block attention. Defaults to None. - block_self_cuboid_size (list, optional): The size of cuboid block. Defaults to [(4, 4, 4), (4, 4, 4)]. - block_self_cuboid_strategy (list, optional): The strategy of cuboid. Defaults to [("l", "l", "l"), ("d", "d", "d")]. - block_self_shift_size (list, optional): The size of shift. Defaults to [(1, 1, 1), (0, 0, 0)]. - block_cross_attn_patterns (str, optional): The patterns of cross attentions. Defaults to None. - block_cross_cuboid_hw (list, optional): The height and width of cross cuboid. Defaults to [(4, 4), (4, 4)]. - block_cross_cuboid_strategy (list, optional): The strategy of cross cuboid. Defaults to [("l", "l", "l"), ("d", "l", "l")]. - block_cross_shift_hw (list, optional): The height and width of cross shift. Defaults to [(0, 0), (0, 0)]. - block_cross_n_temporal (list, optional): The cross temporal of block. Defaults to [1, 2]. - cross_last_n_frames (int, optional): The num of cross last frames. Defaults to None. - num_heads (int, optional): The num of head. Defaults to 4. - attn_drop (float, optional): The ratio of attention dropout. Defaults to 0.0. - proj_drop (float, optional): The ratio of projection dropout. Defaults to 0.0. - ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. - ffn_activation (str, optional): The activation layer of FFN. Defaults to "leaky". - gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to False. - hierarchical_pos_embed (bool, optional): Whether to use hierarchical pos_embed. Defaults to False. - pos_embed_type (str, optional): The type of pos embeding. Defaults to "t+hw". - max_temporal_relative (int, optional): The max number of teemporal relative. Defaults to 50. - padding_type (str, optional): The type of padding. Defaults to "ignore". - checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - self_attn_use_final_proj (bool, optional): Whether to use self attention for final projection. Defaults to True. - use_first_self_attn (bool, optional): Whether to use first self attention. Defaults to False. - use_self_global (bool, optional): Whether to use self global vector. Defaults to False. - self_update_global (bool, optional): Whether to update global vector. Defaults to True. - use_cross_global (bool, optional): Whether to use cross global vector. Defaults to False. - use_global_vector_ffn (bool, optional): Whether to use FFN global vectors. Defaults to True. 
- use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". - up_linear_init_mode (str, optional): The mode of up linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - target_temporal_length: int, - mem_shapes: Tuple[int, ...], - cross_start: int = 0, - depth: Tuple[int, ...] = [2, 2], - upsample_type: str = "upsample", - upsample_kernel_size: int = 3, - block_self_attn_patterns: str = None, - block_self_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - block_self_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "d", "d"), - ], - block_self_shift_size: Tuple[Tuple[int, ...], ...] = [(1, 1, 1), (0, 0, 0)], - block_cross_attn_patterns: str = None, - block_cross_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], - block_cross_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "l", "l"), - ], - block_cross_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (0, 0)], - block_cross_n_temporal: Tuple[int, ...] = [1, 2], - cross_last_n_frames: int = None, - num_heads: int = 4, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - ffn_activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - use_inter_ffn: bool = False, - hierarchical_pos_embed: bool = False, - pos_embed_type: str = "t+hw", - max_temporal_relative: int = 50, - padding_type: str = "ignore", - checkpoint_level: bool = True, - use_relative_pos: bool = True, - self_attn_use_final_proj: bool = True, - use_first_self_attn: bool = False, - use_self_global: bool = False, - self_update_global: bool = True, - use_cross_global: bool = False, - use_global_vector_ffn: bool = True, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - conv_init_mode: str = "0", - up_linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(CuboidTransformerDecoder, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.conv_init_mode = conv_init_mode - self.up_linear_init_mode = up_linear_init_mode - self.norm_init_mode = norm_init_mode - assert len(depth) == len(mem_shapes) - self.target_temporal_length = target_temporal_length - self.num_blocks = len(mem_shapes) - self.cross_start = cross_start - self.mem_shapes = mem_shapes - self.depth = depth - self.upsample_type = upsample_type - self.hierarchical_pos_embed = hierarchical_pos_embed - self.checkpoint_level = checkpoint_level - self.use_self_global = use_self_global - self.self_update_global = self_update_global - self.use_cross_global = use_cross_global - self.use_global_vector_ffn = use_global_vector_ffn - self.use_first_self_attn = use_first_self_attn - if block_self_attn_patterns is not None: - if 
isinstance(block_self_attn_patterns, (tuple, list)): - assert len(block_self_attn_patterns) == self.num_blocks - else: - block_self_attn_patterns = [ - block_self_attn_patterns for _ in range(self.num_blocks) - ] - block_self_cuboid_size = [] - block_self_cuboid_strategy = [] - block_self_shift_size = [] - for idx, key in enumerate(block_self_attn_patterns): - func = cuboid_utils.CuboidSelfAttentionPatterns.get(key) - cuboid_size, strategy, shift_size = func(mem_shapes[idx]) - block_self_cuboid_size.append(cuboid_size) - block_self_cuboid_strategy.append(strategy) - block_self_shift_size.append(shift_size) - else: - if not isinstance(block_self_cuboid_size[0][0], (list, tuple)): - block_self_cuboid_size = [ - block_self_cuboid_size for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_self_cuboid_size) == self.num_blocks - ), f"Incorrect input format! Received block_self_cuboid_size={block_self_cuboid_size}" - if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)): - block_self_cuboid_strategy = [ - block_self_cuboid_strategy for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_self_cuboid_strategy) == self.num_blocks - ), f"Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}" - if not isinstance(block_self_shift_size[0][0], (list, tuple)): - block_self_shift_size = [ - block_self_shift_size for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_self_shift_size) == self.num_blocks - ), f"Incorrect input format! Received block_self_shift_size={block_self_shift_size}" - self_blocks = [] - for i in range(self.num_blocks): - if not self.use_first_self_attn and i == self.num_blocks - 1: - ele_depth = depth[i] - 1 - else: - ele_depth = depth[i] - stack_cuboid_blocks = [ - cuboid_encoder.StackCuboidSelfAttentionBlock( - dim=self.mem_shapes[i][-1], - num_heads=num_heads, - block_cuboid_size=block_self_cuboid_size[i], - block_strategy=block_self_cuboid_strategy[i], - block_shift_size=block_self_shift_size[i], - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - activation=ffn_activation, - gated_ffn=gated_ffn, - norm_layer=norm_layer, - use_inter_ffn=use_inter_ffn, - padding_type=padding_type, - use_global_vector=use_self_global, - use_global_vector_ffn=use_global_vector_ffn, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - use_final_proj=self_attn_use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for _ in range(ele_depth) - ] - self_blocks.append(nn.LayerList(sublayers=stack_cuboid_blocks)) - self.self_blocks = nn.LayerList(sublayers=self_blocks) - if block_cross_attn_patterns is not None: - if isinstance(block_cross_attn_patterns, (tuple, list)): - assert len(block_cross_attn_patterns) == self.num_blocks - else: - block_cross_attn_patterns = [ - block_cross_attn_patterns for _ in range(self.num_blocks) - ] - block_cross_cuboid_hw = [] - block_cross_cuboid_strategy = [] - block_cross_shift_hw = [] - block_cross_n_temporal = [] - for idx, key in enumerate(block_cross_attn_patterns): - if key == "last_frame_dst": - cuboid_hw = None - shift_hw = None - strategy = None - n_temporal = None - else: - func = cuboid_utils.CuboidCrossAttentionPatterns.get(key) - cuboid_hw, shift_hw, strategy, n_temporal = func(mem_shapes[idx]) - 
block_cross_cuboid_hw.append(cuboid_hw) - block_cross_cuboid_strategy.append(strategy) - block_cross_shift_hw.append(shift_hw) - block_cross_n_temporal.append(n_temporal) - else: - if not isinstance(block_cross_cuboid_hw[0][0], (list, tuple)): - block_cross_cuboid_hw = [ - block_cross_cuboid_hw for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_cross_cuboid_hw) == self.num_blocks - ), f"Incorrect input format! Received block_cross_cuboid_hw={block_cross_cuboid_hw}" - if not isinstance(block_cross_cuboid_strategy[0][0], (list, tuple)): - block_cross_cuboid_strategy = [ - block_cross_cuboid_strategy for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_cross_cuboid_strategy) == self.num_blocks - ), f"Incorrect input format! Received block_cross_cuboid_strategy={block_cross_cuboid_strategy}" - if not isinstance(block_cross_shift_hw[0][0], (list, tuple)): - block_cross_shift_hw = [ - block_cross_shift_hw for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_cross_shift_hw) == self.num_blocks - ), f"Incorrect input format! Received block_cross_shift_hw={block_cross_shift_hw}" - if not isinstance(block_cross_n_temporal[0], (list, tuple)): - block_cross_n_temporal = [ - block_cross_n_temporal for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_cross_n_temporal) == self.num_blocks - ), f"Incorrect input format! Received block_cross_n_temporal={block_cross_n_temporal}" - self.cross_blocks = nn.LayerList() - for i in range(self.cross_start, self.num_blocks): - cross_block = nn.LayerList( - sublayers=[ - StackCuboidCrossAttentionBlock( - dim=self.mem_shapes[i][-1], - num_heads=num_heads, - block_cuboid_hw=block_cross_cuboid_hw[i], - block_strategy=block_cross_cuboid_strategy[i], - block_shift_hw=block_cross_shift_hw[i], - block_n_temporal=block_cross_n_temporal[i], - cross_last_n_frames=cross_last_n_frames, - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - gated_ffn=gated_ffn, - norm_layer=norm_layer, - use_inter_ffn=use_inter_ffn, - activation=ffn_activation, - max_temporal_relative=max_temporal_relative, - padding_type=padding_type, - use_global_vector=use_cross_global, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for _ in range(depth[i]) - ] - ) - self.cross_blocks.append(cross_block) - if self.num_blocks > 1: - if self.upsample_type == "upsample": - self.upsample_layers = nn.LayerList( - sublayers=[ - Upsample3DLayer( - dim=self.mem_shapes[i + 1][-1], - out_dim=self.mem_shapes[i][-1], - target_size=(target_temporal_length,) - + self.mem_shapes[i][1:3], - kernel_size=upsample_kernel_size, - temporal_upsample=False, - conv_init_mode=conv_init_mode, - ) - for i in range(self.num_blocks - 1) - ] - ) - else: - raise NotImplementedError(f"{self.upsample_type} is invalid.") - if self.hierarchical_pos_embed: - self.hierarchical_pos_embed_l = nn.LayerList( - sublayers=[ - PosEmbed( - embed_dim=self.mem_shapes[i][-1], - typ=pos_embed_type, - maxT=target_temporal_length, - maxH=self.mem_shapes[i][1], - maxW=self.mem_shapes[i][2], - ) - for i in range(self.num_blocks - 1) - ] - ) - self.reset_parameters() - - def reset_parameters(self): - for ms in self.self_blocks: - for m in ms: - m.reset_parameters() - for ms in self.cross_blocks: - for m in ms: - m.reset_parameters() - if self.num_blocks > 1: - 
for m in self.upsample_layers: - m.reset_parameters() - if self.hierarchical_pos_embed: - for m in self.hierarchical_pos_embed_l: - m.reset_parameters() - - def forward(self, x, mem_l, mem_global_vector_l=None): - """ - Args: - x : Shape (B, T_top, H_top, W_top, C). - mem_l : A list of memory tensors. - """ - - B, T_top, H_top, W_top, C = x.shape - assert T_top == self.target_temporal_length - assert (H_top, W_top) == (self.mem_shapes[-1][1], self.mem_shapes[-1][2]) - for i in range(self.num_blocks - 1, -1, -1): - mem_global_vector = ( - None if mem_global_vector_l is None else mem_global_vector_l[i] - ) - if not self.use_first_self_attn and i == self.num_blocks - 1: - if i >= self.cross_start: - x = self.cross_blocks[i - self.cross_start][0]( - x, mem_l[i], mem_global_vector - ) - for idx in range(self.depth[i] - 1): - if self.use_self_global: - if self.self_update_global: - x, mem_global_vector = self.self_blocks[i][idx]( - x, mem_global_vector - ) - else: - x, _ = self.self_blocks[i][idx](x, mem_global_vector) - else: - x = self.self_blocks[i][idx](x) - if i >= self.cross_start: - x = self.cross_blocks[i - self.cross_start][idx + 1]( - x, mem_l[i], mem_global_vector - ) - else: - for idx in range(self.depth[i]): - if self.use_self_global: - if self.self_update_global: - x, mem_global_vector = self.self_blocks[i][idx]( - x, mem_global_vector - ) - else: - x, _ = self.self_blocks[i][idx](x, mem_global_vector) - else: - x = self.self_blocks[i][idx](x) - if i >= self.cross_start: - x = self.cross_blocks[i - self.cross_start][idx]( - x, mem_l[i], mem_global_vector - ) - if i > 0: - x = self.upsample_layers[i - 1](x) - if self.hierarchical_pos_embed: - x = self.hierarchical_pos_embed_l[i - 1](x) - return x +from functools import lru_cache +from typing import Tuple + +import numpy as np +import paddle +import paddle.nn.functional as F +from paddle import nn +from paddle.distributed import fleet + +import ppsci.arch.cuboid_transformer_encoder as cuboid_encoder +import ppsci.arch.cuboid_transformer_utils as cuboid_utils +from ppsci.utils import initializer + + +class PosEmbed(nn.Layer): + """Pose embeding + + Args: + embed_dim (int): The dimension of embeding. + maxT (int): The embeding max time. + maxH (int): The embeding max height. + maxW (int): The embeding max width. + typ (str): + The type of the positional embedding. 
+ - t+h+w: + Embed the spatial position to embeddings + - t+hw: + Embed the spatial position to embeddings + """ + + def __init__(self, embed_dim, maxT, maxH, maxW, typ: str = "t+h+w"): + super(PosEmbed, self).__init__() + self.typ = typ + assert self.typ in ["t+h+w", "t+hw"] + self.maxT = maxT + self.maxH = maxH + self.maxW = maxW + self.embed_dim = embed_dim + if self.typ == "t+h+w": + self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim) + self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim) + self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim) + elif self.typ == "t+hw": + self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim) + self.HW_embed = nn.Embedding( + num_embeddings=maxH * maxW, embedding_dim=embed_dim + ) + else: + raise NotImplementedError(f"{self.typ} is invalid.") + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization(m, embed_mode="0") + + def forward(self, x): + """ + Args: + x : Shape (B, T, H, W, C) + + Returns: + out : the x + positional embeddings + """ + + _, T, H, W, _ = x.shape + t_idx = paddle.arange(end=T) + h_idx = paddle.arange(end=H) + w_idx = paddle.arange(end=W) + if self.typ == "t+h+w": + return ( + x + + self.T_embed(t_idx).reshape([T, 1, 1, self.embed_dim]) + + self.H_embed(h_idx).reshape([1, H, 1, self.embed_dim]) + + self.W_embed(w_idx).reshape([1, 1, W, self.embed_dim]) + ) + elif self.typ == "t+hw": + spatial_idx = h_idx.unsqueeze(axis=-1) * self.maxW + w_idx + return ( + x + + self.T_embed(t_idx).reshape([T, 1, 1, self.embed_dim]) + + self.HW_embed(spatial_idx) + ) + else: + raise NotImplementedError(f"{self.typ} is invalid.") + + +@lru_cache() +def compute_cuboid_cross_attention_mask( + T_x, T_mem, H, W, n_temporal, cuboid_hw, shift_hw, strategy, padding_type, device +): + pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal + pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal + pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0] + pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1] + mem_cuboid_size = ((T_mem + pad_t_mem) // n_temporal,) + cuboid_hw + x_cuboid_size = ((T_x + pad_t_x) // n_temporal,) + cuboid_hw + if pad_t_mem > 0 or pad_h > 0 or pad_w > 0: + if padding_type == "ignore": + mem_mask = paddle.ones(shape=(1, T_mem, H, W, 1), dtype="bool") + mem_mask = F.pad( + mem_mask, [0, 0, 0, pad_w, 0, pad_h, pad_t_mem, 0], data_format="NDHWC" + ) + else: + mem_mask = paddle.ones( + shape=(1, T_mem + pad_t_mem, H + pad_h, W + pad_w, 1), dtype="bool" + ) + if pad_t_x > 0 or pad_h > 0 or pad_w > 0: + if padding_type == "ignore": + x_mask = paddle.ones(shape=(1, T_x, H, W, 1), dtype="bool") + x_mask = F.pad( + x_mask, [0, 0, 0, pad_w, 0, pad_h, 0, pad_t_x], data_format="NDHWC" + ) + else: + x_mask = paddle.ones( + shape=(1, T_x + pad_t_x, H + pad_h, W + pad_w, 1), dtype="bool" + ) + if any(i > 0 for i in shift_hw): + if padding_type == "ignore": + x_mask = paddle.roll( + x=x_mask, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) + ) + mem_mask = paddle.roll( + x=mem_mask, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) + ) + x_mask = cuboid_encoder.cuboid_reorder(x_mask, x_cuboid_size, strategy=strategy) + x_mask = x_mask.squeeze(axis=-1).squeeze(axis=0) + num_cuboids, x_cuboid_volume = x_mask.shape + mem_mask = cuboid_encoder.cuboid_reorder( + mem_mask, mem_cuboid_size, strategy=strategy + ) + mem_mask = mem_mask.squeeze(axis=-1).squeeze(axis=0) + _, mem_cuboid_volume = 
mem_mask.shape + shift_mask = np.zeros(shape=(1, n_temporal, H + pad_h, W + pad_w, 1)) + cnt = 0 + for h in ( + slice(-cuboid_hw[0]), + slice(-cuboid_hw[0], -shift_hw[0]), + slice(-shift_hw[0], None), + ): + for w in ( + slice(-cuboid_hw[1]), + slice(-cuboid_hw[1], -shift_hw[1]), + slice(-shift_hw[1], None), + ): + shift_mask[:, :, h, w, :] = cnt + cnt += 1 + shift_mask = paddle.to_tensor(shift_mask) + shift_mask = cuboid_encoder.cuboid_reorder( + shift_mask, (1,) + cuboid_hw, strategy=strategy + ) + shift_mask = shift_mask.squeeze(axis=-1).squeeze(axis=0) + shift_mask = shift_mask.unsqueeze(axis=1) - shift_mask.unsqueeze(axis=2) == 0 + bh_bw = cuboid_hw[0] * cuboid_hw[1] + attn_mask = ( + shift_mask.reshape((num_cuboids, 1, bh_bw, 1, bh_bw)) + * x_mask.reshape((num_cuboids, -1, bh_bw, 1, 1)) + * mem_mask.reshape([num_cuboids, 1, 1, -1, bh_bw]) + ) + attn_mask = attn_mask.reshape([num_cuboids, x_cuboid_volume, mem_cuboid_volume]) + return attn_mask + + +class CuboidCrossAttentionLayer(nn.Layer): + """Implements the cuboid cross attention. + + The idea of Cuboid Cross Attention is to extend the idea of cuboid self attention to work for the + encoder-decoder-type cross attention. + + Assume that there is a memory tensor with shape (T1, H, W, C) and another query tensor with shape (T2, H, W, C), + + Here, we decompose the query tensor and the memory tensor into the same number of cuboids and attend the cuboid in + the query tensor with the corresponding cuboid in the memory tensor. + + For the height and width axes, we reuse the grid decomposition techniques described in the cuboid self-attention. + For the temporal axis, the layer supports the "n_temporal" parameter, that controls the number of cuboids we can + get after cutting the tensors. For example, if the temporal dilation is 2, both the query and + memory will be decomposed into 2 cuboids along the temporal axis. Like in the Cuboid Self-attention, + we support "local" and "dilated" decomposition strategy. + + The complexity of the layer is O((T2 / n_t * Bh * Bw) * (T1 / n_t * Bh * Bw) * n_t (H / Bh) (W / Bw)) = O(T2 * T1 / n_t H W Bh Bw) + + Args: + dim (int): The dimention of input tensor. + num_heads (int): The number of head. + n_temporal (int, optional): The num of temporal. Defaults to 1. + cuboid_hw (tuple, optional): The height and width of cuboid. Defaults to (7, 7). + shift_hw (tuple, optional): The height and width of shift. Defaults to (0, 0). + strategy (tuple, optional): The strategy. Defaults to ("d", "l", "l"). + padding_type (str, optional): The type of padding. Defaults to "ignore". + cross_last_n_frames (int, optional): The cross_last_n_frames of decoder. Defaults to None. + qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. + qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projrction dropout. Defaults to 0.0. + max_temporal_relative (int, optional): The max temporal. Defaults to 50. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to True. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. 
+ checkpoint_level (int, optional): Whether to enable gradient checkpointing. Defaults to 1. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". + """ + + def __init__( + self, + dim: int, + num_heads: int, + n_temporal: int = 1, + cuboid_hw: Tuple[int, ...] = (7, 7), + shift_hw: Tuple[int, ...] = (0, 0), + strategy: Tuple[str, ...] = ("d", "l", "l"), + padding_type: str = "ignore", + cross_last_n_frames: int = None, + qkv_bias: bool = False, + qk_scale: float = None, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + max_temporal_relative: int = 50, + norm_layer: str = "layer_norm", + use_global_vector: bool = True, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + checkpoint_level: int = 1, + use_relative_pos: bool = True, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(CuboidCrossAttentionLayer, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.norm_init_mode = norm_init_mode + self.dim = dim + self.num_heads = num_heads + self.n_temporal = n_temporal + assert n_temporal > 0 + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + shift_hw = list(shift_hw) + if strategy[1] == "d": + shift_hw[0] = 0 + if strategy[2] == "d": + shift_hw[1] = 0 + self.cuboid_hw = cuboid_hw + self.shift_hw = tuple(shift_hw) + self.strategy = strategy + self.padding_type = padding_type + self.max_temporal_relative = max_temporal_relative + self.cross_last_n_frames = cross_last_n_frames + self.use_relative_pos = use_relative_pos + self.use_global_vector = use_global_vector + self.separate_global_qkv = separate_global_qkv + if global_dim_ratio != 1 and separate_global_qkv is False: + raise ValueError( + "Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
+ ) + self.global_dim_ratio = global_dim_ratio + if self.padding_type not in ["ignore", "zeros", "nearest"]: + raise ValueError('padding_type should be ["ignore", "zeros", "nearest"]') + if use_relative_pos: + init_data = paddle.zeros( + ( + (2 * max_temporal_relative - 1) + * (2 * cuboid_hw[0] - 1) + * (2 * cuboid_hw[1] - 1), + num_heads, + ) + ) + self.relative_position_bias_table = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(0.0), + ) + self.relative_position_bias_table.stop_gradient = not True + self.relative_position_bias_table = initializer.trunc_normal_( + self.relative_position_bias_table, std=0.02 + ) + + coords_t = paddle.arange(end=max_temporal_relative) + coords_h = paddle.arange(end=self.cuboid_hw[0]) + coords_w = paddle.arange(end=self.cuboid_hw[1]) + coords = paddle.stack(x=paddle.meshgrid(coords_t, coords_h, coords_w)) + coords_flatten = paddle.flatten(x=coords, start_axis=1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.transpose(perm=[1, 2, 0]) + relative_coords[:, :, 0] += max_temporal_relative - 1 + relative_coords[:, :, 1] += self.cuboid_hw[0] - 1 + relative_coords[:, :, 2] += self.cuboid_hw[1] - 1 + relative_position_index = ( + relative_coords[:, :, 0] + * (2 * self.cuboid_hw[0] - 1) + * (2 * self.cuboid_hw[1] - 1) + + relative_coords[:, :, 1] * (2 * self.cuboid_hw[1] - 1) + + relative_coords[:, :, 2] + ) + self.register_buffer( + name="relative_position_index", tensor=relative_position_index + ) + self.q_proj = nn.Linear(in_features=dim, out_features=dim, bias_attr=qkv_bias) + self.kv_proj = nn.Linear( + in_features=dim, out_features=dim * 2, bias_attr=qkv_bias + ) + self.attn_drop = nn.Dropout(p=attn_drop) + self.proj = nn.Linear(in_features=dim, out_features=dim) + self.proj_drop = nn.Dropout(p=proj_drop) + if self.use_global_vector: + if self.separate_global_qkv: + self.l2g_q_net = nn.Linear( + in_features=dim, out_features=dim, bias_attr=qkv_bias + ) + self.l2g_global_kv_net = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=dim * 2, + bias_attr=qkv_bias, + ) + self.norm = cuboid_utils.get_norm_layer(norm_layer, in_channels=dim) + self._checkpoint_level = checkpoint_level + self.reset_parameters() + + def reset_parameters(self): + cuboid_utils.apply_initialization( + self.q_proj, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.kv_proj, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.proj, linear_mode=self.ffn_linear_init_mode + ) + cuboid_utils.apply_initialization(self.norm, norm_mode=self.norm_init_mode) + if self.use_global_vector: + if self.separate_global_qkv: + cuboid_utils.apply_initialization( + self.l2g_q_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.l2g_global_kv_net, linear_mode=self.attn_linear_init_mode + ) + + def forward(self, x, mem, mem_global_vectors=None): + """Calculate the forward + + Along the temporal axis, we pad the mem tensor from the left and the x tensor from the right so that the + relative position encoding can be calculated correctly. 
For example: + + mem: 0, 1, 2, 3, 4 + x: 0, 1, 2, 3, 4, 5 + + n_temporal = 1 + mem: 0, 1, 2, 3, 4 x: 0, 1, 2, 3, 4, 5 + + n_temporal = 2 + mem: pad, 1, 3 x: 0, 2, 4 + mem: 0, 2, 4 x: 1, 3, 5 + + n_temporal = 3 + mem: pad, 2 dec: 0, 3 + mem: 0, 3 dec: 1, 4 + mem: 1, 4 dec: 2, 5 + + Args: + x (paddle.Tensor): The input of the layer. It will have shape (B, T, H, W, C) + mem (paddle.Tensor): The memory. It will have shape (B, T_mem, H, W, C) + mem_global_vectors (paddle.Tensor): The global vectors from the memory. It will have shape (B, N, C) + + Returns: + out (paddle.Tensor): Output tensor should have shape (B, T, H, W, C_out) + """ + + if self.cross_last_n_frames is not None: + cross_last_n_frames = int(min(self.cross_last_n_frames, mem.shape[1])) + mem = mem[:, -cross_last_n_frames:, ...] + if self.use_global_vector: + _, num_global, _ = mem_global_vectors.shape + x = self.norm(x) + B, T_x, H, W, C_in = x.shape + B_mem, T_mem, H_mem, W_mem, C_mem = mem.shape + assert T_x < self.max_temporal_relative and T_mem < self.max_temporal_relative + cuboid_hw = self.cuboid_hw + n_temporal = self.n_temporal + shift_hw = self.shift_hw + assert ( + B_mem == B and H == H_mem and W == W_mem and C_in == C_mem + ), f"Shape of memory and the input tensor does not match. x.shape={x.shape}, mem.shape={mem.shape}" + pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal + pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal + pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0] + pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1] + mem = cuboid_utils.generalize_padding( + mem, pad_t_mem, pad_h, pad_w, self.padding_type, t_pad_left=True + ) + + x = cuboid_utils.generalize_padding( + x, pad_t_x, pad_h, pad_w, self.padding_type, t_pad_left=False + ) + + if any(i > 0 for i in shift_hw): + shifted_x = paddle.roll( + x=x, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) + ) + shifted_mem = paddle.roll( + x=mem, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) + ) + else: + shifted_x = x + shifted_mem = mem + mem_cuboid_size = (mem.shape[1] // n_temporal,) + cuboid_hw + x_cuboid_size = (x.shape[1] // n_temporal,) + cuboid_hw + reordered_mem = cuboid_encoder.cuboid_reorder( + shifted_mem, cuboid_size=mem_cuboid_size, strategy=self.strategy + ) + reordered_x = cuboid_encoder.cuboid_reorder( + shifted_x, cuboid_size=x_cuboid_size, strategy=self.strategy + ) + _, num_cuboids_mem, mem_cuboid_volume, _ = reordered_mem.shape + _, num_cuboids, x_cuboid_volume, _ = reordered_x.shape + assert ( + num_cuboids_mem == num_cuboids + ), f"Number of cuboids do not match. 
num_cuboids={num_cuboids}, num_cuboids_mem={num_cuboids_mem}" + attn_mask = compute_cuboid_cross_attention_mask( + T_x, + T_mem, + H, + W, + n_temporal, + cuboid_hw, + shift_hw, + strategy=self.strategy, + padding_type=self.padding_type, + device=x.place, + ) + head_C = C_in // self.num_heads + kv = ( + self.kv_proj(reordered_mem) + .reshape([B, num_cuboids, mem_cuboid_volume, 2, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + k, v = kv[0], kv[1] + q = ( + self.q_proj(reordered_x) + .reshape([B, num_cuboids, x_cuboid_volume, self.num_heads, head_C]) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + q = q * self.scale + perm_4 = list(range(k.ndim)) + perm_4[-2] = -1 + perm_4[-1] = -2 + attn_score = q @ k.transpose(perm=perm_4) + if self.use_relative_pos: + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index[ + :x_cuboid_volume, :mem_cuboid_volume + ].reshape([-1]) + ].reshape([x_cuboid_volume, mem_cuboid_volume, -1]) + relative_position_bias = relative_position_bias.transpose( + perm=[2, 0, 1] + ).unsqueeze(axis=1) + attn_score = attn_score + relative_position_bias + if self.use_global_vector: + if self.separate_global_qkv: + l2g_q = ( + self.l2g_q_net(reordered_x) + .reshape([B, num_cuboids, x_cuboid_volume, self.num_heads, head_C]) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + l2g_q = l2g_q * self.scale + l2g_global_kv = ( + self.l2g_global_kv_net(mem_global_vectors) + .reshape([B, 1, num_global, 2, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1] + else: + kv_global = ( + self.kv_proj(mem_global_vectors) + .reshape([B, 1, num_global, 2, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + l2g_global_k, l2g_global_v = kv_global[0], kv_global[1] + l2g_q = q + perm_5 = list(range(l2g_global_k.ndim)) + perm_5[-2] = -1 + perm_5[-1] = -2 + l2g_attn_score = l2g_q @ l2g_global_k.transpose(perm=perm_5) + attn_score_l2l_l2g = paddle.concat(x=(attn_score, l2g_attn_score), axis=-1) + if attn_mask.ndim == 5: + attn_mask_l2l_l2g = F.pad( + attn_mask, [0, num_global], "constant", 1, data_format="NDHWC" + ) + else: + attn_mask_l2l_l2g = F.pad(attn_mask, [0, num_global], "constant", 1) + v_l_g = paddle.concat( + x=( + v, + l2g_global_v.expand( + shape=[B, self.num_heads, num_cuboids, num_global, head_C] + ), + ), + axis=3, + ) + attn_score_l2l_l2g = cuboid_encoder.masked_softmax( + attn_score_l2l_l2g, mask=attn_mask_l2l_l2g + ) + attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) + reordered_x = ( + (attn_score_l2l_l2g @ v_l_g) + .transpose(perm=[0, 2, 3, 1, 4]) + .reshape(B, num_cuboids, x_cuboid_volume, self.dim) + ) + else: + attn_score = cuboid_encoder.masked_softmax(attn_score, mask=attn_mask) + attn_score = self.attn_drop(attn_score) + reordered_x = ( + (attn_score @ v) + .transpose(perm=[0, 2, 3, 1, 4]) + .reshape([B, num_cuboids, x_cuboid_volume, self.dim]) + ) + reordered_x = paddle.cast(reordered_x, dtype="float32") + reordered_x = self.proj_drop(self.proj(reordered_x)) + shifted_x = cuboid_encoder.cuboid_reorder_reverse( + reordered_x, + cuboid_size=x_cuboid_size, + strategy=self.strategy, + orig_data_shape=(x.shape[1], x.shape[2], x.shape[3]), + ) + if any(i > 0 for i in shift_hw): + x = paddle.roll(x=shifted_x, shifts=(shift_hw[0], shift_hw[1]), axis=(2, 3)) + else: + x = shifted_x + x = cuboid_utils.generalize_unpadding( + x, pad_t=pad_t_x, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type + ) + return x + + +class 
StackCuboidCrossAttentionBlock(nn.Layer): + """A stack of cuboid cross attention layers. + + The advantage of cuboid attention is that we can combine cuboid attention building blocks with different + hyper-parameters to mimic a broad range of space-time correlation patterns. + + - "use_inter_ffn" is True + x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out + | ^ | ^ + | | | | + |-------------|----|-------------| + - "use_inter_ffn" is False + x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem + | ^ | ^ ^ | ^ + | | | | | | | + |-------------|----|------------|-- ----------|--|-----------| + + Args: + dim (int): The dimension of the input. + num_heads (int): The number of head. + block_cuboid_hw (list, optional): The height and width of block cuboid.Defaults to [(4, 4), (4, 4)]. + block_shift_hw (list, optional): The height and width of shift cuboid . Defaults to [(0, 0), (2, 2)]. + block_n_temporal (list, optional): The length of block temporal. Defaults to [1, 2]. + block_strategy (list, optional): The strategy of block. Defaults to [("d", "d", "d"), ("l", "l", "l")]. + padding_type (str, optional): The type of paddling. Defaults to "ignore". + cross_last_n_frames (int, optional): The num of cross_last_n_frames. Defaults to None. + qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. + qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projection dropout. Defaults to 0.0. + ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. + activation (str, optional): The activation. Defaults to "leaky". + gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to True. + max_temporal_relative (int, optional): The max temporal. Defaults to 50. + checkpoint_level (int, optional): Whether to enable gradient checkpointing. Defaults to 1. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization. Defaults to "0". + """ + + def __init__( + self, + dim: int, + num_heads: int, + block_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], + block_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (2, 2)], + block_n_temporal: Tuple[int, ...] = [1, 2], + block_strategy: Tuple[Tuple[str, ...], ...] 
= [ + ("d", "d", "d"), + ("l", "l", "l"), + ], + padding_type: str = "ignore", + cross_last_n_frames: int = None, + qkv_bias: bool = False, + qk_scale: float = None, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + use_inter_ffn: bool = True, + max_temporal_relative: int = 50, + checkpoint_level: int = 1, + use_relative_pos: bool = True, + use_global_vector: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(StackCuboidCrossAttentionBlock, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.norm_init_mode = norm_init_mode + if ( + len(block_cuboid_hw[0]) <= 0 + or len(block_shift_hw) <= 0 + or len(block_strategy) <= 0 + ): + raise ValueError( + "Incorrect format.The lengths of block_cuboid_hw[0], block_shift_hw, and block_strategy must be greater than zero." + ) + if len(block_cuboid_hw) != len(block_shift_hw) and len(block_shift_hw) == len( + block_strategy + ): + raise ValueError( + "The lengths of block_cuboid_size, block_shift_size, and block_strategy must be equal." + ) + + self.num_attn = len(block_cuboid_hw) + self.checkpoint_level = checkpoint_level + self.use_inter_ffn = use_inter_ffn + self.use_global_vector = use_global_vector + if self.use_inter_ffn: + self.ffn_l = nn.LayerList( + sublayers=[ + cuboid_encoder.PositionwiseFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for _ in range(self.num_attn) + ] + ) + else: + self.ffn_l = nn.LayerList( + sublayers=[ + cuboid_encoder.PositionwiseFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + ] + ) + self.attn_l = nn.LayerList( + sublayers=[ + CuboidCrossAttentionLayer( + dim=dim, + num_heads=num_heads, + cuboid_hw=ele_cuboid_hw, + shift_hw=ele_shift_hw, + strategy=ele_strategy, + n_temporal=ele_n_temporal, + cross_last_n_frames=cross_last_n_frames, + padding_type=padding_type, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + max_temporal_relative=max_temporal_relative, + use_global_vector=use_global_vector, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal in zip( + block_cuboid_hw, block_shift_hw, block_strategy, block_n_temporal + ) + ] + ) + + def reset_parameters(self): + for m in self.ffn_l: + m.reset_parameters() + for m in self.attn_l: + m.reset_parameters() + + def forward(self, x, mem, mem_global_vector=None): + """ + Args: + x (paddle.Tensor): Shape (B, T_x, H, W, C) + mem (paddle.Tensor): Shape (B, T_mem, H, W, C) + mem_global_vector (paddle.Tensor): Shape (B, N_global, C) + + Returns: + out (paddle.Tensor): (B, 
T_x, H, W, C_out)
+        """
+
+        if self.use_inter_ffn:
+            for attn, ffn in zip(self.attn_l, self.ffn_l):
+                if self.checkpoint_level >= 2 and self.training:
+                    x = x + fleet.utils.recompute(attn, x, mem, mem_global_vector)
+                else:
+                    x = x + attn(x, mem, mem_global_vector)
+                if self.checkpoint_level >= 1 and self.training:
+                    x = fleet.utils.recompute(ffn, x)
+                else:
+                    x = ffn(x)
+            return x
+        else:
+            for attn in self.attn_l:
+                if self.checkpoint_level >= 2 and self.training:
+                    x = x + fleet.utils.recompute(attn, x, mem, mem_global_vector)
+                else:
+                    x = x + attn(x, mem, mem_global_vector)
+            if self.checkpoint_level >= 1 and self.training:
+                x = fleet.utils.recompute(self.ffn_l[0], x)
+            else:
+                x = self.ffn_l[0](x)
+            return x
+
+
+class Upsample3DLayer(nn.Layer):
+    """Upsampling based on nn.Upsample and Conv3x3.
+
+    If the temporal dimension remains the same:
+        x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim)
+    Else:
+        x --> interpolation-3d (nearest) --> conv3x3x3(dim, out_dim)
+
+    Args:
+        dim (int): The dimension of the input tensor.
+        out_dim (int): The dimension of the output tensor.
+        target_size (Tuple[int,...]): The size of the output tensor.
+        temporal_upsample (bool, optional): Whether the temporal axis will go through upsampling. Defaults to False.
+        kernel_size (int, optional): The kernel size of the Conv2D layer. Defaults to 3.
+        layout (str, optional): The layout of the inputs. Defaults to "THWC".
+        conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0".
+    """
+
+    def __init__(
+        self,
+        dim: int,
+        out_dim: int,
+        target_size: Tuple[int, ...],
+        temporal_upsample: bool = False,
+        kernel_size: int = 3,
+        layout: str = "THWC",
+        conv_init_mode: str = "0",
+    ):
+        super(Upsample3DLayer, self).__init__()
+        self.conv_init_mode = conv_init_mode
+        self.target_size = target_size
+        self.out_dim = out_dim
+        self.temporal_upsample = temporal_upsample
+        if temporal_upsample:
+            self.up = nn.Upsample(size=target_size, mode="nearest")
+        else:
+            self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode="nearest")
+        self.conv = nn.Conv2D(
+            in_channels=dim,
+            out_channels=out_dim,
+            kernel_size=(kernel_size, kernel_size),
+            padding=(kernel_size // 2, kernel_size // 2),
+        )
+        assert layout in ["THWC", "CTHW"]
+        self.layout = layout
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        for m in self.children():
+            cuboid_utils.apply_initialization(m, conv_mode=self.conv_init_mode)
+
+    def forward(self, x):
+        """
+
+        Args:
+            x : (B, T, H, W, C) or (B, C, T, H, W)
+
+        Returns:
+            out : (B, T, H_out, W_out, C_out) or (B, C, T, H_out, W_out)
+        """
+
+        if self.layout == "THWC":
+            B, T, H, W, C = x.shape
+            if self.temporal_upsample:
+                x = x.transpose(perm=[0, 4, 1, 2, 3])
+                return self.conv(self.up(x)).transpose(perm=[0, 2, 3, 4, 1])
+            else:
+                assert self.target_size[0] == T
+                x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2])
+                x = self.up(x)
+                return (
+                    self.conv(x)
+                    .transpose(perm=[0, 2, 3, 1])
+                    .reshape(list((B,) + self.target_size + (self.out_dim,)))
+                )
+        elif self.layout == "CTHW":
+            B, C, T, H, W = x.shape
+            if self.temporal_upsample:
+                return self.conv(self.up(x))
+            else:
+                assert self.target_size[0] == T
+                x = x.transpose(perm=[0, 2, 1, 3, 4])
+                x = x.reshape([B * T, C, H, W])
+                return (
+                    self.conv(self.up(x))
+                    .reshape(
+                        [
+                            B,
+                            self.target_size[0],
+                            self.out_dim,
+                            self.target_size[1],
+                            self.target_size[2],
+                        ]
+                    )
+                    .transpose(perm=[0, 2, 1, 3, 4])
+                )
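# A minimal, hedged sketch of the THWC shape contract implemented above when
# temporal_upsample=False: per-frame 2D nearest upsampling followed by a 3x3 conv.
# All sizes below are made up, and plain paddle layers are used instead of the
# Upsample3DLayer class itself.
import paddle
from paddle import nn

B, T, H, W, C, C_out = 2, 4, 16, 16, 8, 6
target_hw = (32, 32)
x = paddle.randn([B, T, H, W, C])
up = nn.Upsample(size=target_hw, mode="nearest")
conv = nn.Conv2D(in_channels=C, out_channels=C_out, kernel_size=3, padding=1)
y = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2])  # NHWC -> NCHW per frame
y = conv(up(y)).transpose(perm=[0, 2, 3, 1]).reshape([B, T, target_hw[0], target_hw[1], C_out])
print(y.shape)  # expected: [2, 4, 32, 32, 6]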
+
+
+class CuboidTransformerDecoder(nn.Layer):
+    """Decoder of the CuboidTransformer.
+
+    For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention
+
+    Repeat the following structure K times
+
+        x --> StackCuboidSelfAttention --> |
+                                           |----> StackCuboidCrossAttention (If used) --> out
+                                   mem --> |
+
+    Args:
+        target_temporal_length (int): The temporal length of the target.
+        mem_shapes (Tuple[int,...]): The mem shapes of the decoder.
+        cross_start (int, optional): The block to start cross attention. Defaults to 0.
+        depth (list, optional): The number of layers for each block. Defaults to [2, 2].
+        upsample_type (str, optional): The type of upsample. Defaults to "upsample".
+        upsample_kernel_size (int, optional): The kernel size of upsample. Defaults to 3.
+        block_self_attn_patterns (str, optional): The patterns of block attention. Defaults to None.
+        block_self_cuboid_size (list, optional): The size of cuboid block. Defaults to [(4, 4, 4), (4, 4, 4)].
+        block_self_cuboid_strategy (list, optional): The strategy of cuboid. Defaults to [("l", "l", "l"), ("d", "d", "d")].
+        block_self_shift_size (list, optional): The size of shift. Defaults to [(1, 1, 1), (0, 0, 0)].
+        block_cross_attn_patterns (str, optional): The patterns of cross attentions. Defaults to None.
+        block_cross_cuboid_hw (list, optional): The height and width of cross cuboid. Defaults to [(4, 4), (4, 4)].
+        block_cross_cuboid_strategy (list, optional): The strategy of cross cuboid. Defaults to [("l", "l", "l"), ("d", "l", "l")].
+        block_cross_shift_hw (list, optional): The height and width of cross shift. Defaults to [(0, 0), (0, 0)].
+        block_cross_n_temporal (list, optional): The cross temporal of block. Defaults to [1, 2].
+        cross_last_n_frames (int, optional): The number of cross last frames. Defaults to None.
+        num_heads (int, optional): The number of heads. Defaults to 4.
+        attn_drop (float, optional): The ratio of attention dropout. Defaults to 0.0.
+        proj_drop (float, optional): The ratio of projection dropout. Defaults to 0.0.
+        ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0.
+        ffn_activation (str, optional): The activation layer of FFN. Defaults to "leaky".
+        gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False.
+        norm_layer (str, optional): The normalization layer. Defaults to "layer_norm".
+        use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to False.
+        hierarchical_pos_embed (bool, optional): Whether to use hierarchical pos_embed. Defaults to False.
+        pos_embed_type (str, optional): The type of pos embedding. Defaults to "t+hw".
+        max_temporal_relative (int, optional): The max number of temporal relative. Defaults to 50.
+        padding_type (str, optional): The type of padding. Defaults to "ignore".
+        checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True.
+        use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True.
+        self_attn_use_final_proj (bool, optional): Whether to use self attention for final projection. Defaults to True.
+        use_first_self_attn (bool, optional): Whether to use first self attention. Defaults to False.
+        use_self_global (bool, optional): Whether to use self global vector. Defaults to False.
+        self_update_global (bool, optional): Whether to update global vector. Defaults to True.
+        use_cross_global (bool, optional): Whether to use cross global vector. Defaults to False.
+        use_global_vector_ffn (bool, optional): Whether to use FFN global vectors. Defaults to True.
+ use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". + up_linear_init_mode (str, optional): The mode of up linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". + """ + + def __init__( + self, + target_temporal_length: int, + mem_shapes: Tuple[int, ...], + cross_start: int = 0, + depth: Tuple[int, ...] = [2, 2], + upsample_type: str = "upsample", + upsample_kernel_size: int = 3, + block_self_attn_patterns: str = None, + block_self_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + block_self_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "d", "d"), + ], + block_self_shift_size: Tuple[Tuple[int, ...], ...] = [(1, 1, 1), (0, 0, 0)], + block_cross_attn_patterns: str = None, + block_cross_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], + block_cross_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "l", "l"), + ], + block_cross_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (0, 0)], + block_cross_n_temporal: Tuple[int, ...] = [1, 2], + cross_last_n_frames: int = None, + num_heads: int = 4, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + ffn_activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + use_inter_ffn: bool = False, + hierarchical_pos_embed: bool = False, + pos_embed_type: str = "t+hw", + max_temporal_relative: int = 50, + padding_type: str = "ignore", + checkpoint_level: bool = True, + use_relative_pos: bool = True, + self_attn_use_final_proj: bool = True, + use_first_self_attn: bool = False, + use_self_global: bool = False, + self_update_global: bool = True, + use_cross_global: bool = False, + use_global_vector_ffn: bool = True, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + conv_init_mode: str = "0", + up_linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(CuboidTransformerDecoder, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.conv_init_mode = conv_init_mode + self.up_linear_init_mode = up_linear_init_mode + self.norm_init_mode = norm_init_mode + assert len(depth) == len(mem_shapes) + self.target_temporal_length = target_temporal_length + self.num_blocks = len(mem_shapes) + self.cross_start = cross_start + self.mem_shapes = mem_shapes + self.depth = depth + self.upsample_type = upsample_type + self.hierarchical_pos_embed = hierarchical_pos_embed + self.checkpoint_level = checkpoint_level + self.use_self_global = use_self_global + self.self_update_global = self_update_global + self.use_cross_global = use_cross_global + self.use_global_vector_ffn = use_global_vector_ffn + self.use_first_self_attn = use_first_self_attn + if block_self_attn_patterns is not None: + if 
isinstance(block_self_attn_patterns, (tuple, list)): + assert len(block_self_attn_patterns) == self.num_blocks + else: + block_self_attn_patterns = [ + block_self_attn_patterns for _ in range(self.num_blocks) + ] + block_self_cuboid_size = [] + block_self_cuboid_strategy = [] + block_self_shift_size = [] + for idx, key in enumerate(block_self_attn_patterns): + func = cuboid_utils.CuboidSelfAttentionPatterns.get(key) + cuboid_size, strategy, shift_size = func(mem_shapes[idx]) + block_self_cuboid_size.append(cuboid_size) + block_self_cuboid_strategy.append(strategy) + block_self_shift_size.append(shift_size) + else: + if not isinstance(block_self_cuboid_size[0][0], (list, tuple)): + block_self_cuboid_size = [ + block_self_cuboid_size for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_self_cuboid_size) == self.num_blocks + ), f"Incorrect input format! Received block_self_cuboid_size={block_self_cuboid_size}" + if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)): + block_self_cuboid_strategy = [ + block_self_cuboid_strategy for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_self_cuboid_strategy) == self.num_blocks + ), f"Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}" + if not isinstance(block_self_shift_size[0][0], (list, tuple)): + block_self_shift_size = [ + block_self_shift_size for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_self_shift_size) == self.num_blocks + ), f"Incorrect input format! Received block_self_shift_size={block_self_shift_size}" + self_blocks = [] + for i in range(self.num_blocks): + if not self.use_first_self_attn and i == self.num_blocks - 1: + ele_depth = depth[i] - 1 + else: + ele_depth = depth[i] + stack_cuboid_blocks = [ + cuboid_encoder.StackCuboidSelfAttentionBlock( + dim=self.mem_shapes[i][-1], + num_heads=num_heads, + block_cuboid_size=block_self_cuboid_size[i], + block_strategy=block_self_cuboid_strategy[i], + block_shift_size=block_self_shift_size[i], + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + activation=ffn_activation, + gated_ffn=gated_ffn, + norm_layer=norm_layer, + use_inter_ffn=use_inter_ffn, + padding_type=padding_type, + use_global_vector=use_self_global, + use_global_vector_ffn=use_global_vector_ffn, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + use_final_proj=self_attn_use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for _ in range(ele_depth) + ] + self_blocks.append(nn.LayerList(sublayers=stack_cuboid_blocks)) + self.self_blocks = nn.LayerList(sublayers=self_blocks) + if block_cross_attn_patterns is not None: + if isinstance(block_cross_attn_patterns, (tuple, list)): + assert len(block_cross_attn_patterns) == self.num_blocks + else: + block_cross_attn_patterns = [ + block_cross_attn_patterns for _ in range(self.num_blocks) + ] + block_cross_cuboid_hw = [] + block_cross_cuboid_strategy = [] + block_cross_shift_hw = [] + block_cross_n_temporal = [] + for idx, key in enumerate(block_cross_attn_patterns): + if key == "last_frame_dst": + cuboid_hw = None + shift_hw = None + strategy = None + n_temporal = None + else: + func = cuboid_utils.CuboidCrossAttentionPatterns.get(key) + cuboid_hw, shift_hw, strategy, n_temporal = func(mem_shapes[idx]) + 
block_cross_cuboid_hw.append(cuboid_hw) + block_cross_cuboid_strategy.append(strategy) + block_cross_shift_hw.append(shift_hw) + block_cross_n_temporal.append(n_temporal) + else: + if not isinstance(block_cross_cuboid_hw[0][0], (list, tuple)): + block_cross_cuboid_hw = [ + block_cross_cuboid_hw for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_cross_cuboid_hw) == self.num_blocks + ), f"Incorrect input format! Received block_cross_cuboid_hw={block_cross_cuboid_hw}" + if not isinstance(block_cross_cuboid_strategy[0][0], (list, tuple)): + block_cross_cuboid_strategy = [ + block_cross_cuboid_strategy for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_cross_cuboid_strategy) == self.num_blocks + ), f"Incorrect input format! Received block_cross_cuboid_strategy={block_cross_cuboid_strategy}" + if not isinstance(block_cross_shift_hw[0][0], (list, tuple)): + block_cross_shift_hw = [ + block_cross_shift_hw for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_cross_shift_hw) == self.num_blocks + ), f"Incorrect input format! Received block_cross_shift_hw={block_cross_shift_hw}" + if not isinstance(block_cross_n_temporal[0], (list, tuple)): + block_cross_n_temporal = [ + block_cross_n_temporal for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_cross_n_temporal) == self.num_blocks + ), f"Incorrect input format! Received block_cross_n_temporal={block_cross_n_temporal}" + self.cross_blocks = nn.LayerList() + for i in range(self.cross_start, self.num_blocks): + cross_block = nn.LayerList( + sublayers=[ + StackCuboidCrossAttentionBlock( + dim=self.mem_shapes[i][-1], + num_heads=num_heads, + block_cuboid_hw=block_cross_cuboid_hw[i], + block_strategy=block_cross_cuboid_strategy[i], + block_shift_hw=block_cross_shift_hw[i], + block_n_temporal=block_cross_n_temporal[i], + cross_last_n_frames=cross_last_n_frames, + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + gated_ffn=gated_ffn, + norm_layer=norm_layer, + use_inter_ffn=use_inter_ffn, + activation=ffn_activation, + max_temporal_relative=max_temporal_relative, + padding_type=padding_type, + use_global_vector=use_cross_global, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for _ in range(depth[i]) + ] + ) + self.cross_blocks.append(cross_block) + if self.num_blocks > 1: + if self.upsample_type == "upsample": + self.upsample_layers = nn.LayerList( + sublayers=[ + Upsample3DLayer( + dim=self.mem_shapes[i + 1][-1], + out_dim=self.mem_shapes[i][-1], + target_size=(target_temporal_length,) + + self.mem_shapes[i][1:3], + kernel_size=upsample_kernel_size, + temporal_upsample=False, + conv_init_mode=conv_init_mode, + ) + for i in range(self.num_blocks - 1) + ] + ) + else: + raise NotImplementedError(f"{self.upsample_type} is invalid.") + if self.hierarchical_pos_embed: + self.hierarchical_pos_embed_l = nn.LayerList( + sublayers=[ + PosEmbed( + embed_dim=self.mem_shapes[i][-1], + typ=pos_embed_type, + maxT=target_temporal_length, + maxH=self.mem_shapes[i][1], + maxW=self.mem_shapes[i][2], + ) + for i in range(self.num_blocks - 1) + ] + ) + self.reset_parameters() + + def reset_parameters(self): + for ms in self.self_blocks: + for m in ms: + m.reset_parameters() + for ms in self.cross_blocks: + for m in ms: + m.reset_parameters() + if self.num_blocks > 1: + 
for m in self.upsample_layers: + m.reset_parameters() + if self.hierarchical_pos_embed: + for m in self.hierarchical_pos_embed_l: + m.reset_parameters() + + def forward(self, x, mem_l, mem_global_vector_l=None): + """ + Args: + x : Shape (B, T_top, H_top, W_top, C). + mem_l : A list of memory tensors. + """ + + B, T_top, H_top, W_top, C = x.shape + assert T_top == self.target_temporal_length + assert (H_top, W_top) == (self.mem_shapes[-1][1], self.mem_shapes[-1][2]) + for i in range(self.num_blocks - 1, -1, -1): + mem_global_vector = ( + None if mem_global_vector_l is None else mem_global_vector_l[i] + ) + if not self.use_first_self_attn and i == self.num_blocks - 1: + if i >= self.cross_start: + x = self.cross_blocks[i - self.cross_start][0]( + x, mem_l[i], mem_global_vector + ) + for idx in range(self.depth[i] - 1): + if self.use_self_global: + if self.self_update_global: + x, mem_global_vector = self.self_blocks[i][idx]( + x, mem_global_vector + ) + else: + x, _ = self.self_blocks[i][idx](x, mem_global_vector) + else: + x = self.self_blocks[i][idx](x) + if i >= self.cross_start: + x = self.cross_blocks[i - self.cross_start][idx + 1]( + x, mem_l[i], mem_global_vector + ) + else: + for idx in range(self.depth[i]): + if self.use_self_global: + if self.self_update_global: + x, mem_global_vector = self.self_blocks[i][idx]( + x, mem_global_vector + ) + else: + x, _ = self.self_blocks[i][idx](x, mem_global_vector) + else: + x = self.self_blocks[i][idx](x) + if i >= self.cross_start: + x = self.cross_blocks[i - self.cross_start][idx]( + x, mem_l[i], mem_global_vector + ) + if i > 0: + x = self.upsample_layers[i - 1](x) + if self.hierarchical_pos_embed: + x = self.hierarchical_pos_embed_l[i - 1](x) + return x diff --git a/ppsci/arch/cuboid_transformer_encoder.py b/ppsci/arch/cuboid_transformer_encoder.py index 79b2e6fd1d..324864f47d 100644 --- a/ppsci/arch/cuboid_transformer_encoder.py +++ b/ppsci/arch/cuboid_transformer_encoder.py @@ -1,1515 +1,1515 @@ -from collections import OrderedDict -from functools import lru_cache -from typing import Tuple - -import numpy as np -import paddle -import paddle.nn.functional as F -from paddle import nn -from paddle.distributed import fleet - -import ppsci.arch.cuboid_transformer_utils as cuboid_utils -from ppsci.arch import activation as act_mod -from ppsci.utils import initializer - -NEGATIVE_SLOPE = 0.1 - - -class PatchMerging3D(nn.Layer): - """Patch Merging Layer - - Args: - dim (int): Number of input channels. - out_dim (int, optional): The dim of output. Defaults to None. - downsample (tuple, optional): Downsample factor. Defaults to (1, 2, 2). - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - padding_type (str, optional): The type of padding. Defaults to "nearest". - linear_init_mode (str, optional): The mode of linear init. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization init. Defaults to "0". - """ - - def __init__( - self, - dim: int, - out_dim: int = None, - downsample: Tuple[int, ...] 
= (1, 2, 2), - norm_layer: str = "layer_norm", - padding_type: str = "nearest", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super().__init__() - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - self.dim = dim - if out_dim is None: - out_dim = max(downsample) * dim - self.out_dim = out_dim - self.downsample = downsample - self.padding_type = padding_type - self.reduction = nn.Linear( - in_features=downsample[0] * downsample[1] * downsample[2] * dim, - out_features=out_dim, - bias_attr=False, - ) - self.norm = cuboid_utils.get_norm_layer( - norm_layer, in_channels=downsample[0] * downsample[1] * downsample[2] * dim - ) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, linear_mode=self.linear_init_mode, norm_mode=self.norm_init_mode - ) - - def get_out_shape(self, data_shape): - T, H, W, C_in = data_shape - pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0] - pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1] - pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2] - return ( - (T + pad_t) // self.downsample[0], - (H + pad_h) // self.downsample[1], - (W + pad_w) // self.downsample[2], - self.out_dim, - ) - - def forward(self, x): - """ - - Args: - x : (B, T, H, W, C) - - Returns: - out : Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim) - """ - - B, T, H, W, C = x.shape - pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0] - pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1] - pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2] - if pad_h or pad_h or pad_w: - T += pad_t - H += pad_h - W += pad_w - x = cuboid_utils.generalize_padding( - x, pad_t, pad_h, pad_w, padding_type=self.padding_type - ) - x = ( - x.reshape( - ( - B, - T // self.downsample[0], - self.downsample[0], - H // self.downsample[1], - self.downsample[1], - W // self.downsample[2], - self.downsample[2], - C, - ) - ) - .transpose(perm=[0, 1, 3, 5, 2, 4, 6, 7]) - .reshape( - [ - B, - T // self.downsample[0], - H // self.downsample[1], - W // self.downsample[2], - self.downsample[0] * self.downsample[1] * self.downsample[2] * C, - ] - ) - ) - x = self.norm(x) - x = self.reduction(x) - return x - - -class PositionwiseFFN(nn.Layer): - """The Position-wise FFN layer used in Transformer-like architectures - - If pre_norm is True: - norm(data) -> fc1 -> act -> act_dropout -> fc2 -> dropout -> res(+data) - Else: - data -> fc1 -> act -> act_dropout -> fc2 -> dropout -> norm(res(+data)) - Also, if we use gated projection. We will use - fc1_1 * act(fc1_2(data)) to map the data - - Args: - units (int, optional): The units. Defaults to 512. - hidden_size (int, optional): The size of hidden layer. Defaults to 2048. - activation_dropout (float, optional): The dropout of activate. Defaults to 0.0. - dropout (float, optional): The drop ratio used in DropPat. Defaults to 0.1. - gated_proj (bool, optional): Whether to use gate projection. Defaults to False. - activation (str, optional): The activate. Defaults to "relu". - normalization (str, optional): The normalization. Defaults to "layer_norm". - layer_norm_eps (float, optional): The epsilon of layer normalization. Defaults to 1e-05. 
- pre_norm (bool): Pre-layer normalization as proposed in the paper: - "[ACL2018] The Best of Both Worlds: Combining Recent Advances in Neural Machine Translation" This will stabilize the training of Transformers. - You may also refer to "[Arxiv2020] Understanding the Difficulty of Training Transformers". Defaults to False. - linear_init_mode (str, optional): The mode of linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - units: int = 512, - hidden_size: int = 2048, - activation_dropout: float = 0.0, - dropout: float = 0.1, - gated_proj: bool = False, - activation: str = "relu", - normalization: str = "layer_norm", - layer_norm_eps: float = 1e-05, - pre_norm: bool = False, - linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super().__init__() - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - self._pre_norm = pre_norm - self._gated_proj = gated_proj - self._kwargs = OrderedDict( - [ - ("units", units), - ("hidden_size", hidden_size), - ("activation_dropout", activation_dropout), - ("activation", activation), - ("dropout", dropout), - ("normalization", normalization), - ("layer_norm_eps", layer_norm_eps), - ("gated_proj", gated_proj), - ("pre_norm", pre_norm), - ] - ) - self.dropout_layer = nn.Dropout(p=dropout) - self.activation_dropout_layer = nn.Dropout(p=activation_dropout) - self.ffn_1 = nn.Linear( - in_features=units, out_features=hidden_size, bias_attr=True - ) - if self._gated_proj: - self.ffn_1_gate = nn.Linear( - in_features=units, out_features=hidden_size, bias_attr=True - ) - if activation == "leaky_relu": - self.activation = nn.LeakyReLU(NEGATIVE_SLOPE) - else: - self.activation = act_mod.get_activation(activation) - self.ffn_2 = nn.Linear( - in_features=hidden_size, out_features=units, bias_attr=True - ) - self.layer_norm = cuboid_utils.get_norm_layer( - normalization=normalization, in_channels=units, epsilon=layer_norm_eps - ) - self.reset_parameters() - - def reset_parameters(self): - cuboid_utils.apply_initialization(self.ffn_1, linear_mode=self.linear_init_mode) - if self._gated_proj: - cuboid_utils.apply_initialization( - self.ffn_1_gate, linear_mode=self.linear_init_mode - ) - cuboid_utils.apply_initialization(self.ffn_2, linear_mode=self.linear_init_mode) - cuboid_utils.apply_initialization( - self.layer_norm, norm_mode=self.norm_init_mode - ) - - def forward(self, data): - """ - Args: - x : Shape (B, seq_length, C_in) - - Returns: - out : Shape (B, seq_length, C_out) - """ - - residual = data - if self._pre_norm: - data = self.layer_norm(data) - if self._gated_proj: - out = self.activation(self.ffn_1_gate(data)) * self.ffn_1(data) - else: - out = self.activation(self.ffn_1(data)) - out = self.activation_dropout_layer(out) - out = self.ffn_2(out) - out = self.dropout_layer(out) - out = out + residual - if not self._pre_norm: - out = self.layer_norm(out) - return out - - -def update_cuboid_size_shift_size(data_shape, cuboid_size, shift_size, strategy): - """Update the cuboid_size and shift_size - - Args: - data_shape (Tuple[int,...]): The shape of the data. - cuboid_size (Tuple[int,...]): Size of the cuboid. - shift_size (Tuple[int,...]): Size of the shift. - strategy (str): The strategy of attention. - - Returns: - new_cuboid_size (Tuple[int,...]): Size of the cuboid. - new_shift_size (Tuple[int,...]): Size of the shift. 
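# A hedged usage sketch of the update_cuboid_size_shift_size helper defined here,
# assuming this module is importable as ppsci.arch.cuboid_transformer_encoder: a
# requested cuboid that is larger than the data along an axis is clipped to the
# data size and its shift along that axis is disabled.
from ppsci.arch.cuboid_transformer_encoder import update_cuboid_size_shift_size

new_cuboid, new_shift = update_cuboid_size_shift_size(
    data_shape=(4, 16, 16),
    cuboid_size=(8, 7, 7),
    shift_size=(2, 3, 3),
    strategy=("l", "l", "l"),
)
assert (new_cuboid, new_shift) == ((4, 7, 7), (0, 3, 3))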
- """ - - new_cuboid_size = list(cuboid_size) - new_shift_size = list(shift_size) - for i in range(len(data_shape)): - if strategy[i] == "d": - new_shift_size[i] = 0 - if data_shape[i] <= cuboid_size[i]: - new_cuboid_size[i] = data_shape[i] - new_shift_size[i] = 0 - return tuple(new_cuboid_size), tuple(new_shift_size) - - -def cuboid_reorder(data, cuboid_size, strategy): - """Reorder the tensor into (B, num_cuboids, bT * bH * bW, C) - We assume that the tensor shapes are divisible to the cuboid sizes. - - Args: - data (paddle.Tensor): The input data. - cuboid_size (Tuple[int,...]): The size of the cuboid. - strategy (Tuple[int,...]): The cuboid strategy. - - Returns: - reordered_data (paddle.Tensor): Shape will be (B, num_cuboids, bT * bH * bW, C). - num_cuboids = T / bT * H / bH * W / bW - """ - - B, T, H, W, C = data.shape - num_cuboids = T // cuboid_size[0] * H // cuboid_size[1] * W // cuboid_size[2] - cuboid_volume = cuboid_size[0] * cuboid_size[1] * cuboid_size[2] - intermediate_shape = [] - nblock_axis = [] - block_axis = [] - for i, (block_size, total_size, ele_strategy) in enumerate( - zip(cuboid_size, (T, H, W), strategy) - ): - if ele_strategy == "l": - intermediate_shape.extend([total_size // block_size, block_size]) - nblock_axis.append(2 * i + 1) - block_axis.append(2 * i + 2) - elif ele_strategy == "d": - intermediate_shape.extend([block_size, total_size // block_size]) - nblock_axis.append(2 * i + 2) - block_axis.append(2 * i + 1) - else: - raise NotImplementedError(f"{ele_strategy} is invalid.") - data = data.reshape(list((B,) + tuple(intermediate_shape) + (C,))) - reordered_data = data.transpose( - perm=(0,) + tuple(nblock_axis) + tuple(block_axis) + (7,) - ) - reordered_data = reordered_data.reshape((B, num_cuboids, cuboid_volume, C)) - return reordered_data - - -@lru_cache() -def compute_cuboid_self_attention_mask( - data_shape, cuboid_size, shift_size, strategy, padding_type, device -): - """Compute the shift window attention mask - - Args: - data_shape (Tuple[int,....]): Should be (T, H, W). - cuboid_size (Tuple[int,....]): Size of the cuboid. - shift_size (Tuple[int,....]): The shift size. - strategy (str): The decomposition strategy. - padding_type (str): Type of the padding. - device (str): The device. - - Returns: - attn_mask (paddle.Tensor): Mask with shape (num_cuboid, cuboid_vol, cuboid_vol). - The padded values will always be masked. The other masks will ensure that the shifted windows - will only attend to those in the shifted windows. 
- """ - T, H, W = data_shape - pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0] - pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1] - pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2] - data_mask = None - if pad_t > 0 or pad_h > 0 or pad_w > 0: - if padding_type == "ignore": - data_mask = paddle.ones(shape=(1, T, H, W, 1), dtype="bool") - data_mask = F.pad( - data_mask, [0, 0, 0, pad_w, 0, pad_h, 0, pad_t], data_format="NDHWC" - ) - else: - data_mask = paddle.ones( - shape=(1, T + pad_t, H + pad_h, W + pad_w, 1), dtype="bool" - ) - if any(i > 0 for i in shift_size): - if padding_type == "ignore": - data_mask = paddle.roll( - x=data_mask, - shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), - axis=(1, 2, 3), - ) - if padding_type == "ignore": - data_mask = cuboid_reorder(data_mask, cuboid_size, strategy=strategy) - data_mask = data_mask.squeeze(axis=-1).squeeze(axis=0) - shift_mask = np.zeros(shape=(1, T + pad_t, H + pad_h, W + pad_w, 1)) - cnt = 0 - for t in ( - slice(-cuboid_size[0]), - slice(-cuboid_size[0], -shift_size[0]), - slice(-shift_size[0], None), - ): - for h in ( - slice(-cuboid_size[1]), - slice(-cuboid_size[1], -shift_size[1]), - slice(-shift_size[1], None), - ): - for w in ( - slice(-cuboid_size[2]), - slice(-cuboid_size[2], -shift_size[2]), - slice(-shift_size[2], None), - ): - shift_mask[:, t, h, w, :] = cnt - cnt += 1 - shift_mask = paddle.to_tensor(shift_mask) - shift_mask = cuboid_reorder(shift_mask, cuboid_size, strategy=strategy) - shift_mask = shift_mask.squeeze(axis=-1).squeeze(axis=0) - attn_mask = shift_mask.unsqueeze(axis=1) - shift_mask.unsqueeze(axis=2) == 0 - if padding_type == "ignore": - attn_mask = ( - data_mask.unsqueeze(axis=1) * data_mask.unsqueeze(axis=2) * attn_mask - ) - return attn_mask - - -def masked_softmax(att_score, mask, axis: int = -1): - """Ignore the masked elements when calculating the softmax. - The mask can be broadcastable. - - Args: - att_score (paddle.Tensor): Shape (..., length, ...) - mask (paddle.Tensor): Shape (..., length, ...) - 1 --> The element is not masked - 0 --> The element is masked - axis (int): The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis] - - Returns: - att_weights (paddle.Tensor): Shape (..., length, ...). - """ - - if mask is not None: - if att_score.dtype == paddle.float16: - att_score = att_score.masked_fill(paddle.logical_not(mask), -1e4) - else: - att_score = att_score.masked_fill(paddle.logical_not(mask), -1e18) - att_weights = nn.functional.softmax(x=att_score, axis=axis) * mask - else: - att_weights = nn.functional.softmax(x=att_score, axis=axis) - return att_weights - - -def cuboid_reorder_reverse(data, cuboid_size, strategy, orig_data_shape): - """Reverse the reordered cuboid back to the original space - - Args: - data (paddle.Tensor): The input data. - cuboid_size (Tuple[int,...]): The size of cuboid. - strategy (str): The strategy of reordering. - orig_data_shape (Tuple[int,...]): The original shape of the data. 
- - Returns: - data (paddle.Tensor): The recovered data - """ - - B, num_cuboids, cuboid_volume, C = data.shape - T, H, W = orig_data_shape - permutation_axis = [0] - for i, (block_size, total_size, ele_strategy) in enumerate( - zip(cuboid_size, (T, H, W), strategy) - ): - if ele_strategy == "l": - permutation_axis.append(i + 1) - permutation_axis.append(i + 4) - elif ele_strategy == "d": - permutation_axis.append(i + 4) - permutation_axis.append(i + 1) - else: - raise NotImplementedError((f"{ele_strategy} is invalid.")) - permutation_axis.append(7) - data = data.reshape( - [ - B, - T // cuboid_size[0], - H // cuboid_size[1], - W // cuboid_size[2], - cuboid_size[0], - cuboid_size[1], - cuboid_size[2], - C, - ] - ) - data = data.transpose(perm=permutation_axis) - data = data.reshape((B, T, H, W, C)) - return data - - -class CuboidSelfAttentionLayer(nn.Layer): - """Implements the cuboid self attention. - - The idea of Cuboid Self Attention is to divide the input tensor (T, H, W) into several non-overlapping cuboids. - We apply self-attention inside each cuboid and all cuboid-level self attentions are executed in parallel. - - We adopt two mechanisms for decomposing the input tensor into cuboids: - - (1) local: - We group the tensors within a local window, e.g., X[t:(t+b_t), h:(h+b_h), w:(w+b_w)]. We can also apply the - shifted window strategy proposed in "[ICCV2021] Swin Transformer: Hierarchical Vision Transformer using Shifted Windows". - (2) dilated: - Inspired by the success of dilated convolution "[ICLR2016] Multi-Scale Context Aggregation by Dilated Convolutions", - we split the tensor with dilation factors that are tied to the size of the cuboid. For example, for a cuboid that has width `b_w`, - we sample the elements starting from 0 as 0, w / b_w, 2 * w / b_w, ..., (b_w - 1) * w / b_w. - - The cuboid attention can be viewed as a generalization of the attention mechanism proposed in Video Swin Transformer, https://arxiv.org/abs/2106.13230. - The computational complexity of CuboidAttention can be simply calculated as O(T H W * b_t b_h b_w). To cover multiple correlation patterns, - we are able to combine multiple CuboidAttention layers with different configurations such as cuboid size, shift size, and local / global decomposing strategy. - - In addition, it is straight-forward to extend the cuboid attention to other types of spatiotemporal data that are not described - as regular tensors. We need to define alternative approaches to partition the data into "cuboids". - - In addition, inspired by "[NeurIPS2021] Do Transformers Really Perform Badly for Graph Representation?", - "[NeurIPS2020] Big Bird: Transformers for Longer Sequences", "[EMNLP2021] Longformer: The Long-Document Transformer", we keep - $K$ global vectors to record the global status of the spatiotemporal system. These global vectors will attend to the whole tensor and - the vectors inside each individual cuboids will also attend to the global vectors so that they can peep into the global status of the system. - - Args: - dim (int): The dimension of the input tensor. - num_heads (int): The number of heads. - cuboid_size (tuple, optional): The size of cuboid. Defaults to (2, 7, 7). - shift_size (tuple, optional): The size of shift. Defaults to (0, 0, 0). - strategy (tuple, optional): The strategy. Defaults to ("l", "l", "l"). - padding_type (str, optional): The type of padding. Defaults to "ignore". - qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. 
- qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projection dropout. Defaults to 0.0. - use_final_proj (bool, optional): Whether to use the final projection. Defaults to True. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. - use_global_self_attn (bool, optional): Whether to use self attention among global vectors. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. - checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - num_heads: int, - cuboid_size: Tuple[int, ...] = (2, 7, 7), - shift_size: Tuple[int, ...] = (0, 0, 0), - strategy: Tuple[str, ...] = ("l", "l", "l"), - padding_type: str = "ignore", - qkv_bias: bool = False, - qk_scale: float = None, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - use_final_proj: bool = True, - norm_layer: str = "layer_norm", - use_global_vector: bool = False, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - checkpoint_level: bool = True, - use_relative_pos: bool = True, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(CuboidSelfAttentionLayer, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.norm_init_mode = norm_init_mode - assert dim % num_heads == 0 - self.num_heads = num_heads - self.dim = dim - self.cuboid_size = cuboid_size - self.shift_size = shift_size - self.strategy = strategy - self.padding_type = padding_type - self.use_final_proj = use_final_proj - self.use_relative_pos = use_relative_pos - self.use_global_vector = use_global_vector - self.use_global_self_attn = use_global_self_attn - self.separate_global_qkv = separate_global_qkv - if global_dim_ratio != 1: - assert ( - separate_global_qkv is True - ), "Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
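# A plain-Python sketch of the "local" vs "dilated" index grouping described in the
# class docstring above (w and b_w are illustrative values, not defaults): "local"
# groups contiguous indices, while "dilated" samples indices with stride w // b_w.
w, b_w = 8, 4
local_groups = [list(range(start, start + b_w)) for start in range(0, w, b_w)]
dilated_groups = [list(range(offset, w, w // b_w)) for offset in range(w // b_w)]
assert local_groups == [[0, 1, 2, 3], [4, 5, 6, 7]]
assert dilated_groups == [[0, 2, 4, 6], [1, 3, 5, 7]]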
- self.global_dim_ratio = global_dim_ratio - assert self.padding_type in ["ignore", "zeros", "nearest"] - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - if use_relative_pos: - init_data = paddle.zeros( - ( - (2 * cuboid_size[0] - 1) - * (2 * cuboid_size[1] - 1) - * (2 * cuboid_size[2] - 1), - num_heads, - ) - ) - self.relative_position_bias_table = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(0.0), - ) - self.relative_position_bias_table.stop_gradient = not True - self.relative_position_bias_table = initializer.trunc_normal_( - self.relative_position_bias_table, std=0.02 - ) - - coords_t = paddle.arange(end=self.cuboid_size[0]) - coords_h = paddle.arange(end=self.cuboid_size[1]) - coords_w = paddle.arange(end=self.cuboid_size[2]) - coords = paddle.stack(x=paddle.meshgrid(coords_t, coords_h, coords_w)) - coords_flatten = paddle.flatten(x=coords, start_axis=1) - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] - relative_coords = relative_coords.transpose(perm=[1, 2, 0]) - relative_coords[:, :, 0] += self.cuboid_size[0] - 1 - relative_coords[:, :, 1] += self.cuboid_size[1] - 1 - relative_coords[:, :, 2] += self.cuboid_size[2] - 1 - relative_coords[:, :, 0] *= (2 * self.cuboid_size[1] - 1) * ( - 2 * self.cuboid_size[2] - 1 - ) - relative_coords[:, :, 1] *= 2 * self.cuboid_size[2] - 1 - relative_position_index = relative_coords.sum(axis=-1) - self.register_buffer( - name="relative_position_index", tensor=relative_position_index - ) - self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias_attr=qkv_bias) - self.attn_drop = nn.Dropout(p=attn_drop) - if self.use_global_vector: - if self.separate_global_qkv: - self.l2g_q_net = nn.Linear( - in_features=dim, out_features=dim, bias_attr=qkv_bias - ) - self.l2g_global_kv_net = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=dim * 2, - bias_attr=qkv_bias, - ) - self.g2l_global_q_net = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=dim, - bias_attr=qkv_bias, - ) - self.g2l_k_net = nn.Linear( - in_features=dim, out_features=dim, bias_attr=qkv_bias - ) - self.g2l_v_net = nn.Linear( - in_features=dim, - out_features=global_dim_ratio * dim, - bias_attr=qkv_bias, - ) - if self.use_global_self_attn: - self.g2g_global_qkv_net = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=global_dim_ratio * dim * 3, - bias_attr=qkv_bias, - ) - else: - self.global_qkv = nn.Linear( - in_features=dim, out_features=dim * 3, bias_attr=qkv_bias - ) - self.global_attn_drop = nn.Dropout(p=attn_drop) - if use_final_proj: - self.proj = nn.Linear(in_features=dim, out_features=dim) - self.proj_drop = nn.Dropout(p=proj_drop) - if self.use_global_vector: - self.global_proj = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=global_dim_ratio * dim, - ) - self.norm = cuboid_utils.get_norm_layer(norm_layer, in_channels=dim) - if self.use_global_vector: - self.global_vec_norm = cuboid_utils.get_norm_layer( - norm_layer, in_channels=global_dim_ratio * dim - ) - self.checkpoint_level = checkpoint_level - self.reset_parameters() - - def reset_parameters(self): - cuboid_utils.apply_initialization( - self.qkv, linear_mode=self.attn_linear_init_mode - ) - if self.use_final_proj: - cuboid_utils.apply_initialization( - self.proj, linear_mode=self.ffn_linear_init_mode - ) - cuboid_utils.apply_initialization(self.norm, norm_mode=self.norm_init_mode) - if self.use_global_vector: - if 
self.separate_global_qkv: - cuboid_utils.apply_initialization( - self.l2g_q_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.l2g_global_kv_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.g2l_global_q_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.g2l_k_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.g2l_v_net, linear_mode=self.attn_linear_init_mode - ) - if self.use_global_self_attn: - cuboid_utils.apply_initialization( - self.g2g_global_qkv_net, linear_mode=self.attn_linear_init_mode - ) - else: - cuboid_utils.apply_initialization( - self.global_qkv, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.global_vec_norm, norm_mode=self.norm_init_mode - ) - - def forward(self, x, global_vectors=None): - x = self.norm(x) - - B, T, H, W, C_in = x.shape - assert C_in == self.dim - if self.use_global_vector: - _, num_global, _ = global_vectors.shape - global_vectors = self.global_vec_norm(global_vectors) - cuboid_size, shift_size = update_cuboid_size_shift_size( - (T, H, W), self.cuboid_size, self.shift_size, self.strategy - ) - - pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0] - pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1] - pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2] - x = cuboid_utils.generalize_padding(x, pad_t, pad_h, pad_w, self.padding_type) - - if any(i > 0 for i in shift_size): - shifted_x = paddle.roll( - x=x, - shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), - axis=(1, 2, 3), - ) - else: - shifted_x = x - - reordered_x = cuboid_reorder( - shifted_x, cuboid_size=cuboid_size, strategy=self.strategy - ) - - _, num_cuboids, cuboid_volume, _ = reordered_x.shape - attn_mask = compute_cuboid_self_attention_mask( - (T, H, W), - cuboid_size, - shift_size=shift_size, - strategy=self.strategy, - padding_type=self.padding_type, - device=x.place, - ) - head_C = C_in // self.num_heads - qkv = ( - self.qkv(reordered_x) - .reshape([B, num_cuboids, cuboid_volume, 3, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - - q, k, v = qkv[0], qkv[1], qkv[2] - q = q * self.scale - perm_0 = list(range(k.ndim)) - perm_0[-2] = -1 - perm_0[-1] = -2 - attn_score = q @ k.transpose(perm=perm_0) - - if self.use_relative_pos: - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index[:cuboid_volume, :cuboid_volume].reshape( - [-1] - ) - ].reshape([cuboid_volume, cuboid_volume, -1]) - relative_position_bias = relative_position_bias.transpose( - perm=[2, 0, 1] - ).unsqueeze(axis=1) - attn_score = attn_score + relative_position_bias - - if self.use_global_vector: - global_head_C = self.global_dim_ratio * head_C - if self.separate_global_qkv: - l2g_q = ( - self.l2g_q_net(reordered_x) - .reshape([B, num_cuboids, cuboid_volume, self.num_heads, head_C]) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - l2g_q = l2g_q * self.scale - l2g_global_kv = ( - self.l2g_global_kv_net(global_vectors) - .reshape([B, 1, num_global, 2, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1] - g2l_global_q = ( - self.g2l_global_q_net(global_vectors) - .reshape([B, num_global, self.num_heads, head_C]) - .transpose(perm=[0, 2, 1, 3]) - ) - g2l_global_q = g2l_global_q * self.scale - g2l_k = ( - self.g2l_k_net(reordered_x) - .reshape([B, num_cuboids, 
cuboid_volume, self.num_heads, head_C]) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - g2l_v = ( - self.g2l_v_net(reordered_x) - .reshape( - [B, num_cuboids, cuboid_volume, self.num_heads, global_head_C] - ) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - if self.use_global_self_attn: - g2g_global_qkv = ( - self.g2g_global_qkv_net(global_vectors) - .reshape([B, 1, num_global, 3, self.num_heads, global_head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - g2g_global_q, g2g_global_k, g2g_global_v = ( - g2g_global_qkv[0], - g2g_global_qkv[1], - g2g_global_qkv[2], - ) - g2g_global_q = g2g_global_q.squeeze(axis=2) * self.scale - else: - q_global, k_global, v_global = ( - self.global_qkv(global_vectors) - .reshape([B, 1, num_global, 3, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - q_global = q_global.squeeze(axis=2) * self.scale - l2g_q, g2l_k, g2l_v = q, k, v - g2l_global_q, l2g_global_k, l2g_global_v = ( - q_global, - k_global, - v_global, - ) - if self.use_global_self_attn: - g2g_global_q, g2g_global_k, g2g_global_v = ( - q_global, - k_global, - v_global, - ) - - perm_1 = list(range(l2g_global_k.ndim)) - perm_1[-2] = -1 - perm_1[-1] = -2 - l2g_attn_score = l2g_q @ l2g_global_k.transpose(perm=perm_1) - attn_score_l2l_l2g = paddle.concat(x=(attn_score, l2g_attn_score), axis=-1) - - if attn_mask.ndim == 5: - attn_mask_l2l_l2g = F.pad( - attn_mask, [0, num_global], "constant", 1, data_format="NDHWC" - ) - elif attn_mask.ndim == 3: - attn_mask = attn_mask.astype("float32") - attn_mask_l2l_l2g = F.pad( - attn_mask, [0, num_global], "constant", 1, data_format="NCL" - ) - attn_mask_l2l_l2g = attn_mask_l2l_l2g.astype("bool") - else: - attn_mask_l2l_l2g = F.pad(attn_mask, [0, num_global], "constant", 1) - - v_l_g = paddle.concat( - x=( - v, - l2g_global_v.expand( - shape=[B, self.num_heads, num_cuboids, num_global, head_C] - ), - ), - axis=3, - ) - attn_score_l2l_l2g = masked_softmax( - attn_score_l2l_l2g, mask=attn_mask_l2l_l2g - ) - attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) - reordered_x = ( - (attn_score_l2l_l2g @ v_l_g) - .transpose(perm=[0, 2, 3, 1, 4]) - .reshape([B, num_cuboids, cuboid_volume, self.dim]) - ) - if self.padding_type == "ignore": - g2l_attn_mask = paddle.ones(shape=(1, T, H, W, 1)) - if pad_t > 0 or pad_h > 0 or pad_w > 0: - g2l_attn_mask = F.pad( - g2l_attn_mask, - [0, 0, 0, pad_w, 0, pad_h, 0, pad_t], - data_format="NDHWC", - ) - if any(i > 0 for i in shift_size): - g2l_attn_mask = paddle.roll( - x=g2l_attn_mask, - shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), - axis=(1, 2, 3), - ) - g2l_attn_mask = g2l_attn_mask.reshape((-1,)) - else: - g2l_attn_mask = None - temp = g2l_k.reshape( - [B, self.num_heads, num_cuboids * cuboid_volume, head_C] - ) - perm_2 = list(range(temp.ndim)) - perm_2[-2] = -1 - perm_2[-1] = -2 - g2l_attn_score = g2l_global_q @ temp.transpose(perm=perm_2) - if self.use_global_self_attn: - temp = g2g_global_k.squeeze(axis=2) - perm_3 = list(range(temp.ndim)) - perm_3[-2] = -1 - perm_3[-1] = -2 - g2g_attn_score = g2g_global_q @ temp.transpose(perm=perm_3) - g2all_attn_score = paddle.concat( - x=(g2l_attn_score, g2g_attn_score), axis=-1 - ) - if g2l_attn_mask is not None: - g2all_attn_mask = F.pad( - g2l_attn_mask, - [0, num_global], - "constant", - 1, - data_format="NDHWC", - ) - else: - g2all_attn_mask = None - new_v = paddle.concat( - x=( - g2l_v.reshape( - [ - B, - self.num_heads, - num_cuboids * cuboid_volume, - global_head_C, - ] - ), - g2g_global_v.reshape( - [B, self.num_heads, num_global, global_head_C] - ), - ), - axis=2, 
- ) - else: - g2all_attn_score = g2l_attn_score - g2all_attn_mask = g2l_attn_mask - new_v = g2l_v.reshape( - [B, self.num_heads, num_cuboids * cuboid_volume, global_head_C] - ) - g2all_attn_score = masked_softmax(g2all_attn_score, mask=g2all_attn_mask) - g2all_attn_score = self.global_attn_drop(g2all_attn_score) - new_global_vector = ( - (g2all_attn_score @ new_v) - .transpose(perm=[0, 2, 1, 3]) - .reshape([B, num_global, self.global_dim_ratio * self.dim]) - ) - else: - attn_score = masked_softmax(attn_score, mask=attn_mask) - attn_score = self.attn_drop(attn_score) - reordered_x = ( - (attn_score @ v) - .transpose(perm=[0, 2, 3, 1, 4]) - .reshape([B, num_cuboids, cuboid_volume, self.dim]) - ) - - if self.use_final_proj: - reordered_x = paddle.cast(reordered_x, dtype="float32") - reordered_x = self.proj_drop(self.proj(reordered_x)) - if self.use_global_vector: - new_global_vector = self.proj_drop(self.global_proj(new_global_vector)) - shifted_x = cuboid_reorder_reverse( - reordered_x, - cuboid_size=cuboid_size, - strategy=self.strategy, - orig_data_shape=(T + pad_t, H + pad_h, W + pad_w), - ) - if any(i > 0 for i in shift_size): - x = paddle.roll( - x=shifted_x, - shifts=(shift_size[0], shift_size[1], shift_size[2]), - axis=(1, 2, 3), - ) - else: - x = shifted_x - x = cuboid_utils.generalize_unpadding( - x, pad_t=pad_t, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type - ) - if self.use_global_vector: - return x, new_global_vector - else: - return x - - -class StackCuboidSelfAttentionBlock(nn.Layer): - """ - - "use_inter_ffn" is True - x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out - | ^ | ^ - | | | | - |-------------| |-------------| - - "use_inter_ffn" is False - x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out - | ^ | ^ ^ | ^ - | | | | | | | - |-------------| |------------| ----------| |-----------| - If we have enabled global memory vectors, each attention will be a - - Args: - dim (int): The dimension of the input tensor. - num_heads (int): The number of heads. - block_cuboid_size (list, optional): The size of block cuboid . Defaults to [(4, 4, 4), (4, 4, 4)]. - block_shift_size (list, optional): The shift size of block. Defaults to [(0, 0, 0), (2, 2, 2)]. - block_strategy (list, optional): The strategy of block. Defaults to [("d", "d", "d"), ("l", "l", "l")]. - padding_type (str, optional): The type of padding. Defaults to "ignore". - qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. - qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projection dropout. Defaults to 0.0. - use_final_proj (bool, optional): Whether to use the final projection. Defaults to True. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. - use_global_self_attn (bool, optional): Whether to use self attention among global vectors. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. - Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. - Defaults to 1. - checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. 
- use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - num_heads: int, - block_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - block_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (2, 2, 2)], - block_strategy: Tuple[Tuple[str, ...], ...] = [ - ("d", "d", "d"), - ("l", "l", "l"), - ], - padding_type: str = "ignore", - qkv_bias: bool = False, - qk_scale: float = None, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - use_inter_ffn: bool = False, - use_global_vector: bool = False, - use_global_vector_ffn: bool = True, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - checkpoint_level: bool = True, - use_relative_pos: bool = True, - use_final_proj: bool = True, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(StackCuboidSelfAttentionBlock, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.norm_init_mode = norm_init_mode - if ( - len(block_cuboid_size[0]) <= 0 - or len(block_shift_size) <= 0 - or len(block_strategy) <= 0 - ): - raise ValueError( - "Format of the block cuboid size is not correct. block_cuboid_size={block_cuboid_size}" - ) - if len(block_cuboid_size) != len(block_shift_size) and len( - block_cuboid_size - ) != len(block_strategy): - raise ValueError( - "The lengths of block_cuboid_size, block_shift_size, and block_strategy must be equal." 
- ) - - self.num_attn = len(block_cuboid_size) - self.checkpoint_level = checkpoint_level - self.use_inter_ffn = use_inter_ffn - self.use_global_vector = use_global_vector - self.use_global_vector_ffn = use_global_vector_ffn - self.use_global_self_attn = use_global_self_attn - self.global_dim_ratio = global_dim_ratio - if self.use_inter_ffn: - self.ffn_l = nn.LayerList( - sublayers=[ - PositionwiseFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for _ in range(self.num_attn) - ] - ) - if self.use_global_vector_ffn and self.use_global_vector: - self.global_ffn_l = nn.LayerList( - sublayers=[ - PositionwiseFFN( - units=global_dim_ratio * dim, - hidden_size=global_dim_ratio * 4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for _ in range(self.num_attn) - ] - ) - else: - self.ffn_l = nn.LayerList( - sublayers=[ - PositionwiseFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - ] - ) - if self.use_global_vector_ffn and self.use_global_vector: - self.global_ffn_l = nn.LayerList( - sublayers=[ - PositionwiseFFN( - units=global_dim_ratio * dim, - hidden_size=global_dim_ratio * 4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - ] - ) - self.attn_l = nn.LayerList( - sublayers=[ - CuboidSelfAttentionLayer( - dim=dim, - num_heads=num_heads, - cuboid_size=ele_cuboid_size, - shift_size=ele_shift_size, - strategy=ele_strategy, - padding_type=padding_type, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=proj_drop, - norm_layer=norm_layer, - use_global_vector=use_global_vector, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - use_final_proj=use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for ele_cuboid_size, ele_shift_size, ele_strategy in zip( - block_cuboid_size, block_shift_size, block_strategy - ) - ] - ) - - def reset_parameters(self): - for m in self.ffn_l: - m.reset_parameters() - if self.use_global_vector_ffn and self.use_global_vector: - for m in self.global_ffn_l: - m.reset_parameters() - for m in self.attn_l: - m.reset_parameters() - - def forward(self, x, global_vectors=None): - if self.use_inter_ffn: - if self.use_global_vector: - for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)): - if self.checkpoint_level >= 2 and self.training: - x_out, global_vectors_out = fleet.utils.recompute( - attn, x, global_vectors - ) - else: - x_out, global_vectors_out = attn(x, global_vectors) - x = x + x_out - global_vectors = global_vectors + global_vectors_out - if self.checkpoint_level >= 1 and self.training: - x = 
fleet.utils.recompute(ffn, x) - if self.use_global_vector_ffn: - global_vectors = fleet.utils.recompute( - self.global_ffn_l[idx], global_vectors - ) - else: - x = ffn(x) - if self.use_global_vector_ffn: - global_vectors = self.global_ffn_l[idx](global_vectors) - return x, global_vectors - else: - for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)): - if self.checkpoint_level >= 2 and self.training: - x = x + fleet.utils.recompute(attn, x) - else: - x = x + attn(x) - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(ffn, x) - else: - x = ffn(x) - return x - elif self.use_global_vector: - for idx, attn in enumerate(self.attn_l): - if self.checkpoint_level >= 2 and self.training: - x_out, global_vectors_out = fleet.utils.recompute( - attn, x, global_vectors - ) - else: - x_out, global_vectors_out = attn(x, global_vectors) - x = x + x_out - global_vectors = global_vectors + global_vectors_out - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(self.ffn_l[0], x) - if self.use_global_vector_ffn: - global_vectors = fleet.utils.recompute( - self.global_ffn_l[0], global_vectors - ) - else: - x = self.ffn_l[0](x) - if self.use_global_vector_ffn: - global_vectors = self.global_ffn_l[0](global_vectors) - return x, global_vectors - else: - for idx, attn in enumerate(self.attn_l): - if self.checkpoint_level >= 2 and self.training: - out = fleet.utils.recompute(attn, x) - else: - out = attn(x) - x = x + out - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(self.ffn_l[0], x) - else: - x = self.ffn_l[0](x) - return x - - -class CuboidTransformerEncoder(nn.Layer): - """Encoder of the CuboidTransformer - - x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... --> out - - Args: - input_shape (Tuple[int,...]): The shape of the input. Contains T, H, W, C - base_units (int, optional): The number of units. Defaults to 128. - block_units (int, optional): The number of block units. Defaults to None. - scale_alpha (float, optional): We scale up the channels based on the formula: - - round_to(base_units * max(downsample_scale) ** units_alpha, 4). Defaults to 1.0. - depth (list, optional): The number of layers for each block. Defaults to [4, 4, 4]. - downsample (int, optional): The downsample ratio. Defaults to 2. - downsample_type (str, optional): The type of downsample. Defaults to "patch_merge". - block_attn_patterns (str, optional): Attention pattern for the cuboid attention for each block. Defaults to None. - block_cuboid_size (list, optional): A list of cuboid size parameters. Defaults to [(4, 4, 4), (4, 4, 4)]. - block_strategy (list, optional): A list of cuboid strategies. Defaults to [("l", "l", "l"), ("d", "d", "d")]. - block_shift_size (list, optional): A list of shift sizes. Defaults to [(0, 0, 0), (0, 0, 0)]. - num_heads (int, optional): The number of heads. Defaults to 4. - attn_drop (float, optional): The ratio of attention dropout. Defaults to 0.0. - proj_drop (float, optional): The ratio of projection dropout. Defaults to 0.0. - ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. - ffn_activation (str, optional): The FFN activation. Defaults to "leaky". - gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to True. - padding_type (str, optional): The type of padding. Defaults to "ignore". 
- checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - self_attn_use_final_proj (bool, optional): Whether to use self attention for final projection. Defaults to True. - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. - use_global_vector_ffn (bool, optional): Whether to use FFN global vectors. Defaults to False. - use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. - Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. - Defaults to 1. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". - down_linear_init_mode (str, optional): The mode of downsample linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization. Defaults to "0". - """ - - def __init__( - self, - input_shape: Tuple[int, ...], - base_units: int = 128, - block_units: int = None, - scale_alpha: float = 1.0, - depth: Tuple[int, ...] = [4, 4, 4], - downsample: int = 2, - downsample_type: str = "patch_merge", - block_attn_patterns: str = None, - block_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - block_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "d", "d"), - ], - block_shift_size: Tuple[Tuple[int, ...], ...] 
= [(0, 0, 0), (0, 0, 0)], - num_heads: int = 4, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - ffn_activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - use_inter_ffn: bool = True, - padding_type: str = "ignore", - checkpoint_level: bool = True, - use_relative_pos: bool = True, - self_attn_use_final_proj: bool = True, - use_global_vector: bool = False, - use_global_vector_ffn: bool = True, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - conv_init_mode: str = "0", - down_linear_init_mode: str = "0", - norm_init_mode: str = "0", - ): - super(CuboidTransformerEncoder, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.conv_init_mode = conv_init_mode - self.down_linear_init_mode = down_linear_init_mode - self.norm_init_mode = norm_init_mode - self.input_shape = input_shape - self.depth = depth - self.num_blocks = len(depth) - self.base_units = base_units - self.scale_alpha = scale_alpha - if not isinstance(downsample, (tuple, list)): - downsample = 1, downsample, downsample - self.downsample = downsample - self.downsample_type = downsample_type - self.num_heads = num_heads - self.use_global_vector = use_global_vector - self.checkpoint_level = checkpoint_level - if block_units is None: - block_units = [ - cuboid_utils.round_to( - base_units * int((max(downsample) ** scale_alpha) ** i), 4 - ) - for i in range(self.num_blocks) - ] - else: - assert len(block_units) == self.num_blocks and block_units[0] == base_units - self.block_units = block_units - if self.num_blocks > 1: - if downsample_type == "patch_merge": - self.down_layers = nn.LayerList( - sublayers=[ - PatchMerging3D( - dim=self.block_units[i], - downsample=downsample, - padding_type=padding_type, - out_dim=self.block_units[i + 1], - linear_init_mode=down_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for i in range(self.num_blocks - 1) - ] - ) - else: - raise NotImplementedError(f"{downsample_type} is invalid.") - if self.use_global_vector: - self.down_layer_global_proj = nn.LayerList( - sublayers=[ - nn.Linear( - in_features=global_dim_ratio * self.block_units[i], - out_features=global_dim_ratio * self.block_units[i + 1], - ) - for i in range(self.num_blocks - 1) - ] - ) - if block_attn_patterns is not None: - mem_shapes = self.get_mem_shapes() - if isinstance(block_attn_patterns, (tuple, list)): - assert len(block_attn_patterns) == self.num_blocks - else: - block_attn_patterns = [ - block_attn_patterns for _ in range(self.num_blocks) - ] - block_cuboid_size = [] - block_strategy = [] - block_shift_size = [] - for idx, key in enumerate(block_attn_patterns): - func = cuboid_utils.CuboidSelfAttentionPatterns.get(key) - cuboid_size, strategy, shift_size = func(mem_shapes[idx]) - block_cuboid_size.append(cuboid_size) - block_strategy.append(strategy) - block_shift_size.append(shift_size) - else: - if not isinstance(block_cuboid_size[0][0], (list, tuple)): - block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)] - else: - assert ( - len(block_cuboid_size) == self.num_blocks - ), f"Incorrect input format! 
Received block_cuboid_size={block_cuboid_size}" - if not isinstance(block_strategy[0][0], (list, tuple)): - block_strategy = [block_strategy for _ in range(self.num_blocks)] - else: - assert ( - len(block_strategy) == self.num_blocks - ), f"Incorrect input format! Received block_strategy={block_strategy}" - if not isinstance(block_shift_size[0][0], (list, tuple)): - block_shift_size = [block_shift_size for _ in range(self.num_blocks)] - else: - assert ( - len(block_shift_size) == self.num_blocks - ), f"Incorrect input format! Received block_shift_size={block_shift_size}" - self.block_cuboid_size = block_cuboid_size - self.block_strategy = block_strategy - self.block_shift_size = block_shift_size - self.blocks = nn.LayerList( - sublayers=[ - nn.Sequential( - *[ - StackCuboidSelfAttentionBlock( - dim=self.block_units[i], - num_heads=num_heads, - block_cuboid_size=block_cuboid_size[i], - block_strategy=block_strategy[i], - block_shift_size=block_shift_size[i], - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - activation=ffn_activation, - gated_ffn=gated_ffn, - norm_layer=norm_layer, - use_inter_ffn=use_inter_ffn, - padding_type=padding_type, - use_global_vector=use_global_vector, - use_global_vector_ffn=use_global_vector_ffn, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - use_final_proj=self_attn_use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for _ in range(depth[i]) - ] - ) - for i in range(self.num_blocks) - ] - ) - self.reset_parameters() - - def reset_parameters(self): - if self.num_blocks > 1: - for m in self.down_layers: - m.reset_parameters() - if self.use_global_vector: - cuboid_utils.apply_initialization( - self.down_layer_global_proj, linear_mode=self.down_linear_init_mode - ) - for ms in self.blocks: - for m in ms: - m.reset_parameters() - - def get_mem_shapes(self): - """Get the shape of the output memory based on the input shape. This can be used for constructing the decoder. - - Returns: - mem_shapes : A list of shapes of the output memory - """ - - if self.num_blocks == 1: - return [self.input_shape] - else: - mem_shapes = [self.input_shape] - curr_shape = self.input_shape - for down_layer in self.down_layers: - curr_shape = down_layer.get_out_shape(curr_shape) - mem_shapes.append(curr_shape) - return mem_shapes - - def forward(self, x, global_vectors=None): - """ - Args: - x : Shape (B, T, H, W, C) - - Returns: - out (List[paddle.Tensor,..]): A list of tensors from the bottom layer to the top layer of the encoder. For - example, it can have shape - - (B, T, H, W, C1) - - (B, T, H // 2, W // 2, 2 * C1) - - (B, T, H // 4, W // 4, 4 * C1) - ... - global_mem_out (List,Optional): The output of the global vector. 
-        """
-
-        B, T, H, W, C_in = x.shape
-        assert (T, H, W, C_in) == self.input_shape
-
-        if self.use_global_vector:
-            out = []
-            global_mem_out = []
-            for i in range(self.num_blocks):
-                for l in self.blocks[i]:
-                    x, global_vectors = l(x, global_vectors)
-                out.append(x)
-                global_mem_out.append(global_vectors)
-                if self.num_blocks > 1 and i < self.num_blocks - 1:
-                    x = self.down_layers[i](x)
-                    global_vectors = self.down_layer_global_proj[i](global_vectors)
-            return out, global_mem_out
-        else:
-            out = []
-            for i in range(self.num_blocks):
-                x = self.blocks[i](x)
-                out.append(x)
-                if self.num_blocks > 1 and i < self.num_blocks - 1:
-                    x = self.down_layers[i](x)
-            return out
+from collections import OrderedDict
+from functools import lru_cache
+from typing import Tuple
+
+import numpy as np
+import paddle
+import paddle.nn.functional as F
+from paddle import nn
+from paddle.distributed import fleet
+
+import ppsci.arch.cuboid_transformer_utils as cuboid_utils
+from ppsci.arch import activation as act_mod
+from ppsci.utils import initializer
+
+NEGATIVE_SLOPE = 0.1
+
+
+class PatchMerging3D(nn.Layer):
+    """Patch Merging Layer
+
+    Args:
+        dim (int): Number of input channels.
+        out_dim (int, optional): The dim of output. Defaults to None.
+        downsample (tuple, optional): Downsample factor. Defaults to (1, 2, 2).
+        norm_layer (str, optional): The normalization layer. Defaults to "layer_norm".
+        padding_type (str, optional): The type of padding. Defaults to "nearest".
+        linear_init_mode (str, optional): The mode of linear init. Defaults to "0".
+        norm_init_mode (str, optional): The mode of normalization init. Defaults to "0".
+    """
+
+    def __init__(
+        self,
+        dim: int,
+        out_dim: int = None,
+        downsample: Tuple[int, ...] = (1, 2, 2),
+        norm_layer: str = "layer_norm",
+        padding_type: str = "nearest",
+        linear_init_mode: str = "0",
+        norm_init_mode: str = "0",
+    ):
+        super().__init__()
+        self.linear_init_mode = linear_init_mode
+        self.norm_init_mode = norm_init_mode
+        self.dim = dim
+        if out_dim is None:
+            out_dim = max(downsample) * dim
+        self.out_dim = out_dim
+        self.downsample = downsample
+        self.padding_type = padding_type
+        self.reduction = nn.Linear(
+            in_features=downsample[0] * downsample[1] * downsample[2] * dim,
+            out_features=out_dim,
+            bias_attr=False,
+        )
+        self.norm = cuboid_utils.get_norm_layer(
+            norm_layer, in_channels=downsample[0] * downsample[1] * downsample[2] * dim
+        )
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        for m in self.children():
+            cuboid_utils.apply_initialization(
+                m, linear_mode=self.linear_init_mode, norm_mode=self.norm_init_mode
+            )
+
+    def get_out_shape(self, data_shape):
+        T, H, W, C_in = data_shape
+        pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]
+        pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]
+        pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]
+        return (
+            (T + pad_t) // self.downsample[0],
+            (H + pad_h) // self.downsample[1],
+            (W + pad_w) // self.downsample[2],
+            self.out_dim,
+        )
+
+    def forward(self, x):
+        """
+
+        Args:
+            x : (B, T, H, W, C)
+
+        Returns:
+            out : Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim)
+        """
+
+        B, T, H, W, C = x.shape
+        pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]
+        pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]
+        pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]
+        if pad_t or pad_h or pad_w:
+            T += pad_t
+            H += pad_h
+            W += pad_w
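+            # Pad (T, H, W) up to multiples of the downsample factors so that the
+            # block reshape below is exact; ``generalize_padding`` applies the
+            # configured ``padding_type`` and the whole branch is a no-op when no
+            # padding is needed.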
+            x = cuboid_utils.generalize_padding(
+                x, pad_t, pad_h, pad_w, padding_type=self.padding_type
+            )
+        x = (
+            x.reshape(
+                (
+                    B,
+                    T // self.downsample[0],
+                    self.downsample[0],
+                    H // self.downsample[1],
+                    self.downsample[1],
+                    W // self.downsample[2],
+                    self.downsample[2],
+                    C,
+                )
+            )
+            .transpose(perm=[0, 1, 3, 5, 2, 4, 6, 7])
+            .reshape(
+                [
+                    B,
+                    T // self.downsample[0],
+                    H // self.downsample[1],
+                    W // self.downsample[2],
+                    self.downsample[0] * self.downsample[1] * self.downsample[2] * C,
+                ]
+            )
+        )
+        x = self.norm(x)
+        x = self.reduction(x)
+        return x
+
+
+class PositionwiseFFN(nn.Layer):
+    """The position-wise FFN layer used in Transformer-like architectures.
+
+    If pre_norm is True:
+        norm(data) -> fc1 -> act -> act_dropout -> fc2 -> dropout -> res(+data)
+    Else:
+        data -> fc1 -> act -> act_dropout -> fc2 -> dropout -> norm(res(+data))
+    If gated projection is used, the input is mapped by
+        fc1_1 * act(fc1_2(data))
+
+    Args:
+        units (int, optional): The number of input/output units. Defaults to 512.
+        hidden_size (int, optional): The size of the hidden layer. Defaults to 2048.
+        activation_dropout (float, optional): The dropout rate applied after the activation. Defaults to 0.0.
+        dropout (float, optional): The dropout rate applied after the second linear layer. Defaults to 0.1.
+        gated_proj (bool, optional): Whether to use gated projection. Defaults to False.
+        activation (str, optional): The activation function. Defaults to "relu".
+        normalization (str, optional): The normalization. Defaults to "layer_norm".
+        layer_norm_eps (float, optional): The epsilon of layer normalization. Defaults to 1e-05.
+        pre_norm (bool): Pre-layer normalization as proposed in
+            "[ACL2018] The Best of Both Worlds: Combining Recent Advances in Neural Machine Translation",
+            which helps stabilize the training of Transformers.
+            You may also refer to "[Arxiv2020] Understanding the Difficulty of Training Transformers". Defaults to False.
+        linear_init_mode (str, optional): The mode of linear initialization. Defaults to "0".
+        norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0".
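+
+    Examples:
+        A minimal usage sketch (the shapes below are illustrative only):
+
+        >>> import paddle
+        >>> ffn = PositionwiseFFN(units=64, hidden_size=256, pre_norm=True)
+        >>> out = ffn(paddle.randn([2, 16, 64]))  # output keeps shape (B, seq_length, units)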
+ """ + + def __init__( + self, + units: int = 512, + hidden_size: int = 2048, + activation_dropout: float = 0.0, + dropout: float = 0.1, + gated_proj: bool = False, + activation: str = "relu", + normalization: str = "layer_norm", + layer_norm_eps: float = 1e-05, + pre_norm: bool = False, + linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super().__init__() + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + self._pre_norm = pre_norm + self._gated_proj = gated_proj + self._kwargs = OrderedDict( + [ + ("units", units), + ("hidden_size", hidden_size), + ("activation_dropout", activation_dropout), + ("activation", activation), + ("dropout", dropout), + ("normalization", normalization), + ("layer_norm_eps", layer_norm_eps), + ("gated_proj", gated_proj), + ("pre_norm", pre_norm), + ] + ) + self.dropout_layer = nn.Dropout(p=dropout) + self.activation_dropout_layer = nn.Dropout(p=activation_dropout) + self.ffn_1 = nn.Linear( + in_features=units, out_features=hidden_size, bias_attr=True + ) + if self._gated_proj: + self.ffn_1_gate = nn.Linear( + in_features=units, out_features=hidden_size, bias_attr=True + ) + if activation == "leaky_relu": + self.activation = nn.LeakyReLU(NEGATIVE_SLOPE) + else: + self.activation = act_mod.get_activation(activation) + self.ffn_2 = nn.Linear( + in_features=hidden_size, out_features=units, bias_attr=True + ) + self.layer_norm = cuboid_utils.get_norm_layer( + normalization=normalization, in_channels=units, epsilon=layer_norm_eps + ) + self.reset_parameters() + + def reset_parameters(self): + cuboid_utils.apply_initialization(self.ffn_1, linear_mode=self.linear_init_mode) + if self._gated_proj: + cuboid_utils.apply_initialization( + self.ffn_1_gate, linear_mode=self.linear_init_mode + ) + cuboid_utils.apply_initialization(self.ffn_2, linear_mode=self.linear_init_mode) + cuboid_utils.apply_initialization( + self.layer_norm, norm_mode=self.norm_init_mode + ) + + def forward(self, data): + """ + Args: + x : Shape (B, seq_length, C_in) + + Returns: + out : Shape (B, seq_length, C_out) + """ + + residual = data + if self._pre_norm: + data = self.layer_norm(data) + if self._gated_proj: + out = self.activation(self.ffn_1_gate(data)) * self.ffn_1(data) + else: + out = self.activation(self.ffn_1(data)) + out = self.activation_dropout_layer(out) + out = self.ffn_2(out) + out = self.dropout_layer(out) + out = out + residual + if not self._pre_norm: + out = self.layer_norm(out) + return out + + +def update_cuboid_size_shift_size(data_shape, cuboid_size, shift_size, strategy): + """Update the cuboid_size and shift_size + + Args: + data_shape (Tuple[int,...]): The shape of the data. + cuboid_size (Tuple[int,...]): Size of the cuboid. + shift_size (Tuple[int,...]): Size of the shift. + strategy (str): The strategy of attention. + + Returns: + new_cuboid_size (Tuple[int,...]): Size of the cuboid. + new_shift_size (Tuple[int,...]): Size of the shift. + """ + + new_cuboid_size = list(cuboid_size) + new_shift_size = list(shift_size) + for i in range(len(data_shape)): + if strategy[i] == "d": + new_shift_size[i] = 0 + if data_shape[i] <= cuboid_size[i]: + new_cuboid_size[i] = data_shape[i] + new_shift_size[i] = 0 + return tuple(new_cuboid_size), tuple(new_shift_size) + + +def cuboid_reorder(data, cuboid_size, strategy): + """Reorder the tensor into (B, num_cuboids, bT * bH * bW, C) + We assume that the tensor shapes are divisible to the cuboid sizes. + + Args: + data (paddle.Tensor): The input data. 
+ cuboid_size (Tuple[int,...]): The size of the cuboid. + strategy (Tuple[int,...]): The cuboid strategy. + + Returns: + reordered_data (paddle.Tensor): Shape will be (B, num_cuboids, bT * bH * bW, C). + num_cuboids = T / bT * H / bH * W / bW + """ + + B, T, H, W, C = data.shape + num_cuboids = T // cuboid_size[0] * H // cuboid_size[1] * W // cuboid_size[2] + cuboid_volume = cuboid_size[0] * cuboid_size[1] * cuboid_size[2] + intermediate_shape = [] + nblock_axis = [] + block_axis = [] + for i, (block_size, total_size, ele_strategy) in enumerate( + zip(cuboid_size, (T, H, W), strategy) + ): + if ele_strategy == "l": + intermediate_shape.extend([total_size // block_size, block_size]) + nblock_axis.append(2 * i + 1) + block_axis.append(2 * i + 2) + elif ele_strategy == "d": + intermediate_shape.extend([block_size, total_size // block_size]) + nblock_axis.append(2 * i + 2) + block_axis.append(2 * i + 1) + else: + raise NotImplementedError(f"{ele_strategy} is invalid.") + data = data.reshape(list((B,) + tuple(intermediate_shape) + (C,))) + reordered_data = data.transpose( + perm=(0,) + tuple(nblock_axis) + tuple(block_axis) + (7,) + ) + reordered_data = reordered_data.reshape((B, num_cuboids, cuboid_volume, C)) + return reordered_data + + +@lru_cache() +def compute_cuboid_self_attention_mask( + data_shape, cuboid_size, shift_size, strategy, padding_type, device +): + """Compute the shift window attention mask + + Args: + data_shape (Tuple[int,....]): Should be (T, H, W). + cuboid_size (Tuple[int,....]): Size of the cuboid. + shift_size (Tuple[int,....]): The shift size. + strategy (str): The decomposition strategy. + padding_type (str): Type of the padding. + device (str): The device. + + Returns: + attn_mask (paddle.Tensor): Mask with shape (num_cuboid, cuboid_vol, cuboid_vol). + The padded values will always be masked. The other masks will ensure that the shifted windows + will only attend to those in the shifted windows. 
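+
+    Examples:
+        An illustrative call (shapes chosen arbitrarily); results are cached by
+        ``lru_cache`` for repeated shape/configuration combinations:
+
+        >>> mask = compute_cuboid_self_attention_mask(
+        ...     (4, 8, 8), (2, 4, 4), (0, 0, 0), ("l", "l", "l"), "ignore", None
+        ... )
+        >>> # mask.shape == [num_cuboids, cuboid_volume, cuboid_volume] == [8, 32, 32]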
+ """ + T, H, W = data_shape + pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0] + pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1] + pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2] + data_mask = None + if pad_t > 0 or pad_h > 0 or pad_w > 0: + if padding_type == "ignore": + data_mask = paddle.ones(shape=(1, T, H, W, 1), dtype="bool") + data_mask = F.pad( + data_mask, [0, 0, 0, pad_w, 0, pad_h, 0, pad_t], data_format="NDHWC" + ) + else: + data_mask = paddle.ones( + shape=(1, T + pad_t, H + pad_h, W + pad_w, 1), dtype="bool" + ) + if any(i > 0 for i in shift_size): + if padding_type == "ignore": + data_mask = paddle.roll( + x=data_mask, + shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), + axis=(1, 2, 3), + ) + if padding_type == "ignore": + data_mask = cuboid_reorder(data_mask, cuboid_size, strategy=strategy) + data_mask = data_mask.squeeze(axis=-1).squeeze(axis=0) + shift_mask = np.zeros(shape=(1, T + pad_t, H + pad_h, W + pad_w, 1)) + cnt = 0 + for t in ( + slice(-cuboid_size[0]), + slice(-cuboid_size[0], -shift_size[0]), + slice(-shift_size[0], None), + ): + for h in ( + slice(-cuboid_size[1]), + slice(-cuboid_size[1], -shift_size[1]), + slice(-shift_size[1], None), + ): + for w in ( + slice(-cuboid_size[2]), + slice(-cuboid_size[2], -shift_size[2]), + slice(-shift_size[2], None), + ): + shift_mask[:, t, h, w, :] = cnt + cnt += 1 + shift_mask = paddle.to_tensor(shift_mask) + shift_mask = cuboid_reorder(shift_mask, cuboid_size, strategy=strategy) + shift_mask = shift_mask.squeeze(axis=-1).squeeze(axis=0) + attn_mask = shift_mask.unsqueeze(axis=1) - shift_mask.unsqueeze(axis=2) == 0 + if padding_type == "ignore": + attn_mask = ( + data_mask.unsqueeze(axis=1) * data_mask.unsqueeze(axis=2) * attn_mask + ) + return attn_mask + + +def masked_softmax(att_score, mask, axis: int = -1): + """Ignore the masked elements when calculating the softmax. + The mask can be broadcastable. + + Args: + att_score (paddle.Tensor): Shape (..., length, ...) + mask (paddle.Tensor): Shape (..., length, ...) + 1 --> The element is not masked + 0 --> The element is masked + axis (int): The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis] + + Returns: + att_weights (paddle.Tensor): Shape (..., length, ...). + """ + + if mask is not None: + if att_score.dtype == paddle.float16: + att_score = att_score.masked_fill(paddle.logical_not(mask), -1e4) + else: + att_score = att_score.masked_fill(paddle.logical_not(mask), -1e18) + att_weights = nn.functional.softmax(x=att_score, axis=axis) * mask + else: + att_weights = nn.functional.softmax(x=att_score, axis=axis) + return att_weights + + +def cuboid_reorder_reverse(data, cuboid_size, strategy, orig_data_shape): + """Reverse the reordered cuboid back to the original space + + Args: + data (paddle.Tensor): The input data. + cuboid_size (Tuple[int,...]): The size of cuboid. + strategy (str): The strategy of reordering. + orig_data_shape (Tuple[int,...]): The original shape of the data. 
+ + Returns: + data (paddle.Tensor): The recovered data + """ + + B, num_cuboids, cuboid_volume, C = data.shape + T, H, W = orig_data_shape + permutation_axis = [0] + for i, (block_size, total_size, ele_strategy) in enumerate( + zip(cuboid_size, (T, H, W), strategy) + ): + if ele_strategy == "l": + permutation_axis.append(i + 1) + permutation_axis.append(i + 4) + elif ele_strategy == "d": + permutation_axis.append(i + 4) + permutation_axis.append(i + 1) + else: + raise NotImplementedError((f"{ele_strategy} is invalid.")) + permutation_axis.append(7) + data = data.reshape( + [ + B, + T // cuboid_size[0], + H // cuboid_size[1], + W // cuboid_size[2], + cuboid_size[0], + cuboid_size[1], + cuboid_size[2], + C, + ] + ) + data = data.transpose(perm=permutation_axis) + data = data.reshape((B, T, H, W, C)) + return data + + +class CuboidSelfAttentionLayer(nn.Layer): + """Implements the cuboid self attention. + + The idea of Cuboid Self Attention is to divide the input tensor (T, H, W) into several non-overlapping cuboids. + We apply self-attention inside each cuboid and all cuboid-level self attentions are executed in parallel. + + We adopt two mechanisms for decomposing the input tensor into cuboids: + + (1) local: + We group the tensors within a local window, e.g., X[t:(t+b_t), h:(h+b_h), w:(w+b_w)]. We can also apply the + shifted window strategy proposed in "[ICCV2021] Swin Transformer: Hierarchical Vision Transformer using Shifted Windows". + (2) dilated: + Inspired by the success of dilated convolution "[ICLR2016] Multi-Scale Context Aggregation by Dilated Convolutions", + we split the tensor with dilation factors that are tied to the size of the cuboid. For example, for a cuboid that has width `b_w`, + we sample the elements starting from 0 as 0, w / b_w, 2 * w / b_w, ..., (b_w - 1) * w / b_w. + + The cuboid attention can be viewed as a generalization of the attention mechanism proposed in Video Swin Transformer, https://arxiv.org/abs/2106.13230. + The computational complexity of CuboidAttention can be simply calculated as O(T H W * b_t b_h b_w). To cover multiple correlation patterns, + we are able to combine multiple CuboidAttention layers with different configurations such as cuboid size, shift size, and local / global decomposing strategy. + + In addition, it is straight-forward to extend the cuboid attention to other types of spatiotemporal data that are not described + as regular tensors. We need to define alternative approaches to partition the data into "cuboids". + + In addition, inspired by "[NeurIPS2021] Do Transformers Really Perform Badly for Graph Representation?", + "[NeurIPS2020] Big Bird: Transformers for Longer Sequences", "[EMNLP2021] Longformer: The Long-Document Transformer", we keep + $K$ global vectors to record the global status of the spatiotemporal system. These global vectors will attend to the whole tensor and + the vectors inside each individual cuboids will also attend to the global vectors so that they can peep into the global status of the system. + + Args: + dim (int): The dimension of the input tensor. + num_heads (int): The number of heads. + cuboid_size (tuple, optional): The size of cuboid. Defaults to (2, 7, 7). + shift_size (tuple, optional): The size of shift. Defaults to (0, 0, 0). + strategy (tuple, optional): The strategy. Defaults to ("l", "l", "l"). + padding_type (str, optional): The type of padding. Defaults to "ignore". + qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. 
+ qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projection dropout. Defaults to 0.0. + use_final_proj (bool, optional): Whether to use the final projection. Defaults to True. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. + use_global_self_attn (bool, optional): Whether to use self attention among global vectors. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. + checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". + """ + + def __init__( + self, + dim: int, + num_heads: int, + cuboid_size: Tuple[int, ...] = (2, 7, 7), + shift_size: Tuple[int, ...] = (0, 0, 0), + strategy: Tuple[str, ...] = ("l", "l", "l"), + padding_type: str = "ignore", + qkv_bias: bool = False, + qk_scale: float = None, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + use_final_proj: bool = True, + norm_layer: str = "layer_norm", + use_global_vector: bool = False, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + checkpoint_level: bool = True, + use_relative_pos: bool = True, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(CuboidSelfAttentionLayer, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.norm_init_mode = norm_init_mode + assert dim % num_heads == 0 + self.num_heads = num_heads + self.dim = dim + self.cuboid_size = cuboid_size + self.shift_size = shift_size + self.strategy = strategy + self.padding_type = padding_type + self.use_final_proj = use_final_proj + self.use_relative_pos = use_relative_pos + self.use_global_vector = use_global_vector + self.use_global_self_attn = use_global_self_attn + self.separate_global_qkv = separate_global_qkv + if global_dim_ratio != 1: + assert ( + separate_global_qkv is True + ), "Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
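+        # A global_dim_ratio > 1 widens the global vectors relative to the local
+        # tokens, which is only representable when separate q/k/v projections are
+        # built for the local and global streams (hence the assert above).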
+ self.global_dim_ratio = global_dim_ratio + assert self.padding_type in ["ignore", "zeros", "nearest"] + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + if use_relative_pos: + init_data = paddle.zeros( + ( + (2 * cuboid_size[0] - 1) + * (2 * cuboid_size[1] - 1) + * (2 * cuboid_size[2] - 1), + num_heads, + ) + ) + self.relative_position_bias_table = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(0.0), + ) + self.relative_position_bias_table.stop_gradient = not True + self.relative_position_bias_table = initializer.trunc_normal_( + self.relative_position_bias_table, std=0.02 + ) + + coords_t = paddle.arange(end=self.cuboid_size[0]) + coords_h = paddle.arange(end=self.cuboid_size[1]) + coords_w = paddle.arange(end=self.cuboid_size[2]) + coords = paddle.stack(x=paddle.meshgrid(coords_t, coords_h, coords_w)) + coords_flatten = paddle.flatten(x=coords, start_axis=1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.transpose(perm=[1, 2, 0]) + relative_coords[:, :, 0] += self.cuboid_size[0] - 1 + relative_coords[:, :, 1] += self.cuboid_size[1] - 1 + relative_coords[:, :, 2] += self.cuboid_size[2] - 1 + relative_coords[:, :, 0] *= (2 * self.cuboid_size[1] - 1) * ( + 2 * self.cuboid_size[2] - 1 + ) + relative_coords[:, :, 1] *= 2 * self.cuboid_size[2] - 1 + relative_position_index = relative_coords.sum(axis=-1) + self.register_buffer( + name="relative_position_index", tensor=relative_position_index + ) + self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(p=attn_drop) + if self.use_global_vector: + if self.separate_global_qkv: + self.l2g_q_net = nn.Linear( + in_features=dim, out_features=dim, bias_attr=qkv_bias + ) + self.l2g_global_kv_net = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=dim * 2, + bias_attr=qkv_bias, + ) + self.g2l_global_q_net = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=dim, + bias_attr=qkv_bias, + ) + self.g2l_k_net = nn.Linear( + in_features=dim, out_features=dim, bias_attr=qkv_bias + ) + self.g2l_v_net = nn.Linear( + in_features=dim, + out_features=global_dim_ratio * dim, + bias_attr=qkv_bias, + ) + if self.use_global_self_attn: + self.g2g_global_qkv_net = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=global_dim_ratio * dim * 3, + bias_attr=qkv_bias, + ) + else: + self.global_qkv = nn.Linear( + in_features=dim, out_features=dim * 3, bias_attr=qkv_bias + ) + self.global_attn_drop = nn.Dropout(p=attn_drop) + if use_final_proj: + self.proj = nn.Linear(in_features=dim, out_features=dim) + self.proj_drop = nn.Dropout(p=proj_drop) + if self.use_global_vector: + self.global_proj = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=global_dim_ratio * dim, + ) + self.norm = cuboid_utils.get_norm_layer(norm_layer, in_channels=dim) + if self.use_global_vector: + self.global_vec_norm = cuboid_utils.get_norm_layer( + norm_layer, in_channels=global_dim_ratio * dim + ) + self.checkpoint_level = checkpoint_level + self.reset_parameters() + + def reset_parameters(self): + cuboid_utils.apply_initialization( + self.qkv, linear_mode=self.attn_linear_init_mode + ) + if self.use_final_proj: + cuboid_utils.apply_initialization( + self.proj, linear_mode=self.ffn_linear_init_mode + ) + cuboid_utils.apply_initialization(self.norm, norm_mode=self.norm_init_mode) + if self.use_global_vector: + if 
self.separate_global_qkv: + cuboid_utils.apply_initialization( + self.l2g_q_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.l2g_global_kv_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.g2l_global_q_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.g2l_k_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.g2l_v_net, linear_mode=self.attn_linear_init_mode + ) + if self.use_global_self_attn: + cuboid_utils.apply_initialization( + self.g2g_global_qkv_net, linear_mode=self.attn_linear_init_mode + ) + else: + cuboid_utils.apply_initialization( + self.global_qkv, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.global_vec_norm, norm_mode=self.norm_init_mode + ) + + def forward(self, x, global_vectors=None): + x = self.norm(x) + + B, T, H, W, C_in = x.shape + assert C_in == self.dim + if self.use_global_vector: + _, num_global, _ = global_vectors.shape + global_vectors = self.global_vec_norm(global_vectors) + cuboid_size, shift_size = update_cuboid_size_shift_size( + (T, H, W), self.cuboid_size, self.shift_size, self.strategy + ) + + pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0] + pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1] + pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2] + x = cuboid_utils.generalize_padding(x, pad_t, pad_h, pad_w, self.padding_type) + + if any(i > 0 for i in shift_size): + shifted_x = paddle.roll( + x=x, + shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), + axis=(1, 2, 3), + ) + else: + shifted_x = x + + reordered_x = cuboid_reorder( + shifted_x, cuboid_size=cuboid_size, strategy=self.strategy + ) + + _, num_cuboids, cuboid_volume, _ = reordered_x.shape + attn_mask = compute_cuboid_self_attention_mask( + (T, H, W), + cuboid_size, + shift_size=shift_size, + strategy=self.strategy, + padding_type=self.padding_type, + device=x.place, + ) + head_C = C_in // self.num_heads + qkv = ( + self.qkv(reordered_x) + .reshape([B, num_cuboids, cuboid_volume, 3, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + + q, k, v = qkv[0], qkv[1], qkv[2] + q = q * self.scale + perm_0 = list(range(k.ndim)) + perm_0[-2] = -1 + perm_0[-1] = -2 + attn_score = q @ k.transpose(perm=perm_0) + + if self.use_relative_pos: + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index[:cuboid_volume, :cuboid_volume].reshape( + [-1] + ) + ].reshape([cuboid_volume, cuboid_volume, -1]) + relative_position_bias = relative_position_bias.transpose( + perm=[2, 0, 1] + ).unsqueeze(axis=1) + attn_score = attn_score + relative_position_bias + + if self.use_global_vector: + global_head_C = self.global_dim_ratio * head_C + if self.separate_global_qkv: + l2g_q = ( + self.l2g_q_net(reordered_x) + .reshape([B, num_cuboids, cuboid_volume, self.num_heads, head_C]) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + l2g_q = l2g_q * self.scale + l2g_global_kv = ( + self.l2g_global_kv_net(global_vectors) + .reshape([B, 1, num_global, 2, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1] + g2l_global_q = ( + self.g2l_global_q_net(global_vectors) + .reshape([B, num_global, self.num_heads, head_C]) + .transpose(perm=[0, 2, 1, 3]) + ) + g2l_global_q = g2l_global_q * self.scale + g2l_k = ( + self.g2l_k_net(reordered_x) + .reshape([B, num_cuboids, 
cuboid_volume, self.num_heads, head_C]) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + g2l_v = ( + self.g2l_v_net(reordered_x) + .reshape( + [B, num_cuboids, cuboid_volume, self.num_heads, global_head_C] + ) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + if self.use_global_self_attn: + g2g_global_qkv = ( + self.g2g_global_qkv_net(global_vectors) + .reshape([B, 1, num_global, 3, self.num_heads, global_head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + g2g_global_q, g2g_global_k, g2g_global_v = ( + g2g_global_qkv[0], + g2g_global_qkv[1], + g2g_global_qkv[2], + ) + g2g_global_q = g2g_global_q.squeeze(axis=2) * self.scale + else: + q_global, k_global, v_global = ( + self.global_qkv(global_vectors) + .reshape([B, 1, num_global, 3, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + q_global = q_global.squeeze(axis=2) * self.scale + l2g_q, g2l_k, g2l_v = q, k, v + g2l_global_q, l2g_global_k, l2g_global_v = ( + q_global, + k_global, + v_global, + ) + if self.use_global_self_attn: + g2g_global_q, g2g_global_k, g2g_global_v = ( + q_global, + k_global, + v_global, + ) + + perm_1 = list(range(l2g_global_k.ndim)) + perm_1[-2] = -1 + perm_1[-1] = -2 + l2g_attn_score = l2g_q @ l2g_global_k.transpose(perm=perm_1) + attn_score_l2l_l2g = paddle.concat(x=(attn_score, l2g_attn_score), axis=-1) + + if attn_mask.ndim == 5: + attn_mask_l2l_l2g = F.pad( + attn_mask, [0, num_global], "constant", 1, data_format="NDHWC" + ) + elif attn_mask.ndim == 3: + attn_mask = attn_mask.astype("float32") + attn_mask_l2l_l2g = F.pad( + attn_mask, [0, num_global], "constant", 1, data_format="NCL" + ) + attn_mask_l2l_l2g = attn_mask_l2l_l2g.astype("bool") + else: + attn_mask_l2l_l2g = F.pad(attn_mask, [0, num_global], "constant", 1) + + v_l_g = paddle.concat( + x=( + v, + l2g_global_v.expand( + shape=[B, self.num_heads, num_cuboids, num_global, head_C] + ), + ), + axis=3, + ) + attn_score_l2l_l2g = masked_softmax( + attn_score_l2l_l2g, mask=attn_mask_l2l_l2g + ) + attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) + reordered_x = ( + (attn_score_l2l_l2g @ v_l_g) + .transpose(perm=[0, 2, 3, 1, 4]) + .reshape([B, num_cuboids, cuboid_volume, self.dim]) + ) + if self.padding_type == "ignore": + g2l_attn_mask = paddle.ones(shape=(1, T, H, W, 1)) + if pad_t > 0 or pad_h > 0 or pad_w > 0: + g2l_attn_mask = F.pad( + g2l_attn_mask, + [0, 0, 0, pad_w, 0, pad_h, 0, pad_t], + data_format="NDHWC", + ) + if any(i > 0 for i in shift_size): + g2l_attn_mask = paddle.roll( + x=g2l_attn_mask, + shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), + axis=(1, 2, 3), + ) + g2l_attn_mask = g2l_attn_mask.reshape((-1,)) + else: + g2l_attn_mask = None + temp = g2l_k.reshape( + [B, self.num_heads, num_cuboids * cuboid_volume, head_C] + ) + perm_2 = list(range(temp.ndim)) + perm_2[-2] = -1 + perm_2[-1] = -2 + g2l_attn_score = g2l_global_q @ temp.transpose(perm=perm_2) + if self.use_global_self_attn: + temp = g2g_global_k.squeeze(axis=2) + perm_3 = list(range(temp.ndim)) + perm_3[-2] = -1 + perm_3[-1] = -2 + g2g_attn_score = g2g_global_q @ temp.transpose(perm=perm_3) + g2all_attn_score = paddle.concat( + x=(g2l_attn_score, g2g_attn_score), axis=-1 + ) + if g2l_attn_mask is not None: + g2all_attn_mask = F.pad( + g2l_attn_mask, + [0, num_global], + "constant", + 1, + data_format="NDHWC", + ) + else: + g2all_attn_mask = None + new_v = paddle.concat( + x=( + g2l_v.reshape( + [ + B, + self.num_heads, + num_cuboids * cuboid_volume, + global_head_C, + ] + ), + g2g_global_v.reshape( + [B, self.num_heads, num_global, global_head_C] + ), + ), + axis=2, 
+ ) + else: + g2all_attn_score = g2l_attn_score + g2all_attn_mask = g2l_attn_mask + new_v = g2l_v.reshape( + [B, self.num_heads, num_cuboids * cuboid_volume, global_head_C] + ) + g2all_attn_score = masked_softmax(g2all_attn_score, mask=g2all_attn_mask) + g2all_attn_score = self.global_attn_drop(g2all_attn_score) + new_global_vector = ( + (g2all_attn_score @ new_v) + .transpose(perm=[0, 2, 1, 3]) + .reshape([B, num_global, self.global_dim_ratio * self.dim]) + ) + else: + attn_score = masked_softmax(attn_score, mask=attn_mask) + attn_score = self.attn_drop(attn_score) + reordered_x = ( + (attn_score @ v) + .transpose(perm=[0, 2, 3, 1, 4]) + .reshape([B, num_cuboids, cuboid_volume, self.dim]) + ) + + if self.use_final_proj: + reordered_x = paddle.cast(reordered_x, dtype="float32") + reordered_x = self.proj_drop(self.proj(reordered_x)) + if self.use_global_vector: + new_global_vector = self.proj_drop(self.global_proj(new_global_vector)) + shifted_x = cuboid_reorder_reverse( + reordered_x, + cuboid_size=cuboid_size, + strategy=self.strategy, + orig_data_shape=(T + pad_t, H + pad_h, W + pad_w), + ) + if any(i > 0 for i in shift_size): + x = paddle.roll( + x=shifted_x, + shifts=(shift_size[0], shift_size[1], shift_size[2]), + axis=(1, 2, 3), + ) + else: + x = shifted_x + x = cuboid_utils.generalize_unpadding( + x, pad_t=pad_t, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type + ) + if self.use_global_vector: + return x, new_global_vector + else: + return x + + +class StackCuboidSelfAttentionBlock(nn.Layer): + """ + - "use_inter_ffn" is True + x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out + | ^ | ^ + | | | | + |-------------| |-------------| + - "use_inter_ffn" is False + x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out + | ^ | ^ ^ | ^ + | | | | | | | + |-------------| |------------| ----------| |-----------| + If we have enabled global memory vectors, each attention will be a + + Args: + dim (int): The dimension of the input tensor. + num_heads (int): The number of heads. + block_cuboid_size (list, optional): The size of block cuboid . Defaults to [(4, 4, 4), (4, 4, 4)]. + block_shift_size (list, optional): The shift size of block. Defaults to [(0, 0, 0), (2, 2, 2)]. + block_strategy (list, optional): The strategy of block. Defaults to [("d", "d", "d"), ("l", "l", "l")]. + padding_type (str, optional): The type of padding. Defaults to "ignore". + qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. + qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projection dropout. Defaults to 0.0. + use_final_proj (bool, optional): Whether to use the final projection. Defaults to True. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. + use_global_self_attn (bool, optional): Whether to use self attention among global vectors. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. + Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. + Defaults to 1. + checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. 
+ use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". + """ + + def __init__( + self, + dim: int, + num_heads: int, + block_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + block_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (2, 2, 2)], + block_strategy: Tuple[Tuple[str, ...], ...] = [ + ("d", "d", "d"), + ("l", "l", "l"), + ], + padding_type: str = "ignore", + qkv_bias: bool = False, + qk_scale: float = None, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + use_inter_ffn: bool = False, + use_global_vector: bool = False, + use_global_vector_ffn: bool = True, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + checkpoint_level: bool = True, + use_relative_pos: bool = True, + use_final_proj: bool = True, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(StackCuboidSelfAttentionBlock, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.norm_init_mode = norm_init_mode + if ( + len(block_cuboid_size[0]) <= 0 + or len(block_shift_size) <= 0 + or len(block_strategy) <= 0 + ): + raise ValueError( + "Format of the block cuboid size is not correct. block_cuboid_size={block_cuboid_size}" + ) + if len(block_cuboid_size) != len(block_shift_size) and len( + block_cuboid_size + ) != len(block_strategy): + raise ValueError( + "The lengths of block_cuboid_size, block_shift_size, and block_strategy must be equal." 
+ ) + + self.num_attn = len(block_cuboid_size) + self.checkpoint_level = checkpoint_level + self.use_inter_ffn = use_inter_ffn + self.use_global_vector = use_global_vector + self.use_global_vector_ffn = use_global_vector_ffn + self.use_global_self_attn = use_global_self_attn + self.global_dim_ratio = global_dim_ratio + if self.use_inter_ffn: + self.ffn_l = nn.LayerList( + sublayers=[ + PositionwiseFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for _ in range(self.num_attn) + ] + ) + if self.use_global_vector_ffn and self.use_global_vector: + self.global_ffn_l = nn.LayerList( + sublayers=[ + PositionwiseFFN( + units=global_dim_ratio * dim, + hidden_size=global_dim_ratio * 4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for _ in range(self.num_attn) + ] + ) + else: + self.ffn_l = nn.LayerList( + sublayers=[ + PositionwiseFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + ] + ) + if self.use_global_vector_ffn and self.use_global_vector: + self.global_ffn_l = nn.LayerList( + sublayers=[ + PositionwiseFFN( + units=global_dim_ratio * dim, + hidden_size=global_dim_ratio * 4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + ] + ) + self.attn_l = nn.LayerList( + sublayers=[ + CuboidSelfAttentionLayer( + dim=dim, + num_heads=num_heads, + cuboid_size=ele_cuboid_size, + shift_size=ele_shift_size, + strategy=ele_strategy, + padding_type=padding_type, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + use_global_vector=use_global_vector, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + use_final_proj=use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for ele_cuboid_size, ele_shift_size, ele_strategy in zip( + block_cuboid_size, block_shift_size, block_strategy + ) + ] + ) + + def reset_parameters(self): + for m in self.ffn_l: + m.reset_parameters() + if self.use_global_vector_ffn and self.use_global_vector: + for m in self.global_ffn_l: + m.reset_parameters() + for m in self.attn_l: + m.reset_parameters() + + def forward(self, x, global_vectors=None): + if self.use_inter_ffn: + if self.use_global_vector: + for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)): + if self.checkpoint_level >= 2 and self.training: + x_out, global_vectors_out = fleet.utils.recompute( + attn, x, global_vectors + ) + else: + x_out, global_vectors_out = attn(x, global_vectors) + x = x + x_out + global_vectors = global_vectors + global_vectors_out + if self.checkpoint_level >= 1 and self.training: + x = 
fleet.utils.recompute(ffn, x) + if self.use_global_vector_ffn: + global_vectors = fleet.utils.recompute( + self.global_ffn_l[idx], global_vectors + ) + else: + x = ffn(x) + if self.use_global_vector_ffn: + global_vectors = self.global_ffn_l[idx](global_vectors) + return x, global_vectors + else: + for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)): + if self.checkpoint_level >= 2 and self.training: + x = x + fleet.utils.recompute(attn, x) + else: + x = x + attn(x) + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(ffn, x) + else: + x = ffn(x) + return x + elif self.use_global_vector: + for idx, attn in enumerate(self.attn_l): + if self.checkpoint_level >= 2 and self.training: + x_out, global_vectors_out = fleet.utils.recompute( + attn, x, global_vectors + ) + else: + x_out, global_vectors_out = attn(x, global_vectors) + x = x + x_out + global_vectors = global_vectors + global_vectors_out + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(self.ffn_l[0], x) + if self.use_global_vector_ffn: + global_vectors = fleet.utils.recompute( + self.global_ffn_l[0], global_vectors + ) + else: + x = self.ffn_l[0](x) + if self.use_global_vector_ffn: + global_vectors = self.global_ffn_l[0](global_vectors) + return x, global_vectors + else: + for idx, attn in enumerate(self.attn_l): + if self.checkpoint_level >= 2 and self.training: + out = fleet.utils.recompute(attn, x) + else: + out = attn(x) + x = x + out + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(self.ffn_l[0], x) + else: + x = self.ffn_l[0](x) + return x + + +class CuboidTransformerEncoder(nn.Layer): + """Encoder of the CuboidTransformer + + x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... --> out + + Args: + input_shape (Tuple[int,...]): The shape of the input. Contains T, H, W, C + base_units (int, optional): The number of units. Defaults to 128. + block_units (int, optional): The number of block units. Defaults to None. + scale_alpha (float, optional): We scale up the channels based on the formula: + - round_to(base_units * max(downsample_scale) ** units_alpha, 4). Defaults to 1.0. + depth (list, optional): The number of layers for each block. Defaults to [4, 4, 4]. + downsample (int, optional): The downsample ratio. Defaults to 2. + downsample_type (str, optional): The type of downsample. Defaults to "patch_merge". + block_attn_patterns (str, optional): Attention pattern for the cuboid attention for each block. Defaults to None. + block_cuboid_size (list, optional): A list of cuboid size parameters. Defaults to [(4, 4, 4), (4, 4, 4)]. + block_strategy (list, optional): A list of cuboid strategies. Defaults to [("l", "l", "l"), ("d", "d", "d")]. + block_shift_size (list, optional): A list of shift sizes. Defaults to [(0, 0, 0), (0, 0, 0)]. + num_heads (int, optional): The number of heads. Defaults to 4. + attn_drop (float, optional): The ratio of attention dropout. Defaults to 0.0. + proj_drop (float, optional): The ratio of projection dropout. Defaults to 0.0. + ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. + ffn_activation (str, optional): The FFN activation. Defaults to "leaky". + gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to True. + padding_type (str, optional): The type of padding. Defaults to "ignore". 
+ checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + self_attn_use_final_proj (bool, optional): Whether to use self attention for final projection. Defaults to True. + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. + use_global_vector_ffn (bool, optional): Whether to use FFN global vectors. Defaults to False. + use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. + Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. + Defaults to 1. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". + down_linear_init_mode (str, optional): The mode of downsample linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization. Defaults to "0". + """ + + def __init__( + self, + input_shape: Tuple[int, ...], + base_units: int = 128, + block_units: int = None, + scale_alpha: float = 1.0, + depth: Tuple[int, ...] = [4, 4, 4], + downsample: int = 2, + downsample_type: str = "patch_merge", + block_attn_patterns: str = None, + block_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + block_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "d", "d"), + ], + block_shift_size: Tuple[Tuple[int, ...], ...] 
= [(0, 0, 0), (0, 0, 0)], + num_heads: int = 4, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + ffn_activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + use_inter_ffn: bool = True, + padding_type: str = "ignore", + checkpoint_level: bool = True, + use_relative_pos: bool = True, + self_attn_use_final_proj: bool = True, + use_global_vector: bool = False, + use_global_vector_ffn: bool = True, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + conv_init_mode: str = "0", + down_linear_init_mode: str = "0", + norm_init_mode: str = "0", + ): + super(CuboidTransformerEncoder, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.conv_init_mode = conv_init_mode + self.down_linear_init_mode = down_linear_init_mode + self.norm_init_mode = norm_init_mode + self.input_shape = input_shape + self.depth = depth + self.num_blocks = len(depth) + self.base_units = base_units + self.scale_alpha = scale_alpha + if not isinstance(downsample, (tuple, list)): + downsample = 1, downsample, downsample + self.downsample = downsample + self.downsample_type = downsample_type + self.num_heads = num_heads + self.use_global_vector = use_global_vector + self.checkpoint_level = checkpoint_level + if block_units is None: + block_units = [ + cuboid_utils.round_to( + base_units * int((max(downsample) ** scale_alpha) ** i), 4 + ) + for i in range(self.num_blocks) + ] + else: + assert len(block_units) == self.num_blocks and block_units[0] == base_units + self.block_units = block_units + if self.num_blocks > 1: + if downsample_type == "patch_merge": + self.down_layers = nn.LayerList( + sublayers=[ + PatchMerging3D( + dim=self.block_units[i], + downsample=downsample, + padding_type=padding_type, + out_dim=self.block_units[i + 1], + linear_init_mode=down_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for i in range(self.num_blocks - 1) + ] + ) + else: + raise NotImplementedError(f"{downsample_type} is invalid.") + if self.use_global_vector: + self.down_layer_global_proj = nn.LayerList( + sublayers=[ + nn.Linear( + in_features=global_dim_ratio * self.block_units[i], + out_features=global_dim_ratio * self.block_units[i + 1], + ) + for i in range(self.num_blocks - 1) + ] + ) + if block_attn_patterns is not None: + mem_shapes = self.get_mem_shapes() + if isinstance(block_attn_patterns, (tuple, list)): + assert len(block_attn_patterns) == self.num_blocks + else: + block_attn_patterns = [ + block_attn_patterns for _ in range(self.num_blocks) + ] + block_cuboid_size = [] + block_strategy = [] + block_shift_size = [] + for idx, key in enumerate(block_attn_patterns): + func = cuboid_utils.CuboidSelfAttentionPatterns.get(key) + cuboid_size, strategy, shift_size = func(mem_shapes[idx]) + block_cuboid_size.append(cuboid_size) + block_strategy.append(strategy) + block_shift_size.append(shift_size) + else: + if not isinstance(block_cuboid_size[0][0], (list, tuple)): + block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)] + else: + assert ( + len(block_cuboid_size) == self.num_blocks + ), f"Incorrect input format! 
Received block_cuboid_size={block_cuboid_size}" + if not isinstance(block_strategy[0][0], (list, tuple)): + block_strategy = [block_strategy for _ in range(self.num_blocks)] + else: + assert ( + len(block_strategy) == self.num_blocks + ), f"Incorrect input format! Received block_strategy={block_strategy}" + if not isinstance(block_shift_size[0][0], (list, tuple)): + block_shift_size = [block_shift_size for _ in range(self.num_blocks)] + else: + assert ( + len(block_shift_size) == self.num_blocks + ), f"Incorrect input format! Received block_shift_size={block_shift_size}" + self.block_cuboid_size = block_cuboid_size + self.block_strategy = block_strategy + self.block_shift_size = block_shift_size + self.blocks = nn.LayerList( + sublayers=[ + nn.Sequential( + *[ + StackCuboidSelfAttentionBlock( + dim=self.block_units[i], + num_heads=num_heads, + block_cuboid_size=block_cuboid_size[i], + block_strategy=block_strategy[i], + block_shift_size=block_shift_size[i], + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + activation=ffn_activation, + gated_ffn=gated_ffn, + norm_layer=norm_layer, + use_inter_ffn=use_inter_ffn, + padding_type=padding_type, + use_global_vector=use_global_vector, + use_global_vector_ffn=use_global_vector_ffn, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + use_final_proj=self_attn_use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for _ in range(depth[i]) + ] + ) + for i in range(self.num_blocks) + ] + ) + self.reset_parameters() + + def reset_parameters(self): + if self.num_blocks > 1: + for m in self.down_layers: + m.reset_parameters() + if self.use_global_vector: + cuboid_utils.apply_initialization( + self.down_layer_global_proj, linear_mode=self.down_linear_init_mode + ) + for ms in self.blocks: + for m in ms: + m.reset_parameters() + + def get_mem_shapes(self): + """Get the shape of the output memory based on the input shape. This can be used for constructing the decoder. + + Returns: + mem_shapes : A list of shapes of the output memory + """ + + if self.num_blocks == 1: + return [self.input_shape] + else: + mem_shapes = [self.input_shape] + curr_shape = self.input_shape + for down_layer in self.down_layers: + curr_shape = down_layer.get_out_shape(curr_shape) + mem_shapes.append(curr_shape) + return mem_shapes + + def forward(self, x, global_vectors=None): + """ + Args: + x : Shape (B, T, H, W, C) + + Returns: + out (List[paddle.Tensor,..]): A list of tensors from the bottom layer to the top layer of the encoder. For + example, it can have shape + - (B, T, H, W, C1) + - (B, T, H // 2, W // 2, 2 * C1) + - (B, T, H // 4, W // 4, 4 * C1) + ... + global_mem_out (List,Optional): The output of the global vector. 
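A hedged usage sketch for the encoder forward contract described above: it consumes a (B, T, H, W, C) tensor and returns one feature map per block, with H and W halved and channels expanded after each PatchMerging3D. The module path, the concrete sizes, and the printed shapes below are illustrative assumptions, not part of this patch.

import paddle
# Path is an assumption -- import CuboidTransformerEncoder from wherever this patch defines it.
from ppsci.arch.cuboid_transformer_encoder_decoder import CuboidTransformerEncoder

B, T, H, W, C = 2, 4, 16, 16, 32
encoder = CuboidTransformerEncoder(
    input_shape=(T, H, W, C),  # must match the (T, H, W, C) of the input tensor
    base_units=C,              # first block runs at C channels
    depth=[1, 1],              # two blocks -> one PatchMerging3D between them
    downsample=2,              # expanded to (1, 2, 2): only H and W shrink
    num_heads=4,
)
outs = encoder(paddle.randn([B, T, H, W, C]))
print([tuple(o.shape) for o in outs])
# expected (roughly): [(2, 4, 16, 16, 32), (2, 4, 8, 8, 64)]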
+ """ + + B, T, H, W, C_in = x.shape + assert (T, H, W, C_in) == self.input_shape + + if self.use_global_vector: + out = [] + global_mem_out = [] + for i in range(self.num_blocks): + for l in self.blocks[i]: + x, global_vectors = l(x, global_vectors) + out.append(x) + global_mem_out.append(global_vectors) + if self.num_blocks > 1 and i < self.num_blocks - 1: + x = self.down_layers[i](x) + global_vectors = self.down_layer_global_proj[i](global_vectors) + return out, global_mem_out + else: + out = [] + for i in range(self.num_blocks): + x = self.blocks[i](x) + out.append(x) + if self.num_blocks > 1 and i < self.num_blocks - 1: + x = self.down_layers[i](x) + return out diff --git a/ppsci/arch/cuboid_transformer_utils.py b/ppsci/arch/cuboid_transformer_utils.py index 3f7f366bc0..5f7084e00a 100644 --- a/ppsci/arch/cuboid_transformer_utils.py +++ b/ppsci/arch/cuboid_transformer_utils.py @@ -1,347 +1,347 @@ -import functools -from typing import Tuple - -import paddle -import paddle.nn.functional as F -from paddle import nn - -from ppsci.utils import initializer - - -def round_to(dat, c): - return dat + (dat - dat % c) % c - - -class RMSNorm(nn.Layer): - """Root Mean Square Layer Normalization proposed in "[NeurIPS2019] Root Mean Square Layer Normalization" - - Args: - d (Optional[int]): The model size. - p (float, optional): The partial RMSNorm, valid value [0, 1]. Defaults to -1.0. - eps (float, optional): The epsilon value. Defaults to 1e-08. - bias (bool, optional): Whether use bias term for RMSNorm, - because RMSNorm doesn't enforce re-centering invariance.Defaults to False. - """ - - def __init__( - self, - d: Tuple[int, ...], - p: float = -1.0, - eps: float = 1e-08, - bias: bool = False, - ): - super().__init__() - self.eps = eps - self.d = d - self.p = p - self.bias = bias - init_data = paddle.ones(d) - self.scale = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(1.0), - ) - self.scale.stop_gradient = False - self.add_parameter(name="scale", parameter=self.scale) - if self.bias: - init_data = paddle.zeros(d) - self.offset = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(0.0), - ) - self.offset.stop_gradient = False - self.add_parameter(name="offset", parameter=self.offset) - - def forward(self, x): - if self.p < 0.0 or self.p > 1.0: - norm_x = x.norm(p=2, axis=-1, keepdim=True) - d_x = self.d - else: - partial_size = int(self.d * self.p) - partial_x, _ = paddle.split( - x=x, num_or_sections=[partial_size, self.d - partial_size], axis=-1 - ) - norm_x = partial_x.norm(p=2, axis=-1, keepdim=True) - d_x = partial_size - rms_x = norm_x * d_x ** (-1.0 / 2) - x_normed = x / (rms_x + self.eps) - if self.bias: - return self.scale * x_normed + self.offset - return self.scale * x_normed - - -def get_norm_layer( - normalization: str = "layer_norm", - axis: int = -1, - epsilon: float = 1e-05, - in_channels: int = 0, - **kwargs, -): - """Get the normalization layer based on the provided type - - Args: - normalization (str): The type of the layer normalization from ['layer_norm']. - axis (float): The axis to normalize the. - epsilon (float): The epsilon of the normalization layer. - in_channels (int): Input channel. - - Returns: - norm_layer (norm): The layer normalization layer. 
- """ - - if isinstance(normalization, str): - if normalization == "layer_norm": - assert in_channels > 0 - assert axis == -1 - norm_layer = nn.LayerNorm( - normalized_shape=in_channels, epsilon=epsilon, **kwargs - ) - elif normalization == "rms_norm": - assert axis == -1 - norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs) - else: - raise NotImplementedError(f"normalization={normalization} is not supported") - return norm_layer - elif normalization is None: - return nn.Identity() - else: - raise NotImplementedError("The type of normalization must be str") - - -def generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False): - if pad_t == 0 and pad_h == 0 and pad_w == 0: - return x - assert padding_type in ["zeros", "ignore", "nearest"] - B, T, H, W, C = x.shape - if padding_type == "nearest": - return nn.functional.interpolate( - x=x.transpose(perm=[0, 4, 1, 2, 3]), size=(T + pad_t, H + pad_h, W + pad_w) - ).transpose(perm=[0, 2, 3, 4, 1]) - elif t_pad_left: - return F.pad(x, [0, 0, 0, pad_w, 0, pad_h, pad_t, 0], data_format="NDHWC") - else: - data_pad = F.pad( - x, [0, 0, pad_t, 0, pad_h, 0, pad_w, 0, 0, 0], data_format="NDHWC" - ) - data_pad = paddle.concat( - [data_pad[:, pad_t:, ...], data_pad[:, :pad_t, ...]], axis=1 - ) - return data_pad - - -def generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type): - assert padding_type in ["zeros", "ignore", "nearest"] - B, T, H, W, C = x.shape - if pad_t == 0 and pad_h == 0 and pad_w == 0: - return x - if padding_type == "nearest": - return nn.functional.interpolate( - x=x.transpose(perm=[0, 4, 1, 2, 3]), size=(T - pad_t, H - pad_h, W - pad_w) - ).transpose(perm=[0, 2, 3, 4, 1]) - else: - return x[:, : T - pad_t, : H - pad_h, : W - pad_w, :] - - -def apply_initialization( - m: nn.Layer, - linear_mode: str = "0", - conv_mode: str = "0", - norm_mode: str = "0", - embed_mode: str = "0", -): - if isinstance(m, nn.Linear): - if linear_mode in ("0",): - m.weight = initializer.kaiming_normal_(m.weight, nonlinearity="linear") - elif linear_mode in ("1",): - m.weight = initializer.kaiming_normal_( - m.weight, a=0.1, mode="fan_out", nonlinearity="leaky_relu" - ) - else: - raise NotImplementedError(f"{linear_mode} is invalid.") - if hasattr(m, "bias") and m.bias is not None: - m.bias = initializer.zeros_(m.bias) - elif isinstance( - m, - ( - nn.Conv2D, - nn.Conv3D, - nn.Conv2DTranspose, - nn.Conv3DTranspose, - ), - ): - if conv_mode in ("0",): - m.weight = initializer.kaiming_normal_( - m.weight, a=0.1, mode="fan_out", nonlinearity="leaky_relu" - ) - else: - raise NotImplementedError(f"{conv_mode} is invalid.") - if hasattr(m, "bias") and m.bias is not None: - m.bias = initializer.zeros_(m.bias) - elif isinstance(m, nn.LayerNorm): - if norm_mode in ("0",): - m.weight = initializer.zeros_(m.weight) - m.bias = initializer.zeros_(m.bias) - else: - raise NotImplementedError(f"{norm_mode} is invalid.") - elif isinstance(m, nn.GroupNorm): - if norm_mode in ("0",): - m.weight = initializer.ones_(m.weight) - m.bias = initializer.zeros_(m.bias) - else: - raise NotImplementedError(f"{norm_mode} is invalid.") - elif isinstance(m, nn.Embedding): - if embed_mode in ("0",): - m.weight.data = initializer.trunc_normal_(m.weight.data, std=0.02) - else: - raise NotImplementedError(f"{embed_mode} is invalid.") - - else: - pass - - -class CuboidSelfAttentionPatterns: - def __init__(self): - super().__init__() - self.patterns = {} - self.patterns = { - "full": self.full_attention, - "axial": self.axial, - "divided_st": self.divided_space_time, - } - 
for p in [1, 2, 4, 8, 10]: - for m in [1, 2, 4, 8, 16, 32]: - key = f"video_swin_{p}x{m}" - self.patterns[key] = functools.partial(self.video_swin, P=p, M=m) - - for m in [1, 2, 4, 8, 16, 32]: - key = f"spatial_lg_{m}" - self.patterns[key] = functools.partial(self.spatial_lg_v1, M=m) - - for k in [2, 4, 8]: - key = f"axial_space_dilate_{k}" - self.patterns[key] = functools.partial(self.axial_space_dilate_K, K=k) - - def get(self, pattern_name): - return self.patterns[pattern_name] - - def full_attention(self, input_shape): - T, H, W, _ = input_shape - cuboid_size = [(T, H, W)] - strategy = [("l", "l", "l")] - shift_size = [(0, 0, 0)] - return cuboid_size, strategy, shift_size - - def axial(self, input_shape): - """Axial attention proposed in https://arxiv.org/abs/1912.12180 - - Args: - input_shape (Tuple[int,...]): The shape of the input tensor, T H W. - - Returns: - cuboid_size (Tuple[int,...]): The size of cuboid. - strategy (Tuple[str,...]): The strategy of the attention. - shift_size (Tuple[int,...]): The shift size of the attention. - """ - - T, H, W, _ = input_shape - cuboid_size = [(T, 1, 1), (1, H, 1), (1, 1, W)] - strategy = [("l", "l", "l"), ("l", "l", "l"), ("l", "l", "l")] - shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0)] - return cuboid_size, strategy, shift_size - - def divided_space_time(self, input_shape): - T, H, W, _ = input_shape - cuboid_size = [(T, 1, 1), (1, H, W)] - strategy = [("l", "l", "l"), ("l", "l", "l")] - shift_size = [(0, 0, 0), (0, 0, 0)] - return cuboid_size, strategy, shift_size - - def video_swin(self, input_shape, P=2, M=4): - """Adopt the strategy in Video SwinTransformer https://arxiv.org/pdf/2106.13230.pdf""" - T, H, W, _ = input_shape - P = min(P, T) - M = min(M, H, W) - cuboid_size = [(P, M, M), (P, M, M)] - strategy = [("l", "l", "l"), ("l", "l", "l")] - shift_size = [(0, 0, 0), (P // 2, M // 2, M // 2)] - return cuboid_size, strategy, shift_size - - def spatial_lg_v1(self, input_shape, M=4): - T, H, W, _ = input_shape - if H <= M and W <= M: - cuboid_size = [(T, 1, 1), (1, H, W)] - strategy = [("l", "l", "l"), ("l", "l", "l")] - shift_size = [(0, 0, 0), (0, 0, 0)] - else: - cuboid_size = [(T, 1, 1), (1, M, M), (1, M, M)] - strategy = [("l", "l", "l"), ("l", "l", "l"), ("d", "d", "d")] - shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0)] - return cuboid_size, strategy, shift_size - - def axial_space_dilate_K(self, input_shape, K=2): - T, H, W, _ = input_shape - K = min(K, H, W) - cuboid_size = [ - (T, 1, 1), - (1, H // K, 1), - (1, H // K, 1), - (1, 1, W // K), - (1, 1, W // K), - ] - strategy = [ - ("l", "l", "l"), - ("d", "d", "d"), - ("l", "l", "l"), - ("d", "d", "d"), - ("l", "l", "l"), - ] - shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)] - return cuboid_size, strategy, shift_size - - -class CuboidCrossAttentionPatterns: - def __init__(self): - super().__init__() - self.patterns = {} - for k in [1, 2, 4, 8]: - key1 = f"cross_{k}x{k}" - key2 = f"cross_{k}x{k}_lg" - key3 = f"cross_{k}x{k}_heter" - self.patterns[key1] = functools.partial(self.cross_KxK, K=k) - self.patterns[key2] = functools.partial(self.cross_KxK_lg, K=k) - self.patterns[key3] = functools.partial(self.cross_KxK_heter, K=k) - - def get(self, pattern_name): - return self.patterns[pattern_name] - - def cross_KxK(self, mem_shape, K): - T_mem, H, W, _ = mem_shape - K = min(K, H, W) - cuboid_hw = [(K, K)] - shift_hw = [(0, 0)] - strategy = [("l", "l", "l")] - n_temporal = [1] - return cuboid_hw, shift_hw, strategy, n_temporal - - def cross_KxK_lg(self, mem_shape, K): 
- T_mem, H, W, _ = mem_shape - K = min(K, H, W) - cuboid_hw = [(K, K), (K, K)] - shift_hw = [(0, 0), (0, 0)] - strategy = [("l", "l", "l"), ("d", "d", "d")] - n_temporal = [1, 1] - return cuboid_hw, shift_hw, strategy, n_temporal - - def cross_KxK_heter(self, mem_shape, K): - T_mem, H, W, _ = mem_shape - K = min(K, H, W) - cuboid_hw = [(K, K), (K, K), (K, K)] - shift_hw = [(0, 0), (0, 0), (K // 2, K // 2)] - strategy = [("l", "l", "l"), ("d", "d", "d"), ("l", "l", "l")] - n_temporal = [1, 1, 1] - return cuboid_hw, shift_hw, strategy, n_temporal - - -CuboidSelfAttentionPatterns = CuboidSelfAttentionPatterns() -CuboidCrossAttentionPatterns = CuboidCrossAttentionPatterns() +import functools +from typing import Tuple + +import paddle +import paddle.nn.functional as F +from paddle import nn + +from ppsci.utils import initializer + + +def round_to(dat, c): + return dat + (dat - dat % c) % c + + +class RMSNorm(nn.Layer): + """Root Mean Square Layer Normalization proposed in "[NeurIPS2019] Root Mean Square Layer Normalization" + + Args: + d (Optional[int]): The model size. + p (float, optional): The partial RMSNorm, valid value [0, 1]. Defaults to -1.0. + eps (float, optional): The epsilon value. Defaults to 1e-08. + bias (bool, optional): Whether use bias term for RMSNorm, + because RMSNorm doesn't enforce re-centering invariance.Defaults to False. + """ + + def __init__( + self, + d: Tuple[int, ...], + p: float = -1.0, + eps: float = 1e-08, + bias: bool = False, + ): + super().__init__() + self.eps = eps + self.d = d + self.p = p + self.bias = bias + init_data = paddle.ones(d) + self.scale = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(1.0), + ) + self.scale.stop_gradient = False + self.add_parameter(name="scale", parameter=self.scale) + if self.bias: + init_data = paddle.zeros(d) + self.offset = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(0.0), + ) + self.offset.stop_gradient = False + self.add_parameter(name="offset", parameter=self.offset) + + def forward(self, x): + if self.p < 0.0 or self.p > 1.0: + norm_x = x.norm(p=2, axis=-1, keepdim=True) + d_x = self.d + else: + partial_size = int(self.d * self.p) + partial_x, _ = paddle.split( + x=x, num_or_sections=[partial_size, self.d - partial_size], axis=-1 + ) + norm_x = partial_x.norm(p=2, axis=-1, keepdim=True) + d_x = partial_size + rms_x = norm_x * d_x ** (-1.0 / 2) + x_normed = x / (rms_x + self.eps) + if self.bias: + return self.scale * x_normed + self.offset + return self.scale * x_normed + + +def get_norm_layer( + normalization: str = "layer_norm", + axis: int = -1, + epsilon: float = 1e-05, + in_channels: int = 0, + **kwargs, +): + """Get the normalization layer based on the provided type + + Args: + normalization (str): The type of the layer normalization from ['layer_norm']. + axis (float): The axis to normalize the. + epsilon (float): The epsilon of the normalization layer. + in_channels (int): Input channel. + + Returns: + norm_layer (norm): The layer normalization layer. 
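A minimal sketch, under assumed channel counts, of how the `get_norm_layer` helper above is meant to be called: "layer_norm" requires `in_channels > 0`, `None` yields an identity layer, and "rms_norm" routes to the `RMSNorm` class re-added just above.

from paddle import nn
from ppsci.arch import cuboid_transformer_utils as cuboid_utils

norm = cuboid_utils.get_norm_layer("layer_norm", in_channels=64)  # nn.LayerNorm(64, epsilon=1e-05)
ident = cuboid_utils.get_norm_layer(None)                         # nn.Identity()
assert isinstance(norm, nn.LayerNorm) and isinstance(ident, nn.Identity)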
+ """ + + if isinstance(normalization, str): + if normalization == "layer_norm": + assert in_channels > 0 + assert axis == -1 + norm_layer = nn.LayerNorm( + normalized_shape=in_channels, epsilon=epsilon, **kwargs + ) + elif normalization == "rms_norm": + assert axis == -1 + norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs) + else: + raise NotImplementedError(f"normalization={normalization} is not supported") + return norm_layer + elif normalization is None: + return nn.Identity() + else: + raise NotImplementedError("The type of normalization must be str") + + +def generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False): + if pad_t == 0 and pad_h == 0 and pad_w == 0: + return x + assert padding_type in ["zeros", "ignore", "nearest"] + B, T, H, W, C = x.shape + if padding_type == "nearest": + return nn.functional.interpolate( + x=x.transpose(perm=[0, 4, 1, 2, 3]), size=(T + pad_t, H + pad_h, W + pad_w) + ).transpose(perm=[0, 2, 3, 4, 1]) + elif t_pad_left: + return F.pad(x, [0, 0, 0, pad_w, 0, pad_h, pad_t, 0], data_format="NDHWC") + else: + data_pad = F.pad( + x, [0, 0, pad_t, 0, pad_h, 0, pad_w, 0, 0, 0], data_format="NDHWC" + ) + data_pad = paddle.concat( + [data_pad[:, pad_t:, ...], data_pad[:, :pad_t, ...]], axis=1 + ) + return data_pad + + +def generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type): + assert padding_type in ["zeros", "ignore", "nearest"] + B, T, H, W, C = x.shape + if pad_t == 0 and pad_h == 0 and pad_w == 0: + return x + if padding_type == "nearest": + return nn.functional.interpolate( + x=x.transpose(perm=[0, 4, 1, 2, 3]), size=(T - pad_t, H - pad_h, W - pad_w) + ).transpose(perm=[0, 2, 3, 4, 1]) + else: + return x[:, : T - pad_t, : H - pad_h, : W - pad_w, :] + + +def apply_initialization( + m: nn.Layer, + linear_mode: str = "0", + conv_mode: str = "0", + norm_mode: str = "0", + embed_mode: str = "0", +): + if isinstance(m, nn.Linear): + if linear_mode in ("0",): + m.weight = initializer.kaiming_normal_(m.weight, nonlinearity="linear") + elif linear_mode in ("1",): + m.weight = initializer.kaiming_normal_( + m.weight, a=0.1, mode="fan_out", nonlinearity="leaky_relu" + ) + else: + raise NotImplementedError(f"{linear_mode} is invalid.") + if hasattr(m, "bias") and m.bias is not None: + m.bias = initializer.zeros_(m.bias) + elif isinstance( + m, + ( + nn.Conv2D, + nn.Conv3D, + nn.Conv2DTranspose, + nn.Conv3DTranspose, + ), + ): + if conv_mode in ("0",): + m.weight = initializer.kaiming_normal_( + m.weight, a=0.1, mode="fan_out", nonlinearity="leaky_relu" + ) + else: + raise NotImplementedError(f"{conv_mode} is invalid.") + if hasattr(m, "bias") and m.bias is not None: + m.bias = initializer.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + if norm_mode in ("0",): + m.weight = initializer.zeros_(m.weight) + m.bias = initializer.zeros_(m.bias) + else: + raise NotImplementedError(f"{norm_mode} is invalid.") + elif isinstance(m, nn.GroupNorm): + if norm_mode in ("0",): + m.weight = initializer.ones_(m.weight) + m.bias = initializer.zeros_(m.bias) + else: + raise NotImplementedError(f"{norm_mode} is invalid.") + elif isinstance(m, nn.Embedding): + if embed_mode in ("0",): + m.weight.data = initializer.trunc_normal_(m.weight.data, std=0.02) + else: + raise NotImplementedError(f"{embed_mode} is invalid.") + + else: + pass + + +class CuboidSelfAttentionPatterns: + def __init__(self): + super().__init__() + self.patterns = {} + self.patterns = { + "full": self.full_attention, + "axial": self.axial, + "divided_st": self.divided_space_time, + } + 
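A hedged lookup sketch for the pattern registry rebuilt above: `CuboidSelfAttentionPatterns` is exposed as a module-level instance at the bottom of this file, and `.get(name)` returns a function that maps a memory shape to (cuboid_size, strategy, shift_size). The shape values below are illustrative assumptions.

from ppsci.arch import cuboid_transformer_utils as cuboid_utils

T, H, W, C = 4, 16, 16, 64  # assumed (T, H, W, C) of an encoder memory
fn = cuboid_utils.CuboidSelfAttentionPatterns.get("axial")
cuboid_size, strategy, shift_size = fn((T, H, W, C))
# cuboid_size == [(4, 1, 1), (1, 16, 1), (1, 1, 16)]
# strategy    == [("l", "l", "l"), ("l", "l", "l"), ("l", "l", "l")]
# shift_size  == [(0, 0, 0), (0, 0, 0), (0, 0, 0)]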
for p in [1, 2, 4, 8, 10]: + for m in [1, 2, 4, 8, 16, 32]: + key = f"video_swin_{p}x{m}" + self.patterns[key] = functools.partial(self.video_swin, P=p, M=m) + + for m in [1, 2, 4, 8, 16, 32]: + key = f"spatial_lg_{m}" + self.patterns[key] = functools.partial(self.spatial_lg_v1, M=m) + + for k in [2, 4, 8]: + key = f"axial_space_dilate_{k}" + self.patterns[key] = functools.partial(self.axial_space_dilate_K, K=k) + + def get(self, pattern_name): + return self.patterns[pattern_name] + + def full_attention(self, input_shape): + T, H, W, _ = input_shape + cuboid_size = [(T, H, W)] + strategy = [("l", "l", "l")] + shift_size = [(0, 0, 0)] + return cuboid_size, strategy, shift_size + + def axial(self, input_shape): + """Axial attention proposed in https://arxiv.org/abs/1912.12180 + + Args: + input_shape (Tuple[int,...]): The shape of the input tensor, T H W. + + Returns: + cuboid_size (Tuple[int,...]): The size of cuboid. + strategy (Tuple[str,...]): The strategy of the attention. + shift_size (Tuple[int,...]): The shift size of the attention. + """ + + T, H, W, _ = input_shape + cuboid_size = [(T, 1, 1), (1, H, 1), (1, 1, W)] + strategy = [("l", "l", "l"), ("l", "l", "l"), ("l", "l", "l")] + shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0)] + return cuboid_size, strategy, shift_size + + def divided_space_time(self, input_shape): + T, H, W, _ = input_shape + cuboid_size = [(T, 1, 1), (1, H, W)] + strategy = [("l", "l", "l"), ("l", "l", "l")] + shift_size = [(0, 0, 0), (0, 0, 0)] + return cuboid_size, strategy, shift_size + + def video_swin(self, input_shape, P=2, M=4): + """Adopt the strategy in Video SwinTransformer https://arxiv.org/pdf/2106.13230.pdf""" + T, H, W, _ = input_shape + P = min(P, T) + M = min(M, H, W) + cuboid_size = [(P, M, M), (P, M, M)] + strategy = [("l", "l", "l"), ("l", "l", "l")] + shift_size = [(0, 0, 0), (P // 2, M // 2, M // 2)] + return cuboid_size, strategy, shift_size + + def spatial_lg_v1(self, input_shape, M=4): + T, H, W, _ = input_shape + if H <= M and W <= M: + cuboid_size = [(T, 1, 1), (1, H, W)] + strategy = [("l", "l", "l"), ("l", "l", "l")] + shift_size = [(0, 0, 0), (0, 0, 0)] + else: + cuboid_size = [(T, 1, 1), (1, M, M), (1, M, M)] + strategy = [("l", "l", "l"), ("l", "l", "l"), ("d", "d", "d")] + shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0)] + return cuboid_size, strategy, shift_size + + def axial_space_dilate_K(self, input_shape, K=2): + T, H, W, _ = input_shape + K = min(K, H, W) + cuboid_size = [ + (T, 1, 1), + (1, H // K, 1), + (1, H // K, 1), + (1, 1, W // K), + (1, 1, W // K), + ] + strategy = [ + ("l", "l", "l"), + ("d", "d", "d"), + ("l", "l", "l"), + ("d", "d", "d"), + ("l", "l", "l"), + ] + shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)] + return cuboid_size, strategy, shift_size + + +class CuboidCrossAttentionPatterns: + def __init__(self): + super().__init__() + self.patterns = {} + for k in [1, 2, 4, 8]: + key1 = f"cross_{k}x{k}" + key2 = f"cross_{k}x{k}_lg" + key3 = f"cross_{k}x{k}_heter" + self.patterns[key1] = functools.partial(self.cross_KxK, K=k) + self.patterns[key2] = functools.partial(self.cross_KxK_lg, K=k) + self.patterns[key3] = functools.partial(self.cross_KxK_heter, K=k) + + def get(self, pattern_name): + return self.patterns[pattern_name] + + def cross_KxK(self, mem_shape, K): + T_mem, H, W, _ = mem_shape + K = min(K, H, W) + cuboid_hw = [(K, K)] + shift_hw = [(0, 0)] + strategy = [("l", "l", "l")] + n_temporal = [1] + return cuboid_hw, shift_hw, strategy, n_temporal + + def cross_KxK_lg(self, mem_shape, K): 
+ T_mem, H, W, _ = mem_shape + K = min(K, H, W) + cuboid_hw = [(K, K), (K, K)] + shift_hw = [(0, 0), (0, 0)] + strategy = [("l", "l", "l"), ("d", "d", "d")] + n_temporal = [1, 1] + return cuboid_hw, shift_hw, strategy, n_temporal + + def cross_KxK_heter(self, mem_shape, K): + T_mem, H, W, _ = mem_shape + K = min(K, H, W) + cuboid_hw = [(K, K), (K, K), (K, K)] + shift_hw = [(0, 0), (0, 0), (K // 2, K // 2)] + strategy = [("l", "l", "l"), ("d", "d", "d"), ("l", "l", "l")] + n_temporal = [1, 1, 1] + return cuboid_hw, shift_hw, strategy, n_temporal + + +CuboidSelfAttentionPatterns = CuboidSelfAttentionPatterns() +CuboidCrossAttentionPatterns = CuboidCrossAttentionPatterns() diff --git a/ppsci/arch/cvit.py b/ppsci/arch/cvit.py index d39abd3118..a23f62449d 100644 --- a/ppsci/arch/cvit.py +++ b/ppsci/arch/cvit.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -1093,3 +1094,1100 @@ def forward(self, x_dict): y_dict = self._output_transform(x_dict, y_dict) return y_dict +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib + +try: + import einops +except ModuleNotFoundError: + pass +from typing import Callable +from typing import Optional +from typing import Sequence +from typing import Tuple + +import paddle +from paddle import nn +from paddle.nn import functional as F + +from ppsci.arch import base +from ppsci.utils import initializer + + +# Positional embedding from masked autoencoder https://arxiv.org/abs/2111.06377 +def get_1d_sincos_pos_embed_from_grid(embed_dim: int, pos: paddle.Tensor): + if embed_dim % 2 != 0: + raise ValueError(f"embedding dimension({embed_dim}) must be divisible by 2") + + omega = paddle.arange(embed_dim // 2, dtype=paddle.float32) + omega /= embed_dim / 2.0 + omega = 1.0 / 10000**omega # (D/2,) + + pos = pos.reshape([-1]) # (M,) + out = paddle.einsum("m,d->md", pos, omega) # (M, D/2), outer product + + emb_sin = paddle.sin(out) # (M, D/2) + emb_cos = paddle.cos(out) # (M, D/2) + + emb = paddle.concat([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +def get_1d_sincos_pos_embed(embed_dim: int, length: int): + return paddle.unsqueeze( + get_1d_sincos_pos_embed_from_grid( + embed_dim, paddle.arange(length, dtype=paddle.float32) + ), + 0, + ) + + +def get_2d_sincos_pos_embed(embed_dim: int, grid_size: Tuple[int, int]): + def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + if embed_dim % 2 != 0: + raise ValueError(f"embedding dimension({embed_dim}) must be divisible by 2") + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + emb = paddle.concat([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + grid_h = paddle.arange(grid_size[0], dtype=paddle.float32) + grid_w = paddle.arange(grid_size[1], 
dtype=paddle.float32) + grid = paddle.meshgrid(grid_w, grid_h, indexing="ij") # here w goes first + grid = paddle.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size[0], grid_size[1]]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + + return paddle.unsqueeze(pos_embed, 0) + + +class MlpBlock(nn.Layer): + def __init__(self, in_dim: int, dim: int = 256, out_dim: int = 256): + super().__init__() + self.in_dim = in_dim + self.dim = dim + self.out_dim = out_dim + self.linear1 = nn.Linear(self.in_dim, self.dim) + self.act = nn.GELU(True) + self.linear2 = nn.Linear(self.dim, self.out_dim) + + self._init_weights() + + def forward(self, inputs): + x = self.linear1(inputs) + x = self.act(x) + x = self.linear2(x) + return x + + def _init_weights(self) -> None: + initializer.xavier_uniform_(self.linear1.weight) + initializer.constant_(self.linear1.bias, 0) + initializer.xavier_uniform_(self.linear2.weight) + initializer.constant_(self.linear2.bias, 0) + + +class SelfAttnBlock(nn.Layer): + def __init__( + self, num_heads: int, emb_dim: int, mlp_ratio: int, layer_norm_eps: float = 1e-5 + ): + super().__init__() + self.num_heads = num_heads + self.emb_dim = emb_dim + self.mlp_ratio = mlp_ratio + self.layer_norm1 = nn.LayerNorm(emb_dim, layer_norm_eps) + self.attn_layer = MultiHeadDotProductAttention( + self.emb_dim, + num_heads=self.num_heads, + qkv_features=self.emb_dim, + ) + self.layer_norm2 = nn.LayerNorm(emb_dim, layer_norm_eps) + self.mlp = MlpBlock(self.emb_dim, self.emb_dim * self.mlp_ratio, self.emb_dim) + + def forward(self, inputs): + # inputs: # [B, L/ps, self.emb_dim] + x = self.layer_norm1(inputs) + x = self.attn_layer(x, x) + x = x + inputs + y = self.layer_norm2(x) + y = self.mlp(y) + return x + y + + +class Mlp(nn.Layer): + def __init__( + self, + num_layers: int, + hidden_dim: int, + out_dim: int, + layer_norm_eps: float = 1e-5, + ): + super().__init__() + self.num_layers = num_layers + self.hidden_dim = hidden_dim + self.out_dim = out_dim + self.layer_norm_eps = layer_norm_eps + self.linears = nn.LayerList( + [ + nn.Linear( + self.hidden_dim, + self.hidden_dim, + ) + for _ in range(self.num_layers) + ] + ) + self.gelu = nn.GELU(True) + self.norms = nn.LayerList( + [ + nn.LayerNorm(self.hidden_dim, self.layer_norm_eps) + for _ in range(self.num_layers) + ] + ) + + self.linear_out = nn.Linear(self.hidden_dim, self.out_dim) + + self._init_weights() + + def forward(self, inputs): + x = inputs + for i in range(self.num_layers): + y = self.linears[i](x) + y = self.gelu(y) + x = x + y + x = self.norms[i](x) + + x = self.linear_out(x) + return x + + def _init_weights(self) -> None: + for linear in self.linears: + initializer.xavier_uniform_(linear.weight) + initializer.constant_(linear.bias, 0) + + +class PatchEmbed1D(nn.Layer): + def __init__( + self, + in_dim: int, + patch_size: Sequence[int] = (4,), + emb_dim: int = 768, + use_norm: bool = False, + layer_norm_eps: float = 1e-5, + ): + super().__init__() + self.patch_size = patch_size + self.emb_dim = emb_dim + self.use_norm = use_norm + self.layer_norm_eps = layer_norm_eps + self.conv = nn.Conv1D( + in_dim, + self.emb_dim, + self.patch_size[0], + self.patch_size[0], + data_format="NLC", + ) + self.norm = ( + nn.LayerNorm(self.emb_dim, self.layer_norm_eps) + if self.use_norm + else nn.Identity() + ) + self._init_weights() + + def forward(self, x): + x = self.conv(x) # [B, L, C] --> [B, L/ps, self.emb_dim] + if self.use_norm: + x = self.norm(x) + return x + + def _init_weights(self) -> None: + 
initializer.xavier_uniform_(self.conv.weight) + initializer.constant_(self.conv.bias, 0) + + +class PatchEmbed(nn.Layer): + def __init__( + self, + in_dim: int, + spatial_dims: Sequence[int], + patch_size: Tuple[int, ...] = (1, 16, 16), + emb_dim: int = 768, + use_norm: bool = False, + layer_norm_eps: float = 1e-5, + ): + super().__init__() + self.patch_size = patch_size + self.emb_dim = emb_dim + self.use_norm = use_norm + self.layer_norm_eps = layer_norm_eps + self.conv = nn.Conv3D( + in_dim, + self.emb_dim, + (self.patch_size[0], self.patch_size[1], self.patch_size[2]), + (self.patch_size[0], self.patch_size[1], self.patch_size[2]), + data_format="NDHWC", + ) + self.norm = ( + nn.LayerNorm(self.emb_dim, self.layer_norm_eps) + if self.use_norm + else nn.Identity() + ) + t, h, w = spatial_dims + self.num_patches = [ + t // self.patch_size[0], + h // self.patch_size[1], + w // self.patch_size[2], + ] + self._init_weights() + + def forward(self, x): + b, t, h, w, c = x.shape + + x = self.conv(x) # [B, L, C] --> [B, L/ps, self.emb_dim] + x = x.reshape( + [ + b, + self.num_patches[0], + self.num_patches[1] * self.num_patches[2], + self.emb_dim, + ] + ) + if self.use_norm: + x = self.norm(x) + return x + + def _init_weights(self) -> None: + initializer.xavier_uniform_(self.conv.weight) + initializer.constant_(self.conv.bias, 0) + + +class CrossAttnBlock(nn.Layer): + def __init__( + self, + num_heads: int, + emb_dim: int, + mlp_ratio: int, + layer_norm_eps: float = 1e-5, + out_features: int = None, + qkv_features: int = None, + ): + super().__init__() + self.num_heads = num_heads + self.emb_dim = emb_dim + self.mlp_ratio = mlp_ratio + self.layer_norm_eps = layer_norm_eps + self.head_dim = self.emb_dim // self.num_heads + + self.layer_norm_q = nn.LayerNorm(self.emb_dim, epsilon=self.layer_norm_eps) + self.layer_norm_kv = nn.LayerNorm(self.emb_dim, epsilon=self.layer_norm_eps) + + self.attn_layer = MultiHeadDotProductAttention( + self.emb_dim, + num_heads=num_heads, + qkv_features=qkv_features, + out_features=out_features, + ) + self.layer_norm_y = nn.LayerNorm(self.emb_dim, epsilon=self.layer_norm_eps) + self.mlp = MlpBlock(self.emb_dim, self.emb_dim * self.mlp_ratio, self.emb_dim) + + def forward(self, q_inputs, kv_inputs): + # [B, L/ps, self.dec_emb_dim] + q = self.layer_norm_q(q_inputs) + kv = self.layer_norm_kv(kv_inputs) + x = self.attn_layer(q, kv) + x = x + q_inputs + y = self.layer_norm_y(x) + y = self.mlp(y) + return x + y + + +class Encoder1D(nn.Layer): + def __init__( + self, + in_dim: int, + spatial_dims: int, + patch_size: int = (4,), + emb_dim: int = 256, + depth: int = 3, + num_heads: int = 8, + mlp_ratio: int = 1, + layer_norm_eps: float = 1e-5, + ): + super().__init__() + self.in_dim = in_dim + self.spatial_dims = spatial_dims + self.patch_size = patch_size + self.emb_dim = emb_dim + self.depth = depth + self.num_heads = num_heads + self.mlp_ratio = mlp_ratio + self.layer_norm_eps = layer_norm_eps + self.patch_embedding = PatchEmbed1D(in_dim, self.patch_size, self.emb_dim) + + self.self_attn_blocks = nn.LayerList( + [ + SelfAttnBlock( + self.num_heads, + self.emb_dim, + self.mlp_ratio, + self.layer_norm_eps, + ) + for _ in range(self.depth) + ] + ) + pos_emb = get_1d_sincos_pos_embed( + self.emb_dim, self.spatial_dims // self.patch_size[0] + ) + self.pos_emb = self.create_parameter( + pos_emb.shape, default_initializer=nn.initializer.Assign(pos_emb) + ) + + def forward(self, x): + x = self.patch_embedding(x) + x = x + self.pos_emb + + for _, block in 
enumerate(self.self_attn_blocks): + x = block(x) + + return x + + +class TimeAggregation(nn.Layer): + def __init__( + self, + emb_dim: int, + depth: int, + num_heads: int = 8, + num_latents: int = 64, + mlp_ratio: int = 1, + layer_norm_eps: float = 1e-5, + ): + super().__init__() + self.emb_dim = emb_dim + self.depth = depth + self.num_heads = num_heads + self.num_latents = num_latents + self.mlp_ratio = mlp_ratio + self.layer_norm_eps = layer_norm_eps + self.latents = self.create_parameter( + [self.num_latents, self.emb_dim], + default_initializer=nn.initializer.Normal(std=1e-2), + ) + self.cross_attn_blocks = nn.LayerList( + [ + CrossAttnBlock( + self.num_heads, self.emb_dim, self.mlp_ratio, self.layer_norm_eps + ) + for _ in range(self.depth) + ] + ) + + def forward(self, x): # (B, T, S, D) --> (B, T', S, D) + latents = einops.repeat( + self.latents, "t d -> b s t d", b=x.shape[0], s=x.shape[2] + ) # (B, T', S, D) + x = einops.rearrange(x, "b t s d -> b s t d") # (B, S, T, D) + + # Transformer + for i, block in enumerate(self.cross_attn_blocks): + latents = block(latents, x) + + latents = einops.rearrange(latents, "b s t d -> b t s d") # (B, T', S, D) + return latents + + +class Encoder(nn.Layer): + def __init__( + self, + in_dim: int, + spatial_dims: Sequence[int], + patch_size: int = (1, 16, 16), + emb_dim: int = 256, + depth: int = 3, + num_heads: int = 8, + mlp_ratio: int = 1, + layer_norm_eps: float = 1e-5, + ): + super().__init__() + self.in_dim = in_dim + self.spatial_dims = spatial_dims + self.patch_size = patch_size + self.emb_dim = emb_dim + self.depth = depth + self.num_heads = num_heads + self.mlp_ratio = mlp_ratio + self.layer_norm_eps = layer_norm_eps + self.patch_embedding = PatchEmbed( + in_dim, spatial_dims, self.patch_size, self.emb_dim + ) + + self.time_aggreator = TimeAggregation( + self.emb_dim, + 2, + self.num_heads, + 1, + self.mlp_ratio, + self.layer_norm_eps, + ) + self.norm = nn.LayerNorm(self.emb_dim, epsilon=self.layer_norm_eps) + + self.self_attn_blocks = nn.LayerList( + [ + SelfAttnBlock( + self.num_heads, + self.emb_dim, + self.mlp_ratio, + self.layer_norm_eps, + ) + for _ in range(self.depth) + ] + ) + t, h, w = spatial_dims + + time_emb = get_1d_sincos_pos_embed(self.emb_dim, t // self.patch_size[0]) + self.time_emb = self.create_parameter( + time_emb.shape, default_initializer=nn.initializer.Assign(time_emb) + ) + + pos_emb = get_2d_sincos_pos_embed( + self.emb_dim, (h // self.patch_size[1], w // self.patch_size[2]) + ) + self.pos_emb = self.create_parameter( + pos_emb.shape, default_initializer=nn.initializer.Assign(pos_emb) + ) + + def forward(self, x): + # patchify + x = self.patch_embedding(x) + + # add positional embedding + x = x + self.time_emb.unsqueeze(2) + self.pos_emb.unsqueeze(1) + + # aggregate along time dimension + x = self.time_aggreator(x) + x = self.norm(x) + x = einops.rearrange(x, "b t s d -> b (t s) d") + + for _, block in enumerate(self.self_attn_blocks): + x = block(x) + + return x + + +def dot_product_attention_weights( + query: paddle.Tensor, + key: paddle.Tensor, + bias: Optional[paddle.Tensor] = None, +): + """Computes dot-product attention weights given query and key. + + Used by :func:`dot_product_attention`, which is what you'll most likely use. + But if you want access to the attention weights for introspection, then + you can directly call this function and call einsum yourself. + + Args: + query: queries for calculating attention with shape of [batch..., q_length, + num_heads, qk_depth_per_head]. 
+ key: keys for calculating attention with shape of [batch..., kv_length, + num_heads, qk_depth_per_head]. + bias: bias for the attention weights. This should be broadcastable to the + shape [batch..., num_heads, q_length, kv_length]. This can be used for + incorporating causal masks, padding masks, proximity bias, etc. + + Returns: + Output of shape [batch..., num_heads, q_length, kv_length]. + """ + dtype = query.dtype + + if paddle.in_dynamic_mode(): + assert query.ndim == key.ndim, "q, k must have same rank." + assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match." + assert query.shape[-2] == key.shape[-2], "q, k num_heads must match." + assert query.shape[-1] == key.shape[-1], "q, k depths must match." + + # calculate attention matrix + depth = query.shape[-1] + query = query / (depth**0.5) + # attn weight shape is (batch..., num_heads, q_length, kv_length) + attn_weights = paddle.einsum("...qhd,...khd->...hqk", query, key) + + # apply attention bias: masking, dropout, proximity bias, etc. + if bias is not None: + attn_weights = attn_weights + bias + + # normalize the attention weights + attn_weights = F.softmax(attn_weights).astype(dtype) + + # apply attention dropout + return attn_weights + + +def dot_product_attention( + query: paddle.Tensor, + key: paddle.Tensor, + value: paddle.Tensor, + bias: Optional[paddle.Tensor] = None, +) -> paddle.Tensor: + """Computes dot-product attention given query, key, and value. + + This is the core function for applying attention based on + https://arxiv.org/abs/1706.03762. It calculates the attention weights given + query and key and combines the values using the attention weights. + + Note: query, key, value needn't have any batch dimensions. + + Args: + query: queries for calculating attention with shape of [batch..., q_length, + num_heads, qk_depth_per_head]. + key: keys for calculating attention with shape of [batch..., kv_length, + num_heads, qk_depth_per_head]. + value: values to be used in attention with shape of [batch..., kv_length, + num_heads, v_depth_per_head]. + bias: bias for the attention weights. This should be broadcastable to the + shape [batch..., num_heads, q_length, kv_length]. This can be used for + incorporating causal masks, padding masks, proximity bias, etc. + + Returns: + paddle.Tensor: Output of shape [batch..., q_length, num_heads, v_depth_per_head]. + """ + if paddle.in_dynamic_mode(): + assert key.ndim == query.ndim == value.ndim, "q, k, v must have same rank." + assert ( + query.shape[:-3] == key.shape[:-3] == value.shape[:-3] + ), "q, k, v batch dims must match." + assert ( + query.shape[-2] == key.shape[-2] == value.shape[-2] + ), "q, k, v num_heads must match." + assert key.shape[-3] == value.shape[-3], "k, v lengths must match." + + # compute attention weights + attn_weights = dot_product_attention_weights( + query, + key, + bias, + ) + + # return weighted sum over values for each query position + return paddle.einsum("...hqk,...khd->...qhd", attn_weights, value) + + +class MultiHeadDotProductAttention(nn.Layer): + """Multi-head dot-product attention. + + Args: + in_dim: Number of input dimensions. + num_heads: Number of attention heads. Features (i.e. inputs_q.shape[-1]) + should be divisible by the number of heads. + qkv_features: dimension of the key, query, and value. + out_features: dimension of the last projection + use_bias: bool: whether pointwise QKVO dense transforms use bias. + attention_fn: dot_product_attention or compatible function. 
Accepts query, + key, value, and returns output of shape [bs, dim1, dim2, ..., dimN,, + num_heads, value_channels]` + normalize_qk: should QK normalization be applied (arxiv.org/abs/2302.05442). + """ + + def __init__( + self, + in_dim, + num_heads: int, + qkv_features: Optional[int] = None, + out_features: Optional[int] = None, + use_bias: bool = True, + attention_fn: Callable[..., paddle.Tensor] = dot_product_attention, + normalize_qk: bool = False, + ): + super().__init__() + self.num_heads = num_heads + self.qkv_features = qkv_features or in_dim + self.out_features = out_features or in_dim + self.use_bias = use_bias + self.attention_fn = attention_fn + self.normalize_qk = normalize_qk + assert self.qkv_features % self.num_heads == 0, ( + f"Memory dimension ({self.qkv_features}) must be divisible by number of" + f" heads ({self.num_heads})." + ) + self.head_dim = self.qkv_features // self.num_heads + + self.linear_q = nn.Linear( + in_dim, + self.qkv_features, + bias_attr=use_bias, + ) + self.linear_k = nn.Linear( + in_dim, + self.qkv_features, + bias_attr=use_bias, + ) + self.linear_v = nn.Linear( + in_dim, + self.qkv_features, + bias_attr=use_bias, + ) + self.query_ln = ( + nn.LayerNorm(self.qkv_features) if normalize_qk else nn.Identity() + ) + self.key_ln = nn.LayerNorm(self.qkv_features) if normalize_qk else nn.Identity() + self.linear_out = nn.Linear( + self.qkv_features, + self.out_features, + bias_attr=use_bias, + ) + + def forward( + self, + inputs_q: paddle.Tensor, + inputs_kv: Optional[paddle.Tensor] = None, + ): + # project inputs_q to multi-headed q/k/v + # dimensions are then [batch..., length, n_heads, n_features_per_head] + q_attn_shape = inputs_q.shape + q_attn_shape = q_attn_shape[:-1] + [self.num_heads, self.head_dim] + + kv_attn_shape = inputs_kv.shape + kv_attn_shape = kv_attn_shape[:-1] + [self.num_heads, self.head_dim] + query, key, value = ( + self.linear_q(inputs_q).reshape(q_attn_shape), + self.linear_k(inputs_kv).reshape(kv_attn_shape), + self.linear_v(inputs_kv).reshape(kv_attn_shape), + ) + + if self.normalize_qk: + # Normalizing query and key projections stabilizes training with higher + # LR. See ViT-22B paper http://arxiv.org/abs/2302.05442 for analysis. + query = self.query_ln(query) + key = self.key_ln(key) + + # apply attention + x = self.attention_fn( + query, + key, + value, + ) + # back to the original inputs dimensions + x = x.reshape(x.shape[:-2] + [x.shape[-2] * x.shape[-1]]) + out = self.linear_out(x) + return out + + +class CVit1D(base.Arch): + """ + 1D Convolutional Vision Transformer (CVit1D) class. + + [Bridging Operator Learning and Conditioned Neural Fields: A Unifying Perspective](https://arxiv.org/abs/2405.13998) + + Args: + input_keys (Sequence[str]): Keys identifying the input tensors. + output_keys (Sequence[str]): Keys identifying the output tensors. + spatial_dims (int): The spatial dimensions of the input data. + in_dim (int): The dimensionality of the input data. + coords_dim (int): The dimensionality of the positional encoding. + patch_size (Sequence[int], optional): Size of the patches. Defaults to (4,). + grid_size (Sequence[int], optional): Size of the grid. Defaults to (200,). + latent_dim (int, optional): Dimensionality of the latent space. Defaults to 256. + emb_dim (int, optional): Dimensionality of the embedding space. Defaults to 256. + depth (int, optional): Number of transformer encoder layers. Defaults to 3. + num_heads (int, optional): Number of attention heads. Defaults to 8. 
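A minimal usage sketch for the MultiHeadDotProductAttention module defined above, which wraps dot_product_attention; the module is assumed to be in scope and the tensor sizes are illustrative, not taken from the diff.

import paddle

attn = MultiHeadDotProductAttention(in_dim=32, num_heads=8)
q = paddle.randn([2, 10, 32])    # [batch, q_length, in_dim]
kv = paddle.randn([2, 20, 32])   # [batch, kv_length, in_dim]
out = attn(q, kv)                # cross-attention; pass the same tensor twice for self-attention
print(out.shape)                 # [2, 10, 32]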
+ dec_emb_dim (int, optional): Dimensionality of the decoder embedding space. Defaults to 256. + dec_num_heads (int, optional): Number of decoder attention heads. Defaults to 8. + dec_depth (int, optional): Number of decoder transformer layers. Defaults to 1. + num_mlp_layers (int, optional): Number of layers in the MLP. Defaults to 1. + mlp_ratio (int, optional): Ratio for determining the size of the MLP's hidden layer. Defaults to 1. + out_dim (int, optional): Dimensionality of the output data. Defaults to 1. + layer_norm_eps (float, optional): Epsilon for layer normalization. Defaults to 1e-5. + embedding_type (str, optional): Type of embedding to use ("grid" or other options). Defaults to "grid". + + Examples: + >>> import ppsci + >>> b, l, c = 2, 32, 1 + >>> l_query = 42 + >>> c_in = 1 + >>> c_out = 1 + >>> model = ppsci.arch.CVit1D( + ... input_keys=["u", "y"], + ... output_keys=["s"], + ... in_dim=c_in, + ... coords_dim=1, + ... spatial_dims=l, + ... patch_size=[4], + ... grid_size=[l], + ... latent_dim=32, + ... emb_dim=32, + ... depth=3, + ... num_heads=8, + ... dec_emb_dim=32, + ... dec_num_heads=8, + ... dec_depth=1, + ... num_mlp_layers=1, + ... mlp_ratio=1, + ... out_dim=c_out, + ... layer_norm_eps=1e-5, + ... embedding_type="grid", + ... ) + >>> x = paddle.randn([b, l, c_in]) + >>> coords = paddle.randn([l_query, 1]) + >>> out = model({"u": x, "y": coords})["s"] + >>> print(out.shape) # output shape should be [b, l_query, c_out] + [2, 42, 1] + """ + + def __init__( + self, + input_keys: Sequence[str], + output_keys: Sequence[str], + spatial_dims: int, + in_dim: int, + coords_dim: int, + patch_size: Sequence[int] = (4,), + grid_size: Sequence[int] = (200,), + latent_dim: int = 256, + emb_dim: int = 256, + depth: int = 3, + num_heads: int = 8, + dec_emb_dim: int = 256, + dec_num_heads: int = 8, + dec_depth: int = 1, + num_mlp_layers: int = 1, + mlp_ratio: int = 1, + out_dim: int = 1, + layer_norm_eps: float = 1e-5, + embedding_type: str = "grid", + ): + if not importlib.util.find_spec("einops"): + raise ModuleNotFoundError( + "Please install `einops` by running 'pip install einops'." 
+ ) + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.spatial_dims = spatial_dims + self.in_dim = in_dim + self.coords_dim = coords_dim + self.patch_size = patch_size + self.grid_size = grid_size + self.latent_dim = latent_dim + self.emb_dim = emb_dim + self.depth = depth + self.num_heads = num_heads + self.dec_emb_dim = dec_emb_dim + self.dec_num_heads = dec_num_heads + self.dec_depth = dec_depth + self.num_mlp_layers = num_mlp_layers + self.mlp_ratio = mlp_ratio + self.out_dim = out_dim + self.layer_norm_eps = layer_norm_eps + self.embedding_type = embedding_type + + if self.embedding_type == "grid": + # Create grid and latents + n_x = self.grid_size[0] + self.grid = paddle.linspace(0, 1, n_x) + self.latents = self.create_parameter( + [n_x, self.latent_dim], + default_initializer=nn.initializer.Normal(std=1e-2), + ) + self.fc = nn.Linear(self.latent_dim, self.dec_emb_dim) + self.norm = nn.LayerNorm(self.dec_emb_dim, self.layer_norm_eps) + elif self.embedding_type == "mlp": + self.mlp = MlpBlock(self.latent_dim, self.dec_emb_dim, self.dec_emb_dim) + self.norm = nn.LayerNorm(self.dec_emb_dim, self.layer_norm_eps) + + self.encoder = Encoder1D( + self.in_dim, + self.spatial_dims, + self.patch_size, + self.emb_dim, + self.depth, + self.num_heads, + self.mlp_ratio, + self.layer_norm_eps, + ) + self.enc_norm = nn.LayerNorm(self.emb_dim, self.layer_norm_eps) + self.fc1 = nn.Linear(self.emb_dim, self.dec_emb_dim) + self.cross_attn_blocks = nn.LayerList( + [ + CrossAttnBlock( + self.dec_num_heads, + self.dec_emb_dim, + self.mlp_ratio, + self.layer_norm_eps, + self.dec_emb_dim, + self.dec_emb_dim, + ) + for _ in range(self.dec_depth) + ] + ) + self.block_norm = nn.LayerNorm(self.dec_emb_dim, self.layer_norm_eps) + self.final_mlp = Mlp( + self.num_mlp_layers, + self.dec_emb_dim, + self.out_dim, + layer_norm_eps=self.layer_norm_eps, + ) + + def forward_tensor(self, x, coords): + b, h, c = x.shape + + # process query coordinates + if self.embedding_type == "grid": + d2 = (coords - self.grid.unsqueeze(0)) ** 2 + w = paddle.exp(-1e5 * d2) / paddle.exp(-1e5 * d2).sum(axis=1, keepdim=True) + coords = paddle.einsum("ic,pi->pc", self.latents, w) + coords = self.fc(coords) + coords = self.norm(coords) + elif self.embedding_type == "mlp": + coords = self.mlp(coords) + coords = self.norm(coords) + + coords = einops.repeat(coords, "n d -> b n d", b=b) + + # process input function(encoder) + x = self.encoder(x) + x = self.enc_norm(x) + x = self.fc1(x) + + # decoder + for i, block in enumerate(self.cross_attn_blocks): + x = block(coords, x) + + # mlp + x = self.block_norm(x) + x = self.final_mlp(x) + + return x + + def forward(self, x_dict): + if self._input_transform is not None: + x = self._input_transform(x_dict) + + x, coords = x_dict[self.input_keys[0]], x_dict[self.input_keys[1]] + if coords.ndim >= 3: + coords = coords[0] # [b, n, c] -> [n, c] + + y = self.forward_tensor(x, coords) + + y_dict = {self.output_keys[0]: y} + if self._output_transform is not None: + y_dict = self._output_transform(x_dict, y_dict) + + return y_dict + + +class CVit(base.Arch): + """ + CVit architecture. + + [Bridging Operator Learning and Conditioned Neural Fields: A Unifying Perspective](https://arxiv.org/abs/2405.13998) + + Args: + input_keys (Sequence[str]): Input keys. + output_keys (Sequence[str]): Output keys. + in_dim (int): Dimensionality of the input data. + coords_dim (int): Dimensionality of the coordinates. + spatial_dims (Sequence[int]): Spatial dimensions. 
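The "grid" query embedding used in forward_tensor above reduces to a sharply peaked Gaussian soft-match of the query coordinates against a fixed grid, whose weights then gather rows of the learnable latent table; a standalone sketch with illustrative sizes:

import paddle

grid = paddle.linspace(0, 1, 200)               # fixed 1D grid, [n_x]
coords = paddle.rand([42, 1])                   # query coordinates, [n_query, 1]
d2 = (coords - grid.unsqueeze(0)) ** 2          # squared distances, [n_query, n_x]
w = paddle.exp(-1e5 * d2) / paddle.exp(-1e5 * d2).sum(axis=1, keepdim=True)
latents = paddle.randn([200, 32])               # learnable table, [n_x, latent_dim]
emb = paddle.einsum("ic,pi->pc", latents, w)    # [n_query, latent_dim]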
+ patch_size (Sequence[int], optional): Size of the patches. Defaults to (1, 16, 16). + grid_size (Sequence[int], optional): Size of the grid. Defaults to (128, 128). + latent_dim (int, optional): Dimensionality of the latent space. Defaults to 256. + emb_dim (int, optional): Dimensionality of the embedding space. Defaults to 256. + depth (int, optional): Number of transformer encoder layers. Defaults to 3. + num_heads (int, optional): Number of attention heads. Defaults to 8. + dec_emb_dim (int, optional): Dimensionality of the decoder embedding space. Defaults to 256. + dec_num_heads (int, optional): Number of decoder attention heads. Defaults to 8. + dec_depth (int, optional): Number of decoder transformer layers. Defaults to 1. + num_mlp_layers (int, optional): Number of MLP layers. Defaults to 1. + mlp_ratio (int, optional): Ratio of hidden units. Defaults to 1. + out_dim (int, optional): Dimensionality of the output. Defaults to 1. + layer_norm_eps (float, optional): Epsilon value for layer normalization. Defaults to 1e-5. + embedding_type (str, optional): Type of embedding. Defaults to "grid". + + Examples: + >>> import ppsci + >>> b, t, h, w, c_in = 2, 4, 8, 8, 3 + >>> c_out = 3 + >>> h_query, w_query = 32, 32 + >>> model = ppsci.arch.CVit( + ... input_keys=["u", "y"], + ... output_keys=["s"], + ... in_dim=c_in, + ... coords_dim=2, + ... spatial_dims=[t, h, w], + ... patch_size=(1, 4, 4), + ... grid_size=(h, w), + ... latent_dim=32, + ... emb_dim=32, + ... depth=3, + ... num_heads=8, + ... dec_emb_dim=32, + ... dec_num_heads=8, + ... dec_depth=1, + ... num_mlp_layers=1, + ... mlp_ratio=1, + ... out_dim=c_out, + ... layer_norm_eps=1e-5, + ... embedding_type="grid", + ... ) + >>> x = paddle.randn([b, t, h, w, c_in]) + >>> coords = paddle.randn([h_query * w_query, 2]) + >>> out = model({"u": x, "y": coords})["s"] + >>> print(out.shape) # output shape should be [b, h_query * w_query, c_out] + [2, 1024, 3] + """ + + def __init__( + self, + input_keys: Sequence[str], + output_keys: Sequence[str], + in_dim: int, + coords_dim: int, + spatial_dims: Sequence[int], + patch_size: Sequence[int] = (1, 16, 16), + grid_size: Sequence[int] = (128, 128), + latent_dim: int = 256, + emb_dim: int = 256, + depth: int = 3, + num_heads: int = 8, + dec_emb_dim: int = 256, + dec_num_heads: int = 8, + dec_depth: int = 1, + num_mlp_layers: int = 1, + mlp_ratio: int = 1, + out_dim: int = 1, + layer_norm_eps: float = 1e-5, + embedding_type: str = "grid", + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.spatial_dims = spatial_dims + self.in_dim = in_dim + self.coords_dim = coords_dim + self.patch_size = patch_size + self.grid_size = grid_size + self.latent_dim = latent_dim + self.emb_dim = emb_dim + self.depth = depth + self.num_heads = num_heads + self.dec_emb_dim = dec_emb_dim + self.dec_num_heads = dec_num_heads + self.dec_depth = dec_depth + self.num_mlp_layers = num_mlp_layers + self.mlp_ratio = mlp_ratio + self.out_dim = out_dim + self.layer_norm_eps = layer_norm_eps + self.embedding_type = embedding_type + + if self.embedding_type == "grid": + # Create grid and latents + n_x, n_y = self.grid_size[0], self.grid_size[1] + + x = paddle.linspace(0, 1, n_x) + y = paddle.linspace(0, 1, n_y) + xx, yy = paddle.meshgrid(x, y, indexing="ij") + + self.grid = paddle.hstack([xx.flatten()[:, None], yy.flatten()[:, None]]) + self.latents = self.create_parameter( + [n_x * n_y, self.latent_dim], + default_initializer=nn.initializer.Normal(std=1e-2), + ) + self.fc = 
nn.Linear(self.latent_dim, self.dec_emb_dim) + self.norm = nn.LayerNorm(self.dec_emb_dim, self.layer_norm_eps) + elif self.embedding_type == "mlp": + self.mlp = MlpBlock(self.latent_dim, self.dec_emb_dim, self.dec_emb_dim) + self.norm = nn.LayerNorm(self.dec_emb_dim, self.layer_norm_eps) + + self.encoder = Encoder( + self.in_dim, + self.spatial_dims, + self.patch_size, + self.emb_dim, + self.depth, + self.num_heads, + self.mlp_ratio, + self.layer_norm_eps, + ) + self.enc_norm = nn.LayerNorm(self.emb_dim, self.layer_norm_eps) + self.fc1 = nn.Linear(self.emb_dim, self.dec_emb_dim) + self.cross_attn_blocks = nn.LayerList( + [ + CrossAttnBlock( + self.dec_num_heads, + self.dec_emb_dim, + self.mlp_ratio, + self.layer_norm_eps, + self.dec_emb_dim, + self.dec_emb_dim, + ) + for _ in range(self.dec_depth) + ] + ) + self.block_norm = nn.LayerNorm(self.dec_emb_dim, self.layer_norm_eps) + self.final_mlp = Mlp( + self.num_mlp_layers, + self.dec_emb_dim, + self.out_dim, + layer_norm_eps=self.layer_norm_eps, + ) + + def forward_tensor(self, x, coords): + b, t, h, w, c = x.shape + + # process query coordinates + if self.embedding_type == "grid": + d2 = ((coords.unsqueeze(1) - self.grid.unsqueeze(0)) ** 2).sum(axis=2) + w = paddle.exp(-1e5 * d2) / paddle.exp(-1e5 * d2).sum(axis=1, keepdim=True) + coords = paddle.einsum("ic,pi->pc", self.latents, w) + coords = self.fc(coords) + coords = self.norm(coords) + elif self.embedding_type == "mlp": + coords = self.mlp(coords) + coords = self.norm(coords) + + coords = einops.repeat(coords, "n d -> b n d", b=b) + + # process input function(encoder) + x = self.encoder(x) + x = self.enc_norm(x) + x = self.fc1(x) + + # decoder + for i, block in enumerate(self.cross_attn_blocks): + x = block(coords, x) + + # mlp + x = self.block_norm(x) + x = self.final_mlp(x) + + return x + + def forward(self, x_dict): + if self._input_transform is not None: + x = self._input_transform(x_dict) + + x, coords = x_dict[self.input_keys[0]], x_dict[self.input_keys[1]] + if coords.ndim >= 3: + coords = coords[0] # [b, n, c] -> [n, c] + + y = self.forward_tensor(x, coords) + + y_dict = {self.output_keys[0]: y} + if self._output_transform is not None: + y_dict = self._output_transform(x_dict, y_dict) + + return y_dict +>>>>>>> Stashed changes diff --git a/ppsci/arch/deeponet.py b/ppsci/arch/deeponet.py index 16a8807d81..d7cdea8db4 100644 --- a/ppsci/arch/deeponet.py +++ b/ppsci/arch/deeponet.py @@ -1,154 +1,154 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Tuple -from typing import Union - -import paddle -import paddle.nn as nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base -from ppsci.arch import mlp - - -class DeepONet(base.Arch): - """Deep operator network. - - [Lu et al. Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators. 
Nat Mach Intell, 2021.](https://doi.org/10.1038/s42256-021-00302-5) - - Args: - u_key (str): Name of function data for input function u(x). - y_key (str): Name of location data for input function G(u). - G_key (str): Output name of predicted G(u)(y). - num_loc (int): Number of sampled u(x), i.e. `m` in paper. - num_features (int): Number of features extracted from u(x), same for y. - branch_num_layers (int): Number of hidden layers of branch net. - trunk_num_layers (int): Number of hidden layers of trunk net. - branch_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of branch net. - An integer for all layers, or list of integer specify each layer's size. - trunk_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of trunk net. - An integer for all layers, or list of integer specify each layer's size. - branch_skip_connection (bool, optional): Whether to use skip connection for branch net. Defaults to False. - trunk_skip_connection (bool, optional): Whether to use skip connection for trunk net. Defaults to False. - branch_activation (str, optional): Name of activation function. Defaults to "tanh". - trunk_activation (str, optional): Name of activation function. Defaults to "tanh". - branch_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for branch net. Defaults to False. - trunk_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. - use_bias (bool, optional): Whether to add bias on predicted G(u)(y). Defaults to True. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.DeepONet( - ... "u", "y", "G", - ... 100, 40, - ... 1, 1, - ... 40, 40, - ... branch_activation="relu", trunk_activation="relu", - ... use_bias=True, - ... ) - >>> input_dict = {"u": paddle.rand([200, 100]), - ... 
"y": paddle.rand([200, 1])} - >>> output_dict = model(input_dict) - >>> print(output_dict["G"].shape) - [200, 1] - """ - - def __init__( - self, - u_key: str, - y_key: str, - G_key: str, - num_loc: int, - num_features: int, - branch_num_layers: int, - trunk_num_layers: int, - branch_hidden_size: Union[int, Tuple[int, ...]], - trunk_hidden_size: Union[int, Tuple[int, ...]], - branch_skip_connection: bool = False, - trunk_skip_connection: bool = False, - branch_activation: str = "tanh", - trunk_activation: str = "tanh", - branch_weight_norm: bool = False, - trunk_weight_norm: bool = False, - use_bias: bool = True, - ): - super().__init__() - self.u_key = u_key - self.y_key = y_key - self.input_keys = (u_key, y_key) - self.output_keys = (G_key,) - - self.branch_net = mlp.MLP( - (self.u_key,), - ("b",), - branch_num_layers, - branch_hidden_size, - branch_activation, - branch_skip_connection, - branch_weight_norm, - input_dim=num_loc, - output_dim=num_features, - ) - - self.trunk_net = mlp.MLP( - (self.y_key,), - ("t",), - trunk_num_layers, - trunk_hidden_size, - trunk_activation, - trunk_skip_connection, - trunk_weight_norm, - input_dim=1, - output_dim=num_features, - ) - self.trunk_act = act_mod.get_activation(trunk_activation) - - self.use_bias = use_bias - if use_bias: - # register bias to parameter for updating in optimizer and storage - self.b = self.create_parameter( - shape=(1,), - attr=nn.initializer.Constant(0.0), - ) - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - # Branch net to encode the input function - u_features = self.branch_net(x)[self.branch_net.output_keys[0]] - - # Trunk net to encode the domain of the output function - y_features = self.trunk_net(x) - y_features = self.trunk_act(y_features[self.trunk_net.output_keys[0]]) - - # Dot product - G_u = paddle.einsum("bi,bi->b", u_features, y_features) # [batch_size, ] - G_u = paddle.reshape(G_u, [-1, 1]) # reshape [batch_size, ] to [batch_size, 1] - - # Add bias - if self.use_bias: - G_u += self.b - - result_dict = { - self.output_keys[0]: G_u, - } - if self._output_transform is not None: - result_dict = self._output_transform(x, result_dict) - - return result_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Tuple +from typing import Union + +import paddle +import paddle.nn as nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base +from ppsci.arch import mlp + + +class DeepONet(base.Arch): + """Deep operator network. + + [Lu et al. Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators. Nat Mach Intell, 2021.](https://doi.org/10.1038/s42256-021-00302-5) + + Args: + u_key (str): Name of function data for input function u(x). + y_key (str): Name of location data for input function G(u). + G_key (str): Output name of predicted G(u)(y). + num_loc (int): Number of sampled u(x), i.e. 
`m` in paper. + num_features (int): Number of features extracted from u(x), same for y. + branch_num_layers (int): Number of hidden layers of branch net. + trunk_num_layers (int): Number of hidden layers of trunk net. + branch_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of branch net. + An integer for all layers, or list of integer specify each layer's size. + trunk_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of trunk net. + An integer for all layers, or list of integer specify each layer's size. + branch_skip_connection (bool, optional): Whether to use skip connection for branch net. Defaults to False. + trunk_skip_connection (bool, optional): Whether to use skip connection for trunk net. Defaults to False. + branch_activation (str, optional): Name of activation function. Defaults to "tanh". + trunk_activation (str, optional): Name of activation function. Defaults to "tanh". + branch_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for branch net. Defaults to False. + trunk_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. + use_bias (bool, optional): Whether to add bias on predicted G(u)(y). Defaults to True. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.DeepONet( + ... "u", "y", "G", + ... 100, 40, + ... 1, 1, + ... 40, 40, + ... branch_activation="relu", trunk_activation="relu", + ... use_bias=True, + ... ) + >>> input_dict = {"u": paddle.rand([200, 100]), + ... "y": paddle.rand([200, 1])} + >>> output_dict = model(input_dict) + >>> print(output_dict["G"].shape) + [200, 1] + """ + + def __init__( + self, + u_key: str, + y_key: str, + G_key: str, + num_loc: int, + num_features: int, + branch_num_layers: int, + trunk_num_layers: int, + branch_hidden_size: Union[int, Tuple[int, ...]], + trunk_hidden_size: Union[int, Tuple[int, ...]], + branch_skip_connection: bool = False, + trunk_skip_connection: bool = False, + branch_activation: str = "tanh", + trunk_activation: str = "tanh", + branch_weight_norm: bool = False, + trunk_weight_norm: bool = False, + use_bias: bool = True, + ): + super().__init__() + self.u_key = u_key + self.y_key = y_key + self.input_keys = (u_key, y_key) + self.output_keys = (G_key,) + + self.branch_net = mlp.MLP( + (self.u_key,), + ("b",), + branch_num_layers, + branch_hidden_size, + branch_activation, + branch_skip_connection, + branch_weight_norm, + input_dim=num_loc, + output_dim=num_features, + ) + + self.trunk_net = mlp.MLP( + (self.y_key,), + ("t",), + trunk_num_layers, + trunk_hidden_size, + trunk_activation, + trunk_skip_connection, + trunk_weight_norm, + input_dim=1, + output_dim=num_features, + ) + self.trunk_act = act_mod.get_activation(trunk_activation) + + self.use_bias = use_bias + if use_bias: + # register bias to parameter for updating in optimizer and storage + self.b = self.create_parameter( + shape=(1,), + attr=nn.initializer.Constant(0.0), + ) + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + # Branch net to encode the input function + u_features = self.branch_net(x)[self.branch_net.output_keys[0]] + + # Trunk net to encode the domain of the output function + y_features = self.trunk_net(x) + y_features = self.trunk_act(y_features[self.trunk_net.output_keys[0]]) + + # Dot product + G_u = paddle.einsum("bi,bi->b", u_features, y_features) # [batch_size, ] + G_u = paddle.reshape(G_u, [-1, 1]) # reshape [batch_size, ] to [batch_size, 1] + + # Add 
bias + if self.use_bias: + G_u += self.b + + result_dict = { + self.output_keys[0]: G_u, + } + if self._output_transform is not None: + result_dict = self._output_transform(x, result_dict) + + return result_dict diff --git a/ppsci/arch/dgmr.py b/ppsci/arch/dgmr.py index dd189bb2de..bc8fbf0749 100644 --- a/ppsci/arch/dgmr.py +++ b/ppsci/arch/dgmr.py @@ -1,1151 +1,1151 @@ -from typing import List -from typing import Tuple - -import paddle -import paddle.nn as nn - -from ppsci.arch import base - -try: - import einops -except ModuleNotFoundError: - pass - - -class DGMR(base.Arch): - """Deep Generative Model of Radar. - Nowcasting GAN is an attempt to recreate DeepMind's Skillful Nowcasting GAN from https://arxiv.org/abs/2104.00954. - but slightly modified for multiple satellite channels - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). - forecast_steps (int, optional): Number of steps to predict in the future - input_channels (int, optional): Number of input channels per image - gen_lr (float, optional): Learning rate for the generator - disc_lr (float, optional): Learning rate for the discriminators, shared for both temporal and spatial discriminator - conv_type (str, optional): Type of 2d convolution to use, see satflow/models/utils.py for options - beta1 (float, optional): Beta1 for Adam optimizer - beta2 (float, optional): Beta2 for Adam optimizer - num_samples (int, optional): Number of samples of the latent space to sample for training/validation - grid_lambda (float, optional): Lambda for the grid regularization loss - output_shape (int, optional): Shape of the output predictions, generally should be same as the input shape - generation_steps (int, optional): Number of generation steps to use in forward pass, in paper is 6 and the best is chosen for the loss - this results in huge amounts of GPU memory though, so less might work better for training. 
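The DeepONet output computed above is a per-sample dot product between branch and trunk features plus a learned scalar bias, i.e. G(u)(y) = sum_i b_i(u) t_i(y) + b0; a minimal sketch with illustrative sizes:

import paddle

u_feat = paddle.rand([200, 40])   # branch features, [batch, num_features]
y_feat = paddle.rand([200, 40])   # trunk features,  [batch, num_features]
b0 = paddle.zeros([1])            # scalar bias
G_u = paddle.einsum("bi,bi->b", u_feat, y_feat).reshape([-1, 1]) + b0  # [batch, 1]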
- context_channels (int, optional): Number of output channels for the lowest block of conditioning stack - latent_channels (int, optional): Number of channels that the latent space should be reshaped to, - input dimension into ConvGRU, also affects the number of channels for other linked inputs/outputs - - Examples: - >>> import ppsci - >>> import paddle - >>> model = ppsci.arch.DGMR(("input", ), ("output", )) - >>> input_dict = {"input": paddle.randn((1, 4, 1, 256, 256))} - >>> output_dict = model(input_dict) # doctest: +SKIP - >>> print(output_dict["output"].shape) # doctest: +SKIP - [1, 18, 1, 256, 256] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - forecast_steps: int = 18, - input_channels: int = 1, - output_shape: int = 256, - gen_lr: float = 5e-05, - disc_lr: float = 0.0002, - conv_type: str = "standard", - num_samples: int = 6, - grid_lambda: float = 20.0, - beta1: float = 0.0, - beta2: float = 0.999, - latent_channels: int = 768, - context_channels: int = 384, - generation_steps: int = 6, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.gen_lr = gen_lr - self.disc_lr = disc_lr - self.beta1 = beta1 - self.beta2 = beta2 - self.grid_lambda = grid_lambda - self.num_samples = num_samples - self.latent_channels = latent_channels - self.context_channels = context_channels - self.input_channels = input_channels - self.generation_steps = generation_steps - self.conditioning_stack = ContextConditioningStack( - input_channels=input_channels, - conv_type=conv_type, - output_channels=self.context_channels, - ) - self.latent_stack = LatentConditioningStack( - shape=(8 * self.input_channels, output_shape // 32, output_shape // 32), - output_channels=self.latent_channels, - ) - self.sampler = Sampler( - forecast_steps=forecast_steps, - latent_channels=self.latent_channels, - context_channels=self.context_channels, - ) - self.generator = Generator( - self.conditioning_stack, self.latent_stack, self.sampler - ) - self.discriminator = Discriminator(input_channels) - self.global_iteration = 0 - self.automatic_optimization = False - - def split_to_dict( - self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...] - ): - return {key: data_tensors[i] for i, key in enumerate(keys)} - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - x_tensor = self.concat_to_tensor(x, self.input_keys) - y = [self.generator(x_tensor)] - y = self.split_to_dict(y, self.output_keys) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y - - -class Sampler(nn.Layer): - """ - Sampler from the Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf - - The sampler takes the output from the Latent and Context conditioning stacks and - creates one stack of ConvGRU layers per future timestep. 
- - Args: - forecast_steps: Number of forecast steps - latent_channels: Number of input channels to the lowest ConvGRU layer - """ - - def __init__( - self, - forecast_steps: int = 18, - latent_channels: int = 768, - context_channels: int = 384, - output_channels: int = 1, - ): - super().__init__() - self.forecast_steps = forecast_steps - self.convGRU1 = ConvGRU( - input_channels=latent_channels + context_channels, - output_channels=context_channels, - kernel_size=3, - ) - self.gru_conv_1x1 = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=context_channels, - out_channels=latent_channels, - kernel_size=(1, 1), - ) - ) - self.g1 = GBlock( - input_channels=latent_channels, output_channels=latent_channels - ) - self.up_g1 = UpsampleGBlock( - input_channels=latent_channels, output_channels=latent_channels // 2 - ) - self.convGRU2 = ConvGRU( - input_channels=latent_channels // 2 + context_channels // 2, - output_channels=context_channels // 2, - kernel_size=3, - ) - self.gru_conv_1x1_2 = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=context_channels // 2, - out_channels=latent_channels // 2, - kernel_size=(1, 1), - ) - ) - self.g2 = GBlock( - input_channels=latent_channels // 2, output_channels=latent_channels // 2 - ) - self.up_g2 = UpsampleGBlock( - input_channels=latent_channels // 2, output_channels=latent_channels // 4 - ) - self.convGRU3 = ConvGRU( - input_channels=latent_channels // 4 + context_channels // 4, - output_channels=context_channels // 4, - kernel_size=3, - ) - self.gru_conv_1x1_3 = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=context_channels // 4, - out_channels=latent_channels // 4, - kernel_size=(1, 1), - ) - ) - self.g3 = GBlock( - input_channels=latent_channels // 4, output_channels=latent_channels // 4 - ) - self.up_g3 = UpsampleGBlock( - input_channels=latent_channels // 4, output_channels=latent_channels // 8 - ) - self.convGRU4 = ConvGRU( - input_channels=latent_channels // 8 + context_channels // 8, - output_channels=context_channels // 8, - kernel_size=3, - ) - self.gru_conv_1x1_4 = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=context_channels // 8, - out_channels=latent_channels // 8, - kernel_size=(1, 1), - ) - ) - self.g4 = GBlock( - input_channels=latent_channels // 8, output_channels=latent_channels // 8 - ) - self.up_g4 = UpsampleGBlock( - input_channels=latent_channels // 8, output_channels=latent_channels // 16 - ) - self.bn = nn.BatchNorm2D(num_features=latent_channels // 16) - self.relu = nn.ReLU() - self.conv_1x1 = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=latent_channels // 16, - out_channels=4 * output_channels, - kernel_size=(1, 1), - ) - ) - self.depth2space = nn.PixelShuffle(upscale_factor=2) - - def forward( - self, conditioning_states: List[paddle.Tensor], latent_dim: paddle.Tensor - ) -> paddle.Tensor: - """ - Perform the sampling from Skillful Nowcasting with GANs - - Args: - conditioning_states: Outputs from the `ContextConditioningStack` with the 4 input states, ordered from largest to smallest spatially - latent_dim: Output from `LatentConditioningStack` for input into the ConvGRUs - Returns: - forecast_steps-length output of images for future timesteps - - """ - init_states = conditioning_states - latent_dim = einops.repeat( - latent_dim, "b c h w -> (repeat b) c h w", repeat=init_states[0].shape[0] - ) - hidden_states = [latent_dim] * self.forecast_steps - - hidden_states = self.convGRU1(hidden_states, init_states[3]) - hidden_states = [self.gru_conv_1x1(h) for h in hidden_states] - 
hidden_states = [self.g1(h) for h in hidden_states] - hidden_states = [self.up_g1(h) for h in hidden_states] - hidden_states = self.convGRU2(hidden_states, init_states[2]) - hidden_states = [self.gru_conv_1x1_2(h) for h in hidden_states] - hidden_states = [self.g2(h) for h in hidden_states] - hidden_states = [self.up_g2(h) for h in hidden_states] - hidden_states = self.convGRU3(hidden_states, init_states[1]) - hidden_states = [self.gru_conv_1x1_3(h) for h in hidden_states] - hidden_states = [self.g3(h) for h in hidden_states] - hidden_states = [self.up_g3(h) for h in hidden_states] - hidden_states = self.convGRU4(hidden_states, init_states[0]) - hidden_states = [self.gru_conv_1x1_4(h) for h in hidden_states] - hidden_states = [self.g4(h) for h in hidden_states] - hidden_states = [self.up_g4(h) for h in hidden_states] - hidden_states = [nn.functional.relu(x=self.bn(h)) for h in hidden_states] - hidden_states = [self.conv_1x1(h) for h in hidden_states] - hidden_states = [self.depth2space(h) for h in hidden_states] - forecasts = paddle.stack(x=hidden_states, axis=1) - return forecasts - - -class Generator(nn.Layer): - """ - Wraps the three parts of the generator for simpler calling - - Args: - conditioning_stack: A layer representing the conditioning stack. - latent_stack: A layer representing the latent stack. - sampler: A layer representing the sampler. - """ - - def __init__( - self, - conditioning_stack: nn.Layer, - latent_stack: nn.Layer, - sampler: nn.Layer, - ): - super().__init__() - self.conditioning_stack = conditioning_stack - self.latent_stack = latent_stack - self.sampler = sampler - - def forward(self, x): - conditioning_states = self.conditioning_stack(x) - latent_dim = self.latent_stack(x) - x = self.sampler(conditioning_states, latent_dim) - return x - - -class Discriminator(nn.Layer): - def __init__( - self, - input_channels: int = 12, - num_spatial_frames: int = 8, - conv_type: str = "standard", - ): - super().__init__() - self.spatial_discriminator = SpatialDiscriminator( - input_channels=input_channels, - num_timesteps=num_spatial_frames, - conv_type=conv_type, - ) - self.temporal_discriminator = TemporalDiscriminator( - input_channels=input_channels, conv_type=conv_type - ) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - spatial_loss = self.spatial_discriminator(x) - temporal_loss = self.temporal_discriminator(x) - return paddle.concat(x=[spatial_loss, temporal_loss], axis=1) - - -class TemporalDiscriminator(nn.Layer): - """ - Temporal Discriminator from the Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf - - Args: - input_channels: Number of channels per timestep - crop_size: Size of the crop, in the paper half the width of the input images - num_layers: Number of intermediate DBlock layers to use - conv_type: Type of 2d convolutions to use, see satflow/models/utils.py for options - """ - - def __init__( - self, - input_channels: int = 12, - num_layers: int = 3, - conv_type: str = "standard", - ): - super().__init__() - self.downsample = nn.AvgPool3D( - kernel_size=(1, 2, 2), stride=(1, 2, 2), exclusive=False - ) - self.space2depth = nn.PixelUnshuffle(downscale_factor=2) - internal_chn = 48 - self.d1 = DBlock( - input_channels=4 * input_channels, - output_channels=internal_chn * input_channels, - conv_type="3d", - first_relu=False, - ) - self.d2 = DBlock( - input_channels=internal_chn * input_channels, - output_channels=2 * internal_chn * input_channels, - conv_type="3d", - ) - self.intermediate_dblocks = nn.LayerList() - for _ in 
range(num_layers): - internal_chn *= 2 - self.intermediate_dblocks.append( - DBlock( - input_channels=internal_chn * input_channels, - output_channels=2 * internal_chn * input_channels, - conv_type=conv_type, - ) - ) - self.d_last = DBlock( - input_channels=2 * internal_chn * input_channels, - output_channels=2 * internal_chn * input_channels, - keep_same_output=True, - conv_type=conv_type, - ) - self.fc = nn.utils.spectral_norm( - layer=nn.Linear( - in_features=2 * internal_chn * input_channels, out_features=1 - ) - ) - self.relu = nn.ReLU() - self.bn = nn.BatchNorm1D(num_features=2 * internal_chn * input_channels) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - x = self.downsample(x) - if len(x.shape) == 4: - x = self.space2depth(x) - elif len(x.shape) == 5: - B, T = x.shape[0], x.shape[1] - x_reshaped = paddle.reshape(x, [-1] + list(x.shape[2:])) - x = self.space2depth(x_reshaped) - x = paddle.reshape(x, [B, T] + list(x.shape[1:])) - x = paddle.transpose(x=x, perm=(0, 2, 1, 3, 4)) - x = self.d1(x) - x = self.d2(x) - x = paddle.transpose(x=x, perm=(0, 2, 1, 3, 4)) - representations = [] - for idx in range(x.shape[1]): - rep = x[:, idx, :, :, :] - for d in self.intermediate_dblocks: - rep = d(rep) - rep = self.d_last(rep) - rep = paddle.sum(x=nn.functional.relu(x=rep), axis=[2, 3]) - rep = self.bn(rep) - rep = self.fc(rep) - representations.append(rep) - x = paddle.stack(x=representations, axis=1) - x = paddle.sum(x=x, keepdim=True, axis=1) - return x - - -class SpatialDiscriminator(nn.Layer): - """ - Spatial discriminator from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf - - Args: - input_channels: Number of input channels per timestep - num_timesteps: Number of timesteps to use, in the paper 8/18 timesteps were chosen - num_layers: Number of intermediate DBlock layers to use - conv_type: Type of 2d convolutions to use, see satflow/models/utils.py for options - """ - - def __init__( - self, - input_channels: int = 12, - num_timesteps: int = 8, - num_layers: int = 4, - conv_type: str = "standard", - ): - super().__init__() - self.num_timesteps = num_timesteps - self.mean_pool = nn.AvgPool2D(kernel_size=2, exclusive=False) - self.space2depth = nn.PixelUnshuffle(downscale_factor=2) - internal_chn = 24 - self.d1 = DBlock( - input_channels=4 * input_channels, - output_channels=2 * internal_chn * input_channels, - first_relu=False, - conv_type=conv_type, - ) - self.intermediate_dblocks = nn.LayerList() - for _ in range(num_layers): - internal_chn *= 2 - self.intermediate_dblocks.append( - DBlock( - input_channels=internal_chn * input_channels, - output_channels=2 * internal_chn * input_channels, - conv_type=conv_type, - ) - ) - self.d6 = DBlock( - input_channels=2 * internal_chn * input_channels, - output_channels=2 * internal_chn * input_channels, - keep_same_output=True, - conv_type=conv_type, - ) - self.fc = nn.utils.spectral_norm( - layer=nn.Linear( - in_features=2 * internal_chn * input_channels, out_features=1 - ) - ) - self.relu = nn.ReLU() - self.bn = nn.BatchNorm1D(num_features=2 * internal_chn * input_channels) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - idxs = paddle.randint(low=0, high=x.shape[1], shape=(self.num_timesteps,)) - representations = [] - for idx in idxs: - rep = self.mean_pool(x[:, idx, :, :, :]) - if len(rep.shape) == 4: - rep = self.space2depth(rep) - elif len(rep.shape) == 5: - B, T = rep.shape[0], rep.shape[1] - rep_reshaped = paddle.reshape(rep, [-1] + list(rep.shape[2:])) - rep = self.space2depth(rep_reshaped) - rep = 
paddle.reshape(rep, [B, T] + list(rep.shape[1:])) - rep = self.d1(rep) - for d in self.intermediate_dblocks: - rep = d(rep) - rep = self.d6(rep) - rep = paddle.sum(x=nn.functional.relu(x=rep), axis=[2, 3]) - rep = self.bn(rep) - rep = self.fc(rep) - """ - Pseudocode from DeepMind - # Sum-pool the representations and feed to spectrally normalized lin. layer. - y = tf.reduce_sum(tf.nn.relu(y), axis=[1, 2]) - y = layers.BatchNorm(calc_sigma=False)(y) - output_layer = layers.Linear(output_size=1) - output = output_layer(y) - - # Take the sum across the t samples. Note: we apply the ReLU to - # (1 - score_real) and (1 + score_generated) in the loss. - output = tf.reshape(output, [b, n, 1]) - output = tf.reduce_sum(output, keepdims=True, axis=1) - return output - """ - representations.append(rep) - x = paddle.stack(x=representations, axis=1) - x = paddle.sum(x=x, keepdim=True, axis=1) - return x - - -class GBlock(nn.Layer): - """Residual generator block without upsampling. G Block from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf - - Args: - input_channels: Number of input channels - output_channels: Number of output channels - conv_type: Type of convolution desired, see satflow/models/utils.py for options - """ - - def __init__( - self, - input_channels: int = 12, - output_channels: int = 12, - conv_type: str = "standard", - spectral_normalized_eps=0.0001, - ): - super().__init__() - self.output_channels = output_channels - self.bn1 = nn.BatchNorm2D(num_features=input_channels) - self.bn2 = nn.BatchNorm2D(num_features=input_channels) - self.relu = nn.ReLU() - conv2d = get_conv_layer(conv_type) - self.conv_1x1 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=input_channels, out_channels=output_channels, kernel_size=1 - ), - eps=spectral_normalized_eps, - ) - self.first_conv_3x3 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=input_channels, - out_channels=input_channels, - kernel_size=3, - padding=1, - ), - eps=spectral_normalized_eps, - ) - self.last_conv_3x3 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=input_channels, - out_channels=output_channels, - kernel_size=3, - padding=1, - ), - eps=spectral_normalized_eps, - ) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - if x.shape[1] != self.output_channels: - sc = self.conv_1x1(x) - else: - sc = x - x2 = self.bn1(x) - x2 = self.relu(x2) - x2 = self.first_conv_3x3(x2) - x2 = self.bn2(x2) - x2 = self.relu(x2) - x2 = self.last_conv_3x3(x2) - x = x2 + sc - return x - - -class UpsampleGBlock(nn.Layer): - """Residual generator block with upsampling - G Block from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf - - Args: - input_channels: Number of input channels - output_channels: Number of output channels - conv_type: Type of convolution desired, see satflow/models/utils.py for options - """ - - def __init__( - self, - input_channels: int = 12, - output_channels: int = 12, - conv_type: str = "standard", - spectral_normalized_eps=0.0001, - ): - super().__init__() - self.output_channels = output_channels - self.bn1 = nn.BatchNorm2D(num_features=input_channels) - self.bn2 = nn.BatchNorm2D(num_features=input_channels) - self.relu = nn.ReLU() - conv2d = get_conv_layer(conv_type) - self.conv_1x1 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=input_channels, out_channels=output_channels, kernel_size=1 - ), - eps=spectral_normalized_eps, - ) - self.upsample = nn.Upsample(scale_factor=2, mode="nearest") - self.first_conv_3x3 = nn.utils.spectral_norm( - layer=conv2d( - 
in_channels=input_channels, - out_channels=input_channels, - kernel_size=3, - padding=1, - ), - eps=spectral_normalized_eps, - ) - self.last_conv_3x3 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=input_channels, - out_channels=output_channels, - kernel_size=3, - padding=1, - ), - eps=spectral_normalized_eps, - ) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - sc = self.upsample(x) - sc = self.conv_1x1(sc) - x2 = self.bn1(x) - x2 = self.relu(x2) - x2 = self.upsample(x2) - x2 = self.first_conv_3x3(x2) - x2 = self.bn2(x2) - x2 = self.relu(x2) - x2 = self.last_conv_3x3(x2) - x = x2 + sc - return x - - -class DBlock(nn.Layer): - """ - D and 3D Block from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf - - Args: - input_channels: Number of input channels - output_channels: Number of output channels - conv_type: Convolution type, see satflow/models/utils.py for options - first_relu: Whether to have an ReLU before the first 3x3 convolution - keep_same_output: Whether the output should have the same spatial dimensions as input, if False, downscales by 2 - """ - - def __init__( - self, - input_channels: int = 12, - output_channels: int = 12, - conv_type: str = "standard", - first_relu: bool = True, - keep_same_output: bool = False, - ): - super().__init__() - self.input_channels = input_channels - self.output_channels = output_channels - self.first_relu = first_relu - self.keep_same_output = keep_same_output - self.conv_type = conv_type - conv2d = get_conv_layer(conv_type) - if conv_type == "3d": - self.pooling = nn.AvgPool3D(kernel_size=2, stride=2, exclusive=False) - else: - self.pooling = nn.AvgPool2D(kernel_size=2, stride=2, exclusive=False) - self.conv_1x1 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=input_channels, out_channels=output_channels, kernel_size=1 - ) - ) - self.first_conv_3x3 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=input_channels, - out_channels=output_channels, - kernel_size=3, - padding=1, - ) - ) - self.last_conv_3x3 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=output_channels, - out_channels=output_channels, - kernel_size=3, - padding=1, - stride=1, - ) - ) - self.relu = nn.ReLU() - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - if self.input_channels != self.output_channels: - x1 = self.conv_1x1(x) - if not self.keep_same_output: - x1 = self.pooling(x1) - else: - x1 = x - if self.first_relu: - x = self.relu(x) - x = self.first_conv_3x3(x) - x = self.relu(x) - x = self.last_conv_3x3(x) - if not self.keep_same_output: - x = self.pooling(x) - x = x1 + x - return x - - -class LBlock(nn.Layer): - """Residual block for the Latent Stack. 
- L-Block for increasing the number of channels in the input - from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf - - Args: - input_channels: Number of input channels - output_channels: Number of output channels - conv_type: Which type of convolution desired, see satflow/models/utils.py for options - """ - - def __init__( - self, - input_channels: int = 12, - output_channels: int = 12, - kernel_size: int = 3, - conv_type: str = "standard", - ): - super().__init__() - self.input_channels = input_channels - self.output_channels = output_channels - conv2d = get_conv_layer(conv_type) - self.conv_1x1 = conv2d( - in_channels=input_channels, - out_channels=output_channels - input_channels, - kernel_size=1, - ) - self.first_conv_3x3 = conv2d( - input_channels, - out_channels=output_channels, - kernel_size=kernel_size, - padding=1, - stride=1, - ) - self.relu = nn.ReLU() - self.last_conv_3x3 = conv2d( - in_channels=output_channels, - out_channels=output_channels, - kernel_size=kernel_size, - padding=1, - stride=1, - ) - - def forward(self, x) -> paddle.Tensor: - if self.input_channels < self.output_channels: - sc = self.conv_1x1(x) - sc = paddle.concat(x=[x, sc], axis=1) - else: - sc = x - x2 = self.relu(x) - x2 = self.first_conv_3x3(x2) - x2 = self.relu(x2) - x2 = self.last_conv_3x3(x2) - return x2 + sc - - -class ContextConditioningStack(nn.Layer): - """ - Conditioning Stack using the context images from Skillful Nowcasting, , see https://arxiv.org/pdf/2104.00954.pdf - - Args: - input_channels: Number of input channels per timestep - output_channels: Number of output channels for the lowest block - conv_type: Type of 2D convolution to use, see satflow/models/utils.py for options - """ - - def __init__( - self, - input_channels: int = 1, - output_channels: int = 768, - num_context_steps: int = 4, - conv_type: str = "standard", - ): - super().__init__() - conv2d = get_conv_layer(conv_type) - self.space2depth = nn.PixelUnshuffle(downscale_factor=2) - self.d1 = DBlock( - input_channels=4 * input_channels, - output_channels=output_channels // 4 * input_channels // num_context_steps, - conv_type=conv_type, - ) - self.d2 = DBlock( - input_channels=output_channels // 4 * input_channels // num_context_steps, - output_channels=output_channels // 2 * input_channels // num_context_steps, - conv_type=conv_type, - ) - self.d3 = DBlock( - input_channels=output_channels // 2 * input_channels // num_context_steps, - output_channels=output_channels * input_channels // num_context_steps, - conv_type=conv_type, - ) - self.d4 = DBlock( - input_channels=output_channels * input_channels // num_context_steps, - output_channels=output_channels * 2 * input_channels // num_context_steps, - conv_type=conv_type, - ) - self.conv1 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=output_channels // 4 * input_channels, - out_channels=output_channels // 8 * input_channels, - kernel_size=3, - padding=1, - ) - ) - self.conv2 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=output_channels // 2 * input_channels, - out_channels=output_channels // 4 * input_channels, - kernel_size=3, - padding=1, - ) - ) - self.conv3 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=output_channels * input_channels, - out_channels=output_channels // 2 * input_channels, - kernel_size=3, - padding=1, - ) - ) - self.conv4 = nn.utils.spectral_norm( - layer=conv2d( - in_channels=output_channels * 2 * input_channels, - out_channels=output_channels * input_channels, - kernel_size=3, - padding=1, - ) - ) - self.relu = 
nn.ReLU() - - def forward( - self, x: paddle.Tensor - ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: - if len(x.shape) == 4: - x = self.space2depth(x) - elif len(x.shape) == 5: - B, T = x.shape[0], x.shape[1] - x_reshaped = paddle.reshape(x, [-1] + list(x.shape[2:])) - x = self.space2depth(x_reshaped) - x = paddle.reshape(x, [B, T] + list(x.shape[1:])) - steps = x.shape[1] - scale_1 = [] - scale_2 = [] - scale_3 = [] - scale_4 = [] - for i in range(steps): - s1 = self.d1(x[:, i, :, :, :]) - s2 = self.d2(s1) - s3 = self.d3(s2) - s4 = self.d4(s3) - scale_1.append(s1) - scale_2.append(s2) - scale_3.append(s3) - scale_4.append(s4) - scale_1 = paddle.stack(x=scale_1, axis=1) - scale_2 = paddle.stack(x=scale_2, axis=1) - scale_3 = paddle.stack(x=scale_3, axis=1) - scale_4 = paddle.stack(x=scale_4, axis=1) - scale_1 = self._mixing_layer(scale_1, self.conv1) - scale_2 = self._mixing_layer(scale_2, self.conv2) - scale_3 = self._mixing_layer(scale_3, self.conv3) - scale_4 = self._mixing_layer(scale_4, self.conv4) - return scale_1, scale_2, scale_3, scale_4 - - def _mixing_layer(self, inputs, conv_block): - stacked_inputs = einops.rearrange(inputs, "b t c h w -> b (c t) h w") - return nn.functional.relu(x=conv_block(stacked_inputs)) - - -class LatentConditioningStack(nn.Layer): - """ - Latent conditioning stack from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf - - Args: - shape: Shape of the latent space, Should be (H/32,W/32,x) of the final image shape - output_channels: Number of output channels for the conditioning stack - use_attention: Whether to have a self-attention block or not - """ - - def __init__( - self, - shape: (int, int, int) = (8, 8, 8), - output_channels: int = 768, - use_attention: bool = True, - ): - super().__init__() - self.shape = shape - self.use_attention = use_attention - self.distribution = paddle.distribution.Normal( - loc=paddle.to_tensor(data=[0.0], dtype="float32"), - scale=paddle.to_tensor(data=[2.0], dtype="float32"), - ) - self.conv_3x3 = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=shape[0], - out_channels=shape[0], - kernel_size=(3, 3), - padding=1, - ) - ) - self.l_block1 = LBlock( - input_channels=shape[0], output_channels=output_channels // 32 - ) - self.l_block2 = LBlock( - input_channels=output_channels // 32, output_channels=output_channels // 16 - ) - self.l_block3 = LBlock( - input_channels=output_channels // 16, output_channels=output_channels // 4 - ) - if self.use_attention: - self.att_block = AttentionLayer( - input_channels=output_channels // 4, - output_channels=output_channels // 4, - ) - self.l_block4 = LBlock( - input_channels=output_channels // 4, output_channels=output_channels - ) - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - """ - Args: - x: tensor on the correct device, to move over the latent distribution - Returns: z - """ - z = self.distribution.sample(self.shape) - z = paddle.transpose(x=z, perm=(3, 0, 1, 2)).astype(dtype=x.dtype) - z = self.conv_3x3(z) - z = self.l_block1(z) - z = self.l_block2(z) - z = self.l_block3(z) - z = self.att_block(z) - z = self.l_block4(z) - return z - - -def attention_einsum(q, k, v): - """Apply the attention operator to tensors of shape [h, w, c].""" - k = einops.rearrange(k, "h w c -> (h w) c") - v = einops.rearrange(v, "h w c -> (h w) c") - beta = nn.functional.softmax(x=paddle.einsum("hwc, Lc->hwL", q, k), axis=-1) - out = paddle.einsum("hwL, Lc->hwc", beta, v) - return out - - -class AttentionLayer(nn.Layer): - """Attention Module""" - - 
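A shape sketch for attention_einsum above: it operates on single [h, w, c] feature maps (no batch or head axes) and keeps the query's spatial layout; sizes are illustrative and the function is assumed to be in scope.

import paddle

q = paddle.randn([8, 8, 16])
k = paddle.randn([8, 8, 16])
v = paddle.randn([8, 8, 16])
print(attention_einsum(q, k, v).shape)   # [8, 8, 16]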
def __init__( - self, input_channels: int, output_channels: int, ratio_kq=8, ratio_v=8 - ): - super().__init__() - self.ratio_kq = ratio_kq - self.ratio_v = ratio_v - self.output_channels = output_channels - self.input_channels = input_channels - self.query = nn.Conv2D( - in_channels=input_channels, - out_channels=self.output_channels // self.ratio_kq, - kernel_size=(1, 1), - padding="valid", - bias_attr=False, - ) - self.key = nn.Conv2D( - in_channels=input_channels, - out_channels=self.output_channels // self.ratio_kq, - kernel_size=(1, 1), - padding="valid", - bias_attr=False, - ) - self.value = nn.Conv2D( - in_channels=input_channels, - out_channels=self.output_channels // self.ratio_v, - kernel_size=(1, 1), - padding="valid", - bias_attr=False, - ) - self.last_conv = nn.Conv2D( - in_channels=self.output_channels // 8, - out_channels=self.output_channels, - kernel_size=(1, 1), - padding="valid", - bias_attr=False, - ) - gamma = paddle.create_parameter( - shape=paddle.zeros(shape=[1]).shape, - dtype=paddle.zeros(shape=[1]).numpy().dtype, - default_initializer=nn.initializer.Assign(paddle.zeros(shape=[1])), - ) - gamma.stop_gradient = not True - self.gamma = gamma - - def forward(self, x: paddle.Tensor) -> paddle.Tensor: - query = self.query(x) - key = self.key(x) - value = self.value(x) - out = [] - for b in range(x.shape[0]): - out.append(attention_einsum(query[b], key[b], value[b])) - out = paddle.stack(x=out, axis=0) - out = self.gamma * self.last_conv(out) - return out + x - - -class AddCoords(nn.Layer): - def __init__(self, with_r=False): - super().__init__() - self.with_r = with_r - - def forward(self, input_tensor): - """ - Args: - input_tensor: shape(batch, channel, x_dim, y_dim) - """ - batch_size, _, x_dim, y_dim = input_tensor.shape - xx_channel = paddle.arange(end=x_dim).tile([1, y_dim, 1]) - x = paddle.arange(end=y_dim).tile([1, x_dim, 1]) - perm_0 = list(range(x.ndim)) - perm_0[1] = 2 - perm_0[2] = 1 - yy_channel = x.transpose(perm=perm_0) - xx_channel = xx_channel.astype(dtype="float32") / (x_dim - 1) - yy_channel = yy_channel.astype(dtype="float32") / (y_dim - 1) - xx_channel = xx_channel * 2 - 1 - yy_channel = yy_channel * 2 - 1 - x = xx_channel.tile([batch_size, 1, 1, 1]) - perm_1 = list(range(x.ndim)) - perm_1[2] = 3 - perm_1[3] = 2 - xx_channel = x.transpose(perm=perm_1) - x = yy_channel.tile([batch_size, 1, 1, 1]) - perm_2 = list(range(x.ndim)) - perm_2[2] = 3 - perm_2[3] = 2 - yy_channel = x.transpose(perm=perm_2) - ret = paddle.concat( - x=[ - input_tensor, - xx_channel.astype(dtype=input_tensor.dtype), - yy_channel.astype(dtype=input_tensor.dtype), - ], - axis=1, - ) - if self.with_r: - rr = paddle.sqrt( - x=paddle.pow(x=xx_channel.astype(dtype=input_tensor.dtype) - 0.5, y=2) - + paddle.pow(x=yy_channel.astype(dtype=input_tensor.dtype) - 0.5, y=2) - ) - ret = paddle.concat(x=[ret, rr], axis=1) - return ret - - -class CoordConv(nn.Layer): - def __init__(self, in_channels, out_channels, with_r=False): - super().__init__() - self.addcoords = AddCoords(with_r=with_r) - in_size = in_channels + 2 - if with_r: - in_size += 1 - self.conv = nn.Conv2D(in_size, out_channels) - - def forward(self, x): - ret = self.addcoords(x) - ret = self.conv(ret) - return ret - - -class ConvGRUCell(nn.Layer): - """A ConvGRU implementation. - - Args: - kernel_size: kernel size of the convolutions. Default: 3. - sn_eps: constant for spectral normalization. Default: 1e-4. 
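AddCoords above appends normalized x/y coordinate channels to a feature map, so a [B, C, H, W] input becomes [B, C + 2, H, W] (one extra channel when with_r=True); a minimal check with illustrative sizes:

import paddle

x = paddle.randn([2, 12, 32, 32])
print(AddCoords()(x).shape)              # [2, 14, 32, 32]
print(AddCoords(with_r=True)(x).shape)   # [2, 15, 32, 32]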
- """ - - def __init__( - self, input_channels: int, output_channels: int, kernel_size=3, sn_eps=0.0001 - ): - super().__init__() - self._kernel_size = kernel_size - self._sn_eps = sn_eps - self.read_gate_conv = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=input_channels, - out_channels=output_channels, - kernel_size=(kernel_size, kernel_size), - padding=1, - ), - eps=sn_eps, - ) - self.update_gate_conv = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=input_channels, - out_channels=output_channels, - kernel_size=(kernel_size, kernel_size), - padding=1, - ), - eps=sn_eps, - ) - self.output_conv = nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=input_channels, - out_channels=output_channels, - kernel_size=(kernel_size, kernel_size), - padding=1, - ), - eps=sn_eps, - ) - - def forward(self, x, prev_state): - """ - ConvGRU forward, returning the current+new state - - Args: - x: Input tensor - prev_state: Previous state - - Returns: - New tensor plus the new state - """ - xh = paddle.concat(x=[x, prev_state], axis=1) - read_gate = nn.functional.sigmoid(x=self.read_gate_conv(xh)) - update_gate = nn.functional.sigmoid(x=self.update_gate_conv(xh)) - gated_input = paddle.concat(x=[x, read_gate * prev_state], axis=1) - c = nn.functional.relu(x=self.output_conv(gated_input)) - out = update_gate * prev_state + (1.0 - update_gate) * c - new_state = out - return out, new_state - - -class ConvGRU(nn.Layer): - """ConvGRU Cell wrapper to replace tf.static_rnn in TF implementation""" - - def __init__( - self, - input_channels: int, - output_channels: int, - kernel_size: int = 3, - sn_eps=0.0001, - ): - super().__init__() - self.cell = ConvGRUCell(input_channels, output_channels, kernel_size, sn_eps) - - def forward(self, x: paddle.Tensor, hidden_state=None) -> paddle.Tensor: - outputs = [] - for step in range(len(x)): - output, hidden_state = self.cell(x[step], hidden_state) - outputs.append(output) - outputs = paddle.stack(x=outputs, axis=0) - return outputs - - -def get_conv_layer(conv_type: str = "standard") -> nn.Layer: - if conv_type == "standard": - conv_layer = nn.Conv2D - elif conv_type == "coord": - conv_layer = CoordConv - elif conv_type == "3d": - conv_layer = nn.Conv3D - else: - raise ValueError(f"{conv_type} is not a recognized Conv method") - return conv_layer +from typing import List +from typing import Tuple + +import paddle +import paddle.nn as nn + +from ppsci.arch import base + +try: + import einops +except ModuleNotFoundError: + pass + + +class DGMR(base.Arch): + """Deep Generative Model of Radar. + Nowcasting GAN is an attempt to recreate DeepMind's Skillful Nowcasting GAN from https://arxiv.org/abs/2104.00954. + but slightly modified for multiple satellite channels + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). 
+ forecast_steps (int, optional): Number of steps to predict in the future + input_channels (int, optional): Number of input channels per image + gen_lr (float, optional): Learning rate for the generator + disc_lr (float, optional): Learning rate for the discriminators, shared for both temporal and spatial discriminator + conv_type (str, optional): Type of 2d convolution to use, see satflow/models/utils.py for options + beta1 (float, optional): Beta1 for Adam optimizer + beta2 (float, optional): Beta2 for Adam optimizer + num_samples (int, optional): Number of samples of the latent space to sample for training/validation + grid_lambda (float, optional): Lambda for the grid regularization loss + output_shape (int, optional): Shape of the output predictions, generally should be same as the input shape + generation_steps (int, optional): Number of generation steps to use in forward pass, in paper is 6 and the best is chosen for the loss + this results in huge amounts of GPU memory though, so less might work better for training. + context_channels (int, optional): Number of output channels for the lowest block of conditioning stack + latent_channels (int, optional): Number of channels that the latent space should be reshaped to, + input dimension into ConvGRU, also affects the number of channels for other linked inputs/outputs + + Examples: + >>> import ppsci + >>> import paddle + >>> model = ppsci.arch.DGMR(("input", ), ("output", )) + >>> input_dict = {"input": paddle.randn((1, 4, 1, 256, 256))} + >>> output_dict = model(input_dict) # doctest: +SKIP + >>> print(output_dict["output"].shape) # doctest: +SKIP + [1, 18, 1, 256, 256] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + forecast_steps: int = 18, + input_channels: int = 1, + output_shape: int = 256, + gen_lr: float = 5e-05, + disc_lr: float = 0.0002, + conv_type: str = "standard", + num_samples: int = 6, + grid_lambda: float = 20.0, + beta1: float = 0.0, + beta2: float = 0.999, + latent_channels: int = 768, + context_channels: int = 384, + generation_steps: int = 6, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.gen_lr = gen_lr + self.disc_lr = disc_lr + self.beta1 = beta1 + self.beta2 = beta2 + self.grid_lambda = grid_lambda + self.num_samples = num_samples + self.latent_channels = latent_channels + self.context_channels = context_channels + self.input_channels = input_channels + self.generation_steps = generation_steps + self.conditioning_stack = ContextConditioningStack( + input_channels=input_channels, + conv_type=conv_type, + output_channels=self.context_channels, + ) + self.latent_stack = LatentConditioningStack( + shape=(8 * self.input_channels, output_shape // 32, output_shape // 32), + output_channels=self.latent_channels, + ) + self.sampler = Sampler( + forecast_steps=forecast_steps, + latent_channels=self.latent_channels, + context_channels=self.context_channels, + ) + self.generator = Generator( + self.conditioning_stack, self.latent_stack, self.sampler + ) + self.discriminator = Discriminator(input_channels) + self.global_iteration = 0 + self.automatic_optimization = False + + def split_to_dict( + self, data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...] 
+ ): + return {key: data_tensors[i] for i, key in enumerate(keys)} + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + x_tensor = self.concat_to_tensor(x, self.input_keys) + y = [self.generator(x_tensor)] + y = self.split_to_dict(y, self.output_keys) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y + + +class Sampler(nn.Layer): + """ + Sampler from the Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf + + The sampler takes the output from the Latent and Context conditioning stacks and + creates one stack of ConvGRU layers per future timestep. + + Args: + forecast_steps: Number of forecast steps + latent_channels: Number of input channels to the lowest ConvGRU layer + """ + + def __init__( + self, + forecast_steps: int = 18, + latent_channels: int = 768, + context_channels: int = 384, + output_channels: int = 1, + ): + super().__init__() + self.forecast_steps = forecast_steps + self.convGRU1 = ConvGRU( + input_channels=latent_channels + context_channels, + output_channels=context_channels, + kernel_size=3, + ) + self.gru_conv_1x1 = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=context_channels, + out_channels=latent_channels, + kernel_size=(1, 1), + ) + ) + self.g1 = GBlock( + input_channels=latent_channels, output_channels=latent_channels + ) + self.up_g1 = UpsampleGBlock( + input_channels=latent_channels, output_channels=latent_channels // 2 + ) + self.convGRU2 = ConvGRU( + input_channels=latent_channels // 2 + context_channels // 2, + output_channels=context_channels // 2, + kernel_size=3, + ) + self.gru_conv_1x1_2 = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=context_channels // 2, + out_channels=latent_channels // 2, + kernel_size=(1, 1), + ) + ) + self.g2 = GBlock( + input_channels=latent_channels // 2, output_channels=latent_channels // 2 + ) + self.up_g2 = UpsampleGBlock( + input_channels=latent_channels // 2, output_channels=latent_channels // 4 + ) + self.convGRU3 = ConvGRU( + input_channels=latent_channels // 4 + context_channels // 4, + output_channels=context_channels // 4, + kernel_size=3, + ) + self.gru_conv_1x1_3 = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=context_channels // 4, + out_channels=latent_channels // 4, + kernel_size=(1, 1), + ) + ) + self.g3 = GBlock( + input_channels=latent_channels // 4, output_channels=latent_channels // 4 + ) + self.up_g3 = UpsampleGBlock( + input_channels=latent_channels // 4, output_channels=latent_channels // 8 + ) + self.convGRU4 = ConvGRU( + input_channels=latent_channels // 8 + context_channels // 8, + output_channels=context_channels // 8, + kernel_size=3, + ) + self.gru_conv_1x1_4 = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=context_channels // 8, + out_channels=latent_channels // 8, + kernel_size=(1, 1), + ) + ) + self.g4 = GBlock( + input_channels=latent_channels // 8, output_channels=latent_channels // 8 + ) + self.up_g4 = UpsampleGBlock( + input_channels=latent_channels // 8, output_channels=latent_channels // 16 + ) + self.bn = nn.BatchNorm2D(num_features=latent_channels // 16) + self.relu = nn.ReLU() + self.conv_1x1 = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=latent_channels // 16, + out_channels=4 * output_channels, + kernel_size=(1, 1), + ) + ) + self.depth2space = nn.PixelShuffle(upscale_factor=2) + + def forward( + self, conditioning_states: List[paddle.Tensor], latent_dim: paddle.Tensor + ) -> paddle.Tensor: + """ + Perform the sampling from Skillful 
Nowcasting with GANs + + Args: + conditioning_states: Outputs from the `ContextConditioningStack` with the 4 input states, ordered from largest to smallest spatially + latent_dim: Output from `LatentConditioningStack` for input into the ConvGRUs + Returns: + forecast_steps-length output of images for future timesteps + + """ + init_states = conditioning_states + latent_dim = einops.repeat( + latent_dim, "b c h w -> (repeat b) c h w", repeat=init_states[0].shape[0] + ) + hidden_states = [latent_dim] * self.forecast_steps + + hidden_states = self.convGRU1(hidden_states, init_states[3]) + hidden_states = [self.gru_conv_1x1(h) for h in hidden_states] + hidden_states = [self.g1(h) for h in hidden_states] + hidden_states = [self.up_g1(h) for h in hidden_states] + hidden_states = self.convGRU2(hidden_states, init_states[2]) + hidden_states = [self.gru_conv_1x1_2(h) for h in hidden_states] + hidden_states = [self.g2(h) for h in hidden_states] + hidden_states = [self.up_g2(h) for h in hidden_states] + hidden_states = self.convGRU3(hidden_states, init_states[1]) + hidden_states = [self.gru_conv_1x1_3(h) for h in hidden_states] + hidden_states = [self.g3(h) for h in hidden_states] + hidden_states = [self.up_g3(h) for h in hidden_states] + hidden_states = self.convGRU4(hidden_states, init_states[0]) + hidden_states = [self.gru_conv_1x1_4(h) for h in hidden_states] + hidden_states = [self.g4(h) for h in hidden_states] + hidden_states = [self.up_g4(h) for h in hidden_states] + hidden_states = [nn.functional.relu(x=self.bn(h)) for h in hidden_states] + hidden_states = [self.conv_1x1(h) for h in hidden_states] + hidden_states = [self.depth2space(h) for h in hidden_states] + forecasts = paddle.stack(x=hidden_states, axis=1) + return forecasts + + +class Generator(nn.Layer): + """ + Wraps the three parts of the generator for simpler calling + + Args: + conditioning_stack: A layer representing the conditioning stack. + latent_stack: A layer representing the latent stack. + sampler: A layer representing the sampler. 
+ """ + + def __init__( + self, + conditioning_stack: nn.Layer, + latent_stack: nn.Layer, + sampler: nn.Layer, + ): + super().__init__() + self.conditioning_stack = conditioning_stack + self.latent_stack = latent_stack + self.sampler = sampler + + def forward(self, x): + conditioning_states = self.conditioning_stack(x) + latent_dim = self.latent_stack(x) + x = self.sampler(conditioning_states, latent_dim) + return x + + +class Discriminator(nn.Layer): + def __init__( + self, + input_channels: int = 12, + num_spatial_frames: int = 8, + conv_type: str = "standard", + ): + super().__init__() + self.spatial_discriminator = SpatialDiscriminator( + input_channels=input_channels, + num_timesteps=num_spatial_frames, + conv_type=conv_type, + ) + self.temporal_discriminator = TemporalDiscriminator( + input_channels=input_channels, conv_type=conv_type + ) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + spatial_loss = self.spatial_discriminator(x) + temporal_loss = self.temporal_discriminator(x) + return paddle.concat(x=[spatial_loss, temporal_loss], axis=1) + + +class TemporalDiscriminator(nn.Layer): + """ + Temporal Discriminator from the Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf + + Args: + input_channels: Number of channels per timestep + crop_size: Size of the crop, in the paper half the width of the input images + num_layers: Number of intermediate DBlock layers to use + conv_type: Type of 2d convolutions to use, see satflow/models/utils.py for options + """ + + def __init__( + self, + input_channels: int = 12, + num_layers: int = 3, + conv_type: str = "standard", + ): + super().__init__() + self.downsample = nn.AvgPool3D( + kernel_size=(1, 2, 2), stride=(1, 2, 2), exclusive=False + ) + self.space2depth = nn.PixelUnshuffle(downscale_factor=2) + internal_chn = 48 + self.d1 = DBlock( + input_channels=4 * input_channels, + output_channels=internal_chn * input_channels, + conv_type="3d", + first_relu=False, + ) + self.d2 = DBlock( + input_channels=internal_chn * input_channels, + output_channels=2 * internal_chn * input_channels, + conv_type="3d", + ) + self.intermediate_dblocks = nn.LayerList() + for _ in range(num_layers): + internal_chn *= 2 + self.intermediate_dblocks.append( + DBlock( + input_channels=internal_chn * input_channels, + output_channels=2 * internal_chn * input_channels, + conv_type=conv_type, + ) + ) + self.d_last = DBlock( + input_channels=2 * internal_chn * input_channels, + output_channels=2 * internal_chn * input_channels, + keep_same_output=True, + conv_type=conv_type, + ) + self.fc = nn.utils.spectral_norm( + layer=nn.Linear( + in_features=2 * internal_chn * input_channels, out_features=1 + ) + ) + self.relu = nn.ReLU() + self.bn = nn.BatchNorm1D(num_features=2 * internal_chn * input_channels) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + x = self.downsample(x) + if len(x.shape) == 4: + x = self.space2depth(x) + elif len(x.shape) == 5: + B, T = x.shape[0], x.shape[1] + x_reshaped = paddle.reshape(x, [-1] + list(x.shape[2:])) + x = self.space2depth(x_reshaped) + x = paddle.reshape(x, [B, T] + list(x.shape[1:])) + x = paddle.transpose(x=x, perm=(0, 2, 1, 3, 4)) + x = self.d1(x) + x = self.d2(x) + x = paddle.transpose(x=x, perm=(0, 2, 1, 3, 4)) + representations = [] + for idx in range(x.shape[1]): + rep = x[:, idx, :, :, :] + for d in self.intermediate_dblocks: + rep = d(rep) + rep = self.d_last(rep) + rep = paddle.sum(x=nn.functional.relu(x=rep), axis=[2, 3]) + rep = self.bn(rep) + rep = self.fc(rep) + representations.append(rep) 
+ x = paddle.stack(x=representations, axis=1) + x = paddle.sum(x=x, keepdim=True, axis=1) + return x + + +class SpatialDiscriminator(nn.Layer): + """ + Spatial discriminator from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf + + Args: + input_channels: Number of input channels per timestep + num_timesteps: Number of timesteps to use, in the paper 8/18 timesteps were chosen + num_layers: Number of intermediate DBlock layers to use + conv_type: Type of 2d convolutions to use, see satflow/models/utils.py for options + """ + + def __init__( + self, + input_channels: int = 12, + num_timesteps: int = 8, + num_layers: int = 4, + conv_type: str = "standard", + ): + super().__init__() + self.num_timesteps = num_timesteps + self.mean_pool = nn.AvgPool2D(kernel_size=2, exclusive=False) + self.space2depth = nn.PixelUnshuffle(downscale_factor=2) + internal_chn = 24 + self.d1 = DBlock( + input_channels=4 * input_channels, + output_channels=2 * internal_chn * input_channels, + first_relu=False, + conv_type=conv_type, + ) + self.intermediate_dblocks = nn.LayerList() + for _ in range(num_layers): + internal_chn *= 2 + self.intermediate_dblocks.append( + DBlock( + input_channels=internal_chn * input_channels, + output_channels=2 * internal_chn * input_channels, + conv_type=conv_type, + ) + ) + self.d6 = DBlock( + input_channels=2 * internal_chn * input_channels, + output_channels=2 * internal_chn * input_channels, + keep_same_output=True, + conv_type=conv_type, + ) + self.fc = nn.utils.spectral_norm( + layer=nn.Linear( + in_features=2 * internal_chn * input_channels, out_features=1 + ) + ) + self.relu = nn.ReLU() + self.bn = nn.BatchNorm1D(num_features=2 * internal_chn * input_channels) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + idxs = paddle.randint(low=0, high=x.shape[1], shape=(self.num_timesteps,)) + representations = [] + for idx in idxs: + rep = self.mean_pool(x[:, idx, :, :, :]) + if len(rep.shape) == 4: + rep = self.space2depth(rep) + elif len(rep.shape) == 5: + B, T = rep.shape[0], rep.shape[1] + rep_reshaped = paddle.reshape(rep, [-1] + list(rep.shape[2:])) + rep = self.space2depth(rep_reshaped) + rep = paddle.reshape(rep, [B, T] + list(rep.shape[1:])) + rep = self.d1(rep) + for d in self.intermediate_dblocks: + rep = d(rep) + rep = self.d6(rep) + rep = paddle.sum(x=nn.functional.relu(x=rep), axis=[2, 3]) + rep = self.bn(rep) + rep = self.fc(rep) + """ + Pseudocode from DeepMind + # Sum-pool the representations and feed to spectrally normalized lin. layer. + y = tf.reduce_sum(tf.nn.relu(y), axis=[1, 2]) + y = layers.BatchNorm(calc_sigma=False)(y) + output_layer = layers.Linear(output_size=1) + output = output_layer(y) + + # Take the sum across the t samples. Note: we apply the ReLU to + # (1 - score_real) and (1 + score_generated) in the loss. + output = tf.reshape(output, [b, n, 1]) + output = tf.reduce_sum(output, keepdims=True, axis=1) + return output + """ + representations.append(rep) + x = paddle.stack(x=representations, axis=1) + x = paddle.sum(x=x, keepdim=True, axis=1) + return x + + +class GBlock(nn.Layer): + """Residual generator block without upsampling. 
G Block from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf + + Args: + input_channels: Number of input channels + output_channels: Number of output channels + conv_type: Type of convolution desired, see satflow/models/utils.py for options + """ + + def __init__( + self, + input_channels: int = 12, + output_channels: int = 12, + conv_type: str = "standard", + spectral_normalized_eps=0.0001, + ): + super().__init__() + self.output_channels = output_channels + self.bn1 = nn.BatchNorm2D(num_features=input_channels) + self.bn2 = nn.BatchNorm2D(num_features=input_channels) + self.relu = nn.ReLU() + conv2d = get_conv_layer(conv_type) + self.conv_1x1 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=input_channels, out_channels=output_channels, kernel_size=1 + ), + eps=spectral_normalized_eps, + ) + self.first_conv_3x3 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=input_channels, + out_channels=input_channels, + kernel_size=3, + padding=1, + ), + eps=spectral_normalized_eps, + ) + self.last_conv_3x3 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=3, + padding=1, + ), + eps=spectral_normalized_eps, + ) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + if x.shape[1] != self.output_channels: + sc = self.conv_1x1(x) + else: + sc = x + x2 = self.bn1(x) + x2 = self.relu(x2) + x2 = self.first_conv_3x3(x2) + x2 = self.bn2(x2) + x2 = self.relu(x2) + x2 = self.last_conv_3x3(x2) + x = x2 + sc + return x + + +class UpsampleGBlock(nn.Layer): + """Residual generator block with upsampling + G Block from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf + + Args: + input_channels: Number of input channels + output_channels: Number of output channels + conv_type: Type of convolution desired, see satflow/models/utils.py for options + """ + + def __init__( + self, + input_channels: int = 12, + output_channels: int = 12, + conv_type: str = "standard", + spectral_normalized_eps=0.0001, + ): + super().__init__() + self.output_channels = output_channels + self.bn1 = nn.BatchNorm2D(num_features=input_channels) + self.bn2 = nn.BatchNorm2D(num_features=input_channels) + self.relu = nn.ReLU() + conv2d = get_conv_layer(conv_type) + self.conv_1x1 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=input_channels, out_channels=output_channels, kernel_size=1 + ), + eps=spectral_normalized_eps, + ) + self.upsample = nn.Upsample(scale_factor=2, mode="nearest") + self.first_conv_3x3 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=input_channels, + out_channels=input_channels, + kernel_size=3, + padding=1, + ), + eps=spectral_normalized_eps, + ) + self.last_conv_3x3 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=3, + padding=1, + ), + eps=spectral_normalized_eps, + ) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + sc = self.upsample(x) + sc = self.conv_1x1(sc) + x2 = self.bn1(x) + x2 = self.relu(x2) + x2 = self.upsample(x2) + x2 = self.first_conv_3x3(x2) + x2 = self.bn2(x2) + x2 = self.relu(x2) + x2 = self.last_conv_3x3(x2) + x = x2 + sc + return x + + +class DBlock(nn.Layer): + """ + D and 3D Block from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf + + Args: + input_channels: Number of input channels + output_channels: Number of output channels + conv_type: Convolution type, see satflow/models/utils.py for options + first_relu: Whether to have an ReLU before the first 3x3 convolution + 
keep_same_output: Whether the output should have the same spatial dimensions as input, if False, downscales by 2 + """ + + def __init__( + self, + input_channels: int = 12, + output_channels: int = 12, + conv_type: str = "standard", + first_relu: bool = True, + keep_same_output: bool = False, + ): + super().__init__() + self.input_channels = input_channels + self.output_channels = output_channels + self.first_relu = first_relu + self.keep_same_output = keep_same_output + self.conv_type = conv_type + conv2d = get_conv_layer(conv_type) + if conv_type == "3d": + self.pooling = nn.AvgPool3D(kernel_size=2, stride=2, exclusive=False) + else: + self.pooling = nn.AvgPool2D(kernel_size=2, stride=2, exclusive=False) + self.conv_1x1 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=input_channels, out_channels=output_channels, kernel_size=1 + ) + ) + self.first_conv_3x3 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=3, + padding=1, + ) + ) + self.last_conv_3x3 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=output_channels, + out_channels=output_channels, + kernel_size=3, + padding=1, + stride=1, + ) + ) + self.relu = nn.ReLU() + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + if self.input_channels != self.output_channels: + x1 = self.conv_1x1(x) + if not self.keep_same_output: + x1 = self.pooling(x1) + else: + x1 = x + if self.first_relu: + x = self.relu(x) + x = self.first_conv_3x3(x) + x = self.relu(x) + x = self.last_conv_3x3(x) + if not self.keep_same_output: + x = self.pooling(x) + x = x1 + x + return x + + +class LBlock(nn.Layer): + """Residual block for the Latent Stack. + L-Block for increasing the number of channels in the input + from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf + + Args: + input_channels: Number of input channels + output_channels: Number of output channels + conv_type: Which type of convolution desired, see satflow/models/utils.py for options + """ + + def __init__( + self, + input_channels: int = 12, + output_channels: int = 12, + kernel_size: int = 3, + conv_type: str = "standard", + ): + super().__init__() + self.input_channels = input_channels + self.output_channels = output_channels + conv2d = get_conv_layer(conv_type) + self.conv_1x1 = conv2d( + in_channels=input_channels, + out_channels=output_channels - input_channels, + kernel_size=1, + ) + self.first_conv_3x3 = conv2d( + input_channels, + out_channels=output_channels, + kernel_size=kernel_size, + padding=1, + stride=1, + ) + self.relu = nn.ReLU() + self.last_conv_3x3 = conv2d( + in_channels=output_channels, + out_channels=output_channels, + kernel_size=kernel_size, + padding=1, + stride=1, + ) + + def forward(self, x) -> paddle.Tensor: + if self.input_channels < self.output_channels: + sc = self.conv_1x1(x) + sc = paddle.concat(x=[x, sc], axis=1) + else: + sc = x + x2 = self.relu(x) + x2 = self.first_conv_3x3(x2) + x2 = self.relu(x2) + x2 = self.last_conv_3x3(x2) + return x2 + sc + + +class ContextConditioningStack(nn.Layer): + """ + Conditioning Stack using the context images from Skillful Nowcasting, , see https://arxiv.org/pdf/2104.00954.pdf + + Args: + input_channels: Number of input channels per timestep + output_channels: Number of output channels for the lowest block + conv_type: Type of 2D convolution to use, see satflow/models/utils.py for options + """ + + def __init__( + self, + input_channels: int = 1, + output_channels: int = 768, + num_context_steps: int = 4, + conv_type: str = 
"standard", + ): + super().__init__() + conv2d = get_conv_layer(conv_type) + self.space2depth = nn.PixelUnshuffle(downscale_factor=2) + self.d1 = DBlock( + input_channels=4 * input_channels, + output_channels=output_channels // 4 * input_channels // num_context_steps, + conv_type=conv_type, + ) + self.d2 = DBlock( + input_channels=output_channels // 4 * input_channels // num_context_steps, + output_channels=output_channels // 2 * input_channels // num_context_steps, + conv_type=conv_type, + ) + self.d3 = DBlock( + input_channels=output_channels // 2 * input_channels // num_context_steps, + output_channels=output_channels * input_channels // num_context_steps, + conv_type=conv_type, + ) + self.d4 = DBlock( + input_channels=output_channels * input_channels // num_context_steps, + output_channels=output_channels * 2 * input_channels // num_context_steps, + conv_type=conv_type, + ) + self.conv1 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=output_channels // 4 * input_channels, + out_channels=output_channels // 8 * input_channels, + kernel_size=3, + padding=1, + ) + ) + self.conv2 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=output_channels // 2 * input_channels, + out_channels=output_channels // 4 * input_channels, + kernel_size=3, + padding=1, + ) + ) + self.conv3 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=output_channels * input_channels, + out_channels=output_channels // 2 * input_channels, + kernel_size=3, + padding=1, + ) + ) + self.conv4 = nn.utils.spectral_norm( + layer=conv2d( + in_channels=output_channels * 2 * input_channels, + out_channels=output_channels * input_channels, + kernel_size=3, + padding=1, + ) + ) + self.relu = nn.ReLU() + + def forward( + self, x: paddle.Tensor + ) -> Tuple[paddle.Tensor, paddle.Tensor, paddle.Tensor, paddle.Tensor]: + if len(x.shape) == 4: + x = self.space2depth(x) + elif len(x.shape) == 5: + B, T = x.shape[0], x.shape[1] + x_reshaped = paddle.reshape(x, [-1] + list(x.shape[2:])) + x = self.space2depth(x_reshaped) + x = paddle.reshape(x, [B, T] + list(x.shape[1:])) + steps = x.shape[1] + scale_1 = [] + scale_2 = [] + scale_3 = [] + scale_4 = [] + for i in range(steps): + s1 = self.d1(x[:, i, :, :, :]) + s2 = self.d2(s1) + s3 = self.d3(s2) + s4 = self.d4(s3) + scale_1.append(s1) + scale_2.append(s2) + scale_3.append(s3) + scale_4.append(s4) + scale_1 = paddle.stack(x=scale_1, axis=1) + scale_2 = paddle.stack(x=scale_2, axis=1) + scale_3 = paddle.stack(x=scale_3, axis=1) + scale_4 = paddle.stack(x=scale_4, axis=1) + scale_1 = self._mixing_layer(scale_1, self.conv1) + scale_2 = self._mixing_layer(scale_2, self.conv2) + scale_3 = self._mixing_layer(scale_3, self.conv3) + scale_4 = self._mixing_layer(scale_4, self.conv4) + return scale_1, scale_2, scale_3, scale_4 + + def _mixing_layer(self, inputs, conv_block): + stacked_inputs = einops.rearrange(inputs, "b t c h w -> b (c t) h w") + return nn.functional.relu(x=conv_block(stacked_inputs)) + + +class LatentConditioningStack(nn.Layer): + """ + Latent conditioning stack from Skillful Nowcasting, see https://arxiv.org/pdf/2104.00954.pdf + + Args: + shape: Shape of the latent space, Should be (H/32,W/32,x) of the final image shape + output_channels: Number of output channels for the conditioning stack + use_attention: Whether to have a self-attention block or not + """ + + def __init__( + self, + shape: (int, int, int) = (8, 8, 8), + output_channels: int = 768, + use_attention: bool = True, + ): + super().__init__() + self.shape = shape + self.use_attention = use_attention + 
self.distribution = paddle.distribution.Normal( + loc=paddle.to_tensor(data=[0.0], dtype="float32"), + scale=paddle.to_tensor(data=[2.0], dtype="float32"), + ) + self.conv_3x3 = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=shape[0], + out_channels=shape[0], + kernel_size=(3, 3), + padding=1, + ) + ) + self.l_block1 = LBlock( + input_channels=shape[0], output_channels=output_channels // 32 + ) + self.l_block2 = LBlock( + input_channels=output_channels // 32, output_channels=output_channels // 16 + ) + self.l_block3 = LBlock( + input_channels=output_channels // 16, output_channels=output_channels // 4 + ) + if self.use_attention: + self.att_block = AttentionLayer( + input_channels=output_channels // 4, + output_channels=output_channels // 4, + ) + self.l_block4 = LBlock( + input_channels=output_channels // 4, output_channels=output_channels + ) + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + """ + Args: + x: tensor on the correct device, to move over the latent distribution + Returns: z + """ + z = self.distribution.sample(self.shape) + z = paddle.transpose(x=z, perm=(3, 0, 1, 2)).astype(dtype=x.dtype) + z = self.conv_3x3(z) + z = self.l_block1(z) + z = self.l_block2(z) + z = self.l_block3(z) + z = self.att_block(z) + z = self.l_block4(z) + return z + + +def attention_einsum(q, k, v): + """Apply the attention operator to tensors of shape [h, w, c].""" + k = einops.rearrange(k, "h w c -> (h w) c") + v = einops.rearrange(v, "h w c -> (h w) c") + beta = nn.functional.softmax(x=paddle.einsum("hwc, Lc->hwL", q, k), axis=-1) + out = paddle.einsum("hwL, Lc->hwc", beta, v) + return out + + +class AttentionLayer(nn.Layer): + """Attention Module""" + + def __init__( + self, input_channels: int, output_channels: int, ratio_kq=8, ratio_v=8 + ): + super().__init__() + self.ratio_kq = ratio_kq + self.ratio_v = ratio_v + self.output_channels = output_channels + self.input_channels = input_channels + self.query = nn.Conv2D( + in_channels=input_channels, + out_channels=self.output_channels // self.ratio_kq, + kernel_size=(1, 1), + padding="valid", + bias_attr=False, + ) + self.key = nn.Conv2D( + in_channels=input_channels, + out_channels=self.output_channels // self.ratio_kq, + kernel_size=(1, 1), + padding="valid", + bias_attr=False, + ) + self.value = nn.Conv2D( + in_channels=input_channels, + out_channels=self.output_channels // self.ratio_v, + kernel_size=(1, 1), + padding="valid", + bias_attr=False, + ) + self.last_conv = nn.Conv2D( + in_channels=self.output_channels // 8, + out_channels=self.output_channels, + kernel_size=(1, 1), + padding="valid", + bias_attr=False, + ) + gamma = paddle.create_parameter( + shape=paddle.zeros(shape=[1]).shape, + dtype=paddle.zeros(shape=[1]).numpy().dtype, + default_initializer=nn.initializer.Assign(paddle.zeros(shape=[1])), + ) + gamma.stop_gradient = not True + self.gamma = gamma + + def forward(self, x: paddle.Tensor) -> paddle.Tensor: + query = self.query(x) + key = self.key(x) + value = self.value(x) + out = [] + for b in range(x.shape[0]): + out.append(attention_einsum(query[b], key[b], value[b])) + out = paddle.stack(x=out, axis=0) + out = self.gamma * self.last_conv(out) + return out + x + + +class AddCoords(nn.Layer): + def __init__(self, with_r=False): + super().__init__() + self.with_r = with_r + + def forward(self, input_tensor): + """ + Args: + input_tensor: shape(batch, channel, x_dim, y_dim) + """ + batch_size, _, x_dim, y_dim = input_tensor.shape + xx_channel = paddle.arange(end=x_dim).tile([1, y_dim, 1]) + x = 
paddle.arange(end=y_dim).tile([1, x_dim, 1]) + perm_0 = list(range(x.ndim)) + perm_0[1] = 2 + perm_0[2] = 1 + yy_channel = x.transpose(perm=perm_0) + xx_channel = xx_channel.astype(dtype="float32") / (x_dim - 1) + yy_channel = yy_channel.astype(dtype="float32") / (y_dim - 1) + xx_channel = xx_channel * 2 - 1 + yy_channel = yy_channel * 2 - 1 + x = xx_channel.tile([batch_size, 1, 1, 1]) + perm_1 = list(range(x.ndim)) + perm_1[2] = 3 + perm_1[3] = 2 + xx_channel = x.transpose(perm=perm_1) + x = yy_channel.tile([batch_size, 1, 1, 1]) + perm_2 = list(range(x.ndim)) + perm_2[2] = 3 + perm_2[3] = 2 + yy_channel = x.transpose(perm=perm_2) + ret = paddle.concat( + x=[ + input_tensor, + xx_channel.astype(dtype=input_tensor.dtype), + yy_channel.astype(dtype=input_tensor.dtype), + ], + axis=1, + ) + if self.with_r: + rr = paddle.sqrt( + x=paddle.pow(x=xx_channel.astype(dtype=input_tensor.dtype) - 0.5, y=2) + + paddle.pow(x=yy_channel.astype(dtype=input_tensor.dtype) - 0.5, y=2) + ) + ret = paddle.concat(x=[ret, rr], axis=1) + return ret + + +class CoordConv(nn.Layer): + def __init__(self, in_channels, out_channels, with_r=False): + super().__init__() + self.addcoords = AddCoords(with_r=with_r) + in_size = in_channels + 2 + if with_r: + in_size += 1 + self.conv = nn.Conv2D(in_size, out_channels) + + def forward(self, x): + ret = self.addcoords(x) + ret = self.conv(ret) + return ret + + +class ConvGRUCell(nn.Layer): + """A ConvGRU implementation. + + Args: + kernel_size: kernel size of the convolutions. Default: 3. + sn_eps: constant for spectral normalization. Default: 1e-4. + """ + + def __init__( + self, input_channels: int, output_channels: int, kernel_size=3, sn_eps=0.0001 + ): + super().__init__() + self._kernel_size = kernel_size + self._sn_eps = sn_eps + self.read_gate_conv = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=(kernel_size, kernel_size), + padding=1, + ), + eps=sn_eps, + ) + self.update_gate_conv = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=(kernel_size, kernel_size), + padding=1, + ), + eps=sn_eps, + ) + self.output_conv = nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=input_channels, + out_channels=output_channels, + kernel_size=(kernel_size, kernel_size), + padding=1, + ), + eps=sn_eps, + ) + + def forward(self, x, prev_state): + """ + ConvGRU forward, returning the current+new state + + Args: + x: Input tensor + prev_state: Previous state + + Returns: + New tensor plus the new state + """ + xh = paddle.concat(x=[x, prev_state], axis=1) + read_gate = nn.functional.sigmoid(x=self.read_gate_conv(xh)) + update_gate = nn.functional.sigmoid(x=self.update_gate_conv(xh)) + gated_input = paddle.concat(x=[x, read_gate * prev_state], axis=1) + c = nn.functional.relu(x=self.output_conv(gated_input)) + out = update_gate * prev_state + (1.0 - update_gate) * c + new_state = out + return out, new_state + + +class ConvGRU(nn.Layer): + """ConvGRU Cell wrapper to replace tf.static_rnn in TF implementation""" + + def __init__( + self, + input_channels: int, + output_channels: int, + kernel_size: int = 3, + sn_eps=0.0001, + ): + super().__init__() + self.cell = ConvGRUCell(input_channels, output_channels, kernel_size, sn_eps) + + def forward(self, x: paddle.Tensor, hidden_state=None) -> paddle.Tensor: + outputs = [] + for step in range(len(x)): + output, hidden_state = self.cell(x[step], hidden_state) + outputs.append(output) + outputs = 
paddle.stack(x=outputs, axis=0) + return outputs + + +def get_conv_layer(conv_type: str = "standard") -> nn.Layer: + if conv_type == "standard": + conv_layer = nn.Conv2D + elif conv_type == "coord": + conv_layer = CoordConv + elif conv_type == "3d": + conv_layer = nn.Conv3D + else: + raise ValueError(f"{conv_type} is not a recognized Conv method") + return conv_layer diff --git a/ppsci/arch/embedding_koopman.py b/ppsci/arch/embedding_koopman.py index 367b5cd3ca..0a33d12fd5 100644 --- a/ppsci/arch/embedding_koopman.py +++ b/ppsci/arch/embedding_koopman.py @@ -1,544 +1,544 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [transformer-physx](https://github.com/zabaras/transformer-physx) -""" - -from __future__ import annotations - -from typing import Optional -from typing import Tuple - -import numpy as np -import paddle -from paddle import nn -from paddle.nn.initializer import Constant -from paddle.nn.initializer import Uniform - -from ppsci.arch import base - -zeros_ = Constant(value=0.0) -ones_ = Constant(value=1.0) - - -class LorenzEmbedding(base.Arch): - """Embedding Koopman model for the Lorenz ODE system. - - Args: - input_keys (Tuple[str, ...]): Input keys, such as ("states",). - output_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). - mean (Optional[Tuple[float, ...]]): Mean of training dataset. Defaults to None. - std (Optional[Tuple[float, ...]]): Standard Deviation of training dataset. Defaults to None. - input_size (int, optional): Size of input data. Defaults to 3. - hidden_size (int, optional): Number of hidden size. Defaults to 500. - embed_size (int, optional): Number of embedding size. Defaults to 32. - drop (float, optional): Probability of dropout the units. Defaults to 0.0. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.LorenzEmbedding( - ... input_keys=("x", "y"), - ... output_keys=("u", "v"), - ... input_size=3, - ... hidden_size=500, - ... embed_size=32, - ... drop=0.0, - ... mean=None, - ... std=None, - ... ) - >>> x_shape = [8, 3, 2] - >>> y_shape = [8, 3, 1] - >>> input_dict = {"x": paddle.rand(x_shape), - ... 
"y": paddle.rand(y_shape)} - >>> output_dict = model(input_dict) - >>> print(output_dict["u"].shape) - [8, 2, 3] - >>> print(output_dict["v"].shape) - [8, 3, 3] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - mean: Optional[Tuple[float, ...]] = None, - std: Optional[Tuple[float, ...]] = None, - input_size: int = 3, - hidden_size: int = 500, - embed_size: int = 32, - drop: float = 0.0, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.input_size = input_size - self.hidden_size = hidden_size - self.embed_size = embed_size - - # build observable network - self.encoder_net = self.build_encoder(input_size, hidden_size, embed_size, drop) - # build koopman operator - self.k_diag, self.k_ut = self.build_koopman_operator(embed_size) - # build recovery network - self.decoder_net = self.build_decoder(input_size, hidden_size, embed_size) - - mean = [0.0, 0.0, 0.0] if mean is None else mean - std = [1.0, 1.0, 1.0] if std is None else std - self.register_buffer("mean", paddle.to_tensor(mean).reshape([1, 3])) - self.register_buffer("std", paddle.to_tensor(std).reshape([1, 3])) - - self.apply(self._init_weights) - - def _init_weights(self, m: nn.Layer): - if isinstance(m, nn.Linear): - k = 1 / m.weight.shape[0] - uniform = Uniform(-(k**0.5), k**0.5) - uniform(m.weight) - if m.bias is not None: - uniform(m.bias) - elif isinstance(m, nn.LayerNorm): - zeros_(m.bias) - ones_(m.weight) - - def build_encoder( - self, input_size: int, hidden_size: int, embed_size: int, drop: float = 0.0 - ): - net = nn.Sequential( - nn.Linear(input_size, hidden_size), - nn.ReLU(), - nn.Linear(hidden_size, embed_size), - nn.LayerNorm(embed_size), - nn.Dropout(drop), - ) - return net - - def build_decoder(self, input_size: int, hidden_size: int, embed_size: int): - net = nn.Sequential( - nn.Linear(embed_size, hidden_size), - nn.ReLU(), - nn.Linear(hidden_size, input_size), - ) - return net - - def build_koopman_operator(self, embed_size: int): - # Learned Koopman operator - data = paddle.linspace(1, 0, embed_size) - k_diag = paddle.create_parameter( - shape=data.shape, - dtype=paddle.get_default_dtype(), - default_initializer=nn.initializer.Assign(data), - ) - - data = 0.1 * paddle.rand([2 * embed_size - 3]) - k_ut = paddle.create_parameter( - shape=data.shape, - dtype=paddle.get_default_dtype(), - default_initializer=nn.initializer.Assign(data), - ) - return k_diag, k_ut - - def encoder(self, x: paddle.Tensor): - x = self._normalize(x) - g = self.encoder_net(x) - return g - - def decoder(self, g: paddle.Tensor): - out = self.decoder_net(g) - x = self._unnormalize(out) - return x - - def koopman_operation(self, embed_data: paddle.Tensor, k_matrix: paddle.Tensor): - # Apply Koopman operation - embed_pred_data = paddle.bmm( - k_matrix.expand( - [embed_data.shape[0], k_matrix.shape[0], k_matrix.shape[1]] - ), - embed_data.transpose([0, 2, 1]), - ).transpose([0, 2, 1]) - return embed_pred_data - - def _normalize(self, x: paddle.Tensor): - return (x - self.mean) / self.std - - def _unnormalize(self, x: paddle.Tensor): - return self.std * x + self.mean - - def get_koopman_matrix(self): - # # Koopman operator - k_ut_tensor = self.k_ut * 1 - k_ut_tensor = paddle.diag( - k_ut_tensor[0 : self.embed_size - 1], offset=1 - ) + paddle.diag(k_ut_tensor[self.embed_size - 1 :], offset=2) - k_matrix = k_ut_tensor + (-1) * k_ut_tensor.t() - k_matrix = k_matrix + paddle.diag(self.k_diag) - return k_matrix - - def forward_tensor(self, x): - k_matrix = 
self.get_koopman_matrix() - embed_data = self.encoder(x) - recover_data = self.decoder(embed_data) - - embed_pred_data = self.koopman_operation(embed_data, k_matrix) - pred_data = self.decoder(embed_pred_data) - - return (pred_data[:, :-1, :], recover_data, k_matrix) - - @staticmethod - def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): - return {key: data_tensors[i] for i, key in enumerate(keys)} - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - x_tensor = self.concat_to_tensor(x, self.input_keys, axis=-1) - y = self.forward_tensor(x_tensor) - y = self.split_to_dict(y, self.output_keys) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y - - -class RosslerEmbedding(LorenzEmbedding): - """Embedding Koopman model for the Rossler ODE system. - - Args: - input_keys (Tuple[str, ...]): Input keys, such as ("states",). - output_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). - mean (Optional[Tuple[float, ...]]): Mean of training dataset. Defaults to None. - std (Optional[Tuple[float, ...]]): Standard Deviation of training dataset. Defaults to None. - input_size (int, optional): Size of input data. Defaults to 3. - hidden_size (int, optional): Number of hidden size. Defaults to 500. - embed_size (int, optional): Number of embedding size. Defaults to 32. - drop (float, optional): Probability of dropout the units. Defaults to 0.0. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.RosslerEmbedding( - ... input_keys=("x", "y"), - ... output_keys=("u", "v"), - ... input_size=3, - ... hidden_size=500, - ... embed_size=32, - ... drop=0.0, - ... mean=None, - ... std=None, - ... ) - >>> x_shape = [8, 3, 2] - >>> y_shape = [8, 3, 1] - >>> input_dict = {"x": paddle.rand(x_shape), - ... "y": paddle.rand(y_shape)} - >>> output_dict = model(input_dict) - >>> print(output_dict["u"].shape) - [8, 2, 3] - >>> print(output_dict["v"].shape) - [8, 3, 3] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - mean: Optional[Tuple[float, ...]] = None, - std: Optional[Tuple[float, ...]] = None, - input_size: int = 3, - hidden_size: int = 500, - embed_size: int = 32, - drop: float = 0.0, - ): - super().__init__( - input_keys, - output_keys, - mean, - std, - input_size, - hidden_size, - embed_size, - drop, - ) - - -class CylinderEmbedding(base.Arch): - """Embedding Koopman model for the Cylinder system. - - Args: - input_keys (Tuple[str, ...]): Input keys, such as ("states", "visc"). - output_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). - mean (Optional[Tuple[float, ...]]): Mean of training dataset. Defaults to None. - std (Optional[Tuple[float, ...]]): Standard Deviation of training dataset. Defaults to None. - embed_size (int, optional): Number of embedding size. Defaults to 128. - encoder_channels (Optional[Tuple[int, ...]]): Number of channels in encoder network. Defaults to None. - decoder_channels (Optional[Tuple[int, ...]]): Number of channels in decoder network. Defaults to None. - drop (float, optional): Probability of dropout the units. Defaults to 0.0. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.CylinderEmbedding(("states", "visc"), ("pred_states", "recover_states")) - >>> states_shape = [32, 10, 3, 64, 128] - >>> visc_shape = [32, 1] - >>> input_dict = {"states" : paddle.rand(states_shape), - ... 
"visc" : paddle.rand(visc_shape)} - >>> out_dict = model(input_dict) - >>> print(out_dict["pred_states"].shape) - [32, 9, 3, 64, 128] - >>> print(out_dict["recover_states"].shape) - [32, 10, 3, 64, 128] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - mean: Optional[Tuple[float, ...]] = None, - std: Optional[Tuple[float, ...]] = None, - embed_size: int = 128, - encoder_channels: Optional[Tuple[int, ...]] = None, - decoder_channels: Optional[Tuple[int, ...]] = None, - drop: float = 0.0, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.embed_size = embed_size - - X, Y = np.meshgrid(np.linspace(-2, 14, 128), np.linspace(-4, 4, 64)) - self.mask = paddle.to_tensor(np.sqrt(X**2 + Y**2)).unsqueeze(0).unsqueeze(0) - - encoder_channels = ( - [4, 16, 32, 64, 128] if encoder_channels is None else encoder_channels - ) - decoder_channels = ( - [embed_size // 32, 128, 64, 32, 16] - if decoder_channels is None - else decoder_channels - ) - self.encoder_net = self.build_encoder(embed_size, encoder_channels, drop) - self.k_diag_net, self.k_ut_net, self.k_lt_net = self.build_koopman_operator( - embed_size - ) - self.decoder_net = self.build_decoder(decoder_channels) - - xidx = [] - yidx = [] - for i in range(1, 5): - yidx.append(np.arange(i, embed_size)) - xidx.append(np.arange(0, embed_size - i)) - self.xidx = paddle.to_tensor(np.concatenate(xidx), dtype="int64") - self.yidx = paddle.to_tensor(np.concatenate(yidx), dtype="int64") - - mean = [0.0, 0.0, 0.0, 0.0] if mean is None else mean - std = [1.0, 1.0, 1.0, 1.0] if std is None else std - self.register_buffer("mean", paddle.to_tensor(mean).reshape([1, 4, 1, 1])) - self.register_buffer("std", paddle.to_tensor(std).reshape([1, 4, 1, 1])) - - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - k = 1 / m.weight.shape[0] - uniform = Uniform(-(k**0.5), k**0.5) - uniform(m.weight) - if m.bias is not None: - uniform(m.bias) - elif isinstance(m, nn.LayerNorm): - zeros_(m.bias) - ones_(m.weight) - elif isinstance(m, nn.Conv2D): - k = 1 / (m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3]) - uniform = Uniform(-(k**0.5), k**0.5) - uniform(m.weight) - if m.bias is not None: - uniform(m.bias) - - def _build_conv_relu_list( - self, in_channels: Tuple[int, ...], out_channels: Tuple[int, ...] - ): - net_list = [ - nn.Conv2D( - in_channels, - out_channels, - kernel_size=(3, 3), - stride=2, - padding=1, - padding_mode="replicate", - ), - nn.ReLU(), - ] - return net_list - - def build_encoder( - self, embed_size: int, channels: Tuple[int, ...], drop: float = 0.0 - ): - net = [] - for i in range(1, len(channels)): - net.extend(self._build_conv_relu_list(channels[i - 1], channels[i])) - net.append( - nn.Conv2D( - channels[-1], - embed_size // 32, - kernel_size=(3, 3), - padding=1, - padding_mode="replicate", - ) - ) - net.append( - nn.LayerNorm( - (4, 4, 8), - ) - ) - net.append(nn.Dropout(drop)) - net = nn.Sequential(*net) - return net - - def _build_upsample_conv_relu( - self, in_channels: Tuple[int, ...], out_channels: Tuple[int, ...] 
- ): - net_list = [ - nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), - nn.Conv2D( - in_channels, - out_channels, - kernel_size=(3, 3), - stride=1, - padding=1, - padding_mode="replicate", - ), - nn.ReLU(), - ] - return net_list - - def build_decoder(self, channels: Tuple[int, ...]): - net = [] - for i in range(1, len(channels)): - net.extend(self._build_upsample_conv_relu(channels[i - 1], channels[i])) - net.append( - nn.Conv2D( - channels[-1], - 3, - kernel_size=(3, 3), - stride=1, - padding=1, - padding_mode="replicate", - ), - ) - net = nn.Sequential(*net) - return net - - def build_koopman_operator(self, embed_size: int): - # Learned Koopman operator parameters - k_diag_net = nn.Sequential( - nn.Linear(1, 50), nn.ReLU(), nn.Linear(50, embed_size) - ) - - k_ut_net = nn.Sequential( - nn.Linear(1, 50), nn.ReLU(), nn.Linear(50, 4 * embed_size - 10) - ) - k_lt_net = nn.Sequential( - nn.Linear(1, 50), nn.ReLU(), nn.Linear(50, 4 * embed_size - 10) - ) - return k_diag_net, k_ut_net, k_lt_net - - def encoder(self, x: paddle.Tensor, viscosity: paddle.Tensor): - B, T, C, H, W = x.shape - x = x.reshape((B * T, C, H, W)) - viscosity = viscosity.repeat_interleave(T, axis=1).reshape((B * T, 1)) - x = paddle.concat( - [x, viscosity.unsqueeze(-1).unsqueeze(-1) * paddle.ones_like(x[:, :1])], - axis=1, - ) - x = self._normalize(x) - g = self.encoder_net(x) - g = g.reshape([B, T, -1]) - return g - - def decoder(self, g: paddle.Tensor): - B, T, _ = g.shape - x = self.decoder_net(g.reshape([-1, self.embed_size // 32, 4, 8])) - x = self._unnormalize(x) - mask0 = ( - self.mask.repeat_interleave(x.shape[1], axis=1).repeat_interleave( - x.shape[0], axis=0 - ) - < 1 - ) - x[mask0] = 0 - _, C, H, W = x.shape - x = x.reshape([B, T, C, H, W]) - return x - - def get_koopman_matrix(self, g: paddle.Tensor, visc: paddle.Tensor): - # # Koopman operator - kMatrix = paddle.zeros([g.shape[0], self.embed_size, self.embed_size]) - kMatrix.stop_gradient = False - # Populate the off diagonal terms - kMatrixUT_data = self.k_ut_net(100 * visc) - kMatrixLT_data = self.k_lt_net(100 * visc) - - kMatrix = kMatrix.transpose([1, 2, 0]) - kMatrixUT_data_t = kMatrixUT_data.transpose([1, 0]) - kMatrixLT_data_t = kMatrixLT_data.transpose([1, 0]) - kMatrix[self.xidx, self.yidx] = kMatrixUT_data_t - kMatrix[self.yidx, self.xidx] = kMatrixLT_data_t - - # Populate the diagonal - ind = np.diag_indices(kMatrix.shape[1]) - ind = paddle.to_tensor(ind, dtype="int64") - - kMatrixDiag = self.k_diag_net(100 * visc) - kMatrixDiag_t = kMatrixDiag.transpose([1, 0]) - kMatrix[ind[0], ind[1]] = kMatrixDiag_t - return kMatrix.transpose([2, 0, 1]) - - def koopman_operation(self, embed_data: paddle.Tensor, k_matrix: paddle.Tensor): - embed_pred_data = paddle.bmm( - k_matrix, embed_data.transpose([0, 2, 1]) - ).transpose([0, 2, 1]) - return embed_pred_data - - def _normalize(self, x: paddle.Tensor): - x = (x - self.mean) / self.std - return x - - def _unnormalize(self, x: paddle.Tensor): - return self.std[:, :3] * x + self.mean[:, :3] - - def forward_tensor(self, states, visc): - # states.shape=(B, T, C, H, W) - embed_data = self.encoder(states, visc) - recover_data = self.decoder(embed_data) - - k_matrix = self.get_koopman_matrix(embed_data, visc) - embed_pred_data = self.koopman_operation(embed_data, k_matrix) - pred_data = self.decoder(embed_pred_data) - - return (pred_data[:, :-1], recover_data, k_matrix) - - @staticmethod - def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): - return {key: data_tensors[i] 
for i, key in enumerate(keys)} - - def forward(self, x): - - if self._input_transform is not None: - x = self._input_transform(x) - - y = self.forward_tensor(**x) - y = self.split_to_dict(y, self.output_keys) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [transformer-physx](https://github.com/zabaras/transformer-physx) +""" + +from __future__ import annotations + +from typing import Optional +from typing import Tuple + +import numpy as np +import paddle +from paddle import nn +from paddle.nn.initializer import Constant +from paddle.nn.initializer import Uniform + +from ppsci.arch import base + +zeros_ = Constant(value=0.0) +ones_ = Constant(value=1.0) + + +class LorenzEmbedding(base.Arch): + """Embedding Koopman model for the Lorenz ODE system. + + Args: + input_keys (Tuple[str, ...]): Input keys, such as ("states",). + output_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). + mean (Optional[Tuple[float, ...]]): Mean of training dataset. Defaults to None. + std (Optional[Tuple[float, ...]]): Standard Deviation of training dataset. Defaults to None. + input_size (int, optional): Size of input data. Defaults to 3. + hidden_size (int, optional): Number of hidden size. Defaults to 500. + embed_size (int, optional): Number of embedding size. Defaults to 32. + drop (float, optional): Probability of dropout the units. Defaults to 0.0. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.LorenzEmbedding( + ... input_keys=("x", "y"), + ... output_keys=("u", "v"), + ... input_size=3, + ... hidden_size=500, + ... embed_size=32, + ... drop=0.0, + ... mean=None, + ... std=None, + ... ) + >>> x_shape = [8, 3, 2] + >>> y_shape = [8, 3, 1] + >>> input_dict = {"x": paddle.rand(x_shape), + ... 
"y": paddle.rand(y_shape)} + >>> output_dict = model(input_dict) + >>> print(output_dict["u"].shape) + [8, 2, 3] + >>> print(output_dict["v"].shape) + [8, 3, 3] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + mean: Optional[Tuple[float, ...]] = None, + std: Optional[Tuple[float, ...]] = None, + input_size: int = 3, + hidden_size: int = 500, + embed_size: int = 32, + drop: float = 0.0, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.input_size = input_size + self.hidden_size = hidden_size + self.embed_size = embed_size + + # build observable network + self.encoder_net = self.build_encoder(input_size, hidden_size, embed_size, drop) + # build koopman operator + self.k_diag, self.k_ut = self.build_koopman_operator(embed_size) + # build recovery network + self.decoder_net = self.build_decoder(input_size, hidden_size, embed_size) + + mean = [0.0, 0.0, 0.0] if mean is None else mean + std = [1.0, 1.0, 1.0] if std is None else std + self.register_buffer("mean", paddle.to_tensor(mean).reshape([1, 3])) + self.register_buffer("std", paddle.to_tensor(std).reshape([1, 3])) + + self.apply(self._init_weights) + + def _init_weights(self, m: nn.Layer): + if isinstance(m, nn.Linear): + k = 1 / m.weight.shape[0] + uniform = Uniform(-(k**0.5), k**0.5) + uniform(m.weight) + if m.bias is not None: + uniform(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + + def build_encoder( + self, input_size: int, hidden_size: int, embed_size: int, drop: float = 0.0 + ): + net = nn.Sequential( + nn.Linear(input_size, hidden_size), + nn.ReLU(), + nn.Linear(hidden_size, embed_size), + nn.LayerNorm(embed_size), + nn.Dropout(drop), + ) + return net + + def build_decoder(self, input_size: int, hidden_size: int, embed_size: int): + net = nn.Sequential( + nn.Linear(embed_size, hidden_size), + nn.ReLU(), + nn.Linear(hidden_size, input_size), + ) + return net + + def build_koopman_operator(self, embed_size: int): + # Learned Koopman operator + data = paddle.linspace(1, 0, embed_size) + k_diag = paddle.create_parameter( + shape=data.shape, + dtype=paddle.get_default_dtype(), + default_initializer=nn.initializer.Assign(data), + ) + + data = 0.1 * paddle.rand([2 * embed_size - 3]) + k_ut = paddle.create_parameter( + shape=data.shape, + dtype=paddle.get_default_dtype(), + default_initializer=nn.initializer.Assign(data), + ) + return k_diag, k_ut + + def encoder(self, x: paddle.Tensor): + x = self._normalize(x) + g = self.encoder_net(x) + return g + + def decoder(self, g: paddle.Tensor): + out = self.decoder_net(g) + x = self._unnormalize(out) + return x + + def koopman_operation(self, embed_data: paddle.Tensor, k_matrix: paddle.Tensor): + # Apply Koopman operation + embed_pred_data = paddle.bmm( + k_matrix.expand( + [embed_data.shape[0], k_matrix.shape[0], k_matrix.shape[1]] + ), + embed_data.transpose([0, 2, 1]), + ).transpose([0, 2, 1]) + return embed_pred_data + + def _normalize(self, x: paddle.Tensor): + return (x - self.mean) / self.std + + def _unnormalize(self, x: paddle.Tensor): + return self.std * x + self.mean + + def get_koopman_matrix(self): + # # Koopman operator + k_ut_tensor = self.k_ut * 1 + k_ut_tensor = paddle.diag( + k_ut_tensor[0 : self.embed_size - 1], offset=1 + ) + paddle.diag(k_ut_tensor[self.embed_size - 1 :], offset=2) + k_matrix = k_ut_tensor + (-1) * k_ut_tensor.t() + k_matrix = k_matrix + paddle.diag(self.k_diag) + return k_matrix + + def forward_tensor(self, x): + k_matrix = 
self.get_koopman_matrix() + embed_data = self.encoder(x) + recover_data = self.decoder(embed_data) + + embed_pred_data = self.koopman_operation(embed_data, k_matrix) + pred_data = self.decoder(embed_pred_data) + + return (pred_data[:, :-1, :], recover_data, k_matrix) + + @staticmethod + def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): + return {key: data_tensors[i] for i, key in enumerate(keys)} + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + x_tensor = self.concat_to_tensor(x, self.input_keys, axis=-1) + y = self.forward_tensor(x_tensor) + y = self.split_to_dict(y, self.output_keys) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y + + +class RosslerEmbedding(LorenzEmbedding): + """Embedding Koopman model for the Rossler ODE system. + + Args: + input_keys (Tuple[str, ...]): Input keys, such as ("states",). + output_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). + mean (Optional[Tuple[float, ...]]): Mean of training dataset. Defaults to None. + std (Optional[Tuple[float, ...]]): Standard Deviation of training dataset. Defaults to None. + input_size (int, optional): Size of input data. Defaults to 3. + hidden_size (int, optional): Number of hidden size. Defaults to 500. + embed_size (int, optional): Number of embedding size. Defaults to 32. + drop (float, optional): Probability of dropout the units. Defaults to 0.0. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.RosslerEmbedding( + ... input_keys=("x", "y"), + ... output_keys=("u", "v"), + ... input_size=3, + ... hidden_size=500, + ... embed_size=32, + ... drop=0.0, + ... mean=None, + ... std=None, + ... ) + >>> x_shape = [8, 3, 2] + >>> y_shape = [8, 3, 1] + >>> input_dict = {"x": paddle.rand(x_shape), + ... "y": paddle.rand(y_shape)} + >>> output_dict = model(input_dict) + >>> print(output_dict["u"].shape) + [8, 2, 3] + >>> print(output_dict["v"].shape) + [8, 3, 3] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + mean: Optional[Tuple[float, ...]] = None, + std: Optional[Tuple[float, ...]] = None, + input_size: int = 3, + hidden_size: int = 500, + embed_size: int = 32, + drop: float = 0.0, + ): + super().__init__( + input_keys, + output_keys, + mean, + std, + input_size, + hidden_size, + embed_size, + drop, + ) + + +class CylinderEmbedding(base.Arch): + """Embedding Koopman model for the Cylinder system. + + Args: + input_keys (Tuple[str, ...]): Input keys, such as ("states", "visc"). + output_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). + mean (Optional[Tuple[float, ...]]): Mean of training dataset. Defaults to None. + std (Optional[Tuple[float, ...]]): Standard Deviation of training dataset. Defaults to None. + embed_size (int, optional): Number of embedding size. Defaults to 128. + encoder_channels (Optional[Tuple[int, ...]]): Number of channels in encoder network. Defaults to None. + decoder_channels (Optional[Tuple[int, ...]]): Number of channels in decoder network. Defaults to None. + drop (float, optional): Probability of dropout the units. Defaults to 0.0. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.CylinderEmbedding(("states", "visc"), ("pred_states", "recover_states")) + >>> states_shape = [32, 10, 3, 64, 128] + >>> visc_shape = [32, 1] + >>> input_dict = {"states" : paddle.rand(states_shape), + ... 
"visc" : paddle.rand(visc_shape)} + >>> out_dict = model(input_dict) + >>> print(out_dict["pred_states"].shape) + [32, 9, 3, 64, 128] + >>> print(out_dict["recover_states"].shape) + [32, 10, 3, 64, 128] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + mean: Optional[Tuple[float, ...]] = None, + std: Optional[Tuple[float, ...]] = None, + embed_size: int = 128, + encoder_channels: Optional[Tuple[int, ...]] = None, + decoder_channels: Optional[Tuple[int, ...]] = None, + drop: float = 0.0, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.embed_size = embed_size + + X, Y = np.meshgrid(np.linspace(-2, 14, 128), np.linspace(-4, 4, 64)) + self.mask = paddle.to_tensor(np.sqrt(X**2 + Y**2)).unsqueeze(0).unsqueeze(0) + + encoder_channels = ( + [4, 16, 32, 64, 128] if encoder_channels is None else encoder_channels + ) + decoder_channels = ( + [embed_size // 32, 128, 64, 32, 16] + if decoder_channels is None + else decoder_channels + ) + self.encoder_net = self.build_encoder(embed_size, encoder_channels, drop) + self.k_diag_net, self.k_ut_net, self.k_lt_net = self.build_koopman_operator( + embed_size + ) + self.decoder_net = self.build_decoder(decoder_channels) + + xidx = [] + yidx = [] + for i in range(1, 5): + yidx.append(np.arange(i, embed_size)) + xidx.append(np.arange(0, embed_size - i)) + self.xidx = paddle.to_tensor(np.concatenate(xidx), dtype="int64") + self.yidx = paddle.to_tensor(np.concatenate(yidx), dtype="int64") + + mean = [0.0, 0.0, 0.0, 0.0] if mean is None else mean + std = [1.0, 1.0, 1.0, 1.0] if std is None else std + self.register_buffer("mean", paddle.to_tensor(mean).reshape([1, 4, 1, 1])) + self.register_buffer("std", paddle.to_tensor(std).reshape([1, 4, 1, 1])) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + k = 1 / m.weight.shape[0] + uniform = Uniform(-(k**0.5), k**0.5) + uniform(m.weight) + if m.bias is not None: + uniform(m.bias) + elif isinstance(m, nn.LayerNorm): + zeros_(m.bias) + ones_(m.weight) + elif isinstance(m, nn.Conv2D): + k = 1 / (m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3]) + uniform = Uniform(-(k**0.5), k**0.5) + uniform(m.weight) + if m.bias is not None: + uniform(m.bias) + + def _build_conv_relu_list( + self, in_channels: Tuple[int, ...], out_channels: Tuple[int, ...] + ): + net_list = [ + nn.Conv2D( + in_channels, + out_channels, + kernel_size=(3, 3), + stride=2, + padding=1, + padding_mode="replicate", + ), + nn.ReLU(), + ] + return net_list + + def build_encoder( + self, embed_size: int, channels: Tuple[int, ...], drop: float = 0.0 + ): + net = [] + for i in range(1, len(channels)): + net.extend(self._build_conv_relu_list(channels[i - 1], channels[i])) + net.append( + nn.Conv2D( + channels[-1], + embed_size // 32, + kernel_size=(3, 3), + padding=1, + padding_mode="replicate", + ) + ) + net.append( + nn.LayerNorm( + (4, 4, 8), + ) + ) + net.append(nn.Dropout(drop)) + net = nn.Sequential(*net) + return net + + def _build_upsample_conv_relu( + self, in_channels: Tuple[int, ...], out_channels: Tuple[int, ...] 
+ ): + net_list = [ + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True), + nn.Conv2D( + in_channels, + out_channels, + kernel_size=(3, 3), + stride=1, + padding=1, + padding_mode="replicate", + ), + nn.ReLU(), + ] + return net_list + + def build_decoder(self, channels: Tuple[int, ...]): + net = [] + for i in range(1, len(channels)): + net.extend(self._build_upsample_conv_relu(channels[i - 1], channels[i])) + net.append( + nn.Conv2D( + channels[-1], + 3, + kernel_size=(3, 3), + stride=1, + padding=1, + padding_mode="replicate", + ), + ) + net = nn.Sequential(*net) + return net + + def build_koopman_operator(self, embed_size: int): + # Learned Koopman operator parameters + k_diag_net = nn.Sequential( + nn.Linear(1, 50), nn.ReLU(), nn.Linear(50, embed_size) + ) + + k_ut_net = nn.Sequential( + nn.Linear(1, 50), nn.ReLU(), nn.Linear(50, 4 * embed_size - 10) + ) + k_lt_net = nn.Sequential( + nn.Linear(1, 50), nn.ReLU(), nn.Linear(50, 4 * embed_size - 10) + ) + return k_diag_net, k_ut_net, k_lt_net + + def encoder(self, x: paddle.Tensor, viscosity: paddle.Tensor): + B, T, C, H, W = x.shape + x = x.reshape((B * T, C, H, W)) + viscosity = viscosity.repeat_interleave(T, axis=1).reshape((B * T, 1)) + x = paddle.concat( + [x, viscosity.unsqueeze(-1).unsqueeze(-1) * paddle.ones_like(x[:, :1])], + axis=1, + ) + x = self._normalize(x) + g = self.encoder_net(x) + g = g.reshape([B, T, -1]) + return g + + def decoder(self, g: paddle.Tensor): + B, T, _ = g.shape + x = self.decoder_net(g.reshape([-1, self.embed_size // 32, 4, 8])) + x = self._unnormalize(x) + mask0 = ( + self.mask.repeat_interleave(x.shape[1], axis=1).repeat_interleave( + x.shape[0], axis=0 + ) + < 1 + ) + x[mask0] = 0 + _, C, H, W = x.shape + x = x.reshape([B, T, C, H, W]) + return x + + def get_koopman_matrix(self, g: paddle.Tensor, visc: paddle.Tensor): + # # Koopman operator + kMatrix = paddle.zeros([g.shape[0], self.embed_size, self.embed_size]) + kMatrix.stop_gradient = False + # Populate the off diagonal terms + kMatrixUT_data = self.k_ut_net(100 * visc) + kMatrixLT_data = self.k_lt_net(100 * visc) + + kMatrix = kMatrix.transpose([1, 2, 0]) + kMatrixUT_data_t = kMatrixUT_data.transpose([1, 0]) + kMatrixLT_data_t = kMatrixLT_data.transpose([1, 0]) + kMatrix[self.xidx, self.yidx] = kMatrixUT_data_t + kMatrix[self.yidx, self.xidx] = kMatrixLT_data_t + + # Populate the diagonal + ind = np.diag_indices(kMatrix.shape[1]) + ind = paddle.to_tensor(ind, dtype="int64") + + kMatrixDiag = self.k_diag_net(100 * visc) + kMatrixDiag_t = kMatrixDiag.transpose([1, 0]) + kMatrix[ind[0], ind[1]] = kMatrixDiag_t + return kMatrix.transpose([2, 0, 1]) + + def koopman_operation(self, embed_data: paddle.Tensor, k_matrix: paddle.Tensor): + embed_pred_data = paddle.bmm( + k_matrix, embed_data.transpose([0, 2, 1]) + ).transpose([0, 2, 1]) + return embed_pred_data + + def _normalize(self, x: paddle.Tensor): + x = (x - self.mean) / self.std + return x + + def _unnormalize(self, x: paddle.Tensor): + return self.std[:, :3] * x + self.mean[:, :3] + + def forward_tensor(self, states, visc): + # states.shape=(B, T, C, H, W) + embed_data = self.encoder(states, visc) + recover_data = self.decoder(embed_data) + + k_matrix = self.get_koopman_matrix(embed_data, visc) + embed_pred_data = self.koopman_operation(embed_data, k_matrix) + pred_data = self.decoder(embed_pred_data) + + return (pred_data[:, :-1], recover_data, k_matrix) + + @staticmethod + def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): + return {key: data_tensors[i] 
for i, key in enumerate(keys)} + + def forward(self, x): + + if self._input_transform is not None: + x = self._input_transform(x) + + y = self.forward_tensor(**x) + y = self.split_to_dict(y, self.output_keys) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y diff --git a/ppsci/arch/epnn.py b/ppsci/arch/epnn.py index 0f6a9ffed6..7a7ad1d79c 100644 --- a/ppsci/arch/epnn.py +++ b/ppsci/arch/epnn.py @@ -1,126 +1,126 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Elasto-Plastic Neural Network (EPNN) - -# DEVELOPED AT: -# COMPUTATIONAL GEOMECHANICS LABORATORY -# DEPARTMENT OF CIVIL ENGINEERING -# UNIVERSITY OF CALGARY, AB, CANADA -# DIRECTOR: Prof. Richard Wan - -# DEVELOPED BY: -# MAHDAD EGHBALIAN - -# MIT License - -# Copyright (c) 2022 Mahdad Eghbalian - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -from typing import Tuple - -import paddle.nn as nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base - - -class Epnn(base.Arch): - """Builds a feedforward network with arbitrary layers. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). - node_sizes (Tuple[int, ...]): The tuple of node size. - activations (Tuple[str, ...]): Name of activation functions. - drop_p (float): The parameter p of nn.Dropout. - - Examples: - >>> import ppsci - >>> ann_node_sizes_state = [1, 20] - >>> model = ppsci.arch.Epnn( - ... ("x",), - ... ("y",), - ... node_sizes=ann_node_sizes_state, - ... activations=("leaky_relu",), - ... drop_p=0.0, - ... 
) - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - node_sizes: Tuple[int, ...], - activations: Tuple[str, ...], - drop_p: float, - ): - super().__init__() - self.active_func = [ - act_mod.get_activation(act_name) for act_name in activations - ] - self.node_sizes = node_sizes - self.drop_p = drop_p - self.layers = [] - self.layers.append( - nn.Linear(in_features=node_sizes[0], out_features=node_sizes[1]) - ) - layer_sizes = zip(node_sizes[1:-2], node_sizes[2:-1]) - self.layers.extend( - [nn.Linear(in_features=h1, out_features=h2) for h1, h2 in layer_sizes] - ) - self.layers.append( - nn.Linear( - in_features=node_sizes[-2], out_features=node_sizes[-1], bias_attr=False - ) - ) - - self.layers = nn.LayerList(self.layers) - self.dropout = nn.Dropout(p=drop_p) - self.input_keys = input_keys - self.output_keys = output_keys - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - y = x[self.input_keys[0]] - for ilayer in range(len(self.layers)): - y = self.layers[ilayer](y) - if ilayer != len(self.layers) - 1: - y = self.active_func[ilayer + 1](y) - if ilayer != len(self.layers) - 1: - y = self.dropout(y) - y = self.split_to_dict(y, self.output_keys, axis=-1) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Elasto-Plastic Neural Network (EPNN) + +# DEVELOPED AT: +# COMPUTATIONAL GEOMECHANICS LABORATORY +# DEPARTMENT OF CIVIL ENGINEERING +# UNIVERSITY OF CALGARY, AB, CANADA +# DIRECTOR: Prof. Richard Wan + +# DEVELOPED BY: +# MAHDAD EGHBALIAN + +# MIT License + +# Copyright (c) 2022 Mahdad Eghbalian + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
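# --- Editor's note (illustrative sketch, not part of this diff): how the `Epnn`
# feedforward network defined below might be instantiated and called. The node
# sizes, the length-3 activation tuple and the batch size are assumptions that
# extend the docstring example; treat this as a sketch, not repository code.
import paddle
import ppsci

model = ppsci.arch.Epnn(
    ("x",),
    ("y",),
    node_sizes=[1, 20, 20, 1],            # input -> two hidden layers -> output (assumed sizes)
    activations=("leaky_relu",) * 3,      # forward() indexes activations[1:], so >= 3 entries needed
    drop_p=0.0,
)
out = model({"x": paddle.rand([16, 1])})  # forward() expects a dict keyed by input_keys
print(out["y"].shape)                     # [16, 1]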
+ +from typing import Tuple + +import paddle.nn as nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base + + +class Epnn(base.Arch): + """Builds a feedforward network with arbitrary layers. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). + output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). + node_sizes (Tuple[int, ...]): The tuple of node size. + activations (Tuple[str, ...]): Name of activation functions. + drop_p (float): The parameter p of nn.Dropout. + + Examples: + >>> import ppsci + >>> ann_node_sizes_state = [1, 20] + >>> model = ppsci.arch.Epnn( + ... ("x",), + ... ("y",), + ... node_sizes=ann_node_sizes_state, + ... activations=("leaky_relu",), + ... drop_p=0.0, + ... ) + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + node_sizes: Tuple[int, ...], + activations: Tuple[str, ...], + drop_p: float, + ): + super().__init__() + self.active_func = [ + act_mod.get_activation(act_name) for act_name in activations + ] + self.node_sizes = node_sizes + self.drop_p = drop_p + self.layers = [] + self.layers.append( + nn.Linear(in_features=node_sizes[0], out_features=node_sizes[1]) + ) + layer_sizes = zip(node_sizes[1:-2], node_sizes[2:-1]) + self.layers.extend( + [nn.Linear(in_features=h1, out_features=h2) for h1, h2 in layer_sizes] + ) + self.layers.append( + nn.Linear( + in_features=node_sizes[-2], out_features=node_sizes[-1], bias_attr=False + ) + ) + + self.layers = nn.LayerList(self.layers) + self.dropout = nn.Dropout(p=drop_p) + self.input_keys = input_keys + self.output_keys = output_keys + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + y = x[self.input_keys[0]] + for ilayer in range(len(self.layers)): + y = self.layers[ilayer](y) + if ilayer != len(self.layers) - 1: + y = self.active_func[ilayer + 1](y) + if ilayer != len(self.layers) - 1: + y = self.dropout(y) + y = self.split_to_dict(y, self.output_keys, axis=-1) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y diff --git a/ppsci/arch/extformer_moe_cuboid.py b/ppsci/arch/extformer_moe_cuboid.py index bdd6311e2b..7a7aa6ed34 100644 --- a/ppsci/arch/extformer_moe_cuboid.py +++ b/ppsci/arch/extformer_moe_cuboid.py @@ -1,996 +1,996 @@ -from typing import Sequence -from typing import Tuple -from typing import Union - -import paddle -from paddle import nn - -import ppsci.arch.extformer_moe_cuboid_decoder as cuboid_decoder -import ppsci.arch.extformer_moe_cuboid_encoder as cuboid_encoder -import ppsci.arch.extformer_moe_cuboid_utils as cuboid_utils -from ppsci.arch import activation as act_mod -from ppsci.arch import base -from ppsci.arch import extformer_moe_utils -from ppsci.arch.extformer_moe_cuboid_encoder import NEGATIVE_SLOPE -from ppsci.utils import initializer - -"""A space-time Transformer with Cuboid Attention""" - - -class InitialEncoder(nn.Layer): - def __init__( - self, - dim, - out_dim, - downsample_scale: Union[int, Sequence[int]], - num_conv_layers: int = 2, - activation: str = "leaky", - padding_type: str = "nearest", - conv_init_mode: str = "0", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super(InitialEncoder, self).__init__() - self.num_conv_layers = num_conv_layers - self.conv_init_mode = conv_init_mode - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - conv_block = [] - for i in range(num_conv_layers): - if i == 
0: - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=dim, - out_channels=out_dim, - ) - ) - conv_block.append(nn.GroupNorm(num_groups=16, num_channels=out_dim)) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - else: - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=out_dim, - out_channels=out_dim, - ) - ) - conv_block.append(nn.GroupNorm(num_groups=16, num_channels=out_dim)) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - self.conv_block = nn.Sequential(*conv_block) - if isinstance(downsample_scale, int): - patch_merge_downsample = (1, downsample_scale, downsample_scale) - elif len(downsample_scale) == 2: - patch_merge_downsample = (1, *downsample_scale) - elif len(downsample_scale) == 3: - patch_merge_downsample = tuple(downsample_scale) - else: - raise NotImplementedError( - f"downsample_scale {downsample_scale} format not supported!" - ) - self.patch_merge = cuboid_encoder.PatchMerging3D( - dim=out_dim, - out_dim=out_dim, - padding_type=padding_type, - downsample=patch_merge_downsample, - linear_init_mode=linear_init_mode, - norm_init_mode=norm_init_mode, - ) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, - conv_mode=self.conv_init_mode, - linear_mode=self.linear_init_mode, - norm_mode=self.norm_init_mode, - ) - - def forward(self, x): - """x --> [K x Conv2D] --> PatchMerge - - Args: - x: (B, T, H, W, C) - - Returns: - out: (B, T, H_new, W_new, C_out) - """ - - B, T, H, W, C = x.shape - - if self.num_conv_layers > 0: - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = self.conv_block(x).transpose(perm=[0, 2, 3, 1]) - x = self.patch_merge(x.reshape([B, T, H, W, -1])) - else: - x = self.patch_merge(x) - return x - - -class FinalDecoder(nn.Layer): - def __init__( - self, - target_thw: Tuple[int, ...], - dim: int, - num_conv_layers: int = 2, - activation: str = "leaky", - conv_init_mode: str = "0", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super(FinalDecoder, self).__init__() - self.target_thw = target_thw - self.dim = dim - self.num_conv_layers = num_conv_layers - self.conv_init_mode = conv_init_mode - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - conv_block = [] - for i in range(num_conv_layers): - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=dim, - out_channels=dim, - ) - ) - conv_block.append(nn.GroupNorm(num_groups=16, num_channels=dim)) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - self.conv_block = nn.Sequential(*conv_block) - self.upsample = cuboid_decoder.Upsample3DLayer( - dim=dim, - out_dim=dim, - target_size=target_thw, - kernel_size=3, - conv_init_mode=conv_init_mode, - ) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, - conv_mode=self.conv_init_mode, - linear_mode=self.linear_init_mode, - norm_mode=self.norm_init_mode, - ) - - def forward(self, x): - """x --> Upsample --> [K x Conv2D] - - Args: - x: (B, T, H, W, C) - - Returns: - out: (B, T, H_new, W_new, C) - """ - - x = self.upsample(x) - if self.num_conv_layers > 0: - B, T, H, W, C = x.shape - x = 
x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = ( - self.conv_block(x) - .transpose(perm=[0, 2, 3, 1]) - .reshape([B, T, H, W, -1]) - ) - return x - - -class InitialStackPatchMergingEncoder(nn.Layer): - def __init__( - self, - num_merge: int, - in_dim: int, - out_dim_list: Tuple[int, ...], - downsample_scale_list: Tuple[float, ...], - num_conv_per_merge_list: Tuple[int, ...] = None, - activation: str = "leaky", - padding_type: str = "nearest", - conv_init_mode: str = "0", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super(InitialStackPatchMergingEncoder, self).__init__() - self.conv_init_mode = conv_init_mode - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - self.num_merge = num_merge - self.in_dim = in_dim - self.out_dim_list = out_dim_list[:num_merge] - self.downsample_scale_list = downsample_scale_list[:num_merge] - self.num_conv_per_merge_list = num_conv_per_merge_list - self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list] - self.conv_block_list = nn.LayerList() - self.patch_merge_list = nn.LayerList() - for i in range(num_merge): - if i == 0: - in_dim = in_dim - else: - in_dim = self.out_dim_list[i - 1] - out_dim = self.out_dim_list[i] - downsample_scale = self.downsample_scale_list[i] - conv_block = [] - for j in range(self.num_conv_per_merge_list[i]): - if j == 0: - conv_in_dim = in_dim - else: - conv_in_dim = out_dim - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=conv_in_dim, - out_channels=out_dim, - ) - ) - conv_block.append( - nn.GroupNorm( - num_groups=self.num_group_list[i], num_channels=out_dim - ) - ) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - conv_block = nn.Sequential(*conv_block) - self.conv_block_list.append(conv_block) - patch_merge = cuboid_encoder.PatchMerging3D( - dim=out_dim, - out_dim=out_dim, - padding_type=padding_type, - downsample=(1, downsample_scale, downsample_scale), - linear_init_mode=linear_init_mode, - norm_init_mode=norm_init_mode, - ) - self.patch_merge_list.append(patch_merge) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, - conv_mode=self.conv_init_mode, - linear_mode=self.linear_init_mode, - norm_mode=self.norm_init_mode, - ) - - def get_out_shape_list(self, input_shape): - out_shape_list = [] - for patch_merge in self.patch_merge_list: - input_shape = patch_merge.get_out_shape(input_shape) - out_shape_list.append(input_shape) - return out_shape_list - - def forward(self, x): - """x --> [K x Conv2D] --> PatchMerge --> ... --> [K x Conv2D] --> PatchMerge - - Args: - x: (B, T, H, W, C) - - Returns: - out: (B, T, H_new, W_new, C_out) - """ - - for i, (conv_block, patch_merge) in enumerate( - zip(self.conv_block_list, self.patch_merge_list) - ): - B, T, H, W, C = x.shape - if self.num_conv_per_merge_list[i] > 0: - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = conv_block(x).transpose(perm=[0, 2, 3, 1]).reshape([B, T, H, W, -1]) - x = patch_merge(x) - return x - - -class FinalStackUpsamplingDecoder(nn.Layer): - def __init__( - self, - target_shape_list: Tuple[Tuple[int, ...]], - in_dim: int, - num_conv_per_up_list: Tuple[int, ...] 
= None, - activation: str = "leaky", - conv_init_mode: str = "0", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super(FinalStackUpsamplingDecoder, self).__init__() - self.conv_init_mode = conv_init_mode - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - self.target_shape_list = target_shape_list - self.out_dim_list = [ - target_shape[-1] for target_shape in self.target_shape_list - ] - self.num_upsample = len(target_shape_list) - self.in_dim = in_dim - self.num_conv_per_up_list = num_conv_per_up_list - self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list] - self.conv_block_list = nn.LayerList() - self.upsample_list = nn.LayerList() - for i in range(self.num_upsample): - if i == 0: - in_dim = in_dim - else: - in_dim = self.out_dim_list[i - 1] - out_dim = self.out_dim_list[i] - upsample = cuboid_decoder.Upsample3DLayer( - dim=in_dim, - out_dim=in_dim, - target_size=target_shape_list[i][:-1], - kernel_size=3, - conv_init_mode=conv_init_mode, - ) - self.upsample_list.append(upsample) - conv_block = [] - for j in range(num_conv_per_up_list[i]): - if j == 0: - conv_in_dim = in_dim - else: - conv_in_dim = out_dim - conv_block.append( - nn.Conv2D( - kernel_size=(3, 3), - padding=(1, 1), - in_channels=conv_in_dim, - out_channels=out_dim, - ) - ) - conv_block.append( - nn.GroupNorm( - num_groups=self.num_group_list[i], num_channels=out_dim - ) - ) - conv_block.append( - act_mod.get_activation(activation) - if activation != "leaky_relu" - else nn.LeakyReLU(NEGATIVE_SLOPE) - ) - conv_block = nn.Sequential(*conv_block) - self.conv_block_list.append(conv_block) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, - conv_mode=self.conv_init_mode, - linear_mode=self.linear_init_mode, - norm_mode=self.norm_init_mode, - ) - - @staticmethod - def get_init_params(enc_input_shape, enc_out_shape_list, large_channel=False): - dec_target_shape_list = list(enc_out_shape_list[:-1])[::-1] + [ - tuple(enc_input_shape) - ] - if large_channel: - dec_target_shape_list_large_channel = [] - for i, enc_out_shape in enumerate(enc_out_shape_list[::-1]): - dec_target_shape_large_channel = list(dec_target_shape_list[i]) - dec_target_shape_large_channel[-1] = enc_out_shape[-1] - dec_target_shape_list_large_channel.append( - tuple(dec_target_shape_large_channel) - ) - dec_target_shape_list = dec_target_shape_list_large_channel - dec_in_dim = enc_out_shape_list[-1][-1] - return dec_target_shape_list, dec_in_dim - - def forward(self, x): - """x --> Upsample --> [K x Conv2D] --> ... --> Upsample --> [K x Conv2D] - - Args: - x: Shape (B, T, H, W, C) - - Returns: - out: Shape (B, T, H_new, W_new, C) - """ - for i, (conv_block, upsample) in enumerate( - zip(self.conv_block_list, self.upsample_list) - ): - x = upsample(x) - if self.num_conv_per_up_list[i] > 0: - B, T, H, W, C = x.shape - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = conv_block(x).transpose(perm=[0, 2, 3, 1]).reshape([B, T, H, W, -1]) - return x - - -class ExtFormerMoECuboid(base.Arch): - """Cuboid Transformer for spatiotemporal forecasting - - We adopt the Non-autoregressive encoder-decoder architecture. - The decoder takes the multi-scale memory output from the encoder. 
- - The initial downsampling / upsampling layers will be - Downsampling: [K x Conv2D --> PatchMerge] - Upsampling: [Nearest Interpolation-based Upsample --> K x Conv2D] - - x --> downsample (optional) ---> (+pos_embed) ---> enc --> mem_l initial_z (+pos_embed) ---> FC - | | - |------------| - | - | - y <--- upsample (optional) <--- dec <---------- - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). - input_shape (Tuple[int, ...]): The shape of the input data. - target_shape (Tuple[int, ...]): The shape of the target data. - base_units (int, optional): The base units. Defaults to 128. - block_units (int, optional): The block units. Defaults to None. - scale_alpha (float, optional): We scale up the channels based on the formula: - - round_to(base_units * max(downsample_scale) ** units_alpha, 4). Defaults to 1.0. - num_heads (int, optional): The number of heads. Defaults to 4. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projection dropout. Defaults to 0.0. - ffn_drop (float, optional): The ffn dropout. Defaults to 0.0. - downsample (int, optional): The rate of downsample. Defaults to 2. - downsample_type (str, optional): The type of downsample. Defaults to "patch_merge". - upsample_type (str, optional): The rate of upsample. Defaults to "upsample". - upsample_kernel_size (int, optional): The kernel size of upsample. Defaults to 3. - enc_depth (list, optional): The depth of encoder. Defaults to [4, 4, 4]. - enc_attn_patterns (str, optional): The pattern of encoder attention. Defaults to None. - enc_cuboid_size (list, optional): The cuboid size of encoder. Defaults to [(4, 4, 4), (4, 4, 4)]. - enc_cuboid_strategy (list, optional): The cuboid strategy of encoder. Defaults to [("l", "l", "l"), ("d", "d", "d")]. - enc_shift_size (list, optional): The shift size of encoder. Defaults to [(0, 0, 0), (0, 0, 0)]. - enc_use_inter_ffn (bool, optional): Whether to use intermediate FFN for encoder. Defaults to True. - dec_depth (list, optional): The depth of decoder. Defaults to [2, 2]. - dec_cross_start (int, optional): The cross start of decoder. Defaults to 0. - dec_self_attn_patterns (str, optional): The partterns of decoder. Defaults to None. - dec_self_cuboid_size (list, optional): The cuboid size of decoder. Defaults to [(4, 4, 4), (4, 4, 4)]. - dec_self_cuboid_strategy (list, optional): The strategy of decoder. Defaults to [("l", "l", "l"), ("d", "d", "d")]. - dec_self_shift_size (list, optional): The shift size of decoder. Defaults to [(1, 1, 1), (0, 0, 0)]. - dec_cross_attn_patterns (_type_, optional): The cross attention patterns of decoder. Defaults to None. - dec_cross_cuboid_hw (list, optional): The cuboid_hw of decoder. Defaults to [(4, 4), (4, 4)]. - dec_cross_cuboid_strategy (list, optional): The cuboid strategy of decoder. Defaults to [("l", "l", "l"), ("d", "l", "l")]. - dec_cross_shift_hw (list, optional): The shift_hw of decoder. Defaults to [(0, 0), (0, 0)]. - dec_cross_n_temporal (list, optional): The cross_n_temporal of decoder. Defaults to [1, 2]. - dec_cross_last_n_frames (int, optional): The cross_last_n_frames of decoder. Defaults to None. - dec_use_inter_ffn (bool, optional): Whether to use intermediate FFN for decoder. Defaults to True. - dec_hierarchical_pos_embed (bool, optional): Whether to use hierarchical pos_embed for decoder. Defaults to False. 
- num_global_vectors (int, optional): The num of global vectors. Defaults to 4. - use_dec_self_global (bool, optional): Whether to use global vector for decoder. Defaults to True. - dec_self_update_global (bool, optional): Whether to update global vector for decoder. Defaults to True. - use_dec_cross_global (bool, optional): Whether to use cross global vector for decoder. Defaults to True. - use_global_vector_ffn (bool, optional): Whether to use global vector FFN. Defaults to True. - use_global_self_attn (bool, optional): Whether to use global attentions. Defaults to False. - separate_global_qkv (bool, optional): Whether to separate global qkv. Defaults to False. - global_dim_ratio (int, optional): The ratio of global dim. Defaults to 1. - self_pattern (str, optional): The pattern. Defaults to "axial". - cross_self_pattern (str, optional): The self cross pattern. Defaults to "axial". - cross_pattern (str, optional): The cross pattern. Defaults to "cross_1x1". - z_init_method (str, optional): How the initial input to the decoder is initialized. Defaults to "nearest_interp". - initial_downsample_type (str, optional): The downsample type of initial. Defaults to "conv". - initial_downsample_activation (str, optional): The downsample activation of initial. Defaults to "leaky". - initial_downsample_scale (int, optional): The downsample scale of initial. Defaults to 1. - initial_downsample_conv_layers (int, optional): The conv layer of downsample of initial. Defaults to 2. - final_upsample_conv_layers (int, optional): The conv layer of final upsample. Defaults to 2. - initial_downsample_stack_conv_num_layers (int, optional): The num of stack conv layer of initial downsample. Defaults to 1. - initial_downsample_stack_conv_dim_list (list, optional): The dim list of stack conv of initial downsample. Defaults to None. - initial_downsample_stack_conv_downscale_list (list, optional): The downscale list of stack conv of initial downsample. Defaults to [1]. - initial_downsample_stack_conv_num_conv_list (list, optional): The num of stack conv list of initial downsample. Defaults to [2]. - ffn_activation (str, optional): The activation of FFN. Defaults to "leaky". - gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. - norm_layer (str, optional): The type of normilize. Defaults to "layer_norm". - padding_type (str, optional): The type of padding. Defaults to "ignore". - pos_embed_type (str, optional): The type of pos embeding. Defaults to "t+hw". - checkpoint_level (bool, optional): Whether to use checkpoint. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pose. Defaults to True. - self_attn_use_final_proj (bool, optional): Whether to use final projection. Defaults to True. - dec_use_first_self_attn (bool, optional): Whether to use first self attention for decoder. Defaults to False. - attn_linear_init_mode (str, optional): The mode of attention linear init. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear init. Defaults to "0". - conv_init_mode (str, optional): The mode of conv init. Defaults to "0". - down_up_linear_init_mode (str, optional): The mode of downsample and upsample linear init. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization init. Defaults to "0". 
- """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - input_shape: Tuple[int, ...], - target_shape: Tuple[int, ...], - base_units: int = 128, - block_units: int = None, - scale_alpha: float = 1.0, - num_heads: int = 4, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - downsample: int = 2, - downsample_type: str = "patch_merge", - upsample_type: str = "upsample", - upsample_kernel_size: int = 3, - enc_depth: Tuple[int, ...] = [4, 4, 4], - enc_attn_patterns: str = None, - enc_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - enc_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "d", "d"), - ], - enc_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (0, 0, 0)], - enc_use_inter_ffn: bool = True, - dec_depth: Tuple[int, ...] = [2, 2], - dec_cross_start: int = 0, - dec_self_attn_patterns: str = None, - dec_self_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - dec_self_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "d", "d"), - ], - dec_self_shift_size: Tuple[Tuple[int, ...], ...] = [(1, 1, 1), (0, 0, 0)], - dec_cross_attn_patterns: str = None, - dec_cross_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], - dec_cross_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "l", "l"), - ], - dec_cross_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (0, 0)], - dec_cross_n_temporal: Tuple[int, ...] = [1, 2], - dec_cross_last_n_frames: int = None, - dec_use_inter_ffn: bool = True, - dec_hierarchical_pos_embed: bool = False, - num_global_vectors: int = 4, - use_dec_self_global: bool = True, - dec_self_update_global: bool = True, - use_dec_cross_global: bool = True, - use_global_vector_ffn: bool = True, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - self_pattern: str = "axial", - cross_self_pattern: str = "axial", - cross_pattern: str = "cross_1x1", - z_init_method: str = "nearest_interp", - initial_downsample_type: str = "conv", - initial_downsample_activation: str = "leaky", - initial_downsample_scale: int = 1, - initial_downsample_conv_layers: int = 2, - final_upsample_conv_layers: int = 2, - initial_downsample_stack_conv_num_layers: int = 1, - initial_downsample_stack_conv_dim_list: Tuple[int, ...] = None, - initial_downsample_stack_conv_downscale_list: Tuple[int, ...] = [1], - initial_downsample_stack_conv_num_conv_list: Tuple[int, ...] 
= [2], - ffn_activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - padding_type: str = "ignore", - pos_embed_type: str = "t+hw", - checkpoint_level: bool = True, - use_relative_pos: bool = True, - self_attn_use_final_proj: bool = True, - dec_use_first_self_attn: bool = False, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - conv_init_mode: str = "0", - down_up_linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - rnc_config: dict = None, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.conv_init_mode = conv_init_mode - self.down_up_linear_init_mode = down_up_linear_init_mode - self.norm_init_mode = norm_init_mode - assert len(enc_depth) == len(dec_depth) - self.base_units = base_units - self.num_global_vectors = num_global_vectors - self.moe_config = moe_config - self.rnc_config = rnc_config - self.checkpoint_level = checkpoint_level - - num_blocks = len(enc_depth) - if isinstance(self_pattern, str): - enc_attn_patterns = [self_pattern] * num_blocks - if isinstance(cross_self_pattern, str): - dec_self_attn_patterns = [cross_self_pattern] * num_blocks - if isinstance(cross_pattern, str): - dec_cross_attn_patterns = [cross_pattern] * num_blocks - if global_dim_ratio != 1: - assert ( - separate_global_qkv is True - ), "Setting global_dim_ratio != 1 requires separate_global_qkv == True." - self.global_dim_ratio = global_dim_ratio - self.z_init_method = z_init_method - assert self.z_init_method in ["zeros", "nearest_interp", "last", "mean"] - self.input_shape = input_shape - self.target_shape = target_shape - T_in, H_in, W_in, C_in = input_shape - T_out, H_out, W_out, C_out = target_shape - assert H_in == H_out and W_in == W_out - if self.num_global_vectors > 0: - init_data = paddle.zeros( - (self.num_global_vectors, global_dim_ratio * base_units) - ) - self.init_global_vectors = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(0.0), - ) - - self.init_global_vectors.stop_gradient = not True - new_input_shape = self.get_initial_encoder_final_decoder( - initial_downsample_scale=initial_downsample_scale, - initial_downsample_type=initial_downsample_type, - activation=initial_downsample_activation, - initial_downsample_conv_layers=initial_downsample_conv_layers, - final_upsample_conv_layers=final_upsample_conv_layers, - padding_type=padding_type, - initial_downsample_stack_conv_num_layers=initial_downsample_stack_conv_num_layers, - initial_downsample_stack_conv_dim_list=initial_downsample_stack_conv_dim_list, - initial_downsample_stack_conv_downscale_list=initial_downsample_stack_conv_downscale_list, - initial_downsample_stack_conv_num_conv_list=initial_downsample_stack_conv_num_conv_list, - ) - T_in, H_in, W_in, _ = new_input_shape - self.encoder = cuboid_encoder.CuboidTransformerEncoder( - input_shape=(T_in, H_in, W_in, base_units), - base_units=base_units, - block_units=block_units, - scale_alpha=scale_alpha, - depth=enc_depth, - downsample=downsample, - downsample_type=downsample_type, - block_attn_patterns=enc_attn_patterns, - block_cuboid_size=enc_cuboid_size, - block_strategy=enc_cuboid_strategy, - block_shift_size=enc_shift_size, - num_heads=num_heads, - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - gated_ffn=gated_ffn, - ffn_activation=ffn_activation, - 
norm_layer=norm_layer, - use_inter_ffn=enc_use_inter_ffn, - padding_type=padding_type, - use_global_vector=num_global_vectors > 0, - use_global_vector_ffn=use_global_vector_ffn, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - self_attn_use_final_proj=self_attn_use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - conv_init_mode=conv_init_mode, - down_linear_init_mode=down_up_linear_init_mode, - norm_init_mode=norm_init_mode, - moe_config=moe_config, - ) - self.enc_pos_embed = cuboid_decoder.PosEmbed( - embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in - ) - mem_shapes = self.encoder.get_mem_shapes() - self.z_proj = nn.Linear( - in_features=mem_shapes[-1][-1], out_features=mem_shapes[-1][-1] - ) - self.dec_pos_embed = cuboid_decoder.PosEmbed( - embed_dim=mem_shapes[-1][-1], - typ=pos_embed_type, - maxT=T_out, - maxH=mem_shapes[-1][1], - maxW=mem_shapes[-1][2], - ) - self.decoder = cuboid_decoder.CuboidTransformerDecoder( - target_temporal_length=T_out, - mem_shapes=mem_shapes, - cross_start=dec_cross_start, - depth=dec_depth, - upsample_type=upsample_type, - block_self_attn_patterns=dec_self_attn_patterns, - block_self_cuboid_size=dec_self_cuboid_size, - block_self_shift_size=dec_self_shift_size, - block_self_cuboid_strategy=dec_self_cuboid_strategy, - block_cross_attn_patterns=dec_cross_attn_patterns, - block_cross_cuboid_hw=dec_cross_cuboid_hw, - block_cross_shift_hw=dec_cross_shift_hw, - block_cross_cuboid_strategy=dec_cross_cuboid_strategy, - block_cross_n_temporal=dec_cross_n_temporal, - cross_last_n_frames=dec_cross_last_n_frames, - num_heads=num_heads, - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - upsample_kernel_size=upsample_kernel_size, - ffn_activation=ffn_activation, - gated_ffn=gated_ffn, - norm_layer=norm_layer, - use_inter_ffn=dec_use_inter_ffn, - max_temporal_relative=T_in + T_out, - padding_type=padding_type, - hierarchical_pos_embed=dec_hierarchical_pos_embed, - pos_embed_type=pos_embed_type, - use_self_global=num_global_vectors > 0 and use_dec_self_global, - self_update_global=dec_self_update_global, - use_cross_global=num_global_vectors > 0 and use_dec_cross_global, - use_global_vector_ffn=use_global_vector_ffn, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - self_attn_use_final_proj=self_attn_use_final_proj, - use_first_self_attn=dec_use_first_self_attn, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - conv_init_mode=conv_init_mode, - up_linear_init_mode=down_up_linear_init_mode, - norm_init_mode=norm_init_mode, - moe_config=moe_config, - ) - - if rnc_config["use_rnc"]: - self.rnc_cri = extformer_moe_utils.RnCLoss(rnc_config) - - self.reset_parameters() - - def get_initial_encoder_final_decoder( - self, - initial_downsample_type, - activation, - initial_downsample_scale, - initial_downsample_conv_layers, - final_upsample_conv_layers, - padding_type, - initial_downsample_stack_conv_num_layers, - initial_downsample_stack_conv_dim_list, - initial_downsample_stack_conv_downscale_list, - initial_downsample_stack_conv_num_conv_list, - ): - T_in, H_in, W_in, C_in = self.input_shape - T_out, H_out, W_out, C_out = self.target_shape - 
self.initial_downsample_type = initial_downsample_type - if self.initial_downsample_type == "conv": - if isinstance(initial_downsample_scale, int): - initial_downsample_scale = ( - 1, - initial_downsample_scale, - initial_downsample_scale, - ) - elif len(initial_downsample_scale) == 2: - initial_downsample_scale = 1, *initial_downsample_scale - elif len(initial_downsample_scale) == 3: - initial_downsample_scale = tuple(initial_downsample_scale) - else: - raise NotImplementedError( - f"initial_downsample_scale {initial_downsample_scale} format not supported!" - ) - self.initial_encoder = InitialEncoder( - dim=C_in, - out_dim=self.base_units, - downsample_scale=initial_downsample_scale, - num_conv_layers=initial_downsample_conv_layers, - padding_type=padding_type, - activation=activation, - conv_init_mode=self.conv_init_mode, - linear_init_mode=self.down_up_linear_init_mode, - norm_init_mode=self.norm_init_mode, - ) - - self.final_decoder = FinalDecoder( - dim=self.base_units, - target_thw=(T_out, H_out, W_out), - num_conv_layers=final_upsample_conv_layers, - activation=activation, - conv_init_mode=self.conv_init_mode, - linear_init_mode=self.down_up_linear_init_mode, - norm_init_mode=self.norm_init_mode, - ) - new_input_shape = self.initial_encoder.patch_merge.get_out_shape( - self.input_shape - ) - self.dec_final_proj = nn.Linear( - in_features=self.base_units, out_features=C_out - ) - elif self.initial_downsample_type == "stack_conv": - if initial_downsample_stack_conv_dim_list is None: - initial_downsample_stack_conv_dim_list = [ - self.base_units - ] * initial_downsample_stack_conv_num_layers - self.initial_encoder = InitialStackPatchMergingEncoder( - num_merge=initial_downsample_stack_conv_num_layers, - in_dim=C_in, - out_dim_list=initial_downsample_stack_conv_dim_list, - downsample_scale_list=initial_downsample_stack_conv_downscale_list, - num_conv_per_merge_list=initial_downsample_stack_conv_num_conv_list, - padding_type=padding_type, - activation=activation, - conv_init_mode=self.conv_init_mode, - linear_init_mode=self.down_up_linear_init_mode, - norm_init_mode=self.norm_init_mode, - ) - initial_encoder_out_shape_list = self.initial_encoder.get_out_shape_list( - self.target_shape - ) - ( - dec_target_shape_list, - dec_in_dim, - ) = FinalStackUpsamplingDecoder.get_init_params( - enc_input_shape=self.target_shape, - enc_out_shape_list=initial_encoder_out_shape_list, - large_channel=True, - ) - self.final_decoder = FinalStackUpsamplingDecoder( - target_shape_list=dec_target_shape_list, - in_dim=dec_in_dim, - num_conv_per_up_list=initial_downsample_stack_conv_num_conv_list[::-1], - activation=activation, - conv_init_mode=self.conv_init_mode, - linear_init_mode=self.down_up_linear_init_mode, - norm_init_mode=self.norm_init_mode, - ) - self.dec_final_proj = nn.Linear( - in_features=dec_target_shape_list[-1][-1], out_features=C_out - ) - new_input_shape = self.initial_encoder.get_out_shape_list(self.input_shape)[ - -1 - ] - else: - raise NotImplementedError(f"{self.initial_downsample_type} is invalid.") - self.input_shape_after_initial_downsample = new_input_shape - T_in, H_in, W_in, _ = new_input_shape - return new_input_shape - - def reset_parameters(self): - if self.num_global_vectors > 0: - self.init_global_vectors = initializer.trunc_normal_( - self.init_global_vectors, std=0.02 - ) - if hasattr(self.initial_encoder, "reset_parameters"): - self.initial_encoder.reset_parameters() - else: - cuboid_utils.apply_initialization( - self.initial_encoder, - conv_mode=self.conv_init_mode, - 
linear_mode=self.down_up_linear_init_mode, - norm_mode=self.norm_init_mode, - ) - if hasattr(self.final_decoder, "reset_parameters"): - self.final_decoder.reset_parameters() - else: - cuboid_utils.apply_initialization( - self.final_decoder, - conv_mode=self.conv_init_mode, - linear_mode=self.down_up_linear_init_mode, - norm_mode=self.norm_init_mode, - ) - cuboid_utils.apply_initialization( - self.dec_final_proj, linear_mode=self.down_up_linear_init_mode - ) - self.encoder.reset_parameters() - self.enc_pos_embed.reset_parameters() - self.decoder.reset_parameters() - self.dec_pos_embed.reset_parameters() - cuboid_utils.apply_initialization(self.z_proj, linear_mode="0") - - def get_initial_z(self, final_mem, T_out): - B = final_mem.shape[0] - if self.z_init_method == "zeros": - z_shape = list((1, T_out)) + final_mem.shape[2:] - initial_z = paddle.zeros(shape=z_shape, dtype=final_mem.dtype) - initial_z = self.z_proj(self.dec_pos_embed(initial_z)).expand( - shape=[B, -1, -1, -1, -1] - ) - elif self.z_init_method == "nearest_interp": - initial_z = nn.functional.interpolate( - x=final_mem.transpose(perm=[0, 4, 1, 2, 3]), - size=(T_out, final_mem.shape[2], final_mem.shape[3]), - ).transpose(perm=[0, 2, 3, 4, 1]) - initial_z = self.z_proj(initial_z) - elif self.z_init_method == "last": - initial_z = paddle.broadcast_to( - x=final_mem[:, -1:, :, :, :], shape=(B, T_out) + final_mem.shape[2:] - ) - initial_z = self.z_proj(initial_z) - elif self.z_init_method == "mean": - initial_z = paddle.broadcast_to( - x=final_mem.mean(axis=1, keepdims=True), - shape=(B, T_out) + final_mem.shape[2:], - ) - initial_z = self.z_proj(initial_z) - else: - raise NotImplementedError - return initial_z - - def forward(self, x: "paddle.Tensor", verbose: bool = False) -> "paddle.Tensor": - """ - Args: - x (paddle.Tensor): Tensor with shape (B, T, H, W, C). - verbose (bool): if True, print intermediate shapes. 
- - Returns: - out (paddle.Tensor): The output Shape (B, T_out, H, W, C_out) - """ - - labels = x["sst_target"] - x = self.concat_to_tensor(x, self.input_keys) - flag_ndim = x.ndim - if flag_ndim == 6: - x = x.reshape([-1, *x.shape[2:]]) - B, _, _, _, _ = x.shape - - T_out = self.target_shape[0] - x = self.initial_encoder(x) - x = self.enc_pos_embed(x) - - if self.num_global_vectors > 0: - init_global_vectors = self.init_global_vectors.expand( - shape=[ - B, - self.num_global_vectors, - self.global_dim_ratio * self.base_units, - ] - ) - mem_l, mem_global_vector_l = self.encoder(x, init_global_vectors) - else: - mem_l = self.encoder(x) - - if verbose: - for i, mem in enumerate(mem_l): - print(f"mem[{i}].shape = {mem.shape}") - initial_z = self.get_initial_z(final_mem=mem_l[-1], T_out=T_out) - - if self.num_global_vectors > 0: - dec_out = self.decoder(initial_z, mem_l, mem_global_vector_l) - else: - dec_out = self.decoder(initial_z, mem_l) - - dec_out = self.final_decoder(dec_out) - out = self.dec_final_proj(dec_out) - - if flag_ndim == 6: - out = out.reshape([-1, *out.shape]) - - out_dict = {key: out for key in self.output_keys[:2]} - - # moe loss - if self.training: - aux_losses = extformer_moe_utils.aggregate_aux_losses(self) - if len(aux_losses) > 0: - aux_loss = paddle.concat(aux_losses).mean() - else: - aux_loss = None - else: - aux_loss = None - assert "aux_loss" in self.output_keys - out_dict["aux_loss"] = aux_loss - - # rnc - if self.training and self.rnc_config["use_rnc"]: - rank_loss = self.rnc_cri(dec_out, labels) - rank_loss = rank_loss.unsqueeze(0) - else: - rank_loss = None - assert "rank_loss" in self.output_keys - out_dict["rank_loss"] = rank_loss - - return out_dict +from typing import Sequence +from typing import Tuple +from typing import Union + +import paddle +from paddle import nn + +import ppsci.arch.extformer_moe_cuboid_decoder as cuboid_decoder +import ppsci.arch.extformer_moe_cuboid_encoder as cuboid_encoder +import ppsci.arch.extformer_moe_cuboid_utils as cuboid_utils +from ppsci.arch import activation as act_mod +from ppsci.arch import base +from ppsci.arch import extformer_moe_utils +from ppsci.arch.extformer_moe_cuboid_encoder import NEGATIVE_SLOPE +from ppsci.utils import initializer + +"""A space-time Transformer with Cuboid Attention""" + + +class InitialEncoder(nn.Layer): + def __init__( + self, + dim, + out_dim, + downsample_scale: Union[int, Sequence[int]], + num_conv_layers: int = 2, + activation: str = "leaky", + padding_type: str = "nearest", + conv_init_mode: str = "0", + linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + ): + super(InitialEncoder, self).__init__() + self.num_conv_layers = num_conv_layers + self.conv_init_mode = conv_init_mode + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + conv_block = [] + for i in range(num_conv_layers): + if i == 0: + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=dim, + out_channels=out_dim, + ) + ) + conv_block.append(nn.GroupNorm(num_groups=16, num_channels=out_dim)) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + else: + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=out_dim, + out_channels=out_dim, + ) + ) + conv_block.append(nn.GroupNorm(num_groups=16, num_channels=out_dim)) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else 
nn.LeakyReLU(NEGATIVE_SLOPE) + ) + self.conv_block = nn.Sequential(*conv_block) + if isinstance(downsample_scale, int): + patch_merge_downsample = (1, downsample_scale, downsample_scale) + elif len(downsample_scale) == 2: + patch_merge_downsample = (1, *downsample_scale) + elif len(downsample_scale) == 3: + patch_merge_downsample = tuple(downsample_scale) + else: + raise NotImplementedError( + f"downsample_scale {downsample_scale} format not supported!" + ) + self.patch_merge = cuboid_encoder.PatchMerging3D( + dim=out_dim, + out_dim=out_dim, + padding_type=padding_type, + downsample=patch_merge_downsample, + linear_init_mode=linear_init_mode, + norm_init_mode=norm_init_mode, + ) + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization( + m, + conv_mode=self.conv_init_mode, + linear_mode=self.linear_init_mode, + norm_mode=self.norm_init_mode, + ) + + def forward(self, x): + """x --> [K x Conv2D] --> PatchMerge + + Args: + x: (B, T, H, W, C) + + Returns: + out: (B, T, H_new, W_new, C_out) + """ + + B, T, H, W, C = x.shape + + if self.num_conv_layers > 0: + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = self.conv_block(x).transpose(perm=[0, 2, 3, 1]) + x = self.patch_merge(x.reshape([B, T, H, W, -1])) + else: + x = self.patch_merge(x) + return x + + +class FinalDecoder(nn.Layer): + def __init__( + self, + target_thw: Tuple[int, ...], + dim: int, + num_conv_layers: int = 2, + activation: str = "leaky", + conv_init_mode: str = "0", + linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + ): + super(FinalDecoder, self).__init__() + self.target_thw = target_thw + self.dim = dim + self.num_conv_layers = num_conv_layers + self.conv_init_mode = conv_init_mode + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + conv_block = [] + for i in range(num_conv_layers): + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=dim, + out_channels=dim, + ) + ) + conv_block.append(nn.GroupNorm(num_groups=16, num_channels=dim)) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + self.conv_block = nn.Sequential(*conv_block) + self.upsample = cuboid_decoder.Upsample3DLayer( + dim=dim, + out_dim=dim, + target_size=target_thw, + kernel_size=3, + conv_init_mode=conv_init_mode, + ) + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization( + m, + conv_mode=self.conv_init_mode, + linear_mode=self.linear_init_mode, + norm_mode=self.norm_init_mode, + ) + + def forward(self, x): + """x --> Upsample --> [K x Conv2D] + + Args: + x: (B, T, H, W, C) + + Returns: + out: (B, T, H_new, W_new, C) + """ + + x = self.upsample(x) + if self.num_conv_layers > 0: + B, T, H, W, C = x.shape + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = ( + self.conv_block(x) + .transpose(perm=[0, 2, 3, 1]) + .reshape([B, T, H, W, -1]) + ) + return x + + +class InitialStackPatchMergingEncoder(nn.Layer): + def __init__( + self, + num_merge: int, + in_dim: int, + out_dim_list: Tuple[int, ...], + downsample_scale_list: Tuple[float, ...], + num_conv_per_merge_list: Tuple[int, ...] 
= None, + activation: str = "leaky", + padding_type: str = "nearest", + conv_init_mode: str = "0", + linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + ): + super(InitialStackPatchMergingEncoder, self).__init__() + self.conv_init_mode = conv_init_mode + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + self.num_merge = num_merge + self.in_dim = in_dim + self.out_dim_list = out_dim_list[:num_merge] + self.downsample_scale_list = downsample_scale_list[:num_merge] + self.num_conv_per_merge_list = num_conv_per_merge_list + self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list] + self.conv_block_list = nn.LayerList() + self.patch_merge_list = nn.LayerList() + for i in range(num_merge): + if i == 0: + in_dim = in_dim + else: + in_dim = self.out_dim_list[i - 1] + out_dim = self.out_dim_list[i] + downsample_scale = self.downsample_scale_list[i] + conv_block = [] + for j in range(self.num_conv_per_merge_list[i]): + if j == 0: + conv_in_dim = in_dim + else: + conv_in_dim = out_dim + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=conv_in_dim, + out_channels=out_dim, + ) + ) + conv_block.append( + nn.GroupNorm( + num_groups=self.num_group_list[i], num_channels=out_dim + ) + ) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + conv_block = nn.Sequential(*conv_block) + self.conv_block_list.append(conv_block) + patch_merge = cuboid_encoder.PatchMerging3D( + dim=out_dim, + out_dim=out_dim, + padding_type=padding_type, + downsample=(1, downsample_scale, downsample_scale), + linear_init_mode=linear_init_mode, + norm_init_mode=norm_init_mode, + ) + self.patch_merge_list.append(patch_merge) + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization( + m, + conv_mode=self.conv_init_mode, + linear_mode=self.linear_init_mode, + norm_mode=self.norm_init_mode, + ) + + def get_out_shape_list(self, input_shape): + out_shape_list = [] + for patch_merge in self.patch_merge_list: + input_shape = patch_merge.get_out_shape(input_shape) + out_shape_list.append(input_shape) + return out_shape_list + + def forward(self, x): + """x --> [K x Conv2D] --> PatchMerge --> ... --> [K x Conv2D] --> PatchMerge + + Args: + x: (B, T, H, W, C) + + Returns: + out: (B, T, H_new, W_new, C_out) + """ + + for i, (conv_block, patch_merge) in enumerate( + zip(self.conv_block_list, self.patch_merge_list) + ): + B, T, H, W, C = x.shape + if self.num_conv_per_merge_list[i] > 0: + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = conv_block(x).transpose(perm=[0, 2, 3, 1]).reshape([B, T, H, W, -1]) + x = patch_merge(x) + return x + + +class FinalStackUpsamplingDecoder(nn.Layer): + def __init__( + self, + target_shape_list: Tuple[Tuple[int, ...]], + in_dim: int, + num_conv_per_up_list: Tuple[int, ...] 
= None, + activation: str = "leaky", + conv_init_mode: str = "0", + linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + ): + super(FinalStackUpsamplingDecoder, self).__init__() + self.conv_init_mode = conv_init_mode + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + self.target_shape_list = target_shape_list + self.out_dim_list = [ + target_shape[-1] for target_shape in self.target_shape_list + ] + self.num_upsample = len(target_shape_list) + self.in_dim = in_dim + self.num_conv_per_up_list = num_conv_per_up_list + self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list] + self.conv_block_list = nn.LayerList() + self.upsample_list = nn.LayerList() + for i in range(self.num_upsample): + if i == 0: + in_dim = in_dim + else: + in_dim = self.out_dim_list[i - 1] + out_dim = self.out_dim_list[i] + upsample = cuboid_decoder.Upsample3DLayer( + dim=in_dim, + out_dim=in_dim, + target_size=target_shape_list[i][:-1], + kernel_size=3, + conv_init_mode=conv_init_mode, + ) + self.upsample_list.append(upsample) + conv_block = [] + for j in range(num_conv_per_up_list[i]): + if j == 0: + conv_in_dim = in_dim + else: + conv_in_dim = out_dim + conv_block.append( + nn.Conv2D( + kernel_size=(3, 3), + padding=(1, 1), + in_channels=conv_in_dim, + out_channels=out_dim, + ) + ) + conv_block.append( + nn.GroupNorm( + num_groups=self.num_group_list[i], num_channels=out_dim + ) + ) + conv_block.append( + act_mod.get_activation(activation) + if activation != "leaky_relu" + else nn.LeakyReLU(NEGATIVE_SLOPE) + ) + conv_block = nn.Sequential(*conv_block) + self.conv_block_list.append(conv_block) + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization( + m, + conv_mode=self.conv_init_mode, + linear_mode=self.linear_init_mode, + norm_mode=self.norm_init_mode, + ) + + @staticmethod + def get_init_params(enc_input_shape, enc_out_shape_list, large_channel=False): + dec_target_shape_list = list(enc_out_shape_list[:-1])[::-1] + [ + tuple(enc_input_shape) + ] + if large_channel: + dec_target_shape_list_large_channel = [] + for i, enc_out_shape in enumerate(enc_out_shape_list[::-1]): + dec_target_shape_large_channel = list(dec_target_shape_list[i]) + dec_target_shape_large_channel[-1] = enc_out_shape[-1] + dec_target_shape_list_large_channel.append( + tuple(dec_target_shape_large_channel) + ) + dec_target_shape_list = dec_target_shape_list_large_channel + dec_in_dim = enc_out_shape_list[-1][-1] + return dec_target_shape_list, dec_in_dim + + def forward(self, x): + """x --> Upsample --> [K x Conv2D] --> ... --> Upsample --> [K x Conv2D] + + Args: + x: Shape (B, T, H, W, C) + + Returns: + out: Shape (B, T, H_new, W_new, C) + """ + for i, (conv_block, upsample) in enumerate( + zip(self.conv_block_list, self.upsample_list) + ): + x = upsample(x) + if self.num_conv_per_up_list[i] > 0: + B, T, H, W, C = x.shape + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = conv_block(x).transpose(perm=[0, 2, 3, 1]).reshape([B, T, H, W, -1]) + return x + + +class ExtFormerMoECuboid(base.Arch): + """Cuboid Transformer for spatiotemporal forecasting + + We adopt the Non-autoregressive encoder-decoder architecture. + The decoder takes the multi-scale memory output from the encoder. 
+ + The initial downsampling / upsampling layers are + Downsampling: [K x Conv2D --> PatchMerge] + Upsampling: [Nearest Interpolation-based Upsample --> K x Conv2D] + + x --> downsample (optional) ---> (+pos_embed) ---> enc --> mem_l initial_z (+pos_embed) ---> FC + | | + |------------| + | + | + y <--- upsample (optional) <--- dec <---------- + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + input_shape (Tuple[int, ...]): The shape of the input data. + target_shape (Tuple[int, ...]): The shape of the target data. + base_units (int, optional): The base units. Defaults to 128. + block_units (int, optional): The block units. Defaults to None. + scale_alpha (float, optional): We scale up the channels based on the formula: + - round_to(base_units * max(downsample_scale) ** scale_alpha, 4). Defaults to 1.0. + num_heads (int, optional): The number of heads. Defaults to 4. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projection dropout. Defaults to 0.0. + ffn_drop (float, optional): The FFN dropout. Defaults to 0.0. + downsample (int, optional): The rate of downsample. Defaults to 2. + downsample_type (str, optional): The type of downsample. Defaults to "patch_merge". + upsample_type (str, optional): The type of upsample. Defaults to "upsample". + upsample_kernel_size (int, optional): The kernel size of upsample. Defaults to 3. + enc_depth (list, optional): The depth of the encoder. Defaults to [4, 4, 4]. + enc_attn_patterns (str, optional): The attention patterns of the encoder. Defaults to None. + enc_cuboid_size (list, optional): The cuboid size of the encoder. Defaults to [(4, 4, 4), (4, 4, 4)]. + enc_cuboid_strategy (list, optional): The cuboid strategy of the encoder. Defaults to [("l", "l", "l"), ("d", "d", "d")]. + enc_shift_size (list, optional): The shift size of the encoder. Defaults to [(0, 0, 0), (0, 0, 0)]. + enc_use_inter_ffn (bool, optional): Whether to use intermediate FFN for the encoder. Defaults to True. + dec_depth (list, optional): The depth of the decoder. Defaults to [2, 2]. + dec_cross_start (int, optional): The block at which the decoder starts cross attention. Defaults to 0. + dec_self_attn_patterns (str, optional): The self attention patterns of the decoder. Defaults to None. + dec_self_cuboid_size (list, optional): The self attention cuboid size of the decoder. Defaults to [(4, 4, 4), (4, 4, 4)]. + dec_self_cuboid_strategy (list, optional): The self attention cuboid strategy of the decoder. Defaults to [("l", "l", "l"), ("d", "d", "d")]. + dec_self_shift_size (list, optional): The self attention shift size of the decoder. Defaults to [(1, 1, 1), (0, 0, 0)]. + dec_cross_attn_patterns (str, optional): The cross attention patterns of the decoder. Defaults to None. + dec_cross_cuboid_hw (list, optional): The cross attention cuboid_hw of the decoder. Defaults to [(4, 4), (4, 4)]. + dec_cross_cuboid_strategy (list, optional): The cross attention cuboid strategy of the decoder. Defaults to [("l", "l", "l"), ("d", "l", "l")]. + dec_cross_shift_hw (list, optional): The cross attention shift_hw of the decoder. Defaults to [(0, 0), (0, 0)]. + dec_cross_n_temporal (list, optional): The cross attention n_temporal of the decoder. Defaults to [1, 2]. + dec_cross_last_n_frames (int, optional): The cross_last_n_frames of the decoder. Defaults to None. + dec_use_inter_ffn (bool, optional): Whether to use intermediate FFN for the decoder. Defaults to True. + dec_hierarchical_pos_embed (bool, optional): Whether to use hierarchical pos_embed for the decoder. Defaults to False. 
+ num_global_vectors (int, optional): The number of global vectors. Defaults to 4. + use_dec_self_global (bool, optional): Whether to use the global vector in decoder self attention. Defaults to True. + dec_self_update_global (bool, optional): Whether to update the global vector in the decoder. Defaults to True. + use_dec_cross_global (bool, optional): Whether to use the global vector in decoder cross attention. Defaults to True. + use_global_vector_ffn (bool, optional): Whether to use FFN for the global vectors. Defaults to True. + use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. + separate_global_qkv (bool, optional): Whether to use separate qkv projections for the global vectors. Defaults to False. + global_dim_ratio (int, optional): The ratio of the global dim. Defaults to 1. + self_pattern (str, optional): The self attention pattern of the encoder. Defaults to "axial". + cross_self_pattern (str, optional): The self attention pattern of the decoder. Defaults to "axial". + cross_pattern (str, optional): The cross attention pattern of the decoder. Defaults to "cross_1x1". + z_init_method (str, optional): How the initial input to the decoder is initialized. Defaults to "nearest_interp". + initial_downsample_type (str, optional): The type of the initial downsampling. Defaults to "conv". + initial_downsample_activation (str, optional): The activation of the initial downsampling. Defaults to "leaky". + initial_downsample_scale (int, optional): The scale of the initial downsampling. Defaults to 1. + initial_downsample_conv_layers (int, optional): The number of conv layers in the initial downsampling. Defaults to 2. + final_upsample_conv_layers (int, optional): The number of conv layers in the final upsampling. Defaults to 2. + initial_downsample_stack_conv_num_layers (int, optional): The number of stacked conv layers in the initial downsampling. Defaults to 1. + initial_downsample_stack_conv_dim_list (list, optional): The dim list of the stacked convs in the initial downsampling. Defaults to None. + initial_downsample_stack_conv_downscale_list (list, optional): The downscale list of the stacked convs in the initial downsampling. Defaults to [1]. + initial_downsample_stack_conv_num_conv_list (list, optional): The number of convs per stacked conv block in the initial downsampling. Defaults to [2]. + ffn_activation (str, optional): The activation of FFN. Defaults to "leaky". + gated_ffn (bool, optional): Whether to use gated FFN. Defaults to False. + norm_layer (str, optional): The type of normalization. Defaults to "layer_norm". + padding_type (str, optional): The type of padding. Defaults to "ignore". + pos_embed_type (str, optional): The type of positional embedding. Defaults to "t+hw". + checkpoint_level (bool, optional): Whether to use gradient checkpointing. Defaults to True. + use_relative_pos (bool, optional): Whether to use relative position encoding. Defaults to True. + self_attn_use_final_proj (bool, optional): Whether to use a final projection in self attention. Defaults to True. + dec_use_first_self_attn (bool, optional): Whether to use first self attention for the decoder. Defaults to False. + attn_linear_init_mode (str, optional): The mode of attention linear init. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear init. Defaults to "0". + conv_init_mode (str, optional): The mode of conv init. Defaults to "0". + down_up_linear_init_mode (str, optional): The mode of downsample and upsample linear init. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization init. Defaults to "0". 
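Note on the data contract of forward (an illustrative sketch, not content of the patch itself): forward reads the labels from x["sst_target"], concatenates the tensors listed under input_keys into a single (B, T, H, W, C) tensor, and returns a dict keyed by the first two entries of output_keys plus "aux_loss" and "rank_loss", so both of those names must appear in output_keys. The "sst_data" key and the sizes below are assumptions made only for illustration.

    # Hedged sketch of the expected batch layout for ExtFormerMoECuboid.forward.
    # "sst_data" and the sizes are hypothetical; "sst_target" is required by forward().
    import paddle

    B, T_in, T_out, H, W, C = 2, 12, 12, 64, 64, 1
    batch = {
        # concatenated over input_keys inside forward() -> a (B, T, H, W, C) tensor
        "sst_data": paddle.randn([B, T_in, H, W, C]),
        # used as labels for the RnC rank loss when rnc_config["use_rnc"] is enabled
        "sst_target": paddle.randn([B, T_out, H, W, C]),
    }
    # forward(batch) returns {output_keys[0]: out, output_keys[1]: out,
    #                         "aux_loss": ..., "rank_loss": ...}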
+ """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + input_shape: Tuple[int, ...], + target_shape: Tuple[int, ...], + base_units: int = 128, + block_units: int = None, + scale_alpha: float = 1.0, + num_heads: int = 4, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + downsample: int = 2, + downsample_type: str = "patch_merge", + upsample_type: str = "upsample", + upsample_kernel_size: int = 3, + enc_depth: Tuple[int, ...] = [4, 4, 4], + enc_attn_patterns: str = None, + enc_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + enc_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "d", "d"), + ], + enc_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (0, 0, 0)], + enc_use_inter_ffn: bool = True, + dec_depth: Tuple[int, ...] = [2, 2], + dec_cross_start: int = 0, + dec_self_attn_patterns: str = None, + dec_self_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + dec_self_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "d", "d"), + ], + dec_self_shift_size: Tuple[Tuple[int, ...], ...] = [(1, 1, 1), (0, 0, 0)], + dec_cross_attn_patterns: str = None, + dec_cross_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], + dec_cross_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "l", "l"), + ], + dec_cross_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (0, 0)], + dec_cross_n_temporal: Tuple[int, ...] = [1, 2], + dec_cross_last_n_frames: int = None, + dec_use_inter_ffn: bool = True, + dec_hierarchical_pos_embed: bool = False, + num_global_vectors: int = 4, + use_dec_self_global: bool = True, + dec_self_update_global: bool = True, + use_dec_cross_global: bool = True, + use_global_vector_ffn: bool = True, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + self_pattern: str = "axial", + cross_self_pattern: str = "axial", + cross_pattern: str = "cross_1x1", + z_init_method: str = "nearest_interp", + initial_downsample_type: str = "conv", + initial_downsample_activation: str = "leaky", + initial_downsample_scale: int = 1, + initial_downsample_conv_layers: int = 2, + final_upsample_conv_layers: int = 2, + initial_downsample_stack_conv_num_layers: int = 1, + initial_downsample_stack_conv_dim_list: Tuple[int, ...] = None, + initial_downsample_stack_conv_downscale_list: Tuple[int, ...] = [1], + initial_downsample_stack_conv_num_conv_list: Tuple[int, ...] 
= [2], + ffn_activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + padding_type: str = "ignore", + pos_embed_type: str = "t+hw", + checkpoint_level: bool = True, + use_relative_pos: bool = True, + self_attn_use_final_proj: bool = True, + dec_use_first_self_attn: bool = False, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + conv_init_mode: str = "0", + down_up_linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + rnc_config: dict = None, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.conv_init_mode = conv_init_mode + self.down_up_linear_init_mode = down_up_linear_init_mode + self.norm_init_mode = norm_init_mode + assert len(enc_depth) == len(dec_depth) + self.base_units = base_units + self.num_global_vectors = num_global_vectors + self.moe_config = moe_config + self.rnc_config = rnc_config + self.checkpoint_level = checkpoint_level + + num_blocks = len(enc_depth) + if isinstance(self_pattern, str): + enc_attn_patterns = [self_pattern] * num_blocks + if isinstance(cross_self_pattern, str): + dec_self_attn_patterns = [cross_self_pattern] * num_blocks + if isinstance(cross_pattern, str): + dec_cross_attn_patterns = [cross_pattern] * num_blocks + if global_dim_ratio != 1: + assert ( + separate_global_qkv is True + ), "Setting global_dim_ratio != 1 requires separate_global_qkv == True." + self.global_dim_ratio = global_dim_ratio + self.z_init_method = z_init_method + assert self.z_init_method in ["zeros", "nearest_interp", "last", "mean"] + self.input_shape = input_shape + self.target_shape = target_shape + T_in, H_in, W_in, C_in = input_shape + T_out, H_out, W_out, C_out = target_shape + assert H_in == H_out and W_in == W_out + if self.num_global_vectors > 0: + init_data = paddle.zeros( + (self.num_global_vectors, global_dim_ratio * base_units) + ) + self.init_global_vectors = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(0.0), + ) + + self.init_global_vectors.stop_gradient = not True + new_input_shape = self.get_initial_encoder_final_decoder( + initial_downsample_scale=initial_downsample_scale, + initial_downsample_type=initial_downsample_type, + activation=initial_downsample_activation, + initial_downsample_conv_layers=initial_downsample_conv_layers, + final_upsample_conv_layers=final_upsample_conv_layers, + padding_type=padding_type, + initial_downsample_stack_conv_num_layers=initial_downsample_stack_conv_num_layers, + initial_downsample_stack_conv_dim_list=initial_downsample_stack_conv_dim_list, + initial_downsample_stack_conv_downscale_list=initial_downsample_stack_conv_downscale_list, + initial_downsample_stack_conv_num_conv_list=initial_downsample_stack_conv_num_conv_list, + ) + T_in, H_in, W_in, _ = new_input_shape + self.encoder = cuboid_encoder.CuboidTransformerEncoder( + input_shape=(T_in, H_in, W_in, base_units), + base_units=base_units, + block_units=block_units, + scale_alpha=scale_alpha, + depth=enc_depth, + downsample=downsample, + downsample_type=downsample_type, + block_attn_patterns=enc_attn_patterns, + block_cuboid_size=enc_cuboid_size, + block_strategy=enc_cuboid_strategy, + block_shift_size=enc_shift_size, + num_heads=num_heads, + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + gated_ffn=gated_ffn, + ffn_activation=ffn_activation, + 
norm_layer=norm_layer, + use_inter_ffn=enc_use_inter_ffn, + padding_type=padding_type, + use_global_vector=num_global_vectors > 0, + use_global_vector_ffn=use_global_vector_ffn, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + self_attn_use_final_proj=self_attn_use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + conv_init_mode=conv_init_mode, + down_linear_init_mode=down_up_linear_init_mode, + norm_init_mode=norm_init_mode, + moe_config=moe_config, + ) + self.enc_pos_embed = cuboid_decoder.PosEmbed( + embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in + ) + mem_shapes = self.encoder.get_mem_shapes() + self.z_proj = nn.Linear( + in_features=mem_shapes[-1][-1], out_features=mem_shapes[-1][-1] + ) + self.dec_pos_embed = cuboid_decoder.PosEmbed( + embed_dim=mem_shapes[-1][-1], + typ=pos_embed_type, + maxT=T_out, + maxH=mem_shapes[-1][1], + maxW=mem_shapes[-1][2], + ) + self.decoder = cuboid_decoder.CuboidTransformerDecoder( + target_temporal_length=T_out, + mem_shapes=mem_shapes, + cross_start=dec_cross_start, + depth=dec_depth, + upsample_type=upsample_type, + block_self_attn_patterns=dec_self_attn_patterns, + block_self_cuboid_size=dec_self_cuboid_size, + block_self_shift_size=dec_self_shift_size, + block_self_cuboid_strategy=dec_self_cuboid_strategy, + block_cross_attn_patterns=dec_cross_attn_patterns, + block_cross_cuboid_hw=dec_cross_cuboid_hw, + block_cross_shift_hw=dec_cross_shift_hw, + block_cross_cuboid_strategy=dec_cross_cuboid_strategy, + block_cross_n_temporal=dec_cross_n_temporal, + cross_last_n_frames=dec_cross_last_n_frames, + num_heads=num_heads, + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + upsample_kernel_size=upsample_kernel_size, + ffn_activation=ffn_activation, + gated_ffn=gated_ffn, + norm_layer=norm_layer, + use_inter_ffn=dec_use_inter_ffn, + max_temporal_relative=T_in + T_out, + padding_type=padding_type, + hierarchical_pos_embed=dec_hierarchical_pos_embed, + pos_embed_type=pos_embed_type, + use_self_global=num_global_vectors > 0 and use_dec_self_global, + self_update_global=dec_self_update_global, + use_cross_global=num_global_vectors > 0 and use_dec_cross_global, + use_global_vector_ffn=use_global_vector_ffn, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + self_attn_use_final_proj=self_attn_use_final_proj, + use_first_self_attn=dec_use_first_self_attn, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + conv_init_mode=conv_init_mode, + up_linear_init_mode=down_up_linear_init_mode, + norm_init_mode=norm_init_mode, + moe_config=moe_config, + ) + + if rnc_config["use_rnc"]: + self.rnc_cri = extformer_moe_utils.RnCLoss(rnc_config) + + self.reset_parameters() + + def get_initial_encoder_final_decoder( + self, + initial_downsample_type, + activation, + initial_downsample_scale, + initial_downsample_conv_layers, + final_upsample_conv_layers, + padding_type, + initial_downsample_stack_conv_num_layers, + initial_downsample_stack_conv_dim_list, + initial_downsample_stack_conv_downscale_list, + initial_downsample_stack_conv_num_conv_list, + ): + T_in, H_in, W_in, C_in = self.input_shape + T_out, H_out, W_out, C_out = self.target_shape + 
self.initial_downsample_type = initial_downsample_type + if self.initial_downsample_type == "conv": + if isinstance(initial_downsample_scale, int): + initial_downsample_scale = ( + 1, + initial_downsample_scale, + initial_downsample_scale, + ) + elif len(initial_downsample_scale) == 2: + initial_downsample_scale = 1, *initial_downsample_scale + elif len(initial_downsample_scale) == 3: + initial_downsample_scale = tuple(initial_downsample_scale) + else: + raise NotImplementedError( + f"initial_downsample_scale {initial_downsample_scale} format not supported!" + ) + self.initial_encoder = InitialEncoder( + dim=C_in, + out_dim=self.base_units, + downsample_scale=initial_downsample_scale, + num_conv_layers=initial_downsample_conv_layers, + padding_type=padding_type, + activation=activation, + conv_init_mode=self.conv_init_mode, + linear_init_mode=self.down_up_linear_init_mode, + norm_init_mode=self.norm_init_mode, + ) + + self.final_decoder = FinalDecoder( + dim=self.base_units, + target_thw=(T_out, H_out, W_out), + num_conv_layers=final_upsample_conv_layers, + activation=activation, + conv_init_mode=self.conv_init_mode, + linear_init_mode=self.down_up_linear_init_mode, + norm_init_mode=self.norm_init_mode, + ) + new_input_shape = self.initial_encoder.patch_merge.get_out_shape( + self.input_shape + ) + self.dec_final_proj = nn.Linear( + in_features=self.base_units, out_features=C_out + ) + elif self.initial_downsample_type == "stack_conv": + if initial_downsample_stack_conv_dim_list is None: + initial_downsample_stack_conv_dim_list = [ + self.base_units + ] * initial_downsample_stack_conv_num_layers + self.initial_encoder = InitialStackPatchMergingEncoder( + num_merge=initial_downsample_stack_conv_num_layers, + in_dim=C_in, + out_dim_list=initial_downsample_stack_conv_dim_list, + downsample_scale_list=initial_downsample_stack_conv_downscale_list, + num_conv_per_merge_list=initial_downsample_stack_conv_num_conv_list, + padding_type=padding_type, + activation=activation, + conv_init_mode=self.conv_init_mode, + linear_init_mode=self.down_up_linear_init_mode, + norm_init_mode=self.norm_init_mode, + ) + initial_encoder_out_shape_list = self.initial_encoder.get_out_shape_list( + self.target_shape + ) + ( + dec_target_shape_list, + dec_in_dim, + ) = FinalStackUpsamplingDecoder.get_init_params( + enc_input_shape=self.target_shape, + enc_out_shape_list=initial_encoder_out_shape_list, + large_channel=True, + ) + self.final_decoder = FinalStackUpsamplingDecoder( + target_shape_list=dec_target_shape_list, + in_dim=dec_in_dim, + num_conv_per_up_list=initial_downsample_stack_conv_num_conv_list[::-1], + activation=activation, + conv_init_mode=self.conv_init_mode, + linear_init_mode=self.down_up_linear_init_mode, + norm_init_mode=self.norm_init_mode, + ) + self.dec_final_proj = nn.Linear( + in_features=dec_target_shape_list[-1][-1], out_features=C_out + ) + new_input_shape = self.initial_encoder.get_out_shape_list(self.input_shape)[ + -1 + ] + else: + raise NotImplementedError(f"{self.initial_downsample_type} is invalid.") + self.input_shape_after_initial_downsample = new_input_shape + T_in, H_in, W_in, _ = new_input_shape + return new_input_shape + + def reset_parameters(self): + if self.num_global_vectors > 0: + self.init_global_vectors = initializer.trunc_normal_( + self.init_global_vectors, std=0.02 + ) + if hasattr(self.initial_encoder, "reset_parameters"): + self.initial_encoder.reset_parameters() + else: + cuboid_utils.apply_initialization( + self.initial_encoder, + conv_mode=self.conv_init_mode, + 
linear_mode=self.down_up_linear_init_mode, + norm_mode=self.norm_init_mode, + ) + if hasattr(self.final_decoder, "reset_parameters"): + self.final_decoder.reset_parameters() + else: + cuboid_utils.apply_initialization( + self.final_decoder, + conv_mode=self.conv_init_mode, + linear_mode=self.down_up_linear_init_mode, + norm_mode=self.norm_init_mode, + ) + cuboid_utils.apply_initialization( + self.dec_final_proj, linear_mode=self.down_up_linear_init_mode + ) + self.encoder.reset_parameters() + self.enc_pos_embed.reset_parameters() + self.decoder.reset_parameters() + self.dec_pos_embed.reset_parameters() + cuboid_utils.apply_initialization(self.z_proj, linear_mode="0") + + def get_initial_z(self, final_mem, T_out): + B = final_mem.shape[0] + if self.z_init_method == "zeros": + z_shape = list((1, T_out)) + final_mem.shape[2:] + initial_z = paddle.zeros(shape=z_shape, dtype=final_mem.dtype) + initial_z = self.z_proj(self.dec_pos_embed(initial_z)).expand( + shape=[B, -1, -1, -1, -1] + ) + elif self.z_init_method == "nearest_interp": + initial_z = nn.functional.interpolate( + x=final_mem.transpose(perm=[0, 4, 1, 2, 3]), + size=(T_out, final_mem.shape[2], final_mem.shape[3]), + ).transpose(perm=[0, 2, 3, 4, 1]) + initial_z = self.z_proj(initial_z) + elif self.z_init_method == "last": + initial_z = paddle.broadcast_to( + x=final_mem[:, -1:, :, :, :], shape=(B, T_out) + final_mem.shape[2:] + ) + initial_z = self.z_proj(initial_z) + elif self.z_init_method == "mean": + initial_z = paddle.broadcast_to( + x=final_mem.mean(axis=1, keepdims=True), + shape=(B, T_out) + final_mem.shape[2:], + ) + initial_z = self.z_proj(initial_z) + else: + raise NotImplementedError + return initial_z + + def forward(self, x: "paddle.Tensor", verbose: bool = False) -> "paddle.Tensor": + """ + Args: + x (paddle.Tensor): Tensor with shape (B, T, H, W, C). + verbose (bool): if True, print intermediate shapes. 
+ + Returns: + out (paddle.Tensor): The output Shape (B, T_out, H, W, C_out) + """ + + labels = x["sst_target"] + x = self.concat_to_tensor(x, self.input_keys) + flag_ndim = x.ndim + if flag_ndim == 6: + x = x.reshape([-1, *x.shape[2:]]) + B, _, _, _, _ = x.shape + + T_out = self.target_shape[0] + x = self.initial_encoder(x) + x = self.enc_pos_embed(x) + + if self.num_global_vectors > 0: + init_global_vectors = self.init_global_vectors.expand( + shape=[ + B, + self.num_global_vectors, + self.global_dim_ratio * self.base_units, + ] + ) + mem_l, mem_global_vector_l = self.encoder(x, init_global_vectors) + else: + mem_l = self.encoder(x) + + if verbose: + for i, mem in enumerate(mem_l): + print(f"mem[{i}].shape = {mem.shape}") + initial_z = self.get_initial_z(final_mem=mem_l[-1], T_out=T_out) + + if self.num_global_vectors > 0: + dec_out = self.decoder(initial_z, mem_l, mem_global_vector_l) + else: + dec_out = self.decoder(initial_z, mem_l) + + dec_out = self.final_decoder(dec_out) + out = self.dec_final_proj(dec_out) + + if flag_ndim == 6: + out = out.reshape([-1, *out.shape]) + + out_dict = {key: out for key in self.output_keys[:2]} + + # moe loss + if self.training: + aux_losses = extformer_moe_utils.aggregate_aux_losses(self) + if len(aux_losses) > 0: + aux_loss = paddle.concat(aux_losses).mean() + else: + aux_loss = None + else: + aux_loss = None + assert "aux_loss" in self.output_keys + out_dict["aux_loss"] = aux_loss + + # rnc + if self.training and self.rnc_config["use_rnc"]: + rank_loss = self.rnc_cri(dec_out, labels) + rank_loss = rank_loss.unsqueeze(0) + else: + rank_loss = None + assert "rank_loss" in self.output_keys + out_dict["rank_loss"] = rank_loss + + return out_dict diff --git a/ppsci/arch/extformer_moe_cuboid_decoder.py b/ppsci/arch/extformer_moe_cuboid_decoder.py index aee77f7a8a..9e8970980a 100644 --- a/ppsci/arch/extformer_moe_cuboid_decoder.py +++ b/ppsci/arch/extformer_moe_cuboid_decoder.py @@ -1,1475 +1,1475 @@ -from functools import lru_cache -from typing import Tuple - -import numpy as np -import paddle -import paddle.nn.functional as F -from paddle import nn -from paddle.distributed import fleet - -import ppsci.arch.extformer_moe_cuboid_encoder as cuboid_encoder -import ppsci.arch.extformer_moe_cuboid_utils as cuboid_utils -import ppsci.arch.extformer_moe_utils as moe_utils -from ppsci.utils import initializer - - -class PosEmbed(nn.Layer): - """pose embeding - - Args: - embed_dim (int): The dimension of embeding. - maxT (int): The embeding max time. - maxH (int): The embeding max height. - maxW (int): The embeding max width. - typ (str): - The type of the positional embedding. 
- - t+h+w: - Embed the spatial position to embeddings - - t+hw: - Embed the spatial position to embeddings - """ - - def __init__( - self, - embed_dim, - maxT, - maxH, - maxW, - typ: str = "t+h+w", - moe_config: dict = None, - ): - super(PosEmbed, self).__init__() - self.typ = typ - assert self.typ in ["t+h+w", "t+hw"] - self.maxT = maxT - self.maxH = maxH - self.maxW = maxW - self.embed_dim = embed_dim - if self.typ == "t+h+w": - self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim) - self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim) - self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim) - elif self.typ == "t+hw": - self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim) - self.HW_embed = nn.Embedding( - num_embeddings=maxH * maxW, embedding_dim=embed_dim - ) - else: - raise NotImplementedError(f"{self.typ} is invalid.") - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization(m, embed_mode="0") - - def forward(self, x): - """ - Args: - x : Shape (B, T, H, W, C) - - Returns: - out : the x + positional embeddings - """ - - _, T, H, W, _ = x.shape - t_idx = paddle.arange(end=T) - h_idx = paddle.arange(end=H) - w_idx = paddle.arange(end=W) - if self.typ == "t+h+w": - return ( - x - + self.T_embed(t_idx).reshape([T, 1, 1, self.embed_dim]) - + self.H_embed(h_idx).reshape([1, H, 1, self.embed_dim]) - + self.W_embed(w_idx).reshape([1, 1, W, self.embed_dim]) - ) - elif self.typ == "t+hw": - spatial_idx = h_idx.unsqueeze(axis=-1) * self.maxW + w_idx - return ( - x - + self.T_embed(t_idx).reshape([T, 1, 1, self.embed_dim]) - + self.HW_embed(spatial_idx) - ) - else: - raise NotImplementedError(f"{self.typ} is invalid.") - - -@lru_cache() -def compute_cuboid_cross_attention_mask( - T_x, T_mem, H, W, n_temporal, cuboid_hw, shift_hw, strategy, padding_type, device -): - pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal - pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal - pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0] - pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1] - mem_cuboid_size = ((T_mem + pad_t_mem) // n_temporal,) + cuboid_hw - x_cuboid_size = ((T_x + pad_t_x) // n_temporal,) + cuboid_hw - if pad_t_mem > 0 or pad_h > 0 or pad_w > 0: - if padding_type == "ignore": - mem_mask = paddle.ones(shape=(1, T_mem, H, W, 1), dtype="bool") - mem_mask = F.pad( - mem_mask, [0, 0, 0, pad_w, 0, pad_h, pad_t_mem, 0], data_format="NDHWC" - ) - else: - mem_mask = paddle.ones( - shape=(1, T_mem + pad_t_mem, H + pad_h, W + pad_w, 1), dtype="bool" - ) - if pad_t_x > 0 or pad_h > 0 or pad_w > 0: - if padding_type == "ignore": - x_mask = paddle.ones(shape=(1, T_x, H, W, 1), dtype="bool") - x_mask = F.pad( - x_mask, [0, 0, 0, pad_w, 0, pad_h, 0, pad_t_x], data_format="NDHWC" - ) - else: - x_mask = paddle.ones( - shape=(1, T_x + pad_t_x, H + pad_h, W + pad_w, 1), dtype="bool" - ) - if any(i > 0 for i in shift_hw): - if padding_type == "ignore": - x_mask = paddle.roll( - x=x_mask, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) - ) - mem_mask = paddle.roll( - x=mem_mask, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) - ) - x_mask = cuboid_encoder.cuboid_reorder(x_mask, x_cuboid_size, strategy=strategy) - x_mask = x_mask.squeeze(axis=-1).squeeze(axis=0) - num_cuboids, x_cuboid_volume = x_mask.shape - mem_mask = cuboid_encoder.cuboid_reorder( - mem_mask, mem_cuboid_size, strategy=strategy - ) - mem_mask = 
mem_mask.squeeze(axis=-1).squeeze(axis=0) - _, mem_cuboid_volume = mem_mask.shape - shift_mask = np.zeros(shape=(1, n_temporal, H + pad_h, W + pad_w, 1)) - cnt = 0 - for h in ( - slice(-cuboid_hw[0]), - slice(-cuboid_hw[0], -shift_hw[0]), - slice(-shift_hw[0], None), - ): - for w in ( - slice(-cuboid_hw[1]), - slice(-cuboid_hw[1], -shift_hw[1]), - slice(-shift_hw[1], None), - ): - shift_mask[:, :, h, w, :] = cnt - cnt += 1 - shift_mask = paddle.to_tensor(shift_mask) - shift_mask = cuboid_encoder.cuboid_reorder( - shift_mask, (1,) + cuboid_hw, strategy=strategy - ) - shift_mask = shift_mask.squeeze(axis=-1).squeeze(axis=0) - shift_mask = shift_mask.unsqueeze(axis=1) - shift_mask.unsqueeze(axis=2) == 0 - bh_bw = cuboid_hw[0] * cuboid_hw[1] - attn_mask = ( - shift_mask.reshape((num_cuboids, 1, bh_bw, 1, bh_bw)) - * x_mask.reshape((num_cuboids, -1, bh_bw, 1, 1)) - * mem_mask.reshape([num_cuboids, 1, 1, -1, bh_bw]) - ) - attn_mask = attn_mask.reshape([num_cuboids, x_cuboid_volume, mem_cuboid_volume]) - return attn_mask - - -class CuboidCrossAttentionLayer(nn.Layer): - """Implements the cuboid cross attention. - - The idea of Cuboid Cross Attention is to extend the idea of cuboid self attention to work for the - encoder-decoder-type cross attention. - - Assume that there is a memory tensor with shape (T1, H, W, C) and another query tensor with shape (T2, H, W, C), - - Here, we decompose the query tensor and the memory tensor into the same number of cuboids and attend the cuboid in - the query tensor with the corresponding cuboid in the memory tensor. - - For the height and width axes, we reuse the grid decomposition techniques described in the cuboid self-attention. - For the temporal axis, the layer supports the "n_temporal" parameter, that controls the number of cuboids we can - get after cutting the tensors. For example, if the temporal dilation is 2, both the query and - memory will be decomposed into 2 cuboids along the temporal axis. Like in the Cuboid Self-attention, - we support "local" and "dilated" decomposition strategy. - - The complexity of the layer is O((T2 / n_t * Bh * Bw) * (T1 / n_t * Bh * Bw) * n_t (H / Bh) (W / Bw)) = O(T2 * T1 / n_t H W Bh Bw) - - Args: - dim (int): The dimention of input tensor. - num_heads (int): The number of head. - n_temporal (int, optional): The num of temporal. Defaults to 1. - cuboid_hw (tuple, optional): The height and width of cuboid. Defaults to (7, 7). - shift_hw (tuple, optional): The height and width of shift. Defaults to (0, 0). - strategy (tuple, optional): The strategy. Defaults to ("d", "l", "l"). - padding_type (str, optional): The type of padding. Defaults to "ignore". - cross_last_n_frames (int, optional): The cross_last_n_frames of decoder. Defaults to None. - qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. - qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projrction dropout. Defaults to 0.0. - max_temporal_relative (int, optional): The max temporal. Defaults to 50. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to True. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. 
- global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. - checkpoint_level (int, optional): Whether to enable gradient checkpointing. Defaults to 1. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - num_heads: int, - n_temporal: int = 1, - cuboid_hw: Tuple[int, ...] = (7, 7), - shift_hw: Tuple[int, ...] = (0, 0), - strategy: Tuple[str, ...] = ("d", "l", "l"), - padding_type: str = "ignore", - cross_last_n_frames: int = None, - qkv_bias: bool = False, - qk_scale: float = None, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - max_temporal_relative: int = 50, - norm_layer: str = "layer_norm", - use_global_vector: bool = True, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - checkpoint_level: int = 1, - use_relative_pos: bool = True, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super(CuboidCrossAttentionLayer, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.norm_init_mode = norm_init_mode - self.dim = dim - self.num_heads = num_heads - self.n_temporal = n_temporal - assert n_temporal > 0 - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - shift_hw = list(shift_hw) - if strategy[1] == "d": - shift_hw[0] = 0 - if strategy[2] == "d": - shift_hw[1] = 0 - self.cuboid_hw = cuboid_hw - self.shift_hw = tuple(shift_hw) - self.strategy = strategy - self.padding_type = padding_type - self.max_temporal_relative = max_temporal_relative - self.cross_last_n_frames = cross_last_n_frames - self.use_relative_pos = use_relative_pos - self.use_global_vector = use_global_vector - self.separate_global_qkv = separate_global_qkv - if global_dim_ratio != 1 and separate_global_qkv is False: - raise ValueError( - "Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
- ) - self.global_dim_ratio = global_dim_ratio - if self.padding_type not in ["ignore", "zeros", "nearest"]: - raise ValueError('padding_type should be ["ignore", "zeros", "nearest"]') - if use_relative_pos: - init_data = paddle.zeros( - ( - (2 * max_temporal_relative - 1) - * (2 * cuboid_hw[0] - 1) - * (2 * cuboid_hw[1] - 1), - num_heads, - ) - ) - self.relative_position_bias_table = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(0.0), - ) - self.relative_position_bias_table.stop_gradient = not True - self.relative_position_bias_table = initializer.trunc_normal_( - self.relative_position_bias_table, std=0.02 - ) - - coords_t = paddle.arange(end=max_temporal_relative) - coords_h = paddle.arange(end=self.cuboid_hw[0]) - coords_w = paddle.arange(end=self.cuboid_hw[1]) - coords = paddle.stack(x=paddle.meshgrid(coords_t, coords_h, coords_w)) - coords_flatten = paddle.flatten(x=coords, start_axis=1) - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] - relative_coords = relative_coords.transpose(perm=[1, 2, 0]) - relative_coords[:, :, 0] += max_temporal_relative - 1 - relative_coords[:, :, 1] += self.cuboid_hw[0] - 1 - relative_coords[:, :, 2] += self.cuboid_hw[1] - 1 - relative_position_index = ( - relative_coords[:, :, 0] - * (2 * self.cuboid_hw[0] - 1) - * (2 * self.cuboid_hw[1] - 1) - + relative_coords[:, :, 1] * (2 * self.cuboid_hw[1] - 1) - + relative_coords[:, :, 2] - ) - self.register_buffer( - name="relative_position_index", tensor=relative_position_index - ) - self.q_proj = nn.Linear(in_features=dim, out_features=dim, bias_attr=qkv_bias) - self.kv_proj = nn.Linear( - in_features=dim, out_features=dim * 2, bias_attr=qkv_bias - ) - self.attn_drop = nn.Dropout(p=attn_drop) - self.proj = nn.Linear(in_features=dim, out_features=dim) - self.proj_drop = nn.Dropout(p=proj_drop) - if self.use_global_vector: - if self.separate_global_qkv: - self.l2g_q_net = nn.Linear( - in_features=dim, out_features=dim, bias_attr=qkv_bias - ) - self.l2g_global_kv_net = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=dim * 2, - bias_attr=qkv_bias, - ) - self.norm = cuboid_utils.get_norm_layer(norm_layer, in_channels=dim) - self._checkpoint_level = checkpoint_level - self.reset_parameters() - - def reset_parameters(self): - cuboid_utils.apply_initialization( - self.q_proj, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.kv_proj, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.proj, linear_mode=self.ffn_linear_init_mode - ) - cuboid_utils.apply_initialization(self.norm, norm_mode=self.norm_init_mode) - if self.use_global_vector: - if self.separate_global_qkv: - cuboid_utils.apply_initialization( - self.l2g_q_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.l2g_global_kv_net, linear_mode=self.attn_linear_init_mode - ) - - def forward(self, x, mem, mem_global_vectors=None): - """Calculate the forward - - Along the temporal axis, we pad the mem tensor from the left and the x tensor from the right so that the - relative position encoding can be calculated correctly. 
For example: - - mem: 0, 1, 2, 3, 4 - x: 0, 1, 2, 3, 4, 5 - - n_temporal = 1 - mem: 0, 1, 2, 3, 4 x: 0, 1, 2, 3, 4, 5 - - n_temporal = 2 - mem: pad, 1, 3 x: 0, 2, 4 - mem: 0, 2, 4 x: 1, 3, 5 - - n_temporal = 3 - mem: pad, 2 dec: 0, 3 - mem: 0, 3 dec: 1, 4 - mem: 1, 4 dec: 2, 5 - - Args: - x (paddle.Tensor): The input of the layer. It will have shape (B, T, H, W, C) - mem (paddle.Tensor): The memory. It will have shape (B, T_mem, H, W, C) - mem_global_vectors (paddle.Tensor): The global vectors from the memory. It will have shape (B, N, C) - - Returns: - out (paddle.Tensor): Output tensor should have shape (B, T, H, W, C_out) - """ - - if self.cross_last_n_frames is not None: - cross_last_n_frames = int(min(self.cross_last_n_frames, mem.shape[1])) - mem = mem[:, -cross_last_n_frames:, ...] - if self.use_global_vector: - _, num_global, _ = mem_global_vectors.shape - x = self.norm(x) - B, T_x, H, W, C_in = x.shape - B_mem, T_mem, H_mem, W_mem, C_mem = mem.shape - assert T_x < self.max_temporal_relative and T_mem < self.max_temporal_relative - cuboid_hw = self.cuboid_hw - n_temporal = self.n_temporal - shift_hw = self.shift_hw - assert ( - B_mem == B and H == H_mem and W == W_mem and C_in == C_mem - ), f"Shape of memory and the input tensor does not match. x.shape={x.shape}, mem.shape={mem.shape}" - pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal - pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal - pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0] - pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1] - mem = cuboid_utils.generalize_padding( - mem, pad_t_mem, pad_h, pad_w, self.padding_type, t_pad_left=True - ) - - x = cuboid_utils.generalize_padding( - x, pad_t_x, pad_h, pad_w, self.padding_type, t_pad_left=False - ) - - if any(i > 0 for i in shift_hw): - shifted_x = paddle.roll( - x=x, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) - ) - shifted_mem = paddle.roll( - x=mem, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) - ) - else: - shifted_x = x - shifted_mem = mem - mem_cuboid_size = (mem.shape[1] // n_temporal,) + cuboid_hw - x_cuboid_size = (x.shape[1] // n_temporal,) + cuboid_hw - reordered_mem = cuboid_encoder.cuboid_reorder( - shifted_mem, cuboid_size=mem_cuboid_size, strategy=self.strategy - ) - reordered_x = cuboid_encoder.cuboid_reorder( - shifted_x, cuboid_size=x_cuboid_size, strategy=self.strategy - ) - _, num_cuboids_mem, mem_cuboid_volume, _ = reordered_mem.shape - _, num_cuboids, x_cuboid_volume, _ = reordered_x.shape - assert ( - num_cuboids_mem == num_cuboids - ), f"Number of cuboids do not match. 
num_cuboids={num_cuboids}, num_cuboids_mem={num_cuboids_mem}" - attn_mask = compute_cuboid_cross_attention_mask( - T_x, - T_mem, - H, - W, - n_temporal, - cuboid_hw, - shift_hw, - strategy=self.strategy, - padding_type=self.padding_type, - device=x.place, - ) - head_C = C_in // self.num_heads - kv = ( - self.kv_proj(reordered_mem) - .reshape([B, num_cuboids, mem_cuboid_volume, 2, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - k, v = kv[0], kv[1] - q = ( - self.q_proj(reordered_x) - .reshape([B, num_cuboids, x_cuboid_volume, self.num_heads, head_C]) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - q = q * self.scale - perm_4 = list(range(k.ndim)) - perm_4[-2] = -1 - perm_4[-1] = -2 - attn_score = q @ k.transpose(perm=perm_4) - if self.use_relative_pos: - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index[ - :x_cuboid_volume, :mem_cuboid_volume - ].reshape([-1]) - ].reshape([x_cuboid_volume, mem_cuboid_volume, -1]) - relative_position_bias = relative_position_bias.transpose( - perm=[2, 0, 1] - ).unsqueeze(axis=1) - attn_score = attn_score + relative_position_bias - if self.use_global_vector: - if self.separate_global_qkv: - l2g_q = ( - self.l2g_q_net(reordered_x) - .reshape([B, num_cuboids, x_cuboid_volume, self.num_heads, head_C]) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - l2g_q = l2g_q * self.scale - l2g_global_kv = ( - self.l2g_global_kv_net(mem_global_vectors) - .reshape([B, 1, num_global, 2, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1] - else: - kv_global = ( - self.kv_proj(mem_global_vectors) - .reshape([B, 1, num_global, 2, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - l2g_global_k, l2g_global_v = kv_global[0], kv_global[1] - l2g_q = q - perm_5 = list(range(l2g_global_k.ndim)) - perm_5[-2] = -1 - perm_5[-1] = -2 - l2g_attn_score = l2g_q @ l2g_global_k.transpose(perm=perm_5) - attn_score_l2l_l2g = paddle.concat(x=(attn_score, l2g_attn_score), axis=-1) - if attn_mask.ndim == 5: - attn_mask_l2l_l2g = F.pad( - attn_mask, [0, num_global], "constant", 1, data_format="NDHWC" - ) - else: - attn_mask_l2l_l2g = F.pad(attn_mask, [0, num_global], "constant", 1) - v_l_g = paddle.concat( - x=( - v, - l2g_global_v.expand( - shape=[B, self.num_heads, num_cuboids, num_global, head_C] - ), - ), - axis=3, - ) - attn_score_l2l_l2g = cuboid_encoder.masked_softmax( - attn_score_l2l_l2g, mask=attn_mask_l2l_l2g - ) - attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) - reordered_x = ( - (attn_score_l2l_l2g @ v_l_g) - .transpose(perm=[0, 2, 3, 1, 4]) - .reshape(B, num_cuboids, x_cuboid_volume, self.dim) - ) - else: - attn_score = cuboid_encoder.masked_softmax(attn_score, mask=attn_mask) - attn_score = self.attn_drop(attn_score) - reordered_x = ( - (attn_score @ v) - .transpose(perm=[0, 2, 3, 1, 4]) - .reshape([B, num_cuboids, x_cuboid_volume, self.dim]) - ) - reordered_x = paddle.cast(reordered_x, dtype="float32") - reordered_x = self.proj_drop(self.proj(reordered_x)) - shifted_x = cuboid_encoder.cuboid_reorder_reverse( - reordered_x, - cuboid_size=x_cuboid_size, - strategy=self.strategy, - orig_data_shape=(x.shape[1], x.shape[2], x.shape[3]), - ) - if any(i > 0 for i in shift_hw): - x = paddle.roll(x=shifted_x, shifts=(shift_hw[0], shift_hw[1]), axis=(2, 3)) - else: - x = shifted_x - x = cuboid_utils.generalize_unpadding( - x, pad_t=pad_t_x, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type - ) - return x - - -class 
StackCuboidCrossAttentionBlock(nn.Layer): - """A stack of cuboid cross attention layers. - - The advantage of cuboid attention is that we can combine cuboid attention building blocks with different - hyper-parameters to mimic a broad range of space-time correlation patterns. - - - "use_inter_ffn" is True - x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out - | ^ | ^ - | | | | - |-------------|----|-------------| - - "use_inter_ffn" is False - x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem - | ^ | ^ ^ | ^ - | | | | | | | - |-------------|----|------------|-- ----------|--|-----------| - - Args: - dim (int): The dimension of the input. - num_heads (int): The number of head. - block_cuboid_hw (list, optional): The height and width of block cuboid.Defaults to [(4, 4), (4, 4)]. - block_shift_hw (list, optional): The height and width of shift cuboid . Defaults to [(0, 0), (2, 2)]. - block_n_temporal (list, optional): The length of block temporal. Defaults to [1, 2]. - block_strategy (list, optional): The strategy of block. Defaults to [("d", "d", "d"), ("l", "l", "l")]. - padding_type (str, optional): The type of paddling. Defaults to "ignore". - cross_last_n_frames (int, optional): The num of cross_last_n_frames. Defaults to None. - qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. - qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projection dropout. Defaults to 0.0. - ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. - activation (str, optional): The activation. Defaults to "leaky". - gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to True. - max_temporal_relative (int, optional): The max temporal. Defaults to 50. - checkpoint_level (int, optional): Whether to enable gradient checkpointing. Defaults to 1. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - num_heads: int, - block_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], - block_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (2, 2)], - block_n_temporal: Tuple[int, ...] = [1, 2], - block_strategy: Tuple[Tuple[str, ...], ...] 
= [ - ("d", "d", "d"), - ("l", "l", "l"), - ], - padding_type: str = "ignore", - cross_last_n_frames: int = None, - qkv_bias: bool = False, - qk_scale: float = None, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - use_inter_ffn: bool = True, - max_temporal_relative: int = 50, - checkpoint_level: int = 1, - use_relative_pos: bool = True, - use_global_vector: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - expert_shape: tuple = None, - ): - super(StackCuboidCrossAttentionBlock, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.norm_init_mode = norm_init_mode - if ( - len(block_cuboid_hw[0]) <= 0 - or len(block_shift_hw) <= 0 - or len(block_strategy) <= 0 - ): - raise ValueError( - "Incorrect format.The lengths of block_cuboid_hw[0], block_shift_hw, and block_strategy must be greater than zero." - ) - if len(block_cuboid_hw) != len(block_shift_hw) and len(block_shift_hw) == len( - block_strategy - ): - raise ValueError( - "The lengths of block_cuboid_size, block_shift_size, and block_strategy must be equal." - ) - - self.num_attn = len(block_cuboid_hw) - self.checkpoint_level = checkpoint_level - self.use_inter_ffn = use_inter_ffn - self.use_global_vector = use_global_vector - if self.use_inter_ffn: - if moe_config["use_ffn_moe"]: - self.ffn_l = nn.LayerList( - sublayers=[ - cuboid_encoder.MixtureFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - for _ in range(self.num_attn) - ] - ) - else: - self.ffn_l = nn.LayerList( - sublayers=[ - cuboid_encoder.PositionwiseFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - for _ in range(self.num_attn) - ] - ) - else: - if moe_config["use_ffn_moe"]: - self.ffn_l = nn.LayerList( - sublayers=[ - cuboid_encoder.MixtureFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - ] - ) - else: - self.ffn_l = nn.LayerList( - sublayers=[ - cuboid_encoder.PositionwiseFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - ] - ) - - if moe_config["use_attn_moe"]: - self.attn_l = nn.LayerList( - sublayers=[ - MixtureCrossAttention( - dim=dim, - num_heads=num_heads, - cuboid_hw=ele_cuboid_hw, - shift_hw=ele_shift_hw, - strategy=ele_strategy, - n_temporal=ele_n_temporal, 
- cross_last_n_frames=cross_last_n_frames, - padding_type=padding_type, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=proj_drop, - norm_layer=norm_layer, - max_temporal_relative=max_temporal_relative, - use_global_vector=use_global_vector, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal in zip( - block_cuboid_hw, - block_shift_hw, - block_strategy, - block_n_temporal, - ) - ] - ) - else: - self.attn_l = nn.LayerList( - sublayers=[ - CuboidCrossAttentionLayer( - dim=dim, - num_heads=num_heads, - cuboid_hw=ele_cuboid_hw, - shift_hw=ele_shift_hw, - strategy=ele_strategy, - n_temporal=ele_n_temporal, - cross_last_n_frames=cross_last_n_frames, - padding_type=padding_type, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=proj_drop, - norm_layer=norm_layer, - max_temporal_relative=max_temporal_relative, - use_global_vector=use_global_vector, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal in zip( - block_cuboid_hw, - block_shift_hw, - block_strategy, - block_n_temporal, - ) - ] - ) - - def reset_parameters(self): - for m in self.ffn_l: - m.reset_parameters() - for m in self.attn_l: - m.reset_parameters() - - def forward(self, x, mem, mem_global_vector=None): - """ - Args: - x (paddle.Tensor): Shape (B, T_x, H, W, C) - mem (paddle.Tensor): Shape (B, T_mem, H, W, C) - mem_global_vector (paddle.Tensor): Shape (B, N_global, C) - - Returns: - out (paddle.Tensor): (B, T_x, H, W, C_out) - """ - - if self.use_inter_ffn: - for attn, ffn in zip(self.attn_l, self.ffn_l): - if self.checkpoint_level >= 2 and self.training: - x = x + fleet.utils.recompute(attn, x, mem, mem_global_vector) - else: - x = x + attn(x, mem, mem_global_vector) - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(ffn, x) - else: - x = ffn(x) - return x - else: - for attn in self.attn_l: - if self.checkpoint_level >= 2 and self.training: - x = x + fleet.utils.recompute(attn, x, mem, mem_global_vector) - else: - x = x + attn(x, mem, mem_global_vector) - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(self.ffn_l[0], x) - else: - x = self.ffn_l[0](x) - return x - - -class Upsample3DLayer(nn.Layer): - """Upsampling based on nn.UpSampling and Conv3x3. - - If the temporal dimension remains the same: - x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim) - Else: - x --> interpolation-3d (nearest) --> conv3x3x3(dim, out_dim) - - Args: - dim (int): The dimension of the input tensor. - out_dim (int): The dimension of the output tensor. - target_size (Tuple[int,...]): The size of output tensor. - temporal_upsample (bool, optional): Whether the temporal axis will go through upsampling. Defaults to False. - kernel_size (int, optional): The kernel size of the Conv2D layer. Defaults to 3. - layout (str, optional): The layout of the inputs. Defaults to "THWC". 
- conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - out_dim: int, - target_size: Tuple[int, ...], - temporal_upsample: bool = False, - kernel_size: int = 3, - layout: str = "THWC", - conv_init_mode: str = "0", - moe_config: dict = None, - ): - super(Upsample3DLayer, self).__init__() - self.conv_init_mode = conv_init_mode - self.target_size = target_size - self.out_dim = out_dim - self.temporal_upsample = temporal_upsample - if temporal_upsample: - self.up = nn.Upsample(size=target_size, mode="nearest") - else: - self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode="nearest") - self.conv = nn.Conv2D( - in_channels=dim, - out_channels=out_dim, - kernel_size=(kernel_size, kernel_size), - padding=(kernel_size // 2, kernel_size // 2), - ) - assert layout in ["THWC", "CTHW"] - self.layout = layout - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization(m, conv_mode=self.conv_init_mode) - - def forward(self, x): - """ - - Args: - x : (B, T, H, W, C) or (B, C, T, H, W) - - Returns: - out : (B, T, H_new, W_out, C_out) or (B, C, T, H_out, W_out) - """ - - if self.layout == "THWC": - B, T, H, W, C = x.shape - if self.temporal_upsample: - x = x.transpose(perm=[0, 4, 1, 2, 3]) - return self.conv(self.up(x)).transpose(perm=[0, 2, 3, 4, 1]) - else: - assert self.target_size[0] == T - x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) - x = self.up(x) - return ( - self.conv(x) - .transpose(perm=[0, 2, 3, 1]) - .reshape(list((B,) + self.target_size + (self.out_dim,))) - ) - elif self.layout == "CTHW": - B, C, T, H, W = x.shape - if self.temporal_upsample: - return self.conv(self.up(x)) - else: - assert self.output_size[0] == T - x = x.transpose(perm=[0, 2, 1, 3, 4]) - x = x.reshape([B * T, C, H, W]) - return ( - self.conv(self.up(x)) - .reshape( - [ - B, - self.target_size[0], - self.out_dim, - self.target_size[1], - self.target_size[2], - ] - ) - .transpose(perm=[0, 2, 1, 3, 4]) - ) - - -class CuboidTransformerDecoder(nn.Layer): - """Decoder of the CuboidTransformer. - - For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention - - Repeat the following structure K times - - x --> StackCuboidSelfAttention --> | - |----> StackCuboidCrossAttention (If used) --> out - mem --> | - - Args: - target_temporal_length (int): The temporal length of the target. - mem_shapes (Tuple[int,...]): The mem shapes of the decoder. - cross_start (int, optional): The block to start cross attention. Defaults to 0. - depth (list, optional): The number of layers for each block. Defaults to [2, 2]. - upsample_type (str, optional): The type of upsample. Defaults to "upsample". - upsample_kernel_size (int, optional): The kernel size of upsample. Defaults to 3. - block_self_attn_patterns (str, optional): The patterns of block attention. Defaults to None. - block_self_cuboid_size (list, optional): The size of cuboid block. Defaults to [(4, 4, 4), (4, 4, 4)]. - block_self_cuboid_strategy (list, optional): The strategy of cuboid. Defaults to [("l", "l", "l"), ("d", "d", "d")]. - block_self_shift_size (list, optional): The size of shift. Defaults to [(1, 1, 1), (0, 0, 0)]. - block_cross_attn_patterns (str, optional): The patterns of cross attentions. Defaults to None. - block_cross_cuboid_hw (list, optional): The height and width of cross cuboid. Defaults to [(4, 4), (4, 4)]. 
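`Upsample3DLayer` above keeps the temporal axis fixed in the common THWC case: frames are folded into the batch dimension, upsampled spatially with nearest-neighbour interpolation, passed through a 3x3 conv, and unfolded back. A shape-level sketch of that branch (toy sizes chosen here for illustration; assumes paddle is installed):

```python
# Sketch of the THWC path of Upsample3DLayer when temporal_upsample is False.
import paddle
import paddle.nn as nn

B, T, H, W, C, C_out = 2, 4, 8, 8, 16, 32
target_size = (T, 16, 16)                      # (T, H_out, W_out); T stays unchanged

up = nn.Upsample(size=target_size[1:], mode="nearest")
conv = nn.Conv2D(C, C_out, kernel_size=3, padding=1)

x = paddle.randn([B, T, H, W, C])
x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2])   # -> (B*T, C, H, W)
y = conv(up(x))                                                # -> (B*T, C_out, H_out, W_out)
y = y.transpose(perm=[0, 2, 3, 1]).reshape([B, *target_size, C_out])
print(y.shape)                                                 # [2, 4, 16, 16, 32]
```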
- block_cross_cuboid_strategy (list, optional): The strategy of cross cuboid. Defaults to [("l", "l", "l"), ("d", "l", "l")]. - block_cross_shift_hw (list, optional): The height and width of cross shift. Defaults to [(0, 0), (0, 0)]. - block_cross_n_temporal (list, optional): The cross temporal of block. Defaults to [1, 2]. - cross_last_n_frames (int, optional): The num of cross last frames. Defaults to None. - num_heads (int, optional): The num of head. Defaults to 4. - attn_drop (float, optional): The ratio of attention dropout. Defaults to 0.0. - proj_drop (float, optional): The ratio of projection dropout. Defaults to 0.0. - ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. - ffn_activation (str, optional): The activation layer of FFN. Defaults to "leaky". - gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to False. - hierarchical_pos_embed (bool, optional): Whether to use hierarchical pos_embed. Defaults to False. - pos_embed_type (str, optional): The type of pos embeding. Defaults to "t+hw". - max_temporal_relative (int, optional): The max number of teemporal relative. Defaults to 50. - padding_type (str, optional): The type of padding. Defaults to "ignore". - checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - self_attn_use_final_proj (bool, optional): Whether to use self attention for final projection. Defaults to True. - use_first_self_attn (bool, optional): Whether to use first self attention. Defaults to False. - use_self_global (bool, optional): Whether to use self global vector. Defaults to False. - self_update_global (bool, optional): Whether to update global vector. Defaults to True. - use_cross_global (bool, optional): Whether to use cross global vector. Defaults to False. - use_global_vector_ffn (bool, optional): Whether to use FFN global vectors. Defaults to True. - use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". - up_linear_init_mode (str, optional): The mode of up linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - target_temporal_length: int, - mem_shapes: Tuple[int, ...], - cross_start: int = 0, - depth: Tuple[int, ...] = [2, 2], - upsample_type: str = "upsample", - upsample_kernel_size: int = 3, - block_self_attn_patterns: str = None, - block_self_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - block_self_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "d", "d"), - ], - block_self_shift_size: Tuple[Tuple[int, ...], ...] 
= [(1, 1, 1), (0, 0, 0)], - block_cross_attn_patterns: str = None, - block_cross_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], - block_cross_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "l", "l"), - ], - block_cross_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (0, 0)], - block_cross_n_temporal: Tuple[int, ...] = [1, 2], - cross_last_n_frames: int = None, - num_heads: int = 4, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - ffn_activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - use_inter_ffn: bool = False, - hierarchical_pos_embed: bool = False, - pos_embed_type: str = "t+hw", - max_temporal_relative: int = 50, - padding_type: str = "ignore", - checkpoint_level: bool = True, - use_relative_pos: bool = True, - self_attn_use_final_proj: bool = True, - use_first_self_attn: bool = False, - use_self_global: bool = False, - self_update_global: bool = True, - use_cross_global: bool = False, - use_global_vector_ffn: bool = True, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - conv_init_mode: str = "0", - up_linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super(CuboidTransformerDecoder, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.conv_init_mode = conv_init_mode - self.up_linear_init_mode = up_linear_init_mode - self.norm_init_mode = norm_init_mode - assert len(depth) == len(mem_shapes) - self.target_temporal_length = target_temporal_length - self.num_blocks = len(mem_shapes) - self.cross_start = cross_start - self.mem_shapes = mem_shapes - self.depth = depth - self.upsample_type = upsample_type - self.hierarchical_pos_embed = hierarchical_pos_embed - self.checkpoint_level = checkpoint_level - self.use_self_global = use_self_global - self.self_update_global = self_update_global - self.use_cross_global = use_cross_global - self.use_global_vector_ffn = use_global_vector_ffn - self.use_first_self_attn = use_first_self_attn - if block_self_attn_patterns is not None: - if isinstance(block_self_attn_patterns, (tuple, list)): - assert len(block_self_attn_patterns) == self.num_blocks - else: - block_self_attn_patterns = [ - block_self_attn_patterns for _ in range(self.num_blocks) - ] - block_self_cuboid_size = [] - block_self_cuboid_strategy = [] - block_self_shift_size = [] - for idx, key in enumerate(block_self_attn_patterns): - func = cuboid_utils.CuboidSelfAttentionPatterns.get(key) - cuboid_size, strategy, shift_size = func(mem_shapes[idx]) - block_self_cuboid_size.append(cuboid_size) - block_self_cuboid_strategy.append(strategy) - block_self_shift_size.append(shift_size) - else: - if not isinstance(block_self_cuboid_size[0][0], (list, tuple)): - block_self_cuboid_size = [ - block_self_cuboid_size for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_self_cuboid_size) == self.num_blocks - ), f"Incorrect input format! Received block_self_cuboid_size={block_self_cuboid_size}" - if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)): - block_self_cuboid_strategy = [ - block_self_cuboid_strategy for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_self_cuboid_strategy) == self.num_blocks - ), f"Incorrect input format! 
Received block_self_cuboid_strategy={block_self_cuboid_strategy}" - if not isinstance(block_self_shift_size[0][0], (list, tuple)): - block_self_shift_size = [ - block_self_shift_size for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_self_shift_size) == self.num_blocks - ), f"Incorrect input format! Received block_self_shift_size={block_self_shift_size}" - - expert_shape_list = [ - (target_temporal_length,) + mem_shape[1:] for mem_shape in mem_shapes - ] - self_blocks = [] - for i in range(self.num_blocks): - if not self.use_first_self_attn and i == self.num_blocks - 1: - ele_depth = depth[i] - 1 - else: - ele_depth = depth[i] - stack_cuboid_blocks = [ - cuboid_encoder.StackCuboidSelfAttentionBlock( - dim=self.mem_shapes[i][-1], - num_heads=num_heads, - block_cuboid_size=block_self_cuboid_size[i], - block_strategy=block_self_cuboid_strategy[i], - block_shift_size=block_self_shift_size[i], - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - activation=ffn_activation, - gated_ffn=gated_ffn, - norm_layer=norm_layer, - use_inter_ffn=use_inter_ffn, - padding_type=padding_type, - use_global_vector=use_self_global, - use_global_vector_ffn=use_global_vector_ffn, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - use_final_proj=self_attn_use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape_list[i], - moe_config=moe_config, - ) - for _ in range(ele_depth) - ] - self_blocks.append(nn.LayerList(sublayers=stack_cuboid_blocks)) - self.self_blocks = nn.LayerList(sublayers=self_blocks) - - if block_cross_attn_patterns is not None: - if isinstance(block_cross_attn_patterns, (tuple, list)): - assert len(block_cross_attn_patterns) == self.num_blocks - else: - block_cross_attn_patterns = [ - block_cross_attn_patterns for _ in range(self.num_blocks) - ] - block_cross_cuboid_hw = [] - block_cross_cuboid_strategy = [] - block_cross_shift_hw = [] - block_cross_n_temporal = [] - for idx, key in enumerate(block_cross_attn_patterns): - if key == "last_frame_dst": - cuboid_hw = None - shift_hw = None - strategy = None - n_temporal = None - else: - func = cuboid_utils.CuboidCrossAttentionPatterns.get(key) - cuboid_hw, shift_hw, strategy, n_temporal = func(mem_shapes[idx]) - block_cross_cuboid_hw.append(cuboid_hw) - block_cross_cuboid_strategy.append(strategy) - block_cross_shift_hw.append(shift_hw) - block_cross_n_temporal.append(n_temporal) - else: - if not isinstance(block_cross_cuboid_hw[0][0], (list, tuple)): - block_cross_cuboid_hw = [ - block_cross_cuboid_hw for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_cross_cuboid_hw) == self.num_blocks - ), f"Incorrect input format! Received block_cross_cuboid_hw={block_cross_cuboid_hw}" - if not isinstance(block_cross_cuboid_strategy[0][0], (list, tuple)): - block_cross_cuboid_strategy = [ - block_cross_cuboid_strategy for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_cross_cuboid_strategy) == self.num_blocks - ), f"Incorrect input format! 
Received block_cross_cuboid_strategy={block_cross_cuboid_strategy}" - if not isinstance(block_cross_shift_hw[0][0], (list, tuple)): - block_cross_shift_hw = [ - block_cross_shift_hw for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_cross_shift_hw) == self.num_blocks - ), f"Incorrect input format! Received block_cross_shift_hw={block_cross_shift_hw}" - if not isinstance(block_cross_n_temporal[0], (list, tuple)): - block_cross_n_temporal = [ - block_cross_n_temporal for _ in range(self.num_blocks) - ] - else: - assert ( - len(block_cross_n_temporal) == self.num_blocks - ), f"Incorrect input format! Received block_cross_n_temporal={block_cross_n_temporal}" - self.cross_blocks = nn.LayerList() - assert self.cross_start == 0 - for i in range(self.cross_start, self.num_blocks): - cross_block = nn.LayerList( - sublayers=[ - StackCuboidCrossAttentionBlock( - dim=self.mem_shapes[i][-1], - num_heads=num_heads, - block_cuboid_hw=block_cross_cuboid_hw[i], - block_strategy=block_cross_cuboid_strategy[i], - block_shift_hw=block_cross_shift_hw[i], - block_n_temporal=block_cross_n_temporal[i], - cross_last_n_frames=cross_last_n_frames, - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - gated_ffn=gated_ffn, - norm_layer=norm_layer, - use_inter_ffn=use_inter_ffn, - activation=ffn_activation, - max_temporal_relative=max_temporal_relative, - padding_type=padding_type, - use_global_vector=use_cross_global, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape_list[i], - moe_config=moe_config, - ) - for _ in range(depth[i]) - ] - ) - self.cross_blocks.append(cross_block) - if self.num_blocks > 1: - if self.upsample_type == "upsample": - self.upsample_layers = nn.LayerList( - sublayers=[ - Upsample3DLayer( - dim=self.mem_shapes[i + 1][-1], - out_dim=self.mem_shapes[i][-1], - target_size=(target_temporal_length,) - + self.mem_shapes[i][1:3], - kernel_size=upsample_kernel_size, - temporal_upsample=False, - conv_init_mode=conv_init_mode, - ) - for i in range(self.num_blocks - 1) - ] - ) - else: - raise NotImplementedError(f"{self.upsample_type} is invalid.") - if self.hierarchical_pos_embed: - self.hierarchical_pos_embed_l = nn.LayerList( - sublayers=[ - PosEmbed( - embed_dim=self.mem_shapes[i][-1], - typ=pos_embed_type, - maxT=target_temporal_length, - maxH=self.mem_shapes[i][1], - maxW=self.mem_shapes[i][2], - ) - for i in range(self.num_blocks - 1) - ] - ) - self.reset_parameters() - - def reset_parameters(self): - for ms in self.self_blocks: - for m in ms: - m.reset_parameters() - for ms in self.cross_blocks: - for m in ms: - m.reset_parameters() - if self.num_blocks > 1: - for m in self.upsample_layers: - m.reset_parameters() - if self.hierarchical_pos_embed: - for m in self.hierarchical_pos_embed_l: - m.reset_parameters() - - def forward(self, x, mem_l, mem_global_vector_l=None): - """ - Args: - x : Shape (B, T_top, H_top, W_top, C). - mem_l : A list of memory tensors. 
- """ - - B, T_top, H_top, W_top, C = x.shape - assert T_top == self.target_temporal_length - assert (H_top, W_top) == (self.mem_shapes[-1][1], self.mem_shapes[-1][2]) - for i in range(self.num_blocks - 1, -1, -1): - mem_global_vector = ( - None if mem_global_vector_l is None else mem_global_vector_l[i] - ) - if not self.use_first_self_attn and i == self.num_blocks - 1: - if i >= self.cross_start: - x = self.cross_blocks[i - self.cross_start][0]( - x, mem_l[i], mem_global_vector - ) - for idx in range(self.depth[i] - 1): - if self.use_self_global: - if self.self_update_global: - x, mem_global_vector = self.self_blocks[i][idx]( - x, mem_global_vector - ) - else: - x, _ = self.self_blocks[i][idx](x, mem_global_vector) - else: - x = self.self_blocks[i][idx](x) - if i >= self.cross_start: - x = self.cross_blocks[i - self.cross_start][idx + 1]( - x, mem_l[i], mem_global_vector - ) - else: - for idx in range(self.depth[i]): - if self.use_self_global: - if self.self_update_global: - x, mem_global_vector = self.self_blocks[i][idx]( - x, mem_global_vector - ) - else: - x, _ = self.self_blocks[i][idx](x, mem_global_vector) - else: - x = self.self_blocks[i][idx](x) - if i >= self.cross_start: - x = self.cross_blocks[i - self.cross_start][idx]( - x, mem_l[i], mem_global_vector - ) - if i > 0: - x = self.upsample_layers[i - 1](x) - if self.hierarchical_pos_embed: - x = self.hierarchical_pos_embed_l[i - 1](x) - return x - - -class MixtureCrossAttention(nn.Layer): - def __init__( - self, - dim, - num_heads, - cuboid_hw, - shift_hw, - strategy, - n_temporal, - cross_last_n_frames, - padding_type, - qkv_bias, - qk_scale, - attn_drop, - proj_drop, - norm_layer, - max_temporal_relative, - use_global_vector, - separate_global_qkv, - global_dim_ratio, - checkpoint_level, - use_relative_pos, - attn_linear_init_mode, - ffn_linear_init_mode, - norm_init_mode, - expert_shape, - moe_config, - ): - super().__init__() - - self.in_dim = dim - self.out_dim = dim - self.expert_shape = expert_shape # T, H, W, C - self.num_experts = moe_config["num_experts"] - self.out_planes = moe_config["out_planes"] - self.moe_config = moe_config - assert expert_shape is not None and moe_config["use_attn_moe"] - assert not use_global_vector - - if moe_config["gate_style"] == "linear": - self.gate = moe_utils.LinearGatingNet(moe_config, expert_shape, dim) - elif moe_config["gate_style"] == "spatial-latent": - self.gate = moe_utils.SpatialLatentGatingNet(moe_config, expert_shape, dim) - elif moe_config["gate_style"] == "cuboid-latent": - self.gate = moe_utils.CuboidLatentGatingNet(moe_config, expert_shape, dim) - elif moe_config["gate_style"] == "spatial-latent-linear": - self.gate = moe_utils.SpatialLatentLinearGatingNet( - moe_config, expert_shape, dim - ) - elif moe_config["gate_style"] == "cuboid-latent-linear": - self.gate = moe_utils.CuboidLatentLinearGatingNet( - moe_config, expert_shape, dim - ) - else: - raise NotImplementedError - - self.experts = nn.LayerList( - [ - CuboidCrossAttentionLayer( - dim=dim, - num_heads=num_heads, - cuboid_hw=cuboid_hw, - shift_hw=shift_hw, - strategy=strategy, - n_temporal=n_temporal, - cross_last_n_frames=cross_last_n_frames, - padding_type=padding_type, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=proj_drop, - norm_layer=norm_layer, - max_temporal_relative=max_temporal_relative, - use_global_vector=use_global_vector, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - 
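`MixtureCrossAttention`, whose expert list is being built in the surrounding lines, follows a standard mixture-of-experts pattern: a gating network produces per-position routing weights over E expert attention layers, every expert processes the input, and the outputs are combined as a per-position weighted sum. The real gate and `DenseDispatcher` live in `moe_utils` and are not shown in this hunk, so the NumPy sketch below only illustrates the assumed dense dispatch-and-combine step:

```python
# Assumed dense MoE combine: all experts see every position and the gate's
# routing weights mix the expert outputs per position. The (B, T, H, W, C)
# grid is flattened to N = B*T*H*W positions, as the code above does.
import numpy as np

rng = np.random.default_rng(0)
N, E, C = 12, 4, 8                                    # positions, experts, channels

x = rng.standard_normal((N, C))
routing = rng.random((N, E))
routing /= routing.sum(axis=1, keepdims=True)         # per-position weights sum to 1

def expert(i, x):
    # Stand-in for one CuboidCrossAttentionLayer expert: a fixed diagonal map.
    w = np.zeros((C, C))
    np.fill_diagonal(w, i + 1)
    return x @ w

expert_outputs = np.stack([expert(i, x) for i in range(E)], axis=1)   # (N, E, C)
y = (routing[..., None] * expert_outputs).sum(axis=1)                 # (N, C)
print(y.shape)                                                        # (12, 8)
```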
use_relative_pos=use_relative_pos, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for _ in range(self.num_experts) - ] - ) - - def forward(self, x, mem, mem_global_vectors=None): - - B, T_x, H, W, C = x.shape - _, T_m, _, _, _ = mem.shape - E = self.num_experts - assert C == self.in_dim and list(self.expert_shape)[:-1] == x.shape[1:-1] - ( - dense_routing_weights, - sparse_routing_weights, - sparse_routing_inds, - self.aux_loss, - ) = self.gate( - x - ) # dense: B, T_x, H, W, E - - dispatcher = moe_utils.DenseDispatcher( - E, - sparse_routing_weights.reshape([B * T_x * H * W, -1]), - sparse_routing_inds.reshape([B * T_x * H * W, -1]), - ) - expert_outputs = paddle.stack( - [self.experts[i](x, mem, mem_global_vectors) for i in range(E)], axis=-2 - ).reshape([B * T_x * H * W, E, C]) - y = dispatcher.combine(expert_outputs).reshape([B, T_x, H, W, C]) - - return y - - def reset_parameters(self): - - for i in range(len(self.experts)): - self.experts[i].reset_parameters() +from functools import lru_cache +from typing import Tuple + +import numpy as np +import paddle +import paddle.nn.functional as F +from paddle import nn +from paddle.distributed import fleet + +import ppsci.arch.extformer_moe_cuboid_encoder as cuboid_encoder +import ppsci.arch.extformer_moe_cuboid_utils as cuboid_utils +import ppsci.arch.extformer_moe_utils as moe_utils +from ppsci.utils import initializer + + +class PosEmbed(nn.Layer): + """pose embeding + + Args: + embed_dim (int): The dimension of embeding. + maxT (int): The embeding max time. + maxH (int): The embeding max height. + maxW (int): The embeding max width. + typ (str): + The type of the positional embedding. + - t+h+w: + Embed the spatial position to embeddings + - t+hw: + Embed the spatial position to embeddings + """ + + def __init__( + self, + embed_dim, + maxT, + maxH, + maxW, + typ: str = "t+h+w", + moe_config: dict = None, + ): + super(PosEmbed, self).__init__() + self.typ = typ + assert self.typ in ["t+h+w", "t+hw"] + self.maxT = maxT + self.maxH = maxH + self.maxW = maxW + self.embed_dim = embed_dim + if self.typ == "t+h+w": + self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim) + self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim) + self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim) + elif self.typ == "t+hw": + self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim) + self.HW_embed = nn.Embedding( + num_embeddings=maxH * maxW, embedding_dim=embed_dim + ) + else: + raise NotImplementedError(f"{self.typ} is invalid.") + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization(m, embed_mode="0") + + def forward(self, x): + """ + Args: + x : Shape (B, T, H, W, C) + + Returns: + out : the x + positional embeddings + """ + + _, T, H, W, _ = x.shape + t_idx = paddle.arange(end=T) + h_idx = paddle.arange(end=H) + w_idx = paddle.arange(end=W) + if self.typ == "t+h+w": + return ( + x + + self.T_embed(t_idx).reshape([T, 1, 1, self.embed_dim]) + + self.H_embed(h_idx).reshape([1, H, 1, self.embed_dim]) + + self.W_embed(w_idx).reshape([1, 1, W, self.embed_dim]) + ) + elif self.typ == "t+hw": + spatial_idx = h_idx.unsqueeze(axis=-1) * self.maxW + w_idx + return ( + x + + self.T_embed(t_idx).reshape([T, 1, 1, self.embed_dim]) + + self.HW_embed(spatial_idx) + ) + else: + raise NotImplementedError(f"{self.typ} is invalid.") + + +@lru_cache() 
+def compute_cuboid_cross_attention_mask( + T_x, T_mem, H, W, n_temporal, cuboid_hw, shift_hw, strategy, padding_type, device +): + pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal + pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal + pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0] + pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1] + mem_cuboid_size = ((T_mem + pad_t_mem) // n_temporal,) + cuboid_hw + x_cuboid_size = ((T_x + pad_t_x) // n_temporal,) + cuboid_hw + if pad_t_mem > 0 or pad_h > 0 or pad_w > 0: + if padding_type == "ignore": + mem_mask = paddle.ones(shape=(1, T_mem, H, W, 1), dtype="bool") + mem_mask = F.pad( + mem_mask, [0, 0, 0, pad_w, 0, pad_h, pad_t_mem, 0], data_format="NDHWC" + ) + else: + mem_mask = paddle.ones( + shape=(1, T_mem + pad_t_mem, H + pad_h, W + pad_w, 1), dtype="bool" + ) + if pad_t_x > 0 or pad_h > 0 or pad_w > 0: + if padding_type == "ignore": + x_mask = paddle.ones(shape=(1, T_x, H, W, 1), dtype="bool") + x_mask = F.pad( + x_mask, [0, 0, 0, pad_w, 0, pad_h, 0, pad_t_x], data_format="NDHWC" + ) + else: + x_mask = paddle.ones( + shape=(1, T_x + pad_t_x, H + pad_h, W + pad_w, 1), dtype="bool" + ) + if any(i > 0 for i in shift_hw): + if padding_type == "ignore": + x_mask = paddle.roll( + x=x_mask, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) + ) + mem_mask = paddle.roll( + x=mem_mask, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) + ) + x_mask = cuboid_encoder.cuboid_reorder(x_mask, x_cuboid_size, strategy=strategy) + x_mask = x_mask.squeeze(axis=-1).squeeze(axis=0) + num_cuboids, x_cuboid_volume = x_mask.shape + mem_mask = cuboid_encoder.cuboid_reorder( + mem_mask, mem_cuboid_size, strategy=strategy + ) + mem_mask = mem_mask.squeeze(axis=-1).squeeze(axis=0) + _, mem_cuboid_volume = mem_mask.shape + shift_mask = np.zeros(shape=(1, n_temporal, H + pad_h, W + pad_w, 1)) + cnt = 0 + for h in ( + slice(-cuboid_hw[0]), + slice(-cuboid_hw[0], -shift_hw[0]), + slice(-shift_hw[0], None), + ): + for w in ( + slice(-cuboid_hw[1]), + slice(-cuboid_hw[1], -shift_hw[1]), + slice(-shift_hw[1], None), + ): + shift_mask[:, :, h, w, :] = cnt + cnt += 1 + shift_mask = paddle.to_tensor(shift_mask) + shift_mask = cuboid_encoder.cuboid_reorder( + shift_mask, (1,) + cuboid_hw, strategy=strategy + ) + shift_mask = shift_mask.squeeze(axis=-1).squeeze(axis=0) + shift_mask = shift_mask.unsqueeze(axis=1) - shift_mask.unsqueeze(axis=2) == 0 + bh_bw = cuboid_hw[0] * cuboid_hw[1] + attn_mask = ( + shift_mask.reshape((num_cuboids, 1, bh_bw, 1, bh_bw)) + * x_mask.reshape((num_cuboids, -1, bh_bw, 1, 1)) + * mem_mask.reshape([num_cuboids, 1, 1, -1, bh_bw]) + ) + attn_mask = attn_mask.reshape([num_cuboids, x_cuboid_volume, mem_cuboid_volume]) + return attn_mask + + +class CuboidCrossAttentionLayer(nn.Layer): + """Implements the cuboid cross attention. + + The idea of Cuboid Cross Attention is to extend the idea of cuboid self attention to work for the + encoder-decoder-type cross attention. + + Assume that there is a memory tensor with shape (T1, H, W, C) and another query tensor with shape (T2, H, W, C), + + Here, we decompose the query tensor and the memory tensor into the same number of cuboids and attend the cuboid in + the query tensor with the corresponding cuboid in the memory tensor. + + For the height and width axes, we reuse the grid decomposition techniques described in the cuboid self-attention. + For the temporal axis, the layer supports the "n_temporal" parameter, that controls the number of cuboids we can + get after cutting the tensors. 
For example, if the temporal dilation is 2, both the query and + memory will be decomposed into 2 cuboids along the temporal axis. Like in the Cuboid Self-attention, + we support "local" and "dilated" decomposition strategy. + + The complexity of the layer is O((T2 / n_t * Bh * Bw) * (T1 / n_t * Bh * Bw) * n_t (H / Bh) (W / Bw)) = O(T2 * T1 / n_t H W Bh Bw) + + Args: + dim (int): The dimention of input tensor. + num_heads (int): The number of head. + n_temporal (int, optional): The num of temporal. Defaults to 1. + cuboid_hw (tuple, optional): The height and width of cuboid. Defaults to (7, 7). + shift_hw (tuple, optional): The height and width of shift. Defaults to (0, 0). + strategy (tuple, optional): The strategy. Defaults to ("d", "l", "l"). + padding_type (str, optional): The type of padding. Defaults to "ignore". + cross_last_n_frames (int, optional): The cross_last_n_frames of decoder. Defaults to None. + qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. + qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projrction dropout. Defaults to 0.0. + max_temporal_relative (int, optional): The max temporal. Defaults to 50. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to True. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. + checkpoint_level (int, optional): Whether to enable gradient checkpointing. Defaults to 1. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". + """ + + def __init__( + self, + dim: int, + num_heads: int, + n_temporal: int = 1, + cuboid_hw: Tuple[int, ...] = (7, 7), + shift_hw: Tuple[int, ...] = (0, 0), + strategy: Tuple[str, ...] 
= ("d", "l", "l"), + padding_type: str = "ignore", + cross_last_n_frames: int = None, + qkv_bias: bool = False, + qk_scale: float = None, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + max_temporal_relative: int = 50, + norm_layer: str = "layer_norm", + use_global_vector: bool = True, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + checkpoint_level: int = 1, + use_relative_pos: bool = True, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + ): + super(CuboidCrossAttentionLayer, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.norm_init_mode = norm_init_mode + self.dim = dim + self.num_heads = num_heads + self.n_temporal = n_temporal + assert n_temporal > 0 + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + shift_hw = list(shift_hw) + if strategy[1] == "d": + shift_hw[0] = 0 + if strategy[2] == "d": + shift_hw[1] = 0 + self.cuboid_hw = cuboid_hw + self.shift_hw = tuple(shift_hw) + self.strategy = strategy + self.padding_type = padding_type + self.max_temporal_relative = max_temporal_relative + self.cross_last_n_frames = cross_last_n_frames + self.use_relative_pos = use_relative_pos + self.use_global_vector = use_global_vector + self.separate_global_qkv = separate_global_qkv + if global_dim_ratio != 1 and separate_global_qkv is False: + raise ValueError( + "Setting global_dim_ratio != 1 requires separate_global_qkv == True." + ) + self.global_dim_ratio = global_dim_ratio + if self.padding_type not in ["ignore", "zeros", "nearest"]: + raise ValueError('padding_type should be ["ignore", "zeros", "nearest"]') + if use_relative_pos: + init_data = paddle.zeros( + ( + (2 * max_temporal_relative - 1) + * (2 * cuboid_hw[0] - 1) + * (2 * cuboid_hw[1] - 1), + num_heads, + ) + ) + self.relative_position_bias_table = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(0.0), + ) + self.relative_position_bias_table.stop_gradient = not True + self.relative_position_bias_table = initializer.trunc_normal_( + self.relative_position_bias_table, std=0.02 + ) + + coords_t = paddle.arange(end=max_temporal_relative) + coords_h = paddle.arange(end=self.cuboid_hw[0]) + coords_w = paddle.arange(end=self.cuboid_hw[1]) + coords = paddle.stack(x=paddle.meshgrid(coords_t, coords_h, coords_w)) + coords_flatten = paddle.flatten(x=coords, start_axis=1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.transpose(perm=[1, 2, 0]) + relative_coords[:, :, 0] += max_temporal_relative - 1 + relative_coords[:, :, 1] += self.cuboid_hw[0] - 1 + relative_coords[:, :, 2] += self.cuboid_hw[1] - 1 + relative_position_index = ( + relative_coords[:, :, 0] + * (2 * self.cuboid_hw[0] - 1) + * (2 * self.cuboid_hw[1] - 1) + + relative_coords[:, :, 1] * (2 * self.cuboid_hw[1] - 1) + + relative_coords[:, :, 2] + ) + self.register_buffer( + name="relative_position_index", tensor=relative_position_index + ) + self.q_proj = nn.Linear(in_features=dim, out_features=dim, bias_attr=qkv_bias) + self.kv_proj = nn.Linear( + in_features=dim, out_features=dim * 2, bias_attr=qkv_bias + ) + self.attn_drop = nn.Dropout(p=attn_drop) + self.proj = nn.Linear(in_features=dim, out_features=dim) + self.proj_drop = nn.Dropout(p=proj_drop) + if self.use_global_vector: + if self.separate_global_qkv: + self.l2g_q_net = nn.Linear( + 
in_features=dim, out_features=dim, bias_attr=qkv_bias + ) + self.l2g_global_kv_net = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=dim * 2, + bias_attr=qkv_bias, + ) + self.norm = cuboid_utils.get_norm_layer(norm_layer, in_channels=dim) + self._checkpoint_level = checkpoint_level + self.reset_parameters() + + def reset_parameters(self): + cuboid_utils.apply_initialization( + self.q_proj, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.kv_proj, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.proj, linear_mode=self.ffn_linear_init_mode + ) + cuboid_utils.apply_initialization(self.norm, norm_mode=self.norm_init_mode) + if self.use_global_vector: + if self.separate_global_qkv: + cuboid_utils.apply_initialization( + self.l2g_q_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.l2g_global_kv_net, linear_mode=self.attn_linear_init_mode + ) + + def forward(self, x, mem, mem_global_vectors=None): + """Calculate the forward + + Along the temporal axis, we pad the mem tensor from the left and the x tensor from the right so that the + relative position encoding can be calculated correctly. For example: + + mem: 0, 1, 2, 3, 4 + x: 0, 1, 2, 3, 4, 5 + + n_temporal = 1 + mem: 0, 1, 2, 3, 4 x: 0, 1, 2, 3, 4, 5 + + n_temporal = 2 + mem: pad, 1, 3 x: 0, 2, 4 + mem: 0, 2, 4 x: 1, 3, 5 + + n_temporal = 3 + mem: pad, 2 dec: 0, 3 + mem: 0, 3 dec: 1, 4 + mem: 1, 4 dec: 2, 5 + + Args: + x (paddle.Tensor): The input of the layer. It will have shape (B, T, H, W, C) + mem (paddle.Tensor): The memory. It will have shape (B, T_mem, H, W, C) + mem_global_vectors (paddle.Tensor): The global vectors from the memory. It will have shape (B, N, C) + + Returns: + out (paddle.Tensor): Output tensor should have shape (B, T, H, W, C_out) + """ + + if self.cross_last_n_frames is not None: + cross_last_n_frames = int(min(self.cross_last_n_frames, mem.shape[1])) + mem = mem[:, -cross_last_n_frames:, ...] + if self.use_global_vector: + _, num_global, _ = mem_global_vectors.shape + x = self.norm(x) + B, T_x, H, W, C_in = x.shape + B_mem, T_mem, H_mem, W_mem, C_mem = mem.shape + assert T_x < self.max_temporal_relative and T_mem < self.max_temporal_relative + cuboid_hw = self.cuboid_hw + n_temporal = self.n_temporal + shift_hw = self.shift_hw + assert ( + B_mem == B and H == H_mem and W == W_mem and C_in == C_mem + ), f"Shape of memory and the input tensor does not match. 
x.shape={x.shape}, mem.shape={mem.shape}" + pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal + pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal + pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0] + pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1] + mem = cuboid_utils.generalize_padding( + mem, pad_t_mem, pad_h, pad_w, self.padding_type, t_pad_left=True + ) + + x = cuboid_utils.generalize_padding( + x, pad_t_x, pad_h, pad_w, self.padding_type, t_pad_left=False + ) + + if any(i > 0 for i in shift_hw): + shifted_x = paddle.roll( + x=x, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) + ) + shifted_mem = paddle.roll( + x=mem, shifts=(-shift_hw[0], -shift_hw[1]), axis=(2, 3) + ) + else: + shifted_x = x + shifted_mem = mem + mem_cuboid_size = (mem.shape[1] // n_temporal,) + cuboid_hw + x_cuboid_size = (x.shape[1] // n_temporal,) + cuboid_hw + reordered_mem = cuboid_encoder.cuboid_reorder( + shifted_mem, cuboid_size=mem_cuboid_size, strategy=self.strategy + ) + reordered_x = cuboid_encoder.cuboid_reorder( + shifted_x, cuboid_size=x_cuboid_size, strategy=self.strategy + ) + _, num_cuboids_mem, mem_cuboid_volume, _ = reordered_mem.shape + _, num_cuboids, x_cuboid_volume, _ = reordered_x.shape + assert ( + num_cuboids_mem == num_cuboids + ), f"Number of cuboids do not match. num_cuboids={num_cuboids}, num_cuboids_mem={num_cuboids_mem}" + attn_mask = compute_cuboid_cross_attention_mask( + T_x, + T_mem, + H, + W, + n_temporal, + cuboid_hw, + shift_hw, + strategy=self.strategy, + padding_type=self.padding_type, + device=x.place, + ) + head_C = C_in // self.num_heads + kv = ( + self.kv_proj(reordered_mem) + .reshape([B, num_cuboids, mem_cuboid_volume, 2, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + k, v = kv[0], kv[1] + q = ( + self.q_proj(reordered_x) + .reshape([B, num_cuboids, x_cuboid_volume, self.num_heads, head_C]) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + q = q * self.scale + perm_4 = list(range(k.ndim)) + perm_4[-2] = -1 + perm_4[-1] = -2 + attn_score = q @ k.transpose(perm=perm_4) + if self.use_relative_pos: + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index[ + :x_cuboid_volume, :mem_cuboid_volume + ].reshape([-1]) + ].reshape([x_cuboid_volume, mem_cuboid_volume, -1]) + relative_position_bias = relative_position_bias.transpose( + perm=[2, 0, 1] + ).unsqueeze(axis=1) + attn_score = attn_score + relative_position_bias + if self.use_global_vector: + if self.separate_global_qkv: + l2g_q = ( + self.l2g_q_net(reordered_x) + .reshape([B, num_cuboids, x_cuboid_volume, self.num_heads, head_C]) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + l2g_q = l2g_q * self.scale + l2g_global_kv = ( + self.l2g_global_kv_net(mem_global_vectors) + .reshape([B, 1, num_global, 2, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1] + else: + kv_global = ( + self.kv_proj(mem_global_vectors) + .reshape([B, 1, num_global, 2, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + l2g_global_k, l2g_global_v = kv_global[0], kv_global[1] + l2g_q = q + perm_5 = list(range(l2g_global_k.ndim)) + perm_5[-2] = -1 + perm_5[-1] = -2 + l2g_attn_score = l2g_q @ l2g_global_k.transpose(perm=perm_5) + attn_score_l2l_l2g = paddle.concat(x=(attn_score, l2g_attn_score), axis=-1) + if attn_mask.ndim == 5: + attn_mask_l2l_l2g = F.pad( + attn_mask, [0, num_global], "constant", 1, data_format="NDHWC" + ) + else: + attn_mask_l2l_l2g = F.pad(attn_mask, [0, 
num_global], "constant", 1) + v_l_g = paddle.concat( + x=( + v, + l2g_global_v.expand( + shape=[B, self.num_heads, num_cuboids, num_global, head_C] + ), + ), + axis=3, + ) + attn_score_l2l_l2g = cuboid_encoder.masked_softmax( + attn_score_l2l_l2g, mask=attn_mask_l2l_l2g + ) + attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) + reordered_x = ( + (attn_score_l2l_l2g @ v_l_g) + .transpose(perm=[0, 2, 3, 1, 4]) + .reshape(B, num_cuboids, x_cuboid_volume, self.dim) + ) + else: + attn_score = cuboid_encoder.masked_softmax(attn_score, mask=attn_mask) + attn_score = self.attn_drop(attn_score) + reordered_x = ( + (attn_score @ v) + .transpose(perm=[0, 2, 3, 1, 4]) + .reshape([B, num_cuboids, x_cuboid_volume, self.dim]) + ) + reordered_x = paddle.cast(reordered_x, dtype="float32") + reordered_x = self.proj_drop(self.proj(reordered_x)) + shifted_x = cuboid_encoder.cuboid_reorder_reverse( + reordered_x, + cuboid_size=x_cuboid_size, + strategy=self.strategy, + orig_data_shape=(x.shape[1], x.shape[2], x.shape[3]), + ) + if any(i > 0 for i in shift_hw): + x = paddle.roll(x=shifted_x, shifts=(shift_hw[0], shift_hw[1]), axis=(2, 3)) + else: + x = shifted_x + x = cuboid_utils.generalize_unpadding( + x, pad_t=pad_t_x, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type + ) + return x + + +class StackCuboidCrossAttentionBlock(nn.Layer): + """A stack of cuboid cross attention layers. + + The advantage of cuboid attention is that we can combine cuboid attention building blocks with different + hyper-parameters to mimic a broad range of space-time correlation patterns. + + - "use_inter_ffn" is True + x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out + | ^ | ^ + | | | | + |-------------|----|-------------| + - "use_inter_ffn" is False + x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem + | ^ | ^ ^ | ^ + | | | | | | | + |-------------|----|------------|-- ----------|--|-----------| + + Args: + dim (int): The dimension of the input. + num_heads (int): The number of head. + block_cuboid_hw (list, optional): The height and width of block cuboid.Defaults to [(4, 4), (4, 4)]. + block_shift_hw (list, optional): The height and width of shift cuboid . Defaults to [(0, 0), (2, 2)]. + block_n_temporal (list, optional): The length of block temporal. Defaults to [1, 2]. + block_strategy (list, optional): The strategy of block. Defaults to [("d", "d", "d"), ("l", "l", "l")]. + padding_type (str, optional): The type of paddling. Defaults to "ignore". + cross_last_n_frames (int, optional): The num of cross_last_n_frames. Defaults to None. + qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. + qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projection dropout. Defaults to 0.0. + ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. + activation (str, optional): The activation. Defaults to "leaky". + gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to True. + max_temporal_relative (int, optional): The max temporal. Defaults to 50. + checkpoint_level (int, optional): Whether to enable gradient checkpointing. Defaults to 1. 
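At the core of the `CuboidCrossAttentionLayer.forward` shown above, queries come from the reordered x cuboids, keys and values from the matching memory cuboids, the score tensor has shape (B, heads, num_cuboids, x_cuboid_volume, mem_cuboid_volume), the cross-attention mask removes padded positions, and softmax runs over the memory axis. A NumPy sketch of that per-cuboid core (the relative-position bias and global-vector path are left out, and the masked softmax here is an assumed equivalent of `cuboid_encoder.masked_softmax`, not its actual implementation):

```python
# Per-cuboid cross-attention core with a boolean validity mask, NumPy stand-in.
import numpy as np

B, heads, n_cuboids, x_vol, mem_vol, head_C = 2, 4, 6, 16, 8, 32
rng = np.random.default_rng(0)
q = rng.standard_normal((B, heads, n_cuboids, x_vol, head_C)) * head_C ** -0.5
k = rng.standard_normal((B, heads, n_cuboids, mem_vol, head_C))
v = rng.standard_normal((B, heads, n_cuboids, mem_vol, head_C))

# (n_cuboids, x_vol, mem_vol) validity mask, True where attention is allowed;
# here the last two memory positions of every cuboid are treated as padding.
mask = np.ones((n_cuboids, x_vol, mem_vol), dtype=bool)
mask[..., -2:] = False

scores = q @ np.swapaxes(k, -1, -2)                  # (B, heads, n_cuboids, x_vol, mem_vol)
scores = np.where(mask, scores, -1e9)                # masked positions get ~zero weight
scores -= scores.max(axis=-1, keepdims=True)
attn = np.exp(scores)
attn /= attn.sum(axis=-1, keepdims=True)             # softmax over memory positions
out = attn @ v                                       # (B, heads, n_cuboids, x_vol, head_C)
print(out.shape, attn[..., -2:].max())               # masked weights are ~0
```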
+ use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization. Defaults to "0". + """ + + def __init__( + self, + dim: int, + num_heads: int, + block_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], + block_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (2, 2)], + block_n_temporal: Tuple[int, ...] = [1, 2], + block_strategy: Tuple[Tuple[str, ...], ...] = [ + ("d", "d", "d"), + ("l", "l", "l"), + ], + padding_type: str = "ignore", + cross_last_n_frames: int = None, + qkv_bias: bool = False, + qk_scale: float = None, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + use_inter_ffn: bool = True, + max_temporal_relative: int = 50, + checkpoint_level: int = 1, + use_relative_pos: bool = True, + use_global_vector: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + expert_shape: tuple = None, + ): + super(StackCuboidCrossAttentionBlock, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.norm_init_mode = norm_init_mode + if ( + len(block_cuboid_hw[0]) <= 0 + or len(block_shift_hw) <= 0 + or len(block_strategy) <= 0 + ): + raise ValueError( + "Incorrect format.The lengths of block_cuboid_hw[0], block_shift_hw, and block_strategy must be greater than zero." + ) + if len(block_cuboid_hw) != len(block_shift_hw) and len(block_shift_hw) == len( + block_strategy + ): + raise ValueError( + "The lengths of block_cuboid_size, block_shift_size, and block_strategy must be equal." 
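The length checks just above exist because `block_cuboid_hw`, `block_shift_hw`, `block_strategy` and `block_n_temporal` are zipped into one cross-attention layer per entry further down in `__init__`. A tiny sketch of how those aligned per-layer configurations pair up (the values are the documented defaults; the dict form is only for illustration):

```python
# Pairing the per-attention-layer hyper-parameters, as the zip(...) in __init__ does.
block_cuboid_hw = [(4, 4), (4, 4)]
block_shift_hw = [(0, 0), (2, 2)]
block_strategy = [("d", "d", "d"), ("l", "l", "l")]
block_n_temporal = [1, 2]

assert len(block_cuboid_hw) == len(block_shift_hw) == len(block_strategy) == len(block_n_temporal)

layer_configs = [
    {"cuboid_hw": hw, "shift_hw": shift, "strategy": strat, "n_temporal": nt}
    for hw, shift, strat, nt in zip(
        block_cuboid_hw, block_shift_hw, block_strategy, block_n_temporal
    )
]
for cfg in layer_configs:
    print(cfg)
# {'cuboid_hw': (4, 4), 'shift_hw': (0, 0), 'strategy': ('d', 'd', 'd'), 'n_temporal': 1}
# {'cuboid_hw': (4, 4), 'shift_hw': (2, 2), 'strategy': ('l', 'l', 'l'), 'n_temporal': 2}
```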
+ ) + + self.num_attn = len(block_cuboid_hw) + self.checkpoint_level = checkpoint_level + self.use_inter_ffn = use_inter_ffn + self.use_global_vector = use_global_vector + if self.use_inter_ffn: + if moe_config["use_ffn_moe"]: + self.ffn_l = nn.LayerList( + sublayers=[ + cuboid_encoder.MixtureFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + for _ in range(self.num_attn) + ] + ) + else: + self.ffn_l = nn.LayerList( + sublayers=[ + cuboid_encoder.PositionwiseFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + for _ in range(self.num_attn) + ] + ) + else: + if moe_config["use_ffn_moe"]: + self.ffn_l = nn.LayerList( + sublayers=[ + cuboid_encoder.MixtureFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + ] + ) + else: + self.ffn_l = nn.LayerList( + sublayers=[ + cuboid_encoder.PositionwiseFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + ] + ) + + if moe_config["use_attn_moe"]: + self.attn_l = nn.LayerList( + sublayers=[ + MixtureCrossAttention( + dim=dim, + num_heads=num_heads, + cuboid_hw=ele_cuboid_hw, + shift_hw=ele_shift_hw, + strategy=ele_strategy, + n_temporal=ele_n_temporal, + cross_last_n_frames=cross_last_n_frames, + padding_type=padding_type, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + max_temporal_relative=max_temporal_relative, + use_global_vector=use_global_vector, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal in zip( + block_cuboid_hw, + block_shift_hw, + block_strategy, + block_n_temporal, + ) + ] + ) + else: + self.attn_l = nn.LayerList( + sublayers=[ + CuboidCrossAttentionLayer( + dim=dim, + num_heads=num_heads, + cuboid_hw=ele_cuboid_hw, + shift_hw=ele_shift_hw, + strategy=ele_strategy, + n_temporal=ele_n_temporal, + cross_last_n_frames=cross_last_n_frames, + padding_type=padding_type, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + max_temporal_relative=max_temporal_relative, + use_global_vector=use_global_vector, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + 
use_relative_pos=use_relative_pos, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal in zip( + block_cuboid_hw, + block_shift_hw, + block_strategy, + block_n_temporal, + ) + ] + ) + + def reset_parameters(self): + for m in self.ffn_l: + m.reset_parameters() + for m in self.attn_l: + m.reset_parameters() + + def forward(self, x, mem, mem_global_vector=None): + """ + Args: + x (paddle.Tensor): Shape (B, T_x, H, W, C) + mem (paddle.Tensor): Shape (B, T_mem, H, W, C) + mem_global_vector (paddle.Tensor): Shape (B, N_global, C) + + Returns: + out (paddle.Tensor): (B, T_x, H, W, C_out) + """ + + if self.use_inter_ffn: + for attn, ffn in zip(self.attn_l, self.ffn_l): + if self.checkpoint_level >= 2 and self.training: + x = x + fleet.utils.recompute(attn, x, mem, mem_global_vector) + else: + x = x + attn(x, mem, mem_global_vector) + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(ffn, x) + else: + x = ffn(x) + return x + else: + for attn in self.attn_l: + if self.checkpoint_level >= 2 and self.training: + x = x + fleet.utils.recompute(attn, x, mem, mem_global_vector) + else: + x = x + attn(x, mem, mem_global_vector) + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(self.ffn_l[0], x) + else: + x = self.ffn_l[0](x) + return x + + +class Upsample3DLayer(nn.Layer): + """Upsampling based on nn.UpSampling and Conv3x3. + + If the temporal dimension remains the same: + x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim) + Else: + x --> interpolation-3d (nearest) --> conv3x3x3(dim, out_dim) + + Args: + dim (int): The dimension of the input tensor. + out_dim (int): The dimension of the output tensor. + target_size (Tuple[int,...]): The size of output tensor. + temporal_upsample (bool, optional): Whether the temporal axis will go through upsampling. Defaults to False. + kernel_size (int, optional): The kernel size of the Conv2D layer. Defaults to 3. + layout (str, optional): The layout of the inputs. Defaults to "THWC". + conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". 
+ """ + + def __init__( + self, + dim: int, + out_dim: int, + target_size: Tuple[int, ...], + temporal_upsample: bool = False, + kernel_size: int = 3, + layout: str = "THWC", + conv_init_mode: str = "0", + moe_config: dict = None, + ): + super(Upsample3DLayer, self).__init__() + self.conv_init_mode = conv_init_mode + self.target_size = target_size + self.out_dim = out_dim + self.temporal_upsample = temporal_upsample + if temporal_upsample: + self.up = nn.Upsample(size=target_size, mode="nearest") + else: + self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode="nearest") + self.conv = nn.Conv2D( + in_channels=dim, + out_channels=out_dim, + kernel_size=(kernel_size, kernel_size), + padding=(kernel_size // 2, kernel_size // 2), + ) + assert layout in ["THWC", "CTHW"] + self.layout = layout + self.reset_parameters() + + def reset_parameters(self): + for m in self.children(): + cuboid_utils.apply_initialization(m, conv_mode=self.conv_init_mode) + + def forward(self, x): + """ + + Args: + x : (B, T, H, W, C) or (B, C, T, H, W) + + Returns: + out : (B, T, H_new, W_out, C_out) or (B, C, T, H_out, W_out) + """ + + if self.layout == "THWC": + B, T, H, W, C = x.shape + if self.temporal_upsample: + x = x.transpose(perm=[0, 4, 1, 2, 3]) + return self.conv(self.up(x)).transpose(perm=[0, 2, 3, 4, 1]) + else: + assert self.target_size[0] == T + x = x.reshape([B * T, H, W, C]).transpose(perm=[0, 3, 1, 2]) + x = self.up(x) + return ( + self.conv(x) + .transpose(perm=[0, 2, 3, 1]) + .reshape(list((B,) + self.target_size + (self.out_dim,))) + ) + elif self.layout == "CTHW": + B, C, T, H, W = x.shape + if self.temporal_upsample: + return self.conv(self.up(x)) + else: + assert self.output_size[0] == T + x = x.transpose(perm=[0, 2, 1, 3, 4]) + x = x.reshape([B * T, C, H, W]) + return ( + self.conv(self.up(x)) + .reshape( + [ + B, + self.target_size[0], + self.out_dim, + self.target_size[1], + self.target_size[2], + ] + ) + .transpose(perm=[0, 2, 1, 3, 4]) + ) + + +class CuboidTransformerDecoder(nn.Layer): + """Decoder of the CuboidTransformer. + + For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention + + Repeat the following structure K times + + x --> StackCuboidSelfAttention --> | + |----> StackCuboidCrossAttention (If used) --> out + mem --> | + + Args: + target_temporal_length (int): The temporal length of the target. + mem_shapes (Tuple[int,...]): The mem shapes of the decoder. + cross_start (int, optional): The block to start cross attention. Defaults to 0. + depth (list, optional): The number of layers for each block. Defaults to [2, 2]. + upsample_type (str, optional): The type of upsample. Defaults to "upsample". + upsample_kernel_size (int, optional): The kernel size of upsample. Defaults to 3. + block_self_attn_patterns (str, optional): The patterns of block attention. Defaults to None. + block_self_cuboid_size (list, optional): The size of cuboid block. Defaults to [(4, 4, 4), (4, 4, 4)]. + block_self_cuboid_strategy (list, optional): The strategy of cuboid. Defaults to [("l", "l", "l"), ("d", "d", "d")]. + block_self_shift_size (list, optional): The size of shift. Defaults to [(1, 1, 1), (0, 0, 0)]. + block_cross_attn_patterns (str, optional): The patterns of cross attentions. Defaults to None. + block_cross_cuboid_hw (list, optional): The height and width of cross cuboid. Defaults to [(4, 4), (4, 4)]. + block_cross_cuboid_strategy (list, optional): The strategy of cross cuboid. Defaults to [("l", "l", "l"), ("d", "l", "l")]. 
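The decoder can re-apply positional information after each upsampling step through the `PosEmbed` layer defined earlier in this patch (controlled by `hierarchical_pos_embed` and `pos_embed_type`, documented a few lines further down). Its "t+h+w" variant adds three learned per-axis embeddings that broadcast over the remaining axes; a NumPy sketch of that broadcast-add, with random tables standing in for the learned `nn.Embedding` weights:

```python
# Broadcast-add of per-axis positional embeddings onto a (B, T, H, W, C) tensor,
# mirroring PosEmbed's "t+h+w" branch; random tables stand in for nn.Embedding.
import numpy as np

B, T, H, W, C = 2, 4, 8, 8, 16
rng = np.random.default_rng(0)
x = rng.standard_normal((B, T, H, W, C))

t_table = rng.standard_normal((T, C))     # plays the role of the learned T_embed weights
h_table = rng.standard_normal((H, C))     # H_embed weights
w_table = rng.standard_normal((W, C))     # W_embed weights

out = (
    x
    + t_table.reshape(T, 1, 1, C)         # varies along T, broadcast over H and W
    + h_table.reshape(1, H, 1, C)         # varies along H
    + w_table.reshape(1, 1, W, C)         # varies along W
)
print(out.shape)                          # (2, 4, 8, 8, 16)
```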
+ block_cross_shift_hw (list, optional): The height and width of cross shift. Defaults to [(0, 0), (0, 0)]. + block_cross_n_temporal (list, optional): The cross temporal of block. Defaults to [1, 2]. + cross_last_n_frames (int, optional): The num of cross last frames. Defaults to None. + num_heads (int, optional): The num of head. Defaults to 4. + attn_drop (float, optional): The ratio of attention dropout. Defaults to 0.0. + proj_drop (float, optional): The ratio of projection dropout. Defaults to 0.0. + ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. + ffn_activation (str, optional): The activation layer of FFN. Defaults to "leaky". + gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to False. + hierarchical_pos_embed (bool, optional): Whether to use hierarchical pos_embed. Defaults to False. + pos_embed_type (str, optional): The type of pos embeding. Defaults to "t+hw". + max_temporal_relative (int, optional): The max number of teemporal relative. Defaults to 50. + padding_type (str, optional): The type of padding. Defaults to "ignore". + checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + self_attn_use_final_proj (bool, optional): Whether to use self attention for final projection. Defaults to True. + use_first_self_attn (bool, optional): Whether to use first self attention. Defaults to False. + use_self_global (bool, optional): Whether to use self global vector. Defaults to False. + self_update_global (bool, optional): Whether to update global vector. Defaults to True. + use_cross_global (bool, optional): Whether to use cross global vector. Defaults to False. + use_global_vector_ffn (bool, optional): Whether to use FFN global vectors. Defaults to True. + use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". + up_linear_init_mode (str, optional): The mode of up linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". + """ + + def __init__( + self, + target_temporal_length: int, + mem_shapes: Tuple[int, ...], + cross_start: int = 0, + depth: Tuple[int, ...] = [2, 2], + upsample_type: str = "upsample", + upsample_kernel_size: int = 3, + block_self_attn_patterns: str = None, + block_self_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + block_self_cuboid_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "d", "d"), + ], + block_self_shift_size: Tuple[Tuple[int, ...], ...] = [(1, 1, 1), (0, 0, 0)], + block_cross_attn_patterns: str = None, + block_cross_cuboid_hw: Tuple[Tuple[int, ...], ...] = [(4, 4), (4, 4)], + block_cross_cuboid_strategy: Tuple[Tuple[str, ...], ...] 
= [ + ("l", "l", "l"), + ("d", "l", "l"), + ], + block_cross_shift_hw: Tuple[Tuple[int, ...], ...] = [(0, 0), (0, 0)], + block_cross_n_temporal: Tuple[int, ...] = [1, 2], + cross_last_n_frames: int = None, + num_heads: int = 4, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + ffn_activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + use_inter_ffn: bool = False, + hierarchical_pos_embed: bool = False, + pos_embed_type: str = "t+hw", + max_temporal_relative: int = 50, + padding_type: str = "ignore", + checkpoint_level: bool = True, + use_relative_pos: bool = True, + self_attn_use_final_proj: bool = True, + use_first_self_attn: bool = False, + use_self_global: bool = False, + self_update_global: bool = True, + use_cross_global: bool = False, + use_global_vector_ffn: bool = True, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + conv_init_mode: str = "0", + up_linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + ): + super(CuboidTransformerDecoder, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.conv_init_mode = conv_init_mode + self.up_linear_init_mode = up_linear_init_mode + self.norm_init_mode = norm_init_mode + assert len(depth) == len(mem_shapes) + self.target_temporal_length = target_temporal_length + self.num_blocks = len(mem_shapes) + self.cross_start = cross_start + self.mem_shapes = mem_shapes + self.depth = depth + self.upsample_type = upsample_type + self.hierarchical_pos_embed = hierarchical_pos_embed + self.checkpoint_level = checkpoint_level + self.use_self_global = use_self_global + self.self_update_global = self_update_global + self.use_cross_global = use_cross_global + self.use_global_vector_ffn = use_global_vector_ffn + self.use_first_self_attn = use_first_self_attn + if block_self_attn_patterns is not None: + if isinstance(block_self_attn_patterns, (tuple, list)): + assert len(block_self_attn_patterns) == self.num_blocks + else: + block_self_attn_patterns = [ + block_self_attn_patterns for _ in range(self.num_blocks) + ] + block_self_cuboid_size = [] + block_self_cuboid_strategy = [] + block_self_shift_size = [] + for idx, key in enumerate(block_self_attn_patterns): + func = cuboid_utils.CuboidSelfAttentionPatterns.get(key) + cuboid_size, strategy, shift_size = func(mem_shapes[idx]) + block_self_cuboid_size.append(cuboid_size) + block_self_cuboid_strategy.append(strategy) + block_self_shift_size.append(shift_size) + else: + if not isinstance(block_self_cuboid_size[0][0], (list, tuple)): + block_self_cuboid_size = [ + block_self_cuboid_size for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_self_cuboid_size) == self.num_blocks + ), f"Incorrect input format! Received block_self_cuboid_size={block_self_cuboid_size}" + if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)): + block_self_cuboid_strategy = [ + block_self_cuboid_strategy for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_self_cuboid_strategy) == self.num_blocks + ), f"Incorrect input format! 
Received block_self_cuboid_strategy={block_self_cuboid_strategy}" + if not isinstance(block_self_shift_size[0][0], (list, tuple)): + block_self_shift_size = [ + block_self_shift_size for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_self_shift_size) == self.num_blocks + ), f"Incorrect input format! Received block_self_shift_size={block_self_shift_size}" + + expert_shape_list = [ + (target_temporal_length,) + mem_shape[1:] for mem_shape in mem_shapes + ] + self_blocks = [] + for i in range(self.num_blocks): + if not self.use_first_self_attn and i == self.num_blocks - 1: + ele_depth = depth[i] - 1 + else: + ele_depth = depth[i] + stack_cuboid_blocks = [ + cuboid_encoder.StackCuboidSelfAttentionBlock( + dim=self.mem_shapes[i][-1], + num_heads=num_heads, + block_cuboid_size=block_self_cuboid_size[i], + block_strategy=block_self_cuboid_strategy[i], + block_shift_size=block_self_shift_size[i], + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + activation=ffn_activation, + gated_ffn=gated_ffn, + norm_layer=norm_layer, + use_inter_ffn=use_inter_ffn, + padding_type=padding_type, + use_global_vector=use_self_global, + use_global_vector_ffn=use_global_vector_ffn, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + use_final_proj=self_attn_use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape_list[i], + moe_config=moe_config, + ) + for _ in range(ele_depth) + ] + self_blocks.append(nn.LayerList(sublayers=stack_cuboid_blocks)) + self.self_blocks = nn.LayerList(sublayers=self_blocks) + + if block_cross_attn_patterns is not None: + if isinstance(block_cross_attn_patterns, (tuple, list)): + assert len(block_cross_attn_patterns) == self.num_blocks + else: + block_cross_attn_patterns = [ + block_cross_attn_patterns for _ in range(self.num_blocks) + ] + block_cross_cuboid_hw = [] + block_cross_cuboid_strategy = [] + block_cross_shift_hw = [] + block_cross_n_temporal = [] + for idx, key in enumerate(block_cross_attn_patterns): + if key == "last_frame_dst": + cuboid_hw = None + shift_hw = None + strategy = None + n_temporal = None + else: + func = cuboid_utils.CuboidCrossAttentionPatterns.get(key) + cuboid_hw, shift_hw, strategy, n_temporal = func(mem_shapes[idx]) + block_cross_cuboid_hw.append(cuboid_hw) + block_cross_cuboid_strategy.append(strategy) + block_cross_shift_hw.append(shift_hw) + block_cross_n_temporal.append(n_temporal) + else: + if not isinstance(block_cross_cuboid_hw[0][0], (list, tuple)): + block_cross_cuboid_hw = [ + block_cross_cuboid_hw for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_cross_cuboid_hw) == self.num_blocks + ), f"Incorrect input format! Received block_cross_cuboid_hw={block_cross_cuboid_hw}" + if not isinstance(block_cross_cuboid_strategy[0][0], (list, tuple)): + block_cross_cuboid_strategy = [ + block_cross_cuboid_strategy for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_cross_cuboid_strategy) == self.num_blocks + ), f"Incorrect input format! 
Received block_cross_cuboid_strategy={block_cross_cuboid_strategy}" + if not isinstance(block_cross_shift_hw[0][0], (list, tuple)): + block_cross_shift_hw = [ + block_cross_shift_hw for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_cross_shift_hw) == self.num_blocks + ), f"Incorrect input format! Received block_cross_shift_hw={block_cross_shift_hw}" + if not isinstance(block_cross_n_temporal[0], (list, tuple)): + block_cross_n_temporal = [ + block_cross_n_temporal for _ in range(self.num_blocks) + ] + else: + assert ( + len(block_cross_n_temporal) == self.num_blocks + ), f"Incorrect input format! Received block_cross_n_temporal={block_cross_n_temporal}" + self.cross_blocks = nn.LayerList() + assert self.cross_start == 0 + for i in range(self.cross_start, self.num_blocks): + cross_block = nn.LayerList( + sublayers=[ + StackCuboidCrossAttentionBlock( + dim=self.mem_shapes[i][-1], + num_heads=num_heads, + block_cuboid_hw=block_cross_cuboid_hw[i], + block_strategy=block_cross_cuboid_strategy[i], + block_shift_hw=block_cross_shift_hw[i], + block_n_temporal=block_cross_n_temporal[i], + cross_last_n_frames=cross_last_n_frames, + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + gated_ffn=gated_ffn, + norm_layer=norm_layer, + use_inter_ffn=use_inter_ffn, + activation=ffn_activation, + max_temporal_relative=max_temporal_relative, + padding_type=padding_type, + use_global_vector=use_cross_global, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape_list[i], + moe_config=moe_config, + ) + for _ in range(depth[i]) + ] + ) + self.cross_blocks.append(cross_block) + if self.num_blocks > 1: + if self.upsample_type == "upsample": + self.upsample_layers = nn.LayerList( + sublayers=[ + Upsample3DLayer( + dim=self.mem_shapes[i + 1][-1], + out_dim=self.mem_shapes[i][-1], + target_size=(target_temporal_length,) + + self.mem_shapes[i][1:3], + kernel_size=upsample_kernel_size, + temporal_upsample=False, + conv_init_mode=conv_init_mode, + ) + for i in range(self.num_blocks - 1) + ] + ) + else: + raise NotImplementedError(f"{self.upsample_type} is invalid.") + if self.hierarchical_pos_embed: + self.hierarchical_pos_embed_l = nn.LayerList( + sublayers=[ + PosEmbed( + embed_dim=self.mem_shapes[i][-1], + typ=pos_embed_type, + maxT=target_temporal_length, + maxH=self.mem_shapes[i][1], + maxW=self.mem_shapes[i][2], + ) + for i in range(self.num_blocks - 1) + ] + ) + self.reset_parameters() + + def reset_parameters(self): + for ms in self.self_blocks: + for m in ms: + m.reset_parameters() + for ms in self.cross_blocks: + for m in ms: + m.reset_parameters() + if self.num_blocks > 1: + for m in self.upsample_layers: + m.reset_parameters() + if self.hierarchical_pos_embed: + for m in self.hierarchical_pos_embed_l: + m.reset_parameters() + + def forward(self, x, mem_l, mem_global_vector_l=None): + """ + Args: + x : Shape (B, T_top, H_top, W_top, C). + mem_l : A list of memory tensors. 
+ """ + + B, T_top, H_top, W_top, C = x.shape + assert T_top == self.target_temporal_length + assert (H_top, W_top) == (self.mem_shapes[-1][1], self.mem_shapes[-1][2]) + for i in range(self.num_blocks - 1, -1, -1): + mem_global_vector = ( + None if mem_global_vector_l is None else mem_global_vector_l[i] + ) + if not self.use_first_self_attn and i == self.num_blocks - 1: + if i >= self.cross_start: + x = self.cross_blocks[i - self.cross_start][0]( + x, mem_l[i], mem_global_vector + ) + for idx in range(self.depth[i] - 1): + if self.use_self_global: + if self.self_update_global: + x, mem_global_vector = self.self_blocks[i][idx]( + x, mem_global_vector + ) + else: + x, _ = self.self_blocks[i][idx](x, mem_global_vector) + else: + x = self.self_blocks[i][idx](x) + if i >= self.cross_start: + x = self.cross_blocks[i - self.cross_start][idx + 1]( + x, mem_l[i], mem_global_vector + ) + else: + for idx in range(self.depth[i]): + if self.use_self_global: + if self.self_update_global: + x, mem_global_vector = self.self_blocks[i][idx]( + x, mem_global_vector + ) + else: + x, _ = self.self_blocks[i][idx](x, mem_global_vector) + else: + x = self.self_blocks[i][idx](x) + if i >= self.cross_start: + x = self.cross_blocks[i - self.cross_start][idx]( + x, mem_l[i], mem_global_vector + ) + if i > 0: + x = self.upsample_layers[i - 1](x) + if self.hierarchical_pos_embed: + x = self.hierarchical_pos_embed_l[i - 1](x) + return x + + +class MixtureCrossAttention(nn.Layer): + def __init__( + self, + dim, + num_heads, + cuboid_hw, + shift_hw, + strategy, + n_temporal, + cross_last_n_frames, + padding_type, + qkv_bias, + qk_scale, + attn_drop, + proj_drop, + norm_layer, + max_temporal_relative, + use_global_vector, + separate_global_qkv, + global_dim_ratio, + checkpoint_level, + use_relative_pos, + attn_linear_init_mode, + ffn_linear_init_mode, + norm_init_mode, + expert_shape, + moe_config, + ): + super().__init__() + + self.in_dim = dim + self.out_dim = dim + self.expert_shape = expert_shape # T, H, W, C + self.num_experts = moe_config["num_experts"] + self.out_planes = moe_config["out_planes"] + self.moe_config = moe_config + assert expert_shape is not None and moe_config["use_attn_moe"] + assert not use_global_vector + + if moe_config["gate_style"] == "linear": + self.gate = moe_utils.LinearGatingNet(moe_config, expert_shape, dim) + elif moe_config["gate_style"] == "spatial-latent": + self.gate = moe_utils.SpatialLatentGatingNet(moe_config, expert_shape, dim) + elif moe_config["gate_style"] == "cuboid-latent": + self.gate = moe_utils.CuboidLatentGatingNet(moe_config, expert_shape, dim) + elif moe_config["gate_style"] == "spatial-latent-linear": + self.gate = moe_utils.SpatialLatentLinearGatingNet( + moe_config, expert_shape, dim + ) + elif moe_config["gate_style"] == "cuboid-latent-linear": + self.gate = moe_utils.CuboidLatentLinearGatingNet( + moe_config, expert_shape, dim + ) + else: + raise NotImplementedError + + self.experts = nn.LayerList( + [ + CuboidCrossAttentionLayer( + dim=dim, + num_heads=num_heads, + cuboid_hw=cuboid_hw, + shift_hw=shift_hw, + strategy=strategy, + n_temporal=n_temporal, + cross_last_n_frames=cross_last_n_frames, + padding_type=padding_type, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + max_temporal_relative=max_temporal_relative, + use_global_vector=use_global_vector, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + 
use_relative_pos=use_relative_pos, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for _ in range(self.num_experts) + ] + ) + + def forward(self, x, mem, mem_global_vectors=None): + + B, T_x, H, W, C = x.shape + _, T_m, _, _, _ = mem.shape + E = self.num_experts + assert C == self.in_dim and list(self.expert_shape)[:-1] == x.shape[1:-1] + ( + dense_routing_weights, + sparse_routing_weights, + sparse_routing_inds, + self.aux_loss, + ) = self.gate( + x + ) # dense: B, T_x, H, W, E + + dispatcher = moe_utils.DenseDispatcher( + E, + sparse_routing_weights.reshape([B * T_x * H * W, -1]), + sparse_routing_inds.reshape([B * T_x * H * W, -1]), + ) + expert_outputs = paddle.stack( + [self.experts[i](x, mem, mem_global_vectors) for i in range(E)], axis=-2 + ).reshape([B * T_x * H * W, E, C]) + y = dispatcher.combine(expert_outputs).reshape([B, T_x, H, W, C]) + + return y + + def reset_parameters(self): + + for i in range(len(self.experts)): + self.experts[i].reset_parameters() diff --git a/ppsci/arch/extformer_moe_cuboid_encoder.py b/ppsci/arch/extformer_moe_cuboid_encoder.py index c26b3837a5..a538240842 100644 --- a/ppsci/arch/extformer_moe_cuboid_encoder.py +++ b/ppsci/arch/extformer_moe_cuboid_encoder.py @@ -1,1992 +1,1992 @@ -from collections import OrderedDict -from functools import lru_cache -from typing import Tuple - -import numpy as np -import paddle -import paddle.nn.functional as F -from paddle import nn -from paddle.distributed import fleet - -import ppsci.arch.extformer_moe_cuboid_utils as cuboid_utils -import ppsci.arch.extformer_moe_utils as moe_utils -from ppsci.arch import activation as act_mod -from ppsci.utils import initializer - -NEGATIVE_SLOPE = 0.1 - - -class PatchMerging3D(nn.Layer): - """Patch Merging Layer - - Args: - dim (int): Number of input channels. - out_dim (int, optional): The dim of output. Defaults to None. - downsample (tuple, optional): Downsample factor. Defaults to (1, 2, 2). - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - padding_type (str, optional): The type of padding. Defaults to "nearest". - linear_init_mode (str, optional): The mode of linear init. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization init. Defaults to "0". - """ - - def __init__( - self, - dim: int, - out_dim: int = None, - downsample: Tuple[int, ...] 
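# A minimal sketch of the combine step behind MixtureCrossAttention.forward above, under the
# assumption of toy sizes: every expert runs on the full input, the outputs are stacked to
# (N, E, C) with N = B*T_x*H*W, and the gate's routing weights form a weighted sum over the
# top-k selected experts per token. The real gating nets and DenseDispatcher live in moe_utils
# (not shown here); this only illustrates the shapes and the weighted combination.
import numpy as np

N, E, C, k = 6, 4, 3, 2                      # tokens, experts, channels, experts kept per token
expert_outputs = np.random.randn(N, E, C)    # stacked outputs of all E experts

logits = np.random.randn(N, E)               # gate logits per token
topk_inds = np.argsort(-logits, axis=-1)[:, :k]                   # (N, k) selected experts
topk_logits = np.take_along_axis(logits, topk_inds, axis=-1)
topk_weights = np.exp(topk_logits)
topk_weights /= topk_weights.sum(axis=-1, keepdims=True)          # softmax over the kept experts

selected = np.take_along_axis(expert_outputs, topk_inds[..., None], axis=1)  # (N, k, C)
combined = (topk_weights[..., None] * selected).sum(axis=1)                  # (N, C)
print(combined.shape)  # (6, 3)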
= (1, 2, 2), - norm_layer: str = "layer_norm", - padding_type: str = "nearest", - linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super().__init__() - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - self.dim = dim - if out_dim is None: - out_dim = max(downsample) * dim - self.out_dim = out_dim - self.downsample = downsample - self.padding_type = padding_type - self.reduction = nn.Linear( - in_features=downsample[0] * downsample[1] * downsample[2] * dim, - out_features=out_dim, - bias_attr=False, - ) - self.norm = cuboid_utils.get_norm_layer( - norm_layer, in_channels=downsample[0] * downsample[1] * downsample[2] * dim - ) - self.reset_parameters() - - def reset_parameters(self): - for m in self.children(): - cuboid_utils.apply_initialization( - m, linear_mode=self.linear_init_mode, norm_mode=self.norm_init_mode - ) - - def get_out_shape(self, data_shape): - T, H, W, C_in = data_shape - pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0] - pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1] - pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2] - return ( - (T + pad_t) // self.downsample[0], - (H + pad_h) // self.downsample[1], - (W + pad_w) // self.downsample[2], - self.out_dim, - ) - - def forward(self, x): - """ - - Args: - x : (B, T, H, W, C) - - Returns: - out : Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim) - """ - - B, T, H, W, C = x.shape - pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0] - pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1] - pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2] - if pad_h or pad_h or pad_w: - T += pad_t - H += pad_h - W += pad_w - x = cuboid_utils.generalize_padding( - x, pad_t, pad_h, pad_w, padding_type=self.padding_type - ) - x = ( - x.reshape( - ( - B, - T // self.downsample[0], - self.downsample[0], - H // self.downsample[1], - self.downsample[1], - W // self.downsample[2], - self.downsample[2], - C, - ) - ) - .transpose(perm=[0, 1, 3, 5, 2, 4, 6, 7]) - .reshape( - [ - B, - T // self.downsample[0], - H // self.downsample[1], - W // self.downsample[2], - self.downsample[0] * self.downsample[1] * self.downsample[2] * C, - ] - ) - ) - x = self.norm(x) - x = self.reduction(x) - return x - - -class PositionwiseFFN(nn.Layer): - """The Position-wise FFN layer used in Transformer-like architectures - - If pre_norm is True: - norm(data) -> fc1 -> act -> act_dropout -> fc2 -> dropout -> res(+data) - Else: - data -> fc1 -> act -> act_dropout -> fc2 -> dropout -> norm(res(+data)) - Also, if we use gated projection. We will use - fc1_1 * act(fc1_2(data)) to map the data - - Args: - units (int, optional): The units. Defaults to 512. - hidden_size (int, optional): The size of hidden layer. Defaults to 2048. - activation_dropout (float, optional): The dropout of activate. Defaults to 0.0. - dropout (float, optional): The drop ratio used in DropPat. Defaults to 0.1. - gated_proj (bool, optional): Whether to use gate projection. Defaults to False. - activation (str, optional): The activate. Defaults to "relu". - normalization (str, optional): The normalization. Defaults to "layer_norm". - layer_norm_eps (float, optional): The epsilon of layer normalization. Defaults to 1e-05. 
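# A minimal sketch of the space-to-depth reshape inside PatchMerging3D.forward above, with
# illustrative sizes: each dt x dh x dw patch of features is flattened into the channel axis,
# after which the layer applies LayerNorm and a Linear reduction to out_dim
# (max(downsample) * dim by default).
import numpy as np

B, T, H, W, C = 2, 4, 8, 8, 16
dt, dh, dw = 1, 2, 2
x = np.random.randn(B, T, H, W, C)

merged = (
    x.reshape(B, T // dt, dt, H // dh, dh, W // dw, dw, C)
    .transpose(0, 1, 3, 5, 2, 4, 6, 7)
    .reshape(B, T // dt, H // dh, W // dw, dt * dh * dw * C)
)
print(merged.shape)  # (2, 4, 4, 4, 64) -> then norm + Linear(64 -> out_dim)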
- pre_norm (bool): Pre-layer normalization as proposed in the paper: - "[ACL2018] The Best of Both Worlds: Combining Recent Advances in Neural Machine Translation" This will stabilize the training of Transformers. - You may also refer to "[Arxiv2020] Understanding the Difficulty of Training Transformers". Defaults to False. - linear_init_mode (str, optional): The mode of linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - units: int = 512, - hidden_size: int = 2048, - activation_dropout: float = 0.0, - dropout: float = 0.1, - gated_proj: bool = False, - activation: str = "relu", - normalization: str = "layer_norm", - layer_norm_eps: float = 1e-05, - pre_norm: bool = False, - linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - expert_shape: tuple = None, - ): - super().__init__() - self.linear_init_mode = linear_init_mode - self.norm_init_mode = norm_init_mode - self._pre_norm = pre_norm - self._gated_proj = gated_proj - self._kwargs = OrderedDict( - [ - ("units", units), - ("hidden_size", hidden_size), - ("activation_dropout", activation_dropout), - ("activation", activation), - ("dropout", dropout), - ("normalization", normalization), - ("layer_norm_eps", layer_norm_eps), - ("gated_proj", gated_proj), - ("pre_norm", pre_norm), - ] - ) - self.dropout_layer = nn.Dropout(p=dropout) - self.activation_dropout_layer = nn.Dropout(p=activation_dropout) - - if moe_config["use_linear_moe"]: - self.ffn_1 = MixtureLinear( - in_dim=units, - out_dim=hidden_size, - bias_attr=True, - expert_shape=expert_shape[:-1] + (hidden_size,), - moe_config=moe_config, - ) - else: - self.ffn_1 = nn.Linear( - in_features=units, out_features=hidden_size, bias_attr=True - ) - if self._gated_proj: - self.ffn_1_gate = nn.Linear( - in_features=units, out_features=hidden_size, bias_attr=True - ) - if activation == "leaky_relu": - self.activation = nn.LeakyReLU(NEGATIVE_SLOPE) - else: - self.activation = act_mod.get_activation(activation) - - if moe_config["use_linear_moe"]: - self.ffn_2 = MixtureLinear( - in_dim=hidden_size, - out_dim=units, - bias_attr=True, - expert_shape=expert_shape, - moe_config=moe_config, - ) - else: - self.ffn_2 = nn.Linear( - in_features=hidden_size, out_features=units, bias_attr=True - ) - self.layer_norm = cuboid_utils.get_norm_layer( - normalization=normalization, in_channels=units, epsilon=layer_norm_eps - ) - self.reset_parameters() - - def reset_parameters(self): - cuboid_utils.apply_initialization(self.ffn_1, linear_mode=self.linear_init_mode) - if self._gated_proj: - cuboid_utils.apply_initialization( - self.ffn_1_gate, linear_mode=self.linear_init_mode - ) - cuboid_utils.apply_initialization(self.ffn_2, linear_mode=self.linear_init_mode) - cuboid_utils.apply_initialization( - self.layer_norm, norm_mode=self.norm_init_mode - ) - - def forward(self, data): - """ - Args: - x : Shape (B, seq_length, C_in) - - Returns: - out : Shape (B, seq_length, C_out) - """ - - residual = data - if self._pre_norm: - data = self.layer_norm(data) - if self._gated_proj: - out = self.activation(self.ffn_1_gate(data)) * self.ffn_1(data) - else: - out = self.activation(self.ffn_1(data)) - out = self.activation_dropout_layer(out) - out = self.ffn_2(out) - out = self.dropout_layer(out) - out = out + residual - if not self._pre_norm: - out = self.layer_norm(out) - return out - - -def update_cuboid_size_shift_size(data_shape, cuboid_size, shift_size, strategy): - 
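# A minimal sketch of the pre_norm + gated_proj path of PositionwiseFFN described above,
# written out as plain layers with illustrative dimensions: LayerNorm, the gated first
# projection act(ffn_1_gate(x)) * ffn_1(x), the second projection, and the residual add.
# Dropout is omitted (identity at eval time); LeakyReLU(0.1) matches the NEGATIVE_SLOPE
# constant used in this file, though the activation is configurable.
import paddle
import paddle.nn as nn

units, hidden = 32, 128
norm = nn.LayerNorm(units)
ffn_1 = nn.Linear(units, hidden)
ffn_1_gate = nn.Linear(units, hidden)
ffn_2 = nn.Linear(hidden, units)
act = nn.LeakyReLU(0.1)

x = paddle.randn([2, 10, units])          # (B, seq_length, C)
h = norm(x)                               # pre-norm
h = act(ffn_1_gate(h)) * ffn_1(h)         # gated projection
out = x + ffn_2(h)                        # residual connection
print(out.shape)  # [2, 10, 32]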
"""Update the cuboid_size and shift_size - - Args: - data_shape (Tuple[int,...]): The shape of the data. - cuboid_size (Tuple[int,...]): Size of the cuboid. - shift_size (Tuple[int,...]): Size of the shift. - strategy (str): The strategy of attention. - - Returns: - new_cuboid_size (Tuple[int,...]): Size of the cuboid. - new_shift_size (Tuple[int,...]): Size of the shift. - """ - - new_cuboid_size = list(cuboid_size) - new_shift_size = list(shift_size) - for i in range(len(data_shape)): - if strategy[i] == "d": - new_shift_size[i] = 0 - if data_shape[i] <= cuboid_size[i]: - new_cuboid_size[i] = data_shape[i] - new_shift_size[i] = 0 - return tuple(new_cuboid_size), tuple(new_shift_size) - - -def cuboid_reorder(data, cuboid_size, strategy): - """Reorder the tensor into (B, num_cuboids, bT * bH * bW, C) - We assume that the tensor shapes are divisible to the cuboid sizes. - - Args: - data (paddle.Tensor): The input data. - cuboid_size (Tuple[int,...]): The size of the cuboid. - strategy (Tuple[int,...]): The cuboid strategy. - - Returns: - reordered_data (paddle.Tensor): Shape will be (B, num_cuboids, bT * bH * bW, C). - num_cuboids = T / bT * H / bH * W / bW - """ - - B, T, H, W, C = data.shape - num_cuboids = T // cuboid_size[0] * H // cuboid_size[1] * W // cuboid_size[2] - cuboid_volume = cuboid_size[0] * cuboid_size[1] * cuboid_size[2] - intermediate_shape = [] - nblock_axis = [] - block_axis = [] - for i, (block_size, total_size, ele_strategy) in enumerate( - zip(cuboid_size, (T, H, W), strategy) - ): - if ele_strategy == "l": - intermediate_shape.extend([total_size // block_size, block_size]) - nblock_axis.append(2 * i + 1) - block_axis.append(2 * i + 2) - elif ele_strategy == "d": - intermediate_shape.extend([block_size, total_size // block_size]) - nblock_axis.append(2 * i + 2) - block_axis.append(2 * i + 1) - else: - raise NotImplementedError(f"{ele_strategy} is invalid.") - data = data.reshape(list((B,) + tuple(intermediate_shape) + (C,))) - reordered_data = data.transpose( - perm=(0,) + tuple(nblock_axis) + tuple(block_axis) + (7,) - ) - reordered_data = reordered_data.reshape((B, num_cuboids, cuboid_volume, C)) - return reordered_data - - -@lru_cache() -def compute_cuboid_self_attention_mask( - data_shape, cuboid_size, shift_size, strategy, padding_type, device -): - """Compute the shift window attention mask - - Args: - data_shape (Tuple[int,....]): Should be (T, H, W). - cuboid_size (Tuple[int,....]): Size of the cuboid. - shift_size (Tuple[int,....]): The shift size. - strategy (str): The decomposition strategy. - padding_type (str): Type of the padding. - device (str): The device. - - Returns: - attn_mask (paddle.Tensor): Mask with shape (num_cuboid, cuboid_vol, cuboid_vol). - The padded values will always be masked. The other masks will ensure that the shifted windows - will only attend to those in the shifted windows. 
- """ - T, H, W = data_shape - pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0] - pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1] - pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2] - data_mask = None - if pad_t > 0 or pad_h > 0 or pad_w > 0: - if padding_type == "ignore": - data_mask = paddle.ones(shape=(1, T, H, W, 1), dtype="bool") - data_mask = F.pad( - data_mask, [0, 0, 0, pad_w, 0, pad_h, 0, pad_t], data_format="NDHWC" - ) - else: - data_mask = paddle.ones( - shape=(1, T + pad_t, H + pad_h, W + pad_w, 1), dtype="bool" - ) - if any(i > 0 for i in shift_size): - if padding_type == "ignore": - data_mask = paddle.roll( - x=data_mask, - shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), - axis=(1, 2, 3), - ) - if padding_type == "ignore": - data_mask = cuboid_reorder(data_mask, cuboid_size, strategy=strategy) - data_mask = data_mask.squeeze(axis=-1).squeeze(axis=0) - shift_mask = np.zeros(shape=(1, T + pad_t, H + pad_h, W + pad_w, 1)) - cnt = 0 - for t in ( - slice(-cuboid_size[0]), - slice(-cuboid_size[0], -shift_size[0]), - slice(-shift_size[0], None), - ): - for h in ( - slice(-cuboid_size[1]), - slice(-cuboid_size[1], -shift_size[1]), - slice(-shift_size[1], None), - ): - for w in ( - slice(-cuboid_size[2]), - slice(-cuboid_size[2], -shift_size[2]), - slice(-shift_size[2], None), - ): - shift_mask[:, t, h, w, :] = cnt - cnt += 1 - shift_mask = paddle.to_tensor(shift_mask) - shift_mask = cuboid_reorder(shift_mask, cuboid_size, strategy=strategy) - shift_mask = shift_mask.squeeze(axis=-1).squeeze(axis=0) - attn_mask = shift_mask.unsqueeze(axis=1) - shift_mask.unsqueeze(axis=2) == 0 - if padding_type == "ignore": - attn_mask = ( - data_mask.unsqueeze(axis=1) * data_mask.unsqueeze(axis=2) * attn_mask - ) - return attn_mask - - -def masked_softmax(att_score, mask, axis: int = -1): - """Ignore the masked elements when calculating the softmax. - The mask can be broadcastable. - - Args: - att_score (paddle.Tensor): Shape (..., length, ...) - mask (paddle.Tensor): Shape (..., length, ...) - 1 --> The element is not masked - 0 --> The element is masked - axis (int): The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis] - - Returns: - att_weights (paddle.Tensor): Shape (..., length, ...). - """ - - if mask is not None: - if att_score.dtype == paddle.float16: - att_score = att_score.masked_fill(paddle.logical_not(mask), -1e4) - else: - att_score = att_score.masked_fill(paddle.logical_not(mask), -1e18) - att_weights = nn.functional.softmax(x=att_score, axis=axis) * mask.astype( - att_score.dtype - ) - else: - att_weights = nn.functional.softmax(x=att_score, axis=axis) - return att_weights - - -def cuboid_reorder_reverse(data, cuboid_size, strategy, orig_data_shape): - """Reverse the reordered cuboid back to the original space - - Args: - data (paddle.Tensor): The input data. - cuboid_size (Tuple[int,...]): The size of cuboid. - strategy (str): The strategy of reordering. - orig_data_shape (Tuple[int,...]): The original shape of the data. 
- - Returns: - data (paddle.Tensor): The recovered data - """ - - B, num_cuboids, cuboid_volume, C = data.shape - T, H, W = orig_data_shape - permutation_axis = [0] - for i, (block_size, total_size, ele_strategy) in enumerate( - zip(cuboid_size, (T, H, W), strategy) - ): - if ele_strategy == "l": - permutation_axis.append(i + 1) - permutation_axis.append(i + 4) - elif ele_strategy == "d": - permutation_axis.append(i + 4) - permutation_axis.append(i + 1) - else: - raise NotImplementedError((f"{ele_strategy} is invalid.")) - permutation_axis.append(7) - data = data.reshape( - [ - B, - T // cuboid_size[0], - H // cuboid_size[1], - W // cuboid_size[2], - cuboid_size[0], - cuboid_size[1], - cuboid_size[2], - C, - ] - ) - data = data.transpose(perm=permutation_axis) - data = data.reshape((B, T, H, W, C)) - return data - - -class CuboidSelfAttentionLayer(nn.Layer): - """Implements the cuboid self attention. - - The idea of Cuboid Self Attention is to divide the input tensor (T, H, W) into several non-overlapping cuboids. - We apply self-attention inside each cuboid and all cuboid-level self attentions are executed in parallel. - - We adopt two mechanisms for decomposing the input tensor into cuboids: - - (1) local: - We group the tensors within a local window, e.g., X[t:(t+b_t), h:(h+b_h), w:(w+b_w)]. We can also apply the - shifted window strategy proposed in "[ICCV2021] Swin Transformer: Hierarchical Vision Transformer using Shifted Windows". - (2) dilated: - Inspired by the success of dilated convolution "[ICLR2016] Multi-Scale Context Aggregation by Dilated Convolutions", - we split the tensor with dilation factors that are tied to the size of the cuboid. For example, for a cuboid that has width `b_w`, - we sample the elements starting from 0 as 0, w / b_w, 2 * w / b_w, ..., (b_w - 1) * w / b_w. - - The cuboid attention can be viewed as a generalization of the attention mechanism proposed in Video Swin Transformer, https://arxiv.org/abs/2106.13230. - The computational complexity of CuboidAttention can be simply calculated as O(T H W * b_t b_h b_w). To cover multiple correlation patterns, - we are able to combine multiple CuboidAttention layers with different configurations such as cuboid size, shift size, and local / global decomposing strategy. - - In addition, it is straight-forward to extend the cuboid attention to other types of spatiotemporal data that are not described - as regular tensors. We need to define alternative approaches to partition the data into "cuboids". - - In addition, inspired by "[NeurIPS2021] Do Transformers Really Perform Badly for Graph Representation?", - "[NeurIPS2020] Big Bird: Transformers for Longer Sequences", "[EMNLP2021] Longformer: The Long-Document Transformer", we keep - $K$ global vectors to record the global status of the spatiotemporal system. These global vectors will attend to the whole tensor and - the vectors inside each individual cuboids will also attend to the global vectors so that they can peep into the global status of the system. - - Args: - dim (int): The dimension of the input tensor. - num_heads (int): The number of heads. - cuboid_size (tuple, optional): The size of cuboid. Defaults to (2, 7, 7). - shift_size (tuple, optional): The size of shift. Defaults to (0, 0, 0). - strategy (tuple, optional): The strategy. Defaults to ("l", "l", "l"). - padding_type (str, optional): The type of padding. Defaults to "ignore". - qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. 
- qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projection dropout. Defaults to 0.0. - use_final_proj (bool, optional): Whether to use the final projection. Defaults to True. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. - use_global_self_attn (bool, optional): Whether to use self attention among global vectors. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. - checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - num_heads: int, - cuboid_size: Tuple[int, ...] = (2, 7, 7), - shift_size: Tuple[int, ...] = (0, 0, 0), - strategy: Tuple[str, ...] = ("l", "l", "l"), - padding_type: str = "ignore", - qkv_bias: bool = False, - qk_scale: float = None, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - use_final_proj: bool = True, - norm_layer: str = "layer_norm", - use_global_vector: bool = False, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - checkpoint_level: bool = True, - use_relative_pos: bool = True, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super(CuboidSelfAttentionLayer, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.norm_init_mode = norm_init_mode - assert dim % num_heads == 0 - self.num_heads = num_heads - self.dim = dim - self.cuboid_size = cuboid_size - self.shift_size = shift_size - self.strategy = strategy - self.padding_type = padding_type - self.use_final_proj = use_final_proj - self.use_relative_pos = use_relative_pos - self.use_global_vector = use_global_vector - self.use_global_self_attn = use_global_self_attn - self.separate_global_qkv = separate_global_qkv - if global_dim_ratio != 1: - assert ( - separate_global_qkv is True - ), "Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
- self.global_dim_ratio = global_dim_ratio - assert self.padding_type in ["ignore", "zeros", "nearest"] - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - if use_relative_pos: - init_data = paddle.zeros( - ( - (2 * cuboid_size[0] - 1) - * (2 * cuboid_size[1] - 1) - * (2 * cuboid_size[2] - 1), - num_heads, - ) - ) - self.relative_position_bias_table = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(0.0), - ) - self.relative_position_bias_table.stop_gradient = not True - self.relative_position_bias_table = initializer.trunc_normal_( - self.relative_position_bias_table, std=0.02 - ) - - coords_t = paddle.arange(end=self.cuboid_size[0]) - coords_h = paddle.arange(end=self.cuboid_size[1]) - coords_w = paddle.arange(end=self.cuboid_size[2]) - coords = paddle.stack(x=paddle.meshgrid(coords_t, coords_h, coords_w)) - coords_flatten = paddle.flatten(x=coords, start_axis=1) - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] - relative_coords = relative_coords.transpose(perm=[1, 2, 0]) - relative_coords[:, :, 0] += self.cuboid_size[0] - 1 - relative_coords[:, :, 1] += self.cuboid_size[1] - 1 - relative_coords[:, :, 2] += self.cuboid_size[2] - 1 - relative_coords[:, :, 0] *= (2 * self.cuboid_size[1] - 1) * ( - 2 * self.cuboid_size[2] - 1 - ) - relative_coords[:, :, 1] *= 2 * self.cuboid_size[2] - 1 - relative_position_index = relative_coords.sum(axis=-1) - self.register_buffer( - name="relative_position_index", tensor=relative_position_index - ) - self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias_attr=qkv_bias) - self.attn_drop = nn.Dropout(p=attn_drop) - if self.use_global_vector: - if self.separate_global_qkv: - self.l2g_q_net = nn.Linear( - in_features=dim, out_features=dim, bias_attr=qkv_bias - ) - self.l2g_global_kv_net = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=dim * 2, - bias_attr=qkv_bias, - ) - self.g2l_global_q_net = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=dim, - bias_attr=qkv_bias, - ) - self.g2l_k_net = nn.Linear( - in_features=dim, out_features=dim, bias_attr=qkv_bias - ) - self.g2l_v_net = nn.Linear( - in_features=dim, - out_features=global_dim_ratio * dim, - bias_attr=qkv_bias, - ) - if self.use_global_self_attn: - self.g2g_global_qkv_net = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=global_dim_ratio * dim * 3, - bias_attr=qkv_bias, - ) - else: - self.global_qkv = nn.Linear( - in_features=dim, out_features=dim * 3, bias_attr=qkv_bias - ) - self.global_attn_drop = nn.Dropout(p=attn_drop) - if use_final_proj: - self.proj = nn.Linear(in_features=dim, out_features=dim) - self.proj_drop = nn.Dropout(p=proj_drop) - if self.use_global_vector: - self.global_proj = nn.Linear( - in_features=global_dim_ratio * dim, - out_features=global_dim_ratio * dim, - ) - self.norm = cuboid_utils.get_norm_layer(norm_layer, in_channels=dim) - if self.use_global_vector: - self.global_vec_norm = cuboid_utils.get_norm_layer( - norm_layer, in_channels=global_dim_ratio * dim - ) - self.checkpoint_level = checkpoint_level - self.reset_parameters() - - def reset_parameters(self): - cuboid_utils.apply_initialization( - self.qkv, linear_mode=self.attn_linear_init_mode - ) - if self.use_final_proj: - cuboid_utils.apply_initialization( - self.proj, linear_mode=self.ffn_linear_init_mode - ) - cuboid_utils.apply_initialization(self.norm, norm_mode=self.norm_init_mode) - if self.use_global_vector: - if 
self.separate_global_qkv: - cuboid_utils.apply_initialization( - self.l2g_q_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.l2g_global_kv_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.g2l_global_q_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.g2l_k_net, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.g2l_v_net, linear_mode=self.attn_linear_init_mode - ) - if self.use_global_self_attn: - cuboid_utils.apply_initialization( - self.g2g_global_qkv_net, linear_mode=self.attn_linear_init_mode - ) - else: - cuboid_utils.apply_initialization( - self.global_qkv, linear_mode=self.attn_linear_init_mode - ) - cuboid_utils.apply_initialization( - self.global_vec_norm, norm_mode=self.norm_init_mode - ) - - def forward(self, x, global_vectors=None): - x = self.norm(x) - - B, T, H, W, C_in = x.shape - assert C_in == self.dim - if self.use_global_vector: - _, num_global, _ = global_vectors.shape - global_vectors = self.global_vec_norm(global_vectors) - cuboid_size, shift_size = update_cuboid_size_shift_size( - (T, H, W), self.cuboid_size, self.shift_size, self.strategy - ) - - pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0] - pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1] - pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2] - x = cuboid_utils.generalize_padding(x, pad_t, pad_h, pad_w, self.padding_type) - - if any(i > 0 for i in shift_size): - shifted_x = paddle.roll( - x=x, - shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), - axis=(1, 2, 3), - ) - else: - shifted_x = x - - reordered_x = cuboid_reorder( - shifted_x, cuboid_size=cuboid_size, strategy=self.strategy - ) - - _, num_cuboids, cuboid_volume, _ = reordered_x.shape - attn_mask = compute_cuboid_self_attention_mask( - (T, H, W), - cuboid_size, - shift_size=shift_size, - strategy=self.strategy, - padding_type=self.padding_type, - device=x.place, - ) - head_C = C_in // self.num_heads - qkv = ( - self.qkv(reordered_x) - .reshape([B, num_cuboids, cuboid_volume, 3, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - - q, k, v = qkv[0], qkv[1], qkv[2] - q = q * self.scale - perm_0 = list(range(k.ndim)) - perm_0[-2] = -1 - perm_0[-1] = -2 - attn_score = q @ k.transpose(perm=perm_0) - - if self.use_relative_pos: - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index[:cuboid_volume, :cuboid_volume].reshape( - [-1] - ) - ].reshape([cuboid_volume, cuboid_volume, -1]) - relative_position_bias = relative_position_bias.transpose( - perm=[2, 0, 1] - ).unsqueeze(axis=1) - attn_score = attn_score + relative_position_bias - - if self.use_global_vector: - global_head_C = self.global_dim_ratio * head_C - if self.separate_global_qkv: - l2g_q = ( - self.l2g_q_net(reordered_x) - .reshape([B, num_cuboids, cuboid_volume, self.num_heads, head_C]) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - l2g_q = l2g_q * self.scale - l2g_global_kv = ( - self.l2g_global_kv_net(global_vectors) - .reshape([B, 1, num_global, 2, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1] - g2l_global_q = ( - self.g2l_global_q_net(global_vectors) - .reshape([B, num_global, self.num_heads, head_C]) - .transpose(perm=[0, 2, 1, 3]) - ) - g2l_global_q = g2l_global_q * self.scale - g2l_k = ( - self.g2l_k_net(reordered_x) - .reshape([B, num_cuboids, 
cuboid_volume, self.num_heads, head_C]) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - g2l_v = ( - self.g2l_v_net(reordered_x) - .reshape( - [B, num_cuboids, cuboid_volume, self.num_heads, global_head_C] - ) - .transpose(perm=[0, 3, 1, 2, 4]) - ) - if self.use_global_self_attn: - g2g_global_qkv = ( - self.g2g_global_qkv_net(global_vectors) - .reshape([B, 1, num_global, 3, self.num_heads, global_head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - g2g_global_q, g2g_global_k, g2g_global_v = ( - g2g_global_qkv[0], - g2g_global_qkv[1], - g2g_global_qkv[2], - ) - g2g_global_q = g2g_global_q.squeeze(axis=2) * self.scale - else: - q_global, k_global, v_global = ( - self.global_qkv(global_vectors) - .reshape([B, 1, num_global, 3, self.num_heads, head_C]) - .transpose(perm=[3, 0, 4, 1, 2, 5]) - ) - q_global = q_global.squeeze(axis=2) * self.scale - l2g_q, g2l_k, g2l_v = q, k, v - g2l_global_q, l2g_global_k, l2g_global_v = ( - q_global, - k_global, - v_global, - ) - if self.use_global_self_attn: - g2g_global_q, g2g_global_k, g2g_global_v = ( - q_global, - k_global, - v_global, - ) - - perm_1 = list(range(l2g_global_k.ndim)) - perm_1[-2] = -1 - perm_1[-1] = -2 - l2g_attn_score = l2g_q @ l2g_global_k.transpose(perm=perm_1) - attn_score_l2l_l2g = paddle.concat(x=(attn_score, l2g_attn_score), axis=-1) - - if attn_mask.ndim == 5: - attn_mask_l2l_l2g = F.pad( - attn_mask, [0, num_global], "constant", 1, data_format="NDHWC" - ) - elif attn_mask.ndim == 3: - attn_mask = attn_mask.astype("float32") - attn_mask_l2l_l2g = F.pad( - attn_mask, [0, num_global], "constant", 1, data_format="NCL" - ) - attn_mask_l2l_l2g = attn_mask_l2l_l2g.astype("bool") - else: - attn_mask_l2l_l2g = F.pad(attn_mask, [0, num_global], "constant", 1) - - v_l_g = paddle.concat( - x=( - v, - l2g_global_v.expand( - shape=[B, self.num_heads, num_cuboids, num_global, head_C] - ), - ), - axis=3, - ) - attn_score_l2l_l2g = masked_softmax( - attn_score_l2l_l2g, mask=attn_mask_l2l_l2g - ) - attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) - reordered_x = ( - (attn_score_l2l_l2g @ v_l_g) - .transpose(perm=[0, 2, 3, 1, 4]) - .reshape([B, num_cuboids, cuboid_volume, self.dim]) - ) - if self.padding_type == "ignore": - g2l_attn_mask = paddle.ones(shape=(1, T, H, W, 1)) - if pad_t > 0 or pad_h > 0 or pad_w > 0: - g2l_attn_mask = F.pad( - g2l_attn_mask, - [0, 0, 0, pad_w, 0, pad_h, 0, pad_t], - data_format="NDHWC", - ) - if any(i > 0 for i in shift_size): - g2l_attn_mask = paddle.roll( - x=g2l_attn_mask, - shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), - axis=(1, 2, 3), - ) - g2l_attn_mask = g2l_attn_mask.reshape((-1,)) - else: - g2l_attn_mask = None - temp = g2l_k.reshape( - [B, self.num_heads, num_cuboids * cuboid_volume, head_C] - ) - perm_2 = list(range(temp.ndim)) - perm_2[-2] = -1 - perm_2[-1] = -2 - g2l_attn_score = g2l_global_q @ temp.transpose(perm=perm_2) - if self.use_global_self_attn: - temp = g2g_global_k.squeeze(axis=2) - perm_3 = list(range(temp.ndim)) - perm_3[-2] = -1 - perm_3[-1] = -2 - g2g_attn_score = g2g_global_q @ temp.transpose(perm=perm_3) - g2all_attn_score = paddle.concat( - x=(g2l_attn_score, g2g_attn_score), axis=-1 - ) - if g2l_attn_mask is not None: - g2all_attn_mask = F.pad( - g2l_attn_mask, - [0, num_global], - "constant", - 1, - data_format="NDHWC", - ) - else: - g2all_attn_mask = None - new_v = paddle.concat( - x=( - g2l_v.reshape( - [ - B, - self.num_heads, - num_cuboids * cuboid_volume, - global_head_C, - ] - ), - g2g_global_v.reshape( - [B, self.num_heads, num_global, global_head_C] - ), - ), - axis=2, 
- ) - else: - g2all_attn_score = g2l_attn_score - g2all_attn_mask = g2l_attn_mask - new_v = g2l_v.reshape( - [B, self.num_heads, num_cuboids * cuboid_volume, global_head_C] - ) - g2all_attn_score = masked_softmax(g2all_attn_score, mask=g2all_attn_mask) - g2all_attn_score = self.global_attn_drop(g2all_attn_score) - new_global_vector = ( - (g2all_attn_score @ new_v) - .transpose(perm=[0, 2, 1, 3]) - .reshape([B, num_global, self.global_dim_ratio * self.dim]) - ) - else: - attn_score = masked_softmax(attn_score, mask=attn_mask) - attn_score = self.attn_drop(attn_score) - reordered_x = ( - (attn_score @ v) - .transpose(perm=[0, 2, 3, 1, 4]) - .reshape([B, num_cuboids, cuboid_volume, self.dim]) - ) - - if self.use_final_proj: - reordered_x = paddle.cast(reordered_x, dtype="float32") - reordered_x = self.proj_drop(self.proj(reordered_x)) - if self.use_global_vector: - new_global_vector = self.proj_drop(self.global_proj(new_global_vector)) - shifted_x = cuboid_reorder_reverse( - reordered_x, - cuboid_size=cuboid_size, - strategy=self.strategy, - orig_data_shape=(T + pad_t, H + pad_h, W + pad_w), - ) - if any(i > 0 for i in shift_size): - x = paddle.roll( - x=shifted_x, - shifts=(shift_size[0], shift_size[1], shift_size[2]), - axis=(1, 2, 3), - ) - else: - x = shifted_x - x = cuboid_utils.generalize_unpadding( - x, pad_t=pad_t, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type - ) - if self.use_global_vector: - return x, new_global_vector - else: - return x - - -class StackCuboidSelfAttentionBlock(nn.Layer): - """ - - "use_inter_ffn" is True - x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out - | ^ | ^ - | | | | - |-------------| |-------------| - - "use_inter_ffn" is False - x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out - | ^ | ^ ^ | ^ - | | | | | | | - |-------------| |------------| ----------| |-----------| - If we have enabled global memory vectors, each attention will be a - - Args: - dim (int): The dimension of the input tensor. - num_heads (int): The number of heads. - block_cuboid_size (list, optional): The size of block cuboid . Defaults to [(4, 4, 4), (4, 4, 4)]. - block_shift_size (list, optional): The shift size of block. Defaults to [(0, 0, 0), (2, 2, 2)]. - block_strategy (list, optional): The strategy of block. Defaults to [("d", "d", "d"), ("l", "l", "l")]. - padding_type (str, optional): The type of padding. Defaults to "ignore". - qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. - qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. - attn_drop (float, optional): The attention dropout. Defaults to 0.0. - proj_drop (float, optional): The projection dropout. Defaults to 0.0. - use_final_proj (bool, optional): Whether to use the final projection. Defaults to True. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. - use_global_self_attn (bool, optional): Whether to use self attention among global vectors. Defaults to False. - separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. - Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. - Defaults to 1. - checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. 
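# A sketch of the moe_config dict threaded through these layers. The keys are the ones read in
# this file (num_experts, out_planes, gate_style, use_attn_moe, use_ffn_moe, use_linear_moe);
# the values shown are illustrative assumptions, not settings from any particular
# Extformer-MoE experiment.
moe_config = {
    "num_experts": 4,            # experts instantiated per MoE layer
    "out_planes": 2,             # consumed by the gating nets in moe_utils
    "gate_style": "linear",      # one of: linear, spatial-latent, cuboid-latent,
                                 #         spatial-latent-linear, cuboid-latent-linear
    "use_attn_moe": False,       # wrap cuboid self/cross attention in a mixture of experts
    "use_ffn_moe": True,         # use MixtureFFN instead of PositionwiseFFN
    "use_linear_moe": False,     # use MixtureLinear inside PositionwiseFFN
}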
- use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". - """ - - def __init__( - self, - dim: int, - num_heads: int, - block_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - block_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (2, 2, 2)], - block_strategy: Tuple[Tuple[str, ...], ...] = [ - ("d", "d", "d"), - ("l", "l", "l"), - ], - padding_type: str = "ignore", - qkv_bias: bool = False, - qk_scale: float = None, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - use_inter_ffn: bool = False, - use_global_vector: bool = False, - use_global_vector_ffn: bool = True, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - checkpoint_level: bool = True, - use_relative_pos: bool = True, - use_final_proj: bool = True, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - expert_shape: tuple = None, - ): - super(StackCuboidSelfAttentionBlock, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.norm_init_mode = norm_init_mode - if ( - len(block_cuboid_size[0]) <= 0 - or len(block_shift_size) <= 0 - or len(block_strategy) <= 0 - ): - raise ValueError( - "Format of the block cuboid size is not correct. block_cuboid_size={block_cuboid_size}" - ) - if len(block_cuboid_size) != len(block_shift_size) and len( - block_cuboid_size - ) != len(block_strategy): - raise ValueError( - "The lengths of block_cuboid_size, block_shift_size, and block_strategy must be equal." 
- ) - - self.num_attn = len(block_cuboid_size) - self.checkpoint_level = checkpoint_level - self.use_inter_ffn = use_inter_ffn - self.use_global_vector = use_global_vector - self.use_global_vector_ffn = use_global_vector_ffn - self.use_global_self_attn = use_global_self_attn - self.global_dim_ratio = global_dim_ratio - if self.use_inter_ffn: - if moe_config["use_ffn_moe"]: - self.ffn_l = nn.LayerList( - sublayers=[ - MixtureFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - for _ in range(self.num_attn) - ] - ) - else: - self.ffn_l = nn.LayerList( - sublayers=[ - PositionwiseFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - for _ in range(self.num_attn) - ] - ) - if self.use_global_vector_ffn and self.use_global_vector: - if moe_config["use_ffn_moe"]: - self.global_ffn_l = nn.LayerList( - sublayers=[ - MixtureFFN( - units=global_dim_ratio * dim, - hidden_size=global_dim_ratio * 4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - for _ in range(self.num_attn) - ] - ) - else: - self.global_ffn_l = nn.LayerList( - sublayers=[ - PositionwiseFFN( - units=global_dim_ratio * dim, - hidden_size=global_dim_ratio * 4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - for _ in range(self.num_attn) - ] - ) - else: - if moe_config["use_ffn_moe"]: - self.ffn_l = nn.LayerList( - sublayers=[ - MixtureFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - ] - ) - else: - self.ffn_l = nn.LayerList( - sublayers=[ - PositionwiseFFN( - units=dim, - hidden_size=4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - ] - ) - if self.use_global_vector_ffn and self.use_global_vector: - if moe_config["use_ffn_moe"]: - self.global_ffn_l = nn.LayerList( - sublayers=[ - MixtureFFN( - units=global_dim_ratio * dim, - hidden_size=global_dim_ratio * 4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - ] - ) - 
else: - self.global_ffn_l = nn.LayerList( - sublayers=[ - PositionwiseFFN( - units=global_dim_ratio * dim, - hidden_size=global_dim_ratio * 4 * dim, - activation_dropout=ffn_drop, - dropout=ffn_drop, - gated_proj=gated_ffn, - activation=activation, - normalization=norm_layer, - pre_norm=True, - linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - ] - ) - - if moe_config["use_attn_moe"]: - self.attn_l = nn.LayerList( - sublayers=[ - MixtureSelfAttention( - dim=dim, - num_heads=num_heads, - cuboid_size=ele_cuboid_size, - shift_size=ele_shift_size, - strategy=ele_strategy, - padding_type=padding_type, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=proj_drop, - norm_layer=norm_layer, - use_global_vector=use_global_vector, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - use_final_proj=use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape, - moe_config=moe_config, - ) - for ele_cuboid_size, ele_shift_size, ele_strategy in zip( - block_cuboid_size, block_shift_size, block_strategy - ) - ] - ) - else: - self.attn_l = nn.LayerList( - sublayers=[ - CuboidSelfAttentionLayer( - dim=dim, - num_heads=num_heads, - cuboid_size=ele_cuboid_size, - shift_size=ele_shift_size, - strategy=ele_strategy, - padding_type=padding_type, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=proj_drop, - norm_layer=norm_layer, - use_global_vector=use_global_vector, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - use_final_proj=use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for ele_cuboid_size, ele_shift_size, ele_strategy in zip( - block_cuboid_size, block_shift_size, block_strategy - ) - ] - ) - - def reset_parameters(self): - for m in self.ffn_l: - m.reset_parameters() - if self.use_global_vector_ffn and self.use_global_vector: - for m in self.global_ffn_l: - m.reset_parameters() - for m in self.attn_l: - m.reset_parameters() - - def forward(self, x, global_vectors=None): - if self.use_inter_ffn: - if self.use_global_vector: - for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)): - if self.checkpoint_level >= 2 and self.training: - x_out, global_vectors_out = fleet.utils.recompute( - attn, x, global_vectors - ) - else: - x_out, global_vectors_out = attn(x, global_vectors) - x = x + x_out - global_vectors = global_vectors + global_vectors_out - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(ffn, x) - if self.use_global_vector_ffn: - global_vectors = fleet.utils.recompute( - self.global_ffn_l[idx], global_vectors - ) - else: - x = ffn(x) - if self.use_global_vector_ffn: - global_vectors = self.global_ffn_l[idx](global_vectors) - return x, global_vectors - else: - for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)): - if self.checkpoint_level >= 2 and self.training: - x = x + fleet.utils.recompute(attn, x) - else: - x = x + attn(x) - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(ffn, x) - else: - 
x = ffn(x) - return x - elif self.use_global_vector: - for idx, attn in enumerate(self.attn_l): - if self.checkpoint_level >= 2 and self.training: - x_out, global_vectors_out = fleet.utils.recompute( - attn, x, global_vectors - ) - else: - x_out, global_vectors_out = attn(x, global_vectors) - x = x + x_out - global_vectors = global_vectors + global_vectors_out - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(self.ffn_l[0], x) - if self.use_global_vector_ffn: - global_vectors = fleet.utils.recompute( - self.global_ffn_l[0], global_vectors - ) - else: - x = self.ffn_l[0](x) - if self.use_global_vector_ffn: - global_vectors = self.global_ffn_l[0](global_vectors) - return x, global_vectors - else: - for idx, attn in enumerate(self.attn_l): - if self.checkpoint_level >= 2 and self.training: - out = fleet.utils.recompute(attn, x) - else: - out = attn(x) - x = x + out - if self.checkpoint_level >= 1 and self.training: - x = fleet.utils.recompute(self.ffn_l[0], x) - else: - x = self.ffn_l[0](x) - return x - - -class CuboidTransformerEncoder(nn.Layer): - """Encoder of the CuboidTransformer - - x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... --> out - - Args: - input_shape (Tuple[int,...]): The shape of the input. Contains T, H, W, C - base_units (int, optional): The number of units. Defaults to 128. - block_units (int, optional): The number of block units. Defaults to None. - scale_alpha (float, optional): We scale up the channels based on the formula: - - round_to(base_units * max(downsample_scale) ** units_alpha, 4). Defaults to 1.0. - depth (list, optional): The number of layers for each block. Defaults to [4, 4, 4]. - downsample (int, optional): The downsample ratio. Defaults to 2. - downsample_type (str, optional): The type of downsample. Defaults to "patch_merge". - block_attn_patterns (str, optional): Attention pattern for the cuboid attention for each block. Defaults to None. - block_cuboid_size (list, optional): A list of cuboid size parameters. Defaults to [(4, 4, 4), (4, 4, 4)]. - block_strategy (list, optional): A list of cuboid strategies. Defaults to [("l", "l", "l"), ("d", "d", "d")]. - block_shift_size (list, optional): A list of shift sizes. Defaults to [(0, 0, 0), (0, 0, 0)]. - num_heads (int, optional): The number of heads. Defaults to 4. - attn_drop (float, optional): The ratio of attention dropout. Defaults to 0.0. - proj_drop (float, optional): The ratio of projection dropout. Defaults to 0.0. - ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. - ffn_activation (str, optional): The FFN activation. Defaults to "leaky". - gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. - norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". - use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to True. - padding_type (str, optional): The type of padding. Defaults to "ignore". - checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. - use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. - self_attn_use_final_proj (bool, optional): Whether to use self attention for final projection. Defaults to True. - use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. - use_global_vector_ffn (bool, optional): Whether to use FFN global vectors. Defaults to False. - use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. 
- separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. - Defaults to False. - global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. - Defaults to 1. - attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". - ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". - conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". - down_linear_init_mode (str, optional): The mode of downsample linear initialization. Defaults to "0". - norm_init_mode (str, optional): The mode of normalization. Defaults to "0". - """ - - def __init__( - self, - input_shape: Tuple[int, ...], - base_units: int = 128, - block_units: int = None, - scale_alpha: float = 1.0, - depth: Tuple[int, ...] = [4, 4, 4], - downsample: int = 2, - downsample_type: str = "patch_merge", - block_attn_patterns: str = None, - block_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], - block_strategy: Tuple[Tuple[str, ...], ...] = [ - ("l", "l", "l"), - ("d", "d", "d"), - ], - block_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (0, 0, 0)], - num_heads: int = 4, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - ffn_drop: float = 0.0, - ffn_activation: str = "leaky", - gated_ffn: bool = False, - norm_layer: str = "layer_norm", - use_inter_ffn: bool = True, - padding_type: str = "ignore", - checkpoint_level: bool = True, - use_relative_pos: bool = True, - self_attn_use_final_proj: bool = True, - use_global_vector: bool = False, - use_global_vector_ffn: bool = True, - use_global_self_attn: bool = False, - separate_global_qkv: bool = False, - global_dim_ratio: int = 1, - attn_linear_init_mode: str = "0", - ffn_linear_init_mode: str = "0", - conv_init_mode: str = "0", - down_linear_init_mode: str = "0", - norm_init_mode: str = "0", - moe_config: dict = None, - ): - super(CuboidTransformerEncoder, self).__init__() - self.attn_linear_init_mode = attn_linear_init_mode - self.ffn_linear_init_mode = ffn_linear_init_mode - self.conv_init_mode = conv_init_mode - self.down_linear_init_mode = down_linear_init_mode - self.norm_init_mode = norm_init_mode - self.input_shape = input_shape - self.depth = depth - self.num_blocks = len(depth) - self.base_units = base_units - self.scale_alpha = scale_alpha - if not isinstance(downsample, (tuple, list)): - downsample = 1, downsample, downsample - self.downsample = downsample - self.downsample_type = downsample_type - self.num_heads = num_heads - self.use_global_vector = use_global_vector - self.checkpoint_level = checkpoint_level - - if block_units is None: - block_units = [ - cuboid_utils.round_to( - base_units * int((max(downsample) ** scale_alpha) ** i), 4 - ) - for i in range(self.num_blocks) - ] - else: - assert len(block_units) == self.num_blocks and block_units[0] == base_units - self.block_units = block_units - if self.num_blocks > 1: - if downsample_type == "patch_merge": - self.down_layers = nn.LayerList( - sublayers=[ - PatchMerging3D( - dim=self.block_units[i], - downsample=downsample, - padding_type=padding_type, - out_dim=self.block_units[i + 1], - linear_init_mode=down_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for i in range(self.num_blocks - 1) - ] - ) - else: - raise NotImplementedError(f"{downsample_type} is invalid.") - if self.use_global_vector: - self.down_layer_global_proj = nn.LayerList( - sublayers=[ - nn.Linear( - in_features=global_dim_ratio 
* self.block_units[i], - out_features=global_dim_ratio * self.block_units[i + 1], - ) - for i in range(self.num_blocks - 1) - ] - ) - if block_attn_patterns is not None: - mem_shapes = self.get_mem_shapes() - if isinstance(block_attn_patterns, (tuple, list)): - assert len(block_attn_patterns) == self.num_blocks - else: - block_attn_patterns = [ - block_attn_patterns for _ in range(self.num_blocks) - ] - block_cuboid_size = [] - block_strategy = [] - block_shift_size = [] - for idx, key in enumerate(block_attn_patterns): - func = cuboid_utils.CuboidSelfAttentionPatterns.get(key) - cuboid_size, strategy, shift_size = func(mem_shapes[idx]) - block_cuboid_size.append(cuboid_size) - block_strategy.append(strategy) - block_shift_size.append(shift_size) - else: - if not isinstance(block_cuboid_size[0][0], (list, tuple)): - block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)] - else: - assert ( - len(block_cuboid_size) == self.num_blocks - ), f"Incorrect input format! Received block_cuboid_size={block_cuboid_size}" - if not isinstance(block_strategy[0][0], (list, tuple)): - block_strategy = [block_strategy for _ in range(self.num_blocks)] - else: - assert ( - len(block_strategy) == self.num_blocks - ), f"Incorrect input format! Received block_strategy={block_strategy}" - if not isinstance(block_shift_size[0][0], (list, tuple)): - block_shift_size = [block_shift_size for _ in range(self.num_blocks)] - else: - assert ( - len(block_shift_size) == self.num_blocks - ), f"Incorrect input format! Received block_shift_size={block_shift_size}" - self.block_cuboid_size = block_cuboid_size - self.block_strategy = block_strategy - self.block_shift_size = block_shift_size - - expert_shape_list = self.get_mem_shapes() - self.blocks = nn.LayerList( - sublayers=[ - nn.Sequential( - *[ - StackCuboidSelfAttentionBlock( - dim=self.block_units[i], - num_heads=num_heads, - block_cuboid_size=block_cuboid_size[i], - block_strategy=block_strategy[i], - block_shift_size=block_shift_size[i], - attn_drop=attn_drop, - proj_drop=proj_drop, - ffn_drop=ffn_drop, - activation=ffn_activation, - gated_ffn=gated_ffn, - norm_layer=norm_layer, - use_inter_ffn=use_inter_ffn, - padding_type=padding_type, - use_global_vector=use_global_vector, - use_global_vector_ffn=use_global_vector_ffn, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - use_final_proj=self_attn_use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - expert_shape=expert_shape_list[i], - moe_config=moe_config, - ) - for _ in range(depth[i]) - ] - ) - for i in range(self.num_blocks) - ] - ) - self.reset_parameters() - - def reset_parameters(self): - if self.num_blocks > 1: - for m in self.down_layers: - m.reset_parameters() - if self.use_global_vector: - cuboid_utils.apply_initialization( - self.down_layer_global_proj, linear_mode=self.down_linear_init_mode - ) - for ms in self.blocks: - for m in ms: - m.reset_parameters() - - def get_mem_shapes(self): - """Get the shape of the output memory based on the input shape. This can be used for constructing the decoder. 
- - Returns: - mem_shapes : A list of shapes of the output memory - """ - - if self.num_blocks == 1: - return [self.input_shape] - else: - mem_shapes = [self.input_shape] - curr_shape = self.input_shape - for down_layer in self.down_layers: - curr_shape = down_layer.get_out_shape(curr_shape) - mem_shapes.append(curr_shape) - return mem_shapes - - def forward(self, x, global_vectors=None): - """ - Args: - x : Shape (B, T, H, W, C) - - Returns: - out (List[paddle.Tensor,..]): A list of tensors from the bottom layer to the top layer of the encoder. For - example, it can have shape - - (B, T, H, W, C1) - - (B, T, H // 2, W // 2, 2 * C1) - - (B, T, H // 4, W // 4, 4 * C1) - ... - global_mem_out (List,Optional): The output of the global vector. - """ - - B, T, H, W, C_in = x.shape - assert (T, H, W, C_in) == self.input_shape - - if self.use_global_vector: - out = [] - global_mem_out = [] - for i in range(self.num_blocks): - for l in self.blocks[i]: - x, global_vectors = l(x, global_vectors) - out.append(x) - global_mem_out.append(global_vectors) - if self.num_blocks > 1 and i < self.num_blocks - 1: - x = self.down_layers[i](x) - global_vectors = self.down_layer_global_proj[i](global_vectors) - return out, global_mem_out - else: - out = [] - for i in range(self.num_blocks): - x = self.blocks[i](x) - out.append(x) - if self.num_blocks > 1 and i < self.num_blocks - 1: - x = self.down_layers[i](x) - return out - - -class MixtureLinear(nn.Layer): - def __init__(self, in_dim, out_dim, expert_shape, moe_config, bias_attr=True): - super().__init__() - - self.in_dim = in_dim - self.out_dim = out_dim - self.bias = bias_attr - self.expert_shape = expert_shape # T, H, W, C_o - self.num_experts = moe_config["num_experts"] - self.out_planes = moe_config["out_planes"] - self.moe_config = moe_config - assert expert_shape is not None and moe_config["use_linear_moe"] - - if moe_config["gate_style"] == "linear": - self.gate = moe_utils.LinearGatingNet(moe_config, expert_shape, in_dim) - elif moe_config["gate_style"] == "spatial-latent": - self.gate = moe_utils.SpatialLatentGatingNet( - moe_config, expert_shape, in_dim - ) - elif moe_config["gate_style"] == "cuboid-latent": - self.gate = moe_utils.CuboidLatentGatingNet( - moe_config, expert_shape, in_dim - ) - elif moe_config["gate_style"] == "spatial-latent-linear": - self.gate = moe_utils.SpatialLatentLinearGatingNet( - moe_config, expert_shape, in_dim - ) - elif moe_config["gate_style"] == "cuboid-latent-linear": - self.gate = moe_utils.CuboidLatentLinearGatingNet( - moe_config, expert_shape, in_dim - ) - else: - raise NotImplementedError - - self.experts = nn.LayerList( - [ - nn.Linear(in_features=in_dim, out_features=out_dim, bias_attr=bias_attr) - for _ in range(self.num_experts) - ] - ) - - def forward(self, x): - - B, T, H, W, C = x.shape - E = self.num_experts - assert C == self.in_dim and list(self.expert_shape)[:-1] == x.shape[1:-1] - ( - dense_routing_weights, - sparse_routing_weights, - sparse_routing_inds, - self.aux_loss, - ) = self.gate( - x - ) # dense: B, T, H, W, E - - if self.moe_config["dispatch_style"] == "dense": - dispatcher = moe_utils.DenseDispatcher( - E, - sparse_routing_weights.reshape([B * T * H * W, -1]), - sparse_routing_inds.reshape([B * T * H * W, -1]), - ) - expert_outputs = paddle.stack( - [self.experts[i](x.reshape([B * T * H * W, -1])) for i in range(E)], - axis=-2, - ) - y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, -1]) - elif self.moe_config["dispatch_style"] == "sparse": - dispatcher = 
moe_utils.SparseDispatcher( - E, - sparse_routing_weights.reshape([B * T * H * W, -1]), - sparse_routing_inds.reshape([B * T * H * W, -1]), - ) - expert_inputs = dispatcher.dispatch(x.reshape([B * T * H * W, -1])) - expert_outputs = [ - self.experts[i](expert_inputs[i]) - if expert_inputs[i].shape[0] > 0 - else paddle.zeros([0, self.out_dim]) - for i in range(E) - ] - y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, -1]) - else: - raise NotImplementedError - - return y - - -class MixtureFFN(nn.Layer): - def __init__( - self, - units, - hidden_size, - activation_dropout, - dropout, - gated_proj, - activation, - normalization, - pre_norm, - linear_init_mode, - norm_init_mode, - expert_shape, - moe_config, - ): - super().__init__() - - self.in_dim = units - self.out_dim = units - self.expert_shape = expert_shape # T, H, W, C_o - self.num_experts = moe_config["num_experts"] - self.out_planes = moe_config["out_planes"] - self.moe_config = moe_config - assert expert_shape is not None and moe_config["use_ffn_moe"] - - if moe_config["gate_style"] == "linear": - self.gate = moe_utils.LinearGatingNet(moe_config, expert_shape, units) - elif moe_config["gate_style"] == "spatial-latent": - self.gate = moe_utils.SpatialLatentGatingNet( - moe_config, expert_shape, units - ) - elif moe_config["gate_style"] == "cuboid-latent": - self.gate = moe_utils.CuboidLatentGatingNet(moe_config, expert_shape, units) - elif moe_config["gate_style"] == "spatial-latent-linear": - self.gate = moe_utils.SpatialLatentLinearGatingNet( - moe_config, expert_shape, units - ) - elif moe_config["gate_style"] == "cuboid-latent-linear": - self.gate = moe_utils.CuboidLatentLinearGatingNet( - moe_config, expert_shape, units - ) - else: - raise NotImplementedError - - self.experts = nn.LayerList( - [ - PositionwiseFFN( - units=units, - hidden_size=hidden_size, - activation_dropout=activation_dropout, - dropout=dropout, - gated_proj=gated_proj, - activation=activation, - normalization=normalization, - pre_norm=pre_norm, - linear_init_mode=linear_init_mode, - norm_init_mode=norm_init_mode, - moe_config=moe_config, - expert_shape=expert_shape, - ) - for _ in range(self.num_experts) - ] - ) - - def forward(self, x): - - B, T, H, W, C = x.shape - E = self.num_experts - assert C == self.in_dim and list(self.expert_shape)[:-1] == x.shape[1:-1] - ( - dense_routing_weights, - sparse_routing_weights, - sparse_routing_inds, - self.aux_loss, - ) = self.gate( - x - ) # dense: B, T, H, W, E - - if self.moe_config["dispatch_style"] == "dense": - dispatcher = moe_utils.DenseDispatcher( - E, - sparse_routing_weights.reshape([B * T * H * W, -1]), - sparse_routing_inds.reshape([B * T * H * W, -1]), - ) - expert_outputs = paddle.stack( - [self.experts[i](x.reshape([B * T * H * W, -1])) for i in range(E)], - axis=-2, - ) - y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, C]) - elif self.moe_config["dispatch_style"] == "sparse": - dispatcher = moe_utils.SparseDispatcher( - E, - sparse_routing_weights.reshape([B * T * H * W, -1]), - sparse_routing_inds.reshape([B * T * H * W, -1]), - ) - expert_inputs = dispatcher.dispatch(x.reshape([B * T * H * W, -1])) - expert_outputs = [ - self.experts[i](expert_inputs[i]) - if expert_inputs[i].shape[0] > 0 - else paddle.zeros([0, self.out_dim]) - for i in range(E) - ] - y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, C]) - else: - raise NotImplementedError - - return y - - def reset_parameters(self): - - for i in range(len(self.experts)): - self.experts[i].reset_parameters() - - -class 
MixtureSelfAttention(nn.Layer): - def __init__( - self, - dim, - num_heads, - cuboid_size, - shift_size, - strategy, - padding_type, - qkv_bias, - qk_scale, - attn_drop, - proj_drop, - norm_layer, - use_global_vector, - use_global_self_attn, - separate_global_qkv, - global_dim_ratio, - checkpoint_level, - use_relative_pos, - use_final_proj, - attn_linear_init_mode, - ffn_linear_init_mode, - norm_init_mode, - expert_shape, - moe_config, - ): - super().__init__() - - self.in_dim = dim - self.out_dim = dim - self.expert_shape = expert_shape # T, H, W, C - self.num_experts = moe_config["num_experts"] - self.out_planes = moe_config["out_planes"] - self.moe_config = moe_config - assert expert_shape is not None and moe_config["use_attn_moe"] - assert not use_global_vector - - if moe_config["gate_style"] == "linear": - self.gate = moe_utils.LinearGatingNet(moe_config, expert_shape, dim) - elif moe_config["gate_style"] == "spatial-latent": - self.gate = moe_utils.SpatialLatentGatingNet(moe_config, expert_shape, dim) - elif moe_config["gate_style"] == "cuboid-latent": - self.gate = moe_utils.CuboidLatentGatingNet(moe_config, expert_shape, dim) - elif moe_config["gate_style"] == "spatial-latent-linear": - self.gate = moe_utils.SpatialLatentLinearGatingNet( - moe_config, expert_shape, dim - ) - elif moe_config["gate_style"] == "cuboid-latent-linear": - self.gate = moe_utils.CuboidLatentLinearGatingNet( - moe_config, expert_shape, dim - ) - else: - raise NotImplementedError - - self.experts = nn.LayerList( - [ - CuboidSelfAttentionLayer( - dim=dim, - num_heads=num_heads, - cuboid_size=cuboid_size, - shift_size=shift_size, - strategy=strategy, - padding_type=padding_type, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=proj_drop, - norm_layer=norm_layer, - use_global_vector=use_global_vector, - use_global_self_attn=use_global_self_attn, - separate_global_qkv=separate_global_qkv, - global_dim_ratio=global_dim_ratio, - checkpoint_level=checkpoint_level, - use_relative_pos=use_relative_pos, - use_final_proj=use_final_proj, - attn_linear_init_mode=attn_linear_init_mode, - ffn_linear_init_mode=ffn_linear_init_mode, - norm_init_mode=norm_init_mode, - ) - for _ in range(self.num_experts) - ] - ) - - def forward(self, x, global_vectors=None): - - B, T, H, W, C = x.shape - E = self.num_experts - assert C == self.in_dim and list(self.expert_shape)[:-1] == x.shape[1:-1] - ( - dense_routing_weights, - sparse_routing_weights, - sparse_routing_inds, - self.aux_loss, - ) = self.gate( - x - ) # dense: B, T, H, W, E - - dispatcher = moe_utils.DenseDispatcher( - E, - sparse_routing_weights.reshape([B * T * H * W, -1]), - sparse_routing_inds.reshape([B * T * H * W, -1]), - ) - expert_outputs = paddle.stack( - [self.experts[i](x, global_vectors) for i in range(E)], axis=-2 - ).reshape([B * T * H * W, E, C]) - y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, C]) - - return y - - def reset_parameters(self): - - for i in range(len(self.experts)): - self.experts[i].reset_parameters() +from collections import OrderedDict +from functools import lru_cache +from typing import Tuple + +import numpy as np +import paddle +import paddle.nn.functional as F +from paddle import nn +from paddle.distributed import fleet + +import ppsci.arch.extformer_moe_cuboid_utils as cuboid_utils +import ppsci.arch.extformer_moe_utils as moe_utils +from ppsci.arch import activation as act_mod +from ppsci.utils import initializer + +NEGATIVE_SLOPE = 0.1 + + +class PatchMerging3D(nn.Layer): + """Patch Merging Layer 
+
+    Args:
+        dim (int): Number of input channels.
+        out_dim (int, optional): The dim of output. Defaults to None.
+        downsample (tuple, optional): Downsample factor. Defaults to (1, 2, 2).
+        norm_layer (str, optional): The normalization layer. Defaults to "layer_norm".
+        padding_type (str, optional): The type of padding. Defaults to "nearest".
+        linear_init_mode (str, optional): The mode of linear init. Defaults to "0".
+        norm_init_mode (str, optional): The mode of normalization init. Defaults to "0".
+    """
+
+    def __init__(
+        self,
+        dim: int,
+        out_dim: int = None,
+        downsample: Tuple[int, ...] = (1, 2, 2),
+        norm_layer: str = "layer_norm",
+        padding_type: str = "nearest",
+        linear_init_mode: str = "0",
+        norm_init_mode: str = "0",
+        moe_config: dict = None,
+    ):
+        super().__init__()
+        self.linear_init_mode = linear_init_mode
+        self.norm_init_mode = norm_init_mode
+        self.dim = dim
+        if out_dim is None:
+            out_dim = max(downsample) * dim
+        self.out_dim = out_dim
+        self.downsample = downsample
+        self.padding_type = padding_type
+        self.reduction = nn.Linear(
+            in_features=downsample[0] * downsample[1] * downsample[2] * dim,
+            out_features=out_dim,
+            bias_attr=False,
+        )
+        self.norm = cuboid_utils.get_norm_layer(
+            norm_layer, in_channels=downsample[0] * downsample[1] * downsample[2] * dim
+        )
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        for m in self.children():
+            cuboid_utils.apply_initialization(
+                m, linear_mode=self.linear_init_mode, norm_mode=self.norm_init_mode
+            )
+
+    def get_out_shape(self, data_shape):
+        T, H, W, C_in = data_shape
+        pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]
+        pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]
+        pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]
+        return (
+            (T + pad_t) // self.downsample[0],
+            (H + pad_h) // self.downsample[1],
+            (W + pad_w) // self.downsample[2],
+            self.out_dim,
+        )
+
+    def forward(self, x):
+        """
+
+        Args:
+            x : (B, T, H, W, C)
+
+        Returns:
+            out : Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim)
+        """
+
+        B, T, H, W, C = x.shape
+        pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]
+        pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]
+        pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]
+        if pad_t or pad_h or pad_w:
+            T += pad_t
+            H += pad_h
+            W += pad_w
+            x = cuboid_utils.generalize_padding(
+                x, pad_t, pad_h, pad_w, padding_type=self.padding_type
+            )
+        x = (
+            x.reshape(
+                (
+                    B,
+                    T // self.downsample[0],
+                    self.downsample[0],
+                    H // self.downsample[1],
+                    self.downsample[1],
+                    W // self.downsample[2],
+                    self.downsample[2],
+                    C,
+                )
+            )
+            .transpose(perm=[0, 1, 3, 5, 2, 4, 6, 7])
+            .reshape(
+                [
+                    B,
+                    T // self.downsample[0],
+                    H // self.downsample[1],
+                    W // self.downsample[2],
+                    self.downsample[0] * self.downsample[1] * self.downsample[2] * C,
+                ]
+            )
+        )
+        x = self.norm(x)
+        x = self.reduction(x)
+        return x
+
+
+class PositionwiseFFN(nn.Layer):
+    """The Position-wise FFN layer used in Transformer-like architectures
+
+    If pre_norm is True:
+        norm(data) -> fc1 -> act -> act_dropout -> fc2 -> dropout -> res(+data)
+    Else:
+        data -> fc1 -> act -> act_dropout -> fc2 -> dropout -> norm(res(+data))
+    Also, if we use gated projection, we will use
+        fc1_1 * act(fc1_2(data)) to map the data
+
+    Args:
+        units (int, optional): The units. Defaults to 512.
+        hidden_size (int, optional): The size of hidden layer. Defaults to 2048.
+ activation_dropout (float, optional): The dropout of activate. Defaults to 0.0. + dropout (float, optional): The drop ratio used in DropPat. Defaults to 0.1. + gated_proj (bool, optional): Whether to use gate projection. Defaults to False. + activation (str, optional): The activate. Defaults to "relu". + normalization (str, optional): The normalization. Defaults to "layer_norm". + layer_norm_eps (float, optional): The epsilon of layer normalization. Defaults to 1e-05. + pre_norm (bool): Pre-layer normalization as proposed in the paper: + "[ACL2018] The Best of Both Worlds: Combining Recent Advances in Neural Machine Translation" This will stabilize the training of Transformers. + You may also refer to "[Arxiv2020] Understanding the Difficulty of Training Transformers". Defaults to False. + linear_init_mode (str, optional): The mode of linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". + """ + + def __init__( + self, + units: int = 512, + hidden_size: int = 2048, + activation_dropout: float = 0.0, + dropout: float = 0.1, + gated_proj: bool = False, + activation: str = "relu", + normalization: str = "layer_norm", + layer_norm_eps: float = 1e-05, + pre_norm: bool = False, + linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + expert_shape: tuple = None, + ): + super().__init__() + self.linear_init_mode = linear_init_mode + self.norm_init_mode = norm_init_mode + self._pre_norm = pre_norm + self._gated_proj = gated_proj + self._kwargs = OrderedDict( + [ + ("units", units), + ("hidden_size", hidden_size), + ("activation_dropout", activation_dropout), + ("activation", activation), + ("dropout", dropout), + ("normalization", normalization), + ("layer_norm_eps", layer_norm_eps), + ("gated_proj", gated_proj), + ("pre_norm", pre_norm), + ] + ) + self.dropout_layer = nn.Dropout(p=dropout) + self.activation_dropout_layer = nn.Dropout(p=activation_dropout) + + if moe_config["use_linear_moe"]: + self.ffn_1 = MixtureLinear( + in_dim=units, + out_dim=hidden_size, + bias_attr=True, + expert_shape=expert_shape[:-1] + (hidden_size,), + moe_config=moe_config, + ) + else: + self.ffn_1 = nn.Linear( + in_features=units, out_features=hidden_size, bias_attr=True + ) + if self._gated_proj: + self.ffn_1_gate = nn.Linear( + in_features=units, out_features=hidden_size, bias_attr=True + ) + if activation == "leaky_relu": + self.activation = nn.LeakyReLU(NEGATIVE_SLOPE) + else: + self.activation = act_mod.get_activation(activation) + + if moe_config["use_linear_moe"]: + self.ffn_2 = MixtureLinear( + in_dim=hidden_size, + out_dim=units, + bias_attr=True, + expert_shape=expert_shape, + moe_config=moe_config, + ) + else: + self.ffn_2 = nn.Linear( + in_features=hidden_size, out_features=units, bias_attr=True + ) + self.layer_norm = cuboid_utils.get_norm_layer( + normalization=normalization, in_channels=units, epsilon=layer_norm_eps + ) + self.reset_parameters() + + def reset_parameters(self): + cuboid_utils.apply_initialization(self.ffn_1, linear_mode=self.linear_init_mode) + if self._gated_proj: + cuboid_utils.apply_initialization( + self.ffn_1_gate, linear_mode=self.linear_init_mode + ) + cuboid_utils.apply_initialization(self.ffn_2, linear_mode=self.linear_init_mode) + cuboid_utils.apply_initialization( + self.layer_norm, norm_mode=self.norm_init_mode + ) + + def forward(self, data): + """ + Args: + x : Shape (B, seq_length, C_in) + + Returns: + out : Shape (B, seq_length, C_out) + """ + + residual = data 
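+        # Descriptive note on the two orderings documented in the class docstring:
+        # with pre_norm the input is normalized before the two linear layers and the
+        # raw input is added back as the residual; with pre_norm=False the FFN runs
+        # on the raw input and layer_norm is applied after the residual addition.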
+ if self._pre_norm: + data = self.layer_norm(data) + if self._gated_proj: + out = self.activation(self.ffn_1_gate(data)) * self.ffn_1(data) + else: + out = self.activation(self.ffn_1(data)) + out = self.activation_dropout_layer(out) + out = self.ffn_2(out) + out = self.dropout_layer(out) + out = out + residual + if not self._pre_norm: + out = self.layer_norm(out) + return out + + +def update_cuboid_size_shift_size(data_shape, cuboid_size, shift_size, strategy): + """Update the cuboid_size and shift_size + + Args: + data_shape (Tuple[int,...]): The shape of the data. + cuboid_size (Tuple[int,...]): Size of the cuboid. + shift_size (Tuple[int,...]): Size of the shift. + strategy (str): The strategy of attention. + + Returns: + new_cuboid_size (Tuple[int,...]): Size of the cuboid. + new_shift_size (Tuple[int,...]): Size of the shift. + """ + + new_cuboid_size = list(cuboid_size) + new_shift_size = list(shift_size) + for i in range(len(data_shape)): + if strategy[i] == "d": + new_shift_size[i] = 0 + if data_shape[i] <= cuboid_size[i]: + new_cuboid_size[i] = data_shape[i] + new_shift_size[i] = 0 + return tuple(new_cuboid_size), tuple(new_shift_size) + + +def cuboid_reorder(data, cuboid_size, strategy): + """Reorder the tensor into (B, num_cuboids, bT * bH * bW, C) + We assume that the tensor shapes are divisible to the cuboid sizes. + + Args: + data (paddle.Tensor): The input data. + cuboid_size (Tuple[int,...]): The size of the cuboid. + strategy (Tuple[int,...]): The cuboid strategy. + + Returns: + reordered_data (paddle.Tensor): Shape will be (B, num_cuboids, bT * bH * bW, C). + num_cuboids = T / bT * H / bH * W / bW + """ + + B, T, H, W, C = data.shape + num_cuboids = T // cuboid_size[0] * H // cuboid_size[1] * W // cuboid_size[2] + cuboid_volume = cuboid_size[0] * cuboid_size[1] * cuboid_size[2] + intermediate_shape = [] + nblock_axis = [] + block_axis = [] + for i, (block_size, total_size, ele_strategy) in enumerate( + zip(cuboid_size, (T, H, W), strategy) + ): + if ele_strategy == "l": + intermediate_shape.extend([total_size // block_size, block_size]) + nblock_axis.append(2 * i + 1) + block_axis.append(2 * i + 2) + elif ele_strategy == "d": + intermediate_shape.extend([block_size, total_size // block_size]) + nblock_axis.append(2 * i + 2) + block_axis.append(2 * i + 1) + else: + raise NotImplementedError(f"{ele_strategy} is invalid.") + data = data.reshape(list((B,) + tuple(intermediate_shape) + (C,))) + reordered_data = data.transpose( + perm=(0,) + tuple(nblock_axis) + tuple(block_axis) + (7,) + ) + reordered_data = reordered_data.reshape((B, num_cuboids, cuboid_volume, C)) + return reordered_data + + +@lru_cache() +def compute_cuboid_self_attention_mask( + data_shape, cuboid_size, shift_size, strategy, padding_type, device +): + """Compute the shift window attention mask + + Args: + data_shape (Tuple[int,....]): Should be (T, H, W). + cuboid_size (Tuple[int,....]): Size of the cuboid. + shift_size (Tuple[int,....]): The shift size. + strategy (str): The decomposition strategy. + padding_type (str): Type of the padding. + device (str): The device. + + Returns: + attn_mask (paddle.Tensor): Mask with shape (num_cuboid, cuboid_vol, cuboid_vol). + The padded values will always be masked. The other masks will ensure that the shifted windows + will only attend to those in the shifted windows. 
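+
+    Note:
+        This function is wrapped with functools.lru_cache, so data_shape, cuboid_size,
+        shift_size and strategy must be hashable (tuples rather than lists).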
+ """ + T, H, W = data_shape + pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0] + pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1] + pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2] + data_mask = None + if pad_t > 0 or pad_h > 0 or pad_w > 0: + if padding_type == "ignore": + data_mask = paddle.ones(shape=(1, T, H, W, 1), dtype="bool") + data_mask = F.pad( + data_mask, [0, 0, 0, pad_w, 0, pad_h, 0, pad_t], data_format="NDHWC" + ) + else: + data_mask = paddle.ones( + shape=(1, T + pad_t, H + pad_h, W + pad_w, 1), dtype="bool" + ) + if any(i > 0 for i in shift_size): + if padding_type == "ignore": + data_mask = paddle.roll( + x=data_mask, + shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), + axis=(1, 2, 3), + ) + if padding_type == "ignore": + data_mask = cuboid_reorder(data_mask, cuboid_size, strategy=strategy) + data_mask = data_mask.squeeze(axis=-1).squeeze(axis=0) + shift_mask = np.zeros(shape=(1, T + pad_t, H + pad_h, W + pad_w, 1)) + cnt = 0 + for t in ( + slice(-cuboid_size[0]), + slice(-cuboid_size[0], -shift_size[0]), + slice(-shift_size[0], None), + ): + for h in ( + slice(-cuboid_size[1]), + slice(-cuboid_size[1], -shift_size[1]), + slice(-shift_size[1], None), + ): + for w in ( + slice(-cuboid_size[2]), + slice(-cuboid_size[2], -shift_size[2]), + slice(-shift_size[2], None), + ): + shift_mask[:, t, h, w, :] = cnt + cnt += 1 + shift_mask = paddle.to_tensor(shift_mask) + shift_mask = cuboid_reorder(shift_mask, cuboid_size, strategy=strategy) + shift_mask = shift_mask.squeeze(axis=-1).squeeze(axis=0) + attn_mask = shift_mask.unsqueeze(axis=1) - shift_mask.unsqueeze(axis=2) == 0 + if padding_type == "ignore": + attn_mask = ( + data_mask.unsqueeze(axis=1) * data_mask.unsqueeze(axis=2) * attn_mask + ) + return attn_mask + + +def masked_softmax(att_score, mask, axis: int = -1): + """Ignore the masked elements when calculating the softmax. + The mask can be broadcastable. + + Args: + att_score (paddle.Tensor): Shape (..., length, ...) + mask (paddle.Tensor): Shape (..., length, ...) + 1 --> The element is not masked + 0 --> The element is masked + axis (int): The axis to calculate the softmax. att_score.shape[axis] must be the same as mask.shape[axis] + + Returns: + att_weights (paddle.Tensor): Shape (..., length, ...). + """ + + if mask is not None: + if att_score.dtype == paddle.float16: + att_score = att_score.masked_fill(paddle.logical_not(mask), -1e4) + else: + att_score = att_score.masked_fill(paddle.logical_not(mask), -1e18) + att_weights = nn.functional.softmax(x=att_score, axis=axis) * mask.astype( + att_score.dtype + ) + else: + att_weights = nn.functional.softmax(x=att_score, axis=axis) + return att_weights + + +def cuboid_reorder_reverse(data, cuboid_size, strategy, orig_data_shape): + """Reverse the reordered cuboid back to the original space + + Args: + data (paddle.Tensor): The input data. + cuboid_size (Tuple[int,...]): The size of cuboid. + strategy (str): The strategy of reordering. + orig_data_shape (Tuple[int,...]): The original shape of the data. 
+ + Returns: + data (paddle.Tensor): The recovered data + """ + + B, num_cuboids, cuboid_volume, C = data.shape + T, H, W = orig_data_shape + permutation_axis = [0] + for i, (block_size, total_size, ele_strategy) in enumerate( + zip(cuboid_size, (T, H, W), strategy) + ): + if ele_strategy == "l": + permutation_axis.append(i + 1) + permutation_axis.append(i + 4) + elif ele_strategy == "d": + permutation_axis.append(i + 4) + permutation_axis.append(i + 1) + else: + raise NotImplementedError((f"{ele_strategy} is invalid.")) + permutation_axis.append(7) + data = data.reshape( + [ + B, + T // cuboid_size[0], + H // cuboid_size[1], + W // cuboid_size[2], + cuboid_size[0], + cuboid_size[1], + cuboid_size[2], + C, + ] + ) + data = data.transpose(perm=permutation_axis) + data = data.reshape((B, T, H, W, C)) + return data + + +class CuboidSelfAttentionLayer(nn.Layer): + """Implements the cuboid self attention. + + The idea of Cuboid Self Attention is to divide the input tensor (T, H, W) into several non-overlapping cuboids. + We apply self-attention inside each cuboid and all cuboid-level self attentions are executed in parallel. + + We adopt two mechanisms for decomposing the input tensor into cuboids: + + (1) local: + We group the tensors within a local window, e.g., X[t:(t+b_t), h:(h+b_h), w:(w+b_w)]. We can also apply the + shifted window strategy proposed in "[ICCV2021] Swin Transformer: Hierarchical Vision Transformer using Shifted Windows". + (2) dilated: + Inspired by the success of dilated convolution "[ICLR2016] Multi-Scale Context Aggregation by Dilated Convolutions", + we split the tensor with dilation factors that are tied to the size of the cuboid. For example, for a cuboid that has width `b_w`, + we sample the elements starting from 0 as 0, w / b_w, 2 * w / b_w, ..., (b_w - 1) * w / b_w. + + The cuboid attention can be viewed as a generalization of the attention mechanism proposed in Video Swin Transformer, https://arxiv.org/abs/2106.13230. + The computational complexity of CuboidAttention can be simply calculated as O(T H W * b_t b_h b_w). To cover multiple correlation patterns, + we are able to combine multiple CuboidAttention layers with different configurations such as cuboid size, shift size, and local / global decomposing strategy. + + In addition, it is straight-forward to extend the cuboid attention to other types of spatiotemporal data that are not described + as regular tensors. We need to define alternative approaches to partition the data into "cuboids". + + In addition, inspired by "[NeurIPS2021] Do Transformers Really Perform Badly for Graph Representation?", + "[NeurIPS2020] Big Bird: Transformers for Longer Sequences", "[EMNLP2021] Longformer: The Long-Document Transformer", we keep + $K$ global vectors to record the global status of the spatiotemporal system. These global vectors will attend to the whole tensor and + the vectors inside each individual cuboids will also attend to the global vectors so that they can peep into the global status of the system. + + Args: + dim (int): The dimension of the input tensor. + num_heads (int): The number of heads. + cuboid_size (tuple, optional): The size of cuboid. Defaults to (2, 7, 7). + shift_size (tuple, optional): The size of shift. Defaults to (0, 0, 0). + strategy (tuple, optional): The strategy. Defaults to ("l", "l", "l"). + padding_type (str, optional): The type of padding. Defaults to "ignore". + qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. 
+ qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projection dropout. Defaults to 0.0. + use_final_proj (bool, optional): Whether to use the final projection. Defaults to True. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. + use_global_self_attn (bool, optional): Whether to use self attention among global vectors. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. Defaults to 1. + checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0". + """ + + def __init__( + self, + dim: int, + num_heads: int, + cuboid_size: Tuple[int, ...] = (2, 7, 7), + shift_size: Tuple[int, ...] = (0, 0, 0), + strategy: Tuple[str, ...] = ("l", "l", "l"), + padding_type: str = "ignore", + qkv_bias: bool = False, + qk_scale: float = None, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + use_final_proj: bool = True, + norm_layer: str = "layer_norm", + use_global_vector: bool = False, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + checkpoint_level: bool = True, + use_relative_pos: bool = True, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + ): + super(CuboidSelfAttentionLayer, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.norm_init_mode = norm_init_mode + assert dim % num_heads == 0 + self.num_heads = num_heads + self.dim = dim + self.cuboid_size = cuboid_size + self.shift_size = shift_size + self.strategy = strategy + self.padding_type = padding_type + self.use_final_proj = use_final_proj + self.use_relative_pos = use_relative_pos + self.use_global_vector = use_global_vector + self.use_global_self_attn = use_global_self_attn + self.separate_global_qkv = separate_global_qkv + if global_dim_ratio != 1: + assert ( + separate_global_qkv is True + ), "Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
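+        # When global_dim_ratio != 1, the global vectors carry global_dim_ratio * dim
+        # channels while local tokens carry dim channels, so a shared qkv projection
+        # cannot serve both; the separate l2g / g2l / g2g projections built below are
+        # required in that case.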
+ self.global_dim_ratio = global_dim_ratio + assert self.padding_type in ["ignore", "zeros", "nearest"] + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + if use_relative_pos: + init_data = paddle.zeros( + ( + (2 * cuboid_size[0] - 1) + * (2 * cuboid_size[1] - 1) + * (2 * cuboid_size[2] - 1), + num_heads, + ) + ) + self.relative_position_bias_table = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(0.0), + ) + self.relative_position_bias_table.stop_gradient = not True + self.relative_position_bias_table = initializer.trunc_normal_( + self.relative_position_bias_table, std=0.02 + ) + + coords_t = paddle.arange(end=self.cuboid_size[0]) + coords_h = paddle.arange(end=self.cuboid_size[1]) + coords_w = paddle.arange(end=self.cuboid_size[2]) + coords = paddle.stack(x=paddle.meshgrid(coords_t, coords_h, coords_w)) + coords_flatten = paddle.flatten(x=coords, start_axis=1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.transpose(perm=[1, 2, 0]) + relative_coords[:, :, 0] += self.cuboid_size[0] - 1 + relative_coords[:, :, 1] += self.cuboid_size[1] - 1 + relative_coords[:, :, 2] += self.cuboid_size[2] - 1 + relative_coords[:, :, 0] *= (2 * self.cuboid_size[1] - 1) * ( + 2 * self.cuboid_size[2] - 1 + ) + relative_coords[:, :, 1] *= 2 * self.cuboid_size[2] - 1 + relative_position_index = relative_coords.sum(axis=-1) + self.register_buffer( + name="relative_position_index", tensor=relative_position_index + ) + self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias_attr=qkv_bias) + self.attn_drop = nn.Dropout(p=attn_drop) + if self.use_global_vector: + if self.separate_global_qkv: + self.l2g_q_net = nn.Linear( + in_features=dim, out_features=dim, bias_attr=qkv_bias + ) + self.l2g_global_kv_net = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=dim * 2, + bias_attr=qkv_bias, + ) + self.g2l_global_q_net = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=dim, + bias_attr=qkv_bias, + ) + self.g2l_k_net = nn.Linear( + in_features=dim, out_features=dim, bias_attr=qkv_bias + ) + self.g2l_v_net = nn.Linear( + in_features=dim, + out_features=global_dim_ratio * dim, + bias_attr=qkv_bias, + ) + if self.use_global_self_attn: + self.g2g_global_qkv_net = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=global_dim_ratio * dim * 3, + bias_attr=qkv_bias, + ) + else: + self.global_qkv = nn.Linear( + in_features=dim, out_features=dim * 3, bias_attr=qkv_bias + ) + self.global_attn_drop = nn.Dropout(p=attn_drop) + if use_final_proj: + self.proj = nn.Linear(in_features=dim, out_features=dim) + self.proj_drop = nn.Dropout(p=proj_drop) + if self.use_global_vector: + self.global_proj = nn.Linear( + in_features=global_dim_ratio * dim, + out_features=global_dim_ratio * dim, + ) + self.norm = cuboid_utils.get_norm_layer(norm_layer, in_channels=dim) + if self.use_global_vector: + self.global_vec_norm = cuboid_utils.get_norm_layer( + norm_layer, in_channels=global_dim_ratio * dim + ) + self.checkpoint_level = checkpoint_level + self.reset_parameters() + + def reset_parameters(self): + cuboid_utils.apply_initialization( + self.qkv, linear_mode=self.attn_linear_init_mode + ) + if self.use_final_proj: + cuboid_utils.apply_initialization( + self.proj, linear_mode=self.ffn_linear_init_mode + ) + cuboid_utils.apply_initialization(self.norm, norm_mode=self.norm_init_mode) + if self.use_global_vector: + if 
self.separate_global_qkv: + cuboid_utils.apply_initialization( + self.l2g_q_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.l2g_global_kv_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.g2l_global_q_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.g2l_k_net, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.g2l_v_net, linear_mode=self.attn_linear_init_mode + ) + if self.use_global_self_attn: + cuboid_utils.apply_initialization( + self.g2g_global_qkv_net, linear_mode=self.attn_linear_init_mode + ) + else: + cuboid_utils.apply_initialization( + self.global_qkv, linear_mode=self.attn_linear_init_mode + ) + cuboid_utils.apply_initialization( + self.global_vec_norm, norm_mode=self.norm_init_mode + ) + + def forward(self, x, global_vectors=None): + x = self.norm(x) + + B, T, H, W, C_in = x.shape + assert C_in == self.dim + if self.use_global_vector: + _, num_global, _ = global_vectors.shape + global_vectors = self.global_vec_norm(global_vectors) + cuboid_size, shift_size = update_cuboid_size_shift_size( + (T, H, W), self.cuboid_size, self.shift_size, self.strategy + ) + + pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0] + pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1] + pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2] + x = cuboid_utils.generalize_padding(x, pad_t, pad_h, pad_w, self.padding_type) + + if any(i > 0 for i in shift_size): + shifted_x = paddle.roll( + x=x, + shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), + axis=(1, 2, 3), + ) + else: + shifted_x = x + + reordered_x = cuboid_reorder( + shifted_x, cuboid_size=cuboid_size, strategy=self.strategy + ) + + _, num_cuboids, cuboid_volume, _ = reordered_x.shape + attn_mask = compute_cuboid_self_attention_mask( + (T, H, W), + cuboid_size, + shift_size=shift_size, + strategy=self.strategy, + padding_type=self.padding_type, + device=x.place, + ) + head_C = C_in // self.num_heads + qkv = ( + self.qkv(reordered_x) + .reshape([B, num_cuboids, cuboid_volume, 3, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + + q, k, v = qkv[0], qkv[1], qkv[2] + q = q * self.scale + perm_0 = list(range(k.ndim)) + perm_0[-2] = -1 + perm_0[-1] = -2 + attn_score = q @ k.transpose(perm=perm_0) + + if self.use_relative_pos: + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index[:cuboid_volume, :cuboid_volume].reshape( + [-1] + ) + ].reshape([cuboid_volume, cuboid_volume, -1]) + relative_position_bias = relative_position_bias.transpose( + perm=[2, 0, 1] + ).unsqueeze(axis=1) + attn_score = attn_score + relative_position_bias + + if self.use_global_vector: + global_head_C = self.global_dim_ratio * head_C + if self.separate_global_qkv: + l2g_q = ( + self.l2g_q_net(reordered_x) + .reshape([B, num_cuboids, cuboid_volume, self.num_heads, head_C]) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + l2g_q = l2g_q * self.scale + l2g_global_kv = ( + self.l2g_global_kv_net(global_vectors) + .reshape([B, 1, num_global, 2, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1] + g2l_global_q = ( + self.g2l_global_q_net(global_vectors) + .reshape([B, num_global, self.num_heads, head_C]) + .transpose(perm=[0, 2, 1, 3]) + ) + g2l_global_q = g2l_global_q * self.scale + g2l_k = ( + self.g2l_k_net(reordered_x) + .reshape([B, num_cuboids, 
cuboid_volume, self.num_heads, head_C]) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + g2l_v = ( + self.g2l_v_net(reordered_x) + .reshape( + [B, num_cuboids, cuboid_volume, self.num_heads, global_head_C] + ) + .transpose(perm=[0, 3, 1, 2, 4]) + ) + if self.use_global_self_attn: + g2g_global_qkv = ( + self.g2g_global_qkv_net(global_vectors) + .reshape([B, 1, num_global, 3, self.num_heads, global_head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + g2g_global_q, g2g_global_k, g2g_global_v = ( + g2g_global_qkv[0], + g2g_global_qkv[1], + g2g_global_qkv[2], + ) + g2g_global_q = g2g_global_q.squeeze(axis=2) * self.scale + else: + q_global, k_global, v_global = ( + self.global_qkv(global_vectors) + .reshape([B, 1, num_global, 3, self.num_heads, head_C]) + .transpose(perm=[3, 0, 4, 1, 2, 5]) + ) + q_global = q_global.squeeze(axis=2) * self.scale + l2g_q, g2l_k, g2l_v = q, k, v + g2l_global_q, l2g_global_k, l2g_global_v = ( + q_global, + k_global, + v_global, + ) + if self.use_global_self_attn: + g2g_global_q, g2g_global_k, g2g_global_v = ( + q_global, + k_global, + v_global, + ) + + perm_1 = list(range(l2g_global_k.ndim)) + perm_1[-2] = -1 + perm_1[-1] = -2 + l2g_attn_score = l2g_q @ l2g_global_k.transpose(perm=perm_1) + attn_score_l2l_l2g = paddle.concat(x=(attn_score, l2g_attn_score), axis=-1) + + if attn_mask.ndim == 5: + attn_mask_l2l_l2g = F.pad( + attn_mask, [0, num_global], "constant", 1, data_format="NDHWC" + ) + elif attn_mask.ndim == 3: + attn_mask = attn_mask.astype("float32") + attn_mask_l2l_l2g = F.pad( + attn_mask, [0, num_global], "constant", 1, data_format="NCL" + ) + attn_mask_l2l_l2g = attn_mask_l2l_l2g.astype("bool") + else: + attn_mask_l2l_l2g = F.pad(attn_mask, [0, num_global], "constant", 1) + + v_l_g = paddle.concat( + x=( + v, + l2g_global_v.expand( + shape=[B, self.num_heads, num_cuboids, num_global, head_C] + ), + ), + axis=3, + ) + attn_score_l2l_l2g = masked_softmax( + attn_score_l2l_l2g, mask=attn_mask_l2l_l2g + ) + attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g) + reordered_x = ( + (attn_score_l2l_l2g @ v_l_g) + .transpose(perm=[0, 2, 3, 1, 4]) + .reshape([B, num_cuboids, cuboid_volume, self.dim]) + ) + if self.padding_type == "ignore": + g2l_attn_mask = paddle.ones(shape=(1, T, H, W, 1)) + if pad_t > 0 or pad_h > 0 or pad_w > 0: + g2l_attn_mask = F.pad( + g2l_attn_mask, + [0, 0, 0, pad_w, 0, pad_h, 0, pad_t], + data_format="NDHWC", + ) + if any(i > 0 for i in shift_size): + g2l_attn_mask = paddle.roll( + x=g2l_attn_mask, + shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), + axis=(1, 2, 3), + ) + g2l_attn_mask = g2l_attn_mask.reshape((-1,)) + else: + g2l_attn_mask = None + temp = g2l_k.reshape( + [B, self.num_heads, num_cuboids * cuboid_volume, head_C] + ) + perm_2 = list(range(temp.ndim)) + perm_2[-2] = -1 + perm_2[-1] = -2 + g2l_attn_score = g2l_global_q @ temp.transpose(perm=perm_2) + if self.use_global_self_attn: + temp = g2g_global_k.squeeze(axis=2) + perm_3 = list(range(temp.ndim)) + perm_3[-2] = -1 + perm_3[-1] = -2 + g2g_attn_score = g2g_global_q @ temp.transpose(perm=perm_3) + g2all_attn_score = paddle.concat( + x=(g2l_attn_score, g2g_attn_score), axis=-1 + ) + if g2l_attn_mask is not None: + g2all_attn_mask = F.pad( + g2l_attn_mask, + [0, num_global], + "constant", + 1, + data_format="NDHWC", + ) + else: + g2all_attn_mask = None + new_v = paddle.concat( + x=( + g2l_v.reshape( + [ + B, + self.num_heads, + num_cuboids * cuboid_volume, + global_head_C, + ] + ), + g2g_global_v.reshape( + [B, self.num_heads, num_global, global_head_C] + ), + ), + axis=2, 
+ ) + else: + g2all_attn_score = g2l_attn_score + g2all_attn_mask = g2l_attn_mask + new_v = g2l_v.reshape( + [B, self.num_heads, num_cuboids * cuboid_volume, global_head_C] + ) + g2all_attn_score = masked_softmax(g2all_attn_score, mask=g2all_attn_mask) + g2all_attn_score = self.global_attn_drop(g2all_attn_score) + new_global_vector = ( + (g2all_attn_score @ new_v) + .transpose(perm=[0, 2, 1, 3]) + .reshape([B, num_global, self.global_dim_ratio * self.dim]) + ) + else: + attn_score = masked_softmax(attn_score, mask=attn_mask) + attn_score = self.attn_drop(attn_score) + reordered_x = ( + (attn_score @ v) + .transpose(perm=[0, 2, 3, 1, 4]) + .reshape([B, num_cuboids, cuboid_volume, self.dim]) + ) + + if self.use_final_proj: + reordered_x = paddle.cast(reordered_x, dtype="float32") + reordered_x = self.proj_drop(self.proj(reordered_x)) + if self.use_global_vector: + new_global_vector = self.proj_drop(self.global_proj(new_global_vector)) + shifted_x = cuboid_reorder_reverse( + reordered_x, + cuboid_size=cuboid_size, + strategy=self.strategy, + orig_data_shape=(T + pad_t, H + pad_h, W + pad_w), + ) + if any(i > 0 for i in shift_size): + x = paddle.roll( + x=shifted_x, + shifts=(shift_size[0], shift_size[1], shift_size[2]), + axis=(1, 2, 3), + ) + else: + x = shifted_x + x = cuboid_utils.generalize_unpadding( + x, pad_t=pad_t, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type + ) + if self.use_global_vector: + return x, new_global_vector + else: + return x + + +class StackCuboidSelfAttentionBlock(nn.Layer): + """ + - "use_inter_ffn" is True + x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out + | ^ | ^ + | | | | + |-------------| |-------------| + - "use_inter_ffn" is False + x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out + | ^ | ^ ^ | ^ + | | | | | | | + |-------------| |------------| ----------| |-----------| + If we have enabled global memory vectors, each attention will be a + + Args: + dim (int): The dimension of the input tensor. + num_heads (int): The number of heads. + block_cuboid_size (list, optional): The size of block cuboid . Defaults to [(4, 4, 4), (4, 4, 4)]. + block_shift_size (list, optional): The shift size of block. Defaults to [(0, 0, 0), (2, 2, 2)]. + block_strategy (list, optional): The strategy of block. Defaults to [("d", "d", "d"), ("l", "l", "l")]. + padding_type (str, optional): The type of padding. Defaults to "ignore". + qkv_bias (bool, optional): Whether to enable bias in calculating qkv attention. Defaults to False. + qk_scale (float, optional): Whether to enable scale factor when calculating the attention. Defaults to None. + attn_drop (float, optional): The attention dropout. Defaults to 0.0. + proj_drop (float, optional): The projection dropout. Defaults to 0.0. + use_final_proj (bool, optional): Whether to use the final projection. Defaults to True. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. + use_global_self_attn (bool, optional): Whether to use self attention among global vectors. Defaults to False. + separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. + Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. + Defaults to 1. + checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. 
+        use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True.
+        attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0".
+        ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0".
+        norm_init_mode (str, optional): The mode of normalization initialization. Defaults to "0".
+    """
+
+    def __init__(
+        self,
+        dim: int,
+        num_heads: int,
+        block_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)],
+        block_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (2, 2, 2)],
+        block_strategy: Tuple[Tuple[str, ...], ...] = [
+            ("d", "d", "d"),
+            ("l", "l", "l"),
+        ],
+        padding_type: str = "ignore",
+        qkv_bias: bool = False,
+        qk_scale: float = None,
+        attn_drop: float = 0.0,
+        proj_drop: float = 0.0,
+        ffn_drop: float = 0.0,
+        activation: str = "leaky",
+        gated_ffn: bool = False,
+        norm_layer: str = "layer_norm",
+        use_inter_ffn: bool = False,
+        use_global_vector: bool = False,
+        use_global_vector_ffn: bool = True,
+        use_global_self_attn: bool = False,
+        separate_global_qkv: bool = False,
+        global_dim_ratio: int = 1,
+        checkpoint_level: bool = True,
+        use_relative_pos: bool = True,
+        use_final_proj: bool = True,
+        attn_linear_init_mode: str = "0",
+        ffn_linear_init_mode: str = "0",
+        norm_init_mode: str = "0",
+        moe_config: dict = None,
+        expert_shape: tuple = None,
+    ):
+        super(StackCuboidSelfAttentionBlock, self).__init__()
+        self.attn_linear_init_mode = attn_linear_init_mode
+        self.ffn_linear_init_mode = ffn_linear_init_mode
+        self.norm_init_mode = norm_init_mode
+        if (
+            len(block_cuboid_size[0]) <= 0
+            or len(block_shift_size) <= 0
+            or len(block_strategy) <= 0
+        ):
+            raise ValueError(
+                f"Format of the block cuboid size is not correct. block_cuboid_size={block_cuboid_size}"
+            )
+        if len(block_cuboid_size) != len(block_shift_size) or len(
+            block_cuboid_size
+        ) != len(block_strategy):
+            raise ValueError(
+                "The lengths of block_cuboid_size, block_shift_size, and block_strategy must be equal."
+ ) + + self.num_attn = len(block_cuboid_size) + self.checkpoint_level = checkpoint_level + self.use_inter_ffn = use_inter_ffn + self.use_global_vector = use_global_vector + self.use_global_vector_ffn = use_global_vector_ffn + self.use_global_self_attn = use_global_self_attn + self.global_dim_ratio = global_dim_ratio + if self.use_inter_ffn: + if moe_config["use_ffn_moe"]: + self.ffn_l = nn.LayerList( + sublayers=[ + MixtureFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + for _ in range(self.num_attn) + ] + ) + else: + self.ffn_l = nn.LayerList( + sublayers=[ + PositionwiseFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + for _ in range(self.num_attn) + ] + ) + if self.use_global_vector_ffn and self.use_global_vector: + if moe_config["use_ffn_moe"]: + self.global_ffn_l = nn.LayerList( + sublayers=[ + MixtureFFN( + units=global_dim_ratio * dim, + hidden_size=global_dim_ratio * 4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + for _ in range(self.num_attn) + ] + ) + else: + self.global_ffn_l = nn.LayerList( + sublayers=[ + PositionwiseFFN( + units=global_dim_ratio * dim, + hidden_size=global_dim_ratio * 4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + for _ in range(self.num_attn) + ] + ) + else: + if moe_config["use_ffn_moe"]: + self.ffn_l = nn.LayerList( + sublayers=[ + MixtureFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + ] + ) + else: + self.ffn_l = nn.LayerList( + sublayers=[ + PositionwiseFFN( + units=dim, + hidden_size=4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + ] + ) + if self.use_global_vector_ffn and self.use_global_vector: + if moe_config["use_ffn_moe"]: + self.global_ffn_l = nn.LayerList( + sublayers=[ + MixtureFFN( + units=global_dim_ratio * dim, + hidden_size=global_dim_ratio * 4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + ] + ) + 
else: + self.global_ffn_l = nn.LayerList( + sublayers=[ + PositionwiseFFN( + units=global_dim_ratio * dim, + hidden_size=global_dim_ratio * 4 * dim, + activation_dropout=ffn_drop, + dropout=ffn_drop, + gated_proj=gated_ffn, + activation=activation, + normalization=norm_layer, + pre_norm=True, + linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + ] + ) + + if moe_config["use_attn_moe"]: + self.attn_l = nn.LayerList( + sublayers=[ + MixtureSelfAttention( + dim=dim, + num_heads=num_heads, + cuboid_size=ele_cuboid_size, + shift_size=ele_shift_size, + strategy=ele_strategy, + padding_type=padding_type, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + use_global_vector=use_global_vector, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + use_final_proj=use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape, + moe_config=moe_config, + ) + for ele_cuboid_size, ele_shift_size, ele_strategy in zip( + block_cuboid_size, block_shift_size, block_strategy + ) + ] + ) + else: + self.attn_l = nn.LayerList( + sublayers=[ + CuboidSelfAttentionLayer( + dim=dim, + num_heads=num_heads, + cuboid_size=ele_cuboid_size, + shift_size=ele_shift_size, + strategy=ele_strategy, + padding_type=padding_type, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + use_global_vector=use_global_vector, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + use_final_proj=use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for ele_cuboid_size, ele_shift_size, ele_strategy in zip( + block_cuboid_size, block_shift_size, block_strategy + ) + ] + ) + + def reset_parameters(self): + for m in self.ffn_l: + m.reset_parameters() + if self.use_global_vector_ffn and self.use_global_vector: + for m in self.global_ffn_l: + m.reset_parameters() + for m in self.attn_l: + m.reset_parameters() + + def forward(self, x, global_vectors=None): + if self.use_inter_ffn: + if self.use_global_vector: + for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)): + if self.checkpoint_level >= 2 and self.training: + x_out, global_vectors_out = fleet.utils.recompute( + attn, x, global_vectors + ) + else: + x_out, global_vectors_out = attn(x, global_vectors) + x = x + x_out + global_vectors = global_vectors + global_vectors_out + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(ffn, x) + if self.use_global_vector_ffn: + global_vectors = fleet.utils.recompute( + self.global_ffn_l[idx], global_vectors + ) + else: + x = ffn(x) + if self.use_global_vector_ffn: + global_vectors = self.global_ffn_l[idx](global_vectors) + return x, global_vectors + else: + for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)): + if self.checkpoint_level >= 2 and self.training: + x = x + fleet.utils.recompute(attn, x) + else: + x = x + attn(x) + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(ffn, x) + else: + 
x = ffn(x) + return x + elif self.use_global_vector: + for idx, attn in enumerate(self.attn_l): + if self.checkpoint_level >= 2 and self.training: + x_out, global_vectors_out = fleet.utils.recompute( + attn, x, global_vectors + ) + else: + x_out, global_vectors_out = attn(x, global_vectors) + x = x + x_out + global_vectors = global_vectors + global_vectors_out + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(self.ffn_l[0], x) + if self.use_global_vector_ffn: + global_vectors = fleet.utils.recompute( + self.global_ffn_l[0], global_vectors + ) + else: + x = self.ffn_l[0](x) + if self.use_global_vector_ffn: + global_vectors = self.global_ffn_l[0](global_vectors) + return x, global_vectors + else: + for idx, attn in enumerate(self.attn_l): + if self.checkpoint_level >= 2 and self.training: + out = fleet.utils.recompute(attn, x) + else: + out = attn(x) + x = x + out + if self.checkpoint_level >= 1 and self.training: + x = fleet.utils.recompute(self.ffn_l[0], x) + else: + x = self.ffn_l[0](x) + return x + + +class CuboidTransformerEncoder(nn.Layer): + """Encoder of the CuboidTransformer + + x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... --> out + + Args: + input_shape (Tuple[int,...]): The shape of the input. Contains T, H, W, C + base_units (int, optional): The number of units. Defaults to 128. + block_units (int, optional): The number of block units. Defaults to None. + scale_alpha (float, optional): We scale up the channels based on the formula: + - round_to(base_units * max(downsample_scale) ** units_alpha, 4). Defaults to 1.0. + depth (list, optional): The number of layers for each block. Defaults to [4, 4, 4]. + downsample (int, optional): The downsample ratio. Defaults to 2. + downsample_type (str, optional): The type of downsample. Defaults to "patch_merge". + block_attn_patterns (str, optional): Attention pattern for the cuboid attention for each block. Defaults to None. + block_cuboid_size (list, optional): A list of cuboid size parameters. Defaults to [(4, 4, 4), (4, 4, 4)]. + block_strategy (list, optional): A list of cuboid strategies. Defaults to [("l", "l", "l"), ("d", "d", "d")]. + block_shift_size (list, optional): A list of shift sizes. Defaults to [(0, 0, 0), (0, 0, 0)]. + num_heads (int, optional): The number of heads. Defaults to 4. + attn_drop (float, optional): The ratio of attention dropout. Defaults to 0.0. + proj_drop (float, optional): The ratio of projection dropout. Defaults to 0.0. + ffn_drop (float, optional): The ratio of FFN dropout. Defaults to 0.0. + ffn_activation (str, optional): The FFN activation. Defaults to "leaky". + gated_ffn (bool, optional): Whether to use gate FFN. Defaults to False. + norm_layer (str, optional): The normalization layer. Defaults to "layer_norm". + use_inter_ffn (bool, optional): Whether to use inter FFN. Defaults to True. + padding_type (str, optional): The type of padding. Defaults to "ignore". + checkpoint_level (bool, optional): Whether to enable gradient checkpointing. Defaults to True. + use_relative_pos (bool, optional): Whether to use relative pos. Defaults to True. + self_attn_use_final_proj (bool, optional): Whether to use self attention for final projection. Defaults to True. + use_global_vector (bool, optional): Whether to use the global vector or not. Defaults to False. + use_global_vector_ffn (bool, optional): Whether to use FFN global vectors. Defaults to False. + use_global_self_attn (bool, optional): Whether to use global self attention. Defaults to False. 
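The checkpoint_level convention used in the forward pass above (level >= 2 recomputes the attention sublayers during backward, level >= 1 recomputes the FFN sublayers, 0 disables recomputation) can be illustrated with a minimal, self-contained sketch. The toy CheckpointedBlock below is an assumption introduced only for illustration, with plain Linear layers standing in for the cuboid attention and PositionwiseFFN layers; it is not part of the patch.

import paddle
from paddle import nn
from paddle.distributed import fleet


class CheckpointedBlock(nn.Layer):
    """Toy residual block mirroring the checkpoint_level convention above.

    checkpoint_level >= 2: recompute the attention sublayer in backward.
    checkpoint_level >= 1: recompute the FFN sublayer as well.
    Illustrative sketch only, not part of the patched module.
    """

    def __init__(self, dim: int, checkpoint_level: int = 2):
        super().__init__()
        self.attn = nn.Linear(dim, dim)  # stand-in for a cuboid attention layer
        self.ffn = nn.Linear(dim, dim)  # stand-in for PositionwiseFFN
        self.checkpoint_level = checkpoint_level

    def forward(self, x):
        if self.checkpoint_level >= 2 and self.training:
            x = x + fleet.utils.recompute(self.attn, x)
        else:
            x = x + self.attn(x)
        if self.checkpoint_level >= 1 and self.training:
            x = fleet.utils.recompute(self.ffn, x)
        else:
            x = self.ffn(x)
        return x


# Recomputed sublayers are re-evaluated during backward instead of storing
# their activations, trading compute for memory.
block = CheckpointedBlock(dim=32, checkpoint_level=2)
block.train()
x = paddle.randn([4, 32])
x.stop_gradient = False
block(x).mean().backward()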
+ separate_global_qkv (bool, optional): Whether to use different network to calc q_global, k_global, v_global. + Defaults to False. + global_dim_ratio (int, optional): The dim (channels) of global vectors is `global_dim_ratio*dim`. + Defaults to 1. + attn_linear_init_mode (str, optional): The mode of attention linear initialization. Defaults to "0". + ffn_linear_init_mode (str, optional): The mode of FFN linear initialization. Defaults to "0". + conv_init_mode (str, optional): The mode of conv initialization. Defaults to "0". + down_linear_init_mode (str, optional): The mode of downsample linear initialization. Defaults to "0". + norm_init_mode (str, optional): The mode of normalization. Defaults to "0". + """ + + def __init__( + self, + input_shape: Tuple[int, ...], + base_units: int = 128, + block_units: int = None, + scale_alpha: float = 1.0, + depth: Tuple[int, ...] = [4, 4, 4], + downsample: int = 2, + downsample_type: str = "patch_merge", + block_attn_patterns: str = None, + block_cuboid_size: Tuple[Tuple[int, ...], ...] = [(4, 4, 4), (4, 4, 4)], + block_strategy: Tuple[Tuple[str, ...], ...] = [ + ("l", "l", "l"), + ("d", "d", "d"), + ], + block_shift_size: Tuple[Tuple[int, ...], ...] = [(0, 0, 0), (0, 0, 0)], + num_heads: int = 4, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + ffn_drop: float = 0.0, + ffn_activation: str = "leaky", + gated_ffn: bool = False, + norm_layer: str = "layer_norm", + use_inter_ffn: bool = True, + padding_type: str = "ignore", + checkpoint_level: bool = True, + use_relative_pos: bool = True, + self_attn_use_final_proj: bool = True, + use_global_vector: bool = False, + use_global_vector_ffn: bool = True, + use_global_self_attn: bool = False, + separate_global_qkv: bool = False, + global_dim_ratio: int = 1, + attn_linear_init_mode: str = "0", + ffn_linear_init_mode: str = "0", + conv_init_mode: str = "0", + down_linear_init_mode: str = "0", + norm_init_mode: str = "0", + moe_config: dict = None, + ): + super(CuboidTransformerEncoder, self).__init__() + self.attn_linear_init_mode = attn_linear_init_mode + self.ffn_linear_init_mode = ffn_linear_init_mode + self.conv_init_mode = conv_init_mode + self.down_linear_init_mode = down_linear_init_mode + self.norm_init_mode = norm_init_mode + self.input_shape = input_shape + self.depth = depth + self.num_blocks = len(depth) + self.base_units = base_units + self.scale_alpha = scale_alpha + if not isinstance(downsample, (tuple, list)): + downsample = 1, downsample, downsample + self.downsample = downsample + self.downsample_type = downsample_type + self.num_heads = num_heads + self.use_global_vector = use_global_vector + self.checkpoint_level = checkpoint_level + + if block_units is None: + block_units = [ + cuboid_utils.round_to( + base_units * int((max(downsample) ** scale_alpha) ** i), 4 + ) + for i in range(self.num_blocks) + ] + else: + assert len(block_units) == self.num_blocks and block_units[0] == base_units + self.block_units = block_units + if self.num_blocks > 1: + if downsample_type == "patch_merge": + self.down_layers = nn.LayerList( + sublayers=[ + PatchMerging3D( + dim=self.block_units[i], + downsample=downsample, + padding_type=padding_type, + out_dim=self.block_units[i + 1], + linear_init_mode=down_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for i in range(self.num_blocks - 1) + ] + ) + else: + raise NotImplementedError(f"{downsample_type} is invalid.") + if self.use_global_vector: + self.down_layer_global_proj = nn.LayerList( + sublayers=[ + nn.Linear( + in_features=global_dim_ratio 
* self.block_units[i], + out_features=global_dim_ratio * self.block_units[i + 1], + ) + for i in range(self.num_blocks - 1) + ] + ) + if block_attn_patterns is not None: + mem_shapes = self.get_mem_shapes() + if isinstance(block_attn_patterns, (tuple, list)): + assert len(block_attn_patterns) == self.num_blocks + else: + block_attn_patterns = [ + block_attn_patterns for _ in range(self.num_blocks) + ] + block_cuboid_size = [] + block_strategy = [] + block_shift_size = [] + for idx, key in enumerate(block_attn_patterns): + func = cuboid_utils.CuboidSelfAttentionPatterns.get(key) + cuboid_size, strategy, shift_size = func(mem_shapes[idx]) + block_cuboid_size.append(cuboid_size) + block_strategy.append(strategy) + block_shift_size.append(shift_size) + else: + if not isinstance(block_cuboid_size[0][0], (list, tuple)): + block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)] + else: + assert ( + len(block_cuboid_size) == self.num_blocks + ), f"Incorrect input format! Received block_cuboid_size={block_cuboid_size}" + if not isinstance(block_strategy[0][0], (list, tuple)): + block_strategy = [block_strategy for _ in range(self.num_blocks)] + else: + assert ( + len(block_strategy) == self.num_blocks + ), f"Incorrect input format! Received block_strategy={block_strategy}" + if not isinstance(block_shift_size[0][0], (list, tuple)): + block_shift_size = [block_shift_size for _ in range(self.num_blocks)] + else: + assert ( + len(block_shift_size) == self.num_blocks + ), f"Incorrect input format! Received block_shift_size={block_shift_size}" + self.block_cuboid_size = block_cuboid_size + self.block_strategy = block_strategy + self.block_shift_size = block_shift_size + + expert_shape_list = self.get_mem_shapes() + self.blocks = nn.LayerList( + sublayers=[ + nn.Sequential( + *[ + StackCuboidSelfAttentionBlock( + dim=self.block_units[i], + num_heads=num_heads, + block_cuboid_size=block_cuboid_size[i], + block_strategy=block_strategy[i], + block_shift_size=block_shift_size[i], + attn_drop=attn_drop, + proj_drop=proj_drop, + ffn_drop=ffn_drop, + activation=ffn_activation, + gated_ffn=gated_ffn, + norm_layer=norm_layer, + use_inter_ffn=use_inter_ffn, + padding_type=padding_type, + use_global_vector=use_global_vector, + use_global_vector_ffn=use_global_vector_ffn, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + use_final_proj=self_attn_use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + expert_shape=expert_shape_list[i], + moe_config=moe_config, + ) + for _ in range(depth[i]) + ] + ) + for i in range(self.num_blocks) + ] + ) + self.reset_parameters() + + def reset_parameters(self): + if self.num_blocks > 1: + for m in self.down_layers: + m.reset_parameters() + if self.use_global_vector: + cuboid_utils.apply_initialization( + self.down_layer_global_proj, linear_mode=self.down_linear_init_mode + ) + for ms in self.blocks: + for m in ms: + m.reset_parameters() + + def get_mem_shapes(self): + """Get the shape of the output memory based on the input shape. This can be used for constructing the decoder. 
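For concreteness, the block_units channel schedule computed above works out as follows under the default settings. The numbers are illustrative only and simply restate the list comprehension; in the patched code each value is additionally passed through cuboid_utils.round_to(..., 4).

# Illustrative numbers only: the default channel schedule of the encoder.
base_units = 128
downsample = (1, 2, 2)  # temporal size kept, H and W halved between blocks
scale_alpha = 1.0
num_blocks = 3  # len(depth) for depth=[4, 4, 4]

block_units = [
    base_units * int((max(downsample) ** scale_alpha) ** i)
    for i in range(num_blocks)
]
print(block_units)  # [128, 256, 512] -> channels double each time H and W are halved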
+ + Returns: + mem_shapes : A list of shapes of the output memory + """ + + if self.num_blocks == 1: + return [self.input_shape] + else: + mem_shapes = [self.input_shape] + curr_shape = self.input_shape + for down_layer in self.down_layers: + curr_shape = down_layer.get_out_shape(curr_shape) + mem_shapes.append(curr_shape) + return mem_shapes + + def forward(self, x, global_vectors=None): + """ + Args: + x : Shape (B, T, H, W, C) + + Returns: + out (List[paddle.Tensor,..]): A list of tensors from the bottom layer to the top layer of the encoder. For + example, it can have shape + - (B, T, H, W, C1) + - (B, T, H // 2, W // 2, 2 * C1) + - (B, T, H // 4, W // 4, 4 * C1) + ... + global_mem_out (List,Optional): The output of the global vector. + """ + + B, T, H, W, C_in = x.shape + assert (T, H, W, C_in) == self.input_shape + + if self.use_global_vector: + out = [] + global_mem_out = [] + for i in range(self.num_blocks): + for l in self.blocks[i]: + x, global_vectors = l(x, global_vectors) + out.append(x) + global_mem_out.append(global_vectors) + if self.num_blocks > 1 and i < self.num_blocks - 1: + x = self.down_layers[i](x) + global_vectors = self.down_layer_global_proj[i](global_vectors) + return out, global_mem_out + else: + out = [] + for i in range(self.num_blocks): + x = self.blocks[i](x) + out.append(x) + if self.num_blocks > 1 and i < self.num_blocks - 1: + x = self.down_layers[i](x) + return out + + +class MixtureLinear(nn.Layer): + def __init__(self, in_dim, out_dim, expert_shape, moe_config, bias_attr=True): + super().__init__() + + self.in_dim = in_dim + self.out_dim = out_dim + self.bias = bias_attr + self.expert_shape = expert_shape # T, H, W, C_o + self.num_experts = moe_config["num_experts"] + self.out_planes = moe_config["out_planes"] + self.moe_config = moe_config + assert expert_shape is not None and moe_config["use_linear_moe"] + + if moe_config["gate_style"] == "linear": + self.gate = moe_utils.LinearGatingNet(moe_config, expert_shape, in_dim) + elif moe_config["gate_style"] == "spatial-latent": + self.gate = moe_utils.SpatialLatentGatingNet( + moe_config, expert_shape, in_dim + ) + elif moe_config["gate_style"] == "cuboid-latent": + self.gate = moe_utils.CuboidLatentGatingNet( + moe_config, expert_shape, in_dim + ) + elif moe_config["gate_style"] == "spatial-latent-linear": + self.gate = moe_utils.SpatialLatentLinearGatingNet( + moe_config, expert_shape, in_dim + ) + elif moe_config["gate_style"] == "cuboid-latent-linear": + self.gate = moe_utils.CuboidLatentLinearGatingNet( + moe_config, expert_shape, in_dim + ) + else: + raise NotImplementedError + + self.experts = nn.LayerList( + [ + nn.Linear(in_features=in_dim, out_features=out_dim, bias_attr=bias_attr) + for _ in range(self.num_experts) + ] + ) + + def forward(self, x): + + B, T, H, W, C = x.shape + E = self.num_experts + assert C == self.in_dim and list(self.expert_shape)[:-1] == x.shape[1:-1] + ( + dense_routing_weights, + sparse_routing_weights, + sparse_routing_inds, + self.aux_loss, + ) = self.gate( + x + ) # dense: B, T, H, W, E + + if self.moe_config["dispatch_style"] == "dense": + dispatcher = moe_utils.DenseDispatcher( + E, + sparse_routing_weights.reshape([B * T * H * W, -1]), + sparse_routing_inds.reshape([B * T * H * W, -1]), + ) + expert_outputs = paddle.stack( + [self.experts[i](x.reshape([B * T * H * W, -1])) for i in range(E)], + axis=-2, + ) + y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, -1]) + elif self.moe_config["dispatch_style"] == "sparse": + dispatcher = 
moe_utils.SparseDispatcher( + E, + sparse_routing_weights.reshape([B * T * H * W, -1]), + sparse_routing_inds.reshape([B * T * H * W, -1]), + ) + expert_inputs = dispatcher.dispatch(x.reshape([B * T * H * W, -1])) + expert_outputs = [ + self.experts[i](expert_inputs[i]) + if expert_inputs[i].shape[0] > 0 + else paddle.zeros([0, self.out_dim]) + for i in range(E) + ] + y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, -1]) + else: + raise NotImplementedError + + return y + + +class MixtureFFN(nn.Layer): + def __init__( + self, + units, + hidden_size, + activation_dropout, + dropout, + gated_proj, + activation, + normalization, + pre_norm, + linear_init_mode, + norm_init_mode, + expert_shape, + moe_config, + ): + super().__init__() + + self.in_dim = units + self.out_dim = units + self.expert_shape = expert_shape # T, H, W, C_o + self.num_experts = moe_config["num_experts"] + self.out_planes = moe_config["out_planes"] + self.moe_config = moe_config + assert expert_shape is not None and moe_config["use_ffn_moe"] + + if moe_config["gate_style"] == "linear": + self.gate = moe_utils.LinearGatingNet(moe_config, expert_shape, units) + elif moe_config["gate_style"] == "spatial-latent": + self.gate = moe_utils.SpatialLatentGatingNet( + moe_config, expert_shape, units + ) + elif moe_config["gate_style"] == "cuboid-latent": + self.gate = moe_utils.CuboidLatentGatingNet(moe_config, expert_shape, units) + elif moe_config["gate_style"] == "spatial-latent-linear": + self.gate = moe_utils.SpatialLatentLinearGatingNet( + moe_config, expert_shape, units + ) + elif moe_config["gate_style"] == "cuboid-latent-linear": + self.gate = moe_utils.CuboidLatentLinearGatingNet( + moe_config, expert_shape, units + ) + else: + raise NotImplementedError + + self.experts = nn.LayerList( + [ + PositionwiseFFN( + units=units, + hidden_size=hidden_size, + activation_dropout=activation_dropout, + dropout=dropout, + gated_proj=gated_proj, + activation=activation, + normalization=normalization, + pre_norm=pre_norm, + linear_init_mode=linear_init_mode, + norm_init_mode=norm_init_mode, + moe_config=moe_config, + expert_shape=expert_shape, + ) + for _ in range(self.num_experts) + ] + ) + + def forward(self, x): + + B, T, H, W, C = x.shape + E = self.num_experts + assert C == self.in_dim and list(self.expert_shape)[:-1] == x.shape[1:-1] + ( + dense_routing_weights, + sparse_routing_weights, + sparse_routing_inds, + self.aux_loss, + ) = self.gate( + x + ) # dense: B, T, H, W, E + + if self.moe_config["dispatch_style"] == "dense": + dispatcher = moe_utils.DenseDispatcher( + E, + sparse_routing_weights.reshape([B * T * H * W, -1]), + sparse_routing_inds.reshape([B * T * H * W, -1]), + ) + expert_outputs = paddle.stack( + [self.experts[i](x.reshape([B * T * H * W, -1])) for i in range(E)], + axis=-2, + ) + y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, C]) + elif self.moe_config["dispatch_style"] == "sparse": + dispatcher = moe_utils.SparseDispatcher( + E, + sparse_routing_weights.reshape([B * T * H * W, -1]), + sparse_routing_inds.reshape([B * T * H * W, -1]), + ) + expert_inputs = dispatcher.dispatch(x.reshape([B * T * H * W, -1])) + expert_outputs = [ + self.experts[i](expert_inputs[i]) + if expert_inputs[i].shape[0] > 0 + else paddle.zeros([0, self.out_dim]) + for i in range(E) + ] + y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, C]) + else: + raise NotImplementedError + + return y + + def reset_parameters(self): + + for i in range(len(self.experts)): + self.experts[i].reset_parameters() + + +class 
MixtureSelfAttention(nn.Layer): + def __init__( + self, + dim, + num_heads, + cuboid_size, + shift_size, + strategy, + padding_type, + qkv_bias, + qk_scale, + attn_drop, + proj_drop, + norm_layer, + use_global_vector, + use_global_self_attn, + separate_global_qkv, + global_dim_ratio, + checkpoint_level, + use_relative_pos, + use_final_proj, + attn_linear_init_mode, + ffn_linear_init_mode, + norm_init_mode, + expert_shape, + moe_config, + ): + super().__init__() + + self.in_dim = dim + self.out_dim = dim + self.expert_shape = expert_shape # T, H, W, C + self.num_experts = moe_config["num_experts"] + self.out_planes = moe_config["out_planes"] + self.moe_config = moe_config + assert expert_shape is not None and moe_config["use_attn_moe"] + assert not use_global_vector + + if moe_config["gate_style"] == "linear": + self.gate = moe_utils.LinearGatingNet(moe_config, expert_shape, dim) + elif moe_config["gate_style"] == "spatial-latent": + self.gate = moe_utils.SpatialLatentGatingNet(moe_config, expert_shape, dim) + elif moe_config["gate_style"] == "cuboid-latent": + self.gate = moe_utils.CuboidLatentGatingNet(moe_config, expert_shape, dim) + elif moe_config["gate_style"] == "spatial-latent-linear": + self.gate = moe_utils.SpatialLatentLinearGatingNet( + moe_config, expert_shape, dim + ) + elif moe_config["gate_style"] == "cuboid-latent-linear": + self.gate = moe_utils.CuboidLatentLinearGatingNet( + moe_config, expert_shape, dim + ) + else: + raise NotImplementedError + + self.experts = nn.LayerList( + [ + CuboidSelfAttentionLayer( + dim=dim, + num_heads=num_heads, + cuboid_size=cuboid_size, + shift_size=shift_size, + strategy=strategy, + padding_type=padding_type, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + use_global_vector=use_global_vector, + use_global_self_attn=use_global_self_attn, + separate_global_qkv=separate_global_qkv, + global_dim_ratio=global_dim_ratio, + checkpoint_level=checkpoint_level, + use_relative_pos=use_relative_pos, + use_final_proj=use_final_proj, + attn_linear_init_mode=attn_linear_init_mode, + ffn_linear_init_mode=ffn_linear_init_mode, + norm_init_mode=norm_init_mode, + ) + for _ in range(self.num_experts) + ] + ) + + def forward(self, x, global_vectors=None): + + B, T, H, W, C = x.shape + E = self.num_experts + assert C == self.in_dim and list(self.expert_shape)[:-1] == x.shape[1:-1] + ( + dense_routing_weights, + sparse_routing_weights, + sparse_routing_inds, + self.aux_loss, + ) = self.gate( + x + ) # dense: B, T, H, W, E + + dispatcher = moe_utils.DenseDispatcher( + E, + sparse_routing_weights.reshape([B * T * H * W, -1]), + sparse_routing_inds.reshape([B * T * H * W, -1]), + ) + expert_outputs = paddle.stack( + [self.experts[i](x, global_vectors) for i in range(E)], axis=-2 + ).reshape([B * T * H * W, E, C]) + y = dispatcher.combine(expert_outputs).reshape([B, T, H, W, C]) + + return y + + def reset_parameters(self): + + for i in range(len(self.experts)): + self.experts[i].reset_parameters() diff --git a/ppsci/arch/extformer_moe_cuboid_utils.py b/ppsci/arch/extformer_moe_cuboid_utils.py index 20531c82d6..6408f1d926 100644 --- a/ppsci/arch/extformer_moe_cuboid_utils.py +++ b/ppsci/arch/extformer_moe_cuboid_utils.py @@ -1,350 +1,350 @@ -import functools -from typing import Tuple - -import paddle -import paddle.nn.functional as F -from paddle import nn - -from ppsci.utils import initializer - - -def round_to(dat, c): - return dat + (dat - dat % c) % c - - -class RMSNorm(nn.Layer): - """Root Mean 
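The dense dispatch path used by MixtureLinear, MixtureFFN and MixtureSelfAttention above (every expert is applied to every token, then DenseDispatcher.combine mixes the top-k expert outputs with the renormalized gate weights) can be sketched with toy shapes as follows. All sizes here are made up for illustration; this is not the dispatcher implementation itself.

import paddle

# Shapes follow the comments in MixtureFFN.forward: N = B*T*H*W tokens, E experts, C channels.
N, E, C, K = 6, 4, 8, 2

expert_outputs = paddle.randn([N, E, C])  # every expert applied to every token
gates = paddle.nn.functional.softmax(paddle.randn([N, E]), axis=-1)
top_k_gates, top_k_inds = gates.topk(K, axis=-1)  # sparse_routing_weights / _inds
top_k_gates = top_k_gates / top_k_gates.sum(axis=-1, keepdim=True)  # renormalize over top-k

# "Dense dispatch": gather the K selected expert outputs per token and mix them.
selected = paddle.take_along_axis(
    expert_outputs, top_k_inds.unsqueeze(-1).expand([N, K, C]), axis=1
)
combined = (selected * top_k_gates.unsqueeze(-1)).sum(axis=1)  # [N, C]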
Square Layer Normalization proposed in "[NeurIPS2019] Root Mean Square Layer Normalization" - - Args: - d (Optional[int]): The model size. - p (float, optional): The partial RMSNorm, valid value [0, 1]. Defaults to -1.0. - eps (float, optional): The epsilon value. Defaults to 1e-08. - bias (bool, optional): Whether use bias term for RMSNorm, - because RMSNorm doesn't enforce re-centering invariance.Defaults to False. - """ - - def __init__( - self, - d: Tuple[int, ...], - p: float = -1.0, - eps: float = 1e-08, - bias: bool = False, - ): - super().__init__() - self.eps = eps - self.d = d - self.p = p - self.bias = bias - init_data = paddle.ones(d) - self.scale = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(1.0), - ) - self.scale.stop_gradient = False - self.add_parameter(name="scale", parameter=self.scale) - if self.bias: - init_data = paddle.zeros(d) - self.offset = paddle.create_parameter( - shape=init_data.shape, - dtype=init_data.dtype, - default_initializer=nn.initializer.Constant(0.0), - ) - self.offset.stop_gradient = False - self.add_parameter(name="offset", parameter=self.offset) - - def forward(self, x): - if self.p < 0.0 or self.p > 1.0: - norm_x = x.norm(p=2, axis=-1, keepdim=True) - d_x = self.d - else: - partial_size = int(self.d * self.p) - partial_x, _ = paddle.split( - x=x, num_or_sections=[partial_size, self.d - partial_size], axis=-1 - ) - norm_x = partial_x.norm(p=2, axis=-1, keepdim=True) - d_x = partial_size - rms_x = norm_x * d_x ** (-1.0 / 2) - x_normed = x / (rms_x + self.eps) - if self.bias: - return self.scale * x_normed + self.offset - return self.scale * x_normed - - -def get_norm_layer( - normalization: str = "layer_norm", - axis: int = -1, - epsilon: float = 1e-05, - in_channels: int = 0, - **kwargs, -): - """Get the normalization layer based on the provided type - - Args: - normalization (str): The type of the layer normalization from ['layer_norm']. - axis (float): The axis to normalize the. - epsilon (float): The epsilon of the normalization layer. - in_channels (int): Input channel. - - Returns: - norm_layer (norm): The layer normalization layer. 
- """ - - if isinstance(normalization, str): - if normalization == "layer_norm": - assert in_channels > 0 - assert axis == -1 - norm_layer = nn.LayerNorm( - normalized_shape=in_channels, epsilon=epsilon, **kwargs - ) - elif normalization == "rms_norm": - assert axis == -1 - norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs) - else: - raise NotImplementedError(f"normalization={normalization} is not supported") - return norm_layer - elif normalization is None: - return nn.Identity() - else: - raise NotImplementedError("The type of normalization must be str") - - -def generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False): - if pad_t == 0 and pad_h == 0 and pad_w == 0: - return x - assert padding_type in ["zeros", "ignore", "nearest"] - B, T, H, W, C = x.shape - if padding_type == "nearest": - return nn.functional.interpolate( - x=x.transpose(perm=[0, 4, 1, 2, 3]), size=(T + pad_t, H + pad_h, W + pad_w) - ).transpose(perm=[0, 2, 3, 4, 1]) - elif t_pad_left: - return F.pad(x, [0, 0, 0, pad_w, 0, pad_h, pad_t, 0], data_format="NDHWC") - else: - data_pad = F.pad( - x, [0, 0, pad_t, 0, pad_h, 0, pad_w, 0, 0, 0], data_format="NDHWC" - ) - data_pad = paddle.concat( - [data_pad[:, pad_t:, ...], data_pad[:, :pad_t, ...]], axis=1 - ) - return data_pad - - -def generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type): - assert padding_type in ["zeros", "ignore", "nearest"] - B, T, H, W, C = x.shape - if pad_t == 0 and pad_h == 0 and pad_w == 0: - return x - if padding_type == "nearest": - return nn.functional.interpolate( - x=x.transpose(perm=[0, 4, 1, 2, 3]), size=(T - pad_t, H - pad_h, W - pad_w) - ).transpose(perm=[0, 2, 3, 4, 1]) - else: - return x[:, : T - pad_t, : H - pad_h, : W - pad_w, :] - - -def apply_initialization( - m: nn.Layer, - linear_mode: str = "0", - conv_mode: str = "0", - norm_mode: str = "0", - embed_mode: str = "0", -): - if isinstance(m, nn.Linear): - if linear_mode in ("0",): - m.weight = initializer.kaiming_normal_(m.weight, nonlinearity="linear") - elif linear_mode in ("1",): - m.weight = initializer.kaiming_normal_( - m.weight, a=0.1, mode="fan_out", nonlinearity="leaky_relu" - ) - else: - raise NotImplementedError(f"{linear_mode} is invalid.") - if hasattr(m, "bias") and m.bias is not None: - m.bias = initializer.zeros_(m.bias) - elif isinstance( - m, - ( - nn.Conv2D, - nn.Conv3D, - nn.Conv2DTranspose, - nn.Conv3DTranspose, - ), - ): - if conv_mode in ("0",): - m.weight = initializer.kaiming_normal_( - m.weight, a=0.1, mode="fan_out", nonlinearity="leaky_relu" - ) - else: - raise NotImplementedError(f"{conv_mode} is invalid.") - if hasattr(m, "bias") and m.bias is not None: - m.bias = initializer.zeros_(m.bias) - elif isinstance(m, nn.LayerNorm): - if norm_mode in ("0",): - m.weight = initializer.zeros_(m.weight) - m.bias = initializer.zeros_(m.bias) - else: - raise NotImplementedError(f"{norm_mode} is invalid.") - elif isinstance(m, nn.GroupNorm): - if norm_mode in ("0",): - m.weight = initializer.ones_(m.weight) - m.bias = initializer.zeros_(m.bias) - else: - raise NotImplementedError(f"{norm_mode} is invalid.") - elif isinstance(m, nn.Embedding): - if embed_mode in ("0",): - m.weight.data = initializer.trunc_normal_(m.weight.data, std=0.02) - else: - raise NotImplementedError(f"{embed_mode} is invalid.") - elif isinstance(m, nn.Layer) and hasattr(m, "experts"): - for lin in m.experts: - assert isinstance(lin, nn.Linear) - apply_initialization(lin, linear_mode=linear_mode) - else: - pass - - -class CuboidSelfAttentionPatterns: - def 
__init__(self): - super().__init__() - self.patterns = {} - self.patterns = { - "full": self.full_attention, - "axial": self.axial, - "divided_st": self.divided_space_time, - } - for p in [1, 2, 4, 8, 10]: - for m in [1, 2, 4, 8, 16, 32]: - key = f"video_swin_{p}x{m}" - self.patterns[key] = functools.partial(self.video_swin, P=p, M=m) - - for m in [1, 2, 4, 8, 16, 32]: - key = f"spatial_lg_{m}" - self.patterns[key] = functools.partial(self.spatial_lg_v1, M=m) - - for k in [2, 4, 8]: - key = f"axial_space_dilate_{k}" - self.patterns[key] = functools.partial(self.axial_space_dilate_K, K=k) - - def get(self, pattern_name): - return self.patterns[pattern_name] - - def full_attention(self, input_shape): - T, H, W, _ = input_shape - cuboid_size = [(T, H, W)] - strategy = [("l", "l", "l")] - shift_size = [(0, 0, 0)] - return cuboid_size, strategy, shift_size - - def axial(self, input_shape): - """Axial attention proposed in https://arxiv.org/abs/1912.12180 - - Args: - input_shape (Tuple[int,...]): The shape of the input tensor, T H W. - - Returns: - cuboid_size (Tuple[int,...]): The size of cuboid. - strategy (Tuple[str,...]): The strategy of the attention. - shift_size (Tuple[int,...]): The shift size of the attention. - """ - - T, H, W, _ = input_shape - cuboid_size = [(T, 1, 1), (1, H, 1), (1, 1, W)] - strategy = [("l", "l", "l"), ("l", "l", "l"), ("l", "l", "l")] - shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0)] - return cuboid_size, strategy, shift_size - - def divided_space_time(self, input_shape): - T, H, W, _ = input_shape - cuboid_size = [(T, 1, 1), (1, H, W)] - strategy = [("l", "l", "l"), ("l", "l", "l")] - shift_size = [(0, 0, 0), (0, 0, 0)] - return cuboid_size, strategy, shift_size - - def video_swin(self, input_shape, P=2, M=4): - """Adopt the strategy in Video SwinTransformer https://arxiv.org/pdf/2106.13230.pdf""" - T, H, W, _ = input_shape - P = min(P, T) - M = min(M, H, W) - cuboid_size = [(P, M, M), (P, M, M)] - strategy = [("l", "l", "l"), ("l", "l", "l")] - shift_size = [(0, 0, 0), (P // 2, M // 2, M // 2)] - return cuboid_size, strategy, shift_size - - def spatial_lg_v1(self, input_shape, M=4): - T, H, W, _ = input_shape - if H <= M and W <= M: - cuboid_size = [(T, 1, 1), (1, H, W)] - strategy = [("l", "l", "l"), ("l", "l", "l")] - shift_size = [(0, 0, 0), (0, 0, 0)] - else: - cuboid_size = [(T, 1, 1), (1, M, M), (1, M, M)] - strategy = [("l", "l", "l"), ("l", "l", "l"), ("d", "d", "d")] - shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0)] - return cuboid_size, strategy, shift_size - - def axial_space_dilate_K(self, input_shape, K=2): - T, H, W, _ = input_shape - K = min(K, H, W) - cuboid_size = [ - (T, 1, 1), - (1, H // K, 1), - (1, H // K, 1), - (1, 1, W // K), - (1, 1, W // K), - ] - strategy = [ - ("l", "l", "l"), - ("d", "d", "d"), - ("l", "l", "l"), - ("d", "d", "d"), - ("l", "l", "l"), - ] - shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)] - return cuboid_size, strategy, shift_size - - -class CuboidCrossAttentionPatterns: - def __init__(self): - super().__init__() - self.patterns = {} - for k in [1, 2, 4, 8]: - key1 = f"cross_{k}x{k}" - key2 = f"cross_{k}x{k}_lg" - key3 = f"cross_{k}x{k}_heter" - self.patterns[key1] = functools.partial(self.cross_KxK, K=k) - self.patterns[key2] = functools.partial(self.cross_KxK_lg, K=k) - self.patterns[key3] = functools.partial(self.cross_KxK_heter, K=k) - - def get(self, pattern_name): - return self.patterns[pattern_name] - - def cross_KxK(self, mem_shape, K): - T_mem, H, W, _ = mem_shape - K = min(K, H, W) - 
cuboid_hw = [(K, K)] - shift_hw = [(0, 0)] - strategy = [("l", "l", "l")] - n_temporal = [1] - return cuboid_hw, shift_hw, strategy, n_temporal - - def cross_KxK_lg(self, mem_shape, K): - T_mem, H, W, _ = mem_shape - K = min(K, H, W) - cuboid_hw = [(K, K), (K, K)] - shift_hw = [(0, 0), (0, 0)] - strategy = [("l", "l", "l"), ("d", "d", "d")] - n_temporal = [1, 1] - return cuboid_hw, shift_hw, strategy, n_temporal - - def cross_KxK_heter(self, mem_shape, K): - T_mem, H, W, _ = mem_shape - K = min(K, H, W) - cuboid_hw = [(K, K), (K, K), (K, K)] - shift_hw = [(0, 0), (0, 0), (K // 2, K // 2)] - strategy = [("l", "l", "l"), ("d", "d", "d"), ("l", "l", "l")] - n_temporal = [1, 1, 1] - return cuboid_hw, shift_hw, strategy, n_temporal - - -CuboidSelfAttentionPatterns = CuboidSelfAttentionPatterns() -CuboidCrossAttentionPatterns = CuboidCrossAttentionPatterns() +import functools +from typing import Tuple + +import paddle +import paddle.nn.functional as F +from paddle import nn + +from ppsci.utils import initializer + + +def round_to(dat, c): + return dat + (dat - dat % c) % c + + +class RMSNorm(nn.Layer): + """Root Mean Square Layer Normalization proposed in "[NeurIPS2019] Root Mean Square Layer Normalization" + + Args: + d (Optional[int]): The model size. + p (float, optional): The partial RMSNorm, valid value [0, 1]. Defaults to -1.0. + eps (float, optional): The epsilon value. Defaults to 1e-08. + bias (bool, optional): Whether use bias term for RMSNorm, + because RMSNorm doesn't enforce re-centering invariance.Defaults to False. + """ + + def __init__( + self, + d: Tuple[int, ...], + p: float = -1.0, + eps: float = 1e-08, + bias: bool = False, + ): + super().__init__() + self.eps = eps + self.d = d + self.p = p + self.bias = bias + init_data = paddle.ones(d) + self.scale = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(1.0), + ) + self.scale.stop_gradient = False + self.add_parameter(name="scale", parameter=self.scale) + if self.bias: + init_data = paddle.zeros(d) + self.offset = paddle.create_parameter( + shape=init_data.shape, + dtype=init_data.dtype, + default_initializer=nn.initializer.Constant(0.0), + ) + self.offset.stop_gradient = False + self.add_parameter(name="offset", parameter=self.offset) + + def forward(self, x): + if self.p < 0.0 or self.p > 1.0: + norm_x = x.norm(p=2, axis=-1, keepdim=True) + d_x = self.d + else: + partial_size = int(self.d * self.p) + partial_x, _ = paddle.split( + x=x, num_or_sections=[partial_size, self.d - partial_size], axis=-1 + ) + norm_x = partial_x.norm(p=2, axis=-1, keepdim=True) + d_x = partial_size + rms_x = norm_x * d_x ** (-1.0 / 2) + x_normed = x / (rms_x + self.eps) + if self.bias: + return self.scale * x_normed + self.offset + return self.scale * x_normed + + +def get_norm_layer( + normalization: str = "layer_norm", + axis: int = -1, + epsilon: float = 1e-05, + in_channels: int = 0, + **kwargs, +): + """Get the normalization layer based on the provided type + + Args: + normalization (str): The type of the layer normalization from ['layer_norm']. + axis (float): The axis to normalize the. + epsilon (float): The epsilon of the normalization layer. + in_channels (int): Input channel. + + Returns: + norm_layer (norm): The layer normalization layer. 
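A quick numeric check of the RMSNorm forward pass defined above, assuming the default branch (p outside [0, 1], bias=False) and the all-ones scale parameter it is initialized with:

import paddle

# rms(x) = ||x||_2 / sqrt(d), y = scale * x / (rms(x) + eps)
x = paddle.to_tensor([[3.0, 4.0]])  # d = 2
d = x.shape[-1]
eps = 1e-08
rms = x.norm(p=2, axis=-1, keepdim=True) * d ** (-0.5)  # sqrt((9 + 16) / 2) ~= 3.5355
y = x / (rms + eps)  # ~= [[0.8485, 1.1314]], scale is all-ones by default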
+ """ + + if isinstance(normalization, str): + if normalization == "layer_norm": + assert in_channels > 0 + assert axis == -1 + norm_layer = nn.LayerNorm( + normalized_shape=in_channels, epsilon=epsilon, **kwargs + ) + elif normalization == "rms_norm": + assert axis == -1 + norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs) + else: + raise NotImplementedError(f"normalization={normalization} is not supported") + return norm_layer + elif normalization is None: + return nn.Identity() + else: + raise NotImplementedError("The type of normalization must be str") + + +def generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False): + if pad_t == 0 and pad_h == 0 and pad_w == 0: + return x + assert padding_type in ["zeros", "ignore", "nearest"] + B, T, H, W, C = x.shape + if padding_type == "nearest": + return nn.functional.interpolate( + x=x.transpose(perm=[0, 4, 1, 2, 3]), size=(T + pad_t, H + pad_h, W + pad_w) + ).transpose(perm=[0, 2, 3, 4, 1]) + elif t_pad_left: + return F.pad(x, [0, 0, 0, pad_w, 0, pad_h, pad_t, 0], data_format="NDHWC") + else: + data_pad = F.pad( + x, [0, 0, pad_t, 0, pad_h, 0, pad_w, 0, 0, 0], data_format="NDHWC" + ) + data_pad = paddle.concat( + [data_pad[:, pad_t:, ...], data_pad[:, :pad_t, ...]], axis=1 + ) + return data_pad + + +def generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type): + assert padding_type in ["zeros", "ignore", "nearest"] + B, T, H, W, C = x.shape + if pad_t == 0 and pad_h == 0 and pad_w == 0: + return x + if padding_type == "nearest": + return nn.functional.interpolate( + x=x.transpose(perm=[0, 4, 1, 2, 3]), size=(T - pad_t, H - pad_h, W - pad_w) + ).transpose(perm=[0, 2, 3, 4, 1]) + else: + return x[:, : T - pad_t, : H - pad_h, : W - pad_w, :] + + +def apply_initialization( + m: nn.Layer, + linear_mode: str = "0", + conv_mode: str = "0", + norm_mode: str = "0", + embed_mode: str = "0", +): + if isinstance(m, nn.Linear): + if linear_mode in ("0",): + m.weight = initializer.kaiming_normal_(m.weight, nonlinearity="linear") + elif linear_mode in ("1",): + m.weight = initializer.kaiming_normal_( + m.weight, a=0.1, mode="fan_out", nonlinearity="leaky_relu" + ) + else: + raise NotImplementedError(f"{linear_mode} is invalid.") + if hasattr(m, "bias") and m.bias is not None: + m.bias = initializer.zeros_(m.bias) + elif isinstance( + m, + ( + nn.Conv2D, + nn.Conv3D, + nn.Conv2DTranspose, + nn.Conv3DTranspose, + ), + ): + if conv_mode in ("0",): + m.weight = initializer.kaiming_normal_( + m.weight, a=0.1, mode="fan_out", nonlinearity="leaky_relu" + ) + else: + raise NotImplementedError(f"{conv_mode} is invalid.") + if hasattr(m, "bias") and m.bias is not None: + m.bias = initializer.zeros_(m.bias) + elif isinstance(m, nn.LayerNorm): + if norm_mode in ("0",): + m.weight = initializer.zeros_(m.weight) + m.bias = initializer.zeros_(m.bias) + else: + raise NotImplementedError(f"{norm_mode} is invalid.") + elif isinstance(m, nn.GroupNorm): + if norm_mode in ("0",): + m.weight = initializer.ones_(m.weight) + m.bias = initializer.zeros_(m.bias) + else: + raise NotImplementedError(f"{norm_mode} is invalid.") + elif isinstance(m, nn.Embedding): + if embed_mode in ("0",): + m.weight.data = initializer.trunc_normal_(m.weight.data, std=0.02) + else: + raise NotImplementedError(f"{embed_mode} is invalid.") + elif isinstance(m, nn.Layer) and hasattr(m, "experts"): + for lin in m.experts: + assert isinstance(lin, nn.Linear) + apply_initialization(lin, linear_mode=linear_mode) + else: + pass + + +class CuboidSelfAttentionPatterns: + def 
__init__(self): + super().__init__() + self.patterns = {} + self.patterns = { + "full": self.full_attention, + "axial": self.axial, + "divided_st": self.divided_space_time, + } + for p in [1, 2, 4, 8, 10]: + for m in [1, 2, 4, 8, 16, 32]: + key = f"video_swin_{p}x{m}" + self.patterns[key] = functools.partial(self.video_swin, P=p, M=m) + + for m in [1, 2, 4, 8, 16, 32]: + key = f"spatial_lg_{m}" + self.patterns[key] = functools.partial(self.spatial_lg_v1, M=m) + + for k in [2, 4, 8]: + key = f"axial_space_dilate_{k}" + self.patterns[key] = functools.partial(self.axial_space_dilate_K, K=k) + + def get(self, pattern_name): + return self.patterns[pattern_name] + + def full_attention(self, input_shape): + T, H, W, _ = input_shape + cuboid_size = [(T, H, W)] + strategy = [("l", "l", "l")] + shift_size = [(0, 0, 0)] + return cuboid_size, strategy, shift_size + + def axial(self, input_shape): + """Axial attention proposed in https://arxiv.org/abs/1912.12180 + + Args: + input_shape (Tuple[int,...]): The shape of the input tensor, T H W. + + Returns: + cuboid_size (Tuple[int,...]): The size of cuboid. + strategy (Tuple[str,...]): The strategy of the attention. + shift_size (Tuple[int,...]): The shift size of the attention. + """ + + T, H, W, _ = input_shape + cuboid_size = [(T, 1, 1), (1, H, 1), (1, 1, W)] + strategy = [("l", "l", "l"), ("l", "l", "l"), ("l", "l", "l")] + shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0)] + return cuboid_size, strategy, shift_size + + def divided_space_time(self, input_shape): + T, H, W, _ = input_shape + cuboid_size = [(T, 1, 1), (1, H, W)] + strategy = [("l", "l", "l"), ("l", "l", "l")] + shift_size = [(0, 0, 0), (0, 0, 0)] + return cuboid_size, strategy, shift_size + + def video_swin(self, input_shape, P=2, M=4): + """Adopt the strategy in Video SwinTransformer https://arxiv.org/pdf/2106.13230.pdf""" + T, H, W, _ = input_shape + P = min(P, T) + M = min(M, H, W) + cuboid_size = [(P, M, M), (P, M, M)] + strategy = [("l", "l", "l"), ("l", "l", "l")] + shift_size = [(0, 0, 0), (P // 2, M // 2, M // 2)] + return cuboid_size, strategy, shift_size + + def spatial_lg_v1(self, input_shape, M=4): + T, H, W, _ = input_shape + if H <= M and W <= M: + cuboid_size = [(T, 1, 1), (1, H, W)] + strategy = [("l", "l", "l"), ("l", "l", "l")] + shift_size = [(0, 0, 0), (0, 0, 0)] + else: + cuboid_size = [(T, 1, 1), (1, M, M), (1, M, M)] + strategy = [("l", "l", "l"), ("l", "l", "l"), ("d", "d", "d")] + shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0)] + return cuboid_size, strategy, shift_size + + def axial_space_dilate_K(self, input_shape, K=2): + T, H, W, _ = input_shape + K = min(K, H, W) + cuboid_size = [ + (T, 1, 1), + (1, H // K, 1), + (1, H // K, 1), + (1, 1, W // K), + (1, 1, W // K), + ] + strategy = [ + ("l", "l", "l"), + ("d", "d", "d"), + ("l", "l", "l"), + ("d", "d", "d"), + ("l", "l", "l"), + ] + shift_size = [(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)] + return cuboid_size, strategy, shift_size + + +class CuboidCrossAttentionPatterns: + def __init__(self): + super().__init__() + self.patterns = {} + for k in [1, 2, 4, 8]: + key1 = f"cross_{k}x{k}" + key2 = f"cross_{k}x{k}_lg" + key3 = f"cross_{k}x{k}_heter" + self.patterns[key1] = functools.partial(self.cross_KxK, K=k) + self.patterns[key2] = functools.partial(self.cross_KxK_lg, K=k) + self.patterns[key3] = functools.partial(self.cross_KxK_heter, K=k) + + def get(self, pattern_name): + return self.patterns[pattern_name] + + def cross_KxK(self, mem_shape, K): + T_mem, H, W, _ = mem_shape + K = min(K, H, W) + 
cuboid_hw = [(K, K)] + shift_hw = [(0, 0)] + strategy = [("l", "l", "l")] + n_temporal = [1] + return cuboid_hw, shift_hw, strategy, n_temporal + + def cross_KxK_lg(self, mem_shape, K): + T_mem, H, W, _ = mem_shape + K = min(K, H, W) + cuboid_hw = [(K, K), (K, K)] + shift_hw = [(0, 0), (0, 0)] + strategy = [("l", "l", "l"), ("d", "d", "d")] + n_temporal = [1, 1] + return cuboid_hw, shift_hw, strategy, n_temporal + + def cross_KxK_heter(self, mem_shape, K): + T_mem, H, W, _ = mem_shape + K = min(K, H, W) + cuboid_hw = [(K, K), (K, K), (K, K)] + shift_hw = [(0, 0), (0, 0), (K // 2, K // 2)] + strategy = [("l", "l", "l"), ("d", "d", "d"), ("l", "l", "l")] + n_temporal = [1, 1, 1] + return cuboid_hw, shift_hw, strategy, n_temporal + + +CuboidSelfAttentionPatterns = CuboidSelfAttentionPatterns() +CuboidCrossAttentionPatterns = CuboidCrossAttentionPatterns() diff --git a/ppsci/arch/extformer_moe_utils.py b/ppsci/arch/extformer_moe_utils.py index 3332b356c8..1460a43e7c 100644 --- a/ppsci/arch/extformer_moe_utils.py +++ b/ppsci/arch/extformer_moe_utils.py @@ -1,563 +1,563 @@ -import math - -import paddle -from paddle import nn - -# MoE Gating - - -class GatingNet(nn.Layer): - def __init__(self, moe_config, input_shape, in_channels): - super().__init__() - - self.num_experts = moe_config["num_experts"] - self.out_planes = moe_config["out_planes"] - self.aux_loss_style = moe_config["aux_loss_style"] - assert self.out_planes > 1 and self.out_planes <= self.num_experts - assert len(input_shape) == 4 - self.input_shape = input_shape - - self.noise_lin = nn.Linear( - in_features=in_channels, out_features=self.num_experts, bias_attr=False - ) - self.noise_eps = 1e-2 - self.softplus = nn.Softplus() - self.softmax = nn.Softmax(axis=-1) - - self.importance_weight = moe_config["importance_weight"] - self.load_weight = moe_config["load_weight"] - - def cv_squared(self, x, eps=1e-25): - return x.var(axis=-1) / (x.mean(axis=-1) ** 2 + eps) - - def intra_cdf(self, value, loc=0.0, scale=1.0): - return 0.5 * (1 + paddle.erf((value - loc) / scale / math.sqrt(2))) - - def importance_loss_cell(self, routing_weights): - importance_loss = self.cv_squared(routing_weights.sum(axis=0)).mean() - return importance_loss - - def load_loss_cell( - self, clean_values, noisy_values, noise_stddev, noisy_top_values - ): - B, T, H, W, E = clean_values.shape - M = noisy_top_values.shape[-1] - clean_values = clean_values.transpose([1, 2, 3, 0, 4]) - noisy_values = noisy_values.transpose([1, 2, 3, 0, 4]) - noise_stddev = noise_stddev.transpose([1, 2, 3, 0, 4]) - top_values_flat = noisy_top_values.transpose([1, 2, 3, 0, 4]).reshape( - [T, H, W, B * M] - ) - - threshold_positions_if_in = paddle.arange(B) * M + self.out_planes - threshold_if_in = paddle.take_along_axis( - top_values_flat, - axis=-1, - indices=threshold_positions_if_in.unsqueeze(axis=[0, 1, 2]), - ).unsqueeze( - -1 - ) # T, H, W, B, 1 - is_in = noisy_values > threshold_if_in # T, H, W, B, E - threshold_positions_if_out = threshold_positions_if_in - 1 - threshold_if_out = paddle.take_along_axis( - top_values_flat, - axis=-1, - indices=threshold_positions_if_out.unsqueeze(axis=[0, 1, 2]), - ).unsqueeze(-1) - - prob_if_in = self.intra_cdf( - (clean_values - threshold_if_in) / noise_stddev - ) # T, H, W, B, E - prob_if_out = self.intra_cdf( - (clean_values - threshold_if_out) / noise_stddev - ) # T, H, W, B, E - prob = paddle.where(is_in, prob_if_in, prob_if_out) # T, H, W, B, E - - load_loss = self.cv_squared(prob.sum(axis=-2)).mean() - return load_loss - - def 
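To make the self-attention pattern registry above concrete, here is what two of the registered patterns return for a toy memory shape. The import path is taken from the diff header (ppsci/arch/extformer_moe_cuboid_utils.py); the expected values in the comments simply restate the axial() and video_swin() formulas.

from ppsci.arch import extformer_moe_cuboid_utils as cuboid_utils

mem_shape = (13, 32, 32, 256)  # (T, H, W, C)

# "axial" attends along one axis at a time.
cuboid_size, strategy, shift_size = cuboid_utils.CuboidSelfAttentionPatterns.get(
    "axial"
)(mem_shape)
# cuboid_size == [(13, 1, 1), (1, 32, 1), (1, 1, 32)], all local, no shift

# "video_swin_2x8" uses two (2, 8, 8) windows, the second shifted by (1, 4, 4).
cuboid_size, strategy, shift_size = cuboid_utils.CuboidSelfAttentionPatterns.get(
    "video_swin_2x8"
)(mem_shape)
# cuboid_size == [(2, 8, 8), (2, 8, 8)], shift_size == [(0, 0, 0), (1, 4, 4)]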
importance_loss_all(self, routing_weights): - importance_loss = self.cv_squared(routing_weights.sum(axis=0)) - return importance_loss - - def load_loss_all(self, clean_values, noisy_values, noise_stddev, noisy_top_values): - B, E = clean_values.shape - M = noisy_top_values.shape[-1] - top_values_flat = noisy_top_values.flatten() # B * M - - threshold_positions_if_in = paddle.arange(B) * M + self.out_planes # B - threshold_if_in = paddle.take_along_axis( - top_values_flat, axis=-1, indices=threshold_positions_if_in - ).unsqueeze( - -1 - ) # B, 1 - is_in = noisy_values > threshold_if_in # B, E - threshold_positions_if_out = threshold_positions_if_in - 1 # B - threshold_if_out = paddle.take_along_axis( - top_values_flat, axis=-1, indices=threshold_positions_if_out - ).unsqueeze( - -1 - ) # B, 1 - - prob_if_in = self.intra_cdf( - (clean_values - threshold_if_in) / noise_stddev - ) # B, E - prob_if_out = self.intra_cdf( - (clean_values - threshold_if_out) / noise_stddev - ) # B, E - prob = paddle.where(is_in, prob_if_in, prob_if_out) # B, E - - load_loss = self.cv_squared(prob.sum(axis=0)) - return load_loss - - def forward(self, x, t_map=None, eps=1e-25, dense_routing=False): - assert x.shape[1:-1] == list(self.input_shape)[:-1] - B, T, H, W, C = x.shape - E = self.num_experts - - raw_logits = self.gating(x, t_map) - if self.training: - noise = self.softplus(self.noise_lin(x)) + self.noise_eps - noisy_logits = raw_logits + paddle.randn(shape=raw_logits.shape) * noise - logits = noisy_logits - else: - logits = raw_logits - - assert logits.shape[-1] == self.num_experts - logits = self.softmax(logits) # [B, T, H, W, E] - top_logits, top_indices = logits.topk( - min(self.out_planes + 1, self.num_experts), axis=-1 - ) - top_k_logits = top_logits[:, :, :, :, : self.out_planes] - top_k_indices = top_indices[:, :, :, :, : self.out_planes] - top_k_gates = top_k_logits / ( - top_k_logits.sum(axis=-1, keepdim=True) + eps - ) # normalization - - if dense_routing: - # zeros = paddle.zeros_like(logits) - # zeros.stop_gradient = False - # print(zeros.shape) - # print(top_k_gates.shape, top_k_gates[0, 0, 0, 0]) - # routing_weights = paddle.put_along_axis(zeros, axis=-1, indices=top_k_indices, values=top_k_gates) - # print(routing_weights.shape, routing_weights.stop_gradient) - pass - else: - routing_weights = None - - if self.training: - if self.aux_loss_style == "cell": - # importance_loss = self.importance_loss(routing_weights) - importance_loss = self.importance_loss_cell(logits) - load_loss = self.load_loss_cell( - raw_logits, noisy_logits, noise, top_logits - ) - elif self.aux_loss_style == "all": - importance_loss = self.importance_loss_all( - logits.reshape([B * T * H * W, E]) - ) - load_loss = self.load_loss_all( - raw_logits.reshape([B * T * H * W, E]), - noisy_logits.reshape([B * T * H * W, E]), - noise.reshape([B * T * H * W, E]), - top_logits.reshape([B * T * H * W, -1]), - ) - else: - raise NotImplementedError - loss = ( - self.importance_weight * importance_loss + self.load_weight * load_loss - ) - else: - loss = None - - return routing_weights, top_k_gates, top_k_indices, loss - - -class LinearGatingNet(GatingNet): - def __init__(self, moe_config, input_shape, in_channels): - super().__init__(moe_config, input_shape, in_channels) - assert len(input_shape) == 4 - T, H, W, C = input_shape - - self.lin = nn.Linear( - in_features=in_channels, out_features=self.num_experts, bias_attr=False - ) - - def gating(self, x, t_map=None): - routing_weights = self.lin(x) # [B, T, H, W, E] - return 
routing_weights - - -class SpatialLatentGatingNet(GatingNet): - def __init__(self, moe_config, input_shape, in_channels): - super().__init__(moe_config, input_shape, in_channels) - assert len(input_shape) == 4 - T, H, W, C = input_shape - - gain = 1.0 - fan = self.out_planes / self.num_experts - bound = gain * math.sqrt(3.0 / fan) - self.routing_weights = paddle.create_parameter( - shape=[H, W, self.num_experts], - dtype="float32", - default_initializer=nn.initializer.Uniform(-bound, bound), - ) - - def gating(self, x, t_map=None): - # assert t_map is not None - routing_weights = self.routing_weights.unsqueeze(0).tile( - [x.shape[0], x.shape[1], 1, 1, 1] - ) # [B, T, H, W, E] - return routing_weights - - -class SpatialLatentLinearGatingNet(GatingNet): - def __init__(self, moe_config, input_shape, in_channels): - super().__init__(moe_config, input_shape, in_channels) - assert len(input_shape) == 4 - T, H, W, C = input_shape - - gain = 1.0 - fan = self.out_planes / self.num_experts - bound = gain * math.sqrt(3.0 / fan) - self.spatial_routing_weights = paddle.create_parameter( - shape=[H, W, self.num_experts], - dtype="float32", - default_initializer=nn.initializer.Uniform(-bound, bound), - ) - self.lin = nn.Linear( - in_features=in_channels, out_features=self.num_experts, bias_attr=False - ) - - self.combine_weight = paddle.create_parameter( - shape=[H, W, self.num_experts, 2], - dtype="float32", - default_initializer=nn.initializer.Uniform(-bound, bound), - ) - - def gating(self, x, t_map=None): - # assert t_map is not None - spatial_routing_weights = self.spatial_routing_weights.tile( - [x.shape[0], x.shape[1], 1, 1, 1] - ) # [B, T, H, W, E] - linear_routing_weights = self.lin(x) # [B, T, H, W, E] - routing_weights = paddle.stack( - [spatial_routing_weights, linear_routing_weights], axis=-1 - ) # [B, T, H, W, E, 2] - combine_weight = self.combine_weight.tile( - [x.shape[0], x.shape[1], 1, 1, 1, 1] - ) # [B, T, H, W, E, 2] - routing_weights = (routing_weights * combine_weight).sum(-1) # [B, T, H, W, E] - return routing_weights - - -class CuboidLatentGatingNet(GatingNet): - def __init__(self, moe_config, input_shape, in_channels): - super().__init__(moe_config, input_shape, in_channels) - assert len(input_shape) == 4 - T, H, W, C = input_shape - - gain = 1.0 - fan = self.out_planes / self.num_experts - bound = gain * math.sqrt(3.0 / fan) - self.routing_weights = paddle.create_parameter( - shape=[T, H, W, self.num_experts], - dtype="float32", - default_initializer=nn.initializer.Uniform(-bound, bound), - ) - - def gating(self, x, t_map=None): - # assert t_map is not None - routing_weights = self.routing_weights.unsqueeze(0).tile( - [x.shape[0], 1, 1, 1, 1] - ) # [B, T, H, W, E] - return routing_weights - - -class CuboidLatentLinearGatingNet(GatingNet): - def __init__(self, moe_config, input_shape, in_channels): - super().__init__(moe_config, input_shape, in_channels) - assert len(input_shape) == 4 - T, H, W, C = input_shape - - gain = 1.0 - fan = self.out_planes / self.num_experts - bound = gain * math.sqrt(3.0 / fan) - self.cuboid_routing_weights = paddle.create_parameter( - shape=[T, H, W, self.num_experts], - dtype="float32", - default_initializer=nn.initializer.Uniform(-bound, bound), - ) - - self.lin = nn.Linear( - in_features=in_channels, out_features=self.num_experts, bias_attr=False - ) - - self.combine_weight = paddle.create_parameter( - shape=[T, H, W, self.num_experts, 2], - dtype="float32", - default_initializer=nn.initializer.Uniform(-bound, bound), - ) - - def gating(self, x, 
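The noisy top-k routing implemented in GatingNet.forward (project tokens to expert logits, add learned input-dependent noise during training, softmax, keep the top-k gates and renormalize them) can be restated with toy sizes as below. The sizes and layer names here are illustrative assumptions, not the class itself.

import paddle
import paddle.nn.functional as F

N, C, E, K = 5, 16, 4, 2  # tokens, channels, experts, top-k
x = paddle.randn([N, C])
lin = paddle.nn.Linear(C, E, bias_attr=False)  # per-token gating projection
noise_lin = paddle.nn.Linear(C, E, bias_attr=False)  # learned noise scale, as in self.noise_lin

raw_logits = lin(x)
noise = F.softplus(noise_lin(x)) + 1e-2  # self.noise_eps
noisy_logits = raw_logits + paddle.randn(raw_logits.shape) * noise  # training branch

probs = F.softmax(noisy_logits, axis=-1)
top_k_logits, top_k_indices = probs.topk(K, axis=-1)
top_k_gates = top_k_logits / (top_k_logits.sum(axis=-1, keepdim=True) + 1e-25)
# top_k_gates / top_k_indices play the role of sparse_routing_weights / _inds above.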
t_map=None): - # assert t_map is not None - cuboid_routing_weights = self.cuboid_routing_weights.unsqueeze(0).tile( - [x.shape[0], 1, 1, 1, 1] - ) # [B, T, H, W, E] - linear_routing_weights = self.lin(x) # [B, T, H, W, E] - routing_weights = paddle.stack( - [cuboid_routing_weights, linear_routing_weights], axis=-1 - ) # [B, T, H, W, E, 2] - combine_weight = self.combine_weight.tile( - [x.shape[0], 1, 1, 1, 1, 1] - ) # [B, T, H, W, E, 2] - routing_weights = (routing_weights * combine_weight).sum(-1) # [B, T, H, W, E] - return routing_weights - - -def aggregate_aux_losses(net): - aux_losses = [] - for module in net.sublayers(): - if hasattr(module, "aux_loss"): - aux_losses.append(module.aux_loss.unsqueeze(0)) - return aux_losses - - -# MoE Routing - - -class SparseDispatcherScatter(object): - def __init__(self, num_experts, gates): - self._gates = gates - self._num_experts = num_experts - sorted_experts, index_sorted_experts = paddle.nonzero(gates).sort( - 0 - ), paddle.nonzero(gates).argsort(0) - _, self._expert_index = sorted_experts.split(1, axis=1) - self._batch_index = paddle.nonzero(gates)[index_sorted_experts[:, 1], 0] - self._part_sizes = (gates > 0).sum(0).tolist() - gates_exp = gates[self._batch_index.flatten()] - self._nonzero_gates = paddle.take_along_axis( - gates_exp, axis=1, indices=self._expert_index - ) - - def dispatch(self, inp): - inp_exp = inp[self._batch_index].squeeze(1) - return paddle.split(inp_exp, self._part_sizes, axis=0) - - def combine(self, expert_out, multiply_by_gates=True): - stitched = paddle.concat(expert_out, 0) - if multiply_by_gates: - stitched = stitched.multiply(self._nonzero_gates) - zeros = paddle.zeros([self._gates.shape[0], expert_out[-1].shape[1]]) - zeros.stop_gradient = False - # combine samples that have been processed by the same k experts - combined = zeros.index_add(0, self._batch_index, stitched.float()) - return combined - - -class SparseDispatcher(object): - def __init__(self, num_experts, top_k_gates, top_k_indices): - self.num_experts = num_experts - self.gates = top_k_gates # [B, K] - self.gate_inds = top_k_indices # [B, K] - E = num_experts - B, K = top_k_gates.shape - self.batch_index_per_expert = paddle.stack( - [ - (top_k_indices == expert_id).sum(-1).astype("bool") - for expert_id in range(E) - ], - axis=0, - ) # [E, B] - self.gates_per_expert = paddle.concat( - [top_k_gates[top_k_indices == expert_id] for expert_id in range(E)] - ) # B * K - self.batch_index_all = paddle.nonzero(self.batch_index_per_expert)[ - :, 1 - ] # B * K - self.expert_size = self.batch_index_per_expert.sum(-1) # [E] - - def dispatch(self, x): - B, C = x.shape - dispatched_x = [ - x[batch_index] for batch_index in self.batch_index_per_expert - ] # E * [B_e, C] - return dispatched_x - - def combine(self, expert_out): - # expert_out: E * [B_e, C] - assert len(expert_out) == self.num_experts - com_res = paddle.concat(expert_out, axis=0) # [B * K, C] - zeros = paddle.zeros([self.gates.shape[0], com_res.shape[1]]) - zeros.stop_gradient = False - combined_res = zeros.index_add( - axis=0, - index=self.batch_index_all, - value=com_res * self.gates_per_expert.unsqueeze(-1), - ) - return combined_res - - -class DenseDispatcher(object): - def __init__(self, num_experts, top_k_gates, top_k_indices): - self.num_experts = num_experts - self.gates = top_k_gates # [B, K] - self.gate_inds = top_k_indices # [B, K] - - def combine(self, expert_out): - # expert_out: [B, E, C] - B, E, C = expert_out.shape - assert E == self.num_experts - selected_out = paddle.take_along_axis( 
- expert_out, axis=1, indices=self.gate_inds.unsqueeze(-1) - ) # [B, K, C] - combined_res = (selected_out * self.gates.unsqueeze(-1)).sum(1) - return combined_res - - -# RNC - - -class LabelDifference(nn.Layer): - def __init__(self, distance_type="l1"): - super().__init__() - self.distance_type = distance_type - - def forward(self, labels): - # labels: [bs, label_dim] - # output: [bs, bs] - assert labels.ndim == 3 - if self.distance_type == "l1": - return paddle.abs(labels[:, :, None, :] - labels[:, None, :, :]).sum( - axis=-1 - ) - else: - raise ValueError(self.distance_type) - - -class FeatureSimilarity(nn.Layer): - def __init__(self, similarity_type="l2", temperature=2): - super().__init__() - self.similarity_type = similarity_type - self.t = temperature - - def forward(self, features): - # labels: [bs, feat_dim] - # output: [bs, bs] - assert features.ndim == 3 - if self.similarity_type == "l2": - logits = -(features[:, :, None, :] - features[:, None, :, :]).norm( - 2, axis=-1 - ) - logits /= self.t - logits_max = paddle.max(logits, axis=1, keepdim=True) - logits -= logits_max.detach() - return logits - elif self.similarity_type == "cosine": - cos_func = nn.CosineSimilarity(axis=-1) - logits = cos_func(features[:, :, None, :], features[:, None, :, :]) - logits /= self.t - return logits - else: - raise ValueError(self.similarity_type) - - -class RnCLoss(nn.Layer): - def __init__(self, rnc_config): - super().__init__() - - self.rank_mode = rnc_config["rank_imbalance_style"] - self.t = rnc_config["rank_imbalance_temp"] - self.label_diff_fn = LabelDifference(rnc_config["label_difference_style"]) - self.feature_sim_fn = FeatureSimilarity( - rnc_config["feature_similarity_style"], self.t - ) - self.rnc_weight = rnc_config["rank_reg_coeff"] - self.loss_cal_mode = rnc_config["loss_cal_style"] - self.softmax_cri = nn.Softmax(axis=-1) - - def cal_loss(self, features, labels): - - B = features.shape[0] - assert B > 1 - label_diffs = self.label_diff_fn(labels) - logits = self.feature_sim_fn(features) - exp_logits = logits.exp() - n = logits.shape[1] - - # remove diagonal - logits = logits.masked_select( - (1 - paddle.eye(n)).astype("bool").unsqueeze(0).tile([B, 1, 1]) - ).reshape([B, n, n - 1]) - exp_logits = exp_logits.masked_select( - (1 - paddle.eye(n)).astype("bool").unsqueeze(0).tile([B, 1, 1]) - ).reshape([B, n, n - 1]) - label_diffs = label_diffs.masked_select( - (1 - paddle.eye(n)).astype("bool").unsqueeze(0).tile([B, 1, 1]) - ).reshape([B, n, n - 1]) - - if self.loss_cal_mode == "memory-efficient": - loss = 0.0 - for k in range(n - 1): - pos_logits = logits[:, :, k] # [B, n] - pos_label_diffs = label_diffs[:, :, k] # [B, n] - neg_mask = (label_diffs >= pos_label_diffs.unsqueeze(-1)).astype( - "float32" - ) # [B, n, n - 1] - pos_log_probs = pos_logits - paddle.log( - (neg_mask * exp_logits).sum(axis=-1) - ) # [B, n] - loss += -pos_log_probs.sum() - loss /= B * n * (n - 1) - elif self.loss_cal_mode == "computation-efficient": - neg_mask = (label_diffs.unsqueeze(-2) >= label_diffs.unsqueeze(-1)).astype( - "float32" - ) # [B, n, n - 1, n - 1] - pos_log_probs = logits - paddle.log( - (neg_mask * exp_logits.unsqueeze(-2).tile([1, 1, n - 1, 1])).sum( - axis=-1 - ) - ) # [B, n, n - 1] - loss = -pos_log_probs.mean() - else: - raise NotImplementedError - - return loss - - def forward(self, features, labels): - # features: [B, T_o, H, W, C_o] - # labels: [B, T_o, H, W, C_l] - - B, T_o, H, W, C_o = features.shape - _, _, _, _, C_l = labels.shape - - loss = None - if self.rank_mode == "batch": - 
features = features.reshape([B, -1, C_o]).transpose([1, 0, 2]) - labels = labels.reshape([B, -1, C_l]).transpose([1, 0, 2]) - loss = self.cal_loss(features, labels) - elif self.rank_mode == "batch+T+H+W": - feat = features.transpose([0, 2, 3, 1, 4]).reshape([-1, T_o, C_o]) - label = labels.transpose([0, 2, 3, 1, 4]).reshape([-1, T_o, C_l]) - loss_T = self.cal_loss(feat, label) - - feat = features.transpose([0, 1, 3, 2, 4]).reshape([-1, H, C_o]) - label = labels.transpose([0, 1, 3, 2, 4]).reshape([-1, H, C_l]) - loss_H = self.cal_loss(feat, label) - - feat = features.reshape([-1, W, C_o]) - label = labels.reshape([-1, W, C_l]) - loss_W = self.cal_loss(feat, label) - - feat = features.transpose([1, 2, 3, 0, 4]).reshape([-1, B, C_o]) - label = labels.transpose([1, 2, 3, 0, 4]).reshape([-1, B, C_l]) - loss_batch = self.cal_loss(feat, label) - - loss = loss_T + loss_H + loss_W + loss_batch - else: - raise NotImplementedError - - loss = self.rnc_weight * loss - - return loss +import math + +import paddle +from paddle import nn + +# MoE Gating + + +class GatingNet(nn.Layer): + def __init__(self, moe_config, input_shape, in_channels): + super().__init__() + + self.num_experts = moe_config["num_experts"] + self.out_planes = moe_config["out_planes"] + self.aux_loss_style = moe_config["aux_loss_style"] + assert self.out_planes > 1 and self.out_planes <= self.num_experts + assert len(input_shape) == 4 + self.input_shape = input_shape + + self.noise_lin = nn.Linear( + in_features=in_channels, out_features=self.num_experts, bias_attr=False + ) + self.noise_eps = 1e-2 + self.softplus = nn.Softplus() + self.softmax = nn.Softmax(axis=-1) + + self.importance_weight = moe_config["importance_weight"] + self.load_weight = moe_config["load_weight"] + + def cv_squared(self, x, eps=1e-25): + return x.var(axis=-1) / (x.mean(axis=-1) ** 2 + eps) + + def intra_cdf(self, value, loc=0.0, scale=1.0): + return 0.5 * (1 + paddle.erf((value - loc) / scale / math.sqrt(2))) + + def importance_loss_cell(self, routing_weights): + importance_loss = self.cv_squared(routing_weights.sum(axis=0)).mean() + return importance_loss + + def load_loss_cell( + self, clean_values, noisy_values, noise_stddev, noisy_top_values + ): + B, T, H, W, E = clean_values.shape + M = noisy_top_values.shape[-1] + clean_values = clean_values.transpose([1, 2, 3, 0, 4]) + noisy_values = noisy_values.transpose([1, 2, 3, 0, 4]) + noise_stddev = noise_stddev.transpose([1, 2, 3, 0, 4]) + top_values_flat = noisy_top_values.transpose([1, 2, 3, 0, 4]).reshape( + [T, H, W, B * M] + ) + + threshold_positions_if_in = paddle.arange(B) * M + self.out_planes + threshold_if_in = paddle.take_along_axis( + top_values_flat, + axis=-1, + indices=threshold_positions_if_in.unsqueeze(axis=[0, 1, 2]), + ).unsqueeze( + -1 + ) # T, H, W, B, 1 + is_in = noisy_values > threshold_if_in # T, H, W, B, E + threshold_positions_if_out = threshold_positions_if_in - 1 + threshold_if_out = paddle.take_along_axis( + top_values_flat, + axis=-1, + indices=threshold_positions_if_out.unsqueeze(axis=[0, 1, 2]), + ).unsqueeze(-1) + + prob_if_in = self.intra_cdf( + (clean_values - threshold_if_in) / noise_stddev + ) # T, H, W, B, E + prob_if_out = self.intra_cdf( + (clean_values - threshold_if_out) / noise_stddev + ) # T, H, W, B, E + prob = paddle.where(is_in, prob_if_in, prob_if_out) # T, H, W, B, E + + load_loss = self.cv_squared(prob.sum(axis=-2)).mean() + return load_loss + + def importance_loss_all(self, routing_weights): + importance_loss = self.cv_squared(routing_weights.sum(axis=0)) + 
return importance_loss + + def load_loss_all(self, clean_values, noisy_values, noise_stddev, noisy_top_values): + B, E = clean_values.shape + M = noisy_top_values.shape[-1] + top_values_flat = noisy_top_values.flatten() # B * M + + threshold_positions_if_in = paddle.arange(B) * M + self.out_planes # B + threshold_if_in = paddle.take_along_axis( + top_values_flat, axis=-1, indices=threshold_positions_if_in + ).unsqueeze( + -1 + ) # B, 1 + is_in = noisy_values > threshold_if_in # B, E + threshold_positions_if_out = threshold_positions_if_in - 1 # B + threshold_if_out = paddle.take_along_axis( + top_values_flat, axis=-1, indices=threshold_positions_if_out + ).unsqueeze( + -1 + ) # B, 1 + + prob_if_in = self.intra_cdf( + (clean_values - threshold_if_in) / noise_stddev + ) # B, E + prob_if_out = self.intra_cdf( + (clean_values - threshold_if_out) / noise_stddev + ) # B, E + prob = paddle.where(is_in, prob_if_in, prob_if_out) # B, E + + load_loss = self.cv_squared(prob.sum(axis=0)) + return load_loss + + def forward(self, x, t_map=None, eps=1e-25, dense_routing=False): + assert x.shape[1:-1] == list(self.input_shape)[:-1] + B, T, H, W, C = x.shape + E = self.num_experts + + raw_logits = self.gating(x, t_map) + if self.training: + noise = self.softplus(self.noise_lin(x)) + self.noise_eps + noisy_logits = raw_logits + paddle.randn(shape=raw_logits.shape) * noise + logits = noisy_logits + else: + logits = raw_logits + + assert logits.shape[-1] == self.num_experts + logits = self.softmax(logits) # [B, T, H, W, E] + top_logits, top_indices = logits.topk( + min(self.out_planes + 1, self.num_experts), axis=-1 + ) + top_k_logits = top_logits[:, :, :, :, : self.out_planes] + top_k_indices = top_indices[:, :, :, :, : self.out_planes] + top_k_gates = top_k_logits / ( + top_k_logits.sum(axis=-1, keepdim=True) + eps + ) # normalization + + if dense_routing: + # zeros = paddle.zeros_like(logits) + # zeros.stop_gradient = False + # print(zeros.shape) + # print(top_k_gates.shape, top_k_gates[0, 0, 0, 0]) + # routing_weights = paddle.put_along_axis(zeros, axis=-1, indices=top_k_indices, values=top_k_gates) + # print(routing_weights.shape, routing_weights.stop_gradient) + pass + else: + routing_weights = None + + if self.training: + if self.aux_loss_style == "cell": + # importance_loss = self.importance_loss(routing_weights) + importance_loss = self.importance_loss_cell(logits) + load_loss = self.load_loss_cell( + raw_logits, noisy_logits, noise, top_logits + ) + elif self.aux_loss_style == "all": + importance_loss = self.importance_loss_all( + logits.reshape([B * T * H * W, E]) + ) + load_loss = self.load_loss_all( + raw_logits.reshape([B * T * H * W, E]), + noisy_logits.reshape([B * T * H * W, E]), + noise.reshape([B * T * H * W, E]), + top_logits.reshape([B * T * H * W, -1]), + ) + else: + raise NotImplementedError + loss = ( + self.importance_weight * importance_loss + self.load_weight * load_loss + ) + else: + loss = None + + return routing_weights, top_k_gates, top_k_indices, loss + + +class LinearGatingNet(GatingNet): + def __init__(self, moe_config, input_shape, in_channels): + super().__init__(moe_config, input_shape, in_channels) + assert len(input_shape) == 4 + T, H, W, C = input_shape + + self.lin = nn.Linear( + in_features=in_channels, out_features=self.num_experts, bias_attr=False + ) + + def gating(self, x, t_map=None): + routing_weights = self.lin(x) # [B, T, H, W, E] + return routing_weights + + +class SpatialLatentGatingNet(GatingNet): + def __init__(self, moe_config, input_shape, in_channels): 
+ super().__init__(moe_config, input_shape, in_channels) + assert len(input_shape) == 4 + T, H, W, C = input_shape + + gain = 1.0 + fan = self.out_planes / self.num_experts + bound = gain * math.sqrt(3.0 / fan) + self.routing_weights = paddle.create_parameter( + shape=[H, W, self.num_experts], + dtype="float32", + default_initializer=nn.initializer.Uniform(-bound, bound), + ) + + def gating(self, x, t_map=None): + # assert t_map is not None + routing_weights = self.routing_weights.unsqueeze(0).tile( + [x.shape[0], x.shape[1], 1, 1, 1] + ) # [B, T, H, W, E] + return routing_weights + + +class SpatialLatentLinearGatingNet(GatingNet): + def __init__(self, moe_config, input_shape, in_channels): + super().__init__(moe_config, input_shape, in_channels) + assert len(input_shape) == 4 + T, H, W, C = input_shape + + gain = 1.0 + fan = self.out_planes / self.num_experts + bound = gain * math.sqrt(3.0 / fan) + self.spatial_routing_weights = paddle.create_parameter( + shape=[H, W, self.num_experts], + dtype="float32", + default_initializer=nn.initializer.Uniform(-bound, bound), + ) + self.lin = nn.Linear( + in_features=in_channels, out_features=self.num_experts, bias_attr=False + ) + + self.combine_weight = paddle.create_parameter( + shape=[H, W, self.num_experts, 2], + dtype="float32", + default_initializer=nn.initializer.Uniform(-bound, bound), + ) + + def gating(self, x, t_map=None): + # assert t_map is not None + spatial_routing_weights = self.spatial_routing_weights.tile( + [x.shape[0], x.shape[1], 1, 1, 1] + ) # [B, T, H, W, E] + linear_routing_weights = self.lin(x) # [B, T, H, W, E] + routing_weights = paddle.stack( + [spatial_routing_weights, linear_routing_weights], axis=-1 + ) # [B, T, H, W, E, 2] + combine_weight = self.combine_weight.tile( + [x.shape[0], x.shape[1], 1, 1, 1, 1] + ) # [B, T, H, W, E, 2] + routing_weights = (routing_weights * combine_weight).sum(-1) # [B, T, H, W, E] + return routing_weights + + +class CuboidLatentGatingNet(GatingNet): + def __init__(self, moe_config, input_shape, in_channels): + super().__init__(moe_config, input_shape, in_channels) + assert len(input_shape) == 4 + T, H, W, C = input_shape + + gain = 1.0 + fan = self.out_planes / self.num_experts + bound = gain * math.sqrt(3.0 / fan) + self.routing_weights = paddle.create_parameter( + shape=[T, H, W, self.num_experts], + dtype="float32", + default_initializer=nn.initializer.Uniform(-bound, bound), + ) + + def gating(self, x, t_map=None): + # assert t_map is not None + routing_weights = self.routing_weights.unsqueeze(0).tile( + [x.shape[0], 1, 1, 1, 1] + ) # [B, T, H, W, E] + return routing_weights + + +class CuboidLatentLinearGatingNet(GatingNet): + def __init__(self, moe_config, input_shape, in_channels): + super().__init__(moe_config, input_shape, in_channels) + assert len(input_shape) == 4 + T, H, W, C = input_shape + + gain = 1.0 + fan = self.out_planes / self.num_experts + bound = gain * math.sqrt(3.0 / fan) + self.cuboid_routing_weights = paddle.create_parameter( + shape=[T, H, W, self.num_experts], + dtype="float32", + default_initializer=nn.initializer.Uniform(-bound, bound), + ) + + self.lin = nn.Linear( + in_features=in_channels, out_features=self.num_experts, bias_attr=False + ) + + self.combine_weight = paddle.create_parameter( + shape=[T, H, W, self.num_experts, 2], + dtype="float32", + default_initializer=nn.initializer.Uniform(-bound, bound), + ) + + def gating(self, x, t_map=None): + # assert t_map is not None + cuboid_routing_weights = self.cuboid_routing_weights.unsqueeze(0).tile( + 
[x.shape[0], 1, 1, 1, 1] + ) # [B, T, H, W, E] + linear_routing_weights = self.lin(x) # [B, T, H, W, E] + routing_weights = paddle.stack( + [cuboid_routing_weights, linear_routing_weights], axis=-1 + ) # [B, T, H, W, E, 2] + combine_weight = self.combine_weight.tile( + [x.shape[0], 1, 1, 1, 1, 1] + ) # [B, T, H, W, E, 2] + routing_weights = (routing_weights * combine_weight).sum(-1) # [B, T, H, W, E] + return routing_weights + + +def aggregate_aux_losses(net): + aux_losses = [] + for module in net.sublayers(): + if hasattr(module, "aux_loss"): + aux_losses.append(module.aux_loss.unsqueeze(0)) + return aux_losses + + +# MoE Routing + + +class SparseDispatcherScatter(object): + def __init__(self, num_experts, gates): + self._gates = gates + self._num_experts = num_experts + sorted_experts, index_sorted_experts = paddle.nonzero(gates).sort( + 0 + ), paddle.nonzero(gates).argsort(0) + _, self._expert_index = sorted_experts.split(1, axis=1) + self._batch_index = paddle.nonzero(gates)[index_sorted_experts[:, 1], 0] + self._part_sizes = (gates > 0).sum(0).tolist() + gates_exp = gates[self._batch_index.flatten()] + self._nonzero_gates = paddle.take_along_axis( + gates_exp, axis=1, indices=self._expert_index + ) + + def dispatch(self, inp): + inp_exp = inp[self._batch_index].squeeze(1) + return paddle.split(inp_exp, self._part_sizes, axis=0) + + def combine(self, expert_out, multiply_by_gates=True): + stitched = paddle.concat(expert_out, 0) + if multiply_by_gates: + stitched = stitched.multiply(self._nonzero_gates) + zeros = paddle.zeros([self._gates.shape[0], expert_out[-1].shape[1]]) + zeros.stop_gradient = False + # combine samples that have been processed by the same k experts + combined = zeros.index_add(0, self._batch_index, stitched.float()) + return combined + + +class SparseDispatcher(object): + def __init__(self, num_experts, top_k_gates, top_k_indices): + self.num_experts = num_experts + self.gates = top_k_gates # [B, K] + self.gate_inds = top_k_indices # [B, K] + E = num_experts + B, K = top_k_gates.shape + self.batch_index_per_expert = paddle.stack( + [ + (top_k_indices == expert_id).sum(-1).astype("bool") + for expert_id in range(E) + ], + axis=0, + ) # [E, B] + self.gates_per_expert = paddle.concat( + [top_k_gates[top_k_indices == expert_id] for expert_id in range(E)] + ) # B * K + self.batch_index_all = paddle.nonzero(self.batch_index_per_expert)[ + :, 1 + ] # B * K + self.expert_size = self.batch_index_per_expert.sum(-1) # [E] + + def dispatch(self, x): + B, C = x.shape + dispatched_x = [ + x[batch_index] for batch_index in self.batch_index_per_expert + ] # E * [B_e, C] + return dispatched_x + + def combine(self, expert_out): + # expert_out: E * [B_e, C] + assert len(expert_out) == self.num_experts + com_res = paddle.concat(expert_out, axis=0) # [B * K, C] + zeros = paddle.zeros([self.gates.shape[0], com_res.shape[1]]) + zeros.stop_gradient = False + combined_res = zeros.index_add( + axis=0, + index=self.batch_index_all, + value=com_res * self.gates_per_expert.unsqueeze(-1), + ) + return combined_res + + +class DenseDispatcher(object): + def __init__(self, num_experts, top_k_gates, top_k_indices): + self.num_experts = num_experts + self.gates = top_k_gates # [B, K] + self.gate_inds = top_k_indices # [B, K] + + def combine(self, expert_out): + # expert_out: [B, E, C] + B, E, C = expert_out.shape + assert E == self.num_experts + selected_out = paddle.take_along_axis( + expert_out, axis=1, indices=self.gate_inds.unsqueeze(-1) + ) # [B, K, C] + combined_res = (selected_out * 
self.gates.unsqueeze(-1)).sum(1) + return combined_res + + +# RNC + + +class LabelDifference(nn.Layer): + def __init__(self, distance_type="l1"): + super().__init__() + self.distance_type = distance_type + + def forward(self, labels): + # labels: [bs, label_dim] + # output: [bs, bs] + assert labels.ndim == 3 + if self.distance_type == "l1": + return paddle.abs(labels[:, :, None, :] - labels[:, None, :, :]).sum( + axis=-1 + ) + else: + raise ValueError(self.distance_type) + + +class FeatureSimilarity(nn.Layer): + def __init__(self, similarity_type="l2", temperature=2): + super().__init__() + self.similarity_type = similarity_type + self.t = temperature + + def forward(self, features): + # labels: [bs, feat_dim] + # output: [bs, bs] + assert features.ndim == 3 + if self.similarity_type == "l2": + logits = -(features[:, :, None, :] - features[:, None, :, :]).norm( + 2, axis=-1 + ) + logits /= self.t + logits_max = paddle.max(logits, axis=1, keepdim=True) + logits -= logits_max.detach() + return logits + elif self.similarity_type == "cosine": + cos_func = nn.CosineSimilarity(axis=-1) + logits = cos_func(features[:, :, None, :], features[:, None, :, :]) + logits /= self.t + return logits + else: + raise ValueError(self.similarity_type) + + +class RnCLoss(nn.Layer): + def __init__(self, rnc_config): + super().__init__() + + self.rank_mode = rnc_config["rank_imbalance_style"] + self.t = rnc_config["rank_imbalance_temp"] + self.label_diff_fn = LabelDifference(rnc_config["label_difference_style"]) + self.feature_sim_fn = FeatureSimilarity( + rnc_config["feature_similarity_style"], self.t + ) + self.rnc_weight = rnc_config["rank_reg_coeff"] + self.loss_cal_mode = rnc_config["loss_cal_style"] + self.softmax_cri = nn.Softmax(axis=-1) + + def cal_loss(self, features, labels): + + B = features.shape[0] + assert B > 1 + label_diffs = self.label_diff_fn(labels) + logits = self.feature_sim_fn(features) + exp_logits = logits.exp() + n = logits.shape[1] + + # remove diagonal + logits = logits.masked_select( + (1 - paddle.eye(n)).astype("bool").unsqueeze(0).tile([B, 1, 1]) + ).reshape([B, n, n - 1]) + exp_logits = exp_logits.masked_select( + (1 - paddle.eye(n)).astype("bool").unsqueeze(0).tile([B, 1, 1]) + ).reshape([B, n, n - 1]) + label_diffs = label_diffs.masked_select( + (1 - paddle.eye(n)).astype("bool").unsqueeze(0).tile([B, 1, 1]) + ).reshape([B, n, n - 1]) + + if self.loss_cal_mode == "memory-efficient": + loss = 0.0 + for k in range(n - 1): + pos_logits = logits[:, :, k] # [B, n] + pos_label_diffs = label_diffs[:, :, k] # [B, n] + neg_mask = (label_diffs >= pos_label_diffs.unsqueeze(-1)).astype( + "float32" + ) # [B, n, n - 1] + pos_log_probs = pos_logits - paddle.log( + (neg_mask * exp_logits).sum(axis=-1) + ) # [B, n] + loss += -pos_log_probs.sum() + loss /= B * n * (n - 1) + elif self.loss_cal_mode == "computation-efficient": + neg_mask = (label_diffs.unsqueeze(-2) >= label_diffs.unsqueeze(-1)).astype( + "float32" + ) # [B, n, n - 1, n - 1] + pos_log_probs = logits - paddle.log( + (neg_mask * exp_logits.unsqueeze(-2).tile([1, 1, n - 1, 1])).sum( + axis=-1 + ) + ) # [B, n, n - 1] + loss = -pos_log_probs.mean() + else: + raise NotImplementedError + + return loss + + def forward(self, features, labels): + # features: [B, T_o, H, W, C_o] + # labels: [B, T_o, H, W, C_l] + + B, T_o, H, W, C_o = features.shape + _, _, _, _, C_l = labels.shape + + loss = None + if self.rank_mode == "batch": + features = features.reshape([B, -1, C_o]).transpose([1, 0, 2]) + labels = labels.reshape([B, -1, 
C_l]).transpose([1, 0, 2])
+            loss = self.cal_loss(features, labels)
+        elif self.rank_mode == "batch+T+H+W":
+            feat = features.transpose([0, 2, 3, 1, 4]).reshape([-1, T_o, C_o])
+            label = labels.transpose([0, 2, 3, 1, 4]).reshape([-1, T_o, C_l])
+            loss_T = self.cal_loss(feat, label)
+
+            feat = features.transpose([0, 1, 3, 2, 4]).reshape([-1, H, C_o])
+            label = labels.transpose([0, 1, 3, 2, 4]).reshape([-1, H, C_l])
+            loss_H = self.cal_loss(feat, label)
+
+            feat = features.reshape([-1, W, C_o])
+            label = labels.reshape([-1, W, C_l])
+            loss_W = self.cal_loss(feat, label)
+
+            feat = features.transpose([1, 2, 3, 0, 4]).reshape([-1, B, C_o])
+            label = labels.transpose([1, 2, 3, 0, 4]).reshape([-1, B, C_l])
+            loss_batch = self.cal_loss(feat, label)
+
+            loss = loss_T + loss_H + loss_W + loss_batch
+        else:
+            raise NotImplementedError
+
+        loss = self.rnc_weight * loss
+
+        return loss
diff --git a/ppsci/arch/fno_block.py b/ppsci/arch/fno_block.py
index 787ee4ea53..1720b347dc 100644
--- a/ppsci/arch/fno_block.py
+++ b/ppsci/arch/fno_block.py
@@ -1,1269 +1,1269 @@
-import itertools
-from typing import Dict
-from typing import List
-from typing import Optional
-from typing import Tuple
-from typing import Union
-
-import omegaconf
-import paddle
-import paddle.nn.functional as F
-from paddle import nn
-
-from ppsci.utils import initializer
-from ppsci.utils import logger
-
-einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-
-
-class DomainPadding(nn.Layer):
-    """Applies domain padding scaled automatically to the input's resolution
-
-    Args:
-        domain_padding (Union[float, List[float]]): Typically, between zero and one, percentage of padding to use.
-        padding_mode (str, optional): Whether to pad on both sides, by default
-            'one-sided'.Options are 'symmetric' or 'one-sided'。 Defaults to "one-sided".
-        output_scaling_factor (Union[int, List[int]], optional): Scaling factor for the
-            output. Defaults to 1.
-    """
-
-    def __init__(
-        self,
-        domain_padding: Union[float, List[float]],
-        padding_mode: str = "one-sided",
-        output_scaling_factor: Union[int, List[int]] = 1,
-    ):
-        super().__init__()
-        self.domain_padding = domain_padding
-        self.padding_mode = padding_mode.lower()
-        if output_scaling_factor is None:
-            output_scaling_factor = 1
-        self.output_scaling_factor: Union[int, List[int]] = output_scaling_factor
-
-        # dict(f'{resolution}'=padding) such that padded = F.pad(x, indices)
-        self._padding = dict()
-
-        # dict(f'{resolution}'=indices_to_unpad) such that unpadded = x[indices]
-        self._unpad_indices = dict()
-
-    def forward(self, x):
-        self.pad(x)
-
-    def pad(self, x):
-        """Take an input and pad it by the desired fraction
-
-        The amount of padding will be automatically scaled with the resolution
-        """
-        resolution = x.shape[2:]
-
-        if isinstance(self.domain_padding, (float, int)):
-            self.domain_padding = [float(self.domain_padding)] * len(resolution)
-
-        assert len(self.domain_padding) == len(resolution), (
-            "domain_padding length must match the number of spatial/time dimensions "
-            "(excluding batch, ch)"
-        )
-
-        output_scaling_factor = self.output_scaling_factor
-        if not isinstance(self.output_scaling_factor, list):
-            # if unset by the user, scaling_factor will be 1 be default,
-            # so `output_scaling_factor` should never be None.
-            output_scaling_factor: List[float] = validate_scaling_factor(
-                self.output_scaling_factor, len(resolution), n_layers=None
-            )
-
-        try:
-            padding = self._padding[f"{resolution}"]
-            return F.pad(x, padding, mode="constant")
-        except KeyError:
-            padding = [round(p * r) for (p, r) in zip(self.domain_padding, resolution)]
-
-            output_pad = padding
-            output_pad = [
-                round(i * j) for (i, j) in zip(output_scaling_factor, output_pad)
-            ]
-
-            # padding is being applied in reverse order
-            # (so we must reverse the padding list)
-            padding = padding[::-1]
-
-            # the F.pad(x, padding) funtion pads the tensor 'x' in reverse order of the "padding" list i.e. the last axis of tensor 'x' will be padded by the amount mention at the first position of the 'padding' vector. The details about F.pad can be found here:
-            # https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/functional/pad_cn.html
-
-            if self.padding_mode == "symmetric":
-                # Pad both sides
-                unpad_list = list()
-                for p in output_pad:
-                    if p == 0:
-                        padding_end = None
-                        padding_start = None
-                    else:
-                        padding_end = p
-                        padding_start = -p
-                    unpad_list.append(slice(padding_end, padding_start, None))
-
-                unpad_indices = (Ellipsis,) + tuple(
-                    [slice(p, -p, None) for p in padding]
-                )
-                padding = [i for p in padding for i in (p, p)]
-
-            elif self.padding_mode == "one-sided":
-                # One-side padding
-                unpad_list = list()
-                for p in output_pad:
-                    if p == 0:
-                        padding_start = None
-                    else:
-                        padding_start = -p
-                    unpad_list.append(slice(None, padding_start, None))
-                unpad_indices = (Ellipsis,) + tuple(unpad_list)
-                padding = [i for p in padding for i in (0, p)]
-            else:
-                raise ValueError(f"Got self.padding_mode = {self.padding_mode}")
-
-            self._padding[f"{resolution}"] = padding
-
-            padded = F.pad(x, padding, mode="constant")
-            output_shape = padded.shape[2:]
-            output_shape = [
-                round(i * j) for (i, j) in zip(output_scaling_factor, output_shape)
-            ]
-
-            self._unpad_indices[f"{[i for i in output_shape]}"] = unpad_indices
-
-            return padded
-
-    def unpad(self, x):
-        """Remove the padding from padding inputs"""
-        unpad_indices = self._unpad_indices[f"{x.shape[2:]}"]
-
-        return x[unpad_indices]
-
-
-class SoftGating(nn.Layer):
-    """Applies soft-gating by weighting the channels of the given input
-
-    Given an input x of size `(batch-size, channels, height, width)`,
-    this returns `x * w `
-    where w is of shape `(1, channels, 1, 1)`
-
-    Args:
-        in_features (int): The number of input features.
-        out_features (int, optional): Number of output features. Defaults to None.
-        n_dim (int, optional): Dimensionality of the input (excluding batch-size and channels).
-            ``n_dim=2`` corresponds to having Module2D. Defaults to 2.
-        bias (bool, optional): Whether to use bias. Defaults to False.
-    """
-
-    def __init__(
-        self, in_features, out_features: int = None, n_dim: int = 2, bias: bool = False
-    ):
-        super().__init__()
-        if out_features is not None and in_features != out_features:
-            raise ValueError(
-                f"Got in_features = {in_features} and out_features = {out_features}"
-                "but these two must be the same for soft-gating"
-            )
-        self.in_features = in_features
-        self.out_features = out_features
-
-        self.weight = self.create_parameter(
-            shape=(1, self.in_features, *(1,) * n_dim),
-            default_initializer=nn.initializer.Constant(1.0),
-        )
-        if bias:
-            self.bias = self.create_parameter(
-                shape=(1, self.in_features, *(1,) * n_dim),
-                default_initializer=nn.initializer.Constant(1.0),
-            )
-        else:
-            self.bias = None
-
-    def forward(self, x):
-        """Applies soft-gating to a batch of activations"""
-        if self.bias is not None:
-            return self.weight * x + self.bias
-        else:
-            return self.weight * x
-
-
-def skip_connection(
-    in_features,
-    out_features,
-    n_dim: int = 2,
-    bias: bool = False,
-    type: str = "soft-gating",
-):
-    """A wrapper for several types of skip connections.
-    Returns an nn.Module skip connections, one of {'identity', 'linear', soft-gating'}
-
-    Args:
-        in_features (int): Number of input features.
-        out_features (int): Number of output features.
-        n_dim (int, optional): Dimensionality of the input (excluding batch-size and channels).
-            ``n_dim=2`` corresponds to having Module2D. . Defaults to 2.
-        bias (bool, optional): Whether to use a bias. Defaults to False.
-        type (str, optional): Kind of skip connection to use,{'identity', 'linear', soft-gating'}.
-            Defaults to "soft-gating".
-    """
-
-    if type.lower() == "soft-gating":
-        return SoftGating(
-            in_features=in_features, out_features=out_features, bias=bias, n_dim=n_dim
-        )
-    elif type.lower() == "linear":
-        return getattr(nn, f"Conv{n_dim}D")(
-            in_channels=in_features,
-            out_channels=out_features,
-            kernel_size=1,
-            bias_attr=bias,
-        )
-    elif type.lower() == "identity":
-        return nn.Identity()
-    else:
-        raise ValueError(
-            f"Got skip-connection type = {type}, expected one of {'soft-gating', 'linear', 'identity'}."
-        )
-
-
-class AdaIN(nn.Layer):
-    def __init__(self, embed_dim, in_channels, mlp=None, eps=1e-5):
-        super().__init__()
-        self.in_channels = in_channels
-        self.embed_dim = embed_dim
-        self.eps = eps
-
-        if mlp is None:
-            mlp = nn.Sequential(
-                nn.Linear(embed_dim, 512), nn.GELU(), nn.Linear(512, 2 * in_channels)
-            )
-        self.mlp = mlp
-
-        self.embedding = None
-
-    def set_embedding(self, x):
-        self.embedding = x.reshape(
-            self.embed_dim,
-        )
-
-    def forward(self, x):
-        assert (
-            self.embedding is not None
-        ), "AdaIN: update embeddding before running forward"
-
-        weight, bias = paddle.split(
-            self.mlp(self.embedding),
-            self.embedding.shape[0] // self.in_channels,
-            axis=0,
-        )
-
-        return nn.functional.group_norm(x, self.in_channels, self.eps, weight, bias)
-
-
-class MLP(nn.Layer):
-    """A Multi-Layer Perceptron, with arbitrary number of layers
-
-    Args:
-        in_channels (int): The number of input channels.
-        out_channels (int, optional): The number of output channels. Defaults to None.
-        hidden_channels (int, optional): The number of hidden channels. Defaults to None.
-        n_layers (int, optional): The number of layers. Defaults to 2.
-        n_dim (int, optional): The type of convolution,2D or 3D. Defaults to 2.
-        non_linearity (nn.functional, optional): The activation function. Defaults to F.gelu.
-        dropout (float, optional): The ratio of dropout. Defaults to 0.0.
- """ - - def __init__( - self, - in_channels: int, - out_channels: int = None, - hidden_channels: int = None, - n_layers: int = 2, - n_dim: int = 2, - non_linearity: nn.functional = F.gelu, - dropout: float = 0.0, - **kwargs, - ): - super().__init__() - self.n_layers = n_layers - self.in_channels = in_channels - self.out_channels = in_channels if out_channels is None else out_channels - self.hidden_channels = ( - in_channels if hidden_channels is None else hidden_channels - ) - self.non_linearity = non_linearity - self.dropout = ( - nn.LayerList([nn.Dropout(dropout) for _ in range(n_layers)]) - if dropout > 0.0 - else None - ) - - Conv = getattr(nn, f"Conv{n_dim}D") - self.fcs = nn.LayerList() - for i in range(n_layers): - if i == 0 and i == (n_layers - 1): - self.fcs.append(Conv(self.in_channels, self.out_channels, 1)) - elif i == 0: - self.fcs.append(Conv(self.in_channels, self.hidden_channels, 1)) - elif i == (n_layers - 1): - self.fcs.append(Conv(self.hidden_channels, self.out_channels, 1)) - else: - self.fcs.append(Conv(self.hidden_channels, self.hidden_channels, 1)) - - def forward(self, x): - for i, fc in enumerate(self.fcs): - x = fc(x) - if i < self.n_layers - 1: - x = self.non_linearity(x) - if self.dropout is not None: - x = self.dropout[i](x) - return x - - -def _contract_dense(x, weight, separable=False): - order = len(x.shape) - x_syms = list(einsum_symbols[:order]) - - # in_channels, out_channels, x, y... - weight_syms = list(x_syms[1:]) # no batch-size - - # batch-size, out_channels, x, y... - if separable: - out_syms = [x_syms[0]] + list(weight_syms) - else: - weight_syms.insert(1, einsum_symbols[order]) # outputs - out_syms = list(weight_syms) - out_syms[0] = x_syms[0] - - eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) - # For the darcy flow, the only einsum is abcd,becd->aecd, where x and weights are shaped [32,32,8,8] - if not isinstance(weight, paddle.Tensor): - weight = paddle.to_tensor(weight) - - return paddle.einsum(eq, x, weight) - - -def _contract_dense_trick(x, weight_real, weight_imag, separable=False): - # the same as above function, but do the complex multiplication manually to avoid the einsum bug in paddle - order = len(x.shape) - # batch-size, in_channels, x, y... - x_syms = list(einsum_symbols[:order]) - - # in_channels, out_channels, x, y... - weight_syms = list(x_syms[1:]) # no batch-size - - # batch-size, out_channels, x, y... - if separable: - out_syms = [x_syms[0]] + list(weight_syms) - else: - weight_syms.insert(1, einsum_symbols[order]) # outputs - out_syms = list(weight_syms) - out_syms[0] = x_syms[0] - - eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) - - o1_real = paddle.einsum(eq, x.real(), weight_real) - paddle.einsum( - eq, x.imag(), weight_imag - ) - o1_imag = paddle.einsum(eq, x.imag(), weight_real) + paddle.einsum( - eq, x.real(), weight_imag - ) - x = paddle.complex(o1_real, o1_imag) - return x - - -def _contract_dense_separable(x, weight, separable=True): - if not separable: - raise ValueError("This function is only for separable=True") - return x * weight - - -def get_contract_fun( - weight, implementation: str = "reconstructed", separable: bool = False -): - """Generic ND implementation of Fourier Spectral Conv contraction. - - Args: - weight (paddle.tensor): FactorizedTensor. - implementation (str, optional): {'reconstructed', 'factorized'}. 
- whether to reconstruct the weight and do a forward pass (reconstructed) - or contract directly the factors of the factorized weight with the input (factorized). Defaults to "reconstructed". - separable (bool, optional): Whether to use the separable implementation of contraction. This - arg is only checked when `implementation=reconstructed`. Defaults to False. - - Returns: - function : (x, weight) -> x * weight in Fourier space. - """ - - if implementation == "reconstructed": - if separable: - return _contract_dense_separable - else: - return _contract_dense_trick - elif implementation == "factorized": - if isinstance(weight, paddle.Tensor): - return _contract_dense_trick - - else: - raise ValueError( - f'Got implementation={implementation}, expected "reconstructed" or "factorized"' - ) - - -Number = Union[float, int] - - -def validate_scaling_factor( - scaling_factor: Union[None, Number, List[Number], List[List[Number]]], - n_dim: int, - n_layers: Optional[int] = None, -) -> Union[None, List[float], List[List[float]]]: - """Validates the format and dimensionality of the scaling factor. - - Args: - scaling_factor (Union[None, Number, List[Number], List[List[Number]]]): The - scaling factor. - n_dim (int): The required number of dimensions for expanding `scaling_factor`. - n_layers (Optional[int], optional): The number of layers for the returned - nested list. If None, return a single list (rather than a list of lists) - with `factor` repeated `dim` times. Defaults to None. - """ - - if scaling_factor is None: - return None - if isinstance(scaling_factor, (float, int)): - if n_layers is None: - return [float(scaling_factor)] * n_dim - - return [[float(scaling_factor)] * n_dim] * n_layers - if ( - isinstance(scaling_factor, list) - and len(scaling_factor) > 0 - and all([isinstance(s, (float, int)) for s in scaling_factor]) - ): - - return [[float(s)] * n_dim for s in scaling_factor] - - if ( - isinstance(scaling_factor, list) - and len(scaling_factor) > 0 - and all( - [isinstance(s, (omegaconf.listconfig.ListConfig)) for s in scaling_factor] - ) - ): - s_sub_pass = True - for s in scaling_factor: - if all([isinstance(s_sub, (int, float)) for s_sub in s]): - pass - else: - s_sub_pass = False - if s_sub_pass: - return scaling_factor - - return None - - -def resample(x, res_scale, axis, output_shape=None): - """A module for generic n-dimentional interpolation (Fourier resampling). - - Args: - x (paddle.Tensor): Input activation of size (batch_size, channels, d1, ..., dN). - res_scale (optional[int,tuple]): Scaling factor along each of the dimensions in - 'axis' parameter. If res_scale is scaler, then isotropic scaling is performed. - axis (int): Axis or dimensions along which interpolation will be performed. - output_shape (optional[None ,tuple[int]]): The output shape. Defaults to None. 
- """ - - if isinstance(res_scale, (float, int)): - if axis is None: - axis = list(range(2, x.ndim)) - res_scale = [res_scale] * len(axis) - elif isinstance(axis, int): - axis = [axis] - res_scale = [res_scale] - else: - res_scale = [res_scale] * len(axis) - else: - assert len(res_scale) == len(axis), "leght of res_scale and axis are not same" - - old_size = x.shape[-len(axis) :] - if output_shape is None: - new_size = tuple([int(round(s * r)) for (s, r) in zip(old_size, res_scale)]) - else: - new_size = output_shape - - if len(axis) == 1: - return F.interpolate(x, size=new_size[0], mode="linear", align_corners=True) - if len(axis) == 2: - return F.interpolate(x, size=new_size, mode="bicubic", align_corners=True) - - X = paddle.fft.rfftn(x.astype("float32"), norm="forward", axes=axis) - - new_fft_size = list(new_size) - new_fft_size[-1] = new_fft_size[-1] // 2 + 1 # Redundant last coefficient - new_fft_size_c = [min(i, j) for (i, j) in zip(new_fft_size, X.shape[-len(axis) :])] - out_fft = paddle.zeros( - [x.shape[0], x.shape[1], *new_fft_size], dtype=paddle.complex64 - ) - - mode_indexing = [((None, m // 2), (-m // 2, None)) for m in new_fft_size_c[:-1]] + [ - ((None, new_fft_size_c[-1]),) - ] - for i, boundaries in enumerate(itertools.product(*mode_indexing)): - - idx_tuple = [slice(None), slice(None)] + [slice(*b) for b in boundaries] - - out_fft[idx_tuple] = X[idx_tuple] - y = paddle.fft.irfftn(out_fft, s=new_size, norm="forward", axes=axis) - - return y - - -class FactorizedTensor(nn.Layer): - def __init__(self, shape, init_scale): - super().__init__() - self.shape = shape - self.init_scale = init_scale - self.real = self.create_parameter( - shape=shape, - ) - self.real = initializer.normal_(self.real, 0, init_scale) - self.imag = self.create_parameter(shape=shape) - self.imag = initializer.normal_(self.imag, 0, init_scale) - - def __repr__(self): - return f"FactorizedTensor(shape={self.shape})" - - @property - def data(self): - return paddle.complex(self.real, self.imag) - - -class FactorizedSpectralConv(nn.Layer): - """Generic N-Dimensional Fourier Neural Operator - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - n_modes (Tuple[int, ...]): Number of modes to use for contraction in Fourier domain during training. - .. warning:: - We take care of the redundancy in the Fourier modes, therefore, for an input - of size I_1, ..., I_N, please provide modes M_K that are I_1 < M_K <= I_N - We will automatically keep the right amount of modes: specifically, for the - last mode only, if you specify M_N modes we will use M_N // 2 + 1 modes - as the real FFT is redundant along that last dimension. - - .. note:: - Provided modes should be even integers. odd numbers will be rounded to the closest even number. - This can be updated dynamically during training. - max_n_modes (int, optional): * If not None, **maximum** number of modes to keep - in Fourier Layer, along each dim - The number of modes (`n_modes`) cannot be increased beyond that. - * If None, all the n_modes are used. Defaults to None. - bias (bool, optional): Whether to use bias in the layers. Defaults to True. - n_layers (int, optional): Number of Fourier Layers. Defaults to 1. - separable (bool, optional): Whether to use separable Fourier Conv. Defaults to False. - output_scaling_factor (Optional[Union[Number, List[Number]]], optional): Scaling factor for the - output. Defaults to None. - rank (float, optional): Rank of the tensor factorization of the Fourier weights. 
Defaults to 0.5. - factorization (str, optional): Tensor factorization of the parameters weight to use. - * If None, a dense tensor parametrizes the Spectral convolutions - * Otherwise, the specified tensor factorization is used. Defaults to None. - implementation (str, optional): If factorization is not None, forward mode to use. - * `reconstructed` : the full weight tensor is reconstructed from the - factorization and used for the forward pass - * `factorized` : the input is directly contracted with the factors of - the decomposition. Defaults to "reconstructed". - joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a - single tensor. Defaults to False. - init_std (str, optional): The std to use for the init. Defaults to "auto". - fft_norm (str, optional):The normalization mode for the FFT. Defaults to "backward". - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - n_modes: Tuple[int, ...], - max_n_modes: int = None, - bias: bool = True, - n_layers: int = 1, - separable: bool = False, - output_scaling_factor: Optional[Union[Number, List[Number]]] = None, - rank: float = 0.5, - factorization: str = None, - implementation: str = "reconstructed", - joint_factorization: bool = False, - init_std: str = "auto", - fft_norm: str = "backward", - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.joint_factorization = joint_factorization - self.n_modes = n_modes - - self.order = len(self.n_modes) - if max_n_modes is None: - max_n_modes = self.n_modes - elif isinstance(max_n_modes, int): - max_n_modes = [max_n_modes] - self.max_n_modes = max_n_modes - self.rank = rank - self.factorization = factorization - self.n_layers = n_layers - self.implementation = implementation - - self.output_scaling_factor: Union[ - None, List[List[float]] - ] = validate_scaling_factor(output_scaling_factor, self.order, n_layers) - - if init_std == "auto": - init_std = (2 / (in_channels + out_channels)) ** 0.5 - else: - init_std = init_std - self.fft_norm = fft_norm - if factorization is None: - factorization = "Dense" - if not factorization.lower().startswith("complex"): - factorization = f"Complex{factorization}" - if separable: - if in_channels != out_channels: - raise ValueError( - f"To use separable Fourier Conv, in_channels must be equal to out_channels, but got in_channels={in_channels} and out_channels={out_channels}" - ) - weight_shape = (in_channels, *max_n_modes) - else: - weight_shape = (in_channels, out_channels, *max_n_modes) - self.separable = separable - if joint_factorization: - self.weight = paddle.create_parameter( - shape=(n_layers, *weight_shape), - dtype="float32", - ) - self.weight = initializer.normal_(self.weight, 0, init_std) - else: - self.weight = nn.LayerList( - [ - FactorizedTensor(weight_shape, init_scale=init_std) - for _ in range(n_layers) - ] - ) - - self._contract = get_contract_fun( - self.weight[0].data, implementation=implementation, separable=separable - ) - if bias: - shape = (n_layers, self.out_channels) + (1,) * self.order - init_bias = init_std * paddle.randn(shape) - self.bias = paddle.create_parameter( - shape=shape, - dtype=(init_bias.dtype), - default_initializer=nn.initializer.Assign(init_bias), - ) - self.bias.stop_gradient = False - else: - self.bias = None - - @property - def n_modes(self): - return self._n_modes - - @n_modes.setter - def n_modes(self, n_modes): - if isinstance(n_modes, int): # Should happen for 1D FNO only - n_modes = [n_modes] - else: - 
n_modes = list(n_modes) - # The last mode has a redundacy as we use real FFT - # As a design choice we do the operation here to avoid users dealing with the +1 - n_modes[-1] = n_modes[-1] // 2 + 1 - self._n_modes = n_modes - - def transform(self, x, layer_index=0, output_shape=None): - in_shape = list(x.shape[2:]) - - if self.output_scaling_factor is not None and output_shape is None: - out_shape = tuple( - [ - round(s * r) - for (s, r) in zip(in_shape, self.output_scaling_factor[layer_index]) - ] - ) - elif output_shape is not None: - out_shape = output_shape - else: - out_shape = in_shape - if in_shape == out_shape: - return x - else: - return resample( - x, - 1.0, - list(range(2, x.ndim)), - output_shape=out_shape, - ) - - def forward( - self, - x: paddle.Tensor, - indices: int = 0, - output_shape: Optional[Tuple[int]] = None, - ): - batchsize, channels, *mode_sizes = x.shape - fft_size = list(mode_sizes) - fft_size[-1] = fft_size[-1] // 2 + 1 - fft_dims = list(range(-self.order, 0)) - - x = paddle.fft.rfftn(x=x, norm=self.fft_norm, axes=fft_dims) - - if self.order > 1: - x = paddle.fft.fftshift(x=x, axes=fft_dims[:-1]) - - out_fft = paddle.zeros( - shape=[batchsize, self.out_channels, *fft_size], dtype=paddle.complex64 - ) - - starts = [ - (max_modes - min(size, n_mode)) - for size, n_mode, max_modes in zip(fft_size, self.n_modes, self.max_n_modes) - ] - slices_w = [slice(None), slice(None)] - slices_w += [ - (slice(start // 2, -start // 2) if start else slice(start, None)) - for start in starts[:-1] - ] - slices_w += [slice(None, -starts[-1]) if starts[-1] else slice(None)] - - w_real = self.weight[indices].real[ - slices_w[0], slices_w[1], slices_w[2], slices_w[3] - ] - w_imag = self.weight[indices].imag[ - slices_w[0], slices_w[1], slices_w[2], slices_w[3] - ] - - starts = [ - (size - min(size, n_mode)) - for (size, n_mode) in zip(list(x.shape[2:]), list(w_real.shape[2:])) - ] - slices_x = [slice(None), slice(None)] # Batch_size, channels - slices_x += [ - slice(start // 2, -start // 2) if start else slice(start, None) - for start in starts[:-1] - ] - slices_x += [ - slice(None, -starts[-1]) if starts[-1] else slice(None) - ] # The last mode already has redundant half removed - idx_tuple = slices_x - if len(idx_tuple) == 4: - out_fft[ - idx_tuple[0], idx_tuple[1], idx_tuple[2], idx_tuple[3] - ] = self._contract( - x[idx_tuple[0], idx_tuple[1], idx_tuple[2], idx_tuple[3]], - w_real, - w_imag, - separable=self.separable, - ) - elif len(idx_tuple) == 3: - out_fft[idx_tuple[0], idx_tuple[1], idx_tuple[2]] = self._contract( - x[idx_tuple[0], idx_tuple[1], idx_tuple[2]], - w_real, - w_imag, - separable=self.separable, - ) - else: - raise ValueError("Not implemented") - - if self.output_scaling_factor is not None and output_shape is None: - mode_sizes = tuple( - [ - round(s * r) - for (s, r) in zip(mode_sizes, self.output_scaling_factor[indices]) - ] - ) - - if output_shape is not None: - mode_sizes = output_shape - - if self.order > 1: - out_fft = paddle.fft.fftshift(x=out_fft, axes=fft_dims[:-1]) - - x = paddle.fft.irfftn( - x=out_fft, s=mode_sizes, axes=fft_dims, norm=self.fft_norm - ) - if self.bias is not None: - x = x + self.bias[indices, ...] 
- return x - - -class FactorizedSpectralConv1d(FactorizedSpectralConv): - """1D Spectral Conv - - This is provided for reference only, - see :class:`FactorizedSpectralConv` for the preferred, general implementation - """ - - def forward(self, x, indices=0): - batchsize, channels, width = x.shape - - x = paddle.fft.rfft(x, norm=self.fft_norm) - - out_fft = paddle.zeros( - shape=[batchsize, self.out_channels, width // 2 + 1], dtype=paddle.complex64 - ) - - slices = ( - slice(None), # Equivalent to: [:, - slice(None), # ............... :, - slice(None, self.n_modes[0]), # :half_n_modes[0]] - ) - - w_real = self.weight[indices].real[slices[0], slices[1], slices[2]] - w_imag = self.weight[indices].imag[slices[0], slices[1], slices[2]] - - out_fft[slices[0], slices[1], slices[2]] = self._contract( - x[slices[0], slices[1], slices[2]], - w_real, - w_imag, - separable=self.separable, - ) - - if self.output_scaling_factor is not None: - width = round(width * self.output_scaling_factor[0]) - - x = paddle.fft.irfft(out_fft, n=width, norm=self.fft_norm) - - if self.bias is not None: - x = x + self.bias[indices, ...] - - return x - - -class FactorizedSpectralConv2d(FactorizedSpectralConv): - """2D Spectral Conv. - - This is provided for reference only, - see :class:`FactorizedSpectralConv` for the preferred, general implementation - """ - - def forward(self, x, indices=0): - batchsize, channels, height, width = x.shape - - x = paddle.fft.rfft2(x.float(), norm=self.fft_norm, axes=(-2, -1)) - - # The output will be of size (batch_size, self.out_channels, - # x.size(-2), x.size(-1)//2 + 1) - out_fft = paddle.zeros( - shape=[batchsize, self.out_channels, height, width // 2 + 1], - dtype=paddle.complex64, - ) - - slices0 = ( - slice(None), # Equivalent to: [:, - slice(None), # ............... :, - slice(self.n_modes[0] // 2), # :half_n_modes[0], - slice(self.n_modes[1]), # :half_n_modes[1]] - ) - slices1 = ( - slice(None), # Equivalent to: [:, - slice(None), # ...................... :, - slice(-self.n_modes[0] // 2, None), # -half_n_modes[0]:, - slice(self.n_modes[1]), # ...... :half_n_modes[1]] - ) - logger.message( - f"2D: {x[slices0].shape=}, {self._get_weight(indices)[slices0].shape=}, {self._get_weight(indices).shape=}" - ) - - w_real = self.weight[indices].real[ - slices1[0], slices1[1], slices1[2], slices1[3] - ] - w_imag = self.weight[indices].imag[ - slices1[0], slices1[1], slices1[2], slices1[3] - ] - - """Upper block (truncate high frequencies).""" - out_fft[slices0[0], slices0[1], slices0[2], slices0[3]] = self._contract( - x[slices0[0], slices0[1], slices0[2], slices0[3]], - w_real, - w_imag, - separable=self.separable, - ) - - w_real = self.weight[indices].real[ - slices0[0], slices0[1], slices0[2], slices0[3] - ] - w_imag = self.weight[indices].imag[ - slices0[0], slices0[1], slices0[2], slices0[3] - ] - - """Lower block""" - out_fft[slices1[0], slices1[1], slices1[2], slices1[3]] = self._contract( - x[slices1[0], slices1[1], slices1[2], slices1[3]], - w_real, - w_imag, - separable=self.separable, - ) - - if self.output_scaling_factor is not None: - width = round(width * self.output_scaling_factor[indices][0]) - height = round(height * self.output_scaling_factor[indices][1]) - - x = paddle.fft.irfft2( - out_fft, s=(height, width), axes=(-2, -1), norm=self.fft_norm - ) - - if self.bias is not None: - x = x + self.bias[indices, ...] - - return x - - -class FactorizedSpectralConv3d(FactorizedSpectralConv): - """3D Spectral Conv. 
- - This is provided for reference only, - see :class:`FactorizedSpectralConv` for the preferred, general implementation - """ - - def forward(self, x, indices=0): - batchsize, channels, height, width, depth = x.shape - - x = paddle.fft.rfftn(x.float(), norm=self.fft_norm, axes=[-3, -2, -1]) - - out_fft = paddle.zeros( - shape=[batchsize, self.out_channels, height, width, depth // 2 + 1], - dtype=paddle.complex64, - ) - - slices0 = ( - slice(None), # Equivalent to: [:, - slice(None), # ............... :, - slice(self.n_modes[0] // 2), # :half_n_modes[0], - slice(self.n_modes[1] // 2), # :half_n_modes[1], - slice(self.n_modes[2]), # :half_n_modes[2]] - ) - slices1 = ( - slice(None), # Equivalent to: [:, - slice(None), # ...................... :, - slice(self.n_modes[0] // 2), # ...... :half_n_modes[0], - slice(-self.n_modes[1] // 2, None), # -half_n_modes[1]:, - slice(self.n_modes[2]), # ...... :half_n_modes[0]] - ) - slices2 = ( - slice(None), # Equivalent to: [:, - slice(None), # ...................... :, - slice(-self.n_modes[0] // 2, None), # -half_n_modes[0]:, - slice(self.n_modes[1] // 2), # ...... :half_n_modes[1], - slice(self.n_modes[2]), # ...... :half_n_modes[2]] - ) - slices3 = ( - slice(None), # Equivalent to: [:, - slice(None), # ...................... :, - slice(-self.n_modes[0] // 2, None), # -half_n_modes[0], - slice(-self.n_modes[1] // 2, None), # -half_n_modes[1], - slice(self.n_modes[2]), # ...... :half_n_modes[2]] - ) - - w_real = self.weight[indices].real[ - slices3[0], slices3[1], slices3[2], slices3[3], slices3[4] - ] - w_imag = self.weight[indices].imag[ - slices3[0], slices3[1], slices3[2], slices3[3], slices3[4] - ] - - """Upper block -- truncate high frequencies.""" - out_fft[ - slices0[0], slices0[1], slices0[2], slices0[3], slices0[4] - ] = self._contract( - x[slices0[0], slices0[1], slices0[2], slices0[3], slices0[4]], - w_real, - w_imag, - separable=self.separable, - ) - - w_real = self.weight[indices].real[ - slices2[0], slices2[1], slices2[2], slices2[3], slices2[4] - ] - w_imag = self.weight[indices].imag[ - slices2[0], slices2[1], slices2[2], slices2[3], slices2[4] - ] - """Low-pass filter for indices 2 & 4, and high-pass filter for index 3.""" - out_fft[ - slices1[0], slices1[1], slices1[2], slices1[3], slices1[4] - ] = self._contract( - x[slices1[0], slices1[1], slices1[2], slices1[3], slices1[4]], - w_real, - w_imag, - separable=self.separable, - ) - - w_real = self.weight[indices].real[ - slices1[0], slices1[1], slices1[2], slices1[3], slices1[4] - ] - w_imag = self.weight[indices].imag[ - slices1[0], slices1[1], slices1[2], slices1[3], slices1[4] - ] - """Low-pass filter for indices 3 & 4, and high-pass filter for index 2.""" - out_fft[ - slices2[0], slices2[1], slices2[2], slices2[3], slices2[4] - ] = self._contract( - x[slices2[0], slices2[1], slices2[2], slices2[3], slices2[4]], - w_real, - w_imag, - separable=self.separable, - ) - - w_real = self.weight[indices].real[ - slices0[0], slices0[1], slices0[2], slices0[3], slices0[4] - ] - w_imag = self.weight[indices].imag[ - slices0[0], slices0[1], slices0[2], slices0[3], slices0[4] - ] - """Lower block -- low-cut filter in indices 2 & 3 - and high-cut filter in index 4.""" - out_fft[ - slices3[0], slices3[1], slices3[2], slices3[3], slices3[4] - ] = self._contract( - x[slices3[0], slices3[1], slices3[2], slices3[3], slices3[4]], - w_real, - w_imag, - separable=self.separable, - ) - - if self.output_scaling_factor is not None: - width = round(width * self.output_scaling_factor[0]) - height = 
round(height * self.output_scaling_factor[1]) - depth = round(depth * self.output_scaling_factor[2]) - - x = paddle.fft.irfftn( - out_fft, s=(height, width, depth), axes=[-3, -2, -1], norm=self.fft_norm - ) - - if self.bias is not None: - x = x + self.bias[indices, ...] - return x - - -class FNOBlocks(nn.Layer): - def __init__( - self, - in_channels: int, - out_channels: int, - n_modes: Tuple[int, ...], - output_scaling_factor: Optional[Union[Number, List[Number]]] = None, - n_layers: int = 1, - max_n_modes: int = None, - use_mlp: bool = False, - mlp: Optional[Dict[str, float]] = None, - non_linearity: nn.functional = F.gelu, - stabilizer: str = None, - norm: str = None, - ada_in_features: Optional[int] = None, - preactivation: bool = False, - fno_skip: str = "linear", - mlp_skip: str = "soft-gating", - separable: bool = False, - factorization: str = None, - rank: float = 1.0, - SpectralConv: FactorizedSpectralConv = FactorizedSpectralConv, - joint_factorization: bool = False, - implementation: str = "factorized", - fft_norm: str = "forward", - **kwargs, - ): - super().__init__() - if isinstance(n_modes, int): - n_modes = [n_modes] - self._n_modes = n_modes - self.n_dim = len(n_modes) - - self.max_n_modes = max_n_modes - self.in_channels = in_channels - self.out_channels = out_channels - self.n_layers = n_layers - self.joint_factorization = joint_factorization - self.non_linearity = non_linearity - self.rank = rank - self.factorization = factorization - self.fno_skip = fno_skip - self.mlp_skip = mlp_skip - self.use_mlp = use_mlp - self.fft_norm = fft_norm - self.implementation = implementation - self.separable = separable - self.preactivation = preactivation - self.ada_in_features = ada_in_features - self.stabilizer = stabilizer - self.norm = norm - - self.convs = SpectralConv( - self.in_channels, - self.out_channels, - self.n_modes, - output_scaling_factor=output_scaling_factor, - max_n_modes=max_n_modes, - rank=rank, - implementation=implementation, - separable=separable, - factorization=factorization, - joint_factorization=joint_factorization, - n_layers=n_layers, - ) - - self.fno_skips = nn.LayerList( - [ - skip_connection( - self.in_channels, - self.out_channels, - type=fno_skip, - n_dim=self.n_dim, - ) - for _ in range(n_layers) - ] - ) - - if use_mlp: - self.mlp = nn.LayerList( - [ - MLP( - in_channels=self.out_channels, - hidden_channels=int( - round(self.out_channels * mlp["expansion"]) - ), - dropout=mlp["dropout"], - n_dim=self.n_dim, - ) - for _ in range(n_layers) - ] - ) - self.mlp_skips = nn.LayerList( - [ - skip_connection( - self.in_channels, - self.out_channels, - type=mlp_skip, - n_dim=self.n_dim, - ) - for _ in range(n_layers) - ] - ) - else: - self.mlp = None - - # Each block will have 2 norms if we also use an MLP - self.n_norms = 1 if self.mlp is None else 2 - if norm is None: - self.norm = None - elif norm == "instance_norm": - self.norm = nn.LayerList( - [ - getattr(nn, f"InstanceNorm{self.n_dim}d")( - num_features=self.out_channels - ) - for _ in range(n_layers * self.n_norms) - ] - ) - elif norm == "group_norm": - self.norm = nn.LayerList( - [ - nn.GroupNorm(num_groups=1, num_channels=self.out_channels) - for _ in range(n_layers * self.n_norms) - ] - ) - elif norm == "ada_in": - self.norm = nn.LayerList( - [ - AdaIN(ada_in_features, out_channels) - for _ in range(n_layers * self.n_norms) - ] - ) - else: - raise ValueError( - f"Got {norm} but expected None or one of [instance_norm, group_norm, layer_norm]" - ) - - def forward(self, x, index=0, output_shape=None): - 
if self.preactivation: - return self.forward_with_preactivation(x, index, output_shape=output_shape) - else: - return self.forward_with_postactivation(x, index, output_shape=output_shape) - - def forward_with_postactivation(self, x, index=0, output_shape=None): - x_skip_fno = self.fno_skips[index](x) - x_skip_fno = self.convs.transform(x_skip_fno, index, output_shape=output_shape) - if self.mlp is not None: - x_skip_mlp = self.mlp_skips[index](x) - x_skip_mlp = self.convs.transform( - x_skip_mlp, index, output_shape=output_shape - ) - if self.stabilizer == "tanh": - x = paddle.tanh(x) - - x_fno = self.convs(x, index, output_shape=output_shape) - if self.norm is not None: - x_fno = self.norm[self.n_norms * index](x_fno) - - x = x_fno + x_skip_fno - - if (self.mlp is not None) or (index < (self.n_layers - 1)): - x = self.non_linearity(x) - - if self.mlp is not None: - x = self.mlp[index](x) + x_skip_mlp - - if self.norm is not None: - x = self.norm[self.n_norms * index + 1](x) - - if index < (self.n_layers - 1): - x = self.non_linearity(x) - - return x - - def forward_with_preactivation(self, x, index=0, output_shape=None): - # Apply non-linear activation (and norm) - # before this block's convolution/forward pass: - x = self.non_linearity(x) - - if self.norm is not None: - x = self.norm[self.n_norms * index](x) - - x_skip_fno = self.fno_skips[index](x) - x_skip_fno = self.convs.transform(x_skip_fno, index, output_shape=output_shape) - - if self.mlp is not None: - x_skip_mlp = self.mlp_skips[index](x) - x_skip_mlp = self.convs.transform( - x_skip_mlp, index, output_shape=output_shape - ) - - if self.stabilizer == "tanh": - x = paddle.tanh(x) - - x_fno = self.convs(x, index, output_shape=output_shape) - x = x_fno + x_skip_fno - - if self.mlp is not None: - if index < (self.n_layers - 1): - x = self.non_linearity(x) - - if self.norm is not None: - x = self.norm[self.n_norms * index + 1](x) - - x = self.mlp[index](x) + x_skip_mlp - - return x - - @property - def n_modes(self): - return self._n_modes - - @n_modes.setter - def n_modes(self, n_modes): - if isinstance(n_modes, int): # Should happen for 1D FNO only - n_modes = [n_modes] - else: - n_modes = list(n_modes) - # The last mode has a redundacy as we use real FFT - # As a design choice we do the operation here to avoid users dealing with the +1 - n_modes[-1] = n_modes[-1] // 2 + 1 - self._n_modes = n_modes +import itertools +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +import omegaconf +import paddle +import paddle.nn.functional as F +from paddle import nn + +from ppsci.utils import initializer +from ppsci.utils import logger + +einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + + +class DomainPadding(nn.Layer): + """Applies domain padding scaled automatically to the input's resolution + + Args: + domain_padding (Union[float, List[float]]): Typically, between zero and one, percentage of padding to use. + padding_mode (str, optional): Whether to pad on both sides, by default + 'one-sided'.Options are 'symmetric' or 'one-sided'。 Defaults to "one-sided". + output_scaling_factor (Union[int, List[int]], optional): Scaling factor for the + output. Defaults to 1. 
+ """ + + def __init__( + self, + domain_padding: Union[float, List[float]], + padding_mode: str = "one-sided", + output_scaling_factor: Union[int, List[int]] = 1, + ): + super().__init__() + self.domain_padding = domain_padding + self.padding_mode = padding_mode.lower() + if output_scaling_factor is None: + output_scaling_factor = 1 + self.output_scaling_factor: Union[int, List[int]] = output_scaling_factor + + # dict(f'{resolution}'=padding) such that padded = F.pad(x, indices) + self._padding = dict() + + # dict(f'{resolution}'=indices_to_unpad) such that unpadded = x[indices] + self._unpad_indices = dict() + + def forward(self, x): + self.pad(x) + + def pad(self, x): + """Take an input and pad it by the desired fraction + + The amount of padding will be automatically scaled with the resolution + """ + resolution = x.shape[2:] + + if isinstance(self.domain_padding, (float, int)): + self.domain_padding = [float(self.domain_padding)] * len(resolution) + + assert len(self.domain_padding) == len(resolution), ( + "domain_padding length must match the number of spatial/time dimensions " + "(excluding batch, ch)" + ) + + output_scaling_factor = self.output_scaling_factor + if not isinstance(self.output_scaling_factor, list): + # if unset by the user, scaling_factor will be 1 be default, + # so `output_scaling_factor` should never be None. + output_scaling_factor: List[float] = validate_scaling_factor( + self.output_scaling_factor, len(resolution), n_layers=None + ) + + try: + padding = self._padding[f"{resolution}"] + return F.pad(x, padding, mode="constant") + except KeyError: + padding = [round(p * r) for (p, r) in zip(self.domain_padding, resolution)] + + output_pad = padding + output_pad = [ + round(i * j) for (i, j) in zip(output_scaling_factor, output_pad) + ] + + # padding is being applied in reverse order + # (so we must reverse the padding list) + padding = padding[::-1] + + # the F.pad(x, padding) funtion pads the tensor 'x' in reverse order of the "padding" list i.e. the last axis of tensor 'x' will be padded by the amount mention at the first position of the 'padding' vector. 
The details about F.pad can be found here: + # https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/functional/pad_cn.html + + if self.padding_mode == "symmetric": + # Pad both sides + unpad_list = list() + for p in output_pad: + if p == 0: + padding_end = None + padding_start = None + else: + padding_end = p + padding_start = -p + unpad_list.append(slice(padding_end, padding_start, None)) + + unpad_indices = (Ellipsis,) + tuple( + [slice(p, -p, None) for p in padding] + ) + padding = [i for p in padding for i in (p, p)] + + elif self.padding_mode == "one-sided": + # One-side padding + unpad_list = list() + for p in output_pad: + if p == 0: + padding_start = None + else: + padding_start = -p + unpad_list.append(slice(None, padding_start, None)) + unpad_indices = (Ellipsis,) + tuple(unpad_list) + padding = [i for p in padding for i in (0, p)] + else: + raise ValueError(f"Got self.padding_mode = {self.padding_mode}") + + self._padding[f"{resolution}"] = padding + + padded = F.pad(x, padding, mode="constant") + output_shape = padded.shape[2:] + output_shape = [ + round(i * j) for (i, j) in zip(output_scaling_factor, output_shape) + ] + + self._unpad_indices[f"{[i for i in output_shape]}"] = unpad_indices + + return padded + + def unpad(self, x): + """Remove the padding from padding inputs""" + unpad_indices = self._unpad_indices[f"{x.shape[2:]}"] + + return x[unpad_indices] + + +class SoftGating(nn.Layer): + """Applies soft-gating by weighting the channels of the given input + + Given an input x of size `(batch-size, channels, height, width)`, + this returns `x * w ` + where w is of shape `(1, channels, 1, 1)` + + Args: + in_features (int): The number of input features. + out_features (int, optional): Number of output features. Defaults to None. + n_dim (int, optional): Dimensionality of the input (excluding batch-size and channels). + ``n_dim=2`` corresponds to having Module2D. Defaults to 2. + bias (bool, optional): Whether to use bias. Defaults to False. + """ + + def __init__( + self, in_features, out_features: int = None, n_dim: int = 2, bias: bool = False + ): + super().__init__() + if out_features is not None and in_features != out_features: + raise ValueError( + f"Got in_features = {in_features} and out_features = {out_features}" + "but these two must be the same for soft-gating" + ) + self.in_features = in_features + self.out_features = out_features + + self.weight = self.create_parameter( + shape=(1, self.in_features, *(1,) * n_dim), + default_initializer=nn.initializer.Constant(1.0), + ) + if bias: + self.bias = self.create_parameter( + shape=(1, self.in_features, *(1,) * n_dim), + default_initializer=nn.initializer.Constant(1.0), + ) + else: + self.bias = None + + def forward(self, x): + """Applies soft-gating to a batch of activations""" + if self.bias is not None: + return self.weight * x + self.bias + else: + return self.weight * x + + +def skip_connection( + in_features, + out_features, + n_dim: int = 2, + bias: bool = False, + type: str = "soft-gating", +): + """A wrapper for several types of skip connections. + Returns an nn.Module skip connections, one of {'identity', 'linear', soft-gating'} + + Args: + in_features (int): Number of input features. + out_features (int): Number of output features. + n_dim (int, optional): Dimensionality of the input (excluding batch-size and channels). + ``n_dim=2`` corresponds to having Module2D. . Defaults to 2. + bias (bool, optional): Whether to use a bias. Defaults to False. 
+ type (str, optional): Kind of skip connection to use,{'identity', 'linear', soft-gating'}. + Defaults to "soft-gating". + """ + + if type.lower() == "soft-gating": + return SoftGating( + in_features=in_features, out_features=out_features, bias=bias, n_dim=n_dim + ) + elif type.lower() == "linear": + return getattr(nn, f"Conv{n_dim}D")( + in_channels=in_features, + out_channels=out_features, + kernel_size=1, + bias_attr=bias, + ) + elif type.lower() == "identity": + return nn.Identity() + else: + raise ValueError( + f"Got skip-connection type = {type}, expected one of {'soft-gating', 'linear', 'identity'}." + ) + + +class AdaIN(nn.Layer): + def __init__(self, embed_dim, in_channels, mlp=None, eps=1e-5): + super().__init__() + self.in_channels = in_channels + self.embed_dim = embed_dim + self.eps = eps + + if mlp is None: + mlp = nn.Sequential( + nn.Linear(embed_dim, 512), nn.GELU(), nn.Linear(512, 2 * in_channels) + ) + self.mlp = mlp + + self.embedding = None + + def set_embedding(self, x): + self.embedding = x.reshape( + self.embed_dim, + ) + + def forward(self, x): + assert ( + self.embedding is not None + ), "AdaIN: update embeddding before running forward" + + weight, bias = paddle.split( + self.mlp(self.embedding), + self.embedding.shape[0] // self.in_channels, + axis=0, + ) + + return nn.functional.group_norm(x, self.in_channels, self.eps, weight, bias) + + +class MLP(nn.Layer): + """A Multi-Layer Perceptron, with arbitrary number of layers + + Args: + in_channels (int): The number of input channels. + out_channels (int, optional): The number of output channels. Defaults to None. + hidden_channels (int, optional): The number of hidden channels. Defaults to None. + n_layers (int, optional): The number of layers. Defaults to 2. + n_dim (int, optional): The type of convolution,2D or 3D. Defaults to 2. + non_linearity (nn.functional, optional): The activation function. Defaults to F.gelu. + dropout (float, optional): The ratio of dropout. Defaults to 0.0. + """ + + def __init__( + self, + in_channels: int, + out_channels: int = None, + hidden_channels: int = None, + n_layers: int = 2, + n_dim: int = 2, + non_linearity: nn.functional = F.gelu, + dropout: float = 0.0, + **kwargs, + ): + super().__init__() + self.n_layers = n_layers + self.in_channels = in_channels + self.out_channels = in_channels if out_channels is None else out_channels + self.hidden_channels = ( + in_channels if hidden_channels is None else hidden_channels + ) + self.non_linearity = non_linearity + self.dropout = ( + nn.LayerList([nn.Dropout(dropout) for _ in range(n_layers)]) + if dropout > 0.0 + else None + ) + + Conv = getattr(nn, f"Conv{n_dim}D") + self.fcs = nn.LayerList() + for i in range(n_layers): + if i == 0 and i == (n_layers - 1): + self.fcs.append(Conv(self.in_channels, self.out_channels, 1)) + elif i == 0: + self.fcs.append(Conv(self.in_channels, self.hidden_channels, 1)) + elif i == (n_layers - 1): + self.fcs.append(Conv(self.hidden_channels, self.out_channels, 1)) + else: + self.fcs.append(Conv(self.hidden_channels, self.hidden_channels, 1)) + + def forward(self, x): + for i, fc in enumerate(self.fcs): + x = fc(x) + if i < self.n_layers - 1: + x = self.non_linearity(x) + if self.dropout is not None: + x = self.dropout[i](x) + return x + + +def _contract_dense(x, weight, separable=False): + order = len(x.shape) + x_syms = list(einsum_symbols[:order]) + + # in_channels, out_channels, x, y... + weight_syms = list(x_syms[1:]) # no batch-size + + # batch-size, out_channels, x, y... 
+ if separable: + out_syms = [x_syms[0]] + list(weight_syms) + else: + weight_syms.insert(1, einsum_symbols[order]) # outputs + out_syms = list(weight_syms) + out_syms[0] = x_syms[0] + + eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) + # For the darcy flow, the only einsum is abcd,becd->aecd, where x and weights are shaped [32,32,8,8] + if not isinstance(weight, paddle.Tensor): + weight = paddle.to_tensor(weight) + + return paddle.einsum(eq, x, weight) + + +def _contract_dense_trick(x, weight_real, weight_imag, separable=False): + # the same as above function, but do the complex multiplication manually to avoid the einsum bug in paddle + order = len(x.shape) + # batch-size, in_channels, x, y... + x_syms = list(einsum_symbols[:order]) + + # in_channels, out_channels, x, y... + weight_syms = list(x_syms[1:]) # no batch-size + + # batch-size, out_channels, x, y... + if separable: + out_syms = [x_syms[0]] + list(weight_syms) + else: + weight_syms.insert(1, einsum_symbols[order]) # outputs + out_syms = list(weight_syms) + out_syms[0] = x_syms[0] + + eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) + + o1_real = paddle.einsum(eq, x.real(), weight_real) - paddle.einsum( + eq, x.imag(), weight_imag + ) + o1_imag = paddle.einsum(eq, x.imag(), weight_real) + paddle.einsum( + eq, x.real(), weight_imag + ) + x = paddle.complex(o1_real, o1_imag) + return x + + +def _contract_dense_separable(x, weight, separable=True): + if not separable: + raise ValueError("This function is only for separable=True") + return x * weight + + +def get_contract_fun( + weight, implementation: str = "reconstructed", separable: bool = False +): + """Generic ND implementation of Fourier Spectral Conv contraction. + + Args: + weight (paddle.tensor): FactorizedTensor. + implementation (str, optional): {'reconstructed', 'factorized'}. + whether to reconstruct the weight and do a forward pass (reconstructed) + or contract directly the factors of the factorized weight with the input (factorized). Defaults to "reconstructed". + separable (bool, optional): Whether to use the separable implementation of contraction. This + arg is only checked when `implementation=reconstructed`. Defaults to False. + + Returns: + function : (x, weight) -> x * weight in Fourier space. + """ + + if implementation == "reconstructed": + if separable: + return _contract_dense_separable + else: + return _contract_dense_trick + elif implementation == "factorized": + if isinstance(weight, paddle.Tensor): + return _contract_dense_trick + + else: + raise ValueError( + f'Got implementation={implementation}, expected "reconstructed" or "factorized"' + ) + + +Number = Union[float, int] + + +def validate_scaling_factor( + scaling_factor: Union[None, Number, List[Number], List[List[Number]]], + n_dim: int, + n_layers: Optional[int] = None, +) -> Union[None, List[float], List[List[float]]]: + """Validates the format and dimensionality of the scaling factor. + + Args: + scaling_factor (Union[None, Number, List[Number], List[List[Number]]]): The + scaling factor. + n_dim (int): The required number of dimensions for expanding `scaling_factor`. + n_layers (Optional[int], optional): The number of layers for the returned + nested list. If None, return a single list (rather than a list of lists) + with `factor` repeated `dim` times. Defaults to None. 
+ """ + + if scaling_factor is None: + return None + if isinstance(scaling_factor, (float, int)): + if n_layers is None: + return [float(scaling_factor)] * n_dim + + return [[float(scaling_factor)] * n_dim] * n_layers + if ( + isinstance(scaling_factor, list) + and len(scaling_factor) > 0 + and all([isinstance(s, (float, int)) for s in scaling_factor]) + ): + + return [[float(s)] * n_dim for s in scaling_factor] + + if ( + isinstance(scaling_factor, list) + and len(scaling_factor) > 0 + and all( + [isinstance(s, (omegaconf.listconfig.ListConfig)) for s in scaling_factor] + ) + ): + s_sub_pass = True + for s in scaling_factor: + if all([isinstance(s_sub, (int, float)) for s_sub in s]): + pass + else: + s_sub_pass = False + if s_sub_pass: + return scaling_factor + + return None + + +def resample(x, res_scale, axis, output_shape=None): + """A module for generic n-dimentional interpolation (Fourier resampling). + + Args: + x (paddle.Tensor): Input activation of size (batch_size, channels, d1, ..., dN). + res_scale (optional[int,tuple]): Scaling factor along each of the dimensions in + 'axis' parameter. If res_scale is scaler, then isotropic scaling is performed. + axis (int): Axis or dimensions along which interpolation will be performed. + output_shape (optional[None ,tuple[int]]): The output shape. Defaults to None. + """ + + if isinstance(res_scale, (float, int)): + if axis is None: + axis = list(range(2, x.ndim)) + res_scale = [res_scale] * len(axis) + elif isinstance(axis, int): + axis = [axis] + res_scale = [res_scale] + else: + res_scale = [res_scale] * len(axis) + else: + assert len(res_scale) == len(axis), "leght of res_scale and axis are not same" + + old_size = x.shape[-len(axis) :] + if output_shape is None: + new_size = tuple([int(round(s * r)) for (s, r) in zip(old_size, res_scale)]) + else: + new_size = output_shape + + if len(axis) == 1: + return F.interpolate(x, size=new_size[0], mode="linear", align_corners=True) + if len(axis) == 2: + return F.interpolate(x, size=new_size, mode="bicubic", align_corners=True) + + X = paddle.fft.rfftn(x.astype("float32"), norm="forward", axes=axis) + + new_fft_size = list(new_size) + new_fft_size[-1] = new_fft_size[-1] // 2 + 1 # Redundant last coefficient + new_fft_size_c = [min(i, j) for (i, j) in zip(new_fft_size, X.shape[-len(axis) :])] + out_fft = paddle.zeros( + [x.shape[0], x.shape[1], *new_fft_size], dtype=paddle.complex64 + ) + + mode_indexing = [((None, m // 2), (-m // 2, None)) for m in new_fft_size_c[:-1]] + [ + ((None, new_fft_size_c[-1]),) + ] + for i, boundaries in enumerate(itertools.product(*mode_indexing)): + + idx_tuple = [slice(None), slice(None)] + [slice(*b) for b in boundaries] + + out_fft[idx_tuple] = X[idx_tuple] + y = paddle.fft.irfftn(out_fft, s=new_size, norm="forward", axes=axis) + + return y + + +class FactorizedTensor(nn.Layer): + def __init__(self, shape, init_scale): + super().__init__() + self.shape = shape + self.init_scale = init_scale + self.real = self.create_parameter( + shape=shape, + ) + self.real = initializer.normal_(self.real, 0, init_scale) + self.imag = self.create_parameter(shape=shape) + self.imag = initializer.normal_(self.imag, 0, init_scale) + + def __repr__(self): + return f"FactorizedTensor(shape={self.shape})" + + @property + def data(self): + return paddle.complex(self.real, self.imag) + + +class FactorizedSpectralConv(nn.Layer): + """Generic N-Dimensional Fourier Neural Operator + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. 
+        n_modes (Tuple[int, ...]): Number of modes to use for contraction in the Fourier domain during training.
+            .. warning::
+                We take care of the redundancy in the Fourier modes, therefore, for an input
+                of size I_1, ..., I_N, please provide modes M_K that are I_1 < M_K <= I_N.
+                We will automatically keep the right amount of modes: specifically, for the
+                last mode only, if you specify M_N modes we will use M_N // 2 + 1 modes,
+                as the real FFT is redundant along that last dimension.
+
+            .. note::
+                Provided modes should be even integers; odd numbers will be rounded to the closest even number.
+                This can be updated dynamically during training.
+        max_n_modes (int, optional): * If not None, **maximum** number of modes to keep
+            in the Fourier Layer, along each dim. The number of modes (`n_modes`)
+            cannot be increased beyond that.
+            * If None, all the n_modes are used. Defaults to None.
+        bias (bool, optional): Whether to use bias in the layers. Defaults to True.
+        n_layers (int, optional): Number of Fourier Layers. Defaults to 1.
+        separable (bool, optional): Whether to use separable Fourier Conv. Defaults to False.
+        output_scaling_factor (Optional[Union[Number, List[Number]]], optional): Scaling factor for the
+            output. Defaults to None.
+        rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 0.5.
+        factorization (str, optional): Tensor factorization of the parameter weights to use.
+            * If None, a dense tensor parametrizes the Spectral convolutions.
+            * Otherwise, the specified tensor factorization is used. Defaults to None.
+        implementation (str, optional): If factorization is not None, forward mode to use.
+            * `reconstructed`: the full weight tensor is reconstructed from the
+                factorization and used for the forward pass.
+            * `factorized`: the input is directly contracted with the factors of
+                the decomposition. Defaults to "reconstructed".
+        joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a
+            single tensor. Defaults to False.
+        init_std (str, optional): The std to use for the init. Defaults to "auto".
+        fft_norm (str, optional): The normalization mode for the FFT. Defaults to "backward".
+ """ + + def __init__( + self, + in_channels: int, + out_channels: int, + n_modes: Tuple[int, ...], + max_n_modes: int = None, + bias: bool = True, + n_layers: int = 1, + separable: bool = False, + output_scaling_factor: Optional[Union[Number, List[Number]]] = None, + rank: float = 0.5, + factorization: str = None, + implementation: str = "reconstructed", + joint_factorization: bool = False, + init_std: str = "auto", + fft_norm: str = "backward", + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.joint_factorization = joint_factorization + self.n_modes = n_modes + + self.order = len(self.n_modes) + if max_n_modes is None: + max_n_modes = self.n_modes + elif isinstance(max_n_modes, int): + max_n_modes = [max_n_modes] + self.max_n_modes = max_n_modes + self.rank = rank + self.factorization = factorization + self.n_layers = n_layers + self.implementation = implementation + + self.output_scaling_factor: Union[ + None, List[List[float]] + ] = validate_scaling_factor(output_scaling_factor, self.order, n_layers) + + if init_std == "auto": + init_std = (2 / (in_channels + out_channels)) ** 0.5 + else: + init_std = init_std + self.fft_norm = fft_norm + if factorization is None: + factorization = "Dense" + if not factorization.lower().startswith("complex"): + factorization = f"Complex{factorization}" + if separable: + if in_channels != out_channels: + raise ValueError( + f"To use separable Fourier Conv, in_channels must be equal to out_channels, but got in_channels={in_channels} and out_channels={out_channels}" + ) + weight_shape = (in_channels, *max_n_modes) + else: + weight_shape = (in_channels, out_channels, *max_n_modes) + self.separable = separable + if joint_factorization: + self.weight = paddle.create_parameter( + shape=(n_layers, *weight_shape), + dtype="float32", + ) + self.weight = initializer.normal_(self.weight, 0, init_std) + else: + self.weight = nn.LayerList( + [ + FactorizedTensor(weight_shape, init_scale=init_std) + for _ in range(n_layers) + ] + ) + + self._contract = get_contract_fun( + self.weight[0].data, implementation=implementation, separable=separable + ) + if bias: + shape = (n_layers, self.out_channels) + (1,) * self.order + init_bias = init_std * paddle.randn(shape) + self.bias = paddle.create_parameter( + shape=shape, + dtype=(init_bias.dtype), + default_initializer=nn.initializer.Assign(init_bias), + ) + self.bias.stop_gradient = False + else: + self.bias = None + + @property + def n_modes(self): + return self._n_modes + + @n_modes.setter + def n_modes(self, n_modes): + if isinstance(n_modes, int): # Should happen for 1D FNO only + n_modes = [n_modes] + else: + n_modes = list(n_modes) + # The last mode has a redundacy as we use real FFT + # As a design choice we do the operation here to avoid users dealing with the +1 + n_modes[-1] = n_modes[-1] // 2 + 1 + self._n_modes = n_modes + + def transform(self, x, layer_index=0, output_shape=None): + in_shape = list(x.shape[2:]) + + if self.output_scaling_factor is not None and output_shape is None: + out_shape = tuple( + [ + round(s * r) + for (s, r) in zip(in_shape, self.output_scaling_factor[layer_index]) + ] + ) + elif output_shape is not None: + out_shape = output_shape + else: + out_shape = in_shape + if in_shape == out_shape: + return x + else: + return resample( + x, + 1.0, + list(range(2, x.ndim)), + output_shape=out_shape, + ) + + def forward( + self, + x: paddle.Tensor, + indices: int = 0, + output_shape: Optional[Tuple[int]] = None, + ): + batchsize, channels, 
*mode_sizes = x.shape + fft_size = list(mode_sizes) + fft_size[-1] = fft_size[-1] // 2 + 1 + fft_dims = list(range(-self.order, 0)) + + x = paddle.fft.rfftn(x=x, norm=self.fft_norm, axes=fft_dims) + + if self.order > 1: + x = paddle.fft.fftshift(x=x, axes=fft_dims[:-1]) + + out_fft = paddle.zeros( + shape=[batchsize, self.out_channels, *fft_size], dtype=paddle.complex64 + ) + + starts = [ + (max_modes - min(size, n_mode)) + for size, n_mode, max_modes in zip(fft_size, self.n_modes, self.max_n_modes) + ] + slices_w = [slice(None), slice(None)] + slices_w += [ + (slice(start // 2, -start // 2) if start else slice(start, None)) + for start in starts[:-1] + ] + slices_w += [slice(None, -starts[-1]) if starts[-1] else slice(None)] + + w_real = self.weight[indices].real[ + slices_w[0], slices_w[1], slices_w[2], slices_w[3] + ] + w_imag = self.weight[indices].imag[ + slices_w[0], slices_w[1], slices_w[2], slices_w[3] + ] + + starts = [ + (size - min(size, n_mode)) + for (size, n_mode) in zip(list(x.shape[2:]), list(w_real.shape[2:])) + ] + slices_x = [slice(None), slice(None)] # Batch_size, channels + slices_x += [ + slice(start // 2, -start // 2) if start else slice(start, None) + for start in starts[:-1] + ] + slices_x += [ + slice(None, -starts[-1]) if starts[-1] else slice(None) + ] # The last mode already has redundant half removed + idx_tuple = slices_x + if len(idx_tuple) == 4: + out_fft[ + idx_tuple[0], idx_tuple[1], idx_tuple[2], idx_tuple[3] + ] = self._contract( + x[idx_tuple[0], idx_tuple[1], idx_tuple[2], idx_tuple[3]], + w_real, + w_imag, + separable=self.separable, + ) + elif len(idx_tuple) == 3: + out_fft[idx_tuple[0], idx_tuple[1], idx_tuple[2]] = self._contract( + x[idx_tuple[0], idx_tuple[1], idx_tuple[2]], + w_real, + w_imag, + separable=self.separable, + ) + else: + raise ValueError("Not implemented") + + if self.output_scaling_factor is not None and output_shape is None: + mode_sizes = tuple( + [ + round(s * r) + for (s, r) in zip(mode_sizes, self.output_scaling_factor[indices]) + ] + ) + + if output_shape is not None: + mode_sizes = output_shape + + if self.order > 1: + out_fft = paddle.fft.fftshift(x=out_fft, axes=fft_dims[:-1]) + + x = paddle.fft.irfftn( + x=out_fft, s=mode_sizes, axes=fft_dims, norm=self.fft_norm + ) + if self.bias is not None: + x = x + self.bias[indices, ...] + return x + + +class FactorizedSpectralConv1d(FactorizedSpectralConv): + """1D Spectral Conv + + This is provided for reference only, + see :class:`FactorizedSpectralConv` for the preferred, general implementation + """ + + def forward(self, x, indices=0): + batchsize, channels, width = x.shape + + x = paddle.fft.rfft(x, norm=self.fft_norm) + + out_fft = paddle.zeros( + shape=[batchsize, self.out_channels, width // 2 + 1], dtype=paddle.complex64 + ) + + slices = ( + slice(None), # Equivalent to: [:, + slice(None), # ............... :, + slice(None, self.n_modes[0]), # :half_n_modes[0]] + ) + + w_real = self.weight[indices].real[slices[0], slices[1], slices[2]] + w_imag = self.weight[indices].imag[slices[0], slices[1], slices[2]] + + out_fft[slices[0], slices[1], slices[2]] = self._contract( + x[slices[0], slices[1], slices[2]], + w_real, + w_imag, + separable=self.separable, + ) + + if self.output_scaling_factor is not None: + width = round(width * self.output_scaling_factor[0]) + + x = paddle.fft.irfft(out_fft, n=width, norm=self.fft_norm) + + if self.bias is not None: + x = x + self.bias[indices, ...] 
+ + return x + + +class FactorizedSpectralConv2d(FactorizedSpectralConv): + """2D Spectral Conv. + + This is provided for reference only, + see :class:`FactorizedSpectralConv` for the preferred, general implementation + """ + + def forward(self, x, indices=0): + batchsize, channels, height, width = x.shape + + x = paddle.fft.rfft2(x.float(), norm=self.fft_norm, axes=(-2, -1)) + + # The output will be of size (batch_size, self.out_channels, + # x.size(-2), x.size(-1)//2 + 1) + out_fft = paddle.zeros( + shape=[batchsize, self.out_channels, height, width // 2 + 1], + dtype=paddle.complex64, + ) + + slices0 = ( + slice(None), # Equivalent to: [:, + slice(None), # ............... :, + slice(self.n_modes[0] // 2), # :half_n_modes[0], + slice(self.n_modes[1]), # :half_n_modes[1]] + ) + slices1 = ( + slice(None), # Equivalent to: [:, + slice(None), # ...................... :, + slice(-self.n_modes[0] // 2, None), # -half_n_modes[0]:, + slice(self.n_modes[1]), # ...... :half_n_modes[1]] + ) + logger.message( + f"2D: {x[slices0].shape=}, {self._get_weight(indices)[slices0].shape=}, {self._get_weight(indices).shape=}" + ) + + w_real = self.weight[indices].real[ + slices1[0], slices1[1], slices1[2], slices1[3] + ] + w_imag = self.weight[indices].imag[ + slices1[0], slices1[1], slices1[2], slices1[3] + ] + + """Upper block (truncate high frequencies).""" + out_fft[slices0[0], slices0[1], slices0[2], slices0[3]] = self._contract( + x[slices0[0], slices0[1], slices0[2], slices0[3]], + w_real, + w_imag, + separable=self.separable, + ) + + w_real = self.weight[indices].real[ + slices0[0], slices0[1], slices0[2], slices0[3] + ] + w_imag = self.weight[indices].imag[ + slices0[0], slices0[1], slices0[2], slices0[3] + ] + + """Lower block""" + out_fft[slices1[0], slices1[1], slices1[2], slices1[3]] = self._contract( + x[slices1[0], slices1[1], slices1[2], slices1[3]], + w_real, + w_imag, + separable=self.separable, + ) + + if self.output_scaling_factor is not None: + width = round(width * self.output_scaling_factor[indices][0]) + height = round(height * self.output_scaling_factor[indices][1]) + + x = paddle.fft.irfft2( + out_fft, s=(height, width), axes=(-2, -1), norm=self.fft_norm + ) + + if self.bias is not None: + x = x + self.bias[indices, ...] + + return x + + +class FactorizedSpectralConv3d(FactorizedSpectralConv): + """3D Spectral Conv. + + This is provided for reference only, + see :class:`FactorizedSpectralConv` for the preferred, general implementation + """ + + def forward(self, x, indices=0): + batchsize, channels, height, width, depth = x.shape + + x = paddle.fft.rfftn(x.float(), norm=self.fft_norm, axes=[-3, -2, -1]) + + out_fft = paddle.zeros( + shape=[batchsize, self.out_channels, height, width, depth // 2 + 1], + dtype=paddle.complex64, + ) + + slices0 = ( + slice(None), # Equivalent to: [:, + slice(None), # ............... :, + slice(self.n_modes[0] // 2), # :half_n_modes[0], + slice(self.n_modes[1] // 2), # :half_n_modes[1], + slice(self.n_modes[2]), # :half_n_modes[2]] + ) + slices1 = ( + slice(None), # Equivalent to: [:, + slice(None), # ...................... :, + slice(self.n_modes[0] // 2), # ...... :half_n_modes[0], + slice(-self.n_modes[1] // 2, None), # -half_n_modes[1]:, + slice(self.n_modes[2]), # ...... :half_n_modes[0]] + ) + slices2 = ( + slice(None), # Equivalent to: [:, + slice(None), # ...................... :, + slice(-self.n_modes[0] // 2, None), # -half_n_modes[0]:, + slice(self.n_modes[1] // 2), # ...... :half_n_modes[1], + slice(self.n_modes[2]), # ...... 
:half_n_modes[2]] + ) + slices3 = ( + slice(None), # Equivalent to: [:, + slice(None), # ...................... :, + slice(-self.n_modes[0] // 2, None), # -half_n_modes[0], + slice(-self.n_modes[1] // 2, None), # -half_n_modes[1], + slice(self.n_modes[2]), # ...... :half_n_modes[2]] + ) + + w_real = self.weight[indices].real[ + slices3[0], slices3[1], slices3[2], slices3[3], slices3[4] + ] + w_imag = self.weight[indices].imag[ + slices3[0], slices3[1], slices3[2], slices3[3], slices3[4] + ] + + """Upper block -- truncate high frequencies.""" + out_fft[ + slices0[0], slices0[1], slices0[2], slices0[3], slices0[4] + ] = self._contract( + x[slices0[0], slices0[1], slices0[2], slices0[3], slices0[4]], + w_real, + w_imag, + separable=self.separable, + ) + + w_real = self.weight[indices].real[ + slices2[0], slices2[1], slices2[2], slices2[3], slices2[4] + ] + w_imag = self.weight[indices].imag[ + slices2[0], slices2[1], slices2[2], slices2[3], slices2[4] + ] + """Low-pass filter for indices 2 & 4, and high-pass filter for index 3.""" + out_fft[ + slices1[0], slices1[1], slices1[2], slices1[3], slices1[4] + ] = self._contract( + x[slices1[0], slices1[1], slices1[2], slices1[3], slices1[4]], + w_real, + w_imag, + separable=self.separable, + ) + + w_real = self.weight[indices].real[ + slices1[0], slices1[1], slices1[2], slices1[3], slices1[4] + ] + w_imag = self.weight[indices].imag[ + slices1[0], slices1[1], slices1[2], slices1[3], slices1[4] + ] + """Low-pass filter for indices 3 & 4, and high-pass filter for index 2.""" + out_fft[ + slices2[0], slices2[1], slices2[2], slices2[3], slices2[4] + ] = self._contract( + x[slices2[0], slices2[1], slices2[2], slices2[3], slices2[4]], + w_real, + w_imag, + separable=self.separable, + ) + + w_real = self.weight[indices].real[ + slices0[0], slices0[1], slices0[2], slices0[3], slices0[4] + ] + w_imag = self.weight[indices].imag[ + slices0[0], slices0[1], slices0[2], slices0[3], slices0[4] + ] + """Lower block -- low-cut filter in indices 2 & 3 + and high-cut filter in index 4.""" + out_fft[ + slices3[0], slices3[1], slices3[2], slices3[3], slices3[4] + ] = self._contract( + x[slices3[0], slices3[1], slices3[2], slices3[3], slices3[4]], + w_real, + w_imag, + separable=self.separable, + ) + + if self.output_scaling_factor is not None: + width = round(width * self.output_scaling_factor[0]) + height = round(height * self.output_scaling_factor[1]) + depth = round(depth * self.output_scaling_factor[2]) + + x = paddle.fft.irfftn( + out_fft, s=(height, width, depth), axes=[-3, -2, -1], norm=self.fft_norm + ) + + if self.bias is not None: + x = x + self.bias[indices, ...] 
+ return x + + +class FNOBlocks(nn.Layer): + def __init__( + self, + in_channels: int, + out_channels: int, + n_modes: Tuple[int, ...], + output_scaling_factor: Optional[Union[Number, List[Number]]] = None, + n_layers: int = 1, + max_n_modes: int = None, + use_mlp: bool = False, + mlp: Optional[Dict[str, float]] = None, + non_linearity: nn.functional = F.gelu, + stabilizer: str = None, + norm: str = None, + ada_in_features: Optional[int] = None, + preactivation: bool = False, + fno_skip: str = "linear", + mlp_skip: str = "soft-gating", + separable: bool = False, + factorization: str = None, + rank: float = 1.0, + SpectralConv: FactorizedSpectralConv = FactorizedSpectralConv, + joint_factorization: bool = False, + implementation: str = "factorized", + fft_norm: str = "forward", + **kwargs, + ): + super().__init__() + if isinstance(n_modes, int): + n_modes = [n_modes] + self._n_modes = n_modes + self.n_dim = len(n_modes) + + self.max_n_modes = max_n_modes + self.in_channels = in_channels + self.out_channels = out_channels + self.n_layers = n_layers + self.joint_factorization = joint_factorization + self.non_linearity = non_linearity + self.rank = rank + self.factorization = factorization + self.fno_skip = fno_skip + self.mlp_skip = mlp_skip + self.use_mlp = use_mlp + self.fft_norm = fft_norm + self.implementation = implementation + self.separable = separable + self.preactivation = preactivation + self.ada_in_features = ada_in_features + self.stabilizer = stabilizer + self.norm = norm + + self.convs = SpectralConv( + self.in_channels, + self.out_channels, + self.n_modes, + output_scaling_factor=output_scaling_factor, + max_n_modes=max_n_modes, + rank=rank, + implementation=implementation, + separable=separable, + factorization=factorization, + joint_factorization=joint_factorization, + n_layers=n_layers, + ) + + self.fno_skips = nn.LayerList( + [ + skip_connection( + self.in_channels, + self.out_channels, + type=fno_skip, + n_dim=self.n_dim, + ) + for _ in range(n_layers) + ] + ) + + if use_mlp: + self.mlp = nn.LayerList( + [ + MLP( + in_channels=self.out_channels, + hidden_channels=int( + round(self.out_channels * mlp["expansion"]) + ), + dropout=mlp["dropout"], + n_dim=self.n_dim, + ) + for _ in range(n_layers) + ] + ) + self.mlp_skips = nn.LayerList( + [ + skip_connection( + self.in_channels, + self.out_channels, + type=mlp_skip, + n_dim=self.n_dim, + ) + for _ in range(n_layers) + ] + ) + else: + self.mlp = None + + # Each block will have 2 norms if we also use an MLP + self.n_norms = 1 if self.mlp is None else 2 + if norm is None: + self.norm = None + elif norm == "instance_norm": + self.norm = nn.LayerList( + [ + getattr(nn, f"InstanceNorm{self.n_dim}d")( + num_features=self.out_channels + ) + for _ in range(n_layers * self.n_norms) + ] + ) + elif norm == "group_norm": + self.norm = nn.LayerList( + [ + nn.GroupNorm(num_groups=1, num_channels=self.out_channels) + for _ in range(n_layers * self.n_norms) + ] + ) + elif norm == "ada_in": + self.norm = nn.LayerList( + [ + AdaIN(ada_in_features, out_channels) + for _ in range(n_layers * self.n_norms) + ] + ) + else: + raise ValueError( + f"Got {norm} but expected None or one of [instance_norm, group_norm, layer_norm]" + ) + + def forward(self, x, index=0, output_shape=None): + if self.preactivation: + return self.forward_with_preactivation(x, index, output_shape=output_shape) + else: + return self.forward_with_postactivation(x, index, output_shape=output_shape) + + def forward_with_postactivation(self, x, index=0, output_shape=None): + 
x_skip_fno = self.fno_skips[index](x) + x_skip_fno = self.convs.transform(x_skip_fno, index, output_shape=output_shape) + if self.mlp is not None: + x_skip_mlp = self.mlp_skips[index](x) + x_skip_mlp = self.convs.transform( + x_skip_mlp, index, output_shape=output_shape + ) + if self.stabilizer == "tanh": + x = paddle.tanh(x) + + x_fno = self.convs(x, index, output_shape=output_shape) + if self.norm is not None: + x_fno = self.norm[self.n_norms * index](x_fno) + + x = x_fno + x_skip_fno + + if (self.mlp is not None) or (index < (self.n_layers - 1)): + x = self.non_linearity(x) + + if self.mlp is not None: + x = self.mlp[index](x) + x_skip_mlp + + if self.norm is not None: + x = self.norm[self.n_norms * index + 1](x) + + if index < (self.n_layers - 1): + x = self.non_linearity(x) + + return x + + def forward_with_preactivation(self, x, index=0, output_shape=None): + # Apply non-linear activation (and norm) + # before this block's convolution/forward pass: + x = self.non_linearity(x) + + if self.norm is not None: + x = self.norm[self.n_norms * index](x) + + x_skip_fno = self.fno_skips[index](x) + x_skip_fno = self.convs.transform(x_skip_fno, index, output_shape=output_shape) + + if self.mlp is not None: + x_skip_mlp = self.mlp_skips[index](x) + x_skip_mlp = self.convs.transform( + x_skip_mlp, index, output_shape=output_shape + ) + + if self.stabilizer == "tanh": + x = paddle.tanh(x) + + x_fno = self.convs(x, index, output_shape=output_shape) + x = x_fno + x_skip_fno + + if self.mlp is not None: + if index < (self.n_layers - 1): + x = self.non_linearity(x) + + if self.norm is not None: + x = self.norm[self.n_norms * index + 1](x) + + x = self.mlp[index](x) + x_skip_mlp + + return x + + @property + def n_modes(self): + return self._n_modes + + @n_modes.setter + def n_modes(self, n_modes): + if isinstance(n_modes, int): # Should happen for 1D FNO only + n_modes = [n_modes] + else: + n_modes = list(n_modes) + # The last mode has a redundacy as we use real FFT + # As a design choice we do the operation here to avoid users dealing with the +1 + n_modes[-1] = n_modes[-1] // 2 + 1 + self._n_modes = n_modes diff --git a/ppsci/arch/gan.py b/ppsci/arch/gan.py index a673b8e440..195bca6678 100644 --- a/ppsci/arch/gan.py +++ b/ppsci/arch/gan.py @@ -1,400 +1,400 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
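Aside on the fno.py hunk above: the block-stacking API is hard to read straight from the diff, so here is a minimal, hedged usage sketch of FNOBlocks as defined in the added code. The import path `fno_layers` is hypothetical; only constructor arguments that appear in the diff are relied upon.

import paddle

# Hypothetical import path; in the patched tree these classes live in the
# edited fno module, so adjust the import accordingly.
from fno_layers import FNOBlocks

# 4 samples, 8 channels, on a 32x32 grid.
x = paddle.randn([4, 8, 32, 32])

# One Fourier layer keeping 16 modes per spatial dimension. in_channels equals
# out_channels here so that the default "soft-gating" MLP skip is valid.
block = FNOBlocks(
    in_channels=8,
    out_channels=8,
    n_modes=(16, 16),
    n_layers=1,
    use_mlp=True,
    mlp={"expansion": 0.5, "dropout": 0.0},
    norm="group_norm",
)

y = block(x, index=0)
print(y.shape)  # expected: [4, 8, 32, 32]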
- -from __future__ import annotations - -from typing import Dict -from typing import List -from typing import Tuple - -import paddle -import paddle.nn as nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base - - -class Conv2DBlock(nn.Layer): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - stride, - use_bn, - act, - mean, - std, - value, - ): - super().__init__() - weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=mean, std=std) - ) - bias_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(value=value)) - self.conv_2d = nn.Conv2D( - in_channel, - out_channel, - kernel_size, - stride, - padding="SAME", - weight_attr=weight_attr, - bias_attr=bias_attr, - ) - self.bn = nn.BatchNorm2D(out_channel) if use_bn else None - self.act = act_mod.get_activation(act) if act else None - - def forward(self, x): - y = x - y = self.conv_2d(y) - if self.bn: - y = self.bn(y) - if self.act: - y = self.act(y) - return y - - -class VariantResBlock(nn.Layer): - def __init__( - self, - in_channel, - out_channels, - kernel_sizes, - strides, - use_bns, - acts, - mean, - std, - value, - ): - super().__init__() - self.conv_2d_0 = Conv2DBlock( - in_channel=in_channel, - out_channel=out_channels[0], - kernel_size=kernel_sizes[0], - stride=strides[0], - use_bn=use_bns[0], - act=acts[0], - mean=mean, - std=std, - value=value, - ) - self.conv_2d_1 = Conv2DBlock( - in_channel=out_channels[0], - out_channel=out_channels[1], - kernel_size=kernel_sizes[1], - stride=strides[1], - use_bn=use_bns[1], - act=acts[1], - mean=mean, - std=std, - value=value, - ) - - self.conv_2d_2 = Conv2DBlock( - in_channel=in_channel, - out_channel=out_channels[2], - kernel_size=kernel_sizes[2], - stride=strides[2], - use_bn=use_bns[2], - act=acts[2], - mean=mean, - std=std, - value=value, - ) - - self.act = act_mod.get_activation("relu") - - def forward(self, x): - y = x - y = self.conv_2d_0(y) - y = self.conv_2d_1(y) - short = self.conv_2d_2(x) - y = paddle.add(y, short) - y = self.act(y) - return y - - -class FCBlock(nn.Layer): - def __init__(self, in_channel, act, mean, std, value): - super().__init__() - self.flatten = nn.Flatten() - weight_attr = paddle.ParamAttr( - initializer=nn.initializer.Normal(mean=mean, std=std) - ) - bias_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(value=value)) - self.linear = nn.Linear( - in_channel, - 1, - weight_attr=weight_attr, - bias_attr=bias_attr, - ) - self.act = act_mod.get_activation(act) if act else None - - def forward(self, x): - y = x - y = self.flatten(y) - y = self.linear(y) - if self.act: - y = self.act(y) - return y - - -class Generator(base.Arch): - """Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is - unique to "tempoGAN" example but not an open source network. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2"). - in_channel (int): Number of input channels of the first conv layer. 
- out_channels_tuple (Tuple[Tuple[int, ...], ...]): Number of output channels of all conv layers, - such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] - kernel_sizes_tuple (Tuple[Tuple[int, ...], ...]): Number of kernel_size of all conv layers, - such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] - strides_tuple (Tuple[Tuple[int, ...], ...]): Number of stride of all conv layers, - such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] - use_bns_tuple (Tuple[Tuple[bool, ...], ...]): Whether to use the batch_norm layer after each conv layer. - acts_tuple (Tuple[Tuple[str, ...], ...]): Whether to use the activation layer after each conv layer. If so, witch activation to use, - such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] - - Examples: - >>> import ppsci - >>> in_channel = 1 - >>> rb_channel0 = (2, 8, 8) - >>> rb_channel1 = (128, 128, 128) - >>> rb_channel2 = (32, 8, 8) - >>> rb_channel3 = (2, 1, 1) - >>> out_channels_tuple = (rb_channel0, rb_channel1, rb_channel2, rb_channel3) - >>> kernel_sizes_tuple = (((5, 5), ) * 2 + ((1, 1), ), ) * 4 - >>> strides_tuple = ((1, 1, 1), ) * 4 - >>> use_bns_tuple = ((True, True, True), ) * 3 + ((False, False, False), ) - >>> acts_tuple = (("relu", None, None), ) * 4 - >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) - >>> batch_size = 4 - >>> height = 64 - >>> width = 64 - >>> input_data = paddle.randn([batch_size, in_channel, height, width]) - >>> input_dict = {'in': input_data} - >>> output_data = model(input_dict) - >>> print(output_data['out'].shape) - [4, 1, 64, 64] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - in_channel: int, - out_channels_tuple: Tuple[Tuple[int, ...], ...], - kernel_sizes_tuple: Tuple[Tuple[int, ...], ...], - strides_tuple: Tuple[Tuple[int, ...], ...], - use_bns_tuple: Tuple[Tuple[bool, ...], ...], - acts_tuple: Tuple[Tuple[str, ...], ...], - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.in_channel = in_channel - self.out_channels_tuple = out_channels_tuple - self.kernel_sizes_tuple = kernel_sizes_tuple - self.strides_tuple = strides_tuple - self.use_bns_tuple = use_bns_tuple - self.acts_tuple = acts_tuple - - self.init_blocks() - - def init_blocks(self): - blocks_list = [] - for i in range(len(self.out_channels_tuple)): - in_channel = ( - self.in_channel if i == 0 else self.out_channels_tuple[i - 1][-1] - ) - blocks_list.append( - VariantResBlock( - in_channel=in_channel, - out_channels=self.out_channels_tuple[i], - kernel_sizes=self.kernel_sizes_tuple[i], - strides=self.strides_tuple[i], - use_bns=self.use_bns_tuple[i], - acts=self.acts_tuple[i], - mean=0.0, - std=0.04, - value=0.1, - ) - ) - self.blocks = nn.LayerList(blocks_list) - - def forward_tensor(self, x): - y = x - for block in self.blocks: - y = block(y) - return y - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - y = self.concat_to_tensor(x, self.input_keys, axis=-1) - y = self.forward_tensor(y) - y = self.split_to_dict(y, self.output_keys, axis=-1) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y - - -class Discriminator(base.Arch): - """Discriminator Net of GAN. 
- - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2"). - in_channel (int): Number of input channels of the first conv layer. - out_channels (Tuple[int, ...]): Number of output channels of all conv layers, - such as (out_conv0, out_conv1, out_conv2). - fc_channel (int): Number of input features of linear layer. Number of output features of the layer - is set to 1 in this Net to construct a fully_connected layer. - kernel_sizes (Tuple[int, ...]): Number of kernel_size of all conv layers, - such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). - strides (Tuple[int, ...]): Number of stride of all conv layers, - such as (stride_conv0, stride_conv1, stride_conv2). - use_bns (Tuple[bool, ...]): Whether to use the batch_norm layer after each conv layer. - acts (Tuple[str, ...]): Whether to use the activation layer after each conv layer. If so, witch activation to use, - such as (act_conv0, act_conv1, act_conv2). - - Examples: - >>> import ppsci - >>> in_channel = 2 - >>> in_channel_tempo = 3 - >>> out_channels = (32, 64, 128, 256) - >>> fc_channel = 65536 - >>> kernel_sizes = ((4, 4), (4, 4), (4, 4), (4, 4)) - >>> strides = (2, 2, 2, 1) - >>> use_bns = (False, True, True, True) - >>> acts = ("leaky_relu", "leaky_relu", "leaky_relu", "leaky_relu", None) - >>> output_keys_disc = ("out_1", "out_2", "out_3", "out_4", "out_5", "out_6", "out_7", "out_8", "out_9", "out_10") - >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) - >>> input_data = [paddle.to_tensor(paddle.randn([1, in_channel, 128, 128])),paddle.to_tensor(paddle.randn([1, in_channel, 128, 128]))] - >>> input_dict = {"in_1": input_data[0],"in_2": input_data[1]} - >>> out_dict = model(input_dict) - >>> for k, v in out_dict.items(): - ... 
print(k, v.shape) - out_1 [1, 32, 64, 64] - out_2 [1, 64, 32, 32] - out_3 [1, 128, 16, 16] - out_4 [1, 256, 16, 16] - out_5 [1, 1] - out_6 [1, 32, 64, 64] - out_7 [1, 64, 32, 32] - out_8 [1, 128, 16, 16] - out_9 [1, 256, 16, 16] - out_10 [1, 1] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - in_channel: int, - out_channels: Tuple[int, ...], - fc_channel: int, - kernel_sizes: Tuple[int, ...], - strides: Tuple[int, ...], - use_bns: Tuple[bool, ...], - acts: Tuple[str, ...], - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.in_channel = in_channel - self.out_channels = out_channels - self.fc_channel = fc_channel - self.kernel_sizes = kernel_sizes - self.strides = strides - self.use_bns = use_bns - self.acts = acts - - self.init_layers() - - def init_layers(self): - layers_list = [] - for i in range(len(self.out_channels)): - in_channel = self.in_channel if i == 0 else self.out_channels[i - 1] - layers_list.append( - Conv2DBlock( - in_channel=in_channel, - out_channel=self.out_channels[i], - kernel_size=self.kernel_sizes[i], - stride=self.strides[i], - use_bn=self.use_bns[i], - act=self.acts[i], - mean=0.0, - std=0.04, - value=0.1, - ) - ) - - layers_list.append( - FCBlock(self.fc_channel, self.acts[4], mean=0.0, std=0.04, value=0.1) - ) - self.layers = nn.LayerList(layers_list) - - def forward_tensor(self, x): - y = x - y_list = [] - for layer in self.layers: - y = layer(y) - y_list.append(y) - return y_list # y_conv1, y_conv2, y_conv3, y_conv4, y_fc(y_out) - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - y_list = [] - # y1_conv1, y1_conv2, y1_conv3, y1_conv4, y1_fc, y2_conv1, y2_conv2, y2_conv3, y2_conv4, y2_fc - for k in x: - y_list.extend(self.forward_tensor(x[k])) - - y = self.split_to_dict(y_list, self.output_keys) - - if self._output_transform is not None: - y = self._output_transform(x, y) - - return y - - @staticmethod - def split_to_dict( - data_list: List[paddle.Tensor], keys: Tuple[str, ...] - ) -> Dict[str, paddle.Tensor]: - """Overwrite of split_to_dict() method belongs to Class base.Arch. - - Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. - That is because input in "tempoGAN" example is not in a regular format, but a format like: - { - "input1": paddle.concat([in1, in2], axis=1), - "input2": paddle.concat([in1, in3], axis=1), - } - - Args: - data_list (List[paddle.Tensor]): The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. - keys (Tuple[str, ...]): Keys of outputs. - - Returns: - Dict[str, paddle.Tensor]: Dict with split data. - """ - if len(keys) == 1: - return {keys[0]: data_list[0]} - return {key: data_list[i] for i, key in enumerate(keys)} +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
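Aside on the gan.py hunk: the Discriminator docstring above notes that the tempoGAN example feeds the network a dict whose values are already concatenated along the channel axis, which is why split_to_dict is overridden instead of relying on concat_to_tensor. Below is a hedged sketch of that input format; the field names are made up for illustration and only paddle.concat/paddle.randn are assumed.

import paddle

# Made-up field tensors; only their shapes matter for the illustration.
density_low = paddle.randn([1, 1, 128, 128])
density_high = paddle.randn([1, 1, 128, 128])
velocity = paddle.randn([1, 1, 128, 128])

# Each dict value is pre-concatenated along the channel axis, so it already
# matches the discriminator's in_channel (2 here, as in the doctest); forward()
# then loops over the keys and extends the output list with the per-branch outputs.
input_dict = {
    "in_1": paddle.concat([density_low, density_high], axis=1),
    "in_2": paddle.concat([density_low, velocity], axis=1),
}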
+ +from __future__ import annotations + +from typing import Dict +from typing import List +from typing import Tuple + +import paddle +import paddle.nn as nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base + + +class Conv2DBlock(nn.Layer): + def __init__( + self, + in_channel, + out_channel, + kernel_size, + stride, + use_bn, + act, + mean, + std, + value, + ): + super().__init__() + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.Normal(mean=mean, std=std) + ) + bias_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(value=value)) + self.conv_2d = nn.Conv2D( + in_channel, + out_channel, + kernel_size, + stride, + padding="SAME", + weight_attr=weight_attr, + bias_attr=bias_attr, + ) + self.bn = nn.BatchNorm2D(out_channel) if use_bn else None + self.act = act_mod.get_activation(act) if act else None + + def forward(self, x): + y = x + y = self.conv_2d(y) + if self.bn: + y = self.bn(y) + if self.act: + y = self.act(y) + return y + + +class VariantResBlock(nn.Layer): + def __init__( + self, + in_channel, + out_channels, + kernel_sizes, + strides, + use_bns, + acts, + mean, + std, + value, + ): + super().__init__() + self.conv_2d_0 = Conv2DBlock( + in_channel=in_channel, + out_channel=out_channels[0], + kernel_size=kernel_sizes[0], + stride=strides[0], + use_bn=use_bns[0], + act=acts[0], + mean=mean, + std=std, + value=value, + ) + self.conv_2d_1 = Conv2DBlock( + in_channel=out_channels[0], + out_channel=out_channels[1], + kernel_size=kernel_sizes[1], + stride=strides[1], + use_bn=use_bns[1], + act=acts[1], + mean=mean, + std=std, + value=value, + ) + + self.conv_2d_2 = Conv2DBlock( + in_channel=in_channel, + out_channel=out_channels[2], + kernel_size=kernel_sizes[2], + stride=strides[2], + use_bn=use_bns[2], + act=acts[2], + mean=mean, + std=std, + value=value, + ) + + self.act = act_mod.get_activation("relu") + + def forward(self, x): + y = x + y = self.conv_2d_0(y) + y = self.conv_2d_1(y) + short = self.conv_2d_2(x) + y = paddle.add(y, short) + y = self.act(y) + return y + + +class FCBlock(nn.Layer): + def __init__(self, in_channel, act, mean, std, value): + super().__init__() + self.flatten = nn.Flatten() + weight_attr = paddle.ParamAttr( + initializer=nn.initializer.Normal(mean=mean, std=std) + ) + bias_attr = paddle.ParamAttr(initializer=nn.initializer.Constant(value=value)) + self.linear = nn.Linear( + in_channel, + 1, + weight_attr=weight_attr, + bias_attr=bias_attr, + ) + self.act = act_mod.get_activation(act) if act else None + + def forward(self, x): + y = x + y = self.flatten(y) + y = self.linear(y) + if self.act: + y = self.act(y) + return y + + +class Generator(base.Arch): + """Generator Net of GAN. Attention, the net using a kind of variant of ResBlock which is + unique to "tempoGAN" example but not an open source network. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2"). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2"). + in_channel (int): Number of input channels of the first conv layer. 
+ out_channels_tuple (Tuple[Tuple[int, ...], ...]): Number of output channels of all conv layers, + such as [[out_res0_conv0, out_res0_conv1], [out_res1_conv0, out_res1_conv1]] + kernel_sizes_tuple (Tuple[Tuple[int, ...], ...]): Number of kernel_size of all conv layers, + such as [[kernel_size_res0_conv0, kernel_size_res0_conv1], [kernel_size_res1_conv0, kernel_size_res1_conv1]] + strides_tuple (Tuple[Tuple[int, ...], ...]): Number of stride of all conv layers, + such as [[stride_res0_conv0, stride_res0_conv1], [stride_res1_conv0, stride_res1_conv1]] + use_bns_tuple (Tuple[Tuple[bool, ...], ...]): Whether to use the batch_norm layer after each conv layer. + acts_tuple (Tuple[Tuple[str, ...], ...]): Whether to use the activation layer after each conv layer. If so, witch activation to use, + such as [[act_res0_conv0, act_res0_conv1], [act_res1_conv0, act_res1_conv1]] + + Examples: + >>> import ppsci + >>> in_channel = 1 + >>> rb_channel0 = (2, 8, 8) + >>> rb_channel1 = (128, 128, 128) + >>> rb_channel2 = (32, 8, 8) + >>> rb_channel3 = (2, 1, 1) + >>> out_channels_tuple = (rb_channel0, rb_channel1, rb_channel2, rb_channel3) + >>> kernel_sizes_tuple = (((5, 5), ) * 2 + ((1, 1), ), ) * 4 + >>> strides_tuple = ((1, 1, 1), ) * 4 + >>> use_bns_tuple = ((True, True, True), ) * 3 + ((False, False, False), ) + >>> acts_tuple = (("relu", None, None), ) * 4 + >>> model = ppsci.arch.Generator(("in",), ("out",), in_channel, out_channels_tuple, kernel_sizes_tuple, strides_tuple, use_bns_tuple, acts_tuple) + >>> batch_size = 4 + >>> height = 64 + >>> width = 64 + >>> input_data = paddle.randn([batch_size, in_channel, height, width]) + >>> input_dict = {'in': input_data} + >>> output_data = model(input_dict) + >>> print(output_data['out'].shape) + [4, 1, 64, 64] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + in_channel: int, + out_channels_tuple: Tuple[Tuple[int, ...], ...], + kernel_sizes_tuple: Tuple[Tuple[int, ...], ...], + strides_tuple: Tuple[Tuple[int, ...], ...], + use_bns_tuple: Tuple[Tuple[bool, ...], ...], + acts_tuple: Tuple[Tuple[str, ...], ...], + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.in_channel = in_channel + self.out_channels_tuple = out_channels_tuple + self.kernel_sizes_tuple = kernel_sizes_tuple + self.strides_tuple = strides_tuple + self.use_bns_tuple = use_bns_tuple + self.acts_tuple = acts_tuple + + self.init_blocks() + + def init_blocks(self): + blocks_list = [] + for i in range(len(self.out_channels_tuple)): + in_channel = ( + self.in_channel if i == 0 else self.out_channels_tuple[i - 1][-1] + ) + blocks_list.append( + VariantResBlock( + in_channel=in_channel, + out_channels=self.out_channels_tuple[i], + kernel_sizes=self.kernel_sizes_tuple[i], + strides=self.strides_tuple[i], + use_bns=self.use_bns_tuple[i], + acts=self.acts_tuple[i], + mean=0.0, + std=0.04, + value=0.1, + ) + ) + self.blocks = nn.LayerList(blocks_list) + + def forward_tensor(self, x): + y = x + for block in self.blocks: + y = block(y) + return y + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + y = self.concat_to_tensor(x, self.input_keys, axis=-1) + y = self.forward_tensor(y) + y = self.split_to_dict(y, self.output_keys, axis=-1) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y + + +class Discriminator(base.Arch): + """Discriminator Net of GAN. 
+ + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2"). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2"). + in_channel (int): Number of input channels of the first conv layer. + out_channels (Tuple[int, ...]): Number of output channels of all conv layers, + such as (out_conv0, out_conv1, out_conv2). + fc_channel (int): Number of input features of linear layer. Number of output features of the layer + is set to 1 in this Net to construct a fully_connected layer. + kernel_sizes (Tuple[int, ...]): Number of kernel_size of all conv layers, + such as (kernel_size_conv0, kernel_size_conv1, kernel_size_conv2). + strides (Tuple[int, ...]): Number of stride of all conv layers, + such as (stride_conv0, stride_conv1, stride_conv2). + use_bns (Tuple[bool, ...]): Whether to use the batch_norm layer after each conv layer. + acts (Tuple[str, ...]): Whether to use the activation layer after each conv layer. If so, witch activation to use, + such as (act_conv0, act_conv1, act_conv2). + + Examples: + >>> import ppsci + >>> in_channel = 2 + >>> in_channel_tempo = 3 + >>> out_channels = (32, 64, 128, 256) + >>> fc_channel = 65536 + >>> kernel_sizes = ((4, 4), (4, 4), (4, 4), (4, 4)) + >>> strides = (2, 2, 2, 1) + >>> use_bns = (False, True, True, True) + >>> acts = ("leaky_relu", "leaky_relu", "leaky_relu", "leaky_relu", None) + >>> output_keys_disc = ("out_1", "out_2", "out_3", "out_4", "out_5", "out_6", "out_7", "out_8", "out_9", "out_10") + >>> model = ppsci.arch.Discriminator(("in_1","in_2"), output_keys_disc, in_channel, out_channels, fc_channel, kernel_sizes, strides, use_bns, acts) + >>> input_data = [paddle.to_tensor(paddle.randn([1, in_channel, 128, 128])),paddle.to_tensor(paddle.randn([1, in_channel, 128, 128]))] + >>> input_dict = {"in_1": input_data[0],"in_2": input_data[1]} + >>> out_dict = model(input_dict) + >>> for k, v in out_dict.items(): + ... 
print(k, v.shape) + out_1 [1, 32, 64, 64] + out_2 [1, 64, 32, 32] + out_3 [1, 128, 16, 16] + out_4 [1, 256, 16, 16] + out_5 [1, 1] + out_6 [1, 32, 64, 64] + out_7 [1, 64, 32, 32] + out_8 [1, 128, 16, 16] + out_9 [1, 256, 16, 16] + out_10 [1, 1] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + in_channel: int, + out_channels: Tuple[int, ...], + fc_channel: int, + kernel_sizes: Tuple[int, ...], + strides: Tuple[int, ...], + use_bns: Tuple[bool, ...], + acts: Tuple[str, ...], + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.in_channel = in_channel + self.out_channels = out_channels + self.fc_channel = fc_channel + self.kernel_sizes = kernel_sizes + self.strides = strides + self.use_bns = use_bns + self.acts = acts + + self.init_layers() + + def init_layers(self): + layers_list = [] + for i in range(len(self.out_channels)): + in_channel = self.in_channel if i == 0 else self.out_channels[i - 1] + layers_list.append( + Conv2DBlock( + in_channel=in_channel, + out_channel=self.out_channels[i], + kernel_size=self.kernel_sizes[i], + stride=self.strides[i], + use_bn=self.use_bns[i], + act=self.acts[i], + mean=0.0, + std=0.04, + value=0.1, + ) + ) + + layers_list.append( + FCBlock(self.fc_channel, self.acts[4], mean=0.0, std=0.04, value=0.1) + ) + self.layers = nn.LayerList(layers_list) + + def forward_tensor(self, x): + y = x + y_list = [] + for layer in self.layers: + y = layer(y) + y_list.append(y) + return y_list # y_conv1, y_conv2, y_conv3, y_conv4, y_fc(y_out) + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + y_list = [] + # y1_conv1, y1_conv2, y1_conv3, y1_conv4, y1_fc, y2_conv1, y2_conv2, y2_conv3, y2_conv4, y2_fc + for k in x: + y_list.extend(self.forward_tensor(x[k])) + + y = self.split_to_dict(y_list, self.output_keys) + + if self._output_transform is not None: + y = self._output_transform(x, y) + + return y + + @staticmethod + def split_to_dict( + data_list: List[paddle.Tensor], keys: Tuple[str, ...] + ) -> Dict[str, paddle.Tensor]: + """Overwrite of split_to_dict() method belongs to Class base.Arch. + + Reason for overwriting is there is no concat_to_tensor() method called in "tempoGAN" example. + That is because input in "tempoGAN" example is not in a regular format, but a format like: + { + "input1": paddle.concat([in1, in2], axis=1), + "input2": paddle.concat([in1, in3], axis=1), + } + + Args: + data_list (List[paddle.Tensor]): The data to be split. It should be a list of tensor(s), but not a paddle.Tensor. + keys (Tuple[str, ...]): Keys of outputs. + + Returns: + Dict[str, paddle.Tensor]: Dict with split data. + """ + if len(keys) == 1: + return {keys[0]: data_list[0]} + return {key: data_list[i] for i, key in enumerate(keys)} diff --git a/ppsci/arch/geofno.py b/ppsci/arch/geofno.py index 94eea5d9af..3524c99959 100644 --- a/ppsci/arch/geofno.py +++ b/ppsci/arch/geofno.py @@ -1,205 +1,205 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
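The Generator above is a chain of VariantResBlocks, where each block computes relu(conv_1(conv_0(x)) + conv_2(x)) and block i consumes the last output channel count of block i-1. A small stand-in sketch of that wiring, using plain Conv2D layers instead of Conv2DBlock and the channel numbers from the docstring example:

import paddle
import paddle.nn as nn


class TinyVariantResBlock(nn.Layer):
    # Same topology as VariantResBlock: main path conv_0 -> conv_1, shortcut conv_2, add, ReLU.
    def __init__(self, in_channel, out_channels):
        super().__init__()
        self.conv_0 = nn.Conv2D(in_channel, out_channels[0], 5, padding="SAME")
        self.conv_1 = nn.Conv2D(out_channels[0], out_channels[1], 5, padding="SAME")
        self.conv_2 = nn.Conv2D(in_channel, out_channels[2], 1, padding="SAME")
        self.act = nn.ReLU()

    def forward(self, x):
        return self.act(self.conv_1(self.conv_0(x)) + self.conv_2(x))


out_channels_tuple = ((2, 8, 8), (128, 128, 128), (32, 8, 8), (2, 1, 1))
in_channel = 1
blocks = []
for i, outs in enumerate(out_channels_tuple):
    block_in = in_channel if i == 0 else out_channels_tuple[i - 1][-1]  # chain on the last channel
    blocks.append(TinyVariantResBlock(block_in, outs))

y = paddle.randn([4, in_channel, 64, 64])
for block in blocks:
    y = block(y)
print(y.shape)  # [4, 1, 64, 64], matching the Generator docstring example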
-# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F -import paddle.nn.initializer as Initializer - - -class SpectralConv1d(nn.Layer): - """ - 1D Fourier layer. It does FFT, linear transform, and Inverse FFT. - """ - - def __init__(self, in_channels, out_channels, modes1): - super(SpectralConv1d, self).__init__() - - self.in_channels = in_channels - self.out_channels = out_channels - # Number of Fourier modes to multiply, at most floor(N/2) + 1 - self.modes1 = modes1 - self.scale = 1 / (in_channels * out_channels) - - real = paddle.rand(shape=[in_channels, out_channels, modes1]) - real.stop_gradient = False - img = paddle.rand(shape=[in_channels, out_channels, modes1]) - img.stop_gradient = False - self.weights1_real = self.create_parameter( - [in_channels, out_channels, self.modes1], - attr=Initializer.Assign(self.scale * real), - ) - self.weights1_imag = self.create_parameter( - [in_channels, out_channels, self.modes1], - attr=Initializer.Assign(self.scale * img), - ) - - def compl_mul1d(self, op1, op2_real, op2_imag): - # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x) - eq = "bix,iox->box" - op1_real = op1.real() - op1_imag = op1.imag() - result_real = paddle.unsqueeze( - paddle.einsum(eq, op1_real, op2_real) - - paddle.einsum(eq, op1_imag, op2_imag), - axis=-1, - ) - result_imag = paddle.unsqueeze( - paddle.einsum(eq, op1_real, op2_imag) - + paddle.einsum(eq, op1_imag, op2_real), - axis=-1, - ) - result_complex = paddle.as_complex( - paddle.concat([result_real, result_imag], axis=-1) - ) - return result_complex - - def forward(self, x, output_size=None): - batchsize = x.shape[0] - # Compute Fourier coeffcients up to factor of e^(- something constant) - x_ft = paddle.fft.rfft(x) - - # Multiply relevant Fourier modes - out_ft_real = paddle.zeros( - [batchsize, self.out_channels, x.shape[-1] // 2 + 1], dtype="float32" - ) - out_ft_img = paddle.zeros( - [batchsize, self.out_channels, x.shape[-1] // 2 + 1], dtype="float32" - ) - out_ft = paddle.complex(out_ft_real, out_ft_img) - - out_ft[:, :, : self.modes1] = self.compl_mul1d( - x_ft[:, :, : self.modes1], self.weights1_real, self.weights1_imag - ) - - # Return to physical space - if output_size is None: - x = paddle.fft.irfft(out_ft, n=x.shape[-1]) - else: - x = paddle.fft.irfft(out_ft, n=output_size) - - return x - - -class FNO1d(nn.Layer): - """The overall network. It contains 4 layers of the Fourier layer. - 1. Lift the input to the desire channel dimension by self.fc0 . - 2. 4 layers of the integral operators u' = (W + K)(u). - W defined by self.w; K defined by self.conv . - 3. Project from the channel space to the output space by self.fc1 and self.fc2 . - - Args: - input_key (Tuple[str, ...], optional): Key to get the input tensor from the dict. Defaults to ("intput",). - output_key (Tuple[str, ...], optional): Key to save the output tensor into the dict. Defaults to ("output",). - modes (int, optional, optional): Number of Fourier modes to compute, it should be the same as - that in fft part of the code below. Defaults to 64. - width (int, optional, optional): Number of channels in each Fourier layer. Defaults to 64. - padding (int, optional, optional): How many zeros to pad to the input Tensor. Defaults to 100. - input_channel (int, optional, optional): Number of channels of the input tensor. Defaults to 2. 
- output_np (int, optional, optional): Number of points to sample the solution. Defaults to 2001. - - Examples: - >>> model = ppsci.arch.FNO1d() - >>> input_data = paddle.randn([100, 2001, 2]) - >>> input_dict = {"input": input_data} - >>> out_dict = model(input_dict) - >>> for k, v in out_dict.items(): - ... print(k, v.shape) - output [100, 1] - """ - - def __init__( - self, - input_key=("input",), - output_key=("output",), - modes=64, - width=64, - padding=100, - input_channel=2, - output_np=2001, - ): - super().__init__() - self.input_keys = input_key - self.output_keys = output_key - - self.output_np = output_np - self.modes1 = modes - self.width = width - self.padding = padding - self.fc0 = nn.Linear(input_channel, self.width) - - self.conv0 = SpectralConv1d(self.width, self.width, self.modes1) - self.conv1 = SpectralConv1d(self.width, self.width, self.modes1) - self.conv2 = SpectralConv1d(self.width, self.width, self.modes1) - self.conv3 = SpectralConv1d(self.width, self.width, self.modes1) - self.conv4 = SpectralConv1d(self.width, self.width, self.modes1) - - self.w0 = nn.Conv1D(self.width, self.width, 1) - self.w1 = nn.Conv1D(self.width, self.width, 1) - self.w2 = nn.Conv1D(self.width, self.width, 1) - self.w3 = nn.Conv1D(self.width, self.width, 1) - - self.fc1 = nn.Linear(self.width, 128) - self.fc2 = nn.Linear(128, 1) - - def _functional_pad(self, x, pad, mode="constant", value=0.0, data_format="NCL"): - if len(x.shape) * 2 == len(pad) and mode == "constant": - pad = ( - paddle.to_tensor(pad, dtype="float32") - .reshape((-1, 2)) - .flip([0]) - .flatten() - .tolist() - ) - return F.pad(x, pad, mode, value, data_format) - - def forward(self, x): - x = x[self.input_keys[0]] - # Dict - x = self.fc0(x) - x = paddle.transpose(x, perm=[0, 2, 1]) - # pad the domain if input is non-periodic - x = self._functional_pad(x, [0, self.padding]) - - x1 = self.conv0(x) - x2 = self.w0(x) - x = x1 + x2 - x = F.gelu(x=x, approximate=False) - - x1 = self.conv1(x) - x2 = self.w1(x) - x = x1 + x2 - x = F.gelu(x, approximate=False) - - x1 = self.conv2(x) - x2 = self.w2(x) - x = x1 + x2 - x = F.gelu(x, approximate=False) - - x1 = self.conv3(x) - x2 = self.w3(x) - x = x1 + x2 - x = F.gelu(x, approximate=False) - - x = x[..., : -self.padding] - x1 = self.conv4(x, self.output_np) - x2 = F.interpolate(x, size=[self.output_np], mode="linear", align_corners=True) - x = x1 + x2 - # Change the x-dimension to (batch, channel, 2001) - x = x.transpose(perm=[0, 2, 1]) - x = self.fc1(x) - x = F.gelu(x, approximate=False) - x = self.fc2(x) - - return {self.output_keys[0]: x} +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import paddle.nn.initializer as Initializer + + +class SpectralConv1d(nn.Layer): + """ + 1D Fourier layer. It does FFT, linear transform, and Inverse FFT. 
+ """ + + def __init__(self, in_channels, out_channels, modes1): + super(SpectralConv1d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + # Number of Fourier modes to multiply, at most floor(N/2) + 1 + self.modes1 = modes1 + self.scale = 1 / (in_channels * out_channels) + + real = paddle.rand(shape=[in_channels, out_channels, modes1]) + real.stop_gradient = False + img = paddle.rand(shape=[in_channels, out_channels, modes1]) + img.stop_gradient = False + self.weights1_real = self.create_parameter( + [in_channels, out_channels, self.modes1], + attr=Initializer.Assign(self.scale * real), + ) + self.weights1_imag = self.create_parameter( + [in_channels, out_channels, self.modes1], + attr=Initializer.Assign(self.scale * img), + ) + + def compl_mul1d(self, op1, op2_real, op2_imag): + # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x) + eq = "bix,iox->box" + op1_real = op1.real() + op1_imag = op1.imag() + result_real = paddle.unsqueeze( + paddle.einsum(eq, op1_real, op2_real) + - paddle.einsum(eq, op1_imag, op2_imag), + axis=-1, + ) + result_imag = paddle.unsqueeze( + paddle.einsum(eq, op1_real, op2_imag) + + paddle.einsum(eq, op1_imag, op2_real), + axis=-1, + ) + result_complex = paddle.as_complex( + paddle.concat([result_real, result_imag], axis=-1) + ) + return result_complex + + def forward(self, x, output_size=None): + batchsize = x.shape[0] + # Compute Fourier coeffcients up to factor of e^(- something constant) + x_ft = paddle.fft.rfft(x) + + # Multiply relevant Fourier modes + out_ft_real = paddle.zeros( + [batchsize, self.out_channels, x.shape[-1] // 2 + 1], dtype="float32" + ) + out_ft_img = paddle.zeros( + [batchsize, self.out_channels, x.shape[-1] // 2 + 1], dtype="float32" + ) + out_ft = paddle.complex(out_ft_real, out_ft_img) + + out_ft[:, :, : self.modes1] = self.compl_mul1d( + x_ft[:, :, : self.modes1], self.weights1_real, self.weights1_imag + ) + + # Return to physical space + if output_size is None: + x = paddle.fft.irfft(out_ft, n=x.shape[-1]) + else: + x = paddle.fft.irfft(out_ft, n=output_size) + + return x + + +class FNO1d(nn.Layer): + """The overall network. It contains 4 layers of the Fourier layer. + 1. Lift the input to the desire channel dimension by self.fc0 . + 2. 4 layers of the integral operators u' = (W + K)(u). + W defined by self.w; K defined by self.conv . + 3. Project from the channel space to the output space by self.fc1 and self.fc2 . + + Args: + input_key (Tuple[str, ...], optional): Key to get the input tensor from the dict. Defaults to ("intput",). + output_key (Tuple[str, ...], optional): Key to save the output tensor into the dict. Defaults to ("output",). + modes (int, optional, optional): Number of Fourier modes to compute, it should be the same as + that in fft part of the code below. Defaults to 64. + width (int, optional, optional): Number of channels in each Fourier layer. Defaults to 64. + padding (int, optional, optional): How many zeros to pad to the input Tensor. Defaults to 100. + input_channel (int, optional, optional): Number of channels of the input tensor. Defaults to 2. + output_np (int, optional, optional): Number of points to sample the solution. Defaults to 2001. + + Examples: + >>> model = ppsci.arch.FNO1d() + >>> input_data = paddle.randn([100, 2001, 2]) + >>> input_dict = {"input": input_data} + >>> out_dict = model(input_dict) + >>> for k, v in out_dict.items(): + ... 
print(k, v.shape) + output [100, 1] + """ + + def __init__( + self, + input_key=("input",), + output_key=("output",), + modes=64, + width=64, + padding=100, + input_channel=2, + output_np=2001, + ): + super().__init__() + self.input_keys = input_key + self.output_keys = output_key + + self.output_np = output_np + self.modes1 = modes + self.width = width + self.padding = padding + self.fc0 = nn.Linear(input_channel, self.width) + + self.conv0 = SpectralConv1d(self.width, self.width, self.modes1) + self.conv1 = SpectralConv1d(self.width, self.width, self.modes1) + self.conv2 = SpectralConv1d(self.width, self.width, self.modes1) + self.conv3 = SpectralConv1d(self.width, self.width, self.modes1) + self.conv4 = SpectralConv1d(self.width, self.width, self.modes1) + + self.w0 = nn.Conv1D(self.width, self.width, 1) + self.w1 = nn.Conv1D(self.width, self.width, 1) + self.w2 = nn.Conv1D(self.width, self.width, 1) + self.w3 = nn.Conv1D(self.width, self.width, 1) + + self.fc1 = nn.Linear(self.width, 128) + self.fc2 = nn.Linear(128, 1) + + def _functional_pad(self, x, pad, mode="constant", value=0.0, data_format="NCL"): + if len(x.shape) * 2 == len(pad) and mode == "constant": + pad = ( + paddle.to_tensor(pad, dtype="float32") + .reshape((-1, 2)) + .flip([0]) + .flatten() + .tolist() + ) + return F.pad(x, pad, mode, value, data_format) + + def forward(self, x): + x = x[self.input_keys[0]] + # Dict + x = self.fc0(x) + x = paddle.transpose(x, perm=[0, 2, 1]) + # pad the domain if input is non-periodic + x = self._functional_pad(x, [0, self.padding]) + + x1 = self.conv0(x) + x2 = self.w0(x) + x = x1 + x2 + x = F.gelu(x=x, approximate=False) + + x1 = self.conv1(x) + x2 = self.w1(x) + x = x1 + x2 + x = F.gelu(x, approximate=False) + + x1 = self.conv2(x) + x2 = self.w2(x) + x = x1 + x2 + x = F.gelu(x, approximate=False) + + x1 = self.conv3(x) + x2 = self.w3(x) + x = x1 + x2 + x = F.gelu(x, approximate=False) + + x = x[..., : -self.padding] + x1 = self.conv4(x, self.output_np) + x2 = F.interpolate(x, size=[self.output_np], mode="linear", align_corners=True) + x = x1 + x2 + # Change the x-dimension to (batch, channel, 2001) + x = x.transpose(perm=[0, 2, 1]) + x = self.fc1(x) + x = F.gelu(x, approximate=False) + x = self.fc2(x) + + return {self.output_keys[0]: x} diff --git a/ppsci/arch/graphcast.py b/ppsci/arch/graphcast.py index 79a1c0aeae..9f3533e769 100644 --- a/ppsci/arch/graphcast.py +++ b/ppsci/arch/graphcast.py @@ -1,492 +1,492 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
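SpectralConv1d.compl_mul1d performs the complex contraction with two real einsums. The identity it relies on can be checked numerically against NumPy's complex einsum on tiny, made-up shapes:

import numpy as np
import paddle

# (a_r + i a_i)(w_r + i w_i) = (a_r w_r - a_i w_i) + i (a_r w_i + a_i w_r),
# contracted over in_channel: (batch, in, modes) x (in, out, modes) -> (batch, out, modes).
eq = "bix,iox->box"
a_r, a_i = paddle.randn([2, 3, 5]), paddle.randn([2, 3, 5])
w_r, w_i = paddle.randn([3, 4, 5]), paddle.randn([3, 4, 5])

real = paddle.einsum(eq, a_r, w_r) - paddle.einsum(eq, a_i, w_i)
imag = paddle.einsum(eq, a_r, w_i) + paddle.einsum(eq, a_i, w_r)

ref = np.einsum(eq, a_r.numpy() + 1j * a_i.numpy(), w_r.numpy() + 1j * w_i.numpy())
print(np.allclose(real.numpy() + 1j * imag.numpy(), ref, atol=1e-5))  # True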
- -from typing import TYPE_CHECKING -from typing import Dict -from typing import Tuple - -import paddle -import paddle.nn as nn - -from ppsci.arch import base - -if TYPE_CHECKING: - import ppsci.data.dataset.atmospheric_dataset as atmospheric_dataset - - -class ResidualConnection(nn.Layer): - def __init__(self, fn): - super().__init__() - self.fn = fn - - def forward(self, inputs): - return inputs + self.fn(inputs) - - -class GraphCastMLP(nn.Layer): - def __init__( - self, in_features, out_features, latent_features=None, layer_norm=True - ): - super().__init__() - - if latent_features is None: - latent_features = out_features - - self.mlp = nn.Sequential( - nn.Linear(in_features, latent_features, bias_attr=True), - nn.Silu(), - nn.Linear(latent_features, out_features, bias_attr=True), - ) - self.layer_norm = layer_norm - if layer_norm: - self.layer_norm = nn.LayerNorm(out_features) - - def forward(self, feat): - if self.layer_norm: - out = self.layer_norm(self.mlp(feat)) - else: - out = self.mlp(feat) - return out - - -class GraphCastGNN(nn.Layer): - def __init__( - self, - grid_node_num: int, - grid_node_emb_dim: int, - mesh_node_num: int, - mesh_node_emb_dim: int, - mesh_edge_emb_dim: int, - grid2mesh_edge_emb_dim: int, - mesh2grid_edge_emb_dim: int, - src_type: str = "mesh", - dst_type: str = "mesh", - ): - super().__init__() - - self.src = src_type - self.dst = dst_type - self.grid_node_num = grid_node_num - self.mesh_node_num = mesh_node_num - self.edge_in_dim = grid_node_emb_dim + mesh_node_emb_dim - - if src_type == "mesh" and dst_type == "mesh": - self.edge_in_dim += mesh_edge_emb_dim - self.edge_out_dim = mesh_edge_emb_dim - self.node_in_dim = mesh_node_emb_dim + mesh_edge_emb_dim - self.node_out_dim = mesh_node_emb_dim - elif src_type == "grid" and dst_type == "mesh": - self.edge_in_dim += grid2mesh_edge_emb_dim - self.edge_out_dim = grid2mesh_edge_emb_dim - self.node_in_dim = mesh_node_emb_dim + grid2mesh_edge_emb_dim - self.node_out_dim = mesh_node_emb_dim - elif src_type == "mesh" and dst_type == "grid": - self.edge_in_dim += mesh2grid_edge_emb_dim - self.edge_out_dim = mesh2grid_edge_emb_dim - self.node_in_dim = grid_node_emb_dim + mesh2grid_edge_emb_dim - self.node_out_dim = grid_node_emb_dim - else: - raise ValueError - - self.edge_layer = GraphCastMLP(self.edge_in_dim, self.edge_out_dim) - self.node_layer = GraphCastMLP(self.node_in_dim, self.node_out_dim) - - def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): - if self.src == "mesh" and self.dst == "mesh": - edge_feats = graph.mesh_edge_feat - src_node_feats = graph.mesh_node_feat - dst_node_feats = graph.mesh_node_feat - src_idx = graph.mesh2mesh_src_index - dst_idx = graph.mesh2mesh_dst_index - dst_node_num = self.mesh_node_num - elif self.src == "grid" and self.dst == "mesh": - edge_feats = graph.grid2mesh_edge_feat - src_node_feats = graph.grid_node_feat - dst_node_feats = graph.mesh_node_feat - src_idx = graph.grid2mesh_src_index - dst_idx = graph.grid2mesh_dst_index - dst_node_num = self.mesh_node_num - elif self.src == "mesh" and self.dst == "grid": - edge_feats = graph.mesh2grid_edge_feat - src_node_feats = graph.mesh_node_feat - dst_node_feats = graph.grid_node_feat - src_idx = graph.mesh2grid_src_index - dst_idx = graph.mesh2grid_dst_index - dst_node_num = self.grid_node_num - - # update edge features - edge_feats_concat = paddle.concat( - [ - edge_feats, - paddle.gather(src_node_feats, src_idx), - paddle.gather(dst_node_feats, dst_idx), - ], - axis=-1, - ) - edge_feats_out = 
self.edge_layer(edge_feats_concat) - - _, batch_dim, _ = edge_feats_out.shape - - # update node features - edge_feats_scatter = paddle.zeros([dst_node_num, batch_dim, self.edge_out_dim]) - node_feats_concat = paddle.concat( - [ - dst_node_feats, - paddle.scatter( - edge_feats_scatter, dst_idx, edge_feats_out, overwrite=False - ), - ], - axis=-1, - ) - node_feats_out = self.node_layer(node_feats_concat) - - if self.src == "mesh" and self.dst == "mesh": - graph.mesh_edge_feat += edge_feats_out - graph.mesh_node_feat += node_feats_out - elif self.src == "grid" and self.dst == "mesh": - graph.grid2mesh_edge_feat += edge_feats_out - graph.mesh_node_feat += node_feats_out - elif self.src == "mesh" and self.dst == "grid": - graph.mesh2grid_edge_feat += edge_feats_out - graph.grid_node_feat += node_feats_out - - return graph - - -class GraphCastEmbedding(nn.Layer): - def __init__( - self, - grid_node_dim: int, - grid_node_emb_dim: int, - mesh_node_dim: int, - mesh_node_emb_dim: int, - mesh_edge_dim: int, - mesh_edge_emb_dim: int, - grid2mesh_edge_dim: int, - grid2mesh_edge_emb_dim: int, - mesh2grid_edge_dim: int, - mesh2grid_edge_emb_dim: int, - ): - super().__init__() - - self.grid_node_embedding = GraphCastMLP(grid_node_dim, grid_node_emb_dim) - self.mesh_node_embedding = GraphCastMLP(mesh_node_dim, mesh_node_emb_dim) - self.mesh_edge_embedding = GraphCastMLP(mesh_edge_dim, mesh_edge_emb_dim) - self.grid2mesh_edge_embedding = GraphCastMLP( - grid2mesh_edge_dim, grid2mesh_edge_emb_dim - ) - self.mesh2grid_edge_embedding = GraphCastMLP( - mesh2grid_edge_dim, mesh2grid_edge_emb_dim - ) - - def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): - grid_node_emb = self.grid_node_embedding(graph.grid_node_feat) - mesh_node_emb = self.mesh_node_embedding(graph.mesh_node_feat) - mesh_edge_emb = self.mesh_edge_embedding(graph.mesh_edge_feat) - grid2mesh_edge_emb = self.grid2mesh_edge_embedding(graph.grid2mesh_edge_feat) - mesh2grid_edge_emb = self.mesh2grid_edge_embedding(graph.mesh2grid_edge_feat) - - graph.grid_node_feat = grid_node_emb - graph.mesh_node_feat = mesh_node_emb - graph.mesh_edge_feat = mesh_edge_emb - graph.grid2mesh_edge_feat = grid2mesh_edge_emb - graph.mesh2grid_edge_feat = mesh2grid_edge_emb - - return graph - - -class GraphCastGrid2Mesh(nn.Layer): - def __init__( - self, - grid_node_num: int, - grid_node_emb_dim: int, - mesh_node_num: int, - mesh_node_emb_dim: int, - mesh_edge_emb_dim: int, - grid2mesh_edge_emb_dim: int, - mesh2grid_edge_emb_dim: int, - ): - super().__init__() - self.grid2mesh_gnn = GraphCastGNN( - grid_node_num=grid_node_num, - grid_node_emb_dim=grid_node_emb_dim, - mesh_node_num=mesh_node_num, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - src_type="grid", - dst_type="mesh", - ) - self.grid_node_layer = ResidualConnection( - GraphCastMLP(grid_node_emb_dim, grid_node_emb_dim) - ) - - def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): - graph = self.grid2mesh_gnn(graph) - graph.grid_node_feat = self.grid_node_layer(graph.grid_node_feat) - return graph - - -class GraphCastMesh2Grid(nn.Layer): - def __init__( - self, - grid_node_num: int, - grid_node_emb_dim: int, - mesh_node_num: int, - mesh_node_emb_dim: int, - mesh_edge_emb_dim: int, - grid2mesh_edge_emb_dim: int, - mesh2grid_edge_emb_dim: int, - ): - super().__init__() - self.mesh2grid_gnn = GraphCastGNN( - grid_node_num=grid_node_num, - 
grid_node_emb_dim=grid_node_emb_dim, - mesh_node_num=mesh_node_num, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - src_type="mesh", - dst_type="grid", - ) - self.mesh_node_layer = ResidualConnection( - GraphCastMLP(mesh_node_emb_dim, mesh_node_emb_dim) - ) - - def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): - graph = self.mesh2grid_gnn(graph) - graph.mesh_node_feat = self.mesh_node_layer(graph.mesh_node_feat) - return graph - - -class GraphCastEncoder(nn.Layer): - def __init__( - self, - grid_node_num: int, - grid_node_dim: int, - grid_node_emb_dim: int, - mesh_node_num: int, - mesh_node_dim: int, - mesh_node_emb_dim: int, - mesh_edge_dim: int, - mesh_edge_emb_dim: int, - grid2mesh_edge_dim: int, - grid2mesh_edge_emb_dim: int, - mesh2grid_edge_dim: int, - mesh2grid_edge_emb_dim: int, - ): - super().__init__() - self.embedding = GraphCastEmbedding( - grid_node_dim=grid_node_dim, - grid_node_emb_dim=grid_node_emb_dim, - mesh_node_dim=mesh_node_dim, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_dim=mesh_edge_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_dim=grid2mesh_edge_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_dim=mesh2grid_edge_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - ) - self.grid2mesh_gnn = GraphCastGrid2Mesh( - grid_node_num=grid_node_num, - grid_node_emb_dim=grid_node_emb_dim, - mesh_node_num=mesh_node_num, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - ) - - def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): - graph = self.embedding(graph) - graph = self.grid2mesh_gnn(graph) - return graph - - -class GraphCastDecoder(nn.Layer): - def __init__( - self, - grid_node_num: int, - grid_node_emb_dim: int, - mesh_node_num: int, - mesh_node_emb_dim: int, - mesh_edge_emb_dim: int, - grid2mesh_edge_emb_dim: int, - mesh2grid_edge_emb_dim: int, - node_output_dim: int, - ): - super().__init__() - self.mesh2grid_gnn = GraphCastMesh2Grid( - grid_node_num=grid_node_num, - grid_node_emb_dim=grid_node_emb_dim, - mesh_node_num=mesh_node_num, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - ) - self.grid_node_layer = GraphCastMLP( - grid_node_emb_dim, - node_output_dim, - latent_features=grid_node_emb_dim, - layer_norm=False, - ) - - def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): - graph = self.mesh2grid_gnn(graph) - graph.grid_node_feat = self.grid_node_layer(graph.grid_node_feat) - return graph - - -class GraphCastProcessor(nn.Layer): - def __init__( - self, - grid_node_num: int, - grid_node_emb_dim: int, - mesh_node_num: int, - mesh_node_emb_dim: int, - mesh_edge_emb_dim: int, - grid2mesh_edge_emb_dim: int, - mesh2grid_edge_emb_dim: int, - gnn_msg_steps: int, - ): - super().__init__() - - self.processor = nn.Sequential() - for idx in range(gnn_msg_steps): - self.processor.add_sublayer( - f"{idx}", - GraphCastGNN( - grid_node_num=grid_node_num, - grid_node_emb_dim=grid_node_emb_dim, - mesh_node_num=mesh_node_num, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - src_type="mesh", - 
dst_type="mesh", - ), - ) - - def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): - graph = self.processor(graph) - return graph - - -class GraphCastNet(base.Arch): - """GraphCast Network - - Args: - input_keys (Tuple[str, ...]): Name of input keys. - output_keys (Tuple[str, ...]): Name of output keys. - grid_node_num (int): Number of grid nodes. - grid_node_dim (int): Dimension of grid nodes. - grid_node_emb_dim (int): Dimension of emdding grid nodes. - mesh_node_num (int): Number of mesh nodes. - mesh_node_dim (int): Dimension of mesh nodes. - mesh_node_emb_dim (int): Dimension of emdding mesh nodes. - mesh_edge_dim (int): Dimension of mesh edges. - mesh_edge_emb_dim (int): Dimension of emdding mesh edges. - grid2mesh_edge_dim (int): Dimension of mesh edges in Grid2Mesh GNN. - grid2mesh_edge_emb_dim (int): Dimension of emdding mesh edges in Grid2Mesh GNN. - mesh2grid_edge_dim (int): Dimension of mesh edges in Mesh2Grid GNN. - mesh2grid_edge_emb_dim (int): Dimension of emdding mesh edges in Mesh2Grid GNN. - gnn_msg_steps (int): Step of gnn messages. - node_output_dim (int): Dimension of output nodes. - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - grid_node_num: int, - grid_node_dim: int, - grid_node_emb_dim: int, - mesh_node_num: int, - mesh_node_dim: int, - mesh_node_emb_dim: int, - mesh_edge_dim: int, - mesh_edge_emb_dim: int, - grid2mesh_edge_dim: int, - grid2mesh_edge_emb_dim: int, - mesh2grid_edge_dim: int, - mesh2grid_edge_emb_dim: int, - gnn_msg_steps: int, - node_output_dim: int, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.graphcast = nn.Sequential( - ( - "encoder", - GraphCastEncoder( - grid_node_num=grid_node_num, - grid_node_dim=grid_node_dim, - grid_node_emb_dim=grid_node_emb_dim, - mesh_node_num=mesh_node_num, - mesh_node_dim=mesh_node_dim, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_dim=mesh_edge_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_dim=grid2mesh_edge_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_dim=mesh2grid_edge_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - ), - ), - ( - "processor", - GraphCastProcessor( - grid_node_num=grid_node_num, - grid_node_emb_dim=grid_node_emb_dim, - mesh_node_num=mesh_node_num, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - gnn_msg_steps=gnn_msg_steps, - ), - ), - ( - "decoder", - GraphCastDecoder( - grid_node_num=grid_node_num, - grid_node_emb_dim=grid_node_emb_dim, - mesh_node_num=mesh_node_num, - mesh_node_emb_dim=mesh_node_emb_dim, - mesh_edge_emb_dim=mesh_edge_emb_dim, - grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, - mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, - node_output_dim=node_output_dim, - ), - ), - ) - - def forward( - self, x: Dict[str, "atmospheric_dataset.GraphGridMesh"] - ) -> Dict[str, paddle.Tensor]: - if self._input_transform is not None: - x = self._input_transform(x) - - graph = x[self.input_keys[0]] - y = self.graphcast(graph) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return {self.output_keys[0]: y} +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
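The edge update in GraphCastGNN gathers source and destination node features along each edge, and the node update scatter-adds the resulting edge messages back onto destination nodes. A tiny numeric sketch of that gather/scatter pattern (graph and feature sizes are made up, and the real features carry an extra batch dimension):

import paddle

node_feat = paddle.arange(8, dtype="float32").reshape([4, 2])  # 4 nodes, 2 features each
src_idx = paddle.to_tensor([0, 1, 3], dtype="int64")           # 3 directed edges
dst_idx = paddle.to_tensor([2, 2, 0], dtype="int64")

edge_msg = paddle.gather(node_feat, src_idx)                   # per-edge message (here, the source feature)
aggregated = paddle.scatter(
    paddle.zeros_like(node_feat), dst_idx, edge_msg, overwrite=False
)                                                              # sum of incoming messages per node
print(aggregated.numpy())
# node 2 receives node 0 + node 1, node 0 receives node 3, nodes 1 and 3 receive nothing.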
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING +from typing import Dict +from typing import Tuple + +import paddle +import paddle.nn as nn + +from ppsci.arch import base + +if TYPE_CHECKING: + import ppsci.data.dataset.atmospheric_dataset as atmospheric_dataset + + +class ResidualConnection(nn.Layer): + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, inputs): + return inputs + self.fn(inputs) + + +class GraphCastMLP(nn.Layer): + def __init__( + self, in_features, out_features, latent_features=None, layer_norm=True + ): + super().__init__() + + if latent_features is None: + latent_features = out_features + + self.mlp = nn.Sequential( + nn.Linear(in_features, latent_features, bias_attr=True), + nn.Silu(), + nn.Linear(latent_features, out_features, bias_attr=True), + ) + self.layer_norm = layer_norm + if layer_norm: + self.layer_norm = nn.LayerNorm(out_features) + + def forward(self, feat): + if self.layer_norm: + out = self.layer_norm(self.mlp(feat)) + else: + out = self.mlp(feat) + return out + + +class GraphCastGNN(nn.Layer): + def __init__( + self, + grid_node_num: int, + grid_node_emb_dim: int, + mesh_node_num: int, + mesh_node_emb_dim: int, + mesh_edge_emb_dim: int, + grid2mesh_edge_emb_dim: int, + mesh2grid_edge_emb_dim: int, + src_type: str = "mesh", + dst_type: str = "mesh", + ): + super().__init__() + + self.src = src_type + self.dst = dst_type + self.grid_node_num = grid_node_num + self.mesh_node_num = mesh_node_num + self.edge_in_dim = grid_node_emb_dim + mesh_node_emb_dim + + if src_type == "mesh" and dst_type == "mesh": + self.edge_in_dim += mesh_edge_emb_dim + self.edge_out_dim = mesh_edge_emb_dim + self.node_in_dim = mesh_node_emb_dim + mesh_edge_emb_dim + self.node_out_dim = mesh_node_emb_dim + elif src_type == "grid" and dst_type == "mesh": + self.edge_in_dim += grid2mesh_edge_emb_dim + self.edge_out_dim = grid2mesh_edge_emb_dim + self.node_in_dim = mesh_node_emb_dim + grid2mesh_edge_emb_dim + self.node_out_dim = mesh_node_emb_dim + elif src_type == "mesh" and dst_type == "grid": + self.edge_in_dim += mesh2grid_edge_emb_dim + self.edge_out_dim = mesh2grid_edge_emb_dim + self.node_in_dim = grid_node_emb_dim + mesh2grid_edge_emb_dim + self.node_out_dim = grid_node_emb_dim + else: + raise ValueError + + self.edge_layer = GraphCastMLP(self.edge_in_dim, self.edge_out_dim) + self.node_layer = GraphCastMLP(self.node_in_dim, self.node_out_dim) + + def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): + if self.src == "mesh" and self.dst == "mesh": + edge_feats = graph.mesh_edge_feat + src_node_feats = graph.mesh_node_feat + dst_node_feats = graph.mesh_node_feat + src_idx = graph.mesh2mesh_src_index + dst_idx = graph.mesh2mesh_dst_index + dst_node_num = self.mesh_node_num + elif self.src == "grid" and self.dst == "mesh": + edge_feats = graph.grid2mesh_edge_feat + src_node_feats = graph.grid_node_feat + dst_node_feats = graph.mesh_node_feat + src_idx = graph.grid2mesh_src_index + dst_idx = graph.grid2mesh_dst_index + dst_node_num = self.mesh_node_num + elif self.src == "mesh" and self.dst == "grid": + edge_feats = graph.mesh2grid_edge_feat + 
src_node_feats = graph.mesh_node_feat + dst_node_feats = graph.grid_node_feat + src_idx = graph.mesh2grid_src_index + dst_idx = graph.mesh2grid_dst_index + dst_node_num = self.grid_node_num + + # update edge features + edge_feats_concat = paddle.concat( + [ + edge_feats, + paddle.gather(src_node_feats, src_idx), + paddle.gather(dst_node_feats, dst_idx), + ], + axis=-1, + ) + edge_feats_out = self.edge_layer(edge_feats_concat) + + _, batch_dim, _ = edge_feats_out.shape + + # update node features + edge_feats_scatter = paddle.zeros([dst_node_num, batch_dim, self.edge_out_dim]) + node_feats_concat = paddle.concat( + [ + dst_node_feats, + paddle.scatter( + edge_feats_scatter, dst_idx, edge_feats_out, overwrite=False + ), + ], + axis=-1, + ) + node_feats_out = self.node_layer(node_feats_concat) + + if self.src == "mesh" and self.dst == "mesh": + graph.mesh_edge_feat += edge_feats_out + graph.mesh_node_feat += node_feats_out + elif self.src == "grid" and self.dst == "mesh": + graph.grid2mesh_edge_feat += edge_feats_out + graph.mesh_node_feat += node_feats_out + elif self.src == "mesh" and self.dst == "grid": + graph.mesh2grid_edge_feat += edge_feats_out + graph.grid_node_feat += node_feats_out + + return graph + + +class GraphCastEmbedding(nn.Layer): + def __init__( + self, + grid_node_dim: int, + grid_node_emb_dim: int, + mesh_node_dim: int, + mesh_node_emb_dim: int, + mesh_edge_dim: int, + mesh_edge_emb_dim: int, + grid2mesh_edge_dim: int, + grid2mesh_edge_emb_dim: int, + mesh2grid_edge_dim: int, + mesh2grid_edge_emb_dim: int, + ): + super().__init__() + + self.grid_node_embedding = GraphCastMLP(grid_node_dim, grid_node_emb_dim) + self.mesh_node_embedding = GraphCastMLP(mesh_node_dim, mesh_node_emb_dim) + self.mesh_edge_embedding = GraphCastMLP(mesh_edge_dim, mesh_edge_emb_dim) + self.grid2mesh_edge_embedding = GraphCastMLP( + grid2mesh_edge_dim, grid2mesh_edge_emb_dim + ) + self.mesh2grid_edge_embedding = GraphCastMLP( + mesh2grid_edge_dim, mesh2grid_edge_emb_dim + ) + + def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): + grid_node_emb = self.grid_node_embedding(graph.grid_node_feat) + mesh_node_emb = self.mesh_node_embedding(graph.mesh_node_feat) + mesh_edge_emb = self.mesh_edge_embedding(graph.mesh_edge_feat) + grid2mesh_edge_emb = self.grid2mesh_edge_embedding(graph.grid2mesh_edge_feat) + mesh2grid_edge_emb = self.mesh2grid_edge_embedding(graph.mesh2grid_edge_feat) + + graph.grid_node_feat = grid_node_emb + graph.mesh_node_feat = mesh_node_emb + graph.mesh_edge_feat = mesh_edge_emb + graph.grid2mesh_edge_feat = grid2mesh_edge_emb + graph.mesh2grid_edge_feat = mesh2grid_edge_emb + + return graph + + +class GraphCastGrid2Mesh(nn.Layer): + def __init__( + self, + grid_node_num: int, + grid_node_emb_dim: int, + mesh_node_num: int, + mesh_node_emb_dim: int, + mesh_edge_emb_dim: int, + grid2mesh_edge_emb_dim: int, + mesh2grid_edge_emb_dim: int, + ): + super().__init__() + self.grid2mesh_gnn = GraphCastGNN( + grid_node_num=grid_node_num, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_num=mesh_node_num, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + src_type="grid", + dst_type="mesh", + ) + self.grid_node_layer = ResidualConnection( + GraphCastMLP(grid_node_emb_dim, grid_node_emb_dim) + ) + + def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): + graph = self.grid2mesh_gnn(graph) + graph.grid_node_feat = 
self.grid_node_layer(graph.grid_node_feat) + return graph + + +class GraphCastMesh2Grid(nn.Layer): + def __init__( + self, + grid_node_num: int, + grid_node_emb_dim: int, + mesh_node_num: int, + mesh_node_emb_dim: int, + mesh_edge_emb_dim: int, + grid2mesh_edge_emb_dim: int, + mesh2grid_edge_emb_dim: int, + ): + super().__init__() + self.mesh2grid_gnn = GraphCastGNN( + grid_node_num=grid_node_num, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_num=mesh_node_num, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + src_type="mesh", + dst_type="grid", + ) + self.mesh_node_layer = ResidualConnection( + GraphCastMLP(mesh_node_emb_dim, mesh_node_emb_dim) + ) + + def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): + graph = self.mesh2grid_gnn(graph) + graph.mesh_node_feat = self.mesh_node_layer(graph.mesh_node_feat) + return graph + + +class GraphCastEncoder(nn.Layer): + def __init__( + self, + grid_node_num: int, + grid_node_dim: int, + grid_node_emb_dim: int, + mesh_node_num: int, + mesh_node_dim: int, + mesh_node_emb_dim: int, + mesh_edge_dim: int, + mesh_edge_emb_dim: int, + grid2mesh_edge_dim: int, + grid2mesh_edge_emb_dim: int, + mesh2grid_edge_dim: int, + mesh2grid_edge_emb_dim: int, + ): + super().__init__() + self.embedding = GraphCastEmbedding( + grid_node_dim=grid_node_dim, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_dim=mesh_node_dim, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_dim=mesh_edge_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_dim=grid2mesh_edge_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_dim=mesh2grid_edge_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + ) + self.grid2mesh_gnn = GraphCastGrid2Mesh( + grid_node_num=grid_node_num, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_num=mesh_node_num, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + ) + + def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): + graph = self.embedding(graph) + graph = self.grid2mesh_gnn(graph) + return graph + + +class GraphCastDecoder(nn.Layer): + def __init__( + self, + grid_node_num: int, + grid_node_emb_dim: int, + mesh_node_num: int, + mesh_node_emb_dim: int, + mesh_edge_emb_dim: int, + grid2mesh_edge_emb_dim: int, + mesh2grid_edge_emb_dim: int, + node_output_dim: int, + ): + super().__init__() + self.mesh2grid_gnn = GraphCastMesh2Grid( + grid_node_num=grid_node_num, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_num=mesh_node_num, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + ) + self.grid_node_layer = GraphCastMLP( + grid_node_emb_dim, + node_output_dim, + latent_features=grid_node_emb_dim, + layer_norm=False, + ) + + def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): + graph = self.mesh2grid_gnn(graph) + graph.grid_node_feat = self.grid_node_layer(graph.grid_node_feat) + return graph + + +class GraphCastProcessor(nn.Layer): + def __init__( + self, + grid_node_num: int, + grid_node_emb_dim: int, + mesh_node_num: int, + mesh_node_emb_dim: int, + mesh_edge_emb_dim: int, + grid2mesh_edge_emb_dim: int, + mesh2grid_edge_emb_dim: int, + gnn_msg_steps: int, + ): + super().__init__() + + self.processor = 
nn.Sequential() + for idx in range(gnn_msg_steps): + self.processor.add_sublayer( + f"{idx}", + GraphCastGNN( + grid_node_num=grid_node_num, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_num=mesh_node_num, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + src_type="mesh", + dst_type="mesh", + ), + ) + + def forward(self, graph: "atmospheric_dataset.GraphGridMesh"): + graph = self.processor(graph) + return graph + + +class GraphCastNet(base.Arch): + """GraphCast Network + + Args: + input_keys (Tuple[str, ...]): Name of input keys. + output_keys (Tuple[str, ...]): Name of output keys. + grid_node_num (int): Number of grid nodes. + grid_node_dim (int): Dimension of grid nodes. + grid_node_emb_dim (int): Dimension of emdding grid nodes. + mesh_node_num (int): Number of mesh nodes. + mesh_node_dim (int): Dimension of mesh nodes. + mesh_node_emb_dim (int): Dimension of emdding mesh nodes. + mesh_edge_dim (int): Dimension of mesh edges. + mesh_edge_emb_dim (int): Dimension of emdding mesh edges. + grid2mesh_edge_dim (int): Dimension of mesh edges in Grid2Mesh GNN. + grid2mesh_edge_emb_dim (int): Dimension of emdding mesh edges in Grid2Mesh GNN. + mesh2grid_edge_dim (int): Dimension of mesh edges in Mesh2Grid GNN. + mesh2grid_edge_emb_dim (int): Dimension of emdding mesh edges in Mesh2Grid GNN. + gnn_msg_steps (int): Step of gnn messages. + node_output_dim (int): Dimension of output nodes. + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + grid_node_num: int, + grid_node_dim: int, + grid_node_emb_dim: int, + mesh_node_num: int, + mesh_node_dim: int, + mesh_node_emb_dim: int, + mesh_edge_dim: int, + mesh_edge_emb_dim: int, + grid2mesh_edge_dim: int, + grid2mesh_edge_emb_dim: int, + mesh2grid_edge_dim: int, + mesh2grid_edge_emb_dim: int, + gnn_msg_steps: int, + node_output_dim: int, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.graphcast = nn.Sequential( + ( + "encoder", + GraphCastEncoder( + grid_node_num=grid_node_num, + grid_node_dim=grid_node_dim, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_num=mesh_node_num, + mesh_node_dim=mesh_node_dim, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_dim=mesh_edge_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_dim=grid2mesh_edge_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_dim=mesh2grid_edge_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + ), + ), + ( + "processor", + GraphCastProcessor( + grid_node_num=grid_node_num, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_num=mesh_node_num, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + gnn_msg_steps=gnn_msg_steps, + ), + ), + ( + "decoder", + GraphCastDecoder( + grid_node_num=grid_node_num, + grid_node_emb_dim=grid_node_emb_dim, + mesh_node_num=mesh_node_num, + mesh_node_emb_dim=mesh_node_emb_dim, + mesh_edge_emb_dim=mesh_edge_emb_dim, + grid2mesh_edge_emb_dim=grid2mesh_edge_emb_dim, + mesh2grid_edge_emb_dim=mesh2grid_edge_emb_dim, + node_output_dim=node_output_dim, + ), + ), + ) + + def forward( + self, x: Dict[str, "atmospheric_dataset.GraphGridMesh"] + ) -> Dict[str, paddle.Tensor]: + if self._input_transform is not None: + x = self._input_transform(x) + + graph = x[self.input_keys[0]] + y = 
self.graphcast(graph) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return {self.output_keys[0]: y} diff --git a/ppsci/arch/he_deeponets.py b/ppsci/arch/he_deeponets.py index 811da0d1b1..d9d56ba56a 100644 --- a/ppsci/arch/he_deeponets.py +++ b/ppsci/arch/he_deeponets.py @@ -1,197 +1,197 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Tuple -from typing import Union - -import paddle -import paddle.nn as nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base -from ppsci.arch import mlp - - -class HEDeepONets(base.Arch): - """Physical information deep operator networks. - - Args: - heat_input_keys (Tuple[str, ...]): Name of input data for heat boundary. - cold_input_keys (Tuple[str, ...]): Name of input data for cold boundary. - trunk_input_keys (Tuple[str, ...]): Name of input data for trunk net. - output_keys (Tuple[str, ...]): Output name of predicted temperature. - heat_num_loc (int): Number of sampled input data for heat boundary. - cold_num_loc (int): Number of sampled input data for cold boundary. - num_features (int): Number of features extracted from heat boundary, same for cold boundary and trunk net. - branch_num_layers (int): Number of hidden layers of branch net. - trunk_num_layers (int): Number of hidden layers of trunk net. - branch_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of branch net. - An integer for all layers, or list of integer specify each layer's size. - trunk_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of trunk net. - An integer for all layers, or list of integer specify each layer's size. - branch_skip_connection (bool, optional): Whether to use skip connection for branch net. Defaults to False. - trunk_skip_connection (bool, optional): Whether to use skip connection for trunk net. Defaults to False. - branch_activation (str, optional): Name of activation function for branch net. Defaults to "tanh". - trunk_activation (str, optional): Name of activation function for trunk net. Defaults to "tanh". - branch_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for branch net. Defaults to False. - trunk_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. - use_bias (bool, optional): Whether to add bias on predicted G(u)(y). Defaults to True. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.HEDeepONets( - ... ('qm_h',), - ... ('qm_c',), - ... ("x",'t'), - ... ("T_h",'T_c','T_w'), - ... 1, - ... 1, - ... 100, - ... 9, - ... 6, - ... 256, - ... 128, - ... branch_activation="swish", - ... trunk_activation="swish", - ... use_bias=True, - ... 
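GraphCastNet strings its encoder, processor and decoder together with a named nn.Sequential, so the whole encode-process-decode pipeline runs as a single call. A minimal sketch of that composition pattern with placeholder layers (not the real GraphCast stages):

import paddle
import paddle.nn as nn

gnn_msg_steps = 3
pipeline = nn.Sequential(
    ("encoder", nn.Linear(4, 16)),
    ("processor", nn.Sequential(*[nn.Linear(16, 16) for _ in range(gnn_msg_steps)])),
    ("decoder", nn.Linear(16, 2)),
)
print(pipeline(paddle.randn([5, 4])).shape)  # [5, 2]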
) - """ - - def __init__( - self, - heat_input_keys: Tuple[str, ...], - cold_input_keys: Tuple[str, ...], - trunk_input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - heat_num_loc: int, - cold_num_loc: int, - num_features: int, - branch_num_layers: int, - trunk_num_layers: int, - branch_hidden_size: Union[int, Tuple[int, ...]], - trunk_hidden_size: Union[int, Tuple[int, ...]], - branch_skip_connection: bool = False, - trunk_skip_connection: bool = False, - branch_activation: str = "tanh", - trunk_activation: str = "tanh", - branch_weight_norm: bool = False, - trunk_weight_norm: bool = False, - use_bias: bool = True, - ): - super().__init__() - self.trunk_input_keys = trunk_input_keys - self.heat_input_keys = heat_input_keys - self.cold_input_keys = cold_input_keys - self.input_keys = ( - self.trunk_input_keys + self.heat_input_keys + self.cold_input_keys - ) - self.output_keys = output_keys - self.num_features = num_features - - self.heat_net = mlp.MLP( - self.heat_input_keys, - ("h",), - branch_num_layers, - branch_hidden_size, - branch_activation, - branch_skip_connection, - branch_weight_norm, - input_dim=heat_num_loc, - output_dim=num_features * len(self.output_keys), - ) - - self.cold_net = mlp.MLP( - self.cold_input_keys, - ("c",), - branch_num_layers, - branch_hidden_size, - branch_activation, - branch_skip_connection, - branch_weight_norm, - input_dim=cold_num_loc, - output_dim=num_features * len(self.output_keys), - ) - - self.trunk_net = mlp.MLP( - self.trunk_input_keys, - ("t",), - trunk_num_layers, - trunk_hidden_size, - trunk_activation, - trunk_skip_connection, - trunk_weight_norm, - input_dim=len(self.trunk_input_keys), - output_dim=num_features * len(self.output_keys), - ) - self.trunk_act = act_mod.get_activation(trunk_activation) - self.heat_act = act_mod.get_activation(branch_activation) - self.cold_act = act_mod.get_activation(branch_activation) - - self.use_bias = use_bias - if use_bias: - # register bias to parameter for updating in optimizer and storage - self.b = self.create_parameter( - shape=(len(self.output_keys),), - attr=nn.initializer.Constant(0.0), - ) - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - # Branch net to encode the input function - heat_features = self.heat_net(x)[self.heat_net.output_keys[0]] - cold_features = self.cold_net(x)[self.cold_net.output_keys[0]] - # Trunk net to encode the domain of the output function - y_features = self.trunk_net(x)[self.trunk_net.output_keys[0]] - y_features = self.trunk_act(y_features) - # Dot product - G_u_h = paddle.sum( - heat_features[:, : self.num_features] - * y_features[:, : self.num_features] - * cold_features[:, : self.num_features], - axis=1, - keepdim=True, - ) - G_u_c = paddle.sum( - heat_features[:, self.num_features : 2 * self.num_features] - * y_features[:, self.num_features : 2 * self.num_features] - * cold_features[:, self.num_features : 2 * self.num_features], - axis=1, - keepdim=True, - ) - G_u_w = paddle.sum( - heat_features[:, 2 * self.num_features :] - * y_features[:, 2 * self.num_features :] - * cold_features[:, 2 * self.num_features :], - axis=1, - keepdim=True, - ) - # Add bias - if self.use_bias: - G_u_h += self.b[0] - G_u_c += self.b[1] - G_u_w += self.b[2] - - result_dict = { - self.output_keys[0]: G_u_h, - self.output_keys[1]: G_u_c, - self.output_keys[2]: G_u_w, - } - if self._output_transform is not None: - result_dict = self._output_transform(x, result_dict) - - return result_dict +# Copyright (c) 2023 PaddlePaddle 
Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Tuple +from typing import Union + +import paddle +import paddle.nn as nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base +from ppsci.arch import mlp + + +class HEDeepONets(base.Arch): + """Physical information deep operator networks. + + Args: + heat_input_keys (Tuple[str, ...]): Name of input data for heat boundary. + cold_input_keys (Tuple[str, ...]): Name of input data for cold boundary. + trunk_input_keys (Tuple[str, ...]): Name of input data for trunk net. + output_keys (Tuple[str, ...]): Output name of predicted temperature. + heat_num_loc (int): Number of sampled input data for heat boundary. + cold_num_loc (int): Number of sampled input data for cold boundary. + num_features (int): Number of features extracted from heat boundary, same for cold boundary and trunk net. + branch_num_layers (int): Number of hidden layers of branch net. + trunk_num_layers (int): Number of hidden layers of trunk net. + branch_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of branch net. + An integer for all layers, or list of integer specify each layer's size. + trunk_hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size of trunk net. + An integer for all layers, or list of integer specify each layer's size. + branch_skip_connection (bool, optional): Whether to use skip connection for branch net. Defaults to False. + trunk_skip_connection (bool, optional): Whether to use skip connection for trunk net. Defaults to False. + branch_activation (str, optional): Name of activation function for branch net. Defaults to "tanh". + trunk_activation (str, optional): Name of activation function for trunk net. Defaults to "tanh". + branch_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for branch net. Defaults to False. + trunk_weight_norm (bool, optional): Whether to apply weight norm on parameter(s) for trunk net. Defaults to False. + use_bias (bool, optional): Whether to add bias on predicted G(u)(y). Defaults to True. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.HEDeepONets( + ... ('qm_h',), + ... ('qm_c',), + ... ("x",'t'), + ... ("T_h",'T_c','T_w'), + ... 1, + ... 1, + ... 100, + ... 9, + ... 6, + ... 256, + ... 128, + ... branch_activation="swish", + ... trunk_activation="swish", + ... use_bias=True, + ... 
) + """ + + def __init__( + self, + heat_input_keys: Tuple[str, ...], + cold_input_keys: Tuple[str, ...], + trunk_input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + heat_num_loc: int, + cold_num_loc: int, + num_features: int, + branch_num_layers: int, + trunk_num_layers: int, + branch_hidden_size: Union[int, Tuple[int, ...]], + trunk_hidden_size: Union[int, Tuple[int, ...]], + branch_skip_connection: bool = False, + trunk_skip_connection: bool = False, + branch_activation: str = "tanh", + trunk_activation: str = "tanh", + branch_weight_norm: bool = False, + trunk_weight_norm: bool = False, + use_bias: bool = True, + ): + super().__init__() + self.trunk_input_keys = trunk_input_keys + self.heat_input_keys = heat_input_keys + self.cold_input_keys = cold_input_keys + self.input_keys = ( + self.trunk_input_keys + self.heat_input_keys + self.cold_input_keys + ) + self.output_keys = output_keys + self.num_features = num_features + + self.heat_net = mlp.MLP( + self.heat_input_keys, + ("h",), + branch_num_layers, + branch_hidden_size, + branch_activation, + branch_skip_connection, + branch_weight_norm, + input_dim=heat_num_loc, + output_dim=num_features * len(self.output_keys), + ) + + self.cold_net = mlp.MLP( + self.cold_input_keys, + ("c",), + branch_num_layers, + branch_hidden_size, + branch_activation, + branch_skip_connection, + branch_weight_norm, + input_dim=cold_num_loc, + output_dim=num_features * len(self.output_keys), + ) + + self.trunk_net = mlp.MLP( + self.trunk_input_keys, + ("t",), + trunk_num_layers, + trunk_hidden_size, + trunk_activation, + trunk_skip_connection, + trunk_weight_norm, + input_dim=len(self.trunk_input_keys), + output_dim=num_features * len(self.output_keys), + ) + self.trunk_act = act_mod.get_activation(trunk_activation) + self.heat_act = act_mod.get_activation(branch_activation) + self.cold_act = act_mod.get_activation(branch_activation) + + self.use_bias = use_bias + if use_bias: + # register bias to parameter for updating in optimizer and storage + self.b = self.create_parameter( + shape=(len(self.output_keys),), + attr=nn.initializer.Constant(0.0), + ) + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + # Branch net to encode the input function + heat_features = self.heat_net(x)[self.heat_net.output_keys[0]] + cold_features = self.cold_net(x)[self.cold_net.output_keys[0]] + # Trunk net to encode the domain of the output function + y_features = self.trunk_net(x)[self.trunk_net.output_keys[0]] + y_features = self.trunk_act(y_features) + # Dot product + G_u_h = paddle.sum( + heat_features[:, : self.num_features] + * y_features[:, : self.num_features] + * cold_features[:, : self.num_features], + axis=1, + keepdim=True, + ) + G_u_c = paddle.sum( + heat_features[:, self.num_features : 2 * self.num_features] + * y_features[:, self.num_features : 2 * self.num_features] + * cold_features[:, self.num_features : 2 * self.num_features], + axis=1, + keepdim=True, + ) + G_u_w = paddle.sum( + heat_features[:, 2 * self.num_features :] + * y_features[:, 2 * self.num_features :] + * cold_features[:, 2 * self.num_features :], + axis=1, + keepdim=True, + ) + # Add bias + if self.use_bias: + G_u_h += self.b[0] + G_u_c += self.b[1] + G_u_w += self.b[2] + + result_dict = { + self.output_keys[0]: G_u_h, + self.output_keys[1]: G_u_c, + self.output_keys[2]: G_u_w, + } + if self._output_transform is not None: + result_dict = self._output_transform(x, result_dict) + + return result_dict diff --git a/ppsci/arch/lno.py 
b/ppsci/arch/lno.py index d600c5d028..ece3cbe253 100644 --- a/ppsci/arch/lno.py +++ b/ppsci/arch/lno.py @@ -1,312 +1,312 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import operator -from functools import reduce -from typing import Optional -from typing import Tuple - -import numpy as np -import paddle -import paddle.nn as nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base -from ppsci.utils import initializer - - -class Laplace(nn.Layer): - """Generic N-Dimensional Laplace Operator with Pole-Residue Method. - - Args: - in_channels (int): Number of input channels of the first layer. - out_channels (int): Number of output channels of the last layer. - modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training. - T (paddle.Tensor): Linspace of time dimension. - data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions. - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - modes: Tuple[int, ...], - T: paddle.Tensor, - data: Tuple[paddle.Tensor, ...], - ): - super().__init__() - self.char1 = "pqr" - self.char2 = "mnk" - self.modes = modes - self.scale = 1 / (in_channels * out_channels) - self.dims = len(modes) - - self.weights_pole_real = nn.ParameterList() - self.weights_pole_imag = nn.ParameterList() - for i in range(self.dims): - weight_real = self._init_weights( - self.create_parameter((in_channels, out_channels, modes[i], 1)) - ) - weight_imag = self._init_weights( - self.create_parameter((in_channels, out_channels, modes[i], 1)) - ) - self.weights_pole_real.append(weight_real) - self.weights_pole_imag.append(weight_imag) - - residues_shape = (in_channels, out_channels) + modes + (1,) - self.weights_residue_real = self._init_weights( - self.create_parameter(residues_shape) - ) - self.weights_residue_imag = self._init_weights( - self.create_parameter(residues_shape) - ) - - self.initialize_lambdas(T, data) - self.get_einsum_eqs() - - def _init_weights(self, weight) -> paddle.Tensor: - return initializer.uniform_(weight, a=0, b=self.scale) - - def initialize_lambdas(self, T, data) -> None: - self.t_lst = (T,) + data - self.lambdas = [] - for i in range(self.dims): - t_i = self.t_lst[i] - self.register_buffer(f"t_{i}", t_i) - dt = (t_i[0, 1] - t_i[0, 0]).item() - omega = paddle.fft.fftfreq(n=tuple(t_i.shape)[1], d=dt) * 2 * np.pi * 1.0j - lambda_ = omega.reshape([*omega.shape, 1, 1, 1]) - self.register_buffer(f"lambda_{i}", lambda_) - self.lambdas.append(lambda_) - - def get_einsum_eqs(self) -> None: - terms_eq = [] - terms_x2_eq = [] - for i in range(self.dims): - term_eq = self.char1[i] + "io" + self.char2[i] - terms_eq.append(term_eq) - term_x2_eq = "io" + self.char2[i] + self.char1[i] - terms_x2_eq.append(term_x2_eq) - self.eq1 = ( - "bi" - + "".join(self.char1) - + "," - + "io" - + "".join(self.char2) - + "," - + ",".join(terms_eq) - + "->" - + "bo" - + "".join(self.char1) - ) - self.eq2 = ( 
- "bi" - + "".join(self.char1) - + "," - + "io" - + "".join(self.char2) - + "," - + ",".join(terms_eq) - + "->" - + "bo" - + "".join(self.char2) - ) - self.eq_x2 = ( - "bi" - + "".join(self.char2) - + "," - + ",".join(terms_x2_eq) - + "->bo" - + "".join(self.char1) - ) - - def output_PR(self, alpha) -> Tuple[paddle.Tensor, paddle.Tensor]: - weights_residue = paddle.as_complex( - paddle.concat( - [self.weights_residue_real, self.weights_residue_imag], axis=-1 - ) - ) - self.weights_pole = [] - terms = [] - for i in range(self.dims): - weights_pole = paddle.as_complex( - paddle.concat( - [self.weights_pole_real[i], self.weights_pole_imag[i]], axis=-1 - ) - ) - self.weights_pole.append(weights_pole) - sub = paddle.subtract(self.lambdas[i], weights_pole) - terms.append(paddle.divide(paddle.to_tensor(1, dtype=sub.dtype), sub)) - - output_residue1 = paddle.einsum(self.eq1, alpha, weights_residue, *terms) - output_residue2 = (-1) ** self.dims * paddle.einsum( - self.eq2, alpha, weights_residue, *terms - ) - return output_residue1, output_residue2 - - def forward(self, x): - alpha = paddle.fft.fftn(x=x, axes=[-3, -2, -1]) - output_residue1, output_residue2 = self.output_PR(alpha) - - x1 = paddle.fft.ifftn( - x=output_residue1, s=(x.shape[-3], x.shape[-2], x.shape[-1]) - ) - x1 = paddle.real(x=x1) - - exp_terms = [] - for i in range(self.dims): - term = paddle.einsum( - "io" - + self.char2[i] - + ",d" - + self.char1[i] - + "->io" - + self.char2[i] - + self.char1[i], - self.weights_pole[i], - self.t_lst[i].astype(paddle.complex64).reshape([1, -1]), - ) - exp_terms.append(paddle.exp(term)) - - x2 = paddle.einsum(self.eq_x2, output_residue2, *exp_terms) - x2 = paddle.real(x2) - x2 = x2 / reduce(operator.mul, x.shape[-3:], 1) - return x1 + x2 - - -class LNO(base.Arch): - """Laplace Neural Operator net. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2"). - width (int): Tensor width of Laplace Layer. - modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training. - T (paddle.Tensor): Linspace of time dimension. - data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions. - in_features (int, optional): Number of input channels of the first layer.. Defaults to 1. - hidden_features (int, optional): Number of channels of the fully-connected layer. Defaults to 64. - activation (str, optional): The activation function. Defaults to "sin". - use_norm (bool, optional): Whether to use normalization layers. Defaults to True. - use_grid (bool, optional): Whether to create grid. Defaults to False. - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - width: int, - modes: Tuple[int, ...], - T: paddle.Tensor, - data: Optional[Tuple[paddle.Tensor, ...]] = None, - in_features: int = 1, - hidden_features: int = 64, - activation: str = "sin", - use_norm: bool = True, - use_grid: bool = False, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.width = width - self.modes = modes - self.dims = len(modes) - assert self.dims <= 3, "Only 3 dims and lower of modes are supported now." - - if data is None: - data = () - assert ( - self.dims == len(data) + 1 - ), f"Dims of modes is {self.dims} but only {len(data)} dims(except T) of data received." 
- - self.fc0 = nn.Linear(in_features=in_features, out_features=self.width) - self.laplace = Laplace(self.width, self.width, self.modes, T, data) - self.conv = getattr(nn, f"Conv{self.dims}D")( - in_channels=self.width, - out_channels=self.width, - kernel_size=1, - data_format="NCDHW", - ) - if use_norm: - self.norm = getattr(nn, f"InstanceNorm{self.dims}D")( - num_features=self.width, - weight_attr=False, - bias_attr=False, - ) - self.fc1 = nn.Linear(in_features=self.width, out_features=hidden_features) - self.fc2 = nn.Linear(in_features=hidden_features, out_features=1) - self.act = act_mod.get_activation(activation) - - self.use_norm = use_norm - self.use_grid = use_grid - - def get_grid(self, shape): - batchsize, size_t, size_x, size_y = shape[0], shape[1], shape[2], shape[3] - gridt = paddle.linspace(0, 1, size_t) - gridt = gridt.reshape([1, size_t, 1, 1, 1]).tile( - [batchsize, 1, size_x, size_y, 1] - ) - gridx = paddle.linspace(0, 1, size_x) - gridx = gridx.reshape([1, 1, size_x, 1, 1]).tile( - [batchsize, size_t, 1, size_y, 1] - ) - gridy = paddle.linspace(0, 1, size_y) - gridy = gridy.reshape([1, 1, 1, size_y, 1]).tile( - [batchsize, size_t, size_x, 1, 1] - ) - return paddle.concat([gridt, gridx, gridy], axis=-1) - - def transpoe_to_NCDHW(self, x): - perm = [0, self.dims + 1] + list(range(1, self.dims + 1)) - return paddle.transpose(x, perm=perm) - - def transpoe_to_NDHWC(self, x): - perm = [0] + list(range(2, self.dims + 2)) + [1] - return paddle.transpose(x, perm=perm) - - def forward_tensor(self, x): - if self.use_grid: - grid = self.get_grid(x.shape) - x = paddle.concat([x, grid], axis=-1) - x = self.fc0(x) - x = self.transpoe_to_NCDHW(x) - - if self.use_norm: - x1 = self.norm(self.laplace(self.norm(x))) - else: - x1 = self.laplace(x) - - x2 = self.conv(x) - x = x1 + x2 - - x = self.transpoe_to_NDHWC(x) - - x = self.fc1(x) - x = self.act(x) - x = self.fc2(x) - return x - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - y = self.concat_to_tensor(x, self.input_keys, axis=-1) - y = self.forward_tensor(y) - y = self.split_to_dict(y, self.output_keys, axis=-1) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import operator +from functools import reduce +from typing import Optional +from typing import Tuple + +import numpy as np +import paddle +import paddle.nn as nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base +from ppsci.utils import initializer + + +class Laplace(nn.Layer): + """Generic N-Dimensional Laplace Operator with Pole-Residue Method. + + Args: + in_channels (int): Number of input channels of the first layer. + out_channels (int): Number of output channels of the last layer. + modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training. 
+ T (paddle.Tensor): Linspace of time dimension. + data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + modes: Tuple[int, ...], + T: paddle.Tensor, + data: Tuple[paddle.Tensor, ...], + ): + super().__init__() + self.char1 = "pqr" + self.char2 = "mnk" + self.modes = modes + self.scale = 1 / (in_channels * out_channels) + self.dims = len(modes) + + self.weights_pole_real = nn.ParameterList() + self.weights_pole_imag = nn.ParameterList() + for i in range(self.dims): + weight_real = self._init_weights( + self.create_parameter((in_channels, out_channels, modes[i], 1)) + ) + weight_imag = self._init_weights( + self.create_parameter((in_channels, out_channels, modes[i], 1)) + ) + self.weights_pole_real.append(weight_real) + self.weights_pole_imag.append(weight_imag) + + residues_shape = (in_channels, out_channels) + modes + (1,) + self.weights_residue_real = self._init_weights( + self.create_parameter(residues_shape) + ) + self.weights_residue_imag = self._init_weights( + self.create_parameter(residues_shape) + ) + + self.initialize_lambdas(T, data) + self.get_einsum_eqs() + + def _init_weights(self, weight) -> paddle.Tensor: + return initializer.uniform_(weight, a=0, b=self.scale) + + def initialize_lambdas(self, T, data) -> None: + self.t_lst = (T,) + data + self.lambdas = [] + for i in range(self.dims): + t_i = self.t_lst[i] + self.register_buffer(f"t_{i}", t_i) + dt = (t_i[0, 1] - t_i[0, 0]).item() + omega = paddle.fft.fftfreq(n=tuple(t_i.shape)[1], d=dt) * 2 * np.pi * 1.0j + lambda_ = omega.reshape([*omega.shape, 1, 1, 1]) + self.register_buffer(f"lambda_{i}", lambda_) + self.lambdas.append(lambda_) + + def get_einsum_eqs(self) -> None: + terms_eq = [] + terms_x2_eq = [] + for i in range(self.dims): + term_eq = self.char1[i] + "io" + self.char2[i] + terms_eq.append(term_eq) + term_x2_eq = "io" + self.char2[i] + self.char1[i] + terms_x2_eq.append(term_x2_eq) + self.eq1 = ( + "bi" + + "".join(self.char1) + + "," + + "io" + + "".join(self.char2) + + "," + + ",".join(terms_eq) + + "->" + + "bo" + + "".join(self.char1) + ) + self.eq2 = ( + "bi" + + "".join(self.char1) + + "," + + "io" + + "".join(self.char2) + + "," + + ",".join(terms_eq) + + "->" + + "bo" + + "".join(self.char2) + ) + self.eq_x2 = ( + "bi" + + "".join(self.char2) + + "," + + ",".join(terms_x2_eq) + + "->bo" + + "".join(self.char1) + ) + + def output_PR(self, alpha) -> Tuple[paddle.Tensor, paddle.Tensor]: + weights_residue = paddle.as_complex( + paddle.concat( + [self.weights_residue_real, self.weights_residue_imag], axis=-1 + ) + ) + self.weights_pole = [] + terms = [] + for i in range(self.dims): + weights_pole = paddle.as_complex( + paddle.concat( + [self.weights_pole_real[i], self.weights_pole_imag[i]], axis=-1 + ) + ) + self.weights_pole.append(weights_pole) + sub = paddle.subtract(self.lambdas[i], weights_pole) + terms.append(paddle.divide(paddle.to_tensor(1, dtype=sub.dtype), sub)) + + output_residue1 = paddle.einsum(self.eq1, alpha, weights_residue, *terms) + output_residue2 = (-1) ** self.dims * paddle.einsum( + self.eq2, alpha, weights_residue, *terms + ) + return output_residue1, output_residue2 + + def forward(self, x): + alpha = paddle.fft.fftn(x=x, axes=[-3, -2, -1]) + output_residue1, output_residue2 = self.output_PR(alpha) + + x1 = paddle.fft.ifftn( + x=output_residue1, s=(x.shape[-3], x.shape[-2], x.shape[-1]) + ) + x1 = paddle.real(x=x1) + + exp_terms = [] + for i in range(self.dims): + term = paddle.einsum( + "io" + + 
self.char2[i] + + ",d" + + self.char1[i] + + "->io" + + self.char2[i] + + self.char1[i], + self.weights_pole[i], + self.t_lst[i].astype(paddle.complex64).reshape([1, -1]), + ) + exp_terms.append(paddle.exp(term)) + + x2 = paddle.einsum(self.eq_x2, output_residue2, *exp_terms) + x2 = paddle.real(x2) + x2 = x2 / reduce(operator.mul, x.shape[-3:], 1) + return x1 + x2 + + +class LNO(base.Arch): + """Laplace Neural Operator net. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input1", "input2"). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output1", "output2"). + width (int): Tensor width of Laplace Layer. + modes (Tuple[int, ...]): Number of modes to use for contraction in Laplace domain during training. + T (paddle.Tensor): Linspace of time dimension. + data (Tuple[paddle.Tensor, ...]): Linspaces of other dimensions. + in_features (int, optional): Number of input channels of the first layer.. Defaults to 1. + hidden_features (int, optional): Number of channels of the fully-connected layer. Defaults to 64. + activation (str, optional): The activation function. Defaults to "sin". + use_norm (bool, optional): Whether to use normalization layers. Defaults to True. + use_grid (bool, optional): Whether to create grid. Defaults to False. + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + width: int, + modes: Tuple[int, ...], + T: paddle.Tensor, + data: Optional[Tuple[paddle.Tensor, ...]] = None, + in_features: int = 1, + hidden_features: int = 64, + activation: str = "sin", + use_norm: bool = True, + use_grid: bool = False, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.width = width + self.modes = modes + self.dims = len(modes) + assert self.dims <= 3, "Only 3 dims and lower of modes are supported now." + + if data is None: + data = () + assert ( + self.dims == len(data) + 1 + ), f"Dims of modes is {self.dims} but only {len(data)} dims(except T) of data received." 
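The constraint enforced by the assert above (one linspace per non-time dimension, plus `T` itself) is easiest to see with concrete tensors. A minimal construction sketch, assuming the class is exported as `ppsci.arch.LNO` and using arbitrary key names and grid sizes:

    # Hedged sketch: builds the T/data linspaces expected by LNO for a 3-D
    # (t, x, y) problem, so that len(modes) == 1 + len(data). The [1, N]
    # row-vector layout mirrors the t[0, 1] - t[0, 0] indexing used above;
    # key names, width and grid sizes are illustrative assumptions.
    import paddle
    import ppsci

    T = paddle.linspace(0.0, 1.0, 64).reshape([1, -1])  # time grid, shape [1, 64]
    x = paddle.linspace(0.0, 1.0, 32).reshape([1, -1])  # first spatial grid
    y = paddle.linspace(0.0, 1.0, 32).reshape([1, -1])  # second spatial grid

    model = ppsci.arch.LNO(
        input_keys=("input",),
        output_keys=("output",),
        width=8,
        modes=(4, 4, 4),  # len(modes) == 3 == 1 (time) + len(data)
        T=T,
        data=(x, y),
    )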
+ + self.fc0 = nn.Linear(in_features=in_features, out_features=self.width) + self.laplace = Laplace(self.width, self.width, self.modes, T, data) + self.conv = getattr(nn, f"Conv{self.dims}D")( + in_channels=self.width, + out_channels=self.width, + kernel_size=1, + data_format="NCDHW", + ) + if use_norm: + self.norm = getattr(nn, f"InstanceNorm{self.dims}D")( + num_features=self.width, + weight_attr=False, + bias_attr=False, + ) + self.fc1 = nn.Linear(in_features=self.width, out_features=hidden_features) + self.fc2 = nn.Linear(in_features=hidden_features, out_features=1) + self.act = act_mod.get_activation(activation) + + self.use_norm = use_norm + self.use_grid = use_grid + + def get_grid(self, shape): + batchsize, size_t, size_x, size_y = shape[0], shape[1], shape[2], shape[3] + gridt = paddle.linspace(0, 1, size_t) + gridt = gridt.reshape([1, size_t, 1, 1, 1]).tile( + [batchsize, 1, size_x, size_y, 1] + ) + gridx = paddle.linspace(0, 1, size_x) + gridx = gridx.reshape([1, 1, size_x, 1, 1]).tile( + [batchsize, size_t, 1, size_y, 1] + ) + gridy = paddle.linspace(0, 1, size_y) + gridy = gridy.reshape([1, 1, 1, size_y, 1]).tile( + [batchsize, size_t, size_x, 1, 1] + ) + return paddle.concat([gridt, gridx, gridy], axis=-1) + + def transpoe_to_NCDHW(self, x): + perm = [0, self.dims + 1] + list(range(1, self.dims + 1)) + return paddle.transpose(x, perm=perm) + + def transpoe_to_NDHWC(self, x): + perm = [0] + list(range(2, self.dims + 2)) + [1] + return paddle.transpose(x, perm=perm) + + def forward_tensor(self, x): + if self.use_grid: + grid = self.get_grid(x.shape) + x = paddle.concat([x, grid], axis=-1) + x = self.fc0(x) + x = self.transpoe_to_NCDHW(x) + + if self.use_norm: + x1 = self.norm(self.laplace(self.norm(x))) + else: + x1 = self.laplace(x) + + x2 = self.conv(x) + x = x1 + x2 + + x = self.transpoe_to_NDHWC(x) + + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + y = self.concat_to_tensor(x, self.input_keys, axis=-1) + y = self.forward_tensor(y) + y = self.split_to_dict(y, self.output_keys, axis=-1) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y diff --git a/ppsci/arch/mlp.py b/ppsci/arch/mlp.py index 1e4e1c42a6..b3bd86921d 100644 --- a/ppsci/arch/mlp.py +++ b/ppsci/arch/mlp.py @@ -1,820 +1,820 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle -import paddle.nn as nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base -from ppsci.utils import initializer - - -class WeightNormLinear(nn.Layer): - def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None: - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.weight_v = self.create_parameter((in_features, out_features)) - self.weight_g = self.create_parameter((out_features,)) - if bias: - self.bias = self.create_parameter((out_features,)) - else: - self.bias = None - self._init_weights() - - def _init_weights(self) -> None: - initializer.xavier_uniform_(self.weight_v) - initializer.constant_(self.weight_g, 1.0) - if self.bias is not None: - initializer.constant_(self.bias, 0.0) - - def forward(self, input): - norm = self.weight_v.norm(p=2, axis=0, keepdim=True) - weight = self.weight_g * self.weight_v / norm - return nn.functional.linear(input, weight, self.bias) - - -class RandomWeightFactorization(nn.Layer): - def __init__( - self, - in_features: int, - out_features: int, - bias: bool = True, - mean: float = 0.5, - std: float = 0.1, - ): - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.weight_v = self.create_parameter((in_features, out_features)) - self.weight_g = self.create_parameter((out_features,)) - if bias: - self.bias = self.create_parameter((out_features,)) - else: - self.bias = None - - self._init_weights(mean, std) - - def _init_weights(self, mean, std): - with paddle.no_grad(): - initializer.glorot_normal_(self.weight_v) - - nn.initializer.Normal(mean, std)(self.weight_g) - paddle.assign(paddle.exp(self.weight_g), self.weight_g) - paddle.assign(self.weight_v / self.weight_g, self.weight_v) - if self.bias is not None: - initializer.constant_(self.bias, 0.0) - - self.weight_g.stop_gradient = False - self.weight_v.stop_gradient = False - self.bias.stop_gradient = False - - def forward(self, input): - return nn.functional.linear(input, self.weight_g * self.weight_v, self.bias) - - -class PeriodEmbedding(nn.Layer): - def __init__(self, periods: Dict[str, Tuple[float, bool]]): - super().__init__() - self.freqs_dict = { - k: self.create_parameter( - [], - attr=paddle.ParamAttr(trainable=trainable), - default_initializer=nn.initializer.Constant(2 * np.pi / float(p)), - ) # mu = 2*pi / period for sin/cos function - for k, (p, trainable) in periods.items() - } - self.freqs = nn.ParameterList(list(self.freqs_dict.values())) - - def forward(self, x: Dict[str, paddle.Tensor]): - y = {k: v for k, v in x.items()} # shallow copy to avoid modifying input dict - - for k, w in self.freqs_dict.items(): - y[k] = paddle.concat([paddle.cos(w * x[k]), paddle.sin(w * x[k])], axis=-1) - - return y - - -class FourierEmbedding(nn.Layer): - def __init__(self, in_features, out_features, scale): - super().__init__() - if out_features % 2 != 0: - raise ValueError(f"out_features must be even, but got {out_features}.") - - self.kernel = self.create_parameter( - [in_features, out_features // 2], - default_initializer=nn.initializer.Normal(std=scale), - ) - - def forward(self, x: paddle.Tensor): - y = paddle.concat( - [ - paddle.cos(x @ self.kernel), - paddle.sin(x @ self.kernel), - ], - axis=-1, - ) - return y - - -class MLP(base.Arch): - """Multi layer perceptron network. 
- - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). - num_layers (int): Number of hidden layers. - hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size. - An integer for all layers, or list of integer specify each layer's size. - activation (str, optional): Name of activation function. Defaults to "tanh". - skip_connection (bool, optional): Whether to use skip connection. Defaults to False. - weight_norm (bool, optional): Whether to apply weight norm on parameter(s). Defaults to False. - input_dim (Optional[int]): Number of input's dimension. Defaults to None. - output_dim (Optional[int]): Number of output's dimension. Defaults to None. - periods (Optional[Dict[int, Tuple[float, bool]]]): Period of each input key, - input in given channel will be period embeded if specified, each tuple of - periods list is [period, trainable]. Defaults to None. - fourier (Optional[Dict[str, Union[float, int]]]): Random fourier feature embedding, - e.g. {'dim': 256, 'scale': 1.0}. Defaults to None. - random_weight (Optional[Dict[str, float]]): Mean and std of random weight - factorization layer, e.g. {"mean": 0.5, "std: 0.1"}. Defaults to None. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.MLP( - ... input_keys=("x", "y"), - ... output_keys=("u", "v"), - ... num_layers=5, - ... hidden_size=128 - ... ) - >>> input_dict = {"x": paddle.rand([64, 1]), - ... "y": paddle.rand([64, 1])} - >>> output_dict = model(input_dict) - >>> print(output_dict["u"].shape) - [64, 1] - >>> print(output_dict["v"].shape) - [64, 1] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - num_layers: int, - hidden_size: Union[int, Tuple[int, ...]], - activation: str = "tanh", - skip_connection: bool = False, - weight_norm: bool = False, - input_dim: Optional[int] = None, - output_dim: Optional[int] = None, - periods: Optional[Dict[int, Tuple[float, bool]]] = None, - fourier: Optional[Dict[str, Union[float, int]]] = None, - random_weight: Optional[Dict[str, float]] = None, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.linears = [] - self.acts = [] - self.periods = periods - self.fourier = fourier - if periods: - self.period_emb = PeriodEmbedding(periods) - - if isinstance(hidden_size, (tuple, list)): - if num_layers is not None: - raise ValueError( - "num_layers should be None when hidden_size is specified" - ) - elif isinstance(hidden_size, int): - if not isinstance(num_layers, int): - raise ValueError( - "num_layers should be an int when hidden_size is an int" - ) - hidden_size = [hidden_size] * num_layers - else: - raise ValueError( - f"hidden_size should be list of int or int, but got {type(hidden_size)}" - ) - - # initialize FC layer(s) - cur_size = len(self.input_keys) if input_dim is None else input_dim - if input_dim is None and periods: - # period embeded channel(s) will be doubled automatically - # if input_dim is not specified - cur_size += len(periods) - - if fourier: - self.fourier_emb = FourierEmbedding( - cur_size, fourier["dim"], fourier["scale"] - ) - cur_size = fourier["dim"] - - for i, _size in enumerate(hidden_size): - if weight_norm: - self.linears.append(WeightNormLinear(cur_size, _size)) - elif random_weight: - self.linears.append( - RandomWeightFactorization( - cur_size, - _size, - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - else: - 
self.linears.append(nn.Linear(cur_size, _size)) - - # initialize activation function - self.acts.append( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(_size) - ) - # special initialization for certain activation - # TODO: Adapt code below to a more elegant style - if activation == "siren": - if i == 0: - act_mod.Siren.init_for_first_layer(self.linears[-1]) - else: - act_mod.Siren.init_for_hidden_layer(self.linears[-1]) - - cur_size = _size - - self.linears = nn.LayerList(self.linears) - self.acts = nn.LayerList(self.acts) - if random_weight: - self.last_fc = RandomWeightFactorization( - cur_size, - len(self.output_keys) if output_dim is None else output_dim, - mean=random_weight["mean"], - std=random_weight["std"], - ) - else: - self.last_fc = nn.Linear( - cur_size, - len(self.output_keys) if output_dim is None else output_dim, - ) - - self.skip_connection = skip_connection - - def forward_tensor(self, x): - y = x - skip = None - for i, linear in enumerate(self.linears): - y = linear(y) - if self.skip_connection and i % 2 == 0: - if skip is not None: - skip = y - y = y + skip - else: - skip = y - y = self.acts[i](y) - - y = self.last_fc(y) - - return y - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - if self.periods: - x = self.period_emb(x) - - y = self.concat_to_tensor(x, self.input_keys, axis=-1) - - if self.fourier: - y = self.fourier_emb(y) - - y = self.forward_tensor(y) - y = self.split_to_dict(y, self.output_keys, axis=-1) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y - - -class ModifiedMLP(base.Arch): - """Modified Multi layer perceptron network. - - Understanding and mitigating gradient pathologies in physics-informed - neural networks. https://arxiv.org/pdf/2001.04536.pdf. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). - num_layers (int): Number of hidden layers. - hidden_size (int): Number of hidden size, an integer for all layers. - activation (str, optional): Name of activation function. Defaults to "tanh". - skip_connection (bool, optional): Whether to use skip connection. Defaults to False. - weight_norm (bool, optional): Whether to apply weight norm on parameter(s). Defaults to False. - input_dim (Optional[int]): Number of input's dimension. Defaults to None. - output_dim (Optional[int]): Number of output's dimension. Defaults to None. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.ModifiedMLP( - ... input_keys=("x", "y"), - ... output_keys=("u", "v"), - ... num_layers=5, - ... hidden_size=128 - ... ) - >>> input_dict = {"x": paddle.rand([64, 1]), - ... 
"y": paddle.rand([64, 1])} - >>> output_dict = model(input_dict) - >>> print(output_dict["u"].shape) - [64, 1] - >>> print(output_dict["v"].shape) - [64, 1] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - num_layers: int, - hidden_size: int, - activation: str = "tanh", - skip_connection: bool = False, - weight_norm: bool = False, - input_dim: Optional[int] = None, - output_dim: Optional[int] = None, - periods: Optional[Dict[int, Tuple[float, bool]]] = None, - fourier: Optional[Dict[str, Union[float, int]]] = None, - random_weight: Optional[Dict[str, float]] = None, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.linears = [] - self.acts = [] - self.periods = periods - self.fourier = fourier - if periods: - self.period_emb = PeriodEmbedding(periods) - if isinstance(hidden_size, int): - if not isinstance(num_layers, int): - raise ValueError("num_layers should be an int") - hidden_size = [hidden_size] * num_layers - else: - raise ValueError(f"hidden_size should be int, but got {type(hidden_size)}") - - # initialize FC layer(s) - cur_size = len(self.input_keys) if input_dim is None else input_dim - if input_dim is None and periods: - # period embeded channel(s) will be doubled automatically - # if input_dim is not specified - cur_size += len(periods) - - if fourier: - self.fourier_emb = FourierEmbedding( - cur_size, fourier["dim"], fourier["scale"] - ) - cur_size = fourier["dim"] - - self.embed_u = nn.Sequential( - ( - WeightNormLinear(cur_size, hidden_size[0]) - if weight_norm - else ( - nn.Linear(cur_size, hidden_size[0]) - if random_weight is None - else RandomWeightFactorization( - cur_size, - hidden_size[0], - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - ), - ( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(hidden_size[0]) - ), - ) - self.embed_v = nn.Sequential( - ( - WeightNormLinear(cur_size, hidden_size[0]) - if weight_norm - else ( - nn.Linear(cur_size, hidden_size[0]) - if random_weight is None - else RandomWeightFactorization( - cur_size, - hidden_size[0], - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - ), - ( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(hidden_size[0]) - ), - ) - - for i, _size in enumerate(hidden_size): - if weight_norm: - self.linears.append(WeightNormLinear(cur_size, _size)) - elif random_weight: - self.linears.append( - RandomWeightFactorization( - cur_size, - _size, - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - else: - self.linears.append(nn.Linear(cur_size, _size)) - - # initialize activation function - self.acts.append( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(_size) - ) - # special initialization for certain activation - # TODO: Adapt code below to a more elegant style - if activation == "siren": - if i == 0: - act_mod.Siren.init_for_first_layer(self.linears[-1]) - else: - act_mod.Siren.init_for_hidden_layer(self.linears[-1]) - - cur_size = _size - - self.linears = nn.LayerList(self.linears) - self.acts = nn.LayerList(self.acts) - if random_weight: - self.last_fc = RandomWeightFactorization( - cur_size, - len(self.output_keys) if output_dim is None else output_dim, - mean=random_weight["mean"], - std=random_weight["std"], - ) - else: - self.last_fc = nn.Linear( - cur_size, - len(self.output_keys) if output_dim is None 
else output_dim, - ) - - self.skip_connection = skip_connection - - def forward_tensor(self, x): - u = self.embed_u(x) - v = self.embed_v(x) - - y = x - skip = None - for i, linear in enumerate(self.linears): - y = linear(y) - y = self.acts[i](y) - y = y * u + (1 - y) * v - if self.skip_connection and i % 2 == 0: - if skip is not None: - skip = y - y = y + skip - else: - skip = y - - y = self.last_fc(y) - - return y - - def forward(self, x): - x_identity = x - if self._input_transform is not None: - x = self._input_transform(x) - - if self.periods: - x = self.period_emb(x) - - y = self.concat_to_tensor(x, self.input_keys, axis=-1) - - if self.fourier: - y = self.fourier_emb(y) - - y = self.forward_tensor(y) - y = self.split_to_dict(y, self.output_keys, axis=-1) - - if self._output_transform is not None: - y = self._output_transform(x_identity, y) - return y - - -class PirateNetBlock(nn.Layer): - r"""Basic block of PirateNet. - - $$ - \begin{align*} - \Phi(\mathbf{x})=\left[\begin{array}{l} - \cos (\mathbf{B} \mathbf{x}) \\ - \sin (\mathbf{B} \mathbf{x}) - \end{array}\right] \\ - \mathbf{f}^{(l)} & =\sigma\left(\mathbf{W}_1^{(l)} \mathbf{x}^{(l)}+\mathbf{b}_1^{(l)}\right) \\ - \mathbf{z}_1^{(l)} & =\mathbf{f}^{(l)} \odot \mathbf{U}+\left(1-\mathbf{f}^{(l)}\right) \odot \mathbf{V} \\ - \mathbf{g}^{(l)} & =\sigma\left(\mathbf{W}_2^{(l)} \mathbf{z}_1^{(l)}+\mathbf{b}_2^{(l)}\right) \\ - \mathbf{z}_2^{(l)} & =\mathbf{g}^{(l)} \odot \mathbf{U}+\left(1-\mathbf{g}^{(l)}\right) \odot \mathbf{V} \\ - \mathbf{h}^{(l)} & =\sigma\left(\mathbf{W}_3^{(l)} \mathbf{z}_2^{(l)}+\mathbf{b}_3^{(l)}\right) \\ - \mathbf{x}^{(l+1)} & =\alpha^{(l)} \cdot \mathbf{h}^{(l)}+\left(1-\alpha^{(l)}\right) \cdot \mathbf{x}^{(l)} - \end{align*} - $$ - - Args: - embed_dim (int): Embedding dimension. - activation (str, optional): Name of activation function. Defaults to "tanh". - random_weight (Optional[Dict[str, float]]): Mean and std of random weight - factorization layer, e.g. {"mean": 0.5, "std: 0.1"}. Defaults to None. 
- """ - - def __init__( - self, - embed_dim: int, - activation: str = "tanh", - random_weight: Optional[Dict[str, float]] = None, - ): - super().__init__() - self.linear1 = ( - nn.Linear(embed_dim, embed_dim) - if random_weight is None - else RandomWeightFactorization( - embed_dim, - embed_dim, - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - self.linear2 = ( - nn.Linear(embed_dim, embed_dim) - if random_weight is None - else RandomWeightFactorization( - embed_dim, - embed_dim, - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - self.linear3 = ( - nn.Linear(embed_dim, embed_dim) - if random_weight is None - else RandomWeightFactorization( - embed_dim, - embed_dim, - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - self.alpha = self.create_parameter( - [ - 1, - ], - default_initializer=nn.initializer.Constant(0), - ) - self.act1 = ( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(embed_dim) - ) - self.act2 = ( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(embed_dim) - ) - self.act3 = ( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(embed_dim) - ) - - def forward(self, x, u, v): - f = self.act1(self.linear1(x)) - z1 = f * u + (1 - f) * v - g = self.act2(self.linear2(z1)) - z2 = g * u + (1 - g) * v - h = self.act3(self.linear3(z2)) - out = self.alpha * h + (1 - self.alpha) * x - return out - - -class PirateNet(base.Arch): - r"""PirateNet. - - [PIRATENETS: PHYSICS-INFORMED DEEP LEARNING WITHRESIDUAL ADAPTIVE NETWORKS](https://arxiv.org/pdf/2402.00326.pdf) - - $$ - \begin{align*} - \Phi(\mathbf{x}) &= \left[\begin{array}{l} - \cos (\mathbf{B} \mathbf{x}) \\ - \sin (\mathbf{B} \mathbf{x}) - \end{array}\right] \\ - \mathbf{f}^{(l)} &= \sigma\left(\mathbf{W}_1^{(l)} \mathbf{x}^{(l)}+\mathbf{b}_1^{(l)}\right) \\ - \mathbf{z}_1^{(l)} &= \mathbf{f}^{(l)} \odot \mathbf{U}+\left(1-\mathbf{f}^{(l)}\right) \odot \mathbf{V} \\ - \mathbf{g}^{(l)} &= \sigma\left(\mathbf{W}_2^{(l)} \mathbf{z}_1^{(l)}+\mathbf{b}_2^{(l)}\right) \\ - \mathbf{z}_2^{(l)} &= \mathbf{g}^{(l)} \odot \mathbf{U}+\left(1-\mathbf{g}^{(l)}\right) \odot \mathbf{V} \\ - \mathbf{h}^{(l)} &= \sigma\left(\mathbf{W}_3^{(l)} \mathbf{z}_2^{(l)}+\mathbf{b}_3^{(l)}\right) \\ - \mathbf{x}^{(l+1)} &= \text{PirateBlock}^{(l)}\left(\mathbf{x}^{(l)}\right), l=1...L-1\\ - \mathbf{u}_\theta &= \mathbf{W}^{(L+1)} \mathbf{x}^{(L)} - \end{align*} - $$ - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). - num_blocks (int): Number of PirateBlocks. - hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size. - An integer for all layers, or list of integer specify each layer's size. - activation (str, optional): Name of activation function. Defaults to "tanh". - weight_norm (bool, optional): Whether to apply weight norm on parameter(s). Defaults to False. - input_dim (Optional[int]): Number of input's dimension. Defaults to None. - output_dim (Optional[int]): Number of output's dimension. Defaults to None. - periods (Optional[Dict[int, Tuple[float, bool]]]): Period of each input key, - input in given channel will be period embeded if specified, each tuple of - periods list is [period, trainable]. Defaults to None. - fourier (Optional[Dict[str, Union[float, int]]]): Random fourier feature embedding, - e.g. 
{'dim': 256, 'scale': 1.0}. Defaults to None. - random_weight (Optional[Dict[str, float]]): Mean and std of random weight - factorization layer, e.g. {"mean": 0.5, "std: 0.1"}. Defaults to None. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.PirateNet( - ... input_keys=("x", "y"), - ... output_keys=("u", "v"), - ... num_blocks=3, - ... hidden_size=256, - ... fourier={'dim': 256, 'scale': 1.0}, - ... ) - >>> input_dict = {"x": paddle.rand([64, 1]), - ... "y": paddle.rand([64, 1])} - >>> output_dict = model(input_dict) - >>> print(output_dict["u"].shape) - [64, 1] - >>> print(output_dict["v"].shape) - [64, 1] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - num_blocks: int, - hidden_size: int, - activation: str = "tanh", - weight_norm: bool = False, - input_dim: Optional[int] = None, - output_dim: Optional[int] = None, - periods: Optional[Dict[int, Tuple[float, bool]]] = None, - fourier: Optional[Dict[str, Union[float, int]]] = None, - random_weight: Optional[Dict[str, float]] = None, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.blocks = [] - self.periods = periods - self.fourier = fourier - if periods: - self.period_emb = PeriodEmbedding(periods) - - if isinstance(hidden_size, int): - if not isinstance(num_blocks, int): - raise ValueError("num_blocks should be an int") - hidden_size = [hidden_size] * num_blocks - else: - raise ValueError(f"hidden_size should be int, but got {type(hidden_size)}") - - # initialize FC layer(s) - cur_size = len(self.input_keys) if input_dim is None else input_dim - if input_dim is None and periods: - # period embeded channel(s) will be doubled automatically - # if input_dim is not specified - cur_size += len(periods) - - if fourier: - self.fourier_emb = FourierEmbedding( - cur_size, fourier["dim"], fourier["scale"] - ) - cur_size = fourier["dim"] - - self.embed_u = nn.Sequential( - ( - WeightNormLinear(cur_size, hidden_size[0]) - if weight_norm - else ( - nn.Linear(cur_size, hidden_size[0]) - if random_weight is None - else RandomWeightFactorization( - cur_size, - hidden_size[0], - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - ), - ( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(hidden_size[0]) - ), - ) - self.embed_v = nn.Sequential( - ( - WeightNormLinear(cur_size, hidden_size[0]) - if weight_norm - else ( - nn.Linear(cur_size, hidden_size[0]) - if random_weight is None - else RandomWeightFactorization( - cur_size, - hidden_size[0], - mean=random_weight["mean"], - std=random_weight["std"], - ) - ) - ), - ( - act_mod.get_activation(activation) - if activation != "stan" - else act_mod.get_activation(activation)(hidden_size[0]) - ), - ) - - for i, _size in enumerate(hidden_size): - self.blocks.append( - PirateNetBlock( - cur_size, - activation=activation, - random_weight=random_weight, - ) - ) - cur_size = _size - - self.blocks = nn.LayerList(self.blocks) - if random_weight: - self.last_fc = RandomWeightFactorization( - cur_size, - len(self.output_keys) if output_dim is None else output_dim, - mean=random_weight["mean"], - std=random_weight["std"], - ) - else: - self.last_fc = nn.Linear( - cur_size, - len(self.output_keys) if output_dim is None else output_dim, - ) - - def forward_tensor(self, x): - u = self.embed_u(x) - v = self.embed_v(x) - - y = x - for i, block in enumerate(self.blocks): - y = block(y, u, v) - - y = self.last_fc(y) - return y - - def 
forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - if self.periods: - x = self.period_emb(x) - - y = self.concat_to_tensor(x, self.input_keys, axis=-1) - - if self.fourier: - y = self.fourier_emb(y) - - y = self.forward_tensor(y) - y = self.split_to_dict(y, self.output_keys, axis=-1) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import numpy as np +import paddle +import paddle.nn as nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base +from ppsci.utils import initializer + + +class WeightNormLinear(nn.Layer): + def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None: + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.weight_v = self.create_parameter((in_features, out_features)) + self.weight_g = self.create_parameter((out_features,)) + if bias: + self.bias = self.create_parameter((out_features,)) + else: + self.bias = None + self._init_weights() + + def _init_weights(self) -> None: + initializer.xavier_uniform_(self.weight_v) + initializer.constant_(self.weight_g, 1.0) + if self.bias is not None: + initializer.constant_(self.bias, 0.0) + + def forward(self, input): + norm = self.weight_v.norm(p=2, axis=0, keepdim=True) + weight = self.weight_g * self.weight_v / norm + return nn.functional.linear(input, weight, self.bias) + + +class RandomWeightFactorization(nn.Layer): + def __init__( + self, + in_features: int, + out_features: int, + bias: bool = True, + mean: float = 0.5, + std: float = 0.1, + ): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.weight_v = self.create_parameter((in_features, out_features)) + self.weight_g = self.create_parameter((out_features,)) + if bias: + self.bias = self.create_parameter((out_features,)) + else: + self.bias = None + + self._init_weights(mean, std) + + def _init_weights(self, mean, std): + with paddle.no_grad(): + initializer.glorot_normal_(self.weight_v) + + nn.initializer.Normal(mean, std)(self.weight_g) + paddle.assign(paddle.exp(self.weight_g), self.weight_g) + paddle.assign(self.weight_v / self.weight_g, self.weight_v) + if self.bias is not None: + initializer.constant_(self.bias, 0.0) + + self.weight_g.stop_gradient = False + self.weight_v.stop_gradient = False + self.bias.stop_gradient = False + + def forward(self, input): + return nn.functional.linear(input, self.weight_g * self.weight_v, self.bias) + + +class PeriodEmbedding(nn.Layer): + def __init__(self, periods: Dict[str, Tuple[float, bool]]): + super().__init__() + self.freqs_dict = { + k: self.create_parameter( + [], + attr=paddle.ParamAttr(trainable=trainable), + default_initializer=nn.initializer.Constant(2 * 
np.pi / float(p)), + ) # mu = 2*pi / period for sin/cos function + for k, (p, trainable) in periods.items() + } + self.freqs = nn.ParameterList(list(self.freqs_dict.values())) + + def forward(self, x: Dict[str, paddle.Tensor]): + y = {k: v for k, v in x.items()} # shallow copy to avoid modifying input dict + + for k, w in self.freqs_dict.items(): + y[k] = paddle.concat([paddle.cos(w * x[k]), paddle.sin(w * x[k])], axis=-1) + + return y + + +class FourierEmbedding(nn.Layer): + def __init__(self, in_features, out_features, scale): + super().__init__() + if out_features % 2 != 0: + raise ValueError(f"out_features must be even, but got {out_features}.") + + self.kernel = self.create_parameter( + [in_features, out_features // 2], + default_initializer=nn.initializer.Normal(std=scale), + ) + + def forward(self, x: paddle.Tensor): + y = paddle.concat( + [ + paddle.cos(x @ self.kernel), + paddle.sin(x @ self.kernel), + ], + axis=-1, + ) + return y + + +class MLP(base.Arch): + """Multi layer perceptron network. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). + output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). + num_layers (int): Number of hidden layers. + hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size. + An integer for all layers, or list of integer specify each layer's size. + activation (str, optional): Name of activation function. Defaults to "tanh". + skip_connection (bool, optional): Whether to use skip connection. Defaults to False. + weight_norm (bool, optional): Whether to apply weight norm on parameter(s). Defaults to False. + input_dim (Optional[int]): Number of input's dimension. Defaults to None. + output_dim (Optional[int]): Number of output's dimension. Defaults to None. + periods (Optional[Dict[int, Tuple[float, bool]]]): Period of each input key, + input in given channel will be period embeded if specified, each tuple of + periods list is [period, trainable]. Defaults to None. + fourier (Optional[Dict[str, Union[float, int]]]): Random fourier feature embedding, + e.g. {'dim': 256, 'scale': 1.0}. Defaults to None. + random_weight (Optional[Dict[str, float]]): Mean and std of random weight + factorization layer, e.g. {"mean": 0.5, "std: 0.1"}. Defaults to None. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.MLP( + ... input_keys=("x", "y"), + ... output_keys=("u", "v"), + ... num_layers=5, + ... hidden_size=128 + ... ) + >>> input_dict = {"x": paddle.rand([64, 1]), + ... 
"y": paddle.rand([64, 1])} + >>> output_dict = model(input_dict) + >>> print(output_dict["u"].shape) + [64, 1] + >>> print(output_dict["v"].shape) + [64, 1] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + num_layers: int, + hidden_size: Union[int, Tuple[int, ...]], + activation: str = "tanh", + skip_connection: bool = False, + weight_norm: bool = False, + input_dim: Optional[int] = None, + output_dim: Optional[int] = None, + periods: Optional[Dict[int, Tuple[float, bool]]] = None, + fourier: Optional[Dict[str, Union[float, int]]] = None, + random_weight: Optional[Dict[str, float]] = None, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.linears = [] + self.acts = [] + self.periods = periods + self.fourier = fourier + if periods: + self.period_emb = PeriodEmbedding(periods) + + if isinstance(hidden_size, (tuple, list)): + if num_layers is not None: + raise ValueError( + "num_layers should be None when hidden_size is specified" + ) + elif isinstance(hidden_size, int): + if not isinstance(num_layers, int): + raise ValueError( + "num_layers should be an int when hidden_size is an int" + ) + hidden_size = [hidden_size] * num_layers + else: + raise ValueError( + f"hidden_size should be list of int or int, but got {type(hidden_size)}" + ) + + # initialize FC layer(s) + cur_size = len(self.input_keys) if input_dim is None else input_dim + if input_dim is None and periods: + # period embeded channel(s) will be doubled automatically + # if input_dim is not specified + cur_size += len(periods) + + if fourier: + self.fourier_emb = FourierEmbedding( + cur_size, fourier["dim"], fourier["scale"] + ) + cur_size = fourier["dim"] + + for i, _size in enumerate(hidden_size): + if weight_norm: + self.linears.append(WeightNormLinear(cur_size, _size)) + elif random_weight: + self.linears.append( + RandomWeightFactorization( + cur_size, + _size, + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + else: + self.linears.append(nn.Linear(cur_size, _size)) + + # initialize activation function + self.acts.append( + act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(_size) + ) + # special initialization for certain activation + # TODO: Adapt code below to a more elegant style + if activation == "siren": + if i == 0: + act_mod.Siren.init_for_first_layer(self.linears[-1]) + else: + act_mod.Siren.init_for_hidden_layer(self.linears[-1]) + + cur_size = _size + + self.linears = nn.LayerList(self.linears) + self.acts = nn.LayerList(self.acts) + if random_weight: + self.last_fc = RandomWeightFactorization( + cur_size, + len(self.output_keys) if output_dim is None else output_dim, + mean=random_weight["mean"], + std=random_weight["std"], + ) + else: + self.last_fc = nn.Linear( + cur_size, + len(self.output_keys) if output_dim is None else output_dim, + ) + + self.skip_connection = skip_connection + + def forward_tensor(self, x): + y = x + skip = None + for i, linear in enumerate(self.linears): + y = linear(y) + if self.skip_connection and i % 2 == 0: + if skip is not None: + skip = y + y = y + skip + else: + skip = y + y = self.acts[i](y) + + y = self.last_fc(y) + + return y + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + if self.periods: + x = self.period_emb(x) + + y = self.concat_to_tensor(x, self.input_keys, axis=-1) + + if self.fourier: + y = self.fourier_emb(y) + + y = self.forward_tensor(y) + y = 
self.split_to_dict(y, self.output_keys, axis=-1) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y + + +class ModifiedMLP(base.Arch): + """Modified Multi layer perceptron network. + + Understanding and mitigating gradient pathologies in physics-informed + neural networks. https://arxiv.org/pdf/2001.04536.pdf. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). + output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). + num_layers (int): Number of hidden layers. + hidden_size (int): Number of hidden size, an integer for all layers. + activation (str, optional): Name of activation function. Defaults to "tanh". + skip_connection (bool, optional): Whether to use skip connection. Defaults to False. + weight_norm (bool, optional): Whether to apply weight norm on parameter(s). Defaults to False. + input_dim (Optional[int]): Number of input's dimension. Defaults to None. + output_dim (Optional[int]): Number of output's dimension. Defaults to None. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.ModifiedMLP( + ... input_keys=("x", "y"), + ... output_keys=("u", "v"), + ... num_layers=5, + ... hidden_size=128 + ... ) + >>> input_dict = {"x": paddle.rand([64, 1]), + ... "y": paddle.rand([64, 1])} + >>> output_dict = model(input_dict) + >>> print(output_dict["u"].shape) + [64, 1] + >>> print(output_dict["v"].shape) + [64, 1] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + num_layers: int, + hidden_size: int, + activation: str = "tanh", + skip_connection: bool = False, + weight_norm: bool = False, + input_dim: Optional[int] = None, + output_dim: Optional[int] = None, + periods: Optional[Dict[int, Tuple[float, bool]]] = None, + fourier: Optional[Dict[str, Union[float, int]]] = None, + random_weight: Optional[Dict[str, float]] = None, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.linears = [] + self.acts = [] + self.periods = periods + self.fourier = fourier + if periods: + self.period_emb = PeriodEmbedding(periods) + if isinstance(hidden_size, int): + if not isinstance(num_layers, int): + raise ValueError("num_layers should be an int") + hidden_size = [hidden_size] * num_layers + else: + raise ValueError(f"hidden_size should be int, but got {type(hidden_size)}") + + # initialize FC layer(s) + cur_size = len(self.input_keys) if input_dim is None else input_dim + if input_dim is None and periods: + # period embeded channel(s) will be doubled automatically + # if input_dim is not specified + cur_size += len(periods) + + if fourier: + self.fourier_emb = FourierEmbedding( + cur_size, fourier["dim"], fourier["scale"] + ) + cur_size = fourier["dim"] + + self.embed_u = nn.Sequential( + ( + WeightNormLinear(cur_size, hidden_size[0]) + if weight_norm + else ( + nn.Linear(cur_size, hidden_size[0]) + if random_weight is None + else RandomWeightFactorization( + cur_size, + hidden_size[0], + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + ), + ( + act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(hidden_size[0]) + ), + ) + self.embed_v = nn.Sequential( + ( + WeightNormLinear(cur_size, hidden_size[0]) + if weight_norm + else ( + nn.Linear(cur_size, hidden_size[0]) + if random_weight is None + else RandomWeightFactorization( + cur_size, + hidden_size[0], + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + ), + ( + 
act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(hidden_size[0]) + ), + ) + + for i, _size in enumerate(hidden_size): + if weight_norm: + self.linears.append(WeightNormLinear(cur_size, _size)) + elif random_weight: + self.linears.append( + RandomWeightFactorization( + cur_size, + _size, + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + else: + self.linears.append(nn.Linear(cur_size, _size)) + + # initialize activation function + self.acts.append( + act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(_size) + ) + # special initialization for certain activation + # TODO: Adapt code below to a more elegant style + if activation == "siren": + if i == 0: + act_mod.Siren.init_for_first_layer(self.linears[-1]) + else: + act_mod.Siren.init_for_hidden_layer(self.linears[-1]) + + cur_size = _size + + self.linears = nn.LayerList(self.linears) + self.acts = nn.LayerList(self.acts) + if random_weight: + self.last_fc = RandomWeightFactorization( + cur_size, + len(self.output_keys) if output_dim is None else output_dim, + mean=random_weight["mean"], + std=random_weight["std"], + ) + else: + self.last_fc = nn.Linear( + cur_size, + len(self.output_keys) if output_dim is None else output_dim, + ) + + self.skip_connection = skip_connection + + def forward_tensor(self, x): + u = self.embed_u(x) + v = self.embed_v(x) + + y = x + skip = None + for i, linear in enumerate(self.linears): + y = linear(y) + y = self.acts[i](y) + y = y * u + (1 - y) * v + if self.skip_connection and i % 2 == 0: + if skip is not None: + skip = y + y = y + skip + else: + skip = y + + y = self.last_fc(y) + + return y + + def forward(self, x): + x_identity = x + if self._input_transform is not None: + x = self._input_transform(x) + + if self.periods: + x = self.period_emb(x) + + y = self.concat_to_tensor(x, self.input_keys, axis=-1) + + if self.fourier: + y = self.fourier_emb(y) + + y = self.forward_tensor(y) + y = self.split_to_dict(y, self.output_keys, axis=-1) + + if self._output_transform is not None: + y = self._output_transform(x_identity, y) + return y + + +class PirateNetBlock(nn.Layer): + r"""Basic block of PirateNet. + + $$ + \begin{align*} + \Phi(\mathbf{x})=\left[\begin{array}{l} + \cos (\mathbf{B} \mathbf{x}) \\ + \sin (\mathbf{B} \mathbf{x}) + \end{array}\right] \\ + \mathbf{f}^{(l)} & =\sigma\left(\mathbf{W}_1^{(l)} \mathbf{x}^{(l)}+\mathbf{b}_1^{(l)}\right) \\ + \mathbf{z}_1^{(l)} & =\mathbf{f}^{(l)} \odot \mathbf{U}+\left(1-\mathbf{f}^{(l)}\right) \odot \mathbf{V} \\ + \mathbf{g}^{(l)} & =\sigma\left(\mathbf{W}_2^{(l)} \mathbf{z}_1^{(l)}+\mathbf{b}_2^{(l)}\right) \\ + \mathbf{z}_2^{(l)} & =\mathbf{g}^{(l)} \odot \mathbf{U}+\left(1-\mathbf{g}^{(l)}\right) \odot \mathbf{V} \\ + \mathbf{h}^{(l)} & =\sigma\left(\mathbf{W}_3^{(l)} \mathbf{z}_2^{(l)}+\mathbf{b}_3^{(l)}\right) \\ + \mathbf{x}^{(l+1)} & =\alpha^{(l)} \cdot \mathbf{h}^{(l)}+\left(1-\alpha^{(l)}\right) \cdot \mathbf{x}^{(l)} + \end{align*} + $$ + + Args: + embed_dim (int): Embedding dimension. + activation (str, optional): Name of activation function. Defaults to "tanh". + random_weight (Optional[Dict[str, float]]): Mean and std of random weight + factorization layer, e.g. {"mean": 0.5, "std: 0.1"}. Defaults to None. 
+ """ + + def __init__( + self, + embed_dim: int, + activation: str = "tanh", + random_weight: Optional[Dict[str, float]] = None, + ): + super().__init__() + self.linear1 = ( + nn.Linear(embed_dim, embed_dim) + if random_weight is None + else RandomWeightFactorization( + embed_dim, + embed_dim, + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + self.linear2 = ( + nn.Linear(embed_dim, embed_dim) + if random_weight is None + else RandomWeightFactorization( + embed_dim, + embed_dim, + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + self.linear3 = ( + nn.Linear(embed_dim, embed_dim) + if random_weight is None + else RandomWeightFactorization( + embed_dim, + embed_dim, + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + self.alpha = self.create_parameter( + [ + 1, + ], + default_initializer=nn.initializer.Constant(0), + ) + self.act1 = ( + act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(embed_dim) + ) + self.act2 = ( + act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(embed_dim) + ) + self.act3 = ( + act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(embed_dim) + ) + + def forward(self, x, u, v): + f = self.act1(self.linear1(x)) + z1 = f * u + (1 - f) * v + g = self.act2(self.linear2(z1)) + z2 = g * u + (1 - g) * v + h = self.act3(self.linear3(z2)) + out = self.alpha * h + (1 - self.alpha) * x + return out + + +class PirateNet(base.Arch): + r"""PirateNet. + + [PIRATENETS: PHYSICS-INFORMED DEEP LEARNING WITHRESIDUAL ADAPTIVE NETWORKS](https://arxiv.org/pdf/2402.00326.pdf) + + $$ + \begin{align*} + \Phi(\mathbf{x}) &= \left[\begin{array}{l} + \cos (\mathbf{B} \mathbf{x}) \\ + \sin (\mathbf{B} \mathbf{x}) + \end{array}\right] \\ + \mathbf{f}^{(l)} &= \sigma\left(\mathbf{W}_1^{(l)} \mathbf{x}^{(l)}+\mathbf{b}_1^{(l)}\right) \\ + \mathbf{z}_1^{(l)} &= \mathbf{f}^{(l)} \odot \mathbf{U}+\left(1-\mathbf{f}^{(l)}\right) \odot \mathbf{V} \\ + \mathbf{g}^{(l)} &= \sigma\left(\mathbf{W}_2^{(l)} \mathbf{z}_1^{(l)}+\mathbf{b}_2^{(l)}\right) \\ + \mathbf{z}_2^{(l)} &= \mathbf{g}^{(l)} \odot \mathbf{U}+\left(1-\mathbf{g}^{(l)}\right) \odot \mathbf{V} \\ + \mathbf{h}^{(l)} &= \sigma\left(\mathbf{W}_3^{(l)} \mathbf{z}_2^{(l)}+\mathbf{b}_3^{(l)}\right) \\ + \mathbf{x}^{(l+1)} &= \text{PirateBlock}^{(l)}\left(\mathbf{x}^{(l)}\right), l=1...L-1\\ + \mathbf{u}_\theta &= \mathbf{W}^{(L+1)} \mathbf{x}^{(L)} + \end{align*} + $$ + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). + output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). + num_blocks (int): Number of PirateBlocks. + hidden_size (Union[int, Tuple[int, ...]]): Number of hidden size. + An integer for all layers, or list of integer specify each layer's size. + activation (str, optional): Name of activation function. Defaults to "tanh". + weight_norm (bool, optional): Whether to apply weight norm on parameter(s). Defaults to False. + input_dim (Optional[int]): Number of input's dimension. Defaults to None. + output_dim (Optional[int]): Number of output's dimension. Defaults to None. + periods (Optional[Dict[int, Tuple[float, bool]]]): Period of each input key, + input in given channel will be period embeded if specified, each tuple of + periods list is [period, trainable]. Defaults to None. + fourier (Optional[Dict[str, Union[float, int]]]): Random fourier feature embedding, + e.g. 
{'dim': 256, 'scale': 1.0}. Defaults to None. + random_weight (Optional[Dict[str, float]]): Mean and std of random weight + factorization layer, e.g. {"mean": 0.5, "std: 0.1"}. Defaults to None. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.PirateNet( + ... input_keys=("x", "y"), + ... output_keys=("u", "v"), + ... num_blocks=3, + ... hidden_size=256, + ... fourier={'dim': 256, 'scale': 1.0}, + ... ) + >>> input_dict = {"x": paddle.rand([64, 1]), + ... "y": paddle.rand([64, 1])} + >>> output_dict = model(input_dict) + >>> print(output_dict["u"].shape) + [64, 1] + >>> print(output_dict["v"].shape) + [64, 1] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + num_blocks: int, + hidden_size: int, + activation: str = "tanh", + weight_norm: bool = False, + input_dim: Optional[int] = None, + output_dim: Optional[int] = None, + periods: Optional[Dict[int, Tuple[float, bool]]] = None, + fourier: Optional[Dict[str, Union[float, int]]] = None, + random_weight: Optional[Dict[str, float]] = None, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.blocks = [] + self.periods = periods + self.fourier = fourier + if periods: + self.period_emb = PeriodEmbedding(periods) + + if isinstance(hidden_size, int): + if not isinstance(num_blocks, int): + raise ValueError("num_blocks should be an int") + hidden_size = [hidden_size] * num_blocks + else: + raise ValueError(f"hidden_size should be int, but got {type(hidden_size)}") + + # initialize FC layer(s) + cur_size = len(self.input_keys) if input_dim is None else input_dim + if input_dim is None and periods: + # period embeded channel(s) will be doubled automatically + # if input_dim is not specified + cur_size += len(periods) + + if fourier: + self.fourier_emb = FourierEmbedding( + cur_size, fourier["dim"], fourier["scale"] + ) + cur_size = fourier["dim"] + + self.embed_u = nn.Sequential( + ( + WeightNormLinear(cur_size, hidden_size[0]) + if weight_norm + else ( + nn.Linear(cur_size, hidden_size[0]) + if random_weight is None + else RandomWeightFactorization( + cur_size, + hidden_size[0], + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + ), + ( + act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(hidden_size[0]) + ), + ) + self.embed_v = nn.Sequential( + ( + WeightNormLinear(cur_size, hidden_size[0]) + if weight_norm + else ( + nn.Linear(cur_size, hidden_size[0]) + if random_weight is None + else RandomWeightFactorization( + cur_size, + hidden_size[0], + mean=random_weight["mean"], + std=random_weight["std"], + ) + ) + ), + ( + act_mod.get_activation(activation) + if activation != "stan" + else act_mod.get_activation(activation)(hidden_size[0]) + ), + ) + + for i, _size in enumerate(hidden_size): + self.blocks.append( + PirateNetBlock( + cur_size, + activation=activation, + random_weight=random_weight, + ) + ) + cur_size = _size + + self.blocks = nn.LayerList(self.blocks) + if random_weight: + self.last_fc = RandomWeightFactorization( + cur_size, + len(self.output_keys) if output_dim is None else output_dim, + mean=random_weight["mean"], + std=random_weight["std"], + ) + else: + self.last_fc = nn.Linear( + cur_size, + len(self.output_keys) if output_dim is None else output_dim, + ) + + def forward_tensor(self, x): + u = self.embed_u(x) + v = self.embed_v(x) + + y = x + for i, block in enumerate(self.blocks): + y = block(y, u, v) + + y = self.last_fc(y) + return y + + def 
forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + if self.periods: + x = self.period_emb(x) + + y = self.concat_to_tensor(x, self.input_keys, axis=-1) + + if self.fourier: + y = self.fourier_emb(y) + + y = self.forward_tensor(y) + y = self.split_to_dict(y, self.output_keys, axis=-1) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y diff --git a/ppsci/arch/model_list.py b/ppsci/arch/model_list.py index f5f7feeb8b..38269dd51e 100644 --- a/ppsci/arch/model_list.py +++ b/ppsci/arch/model_list.py @@ -1,72 +1,72 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Tuple - -from paddle import nn - -from ppsci.arch import base - - -class ModelList(base.Arch): - """ModelList layer which wrap more than one model that shares inputs. - - Args: - model_list (Tuple[base.Arch, ...]): Model(s) nested in tuple. - - Examples: - >>> import paddle - >>> import ppsci - >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) - >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) - >>> model = ppsci.arch.ModelList((model1, model2)) - >>> input_dict = {"x": paddle.rand([64, 64, 1]),"y": paddle.rand([64, 64, 1])} - >>> output_dict = model(input_dict) - >>> for k, v in output_dict.items(): - ... print(k, v.shape) - u [64, 64, 1] - v [64, 64, 1] - w [64, 64, 1] - p [64, 64, 1] - """ - - def __init__( - self, - model_list: Tuple[base.Arch, ...], - ): - super().__init__() - self.input_keys = sum([model.input_keys for model in model_list], ()) - self.input_keys = set(self.input_keys) - - output_keys_set = set() - for model in model_list: - if len(output_keys_set & set(model.output_keys)): - raise ValueError( - "output_keys of model from model_list should be unique," - f"but got duplicate keys: {output_keys_set & set(model.output_keys)}" - ) - output_keys_set = output_keys_set | set(model.output_keys) - self.output_keys = tuple(output_keys_set) - - self.model_list = nn.LayerList(model_list) - - def forward(self, x): - y_all = {} - for model in self.model_list: - y = model(x) - y_all.update(y) - - return y_all +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
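# --- Editor's note (not part of the patch): the `fourier` and `periods` options on
# ppsci.arch.MLP / ModifiedMLP / PirateNet in ppsci/arch/mlp.py above are documented but not
# exercised by the docstring examples. A minimal, hedged usage sketch for the random Fourier
# feature path, assuming only the constructor signature shown in this diff:
import paddle
import ppsci

model = ppsci.arch.MLP(
    input_keys=("x", "y"),
    output_keys=("u", "v"),
    num_layers=5,
    hidden_size=128,
    # FourierEmbedding projects the 2 concatenated inputs to 256 features
    # via cos/sin of a random Gaussian projection scaled by `scale`.
    fourier={"dim": 256, "scale": 1.0},
)
out = model({"x": paddle.rand([64, 1]), "y": paddle.rand([64, 1])})
print(out["u"].shape)  # expected [64, 1], as in the plain-MLP docstring example above
# Note: `periods` is annotated as Dict[int, Tuple[float, bool]], yet PeriodEmbedding.forward
# indexes the input dict with those keys, which suggests input key names are intended;
# treat any concrete `periods` value as an assumption until the annotation is reconciled.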
+ +from __future__ import annotations + +from typing import Tuple + +from paddle import nn + +from ppsci.arch import base + + +class ModelList(base.Arch): + """ModelList layer which wrap more than one model that shares inputs. + + Args: + model_list (Tuple[base.Arch, ...]): Model(s) nested in tuple. + + Examples: + >>> import paddle + >>> import ppsci + >>> model1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128) + >>> model2 = ppsci.arch.MLP(("x", "y"), ("w", "p"), 5, 128) + >>> model = ppsci.arch.ModelList((model1, model2)) + >>> input_dict = {"x": paddle.rand([64, 64, 1]),"y": paddle.rand([64, 64, 1])} + >>> output_dict = model(input_dict) + >>> for k, v in output_dict.items(): + ... print(k, v.shape) + u [64, 64, 1] + v [64, 64, 1] + w [64, 64, 1] + p [64, 64, 1] + """ + + def __init__( + self, + model_list: Tuple[base.Arch, ...], + ): + super().__init__() + self.input_keys = sum([model.input_keys for model in model_list], ()) + self.input_keys = set(self.input_keys) + + output_keys_set = set() + for model in model_list: + if len(output_keys_set & set(model.output_keys)): + raise ValueError( + "output_keys of model from model_list should be unique," + f"but got duplicate keys: {output_keys_set & set(model.output_keys)}" + ) + output_keys_set = output_keys_set | set(model.output_keys) + self.output_keys = tuple(output_keys_set) + + self.model_list = nn.LayerList(model_list) + + def forward(self, x): + y_all = {} + for model in self.model_list: + y = model(x) + y_all.update(y) + + return y_all diff --git a/ppsci/arch/moflow_basic.py b/ppsci/arch/moflow_basic.py index 68f10efafb..e85ef11e94 100644 --- a/ppsci/arch/moflow_basic.py +++ b/ppsci/arch/moflow_basic.py @@ -1,297 +1,297 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
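# --- Editor's note (not part of the patch): ModelList in ppsci/arch/model_list.py above
# requires the wrapped models' output_keys to be disjoint. A small hedged sketch of that
# failure mode, assuming only the classes shown in this diff:
import ppsci

m1 = ppsci.arch.MLP(("x", "y"), ("u", "v"), 10, 128)
m2 = ppsci.arch.MLP(("x", "y"), ("u", "p"), 5, 128)  # "u" collides with m1's outputs
try:
    ppsci.arch.ModelList((m1, m2))
except ValueError as err:
    print(err)  # duplicate output key "u" is reported, per ModelList.__init__ above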
- -# Copyright 2020 Chengxi Zang - -import numpy as np -import paddle -import paddle.nn as nn -from scipy import linalg as la - - -# logabs = lambda x: paddle.log(x=paddle.abs(x=x)) -def logabs(x): - return paddle.log(paddle.abs(x)) - - -class ActNorm(nn.Layer): - def __init__(self, in_channel, logdet=True): - super().__init__() - self.loc = self.create_parameter( - [1, in_channel, 1, 1], - default_initializer=nn.initializer.Constant(value=0.0), - ) - - self.scale = self.create_parameter( - [1, in_channel, 1, 1], - default_initializer=nn.initializer.Constant(value=1.0), - ) - - self.register_buffer( - name="initialized", tensor=paddle.to_tensor(data=0, dtype="uint8") - ) - self.logdet = logdet - - def initialize(self, input): - with paddle.no_grad(): - flatten = input.transpose(perm=[1, 0, 2, 3]).reshape( - [tuple(input.shape)[1], -1] - ) - mean = ( - flatten.mean(axis=1) - .unsqueeze(axis=1) - .unsqueeze(axis=2) - .unsqueeze(axis=3) - .transpose(perm=[1, 0, 2, 3]) - ) - std = ( - flatten.std(axis=1) - .unsqueeze(axis=1) - .unsqueeze(axis=2) - .unsqueeze(axis=3) - .transpose(perm=[1, 0, 2, 3]) - ) - paddle.assign(-mean, output=self.loc.data) - paddle.assign(1 / (std + 1e-06), output=self.scale.data) - - def forward(self, input): - _, _, height, width = tuple(input.shape) - if self.initialized.item() == 0: - self.initialize(input) - self.initialized.fill_(value=1) - log_abs = logabs(self.scale) - logdet = height * width * paddle.sum(x=log_abs) - if self.logdet: - return self.scale * (input + self.loc), logdet - else: - return self.scale * (input + self.loc) - - def reverse(self, output): - return output / self.scale - self.loc - - -class ActNorm2D(nn.Layer): - def __init__(self, in_dim, logdet=True): - super().__init__() - self.loc = self.create_parameter( - [1, in_dim, 1], - default_initializer=nn.initializer.Constant(value=0.0), - ) - - self.scale = self.create_parameter( - [1, in_dim, 1], - default_initializer=nn.initializer.Constant(value=1.0), - ) - - self.register_buffer( - name="initialized", tensor=paddle.to_tensor(data=0, dtype="uint8") - ) - self.logdet = logdet - - def initialize(self, input): - with paddle.no_grad(): - flatten = input.transpose(perm=[1, 0, 2]).reshape( - [tuple(input.shape)[1], -1] - ) - mean = ( - flatten.mean(axis=1) - .unsqueeze(axis=1) - .unsqueeze(axis=2) - .transpose(perm=[1, 0, 2]) - ) - std = ( - flatten.std(axis=1) - .unsqueeze(axis=1) - .unsqueeze(axis=2) - .transpose(perm=[1, 0, 2]) - ) - paddle.assign(-mean, output=self.loc.data) - paddle.assign(1 / (std + 1e-06), output=self.scale.data) - - def forward(self, input): - _, _, height = tuple(input.shape) - if self.initialized.item() == 0: - self.initialize(input) - self.initialized.fill_(value=1) - log_abs = logabs(self.scale) - logdet = height * paddle.sum(x=log_abs) - if self.logdet: - return self.scale * (input + self.loc), logdet - else: - return self.scale * (input + self.loc) - - def reverse(self, output): - return output / self.scale - self.loc - - -class InvConv2d(nn.Layer): - def __init__(self, in_channel): - super().__init__() - weight = paddle.randn([in_channel, in_channel]) - q, _ = paddle.linalg.qr(weight) - weight = q.unsqueeze(2).unsqueeze(3) - self.weight = paddle.create_parameter( - weight.shape, - weight.numpy().dtype, - default_initializer=nn.initializer.Assign(weight), - ) - - def forward(self, input): - _, _, height, width = tuple(input.shape) - out = nn.functional.conv2d(x=input, weight=self.weight) - res = paddle.linalg.slogdet(self.weight.squeeze().astype(dtype="float64")) - 
logdet = height * width * (res[0], res[1])[1].astype(dtype="float32") - return out, logdet - - def reverse(self, output): - return nn.functional.conv2d( - x=output, - weight=self.weight.squeeze().inverse().unsqueeze(axis=2).unsqueeze(axis=3), - ) - - -class InvConv2dLU(nn.Layer): - def __init__(self, in_channel): - super().__init__() - weight = np.random.randn(in_channel, in_channel) - q, _ = la.qr(weight) - w_p, w_l, w_u = la.lu(q.astype(np.float32)) - w_s = np.diag(w_u) - w_u = np.triu(w_u, 1) - u_mask = np.triu(np.ones_like(w_u), 1) - l_mask = u_mask.T - w_p = paddle.to_tensor(data=w_p) - w_l = paddle.to_tensor(data=w_l) - w_s = paddle.to_tensor(data=w_s) - w_u = paddle.to_tensor(data=w_u) - self.register_buffer(name="w_p", tensor=w_p) - self.register_buffer(name="u_mask", tensor=paddle.to_tensor(data=u_mask)) - self.register_buffer(name="l_mask", tensor=paddle.to_tensor(data=l_mask)) - self.register_buffer(name="s_sign", tensor=paddle.sign(x=w_s)) - self.register_buffer( - name="l_eye", tensor=paddle.eye(num_rows=tuple(l_mask.shape)[0]) - ) - self.w_l = paddle.create_parameter( - w_l.shape, - w_l.numpy().dtype, - default_initializer=nn.initializer.Assign(w_l), - ) - - self.w_s = paddle.create_parameter( - logabs(w_s).shape, - logabs(w_s).numpy().dtype, - default_initializer=nn.initializer.Assign(logabs(w_s)), - ) - - self.w_u = paddle.create_parameter( - w_u.shape, - w_u.numpy().dtype, - default_initializer=nn.initializer.Assign(w_u), - ) - - def forward(self, input): - _, _, height, width = tuple(input.shape) - weight = self.calc_weight() - out = nn.functional.conv2d(x=input, weight=weight) - logdet = height * width * paddle.sum(x=self.w_s) - return out, logdet - - def calc_weight(self): - weight = ( - self.w_p - @ (self.w_l * self.l_mask + self.l_eye) - @ ( - self.w_u * self.u_mask - + paddle.diag(x=self.s_sign * paddle.exp(x=self.w_s)) - ) - ) - return weight.unsqueeze(axis=2).unsqueeze(axis=3) - - def reverse(self, output): - weight = self.calc_weight() - return nn.functional.conv2d( - x=output, - weight=weight.squeeze().inverse().unsqueeze(axis=2).unsqueeze(axis=3), - ) - - -class GraphLinear(nn.Layer): - """Graph Linear layer. - This function assumes its input is 3-dimensional. Or 4-dim or whatever, only last dim are changed - Differently from :class:`nn.Linear`, it applies an affine - transformation to the third axis of input `x`. - Warning: original Chainer.link.Link use i.i.d. Gaussian initialization as default, - while default nn.Linear initialization using init.kaiming_uniform_ - """ - - def __init__(self, in_size, out_size, bias=True): - super(GraphLinear, self).__init__() - self.in_size = in_size - self.out_size = out_size - self.linear = nn.Linear( - in_features=in_size, out_features=out_size, bias_attr=bias - ) - - def forward(self, x): - """Forward propagation. - Args: - x (:class:`chainer.Variable`, or :class:`numpy.ndarray` ): - Input array that should be a float array whose ``ndim`` is 3. - - It represents a minibatch of atoms, each of which consists - of a sequence of molecules. Each molecule is represented - by integer IDs. The first axis is an index of atoms - (i.e. minibatch dimension) and the second one an index - of molecules. - - Returns: - class:`chainer.Variable`: - A 3-dimeisional array. - - """ - h = x - h = h.reshape([-1, tuple(x.shape)[-1]]) - h = self.linear(h) - h = h.reshape(tuple(tuple(x.shape)[:-1] + (self.out_size,))) - return h - - -class GraphConv(nn.Layer): - """ - graph convolution over batch and multi-graphs - Args: - in_channels: e.g. 
8 - out_channels: e.g. 64 - num_edge_type (types of edges/bonds): e.g. 4 - return: - class:`chainer.Variable`: - """ - - def __init__(self, in_channels, out_channels, num_edge_type=4): - super(GraphConv, self).__init__() - self.graph_linear_self = GraphLinear(in_channels, out_channels) - self.graph_linear_edge = GraphLinear(in_channels, out_channels * num_edge_type) - self.num_edge_type = num_edge_type - self.in_ch = in_channels - self.out_ch = out_channels - - def forward(self, adj, h): - mb, node, ch = tuple(h.shape) - hs = self.graph_linear_self(h) - m = self.graph_linear_edge(h) - m = m.reshape([mb, node, self.out_ch, self.num_edge_type]) - m = m.transpose(perm=[0, 3, 1, 2]) - hr = paddle.matmul(x=adj, y=m) - hr = hr.sum(axis=1) - return hs + hr +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2020 Chengxi Zang + +import numpy as np +import paddle +import paddle.nn as nn +from scipy import linalg as la + + +# logabs = lambda x: paddle.log(x=paddle.abs(x=x)) +def logabs(x): + return paddle.log(paddle.abs(x)) + + +class ActNorm(nn.Layer): + def __init__(self, in_channel, logdet=True): + super().__init__() + self.loc = self.create_parameter( + [1, in_channel, 1, 1], + default_initializer=nn.initializer.Constant(value=0.0), + ) + + self.scale = self.create_parameter( + [1, in_channel, 1, 1], + default_initializer=nn.initializer.Constant(value=1.0), + ) + + self.register_buffer( + name="initialized", tensor=paddle.to_tensor(data=0, dtype="uint8") + ) + self.logdet = logdet + + def initialize(self, input): + with paddle.no_grad(): + flatten = input.transpose(perm=[1, 0, 2, 3]).reshape( + [tuple(input.shape)[1], -1] + ) + mean = ( + flatten.mean(axis=1) + .unsqueeze(axis=1) + .unsqueeze(axis=2) + .unsqueeze(axis=3) + .transpose(perm=[1, 0, 2, 3]) + ) + std = ( + flatten.std(axis=1) + .unsqueeze(axis=1) + .unsqueeze(axis=2) + .unsqueeze(axis=3) + .transpose(perm=[1, 0, 2, 3]) + ) + paddle.assign(-mean, output=self.loc.data) + paddle.assign(1 / (std + 1e-06), output=self.scale.data) + + def forward(self, input): + _, _, height, width = tuple(input.shape) + if self.initialized.item() == 0: + self.initialize(input) + self.initialized.fill_(value=1) + log_abs = logabs(self.scale) + logdet = height * width * paddle.sum(x=log_abs) + if self.logdet: + return self.scale * (input + self.loc), logdet + else: + return self.scale * (input + self.loc) + + def reverse(self, output): + return output / self.scale - self.loc + + +class ActNorm2D(nn.Layer): + def __init__(self, in_dim, logdet=True): + super().__init__() + self.loc = self.create_parameter( + [1, in_dim, 1], + default_initializer=nn.initializer.Constant(value=0.0), + ) + + self.scale = self.create_parameter( + [1, in_dim, 1], + default_initializer=nn.initializer.Constant(value=1.0), + ) + + self.register_buffer( + name="initialized", tensor=paddle.to_tensor(data=0, dtype="uint8") + ) + self.logdet = logdet + + def initialize(self, input): + with paddle.no_grad(): + flatten = 
input.transpose(perm=[1, 0, 2]).reshape( + [tuple(input.shape)[1], -1] + ) + mean = ( + flatten.mean(axis=1) + .unsqueeze(axis=1) + .unsqueeze(axis=2) + .transpose(perm=[1, 0, 2]) + ) + std = ( + flatten.std(axis=1) + .unsqueeze(axis=1) + .unsqueeze(axis=2) + .transpose(perm=[1, 0, 2]) + ) + paddle.assign(-mean, output=self.loc.data) + paddle.assign(1 / (std + 1e-06), output=self.scale.data) + + def forward(self, input): + _, _, height = tuple(input.shape) + if self.initialized.item() == 0: + self.initialize(input) + self.initialized.fill_(value=1) + log_abs = logabs(self.scale) + logdet = height * paddle.sum(x=log_abs) + if self.logdet: + return self.scale * (input + self.loc), logdet + else: + return self.scale * (input + self.loc) + + def reverse(self, output): + return output / self.scale - self.loc + + +class InvConv2d(nn.Layer): + def __init__(self, in_channel): + super().__init__() + weight = paddle.randn([in_channel, in_channel]) + q, _ = paddle.linalg.qr(weight) + weight = q.unsqueeze(2).unsqueeze(3) + self.weight = paddle.create_parameter( + weight.shape, + weight.numpy().dtype, + default_initializer=nn.initializer.Assign(weight), + ) + + def forward(self, input): + _, _, height, width = tuple(input.shape) + out = nn.functional.conv2d(x=input, weight=self.weight) + res = paddle.linalg.slogdet(self.weight.squeeze().astype(dtype="float64")) + logdet = height * width * (res[0], res[1])[1].astype(dtype="float32") + return out, logdet + + def reverse(self, output): + return nn.functional.conv2d( + x=output, + weight=self.weight.squeeze().inverse().unsqueeze(axis=2).unsqueeze(axis=3), + ) + + +class InvConv2dLU(nn.Layer): + def __init__(self, in_channel): + super().__init__() + weight = np.random.randn(in_channel, in_channel) + q, _ = la.qr(weight) + w_p, w_l, w_u = la.lu(q.astype(np.float32)) + w_s = np.diag(w_u) + w_u = np.triu(w_u, 1) + u_mask = np.triu(np.ones_like(w_u), 1) + l_mask = u_mask.T + w_p = paddle.to_tensor(data=w_p) + w_l = paddle.to_tensor(data=w_l) + w_s = paddle.to_tensor(data=w_s) + w_u = paddle.to_tensor(data=w_u) + self.register_buffer(name="w_p", tensor=w_p) + self.register_buffer(name="u_mask", tensor=paddle.to_tensor(data=u_mask)) + self.register_buffer(name="l_mask", tensor=paddle.to_tensor(data=l_mask)) + self.register_buffer(name="s_sign", tensor=paddle.sign(x=w_s)) + self.register_buffer( + name="l_eye", tensor=paddle.eye(num_rows=tuple(l_mask.shape)[0]) + ) + self.w_l = paddle.create_parameter( + w_l.shape, + w_l.numpy().dtype, + default_initializer=nn.initializer.Assign(w_l), + ) + + self.w_s = paddle.create_parameter( + logabs(w_s).shape, + logabs(w_s).numpy().dtype, + default_initializer=nn.initializer.Assign(logabs(w_s)), + ) + + self.w_u = paddle.create_parameter( + w_u.shape, + w_u.numpy().dtype, + default_initializer=nn.initializer.Assign(w_u), + ) + + def forward(self, input): + _, _, height, width = tuple(input.shape) + weight = self.calc_weight() + out = nn.functional.conv2d(x=input, weight=weight) + logdet = height * width * paddle.sum(x=self.w_s) + return out, logdet + + def calc_weight(self): + weight = ( + self.w_p + @ (self.w_l * self.l_mask + self.l_eye) + @ ( + self.w_u * self.u_mask + + paddle.diag(x=self.s_sign * paddle.exp(x=self.w_s)) + ) + ) + return weight.unsqueeze(axis=2).unsqueeze(axis=3) + + def reverse(self, output): + weight = self.calc_weight() + return nn.functional.conv2d( + x=output, + weight=weight.squeeze().inverse().unsqueeze(axis=2).unsqueeze(axis=3), + ) + + +class GraphLinear(nn.Layer): + """Graph Linear layer. 
+ This function assumes its input is 3-dimensional. Or 4-dim or whatever, only last dim are changed + Differently from :class:`nn.Linear`, it applies an affine + transformation to the third axis of input `x`. + Warning: original Chainer.link.Link use i.i.d. Gaussian initialization as default, + while default nn.Linear initialization using init.kaiming_uniform_ + """ + + def __init__(self, in_size, out_size, bias=True): + super(GraphLinear, self).__init__() + self.in_size = in_size + self.out_size = out_size + self.linear = nn.Linear( + in_features=in_size, out_features=out_size, bias_attr=bias + ) + + def forward(self, x): + """Forward propagation. + Args: + x (:class:`chainer.Variable`, or :class:`numpy.ndarray` ): + Input array that should be a float array whose ``ndim`` is 3. + + It represents a minibatch of atoms, each of which consists + of a sequence of molecules. Each molecule is represented + by integer IDs. The first axis is an index of atoms + (i.e. minibatch dimension) and the second one an index + of molecules. + + Returns: + class:`chainer.Variable`: + A 3-dimeisional array. + + """ + h = x + h = h.reshape([-1, tuple(x.shape)[-1]]) + h = self.linear(h) + h = h.reshape(tuple(tuple(x.shape)[:-1] + (self.out_size,))) + return h + + +class GraphConv(nn.Layer): + """ + graph convolution over batch and multi-graphs + Args: + in_channels: e.g. 8 + out_channels: e.g. 64 + num_edge_type (types of edges/bonds): e.g. 4 + return: + class:`chainer.Variable`: + """ + + def __init__(self, in_channels, out_channels, num_edge_type=4): + super(GraphConv, self).__init__() + self.graph_linear_self = GraphLinear(in_channels, out_channels) + self.graph_linear_edge = GraphLinear(in_channels, out_channels * num_edge_type) + self.num_edge_type = num_edge_type + self.in_ch = in_channels + self.out_ch = out_channels + + def forward(self, adj, h): + mb, node, ch = tuple(h.shape) + hs = self.graph_linear_self(h) + m = self.graph_linear_edge(h) + m = m.reshape([mb, node, self.out_ch, self.num_edge_type]) + m = m.transpose(perm=[0, 3, 1, 2]) + hr = paddle.matmul(x=adj, y=m) + hr = hr.sum(axis=1) + return hs + hr diff --git a/ppsci/arch/moflow_glow.py b/ppsci/arch/moflow_glow.py index 5fbeb71520..e580ef43e9 100644 --- a/ppsci/arch/moflow_glow.py +++ b/ppsci/arch/moflow_glow.py @@ -1,477 +1,477 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Copyright 2020 Chengxi Zang - -import warnings - -import paddle -import paddle.nn as nn - -from ppsci.arch.moflow_basic import ActNorm -from ppsci.arch.moflow_basic import ActNorm2D -from ppsci.arch.moflow_basic import GraphConv -from ppsci.arch.moflow_basic import GraphLinear -from ppsci.arch.moflow_basic import InvConv2d -from ppsci.arch.moflow_basic import InvConv2dLU - -warnings.filterwarnings( - "ignore", message="when training, we now always track global mean and variance." 
-) - - -class AffineCoupling(nn.Layer): - def __init__(self, in_channel, hidden_channels, affine=True, mask_swap=False): - super(AffineCoupling, self).__init__() - self.affine = affine - self.layers = nn.LayerList() - self.norms = nn.LayerList() - self.mask_swap = mask_swap - last_h = in_channel // 2 - if affine: - vh = tuple(hidden_channels) + (in_channel,) - else: - vh = tuple(hidden_channels) + (in_channel // 2,) - for h in vh: - self.layers.append( - nn.Conv2D(in_channels=last_h, out_channels=h, kernel_size=3, padding=1) - ) - self.norms.append(nn.BatchNorm2D(num_features=h)) - last_h = h - - def forward(self, input): - in_a, in_b = input.chunk(chunks=2, axis=1) - if self.mask_swap: - in_a, in_b = in_b, in_a - if self.affine: - s, t = self._s_t_function(in_a) - out_b = (in_b + t) * s - logdet = paddle.sum( - x=paddle.log(x=paddle.abs(x=s)).reshape([tuple(input.shape)[0], -1]), - axis=1, - ) - else: - _, t = self._s_t_function(in_a) - out_b = in_b + t - logdet = None - if self.mask_swap: - result = paddle.concat(x=[out_b, in_a], axis=1) - else: - result = paddle.concat(x=[in_a, out_b], axis=1) - return result, logdet - - def reverse(self, output): - out_a, out_b = output.chunk(chunks=2, axis=1) - if self.mask_swap: - out_a, out_b = out_b, out_a - if self.affine: - s, t = self._s_t_function(out_a) - in_b = out_b / s - t - else: - _, t = self._s_t_function(out_a) - in_b = out_b - t - if self.mask_swap: - result = paddle.concat(x=[in_b, out_a], axis=1) - else: - result = paddle.concat(x=[out_a, in_b], axis=1) - return result - - def _s_t_function(self, x): - h = x - for i in range(len(self.layers) - 1): - h = self.layers[i](h) - h = self.norms[i](h) - h = nn.functional.relu(x=h) - h = self.layers[-1](h) - s = None - if self.affine: - log_s, t = h.chunk(chunks=2, axis=1) - s = nn.functional.sigmoid(x=log_s) - else: - t = h - return s, t - - -class GraphAffineCoupling(nn.Layer): - def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row, affine=True): - super(GraphAffineCoupling, self).__init__() - self.n_node = n_node - self.in_dim = in_dim - self.hidden_dim_dict = hidden_dim_dict - self.masked_row = masked_row - self.affine = affine - self.hidden_dim_gnn = hidden_dim_dict["gnn"] - self.hidden_dim_linear = hidden_dim_dict["linear"] - self.net = nn.LayerList() - self.norm = nn.LayerList() - last_dim = in_dim - for out_dim in self.hidden_dim_gnn: - self.net.append(GraphConv(last_dim, out_dim)) - self.norm.append(nn.BatchNorm1D(num_features=n_node)) - last_dim = out_dim - self.net_lin = nn.LayerList() - self.norm_lin = nn.LayerList() - for out_dim in self.hidden_dim_linear: - self.net_lin.append(GraphLinear(last_dim, out_dim)) - self.norm_lin.append(nn.BatchNorm1D(num_features=n_node)) - last_dim = out_dim - if affine: - self.net_lin.append(GraphLinear(last_dim, in_dim * 2)) - else: - self.net_lin.append(GraphLinear(last_dim, in_dim)) - self.scale = paddle.create_parameter( - paddle.zeros(shape=[1]).shape, - paddle.zeros(shape=[1]).numpy().dtype, - default_initializer=nn.initializer.Assign(paddle.zeros(shape=[1])), - ) - - mask = paddle.ones(shape=[n_node, in_dim]) - mask[masked_row, :] = 0 - self.register_buffer(name="mask", tensor=mask) - - def forward(self, adj, input): - masked_x = self.mask * input - s, t = self._s_t_function(adj, masked_x) - if self.affine: - out = masked_x + (1 - self.mask) * (input + t) * s - logdet = paddle.sum( - x=paddle.log(x=paddle.abs(x=s)).reshape([tuple(input.shape)[0], -1]), - axis=1, - ) - else: - out = masked_x + t * (1 - self.mask) - logdet = None - 
return out, logdet - - def reverse(self, adj, output): - masked_y = self.mask * output - s, t = self._s_t_function(adj, masked_y) - if self.affine: - input = masked_y + (1 - self.mask) * (output / s - t) - else: - input = masked_y + (1 - self.mask) * (output - t) - return input - - def _s_t_function(self, adj, x): - s = None - h = x - for i in range(len(self.net)): - h = self.net[i](adj, h) - h = self.norm[i](h) - h = nn.functional.relu(x=h) - for i in range(len(self.net_lin) - 1): - h = self.net_lin[i](h) - h = self.norm_lin[i](h) - h = nn.functional.relu(x=h) - h = self.net_lin[-1](h) - if self.affine: - log_s, t = h.chunk(chunks=2, axis=-1) - s = nn.functional.sigmoid(x=log_s) - else: - t = h - return s, t - - -class Flow(nn.Layer): - def __init__( - self, in_channel, hidden_channels, affine=True, conv_lu=2, mask_swap=False - ): - super(Flow, self).__init__() - self.actnorm = ActNorm(in_channel) - if conv_lu == 0: - self.invconv = InvConv2d(in_channel) - elif conv_lu == 1: - self.invconv = InvConv2dLU(in_channel) - elif conv_lu == 2: - self.invconv = None - else: - raise ValueError( - "conv_lu in {0,1,2}, 0:InvConv2d, 1:InvConv2dLU, 2:none-just swap to update in coupling" - ) - self.coupling = AffineCoupling( - in_channel, hidden_channels, affine=affine, mask_swap=mask_swap - ) - - def forward(self, input): - out, logdet = self.actnorm(input) - if self.invconv: - out, det1 = self.invconv(out) - else: - det1 = 0 - out, det2 = self.coupling(out) - logdet = logdet + det1 - if det2 is not None: - logdet = logdet + det2 - return out, logdet - - def reverse(self, output): - input = self.coupling.reverse(output) - if self.invconv: - input = self.invconv.reverse(input) - input = self.actnorm.reverse(input) - return input - - -class FlowOnGraph(nn.Layer): - def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row, affine=True): - super(FlowOnGraph, self).__init__() - self.n_node = n_node - self.in_dim = in_dim - self.hidden_dim_dict = hidden_dim_dict - self.masked_row = masked_row - self.affine = affine - self.actnorm = ActNorm2D(in_dim=n_node) - self.coupling = GraphAffineCoupling( - n_node, in_dim, hidden_dim_dict, masked_row, affine=affine - ) - - def forward(self, adj, input): - out, logdet = self.actnorm(input) - det1 = 0 - out, det2 = self.coupling(adj, out) - logdet = logdet + det1 - if det2 is not None: - logdet = logdet + det2 - return out, logdet - - def reverse(self, adj, output): - input = self.coupling.reverse(adj, output) - input = self.actnorm.reverse(input) - return input - - -class Block(nn.Layer): - def __init__( - self, in_channel, n_flow, squeeze_fold, hidden_channels, affine=True, conv_lu=2 - ): - super(Block, self).__init__() - self.squeeze_fold = squeeze_fold - squeeze_dim = in_channel * self.squeeze_fold * self.squeeze_fold - self.flows = nn.LayerList() - for i in range(n_flow): - if conv_lu in (0, 1): - self.flows.append( - Flow( - squeeze_dim, - hidden_channels, - affine=affine, - conv_lu=conv_lu, - mask_swap=False, - ) - ) - else: - self.flows.append( - Flow( - squeeze_dim, - hidden_channels, - affine=affine, - conv_lu=2, - mask_swap=bool(i % 2), - ) - ) - - def forward(self, input): - out = self._squeeze(input) - logdet = 0 - for flow in self.flows: - out, det = flow(out) - logdet = logdet + det - out = self._unsqueeze(out) - return out, logdet - - def reverse(self, output): - input = self._squeeze(output) - for flow in self.flows[::-1]: - input = flow.reverse(input) - unsqueezed = self._unsqueeze(input) - return unsqueezed - - def _squeeze(self, x): - """Trade 
spatial extent for channels. In forward direction, convert each - 1x4x4 volume of input into a 4x1x1 volume of output. - - Args: - x (paddle.Tensor): Input to squeeze or unsqueeze. - reverse (bool): Reverse the operation, i.e., unsqueeze. - - Returns: - x (paddle.Tensor): Squeezed or unsqueezed tensor. - """ - assert len(tuple(x.shape)) == 4 - b_size, n_channel, height, width = tuple(x.shape) - fold = self.squeeze_fold - squeezed = x.reshape( - [b_size, n_channel, height // fold, fold, width // fold, fold] - ) - squeezed = squeezed.transpose(perm=[0, 1, 3, 5, 2, 4]).contiguous() - out = squeezed.reshape( - [b_size, n_channel * fold * fold, height // fold, width // fold] - ) - return out - - def _unsqueeze(self, x): - assert len(tuple(x.shape)) == 4 - b_size, n_channel, height, width = tuple(x.shape) - fold = self.squeeze_fold - unsqueezed = x.reshape( - [b_size, n_channel // (fold * fold), fold, fold, height, width] - ) - unsqueezed = unsqueezed.transpose(perm=[0, 1, 4, 2, 5, 3]).contiguous() - out = unsqueezed.reshape( - [b_size, n_channel // (fold * fold), height * fold, width * fold] - ) - return out - - -class BlockOnGraph(nn.Layer): - def __init__( - self, - n_node, - in_dim, - hidden_dim_dict, - n_flow, - mask_row_size=1, - mask_row_stride=1, - affine=True, - ): - """ - - :param n_node: - :param in_dim: - :param hidden_dim: - :param n_flow: - :param mask_row_size: number of rows to be masked for update - :param mask_row_stride: number of steps between two masks' firs row - :param affine: - """ - super(BlockOnGraph, self).__init__() - assert 0 < mask_row_size < n_node - self.flows = nn.LayerList() - for i in range(n_flow): - start = i * mask_row_stride - masked_row = [(r % n_node) for r in range(start, start + mask_row_size)] - self.flows.append( - FlowOnGraph( - n_node, - in_dim, - hidden_dim_dict, - masked_row=masked_row, - affine=affine, - ) - ) - - def forward(self, adj, input): - out = input - logdet = 0 - for flow in self.flows: - out, det = flow(adj, out) - logdet = logdet + det - return out, logdet - - def reverse(self, adj, output): - input = output - for flow in self.flows[::-1]: - input = flow.reverse(adj, input) - return input - - -class Glow(nn.Layer): - def __init__( - self, - in_channel, - n_flow, - n_block, - squeeze_fold, - hidden_channel, - affine=True, - conv_lu=2, - ): - super(Glow, self).__init__() - self.blocks = nn.LayerList() - n_channel = in_channel - for i in range(n_block): - self.blocks.append( - Block( - n_channel, - n_flow, - squeeze_fold, - hidden_channel, - affine=affine, - conv_lu=conv_lu, - ) - ) - - def forward(self, input): - logdet = 0 - out = input - for block in self.blocks: - out, det = block(out) - logdet = logdet + det - return out, logdet - - def reverse(self, z): - h = z - for i, block in enumerate(self.blocks[::-1]): - h = block.reverse(h) - return h - - -class GlowOnGraph(nn.Layer): - def __init__( - self, - n_node, - in_dim, - hidden_dim_dict, - n_flow, - n_block, - mask_row_size_list=[2], - mask_row_stride_list=[1], - affine=True, - ): - super(GlowOnGraph, self).__init__() - assert len(mask_row_size_list) == n_block or len(mask_row_size_list) == 1 - assert len(mask_row_stride_list) == n_block or len(mask_row_stride_list) == 1 - if len(mask_row_size_list) == 1: - mask_row_size_list = mask_row_size_list * n_block - if len(mask_row_stride_list) == 1: - mask_row_stride_list = mask_row_stride_list * n_block - self.blocks = nn.LayerList() - for i in range(n_block): - mask_row_size = mask_row_size_list[i] - mask_row_stride = 
mask_row_stride_list[i] - self.blocks.append( - BlockOnGraph( - n_node, - in_dim, - hidden_dim_dict, - n_flow, - mask_row_size, - mask_row_stride, - affine=affine, - ) - ) - - def forward(self, adj, x): - logdet = 0 - out = x - for block in self.blocks: - out, det = block(adj, out) - logdet = logdet + det - return out, logdet - - def reverse(self, adj, z): - input = z - for i, block in enumerate(self.blocks[::-1]): - input = block.reverse(adj, input) - return input +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2020 Chengxi Zang + +import warnings + +import paddle +import paddle.nn as nn + +from ppsci.arch.moflow_basic import ActNorm +from ppsci.arch.moflow_basic import ActNorm2D +from ppsci.arch.moflow_basic import GraphConv +from ppsci.arch.moflow_basic import GraphLinear +from ppsci.arch.moflow_basic import InvConv2d +from ppsci.arch.moflow_basic import InvConv2dLU + +warnings.filterwarnings( + "ignore", message="when training, we now always track global mean and variance." +) + + +class AffineCoupling(nn.Layer): + def __init__(self, in_channel, hidden_channels, affine=True, mask_swap=False): + super(AffineCoupling, self).__init__() + self.affine = affine + self.layers = nn.LayerList() + self.norms = nn.LayerList() + self.mask_swap = mask_swap + last_h = in_channel // 2 + if affine: + vh = tuple(hidden_channels) + (in_channel,) + else: + vh = tuple(hidden_channels) + (in_channel // 2,) + for h in vh: + self.layers.append( + nn.Conv2D(in_channels=last_h, out_channels=h, kernel_size=3, padding=1) + ) + self.norms.append(nn.BatchNorm2D(num_features=h)) + last_h = h + + def forward(self, input): + in_a, in_b = input.chunk(chunks=2, axis=1) + if self.mask_swap: + in_a, in_b = in_b, in_a + if self.affine: + s, t = self._s_t_function(in_a) + out_b = (in_b + t) * s + logdet = paddle.sum( + x=paddle.log(x=paddle.abs(x=s)).reshape([tuple(input.shape)[0], -1]), + axis=1, + ) + else: + _, t = self._s_t_function(in_a) + out_b = in_b + t + logdet = None + if self.mask_swap: + result = paddle.concat(x=[out_b, in_a], axis=1) + else: + result = paddle.concat(x=[in_a, out_b], axis=1) + return result, logdet + + def reverse(self, output): + out_a, out_b = output.chunk(chunks=2, axis=1) + if self.mask_swap: + out_a, out_b = out_b, out_a + if self.affine: + s, t = self._s_t_function(out_a) + in_b = out_b / s - t + else: + _, t = self._s_t_function(out_a) + in_b = out_b - t + if self.mask_swap: + result = paddle.concat(x=[in_b, out_a], axis=1) + else: + result = paddle.concat(x=[out_a, in_b], axis=1) + return result + + def _s_t_function(self, x): + h = x + for i in range(len(self.layers) - 1): + h = self.layers[i](h) + h = self.norms[i](h) + h = nn.functional.relu(x=h) + h = self.layers[-1](h) + s = None + if self.affine: + log_s, t = h.chunk(chunks=2, axis=1) + s = nn.functional.sigmoid(x=log_s) + else: + t = h + return s, t + + +class GraphAffineCoupling(nn.Layer): + def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row, 
affine=True): + super(GraphAffineCoupling, self).__init__() + self.n_node = n_node + self.in_dim = in_dim + self.hidden_dim_dict = hidden_dim_dict + self.masked_row = masked_row + self.affine = affine + self.hidden_dim_gnn = hidden_dim_dict["gnn"] + self.hidden_dim_linear = hidden_dim_dict["linear"] + self.net = nn.LayerList() + self.norm = nn.LayerList() + last_dim = in_dim + for out_dim in self.hidden_dim_gnn: + self.net.append(GraphConv(last_dim, out_dim)) + self.norm.append(nn.BatchNorm1D(num_features=n_node)) + last_dim = out_dim + self.net_lin = nn.LayerList() + self.norm_lin = nn.LayerList() + for out_dim in self.hidden_dim_linear: + self.net_lin.append(GraphLinear(last_dim, out_dim)) + self.norm_lin.append(nn.BatchNorm1D(num_features=n_node)) + last_dim = out_dim + if affine: + self.net_lin.append(GraphLinear(last_dim, in_dim * 2)) + else: + self.net_lin.append(GraphLinear(last_dim, in_dim)) + self.scale = paddle.create_parameter( + paddle.zeros(shape=[1]).shape, + paddle.zeros(shape=[1]).numpy().dtype, + default_initializer=nn.initializer.Assign(paddle.zeros(shape=[1])), + ) + + mask = paddle.ones(shape=[n_node, in_dim]) + mask[masked_row, :] = 0 + self.register_buffer(name="mask", tensor=mask) + + def forward(self, adj, input): + masked_x = self.mask * input + s, t = self._s_t_function(adj, masked_x) + if self.affine: + out = masked_x + (1 - self.mask) * (input + t) * s + logdet = paddle.sum( + x=paddle.log(x=paddle.abs(x=s)).reshape([tuple(input.shape)[0], -1]), + axis=1, + ) + else: + out = masked_x + t * (1 - self.mask) + logdet = None + return out, logdet + + def reverse(self, adj, output): + masked_y = self.mask * output + s, t = self._s_t_function(adj, masked_y) + if self.affine: + input = masked_y + (1 - self.mask) * (output / s - t) + else: + input = masked_y + (1 - self.mask) * (output - t) + return input + + def _s_t_function(self, adj, x): + s = None + h = x + for i in range(len(self.net)): + h = self.net[i](adj, h) + h = self.norm[i](h) + h = nn.functional.relu(x=h) + for i in range(len(self.net_lin) - 1): + h = self.net_lin[i](h) + h = self.norm_lin[i](h) + h = nn.functional.relu(x=h) + h = self.net_lin[-1](h) + if self.affine: + log_s, t = h.chunk(chunks=2, axis=-1) + s = nn.functional.sigmoid(x=log_s) + else: + t = h + return s, t + + +class Flow(nn.Layer): + def __init__( + self, in_channel, hidden_channels, affine=True, conv_lu=2, mask_swap=False + ): + super(Flow, self).__init__() + self.actnorm = ActNorm(in_channel) + if conv_lu == 0: + self.invconv = InvConv2d(in_channel) + elif conv_lu == 1: + self.invconv = InvConv2dLU(in_channel) + elif conv_lu == 2: + self.invconv = None + else: + raise ValueError( + "conv_lu in {0,1,2}, 0:InvConv2d, 1:InvConv2dLU, 2:none-just swap to update in coupling" + ) + self.coupling = AffineCoupling( + in_channel, hidden_channels, affine=affine, mask_swap=mask_swap + ) + + def forward(self, input): + out, logdet = self.actnorm(input) + if self.invconv: + out, det1 = self.invconv(out) + else: + det1 = 0 + out, det2 = self.coupling(out) + logdet = logdet + det1 + if det2 is not None: + logdet = logdet + det2 + return out, logdet + + def reverse(self, output): + input = self.coupling.reverse(output) + if self.invconv: + input = self.invconv.reverse(input) + input = self.actnorm.reverse(input) + return input + + +class FlowOnGraph(nn.Layer): + def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row, affine=True): + super(FlowOnGraph, self).__init__() + self.n_node = n_node + self.in_dim = in_dim + self.hidden_dim_dict = 
hidden_dim_dict + self.masked_row = masked_row + self.affine = affine + self.actnorm = ActNorm2D(in_dim=n_node) + self.coupling = GraphAffineCoupling( + n_node, in_dim, hidden_dim_dict, masked_row, affine=affine + ) + + def forward(self, adj, input): + out, logdet = self.actnorm(input) + det1 = 0 + out, det2 = self.coupling(adj, out) + logdet = logdet + det1 + if det2 is not None: + logdet = logdet + det2 + return out, logdet + + def reverse(self, adj, output): + input = self.coupling.reverse(adj, output) + input = self.actnorm.reverse(input) + return input + + +class Block(nn.Layer): + def __init__( + self, in_channel, n_flow, squeeze_fold, hidden_channels, affine=True, conv_lu=2 + ): + super(Block, self).__init__() + self.squeeze_fold = squeeze_fold + squeeze_dim = in_channel * self.squeeze_fold * self.squeeze_fold + self.flows = nn.LayerList() + for i in range(n_flow): + if conv_lu in (0, 1): + self.flows.append( + Flow( + squeeze_dim, + hidden_channels, + affine=affine, + conv_lu=conv_lu, + mask_swap=False, + ) + ) + else: + self.flows.append( + Flow( + squeeze_dim, + hidden_channels, + affine=affine, + conv_lu=2, + mask_swap=bool(i % 2), + ) + ) + + def forward(self, input): + out = self._squeeze(input) + logdet = 0 + for flow in self.flows: + out, det = flow(out) + logdet = logdet + det + out = self._unsqueeze(out) + return out, logdet + + def reverse(self, output): + input = self._squeeze(output) + for flow in self.flows[::-1]: + input = flow.reverse(input) + unsqueezed = self._unsqueeze(input) + return unsqueezed + + def _squeeze(self, x): + """Trade spatial extent for channels. In forward direction, convert each + 1x4x4 volume of input into a 4x1x1 volume of output. + + Args: + x (paddle.Tensor): Input to squeeze or unsqueeze. + reverse (bool): Reverse the operation, i.e., unsqueeze. + + Returns: + x (paddle.Tensor): Squeezed or unsqueezed tensor. 
+ """ + assert len(tuple(x.shape)) == 4 + b_size, n_channel, height, width = tuple(x.shape) + fold = self.squeeze_fold + squeezed = x.reshape( + [b_size, n_channel, height // fold, fold, width // fold, fold] + ) + squeezed = squeezed.transpose(perm=[0, 1, 3, 5, 2, 4]).contiguous() + out = squeezed.reshape( + [b_size, n_channel * fold * fold, height // fold, width // fold] + ) + return out + + def _unsqueeze(self, x): + assert len(tuple(x.shape)) == 4 + b_size, n_channel, height, width = tuple(x.shape) + fold = self.squeeze_fold + unsqueezed = x.reshape( + [b_size, n_channel // (fold * fold), fold, fold, height, width] + ) + unsqueezed = unsqueezed.transpose(perm=[0, 1, 4, 2, 5, 3]).contiguous() + out = unsqueezed.reshape( + [b_size, n_channel // (fold * fold), height * fold, width * fold] + ) + return out + + +class BlockOnGraph(nn.Layer): + def __init__( + self, + n_node, + in_dim, + hidden_dim_dict, + n_flow, + mask_row_size=1, + mask_row_stride=1, + affine=True, + ): + """ + + :param n_node: + :param in_dim: + :param hidden_dim: + :param n_flow: + :param mask_row_size: number of rows to be masked for update + :param mask_row_stride: number of steps between two masks' firs row + :param affine: + """ + super(BlockOnGraph, self).__init__() + assert 0 < mask_row_size < n_node + self.flows = nn.LayerList() + for i in range(n_flow): + start = i * mask_row_stride + masked_row = [(r % n_node) for r in range(start, start + mask_row_size)] + self.flows.append( + FlowOnGraph( + n_node, + in_dim, + hidden_dim_dict, + masked_row=masked_row, + affine=affine, + ) + ) + + def forward(self, adj, input): + out = input + logdet = 0 + for flow in self.flows: + out, det = flow(adj, out) + logdet = logdet + det + return out, logdet + + def reverse(self, adj, output): + input = output + for flow in self.flows[::-1]: + input = flow.reverse(adj, input) + return input + + +class Glow(nn.Layer): + def __init__( + self, + in_channel, + n_flow, + n_block, + squeeze_fold, + hidden_channel, + affine=True, + conv_lu=2, + ): + super(Glow, self).__init__() + self.blocks = nn.LayerList() + n_channel = in_channel + for i in range(n_block): + self.blocks.append( + Block( + n_channel, + n_flow, + squeeze_fold, + hidden_channel, + affine=affine, + conv_lu=conv_lu, + ) + ) + + def forward(self, input): + logdet = 0 + out = input + for block in self.blocks: + out, det = block(out) + logdet = logdet + det + return out, logdet + + def reverse(self, z): + h = z + for i, block in enumerate(self.blocks[::-1]): + h = block.reverse(h) + return h + + +class GlowOnGraph(nn.Layer): + def __init__( + self, + n_node, + in_dim, + hidden_dim_dict, + n_flow, + n_block, + mask_row_size_list=[2], + mask_row_stride_list=[1], + affine=True, + ): + super(GlowOnGraph, self).__init__() + assert len(mask_row_size_list) == n_block or len(mask_row_size_list) == 1 + assert len(mask_row_stride_list) == n_block or len(mask_row_stride_list) == 1 + if len(mask_row_size_list) == 1: + mask_row_size_list = mask_row_size_list * n_block + if len(mask_row_stride_list) == 1: + mask_row_stride_list = mask_row_stride_list * n_block + self.blocks = nn.LayerList() + for i in range(n_block): + mask_row_size = mask_row_size_list[i] + mask_row_stride = mask_row_stride_list[i] + self.blocks.append( + BlockOnGraph( + n_node, + in_dim, + hidden_dim_dict, + n_flow, + mask_row_size, + mask_row_stride, + affine=affine, + ) + ) + + def forward(self, adj, x): + logdet = 0 + out = x + for block in self.blocks: + out, det = block(adj, out) + logdet = logdet + det + return out, 
logdet + + def reverse(self, adj, z): + input = z + for i, block in enumerate(self.blocks[::-1]): + input = block.reverse(adj, input) + return input diff --git a/ppsci/arch/moflow_net.py b/ppsci/arch/moflow_net.py index c2f88607bc..021df4a613 100644 --- a/ppsci/arch/moflow_net.py +++ b/ppsci/arch/moflow_net.py @@ -1,335 +1,335 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Copyright 2020 Chengxi Zang -from __future__ import annotations - -import math -from typing import Dict -from typing import Tuple - -import paddle - -from ppsci.arch import base -from ppsci.arch.moflow_glow import Glow -from ppsci.arch.moflow_glow import GlowOnGraph - - -def gaussian_nll(x, mean, ln_var, reduce="sum"): - """Computes the negative log-likelihood of a Gaussian distribution. - - Given two variable ``mean`` representing :math:`\\mu` and ``ln_var`` - representing :math:`\\log(\\sigma^2)`, this function computes in - elementwise manner the negative log-likelihood of :math:`x` on a - Gaussian distribution :math:`N(\\mu, S)`, - - .. math:: - - -\\log N(x; \\mu, \\sigma^2) = - \\log\\left(\\sqrt{(2\\pi)^D |S|}\\right) + - \\frac{1}{2}(x - \\mu)^\\top S^{-1}(x - \\mu), - - where :math:`D` is a dimension of :math:`x` and :math:`S` is a diagonal - matrix where :math:`S_{ii} = \\sigma_i^2`. - - The output is a variable whose value depends on the value of - the option ``reduce``. If it is ``'no'``, it holds the elementwise - loss values. If it is ``'sum'``, loss values are summed up. - - Args: - x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable. - mean (:class:`~chainer.Variable` or :ref:`ndarray`): A variable - representing mean of a Gaussian distribution, :math:`\\mu`. - ln_var (:class:`~chainer.Variable` or :ref:`ndarray`): A variable - representing logarithm of variance of a Gaussian distribution, - :math:`\\log(\\sigma^2)`. - reduce (str): Reduction option. Its value must be either - ``'sum'`` or ``'no'``. Otherwise, :class:`ValueError` is raised. - - Returns: - ~chainer.Variable: - A variable representing the negative log-likelihood. - If ``reduce`` is ``'no'``, the output variable holds array - whose shape is same as one of (hence both of) input variables. - If it is ``'sum'``, the output variable holds a scalar value. 
- - """ - if reduce not in ("sum", "no"): - raise ValueError( - "only 'sum' and 'no' are valid for 'reduce', but '%s' is given" % reduce - ) - x_prec = paddle.exp(x=-ln_var) - x_diff = x - mean - x_power = x_diff * x_diff * x_prec * -0.5 - loss = (ln_var + math.log(2 * math.pi)) / 2 - x_power - if reduce == "sum": - return loss.sum() - else: - return loss - - -def rescale_adj(adj, type="all"): - if type == "view": - out_degree = adj.sum(axis=-1) - out_degree_sqrt_inv = out_degree.pow(y=-1) - out_degree_sqrt_inv[out_degree_sqrt_inv == float("inf")] = 0 - adj_prime = out_degree_sqrt_inv.unsqueeze(axis=-1) * adj - else: - num_neighbors = adj.sum(axis=(1, 2)).astype(dtype="float32") - num_neighbors_inv = num_neighbors.pow(y=-1) - num_neighbors_inv[num_neighbors_inv == float("inf")] = 0 - adj_prime = num_neighbors_inv[:, None, None, :] * adj - return adj_prime - - -class MoFlowNet(base.Arch): - """ - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("nodes","edges",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output","sum_log_det"). - hyper_params (object): More parameters derived from hyper_params for easy use. - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - hyper_params: None, - ): - super(MoFlowNet, self).__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.hyper_params = hyper_params - self.b_n_type = hyper_params.b_n_type - self.a_n_node = hyper_params.a_n_node - self.a_n_type = hyper_params.a_n_type - self.b_size = self.a_n_node * self.a_n_node * self.b_n_type - self.a_size = self.a_n_node * self.a_n_type - self.noise_scale = hyper_params.noise_scale - if hyper_params.learn_dist: - self.ln_var = paddle.create_parameter( - paddle.zeros(shape=[1]).shape, - paddle.zeros(shape=[1]).numpy().dtype, - default_initializer=paddle.nn.initializer.Assign( - paddle.zeros(shape=[1]) - ), - ) - - else: - self.register_buffer(name="ln_var", tensor=paddle.zeros(shape=[1])) - self.bond_model = Glow( - in_channel=hyper_params.b_n_type, - n_flow=hyper_params.b_n_flow, - n_block=hyper_params.b_n_block, - squeeze_fold=hyper_params.b_n_squeeze, - hidden_channel=hyper_params.b_hidden_ch, - affine=hyper_params.b_affine, - conv_lu=hyper_params.b_conv_lu, - ) - self.atom_model = GlowOnGraph( - n_node=hyper_params.a_n_node, - in_dim=hyper_params.a_n_type, - hidden_dim_dict={ - "gnn": hyper_params.a_hidden_gnn, - "linear": hyper_params.a_hidden_lin, - }, - n_flow=hyper_params.a_n_flow, - n_block=hyper_params.a_n_block, - mask_row_size_list=hyper_params.mask_row_size_list, - mask_row_stride_list=hyper_params.mask_row_stride_list, - affine=hyper_params.a_affine, - ) - - def forward(self, x): - h = x[self.input_keys[0]] - adj = x[self.input_keys[1]] - adj_normalized = rescale_adj(adj).to(adj) - - if self.training: - if self.noise_scale == 0: - h = h / 2.0 - 0.5 + paddle.rand(shape=h.shape, dtype=h.dtype) * 0.4 - else: - h = h + paddle.rand(shape=h.shape, dtype=h.dtype) * self.noise_scale - h, sum_log_det_jacs_x = self.atom_model(adj_normalized, h) - if self.training: - if self.noise_scale == 0: - adj = ( - adj / 2.0 - - 0.5 - + paddle.rand(shape=adj.shape, dtype=adj.dtype) * 0.4 - ) - else: - adj = ( - adj - + paddle.rand(shape=adj.shape, dtype=adj.dtype) * self.noise_scale - ) - adj_h, sum_log_det_jacs_adj = self.bond_model(adj) - out = [h, adj_h] - result_dict = { - self.output_keys[0]: out, - self.output_keys[1]: [sum_log_det_jacs_x, sum_log_det_jacs_adj], - } - - return result_dict - - def 
reverse(self, z, true_adj=None): - """ - Returns a molecule, given its latent vector. - - Args: - z: latent vector. Shape: [B, N*N*M + N*T] (100,369) 369=9*9 * 4 + 9*5 - B = Batch size, N = number of atoms, M = number of bond types, - T = number of atom types (Carbon, Oxygen etc.) - true_adj: used for testing. An adjacency matrix of a real molecule - - return: - adjacency matrix and feature matrix of a molecule - """ - batch_size = tuple(z.shape)[0] - with paddle.no_grad(): - z_x = z[:, : self.a_size] - z_adj = z[:, self.a_size :] - if true_adj is None: - h_adj = z_adj.reshape( - [batch_size, self.b_n_type, self.a_n_node, self.a_n_node] - ) - h_adj = self.bond_model.reverse(h_adj) - if self.noise_scale == 0: - h_adj = (h_adj + 0.5) * 2 - adj = h_adj - adj = adj + adj.transpose(perm=[0, 1, 3, 2]) - adj = adj / 2 - adj = paddle.nn.functional.softmax(adj, axis=1) - max_bond = adj.max(axis=1).reshape( - [batch_size, -1, self.a_n_node, self.a_n_node] - ) - adj = paddle.floor(x=adj / max_bond) - else: - adj = true_adj - h_x = z_x.reshape([batch_size, self.a_n_node, self.a_n_type]) - adj_normalized = rescale_adj(adj).to(h_x) - h_x = self.atom_model.reverse(adj_normalized, h_x) - if self.noise_scale == 0: - h_x = (h_x + 0.5) * 2 - return adj, h_x - - def log_prob_loss(self, output_dict: Dict, *args): - losses = 0 - z = output_dict[self.output_keys[0]] - logdet = output_dict[self.output_keys[1]] - z[0] = z[0].reshape([tuple(z[0].shape)[0], -1]) - z[1] = z[1].reshape([tuple(z[1].shape)[0], -1]) - logdet[0] = logdet[0] - self.a_size * math.log(2.0) - logdet[1] = logdet[1] - self.b_size * math.log(2.0) - if len(self.ln_var) == 1: - ln_var_adj = self.ln_var * paddle.ones(shape=[self.b_size]).to(z[0]) - ln_var_x = self.ln_var * paddle.ones(shape=[self.a_size]).to(z[0]) - else: - ln_var_adj = self.ln_var[0] * paddle.ones(shape=[self.b_size]).to(z[0]) - ln_var_x = self.ln_var[1] * paddle.ones(shape=[self.a_size]).to(z[0]) - nll_adj = paddle.mean( - paddle.sum( - gaussian_nll( - z[1], - paddle.zeros(shape=self.b_size).to(z[0]), - ln_var_adj, - reduce="no", - ), - axis=1, - ) - - logdet[1] - ) - nll_adj = nll_adj / (self.b_size * math.log(2.0)) - nll_x = paddle.mean( - paddle.sum( - gaussian_nll( - z[0], - paddle.zeros(shape=self.a_size).to(z[0]), - ln_var_x, - reduce="no", - ), - axis=1, - ) - - logdet[0] - ) - nll_x = nll_x / (self.a_size * math.log(2.0)) - if nll_x.item() < 0: - print(f"nll_x: {nll_x.item()}") - losses = nll_x + nll_adj - return {"total_loss": losses} - - def save_hyperparams(self, path): - self.hyper_params.save(path) - - -class MoFlowProp(base.Arch): - """ - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("nodes","edges",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output","sum_log_det"). - model (MoFlowNet): pre-trained model. - hidden_size (int): Hidden dimension list for output regression. 
- """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - model: MoFlowNet, - hidden_size, - ): - super(MoFlowProp, self).__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.model = model - self.latent_size = model.b_size + model.a_size - self.hidden_size = hidden_size - vh = (self.latent_size,) + tuple(hidden_size) + (1,) - modules = [] - for i in range(len(vh) - 1): - modules.append(paddle.nn.Linear(in_features=vh[i], out_features=vh[i + 1])) - if i < len(vh) - 2: - modules.append(paddle.nn.Tanh()) - self.propNN = paddle.nn.Sequential(*modules) - - def encode(self, x): - with paddle.no_grad(): - self.model.eval() - output_dict = self.model(x) - z = output_dict["output"] - sum_log_det_jacs = output_dict["sum_log_det"] - h = paddle.concat( - [ - z[0].reshape([tuple(z[0].shape)[0], -1]), - z[1].reshape([tuple(z[1].shape)[0], -1]), - ], - axis=1, - ) - return h, sum_log_det_jacs - - def reverse(self, z): - with paddle.no_grad(): - self.model.eval() - adj, x = self.model.reverse(z, true_adj=None) - return adj, x - - def forward(self, x): - h, sum_log_det_jacs = self.encode(x) - output = self.propNN(h) - result_dict = { - self.output_keys[0]: [h, output], - self.output_keys[1]: sum_log_det_jacs, - } - - return result_dict +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2020 Chengxi Zang +from __future__ import annotations + +import math +from typing import Dict +from typing import Tuple + +import paddle + +from ppsci.arch import base +from ppsci.arch.moflow_glow import Glow +from ppsci.arch.moflow_glow import GlowOnGraph + + +def gaussian_nll(x, mean, ln_var, reduce="sum"): + """Computes the negative log-likelihood of a Gaussian distribution. + + Given two variable ``mean`` representing :math:`\\mu` and ``ln_var`` + representing :math:`\\log(\\sigma^2)`, this function computes in + elementwise manner the negative log-likelihood of :math:`x` on a + Gaussian distribution :math:`N(\\mu, S)`, + + .. math:: + + -\\log N(x; \\mu, \\sigma^2) = + \\log\\left(\\sqrt{(2\\pi)^D |S|}\\right) + + \\frac{1}{2}(x - \\mu)^\\top S^{-1}(x - \\mu), + + where :math:`D` is a dimension of :math:`x` and :math:`S` is a diagonal + matrix where :math:`S_{ii} = \\sigma_i^2`. + + The output is a variable whose value depends on the value of + the option ``reduce``. If it is ``'no'``, it holds the elementwise + loss values. If it is ``'sum'``, loss values are summed up. + + Args: + x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable. + mean (:class:`~chainer.Variable` or :ref:`ndarray`): A variable + representing mean of a Gaussian distribution, :math:`\\mu`. + ln_var (:class:`~chainer.Variable` or :ref:`ndarray`): A variable + representing logarithm of variance of a Gaussian distribution, + :math:`\\log(\\sigma^2)`. + reduce (str): Reduction option. Its value must be either + ``'sum'`` or ``'no'``. Otherwise, :class:`ValueError` is raised. 
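+
+ For example, ``gaussian_nll(x, 0.0, 0.0, reduce="no")`` evaluates
+ elementwise to ``0.5 * log(2 * pi) + 0.5 * x ** 2``, the
+ standard-normal negative log-likelihood.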
+ + Returns: + ~chainer.Variable: + A variable representing the negative log-likelihood. + If ``reduce`` is ``'no'``, the output variable holds array + whose shape is same as one of (hence both of) input variables. + If it is ``'sum'``, the output variable holds a scalar value. + + """ + if reduce not in ("sum", "no"): + raise ValueError( + "only 'sum' and 'no' are valid for 'reduce', but '%s' is given" % reduce + ) + x_prec = paddle.exp(x=-ln_var) + x_diff = x - mean + x_power = x_diff * x_diff * x_prec * -0.5 + loss = (ln_var + math.log(2 * math.pi)) / 2 - x_power + if reduce == "sum": + return loss.sum() + else: + return loss + + +def rescale_adj(adj, type="all"): + if type == "view": + out_degree = adj.sum(axis=-1) + out_degree_sqrt_inv = out_degree.pow(y=-1) + out_degree_sqrt_inv[out_degree_sqrt_inv == float("inf")] = 0 + adj_prime = out_degree_sqrt_inv.unsqueeze(axis=-1) * adj + else: + num_neighbors = adj.sum(axis=(1, 2)).astype(dtype="float32") + num_neighbors_inv = num_neighbors.pow(y=-1) + num_neighbors_inv[num_neighbors_inv == float("inf")] = 0 + adj_prime = num_neighbors_inv[:, None, None, :] * adj + return adj_prime + + +class MoFlowNet(base.Arch): + """ + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("nodes","edges",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output","sum_log_det"). + hyper_params (object): More parameters derived from hyper_params for easy use. + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + hyper_params: None, + ): + super(MoFlowNet, self).__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.hyper_params = hyper_params + self.b_n_type = hyper_params.b_n_type + self.a_n_node = hyper_params.a_n_node + self.a_n_type = hyper_params.a_n_type + self.b_size = self.a_n_node * self.a_n_node * self.b_n_type + self.a_size = self.a_n_node * self.a_n_type + self.noise_scale = hyper_params.noise_scale + if hyper_params.learn_dist: + self.ln_var = paddle.create_parameter( + paddle.zeros(shape=[1]).shape, + paddle.zeros(shape=[1]).numpy().dtype, + default_initializer=paddle.nn.initializer.Assign( + paddle.zeros(shape=[1]) + ), + ) + + else: + self.register_buffer(name="ln_var", tensor=paddle.zeros(shape=[1])) + self.bond_model = Glow( + in_channel=hyper_params.b_n_type, + n_flow=hyper_params.b_n_flow, + n_block=hyper_params.b_n_block, + squeeze_fold=hyper_params.b_n_squeeze, + hidden_channel=hyper_params.b_hidden_ch, + affine=hyper_params.b_affine, + conv_lu=hyper_params.b_conv_lu, + ) + self.atom_model = GlowOnGraph( + n_node=hyper_params.a_n_node, + in_dim=hyper_params.a_n_type, + hidden_dim_dict={ + "gnn": hyper_params.a_hidden_gnn, + "linear": hyper_params.a_hidden_lin, + }, + n_flow=hyper_params.a_n_flow, + n_block=hyper_params.a_n_block, + mask_row_size_list=hyper_params.mask_row_size_list, + mask_row_stride_list=hyper_params.mask_row_stride_list, + affine=hyper_params.a_affine, + ) + + def forward(self, x): + h = x[self.input_keys[0]] + adj = x[self.input_keys[1]] + adj_normalized = rescale_adj(adj).to(adj) + + if self.training: + if self.noise_scale == 0: + h = h / 2.0 - 0.5 + paddle.rand(shape=h.shape, dtype=h.dtype) * 0.4 + else: + h = h + paddle.rand(shape=h.shape, dtype=h.dtype) * self.noise_scale + h, sum_log_det_jacs_x = self.atom_model(adj_normalized, h) + if self.training: + if self.noise_scale == 0: + adj = ( + adj / 2.0 + - 0.5 + + paddle.rand(shape=adj.shape, dtype=adj.dtype) * 0.4 + ) + else: + adj = ( + adj + + 
paddle.rand(shape=adj.shape, dtype=adj.dtype) * self.noise_scale + ) + adj_h, sum_log_det_jacs_adj = self.bond_model(adj) + out = [h, adj_h] + result_dict = { + self.output_keys[0]: out, + self.output_keys[1]: [sum_log_det_jacs_x, sum_log_det_jacs_adj], + } + + return result_dict + + def reverse(self, z, true_adj=None): + """ + Returns a molecule, given its latent vector. + + Args: + z: latent vector. Shape: [B, N*N*M + N*T] (100,369) 369=9*9 * 4 + 9*5 + B = Batch size, N = number of atoms, M = number of bond types, + T = number of atom types (Carbon, Oxygen etc.) + true_adj: used for testing. An adjacency matrix of a real molecule + + return: + adjacency matrix and feature matrix of a molecule + """ + batch_size = tuple(z.shape)[0] + with paddle.no_grad(): + z_x = z[:, : self.a_size] + z_adj = z[:, self.a_size :] + if true_adj is None: + h_adj = z_adj.reshape( + [batch_size, self.b_n_type, self.a_n_node, self.a_n_node] + ) + h_adj = self.bond_model.reverse(h_adj) + if self.noise_scale == 0: + h_adj = (h_adj + 0.5) * 2 + adj = h_adj + adj = adj + adj.transpose(perm=[0, 1, 3, 2]) + adj = adj / 2 + adj = paddle.nn.functional.softmax(adj, axis=1) + max_bond = adj.max(axis=1).reshape( + [batch_size, -1, self.a_n_node, self.a_n_node] + ) + adj = paddle.floor(x=adj / max_bond) + else: + adj = true_adj + h_x = z_x.reshape([batch_size, self.a_n_node, self.a_n_type]) + adj_normalized = rescale_adj(adj).to(h_x) + h_x = self.atom_model.reverse(adj_normalized, h_x) + if self.noise_scale == 0: + h_x = (h_x + 0.5) * 2 + return adj, h_x + + def log_prob_loss(self, output_dict: Dict, *args): + losses = 0 + z = output_dict[self.output_keys[0]] + logdet = output_dict[self.output_keys[1]] + z[0] = z[0].reshape([tuple(z[0].shape)[0], -1]) + z[1] = z[1].reshape([tuple(z[1].shape)[0], -1]) + logdet[0] = logdet[0] - self.a_size * math.log(2.0) + logdet[1] = logdet[1] - self.b_size * math.log(2.0) + if len(self.ln_var) == 1: + ln_var_adj = self.ln_var * paddle.ones(shape=[self.b_size]).to(z[0]) + ln_var_x = self.ln_var * paddle.ones(shape=[self.a_size]).to(z[0]) + else: + ln_var_adj = self.ln_var[0] * paddle.ones(shape=[self.b_size]).to(z[0]) + ln_var_x = self.ln_var[1] * paddle.ones(shape=[self.a_size]).to(z[0]) + nll_adj = paddle.mean( + paddle.sum( + gaussian_nll( + z[1], + paddle.zeros(shape=self.b_size).to(z[0]), + ln_var_adj, + reduce="no", + ), + axis=1, + ) + - logdet[1] + ) + nll_adj = nll_adj / (self.b_size * math.log(2.0)) + nll_x = paddle.mean( + paddle.sum( + gaussian_nll( + z[0], + paddle.zeros(shape=self.a_size).to(z[0]), + ln_var_x, + reduce="no", + ), + axis=1, + ) + - logdet[0] + ) + nll_x = nll_x / (self.a_size * math.log(2.0)) + if nll_x.item() < 0: + print(f"nll_x: {nll_x.item()}") + losses = nll_x + nll_adj + return {"total_loss": losses} + + def save_hyperparams(self, path): + self.hyper_params.save(path) + + +class MoFlowProp(base.Arch): + """ + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("nodes","edges",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output","sum_log_det"). + model (MoFlowNet): pre-trained model. + hidden_size (int): Hidden dimension list for output regression. 
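+
+ For example, ``hidden_size=[128, 64]`` builds the regression head
+ ``Linear(model.b_size + model.a_size, 128) -> Tanh -> Linear(128, 64) ->
+ Tanh -> Linear(64, 1)``, applied to the latent code produced by the
+ pre-trained MoFlowNet (run under ``no_grad`` in ``encode``).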
+ """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + model: MoFlowNet, + hidden_size, + ): + super(MoFlowProp, self).__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.model = model + self.latent_size = model.b_size + model.a_size + self.hidden_size = hidden_size + vh = (self.latent_size,) + tuple(hidden_size) + (1,) + modules = [] + for i in range(len(vh) - 1): + modules.append(paddle.nn.Linear(in_features=vh[i], out_features=vh[i + 1])) + if i < len(vh) - 2: + modules.append(paddle.nn.Tanh()) + self.propNN = paddle.nn.Sequential(*modules) + + def encode(self, x): + with paddle.no_grad(): + self.model.eval() + output_dict = self.model(x) + z = output_dict["output"] + sum_log_det_jacs = output_dict["sum_log_det"] + h = paddle.concat( + [ + z[0].reshape([tuple(z[0].shape)[0], -1]), + z[1].reshape([tuple(z[1].shape)[0], -1]), + ], + axis=1, + ) + return h, sum_log_det_jacs + + def reverse(self, z): + with paddle.no_grad(): + self.model.eval() + adj, x = self.model.reverse(z, true_adj=None) + return adj, x + + def forward(self, x): + h, sum_log_det_jacs = self.encode(x) + output = self.propNN(h) + result_dict = { + self.output_keys[0]: [h, output], + self.output_keys[1]: sum_log_det_jacs, + } + + return result_dict diff --git a/ppsci/arch/nowcastnet.py b/ppsci/arch/nowcastnet.py index bc7538ad91..61bd065fc2 100644 --- a/ppsci/arch/nowcastnet.py +++ b/ppsci/arch/nowcastnet.py @@ -1,639 +1,639 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import collections -from typing import Tuple - -import paddle -from paddle import nn - -from ppsci.arch import base - - -class NowcastNet(base.Arch): - """The NowcastNet model. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). - input_length (int, optional): Input length. Defaults to 9. - total_length (int, optional): Total length. Defaults to 29. - image_height (int, optional): Image height. Defaults to 512. - image_width (int, optional): Image width. Defaults to 512. - image_ch (int, optional): Image channel. Defaults to 2. - ngf (int, optional): Noise Projector input length. Defaults to 32. 
- - Examples: - >>> import ppsci - >>> model = ppsci.arch.NowcastNet(("input", ), ("output", )) - >>> input_data = paddle.rand([1, 9, 512, 512, 2]) - >>> input_dict = {"input": input_data} - >>> output_dict = model(input_dict) - >>> print(output_dict["output"].shape) - [1, 20, 512, 512, 1] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - input_length: int = 9, - total_length: int = 29, - image_height: int = 512, - image_width: int = 512, - image_ch: int = 2, - ngf: int = 32, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - - self.input_length = input_length - self.total_length = total_length - self.image_height = image_height - self.image_width = image_width - self.image_ch = image_ch - self.ngf = ngf - - configs = collections.namedtuple( - "Object", ["ngf", "evo_ic", "gen_oc", "ic_feature"] - ) - configs.ngf = self.ngf - configs.evo_ic = self.total_length - self.input_length - configs.gen_oc = self.total_length - self.input_length - configs.ic_feature = self.ngf * 10 - - self.pred_length = self.total_length - self.input_length - self.evo_net = Evolution_Network(self.input_length, self.pred_length, base_c=32) - self.gen_enc = Generative_Encoder(self.total_length, base_c=self.ngf) - self.gen_dec = Generative_Decoder(configs) - self.proj = Noise_Projector(self.ngf) - sample_tensor = paddle.zeros(shape=[1, 1, self.image_height, self.image_width]) - self.grid = make_grid(sample_tensor) - - @staticmethod - def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): - return {key: data_tensors[i] for i, key in enumerate(keys)} - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - x_tensor = self.concat_to_tensor(x, self.input_keys) - - y = [] - out = self.forward_tensor(x_tensor) - y.append(out) - y = self.split_to_dict(y, self.output_keys) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y - - def forward_tensor(self, x): - all_frames = x[:, :, :, :, :1] - frames = all_frames.transpose(perm=[0, 1, 4, 2, 3]) - batch = frames.shape[0] - height = frames.shape[3] - width = frames.shape[4] - # Input Frames - input_frames = frames[:, : self.input_length] - input_frames = input_frames.reshape((batch, self.input_length, height, width)) - # Evolution Network - intensity, motion = self.evo_net(input_frames) - motion_ = motion.reshape((batch, self.pred_length, 2, height, width)) - intensity_ = intensity.reshape((batch, self.pred_length, 1, height, width)) - series = [] - last_frames = all_frames[:, self.input_length - 1 : self.input_length, :, :, 0] - grid = self.grid.tile((batch, 1, 1, 1)) - for i in range(self.pred_length): - last_frames = warp( - last_frames, motion_[:, i], grid, mode="nearest", padding_mode="border" - ) - last_frames = last_frames + intensity_[:, i] - series.append(last_frames) - evo_result = paddle.concat(x=series, axis=1) - evo_result = evo_result / 128 - # Generative Network - evo_feature = self.gen_enc(paddle.concat(x=[input_frames, evo_result], axis=1)) - noise = paddle.randn(shape=[batch, self.ngf, height // 32, width // 32]) - noise = self.proj(noise) - ngf = noise.shape[1] - noise_feature = ( - noise.reshape((batch, -1, 4, 4, 8, 8)) - .transpose(perm=[0, 1, 4, 5, 2, 3]) - .reshape((batch, ngf // 16, height // 8, width // 8)) - ) - feature = paddle.concat(x=[evo_feature, noise_feature], axis=1) - gen_result = self.gen_dec(feature, evo_result) - return gen_result.unsqueeze(axis=-1) - 
- -class Evolution_Network(nn.Layer): - def __init__(self, n_channels, n_classes, base_c=64, bilinear=True): - super().__init__() - self.n_channels = n_channels - self.n_classes = n_classes - self.bilinear = bilinear - base_c = base_c - self.inc = DoubleConv(n_channels, base_c) - self.down1 = Down(base_c * 1, base_c * 2) - self.down2 = Down(base_c * 2, base_c * 4) - self.down3 = Down(base_c * 4, base_c * 8) - factor = 2 if bilinear else 1 - self.down4 = Down(base_c * 8, base_c * 16 // factor) - self.up1 = Up(base_c * 16, base_c * 8 // factor, bilinear) - self.up2 = Up(base_c * 8, base_c * 4 // factor, bilinear) - self.up3 = Up(base_c * 4, base_c * 2 // factor, bilinear) - self.up4 = Up(base_c * 2, base_c * 1, bilinear) - self.outc = OutConv(base_c * 1, n_classes) - param1 = paddle.zeros(shape=[1, n_classes, 1, 1]) - gamma = self.create_parameter( - shape=param1.shape, - dtype=param1.dtype, - default_initializer=nn.initializer.Assign(param1), - ) - gamma.stop_gradient = False - self.gamma = gamma - self.up1_v = Up(base_c * 16, base_c * 8 // factor, bilinear) - self.up2_v = Up(base_c * 8, base_c * 4 // factor, bilinear) - self.up3_v = Up(base_c * 4, base_c * 2 // factor, bilinear) - self.up4_v = Up(base_c * 2, base_c * 1, bilinear) - self.outc_v = OutConv(base_c * 1, n_classes * 2) - - def forward(self, x): - x1 = self.inc(x) - x2 = self.down1(x1) - x3 = self.down2(x2) - x4 = self.down3(x3) - x5 = self.down4(x4) - x = self.up1(x5, x4) - x = self.up2(x, x3) - x = self.up3(x, x2) - x = self.up4(x, x1) - x = self.outc(x) * self.gamma - v = self.up1_v(x5, x4) - v = self.up2_v(v, x3) - v = self.up3_v(v, x2) - v = self.up4_v(v, x1) - v = self.outc_v(v) - return x, v - - -class DoubleConv(nn.Layer): - def __init__(self, in_channels, out_channels, kernel=3, mid_channels=None): - super().__init__() - if not mid_channels: - mid_channels = out_channels - self.double_conv = nn.Sequential( - nn.BatchNorm2D(num_features=in_channels), - nn.ReLU(), - nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=kernel, - padding=kernel // 2, - ) - ), - nn.BatchNorm2D(num_features=mid_channels), - nn.ReLU(), - nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=kernel, - padding=kernel // 2, - ) - ), - ) - self.single_conv = nn.Sequential( - nn.BatchNorm2D(num_features=in_channels), - nn.utils.spectral_norm( - layer=nn.Conv2D( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel, - padding=kernel // 2, - ) - ), - ) - - def forward(self, x): - shortcut = self.single_conv(x) - x = self.double_conv(x) - x = x + shortcut - return x - - -class Down(nn.Layer): - def __init__(self, in_channels, out_channels, kernel=3): - super().__init__() - self.maxpool_conv = nn.Sequential( - nn.MaxPool2D(kernel_size=2), - DoubleConv(in_channels, out_channels, kernel), - ) - - def forward(self, x): - x = self.maxpool_conv(x) - return x - - -class Up(nn.Layer): - def __init__(self, in_channels, out_channels, bilinear=True, kernel=3): - super().__init__() - if bilinear: - self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) - self.conv = DoubleConv( - in_channels, out_channels, kernel=kernel, mid_channels=in_channels // 2 - ) - else: - self.up = nn.Conv2DTranspose( - in_channels=in_channels, - out_channels=in_channels // 2, - kernel_size=2, - stride=2, - ) - self.conv = DoubleConv(in_channels, out_channels, kernel) - - def forward(self, x1, x2): - x1 = self.up(x1) - # input 
is CHW - diffY = x2.shape[2] - x1.shape[2] - diffX = x2.shape[3] - x1.shape[3] - x1 = nn.functional.pad( - x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2] - ) - x = paddle.concat(x=[x2, x1], axis=1) - return self.conv(x) - - -class Up_S(nn.Layer): - def __init__(self, in_channels, out_channels, bilinear=True, kernel=3): - super().__init__() - if bilinear: - self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) - self.conv = DoubleConv( - in_channels, out_channels, kernel=kernel, mid_channels=in_channels - ) - else: - self.up = nn.Conv2DTranspose( - in_channels=in_channels, - out_channels=in_channels, - kernel_size=2, - stride=2, - ) - self.conv = DoubleConv(in_channels, out_channels, kernel) - - def forward(self, x): - x = self.up(x) - return self.conv(x) - - -class OutConv(nn.Layer): - def __init__(self, in_channels, out_channels): - super().__init__() - self.conv = nn.Conv2D( - in_channels=in_channels, out_channels=out_channels, kernel_size=1 - ) - - def forward(self, x): - return self.conv(x) - - -class Generative_Encoder(nn.Layer): - def __init__(self, n_channels, base_c=64): - super().__init__() - base_c = base_c - self.inc = DoubleConv(n_channels, base_c, kernel=3) - self.down1 = Down(base_c * 1, base_c * 2, 3) - self.down2 = Down(base_c * 2, base_c * 4, 3) - self.down3 = Down(base_c * 4, base_c * 8, 3) - - def forward(self, x): - x = self.inc(x) - x = self.down1(x) - x = self.down2(x) - x = self.down3(x) - return x - - -class Generative_Decoder(nn.Layer): - def __init__(self, opt): - super().__init__() - self.opt = opt - nf = opt.ngf - ic = opt.ic_feature - self.fc = nn.Conv2D( - in_channels=ic, out_channels=8 * nf, kernel_size=3, padding=1 - ) - self.head_0 = GenBlock(8 * nf, 8 * nf, opt) - self.G_middle_0 = GenBlock(8 * nf, 4 * nf, opt, double_conv=True) - self.G_middle_1 = GenBlock(4 * nf, 4 * nf, opt, double_conv=True) - self.up_0 = GenBlock(4 * nf, 2 * nf, opt) - self.up_1 = GenBlock(2 * nf, 1 * nf, opt, double_conv=True) - self.up_2 = GenBlock(1 * nf, 1 * nf, opt, double_conv=True) - final_nc = nf * 1 - self.conv_img = nn.Conv2D( - in_channels=final_nc, out_channels=self.opt.gen_oc, kernel_size=3, padding=1 - ) - self.up = nn.Upsample(scale_factor=2) - - def forward(self, x, evo): - x = self.fc(x) - x = self.head_0(x, evo) - x = self.up(x) - x = self.G_middle_0(x, evo) - x = self.G_middle_1(x, evo) - x = self.up(x) - x = self.up_0(x, evo) - x = self.up(x) - x = self.up_1(x, evo) - x = self.up_2(x, evo) - x = self.conv_img(nn.functional.leaky_relu(x=x, negative_slope=0.2)) - return x - - -class GenBlock(nn.Layer): - def __init__(self, fin, fout, opt, use_se=False, dilation=1, double_conv=False): - super().__init__() - self.learned_shortcut = fin != fout - fmiddle = min(fin, fout) - self.opt = opt - self.double_conv = double_conv - self.pad = nn.Pad2D(padding=dilation, mode="reflect") - self.conv_0 = nn.Conv2D( - in_channels=fin, - out_channels=fmiddle, - kernel_size=3, - padding=0, - dilation=dilation, - ) - self.conv_1 = nn.Conv2D( - in_channels=fmiddle, - out_channels=fout, - kernel_size=3, - padding=0, - dilation=dilation, - ) - if self.learned_shortcut: - self.conv_s = nn.Conv2D( - in_channels=fin, out_channels=fout, kernel_size=1, bias_attr=False - ) - self.conv_0 = nn.utils.spectral_norm(layer=self.conv_0) - self.conv_1 = nn.utils.spectral_norm(layer=self.conv_1) - if self.learned_shortcut: - self.conv_s = nn.utils.spectral_norm(layer=self.conv_s) - ic = opt.evo_ic - self.norm_0 = SPADE(fin, ic) - self.norm_1 = SPADE(fmiddle, ic) 
- if self.learned_shortcut: - self.norm_s = SPADE(fin, ic) - - def forward(self, x, evo): - x_s = self.shortcut(x, evo) - dx = self.conv_0(self.pad(self.actvn(self.norm_0(x, evo)))) - if self.double_conv: - dx = self.conv_1(self.pad(self.actvn(self.norm_1(dx, evo)))) - out = x_s + dx - return out - - def shortcut(self, x, evo): - if self.learned_shortcut: - x_s = self.conv_s(self.norm_s(x, evo)) - else: - x_s = x - return x_s - - def actvn(self, x): - return nn.functional.leaky_relu(x=x, negative_slope=0.2) - - -class SPADE(nn.Layer): - def __init__(self, norm_nc, label_nc): - super().__init__() - ks = 3 - self.param_free_norm = nn.InstanceNorm2D( - num_features=norm_nc, weight_attr=False, bias_attr=False, momentum=1 - 0.1 - ) - nhidden = 64 - ks = 3 - pw = ks // 2 - self.mlp_shared = nn.Sequential( - nn.Pad2D(padding=pw, mode="reflect"), - nn.Conv2D( - in_channels=label_nc, out_channels=nhidden, kernel_size=ks, padding=0 - ), - nn.ReLU(), - ) - self.pad = nn.Pad2D(padding=pw, mode="reflect") - self.mlp_gamma = nn.Conv2D( - in_channels=nhidden, out_channels=norm_nc, kernel_size=ks, padding=0 - ) - self.mlp_beta = nn.Conv2D( - in_channels=nhidden, out_channels=norm_nc, kernel_size=ks, padding=0 - ) - - def forward(self, x, evo): - normalized = self.param_free_norm(x) - evo = nn.functional.adaptive_avg_pool2d(x=evo, output_size=x.shape[2:]) - actv = self.mlp_shared(evo) - gamma = self.mlp_gamma(self.pad(actv)) - beta = self.mlp_beta(self.pad(actv)) - out = normalized * (1 + gamma) + beta - return out - - -class Noise_Projector(nn.Layer): - def __init__(self, input_length): - super().__init__() - self.input_length = input_length - self.conv_first = nn.utils.spectral_norm( - nn.Conv2D( - in_channels=self.input_length, - out_channels=self.input_length * 2, - kernel_size=3, - padding=1, - ) - ) - self.L1 = ProjBlock(self.input_length * 2, self.input_length * 4) - self.L2 = ProjBlock(self.input_length * 4, self.input_length * 8) - self.L3 = ProjBlock(self.input_length * 8, self.input_length * 16) - self.L4 = ProjBlock(self.input_length * 16, self.input_length * 32) - - def forward(self, x): - x = self.conv_first(x) - x = self.L1(x) - x = self.L2(x) - x = self.L3(x) - x = self.L4(x) - return x - - -class ProjBlock(nn.Layer): - def __init__(self, in_channel, out_channel): - super().__init__() - self.one_conv = nn.utils.spectral_norm( - nn.Conv2D( - in_channels=in_channel, - out_channels=out_channel - in_channel, - kernel_size=1, - padding=0, - ) - ) - self.double_conv = nn.Sequential( - nn.utils.spectral_norm( - nn.Conv2D( - in_channels=in_channel, - out_channels=out_channel, - kernel_size=3, - padding=1, - ) - ), - nn.ReLU(), - nn.utils.spectral_norm( - nn.Conv2D( - in_channels=out_channel, - out_channels=out_channel, - kernel_size=3, - padding=1, - ) - ), - ) - - def forward(self, x): - x1 = paddle.concat(x=[x, self.one_conv(x)], axis=1) - x2 = self.double_conv(x) - output = x1 + x2 - return output - - -def make_grid(input): - B, C, H, W = input.shape - xx = paddle.arange(start=0, end=W).reshape((1, -1)).tile((H, 1)) - yy = paddle.arange(start=0, end=H).reshape((-1, 1)).tile((1, W)) - xx = xx.reshape((1, 1, H, W)).tile((B, 1, 1, 1)) - yy = yy.reshape((1, 1, H, W)).tile((B, 1, 1, 1)) - grid = paddle.concat(x=(xx, yy), axis=1).astype(dtype=paddle.get_default_dtype()) - return grid - - -def warp(input, flow, grid, mode="bilinear", padding_mode="zeros"): - B, C, H, W = input.shape - vgrid = grid + flow - vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0 - vgrid[:, 1, :, :] = 
2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0 - vgrid = vgrid.transpose(perm=[0, 2, 3, 1]) - output = nn.functional.grid_sample( - x=input.cpu(), - grid=vgrid.cpu(), - padding_mode=padding_mode, - mode=mode, - align_corners=True, - ) - return output.cuda() - - -def l2normalize(v, eps=1e-12): - return v / (v.norm() + eps) - - -class spectral_norm(nn.Layer): - def __init__(self, module, name="weight", power_iterations=1): - super().__init__() - self.module = module - self.name = name - self.power_iterations = power_iterations - if not self._made_params(): - self._make_params() - - def _update_u_v(self): - u = getattr(self.module, self.name + "_u") - v = getattr(self.module, self.name + "_v") - w = getattr(self.module, self.name + "_bar") - height = w.detach().shape[0] - for _ in range(self.power_iterations): - v = l2normalize( - paddle.mv( - x=paddle.t(input=w.reshape((height, -1)).detach()), vec=u.detach() - ) - ) - u = l2normalize( - paddle.mv(x=w.reshape((height, -1)).detach(), vec=v.detach()) - ) - sigma = u.dot(y=w.reshape((height, -1)).mv(vec=v)) - setattr(self.module, self.name, w / sigma.expand_as(y=w)) - - def _made_params(self): - try: - _ = getattr(self.module, self.name + "_u") - _ = getattr(self.module, self.name + "_v") - _ = getattr(self.module, self.name + "_bar") - return True - except AttributeError: - return False - - def _make_params(self): - w = getattr(self.module, self.name) - height = w.detach().shape[0] - width = w.reshape((height, -1)).detach().shape[1] - - tmp_w = paddle.normal(shape=[height]) - out_0 = paddle.create_parameter( - shape=tmp_w.shape, - dtype=tmp_w.numpy().dtype, - default_initializer=nn.initializer.Assign(tmp_w), - ) - out_0.stop_gradient = True - u = out_0 - - tmp_w = paddle.normal(shape=[width]) - out_1 = paddle.create_parameter( - shape=tmp_w.shape, - dtype=tmp_w.numpy().dtype, - default_initializer=nn.initializer.Assign(tmp_w), - ) - out_1.stop_gradient = True - v = out_1 - u = l2normalize(u) - v = l2normalize(v) - tmp_w = w.detach() - out_2 = paddle.create_parameter( - shape=tmp_w.shape, - dtype=tmp_w.numpy().dtype, - default_initializer=nn.initializer.Assign(tmp_w), - ) - out_2.stop_gradient = False - w_bar = out_2 - del self.module._parameters[self.name] - - u = create_param(u) - v = create_param(v) - self.module.add_parameter(name=self.name + "_u", parameter=u) - self.module.add_parameter(name=self.name + "_v", parameter=v) - self.module.add_parameter(name=self.name + "_bar", parameter=w_bar) - - def forward(self, *args): - self._update_u_v() - return self.module.forward(*args) - - -def create_param(x): - param = paddle.create_parameter( - shape=x.shape, - dtype=x.dtype, - default_initializer=nn.initializer.Assign(x), - ) - param.stop_gradient = x.stop_gradient - return param +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
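+
+# NowcastNet generator: an evolution (advection) network predicts motion and
+# intensity fields used to extrapolate the input frames via ``warp``, and a
+# SPADE-conditioned generative encoder/decoder, fed with projected noise,
+# refines the evolved sequence into the final prediction.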
+ +import collections +from typing import Tuple + +import paddle +from paddle import nn + +from ppsci.arch import base + + +class NowcastNet(base.Arch): + """The NowcastNet model. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + input_length (int, optional): Input length. Defaults to 9. + total_length (int, optional): Total length. Defaults to 29. + image_height (int, optional): Image height. Defaults to 512. + image_width (int, optional): Image width. Defaults to 512. + image_ch (int, optional): Image channel. Defaults to 2. + ngf (int, optional): Noise Projector input length. Defaults to 32. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.NowcastNet(("input", ), ("output", )) + >>> input_data = paddle.rand([1, 9, 512, 512, 2]) + >>> input_dict = {"input": input_data} + >>> output_dict = model(input_dict) + >>> print(output_dict["output"].shape) + [1, 20, 512, 512, 1] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + input_length: int = 9, + total_length: int = 29, + image_height: int = 512, + image_width: int = 512, + image_ch: int = 2, + ngf: int = 32, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + + self.input_length = input_length + self.total_length = total_length + self.image_height = image_height + self.image_width = image_width + self.image_ch = image_ch + self.ngf = ngf + + configs = collections.namedtuple( + "Object", ["ngf", "evo_ic", "gen_oc", "ic_feature"] + ) + configs.ngf = self.ngf + configs.evo_ic = self.total_length - self.input_length + configs.gen_oc = self.total_length - self.input_length + configs.ic_feature = self.ngf * 10 + + self.pred_length = self.total_length - self.input_length + self.evo_net = Evolution_Network(self.input_length, self.pred_length, base_c=32) + self.gen_enc = Generative_Encoder(self.total_length, base_c=self.ngf) + self.gen_dec = Generative_Decoder(configs) + self.proj = Noise_Projector(self.ngf) + sample_tensor = paddle.zeros(shape=[1, 1, self.image_height, self.image_width]) + self.grid = make_grid(sample_tensor) + + @staticmethod + def split_to_dict(data_tensors: Tuple[paddle.Tensor, ...], keys: Tuple[str, ...]): + return {key: data_tensors[i] for i, key in enumerate(keys)} + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + x_tensor = self.concat_to_tensor(x, self.input_keys) + + y = [] + out = self.forward_tensor(x_tensor) + y.append(out) + y = self.split_to_dict(y, self.output_keys) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y + + def forward_tensor(self, x): + all_frames = x[:, :, :, :, :1] + frames = all_frames.transpose(perm=[0, 1, 4, 2, 3]) + batch = frames.shape[0] + height = frames.shape[3] + width = frames.shape[4] + # Input Frames + input_frames = frames[:, : self.input_length] + input_frames = input_frames.reshape((batch, self.input_length, height, width)) + # Evolution Network + intensity, motion = self.evo_net(input_frames) + motion_ = motion.reshape((batch, self.pred_length, 2, height, width)) + intensity_ = intensity.reshape((batch, self.pred_length, 1, height, width)) + series = [] + last_frames = all_frames[:, self.input_length - 1 : self.input_length, :, :, 0] + grid = self.grid.tile((batch, 1, 1, 1)) + for i in range(self.pred_length): + last_frames = warp( + last_frames, motion_[:, i], grid, mode="nearest", 
padding_mode="border" + ) + last_frames = last_frames + intensity_[:, i] + series.append(last_frames) + evo_result = paddle.concat(x=series, axis=1) + evo_result = evo_result / 128 + # Generative Network + evo_feature = self.gen_enc(paddle.concat(x=[input_frames, evo_result], axis=1)) + noise = paddle.randn(shape=[batch, self.ngf, height // 32, width // 32]) + noise = self.proj(noise) + ngf = noise.shape[1] + noise_feature = ( + noise.reshape((batch, -1, 4, 4, 8, 8)) + .transpose(perm=[0, 1, 4, 5, 2, 3]) + .reshape((batch, ngf // 16, height // 8, width // 8)) + ) + feature = paddle.concat(x=[evo_feature, noise_feature], axis=1) + gen_result = self.gen_dec(feature, evo_result) + return gen_result.unsqueeze(axis=-1) + + +class Evolution_Network(nn.Layer): + def __init__(self, n_channels, n_classes, base_c=64, bilinear=True): + super().__init__() + self.n_channels = n_channels + self.n_classes = n_classes + self.bilinear = bilinear + base_c = base_c + self.inc = DoubleConv(n_channels, base_c) + self.down1 = Down(base_c * 1, base_c * 2) + self.down2 = Down(base_c * 2, base_c * 4) + self.down3 = Down(base_c * 4, base_c * 8) + factor = 2 if bilinear else 1 + self.down4 = Down(base_c * 8, base_c * 16 // factor) + self.up1 = Up(base_c * 16, base_c * 8 // factor, bilinear) + self.up2 = Up(base_c * 8, base_c * 4 // factor, bilinear) + self.up3 = Up(base_c * 4, base_c * 2 // factor, bilinear) + self.up4 = Up(base_c * 2, base_c * 1, bilinear) + self.outc = OutConv(base_c * 1, n_classes) + param1 = paddle.zeros(shape=[1, n_classes, 1, 1]) + gamma = self.create_parameter( + shape=param1.shape, + dtype=param1.dtype, + default_initializer=nn.initializer.Assign(param1), + ) + gamma.stop_gradient = False + self.gamma = gamma + self.up1_v = Up(base_c * 16, base_c * 8 // factor, bilinear) + self.up2_v = Up(base_c * 8, base_c * 4 // factor, bilinear) + self.up3_v = Up(base_c * 4, base_c * 2 // factor, bilinear) + self.up4_v = Up(base_c * 2, base_c * 1, bilinear) + self.outc_v = OutConv(base_c * 1, n_classes * 2) + + def forward(self, x): + x1 = self.inc(x) + x2 = self.down1(x1) + x3 = self.down2(x2) + x4 = self.down3(x3) + x5 = self.down4(x4) + x = self.up1(x5, x4) + x = self.up2(x, x3) + x = self.up3(x, x2) + x = self.up4(x, x1) + x = self.outc(x) * self.gamma + v = self.up1_v(x5, x4) + v = self.up2_v(v, x3) + v = self.up3_v(v, x2) + v = self.up4_v(v, x1) + v = self.outc_v(v) + return x, v + + +class DoubleConv(nn.Layer): + def __init__(self, in_channels, out_channels, kernel=3, mid_channels=None): + super().__init__() + if not mid_channels: + mid_channels = out_channels + self.double_conv = nn.Sequential( + nn.BatchNorm2D(num_features=in_channels), + nn.ReLU(), + nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=kernel, + padding=kernel // 2, + ) + ), + nn.BatchNorm2D(num_features=mid_channels), + nn.ReLU(), + nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=kernel, + padding=kernel // 2, + ) + ), + ) + self.single_conv = nn.Sequential( + nn.BatchNorm2D(num_features=in_channels), + nn.utils.spectral_norm( + layer=nn.Conv2D( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel, + padding=kernel // 2, + ) + ), + ) + + def forward(self, x): + shortcut = self.single_conv(x) + x = self.double_conv(x) + x = x + shortcut + return x + + +class Down(nn.Layer): + def __init__(self, in_channels, out_channels, kernel=3): + super().__init__() + self.maxpool_conv = 
nn.Sequential( + nn.MaxPool2D(kernel_size=2), + DoubleConv(in_channels, out_channels, kernel), + ) + + def forward(self, x): + x = self.maxpool_conv(x) + return x + + +class Up(nn.Layer): + def __init__(self, in_channels, out_channels, bilinear=True, kernel=3): + super().__init__() + if bilinear: + self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) + self.conv = DoubleConv( + in_channels, out_channels, kernel=kernel, mid_channels=in_channels // 2 + ) + else: + self.up = nn.Conv2DTranspose( + in_channels=in_channels, + out_channels=in_channels // 2, + kernel_size=2, + stride=2, + ) + self.conv = DoubleConv(in_channels, out_channels, kernel) + + def forward(self, x1, x2): + x1 = self.up(x1) + # input is CHW + diffY = x2.shape[2] - x1.shape[2] + diffX = x2.shape[3] - x1.shape[3] + x1 = nn.functional.pad( + x1, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2] + ) + x = paddle.concat(x=[x2, x1], axis=1) + return self.conv(x) + + +class Up_S(nn.Layer): + def __init__(self, in_channels, out_channels, bilinear=True, kernel=3): + super().__init__() + if bilinear: + self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True) + self.conv = DoubleConv( + in_channels, out_channels, kernel=kernel, mid_channels=in_channels + ) + else: + self.up = nn.Conv2DTranspose( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=2, + stride=2, + ) + self.conv = DoubleConv(in_channels, out_channels, kernel) + + def forward(self, x): + x = self.up(x) + return self.conv(x) + + +class OutConv(nn.Layer): + def __init__(self, in_channels, out_channels): + super().__init__() + self.conv = nn.Conv2D( + in_channels=in_channels, out_channels=out_channels, kernel_size=1 + ) + + def forward(self, x): + return self.conv(x) + + +class Generative_Encoder(nn.Layer): + def __init__(self, n_channels, base_c=64): + super().__init__() + base_c = base_c + self.inc = DoubleConv(n_channels, base_c, kernel=3) + self.down1 = Down(base_c * 1, base_c * 2, 3) + self.down2 = Down(base_c * 2, base_c * 4, 3) + self.down3 = Down(base_c * 4, base_c * 8, 3) + + def forward(self, x): + x = self.inc(x) + x = self.down1(x) + x = self.down2(x) + x = self.down3(x) + return x + + +class Generative_Decoder(nn.Layer): + def __init__(self, opt): + super().__init__() + self.opt = opt + nf = opt.ngf + ic = opt.ic_feature + self.fc = nn.Conv2D( + in_channels=ic, out_channels=8 * nf, kernel_size=3, padding=1 + ) + self.head_0 = GenBlock(8 * nf, 8 * nf, opt) + self.G_middle_0 = GenBlock(8 * nf, 4 * nf, opt, double_conv=True) + self.G_middle_1 = GenBlock(4 * nf, 4 * nf, opt, double_conv=True) + self.up_0 = GenBlock(4 * nf, 2 * nf, opt) + self.up_1 = GenBlock(2 * nf, 1 * nf, opt, double_conv=True) + self.up_2 = GenBlock(1 * nf, 1 * nf, opt, double_conv=True) + final_nc = nf * 1 + self.conv_img = nn.Conv2D( + in_channels=final_nc, out_channels=self.opt.gen_oc, kernel_size=3, padding=1 + ) + self.up = nn.Upsample(scale_factor=2) + + def forward(self, x, evo): + x = self.fc(x) + x = self.head_0(x, evo) + x = self.up(x) + x = self.G_middle_0(x, evo) + x = self.G_middle_1(x, evo) + x = self.up(x) + x = self.up_0(x, evo) + x = self.up(x) + x = self.up_1(x, evo) + x = self.up_2(x, evo) + x = self.conv_img(nn.functional.leaky_relu(x=x, negative_slope=0.2)) + return x + + +class GenBlock(nn.Layer): + def __init__(self, fin, fout, opt, use_se=False, dilation=1, double_conv=False): + super().__init__() + self.learned_shortcut = fin != fout + fmiddle = min(fin, fout) + self.opt = opt + self.double_conv = 
double_conv + self.pad = nn.Pad2D(padding=dilation, mode="reflect") + self.conv_0 = nn.Conv2D( + in_channels=fin, + out_channels=fmiddle, + kernel_size=3, + padding=0, + dilation=dilation, + ) + self.conv_1 = nn.Conv2D( + in_channels=fmiddle, + out_channels=fout, + kernel_size=3, + padding=0, + dilation=dilation, + ) + if self.learned_shortcut: + self.conv_s = nn.Conv2D( + in_channels=fin, out_channels=fout, kernel_size=1, bias_attr=False + ) + self.conv_0 = nn.utils.spectral_norm(layer=self.conv_0) + self.conv_1 = nn.utils.spectral_norm(layer=self.conv_1) + if self.learned_shortcut: + self.conv_s = nn.utils.spectral_norm(layer=self.conv_s) + ic = opt.evo_ic + self.norm_0 = SPADE(fin, ic) + self.norm_1 = SPADE(fmiddle, ic) + if self.learned_shortcut: + self.norm_s = SPADE(fin, ic) + + def forward(self, x, evo): + x_s = self.shortcut(x, evo) + dx = self.conv_0(self.pad(self.actvn(self.norm_0(x, evo)))) + if self.double_conv: + dx = self.conv_1(self.pad(self.actvn(self.norm_1(dx, evo)))) + out = x_s + dx + return out + + def shortcut(self, x, evo): + if self.learned_shortcut: + x_s = self.conv_s(self.norm_s(x, evo)) + else: + x_s = x + return x_s + + def actvn(self, x): + return nn.functional.leaky_relu(x=x, negative_slope=0.2) + + +class SPADE(nn.Layer): + def __init__(self, norm_nc, label_nc): + super().__init__() + ks = 3 + self.param_free_norm = nn.InstanceNorm2D( + num_features=norm_nc, weight_attr=False, bias_attr=False, momentum=1 - 0.1 + ) + nhidden = 64 + ks = 3 + pw = ks // 2 + self.mlp_shared = nn.Sequential( + nn.Pad2D(padding=pw, mode="reflect"), + nn.Conv2D( + in_channels=label_nc, out_channels=nhidden, kernel_size=ks, padding=0 + ), + nn.ReLU(), + ) + self.pad = nn.Pad2D(padding=pw, mode="reflect") + self.mlp_gamma = nn.Conv2D( + in_channels=nhidden, out_channels=norm_nc, kernel_size=ks, padding=0 + ) + self.mlp_beta = nn.Conv2D( + in_channels=nhidden, out_channels=norm_nc, kernel_size=ks, padding=0 + ) + + def forward(self, x, evo): + normalized = self.param_free_norm(x) + evo = nn.functional.adaptive_avg_pool2d(x=evo, output_size=x.shape[2:]) + actv = self.mlp_shared(evo) + gamma = self.mlp_gamma(self.pad(actv)) + beta = self.mlp_beta(self.pad(actv)) + out = normalized * (1 + gamma) + beta + return out + + +class Noise_Projector(nn.Layer): + def __init__(self, input_length): + super().__init__() + self.input_length = input_length + self.conv_first = nn.utils.spectral_norm( + nn.Conv2D( + in_channels=self.input_length, + out_channels=self.input_length * 2, + kernel_size=3, + padding=1, + ) + ) + self.L1 = ProjBlock(self.input_length * 2, self.input_length * 4) + self.L2 = ProjBlock(self.input_length * 4, self.input_length * 8) + self.L3 = ProjBlock(self.input_length * 8, self.input_length * 16) + self.L4 = ProjBlock(self.input_length * 16, self.input_length * 32) + + def forward(self, x): + x = self.conv_first(x) + x = self.L1(x) + x = self.L2(x) + x = self.L3(x) + x = self.L4(x) + return x + + +class ProjBlock(nn.Layer): + def __init__(self, in_channel, out_channel): + super().__init__() + self.one_conv = nn.utils.spectral_norm( + nn.Conv2D( + in_channels=in_channel, + out_channels=out_channel - in_channel, + kernel_size=1, + padding=0, + ) + ) + self.double_conv = nn.Sequential( + nn.utils.spectral_norm( + nn.Conv2D( + in_channels=in_channel, + out_channels=out_channel, + kernel_size=3, + padding=1, + ) + ), + nn.ReLU(), + nn.utils.spectral_norm( + nn.Conv2D( + in_channels=out_channel, + out_channels=out_channel, + kernel_size=3, + padding=1, + ) + ), + ) + + def 
forward(self, x): + x1 = paddle.concat(x=[x, self.one_conv(x)], axis=1) + x2 = self.double_conv(x) + output = x1 + x2 + return output + + +def make_grid(input): + B, C, H, W = input.shape + xx = paddle.arange(start=0, end=W).reshape((1, -1)).tile((H, 1)) + yy = paddle.arange(start=0, end=H).reshape((-1, 1)).tile((1, W)) + xx = xx.reshape((1, 1, H, W)).tile((B, 1, 1, 1)) + yy = yy.reshape((1, 1, H, W)).tile((B, 1, 1, 1)) + grid = paddle.concat(x=(xx, yy), axis=1).astype(dtype=paddle.get_default_dtype()) + return grid + + +def warp(input, flow, grid, mode="bilinear", padding_mode="zeros"): + B, C, H, W = input.shape + vgrid = grid + flow + vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0 + vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0 + vgrid = vgrid.transpose(perm=[0, 2, 3, 1]) + output = nn.functional.grid_sample( + x=input.cpu(), + grid=vgrid.cpu(), + padding_mode=padding_mode, + mode=mode, + align_corners=True, + ) + return output.cuda() + + +def l2normalize(v, eps=1e-12): + return v / (v.norm() + eps) + + +class spectral_norm(nn.Layer): + def __init__(self, module, name="weight", power_iterations=1): + super().__init__() + self.module = module + self.name = name + self.power_iterations = power_iterations + if not self._made_params(): + self._make_params() + + def _update_u_v(self): + u = getattr(self.module, self.name + "_u") + v = getattr(self.module, self.name + "_v") + w = getattr(self.module, self.name + "_bar") + height = w.detach().shape[0] + for _ in range(self.power_iterations): + v = l2normalize( + paddle.mv( + x=paddle.t(input=w.reshape((height, -1)).detach()), vec=u.detach() + ) + ) + u = l2normalize( + paddle.mv(x=w.reshape((height, -1)).detach(), vec=v.detach()) + ) + sigma = u.dot(y=w.reshape((height, -1)).mv(vec=v)) + setattr(self.module, self.name, w / sigma.expand_as(y=w)) + + def _made_params(self): + try: + _ = getattr(self.module, self.name + "_u") + _ = getattr(self.module, self.name + "_v") + _ = getattr(self.module, self.name + "_bar") + return True + except AttributeError: + return False + + def _make_params(self): + w = getattr(self.module, self.name) + height = w.detach().shape[0] + width = w.reshape((height, -1)).detach().shape[1] + + tmp_w = paddle.normal(shape=[height]) + out_0 = paddle.create_parameter( + shape=tmp_w.shape, + dtype=tmp_w.numpy().dtype, + default_initializer=nn.initializer.Assign(tmp_w), + ) + out_0.stop_gradient = True + u = out_0 + + tmp_w = paddle.normal(shape=[width]) + out_1 = paddle.create_parameter( + shape=tmp_w.shape, + dtype=tmp_w.numpy().dtype, + default_initializer=nn.initializer.Assign(tmp_w), + ) + out_1.stop_gradient = True + v = out_1 + u = l2normalize(u) + v = l2normalize(v) + tmp_w = w.detach() + out_2 = paddle.create_parameter( + shape=tmp_w.shape, + dtype=tmp_w.numpy().dtype, + default_initializer=nn.initializer.Assign(tmp_w), + ) + out_2.stop_gradient = False + w_bar = out_2 + del self.module._parameters[self.name] + + u = create_param(u) + v = create_param(v) + self.module.add_parameter(name=self.name + "_u", parameter=u) + self.module.add_parameter(name=self.name + "_v", parameter=v) + self.module.add_parameter(name=self.name + "_bar", parameter=w_bar) + + def forward(self, *args): + self._update_u_v() + return self.module.forward(*args) + + +def create_param(x): + param = paddle.create_parameter( + shape=x.shape, + dtype=x.dtype, + default_initializer=nn.initializer.Assign(x), + ) + param.stop_gradient = x.stop_gradient + return param diff --git 
a/ppsci/arch/paddle_harmonics/legendre.py b/ppsci/arch/paddle_harmonics/legendre.py index 376599719e..f2a6abcb0b 100644 --- a/ppsci/arch/paddle_harmonics/legendre.py +++ b/ppsci/arch/paddle_harmonics/legendre.py @@ -1,176 +1,176 @@ -# coding=utf-8 - -# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -""" -Code below is heavily based on [torch-harmonics](https://github.com/NVIDIA/torch-harmonics/blob/main/torch_harmonics/legendre.py) -""" - -import numpy as np - - -def clm(l, m): - """ - Defines the normalization factor to orthonormalize the Spherical Harmonics - """ - return np.sqrt((2 * l + 1) / 4 / np.pi) * np.sqrt( - np.math.factorial(l - m) / np.math.factorial(l + m) - ) - - -def legpoly(mmax, lmax, x, norm="ortho", inverse=False, csphase=True): - r""" - Computes the values of (-1)^m c^l_m P^l_m(x) at the positions specified by x. - The resulting tensor has shape (mmax, lmax, len(x)). The Condon-Shortley Phase (-1)^m - can be turned off optionally. - - method of computation follows - [1] Schaeffer, N.; Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. 
- [2] Rapp, R.H.; A Fortran Program for the Computation of Gravimetric Quantities from High Degree Spherical Harmonic Expansions, Ohio State University Columbus; report; 1982; - https://apps.dtic.mil/sti/citations/ADA123406 - [3] Schrama, E.; Orbit integration based upon interpolated gravitational gradients - """ - # compute the tensor P^m_n: - nmax = max(mmax, lmax) - vdm = np.zeros((nmax, nmax, len(x)), dtype=np.float64) - - norm_factor = 1.0 if norm == "ortho" else np.sqrt(4 * np.pi) - norm_factor = 1.0 / norm_factor if inverse else norm_factor - - # initial values to start the recursion - vdm[0, 0, :] = norm_factor / np.sqrt(4 * np.pi) - - # fill the diagonal and the lower diagonal - for l in range(1, nmax): - vdm[l - 1, l, :] = np.sqrt(2 * l + 1) * x * vdm[l - 1, l - 1, :] - vdm[l, l, :] = ( - np.sqrt((2 * l + 1) * (1 + x) * (1 - x) / 2 / l) * vdm[l - 1, l - 1, :] - ) - - # fill the remaining values on the upper triangle and multiply b - for l in range(2, nmax): - for m in range(0, l - 1): - vdm[m, l, :] = ( - x - * np.sqrt((2 * l - 1) / (l - m) * (2 * l + 1) / (l + m)) - * vdm[m, l - 1, :] - - np.sqrt( - (l + m - 1) - / (l - m) - * (2 * l + 1) - / (2 * l - 3) - * (l - m - 1) - / (l + m) - ) - * vdm[m, l - 2, :] - ) - - if norm == "schmidt": - for l in range(0, nmax): - if inverse: - vdm[:, l, :] = vdm[:, l, :] * np.sqrt(2 * l + 1) - else: - vdm[:, l, :] = vdm[:, l, :] / np.sqrt(2 * l + 1) - - vdm = vdm[:mmax, :lmax] - - if csphase: - for m in range(1, mmax, 2): - vdm[m] *= -1 - - return vdm - - -def _precompute_legpoly(mmax, lmax, t, norm="ortho", inverse=False, csphase=True): - r""" - Computes the values of (-1)^m c^l_m P^l_m(\cos \theta) at the positions specified by t (theta). - The resulting tensor has shape (mmax, lmax, len(x)). The Condon-Shortley Phase (-1)^m - can be turned off optionally. - - method of computation follows - [1] Schaeffer, N.; Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. - [2] Rapp, R.H.; A Fortran Program for the Computation of Gravimetric Quantities from High Degree Spherical Harmonic Expansions, Ohio State University Columbus; report; 1982; - https://apps.dtic.mil/sti/citations/ADA123406 - [3] Schrama, E.; Orbit integration based upon interpolated gravitational gradients - """ - return legpoly(mmax, lmax, np.cos(t), norm=norm, inverse=inverse, csphase=csphase) - - -def _precompute_dlegpoly(mmax, lmax, t, norm="ortho", inverse=False, csphase=True): - r""" - Computes the values of the derivatives $\frac{d}{d \theta} P^m_l(\cos \theta)$ - at the positions specified by t (theta), as well as $\frac{1}{\sin \theta} P^m_l(\cos \theta)$, - needed for the computation of the vector spherical harmonics. The resulting tensor has shape - (2, mmax, lmax, len(t)). - - computation follows - [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. 
- """ - pct = _precompute_legpoly( - mmax + 1, lmax + 1, t, norm=norm, inverse=inverse, csphase=False - ) - - dpct = np.zeros((2, mmax, lmax, len(t)), dtype=np.float64) - - # fill the derivative terms wrt theta - for l in range(0, lmax): - - # m = 0 - dpct[0, 0, l] = -np.sqrt(l * (l + 1)) * pct[1, l] - - # 0 < m < l - for m in range(1, min(l, mmax)): - dpct[0, m, l] = 0.5 * ( - np.sqrt((l + m) * (l - m + 1)) * pct[m - 1, l] - - np.sqrt((l - m) * (l + m + 1)) * pct[m + 1, l] - ) - - # m == l - if mmax > l: - dpct[0, l, l] = np.sqrt(l / 2) * pct[l - 1, l] - - # fill the - 1j m P^m_l / sin(phi). as this component is purely imaginary, - # we won't store it explicitly in a complex array - for m in range(1, min(l + 1, mmax)): - # this component is implicitly complex - # we do not divide by m here as this cancels with the derivative of the exponential - dpct[1, m, l] = ( - 0.5 - * np.sqrt((2 * l + 1) / (2 * l + 3)) - * ( - np.sqrt((l - m + 1) * (l - m + 2)) * pct[m - 1, l + 1] - + np.sqrt((l + m + 1) * (l + m + 2)) * pct[m + 1, l + 1] - ) - ) - - if csphase: - for m in range(1, mmax, 2): - dpct[:, m] *= -1 - - return dpct +# coding=utf-8 + +# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +Code below is heavily based on [torch-harmonics](https://github.com/NVIDIA/torch-harmonics/blob/main/torch_harmonics/legendre.py) +""" + +import numpy as np + + +def clm(l, m): + """ + Defines the normalization factor to orthonormalize the Spherical Harmonics + """ + return np.sqrt((2 * l + 1) / 4 / np.pi) * np.sqrt( + np.math.factorial(l - m) / np.math.factorial(l + m) + ) + + +def legpoly(mmax, lmax, x, norm="ortho", inverse=False, csphase=True): + r""" + Computes the values of (-1)^m c^l_m P^l_m(x) at the positions specified by x. + The resulting tensor has shape (mmax, lmax, len(x)). The Condon-Shortley Phase (-1)^m + can be turned off optionally. 
+ + method of computation follows + [1] Schaeffer, N.; Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. + [2] Rapp, R.H.; A Fortran Program for the Computation of Gravimetric Quantities from High Degree Spherical Harmonic Expansions, Ohio State University Columbus; report; 1982; + https://apps.dtic.mil/sti/citations/ADA123406 + [3] Schrama, E.; Orbit integration based upon interpolated gravitational gradients + """ + # compute the tensor P^m_n: + nmax = max(mmax, lmax) + vdm = np.zeros((nmax, nmax, len(x)), dtype=np.float64) + + norm_factor = 1.0 if norm == "ortho" else np.sqrt(4 * np.pi) + norm_factor = 1.0 / norm_factor if inverse else norm_factor + + # initial values to start the recursion + vdm[0, 0, :] = norm_factor / np.sqrt(4 * np.pi) + + # fill the diagonal and the lower diagonal + for l in range(1, nmax): + vdm[l - 1, l, :] = np.sqrt(2 * l + 1) * x * vdm[l - 1, l - 1, :] + vdm[l, l, :] = ( + np.sqrt((2 * l + 1) * (1 + x) * (1 - x) / 2 / l) * vdm[l - 1, l - 1, :] + ) + + # fill the remaining values on the upper triangle and multiply b + for l in range(2, nmax): + for m in range(0, l - 1): + vdm[m, l, :] = ( + x + * np.sqrt((2 * l - 1) / (l - m) * (2 * l + 1) / (l + m)) + * vdm[m, l - 1, :] + - np.sqrt( + (l + m - 1) + / (l - m) + * (2 * l + 1) + / (2 * l - 3) + * (l - m - 1) + / (l + m) + ) + * vdm[m, l - 2, :] + ) + + if norm == "schmidt": + for l in range(0, nmax): + if inverse: + vdm[:, l, :] = vdm[:, l, :] * np.sqrt(2 * l + 1) + else: + vdm[:, l, :] = vdm[:, l, :] / np.sqrt(2 * l + 1) + + vdm = vdm[:mmax, :lmax] + + if csphase: + for m in range(1, mmax, 2): + vdm[m] *= -1 + + return vdm + + +def _precompute_legpoly(mmax, lmax, t, norm="ortho", inverse=False, csphase=True): + r""" + Computes the values of (-1)^m c^l_m P^l_m(\cos \theta) at the positions specified by t (theta). + The resulting tensor has shape (mmax, lmax, len(x)). The Condon-Shortley Phase (-1)^m + can be turned off optionally. + + method of computation follows + [1] Schaeffer, N.; Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. + [2] Rapp, R.H.; A Fortran Program for the Computation of Gravimetric Quantities from High Degree Spherical Harmonic Expansions, Ohio State University Columbus; report; 1982; + https://apps.dtic.mil/sti/citations/ADA123406 + [3] Schrama, E.; Orbit integration based upon interpolated gravitational gradients + """ + return legpoly(mmax, lmax, np.cos(t), norm=norm, inverse=inverse, csphase=csphase) + + +def _precompute_dlegpoly(mmax, lmax, t, norm="ortho", inverse=False, csphase=True): + r""" + Computes the values of the derivatives $\frac{d}{d \theta} P^m_l(\cos \theta)$ + at the positions specified by t (theta), as well as $\frac{1}{\sin \theta} P^m_l(\cos \theta)$, + needed for the computation of the vector spherical harmonics. The resulting tensor has shape + (2, mmax, lmax, len(t)). + + computation follows + [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. 
+ """ + pct = _precompute_legpoly( + mmax + 1, lmax + 1, t, norm=norm, inverse=inverse, csphase=False + ) + + dpct = np.zeros((2, mmax, lmax, len(t)), dtype=np.float64) + + # fill the derivative terms wrt theta + for l in range(0, lmax): + + # m = 0 + dpct[0, 0, l] = -np.sqrt(l * (l + 1)) * pct[1, l] + + # 0 < m < l + for m in range(1, min(l, mmax)): + dpct[0, m, l] = 0.5 * ( + np.sqrt((l + m) * (l - m + 1)) * pct[m - 1, l] + - np.sqrt((l - m) * (l + m + 1)) * pct[m + 1, l] + ) + + # m == l + if mmax > l: + dpct[0, l, l] = np.sqrt(l / 2) * pct[l - 1, l] + + # fill the - 1j m P^m_l / sin(phi). as this component is purely imaginary, + # we won't store it explicitly in a complex array + for m in range(1, min(l + 1, mmax)): + # this component is implicitly complex + # we do not divide by m here as this cancels with the derivative of the exponential + dpct[1, m, l] = ( + 0.5 + * np.sqrt((2 * l + 1) / (2 * l + 3)) + * ( + np.sqrt((l - m + 1) * (l - m + 2)) * pct[m - 1, l + 1] + + np.sqrt((l + m + 1) * (l + m + 2)) * pct[m + 1, l + 1] + ) + ) + + if csphase: + for m in range(1, mmax, 2): + dpct[:, m] *= -1 + + return dpct diff --git a/ppsci/arch/paddle_harmonics/quadrature.py b/ppsci/arch/paddle_harmonics/quadrature.py index 25de0e1436..909da923ff 100644 --- a/ppsci/arch/paddle_harmonics/quadrature.py +++ b/ppsci/arch/paddle_harmonics/quadrature.py @@ -1,156 +1,156 @@ -# coding=utf-8 - -# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -""" -Code below is heavily based on [torch-harmonics](https://github.com/NVIDIA/torch-harmonics/blob/main/torch_harmonics/quadrature.py) -""" - -import numpy as np - - -def legendre_gauss_weights(n, a=-1.0, b=1.0): - r""" - Helper routine which returns the Legendre-Gauss nodes and weights - on the interval [a, b] - """ - xlg, wlg = np.polynomial.legendre.leggauss(n) - xlg = (b - a) * 0.5 * xlg + (b + a) * 0.5 - wlg = wlg * (b - a) * 0.5 - - return xlg, wlg - - -def lobatto_weights(n, a=-1.0, b=1.0, tol=1e-16, maxiter=100): - r""" - Helper routine which returns the Legendre-Gauss-Lobatto nodes and weights - on the interval [a, b] - """ - wlg = np.zeros((n,)) - tlg = np.zeros((n,)) - tmp = np.zeros((n,)) - - # Vandermonde Matrix - vdm = np.zeros((n, n)) - - # initialize Chebyshev nodes as first guess - for i in range(n): - tlg[i] = -np.cos(np.pi * i / (n - 1)) - - tmp = 2.0 - - for i in range(maxiter): - tmp = tlg - - vdm[:, 0] = 1.0 - vdm[:, 1] = tlg - - for k in range(2, n): - vdm[:, k] = ( - (2 * k - 1) * tlg * vdm[:, k - 1] - (k - 1) * vdm[:, k - 2] - ) / k - - tlg = tmp - (tlg * vdm[:, n - 1] - vdm[:, n - 2]) / (n * vdm[:, n - 1]) - - if max(abs(tlg - tmp).flatten()) < tol: - break - - wlg = 2.0 / ((n * (n - 1)) * (vdm[:, n - 1] ** 2)) - - # rescale - tlg = (b - a) * 0.5 * tlg + (b + a) * 0.5 - wlg = wlg * (b - a) * 0.5 - - return tlg, wlg - - -def clenshaw_curtiss_weights(n, a=-1.0, b=1.0): - r""" - Computation of the Clenshaw-Curtis quadrature nodes and weights. - This implementation follows - - [1] Joerg Waldvogel, Fast Construction of the Fejer and Clenshaw-Curtis Quadrature Rules; BIT Numerical Mathematics, Vol. 43, No. 1, pp. 001–018. - """ - assert n > 1 - - tcc = np.cos(np.linspace(np.pi, 0, n)) - - if n == 2: - wcc = np.array([1.0, 1.0]) - else: - - n1 = n - 1 - N = np.arange(1, n1, 2) - l = len(N) - m = n1 - l - - v = np.concatenate([2 / N / (N - 2), 1 / N[-1:], np.zeros(m)]) - v = 0 - v[:-1] - v[-1:0:-1] - - g0 = -np.ones(n1) - g0[l] = g0[l] + n1 - g0[m] = g0[m] + n1 - g = g0 / (n1**2 - 1 + (n1 % 2)) - wcc = np.fft.ifft(v + g).real - wcc = np.concatenate((wcc, wcc[:1])) - - # rescale - tcc = (b - a) * 0.5 * tcc + (b + a) * 0.5 - wcc = wcc * (b - a) * 0.5 - - return tcc, wcc - - -def fejer2_weights(n, a=-1.0, b=1.0): - r""" - Computation of the Fejer quadrature nodes and weights. - This implementation follows - - [1] Joerg Waldvogel, Fast Construction of the Fejer and Clenshaw-Curtis Quadrature Rules; BIT Numerical Mathematics, Vol. 43, No. 1, pp. 001–018. - """ - assert n > 2 - - tcc = np.cos(np.linspace(np.pi, 0, n)) - - n1 = n - 1 - N = np.arange(1, n1, 2) - l = len(N) - m = n1 - l - - v = np.concatenate([2 / N / (N - 2), 1 / N[-1:], np.zeros(m)]) - v = 0 - v[:-1] - v[-1:0:-1] - - wcc = np.fft.ifft(v).real - wcc = np.concatenate((wcc, wcc[:1])) - - # rescale - tcc = (b - a) * 0.5 * tcc + (b + a) * 0.5 - wcc = wcc * (b - a) * 0.5 - - return tcc, wcc +# coding=utf-8 + +# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. 
Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +Code below is heavily based on [torch-harmonics](https://github.com/NVIDIA/torch-harmonics/blob/main/torch_harmonics/quadrature.py) +""" + +import numpy as np + + +def legendre_gauss_weights(n, a=-1.0, b=1.0): + r""" + Helper routine which returns the Legendre-Gauss nodes and weights + on the interval [a, b] + """ + xlg, wlg = np.polynomial.legendre.leggauss(n) + xlg = (b - a) * 0.5 * xlg + (b + a) * 0.5 + wlg = wlg * (b - a) * 0.5 + + return xlg, wlg + + +def lobatto_weights(n, a=-1.0, b=1.0, tol=1e-16, maxiter=100): + r""" + Helper routine which returns the Legendre-Gauss-Lobatto nodes and weights + on the interval [a, b] + """ + wlg = np.zeros((n,)) + tlg = np.zeros((n,)) + tmp = np.zeros((n,)) + + # Vandermonde Matrix + vdm = np.zeros((n, n)) + + # initialize Chebyshev nodes as first guess + for i in range(n): + tlg[i] = -np.cos(np.pi * i / (n - 1)) + + tmp = 2.0 + + for i in range(maxiter): + tmp = tlg + + vdm[:, 0] = 1.0 + vdm[:, 1] = tlg + + for k in range(2, n): + vdm[:, k] = ( + (2 * k - 1) * tlg * vdm[:, k - 1] - (k - 1) * vdm[:, k - 2] + ) / k + + tlg = tmp - (tlg * vdm[:, n - 1] - vdm[:, n - 2]) / (n * vdm[:, n - 1]) + + if max(abs(tlg - tmp).flatten()) < tol: + break + + wlg = 2.0 / ((n * (n - 1)) * (vdm[:, n - 1] ** 2)) + + # rescale + tlg = (b - a) * 0.5 * tlg + (b + a) * 0.5 + wlg = wlg * (b - a) * 0.5 + + return tlg, wlg + + +def clenshaw_curtiss_weights(n, a=-1.0, b=1.0): + r""" + Computation of the Clenshaw-Curtis quadrature nodes and weights. + This implementation follows + + [1] Joerg Waldvogel, Fast Construction of the Fejer and Clenshaw-Curtis Quadrature Rules; BIT Numerical Mathematics, Vol. 43, No. 1, pp. 001–018. + """ + assert n > 1 + + tcc = np.cos(np.linspace(np.pi, 0, n)) + + if n == 2: + wcc = np.array([1.0, 1.0]) + else: + + n1 = n - 1 + N = np.arange(1, n1, 2) + l = len(N) + m = n1 - l + + v = np.concatenate([2 / N / (N - 2), 1 / N[-1:], np.zeros(m)]) + v = 0 - v[:-1] - v[-1:0:-1] + + g0 = -np.ones(n1) + g0[l] = g0[l] + n1 + g0[m] = g0[m] + n1 + g = g0 / (n1**2 - 1 + (n1 % 2)) + wcc = np.fft.ifft(v + g).real + wcc = np.concatenate((wcc, wcc[:1])) + + # rescale + tcc = (b - a) * 0.5 * tcc + (b + a) * 0.5 + wcc = wcc * (b - a) * 0.5 + + return tcc, wcc + + +def fejer2_weights(n, a=-1.0, b=1.0): + r""" + Computation of the Fejer quadrature nodes and weights. 
+ This implementation follows + + [1] Joerg Waldvogel, Fast Construction of the Fejer and Clenshaw-Curtis Quadrature Rules; BIT Numerical Mathematics, Vol. 43, No. 1, pp. 001–018. + """ + assert n > 2 + + tcc = np.cos(np.linspace(np.pi, 0, n)) + + n1 = n - 1 + N = np.arange(1, n1, 2) + l = len(N) + m = n1 - l + + v = np.concatenate([2 / N / (N - 2), 1 / N[-1:], np.zeros(m)]) + v = 0 - v[:-1] - v[-1:0:-1] + + wcc = np.fft.ifft(v).real + wcc = np.concatenate((wcc, wcc[:1])) + + # rescale + tcc = (b - a) * 0.5 * tcc + (b + a) * 0.5 + wcc = wcc * (b - a) * 0.5 + + return tcc, wcc diff --git a/ppsci/arch/paddle_harmonics/random_fields.py b/ppsci/arch/paddle_harmonics/random_fields.py index 8fad28cf26..31036b6f27 100644 --- a/ppsci/arch/paddle_harmonics/random_fields.py +++ b/ppsci/arch/paddle_harmonics/random_fields.py @@ -1,148 +1,148 @@ -# coding=utf-8 - -# SPDX-FileCopyrightText: Copyright (c) 2022 The paddle-harmonics Authors. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -""" -Code below is heavily based on [torch-harmonics](https://github.com/NVIDIA/torch-harmonics/blob/main/torch_harmonics/random_fields.py) -""" - -import paddle -from paddle import nn - -from ppsci.arch.paddle_harmonics.sht import InverseRealSHT - - -class GaussianRandomFieldS2(nn.Layer): - r""" - A mean-zero Gaussian Random Field on the sphere with Matern covariance: - C = sigma^2 (-Lap + tau^2 I)^(-alpha). - - Lap is the Laplacian on the sphere, I the identity operator, - and sigma, tau, alpha are scalar parameters. - - Note: C is trace-class on L^2 if and only if alpha > 1. - - Args: - nlat (int): Number of latitudinal modes.longitudinal modes are 2*nlat. - alpha (float, optional): Regularity parameter. Larger means smoother. Defaults to 2.0. - tau (float, optional): Lenght-scale parameter. Larger means more scales. Defaults to 3.0. - sigma (float, optional): Scale parameter. Larger means bigger. - If None, sigma = tau**(0.5*(2*alpha - 2.0)). Defaults to None. - radius (float, optional): Radius of the sphere. Defaults to 1.0. 
- grid (str, optional): Grid type. Currently supports "equiangular" and - "legendre-gauss". Defaults to "equiangular". - dtype (paddle.dtype, optional): Numerical type for the calculations. Defaults to paddle.float32. - """ - - def __init__( - self, - nlat, - alpha: float = 2.0, - tau: float = 3.0, - sigma: float = None, - radius: float = 1.0, - grid: str = "equiangular", - dtype: paddle.dtype = paddle.float32, - ): - - super().__init__() - - # Number of latitudinal modes. - self.nlat = nlat - - # Default value of sigma if None is given. - if sigma is None: - assert alpha > 1.0, f"Alpha must be greater than one, got {alpha}." - sigma = tau ** (0.5 * (2 * alpha - 2.0)) - - # Inverse SHT - self.isht = InverseRealSHT( - self.nlat, 2 * self.nlat, grid=grid, norm="backward" - ).to(dtype=dtype) - - # Square root of the eigenvalues of C. - sqrt_eig = ( - paddle.to_tensor([j * (j + 1) for j in range(self.nlat)]) - .reshape([self.nlat, 1]) - .tile([1, self.nlat + 1]) - ) - sqrt_eig = paddle.tril( - sigma * (((sqrt_eig / radius**2) + tau**2) ** (-alpha / 2.0)) - ) - sqrt_eig[0, 0] = 0.0 - sqrt_eig = sqrt_eig.unsqueeze(0) - self.register_buffer("sqrt_eig", sqrt_eig) - - # Save mean and var of the standard Gaussian. - # Need these to re-initialize distribution on a new device. - mean = paddle.to_tensor([0.0]).astype(dtype) - var = paddle.to_tensor([1.0]).astype(dtype) - self.register_buffer("mean", mean) - self.register_buffer("var", var) - - # Standard normal noise sampler. - self.gaussian_noise = paddle.distribution.Normal(self.mean, self.var) - - def forward(self, N, xi=None): - """Sample random functions from a spherical GRF. - - Args: - N (int): Number of functions to sample. - xi (paddle.Tensor, optional): Noise is a complex tensor of size (N, nlat, nlat+1). - If None, new Gaussian noise is sampled. - If xi is provided, N is ignored.. Defaults to None. - - Returns: - u (paddle.Tensor): N random samples from the GRF returned as a - tensor of size (N, nlat, 2*nlat) on a equiangular grid. - """ - - # Sample Gaussian noise. - if xi is None: - xi = self.gaussian_noise.sample((N, self.nlat, self.nlat + 1, 2)).squeeze() - xi = paddle.as_complex(xi) - - # Karhunen-Loeve expansion. - u = self.isht(xi * self.sqrt_eig) - - return u - - # Override cuda and to methods so sampler gets initialized with mean - # and variance on the correct device. - def cuda(self, *args, **kwargs): - super().cuda(*args, **kwargs) - self.gaussian_noise = paddle.distribution.Normal(self.mean, self.var) - - return self - - def to(self, *args, **kwargs): - super().to(*args, **kwargs) - self.gaussian_noise = paddle.distribution.Normal(self.mean, self.var) - - return self +# coding=utf-8 + +# SPDX-FileCopyrightText: Copyright (c) 2022 The paddle-harmonics Authors. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +Code below is heavily based on [torch-harmonics](https://github.com/NVIDIA/torch-harmonics/blob/main/torch_harmonics/random_fields.py) +""" + +import paddle +from paddle import nn + +from ppsci.arch.paddle_harmonics.sht import InverseRealSHT + + +class GaussianRandomFieldS2(nn.Layer): + r""" + A mean-zero Gaussian Random Field on the sphere with Matern covariance: + C = sigma^2 (-Lap + tau^2 I)^(-alpha). + + Lap is the Laplacian on the sphere, I the identity operator, + and sigma, tau, alpha are scalar parameters. + + Note: C is trace-class on L^2 if and only if alpha > 1. + + Args: + nlat (int): Number of latitudinal modes.longitudinal modes are 2*nlat. + alpha (float, optional): Regularity parameter. Larger means smoother. Defaults to 2.0. + tau (float, optional): Lenght-scale parameter. Larger means more scales. Defaults to 3.0. + sigma (float, optional): Scale parameter. Larger means bigger. + If None, sigma = tau**(0.5*(2*alpha - 2.0)). Defaults to None. + radius (float, optional): Radius of the sphere. Defaults to 1.0. + grid (str, optional): Grid type. Currently supports "equiangular" and + "legendre-gauss". Defaults to "equiangular". + dtype (paddle.dtype, optional): Numerical type for the calculations. Defaults to paddle.float32. + """ + + def __init__( + self, + nlat, + alpha: float = 2.0, + tau: float = 3.0, + sigma: float = None, + radius: float = 1.0, + grid: str = "equiangular", + dtype: paddle.dtype = paddle.float32, + ): + + super().__init__() + + # Number of latitudinal modes. + self.nlat = nlat + + # Default value of sigma if None is given. + if sigma is None: + assert alpha > 1.0, f"Alpha must be greater than one, got {alpha}." + sigma = tau ** (0.5 * (2 * alpha - 2.0)) + + # Inverse SHT + self.isht = InverseRealSHT( + self.nlat, 2 * self.nlat, grid=grid, norm="backward" + ).to(dtype=dtype) + + # Square root of the eigenvalues of C. + sqrt_eig = ( + paddle.to_tensor([j * (j + 1) for j in range(self.nlat)]) + .reshape([self.nlat, 1]) + .tile([1, self.nlat + 1]) + ) + sqrt_eig = paddle.tril( + sigma * (((sqrt_eig / radius**2) + tau**2) ** (-alpha / 2.0)) + ) + sqrt_eig[0, 0] = 0.0 + sqrt_eig = sqrt_eig.unsqueeze(0) + self.register_buffer("sqrt_eig", sqrt_eig) + + # Save mean and var of the standard Gaussian. + # Need these to re-initialize distribution on a new device. + mean = paddle.to_tensor([0.0]).astype(dtype) + var = paddle.to_tensor([1.0]).astype(dtype) + self.register_buffer("mean", mean) + self.register_buffer("var", var) + + # Standard normal noise sampler. + self.gaussian_noise = paddle.distribution.Normal(self.mean, self.var) + + def forward(self, N, xi=None): + """Sample random functions from a spherical GRF. + + Args: + N (int): Number of functions to sample. 
+ xi (paddle.Tensor, optional): Noise is a complex tensor of size (N, nlat, nlat+1). + If None, new Gaussian noise is sampled. + If xi is provided, N is ignored.. Defaults to None. + + Returns: + u (paddle.Tensor): N random samples from the GRF returned as a + tensor of size (N, nlat, 2*nlat) on a equiangular grid. + """ + + # Sample Gaussian noise. + if xi is None: + xi = self.gaussian_noise.sample((N, self.nlat, self.nlat + 1, 2)).squeeze() + xi = paddle.as_complex(xi) + + # Karhunen-Loeve expansion. + u = self.isht(xi * self.sqrt_eig) + + return u + + # Override cuda and to methods so sampler gets initialized with mean + # and variance on the correct device. + def cuda(self, *args, **kwargs): + super().cuda(*args, **kwargs) + self.gaussian_noise = paddle.distribution.Normal(self.mean, self.var) + + return self + + def to(self, *args, **kwargs): + super().to(*args, **kwargs) + self.gaussian_noise = paddle.distribution.Normal(self.mean, self.var) + + return self diff --git a/ppsci/arch/paddle_harmonics/sht.py b/ppsci/arch/paddle_harmonics/sht.py index bf5e685a04..1aabb74d34 100644 --- a/ppsci/arch/paddle_harmonics/sht.py +++ b/ppsci/arch/paddle_harmonics/sht.py @@ -1,461 +1,461 @@ -# coding=utf-8 - -# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved. -# SPDX-License-Identifier: BSD-3-Clause -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -""" -Code below is heavily based on [torch-harmonics](https://github.com/NVIDIA/torch-harmonics/blob/main/torch_harmonics/sht.py) -""" - -import math - -import numpy as np -import paddle -import paddle.fft -import paddle.nn as nn - -from ppsci.arch.paddle_harmonics.legendre import _precompute_dlegpoly -from ppsci.arch.paddle_harmonics.legendre import _precompute_legpoly -from ppsci.arch.paddle_harmonics.quadrature import clenshaw_curtiss_weights -from ppsci.arch.paddle_harmonics.quadrature import legendre_gauss_weights -from ppsci.arch.paddle_harmonics.quadrature import lobatto_weights - - -class RealSHT(nn.Layer): - """ - Defines a module for computing the forward (real-valued) SHT. 
- Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes. - The SHT is applied to the last two dimensions of the input - - [1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. - [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. - - Initializes the SHT Layer, precomputing the necessary quadrature weights. - - Args: - nlat (int): Input grid resolution in the latitudinal direction. - nlon (int): Input grid resolution in the longitudinal direction. - lmax (int, optional): The max input grid resolution in the latitudinal direction. Defaults to None. - mmax (int, optional): The max input grid resolution in the longitudinal direction. Defaults to None. - grid (str, optional): Grid in the latitude direction (for now only tensor product grids are supported). - Defaults to "lobatto". - norm (str, optional): The type of normalization to use. Defaults to "ortho". - csphase (bool, optional): Whether to apply the complex-conjugate symmetry phase factor. Defaults to True. - """ - - def __init__( - self, - nlat, - nlon, - lmax=None, - mmax=None, - grid="lobatto", - norm="ortho", - csphase=True, - ): - super().__init__() - - self.nlat = nlat - self.nlon = nlon - self.grid = grid - self.norm = norm - self.csphase = csphase - - # compute quadrature points - if self.grid == "legendre-gauss": - cost, w = legendre_gauss_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - elif self.grid == "lobatto": - cost, w = lobatto_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - 1 - elif self.grid == "equiangular": - cost, w = clenshaw_curtiss_weights(nlat, -1, 1) - # cost, w = fejer2_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - else: - raise (ValueError("Unknown quadrature mode")) - - # apply cosine transform and flip them - tq = np.flip(np.arccos(cost)) - - # determine the dimensions - self.mmax = mmax or self.nlon // 2 + 1 - - # combine quadrature weights with the legendre weights - weights = paddle.to_tensor(w) - pct = _precompute_legpoly( - self.mmax, self.lmax, tq, norm=self.norm, csphase=self.csphase - ) - pct = paddle.to_tensor(pct) - self.weights = paddle.einsum("mlk,k->mlk", pct, weights) - - def extra_repr(self): - return f"nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}" - - def forward(self, x: paddle.Tensor): - - assert x.shape[-2] == self.nlat - assert x.shape[-1] == self.nlon - - # apply real fft in the longitudinal direction - x = ( - 2.0 - * paddle.to_tensor(math.pi) - * paddle.fft.rfft(x, axis=-1, norm="forward") - ) - - # do the Legendre-Gauss quadrature - x = paddle.as_real(x) - # distributed contraction: fork - out_shape = list(x.shape) - out_shape[-3] = self.lmax - out_shape[-2] = self.mmax - xout = paddle.zeros(out_shape, dtype=x.dtype) - - # contraction - xout[..., 0] = paddle.einsum( - "...km,mlk->...lm", x[..., : self.mmax, 0], self.weights.astype(x.dtype) - ) - xout[..., 1] = paddle.einsum( - "...km,mlk->...lm", x[..., : self.mmax, 1], self.weights.astype(x.dtype) - ) - x = paddle.as_complex(xout) - - return x - - -class InverseRealSHT(nn.Layer): - """ - Defines a module for computing the inverse (real-valued) SHT. - Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes. 
- nlat, nlon: Output dimensions - lmax, mmax: Input dimensions (spherical coefficients). For convenience, these are inferred from the output dimensions - - [1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. - [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. - """ - - def __init__( - self, - nlat, - nlon, - lmax=None, - mmax=None, - grid="lobatto", - norm="ortho", - csphase=True, - ): - - super().__init__() - - self.nlat = nlat - self.nlon = nlon - self.grid = grid - self.norm = norm - self.csphase = csphase - - # compute quadrature points - if self.grid == "legendre-gauss": - cost, _ = legendre_gauss_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - elif self.grid == "lobatto": - cost, _ = lobatto_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - 1 - elif self.grid == "equiangular": - cost, _ = clenshaw_curtiss_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - else: - raise (ValueError("Unknown quadrature mode")) - - # apply cosine transform and flip them - t = np.flip(np.arccos(cost)) - - # determine the dimensions - self.mmax = mmax or self.nlon // 2 + 1 - - pct = _precompute_legpoly( - self.mmax, self.lmax, t, norm=self.norm, inverse=True, csphase=self.csphase - ) - self.pct = paddle.to_tensor(pct) - - def extra_repr(self): - return f"nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}" - - def forward(self, x: paddle.Tensor): - - assert x.shape[-2] == self.lmax - assert x.shape[-1] == self.mmax - - # Evaluate associated Legendre functions on the output nodes - x = paddle.as_real(x) - - rl = paddle.einsum("...lm, mlk->...km", x[..., 0], self.pct.astype(x.dtype)) - im = paddle.einsum("...lm, mlk->...km", x[..., 1], self.pct.astype(x.dtype)) - xs = paddle.stack((rl, im), -1) - - # apply the inverse (real) FFT - x = paddle.as_complex(xs) - x = paddle.fft.irfft(x, n=self.nlon, axis=-1, norm="forward") - - return x - - -class RealVectorSHT(nn.Layer): - """ - Defines a module for computing the forward (real) vector SHT. - Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes. - The SHT is applied to the last three dimensions of the input. - - [1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. - [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. - - Initializes the vector SHT Layer, precomputing the necessary quadrature weights. 
- """ - - def __init__( - self, - nlat, - nlon, - lmax=None, - mmax=None, - grid="lobatto", - norm="ortho", - csphase=True, - ): - super().__init__() - - self.nlat = nlat - self.nlon = nlon - self.grid = grid - self.norm = norm - self.csphase = csphase - - # compute quadrature points - if self.grid == "legendre-gauss": - cost, w = legendre_gauss_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - elif self.grid == "lobatto": - cost, w = lobatto_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - 1 - elif self.grid == "equiangular": - cost, w = clenshaw_curtiss_weights(nlat, -1, 1) - # cost, w = fejer2_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - else: - raise (ValueError("Unknown quadrature mode")) - - # apply cosine transform and flip them - tq = np.flip(np.arccos(cost)) - - # determine the dimensions - self.mmax = mmax or self.nlon // 2 + 1 - - weights = paddle.to_tensor(w) - dpct = _precompute_dlegpoly( - self.mmax, self.lmax, tq, norm=self.norm, csphase=self.csphase - ) - dpct = paddle.to_tensor(dpct) - - # combine integration weights, normalization factor in to one: - l = paddle.arange(0, self.lmax) - norm_factor = 1.0 / l / (l + 1) - norm_factor[0] = 1.0 - weights = paddle.einsum("dmlk,k,l->dmlk", dpct, weights, norm_factor) - # since the second component is imaginary, we need to take complex conjugation into account - weights[1] = -1 * weights[1] - - self.weights = weights - - def extra_repr(self): - return f"nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}" - - def forward(self, x: paddle.Tensor): - - assert len(x.shape) >= 3 - - # apply real fft in the longitudinal direction - x = 2.0 * paddle.to_tensor(np.pi) * paddle.fft.rfft(x, axis=-1, norm="forward") - - # do the Legendre-Gauss quadrature - x = paddle.as_real(x) - - # distributed contraction: fork - out_shape = list(x.shape) - out_shape[-3] = self.lmax - out_shape[-2] = self.mmax - xout = paddle.zeros(out_shape, dtype=x.dtype) - - # contraction - spheroidal component - # real component - xout[..., 0, :, :, 0] = paddle.einsum( - "...km,mlk->...lm", - x[..., 0, :, : self.mmax, 0], - self.weights[0].astype(x.dtype), - ) - paddle.einsum( - "...km,mlk->...lm", - x[..., 1, :, : self.mmax, 1], - self.weights[1].astype(x.dtype), - ) - - # iamg component - xout[..., 0, :, :, 1] = paddle.einsum( - "...km,mlk->...lm", - x[..., 0, :, : self.mmax, 1], - self.weights[0].astype(x.dtype), - ) + paddle.einsum( - "...km,mlk->...lm", - x[..., 1, :, : self.mmax, 0], - self.weights[1].astype(x.dtype), - ) - - # contraction - toroidal component - # real component - xout[..., 1, :, :, 0] = -paddle.einsum( - "...km,mlk->...lm", - x[..., 0, :, : self.mmax, 1], - self.weights[1].astype(x.dtype), - ) - paddle.einsum( - "...km,mlk->...lm", - x[..., 1, :, : self.mmax, 0], - self.weights[0].astype(x.dtype), - ) - # imag component - xout[..., 1, :, :, 1] = paddle.einsum( - "...km,mlk->...lm", - x[..., 0, :, : self.mmax, 0], - self.weights[1].astype(x.dtype), - ) - paddle.einsum( - "...km,mlk->...lm", - x[..., 1, :, : self.mmax, 1], - self.weights[0].astype(x.dtype), - ) - - return paddle.as_complex(xout) - - -class InverseRealVectorSHT(nn.Layer): - """ - Defines a module for computing the inverse (real-valued) vector SHT. - Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes. - - [1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. 
- [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. - """ - - def __init__( - self, - nlat, - nlon, - lmax=None, - mmax=None, - grid="lobatto", - norm="ortho", - csphase=True, - ): - - super().__init__() - - self.nlat = nlat - self.nlon = nlon - self.grid = grid - self.norm = norm - self.csphase = csphase - - # compute quadrature points - if self.grid == "legendre-gauss": - cost, _ = legendre_gauss_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - elif self.grid == "lobatto": - cost, _ = lobatto_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - 1 - elif self.grid == "equiangular": - cost, _ = clenshaw_curtiss_weights(nlat, -1, 1) - self.lmax = lmax or self.nlat - else: - raise (ValueError("Unknown quadrature mode")) - - # apply cosine transform and flip them - t = np.flip(np.arccos(cost)) - - # determine the dimensions - self.mmax = mmax or self.nlon // 2 + 1 - - dpct = _precompute_dlegpoly( - self.mmax, self.lmax, t, norm=self.norm, inverse=True, csphase=self.csphase - ) - self.dpct = paddle.to_tensor(dpct) - - def extra_repr(self): - return f"nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}" - - def forward(self, x: paddle.Tensor): - - assert x.shape[-2] == self.lmax - assert x.shape[-1] == self.mmax - - # Evaluate associated Legendre functions on the output nodes - x = paddle.as_real(x) - - # contraction - spheroidal component - # real component - srl = paddle.einsum( - "...lm,mlk->...km", x[..., 0, :, :, 0], self.dpct[0].astype(x.dtype) - ) - paddle.einsum( - "...lm,mlk->...km", x[..., 1, :, :, 1], self.dpct[1].astype(x.dtype) - ) - # iamg component - sim = paddle.einsum( - "...lm,mlk->...km", x[..., 0, :, :, 1], self.dpct[0].astype(x.dtype) - ) + paddle.einsum( - "...lm,mlk->...km", x[..., 1, :, :, 0], self.dpct[1].astype(x.dtype) - ) - - # contraction - toroidal component - # real component - trl = -paddle.einsum( - "...lm,mlk->...km", x[..., 0, :, :, 1], self.dpct[1].astype(x.dtype) - ) - paddle.einsum( - "...lm,mlk->...km", x[..., 1, :, :, 0], self.dpct[0].astype(x.dtype) - ) - # imag component - tim = paddle.einsum( - "...lm,mlk->...km", x[..., 0, :, :, 0], self.dpct[1].astype(x.dtype) - ) - paddle.einsum( - "...lm,mlk->...km", x[..., 1, :, :, 1], self.dpct[0].astype(x.dtype) - ) - - # reassemble - s = paddle.stack((srl, sim), -1) - t = paddle.stack((trl, tim), -1) - xs = paddle.stack((s, t), -4) - - # apply the inverse (real) FFT - x = paddle.as_complex(xs) - x = paddle.fft.irfft(x, n=self.nlon, axis=-1, norm="forward") - - return x +# coding=utf-8 + +# SPDX-FileCopyrightText: Copyright (c) 2022 The torch-harmonics Authors. All rights reserved. +# SPDX-License-Identifier: BSD-3-Clause +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +Code below is heavily based on [torch-harmonics](https://github.com/NVIDIA/torch-harmonics/blob/main/torch_harmonics/sht.py) +""" + +import math + +import numpy as np +import paddle +import paddle.fft +import paddle.nn as nn + +from ppsci.arch.paddle_harmonics.legendre import _precompute_dlegpoly +from ppsci.arch.paddle_harmonics.legendre import _precompute_legpoly +from ppsci.arch.paddle_harmonics.quadrature import clenshaw_curtiss_weights +from ppsci.arch.paddle_harmonics.quadrature import legendre_gauss_weights +from ppsci.arch.paddle_harmonics.quadrature import lobatto_weights + + +class RealSHT(nn.Layer): + """ + Defines a module for computing the forward (real-valued) SHT. + Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes. + The SHT is applied to the last two dimensions of the input + + [1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. + [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. + + Initializes the SHT Layer, precomputing the necessary quadrature weights. + + Args: + nlat (int): Input grid resolution in the latitudinal direction. + nlon (int): Input grid resolution in the longitudinal direction. + lmax (int, optional): The max input grid resolution in the latitudinal direction. Defaults to None. + mmax (int, optional): The max input grid resolution in the longitudinal direction. Defaults to None. + grid (str, optional): Grid in the latitude direction (for now only tensor product grids are supported). + Defaults to "lobatto". + norm (str, optional): The type of normalization to use. Defaults to "ortho". + csphase (bool, optional): Whether to apply the complex-conjugate symmetry phase factor. Defaults to True. 
+ """ + + def __init__( + self, + nlat, + nlon, + lmax=None, + mmax=None, + grid="lobatto", + norm="ortho", + csphase=True, + ): + super().__init__() + + self.nlat = nlat + self.nlon = nlon + self.grid = grid + self.norm = norm + self.csphase = csphase + + # compute quadrature points + if self.grid == "legendre-gauss": + cost, w = legendre_gauss_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat + elif self.grid == "lobatto": + cost, w = lobatto_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat - 1 + elif self.grid == "equiangular": + cost, w = clenshaw_curtiss_weights(nlat, -1, 1) + # cost, w = fejer2_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat + else: + raise (ValueError("Unknown quadrature mode")) + + # apply cosine transform and flip them + tq = np.flip(np.arccos(cost)) + + # determine the dimensions + self.mmax = mmax or self.nlon // 2 + 1 + + # combine quadrature weights with the legendre weights + weights = paddle.to_tensor(w) + pct = _precompute_legpoly( + self.mmax, self.lmax, tq, norm=self.norm, csphase=self.csphase + ) + pct = paddle.to_tensor(pct) + self.weights = paddle.einsum("mlk,k->mlk", pct, weights) + + def extra_repr(self): + return f"nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}" + + def forward(self, x: paddle.Tensor): + + assert x.shape[-2] == self.nlat + assert x.shape[-1] == self.nlon + + # apply real fft in the longitudinal direction + x = ( + 2.0 + * paddle.to_tensor(math.pi) + * paddle.fft.rfft(x, axis=-1, norm="forward") + ) + + # do the Legendre-Gauss quadrature + x = paddle.as_real(x) + # distributed contraction: fork + out_shape = list(x.shape) + out_shape[-3] = self.lmax + out_shape[-2] = self.mmax + xout = paddle.zeros(out_shape, dtype=x.dtype) + + # contraction + xout[..., 0] = paddle.einsum( + "...km,mlk->...lm", x[..., : self.mmax, 0], self.weights.astype(x.dtype) + ) + xout[..., 1] = paddle.einsum( + "...km,mlk->...lm", x[..., : self.mmax, 1], self.weights.astype(x.dtype) + ) + x = paddle.as_complex(xout) + + return x + + +class InverseRealSHT(nn.Layer): + """ + Defines a module for computing the inverse (real-valued) SHT. + Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes. + nlat, nlon: Output dimensions + lmax, mmax: Input dimensions (spherical coefficients). For convenience, these are inferred from the output dimensions + + [1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. + [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. 
+ """ + + def __init__( + self, + nlat, + nlon, + lmax=None, + mmax=None, + grid="lobatto", + norm="ortho", + csphase=True, + ): + + super().__init__() + + self.nlat = nlat + self.nlon = nlon + self.grid = grid + self.norm = norm + self.csphase = csphase + + # compute quadrature points + if self.grid == "legendre-gauss": + cost, _ = legendre_gauss_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat + elif self.grid == "lobatto": + cost, _ = lobatto_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat - 1 + elif self.grid == "equiangular": + cost, _ = clenshaw_curtiss_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat + else: + raise (ValueError("Unknown quadrature mode")) + + # apply cosine transform and flip them + t = np.flip(np.arccos(cost)) + + # determine the dimensions + self.mmax = mmax or self.nlon // 2 + 1 + + pct = _precompute_legpoly( + self.mmax, self.lmax, t, norm=self.norm, inverse=True, csphase=self.csphase + ) + self.pct = paddle.to_tensor(pct) + + def extra_repr(self): + return f"nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}" + + def forward(self, x: paddle.Tensor): + + assert x.shape[-2] == self.lmax + assert x.shape[-1] == self.mmax + + # Evaluate associated Legendre functions on the output nodes + x = paddle.as_real(x) + + rl = paddle.einsum("...lm, mlk->...km", x[..., 0], self.pct.astype(x.dtype)) + im = paddle.einsum("...lm, mlk->...km", x[..., 1], self.pct.astype(x.dtype)) + xs = paddle.stack((rl, im), -1) + + # apply the inverse (real) FFT + x = paddle.as_complex(xs) + x = paddle.fft.irfft(x, n=self.nlon, axis=-1, norm="forward") + + return x + + +class RealVectorSHT(nn.Layer): + """ + Defines a module for computing the forward (real) vector SHT. + Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes. + The SHT is applied to the last three dimensions of the input. + + [1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. + [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. + + Initializes the vector SHT Layer, precomputing the necessary quadrature weights. 
+ """ + + def __init__( + self, + nlat, + nlon, + lmax=None, + mmax=None, + grid="lobatto", + norm="ortho", + csphase=True, + ): + super().__init__() + + self.nlat = nlat + self.nlon = nlon + self.grid = grid + self.norm = norm + self.csphase = csphase + + # compute quadrature points + if self.grid == "legendre-gauss": + cost, w = legendre_gauss_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat + elif self.grid == "lobatto": + cost, w = lobatto_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat - 1 + elif self.grid == "equiangular": + cost, w = clenshaw_curtiss_weights(nlat, -1, 1) + # cost, w = fejer2_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat + else: + raise (ValueError("Unknown quadrature mode")) + + # apply cosine transform and flip them + tq = np.flip(np.arccos(cost)) + + # determine the dimensions + self.mmax = mmax or self.nlon // 2 + 1 + + weights = paddle.to_tensor(w) + dpct = _precompute_dlegpoly( + self.mmax, self.lmax, tq, norm=self.norm, csphase=self.csphase + ) + dpct = paddle.to_tensor(dpct) + + # combine integration weights, normalization factor in to one: + l = paddle.arange(0, self.lmax) + norm_factor = 1.0 / l / (l + 1) + norm_factor[0] = 1.0 + weights = paddle.einsum("dmlk,k,l->dmlk", dpct, weights, norm_factor) + # since the second component is imaginary, we need to take complex conjugation into account + weights[1] = -1 * weights[1] + + self.weights = weights + + def extra_repr(self): + return f"nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}" + + def forward(self, x: paddle.Tensor): + + assert len(x.shape) >= 3 + + # apply real fft in the longitudinal direction + x = 2.0 * paddle.to_tensor(np.pi) * paddle.fft.rfft(x, axis=-1, norm="forward") + + # do the Legendre-Gauss quadrature + x = paddle.as_real(x) + + # distributed contraction: fork + out_shape = list(x.shape) + out_shape[-3] = self.lmax + out_shape[-2] = self.mmax + xout = paddle.zeros(out_shape, dtype=x.dtype) + + # contraction - spheroidal component + # real component + xout[..., 0, :, :, 0] = paddle.einsum( + "...km,mlk->...lm", + x[..., 0, :, : self.mmax, 0], + self.weights[0].astype(x.dtype), + ) - paddle.einsum( + "...km,mlk->...lm", + x[..., 1, :, : self.mmax, 1], + self.weights[1].astype(x.dtype), + ) + + # iamg component + xout[..., 0, :, :, 1] = paddle.einsum( + "...km,mlk->...lm", + x[..., 0, :, : self.mmax, 1], + self.weights[0].astype(x.dtype), + ) + paddle.einsum( + "...km,mlk->...lm", + x[..., 1, :, : self.mmax, 0], + self.weights[1].astype(x.dtype), + ) + + # contraction - toroidal component + # real component + xout[..., 1, :, :, 0] = -paddle.einsum( + "...km,mlk->...lm", + x[..., 0, :, : self.mmax, 1], + self.weights[1].astype(x.dtype), + ) - paddle.einsum( + "...km,mlk->...lm", + x[..., 1, :, : self.mmax, 0], + self.weights[0].astype(x.dtype), + ) + # imag component + xout[..., 1, :, :, 1] = paddle.einsum( + "...km,mlk->...lm", + x[..., 0, :, : self.mmax, 0], + self.weights[1].astype(x.dtype), + ) - paddle.einsum( + "...km,mlk->...lm", + x[..., 1, :, : self.mmax, 1], + self.weights[0].astype(x.dtype), + ) + + return paddle.as_complex(xout) + + +class InverseRealVectorSHT(nn.Layer): + """ + Defines a module for computing the inverse (real-valued) vector SHT. + Precomputes Legendre Gauss nodes, weights and associated Legendre polynomials on these nodes. + + [1] Schaeffer, N. Efficient spherical harmonic transforms aimed at pseudospectral numerical simulations, G3: Geochemistry, Geophysics, Geosystems. 
+ [2] Wang, B., Wang, L., Xie, Z.; Accurate calculation of spherical and vector spherical harmonic expansions via spectral element grids; Adv Comput Math. + """ + + def __init__( + self, + nlat, + nlon, + lmax=None, + mmax=None, + grid="lobatto", + norm="ortho", + csphase=True, + ): + + super().__init__() + + self.nlat = nlat + self.nlon = nlon + self.grid = grid + self.norm = norm + self.csphase = csphase + + # compute quadrature points + if self.grid == "legendre-gauss": + cost, _ = legendre_gauss_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat + elif self.grid == "lobatto": + cost, _ = lobatto_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat - 1 + elif self.grid == "equiangular": + cost, _ = clenshaw_curtiss_weights(nlat, -1, 1) + self.lmax = lmax or self.nlat + else: + raise (ValueError("Unknown quadrature mode")) + + # apply cosine transform and flip them + t = np.flip(np.arccos(cost)) + + # determine the dimensions + self.mmax = mmax or self.nlon // 2 + 1 + + dpct = _precompute_dlegpoly( + self.mmax, self.lmax, t, norm=self.norm, inverse=True, csphase=self.csphase + ) + self.dpct = paddle.to_tensor(dpct) + + def extra_repr(self): + return f"nlat={self.nlat}, nlon={self.nlon},\n lmax={self.lmax}, mmax={self.mmax},\n grid={self.grid}, csphase={self.csphase}" + + def forward(self, x: paddle.Tensor): + + assert x.shape[-2] == self.lmax + assert x.shape[-1] == self.mmax + + # Evaluate associated Legendre functions on the output nodes + x = paddle.as_real(x) + + # contraction - spheroidal component + # real component + srl = paddle.einsum( + "...lm,mlk->...km", x[..., 0, :, :, 0], self.dpct[0].astype(x.dtype) + ) - paddle.einsum( + "...lm,mlk->...km", x[..., 1, :, :, 1], self.dpct[1].astype(x.dtype) + ) + # iamg component + sim = paddle.einsum( + "...lm,mlk->...km", x[..., 0, :, :, 1], self.dpct[0].astype(x.dtype) + ) + paddle.einsum( + "...lm,mlk->...km", x[..., 1, :, :, 0], self.dpct[1].astype(x.dtype) + ) + + # contraction - toroidal component + # real component + trl = -paddle.einsum( + "...lm,mlk->...km", x[..., 0, :, :, 1], self.dpct[1].astype(x.dtype) + ) - paddle.einsum( + "...lm,mlk->...km", x[..., 1, :, :, 0], self.dpct[0].astype(x.dtype) + ) + # imag component + tim = paddle.einsum( + "...lm,mlk->...km", x[..., 0, :, :, 0], self.dpct[1].astype(x.dtype) + ) - paddle.einsum( + "...lm,mlk->...km", x[..., 1, :, :, 1], self.dpct[0].astype(x.dtype) + ) + + # reassemble + s = paddle.stack((srl, sim), -1) + t = paddle.stack((trl, tim), -1) + xs = paddle.stack((s, t), -4) + + # apply the inverse (real) FFT + x = paddle.as_complex(xs) + x = paddle.fft.irfft(x, n=self.nlon, axis=-1, norm="forward") + + return x diff --git a/ppsci/arch/phycrnet.py b/ppsci/arch/phycrnet.py index c72583ebf9..b238b139c7 100644 --- a/ppsci/arch/phycrnet.py +++ b/ppsci/arch/phycrnet.py @@ -1,540 +1,540 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
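# A minimal roundtrip sketch for the scalar transforms defined above (RealSHT followed by
# InverseRealSHT). The import path ppsci.arch.paddle_harmonics.sht is an assumption inferred
# from this file's own sibling imports; shapes follow the forward() asserts in the classes.
import paddle
from ppsci.arch.paddle_harmonics.sht import InverseRealSHT, RealSHT

nlat, nlon = 64, 128
sht = RealSHT(nlat, nlon, grid="equiangular")          # equiangular grid: lmax=nlat, mmax=nlon//2+1
isht = InverseRealSHT(nlat, nlon, grid="equiangular")  # same grid, so lmax/mmax match the coefficients

signal = paddle.rand([4, nlat, nlon])   # the transform acts on the last two dimensions
coeffs = sht(signal)                    # complex coefficients of shape [4, lmax, mmax]
recon = isht(coeffs)                    # band-limited reconstruction of shape [4, nlat, nlon]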
- -from typing import Tuple - -import numpy as np -import paddle -import paddle.nn as nn -from paddle.nn import utils - -from ppsci.arch import base - -# define the high-order finite difference kernels -LALP_OP = [ - [ - [ - [0, 0, -1 / 12, 0, 0], - [0, 0, 4 / 3, 0, 0], - [-1 / 12, 4 / 3, -5, 4 / 3, -1 / 12], - [0, 0, 4 / 3, 0, 0], - [0, 0, -1 / 12, 0, 0], - ] - ] -] - -PARTIAL_Y = [ - [ - [ - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - [1 / 12, -8 / 12, 0, 8 / 12, -1 / 12], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0], - ] - ] -] - -PARTIAL_X = [ - [ - [ - [0, 0, 1 / 12, 0, 0], - [0, 0, -8 / 12, 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 8 / 12, 0, 0], - [0, 0, -1 / 12, 0, 0], - ] - ] -] - - -# specific parameters for burgers equation -def _initialize_weights(module): - if isinstance(module, nn.Conv2D): - c = 1.0 # 0.5 - initializer = nn.initializer.Uniform( - -c * np.sqrt(1 / (3 * 3 * 320)), c * np.sqrt(1 / (3 * 3 * 320)) - ) - initializer(module.weight) - elif isinstance(module, nn.Linear): - initializer = nn.initializer.Constant(0.0) - initializer(module.bias) - - -class PhyCRNet(base.Arch): - """Physics-informed convolutional-recurrent neural networks. - - Args: - input_channels (int): The input channels. - hidden_channels (Tuple[int, ...]): The hidden channels. - input_kernel_size (Tuple[int, ...]): The input kernel size(s). - input_stride (Tuple[int, ...]): The input stride(s). - input_padding (Tuple[int, ...]): The input padding(s). - dt (float): The dt parameter. - num_layers (Tuple[int, ...]): The number of layers. - upscale_factor (int): The upscale factor. - step (int, optional): The step(s). Defaults to 1. - effective_step (Tuple[int, ...], optional): The effective step. Defaults to (1, ). - - Examples: - >>> import ppsci - >>> model = ppsci.arch.PhyCRNet( - ... input_channels=2, - ... hidden_channels=[8, 32, 128, 128], - ... input_kernel_size=[4, 4, 4, 3], - ... input_stride=[2, 2, 2, 1], - ... input_padding=[1, 1, 1, 1], - ... dt=0.002, - ... num_layers=[3, 1], - ... upscale_factor=8 - ... ) - """ - - def __init__( - self, - input_channels: int, - hidden_channels: Tuple[int, ...], - input_kernel_size: Tuple[int, ...], - input_stride: Tuple[int, ...], - input_padding: Tuple[int, ...], - dt: float, - num_layers: Tuple[int, ...], - upscale_factor: int, - step: int = 1, - effective_step: Tuple[int, ...] 
= (1,), - ): - super(PhyCRNet, self).__init__() - - # input channels of layer includes input_channels and hidden_channels of cells - self.input_channels = [input_channels] + hidden_channels - self.hidden_channels = hidden_channels - self.input_kernel_size = input_kernel_size - self.input_stride = input_stride - self.input_padding = input_padding - self.step = step - self.effective_step = effective_step - self._all_layers = [] - self.dt = dt - self.upscale_factor = upscale_factor - - # number of layers - self.num_encoder = num_layers[0] - self.num_convlstm = num_layers[1] - - # encoder - downsampling - self.encoder = nn.LayerList( - [ - encoder_block( - input_channels=self.input_channels[i], - hidden_channels=self.hidden_channels[i], - input_kernel_size=self.input_kernel_size[i], - input_stride=self.input_stride[i], - input_padding=self.input_padding[i], - ) - for i in range(self.num_encoder) - ] - ) - - # ConvLSTM - self.convlstm = nn.LayerList( - [ - ConvLSTMCell( - input_channels=self.input_channels[i], - hidden_channels=self.hidden_channels[i], - input_kernel_size=self.input_kernel_size[i], - input_stride=self.input_stride[i], - input_padding=self.input_padding[i], - ) - for i in range(self.num_encoder, self.num_encoder + self.num_convlstm) - ] - ) - - # output layer - self.output_layer = nn.Conv2D( - 2, 2, kernel_size=5, stride=1, padding=2, padding_mode="circular" - ) - - # pixelshuffle - upscale - self.pixelshuffle = nn.PixelShuffle(self.upscale_factor) - - # initialize weights - self.apply(_initialize_weights) - initializer_0 = nn.initializer.Constant(0.0) - initializer_0(self.output_layer.bias) - self.enable_transform = True - - def forward(self, x): - if self.enable_transform: - if self._input_transform is not None: - x = self._input_transform(x) - output_x = x - - self.initial_state = x["initial_state"] - x = x["input"] - internal_state = [] - outputs = [] - second_last_state = [] - - for step in range(self.step): - xt = x - - # encoder - for encoder in self.encoder: - x = encoder(x) - - # convlstm - for i, lstm in enumerate(self.convlstm, self.num_encoder): - if step == 0: - (h, c) = lstm.init_hidden_tensor( - prev_state=self.initial_state[i - self.num_encoder] - ) - internal_state.append((h, c)) - - # one-step forward - (h, c) = internal_state[i - self.num_encoder] - x, new_c = lstm(x, h, c) - internal_state[i - self.num_encoder] = (x, new_c) - - # output - x = self.pixelshuffle(x) - x = self.output_layer(x) - - # residual connection - x = xt + self.dt * x - - if step == (self.step - 2): - second_last_state = internal_state.copy() - - if step in self.effective_step: - outputs.append(x) - - result_dict = {"outputs": outputs, "second_last_state": second_last_state} - if self.enable_transform: - if self._output_transform is not None: - result_dict = self._output_transform(output_x, result_dict) - return result_dict - - -class ConvLSTMCell(nn.Layer): - """Convolutional LSTM""" - - def __init__( - self, - input_channels, - hidden_channels, - input_kernel_size, - input_stride, - input_padding, - hidden_kernel_size=3, - num_features=4, - ): - super(ConvLSTMCell, self).__init__() - - self.input_channels = input_channels - self.hidden_channels = hidden_channels - self.hidden_kernel_size = hidden_kernel_size # Page 9, The convolutional operations in ConvLSTM have 3x3 kernels. 
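# The residual connection in PhyCRNet.forward above (`x = xt + self.dt * x`) advances the
# solution with an explicit, forward-Euler-style update: the network predicts an increment
# and the state is marched as x <- x + dt * f(x). A standalone sketch of that update rule,
# using a hypothetical stand-in predictor rather than the full encoder/ConvLSTM/decoder stack:
import paddle
import paddle.nn as nn

dt = 0.002                                      # same time step as the docstring example
f = nn.Conv2D(2, 2, kernel_size=3, padding=1)   # stand-in increment predictor (assumption)
x = paddle.rand([1, 2, 32, 32])                 # [batch, channels(u, v), H, W]
for _ in range(5):
    x = x + dt * f(x)                           # explicit residual time stepping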
- self.input_kernel_size = input_kernel_size - self.input_stride = input_stride - self.input_padding = input_padding - self.num_features = ( - num_features # Page 10, block of different dense layers {4, 3, 4} - ) - - # padding for hidden state - self.padding = int((self.hidden_kernel_size - 1) / 2) - - self.Wxi = nn.Conv2D( - self.input_channels, - self.hidden_channels, - self.input_kernel_size, - self.input_stride, - self.input_padding, - bias_attr=None, - padding_mode="circular", - ) - - self.Whi = nn.Conv2D( - self.hidden_channels, - self.hidden_channels, - self.hidden_kernel_size, - 1, - padding=1, - bias_attr=False, - padding_mode="circular", - ) - - self.Wxf = nn.Conv2D( - self.input_channels, - self.hidden_channels, - self.input_kernel_size, - self.input_stride, - self.input_padding, - bias_attr=None, - padding_mode="circular", - ) - - self.Whf = nn.Conv2D( - self.hidden_channels, - self.hidden_channels, - self.hidden_kernel_size, - 1, - padding=1, - bias_attr=False, - padding_mode="circular", - ) - - self.Wxc = nn.Conv2D( - self.input_channels, - self.hidden_channels, - self.input_kernel_size, - self.input_stride, - self.input_padding, - bias_attr=None, - padding_mode="circular", - ) - - self.Whc = nn.Conv2D( - self.hidden_channels, - self.hidden_channels, - self.hidden_kernel_size, - 1, - padding=1, - bias_attr=False, - padding_mode="circular", - ) - - self.Wxo = nn.Conv2D( - self.input_channels, - self.hidden_channels, - self.input_kernel_size, - self.input_stride, - self.input_padding, - bias_attr=None, - padding_mode="circular", - ) - - self.Who = nn.Conv2D( - self.hidden_channels, - self.hidden_channels, - self.hidden_kernel_size, - 1, - padding=1, - bias_attr=False, - padding_mode="circular", - ) - - initializer_0 = nn.initializer.Constant(0.0) - initializer_1 = nn.initializer.Constant(1.0) - - initializer_0(self.Wxi.bias) - initializer_0(self.Wxf.bias) - initializer_0(self.Wxc.bias) - initializer_1(self.Wxo.bias) - - def forward(self, x, h, c): - ci = nn.functional.sigmoid(self.Wxi(x) + self.Whi(h)) - cf = nn.functional.sigmoid(self.Wxf(x) + self.Whf(h)) - cc = cf * c + ci * paddle.tanh(self.Wxc(x) + self.Whc(h)) - co = nn.functional.sigmoid(self.Wxo(x) + self.Who(h)) - ch = co * paddle.tanh(cc) - return ch, cc - - def init_hidden_tensor(self, prev_state): - return ((prev_state[0]).cuda(), (prev_state[1]).cuda()) - - -class encoder_block(nn.Layer): - """Encoder with CNN""" - - def __init__( - self, - input_channels, - hidden_channels, - input_kernel_size, - input_stride, - input_padding, - ): - super(encoder_block, self).__init__() - - self.input_channels = input_channels - self.hidden_channels = hidden_channels - self.input_kernel_size = input_kernel_size - self.input_stride = input_stride - self.input_padding = input_padding - - self.conv = utils.weight_norm( - nn.Conv2D( - self.input_channels, - self.hidden_channels, - self.input_kernel_size, - self.input_stride, - self.input_padding, - bias_attr=None, - padding_mode="circular", - ) - ) - - self.act = nn.ReLU() - - initializer_0 = nn.initializer.Constant(0.0) - initializer_0(self.conv.bias) - - def forward(self, x): - return self.act(self.conv(x)) - - -class Conv2DDerivative(nn.Layer): - def __init__(self, der_filter, resol, kernel_size=3, name=""): - super(Conv2DDerivative, self).__init__() - - self.resol = resol # constant in the finite difference - self.name = name - self.input_channels = 1 - self.output_channels = 1 - self.kernel_size = kernel_size - - self.padding = int((kernel_size - 1) / 2) - self.filter = 
nn.Conv2D( - self.input_channels, - self.output_channels, - self.kernel_size, - 1, - padding=0, - bias_attr=False, - ) - - # Fixed gradient operator - self.filter.weight = self.create_parameter( - shape=self.filter.weight.shape, - dtype=self.filter.weight.dtype, - default_initializer=nn.initializer.Assign( - paddle.to_tensor( - der_filter, dtype=paddle.get_default_dtype(), stop_gradient=True - ) - ), - ) - self.filter.weight.stop_gradient = True - - def forward(self, input): - derivative = self.filter(input) - return derivative / self.resol - - -class Conv1DDerivative(nn.Layer): - def __init__(self, der_filter, resol, kernel_size=3, name=""): - super(Conv1DDerivative, self).__init__() - - self.resol = resol # $\delta$*constant in the finite difference - self.name = name - self.input_channels = 1 - self.output_channels = 1 - self.kernel_size = kernel_size - - self.padding = int((kernel_size - 1) / 2) - self.filter = nn.Conv1D( - self.input_channels, - self.output_channels, - self.kernel_size, - 1, - padding=0, - bias_attr=False, - ) - - # Fixed gradient operator - self.filter.weight = self.create_parameter( - shape=self.filter.weight.shape, - dtype=self.filter.weight.dtype, - default_initializer=nn.initializer.Assign( - paddle.to_tensor( - der_filter, dtype=paddle.get_default_dtype(), stop_gradient=True - ) - ), - ) - self.filter.weight.stop_gradient = True - - def forward(self, input): - derivative = self.filter(input) - return derivative / self.resol - - -class loss_generator(nn.Layer): - """Loss generator for physics loss""" - - def __init__(self, dt, dx): - """Construct the derivatives, X = Width, Y = Height""" - super(loss_generator, self).__init__() - - # spatial derivative operator - self.laplace = Conv2DDerivative( - der_filter=LALP_OP, resol=(dx**2), kernel_size=5, name="laplace_operator" - ) - - self.dx = Conv2DDerivative( - der_filter=PARTIAL_X, resol=(dx * 1), kernel_size=5, name="dx_operator" - ) - - self.dy = Conv2DDerivative( - der_filter=PARTIAL_Y, resol=(dx * 1), kernel_size=5, name="dy_operator" - ) - - # temporal derivative operator - self.dt = Conv1DDerivative( - der_filter=[[[-1, 0, 1]]], resol=(dt * 2), kernel_size=3, name="partial_t" - ) - - def get_phy_Loss(self, output): - # spatial derivatives - laplace_u = self.laplace(output[1:-1, 0:1, :, :]) # [t,c,h,w] - laplace_v = self.laplace(output[1:-1, 1:2, :, :]) - - u_x = self.dx(output[1:-1, 0:1, :, :]) - u_y = self.dy(output[1:-1, 0:1, :, :]) - v_x = self.dx(output[1:-1, 1:2, :, :]) - v_y = self.dy(output[1:-1, 1:2, :, :]) - - # temporal derivative - u - u = output[:, 0:1, 2:-2, 2:-2] - lent = u.shape[0] - lenx = u.shape[3] - leny = u.shape[2] - u_conv1d = u.transpose((2, 3, 1, 0)) # [height(Y), width(X), c, step] - u_conv1d = u_conv1d.reshape((lenx * leny, 1, lent)) - u_t = self.dt(u_conv1d) # lent-2 due to no-padding - u_t = u_t.reshape((leny, lenx, 1, lent - 2)) - u_t = u_t.transpose((3, 2, 0, 1)) # [step-2, c, height(Y), width(X)] - - # temporal derivative - v - v = output[:, 1:2, 2:-2, 2:-2] - v_conv1d = v.transpose((2, 3, 1, 0)) # [height(Y), width(X), c, step] - v_conv1d = v_conv1d.reshape((lenx * leny, 1, lent)) - v_t = self.dt(v_conv1d) # lent-2 due to no-padding - v_t = v_t.reshape((leny, lenx, 1, lent - 2)) - v_t = v_t.transpose((3, 2, 0, 1)) # [step-2, c, height(Y), width(X)] - - u = output[1:-1, 0:1, 2:-2, 2:-2] # [t, c, height(Y), width(X)] - v = output[1:-1, 1:2, 2:-2, 2:-2] # [t, c, height(Y), width(X)] - - assert laplace_u.shape == u_t.shape - assert u_t.shape == v_t.shape - assert laplace_u.shape == 
u.shape - assert laplace_v.shape == v.shape - - # Reynolds number - R = 200.0 - - # 2D burgers eqn - f_u = u_t + u * u_x + v * u_y - (1 / R) * laplace_u - f_v = v_t + u * v_x + v * v_y - (1 / R) * laplace_v - - return f_u, f_v +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple + +import numpy as np +import paddle +import paddle.nn as nn +from paddle.nn import utils + +from ppsci.arch import base + +# define the high-order finite difference kernels +LALP_OP = [ + [ + [ + [0, 0, -1 / 12, 0, 0], + [0, 0, 4 / 3, 0, 0], + [-1 / 12, 4 / 3, -5, 4 / 3, -1 / 12], + [0, 0, 4 / 3, 0, 0], + [0, 0, -1 / 12, 0, 0], + ] + ] +] + +PARTIAL_Y = [ + [ + [ + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [1 / 12, -8 / 12, 0, 8 / 12, -1 / 12], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ] + ] +] + +PARTIAL_X = [ + [ + [ + [0, 0, 1 / 12, 0, 0], + [0, 0, -8 / 12, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 8 / 12, 0, 0], + [0, 0, -1 / 12, 0, 0], + ] + ] +] + + +# specific parameters for burgers equation +def _initialize_weights(module): + if isinstance(module, nn.Conv2D): + c = 1.0 # 0.5 + initializer = nn.initializer.Uniform( + -c * np.sqrt(1 / (3 * 3 * 320)), c * np.sqrt(1 / (3 * 3 * 320)) + ) + initializer(module.weight) + elif isinstance(module, nn.Linear): + initializer = nn.initializer.Constant(0.0) + initializer(module.bias) + + +class PhyCRNet(base.Arch): + """Physics-informed convolutional-recurrent neural networks. + + Args: + input_channels (int): The input channels. + hidden_channels (Tuple[int, ...]): The hidden channels. + input_kernel_size (Tuple[int, ...]): The input kernel size(s). + input_stride (Tuple[int, ...]): The input stride(s). + input_padding (Tuple[int, ...]): The input padding(s). + dt (float): The dt parameter. + num_layers (Tuple[int, ...]): The number of layers. + upscale_factor (int): The upscale factor. + step (int, optional): The step(s). Defaults to 1. + effective_step (Tuple[int, ...], optional): The effective step. Defaults to (1, ). + + Examples: + >>> import ppsci + >>> model = ppsci.arch.PhyCRNet( + ... input_channels=2, + ... hidden_channels=[8, 32, 128, 128], + ... input_kernel_size=[4, 4, 4, 3], + ... input_stride=[2, 2, 2, 1], + ... input_padding=[1, 1, 1, 1], + ... dt=0.002, + ... num_layers=[3, 1], + ... upscale_factor=8 + ... ) + """ + + def __init__( + self, + input_channels: int, + hidden_channels: Tuple[int, ...], + input_kernel_size: Tuple[int, ...], + input_stride: Tuple[int, ...], + input_padding: Tuple[int, ...], + dt: float, + num_layers: Tuple[int, ...], + upscale_factor: int, + step: int = 1, + effective_step: Tuple[int, ...] 
= (1,), + ): + super(PhyCRNet, self).__init__() + + # input channels of layer includes input_channels and hidden_channels of cells + self.input_channels = [input_channels] + hidden_channels + self.hidden_channels = hidden_channels + self.input_kernel_size = input_kernel_size + self.input_stride = input_stride + self.input_padding = input_padding + self.step = step + self.effective_step = effective_step + self._all_layers = [] + self.dt = dt + self.upscale_factor = upscale_factor + + # number of layers + self.num_encoder = num_layers[0] + self.num_convlstm = num_layers[1] + + # encoder - downsampling + self.encoder = nn.LayerList( + [ + encoder_block( + input_channels=self.input_channels[i], + hidden_channels=self.hidden_channels[i], + input_kernel_size=self.input_kernel_size[i], + input_stride=self.input_stride[i], + input_padding=self.input_padding[i], + ) + for i in range(self.num_encoder) + ] + ) + + # ConvLSTM + self.convlstm = nn.LayerList( + [ + ConvLSTMCell( + input_channels=self.input_channels[i], + hidden_channels=self.hidden_channels[i], + input_kernel_size=self.input_kernel_size[i], + input_stride=self.input_stride[i], + input_padding=self.input_padding[i], + ) + for i in range(self.num_encoder, self.num_encoder + self.num_convlstm) + ] + ) + + # output layer + self.output_layer = nn.Conv2D( + 2, 2, kernel_size=5, stride=1, padding=2, padding_mode="circular" + ) + + # pixelshuffle - upscale + self.pixelshuffle = nn.PixelShuffle(self.upscale_factor) + + # initialize weights + self.apply(_initialize_weights) + initializer_0 = nn.initializer.Constant(0.0) + initializer_0(self.output_layer.bias) + self.enable_transform = True + + def forward(self, x): + if self.enable_transform: + if self._input_transform is not None: + x = self._input_transform(x) + output_x = x + + self.initial_state = x["initial_state"] + x = x["input"] + internal_state = [] + outputs = [] + second_last_state = [] + + for step in range(self.step): + xt = x + + # encoder + for encoder in self.encoder: + x = encoder(x) + + # convlstm + for i, lstm in enumerate(self.convlstm, self.num_encoder): + if step == 0: + (h, c) = lstm.init_hidden_tensor( + prev_state=self.initial_state[i - self.num_encoder] + ) + internal_state.append((h, c)) + + # one-step forward + (h, c) = internal_state[i - self.num_encoder] + x, new_c = lstm(x, h, c) + internal_state[i - self.num_encoder] = (x, new_c) + + # output + x = self.pixelshuffle(x) + x = self.output_layer(x) + + # residual connection + x = xt + self.dt * x + + if step == (self.step - 2): + second_last_state = internal_state.copy() + + if step in self.effective_step: + outputs.append(x) + + result_dict = {"outputs": outputs, "second_last_state": second_last_state} + if self.enable_transform: + if self._output_transform is not None: + result_dict = self._output_transform(output_x, result_dict) + return result_dict + + +class ConvLSTMCell(nn.Layer): + """Convolutional LSTM""" + + def __init__( + self, + input_channels, + hidden_channels, + input_kernel_size, + input_stride, + input_padding, + hidden_kernel_size=3, + num_features=4, + ): + super(ConvLSTMCell, self).__init__() + + self.input_channels = input_channels + self.hidden_channels = hidden_channels + self.hidden_kernel_size = hidden_kernel_size # Page 9, The convolutional operations in ConvLSTM have 3x3 kernels. 
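# The hard-coded stencils LALP_OP, PARTIAL_X and PARTIAL_Y in this file are high-order
# (4th-order central) finite-difference kernels, and Conv2DDerivative applies them as a
# fixed, non-trainable convolution. A small sketch of using the Laplacian stencil on its
# own, assuming the module is importable as ppsci.arch.phycrnet (path taken from this diff):
import paddle
from ppsci.arch.phycrnet import LALP_OP, Conv2DDerivative

dx = 1.0 / 128
laplace = Conv2DDerivative(der_filter=LALP_OP, resol=dx**2, kernel_size=5)
field = paddle.rand([1, 1, 128, 128])  # [batch, channel, H, W]
lap = laplace(field)                   # valid (no-padding) 5x5 convolution -> [1, 1, 124, 124]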
+ self.input_kernel_size = input_kernel_size + self.input_stride = input_stride + self.input_padding = input_padding + self.num_features = ( + num_features # Page 10, block of different dense layers {4, 3, 4} + ) + + # padding for hidden state + self.padding = int((self.hidden_kernel_size - 1) / 2) + + self.Wxi = nn.Conv2D( + self.input_channels, + self.hidden_channels, + self.input_kernel_size, + self.input_stride, + self.input_padding, + bias_attr=None, + padding_mode="circular", + ) + + self.Whi = nn.Conv2D( + self.hidden_channels, + self.hidden_channels, + self.hidden_kernel_size, + 1, + padding=1, + bias_attr=False, + padding_mode="circular", + ) + + self.Wxf = nn.Conv2D( + self.input_channels, + self.hidden_channels, + self.input_kernel_size, + self.input_stride, + self.input_padding, + bias_attr=None, + padding_mode="circular", + ) + + self.Whf = nn.Conv2D( + self.hidden_channels, + self.hidden_channels, + self.hidden_kernel_size, + 1, + padding=1, + bias_attr=False, + padding_mode="circular", + ) + + self.Wxc = nn.Conv2D( + self.input_channels, + self.hidden_channels, + self.input_kernel_size, + self.input_stride, + self.input_padding, + bias_attr=None, + padding_mode="circular", + ) + + self.Whc = nn.Conv2D( + self.hidden_channels, + self.hidden_channels, + self.hidden_kernel_size, + 1, + padding=1, + bias_attr=False, + padding_mode="circular", + ) + + self.Wxo = nn.Conv2D( + self.input_channels, + self.hidden_channels, + self.input_kernel_size, + self.input_stride, + self.input_padding, + bias_attr=None, + padding_mode="circular", + ) + + self.Who = nn.Conv2D( + self.hidden_channels, + self.hidden_channels, + self.hidden_kernel_size, + 1, + padding=1, + bias_attr=False, + padding_mode="circular", + ) + + initializer_0 = nn.initializer.Constant(0.0) + initializer_1 = nn.initializer.Constant(1.0) + + initializer_0(self.Wxi.bias) + initializer_0(self.Wxf.bias) + initializer_0(self.Wxc.bias) + initializer_1(self.Wxo.bias) + + def forward(self, x, h, c): + ci = nn.functional.sigmoid(self.Wxi(x) + self.Whi(h)) + cf = nn.functional.sigmoid(self.Wxf(x) + self.Whf(h)) + cc = cf * c + ci * paddle.tanh(self.Wxc(x) + self.Whc(h)) + co = nn.functional.sigmoid(self.Wxo(x) + self.Who(h)) + ch = co * paddle.tanh(cc) + return ch, cc + + def init_hidden_tensor(self, prev_state): + return ((prev_state[0]).cuda(), (prev_state[1]).cuda()) + + +class encoder_block(nn.Layer): + """Encoder with CNN""" + + def __init__( + self, + input_channels, + hidden_channels, + input_kernel_size, + input_stride, + input_padding, + ): + super(encoder_block, self).__init__() + + self.input_channels = input_channels + self.hidden_channels = hidden_channels + self.input_kernel_size = input_kernel_size + self.input_stride = input_stride + self.input_padding = input_padding + + self.conv = utils.weight_norm( + nn.Conv2D( + self.input_channels, + self.hidden_channels, + self.input_kernel_size, + self.input_stride, + self.input_padding, + bias_attr=None, + padding_mode="circular", + ) + ) + + self.act = nn.ReLU() + + initializer_0 = nn.initializer.Constant(0.0) + initializer_0(self.conv.bias) + + def forward(self, x): + return self.act(self.conv(x)) + + +class Conv2DDerivative(nn.Layer): + def __init__(self, der_filter, resol, kernel_size=3, name=""): + super(Conv2DDerivative, self).__init__() + + self.resol = resol # constant in the finite difference + self.name = name + self.input_channels = 1 + self.output_channels = 1 + self.kernel_size = kernel_size + + self.padding = int((kernel_size - 1) / 2) + self.filter = 
nn.Conv2D( + self.input_channels, + self.output_channels, + self.kernel_size, + 1, + padding=0, + bias_attr=False, + ) + + # Fixed gradient operator + self.filter.weight = self.create_parameter( + shape=self.filter.weight.shape, + dtype=self.filter.weight.dtype, + default_initializer=nn.initializer.Assign( + paddle.to_tensor( + der_filter, dtype=paddle.get_default_dtype(), stop_gradient=True + ) + ), + ) + self.filter.weight.stop_gradient = True + + def forward(self, input): + derivative = self.filter(input) + return derivative / self.resol + + +class Conv1DDerivative(nn.Layer): + def __init__(self, der_filter, resol, kernel_size=3, name=""): + super(Conv1DDerivative, self).__init__() + + self.resol = resol # $\delta$*constant in the finite difference + self.name = name + self.input_channels = 1 + self.output_channels = 1 + self.kernel_size = kernel_size + + self.padding = int((kernel_size - 1) / 2) + self.filter = nn.Conv1D( + self.input_channels, + self.output_channels, + self.kernel_size, + 1, + padding=0, + bias_attr=False, + ) + + # Fixed gradient operator + self.filter.weight = self.create_parameter( + shape=self.filter.weight.shape, + dtype=self.filter.weight.dtype, + default_initializer=nn.initializer.Assign( + paddle.to_tensor( + der_filter, dtype=paddle.get_default_dtype(), stop_gradient=True + ) + ), + ) + self.filter.weight.stop_gradient = True + + def forward(self, input): + derivative = self.filter(input) + return derivative / self.resol + + +class loss_generator(nn.Layer): + """Loss generator for physics loss""" + + def __init__(self, dt, dx): + """Construct the derivatives, X = Width, Y = Height""" + super(loss_generator, self).__init__() + + # spatial derivative operator + self.laplace = Conv2DDerivative( + der_filter=LALP_OP, resol=(dx**2), kernel_size=5, name="laplace_operator" + ) + + self.dx = Conv2DDerivative( + der_filter=PARTIAL_X, resol=(dx * 1), kernel_size=5, name="dx_operator" + ) + + self.dy = Conv2DDerivative( + der_filter=PARTIAL_Y, resol=(dx * 1), kernel_size=5, name="dy_operator" + ) + + # temporal derivative operator + self.dt = Conv1DDerivative( + der_filter=[[[-1, 0, 1]]], resol=(dt * 2), kernel_size=3, name="partial_t" + ) + + def get_phy_Loss(self, output): + # spatial derivatives + laplace_u = self.laplace(output[1:-1, 0:1, :, :]) # [t,c,h,w] + laplace_v = self.laplace(output[1:-1, 1:2, :, :]) + + u_x = self.dx(output[1:-1, 0:1, :, :]) + u_y = self.dy(output[1:-1, 0:1, :, :]) + v_x = self.dx(output[1:-1, 1:2, :, :]) + v_y = self.dy(output[1:-1, 1:2, :, :]) + + # temporal derivative - u + u = output[:, 0:1, 2:-2, 2:-2] + lent = u.shape[0] + lenx = u.shape[3] + leny = u.shape[2] + u_conv1d = u.transpose((2, 3, 1, 0)) # [height(Y), width(X), c, step] + u_conv1d = u_conv1d.reshape((lenx * leny, 1, lent)) + u_t = self.dt(u_conv1d) # lent-2 due to no-padding + u_t = u_t.reshape((leny, lenx, 1, lent - 2)) + u_t = u_t.transpose((3, 2, 0, 1)) # [step-2, c, height(Y), width(X)] + + # temporal derivative - v + v = output[:, 1:2, 2:-2, 2:-2] + v_conv1d = v.transpose((2, 3, 1, 0)) # [height(Y), width(X), c, step] + v_conv1d = v_conv1d.reshape((lenx * leny, 1, lent)) + v_t = self.dt(v_conv1d) # lent-2 due to no-padding + v_t = v_t.reshape((leny, lenx, 1, lent - 2)) + v_t = v_t.transpose((3, 2, 0, 1)) # [step-2, c, height(Y), width(X)] + + u = output[1:-1, 0:1, 2:-2, 2:-2] # [t, c, height(Y), width(X)] + v = output[1:-1, 1:2, 2:-2, 2:-2] # [t, c, height(Y), width(X)] + + assert laplace_u.shape == u_t.shape + assert u_t.shape == v_t.shape + assert laplace_u.shape == 
u.shape + assert laplace_v.shape == v.shape + + # Reynolds number + R = 200.0 + + # 2D burgers eqn + f_u = u_t + u * u_x + v * u_y - (1 / R) * laplace_u + f_v = v_t + u * v_x + v * v_y - (1 / R) * laplace_v + + return f_u, f_v diff --git a/ppsci/arch/phylstm.py b/ppsci/arch/phylstm.py index b2d935465e..79fc01d06f 100644 --- a/ppsci/arch/phylstm.py +++ b/ppsci/arch/phylstm.py @@ -1,205 +1,205 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -import paddle.nn as nn - -from ppsci.arch import base - - -class DeepPhyLSTM(base.Arch): - """DeepPhyLSTM init function. - - Args: - input_size (int): The input size. - output_size (int): The output size. - hidden_size (int, optional): The hidden size. Defaults to 100. - model_type (int, optional): The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. - - Examples: - >>> import paddle - >>> import ppsci - >>> # model_type is `2` - >>> model = ppsci.arch.DeepPhyLSTM( - ... input_size=16, - ... output_size=1, - ... hidden_size=100, - ... model_type=2) - >>> out = model( - ... {"ag":paddle.rand([64, 16, 16]), - ... "ag_c":paddle.rand([64, 16, 16]), - ... "phi":paddle.rand([1, 16, 16])}) - >>> for k, v in out.items(): - ... print(f"{k} {v.dtype} {v.shape}") - eta_pred paddle.float32 [64, 16, 1] - eta_dot_pred paddle.float32 [64, 16, 1] - g_pred paddle.float32 [64, 16, 1] - eta_t_pred_c paddle.float32 [64, 16, 1] - eta_dot_pred_c paddle.float32 [64, 16, 1] - lift_pred_c paddle.float32 [64, 16, 1] - >>> # model_type is `3` - >>> model = ppsci.arch.DeepPhyLSTM( - ... input_size=16, - ... output_size=1, - ... hidden_size=100, - ... model_type=3) - >>> out = model( - ... {"ag":paddle.rand([64, 16, 1]), - ... "ag_c":paddle.rand([64, 16, 1]), - ... "phi":paddle.rand([1, 16, 16])}) - >>> for k, v in out.items(): - ... 
print(f"{k} {v.dtype} {v.shape}") - eta_pred paddle.float32 [64, 16, 1] - eta_dot_pred paddle.float32 [64, 16, 1] - g_pred paddle.float32 [64, 16, 1] - eta_t_pred_c paddle.float32 [64, 16, 1] - eta_dot_pred_c paddle.float32 [64, 16, 1] - lift_pred_c paddle.float32 [64, 16, 1] - g_t_pred_c paddle.float32 [64, 16, 1] - g_dot_pred_c paddle.float32 [64, 16, 1] - """ - - def __init__(self, input_size, output_size, hidden_size=100, model_type=2): - super().__init__() - self.input_size = input_size - self.output_size = output_size - self.hidden_size = hidden_size - self.model_type = model_type - - if self.model_type == 2: - self.lstm_model = nn.Sequential( - nn.LSTM(input_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.Linear(hidden_size, hidden_size), - nn.Linear(hidden_size, 3 * output_size), - ) - - self.lstm_model_f = nn.Sequential( - nn.LSTM(3 * output_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.Linear(hidden_size, hidden_size), - nn.Linear(hidden_size, output_size), - ) - elif self.model_type == 3: - self.lstm_model = nn.Sequential( - nn.LSTM(1, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.Linear(hidden_size, 3 * output_size), - ) - - self.lstm_model_f = nn.Sequential( - nn.LSTM(3 * output_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.Linear(hidden_size, output_size), - ) - - self.lstm_model_g = nn.Sequential( - nn.LSTM(2 * output_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.LSTM(hidden_size, hidden_size), - nn.ReLU(), - nn.Linear(hidden_size, output_size), - ) - else: - raise ValueError(f"model_type should be 2 or 3, but got {model_type}") - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - if self.model_type == 2: - result_dict = self._forward_type_2(x) - elif self.model_type == 3: - result_dict = self._forward_type_3(x) - if self._output_transform is not None: - result_dict = self._output_transform(x, result_dict) - return result_dict - - def _forward_type_2(self, x): - output = self.lstm_model(x["ag"]) - eta_pred = output[:, :, 0 : self.output_size] - eta_dot_pred = output[:, :, self.output_size : 2 * self.output_size] - g_pred = output[:, :, 2 * self.output_size :] - - # for ag_c - output_c = self.lstm_model(x["ag_c"]) - eta_pred_c = output_c[:, :, 0 : self.output_size] - eta_dot_pred_c = output_c[:, :, self.output_size : 2 * self.output_size] - g_pred_c = output_c[:, :, 2 * self.output_size :] - eta_t_pred_c = paddle.matmul(x["phi"], eta_pred_c) - eta_tt_pred_c = paddle.matmul(x["phi"], eta_dot_pred_c) - eta_dot1_pred_c = eta_dot_pred_c[:, :, 0:1] - tmp = paddle.concat([eta_pred_c, eta_dot1_pred_c, g_pred_c], 2) - f = self.lstm_model_f(tmp) - lift_pred_c = eta_tt_pred_c + f - - return { - "eta_pred": eta_pred, - "eta_dot_pred": eta_dot_pred, - "g_pred": g_pred, - "eta_t_pred_c": eta_t_pred_c, - "eta_dot_pred_c": eta_dot_pred_c, - "lift_pred_c": lift_pred_c, - } - - def _forward_type_3(self, x): - # physics informed neural networks - output = self.lstm_model(x["ag"]) - eta_pred = output[:, :, 0 : self.output_size] - eta_dot_pred = output[:, :, self.output_size : 2 * self.output_size] - g_pred = output[:, :, 2 * self.output_size :] - - 
output_c = self.lstm_model(x["ag_c"]) - eta_pred_c = output_c[:, :, 0 : self.output_size] - eta_dot_pred_c = output_c[:, :, self.output_size : 2 * self.output_size] - g_pred_c = output_c[:, :, 2 * self.output_size :] - - eta_t_pred_c = paddle.matmul(x["phi"], eta_pred_c) - eta_tt_pred_c = paddle.matmul(x["phi"], eta_dot_pred_c) - g_t_pred_c = paddle.matmul(x["phi"], g_pred_c) - - f = self.lstm_model_f(paddle.concat([eta_pred_c, eta_dot_pred_c, g_pred_c], 2)) - lift_pred_c = eta_tt_pred_c + f - - eta_dot1_pred_c = eta_dot_pred_c[:, :, 0:1] - g_dot_pred_c = self.lstm_model_g(paddle.concat([eta_dot1_pred_c, g_pred_c], 2)) - - return { - "eta_pred": eta_pred, - "eta_dot_pred": eta_dot_pred, - "g_pred": g_pred, - "eta_t_pred_c": eta_t_pred_c, - "eta_dot_pred_c": eta_dot_pred_c, - "lift_pred_c": lift_pred_c, - "g_t_pred_c": g_t_pred_c, - "g_dot_pred_c": g_dot_pred_c, - } +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.nn as nn + +from ppsci.arch import base + + +class DeepPhyLSTM(base.Arch): + """DeepPhyLSTM init function. + + Args: + input_size (int): The input size. + output_size (int): The output size. + hidden_size (int, optional): The hidden size. Defaults to 100. + model_type (int, optional): The model type, value is 2 or 3, 2 indicates having two sub-models, 3 indicates having three submodels. Defaults to 2. + + Examples: + >>> import paddle + >>> import ppsci + >>> # model_type is `2` + >>> model = ppsci.arch.DeepPhyLSTM( + ... input_size=16, + ... output_size=1, + ... hidden_size=100, + ... model_type=2) + >>> out = model( + ... {"ag":paddle.rand([64, 16, 16]), + ... "ag_c":paddle.rand([64, 16, 16]), + ... "phi":paddle.rand([1, 16, 16])}) + >>> for k, v in out.items(): + ... print(f"{k} {v.dtype} {v.shape}") + eta_pred paddle.float32 [64, 16, 1] + eta_dot_pred paddle.float32 [64, 16, 1] + g_pred paddle.float32 [64, 16, 1] + eta_t_pred_c paddle.float32 [64, 16, 1] + eta_dot_pred_c paddle.float32 [64, 16, 1] + lift_pred_c paddle.float32 [64, 16, 1] + >>> # model_type is `3` + >>> model = ppsci.arch.DeepPhyLSTM( + ... input_size=16, + ... output_size=1, + ... hidden_size=100, + ... model_type=3) + >>> out = model( + ... {"ag":paddle.rand([64, 16, 1]), + ... "ag_c":paddle.rand([64, 16, 1]), + ... "phi":paddle.rand([1, 16, 16])}) + >>> for k, v in out.items(): + ... 
print(f"{k} {v.dtype} {v.shape}") + eta_pred paddle.float32 [64, 16, 1] + eta_dot_pred paddle.float32 [64, 16, 1] + g_pred paddle.float32 [64, 16, 1] + eta_t_pred_c paddle.float32 [64, 16, 1] + eta_dot_pred_c paddle.float32 [64, 16, 1] + lift_pred_c paddle.float32 [64, 16, 1] + g_t_pred_c paddle.float32 [64, 16, 1] + g_dot_pred_c paddle.float32 [64, 16, 1] + """ + + def __init__(self, input_size, output_size, hidden_size=100, model_type=2): + super().__init__() + self.input_size = input_size + self.output_size = output_size + self.hidden_size = hidden_size + self.model_type = model_type + + if self.model_type == 2: + self.lstm_model = nn.Sequential( + nn.LSTM(input_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.Linear(hidden_size, hidden_size), + nn.Linear(hidden_size, 3 * output_size), + ) + + self.lstm_model_f = nn.Sequential( + nn.LSTM(3 * output_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.Linear(hidden_size, hidden_size), + nn.Linear(hidden_size, output_size), + ) + elif self.model_type == 3: + self.lstm_model = nn.Sequential( + nn.LSTM(1, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.Linear(hidden_size, 3 * output_size), + ) + + self.lstm_model_f = nn.Sequential( + nn.LSTM(3 * output_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.Linear(hidden_size, output_size), + ) + + self.lstm_model_g = nn.Sequential( + nn.LSTM(2 * output_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.LSTM(hidden_size, hidden_size), + nn.ReLU(), + nn.Linear(hidden_size, output_size), + ) + else: + raise ValueError(f"model_type should be 2 or 3, but got {model_type}") + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + if self.model_type == 2: + result_dict = self._forward_type_2(x) + elif self.model_type == 3: + result_dict = self._forward_type_3(x) + if self._output_transform is not None: + result_dict = self._output_transform(x, result_dict) + return result_dict + + def _forward_type_2(self, x): + output = self.lstm_model(x["ag"]) + eta_pred = output[:, :, 0 : self.output_size] + eta_dot_pred = output[:, :, self.output_size : 2 * self.output_size] + g_pred = output[:, :, 2 * self.output_size :] + + # for ag_c + output_c = self.lstm_model(x["ag_c"]) + eta_pred_c = output_c[:, :, 0 : self.output_size] + eta_dot_pred_c = output_c[:, :, self.output_size : 2 * self.output_size] + g_pred_c = output_c[:, :, 2 * self.output_size :] + eta_t_pred_c = paddle.matmul(x["phi"], eta_pred_c) + eta_tt_pred_c = paddle.matmul(x["phi"], eta_dot_pred_c) + eta_dot1_pred_c = eta_dot_pred_c[:, :, 0:1] + tmp = paddle.concat([eta_pred_c, eta_dot1_pred_c, g_pred_c], 2) + f = self.lstm_model_f(tmp) + lift_pred_c = eta_tt_pred_c + f + + return { + "eta_pred": eta_pred, + "eta_dot_pred": eta_dot_pred, + "g_pred": g_pred, + "eta_t_pred_c": eta_t_pred_c, + "eta_dot_pred_c": eta_dot_pred_c, + "lift_pred_c": lift_pred_c, + } + + def _forward_type_3(self, x): + # physics informed neural networks + output = self.lstm_model(x["ag"]) + eta_pred = output[:, :, 0 : self.output_size] + eta_dot_pred = output[:, :, self.output_size : 2 * self.output_size] + g_pred = output[:, :, 2 * self.output_size :] + + 
output_c = self.lstm_model(x["ag_c"]) + eta_pred_c = output_c[:, :, 0 : self.output_size] + eta_dot_pred_c = output_c[:, :, self.output_size : 2 * self.output_size] + g_pred_c = output_c[:, :, 2 * self.output_size :] + + eta_t_pred_c = paddle.matmul(x["phi"], eta_pred_c) + eta_tt_pred_c = paddle.matmul(x["phi"], eta_dot_pred_c) + g_t_pred_c = paddle.matmul(x["phi"], g_pred_c) + + f = self.lstm_model_f(paddle.concat([eta_pred_c, eta_dot_pred_c, g_pred_c], 2)) + lift_pred_c = eta_tt_pred_c + f + + eta_dot1_pred_c = eta_dot_pred_c[:, :, 0:1] + g_dot_pred_c = self.lstm_model_g(paddle.concat([eta_dot1_pred_c, g_pred_c], 2)) + + return { + "eta_pred": eta_pred, + "eta_dot_pred": eta_dot_pred, + "g_pred": g_pred, + "eta_t_pred_c": eta_t_pred_c, + "eta_dot_pred_c": eta_dot_pred_c, + "lift_pred_c": lift_pred_c, + "g_t_pred_c": g_t_pred_c, + "g_dot_pred_c": g_dot_pred_c, + } diff --git a/ppsci/arch/physx_transformer.py b/ppsci/arch/physx_transformer.py index 267fb458c6..4057f9d374 100644 --- a/ppsci/arch/physx_transformer.py +++ b/ppsci/arch/physx_transformer.py @@ -1,407 +1,407 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [transformer-physx](https://github.com/zabaras/transformer-physx) -""" - -from __future__ import annotations - -from typing import Optional -from typing import Tuple - -import paddle -import paddle.nn.functional as F -from paddle import nn -from paddle.nn.initializer import Constant -from paddle.nn.initializer import Normal - -from ppsci.arch import base -from ppsci.arch.embedding_koopman import CylinderEmbedding - -zeros_ = Constant(value=0.0) -ones_ = Constant(value=1.0) - - -class MaskedAttention(nn.Layer): - """Masked self-attention module. - - Args: - embed_dim (int): The expected feature size in the input and output. - num_ctx (int): Context length of block. - num_heads (int): The number of heads in multi-head attention. - attn_drop (float, optional): The dropout probability used on attention - weights to drop some attention targets. Defaults to 0. - proj_drop (float, optional): The dropout probability used on output. Defaults to 0. - scale (bool, optional): Whether to scale attention weights. Defaults to False. 
- """ - - def __init__( - self, - embed_dim: int, - num_ctx: int, - num_heads: int, - attn_drop: float = 0.0, - proj_drop: float = 0.0, - scale: bool = False, - ): - super().__init__() - self.register_buffer( - "bias", - paddle.tril(paddle.ones((num_ctx, num_ctx), dtype="int32")).reshape( - [1, 1, num_ctx, num_ctx] - ), - ) - - self.register_buffer("masked_bias", paddle.to_tensor(-1e4)) - self.num_heads = num_heads - self.split_size = embed_dim - self.scale = scale - - self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3) - self.out_proj = nn.Linear(embed_dim, embed_dim) - self.attn_drop = nn.Dropout(attn_drop) - self.proj_drop = nn.Dropout(proj_drop) - - def _attn( - self, - query, - key, - value, - attention_mask=None, - head_mask=None, - output_attentions=False, - ): - attn = paddle.matmul(query, key) - if self.scale: - attn = attn / (float(value.shape[-1]) ** 0.5) - - nd, ns = attn.shape[-2], attn.shape[-1] - mask = self.bias[:, :, ns - nd : ns, :ns] - attn = paddle.where(mask > 0, attn, self.masked_bias.cast(attn.dtype)) - - if attention_mask is not None: - attn = attn + attention_mask - - attn = F.softmax(attn, axis=-1) - attn = self.attn_drop(attn) - - if head_mask is not None: - attn = attn * head_mask - - outputs = [paddle.matmul(attn, value)] - if output_attentions: - outputs.append(attn) - return outputs - - def merge_heads(self, x): - x = x.transpose([0, 2, 1, 3]) - new_x_shape = x.shape[:-2] + [ - x.shape[-2] * x.shape[-1], - ] - return x.reshape(new_x_shape) - - def split_heads(self, x, k=False): - new_x_shape = x.shape[:-1] + [self.num_heads, x.shape[-1] // self.num_heads] - x = x.reshape(new_x_shape) - if k: - return x.transpose([0, 2, 3, 1]) - return x.transpose([0, 2, 1, 3]) - - def forward( - self, - x, - layer_past=None, - attention_mask=None, - head_mask=None, - output_attentions=False, - ): - x = self.qkv_proj(x) - query, key, value = x.split(x.shape[2] // self.split_size, axis=2) - query = self.split_heads(query) - key = self.split_heads(key, k=True) - value = self.split_heads(value) - # Concat previous key and value tensors - if layer_past is not None: - past_key, past_value = layer_past[0].transpose([0, 1, 3, 2]), layer_past[1] - key = paddle.concat((past_key, key), axis=-1) - value = paddle.concat((past_value, value), axis=-2) - - attn_outputs = self._attn( - query, key, value, attention_mask, head_mask, output_attentions - ) - output = attn_outputs[0] - output = self.merge_heads(output) - output = self.out_proj(output) - output = self.proj_drop(output) - - outputs = [output] + attn_outputs[1:] - return outputs - - -class MLP(nn.Layer): - """Multi layer perceptron module used in Transformer. - - Args: - in_features (int): Number of the input features. - hidden_features (Optional[int]): Number of the hidden size. Defaults to None. - out_features (Optional[int]): Number of the output features. Defaults to None. - drop (float, optional): Probability of dropout the units. Defaults to 0. 
- """ - - def __init__( - self, - in_features: int, - hidden_features: Optional[int] = None, - out_features: Optional[int] = None, - drop: float = 0.0, - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = nn.GELU(approximate=True) - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Block(nn.Layer): - """Transformer decoder block consisting of layer norm, - masked self-attention, layer norm and fully connected layer. - - Args: - num_ctx (int): Context length of block - embed_size (int): The number of embedding size. - num_heads (int): The number of heads in multi-head attention. - attn_pdrop (float): The dropout probability used on attention - weights to drop some attention targets. - resid_pdrop (float): The dropout probability used on output. - scale (bool, optional): Scaled self-attention calculation. Defaults to False. - """ - - def __init__( - self, - num_ctx: int, - embed_size: int, - num_heads: int, - attn_pdrop: float, - resid_pdrop: float, - scale: bool = False, - ): - super().__init__() - self.ln_1 = nn.LayerNorm(embed_size) - self.attn = MaskedAttention( - embed_size, num_ctx, num_heads, attn_pdrop, resid_pdrop, scale - ) - self.ln_2 = nn.LayerNorm(embed_size) - self.mlp = MLP(embed_size, 4 * embed_size, resid_pdrop) - - def forward( - self, - x, - layer_past=None, - attention_mask=None, - head_mask=None, - output_attentions=False, - ): - # Evaluate attention heads - output_attn = self.attn.forward( - self.ln_1(x), - layer_past=layer_past, - attention_mask=attention_mask, - head_mask=head_mask, - output_attentions=output_attentions, - ) - x = x + output_attn[0] - m = self.mlp(self.ln_2(x)) - x = x + m - outputs = [x] + output_attn[1:] - return outputs - - -class PhysformerGPT2(base.Arch): - """Transformer decoder model for modeling physics. - - Args: - input_keys (Tuple[str, ...]): Input keys, such as ("embeds",). - output_keys (Tuple[str, ...]): Output keys, such as ("pred_embeds",). - num_layers (int): Number of transformer layers. - num_ctx (int): Context length of block. - embed_size (int): The number of embedding size. - num_heads (int): The number of heads in multi-head attention. - embd_pdrop (float, optional): The dropout probability used on embedding features. Defaults to 0.0. - attn_pdrop (float, optional): The dropout probability used on attention weights. Defaults to 0.0. - resid_pdrop (float, optional): The dropout probability used on block outputs. Defaults to 0.0. - initializer_range (float, optional): Initializer range of linear layer. Defaults to 0.05. - embedding_model (Optional[base.Arch]): Embedding model, If this parameter is set, - the embedding model will map the input data to the embedding space and the - output data to the physical space. Defaults to None. 
- - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) - >>> data = paddle.to_tensor(paddle.randn([10, 16, 128])) - >>> inputs = {"embeds": data} - >>> outputs = model(inputs) - >>> print(outputs["pred_embeds"].shape) - [10, 16, 128] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - num_layers: int, - num_ctx: int, - embed_size: int, - num_heads: int, - embd_pdrop: float = 0.0, - attn_pdrop: float = 0.0, - resid_pdrop: float = 0.0, - initializer_range: float = 0.05, - embedding_model: Optional[base.Arch] = None, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - - self.num_layers = num_layers - self.num_ctx = num_ctx - self.embed_size = embed_size - self.num_heads = num_heads - self.embd_pdrop = embd_pdrop - self.attn_pdrop = attn_pdrop - self.resid_pdrop = resid_pdrop - self.initializer_range = initializer_range - - self.drop = nn.Dropout(embd_pdrop) - self.blocks = nn.LayerList( - [ - Block( - num_ctx, embed_size, num_heads, attn_pdrop, resid_pdrop, scale=True - ) - for _ in range(num_layers) - ] - ) - self.ln = nn.LayerNorm(embed_size) - self.linear = nn.Linear(embed_size, embed_size) - - self.apply(self._init_weights) - self.embedding_model = embedding_model - - def _init_weights(self, module): - if isinstance(module, nn.Linear): - normal_ = Normal(mean=0.0, std=self.initializer_range) - normal_(module.weight) - if module.bias is not None: - zeros_(module.bias) - elif isinstance(module, nn.LayerNorm): - zeros_(module.bias) - ones_(module.weight) - - def get_position_embed(self, x): - B, N, _ = x.shape - position_ids = paddle.arange(0, N, dtype=paddle.get_default_dtype()).reshape( - [1, N, 1] - ) - position_ids = position_ids.repeat_interleave(B, axis=0) - - position_embeds = paddle.zeros_like(x) - i = paddle.arange(0, self.embed_size // 2).unsqueeze(0).unsqueeze(0) - position_embeds[:, :, ::2] = paddle.sin( - position_ids / 10000 ** (2 * i / self.embed_size) - ) - position_embeds[:, :, 1::2] = paddle.cos( - position_ids / 10000 ** (2 * i / self.embed_size) - ) - return position_embeds - - def _generate_time_series(self, x, max_length): - cur_len = x.shape[1] - if cur_len >= max_length: - raise ValueError( - f"max_length({max_length}) should be larger than " - f"the length of input context({cur_len})" - ) - - while cur_len < max_length: - model_inputs = x[:, -1:] - outputs = self.forward_tensor(model_inputs) - next_output = outputs[0][:, -1:] - x = paddle.concat([x, next_output], axis=1) - cur_len = cur_len + 1 - return x - - @paddle.no_grad() - def generate(self, x, max_length=256): - if max_length <= 0: - raise ValueError( - f"max_length({max_length}) should be a strictly positive integer." 
- ) - outputs = self._generate_time_series(x, max_length) - return outputs - - def forward_tensor(self, x): - position_embeds = self.get_position_embed(x) - # Combine input embedding, position embedding - hidden_states = x + position_embeds - hidden_states = self.drop(hidden_states) - - # Loop through transformer self-attention layers - for block in self.blocks: - block_outputs = block(hidden_states) - hidden_states = block_outputs[0] - outputs = self.linear(self.ln(hidden_states)) - return (outputs,) - - def forward_eval(self, x): - input_embeds = x[:, :1] - outputs = self.generate(input_embeds) - return (outputs[:, 1:],) - - @staticmethod - def split_to_dict(data_tensors, keys): - return {key: data_tensors[i] for i, key in enumerate(keys)} - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - x_tensor = self.concat_to_tensor(x, self.input_keys, axis=-1) - if self.embedding_model is not None: - if isinstance(self.embedding_model, CylinderEmbedding): - x_tensor = self.embedding_model.encoder(x_tensor, x["visc"]) - else: - x_tensor = self.embedding_model.encoder(x_tensor) - - if self.training: - y = self.forward_tensor(x_tensor) - else: - y = self.forward_eval(x_tensor) - - if self.embedding_model is not None: - y = (self.embedding_model.decoder(y[0]),) - - y = self.split_to_dict(y, self.output_keys) - if self._output_transform is not None: - y = self._output_transform(x, y) - return y +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [transformer-physx](https://github.com/zabaras/transformer-physx) +""" + +from __future__ import annotations + +from typing import Optional +from typing import Tuple + +import paddle +import paddle.nn.functional as F +from paddle import nn +from paddle.nn.initializer import Constant +from paddle.nn.initializer import Normal + +from ppsci.arch import base +from ppsci.arch.embedding_koopman import CylinderEmbedding + +zeros_ = Constant(value=0.0) +ones_ = Constant(value=1.0) + + +class MaskedAttention(nn.Layer): + """Masked self-attention module. + + Args: + embed_dim (int): The expected feature size in the input and output. + num_ctx (int): Context length of block. + num_heads (int): The number of heads in multi-head attention. + attn_drop (float, optional): The dropout probability used on attention + weights to drop some attention targets. Defaults to 0. + proj_drop (float, optional): The dropout probability used on output. Defaults to 0. + scale (bool, optional): Whether to scale attention weights. Defaults to False. 
+ """ + + def __init__( + self, + embed_dim: int, + num_ctx: int, + num_heads: int, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + scale: bool = False, + ): + super().__init__() + self.register_buffer( + "bias", + paddle.tril(paddle.ones((num_ctx, num_ctx), dtype="int32")).reshape( + [1, 1, num_ctx, num_ctx] + ), + ) + + self.register_buffer("masked_bias", paddle.to_tensor(-1e4)) + self.num_heads = num_heads + self.split_size = embed_dim + self.scale = scale + + self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3) + self.out_proj = nn.Linear(embed_dim, embed_dim) + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + def _attn( + self, + query, + key, + value, + attention_mask=None, + head_mask=None, + output_attentions=False, + ): + attn = paddle.matmul(query, key) + if self.scale: + attn = attn / (float(value.shape[-1]) ** 0.5) + + nd, ns = attn.shape[-2], attn.shape[-1] + mask = self.bias[:, :, ns - nd : ns, :ns] + attn = paddle.where(mask > 0, attn, self.masked_bias.cast(attn.dtype)) + + if attention_mask is not None: + attn = attn + attention_mask + + attn = F.softmax(attn, axis=-1) + attn = self.attn_drop(attn) + + if head_mask is not None: + attn = attn * head_mask + + outputs = [paddle.matmul(attn, value)] + if output_attentions: + outputs.append(attn) + return outputs + + def merge_heads(self, x): + x = x.transpose([0, 2, 1, 3]) + new_x_shape = x.shape[:-2] + [ + x.shape[-2] * x.shape[-1], + ] + return x.reshape(new_x_shape) + + def split_heads(self, x, k=False): + new_x_shape = x.shape[:-1] + [self.num_heads, x.shape[-1] // self.num_heads] + x = x.reshape(new_x_shape) + if k: + return x.transpose([0, 2, 3, 1]) + return x.transpose([0, 2, 1, 3]) + + def forward( + self, + x, + layer_past=None, + attention_mask=None, + head_mask=None, + output_attentions=False, + ): + x = self.qkv_proj(x) + query, key, value = x.split(x.shape[2] // self.split_size, axis=2) + query = self.split_heads(query) + key = self.split_heads(key, k=True) + value = self.split_heads(value) + # Concat previous key and value tensors + if layer_past is not None: + past_key, past_value = layer_past[0].transpose([0, 1, 3, 2]), layer_past[1] + key = paddle.concat((past_key, key), axis=-1) + value = paddle.concat((past_value, value), axis=-2) + + attn_outputs = self._attn( + query, key, value, attention_mask, head_mask, output_attentions + ) + output = attn_outputs[0] + output = self.merge_heads(output) + output = self.out_proj(output) + output = self.proj_drop(output) + + outputs = [output] + attn_outputs[1:] + return outputs + + +class MLP(nn.Layer): + """Multi layer perceptron module used in Transformer. + + Args: + in_features (int): Number of the input features. + hidden_features (Optional[int]): Number of the hidden size. Defaults to None. + out_features (Optional[int]): Number of the output features. Defaults to None. + drop (float, optional): Probability of dropout the units. Defaults to 0. 
+ """ + + def __init__( + self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + drop: float = 0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = nn.GELU(approximate=True) + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Block(nn.Layer): + """Transformer decoder block consisting of layer norm, + masked self-attention, layer norm and fully connected layer. + + Args: + num_ctx (int): Context length of block + embed_size (int): The number of embedding size. + num_heads (int): The number of heads in multi-head attention. + attn_pdrop (float): The dropout probability used on attention + weights to drop some attention targets. + resid_pdrop (float): The dropout probability used on output. + scale (bool, optional): Scaled self-attention calculation. Defaults to False. + """ + + def __init__( + self, + num_ctx: int, + embed_size: int, + num_heads: int, + attn_pdrop: float, + resid_pdrop: float, + scale: bool = False, + ): + super().__init__() + self.ln_1 = nn.LayerNorm(embed_size) + self.attn = MaskedAttention( + embed_size, num_ctx, num_heads, attn_pdrop, resid_pdrop, scale + ) + self.ln_2 = nn.LayerNorm(embed_size) + self.mlp = MLP(embed_size, 4 * embed_size, resid_pdrop) + + def forward( + self, + x, + layer_past=None, + attention_mask=None, + head_mask=None, + output_attentions=False, + ): + # Evaluate attention heads + output_attn = self.attn.forward( + self.ln_1(x), + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + ) + x = x + output_attn[0] + m = self.mlp(self.ln_2(x)) + x = x + m + outputs = [x] + output_attn[1:] + return outputs + + +class PhysformerGPT2(base.Arch): + """Transformer decoder model for modeling physics. + + Args: + input_keys (Tuple[str, ...]): Input keys, such as ("embeds",). + output_keys (Tuple[str, ...]): Output keys, such as ("pred_embeds",). + num_layers (int): Number of transformer layers. + num_ctx (int): Context length of block. + embed_size (int): The number of embedding size. + num_heads (int): The number of heads in multi-head attention. + embd_pdrop (float, optional): The dropout probability used on embedding features. Defaults to 0.0. + attn_pdrop (float, optional): The dropout probability used on attention weights. Defaults to 0.0. + resid_pdrop (float, optional): The dropout probability used on block outputs. Defaults to 0.0. + initializer_range (float, optional): Initializer range of linear layer. Defaults to 0.05. + embedding_model (Optional[base.Arch]): Embedding model, If this parameter is set, + the embedding model will map the input data to the embedding space and the + output data to the physical space. Defaults to None. 
+ + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.PhysformerGPT2(("embeds", ), ("pred_embeds", ), 6, 16, 128, 4) + >>> data = paddle.to_tensor(paddle.randn([10, 16, 128])) + >>> inputs = {"embeds": data} + >>> outputs = model(inputs) + >>> print(outputs["pred_embeds"].shape) + [10, 16, 128] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + num_layers: int, + num_ctx: int, + embed_size: int, + num_heads: int, + embd_pdrop: float = 0.0, + attn_pdrop: float = 0.0, + resid_pdrop: float = 0.0, + initializer_range: float = 0.05, + embedding_model: Optional[base.Arch] = None, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + + self.num_layers = num_layers + self.num_ctx = num_ctx + self.embed_size = embed_size + self.num_heads = num_heads + self.embd_pdrop = embd_pdrop + self.attn_pdrop = attn_pdrop + self.resid_pdrop = resid_pdrop + self.initializer_range = initializer_range + + self.drop = nn.Dropout(embd_pdrop) + self.blocks = nn.LayerList( + [ + Block( + num_ctx, embed_size, num_heads, attn_pdrop, resid_pdrop, scale=True + ) + for _ in range(num_layers) + ] + ) + self.ln = nn.LayerNorm(embed_size) + self.linear = nn.Linear(embed_size, embed_size) + + self.apply(self._init_weights) + self.embedding_model = embedding_model + + def _init_weights(self, module): + if isinstance(module, nn.Linear): + normal_ = Normal(mean=0.0, std=self.initializer_range) + normal_(module.weight) + if module.bias is not None: + zeros_(module.bias) + elif isinstance(module, nn.LayerNorm): + zeros_(module.bias) + ones_(module.weight) + + def get_position_embed(self, x): + B, N, _ = x.shape + position_ids = paddle.arange(0, N, dtype=paddle.get_default_dtype()).reshape( + [1, N, 1] + ) + position_ids = position_ids.repeat_interleave(B, axis=0) + + position_embeds = paddle.zeros_like(x) + i = paddle.arange(0, self.embed_size // 2).unsqueeze(0).unsqueeze(0) + position_embeds[:, :, ::2] = paddle.sin( + position_ids / 10000 ** (2 * i / self.embed_size) + ) + position_embeds[:, :, 1::2] = paddle.cos( + position_ids / 10000 ** (2 * i / self.embed_size) + ) + return position_embeds + + def _generate_time_series(self, x, max_length): + cur_len = x.shape[1] + if cur_len >= max_length: + raise ValueError( + f"max_length({max_length}) should be larger than " + f"the length of input context({cur_len})" + ) + + while cur_len < max_length: + model_inputs = x[:, -1:] + outputs = self.forward_tensor(model_inputs) + next_output = outputs[0][:, -1:] + x = paddle.concat([x, next_output], axis=1) + cur_len = cur_len + 1 + return x + + @paddle.no_grad() + def generate(self, x, max_length=256): + if max_length <= 0: + raise ValueError( + f"max_length({max_length}) should be a strictly positive integer." 
+ ) + outputs = self._generate_time_series(x, max_length) + return outputs + + def forward_tensor(self, x): + position_embeds = self.get_position_embed(x) + # Combine input embedding, position embedding + hidden_states = x + position_embeds + hidden_states = self.drop(hidden_states) + + # Loop through transformer self-attention layers + for block in self.blocks: + block_outputs = block(hidden_states) + hidden_states = block_outputs[0] + outputs = self.linear(self.ln(hidden_states)) + return (outputs,) + + def forward_eval(self, x): + input_embeds = x[:, :1] + outputs = self.generate(input_embeds) + return (outputs[:, 1:],) + + @staticmethod + def split_to_dict(data_tensors, keys): + return {key: data_tensors[i] for i, key in enumerate(keys)} + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + x_tensor = self.concat_to_tensor(x, self.input_keys, axis=-1) + if self.embedding_model is not None: + if isinstance(self.embedding_model, CylinderEmbedding): + x_tensor = self.embedding_model.encoder(x_tensor, x["visc"]) + else: + x_tensor = self.embedding_model.encoder(x_tensor) + + if self.training: + y = self.forward_tensor(x_tensor) + else: + y = self.forward_eval(x_tensor) + + if self.embedding_model is not None: + y = (self.embedding_model.decoder(y[0]),) + + y = self.split_to_dict(y, self.output_keys) + if self._output_transform is not None: + y = self._output_transform(x, y) + return y diff --git a/ppsci/arch/sfnonet.py b/ppsci/arch/sfnonet.py index aa7e2456a7..6af1cdda70 100644 --- a/ppsci/arch/sfnonet.py +++ b/ppsci/arch/sfnonet.py @@ -1,568 +1,568 @@ -from typing import Dict -from typing import List -from typing import Optional -from typing import Tuple -from typing import Union - -import paddle -import paddle.nn.functional as F -from paddle import nn - -from ppsci.arch import base -from ppsci.arch import fno_block -from ppsci.arch.paddle_harmonics import sht as paddle_sht -from ppsci.utils import initializer - -einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - - -def _contract_dense(x, weight, separable=False, dhconv=True): - order = len(x.shape) - x_syms = list(einsum_symbols[:order]) - - # in_channels, out_channels, x, y... - weight_syms = list(x_syms[1:]) # no batch-size - - # batch-size, out_channels, x, y... - if separable: - out_syms = [x_syms[0]] + list(weight_syms) - else: - weight_syms.insert(1, einsum_symbols[order]) # outputs - out_syms = list(weight_syms) - out_syms[0] = x_syms[0] - - if dhconv: - weight_syms.pop() - - eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) - # For the darcy flow, the only einsum is abcd,becd->aecd, where x and weights are shaped [32,32,8,8] - if not isinstance(weight, paddle.Tensor): - weight = paddle.to_tensor(weight) - - return paddle.einsum(eq, x, weight) - - -def _contract_dense_trick(x, weight_real, weight_imag, separable=False, dhconv=True): - # the same as above function, but do the complex multiplication manually to avoid the einsum bug in paddle - order = len(x.shape) - # batch-size, in_channels, x, y... - x_syms = list(einsum_symbols[:order]) - - # in_channels, out_channels, x, y... - weight_syms = list(x_syms[1:]) # no batch-size - - # batch-size, out_channels, x, y... 
- if separable: - out_syms = [x_syms[0]] + list(weight_syms) - else: - weight_syms.insert(1, einsum_symbols[order]) # outputs - out_syms = list(weight_syms) - out_syms[0] = x_syms[0] - - if dhconv: - weight_syms.pop() - - eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) - - o1_real = paddle.einsum(eq, x.real(), weight_real) - paddle.einsum( - eq, x.imag(), weight_imag - ) - o1_imag = paddle.einsum(eq, x.imag(), weight_real) + paddle.einsum( - eq, x.real(), weight_imag - ) - x = paddle.complex(o1_real, o1_imag) - return x - - -def _contract_dense_separable(x, weight, separable=True): - if not separable: - raise ValueError("This function is only for separable=True") - return x * weight - - -def get_contract_fun(weight, implementation="reconstructed", separable=False): - """Generic ND implementation of Fourier Spectral Conv contraction. - - Args: - weight (FactorizedTensor): The factoriz Tensor. - implementation (str, optional): Whether to reconstruct the weight and do a forward pass (reconstructed) - or contract directly the factors of the factorized weight with the input (factorized). - {'reconstructed', 'factorized'} Defaults to "reconstructed". - separable (bool, optional): Whether to use the separable implementation of contraction. This arg is - only checked when `implementation=reconstructed`. Defaults to False. - """ - - if implementation == "reconstructed": - if separable: - return _contract_dense_separable - else: - return _contract_dense_trick - elif implementation == "factorized": - if isinstance(weight, paddle.Tensor): - return _contract_dense_trick - - else: - raise ValueError( - f'Got implementation={implementation}, expected "reconstructed" or "factorized"' - ) - - -class SHT(nn.Layer): - """A wrapper for the Spherical Harmonics transform - - Allows to call it with an interface similar to that of FFT - """ - - def __init__(self, dtype=paddle.float32): - super().__init__() - self.dtype = dtype - self._SHT_cache = nn.LayerDict() - self._iSHT_cache = nn.LayerDict() - - def sht(self, x, s=None, norm="ortho", grid="equiangular"): - *_, height, width = x.shape # height = latitude, width = longitude - if s is None: - if grid == "equiangular": - modes_width = height // 2 - else: - modes_width = height - modes_height = height - else: - modes_height, modes_width = s - - cache_key = f"{height}_{width}_{modes_height}_{modes_width}_{norm}_{grid}" - - try: - sht = self._SHT_cache[cache_key] - except KeyError: - sht = paddle_sht.RealSHT( - nlat=height, - nlon=width, - lmax=modes_height, - mmax=modes_width, - grid=grid, - norm=norm, - ).astype(dtype=self.dtype) - - self._SHT_cache[cache_key] = sht - - return sht(x) - - def isht(self, x, s=None, norm="ortho", grid="equiangular"): - *_, modes_height, modes_width = x.shape # height = latitude, width = longitude - if s is None: - if grid == "equiangular": - width = modes_width * 2 - else: - width = modes_width - height = modes_height - else: - height, width = s - - cache_key = f"{height}_{width}_{modes_height}_{modes_width}_{norm}_{grid}" - - try: - isht = self._iSHT_cache[cache_key] - except KeyError: - isht = paddle_sht.InverseRealSHT( - nlat=height, - nlon=width, - lmax=modes_height, - mmax=modes_width, - grid=grid, - norm=norm, - ).astype(dtype=self.dtype) - self._iSHT_cache[cache_key] = isht - - return isht(x) - - -Number = Union[int, float] - - -class SphericalConv(nn.Layer): - """Spherical Convolution, base class for the SFNO [1]. - .. 
[1] Spherical Fourier Neural Operators: Learning Stable Dynamics on the Sphere, - Boris Bonev, Thorsten Kurth, Christian Hundt, Jaideep Pathak, Maximilian Baust, Karthik Kashinath, Anima Anandkumar, - ICML 2023. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - n_modes (Tuple[int, ...]): Number of modes to use for contraction in Fourier domain during - training. - max_n_modes (int, optional): The maximum number of modes to use for contraction in Fourier domain during - training. Defaults to None. - bias (bool, optional): Whether to use bias in the layers. Defaults to True. - n_layers (int, optional): Number of Fourier Layers. Defaults to 1. - separable (bool, optional): Whether to use separable Fourier Conv. Defaults to False. - output_scaling_factor (Optional[Union[Number, List[Number]]], optional): Scaling factor for the - output. Defaults to None. - rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 0.5. - factorization (str, optional): Tensor factorization of the parameters weight to use. Defaults to "dense". - implementation (str, optional): If factorization is not None, forward mode to use. Defaults to "reconstructed". - joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a - single tensor. Defaults to False. - init_std (str, optional): The std to use for the init. Defaults to "auto". - sht_norm (str, optional): The normalization mode of the SHT. Defaults to "ortho". - sht_grids (str, optional): The grid of the SHT. Defaults to "equiangular". - dtype (paddle.float32, optional): The data type. Defaults to paddle.float32. - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - n_modes: Tuple[int, ...], - max_n_modes: int = None, - bias: bool = True, - n_layers: int = 1, - separable: bool = False, - output_scaling_factor: Optional[Union[Number, List[Number]]] = None, - rank: float = 0.5, - factorization: str = "dense", - implementation: str = "reconstructed", - joint_factorization: bool = False, - init_std: str = "auto", - sht_norm: str = "ortho", - sht_grids: str = "equiangular", - dtype: paddle.dtype = paddle.float32, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - - self.dtype = dtype - - self.joint_factorization = joint_factorization - - if isinstance(n_modes, int): - n_modes = [n_modes] - self._n_modes = n_modes - self.order = len(n_modes) - - if max_n_modes is None: - max_n_modes = self.n_modes - elif isinstance(max_n_modes, int): - max_n_modes = [max_n_modes] - self.max_n_modes = max_n_modes - - self.rank = rank - self.factorization = factorization - self.n_layers = n_layers - self.implementation = implementation - - self.output_scaling_factor: Union[ - None, List[List[float]] - ] = fno_block.validate_scaling_factor( - output_scaling_factor, self.order, n_layers - ) - - if init_std == "auto": - init_std = (2 / (in_channels + out_channels)) ** 0.5 - else: - init_std = init_std - - if separable: - if in_channels != out_channels: - raise ValueError( - f"To use separable Fourier Conv, in_channels must be equal to out_channels, but got in_channels={in_channels} and out_channels={out_channels}" - ) - weight_shape = (in_channels, *self.n_modes[:-1]) - else: - weight_shape = (in_channels, out_channels, *self.n_modes[:-1]) - self.separable = separable - - if joint_factorization: - self.weight = paddle.create_parameter( - shape=(n_layers, *weight_shape), - dtype="float32", - ) - 
self.weight = initializer.normal_(self.weight, 0, init_std) - else: - self.weight = nn.LayerList( - [ - fno_block.FactorizedTensor(weight_shape, init_scale=init_std) - for _ in range(n_layers) - ] - ) - self._contract = get_contract_fun( - self.weight[0].data, implementation=implementation, separable=separable - ) - if bias: - shape = (n_layers, self.out_channels) + (1,) * self.order - init_bias = init_std * paddle.randn(shape) - self.bias = paddle.create_parameter( - shape=shape, - dtype=(init_bias.dtype), - default_initializer=nn.initializer.Assign(init_bias), - ) - self.bias.stop_gradient = False - else: - self.bias = None - - self.sht_norm = sht_norm - if isinstance(sht_grids, str): - sht_grids = [sht_grids] * (self.n_layers + 1) - self.sht_grids = sht_grids - self.sht_handle = SHT(dtype=self.dtype) - - @property - def n_modes(self): - return self._n_modes - - @n_modes.setter - def n_modes(self, n_modes): - if isinstance(n_modes, int): # Should happen for 1D FNO only - n_modes = [n_modes] - else: - n_modes = list(n_modes) - self._n_modes = n_modes - - def forward(self, x, indices=0, output_shape=None): - batchsize, channels, height, width = x.shape - - if self.output_scaling_factor is not None and output_shape is None: - scaling_factors = self.output_scaling_factor[indices] - height = round(height * scaling_factors[0]) - width = round(width * scaling_factors[1]) - elif output_shape is not None: - height, width = output_shape[0], output_shape[1] - - out_fft = self.sht_handle.sht( - x, - s=(self.n_modes[0], self.n_modes[1] // 2), - norm=self.sht_norm, - grid=self.sht_grids[indices], - ) - - w_real = self.weight[indices].real[:, :, : self.n_modes[0]] - w_imag = self.weight[indices].imag[:, :, : self.n_modes[0]] - - out_fft = self._contract( - out_fft[:, :, : self.n_modes[0], : self.n_modes[1] // 2], - w_real, - w_imag, - separable=self.separable, - dhconv=True, - ) - - x = self.sht_handle.isht( - out_fft, - s=(height, width), - norm=self.sht_norm, - grid=self.sht_grids[indices + 1], - ) - - if self.bias is not None: - x = x + self.bias[indices, ...] - - return x - - def transform(self, x, layer_index=0, output_shape=None): - *_, in_height, in_width = x.shape - - if self.output_scaling_factor is not None and output_shape is None: - height = round(in_height * self.output_scaling_factor[layer_index][0]) - width = round(in_width * self.output_scaling_factor[layer_index][1]) - elif output_shape is not None: - height, width = output_shape[0], output_shape[1] - else: - height, width = in_height, in_width - - # Return the identity if the resolution and grid of the input and output are the same - if ((in_height, in_width) == (height, width)) and ( - self.sht_grids[layer_index] == self.sht_grids[layer_index + 1] - ): - return x - else: - coefs = self.sht_handle.sht( - x, s=self.n_modes, norm=self.sht_norm, grid=self.sht_grids[layer_index] - ) - return self.sht_handle.isht( - coefs, - s=(height, width), - norm=self.sht_norm, - grid=self.sht_grids[layer_index + 1], - ) - - -class SFNONet(base.Arch): - """N-Dimensional Tensorized Fourier Neural Operator. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). - n_modes (Tuple[int, ...]): Number of modes to keep in Fourier Layer, along each dimension - The dimensionality of the SFNO is inferred from ``len(n_modes)` - hidden_channels (int): Width of the FNO (i.e. number of channels) - in_channels (int, optional): Number of input channels. Defaults to 3. 
- out_channels (int, optional): Number of output channels. Defaults to 1. - lifting_channels (int, optional): Number of hidden channels of the lifting block of the FNO. - Defaults to 256. - projection_channels (int, optional): Number of hidden channels of the projection block of the FNO. - Defaults to 256. - n_layers (int, optional): Number of Fourier Layers. Defaults to 4. - use_mlp (bool, optional): Whether to use an MLP layer after each FNO block. Defaults to False. - mlp (Dict[str, float], optional): Parameters of the MLP. {'expansion': float, 'dropout': float}. - Defaults to None. - non_linearity (nn.functional, optional): Non-Linearity module to use. Defaults to F.gelu. - norm (str, optional): Normalization layer to use. Defaults to None. - ada_in_features (int,optional): The input channles of the adaptive normalization.Defaults to None. - preactivation (bool, optional): Whether to use resnet-style preactivation. Defaults to False. - fno_skip (str, optional): Type of skip connection to use,{'linear', 'identity', 'soft-gating'}. - Defaults to "soft-gating". - separable (bool, optional): Whether to use a depthwise separable spectral convolution. - Defaults to False. - factorization (str, optional): Tensor factorization of the parameters weight to use. - * If None, a dense tensor parametrizes the Spectral convolutions. - * Otherwise, the specified tensor factorization is used. Defaults to "Tucker". - rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 1.0. - joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a - single tensor (vs one per layer). Defaults to False. - implementation (str, optional): {'factorized', 'reconstructed'}, optional. Defaults to "factorized". - If factorization is not None, forward mode to use:: - * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass. - * `factorized` : the input is directly contracted with the factors of the decomposition. - domain_padding (Optional[list], optional): Whether to use percentage of padding. Defaults to None. - domain_padding_mode (str, optional): {'symmetric', 'one-sided'}, optional - How to perform domain padding, by default 'one-sided'. Defaults to "one-sided". - fft_norm (str, optional): The normalization mode for the FFT. Defaults to "forward". - patching_levels (int, optional): Number of patching levels to use. Defaults to 0. 
- """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - n_modes: Tuple[int, ...], - hidden_channels: int, - in_channels: int = 3, - out_channels: int = 1, - lifting_channels: int = 256, - projection_channels: int = 256, - n_layers: int = 4, - use_mlp: bool = False, - mlp: Optional[Dict[str, float]] = None, - max_n_modes: int = None, - non_linearity: nn.functional = F.gelu, - stabilizer: str = None, - norm: str = None, - ada_in_features: Optional[int] = None, - preactivation: bool = False, - fno_skip: str = "linear", - mlp_skip: str = "soft-gating", - separable: bool = False, - factorization: str = None, - rank: float = 1.0, - joint_factorization: bool = False, - implementation: str = "factorized", - domain_padding: Optional[list] = None, - domain_padding_mode: str = "one-sided", - fft_norm: str = "forward", - patching_levels: int = 0, - **kwargs, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - - self.n_dim = len(n_modes) - self.n_modes = n_modes - self.hidden_channels = hidden_channels - self.lifting_channels = lifting_channels - self.projection_channels = projection_channels - self.in_channels = in_channels - if patching_levels: - self.in_channels = self.in_channels * patching_levels + 1 - self.out_channels = out_channels - self.n_layers = n_layers - self.joint_factorization = joint_factorization - self.non_linearity = non_linearity - self.rank = rank - self.factorization = factorization - self.fno_skip = (fno_skip,) - self.mlp_skip = (mlp_skip,) - self.fft_norm = fft_norm - self.implementation = implementation - self.separable = separable - self.preactivation = preactivation - self.stabilizer = stabilizer - if domain_padding is not None and ( - (isinstance(domain_padding, list) and sum(domain_padding) > 0) - or (isinstance(domain_padding, (float, int)) and domain_padding > 0) - ): - self.domain_padding = fno_block.DomainPadding( - domain_padding=domain_padding, padding_mode=domain_padding_mode - ) - else: - self.domain_padding = None - self.domain_padding_mode = domain_padding_mode - - self.fno_blocks = fno_block.FNOBlocks( - in_channels=hidden_channels, - out_channels=hidden_channels, - n_modes=self.n_modes, - n_layers=n_layers, - max_n_modes=max_n_modes, - use_mlp=use_mlp, - mlp=mlp, - non_linearity=non_linearity, - stabilizer=stabilizer, - norm=norm, - ada_in_features=ada_in_features, - preactivation=preactivation, - fno_skip=fno_skip, - mlp_skip=mlp_skip, - separable=separable, - factorization=factorization, - rank=rank, - SpectralConv=SphericalConv, - joint_factorization=joint_factorization, - implementation=implementation, - fft_norm=fft_norm, - ) - # if lifting_channels is passed, make lifting an MLP - # with a hidden layer of size lifting_channels - if self.lifting_channels: - self.lifting = fno_block.MLP( - in_channels=in_channels, - out_channels=self.hidden_channels, - hidden_channels=self.lifting_channels, - n_layers=2, - n_dim=self.n_dim, - ) - # otherwise, make it a linear layer - else: - self.lifting = fno_block.MLP( - in_channels=in_channels, - out_channels=self.hidden_channels, - hidden_channels=self.hidden_channels, - n_layers=1, - n_dim=self.n_dim, - ) - self.projection = fno_block.MLP( - in_channels=self.hidden_channels, - out_channels=out_channels, - hidden_channels=self.projection_channels, - n_layers=2, - n_dim=self.n_dim, - non_linearity=non_linearity, - ) - - def forward(self, x): - """SFNO's forward pass""" - x = self.concat_to_tensor(x, self.input_keys) - - x = self.lifting(x) - 
if self.domain_padding is not None: - x = self.domain_padding.pad(x) - # x is 0.4 * [1, 32, 16, 16], passed - for index in range(self.n_layers): - x = self.fno_blocks(x, index) - - if self.domain_padding is not None: - x = self.domain_padding.unpad(x) - out = self.projection(x) - - return {self.output_keys[0]: out} +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle +import paddle.nn.functional as F +from paddle import nn + +from ppsci.arch import base +from ppsci.arch import fno_block +from ppsci.arch.paddle_harmonics import sht as paddle_sht +from ppsci.utils import initializer + +einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + + +def _contract_dense(x, weight, separable=False, dhconv=True): + order = len(x.shape) + x_syms = list(einsum_symbols[:order]) + + # in_channels, out_channels, x, y... + weight_syms = list(x_syms[1:]) # no batch-size + + # batch-size, out_channels, x, y... + if separable: + out_syms = [x_syms[0]] + list(weight_syms) + else: + weight_syms.insert(1, einsum_symbols[order]) # outputs + out_syms = list(weight_syms) + out_syms[0] = x_syms[0] + + if dhconv: + weight_syms.pop() + + eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) + # For the darcy flow, the only einsum is abcd,becd->aecd, where x and weights are shaped [32,32,8,8] + if not isinstance(weight, paddle.Tensor): + weight = paddle.to_tensor(weight) + + return paddle.einsum(eq, x, weight) + + +def _contract_dense_trick(x, weight_real, weight_imag, separable=False, dhconv=True): + # the same as above function, but do the complex multiplication manually to avoid the einsum bug in paddle + order = len(x.shape) + # batch-size, in_channels, x, y... + x_syms = list(einsum_symbols[:order]) + + # in_channels, out_channels, x, y... + weight_syms = list(x_syms[1:]) # no batch-size + + # batch-size, out_channels, x, y... + if separable: + out_syms = [x_syms[0]] + list(weight_syms) + else: + weight_syms.insert(1, einsum_symbols[order]) # outputs + out_syms = list(weight_syms) + out_syms[0] = x_syms[0] + + if dhconv: + weight_syms.pop() + + eq = "".join(x_syms) + "," + "".join(weight_syms) + "->" + "".join(out_syms) + + o1_real = paddle.einsum(eq, x.real(), weight_real) - paddle.einsum( + eq, x.imag(), weight_imag + ) + o1_imag = paddle.einsum(eq, x.imag(), weight_real) + paddle.einsum( + eq, x.real(), weight_imag + ) + x = paddle.complex(o1_real, o1_imag) + return x + + +def _contract_dense_separable(x, weight, separable=True): + if not separable: + raise ValueError("This function is only for separable=True") + return x * weight + + +def get_contract_fun(weight, implementation="reconstructed", separable=False): + """Generic ND implementation of Fourier Spectral Conv contraction. + + Args: + weight (FactorizedTensor): The factoriz Tensor. + implementation (str, optional): Whether to reconstruct the weight and do a forward pass (reconstructed) + or contract directly the factors of the factorized weight with the input (factorized). + {'reconstructed', 'factorized'} Defaults to "reconstructed". + separable (bool, optional): Whether to use the separable implementation of contraction. This arg is + only checked when `implementation=reconstructed`. Defaults to False. 
+ """ + + if implementation == "reconstructed": + if separable: + return _contract_dense_separable + else: + return _contract_dense_trick + elif implementation == "factorized": + if isinstance(weight, paddle.Tensor): + return _contract_dense_trick + + else: + raise ValueError( + f'Got implementation={implementation}, expected "reconstructed" or "factorized"' + ) + + +class SHT(nn.Layer): + """A wrapper for the Spherical Harmonics transform + + Allows to call it with an interface similar to that of FFT + """ + + def __init__(self, dtype=paddle.float32): + super().__init__() + self.dtype = dtype + self._SHT_cache = nn.LayerDict() + self._iSHT_cache = nn.LayerDict() + + def sht(self, x, s=None, norm="ortho", grid="equiangular"): + *_, height, width = x.shape # height = latitude, width = longitude + if s is None: + if grid == "equiangular": + modes_width = height // 2 + else: + modes_width = height + modes_height = height + else: + modes_height, modes_width = s + + cache_key = f"{height}_{width}_{modes_height}_{modes_width}_{norm}_{grid}" + + try: + sht = self._SHT_cache[cache_key] + except KeyError: + sht = paddle_sht.RealSHT( + nlat=height, + nlon=width, + lmax=modes_height, + mmax=modes_width, + grid=grid, + norm=norm, + ).astype(dtype=self.dtype) + + self._SHT_cache[cache_key] = sht + + return sht(x) + + def isht(self, x, s=None, norm="ortho", grid="equiangular"): + *_, modes_height, modes_width = x.shape # height = latitude, width = longitude + if s is None: + if grid == "equiangular": + width = modes_width * 2 + else: + width = modes_width + height = modes_height + else: + height, width = s + + cache_key = f"{height}_{width}_{modes_height}_{modes_width}_{norm}_{grid}" + + try: + isht = self._iSHT_cache[cache_key] + except KeyError: + isht = paddle_sht.InverseRealSHT( + nlat=height, + nlon=width, + lmax=modes_height, + mmax=modes_width, + grid=grid, + norm=norm, + ).astype(dtype=self.dtype) + self._iSHT_cache[cache_key] = isht + + return isht(x) + + +Number = Union[int, float] + + +class SphericalConv(nn.Layer): + """Spherical Convolution, base class for the SFNO [1]. + .. [1] Spherical Fourier Neural Operators: Learning Stable Dynamics on the Sphere, + Boris Bonev, Thorsten Kurth, Christian Hundt, Jaideep Pathak, Maximilian Baust, Karthik Kashinath, Anima Anandkumar, + ICML 2023. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + n_modes (Tuple[int, ...]): Number of modes to use for contraction in Fourier domain during + training. + max_n_modes (int, optional): The maximum number of modes to use for contraction in Fourier domain during + training. Defaults to None. + bias (bool, optional): Whether to use bias in the layers. Defaults to True. + n_layers (int, optional): Number of Fourier Layers. Defaults to 1. + separable (bool, optional): Whether to use separable Fourier Conv. Defaults to False. + output_scaling_factor (Optional[Union[Number, List[Number]]], optional): Scaling factor for the + output. Defaults to None. + rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 0.5. + factorization (str, optional): Tensor factorization of the parameters weight to use. Defaults to "dense". + implementation (str, optional): If factorization is not None, forward mode to use. Defaults to "reconstructed". + joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a + single tensor. Defaults to False. + init_std (str, optional): The std to use for the init. 
Defaults to "auto". + sht_norm (str, optional): The normalization mode of the SHT. Defaults to "ortho". + sht_grids (str, optional): The grid of the SHT. Defaults to "equiangular". + dtype (paddle.float32, optional): The data type. Defaults to paddle.float32. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + n_modes: Tuple[int, ...], + max_n_modes: int = None, + bias: bool = True, + n_layers: int = 1, + separable: bool = False, + output_scaling_factor: Optional[Union[Number, List[Number]]] = None, + rank: float = 0.5, + factorization: str = "dense", + implementation: str = "reconstructed", + joint_factorization: bool = False, + init_std: str = "auto", + sht_norm: str = "ortho", + sht_grids: str = "equiangular", + dtype: paddle.dtype = paddle.float32, + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + + self.dtype = dtype + + self.joint_factorization = joint_factorization + + if isinstance(n_modes, int): + n_modes = [n_modes] + self._n_modes = n_modes + self.order = len(n_modes) + + if max_n_modes is None: + max_n_modes = self.n_modes + elif isinstance(max_n_modes, int): + max_n_modes = [max_n_modes] + self.max_n_modes = max_n_modes + + self.rank = rank + self.factorization = factorization + self.n_layers = n_layers + self.implementation = implementation + + self.output_scaling_factor: Union[ + None, List[List[float]] + ] = fno_block.validate_scaling_factor( + output_scaling_factor, self.order, n_layers + ) + + if init_std == "auto": + init_std = (2 / (in_channels + out_channels)) ** 0.5 + else: + init_std = init_std + + if separable: + if in_channels != out_channels: + raise ValueError( + f"To use separable Fourier Conv, in_channels must be equal to out_channels, but got in_channels={in_channels} and out_channels={out_channels}" + ) + weight_shape = (in_channels, *self.n_modes[:-1]) + else: + weight_shape = (in_channels, out_channels, *self.n_modes[:-1]) + self.separable = separable + + if joint_factorization: + self.weight = paddle.create_parameter( + shape=(n_layers, *weight_shape), + dtype="float32", + ) + self.weight = initializer.normal_(self.weight, 0, init_std) + else: + self.weight = nn.LayerList( + [ + fno_block.FactorizedTensor(weight_shape, init_scale=init_std) + for _ in range(n_layers) + ] + ) + self._contract = get_contract_fun( + self.weight[0].data, implementation=implementation, separable=separable + ) + if bias: + shape = (n_layers, self.out_channels) + (1,) * self.order + init_bias = init_std * paddle.randn(shape) + self.bias = paddle.create_parameter( + shape=shape, + dtype=(init_bias.dtype), + default_initializer=nn.initializer.Assign(init_bias), + ) + self.bias.stop_gradient = False + else: + self.bias = None + + self.sht_norm = sht_norm + if isinstance(sht_grids, str): + sht_grids = [sht_grids] * (self.n_layers + 1) + self.sht_grids = sht_grids + self.sht_handle = SHT(dtype=self.dtype) + + @property + def n_modes(self): + return self._n_modes + + @n_modes.setter + def n_modes(self, n_modes): + if isinstance(n_modes, int): # Should happen for 1D FNO only + n_modes = [n_modes] + else: + n_modes = list(n_modes) + self._n_modes = n_modes + + def forward(self, x, indices=0, output_shape=None): + batchsize, channels, height, width = x.shape + + if self.output_scaling_factor is not None and output_shape is None: + scaling_factors = self.output_scaling_factor[indices] + height = round(height * scaling_factors[0]) + width = round(width * scaling_factors[1]) + elif output_shape is not None: + height, width = 
output_shape[0], output_shape[1] + + out_fft = self.sht_handle.sht( + x, + s=(self.n_modes[0], self.n_modes[1] // 2), + norm=self.sht_norm, + grid=self.sht_grids[indices], + ) + + w_real = self.weight[indices].real[:, :, : self.n_modes[0]] + w_imag = self.weight[indices].imag[:, :, : self.n_modes[0]] + + out_fft = self._contract( + out_fft[:, :, : self.n_modes[0], : self.n_modes[1] // 2], + w_real, + w_imag, + separable=self.separable, + dhconv=True, + ) + + x = self.sht_handle.isht( + out_fft, + s=(height, width), + norm=self.sht_norm, + grid=self.sht_grids[indices + 1], + ) + + if self.bias is not None: + x = x + self.bias[indices, ...] + + return x + + def transform(self, x, layer_index=0, output_shape=None): + *_, in_height, in_width = x.shape + + if self.output_scaling_factor is not None and output_shape is None: + height = round(in_height * self.output_scaling_factor[layer_index][0]) + width = round(in_width * self.output_scaling_factor[layer_index][1]) + elif output_shape is not None: + height, width = output_shape[0], output_shape[1] + else: + height, width = in_height, in_width + + # Return the identity if the resolution and grid of the input and output are the same + if ((in_height, in_width) == (height, width)) and ( + self.sht_grids[layer_index] == self.sht_grids[layer_index + 1] + ): + return x + else: + coefs = self.sht_handle.sht( + x, s=self.n_modes, norm=self.sht_norm, grid=self.sht_grids[layer_index] + ) + return self.sht_handle.isht( + coefs, + s=(height, width), + norm=self.sht_norm, + grid=self.sht_grids[layer_index + 1], + ) + + +class SFNONet(base.Arch): + """N-Dimensional Tensorized Fourier Neural Operator. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + n_modes (Tuple[int, ...]): Number of modes to keep in Fourier Layer, along each dimension + The dimensionality of the SFNO is inferred from ``len(n_modes)` + hidden_channels (int): Width of the FNO (i.e. number of channels) + in_channels (int, optional): Number of input channels. Defaults to 3. + out_channels (int, optional): Number of output channels. Defaults to 1. + lifting_channels (int, optional): Number of hidden channels of the lifting block of the FNO. + Defaults to 256. + projection_channels (int, optional): Number of hidden channels of the projection block of the FNO. + Defaults to 256. + n_layers (int, optional): Number of Fourier Layers. Defaults to 4. + use_mlp (bool, optional): Whether to use an MLP layer after each FNO block. Defaults to False. + mlp (Dict[str, float], optional): Parameters of the MLP. {'expansion': float, 'dropout': float}. + Defaults to None. + non_linearity (nn.functional, optional): Non-Linearity module to use. Defaults to F.gelu. + norm (str, optional): Normalization layer to use. Defaults to None. + ada_in_features (int,optional): The input channles of the adaptive normalization.Defaults to None. + preactivation (bool, optional): Whether to use resnet-style preactivation. Defaults to False. + fno_skip (str, optional): Type of skip connection to use,{'linear', 'identity', 'soft-gating'}. + Defaults to "soft-gating". + separable (bool, optional): Whether to use a depthwise separable spectral convolution. + Defaults to False. + factorization (str, optional): Tensor factorization of the parameters weight to use. + * If None, a dense tensor parametrizes the Spectral convolutions. + * Otherwise, the specified tensor factorization is used. Defaults to "Tucker". 
+ rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 1.0. + joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a + single tensor (vs one per layer). Defaults to False. + implementation (str, optional): {'factorized', 'reconstructed'}, optional. Defaults to "factorized". + If factorization is not None, forward mode to use:: + * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass. + * `factorized` : the input is directly contracted with the factors of the decomposition. + domain_padding (Optional[list], optional): Whether to use percentage of padding. Defaults to None. + domain_padding_mode (str, optional): {'symmetric', 'one-sided'}, optional + How to perform domain padding, by default 'one-sided'. Defaults to "one-sided". + fft_norm (str, optional): The normalization mode for the FFT. Defaults to "forward". + patching_levels (int, optional): Number of patching levels to use. Defaults to 0. + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + n_modes: Tuple[int, ...], + hidden_channels: int, + in_channels: int = 3, + out_channels: int = 1, + lifting_channels: int = 256, + projection_channels: int = 256, + n_layers: int = 4, + use_mlp: bool = False, + mlp: Optional[Dict[str, float]] = None, + max_n_modes: int = None, + non_linearity: nn.functional = F.gelu, + stabilizer: str = None, + norm: str = None, + ada_in_features: Optional[int] = None, + preactivation: bool = False, + fno_skip: str = "linear", + mlp_skip: str = "soft-gating", + separable: bool = False, + factorization: str = None, + rank: float = 1.0, + joint_factorization: bool = False, + implementation: str = "factorized", + domain_padding: Optional[list] = None, + domain_padding_mode: str = "one-sided", + fft_norm: str = "forward", + patching_levels: int = 0, + **kwargs, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + + self.n_dim = len(n_modes) + self.n_modes = n_modes + self.hidden_channels = hidden_channels + self.lifting_channels = lifting_channels + self.projection_channels = projection_channels + self.in_channels = in_channels + if patching_levels: + self.in_channels = self.in_channels * patching_levels + 1 + self.out_channels = out_channels + self.n_layers = n_layers + self.joint_factorization = joint_factorization + self.non_linearity = non_linearity + self.rank = rank + self.factorization = factorization + self.fno_skip = (fno_skip,) + self.mlp_skip = (mlp_skip,) + self.fft_norm = fft_norm + self.implementation = implementation + self.separable = separable + self.preactivation = preactivation + self.stabilizer = stabilizer + if domain_padding is not None and ( + (isinstance(domain_padding, list) and sum(domain_padding) > 0) + or (isinstance(domain_padding, (float, int)) and domain_padding > 0) + ): + self.domain_padding = fno_block.DomainPadding( + domain_padding=domain_padding, padding_mode=domain_padding_mode + ) + else: + self.domain_padding = None + self.domain_padding_mode = domain_padding_mode + + self.fno_blocks = fno_block.FNOBlocks( + in_channels=hidden_channels, + out_channels=hidden_channels, + n_modes=self.n_modes, + n_layers=n_layers, + max_n_modes=max_n_modes, + use_mlp=use_mlp, + mlp=mlp, + non_linearity=non_linearity, + stabilizer=stabilizer, + norm=norm, + ada_in_features=ada_in_features, + preactivation=preactivation, + fno_skip=fno_skip, + mlp_skip=mlp_skip, + 
separable=separable, + factorization=factorization, + rank=rank, + SpectralConv=SphericalConv, + joint_factorization=joint_factorization, + implementation=implementation, + fft_norm=fft_norm, + ) + # if lifting_channels is passed, make lifting an MLP + # with a hidden layer of size lifting_channels + if self.lifting_channels: + self.lifting = fno_block.MLP( + in_channels=in_channels, + out_channels=self.hidden_channels, + hidden_channels=self.lifting_channels, + n_layers=2, + n_dim=self.n_dim, + ) + # otherwise, make it a linear layer + else: + self.lifting = fno_block.MLP( + in_channels=in_channels, + out_channels=self.hidden_channels, + hidden_channels=self.hidden_channels, + n_layers=1, + n_dim=self.n_dim, + ) + self.projection = fno_block.MLP( + in_channels=self.hidden_channels, + out_channels=out_channels, + hidden_channels=self.projection_channels, + n_layers=2, + n_dim=self.n_dim, + non_linearity=non_linearity, + ) + + def forward(self, x): + """SFNO's forward pass""" + x = self.concat_to_tensor(x, self.input_keys) + + x = self.lifting(x) + if self.domain_padding is not None: + x = self.domain_padding.pad(x) + # x is 0.4 * [1, 32, 16, 16], passed + for index in range(self.n_layers): + x = self.fno_blocks(x, index) + + if self.domain_padding is not None: + x = self.domain_padding.unpad(x) + out = self.projection(x) + + return {self.output_keys[0]: out} diff --git a/ppsci/arch/spinn.py b/ppsci/arch/spinn.py index 014446941f..040e2067a7 100644 --- a/ppsci/arch/spinn.py +++ b/ppsci/arch/spinn.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -178,3 +179,185 @@ def forward(self, x): output = self._output_transform(x, output) return output +======= +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle +import paddle.nn as nn + +from ppsci.arch import base +from ppsci.arch.mlp import ModifiedMLP +from ppsci.utils import initializer + + +class SPINN(base.Arch): + """Separable Physics-Informed Neural Networks. + + Args: + input_keys (Tuple[str, ...]): Keys of input variables. + output_keys (Tuple[str, ...]): Keys of output variables. + r (int): Number of features for each output dimension. + num_layers (int): Number of layers. + hidden_size (Union[int, Tuple[int, ...]]): Size of hidden layer. + activation (str, optional): Name of activation function. + skip_connection (bool, optional): Whether to use skip connection. + weight_norm (bool, optional): Whether to use weight normalization. + periods (Optional[Dict[int, Tuple[float, bool]]], optional): Periodicity of input variables. + fourier (Optional[Dict[str, Union[float, int]]], optional): Frequency of input variables. 
+ random_weight (Optional[Dict[str, float]], optional): Random weight of linear layer. + + Examples: + >>> from ppsci.arch import SPINN + >>> model = SPINN( + ... input_keys=('x', 'y', 'z'), + ... output_keys=('u', 'v'), + ... r=32, + ... num_layers=4, + ... hidden_size=32, + ... ) + >>> input_dict = {"x": paddle.rand([3, 1]), + ... "y": paddle.rand([4, 1]), + ... "z": paddle.rand([5, 1])} + >>> output_dict = model(input_dict) + >>> print(output_dict["u"].shape) + [3, 4, 5, 1] + >>> print(output_dict["v"].shape) + [3, 4, 5, 1] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + r: int, + num_layers: int, + hidden_size: Union[int, Tuple[int, ...]], + activation: str = "tanh", + skip_connection: bool = False, + weight_norm: bool = False, + periods: Optional[Dict[int, Tuple[float, bool]]] = None, + fourier: Optional[Dict[str, Union[float, int]]] = None, + random_weight: Optional[Dict[str, float]] = None, + ): + + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.r = r + input_dim = len(self.input_keys) + + self.branch_nets = nn.LayerList() + for i in range(input_dim): + self.branch_nets.append( + ModifiedMLP( + input_keys=(input_keys[i],), + output_keys=("f",), + num_layers=num_layers, + hidden_size=hidden_size, + activation=activation, + skip_connection=skip_connection, + weight_norm=weight_norm, + output_dim=r * len(output_keys), + periods=periods, + fourier=fourier, + random_weight=random_weight, + ) + ) + + self._init_weights() + + def _init_weights(self): + for m in self.sublayers(True): + if isinstance(m, nn.Linear): + initializer.glorot_normal_(m.weight) + initializer.zeros_(m.bias) + + def _tensor_contraction(self, x: paddle.Tensor, y: paddle.Tensor) -> paddle.Tensor: + """Tensor contraction between two tensors along the last channel. + + Args: + x (Tensor): Input tensor with shape [*N, C]. + y (Tensor): Input tensor with shape [*M, C] + + Returns: + Tensor: Output tensor with shape [*N, *M, C]. 
+ """ + x_ndim = x.ndim + y_ndim = y.ndim + out_dim = x_ndim + y_ndim - 1 + + # Align the dimensions of x and y to out_dim + if x_ndim < out_dim: + # Add singleton dimensions to x at the end of dimensions + x = x.unsqueeze([-2] * (out_dim - x_ndim)) + if y_ndim < out_dim: + # Add singleton dimensions to y at the begin of dimensions + y = y.unsqueeze([0] * (out_dim - y_ndim)) + + # Multiply x and y with implicit broadcasting + out = x * y + + return out + + def forward_tensor(self, x, y, z) -> List[paddle.Tensor]: + # forward each dim branch + feature_f = [] + for i, input_var in enumerate((x, y, z)): + input_i = {self.input_keys[i]: input_var} + output_f_i = self.branch_nets[i](input_i) + feature_f.append(output_f_i["f"]) # [B, r*output_dim] + + output = [] + for i, key in enumerate(self.output_keys): + st, ed = i * self.r, (i + 1) * self.r + # do tensor contraction and sum over all branch outputs + if ed - st == self.r: + output_i = feature_f[0] + else: + output_i = feature_f[0][:, st:ed] + + for j in range(1, len(self.input_keys)): + if ed - st == self.r: + output_ii = feature_f[j] + else: + output_ii = feature_f[j][:, st:ed] + output_i = self._tensor_contraction(output_i, output_ii) + + output_i = output_i.sum(-1, keepdim=True) + output.append(output_i) + + return output + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + output = self.forward_tensor(*[x[key] for key in self.input_keys]) + + output = {key: output[i] for i, key in enumerate(self.output_keys)} + + if self._output_transform is not None: + output = self._output_transform(x, output) + + return output +>>>>>>> Stashed changes diff --git a/ppsci/arch/tfnonet.py b/ppsci/arch/tfnonet.py index 37388d615f..ba252c3763 100644 --- a/ppsci/arch/tfnonet.py +++ b/ppsci/arch/tfnonet.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream from typing import Dict from typing import Optional from typing import Tuple @@ -512,3 +513,519 @@ def __init__( self.n_modes_height = n_modes_height self.n_modes_width = n_modes_width self.n_modes_depth = n_modes_depth +======= +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle.nn.functional as F +from paddle import nn + +from ppsci.arch import base +from ppsci.arch import fno_block + + +class FNONet(base.Arch): + """N-Dimensional Tensorized Fourier Neural Operator. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + n_modes (Tuple[int, ...]): Number of modes to keep in Fourier Layer, along each dimension + The dimensionality of the TFNO is inferred from ``len(n_modes)` + hidden_channels (int): Width of the FNO (i.e. number of channels) + in_channels (int, optional): Number of input channels. Defaults to 3. + out_channels (int, optional): Number of output channels. Defaults to 1. + lifting_channels (int, optional): Number of hidden channels of the lifting block of the FNO. + Defaults to 256. + projection_channels (int, optional): Number of hidden channels of the projection block of the FNO. + Defaults to 256. + n_layers (int, optional): Number of Fourier Layers. Defaults to 4. + use_mlp (bool, optional): Whether to use an MLP layer after each FNO block. Defaults to False. + mlp (Dict[str, float], optional): Parameters of the MLP. {'expansion': float, 'dropout': float}. + Defaults to None. + non_linearity (nn.functional, optional): Non-Linearity module to use. Defaults to F.gelu. 
+ norm (str, optional): Normalization layer to use. Defaults to None. + ada_in_features (int,optional): The input channles of the adaptive normalization.Defaults to None.s + preactivation (bool, optional): Whether to use resnet-style preactivation. Defaults to False. + skip (str, optional): Type of skip connection to use,{'linear', 'identity', 'soft-gating'}. + Defaults to "soft-gating". + separable (bool, optional): Whether to use a depthwise separable spectral convolution. + Defaults to False. + factorization (str, optional): Tensor factorization of the parameters weight to use. + * If None, a dense tensor parametrizes the Spectral convolutions. + * Otherwise, the specified tensor factorization is used. Defaults to "Tucker". + rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 1.0. + joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a + single tensor (vs one per layer). Defaults to False. + implementation (str, optional): {'factorized', 'reconstructed'}, optional. Defaults to "factorized". + If factorization is not None, forward mode to use:: + * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass. + * `factorized` : the input is directly contracted with the factors of the decomposition. + domain_padding (Optional[Union[list,float,int]]): Whether to use percentage of padding. Defaults to + None. + domain_padding_mode (str, optional): {'symmetric', 'one-sided'}, optional + How to perform domain padding, by default 'one-sided'. Defaults to "one-sided". + fft_norm (str, optional): The normalization mode for the FFT. Defaults to "forward". + patching_levels (int, optional): Number of patching levels to use. Defaults to 0. + SpectralConv (nn.layer, optional): Spectral convolution layer to use. + Defaults to fno_block.FactorizedSpectralConv. 
+ """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + n_modes: Tuple[int, ...], + hidden_channels: int, + in_channels: int = 3, + out_channels: int = 1, + lifting_channels: int = 256, + projection_channels: int = 256, + n_layers: int = 4, + use_mlp: bool = False, + mlp: Optional[Dict[str, float]] = None, + max_n_modes: int = None, + non_linearity: nn.functional = F.gelu, + stabilizer: str = None, + norm: str = None, + ada_in_features: Optional[int] = None, + preactivation: bool = False, + fno_skip: str = "linear", + mlp_skip: str = "soft-gating", + separable: bool = False, + factorization: str = None, + rank: float = 1.0, + joint_factorization: bool = False, + implementation: str = "factorized", + domain_padding: Optional[Union[list, float, int]] = None, + domain_padding_mode: str = "one-sided", + fft_norm: str = "forward", + patching_levels: int = 0, + SpectralConv: nn.Layer = fno_block.FactorizedSpectralConv, + **kwargs, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + + self.n_dim = len(n_modes) + self.n_modes = n_modes + self.hidden_channels = hidden_channels + self.lifting_channels = lifting_channels + self.projection_channels = projection_channels + self.in_channels = in_channels + if patching_levels: + self.in_channels = self.in_channels * patching_levels + 1 + self.out_channels = out_channels + self.n_layers = n_layers + self.joint_factorization = joint_factorization + self.non_linearity = non_linearity + self.rank = rank + self.factorization = factorization + self.fno_skip = (fno_skip,) + self.mlp_skip = (mlp_skip,) + self.fft_norm = fft_norm + self.implementation = implementation + self.separable = separable + self.preactivation = preactivation + self.stabilizer = stabilizer + if domain_padding is not None and ( + (isinstance(domain_padding, list) and sum(domain_padding) > 0) + or (isinstance(domain_padding, (float, int)) and domain_padding > 0) + ): + self.domain_padding = fno_block.DomainPadding( + domain_padding=domain_padding, padding_mode=domain_padding_mode + ) + else: + self.domain_padding = None + self.domain_padding_mode = domain_padding_mode + self.fno_blocks = fno_block.FNOBlocks( + in_channels=hidden_channels, + out_channels=hidden_channels, + n_modes=self.n_modes, + n_layers=n_layers, + max_n_modes=max_n_modes, + use_mlp=use_mlp, + mlp=mlp, + non_linearity=non_linearity, + stabilizer=stabilizer, + norm=norm, + ada_in_features=ada_in_features, + preactivation=preactivation, + fno_skip=fno_skip, + mlp_skip=mlp_skip, + separable=separable, + factorization=factorization, + rank=rank, + SpectralConv=SpectralConv, + joint_factorization=joint_factorization, + implementation=implementation, + fft_norm=fft_norm, + ) + # if lifting_channels is passed, make lifting an MLP + # with a hidden layer of size lifting_channels + if self.lifting_channels: + self.lifting = fno_block.MLP( + in_channels=in_channels, + out_channels=self.hidden_channels, + hidden_channels=self.lifting_channels, + n_layers=2, + n_dim=self.n_dim, + ) + # otherwise, make it a linear layer + else: + self.lifting = fno_block.MLP( + in_channels=in_channels, + out_channels=self.hidden_channels, + hidden_channels=self.hidden_channels, + n_layers=1, + n_dim=self.n_dim, + ) + self.projection = fno_block.MLP( + in_channels=self.hidden_channels, + out_channels=out_channels, + hidden_channels=self.projection_channels, + n_layers=2, + n_dim=self.n_dim, + non_linearity=non_linearity, + ) + + def forward(self, x): + """TFNO's forward 
pass""" + x = self.concat_to_tensor(x, self.input_keys) + + x = self.lifting(x) + if self.domain_padding is not None: + x = self.domain_padding.pad(x) + # x is 0.4 * [1, 32, 16, 16], passed + for index in range(self.n_layers): + x = self.fno_blocks(x, index) + + if self.domain_padding is not None: + x = self.domain_padding.unpad(x) + out = self.projection(x) + return {self.output_keys[0]: out} + + +class TFNO1dNet(FNONet): + """1D Fourier Neural Operator. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + n_modes_height (Tuple[int, ...]): Number of Fourier modes to keep along the height, along each + dimension. + hidden_channels (int): Width of the FNO (i.e. number of channels). + in_channels (int, optional): Number of input channels. Defaults to 3. + out_channels (int, optional): Number of output channels. Defaults to 1. + lifting_channels (int, optional): Number of hidden channels of the lifting block of the FNO. + Defaults to 256. + projection_channels (int, optional): Number of hidden channels of the projection block of the FNO. + Defaults to 256. + n_layers (int, optional): Number of Fourier Layers. Defaults to 4. + use_mlp (bool, optional): Whether to use an MLP layer after each FNO block. Defaults to False. + mlp (dict[str, float], optional): Parameters of the MLP. {'expansion': float, 'dropout': float}. + Defaults to None. + non_linearity (nn.functional, optional): Non-Linearity module to use. Defaults to F.gelu. + norm (F.module, optional): Normalization layer to use. Defaults to None. + preactivation (bool, optional): Whether to use resnet-style preactivation. Defaults to False. + skip (str, optional): Type of skip connection to use,{'linear', 'identity', 'soft-gating'}. + Defaults to "soft-gating". + separable (bool, optional): Whether to use a depthwise separable spectral convolution. + Defaults to False. + factorization (str, optional): Tensor factorization of the parameters weight to use. + * If None, a dense tensor parametrizes the Spectral convolutions. + * Otherwise, the specified tensor factorization is used. Defaults to "Tucker". + rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 1.0. + joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a + single tensor (vs one per layer). Defaults to False. + implementation (str, optional): {'factorized', 'reconstructed'}, optional. Defaults to "factorized". + If factorization is not None, forward mode to use:: + * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass. + * `factorized` : the input is directly contracted with the factors of the decomposition. + domain_padding (Optional[Union[list, float, int]], optional): Whether to use percentage of padding. + Defaults to None. + domain_padding_mode (str, optional): {'symmetric', 'one-sided'}, optional + How to perform domain padding, by default 'one-sided'. Defaults to "one-sided". + fft_norm (str, optional): The normalization mode for the FFT. Defaults to "forward". + patching_levels (int, optional): Number of patching levels to use. Defaults to 0. + SpectralConv (nn.layer, optional): Spectral convolution layer to use. + Defaults to fno_block.FactorizedSpectralConv. 
+ """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + n_modes_height: Tuple[int, ...], + hidden_channels: int, + in_channels: int = 3, + out_channels: int = 1, + lifting_channels: int = 256, + projection_channels: int = 256, + n_layers: int = 4, + non_linearity: nn.functional = F.gelu, + use_mlp: bool = False, + mlp: Optional[Dict[str, float]] = None, + norm: str = None, + skip: str = "soft-gating", + separable: bool = False, + preactivation: bool = False, + factorization: str = "Tucker", + rank: float = 1.0, + joint_factorization: bool = False, + implementation: str = "factorized", + domain_padding: Optional[Union[list, float, int]] = None, + domain_padding_mode: str = "one-sided", + fft_norm: str = "forward", + patching_levels: int = 0, + SpectralConv: nn.Layer = fno_block.FactorizedSpectralConv, + **kwargs, + ): + super().__init__( + input_keys=input_keys, + output_keys=output_keys, + n_modes=(n_modes_height,), + hidden_channels=hidden_channels, + in_channels=in_channels, + out_channels=out_channels, + lifting_channels=lifting_channels, + projection_channels=projection_channels, + n_layers=n_layers, + non_linearity=non_linearity, + use_mlp=use_mlp, + mlp=mlp, + norm=norm, + skip=skip, + separable=separable, + preactivation=preactivation, + factorization=factorization, + rank=rank, + joint_factorization=joint_factorization, + implementation=implementation, + domain_padding=domain_padding, + domain_padding_mode=domain_padding_mode, + fft_norm=fft_norm, + patching_levels=patching_levels, + SpectralConv=SpectralConv, + ) + self.n_modes_height = n_modes_height + + +class TFNO2dNet(FNONet): + """2D Fourier Neural Operator. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + n_modes_height (int): Number of Fourier modes to keep along the height. + n_modes_width (int): Number of modes to keep in Fourier Layer, along the width. + hidden_channels (int): Width of the FNO (i.e. number of channels). + in_channels (int, optional): Number of input channels. Defaults to 3. + out_channels (int, optional): Number of output channels. Defaults to 1. + lifting_channels (int, optional): Number of hidden channels of the lifting block of the FNO. + Defaults to 256. + projection_channels (int, optional): Number of hidden channels of the projection block of the FNO. + Defaults to 256. + n_layers (int, optional): Number of Fourier Layers. Defaults to 4. + use_mlp (bool, optional): Whether to use an MLP layer after each FNO block. Defaults to False. + mlp (Dict[str, float], optional): Parameters of the MLP. {'expansion': float, 'dropout': float}. + Defaults to None. + non_linearity (nn.Layer, optional): Non-Linearity module to use. Defaults to F.gelu. + norm (F.module, optional): Normalization layer to use. Defaults to None. + preactivation (bool, optional): Whether to use resnet-style preactivation. Defaults to False. + skip (str, optional): Type of skip connection to use,{'linear', 'identity', 'soft-gating'}. + Defaults to "soft-gating". + separable (bool, optional): Whether to use a depthwise separable spectral convolution. + Defaults to False. + factorization (str, optional): Tensor factorization of the parameters weight to use. + * If None, a dense tensor parametrizes the Spectral convolutions. + * Otherwise, the specified tensor factorization is used. Defaults to "Tucker". + rank (float, optional): Rank of the tensor factorization of the Fourier weights. 
Defaults to 1.0. + joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a + single tensor (vs one per layer). Defaults to False. + implementation (str, optional): {'factorized', 'reconstructed'}, optional. Defaults to "factorized". + If factorization is not None, forward mode to use:: + * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass. + * `factorized` : the input is directly contracted with the factors of the decomposition. + domain_padding (Union[list,float,int], optional): Whether to use percentage of padding. Defaults to + None. + domain_padding_mode (str, optional): {'symmetric', 'one-sided'}, optional + How to perform domain padding, by default 'one-sided'. Defaults to "one-sided". + fft_norm (str, optional): The normalization mode for the FFT. Defaults to "forward". + patching_levels (int, optional): Number of patching levels to use. Defaults to 0. + SpectralConv (nn.layer, optional): Spectral convolution layer to use. + Defaults to fno_block.FactorizedSpectralConv. + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + n_modes_height: int, + n_modes_width: int, + hidden_channels: int, + in_channels: int = 3, + out_channels: int = 1, + lifting_channels: int = 256, + projection_channels: int = 256, + n_layers: int = 4, + non_linearity: nn.functional = F.gelu, + use_mlp: bool = False, + mlp: Optional[Dict[str, float]] = None, + norm: str = None, + skip: str = "soft-gating", + separable: bool = False, + preactivation: bool = False, + factorization: str = "Tucker", + rank: float = 1.0, + joint_factorization: bool = False, + implementation: str = "factorized", + domain_padding: Optional[Union[list, float, int]] = None, + domain_padding_mode: str = "one-sided", + fft_norm: str = "forward", + patching_levels: int = 0, + SpectralConv: nn.layer = fno_block.FactorizedSpectralConv, + **kwargs, + ): + super().__init__( + input_keys=input_keys, + output_keys=output_keys, + n_modes=(n_modes_height, n_modes_width), + hidden_channels=hidden_channels, + in_channels=in_channels, + out_channels=out_channels, + lifting_channels=lifting_channels, + projection_channels=projection_channels, + n_layers=n_layers, + non_linearity=non_linearity, + use_mlp=use_mlp, + mlp=mlp, + norm=norm, + skip=skip, + separable=separable, + preactivation=preactivation, + factorization=factorization, + rank=rank, + joint_factorization=joint_factorization, + implementation=implementation, + domain_padding=domain_padding, + domain_padding_mode=domain_padding_mode, + fft_norm=fft_norm, + patching_levels=patching_levels, + SpectralConv=SpectralConv, + ) + self.n_modes_height = n_modes_height + self.n_modes_width = n_modes_width + + +class TFNO3dNet(FNONet): + """3D Fourier Neural Operator. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + n_modes_height (int): Number of Fourier modes to keep along the height. + n_modes_width (int): Number of modes to keep in Fourier Layer, along the width. + n_modes_depth (int): Number of Fourier modes to keep along the depth. + hidden_channels (int): Width of the FNO (i.e. number of channels). + in_channels (int, optional): Number of input channels. Defaults to 3. + out_channels (int, optional): Number of output channels. Defaults to 1. + lifting_channels (int, optional): Number of hidden channels of the lifting block of the FNO. 
+ Defaults to 256. + projection_channels (int, optional): Number of hidden channels of the projection block of the FNO. + Defaults to 256. + n_layers (int, optional): Number of Fourier Layers. Defaults to 4. + use_mlp (bool, optional): Whether to use an MLP layer after each FNO block. Defaults to False. + mlp (Dict[str, float], optional): Parameters of the MLP. {'expansion': float, 'dropout': float}. + Defaults to None. + non_linearity (nn.Layer, optional): Non-Linearity module to use. Defaults to F.gelu. + norm (F.module, optional): Normalization layer to use. Defaults to None. + preactivation (bool, optional): Whether to use resnet-style preactivation. Defaults to False. + skip (str, optional): Type of skip connection to use,{'linear', 'identity', 'soft-gating'}. + Defaults to "soft-gating". + separable (bool, optional): Whether to use a depthwise separable spectral convolution. + Defaults to False. + factorization (str, optional): Tensor factorization of the parameters weight to use. + * If None, a dense tensor parametrizes the Spectral convolutions. + * Otherwise, the specified tensor factorization is used. Defaults to "Tucker". + rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 1.0. + joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a + single tensor (vs one per layer). Defaults to False. + implementation (str, optional): {'factorized', 'reconstructed'}, optional. Defaults to "factorized". + If factorization is not None, forward mode to use:: + * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass. + * `factorized` : the input is directly contracted with the factors of the decomposition. + domain_padding (str, optional): Whether to use percentage of padding. Defaults to None. + domain_padding_mode (str, optional): {'symmetric', 'one-sided'}, optional + How to perform domain padding, by default 'one-sided'. Defaults to "one-sided". + fft_norm (str, optional): The normalization mode for the FFT. Defaults to "forward". + patching_levels (int, optional): Number of patching levels to use. Defaults to 0. + SpectralConv (nn.layer, optional): Spectral convolution layer to use. Defaults to fno_block. + FactorizedSpectralConv. 
+ """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + n_modes_height: int, + n_modes_width: int, + n_modes_depth: int, + hidden_channels: int, + in_channels: int = 3, + out_channels: int = 1, + lifting_channels: int = 256, + projection_channels: int = 256, + n_layers: int = 4, + non_linearity: nn.functional = F.gelu, + use_mlp: bool = False, + mlp: Optional[Dict[str, float]] = None, + norm: str = None, + skip: str = "soft-gating", + separable: bool = False, + preactivation: bool = False, + factorization: str = "Tucker", + rank: float = 1.0, + joint_factorization: bool = False, + implementation: str = "factorized", + domain_padding: Optional[Union[list, float, int]] = None, + domain_padding_mode: str = "one-sided", + fft_norm: str = "forward", + patching_levels: int = 0, + SpectralConv: nn.layer = fno_block.FactorizedSpectralConv, + **kwargs, + ): + super().__init__( + input_keys=input_keys, + output_keys=output_keys, + n_modes=(n_modes_height, n_modes_width, n_modes_depth), + hidden_channels=hidden_channels, + in_channels=in_channels, + out_channels=out_channels, + lifting_channels=lifting_channels, + projection_channels=projection_channels, + n_layers=n_layers, + non_linearity=non_linearity, + use_mlp=use_mlp, + mlp=mlp, + norm=norm, + skip=skip, + separable=separable, + preactivation=preactivation, + factorization=factorization, + rank=rank, + joint_factorization=joint_factorization, + implementation=implementation, + domain_padding=domain_padding, + domain_padding_mode=domain_padding_mode, + fft_norm=fft_norm, + patching_levels=patching_levels, + SpectralConv=SpectralConv, + ) + self.n_modes_height = n_modes_height + self.n_modes_width = n_modes_width + self.n_modes_height = n_modes_height +>>>>>>> Stashed changes diff --git a/ppsci/arch/tgcn.py b/ppsci/arch/tgcn.py index 5cf1ebc3eb..6c152eaf57 100644 --- a/ppsci/arch/tgcn.py +++ b/ppsci/arch/tgcn.py @@ -1,200 +1,200 @@ -from typing import Tuple - -import paddle as pp -import paddle.nn.functional as F -from numpy import ndarray -from paddle import nn -from paddle.nn.initializer import KaimingNormal - -from ppsci.arch.base import Arch - - -class graph_conv(nn.Layer): - def __init__(self, in_dim, out_dim, dropout, num_layer=2): - super(graph_conv, self).__init__() - self.mlp = nn.Conv2D( - (num_layer + 1) * in_dim, - out_dim, - kernel_size=(1, 1), - weight_attr=KaimingNormal(), - ) - self.dropout = dropout - self.num_layer = num_layer - - def forward(self, x, adj): - # B C N T - out = [x] - for _ in range(self.num_layer): - new_x = pp.matmul(adj, x) - out.append(new_x) - x = new_x - - h = pp.concat(out, axis=1) - h = self.mlp(h) - h = F.dropout(h, self.dropout, training=self.training) - return h - - -class tempol_conv(nn.Layer): - def __init__(self, in_dim, out_dim, hidden, num_layer=3, k_s=3, alpha=0.1): - super(tempol_conv, self).__init__() - self.leakyrelu = nn.LeakyReLU(alpha) - self.tc_convs = nn.LayerList() - self.num_layer = num_layer - for i in range(num_layer): - in_channels = in_dim if i == 0 else hidden - self.tc_convs.append( - nn.Conv2D( - in_channels=in_channels, - out_channels=hidden, - kernel_size=(1, k_s), - padding=(0, i + 1), - dilation=i + 1, - weight_attr=KaimingNormal(), - ) - ) - - self.mlp = nn.Conv2D( - in_channels=in_dim + hidden * num_layer, - out_channels=out_dim, - kernel_size=(1, 1), - weight_attr=KaimingNormal(), - ) - - def forward(self, x): - # B C N T - x_cat = [x] - for i in range(self.num_layer): - x = self.leakyrelu(self.tc_convs[i](x)) - x_cat.append(x) - 
tc_out = self.mlp(pp.concat(x_cat, axis=1)) - return tc_out - - -class TGCN(Arch): - """ - TGCN is a class that represents an Temporal Graph Convolutional Network model. - - Args: - input_keys (Tuple[str, ...]): A tuple of input keys. - output_keys (Tuple[str, ...]): A tuple of output keys. - adj (ndarray): The adjacency matrix of the graph. - in_dim (int): The dimension of the input data. - emb_dim (int): The dimension of the embedded space. - hidden (int): The dimension of the latent space. - gc_layer (int): The number of the graph convolutional layer. - tc_layer (int): The number of the temporal convolutional layer. - k_s (int): The kernel size of the temporal convolutional layer. - dropout (float): The dropout rate. - alpha (float): The negative slope of LeakyReLU. - input_len (int): The input timesteps. - label_len (int): The output timesteps. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.TGCN( - ... input_keys=("input",), - ... output_keys=("label",), - ... adj=numpy.ones((307, 307), dtype=numpy.float32), - ... in_dim=1, - ... emb_dim=32 - ... hidden=64, - ... gc_layer=2, - ... tc_layer=2 - ... k_s=3, - ... dropout=0.25, - ... alpha=0.1, - ... input_len=12, - ... label_len=12, - ... ) - >>> input_dict = {"input": paddle.rand([64, 12, 307, 1]),} - >>> label_dict = model(input_dict) - >>> print(label_dict["label"].shape) - [64, 12, 307, 1] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - adj: ndarray, - in_dim: int, - emb_dim: int, - hidden: int, - gc_layer: int, - tc_layer: int, - k_s: int, - dropout: float, - alpha: float, - input_len: int, - label_len: int, - ): - super(TGCN, self).__init__() - - self.input_keys = input_keys - self.output_keys = output_keys - - self.register_buffer("adj", pp.to_tensor(data=adj)) - - self.emb_conv = nn.Conv2D( - in_channels=in_dim, - out_channels=emb_dim, - kernel_size=(1, 1), - weight_attr=KaimingNormal(), - ) - - self.tc1_conv = tempol_conv( - emb_dim, hidden, hidden, num_layer=tc_layer, k_s=k_s, alpha=alpha - ) - self.sc1_conv = graph_conv(hidden, hidden, dropout, num_layer=gc_layer) - self.bn1 = nn.BatchNorm2D(hidden) - - self.tc2_conv = tempol_conv( - hidden, hidden, hidden, num_layer=tc_layer, k_s=k_s, alpha=alpha - ) - self.sc2_conv = graph_conv(hidden, hidden, dropout, num_layer=gc_layer) - self.bn2 = nn.BatchNorm2D(hidden) - - self.end_conv_1 = nn.Conv2D( - in_channels=emb_dim + hidden + hidden, - out_channels=2 * hidden, - kernel_size=(1, 1), - weight_attr=KaimingNormal(), - ) - self.end_conv_2 = nn.Conv2D( - in_channels=2 * hidden, - out_channels=label_len, - kernel_size=(1, input_len), - weight_attr=KaimingNormal(), - ) - - def forward(self, raw): - # emb block - x = raw[self.input_keys[0]] - x = x.transpose(perm=[0, 3, 2, 1]) # B in_dim N T - emb_x = self.emb_conv(x) # B emd_dim N T - - # TC1 - tc1_out = self.tc1_conv(emb_x) # B hidden N T - - # SC1 - sc1_out = self.sc1_conv(tc1_out, self.adj) # B hidden N T - sc1_out = sc1_out + tc1_out - sc1_out = self.bn1(sc1_out) - - # TC2 - tc2_out = self.tc2_conv(sc1_out) # B hidden N T - - # SC2 - sc2_out = self.sc2_conv(tc2_out, self.adj) # B hidden N T - sc2_out = sc2_out + tc2_out - sc2_out = self.bn2(sc2_out) - - # readout block - x_out = F.relu(pp.concat((emb_x, sc1_out, sc2_out), axis=1)) - x_out = F.relu(self.end_conv_1(x_out)) - # transform - x_out = self.end_conv_2(x_out) # B T N 1 - - return {self.output_keys[0]: x_out} +from typing import Tuple + +import paddle as pp +import paddle.nn.functional as F +from 
numpy import ndarray +from paddle import nn +from paddle.nn.initializer import KaimingNormal + +from ppsci.arch.base import Arch + + +class graph_conv(nn.Layer): + def __init__(self, in_dim, out_dim, dropout, num_layer=2): + super(graph_conv, self).__init__() + self.mlp = nn.Conv2D( + (num_layer + 1) * in_dim, + out_dim, + kernel_size=(1, 1), + weight_attr=KaimingNormal(), + ) + self.dropout = dropout + self.num_layer = num_layer + + def forward(self, x, adj): + # B C N T + out = [x] + for _ in range(self.num_layer): + new_x = pp.matmul(adj, x) + out.append(new_x) + x = new_x + + h = pp.concat(out, axis=1) + h = self.mlp(h) + h = F.dropout(h, self.dropout, training=self.training) + return h + + +class tempol_conv(nn.Layer): + def __init__(self, in_dim, out_dim, hidden, num_layer=3, k_s=3, alpha=0.1): + super(tempol_conv, self).__init__() + self.leakyrelu = nn.LeakyReLU(alpha) + self.tc_convs = nn.LayerList() + self.num_layer = num_layer + for i in range(num_layer): + in_channels = in_dim if i == 0 else hidden + self.tc_convs.append( + nn.Conv2D( + in_channels=in_channels, + out_channels=hidden, + kernel_size=(1, k_s), + padding=(0, i + 1), + dilation=i + 1, + weight_attr=KaimingNormal(), + ) + ) + + self.mlp = nn.Conv2D( + in_channels=in_dim + hidden * num_layer, + out_channels=out_dim, + kernel_size=(1, 1), + weight_attr=KaimingNormal(), + ) + + def forward(self, x): + # B C N T + x_cat = [x] + for i in range(self.num_layer): + x = self.leakyrelu(self.tc_convs[i](x)) + x_cat.append(x) + tc_out = self.mlp(pp.concat(x_cat, axis=1)) + return tc_out + + +class TGCN(Arch): + """ + TGCN is a class that represents an Temporal Graph Convolutional Network model. + + Args: + input_keys (Tuple[str, ...]): A tuple of input keys. + output_keys (Tuple[str, ...]): A tuple of output keys. + adj (ndarray): The adjacency matrix of the graph. + in_dim (int): The dimension of the input data. + emb_dim (int): The dimension of the embedded space. + hidden (int): The dimension of the latent space. + gc_layer (int): The number of the graph convolutional layer. + tc_layer (int): The number of the temporal convolutional layer. + k_s (int): The kernel size of the temporal convolutional layer. + dropout (float): The dropout rate. + alpha (float): The negative slope of LeakyReLU. + input_len (int): The input timesteps. + label_len (int): The output timesteps. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.TGCN( + ... input_keys=("input",), + ... output_keys=("label",), + ... adj=numpy.ones((307, 307), dtype=numpy.float32), + ... in_dim=1, + ... emb_dim=32 + ... hidden=64, + ... gc_layer=2, + ... tc_layer=2 + ... k_s=3, + ... dropout=0.25, + ... alpha=0.1, + ... input_len=12, + ... label_len=12, + ... 
) + >>> input_dict = {"input": paddle.rand([64, 12, 307, 1]),} + >>> label_dict = model(input_dict) + >>> print(label_dict["label"].shape) + [64, 12, 307, 1] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + adj: ndarray, + in_dim: int, + emb_dim: int, + hidden: int, + gc_layer: int, + tc_layer: int, + k_s: int, + dropout: float, + alpha: float, + input_len: int, + label_len: int, + ): + super(TGCN, self).__init__() + + self.input_keys = input_keys + self.output_keys = output_keys + + self.register_buffer("adj", pp.to_tensor(data=adj)) + + self.emb_conv = nn.Conv2D( + in_channels=in_dim, + out_channels=emb_dim, + kernel_size=(1, 1), + weight_attr=KaimingNormal(), + ) + + self.tc1_conv = tempol_conv( + emb_dim, hidden, hidden, num_layer=tc_layer, k_s=k_s, alpha=alpha + ) + self.sc1_conv = graph_conv(hidden, hidden, dropout, num_layer=gc_layer) + self.bn1 = nn.BatchNorm2D(hidden) + + self.tc2_conv = tempol_conv( + hidden, hidden, hidden, num_layer=tc_layer, k_s=k_s, alpha=alpha + ) + self.sc2_conv = graph_conv(hidden, hidden, dropout, num_layer=gc_layer) + self.bn2 = nn.BatchNorm2D(hidden) + + self.end_conv_1 = nn.Conv2D( + in_channels=emb_dim + hidden + hidden, + out_channels=2 * hidden, + kernel_size=(1, 1), + weight_attr=KaimingNormal(), + ) + self.end_conv_2 = nn.Conv2D( + in_channels=2 * hidden, + out_channels=label_len, + kernel_size=(1, input_len), + weight_attr=KaimingNormal(), + ) + + def forward(self, raw): + # emb block + x = raw[self.input_keys[0]] + x = x.transpose(perm=[0, 3, 2, 1]) # B in_dim N T + emb_x = self.emb_conv(x) # B emd_dim N T + + # TC1 + tc1_out = self.tc1_conv(emb_x) # B hidden N T + + # SC1 + sc1_out = self.sc1_conv(tc1_out, self.adj) # B hidden N T + sc1_out = sc1_out + tc1_out + sc1_out = self.bn1(sc1_out) + + # TC2 + tc2_out = self.tc2_conv(sc1_out) # B hidden N T + + # SC2 + sc2_out = self.sc2_conv(tc2_out, self.adj) # B hidden N T + sc2_out = sc2_out + tc2_out + sc2_out = self.bn2(sc2_out) + + # readout block + x_out = F.relu(pp.concat((emb_x, sc1_out, sc2_out), axis=1)) + x_out = F.relu(self.end_conv_1(x_out)) + # transform + x_out = self.end_conv_2(x_out) # B T N 1 + + return {self.output_keys[0]: x_out} diff --git a/ppsci/arch/transformer.py b/ppsci/arch/transformer.py index a80eb46dcf..1e1621c2ab 100644 --- a/ppsci/arch/transformer.py +++ b/ppsci/arch/transformer.py @@ -1,417 +1,417 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
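For reference, a minimal usage sketch for the TGCN model added in the tgcn.py hunk above. It mirrors the class docstring example (whose instantiation is missing two commas) and only assumes that paddle, numpy, and ppsci are importable; per the docstring, the input has shape [batch, input_len, num_nodes, in_dim] and the output [batch, label_len, num_nodes, 1].

import numpy as np
import paddle

import ppsci

# Toy all-ones adjacency matrix for 307 graph nodes, as in the docstring example.
adj = np.ones((307, 307), dtype=np.float32)
model = ppsci.arch.TGCN(
    input_keys=("input",),
    output_keys=("label",),
    adj=adj,
    in_dim=1,
    emb_dim=32,
    hidden=64,
    gc_layer=2,
    tc_layer=2,
    k_s=3,
    dropout=0.25,
    alpha=0.1,
    input_len=12,
    label_len=12,
)
input_dict = {"input": paddle.rand([64, 12, 307, 1])}  # B, T_in, N, in_dim
label_dict = model(input_dict)
print(label_dict["label"].shape)  # expected: [64, 12, 307, 1]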
- -""" -Reference: https://github.com/omron-sinicx/transformer4sr -""" - -from __future__ import annotations - -import math -from typing import Callable -from typing import Tuple - -import paddle -import paddle.nn as nn - -from ppsci.arch import activation as act_mod -from ppsci.arch import base - - -def transpose_aux_func(dims, dim0, dim1): - perm = list(range(dims)) - perm[dim0], perm[dim1] = perm[dim1], perm[dim0] - return perm - - -class MultiHeadAttention(nn.Layer): - def __init__(self, heads, d_model): - super().__init__() - self.heads = heads - self.d_model = d_model - assert d_model % heads == 0 - self.d_k = d_model // heads - self.W_Q = nn.Linear(in_features=d_model, out_features=d_model) - self.W_K = nn.Linear(in_features=d_model, out_features=d_model) - self.W_V = nn.Linear(in_features=d_model, out_features=d_model) - self.W_O = nn.Linear(in_features=d_model, out_features=d_model) - - def scaled_dot_product_attention(self, Q, K, V, mask=None): - scores = paddle.matmul( - x=Q, y=K.transpose(perm=transpose_aux_func(K.ndim, -1, -2)) - ) / math.sqrt(self.d_k) - if mask is not None: - scores = paddle.where( - condition=mask, - x=paddle.to_tensor(data=[-1e9], dtype="float32"), - y=scores, - ) - weights = nn.functional.softmax(x=scores, axis=-1) - return paddle.matmul(x=weights, y=V) - - def forward(self, Q, K, V, mask=None): - Q_temp = paddle.reshape( - x=self.W_Q(Q), - shape=[i for i in tuple(Q.shape)[:-1]] + [self.heads] + [self.d_k], - ).transpose( - perm=transpose_aux_func( - paddle.reshape( - x=self.W_Q(Q), - shape=[i for i in tuple(Q.shape)[:-1]] + [self.heads] + [self.d_k], - ).ndim, - 1, - 2, - ) - ) - K_temp = paddle.reshape( - x=self.W_K(K), - shape=[i for i in tuple(K.shape)[:-1]] + [self.heads] + [self.d_k], - ).transpose( - perm=transpose_aux_func( - paddle.reshape( - x=self.W_K(K), - shape=[i for i in tuple(K.shape)[:-1]] + [self.heads] + [self.d_k], - ).ndim, - 1, - 2, - ) - ) - V_temp = paddle.reshape( - x=self.W_V(V), - shape=[i for i in tuple(V.shape)[:-1]] + [self.heads] + [self.d_k], - ).transpose( - perm=transpose_aux_func( - paddle.reshape( - x=self.W_V(V), - shape=[i for i in tuple(V.shape)[:-1]] + [self.heads] + [self.d_k], - ).ndim, - 1, - 2, - ) - ) - sdpa = self.scaled_dot_product_attention( - Q_temp, K_temp, V_temp, mask - ).transpose( - perm=transpose_aux_func( - self.scaled_dot_product_attention(Q_temp, K_temp, V_temp, mask).ndim, - 1, - 2, - ) - ) - sdpa = paddle.reshape( - x=sdpa, shape=[i for i in tuple(sdpa.shape)[:-2]] + [self.d_model] - ) - y_mha = self.W_O(sdpa) - return y_mha - - -class MLP(nn.Layer): - def __init__(self, list_dims, act="relu", dropout=0.0): - super().__init__() - self.layers = nn.LayerList() - for i in range(len(list_dims) - 1): - self.layers.append( - nn.Linear(in_features=list_dims[i], out_features=list_dims[i + 1]) - ) - self.layers.append(act_mod.get_activation(act) if act else None) - self.layers.append(nn.Dropout(p=dropout)) - - def forward(self, x): - y = x - for layer in self.layers: - y = layer(y) - return y - - -class EncoderLayerMix(nn.Layer): - def __init__(self, in_features, d_model, heads, act="relu", dropout=0.0): - super().__init__() - self.mlp = MLP([in_features, d_model, d_model], act="relu", dropout=dropout) - self.multihead_attention = MultiHeadAttention(heads, d_model) - self.dropout = nn.Dropout(p=dropout) - self.norm = nn.LayerNorm(normalized_shape=d_model) - - def forward(self, x): - y = x - y = paddle.flatten(y, start_axis=2) - y = self.mlp(y) - y = self.multihead_attention(y, y, y, mask=None) - y = 
self.dropout(y) - y = paddle.unsqueeze(y, axis=2) - y = x + y - y = self.norm(y) - return y - - -class Encoder(nn.Layer): - def __init__( - self, num_layers, num_var_max, d_model, heads, act="relu", dropout=0.0 - ): - super().__init__() - self.first_mlp = MLP([1, d_model, d_model], act="relu", dropout=dropout) - self.layers = nn.LayerList( - sublayers=[ - EncoderLayerMix( - d_model * num_var_max, d_model, heads, act="relu", dropout=dropout - ) - for _ in range(num_layers) - ] - ) - self.last_mlp = MLP([d_model, d_model], act="relu", dropout=dropout) - - def forward(self, x): - y = x - y = self.first_mlp(y) - for layer in self.layers: - y = layer(y) - y = self.last_mlp(y) - y = paddle.max(y, axis=1) - return y - - -class TokenEmbeddings(nn.Layer): - def __init__(self, vocab_size, seq_length, d_model, dropout=0.0): - super().__init__() - self.embed = nn.Embedding(num_embeddings=vocab_size, embedding_dim=d_model) - self.seq_length = seq_length - self.d_model = d_model - self.dropout = nn.Dropout(dropout) - self.get_pe_num() - - def get_pe_num(self): - self.pe = paddle.zeros(shape=[self.seq_length, self.d_model]) - numerator = paddle.arange( - self.seq_length, dtype=paddle.get_default_dtype() - ).unsqueeze(axis=1) - denominator = paddle.pow( - x=paddle.to_tensor(10e4, dtype=paddle.get_default_dtype()), - y=paddle.arange(self.d_model, step=2) / self.d_model, - ).unsqueeze(axis=0) - self.pe[:, 0::2] = paddle.sin(x=numerator / denominator) - self.pe[:, 1::2] = paddle.cos(x=numerator / denominator) - self.pe.stop_gradient = True - - def forward(self, x): - # embedding - y = x - y = self.embed(y) * math.sqrt(self.d_model) - # position encoding - y = self.dropout(y + self.pe) - return y - - -class DecoderLayer(nn.Layer): - def __init__(self, heads, d_model, act="relu", dropout=0.0): - super().__init__() - self.multihead_attention_1 = MultiHeadAttention(heads, d_model) - self.dropout_1 = nn.Dropout(p=dropout) - self.norm_1 = nn.LayerNorm(d_model) - - self.multihead_attention_2 = MultiHeadAttention(heads, d_model) - self.dropout_2 = nn.Dropout(p=dropout) - self.norm_2 = nn.LayerNorm(d_model) - - self.mlp = MLP([d_model, 2 * d_model, d_model], act="relu", dropout=dropout) - self.norm_3 = nn.LayerNorm(d_model) - - def forward(self, x_emb, x_enc, mask): - y_mha_1 = self.multihead_attention_1(x_emb, x_emb, x_emb, mask=mask) - y_mha_1 = self.dropout_1(y_mha_1) - y = y_mha_1 + x_emb - y = self.norm_1(y) - y_mha_2 = self.multihead_attention_2(y, x_enc, x_enc, mask=None) - y_mha_2 = self.dropout_2(y_mha_2) - y = y + y_mha_2 - y = self.norm_2(y) - y_mlp = self.mlp(y) - y = y + y_mlp - y = self.norm_3(y) - return y - - -class Decoder(nn.Layer): - def __init__( - self, - num_layers, - vocab_size, - seq_length, - d_model, - heads, - act="relu", - dropout=0.0, - ): - super().__init__() - self.token_embeddings = TokenEmbeddings( - vocab_size, seq_length, d_model, dropout - ) - self.dropout = nn.Dropout(p=dropout) - self.layers = nn.LayerList( - sublayers=[ - DecoderLayer(heads, d_model, act="relu", dropout=dropout) - for _ in range(num_layers) - ] - ) - - def forward(self, x_target, x_enc, mask): - y = x_target - y = self.token_embeddings(y) - y = self.dropout(y) - for layer in self.layers: - y = layer(y, x_enc, mask) - return y - - -class Transformer(base.Arch): - """A Kind of Transformer Model. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). - output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). - num_var_max (int): Maximum number of variables. 
- vocab_size (int): Size of vocab. Size of unary operators = 1, binary operators = 2. - seq_length (int): Length of sequance. - d_model (int, optional): The innermost dimension of model. Defaults to 256. - heads (int, optional): The number of independent heads for the multi-head attention layers. Defaults to 4. - num_layers_enc (int, optional): The number of encoders. Defaults to 4. - num_layers_dec (int, optional): The number of decoders. Defaults to 8. - dropout (float, optional): Dropout regularization. Defaults to 0.0. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.Transformer( - ... input_keys=("input", "target_seq"), - ... output_keys=("output",), - ... num_var_max=7, - ... vocab_size=20, - ... seq_length=30, - ... ) - >>> input_dict = {"input": paddle.rand([512, 50, 7, 1]), - ... "target_seq": paddle.rand([512, 30])} - >>> output_dict = model(input_dict) - >>> print(output_dict["output"].shape) - [512, 30, 20] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - num_var_max: int, - vocab_size: int, - seq_length: int, - d_model: int = 256, - heads: int = 4, - num_layers_enc: int = 4, - num_layers_dec: int = 8, - act: str = "relu", - dropout: float = 0.0, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - self.num_var_max = num_var_max - self.vocab_size = vocab_size - self.seq_length = seq_length - self.d_model = d_model - self.heads = heads - self.num_layers_enc = num_layers_enc - self.num_layers_dec = num_layers_dec - self.act = act - self.dropout = dropout - - self.encoder = Encoder( - num_layers_enc, num_var_max, d_model, heads, act="relu", dropout=dropout - ) - self.decoder = Decoder( - num_layers_dec, - vocab_size, - seq_length, - d_model, - heads, - act="relu", - dropout=dropout, - ) - self.last_layer = paddle.nn.Linear(in_features=d_model, out_features=vocab_size) - - def get_mask(self, target_seq): - padding_mask = paddle.equal(target_seq, 0).unsqueeze(axis=1).unsqueeze(axis=1) - future_mask = paddle.triu( - paddle.ones(shape=[target_seq.shape[1], target_seq.shape[1]]), - diagonal=1, - ).astype(dtype="bool") - mask = paddle.logical_or(x=padding_mask, y=future_mask) - return mask - - def forward_tensor(self, x_lst): - y, target_seq = x_lst[0], x_lst[1] - mask = self.get_mask(target_seq) - y_enc = self.encoder(y) - y = self.decoder(target_seq, y_enc, mask) - y = self.last_layer(y) - return y - - def forward(self, x): - if self._input_transform is not None: - x = self._input_transform(x) - - x_lst = [x[key] for key in self.input_keys] # input, target_seq - y = self.forward_tensor(x_lst) - y = self.split_to_dict(y, self.output_keys, axis=-1) - - if self._output_transform is not None: - y = self._output_transform(x, y) - return y - - @paddle.no_grad() - def decode_process( - self, dataset: paddle.Tensor, complete_func: Callable - ) -> paddle.Tensor: - """Greedy decode with the Transformer model, decode until the equation tree is completed. - - Args: - dataset (paddle.Tensor): Tabular dataset. - complete_func (Callable): Function used to calculate whether inference is complete. 
- """ - encoder_output = self.encoder(dataset) - decoder_output = paddle.zeros( - shape=(dataset.shape[0], self.seq_length + 1), dtype=paddle.int64 - ) - decoder_output[:, 0] = 1 - is_complete = paddle.zeros(shape=dataset.shape[0], dtype=paddle.bool) - for n1 in range(self.seq_length): - padding_mask = ( - paddle.equal(x=decoder_output[:, :-1], y=0) - .unsqueeze(axis=1) - .unsqueeze(axis=1) - ) - future_mask = paddle.triu( - x=paddle.ones(shape=[self.seq_length, self.seq_length]), diagonal=1 - ).astype(dtype=paddle.bool) - mask_dec = paddle.logical_or(x=padding_mask, y=future_mask) - y_dec = self.decoder( - x_target=decoder_output[:, :-1], - x_enc=encoder_output, - mask=mask_dec, - ) - y_mlp = self.last_layer(y_dec) - # set value depending on complete condition - decoder_output[:, n1 + 1] = paddle.where( - is_complete, 0, paddle.argmax(y_mlp[:, n1], axis=-1) - ) - # set complete condition - for n2 in range(dataset.shape[0]): - if complete_func(decoder_output[n2, 1:]): - is_complete[n2] = True - return decoder_output +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Reference: https://github.com/omron-sinicx/transformer4sr +""" + +from __future__ import annotations + +import math +from typing import Callable +from typing import Tuple + +import paddle +import paddle.nn as nn + +from ppsci.arch import activation as act_mod +from ppsci.arch import base + + +def transpose_aux_func(dims, dim0, dim1): + perm = list(range(dims)) + perm[dim0], perm[dim1] = perm[dim1], perm[dim0] + return perm + + +class MultiHeadAttention(nn.Layer): + def __init__(self, heads, d_model): + super().__init__() + self.heads = heads + self.d_model = d_model + assert d_model % heads == 0 + self.d_k = d_model // heads + self.W_Q = nn.Linear(in_features=d_model, out_features=d_model) + self.W_K = nn.Linear(in_features=d_model, out_features=d_model) + self.W_V = nn.Linear(in_features=d_model, out_features=d_model) + self.W_O = nn.Linear(in_features=d_model, out_features=d_model) + + def scaled_dot_product_attention(self, Q, K, V, mask=None): + scores = paddle.matmul( + x=Q, y=K.transpose(perm=transpose_aux_func(K.ndim, -1, -2)) + ) / math.sqrt(self.d_k) + if mask is not None: + scores = paddle.where( + condition=mask, + x=paddle.to_tensor(data=[-1e9], dtype="float32"), + y=scores, + ) + weights = nn.functional.softmax(x=scores, axis=-1) + return paddle.matmul(x=weights, y=V) + + def forward(self, Q, K, V, mask=None): + Q_temp = paddle.reshape( + x=self.W_Q(Q), + shape=[i for i in tuple(Q.shape)[:-1]] + [self.heads] + [self.d_k], + ).transpose( + perm=transpose_aux_func( + paddle.reshape( + x=self.W_Q(Q), + shape=[i for i in tuple(Q.shape)[:-1]] + [self.heads] + [self.d_k], + ).ndim, + 1, + 2, + ) + ) + K_temp = paddle.reshape( + x=self.W_K(K), + shape=[i for i in tuple(K.shape)[:-1]] + [self.heads] + [self.d_k], + ).transpose( + perm=transpose_aux_func( + paddle.reshape( + x=self.W_K(K), + shape=[i for i in tuple(K.shape)[:-1]] + [self.heads] + [self.d_k], + 
).ndim, + 1, + 2, + ) + ) + V_temp = paddle.reshape( + x=self.W_V(V), + shape=[i for i in tuple(V.shape)[:-1]] + [self.heads] + [self.d_k], + ).transpose( + perm=transpose_aux_func( + paddle.reshape( + x=self.W_V(V), + shape=[i for i in tuple(V.shape)[:-1]] + [self.heads] + [self.d_k], + ).ndim, + 1, + 2, + ) + ) + sdpa = self.scaled_dot_product_attention( + Q_temp, K_temp, V_temp, mask + ).transpose( + perm=transpose_aux_func( + self.scaled_dot_product_attention(Q_temp, K_temp, V_temp, mask).ndim, + 1, + 2, + ) + ) + sdpa = paddle.reshape( + x=sdpa, shape=[i for i in tuple(sdpa.shape)[:-2]] + [self.d_model] + ) + y_mha = self.W_O(sdpa) + return y_mha + + +class MLP(nn.Layer): + def __init__(self, list_dims, act="relu", dropout=0.0): + super().__init__() + self.layers = nn.LayerList() + for i in range(len(list_dims) - 1): + self.layers.append( + nn.Linear(in_features=list_dims[i], out_features=list_dims[i + 1]) + ) + self.layers.append(act_mod.get_activation(act) if act else None) + self.layers.append(nn.Dropout(p=dropout)) + + def forward(self, x): + y = x + for layer in self.layers: + y = layer(y) + return y + + +class EncoderLayerMix(nn.Layer): + def __init__(self, in_features, d_model, heads, act="relu", dropout=0.0): + super().__init__() + self.mlp = MLP([in_features, d_model, d_model], act="relu", dropout=dropout) + self.multihead_attention = MultiHeadAttention(heads, d_model) + self.dropout = nn.Dropout(p=dropout) + self.norm = nn.LayerNorm(normalized_shape=d_model) + + def forward(self, x): + y = x + y = paddle.flatten(y, start_axis=2) + y = self.mlp(y) + y = self.multihead_attention(y, y, y, mask=None) + y = self.dropout(y) + y = paddle.unsqueeze(y, axis=2) + y = x + y + y = self.norm(y) + return y + + +class Encoder(nn.Layer): + def __init__( + self, num_layers, num_var_max, d_model, heads, act="relu", dropout=0.0 + ): + super().__init__() + self.first_mlp = MLP([1, d_model, d_model], act="relu", dropout=dropout) + self.layers = nn.LayerList( + sublayers=[ + EncoderLayerMix( + d_model * num_var_max, d_model, heads, act="relu", dropout=dropout + ) + for _ in range(num_layers) + ] + ) + self.last_mlp = MLP([d_model, d_model], act="relu", dropout=dropout) + + def forward(self, x): + y = x + y = self.first_mlp(y) + for layer in self.layers: + y = layer(y) + y = self.last_mlp(y) + y = paddle.max(y, axis=1) + return y + + +class TokenEmbeddings(nn.Layer): + def __init__(self, vocab_size, seq_length, d_model, dropout=0.0): + super().__init__() + self.embed = nn.Embedding(num_embeddings=vocab_size, embedding_dim=d_model) + self.seq_length = seq_length + self.d_model = d_model + self.dropout = nn.Dropout(dropout) + self.get_pe_num() + + def get_pe_num(self): + self.pe = paddle.zeros(shape=[self.seq_length, self.d_model]) + numerator = paddle.arange( + self.seq_length, dtype=paddle.get_default_dtype() + ).unsqueeze(axis=1) + denominator = paddle.pow( + x=paddle.to_tensor(10e4, dtype=paddle.get_default_dtype()), + y=paddle.arange(self.d_model, step=2) / self.d_model, + ).unsqueeze(axis=0) + self.pe[:, 0::2] = paddle.sin(x=numerator / denominator) + self.pe[:, 1::2] = paddle.cos(x=numerator / denominator) + self.pe.stop_gradient = True + + def forward(self, x): + # embedding + y = x + y = self.embed(y) * math.sqrt(self.d_model) + # position encoding + y = self.dropout(y + self.pe) + return y + + +class DecoderLayer(nn.Layer): + def __init__(self, heads, d_model, act="relu", dropout=0.0): + super().__init__() + self.multihead_attention_1 = MultiHeadAttention(heads, d_model) + self.dropout_1 = 
nn.Dropout(p=dropout) + self.norm_1 = nn.LayerNorm(d_model) + + self.multihead_attention_2 = MultiHeadAttention(heads, d_model) + self.dropout_2 = nn.Dropout(p=dropout) + self.norm_2 = nn.LayerNorm(d_model) + + self.mlp = MLP([d_model, 2 * d_model, d_model], act="relu", dropout=dropout) + self.norm_3 = nn.LayerNorm(d_model) + + def forward(self, x_emb, x_enc, mask): + y_mha_1 = self.multihead_attention_1(x_emb, x_emb, x_emb, mask=mask) + y_mha_1 = self.dropout_1(y_mha_1) + y = y_mha_1 + x_emb + y = self.norm_1(y) + y_mha_2 = self.multihead_attention_2(y, x_enc, x_enc, mask=None) + y_mha_2 = self.dropout_2(y_mha_2) + y = y + y_mha_2 + y = self.norm_2(y) + y_mlp = self.mlp(y) + y = y + y_mlp + y = self.norm_3(y) + return y + + +class Decoder(nn.Layer): + def __init__( + self, + num_layers, + vocab_size, + seq_length, + d_model, + heads, + act="relu", + dropout=0.0, + ): + super().__init__() + self.token_embeddings = TokenEmbeddings( + vocab_size, seq_length, d_model, dropout + ) + self.dropout = nn.Dropout(p=dropout) + self.layers = nn.LayerList( + sublayers=[ + DecoderLayer(heads, d_model, act="relu", dropout=dropout) + for _ in range(num_layers) + ] + ) + + def forward(self, x_target, x_enc, mask): + y = x_target + y = self.token_embeddings(y) + y = self.dropout(y) + for layer in self.layers: + y = layer(y, x_enc, mask) + return y + + +class Transformer(base.Arch): + """A Kind of Transformer Model. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("x", "y", "z"). + output_keys (Tuple[str, ...]): Name of output keys, such as ("u", "v", "w"). + num_var_max (int): Maximum number of variables. + vocab_size (int): Size of vocab. Size of unary operators = 1, binary operators = 2. + seq_length (int): Length of sequance. + d_model (int, optional): The innermost dimension of model. Defaults to 256. + heads (int, optional): The number of independent heads for the multi-head attention layers. Defaults to 4. + num_layers_enc (int, optional): The number of encoders. Defaults to 4. + num_layers_dec (int, optional): The number of decoders. Defaults to 8. + dropout (float, optional): Dropout regularization. Defaults to 0.0. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.Transformer( + ... input_keys=("input", "target_seq"), + ... output_keys=("output",), + ... num_var_max=7, + ... vocab_size=20, + ... seq_length=30, + ... ) + >>> input_dict = {"input": paddle.rand([512, 50, 7, 1]), + ... 
"target_seq": paddle.rand([512, 30])} + >>> output_dict = model(input_dict) + >>> print(output_dict["output"].shape) + [512, 30, 20] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + num_var_max: int, + vocab_size: int, + seq_length: int, + d_model: int = 256, + heads: int = 4, + num_layers_enc: int = 4, + num_layers_dec: int = 8, + act: str = "relu", + dropout: float = 0.0, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + self.num_var_max = num_var_max + self.vocab_size = vocab_size + self.seq_length = seq_length + self.d_model = d_model + self.heads = heads + self.num_layers_enc = num_layers_enc + self.num_layers_dec = num_layers_dec + self.act = act + self.dropout = dropout + + self.encoder = Encoder( + num_layers_enc, num_var_max, d_model, heads, act="relu", dropout=dropout + ) + self.decoder = Decoder( + num_layers_dec, + vocab_size, + seq_length, + d_model, + heads, + act="relu", + dropout=dropout, + ) + self.last_layer = paddle.nn.Linear(in_features=d_model, out_features=vocab_size) + + def get_mask(self, target_seq): + padding_mask = paddle.equal(target_seq, 0).unsqueeze(axis=1).unsqueeze(axis=1) + future_mask = paddle.triu( + paddle.ones(shape=[target_seq.shape[1], target_seq.shape[1]]), + diagonal=1, + ).astype(dtype="bool") + mask = paddle.logical_or(x=padding_mask, y=future_mask) + return mask + + def forward_tensor(self, x_lst): + y, target_seq = x_lst[0], x_lst[1] + mask = self.get_mask(target_seq) + y_enc = self.encoder(y) + y = self.decoder(target_seq, y_enc, mask) + y = self.last_layer(y) + return y + + def forward(self, x): + if self._input_transform is not None: + x = self._input_transform(x) + + x_lst = [x[key] for key in self.input_keys] # input, target_seq + y = self.forward_tensor(x_lst) + y = self.split_to_dict(y, self.output_keys, axis=-1) + + if self._output_transform is not None: + y = self._output_transform(x, y) + return y + + @paddle.no_grad() + def decode_process( + self, dataset: paddle.Tensor, complete_func: Callable + ) -> paddle.Tensor: + """Greedy decode with the Transformer model, decode until the equation tree is completed. + + Args: + dataset (paddle.Tensor): Tabular dataset. + complete_func (Callable): Function used to calculate whether inference is complete. 
+        """
+        encoder_output = self.encoder(dataset)
+        decoder_output = paddle.zeros(
+            shape=(dataset.shape[0], self.seq_length + 1), dtype=paddle.int64
+        )
+        decoder_output[:, 0] = 1
+        is_complete = paddle.zeros(shape=dataset.shape[0], dtype=paddle.bool)
+        for n1 in range(self.seq_length):
+            padding_mask = (
+                paddle.equal(x=decoder_output[:, :-1], y=0)
+                .unsqueeze(axis=1)
+                .unsqueeze(axis=1)
+            )
+            future_mask = paddle.triu(
+                x=paddle.ones(shape=[self.seq_length, self.seq_length]), diagonal=1
+            ).astype(dtype=paddle.bool)
+            mask_dec = paddle.logical_or(x=padding_mask, y=future_mask)
+            y_dec = self.decoder(
+                x_target=decoder_output[:, :-1],
+                x_enc=encoder_output,
+                mask=mask_dec,
+            )
+            y_mlp = self.last_layer(y_dec)
+            # set value depending on complete condition
+            decoder_output[:, n1 + 1] = paddle.where(
+                is_complete, 0, paddle.argmax(y_mlp[:, n1], axis=-1)
+            )
+            # set complete condition
+            for n2 in range(dataset.shape[0]):
+                if complete_func(decoder_output[n2, 1:]):
+                    is_complete[n2] = True
+        return decoder_output
diff --git a/ppsci/arch/unetex.py b/ppsci/arch/unetex.py
index d0ba170464..7d28d7cf59 100644
--- a/ppsci/arch/unetex.py
+++ b/ppsci/arch/unetex.py
@@ -1,290 +1,290 @@
-# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
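As a usage note for the greedy decoder in the transformer.py hunk above: decode_process takes a tabular dataset tensor and a user-supplied complete_func that decides, per sample, whether the generated token sequence forms a finished equation tree. The sketch below is hypothetical: is_tree_complete is a stand-in criterion (the real check depends on the token vocabulary and its arities), and the constructor arguments reuse the class docstring example.

import paddle

import ppsci

model = ppsci.arch.Transformer(
    input_keys=("input", "target_seq"),
    output_keys=("output",),
    num_var_max=7,
    vocab_size=20,
    seq_length=30,
)

def is_tree_complete(token_ids: paddle.Tensor) -> bool:
    # Placeholder criterion (assumption): treat the sequence as complete once
    # 10 non-padding tokens have been emitted; a real implementation would
    # verify equation-tree arity for the chosen vocabulary.
    return bool((token_ids != 0).sum() >= 10)

dataset = paddle.rand([8, 50, 7, 1])  # [batch, samples, num_var_max, 1], matching the docstring example
tokens = model.decode_process(dataset, is_tree_complete)
print(tokens.shape)  # [8, 31]: start token plus seq_length greedy predictions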
- -from typing import Optional -from typing import Tuple -from typing import Type - -import paddle -from paddle import nn - -from ppsci.arch import base - - -def create_layer( - in_channel, - out_channel, - kernel_size, - weight_norm=True, - batch_norm=True, - activation=nn.ReLU, - convolution=nn.Conv2D, -): - if kernel_size % 2 == 0: - raise ValueError("kernel_size should even number") - conv = convolution(in_channel, out_channel, kernel_size, padding=kernel_size // 2) - if weight_norm: - conv = nn.util.weight_norm(conv) - layer = [] - layer.append(conv) - if activation is not None: - layer.append(activation()) - if batch_norm: - layer.append(nn.BatchNorm2D(out_channel)) - return nn.Sequential(*layer) - - -def create_encoder_block( - in_channel, - out_channel, - kernel_size, - weight_norm=True, - batch_norm=True, - activation=nn.ReLU, - layers=2, -): - encoder = [] - encoder.append( - create_layer( - in_channel, - out_channel, - kernel_size, - weight_norm, - batch_norm, - activation, - nn.Conv2D, - ) - ) - for i in range(layers - 1): - encoder.append( - create_layer( - out_channel, - out_channel, - kernel_size, - weight_norm, - batch_norm, - activation, - nn.Conv2D, - ) - ) - return nn.Sequential(*encoder) - - -def create_decoder_block( - in_channel, - out_channel, - kernel_size, - weight_norm=True, - batch_norm=True, - activation=nn.ReLU, - layers=2, - final_layer=False, -): - decoder = [] - for i in range(layers): - _in = in_channel - _out = in_channel - _batch_norm = batch_norm - _activation = activation - if i == 0: - _in = in_channel * 2 - if i == layers - 1: - _out = out_channel - if final_layer: - _batch_norm = False - _activation = None - decoder.append( - create_layer( - _in, - _out, - kernel_size, - weight_norm, - _batch_norm, - _activation, - nn.Conv2DTranspose, - ) - ) - return nn.Sequential(*decoder) - - -def create_encoder( - in_channel, filters, kernel_size, wn=True, bn=True, activation=nn.ReLU, layers=2 -): - encoder = [] - for i in range(len(filters)): - encoder_layer = create_encoder_block( - in_channel if i == 0 else filters[i - 1], - filters[i], - kernel_size, - wn, - bn, - activation, - layers, - ) - encoder = encoder + [encoder_layer] - return nn.Sequential(*encoder) - - -def create_decoder( - out_channel, - filters, - kernel_size, - weight_norm=True, - batch_norm=True, - activation=nn.ReLU, - layers=2, -): - decoder = [] - for i in range(len(filters)): - if i == 0: - decoder_layer = create_decoder_block( - filters[i], - out_channel, - kernel_size, - weight_norm, - batch_norm, - activation, - layers, - final_layer=True, - ) - else: - decoder_layer = create_decoder_block( - filters[i], - filters[i - 1], - kernel_size, - weight_norm, - batch_norm, - activation, - layers, - final_layer=False, - ) - decoder = [decoder_layer] + decoder - return nn.Sequential(*decoder) - - -class UNetEx(base.Arch): - """U-Net Extension for CFD. - - Reference: [Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020.](https://arxiv.org/abs/2004.08826) - - Args: - input_key (str): Name of function data for input. - output_key (str): Name of function data for output. - in_channel (int): Number of channels of input. - out_channel (int): Number of channels of output. - kernel_size (int, optional): Size of kernel of convolution layer. Defaults to 3. - filters (Tuple[int, ...], optional): Number of filters. Defaults to (16, 32, 64). 
- layers (int, optional): Number of encoders or decoders. Defaults to 3. - weight_norm (bool, optional): Whether use weight normalization layer. Defaults to True. - batch_norm (bool, optional): Whether add batch normalization layer. Defaults to True. - activation (Type[nn.Layer], optional): Name of activation function. Defaults to nn.ReLU. - final_activation (Optional[Type[nn.Layer]]): Name of final activation function. Defaults to None. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.UNetEx( - ... input_key="input", - ... output_key="output", - ... in_channel=3, - ... out_channel=3, - ... kernel_size=5, - ... filters=(4, 4, 4, 4), - ... layers=3, - ... weight_norm=False, - ... batch_norm=False, - ... activation=None, - ... final_activation=None, - ... ) - >>> input_dict = {'input': paddle.rand([4, 3, 4, 4])} - >>> output_dict = model(input_dict) - >>> print(output_dict['output']) # doctest: +SKIP - >>> print(output_dict['output'].shape) - [4, 3, 4, 4] - """ - - def __init__( - self, - input_key: str, - output_key: str, - in_channel: int, - out_channel: int, - kernel_size: int = 3, - filters: Tuple[int, ...] = (16, 32, 64), - layers: int = 3, - weight_norm: bool = True, - batch_norm: bool = True, - activation: Type[nn.Layer] = nn.ReLU, - final_activation: Optional[Type[nn.Layer]] = None, - ): - if len(filters) == 0: - raise ValueError("The filters shouldn't be empty ") - - super().__init__() - self.input_keys = (input_key,) - self.output_keys = (output_key,) - self.final_activation = final_activation - self.encoder = create_encoder( - in_channel, - filters, - kernel_size, - weight_norm, - batch_norm, - activation, - layers, - ) - decoders = [ - create_decoder( - 1, filters, kernel_size, weight_norm, batch_norm, activation, layers - ) - for i in range(out_channel) - ] - self.decoders = nn.Sequential(*decoders) - - def encode(self, x): - tensors = [] - indices = [] - sizes = [] - for encoder in self.encoder: - x = encoder(x) - sizes.append(x.shape) - tensors.append(x) - x, ind = nn.functional.max_pool2d(x, 2, 2, return_mask=True) - indices.append(ind) - return x, tensors, indices, sizes - - def decode(self, x, tensors, indices, sizes): - y = [] - for _decoder in self.decoders: - _x = x - _tensors = tensors[:] - _indices = indices[:] - _sizes = sizes[:] - for decoder in _decoder: - tensor = _tensors.pop() - size = _sizes.pop() - indice = _indices.pop() - # upsample operations - _x = nn.functional.max_unpool2d(_x, indice, 2, 2, output_size=size) - _x = paddle.concat([tensor, _x], axis=1) - _x = decoder(_x) - y.append(_x) - return paddle.concat(y, axis=1) - - def forward(self, x): - x = x[self.input_keys[0]] - x, tensors, indices, sizes = self.encode(x) - x = self.decode(x, tensors, indices, sizes) - if self.final_activation is not None: - x = self.final_activation(x) - return {self.output_keys[0]: x} +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Optional +from typing import Tuple +from typing import Type + +import paddle +from paddle import nn + +from ppsci.arch import base + + +def create_layer( + in_channel, + out_channel, + kernel_size, + weight_norm=True, + batch_norm=True, + activation=nn.ReLU, + convolution=nn.Conv2D, +): + if kernel_size % 2 == 0: + raise ValueError("kernel_size should even number") + conv = convolution(in_channel, out_channel, kernel_size, padding=kernel_size // 2) + if weight_norm: + conv = nn.util.weight_norm(conv) + layer = [] + layer.append(conv) + if activation is not None: + layer.append(activation()) + if batch_norm: + layer.append(nn.BatchNorm2D(out_channel)) + return nn.Sequential(*layer) + + +def create_encoder_block( + in_channel, + out_channel, + kernel_size, + weight_norm=True, + batch_norm=True, + activation=nn.ReLU, + layers=2, +): + encoder = [] + encoder.append( + create_layer( + in_channel, + out_channel, + kernel_size, + weight_norm, + batch_norm, + activation, + nn.Conv2D, + ) + ) + for i in range(layers - 1): + encoder.append( + create_layer( + out_channel, + out_channel, + kernel_size, + weight_norm, + batch_norm, + activation, + nn.Conv2D, + ) + ) + return nn.Sequential(*encoder) + + +def create_decoder_block( + in_channel, + out_channel, + kernel_size, + weight_norm=True, + batch_norm=True, + activation=nn.ReLU, + layers=2, + final_layer=False, +): + decoder = [] + for i in range(layers): + _in = in_channel + _out = in_channel + _batch_norm = batch_norm + _activation = activation + if i == 0: + _in = in_channel * 2 + if i == layers - 1: + _out = out_channel + if final_layer: + _batch_norm = False + _activation = None + decoder.append( + create_layer( + _in, + _out, + kernel_size, + weight_norm, + _batch_norm, + _activation, + nn.Conv2DTranspose, + ) + ) + return nn.Sequential(*decoder) + + +def create_encoder( + in_channel, filters, kernel_size, wn=True, bn=True, activation=nn.ReLU, layers=2 +): + encoder = [] + for i in range(len(filters)): + encoder_layer = create_encoder_block( + in_channel if i == 0 else filters[i - 1], + filters[i], + kernel_size, + wn, + bn, + activation, + layers, + ) + encoder = encoder + [encoder_layer] + return nn.Sequential(*encoder) + + +def create_decoder( + out_channel, + filters, + kernel_size, + weight_norm=True, + batch_norm=True, + activation=nn.ReLU, + layers=2, +): + decoder = [] + for i in range(len(filters)): + if i == 0: + decoder_layer = create_decoder_block( + filters[i], + out_channel, + kernel_size, + weight_norm, + batch_norm, + activation, + layers, + final_layer=True, + ) + else: + decoder_layer = create_decoder_block( + filters[i], + filters[i - 1], + kernel_size, + weight_norm, + batch_norm, + activation, + layers, + final_layer=False, + ) + decoder = [decoder_layer] + decoder + return nn.Sequential(*decoder) + + +class UNetEx(base.Arch): + """U-Net Extension for CFD. + + Reference: [Ribeiro M D, Rehman A, Ahmed S, et al. DeepCFD: Efficient steady-state laminar flow approximation with deep convolutional neural networks[J]. arXiv preprint arXiv:2004.08826, 2020.](https://arxiv.org/abs/2004.08826) + + Args: + input_key (str): Name of function data for input. + output_key (str): Name of function data for output. + in_channel (int): Number of channels of input. + out_channel (int): Number of channels of output. + kernel_size (int, optional): Size of kernel of convolution layer. Defaults to 3. + filters (Tuple[int, ...], optional): Number of filters. Defaults to (16, 32, 64). 
+ layers (int, optional): Number of encoders or decoders. Defaults to 3. + weight_norm (bool, optional): Whether use weight normalization layer. Defaults to True. + batch_norm (bool, optional): Whether add batch normalization layer. Defaults to True. + activation (Type[nn.Layer], optional): Name of activation function. Defaults to nn.ReLU. + final_activation (Optional[Type[nn.Layer]]): Name of final activation function. Defaults to None. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.UNetEx( + ... input_key="input", + ... output_key="output", + ... in_channel=3, + ... out_channel=3, + ... kernel_size=5, + ... filters=(4, 4, 4, 4), + ... layers=3, + ... weight_norm=False, + ... batch_norm=False, + ... activation=None, + ... final_activation=None, + ... ) + >>> input_dict = {'input': paddle.rand([4, 3, 4, 4])} + >>> output_dict = model(input_dict) + >>> print(output_dict['output']) # doctest: +SKIP + >>> print(output_dict['output'].shape) + [4, 3, 4, 4] + """ + + def __init__( + self, + input_key: str, + output_key: str, + in_channel: int, + out_channel: int, + kernel_size: int = 3, + filters: Tuple[int, ...] = (16, 32, 64), + layers: int = 3, + weight_norm: bool = True, + batch_norm: bool = True, + activation: Type[nn.Layer] = nn.ReLU, + final_activation: Optional[Type[nn.Layer]] = None, + ): + if len(filters) == 0: + raise ValueError("The filters shouldn't be empty ") + + super().__init__() + self.input_keys = (input_key,) + self.output_keys = (output_key,) + self.final_activation = final_activation + self.encoder = create_encoder( + in_channel, + filters, + kernel_size, + weight_norm, + batch_norm, + activation, + layers, + ) + decoders = [ + create_decoder( + 1, filters, kernel_size, weight_norm, batch_norm, activation, layers + ) + for i in range(out_channel) + ] + self.decoders = nn.Sequential(*decoders) + + def encode(self, x): + tensors = [] + indices = [] + sizes = [] + for encoder in self.encoder: + x = encoder(x) + sizes.append(x.shape) + tensors.append(x) + x, ind = nn.functional.max_pool2d(x, 2, 2, return_mask=True) + indices.append(ind) + return x, tensors, indices, sizes + + def decode(self, x, tensors, indices, sizes): + y = [] + for _decoder in self.decoders: + _x = x + _tensors = tensors[:] + _indices = indices[:] + _sizes = sizes[:] + for decoder in _decoder: + tensor = _tensors.pop() + size = _sizes.pop() + indice = _indices.pop() + # upsample operations + _x = nn.functional.max_unpool2d(_x, indice, 2, 2, output_size=size) + _x = paddle.concat([tensor, _x], axis=1) + _x = decoder(_x) + y.append(_x) + return paddle.concat(y, axis=1) + + def forward(self, x): + x = x[self.input_keys[0]] + x, tensors, indices, sizes = self.encode(x) + x = self.decode(x, tensors, indices, sizes) + if self.final_activation is not None: + x = self.final_activation(x) + return {self.output_keys[0]: x} diff --git a/ppsci/arch/unonet.py b/ppsci/arch/unonet.py index 255855d250..238295c443 100644 --- a/ppsci/arch/unonet.py +++ b/ppsci/arch/unonet.py @@ -1,289 +1,289 @@ -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F - -from ppsci.arch import base -from ppsci.arch import fno_block - - -class UNONet(base.Arch): - """N-Dimensional U-Shaped Neural Operator. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). 
- in_channels (int, optional): Number of input channels. - out_channels (int, optional): Number of output channels. - hidden_channels (int): Width of the FNO (i.e. number of channels). - lifting_channels (int, optional): Number of hidden channels of the lifting block of the FNO. - Defaults to 256. - projection_channels (int, optional): Number of hidden channels of the projection block of the FNO. - Defaults to 256. - n_layers (int, optional): Number of Fourier Layers. Defaults to 4. - uno_out_channels (Tuple[int, ...], optional): Number of output channel of each Fourier Layers. - Eaxmple: For a Five layer UNO uno_out_channels can be [32,64,64,64,32].c - uno_n_modes (Tuple[Tuple[int, ...], ...]): Number of Fourier Modes to use in integral operation of each - Fourier Layers (along each dimension). - Example: For a five layer UNO with 2D input the uno_n_modes can be: [[5,5],[5,5],[5,5],[5,5],[5,5]]. Defaults to None. - uno_scalings (Tuple[Tuple[int, ...], ...]): Scaling Factors for each Fourier Layers. - Example: For a five layer UNO with 2D input, the uno_scalings can be : [[1.0,1.0],[0.5,0.5],[1,1],[1,1],[2,2]].Defaults to None. - horizontal_skips_map (Dict, optional): A map {...., b: a, ....} denoting horizontal skip connection - from a-th layer to b-th layer. If None default skip connection is applied. - Example: For a 5 layer UNO architecture, the skip connections can be horizontal_skips_map ={4:0,3:1}.Defaults to None. - incremental_n_modes (tuple[int],optional): Incremental number of modes to use in Fourier domain. - * If not None, this allows to incrementally increase the number of modes in Fourier domain - during training. Has to verify n <= N for (n, m) in zip(incremental_n_modes, n_modes). - * If None, all the n_modes are used. - This can be updated dynamically during training.Defaults to None. - use_mlp (bool, optional): Whether to use an MLP layer after each FNO block. Defaults to False. - mlp (Dict[str, float], optional): Parameters of the MLP. {'expansion': float, 'dropout': float}. - Defaults to None. - non_linearity (nn.functional, optional): Non-Linearity module to use. Defaults to F.gelu. - norm (str, optional): Normalization layer to use. Defaults to None. - ada_in_features (Optional[int],optional): The input channles of the adaptive normalization.Defaults to - None. - preactivation (bool, optional): Whether to use resnet-style preactivation. Defaults to False. - fno_skip (str, optional): Type of skip connection to use for fno_block. Defaults to "linear". - horizontal_skip (str, optional): Type of skip connection to use for horizontal skip. Defaults to - "linear". - mlp_skip (str, optional): Type of skip connection to use for mlp. Defaults to "soft-gating". - separable (bool, optional): Whether to use a depthwise separable spectral convolution. - Defaults to False. - factorization (str, optional): Tensor factorization of the parameters weight to use. - * If None, a dense tensor parametrizes the Spectral convolutions. - * Otherwise, the specified tensor factorization is used. Defaults to "Tucker". - rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 1.0. - joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a - single tensor (vs one per layer). Defaults to False. - implementation (str, optional): {'factorized', 'reconstructed'}, optional. Defaults to "factorized". 
- If factorization is not None, forward mode to use:: - * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass. - * `factorized` : the input is directly contracted with the factors of the decomposition. - domain_padding (Optional[Union[list, float, int]], optional): Whether to use percentage of padding. - Defaults to None. - domain_padding_mode (str, optional): {'symmetric', 'one-sided'}, optional - How to perform domain padding, by default 'one-sided'. Defaults to "one-sided". - fft_norm (str, optional): The normalization mode for the FFT. Defaults to "forward". - patching_levels (int, optional): Number of patching levels to use. Defaults to 0. - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - in_channels: int, - out_channels: int, - hidden_channels: int, - lifting_channels: int = 256, - projection_channels: int = 256, - n_layers: int = 4, - uno_out_channels: Tuple[int, ...] = None, - uno_n_modes: Tuple[Tuple[int, ...], ...] = None, - uno_scalings: Tuple[Tuple[int, ...], ...] = None, - horizontal_skips_map: Dict = None, - incremental_n_modes: Tuple[int, ...] = None, - use_mlp: bool = False, - mlp: Optional[Dict[str, float]] = None, - non_linearity: nn.functional = F.gelu, - norm: str = None, - ada_in_features: Optional[int] = None, - preactivation: bool = False, - fno_skip: str = "linear", - horizontal_skip: str = "linear", - mlp_skip: str = "soft-gating", - separable: bool = False, - factorization: str = None, - rank: float = 1.0, - joint_factorization: bool = False, - implementation: str = "factorized", - domain_padding: Optional[Union[list, float, int]] = None, - domain_padding_mode: str = "one-sided", - fft_norm: str = "forward", - patching_levels: int = 0, - **kwargs, - ): - super().__init__() - self.input_keys = input_keys - self.output_keys = output_keys - if uno_out_channels is None: - raise ValueError("uno_out_channels can not be None") - if uno_n_modes is None: - raise ValueError("uno_n_modes can not be None") - if uno_scalings is None: - raise ValueError("uno_scalings can not be None") - - if len(uno_out_channels) != n_layers: - raise ValueError("Output channels for all layers are not given") - - if len(uno_n_modes) != n_layers: - raise ValueError("Number of modes for all layers are not given") - - if len(uno_scalings) != n_layers: - raise ValueError("Scaling factor for all layers are not given") - - self.n_dim = len(uno_n_modes[0]) - self.uno_out_channels = uno_out_channels - self.uno_n_modes = uno_n_modes - self.uno_scalings = uno_scalings - - self.hidden_channels = hidden_channels - self.lifting_channels = lifting_channels - self.projection_channels = projection_channels - self.in_channels = in_channels - if patching_levels: - self.in_channels = self.in_channels * patching_levels + 1 - self.out_channels = out_channels - self.n_layers = n_layers - self.horizontal_skips_map = horizontal_skips_map - self.joint_factorization = joint_factorization - self.non_linearity = non_linearity - self.rank = rank - self.factorization = factorization - self.fno_skip = (fno_skip,) - self.mlp_skip = (mlp_skip,) - self.fft_norm = fft_norm - self.implementation = implementation - self.separable = separable - self.preactivation = preactivation - self._incremental_n_modes = incremental_n_modes - self.mlp = mlp - # constructing default skip maps - if self.horizontal_skips_map is None: - self.horizontal_skips_map = {} - for i in range( - 0, - n_layers // 2, - ): - # example, if n_layers = 5, then 
4:0, 3:1 - self.horizontal_skips_map[n_layers - i - 1] = i - # self.uno_scalings may be a 1d list specifying uniform scaling factor at each layer - # or a 2d list, where each row specifies scaling factors along each dimention. - # To get the final (end to end) scaling factors we need to multiply - # the scaling factors (a list) of all layer. - - self.end_to_end_scaling_factor = [1] * len(self.uno_scalings[0]) - # multiplying scaling factors - for k in self.uno_scalings: - self.end_to_end_scaling_factor = [ - i * j for (i, j) in zip(self.end_to_end_scaling_factor, k) - ] - - # list with a single element is replaced by the scaler. - if len(self.end_to_end_scaling_factor) == 1: - self.end_to_end_scaling_factor = self.end_to_end_scaling_factor[0] - - if isinstance(self.end_to_end_scaling_factor, (float, int)): - self.end_to_end_scaling_factor = [ - self.end_to_end_scaling_factor - ] * self.n_dim - - if domain_padding is not None and ( - (isinstance(domain_padding, list) and sum(domain_padding) > 0) - or (isinstance(domain_padding, (float, int)) and domain_padding > 0) - ): - self.domain_padding = fno_block.DomainPadding( - domain_padding=domain_padding, padding_mode=domain_padding_mode - ) - else: - self.domain_padding = None - self.domain_padding_mode = domain_padding_mode - - self.lifting = fno_block.MLP( - in_channels=in_channels, - out_channels=self.hidden_channels, - hidden_channels=self.lifting_channels, - n_layers=2, - n_dim=self.n_dim, - ) - - self.fno_blocks = nn.LayerList([]) - self.horizontal_skips = nn.LayerDict({}) - prev_out = self.hidden_channels - for i in range(self.n_layers): - if i in self.horizontal_skips_map.keys(): - prev_out = ( - prev_out + self.uno_out_channels[self.horizontal_skips_map[i]] - ) - self.fno_blocks.append( - fno_block.FNOBlocks( - in_channels=prev_out, - out_channels=self.uno_out_channels[i], - n_modes=self.uno_n_modes[i], - use_mlp=use_mlp, - mlp=mlp, - output_scaling_factor=[self.uno_scalings[i]], - non_linearity=non_linearity, - norm=norm, - ada_in_features=ada_in_features, - preactivation=preactivation, - fno_skip=fno_skip, - mlp_skip=mlp_skip, - separable=separable, - incremental_n_modes=incremental_n_modes, - factorization=factorization, - rank=rank, - SpectralConv=fno_block.FactorizedSpectralConv, - joint_factorization=joint_factorization, - implementation=implementation, - fft_norm=fft_norm, - ) - ) - - if i in self.horizontal_skips_map.values(): - self.horizontal_skips[str(i)] = fno_block.skip_connection( - self.uno_out_channels[i], - self.uno_out_channels[i], - type=horizontal_skip, - n_dim=self.n_dim, - ) - prev_out = self.uno_out_channels[i] - - self.projection = fno_block.MLP( - in_channels=prev_out, - out_channels=out_channels, - hidden_channels=self.projection_channels, - n_layers=2, - n_dim=self.n_dim, - non_linearity=non_linearity, - ) - - def forward(self, x, **kwargs): - x = self.concat_to_tensor(x, self.input_keys) - x = self.lifting(x) - if self.domain_padding is not None: - x = self.domain_padding.pad(x) - output_shape = [ - int(round(i * j)) - for (i, j) in zip(x.shape[-self.n_dim :], self.end_to_end_scaling_factor) - ] - - skip_outputs = {} - cur_output = None - for layer_idx in range(self.n_layers): - if layer_idx in self.horizontal_skips_map.keys(): - skip_val = skip_outputs[self.horizontal_skips_map[layer_idx]] - output_scaling_factors = [ - m / n for (m, n) in zip(x.shape, skip_val.shape) - ] - output_scaling_factors = output_scaling_factors[-1 * self.n_dim :] - t = fno_block.resample( - skip_val, output_scaling_factors, 
list(range(-self.n_dim, 0)) - ) - x = paddle.concat([x, t], axis=1) - - if layer_idx == self.n_layers - 1: - cur_output = output_shape - x = self.fno_blocks[layer_idx](x, output_shape=cur_output) - if layer_idx in self.horizontal_skips_map.values(): - skip_outputs[layer_idx] = self.horizontal_skips[str(layer_idx)](x) - - if self.domain_padding is not None: - x = self.domain_padding.unpad(x) - - out = self.projection(x) - return {self.output_keys[0]: out} +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle +import paddle.nn as nn +import paddle.nn.functional as F + +from ppsci.arch import base +from ppsci.arch import fno_block + + +class UNONet(base.Arch): + """N-Dimensional U-Shaped Neural Operator. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + output_keys (Tuple[str, ...]): Name of output keys, such as ("output",). + in_channels (int, optional): Number of input channels. + out_channels (int, optional): Number of output channels. + hidden_channels (int): Width of the FNO (i.e. number of channels). + lifting_channels (int, optional): Number of hidden channels of the lifting block of the FNO. + Defaults to 256. + projection_channels (int, optional): Number of hidden channels of the projection block of the FNO. + Defaults to 256. + n_layers (int, optional): Number of Fourier Layers. Defaults to 4. + uno_out_channels (Tuple[int, ...], optional): Number of output channel of each Fourier Layers. + Eaxmple: For a Five layer UNO uno_out_channels can be [32,64,64,64,32].c + uno_n_modes (Tuple[Tuple[int, ...], ...]): Number of Fourier Modes to use in integral operation of each + Fourier Layers (along each dimension). + Example: For a five layer UNO with 2D input the uno_n_modes can be: [[5,5],[5,5],[5,5],[5,5],[5,5]]. Defaults to None. + uno_scalings (Tuple[Tuple[int, ...], ...]): Scaling Factors for each Fourier Layers. + Example: For a five layer UNO with 2D input, the uno_scalings can be : [[1.0,1.0],[0.5,0.5],[1,1],[1,1],[2,2]].Defaults to None. + horizontal_skips_map (Dict, optional): A map {...., b: a, ....} denoting horizontal skip connection + from a-th layer to b-th layer. If None default skip connection is applied. + Example: For a 5 layer UNO architecture, the skip connections can be horizontal_skips_map ={4:0,3:1}.Defaults to None. + incremental_n_modes (tuple[int],optional): Incremental number of modes to use in Fourier domain. + * If not None, this allows to incrementally increase the number of modes in Fourier domain + during training. Has to verify n <= N for (n, m) in zip(incremental_n_modes, n_modes). + * If None, all the n_modes are used. + This can be updated dynamically during training.Defaults to None. + use_mlp (bool, optional): Whether to use an MLP layer after each FNO block. Defaults to False. + mlp (Dict[str, float], optional): Parameters of the MLP. {'expansion': float, 'dropout': float}. + Defaults to None. + non_linearity (nn.functional, optional): Non-Linearity module to use. Defaults to F.gelu. + norm (str, optional): Normalization layer to use. Defaults to None. + ada_in_features (Optional[int],optional): The input channles of the adaptive normalization.Defaults to + None. + preactivation (bool, optional): Whether to use resnet-style preactivation. Defaults to False. + fno_skip (str, optional): Type of skip connection to use for fno_block. Defaults to "linear". + horizontal_skip (str, optional): Type of skip connection to use for horizontal skip. 
Defaults to + "linear". + mlp_skip (str, optional): Type of skip connection to use for mlp. Defaults to "soft-gating". + separable (bool, optional): Whether to use a depthwise separable spectral convolution. + Defaults to False. + factorization (str, optional): Tensor factorization of the parameters weight to use. + * If None, a dense tensor parametrizes the Spectral convolutions. + * Otherwise, the specified tensor factorization is used. Defaults to "Tucker". + rank (float, optional): Rank of the tensor factorization of the Fourier weights. Defaults to 1.0. + joint_factorization (bool, optional): Whether all the Fourier Layers should be parametrized by a + single tensor (vs one per layer). Defaults to False. + implementation (str, optional): {'factorized', 'reconstructed'}, optional. Defaults to "factorized". + If factorization is not None, forward mode to use:: + * `reconstructed` : the full weight tensor is reconstructed from the factorization and used for the forward pass. + * `factorized` : the input is directly contracted with the factors of the decomposition. + domain_padding (Optional[Union[list, float, int]], optional): Whether to use percentage of padding. + Defaults to None. + domain_padding_mode (str, optional): {'symmetric', 'one-sided'}, optional + How to perform domain padding, by default 'one-sided'. Defaults to "one-sided". + fft_norm (str, optional): The normalization mode for the FFT. Defaults to "forward". + patching_levels (int, optional): Number of patching levels to use. Defaults to 0. + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + in_channels: int, + out_channels: int, + hidden_channels: int, + lifting_channels: int = 256, + projection_channels: int = 256, + n_layers: int = 4, + uno_out_channels: Tuple[int, ...] = None, + uno_n_modes: Tuple[Tuple[int, ...], ...] = None, + uno_scalings: Tuple[Tuple[int, ...], ...] = None, + horizontal_skips_map: Dict = None, + incremental_n_modes: Tuple[int, ...] 
= None, + use_mlp: bool = False, + mlp: Optional[Dict[str, float]] = None, + non_linearity: nn.functional = F.gelu, + norm: str = None, + ada_in_features: Optional[int] = None, + preactivation: bool = False, + fno_skip: str = "linear", + horizontal_skip: str = "linear", + mlp_skip: str = "soft-gating", + separable: bool = False, + factorization: str = None, + rank: float = 1.0, + joint_factorization: bool = False, + implementation: str = "factorized", + domain_padding: Optional[Union[list, float, int]] = None, + domain_padding_mode: str = "one-sided", + fft_norm: str = "forward", + patching_levels: int = 0, + **kwargs, + ): + super().__init__() + self.input_keys = input_keys + self.output_keys = output_keys + if uno_out_channels is None: + raise ValueError("uno_out_channels can not be None") + if uno_n_modes is None: + raise ValueError("uno_n_modes can not be None") + if uno_scalings is None: + raise ValueError("uno_scalings can not be None") + + if len(uno_out_channels) != n_layers: + raise ValueError("Output channels for all layers are not given") + + if len(uno_n_modes) != n_layers: + raise ValueError("Number of modes for all layers are not given") + + if len(uno_scalings) != n_layers: + raise ValueError("Scaling factor for all layers are not given") + + self.n_dim = len(uno_n_modes[0]) + self.uno_out_channels = uno_out_channels + self.uno_n_modes = uno_n_modes + self.uno_scalings = uno_scalings + + self.hidden_channels = hidden_channels + self.lifting_channels = lifting_channels + self.projection_channels = projection_channels + self.in_channels = in_channels + if patching_levels: + self.in_channels = self.in_channels * patching_levels + 1 + self.out_channels = out_channels + self.n_layers = n_layers + self.horizontal_skips_map = horizontal_skips_map + self.joint_factorization = joint_factorization + self.non_linearity = non_linearity + self.rank = rank + self.factorization = factorization + self.fno_skip = (fno_skip,) + self.mlp_skip = (mlp_skip,) + self.fft_norm = fft_norm + self.implementation = implementation + self.separable = separable + self.preactivation = preactivation + self._incremental_n_modes = incremental_n_modes + self.mlp = mlp + # constructing default skip maps + if self.horizontal_skips_map is None: + self.horizontal_skips_map = {} + for i in range( + 0, + n_layers // 2, + ): + # example, if n_layers = 5, then 4:0, 3:1 + self.horizontal_skips_map[n_layers - i - 1] = i + # self.uno_scalings may be a 1d list specifying uniform scaling factor at each layer + # or a 2d list, where each row specifies scaling factors along each dimention. + # To get the final (end to end) scaling factors we need to multiply + # the scaling factors (a list) of all layer. + + self.end_to_end_scaling_factor = [1] * len(self.uno_scalings[0]) + # multiplying scaling factors + for k in self.uno_scalings: + self.end_to_end_scaling_factor = [ + i * j for (i, j) in zip(self.end_to_end_scaling_factor, k) + ] + + # list with a single element is replaced by the scaler. 
+ if len(self.end_to_end_scaling_factor) == 1: + self.end_to_end_scaling_factor = self.end_to_end_scaling_factor[0] + + if isinstance(self.end_to_end_scaling_factor, (float, int)): + self.end_to_end_scaling_factor = [ + self.end_to_end_scaling_factor + ] * self.n_dim + + if domain_padding is not None and ( + (isinstance(domain_padding, list) and sum(domain_padding) > 0) + or (isinstance(domain_padding, (float, int)) and domain_padding > 0) + ): + self.domain_padding = fno_block.DomainPadding( + domain_padding=domain_padding, padding_mode=domain_padding_mode + ) + else: + self.domain_padding = None + self.domain_padding_mode = domain_padding_mode + + self.lifting = fno_block.MLP( + in_channels=in_channels, + out_channels=self.hidden_channels, + hidden_channels=self.lifting_channels, + n_layers=2, + n_dim=self.n_dim, + ) + + self.fno_blocks = nn.LayerList([]) + self.horizontal_skips = nn.LayerDict({}) + prev_out = self.hidden_channels + for i in range(self.n_layers): + if i in self.horizontal_skips_map.keys(): + prev_out = ( + prev_out + self.uno_out_channels[self.horizontal_skips_map[i]] + ) + self.fno_blocks.append( + fno_block.FNOBlocks( + in_channels=prev_out, + out_channels=self.uno_out_channels[i], + n_modes=self.uno_n_modes[i], + use_mlp=use_mlp, + mlp=mlp, + output_scaling_factor=[self.uno_scalings[i]], + non_linearity=non_linearity, + norm=norm, + ada_in_features=ada_in_features, + preactivation=preactivation, + fno_skip=fno_skip, + mlp_skip=mlp_skip, + separable=separable, + incremental_n_modes=incremental_n_modes, + factorization=factorization, + rank=rank, + SpectralConv=fno_block.FactorizedSpectralConv, + joint_factorization=joint_factorization, + implementation=implementation, + fft_norm=fft_norm, + ) + ) + + if i in self.horizontal_skips_map.values(): + self.horizontal_skips[str(i)] = fno_block.skip_connection( + self.uno_out_channels[i], + self.uno_out_channels[i], + type=horizontal_skip, + n_dim=self.n_dim, + ) + prev_out = self.uno_out_channels[i] + + self.projection = fno_block.MLP( + in_channels=prev_out, + out_channels=out_channels, + hidden_channels=self.projection_channels, + n_layers=2, + n_dim=self.n_dim, + non_linearity=non_linearity, + ) + + def forward(self, x, **kwargs): + x = self.concat_to_tensor(x, self.input_keys) + x = self.lifting(x) + if self.domain_padding is not None: + x = self.domain_padding.pad(x) + output_shape = [ + int(round(i * j)) + for (i, j) in zip(x.shape[-self.n_dim :], self.end_to_end_scaling_factor) + ] + + skip_outputs = {} + cur_output = None + for layer_idx in range(self.n_layers): + if layer_idx in self.horizontal_skips_map.keys(): + skip_val = skip_outputs[self.horizontal_skips_map[layer_idx]] + output_scaling_factors = [ + m / n for (m, n) in zip(x.shape, skip_val.shape) + ] + output_scaling_factors = output_scaling_factors[-1 * self.n_dim :] + t = fno_block.resample( + skip_val, output_scaling_factors, list(range(-self.n_dim, 0)) + ) + x = paddle.concat([x, t], axis=1) + + if layer_idx == self.n_layers - 1: + cur_output = output_shape + x = self.fno_blocks[layer_idx](x, output_shape=cur_output) + if layer_idx in self.horizontal_skips_map.values(): + skip_outputs[layer_idx] = self.horizontal_skips[str(layer_idx)](x) + + if self.domain_padding is not None: + x = self.domain_padding.unpad(x) + + out = self.projection(x) + return {self.output_keys[0]: out} diff --git a/ppsci/arch/vae.py b/ppsci/arch/vae.py index 2a05f0d648..d4271f6af0 100644 --- a/ppsci/arch/vae.py +++ b/ppsci/arch/vae.py @@ -1,103 +1,103 @@ -# Copyright (c) 2023 
PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Tuple - -import paddle -import paddle.nn as nn - -from ppsci.arch import base - - -class AutoEncoder(base.Arch): - """ - AutoEncoder is a class that represents an autoencoder neural network model. - - Args: - input_keys (Tuple[str, ...]): A tuple of input keys. - output_keys (Tuple[str, ...]): A tuple of output keys. - input_dim (int): The dimension of the input data. - latent_dim (int): The dimension of the latent space. - hidden_dim (int): The dimension of the hidden layer. - - Examples: - >>> import paddle - >>> import ppsci - >>> model = ppsci.arch.AutoEncoder( - ... input_keys=("input1",), - ... output_keys=("mu", "log_sigma", "decoder_z",), - ... input_dim=100, - ... latent_dim=50, - ... hidden_dim=200 - ... ) - >>> input_dict = {"input1": paddle.rand([200, 100]),} - >>> output_dict = model(input_dict) - >>> print(output_dict["mu"].shape) - [200, 50] - >>> print(output_dict["log_sigma"].shape) - [200, 50] - >>> print(output_dict["decoder_z"].shape) - [200, 100] - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - output_keys: Tuple[str, ...], - input_dim: int, - latent_dim: int, - hidden_dim: int, - ): - super(AutoEncoder, self).__init__() - self.input_keys = input_keys - self.output_keys = output_keys - # encoder - self._encoder_linear = nn.Sequential( - nn.Linear(input_dim, hidden_dim), - nn.Tanh(), - ) - self._encoder_mu = nn.Linear(hidden_dim, latent_dim) - self._encoder_log_sigma = nn.Linear(hidden_dim, latent_dim) - - self._decoder = nn.Sequential( - nn.Linear(latent_dim, hidden_dim), - nn.Tanh(), - nn.Linear(hidden_dim, input_dim), - ) - - def encoder(self, x): - h = self._encoder_linear(x) - mu = self._encoder_mu(h) - log_sigma = self._encoder_log_sigma(h) - return mu, log_sigma - - def decoder(self, x): - return self._decoder(x) - - def forward_tensor(self, x): - mu, log_sigma = self.encoder(x) - z = mu + paddle.randn(mu.shape) * paddle.exp(log_sigma) - return mu, log_sigma, self.decoder(z) - - def forward(self, x): - x = self.concat_to_tensor(x, self.input_keys, axis=-1) - mu, log_sigma, decoder_z = self.forward_tensor(x) - result_dict = { - self.output_keys[0]: mu, - self.output_keys[1]: log_sigma, - self.output_keys[2]: decoder_z, - } - return result_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from typing import Tuple + +import paddle +import paddle.nn as nn + +from ppsci.arch import base + + +class AutoEncoder(base.Arch): + """ + AutoEncoder is a class that represents an autoencoder neural network model. + + Args: + input_keys (Tuple[str, ...]): A tuple of input keys. + output_keys (Tuple[str, ...]): A tuple of output keys. + input_dim (int): The dimension of the input data. + latent_dim (int): The dimension of the latent space. + hidden_dim (int): The dimension of the hidden layer. + + Examples: + >>> import paddle + >>> import ppsci + >>> model = ppsci.arch.AutoEncoder( + ... input_keys=("input1",), + ... output_keys=("mu", "log_sigma", "decoder_z",), + ... input_dim=100, + ... latent_dim=50, + ... hidden_dim=200 + ... ) + >>> input_dict = {"input1": paddle.rand([200, 100]),} + >>> output_dict = model(input_dict) + >>> print(output_dict["mu"].shape) + [200, 50] + >>> print(output_dict["log_sigma"].shape) + [200, 50] + >>> print(output_dict["decoder_z"].shape) + [200, 100] + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + output_keys: Tuple[str, ...], + input_dim: int, + latent_dim: int, + hidden_dim: int, + ): + super(AutoEncoder, self).__init__() + self.input_keys = input_keys + self.output_keys = output_keys + # encoder + self._encoder_linear = nn.Sequential( + nn.Linear(input_dim, hidden_dim), + nn.Tanh(), + ) + self._encoder_mu = nn.Linear(hidden_dim, latent_dim) + self._encoder_log_sigma = nn.Linear(hidden_dim, latent_dim) + + self._decoder = nn.Sequential( + nn.Linear(latent_dim, hidden_dim), + nn.Tanh(), + nn.Linear(hidden_dim, input_dim), + ) + + def encoder(self, x): + h = self._encoder_linear(x) + mu = self._encoder_mu(h) + log_sigma = self._encoder_log_sigma(h) + return mu, log_sigma + + def decoder(self, x): + return self._decoder(x) + + def forward_tensor(self, x): + mu, log_sigma = self.encoder(x) + z = mu + paddle.randn(mu.shape) * paddle.exp(log_sigma) + return mu, log_sigma, self.decoder(z) + + def forward(self, x): + x = self.concat_to_tensor(x, self.input_keys, axis=-1) + mu, log_sigma, decoder_z = self.forward_tensor(x) + result_dict = { + self.output_keys[0]: mu, + self.output_keys[1]: log_sigma, + self.output_keys[2]: decoder_z, + } + return result_dict diff --git a/ppsci/autodiff/__init__.py b/ppsci/autodiff/__init__.py index 68a0570c24..5523b15219 100644 --- a/ppsci/autodiff/__init__.py +++ b/ppsci/autodiff/__init__.py @@ -1,17 +1,17 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ppsci.autodiff.ad import clear -from ppsci.autodiff.ad import hessian -from ppsci.autodiff.ad import jacobian +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppsci.autodiff.ad import clear +from ppsci.autodiff.ad import hessian +from ppsci.autodiff.ad import jacobian diff --git a/ppsci/autodiff/ad.py b/ppsci/autodiff/ad.py index ba3afd14a4..72e96f8878 100644 --- a/ppsci/autodiff/ad.py +++ b/ppsci/autodiff/ad.py @@ -1,341 +1,341 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This module is adapted from [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional -from typing import Union - -import paddle - - -class _Jacobian: - """Compute Jacobian matrix J: J[i][j] = dy_i/dx_j, where i = 0, ..., dim_y-1 and - j = 0, ..., dim_x - 1. - - It is lazy evaluation, i.e., it only computes J[i][j] when needed, and will cache - by output tensor(row index in jacobian matrix). - - Args: - ys (paddle.Tensor): Output Tensor of shape [batch_size, dim_y]. - xs (paddle.Tensor): Input Tensor of shape [batch_size, dim_x]. - """ - - def __init__( - self, - ys: "paddle.Tensor", - xs: "paddle.Tensor", - J: Optional[Dict[int, paddle.Tensor]] = None, - ): - self.ys = ys - self.xs = xs - - self.dim_y = ys.shape[1] - self.dim_x = xs.shape[1] - - self.J: Dict[int, paddle.Tensor] = {} if J is None else J - - def __call__( - self, - i: int = 0, - j: Optional[int] = None, - retain_graph: Optional[bool] = None, - create_graph: bool = True, - ) -> "paddle.Tensor": - """ - Returns J[`i`][`j`]. If `j` is ``None``, returns the gradient of y_i, i.e. J[i]. - """ - if not 0 <= i < self.dim_y: - raise ValueError(f"i({i}) should in range [0, {self.dim_y}).") - if j is not None and not 0 <= j < self.dim_x: - raise ValueError(f"j({j}) should in range [0, {self.dim_x}).") - # Compute J[i] - if i not in self.J: - y = self.ys[:, i : i + 1] if self.dim_y > 1 else self.ys - self.J[i] = paddle.grad( - y, self.xs, retain_graph=retain_graph, create_graph=create_graph - )[0] - - return self.J[i] if (j is None or self.dim_x == 1) else self.J[i][:, j : j + 1] - - -class Jacobians: - r"""Compute multiple Jacobians. - - $$ - \rm Jacobian(ys, xs, i, j) = \dfrac{\partial ys_i}{\partial xs_j} - $$ - - A new instance will be created for a new pair of (output, input). For the (output, - input) pair that has been computed before, it will reuse the previous instance, - rather than creating a new one. 
- """ - - def __init__(self): - self.Js = {} - - def __call__( - self, - ys: "paddle.Tensor", - xs: Union["paddle.Tensor", List["paddle.Tensor"]], - i: int = 0, - j: Optional[int] = None, - retain_graph: Optional[bool] = None, - create_graph: bool = True, - ) -> Union["paddle.Tensor", List["paddle.Tensor"]]: - """Compute jacobians for given ys and xs. - - Args: - ys (paddle.Tensor): Output tensor. - xs (Union[paddle.Tensor, List[paddle.Tensor]]): Input tensor(s). - i (int, optional): i-th output variable. Defaults to 0. - j (Optional[int]): j-th input variable. Defaults to None. - retain_graph (Optional[bool]): Whether to retain the forward graph which - is used to calculate the gradient. When it is True, the graph would - be retained, in which way users can calculate backward twice for the - same graph. When it is False, the graph would be freed. Default None, - which means it is equal to `create_graph`. - create_graph (bool, optional): Whether to create the gradient graphs of - the computing process. When it is True, higher order derivatives are - supported to compute; when it is False, the gradient graphs of the - computing process would be discarded. Default False. - - Returns: - paddle.Tensor: Jacobian matrix of ys[i] to xs[j]. - - Examples: - >>> import paddle - >>> import ppsci - >>> x = paddle.randn([4, 1]) - >>> x.stop_gradient = False - >>> y = x * x - >>> dy_dx = ppsci.autodiff.jacobian(y, x) - >>> print(dy_dx.shape) - [4, 1] - """ - if not isinstance(xs, (list, tuple)): - key = (ys, xs) - if key not in self.Js: - self.Js[key] = _Jacobian(ys, xs) - return self.Js[key](i, j, retain_graph, create_graph) - else: - xs_require = [xs[i] for i in range(len(xs)) if (ys, xs[i]) not in self.Js] - grads_require = paddle.grad( - ys, - xs_require, - create_graph=create_graph, - retain_graph=retain_graph, - ) - - idx = 0 - Js_list = [] - for k, xs_ in enumerate(xs): - key = (ys, xs_) - assert xs_.shape[-1] == 1, ( - f"The last dim of each xs should be 1, but xs[{k}] has shape " - f"{xs_.shape}" - ) - if key not in self.Js: - self.Js[key] = _Jacobian(ys, xs_, {0: grads_require[idx]}) - idx += 1 - Js_list.append(self.Js[key](i, j, retain_graph, create_graph)) - return Js_list - - def _clear(self): - """Clear cached Jacobians.""" - self.Js = {} - - -# Use high-order differentiation with singleton pattern for convenient -jacobian: Callable[ - [ - "paddle.Tensor", - Union["paddle.Tensor", List["paddle.Tensor"]], - int, - Optional[int], - Optional[bool], - bool, - ], - Union["paddle.Tensor", List["paddle.Tensor"]], -] = Jacobians() - - -class _Hessian: - """Compute Hessian matrix H: H[i][j] = d^2y / dx_i dx_j, where i,j = 0,..., dim_x-1. - - It is lazy evaluation, i.e., it only computes H[i][j] when needed. - - Args: - ys: Output Tensor of shape (batch_size, 1) or (batch_size, dim_y > 1). - xs: Input Tensor of shape (batch_size, dim_x). - component: If `y` has the shape (batch_size, dim_y > 1), then `y[:, component]` - is used to compute the Hessian. Do not use if `y` has the shape (batch_size, - 1). - grad_y: The gradient of `y` w.r.t. `xs`. Provide `grad_y` if known to avoid - duplicate computation. `grad_y` can be computed from ``Jacobian``. - """ - - def __init__( - self, - ys: "paddle.Tensor", - xs: "paddle.Tensor", - component: Optional[int] = None, - grad_y: Optional["paddle.Tensor"] = None, - ): - dim_y = ys.shape[1] - - if dim_y > 1: - if component is None: - raise ValueError( - f"component({component}) can not be None when dim_y({dim_y})>1." 
- ) - if component >= dim_y: - raise ValueError( - f"component({component}) should be smaller than dim_y({dim_y})." - ) - else: - if component is not None: - raise ValueError( - f"component{component} should be set to None when dim_y({dim_y})=1." - ) - component = 0 - - if grad_y is None: - # `create_graph` of first order(jacobian) should be `True` in _Hessian. - grad_y = jacobian( - ys, xs, i=component, j=None, retain_graph=None, create_graph=True - ) - self.H = _Jacobian(grad_y, xs) - - def __call__( - self, - i: int = 0, - j: int = 0, - retain_graph: Optional[bool] = None, - create_graph: bool = True, - ): - """Returns H[`i`][`j`].""" - return self.H(i, j, retain_graph, create_graph) - - -class Hessians: - r"""Compute multiple Hessians. - - $$ - \rm Hessian(ys, xs, component, i, j) = \dfrac{\partial ys_{component}}{\partial xs_i \partial xs_j} - $$ - - A new instance will be created for a new pair of (output, input). For the (output, - input) pair that has been computed before, it will reuse the previous instance, - rather than creating a new one. - """ - - def __init__(self): - self.Hs = {} - - def __call__( - self, - ys: "paddle.Tensor", - xs: "paddle.Tensor", - component: Optional[int] = None, - i: int = 0, - j: int = 0, - grad_y: Optional["paddle.Tensor"] = None, - retain_graph: Optional[bool] = None, - create_graph: bool = True, - ) -> "paddle.Tensor": - """Compute hessian matrix for given ys and xs. - - Args: - ys (paddle.Tensor): Output tensor. - xs (paddle.Tensor): Input tensor. - component (Optional[int]): If `y` has the shape (batch_size, dim_y > 1), then `y[:, component]` - is used to compute the Hessian. Do not use if `y` has the shape (batch_size, - 1). Defaults to None. - i (int, optional): I-th input variable. Defaults to 0. - j (int, optional): J-th input variable. Defaults to 0. - grad_y (Optional[paddle.Tensor]): The gradient of `y` w.r.t. `xs`. Provide `grad_y` if known to avoid - duplicate computation. Defaults to None. - retain_graph (Optional[bool]): Whether to retain the forward graph which - is used to calculate the gradient. When it is True, the graph would - be retained, in which way users can calculate backward twice for the - same graph. When it is False, the graph would be freed. Default None, - which means it is equal to `create_graph`. - create_graph (bool, optional): Whether to create the gradient graphs of - the computing process. When it is True, higher order derivatives are - supported to compute; when it is False, the gradient graphs of the - computing process would be discarded. Default False. - - Returns: - paddle.Tensor: Hessian matrix. - - Examples: - >>> import paddle - >>> import ppsci - >>> x = paddle.randn([4, 3]) - >>> x.stop_gradient = False - >>> y = (x * x).sin() - >>> dy_dxx = ppsci.autodiff.hessian(y, x, component=0) - >>> print(dy_dxx.shape) - [4, 1] - """ - key = (ys, xs, component) - if key not in self.Hs: - self.Hs[key] = _Hessian(ys, xs, component=component, grad_y=grad_y) - return self.Hs[key](i, j, retain_graph, create_graph) - - def _clear(self): - """Clear cached Hessians.""" - self.Hs = {} - - -# Use high-order differentiation with singleton pattern for convenient -hessian: Callable[ - [ - "paddle.Tensor", - "paddle.Tensor", - Optional[int], - int, - int, - Optional["paddle.Tensor"], - Optional[bool], - bool, - ], - "paddle.Tensor", -] = Hessians() - - -def clear(): - """Clear cached Jacobians and Hessians. 
- - Examples: - >>> import paddle - >>> import ppsci - >>> x = paddle.randn([4, 3]) - >>> x.stop_gradient = False - >>> y = (x * x).sin() - >>> dy_dxx = ppsci.autodiff.hessian(y, x, component=0) - >>> ppsci.autodiff.clear() - >>> print(ppsci.autodiff.hessian.Hs) - {} - """ - jacobian._clear() - hessian._clear() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module is adapted from [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" + +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Union + +import paddle + + +class _Jacobian: + """Compute Jacobian matrix J: J[i][j] = dy_i/dx_j, where i = 0, ..., dim_y-1 and + j = 0, ..., dim_x - 1. + + It is lazy evaluation, i.e., it only computes J[i][j] when needed, and will cache + by output tensor(row index in jacobian matrix). + + Args: + ys (paddle.Tensor): Output Tensor of shape [batch_size, dim_y]. + xs (paddle.Tensor): Input Tensor of shape [batch_size, dim_x]. + """ + + def __init__( + self, + ys: "paddle.Tensor", + xs: "paddle.Tensor", + J: Optional[Dict[int, paddle.Tensor]] = None, + ): + self.ys = ys + self.xs = xs + + self.dim_y = ys.shape[1] + self.dim_x = xs.shape[1] + + self.J: Dict[int, paddle.Tensor] = {} if J is None else J + + def __call__( + self, + i: int = 0, + j: Optional[int] = None, + retain_graph: Optional[bool] = None, + create_graph: bool = True, + ) -> "paddle.Tensor": + """ + Returns J[`i`][`j`]. If `j` is ``None``, returns the gradient of y_i, i.e. J[i]. + """ + if not 0 <= i < self.dim_y: + raise ValueError(f"i({i}) should in range [0, {self.dim_y}).") + if j is not None and not 0 <= j < self.dim_x: + raise ValueError(f"j({j}) should in range [0, {self.dim_x}).") + # Compute J[i] + if i not in self.J: + y = self.ys[:, i : i + 1] if self.dim_y > 1 else self.ys + self.J[i] = paddle.grad( + y, self.xs, retain_graph=retain_graph, create_graph=create_graph + )[0] + + return self.J[i] if (j is None or self.dim_x == 1) else self.J[i][:, j : j + 1] + + +class Jacobians: + r"""Compute multiple Jacobians. + + $$ + \rm Jacobian(ys, xs, i, j) = \dfrac{\partial ys_i}{\partial xs_j} + $$ + + A new instance will be created for a new pair of (output, input). For the (output, + input) pair that has been computed before, it will reuse the previous instance, + rather than creating a new one. + """ + + def __init__(self): + self.Js = {} + + def __call__( + self, + ys: "paddle.Tensor", + xs: Union["paddle.Tensor", List["paddle.Tensor"]], + i: int = 0, + j: Optional[int] = None, + retain_graph: Optional[bool] = None, + create_graph: bool = True, + ) -> Union["paddle.Tensor", List["paddle.Tensor"]]: + """Compute jacobians for given ys and xs. + + Args: + ys (paddle.Tensor): Output tensor. + xs (Union[paddle.Tensor, List[paddle.Tensor]]): Input tensor(s). + i (int, optional): i-th output variable. Defaults to 0. 
+ j (Optional[int]): j-th input variable. Defaults to None. + retain_graph (Optional[bool]): Whether to retain the forward graph which + is used to calculate the gradient. When it is True, the graph would + be retained, in which way users can calculate backward twice for the + same graph. When it is False, the graph would be freed. Default None, + which means it is equal to `create_graph`. + create_graph (bool, optional): Whether to create the gradient graphs of + the computing process. When it is True, higher order derivatives are + supported to compute; when it is False, the gradient graphs of the + computing process would be discarded. Default False. + + Returns: + paddle.Tensor: Jacobian matrix of ys[i] to xs[j]. + + Examples: + >>> import paddle + >>> import ppsci + >>> x = paddle.randn([4, 1]) + >>> x.stop_gradient = False + >>> y = x * x + >>> dy_dx = ppsci.autodiff.jacobian(y, x) + >>> print(dy_dx.shape) + [4, 1] + """ + if not isinstance(xs, (list, tuple)): + key = (ys, xs) + if key not in self.Js: + self.Js[key] = _Jacobian(ys, xs) + return self.Js[key](i, j, retain_graph, create_graph) + else: + xs_require = [xs[i] for i in range(len(xs)) if (ys, xs[i]) not in self.Js] + grads_require = paddle.grad( + ys, + xs_require, + create_graph=create_graph, + retain_graph=retain_graph, + ) + + idx = 0 + Js_list = [] + for k, xs_ in enumerate(xs): + key = (ys, xs_) + assert xs_.shape[-1] == 1, ( + f"The last dim of each xs should be 1, but xs[{k}] has shape " + f"{xs_.shape}" + ) + if key not in self.Js: + self.Js[key] = _Jacobian(ys, xs_, {0: grads_require[idx]}) + idx += 1 + Js_list.append(self.Js[key](i, j, retain_graph, create_graph)) + return Js_list + + def _clear(self): + """Clear cached Jacobians.""" + self.Js = {} + + +# Use high-order differentiation with singleton pattern for convenient +jacobian: Callable[ + [ + "paddle.Tensor", + Union["paddle.Tensor", List["paddle.Tensor"]], + int, + Optional[int], + Optional[bool], + bool, + ], + Union["paddle.Tensor", List["paddle.Tensor"]], +] = Jacobians() + + +class _Hessian: + """Compute Hessian matrix H: H[i][j] = d^2y / dx_i dx_j, where i,j = 0,..., dim_x-1. + + It is lazy evaluation, i.e., it only computes H[i][j] when needed. + + Args: + ys: Output Tensor of shape (batch_size, 1) or (batch_size, dim_y > 1). + xs: Input Tensor of shape (batch_size, dim_x). + component: If `y` has the shape (batch_size, dim_y > 1), then `y[:, component]` + is used to compute the Hessian. Do not use if `y` has the shape (batch_size, + 1). + grad_y: The gradient of `y` w.r.t. `xs`. Provide `grad_y` if known to avoid + duplicate computation. `grad_y` can be computed from ``Jacobian``. + """ + + def __init__( + self, + ys: "paddle.Tensor", + xs: "paddle.Tensor", + component: Optional[int] = None, + grad_y: Optional["paddle.Tensor"] = None, + ): + dim_y = ys.shape[1] + + if dim_y > 1: + if component is None: + raise ValueError( + f"component({component}) can not be None when dim_y({dim_y})>1." + ) + if component >= dim_y: + raise ValueError( + f"component({component}) should be smaller than dim_y({dim_y})." + ) + else: + if component is not None: + raise ValueError( + f"component{component} should be set to None when dim_y({dim_y})=1." + ) + component = 0 + + if grad_y is None: + # `create_graph` of first order(jacobian) should be `True` in _Hessian. 
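For reference, the list-input branch of Jacobians.__call__ above can be exercised as in the following minimal sketch (illustrative only; it assumes the ppsci package from this patch is installed and simply mirrors the docstring example with two inputs):

import paddle
import ppsci

# Two scalar input columns; the batched-gradient path requires each input
# tensor in the list to have a trailing dimension of 1.
x = paddle.randn([4, 1])
t = paddle.randn([4, 1])
x.stop_gradient = False
t.stop_gradient = False

y = (x * t).sin()

# A list of inputs yields a list of Jacobians, one per input, computed in a
# single paddle.grad call and cached per (ys, xs) pair.
dy_dx, dy_dt = ppsci.autodiff.jacobian(y, [x, t])
print(dy_dx.shape, dy_dt.shape)  # expected: [4, 1] [4, 1]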
+ grad_y = jacobian( + ys, xs, i=component, j=None, retain_graph=None, create_graph=True + ) + self.H = _Jacobian(grad_y, xs) + + def __call__( + self, + i: int = 0, + j: int = 0, + retain_graph: Optional[bool] = None, + create_graph: bool = True, + ): + """Returns H[`i`][`j`].""" + return self.H(i, j, retain_graph, create_graph) + + +class Hessians: + r"""Compute multiple Hessians. + + $$ + \rm Hessian(ys, xs, component, i, j) = \dfrac{\partial ys_{component}}{\partial xs_i \partial xs_j} + $$ + + A new instance will be created for a new pair of (output, input). For the (output, + input) pair that has been computed before, it will reuse the previous instance, + rather than creating a new one. + """ + + def __init__(self): + self.Hs = {} + + def __call__( + self, + ys: "paddle.Tensor", + xs: "paddle.Tensor", + component: Optional[int] = None, + i: int = 0, + j: int = 0, + grad_y: Optional["paddle.Tensor"] = None, + retain_graph: Optional[bool] = None, + create_graph: bool = True, + ) -> "paddle.Tensor": + """Compute hessian matrix for given ys and xs. + + Args: + ys (paddle.Tensor): Output tensor. + xs (paddle.Tensor): Input tensor. + component (Optional[int]): If `y` has the shape (batch_size, dim_y > 1), then `y[:, component]` + is used to compute the Hessian. Do not use if `y` has the shape (batch_size, + 1). Defaults to None. + i (int, optional): I-th input variable. Defaults to 0. + j (int, optional): J-th input variable. Defaults to 0. + grad_y (Optional[paddle.Tensor]): The gradient of `y` w.r.t. `xs`. Provide `grad_y` if known to avoid + duplicate computation. Defaults to None. + retain_graph (Optional[bool]): Whether to retain the forward graph which + is used to calculate the gradient. When it is True, the graph would + be retained, in which way users can calculate backward twice for the + same graph. When it is False, the graph would be freed. Default None, + which means it is equal to `create_graph`. + create_graph (bool, optional): Whether to create the gradient graphs of + the computing process. When it is True, higher order derivatives are + supported to compute; when it is False, the gradient graphs of the + computing process would be discarded. Default False. + + Returns: + paddle.Tensor: Hessian matrix. + + Examples: + >>> import paddle + >>> import ppsci + >>> x = paddle.randn([4, 3]) + >>> x.stop_gradient = False + >>> y = (x * x).sin() + >>> dy_dxx = ppsci.autodiff.hessian(y, x, component=0) + >>> print(dy_dxx.shape) + [4, 1] + """ + key = (ys, xs, component) + if key not in self.Hs: + self.Hs[key] = _Hessian(ys, xs, component=component, grad_y=grad_y) + return self.Hs[key](i, j, retain_graph, create_graph) + + def _clear(self): + """Clear cached Hessians.""" + self.Hs = {} + + +# Use high-order differentiation with singleton pattern for convenient +hessian: Callable[ + [ + "paddle.Tensor", + "paddle.Tensor", + Optional[int], + int, + int, + Optional["paddle.Tensor"], + Optional[bool], + bool, + ], + "paddle.Tensor", +] = Hessians() + + +def clear(): + """Clear cached Jacobians and Hessians. 
+ + Examples: + >>> import paddle + >>> import ppsci + >>> x = paddle.randn([4, 3]) + >>> x.stop_gradient = False + >>> y = (x * x).sin() + >>> dy_dxx = ppsci.autodiff.hessian(y, x, component=0) + >>> ppsci.autodiff.clear() + >>> print(ppsci.autodiff.hessian.Hs) + {} + """ + jacobian._clear() + hessian._clear() diff --git a/ppsci/constraint/__init__.py b/ppsci/constraint/__init__.py index 9179439436..9fe394c52a 100644 --- a/ppsci/constraint/__init__.py +++ b/ppsci/constraint/__init__.py @@ -1,86 +1,86 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import copy - -from ppsci.constraint.base import Constraint -from ppsci.constraint.boundary_constraint import BoundaryConstraint -from ppsci.constraint.initial_constraint import InitialConstraint -from ppsci.constraint.integral_constraint import IntegralConstraint -from ppsci.constraint.interior_constraint import InteriorConstraint -from ppsci.constraint.periodic_constraint import PeriodicConstraint -from ppsci.constraint.supervised_constraint import SupervisedConstraint -from ppsci.loss import build_loss -from ppsci.utils import logger -from ppsci.utils import misc - -__all__ = [ - "Constraint", - "BoundaryConstraint", - "InitialConstraint", - "IntegralConstraint", - "InteriorConstraint", - "PeriodicConstraint", - "SupervisedConstraint", -] - - -def build_constraint(cfg, equation_dict, geom_dict): - """Build constraint(s). - - Args: - cfg (List[DictConfig]): Constraint config list. - equation_dict (Dct[str, Equation]): Equation(s) in dict. - geom_dict (Dct[str, Geometry]): Geometry(ies) in dict. - - Returns: - Dict[str, constraint]: Constraint(s) in dict. - """ - if cfg is None: - return None - cfg = copy.deepcopy(cfg) - global_dataloader_cfg = cfg["dataloader"] - constraint_cfg = cfg["content"] - - constraint_dict = misc.PrettyOrderedDict() - for _item in constraint_cfg: - constraint_cls = next(iter(_item.keys())) - _constraint_cfg = _item[constraint_cls] - constraint_name = _constraint_cfg.get("name", constraint_cls) - - # select equation - if isinstance(_constraint_cfg["output_expr"], str): - equation_name = _constraint_cfg.pop("output_expr") - _constraint_cfg["output_expr"] = equation_dict[equation_name].equations - - # select geometry - geom_name = _constraint_cfg.pop("geom") - _constraint_cfg["geom"] = geom_dict[geom_name] - - # update complete dataloader config - local_dataloader_cfg = _constraint_cfg["dataloader"] - local_dataloader_cfg.update(global_dataloader_cfg) - - # build loss - _constraint_cfg["loss"] = build_loss(_constraint_cfg["loss"]) - - # instantiate constraint - _constraint_cfg["dataloader_cfg"] = _constraint_cfg.pop("dataloader") - constraint_dict[constraint_name] = eval(constraint_cls)(**_constraint_cfg) - - logger.debug(str(constraint_dict[constraint_name])) - - return constraint_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
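The singleton jacobian/hessian objects cache by (ys, xs[, component]), so long-running loops are expected to call clear() between iterations; the sketch below illustrates that assumed usage pattern (it is not mandated by this patch):

import paddle
import ppsci

x = paddle.randn([4, 3])
x.stop_gradient = False

for _ in range(2):  # stand-in for a training loop
    y = (x * x).sin()
    # d^2 y[:, 0] / dx_0 dx_0 (i=0, j=0 are the defaults).
    d2y_dx2 = ppsci.autodiff.hessian(y, x, component=0)
    # Each iteration creates a fresh `y`, hence a fresh cache key; clearing
    # keeps the cached Js/Hs dicts from growing with stale graphs.
    ppsci.autodiff.clear()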
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import copy + +from ppsci.constraint.base import Constraint +from ppsci.constraint.boundary_constraint import BoundaryConstraint +from ppsci.constraint.initial_constraint import InitialConstraint +from ppsci.constraint.integral_constraint import IntegralConstraint +from ppsci.constraint.interior_constraint import InteriorConstraint +from ppsci.constraint.periodic_constraint import PeriodicConstraint +from ppsci.constraint.supervised_constraint import SupervisedConstraint +from ppsci.loss import build_loss +from ppsci.utils import logger +from ppsci.utils import misc + +__all__ = [ + "Constraint", + "BoundaryConstraint", + "InitialConstraint", + "IntegralConstraint", + "InteriorConstraint", + "PeriodicConstraint", + "SupervisedConstraint", +] + + +def build_constraint(cfg, equation_dict, geom_dict): + """Build constraint(s). + + Args: + cfg (List[DictConfig]): Constraint config list. + equation_dict (Dct[str, Equation]): Equation(s) in dict. + geom_dict (Dct[str, Geometry]): Geometry(ies) in dict. + + Returns: + Dict[str, constraint]: Constraint(s) in dict. + """ + if cfg is None: + return None + cfg = copy.deepcopy(cfg) + global_dataloader_cfg = cfg["dataloader"] + constraint_cfg = cfg["content"] + + constraint_dict = misc.PrettyOrderedDict() + for _item in constraint_cfg: + constraint_cls = next(iter(_item.keys())) + _constraint_cfg = _item[constraint_cls] + constraint_name = _constraint_cfg.get("name", constraint_cls) + + # select equation + if isinstance(_constraint_cfg["output_expr"], str): + equation_name = _constraint_cfg.pop("output_expr") + _constraint_cfg["output_expr"] = equation_dict[equation_name].equations + + # select geometry + geom_name = _constraint_cfg.pop("geom") + _constraint_cfg["geom"] = geom_dict[geom_name] + + # update complete dataloader config + local_dataloader_cfg = _constraint_cfg["dataloader"] + local_dataloader_cfg.update(global_dataloader_cfg) + + # build loss + _constraint_cfg["loss"] = build_loss(_constraint_cfg["loss"]) + + # instantiate constraint + _constraint_cfg["dataloader_cfg"] = _constraint_cfg.pop("dataloader") + constraint_dict[constraint_name] = eval(constraint_cls)(**_constraint_cfg) + + logger.debug(str(constraint_dict[constraint_name])) + + return constraint_dict diff --git a/ppsci/constraint/base.py b/ppsci/constraint/base.py index c3b4c8a122..d06f113fac 100644 --- a/ppsci/constraint/base.py +++ b/ppsci/constraint/base.py @@ -1,62 +1,62 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import Dict - -from paddle import io - -from ppsci import data - -if TYPE_CHECKING: - from ppsci import loss - - -class Constraint: - """Base class for constraint. - - Args: - dataset (io.Dataset): Dataset. - dataloader_cfg (Dict[str, Any]): Dataloader config. - loss (loss.Loss): Loss functor. - name (str): Name of constraint. - """ - - def __init__( - self, - dataset: io.Dataset, - dataloader_cfg: Dict[str, Any], - loss: "loss.Loss", - name: str, - ): - self.data_loader = data.build_dataloader(dataset, dataloader_cfg) - self.data_iter = iter(self.data_loader) - self.loss = loss - self.name = name - - def __str__(self): - return ", ".join( - [ - self.__class__.__name__, - f"name = {self.name}", - f"input_keys = {self.input_keys}", - f"output_keys = {self.output_keys}", - f"output_expr = {self.output_expr}", - f"label_dict = {self.label_dict}", - f"loss = {self.loss}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Dict + +from paddle import io + +from ppsci import data + +if TYPE_CHECKING: + from ppsci import loss + + +class Constraint: + """Base class for constraint. + + Args: + dataset (io.Dataset): Dataset. + dataloader_cfg (Dict[str, Any]): Dataloader config. + loss (loss.Loss): Loss functor. + name (str): Name of constraint. + """ + + def __init__( + self, + dataset: io.Dataset, + dataloader_cfg: Dict[str, Any], + loss: "loss.Loss", + name: str, + ): + self.data_loader = data.build_dataloader(dataset, dataloader_cfg) + self.data_iter = iter(self.data_loader) + self.loss = loss + self.name = name + + def __str__(self): + return ", ".join( + [ + self.__class__.__name__, + f"name = {self.name}", + f"input_keys = {self.input_keys}", + f"output_keys = {self.output_keys}", + f"output_expr = {self.output_expr}", + f"label_dict = {self.label_dict}", + f"loss = {self.loss}", + ] + ) diff --git a/ppsci/constraint/boundary_constraint.py b/ppsci/constraint/boundary_constraint.py index 8ac30fcb41..f371a7ff79 100644 --- a/ppsci/constraint/boundary_constraint.py +++ b/ppsci/constraint/boundary_constraint.py @@ -1,163 +1,163 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Union - -import numpy as np -import sympy -from typing_extensions import Literal - -from ppsci import geometry -from ppsci.constraint import base -from ppsci.data import dataset - -if TYPE_CHECKING: - from ppsci import loss - - -class BoundaryConstraint(base.Constraint): - """Class for boundary constraint. - - Args: - output_expr (Dict[str, Callable]): Function in dict for computing output. - e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u - will be multiplied by model output v and the result will be named "u_mul_v". - label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing - label, which will be a reference value to participate in the loss calculation. - geom (geometry.Geometry): Geometry where data sampled from. - dataloader_cfg (Dict[str, Any]): Dataloader config. - loss (loss.Loss): Loss functor. - random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in - geometry. Defaults to "pseudo". - criteria (Optional[Callable]): Criteria for refining specified boundaries. - Defaults to None. - evenly (bool, optional): Whether to use evenly distribution sampling. - Defaults to False. - weight_dict (Optional[Dict[str, Union[float, Callable]]]): Define the weight of each - constraint variable. Defaults to None. - name (str, optional): Name of constraint object. Defaults to "BC". - - Examples: - >>> import ppsci - >>> rect = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> bc = ppsci.constraint.BoundaryConstraint( - ... {"u": lambda out: out["u"]}, - ... {"u": 0}, - ... rect, - ... { - ... "dataset": "IterableNamedArrayDataset", - ... "iters_per_epoch": 1, - ... "batch_size": 16, - ... }, - ... ppsci.loss.MSELoss("mean"), - ... name="BC", - ... 
) # doctest: +SKIP - """ - - def __init__( - self, - output_expr: Dict[str, Callable], - label_dict: Dict[str, Union[float, Callable]], - geom: geometry.Geometry, - dataloader_cfg: Dict[str, Any], - loss: "loss.Loss", - random: Literal["pseudo", "Halton", "LHS"] = "pseudo", - criteria: Optional[Callable] = None, - evenly: bool = False, - weight_dict: Optional[Dict[str, Union[float, Callable]]] = None, - name: str = "BC", - ): - self.label_dict = label_dict - self.input_keys = geom.dim_keys - self.output_keys = tuple(label_dict.keys()) - self.output_expr = { - k: v for k, v in output_expr.items() if k in self.output_keys - } - - if isinstance(criteria, str): - criteria = eval(criteria) - - # prepare input - input = geom.sample_boundary( - dataloader_cfg["batch_size"] * dataloader_cfg["iters_per_epoch"], - random, - criteria, - evenly, - ) - if "area" in input: - input["area"] *= dataloader_cfg["iters_per_epoch"] - - # prepare label - label = {} - for key, value in label_dict.items(): - if isinstance(value, (int, float)): - label[key] = np.full_like(next(iter(input.values())), value) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - sympy.symbols(geom.dim_keys), - value, - [{"amax": lambda xy, axis: np.maximum(xy[0], xy[1])}, "numpy"], - ) - label[key] = func( - **{k: v for k, v in input.items() if k in geom.dim_keys} - ) - elif callable(value): - func = value - label[key] = func(input) - if isinstance(label[key], (int, float)): - label[key] = np.full_like(next(iter(input.values())), label[key]) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # prepare weight - weight = None - if weight_dict is not None: - weight = {key: np.ones_like(next(iter(label.values()))) for key in label} - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - weight[key] = np.full_like(next(iter(label.values())), value) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - [sympy.Symbol(k) for k in geom.dim_keys], - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - weight[key] = func(**{k: input[k] for k in geom.dim_keys}) - elif callable(value): - func = value - weight[key] = func(input) - if isinstance(weight[key], (int, float)): - weight[key] = np.full_like( - next(iter(input.values())), weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # wrap input, label, weight into a dataset - if isinstance(dataloader_cfg["dataset"], str): - dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} - dataloader_cfg["dataset"].update( - {"input": input, "label": label, "weight": weight} - ) - _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) - - # construct dataloader with dataset and dataloader_cfg - super().__init__(_dataset, dataloader_cfg, loss, name) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Union + +import numpy as np +import sympy +from typing_extensions import Literal + +from ppsci import geometry +from ppsci.constraint import base +from ppsci.data import dataset + +if TYPE_CHECKING: + from ppsci import loss + + +class BoundaryConstraint(base.Constraint): + """Class for boundary constraint. + + Args: + output_expr (Dict[str, Callable]): Function in dict for computing output. + e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u + will be multiplied by model output v and the result will be named "u_mul_v". + label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing + label, which will be a reference value to participate in the loss calculation. + geom (geometry.Geometry): Geometry where data sampled from. + dataloader_cfg (Dict[str, Any]): Dataloader config. + loss (loss.Loss): Loss functor. + random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in + geometry. Defaults to "pseudo". + criteria (Optional[Callable]): Criteria for refining specified boundaries. + Defaults to None. + evenly (bool, optional): Whether to use evenly distribution sampling. + Defaults to False. + weight_dict (Optional[Dict[str, Union[float, Callable]]]): Define the weight of each + constraint variable. Defaults to None. + name (str, optional): Name of constraint object. Defaults to "BC". + + Examples: + >>> import ppsci + >>> rect = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> bc = ppsci.constraint.BoundaryConstraint( + ... {"u": lambda out: out["u"]}, + ... {"u": 0}, + ... rect, + ... { + ... "dataset": "IterableNamedArrayDataset", + ... "iters_per_epoch": 1, + ... "batch_size": 16, + ... }, + ... ppsci.loss.MSELoss("mean"), + ... name="BC", + ... 
) # doctest: +SKIP + """ + + def __init__( + self, + output_expr: Dict[str, Callable], + label_dict: Dict[str, Union[float, Callable]], + geom: geometry.Geometry, + dataloader_cfg: Dict[str, Any], + loss: "loss.Loss", + random: Literal["pseudo", "Halton", "LHS"] = "pseudo", + criteria: Optional[Callable] = None, + evenly: bool = False, + weight_dict: Optional[Dict[str, Union[float, Callable]]] = None, + name: str = "BC", + ): + self.label_dict = label_dict + self.input_keys = geom.dim_keys + self.output_keys = tuple(label_dict.keys()) + self.output_expr = { + k: v for k, v in output_expr.items() if k in self.output_keys + } + + if isinstance(criteria, str): + criteria = eval(criteria) + + # prepare input + input = geom.sample_boundary( + dataloader_cfg["batch_size"] * dataloader_cfg["iters_per_epoch"], + random, + criteria, + evenly, + ) + if "area" in input: + input["area"] *= dataloader_cfg["iters_per_epoch"] + + # prepare label + label = {} + for key, value in label_dict.items(): + if isinstance(value, (int, float)): + label[key] = np.full_like(next(iter(input.values())), value) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + sympy.symbols(geom.dim_keys), + value, + [{"amax": lambda xy, axis: np.maximum(xy[0], xy[1])}, "numpy"], + ) + label[key] = func( + **{k: v for k, v in input.items() if k in geom.dim_keys} + ) + elif callable(value): + func = value + label[key] = func(input) + if isinstance(label[key], (int, float)): + label[key] = np.full_like(next(iter(input.values())), label[key]) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # prepare weight + weight = None + if weight_dict is not None: + weight = {key: np.ones_like(next(iter(label.values()))) for key in label} + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + weight[key] = np.full_like(next(iter(label.values())), value) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + [sympy.Symbol(k) for k in geom.dim_keys], + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + weight[key] = func(**{k: input[k] for k in geom.dim_keys}) + elif callable(value): + func = value + weight[key] = func(input) + if isinstance(weight[key], (int, float)): + weight[key] = np.full_like( + next(iter(input.values())), weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # wrap input, label, weight into a dataset + if isinstance(dataloader_cfg["dataset"], str): + dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} + dataloader_cfg["dataset"].update( + {"input": input, "label": label, "weight": weight} + ) + _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) + + # construct dataloader with dataset and dataloader_cfg + super().__init__(_dataset, dataloader_cfg, loss, name) diff --git a/ppsci/constraint/initial_constraint.py b/ppsci/constraint/initial_constraint.py index 63ae320993..5614625110 100644 --- a/ppsci/constraint/initial_constraint.py +++ b/ppsci/constraint/initial_constraint.py @@ -1,172 +1,172 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
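Besides the constant label shown in the docstring example, label_dict values may be sympy expressions or callables, as handled in the label-preparation loop above. A hedged sketch (assuming Rectangle's dimension keys are ("x", "y")):

import sympy
import ppsci

rect = ppsci.geometry.Rectangle((0, 0), (1, 1))
x, y = sympy.symbols("x y")

bc = ppsci.constraint.BoundaryConstraint(
    {"u": lambda out: out["u"]},
    # A sympy expression over the geometry's dimension keys is lambdified
    # and evaluated on the sampled boundary points to build the label.
    {"u": sympy.sin(x) * sympy.sin(y)},
    rect,
    {
        "dataset": "IterableNamedArrayDataset",
        "iters_per_epoch": 1,
        "batch_size": 16,
    },
    ppsci.loss.MSELoss("mean"),
    name="BC",
)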
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Union - -import numpy as np -import sympy -from typing_extensions import Literal - -from ppsci import geometry -from ppsci.constraint import base -from ppsci.data import dataset - -if TYPE_CHECKING: - from ppsci import loss - - -class InitialConstraint(base.Constraint): - """Class for initial interior constraint. - - Args: - output_expr (Dict[str, Callable]): Function in dict for computing output. - e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u - will be multiplied by model output v and the result will be named "u_mul_v". - label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing - label, which will be a reference value to participate in the loss calculation. - geom (geometry.TimeXGeometry): Geometry where data sampled from. - dataloader_cfg (Dict[str, Any]): Dataloader config. - loss (loss.Loss): Loss functor. - random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in - geometry. Defaults to "pseudo". - criteria (Optional[Callable]): Criteria for refining specified boundaries. - Defaults to None. - evenly (bool, optional): Whether to use evenly distribution sampling. - Defaults to False. - weight_dict (Optional[Dict[str, Callable]]): Define the weight of each - constraint variable. Defaults to None. - compute_sdf_derivatives (Optional[bool]): Whether compute derivatives for SDF. - Defaults to False. - name (str, optional): Name of constraint object. Defaults to "IC". - - Examples: - >>> import ppsci - >>> rect = ppsci.geometry.TimeXGeometry( - ... ppsci.geometry.TimeDomain(0, 1), - ... ppsci.geometry.Rectangle((0, 0), (1, 1)), - ... ) - >>> ic = ppsci.constraint.InitialConstraint( - ... {"u": lambda out: out["u"]}, - ... {"u": 0}, - ... rect, - ... { - ... "dataset": "IterableNamedArrayDataset", - ... "iters_per_epoch": 1, - ... "batch_size": 16, - ... }, - ... ppsci.loss.MSELoss("mean"), - ... name="IC", - ... 
) # doctest: +SKIP - """ - - def __init__( - self, - output_expr: Dict[str, Callable], - label_dict: Dict[str, Union[float, Callable]], - geom: geometry.TimeXGeometry, - dataloader_cfg: Dict[str, Any], - loss: "loss.Loss", - random: Literal["pseudo", "Halton", "LHS"] = "pseudo", - criteria: Optional[Callable] = None, - evenly: bool = False, - weight_dict: Optional[Dict[str, Callable]] = None, - compute_sdf_derivatives: bool = False, - name: str = "IC", - ): - self.label_dict = label_dict - self.input_keys = geom.dim_keys - self.output_keys = tuple(label_dict.keys()) - self.output_expr = { - k: v for k, v in output_expr.items() if k in self.output_keys - } - - if isinstance(criteria, str): - criteria = eval(criteria) - - # prepare input - input = geom.sample_initial_interior( - dataloader_cfg["batch_size"] * dataloader_cfg["iters_per_epoch"], - random, - criteria, - evenly, - compute_sdf_derivatives, - ) - if "area" in input: - input["area"] *= dataloader_cfg["iters_per_epoch"] - - # prepare label - label = {} - for key, value in label_dict.items(): - if isinstance(value, (int, float)): - label[key] = np.full_like(next(iter(input.values())), value) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - sympy.symbols(geom.dim_keys), - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - label[key] = func( - **{k: v for k, v in input.items() if k in geom.dim_keys} - ) - elif callable(value): - func = value - label[key] = func(input) - if isinstance(label[key], (int, float)): - label[key] = np.full_like(next(iter(input.values())), label[key]) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # prepare weight - weight = None - if weight_dict is not None: - weight = {key: np.ones_like(next(iter(label.values()))) for key in label} - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - weight[key] = np.full_like(next(iter(label.values())), value) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - sympy.symbols(geom.dim_keys), - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - weight[key] = func( - **{k: v for k, v in input.items() if k in geom.dim_keys} - ) - elif callable(value): - func = value - weight[key] = func(input) - if isinstance(weight[key], (int, float)): - weight[key] = np.full_like( - next(iter(input.values())), weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # wrap input, label, weight into a dataset - if isinstance(dataloader_cfg["dataset"], str): - dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} - dataloader_cfg["dataset"].update( - {"input": input, "label": label, "weight": weight} - ) - _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) - - # construct dataloader with dataset and dataloader_cfg - super().__init__(_dataset, dataloader_cfg, loss, name) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Union + +import numpy as np +import sympy +from typing_extensions import Literal + +from ppsci import geometry +from ppsci.constraint import base +from ppsci.data import dataset + +if TYPE_CHECKING: + from ppsci import loss + + +class InitialConstraint(base.Constraint): + """Class for initial interior constraint. + + Args: + output_expr (Dict[str, Callable]): Function in dict for computing output. + e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u + will be multiplied by model output v and the result will be named "u_mul_v". + label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing + label, which will be a reference value to participate in the loss calculation. + geom (geometry.TimeXGeometry): Geometry where data sampled from. + dataloader_cfg (Dict[str, Any]): Dataloader config. + loss (loss.Loss): Loss functor. + random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in + geometry. Defaults to "pseudo". + criteria (Optional[Callable]): Criteria for refining specified boundaries. + Defaults to None. + evenly (bool, optional): Whether to use evenly distribution sampling. + Defaults to False. + weight_dict (Optional[Dict[str, Callable]]): Define the weight of each + constraint variable. Defaults to None. + compute_sdf_derivatives (Optional[bool]): Whether compute derivatives for SDF. + Defaults to False. + name (str, optional): Name of constraint object. Defaults to "IC". + + Examples: + >>> import ppsci + >>> rect = ppsci.geometry.TimeXGeometry( + ... ppsci.geometry.TimeDomain(0, 1), + ... ppsci.geometry.Rectangle((0, 0), (1, 1)), + ... ) + >>> ic = ppsci.constraint.InitialConstraint( + ... {"u": lambda out: out["u"]}, + ... {"u": 0}, + ... rect, + ... { + ... "dataset": "IterableNamedArrayDataset", + ... "iters_per_epoch": 1, + ... "batch_size": 16, + ... }, + ... ppsci.loss.MSELoss("mean"), + ... name="IC", + ... 
) # doctest: +SKIP + """ + + def __init__( + self, + output_expr: Dict[str, Callable], + label_dict: Dict[str, Union[float, Callable]], + geom: geometry.TimeXGeometry, + dataloader_cfg: Dict[str, Any], + loss: "loss.Loss", + random: Literal["pseudo", "Halton", "LHS"] = "pseudo", + criteria: Optional[Callable] = None, + evenly: bool = False, + weight_dict: Optional[Dict[str, Callable]] = None, + compute_sdf_derivatives: bool = False, + name: str = "IC", + ): + self.label_dict = label_dict + self.input_keys = geom.dim_keys + self.output_keys = tuple(label_dict.keys()) + self.output_expr = { + k: v for k, v in output_expr.items() if k in self.output_keys + } + + if isinstance(criteria, str): + criteria = eval(criteria) + + # prepare input + input = geom.sample_initial_interior( + dataloader_cfg["batch_size"] * dataloader_cfg["iters_per_epoch"], + random, + criteria, + evenly, + compute_sdf_derivatives, + ) + if "area" in input: + input["area"] *= dataloader_cfg["iters_per_epoch"] + + # prepare label + label = {} + for key, value in label_dict.items(): + if isinstance(value, (int, float)): + label[key] = np.full_like(next(iter(input.values())), value) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + sympy.symbols(geom.dim_keys), + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + label[key] = func( + **{k: v for k, v in input.items() if k in geom.dim_keys} + ) + elif callable(value): + func = value + label[key] = func(input) + if isinstance(label[key], (int, float)): + label[key] = np.full_like(next(iter(input.values())), label[key]) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # prepare weight + weight = None + if weight_dict is not None: + weight = {key: np.ones_like(next(iter(label.values()))) for key in label} + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + weight[key] = np.full_like(next(iter(label.values())), value) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + sympy.symbols(geom.dim_keys), + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + weight[key] = func( + **{k: v for k, v in input.items() if k in geom.dim_keys} + ) + elif callable(value): + func = value + weight[key] = func(input) + if isinstance(weight[key], (int, float)): + weight[key] = np.full_like( + next(iter(input.values())), weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # wrap input, label, weight into a dataset + if isinstance(dataloader_cfg["dataset"], str): + dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} + dataloader_cfg["dataset"].update( + {"input": input, "label": label, "weight": weight} + ) + _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) + + # construct dataloader with dataset and dataloader_cfg + super().__init__(_dataset, dataloader_cfg, loss, name) diff --git a/ppsci/constraint/integral_constraint.py b/ppsci/constraint/integral_constraint.py index 19f6f8a1fe..27fbc01cd3 100644 --- a/ppsci/constraint/integral_constraint.py +++ b/ppsci/constraint/integral_constraint.py @@ -1,178 +1,178 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional -from typing import Union - -import numpy as np -import paddle -import sympy -from typing_extensions import Literal - -from ppsci import geometry -from ppsci.constraint import base -from ppsci.data import dataset -from ppsci.utils import misc - -if TYPE_CHECKING: - from ppsci import loss - - -class IntegralConstraint(base.Constraint): - """Class for integral constraint. - - Args: - output_expr (Dict[str, Callable]): Function in dict for computing output. - e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u - will be multiplied by model output v and the result will be named "u_mul_v". - label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing - label, which will be a reference value to participate in the loss calculation. - geom (geometry.Geometry): Geometry where data sampled from. - dataloader_cfg (Dict[str, Any]): Dataloader config. - loss (loss.Loss): Loss functor. - random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in - geometry. Defaults to "pseudo". - criteria (Optional[Callable]): Criteria for refining specified boundaries. - Defaults to None. - weight_dict (Optional[Dict[str, Callable]]): Define the weight of each - constraint variable. Defaults to None. - name (str, optional): Name of constraint object. Defaults to "IgC". - - Examples: - >>> import ppsci - >>> rect = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> igc = ppsci.constraint.IntegralConstraint( - ... {"u": lambda out: out["u"]}, - ... {"u": 0}, - ... rect, - ... { - ... "dataset": "IterableNamedArrayDataset", - ... "iters_per_epoch": 1, - ... "batch_size": 16, - ... "integral_batch_size": 8, - ... }, - ... ppsci.loss.MSELoss("mean"), - ... name="IgC", - ... 
) # doctest: +SKIP - """ - - def __init__( - self, - output_expr: Dict[str, Callable], - label_dict: Dict[str, Union[float, Callable]], - geom: geometry.Geometry, - dataloader_cfg: Dict[str, Any], - loss: "loss.Loss", - random: Literal["pseudo", "Halton", "LHS"] = "pseudo", - criteria: Optional[Callable] = None, - weight_dict: Optional[Dict[str, Callable]] = None, - name: str = "IgC", - ): - self.label_dict = label_dict - self.input_keys = geom.dim_keys - self.output_keys = tuple(label_dict.keys()) - self.output_expr = { - k: v for k, v in output_expr.items() if k in self.output_keys - } - - if isinstance(criteria, str): - criteria = eval(criteria) - - # prepare input - input_list: List[Dict[str, np.ndarray]] = [] - for _ in range( - dataloader_cfg["batch_size"] * dataloader_cfg["iters_per_epoch"] - ): - input = geom.sample_boundary( - dataloader_cfg["integral_batch_size"], random, criteria - ) - input_list.append(input) - input = misc.stack_dict_list(input_list) - # shape of each input is [batch_size, integral_batch_size, ndim] - - # prepare label - # shape of each label is [batch_size, ndim] - label = {} - for key, value in label_dict.items(): - if isinstance(value, (int, float)): - label[key] = np.full( - (next(iter(input.values())).shape[0], 1), - value, - paddle.get_default_dtype(), - ) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - sympy.symbols(geom.dim_keys), - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - label[key] = func( - **{k: v for k, v in input.items() if k in geom.dim_keys} - ) - elif callable(value): - func = value - label[key] = func(input) - if isinstance(label[key], (int, float)): - label[key] = np.full( - (next(iter(input.values())).shape[0], 1), - label[key], - paddle.get_default_dtype(), - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # prepare weight - # shape of each weight is [batch_size, ndim] - weight = None - if weight_dict is not None: - weight = {key: np.ones_like(next(iter(label.values()))) for key in label} - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - weight[key] = np.full_like(next(iter(label.values())), value) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - sympy.symbols(geom.dim_keys), - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - weight[key] = func( - **{k: v for k, v in input.items() if k in geom.dim_keys} - ) - elif callable(value): - func = value - weight[key] = func(input) - if isinstance(weight[key], (int, float)): - weight[key] = np.full_like( - next(iter(input.values())), weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # wrap input, label, weight into a dataset - if isinstance(dataloader_cfg["dataset"], str): - dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} - dataloader_cfg["dataset"].update( - {"input": input, "label": label, "weight": weight} - ) - _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) - - # construct dataloader with dataset and dataloader_cfg - super().__init__(_dataset, dataloader_cfg, loss, name) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Union + +import numpy as np +import paddle +import sympy +from typing_extensions import Literal + +from ppsci import geometry +from ppsci.constraint import base +from ppsci.data import dataset +from ppsci.utils import misc + +if TYPE_CHECKING: + from ppsci import loss + + +class IntegralConstraint(base.Constraint): + """Class for integral constraint. + + Args: + output_expr (Dict[str, Callable]): Function in dict for computing output. + e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u + will be multiplied by model output v and the result will be named "u_mul_v". + label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing + label, which will be a reference value to participate in the loss calculation. + geom (geometry.Geometry): Geometry where data sampled from. + dataloader_cfg (Dict[str, Any]): Dataloader config. + loss (loss.Loss): Loss functor. + random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in + geometry. Defaults to "pseudo". + criteria (Optional[Callable]): Criteria for refining specified boundaries. + Defaults to None. + weight_dict (Optional[Dict[str, Callable]]): Define the weight of each + constraint variable. Defaults to None. + name (str, optional): Name of constraint object. Defaults to "IgC". + + Examples: + >>> import ppsci + >>> rect = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> igc = ppsci.constraint.IntegralConstraint( + ... {"u": lambda out: out["u"]}, + ... {"u": 0}, + ... rect, + ... { + ... "dataset": "IterableNamedArrayDataset", + ... "iters_per_epoch": 1, + ... "batch_size": 16, + ... "integral_batch_size": 8, + ... }, + ... ppsci.loss.MSELoss("mean"), + ... name="IgC", + ... 
) # doctest: +SKIP + """ + + def __init__( + self, + output_expr: Dict[str, Callable], + label_dict: Dict[str, Union[float, Callable]], + geom: geometry.Geometry, + dataloader_cfg: Dict[str, Any], + loss: "loss.Loss", + random: Literal["pseudo", "Halton", "LHS"] = "pseudo", + criteria: Optional[Callable] = None, + weight_dict: Optional[Dict[str, Callable]] = None, + name: str = "IgC", + ): + self.label_dict = label_dict + self.input_keys = geom.dim_keys + self.output_keys = tuple(label_dict.keys()) + self.output_expr = { + k: v for k, v in output_expr.items() if k in self.output_keys + } + + if isinstance(criteria, str): + criteria = eval(criteria) + + # prepare input + input_list: List[Dict[str, np.ndarray]] = [] + for _ in range( + dataloader_cfg["batch_size"] * dataloader_cfg["iters_per_epoch"] + ): + input = geom.sample_boundary( + dataloader_cfg["integral_batch_size"], random, criteria + ) + input_list.append(input) + input = misc.stack_dict_list(input_list) + # shape of each input is [batch_size, integral_batch_size, ndim] + + # prepare label + # shape of each label is [batch_size, ndim] + label = {} + for key, value in label_dict.items(): + if isinstance(value, (int, float)): + label[key] = np.full( + (next(iter(input.values())).shape[0], 1), + value, + paddle.get_default_dtype(), + ) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + sympy.symbols(geom.dim_keys), + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + label[key] = func( + **{k: v for k, v in input.items() if k in geom.dim_keys} + ) + elif callable(value): + func = value + label[key] = func(input) + if isinstance(label[key], (int, float)): + label[key] = np.full( + (next(iter(input.values())).shape[0], 1), + label[key], + paddle.get_default_dtype(), + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # prepare weight + # shape of each weight is [batch_size, ndim] + weight = None + if weight_dict is not None: + weight = {key: np.ones_like(next(iter(label.values()))) for key in label} + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + weight[key] = np.full_like(next(iter(label.values())), value) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + sympy.symbols(geom.dim_keys), + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + weight[key] = func( + **{k: v for k, v in input.items() if k in geom.dim_keys} + ) + elif callable(value): + func = value + weight[key] = func(input) + if isinstance(weight[key], (int, float)): + weight[key] = np.full_like( + next(iter(input.values())), weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # wrap input, label, weight into a dataset + if isinstance(dataloader_cfg["dataset"], str): + dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} + dataloader_cfg["dataset"].update( + {"input": input, "label": label, "weight": weight} + ) + _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) + + # construct dataloader with dataset and dataloader_cfg + super().__init__(_dataset, dataloader_cfg, loss, name) diff --git a/ppsci/constraint/interior_constraint.py b/ppsci/constraint/interior_constraint.py index 3c1eb7ed3f..7a126ae2f9 100644 --- a/ppsci/constraint/interior_constraint.py +++ b/ppsci/constraint/interior_constraint.py @@ -1,174 +1,174 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Union - -import numpy as np -import sympy -from typing_extensions import Literal - -from ppsci import geometry -from ppsci.constraint import base -from ppsci.data import dataset - -if TYPE_CHECKING: - from ppsci import loss - - -class InteriorConstraint(base.Constraint): - """Class for interior constraint. - - Args: - output_expr (Dict[str, Callable]): Function in dict for computing output. - e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u - will be multiplied by model output v and the result will be named "u_mul_v". - label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing - label, which will be a reference value to participate in the loss calculation. - geom (geometry.Geometry): Geometry where data sampled from. - dataloader_cfg (Dict[str, Any]): Dataloader config. - loss (loss.Loss): Loss functor. - random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in - geometry. Defaults to "pseudo". - criteria (Optional[Callable]): Criteria for refining specified boundaries. - Defaults to None. - evenly (bool, optional): Whether to use evenly distribution sampling. - Defaults to False. - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the - weight of each constraint variable. Defaults to None. - compute_sdf_derivatives (Optional[bool]): Whether compute derivatives for SDF. - Defaults to False. - name (str, optional): Name of constraint object. Defaults to "EQ". - - Examples: - >>> import ppsci - >>> rect = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> pde_constraint = ppsci.constraint.InteriorConstraint( - ... {"u": lambda out: out["u"]}, - ... {"u": 0}, - ... rect, - ... { - ... "dataset": "IterableNamedArrayDataset", - ... "iters_per_epoch": 1, - ... "batch_size": 16, - ... }, - ... ppsci.loss.MSELoss("mean"), - ... name="EQ", - ... 
) # doctest: +SKIP - """ - - def __init__( - self, - output_expr: Dict[str, Callable], - label_dict: Dict[str, Union[float, Callable]], - geom: geometry.Geometry, - dataloader_cfg: Dict[str, Any], - loss: "loss.Loss", - random: Literal["pseudo", "Halton", "LHS"] = "pseudo", - criteria: Optional[Callable] = None, - evenly: bool = False, - weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, - compute_sdf_derivatives: bool = False, - name: str = "EQ", - ): - self.label_dict = label_dict - self.input_keys = geom.dim_keys - self.output_keys = tuple(label_dict.keys()) - self.output_expr = { - k: v for k, v in output_expr.items() if k in self.output_keys - } - - if isinstance(criteria, str): - criteria = eval(criteria) - - # prepare input - input = geom.sample_interior( - dataloader_cfg["batch_size"] * dataloader_cfg["iters_per_epoch"], - random, - criteria, - evenly, - compute_sdf_derivatives, - ) - if "area" in input: - input["area"] *= dataloader_cfg["iters_per_epoch"] - - # prepare label - label = {} - for key, value in label_dict.items(): - if isinstance(value, (int, float)): - label[key] = np.full_like(next(iter(input.values())), value) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - sympy.symbols(geom.dim_keys), - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - label[key] = func( - **{k: v for k, v in input.items() if k in geom.dim_keys} - ) - elif callable(value): - func = value - label[key] = func(input) - if isinstance(label[key], (int, float)): - label[key] = np.full_like(next(iter(input.values())), label[key]) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # prepare weight - weight = None - if weight_dict is not None: - weight = {key: np.ones_like(next(iter(label.values()))) for key in label} - for key, value in weight_dict.items(): - if isinstance(value, str): - if value == "sdf": - weight[key] = input["sdf"] - else: - raise NotImplementedError(f"string {value} is invalid yet.") - elif isinstance(value, (int, float)): - weight[key] = np.full_like(next(iter(label.values())), float(value)) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - sympy.symbols(geom.dim_keys), - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - weight[key] = func( - **{k: v for k, v in input.items() if k in geom.dim_keys} - ) - elif callable(value): - func = value - weight[key] = func(input) - if isinstance(weight[key], (int, float)): - weight[key] = np.full_like( - next(iter(input.values())), weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # wrap input, label, weight into a dataset - if isinstance(dataloader_cfg["dataset"], str): - dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} - dataloader_cfg["dataset"].update( - {"input": input, "label": label, "weight": weight} - ) - _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) - - # construct dataloader with dataset and dataloader_cfg - super().__init__(_dataset, dataloader_cfg, loss, name) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Union + +import numpy as np +import sympy +from typing_extensions import Literal + +from ppsci import geometry +from ppsci.constraint import base +from ppsci.data import dataset + +if TYPE_CHECKING: + from ppsci import loss + + +class InteriorConstraint(base.Constraint): + """Class for interior constraint. + + Args: + output_expr (Dict[str, Callable]): Function in dict for computing output. + e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u + will be multiplied by model output v and the result will be named "u_mul_v". + label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing + label, which will be a reference value to participate in the loss calculation. + geom (geometry.Geometry): Geometry where data sampled from. + dataloader_cfg (Dict[str, Any]): Dataloader config. + loss (loss.Loss): Loss functor. + random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in + geometry. Defaults to "pseudo". + criteria (Optional[Callable]): Criteria for refining specified boundaries. + Defaults to None. + evenly (bool, optional): Whether to use evenly distribution sampling. + Defaults to False. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the + weight of each constraint variable. Defaults to None. + compute_sdf_derivatives (Optional[bool]): Whether compute derivatives for SDF. + Defaults to False. + name (str, optional): Name of constraint object. Defaults to "EQ". + + Examples: + >>> import ppsci + >>> rect = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> pde_constraint = ppsci.constraint.InteriorConstraint( + ... {"u": lambda out: out["u"]}, + ... {"u": 0}, + ... rect, + ... { + ... "dataset": "IterableNamedArrayDataset", + ... "iters_per_epoch": 1, + ... "batch_size": 16, + ... }, + ... ppsci.loss.MSELoss("mean"), + ... name="EQ", + ... 
) # doctest: +SKIP + """ + + def __init__( + self, + output_expr: Dict[str, Callable], + label_dict: Dict[str, Union[float, Callable]], + geom: geometry.Geometry, + dataloader_cfg: Dict[str, Any], + loss: "loss.Loss", + random: Literal["pseudo", "Halton", "LHS"] = "pseudo", + criteria: Optional[Callable] = None, + evenly: bool = False, + weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, + compute_sdf_derivatives: bool = False, + name: str = "EQ", + ): + self.label_dict = label_dict + self.input_keys = geom.dim_keys + self.output_keys = tuple(label_dict.keys()) + self.output_expr = { + k: v for k, v in output_expr.items() if k in self.output_keys + } + + if isinstance(criteria, str): + criteria = eval(criteria) + + # prepare input + input = geom.sample_interior( + dataloader_cfg["batch_size"] * dataloader_cfg["iters_per_epoch"], + random, + criteria, + evenly, + compute_sdf_derivatives, + ) + if "area" in input: + input["area"] *= dataloader_cfg["iters_per_epoch"] + + # prepare label + label = {} + for key, value in label_dict.items(): + if isinstance(value, (int, float)): + label[key] = np.full_like(next(iter(input.values())), value) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + sympy.symbols(geom.dim_keys), + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + label[key] = func( + **{k: v for k, v in input.items() if k in geom.dim_keys} + ) + elif callable(value): + func = value + label[key] = func(input) + if isinstance(label[key], (int, float)): + label[key] = np.full_like(next(iter(input.values())), label[key]) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # prepare weight + weight = None + if weight_dict is not None: + weight = {key: np.ones_like(next(iter(label.values()))) for key in label} + for key, value in weight_dict.items(): + if isinstance(value, str): + if value == "sdf": + weight[key] = input["sdf"] + else: + raise NotImplementedError(f"string {value} is invalid yet.") + elif isinstance(value, (int, float)): + weight[key] = np.full_like(next(iter(label.values())), float(value)) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + sympy.symbols(geom.dim_keys), + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + weight[key] = func( + **{k: v for k, v in input.items() if k in geom.dim_keys} + ) + elif callable(value): + func = value + weight[key] = func(input) + if isinstance(weight[key], (int, float)): + weight[key] = np.full_like( + next(iter(input.values())), weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # wrap input, label, weight into a dataset + if isinstance(dataloader_cfg["dataset"], str): + dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} + dataloader_cfg["dataset"].update( + {"input": input, "label": label, "weight": weight} + ) + _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) + + # construct dataloader with dataset and dataloader_cfg + super().__init__(_dataset, dataloader_cfg, loss, name) diff --git a/ppsci/constraint/periodic_constraint.py b/ppsci/constraint/periodic_constraint.py index cb5fc1a332..a3765bcc90 100644 --- a/ppsci/constraint/periodic_constraint.py +++ b/ppsci/constraint/periodic_constraint.py @@ -1,169 +1,169 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
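Beyond the plain-float label shown in the doctest, label_dict and weight_dict also accept sympy expressions and constant floats, as handled by the branches above. A hedged sketch exercising those two branches is given below; it assumes the same public API as the doctest and is not executed here.

import sympy
import ppsci

x, y = sympy.symbols("x y")
rect = ppsci.geometry.Rectangle((0, 0), (1, 1))
pde_constraint = ppsci.constraint.InteriorConstraint(
    {"u": lambda out: out["u"]},
    {"u": sympy.sin(sympy.pi * x) * sympy.sin(sympy.pi * y)},  # sympy label branch
    rect,
    {
        "dataset": "IterableNamedArrayDataset",
        "iters_per_epoch": 1,
        "batch_size": 16,
    },
    ppsci.loss.MSELoss("mean"),
    weight_dict={"u": 10.0},  # constant per-point weight branch
    name="EQ_weighted",
)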
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Union - -import numpy as np -import paddle -import sympy -from typing_extensions import Literal - -from ppsci import geometry -from ppsci.constraint import base -from ppsci.data import dataset - -if TYPE_CHECKING: - from ppsci import loss - - -class PeriodicConstraint(base.Constraint): - """Class for periodic constraint. - - Args: - output_expr (Dict[str, Callable]): Function in dict for computing output. - e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u - will be multiplied by model output v and the result will be named "u_mul_v". - label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing - label, which will be a reference value to participate in the loss calculation. - geom (geometry.Geometry): Geometry where data sampled from. - dataloader_cfg (Dict[str, Any]): Dataloader config. - periodic_key (str): Name of dimension which periodic constraint applied to. - loss (loss.Loss): Loss functor. - random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in - geometry. Defaults to "pseudo". - criteria (Optional[Callable]): Criteria for refining specified boundaries. - Defaults to None. - evenly (bool, optional): Whether to use evenly distribution sampling. - Defaults to False. - weight_dict (Optional[Dict[str, Callable]]): Define the weight of each - constraint variable. Defaults to None. - name (str, optional): Name of constraint object. Defaults to "PeriodicBC". 
- """ - - def __init__( - self, - output_expr: Dict[str, Callable], - label_dict: Dict[str, Union[float, Callable]], - geom: geometry.Geometry, - periodic_key: str, - dataloader_cfg: Dict[str, Any], - loss: "loss.Loss", - random: Literal["pseudo", "Halton", "LHS"] = "pseudo", - criteria: Optional[Callable] = None, - evenly: bool = False, - weight_dict: Optional[Dict[str, Callable]] = None, - name: str = "PeriodicBC", - ): - self.input_keys = geom.dim_keys - self.output_keys = tuple(output_expr.keys()) - self.output_expr = { - k: v for k, v in output_expr.items() if k in self.output_keys - } - - if isinstance(criteria, str): - criteria = eval(criteria) - - if dataloader_cfg["batch_size"] % 2 > 0: - raise ValueError( - f"batch_size({dataloader_cfg['sampler']['batch_size']}) " - "should be positive and even when using PeriodicConstraint" - ) - if dataloader_cfg.get("shuffle", False): - raise ValueError( - f"shuffle({dataloader_cfg['sampler']['batch_size']}) " - "should be False when using PeriodicConstraint" - ) - - # prepare input - _bs_half = dataloader_cfg["batch_size"] // 2 - input = geom.sample_boundary( - _bs_half * dataloader_cfg["iters_per_epoch"], - random, - criteria, - evenly, - ) - if "area" in input: - input["area"] *= dataloader_cfg["iters_per_epoch"] - - input_periodic = geom.periodic_point( - input, - geom.geometry.dim_keys.index(periodic_key) - if isinstance(geom, geometry.TimeXGeometry) - else geom.dim_keys.index(periodic_key), - ) - # concatenate original data next to periodic data, i.e. - # [orignal1, periodic1, orignal2, periodic2, ..., orignalN, periodicN] - mixed_input = {} - for key in input: - mixed_input[key] = [] - for iter_id in range(dataloader_cfg["iters_per_epoch"]): - mixed_input[key].append( - input[key][iter_id * _bs_half : (iter_id + 1) * _bs_half] - ) - mixed_input[key].append( - input_periodic[key][iter_id * _bs_half : (iter_id + 1) * _bs_half] - ) - mixed_input[key] = np.vstack(mixed_input[key]) - - # prepare label, keep label the same shape as input_periodic - label = {} - for key, value in label_dict.items(): - # set all label's to zero for dummy data. 
- label[key] = np.full( - (next(iter(mixed_input.values())).shape[0], 1), - 0, - paddle.get_default_dtype(), - ) - - # # prepare weight, keep weight the same shape as input_periodic - weight = None - if weight_dict is not None: - weight = {key: np.ones_like(next(iter(label.values()))) for key in label} - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - weight[key] = np.full_like(next(iter(label.values())), value) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - [sympy.Symbol(k) for k in geom.dim_keys], - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - weight[key] = func(**{k: mixed_input[k] for k in geom.dim_keys}) - elif callable(value): - func = value - weight[key] = func(mixed_input) - if isinstance(weight[key], (int, float)): - weight[key] = np.full_like( - next(iter(mixed_input.values())), weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - # wrap input, label, weight into a dataset - if isinstance(dataloader_cfg["dataset"], str): - dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} - dataloader_cfg["dataset"].update( - {"input": mixed_input, "label": label, "weight": weight} - ) - _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) - - # construct dataloader with dataset and dataloader_cfg - super().__init__(_dataset, dataloader_cfg, loss, name) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Union + +import numpy as np +import paddle +import sympy +from typing_extensions import Literal + +from ppsci import geometry +from ppsci.constraint import base +from ppsci.data import dataset + +if TYPE_CHECKING: + from ppsci import loss + + +class PeriodicConstraint(base.Constraint): + """Class for periodic constraint. + + Args: + output_expr (Dict[str, Callable]): Function in dict for computing output. + e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u + will be multiplied by model output v and the result will be named "u_mul_v". + label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing + label, which will be a reference value to participate in the loss calculation. + geom (geometry.Geometry): Geometry where data sampled from. + dataloader_cfg (Dict[str, Any]): Dataloader config. + periodic_key (str): Name of dimension which periodic constraint applied to. + loss (loss.Loss): Loss functor. + random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in + geometry. Defaults to "pseudo". + criteria (Optional[Callable]): Criteria for refining specified boundaries. + Defaults to None. + evenly (bool, optional): Whether to use evenly distribution sampling. + Defaults to False. 
+ weight_dict (Optional[Dict[str, Callable]]): Define the weight of each + constraint variable. Defaults to None. + name (str, optional): Name of constraint object. Defaults to "PeriodicBC". + """ + + def __init__( + self, + output_expr: Dict[str, Callable], + label_dict: Dict[str, Union[float, Callable]], + geom: geometry.Geometry, + periodic_key: str, + dataloader_cfg: Dict[str, Any], + loss: "loss.Loss", + random: Literal["pseudo", "Halton", "LHS"] = "pseudo", + criteria: Optional[Callable] = None, + evenly: bool = False, + weight_dict: Optional[Dict[str, Callable]] = None, + name: str = "PeriodicBC", + ): + self.input_keys = geom.dim_keys + self.output_keys = tuple(output_expr.keys()) + self.output_expr = { + k: v for k, v in output_expr.items() if k in self.output_keys + } + + if isinstance(criteria, str): + criteria = eval(criteria) + + if dataloader_cfg["batch_size"] % 2 > 0: + raise ValueError( + f"batch_size({dataloader_cfg['sampler']['batch_size']}) " + "should be positive and even when using PeriodicConstraint" + ) + if dataloader_cfg.get("shuffle", False): + raise ValueError( + f"shuffle({dataloader_cfg['sampler']['batch_size']}) " + "should be False when using PeriodicConstraint" + ) + + # prepare input + _bs_half = dataloader_cfg["batch_size"] // 2 + input = geom.sample_boundary( + _bs_half * dataloader_cfg["iters_per_epoch"], + random, + criteria, + evenly, + ) + if "area" in input: + input["area"] *= dataloader_cfg["iters_per_epoch"] + + input_periodic = geom.periodic_point( + input, + geom.geometry.dim_keys.index(periodic_key) + if isinstance(geom, geometry.TimeXGeometry) + else geom.dim_keys.index(periodic_key), + ) + # concatenate original data next to periodic data, i.e. + # [orignal1, periodic1, orignal2, periodic2, ..., orignalN, periodicN] + mixed_input = {} + for key in input: + mixed_input[key] = [] + for iter_id in range(dataloader_cfg["iters_per_epoch"]): + mixed_input[key].append( + input[key][iter_id * _bs_half : (iter_id + 1) * _bs_half] + ) + mixed_input[key].append( + input_periodic[key][iter_id * _bs_half : (iter_id + 1) * _bs_half] + ) + mixed_input[key] = np.vstack(mixed_input[key]) + + # prepare label, keep label the same shape as input_periodic + label = {} + for key, value in label_dict.items(): + # set all label's to zero for dummy data. 
+ label[key] = np.full( + (next(iter(mixed_input.values())).shape[0], 1), + 0, + paddle.get_default_dtype(), + ) + + # # prepare weight, keep weight the same shape as input_periodic + weight = None + if weight_dict is not None: + weight = {key: np.ones_like(next(iter(label.values()))) for key in label} + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + weight[key] = np.full_like(next(iter(label.values())), value) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + [sympy.Symbol(k) for k in geom.dim_keys], + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + weight[key] = func(**{k: mixed_input[k] for k in geom.dim_keys}) + elif callable(value): + func = value + weight[key] = func(mixed_input) + if isinstance(weight[key], (int, float)): + weight[key] = np.full_like( + next(iter(mixed_input.values())), weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + # wrap input, label, weight into a dataset + if isinstance(dataloader_cfg["dataset"], str): + dataloader_cfg["dataset"] = {"name": dataloader_cfg["dataset"]} + dataloader_cfg["dataset"].update( + {"input": mixed_input, "label": label, "weight": weight} + ) + _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) + + # construct dataloader with dataset and dataloader_cfg + super().__init__(_dataset, dataloader_cfg, loss, name) diff --git a/ppsci/constraint/supervised_constraint.py b/ppsci/constraint/supervised_constraint.py index 84b8816222..cc4b686d07 100644 --- a/ppsci/constraint/supervised_constraint.py +++ b/ppsci/constraint/supervised_constraint.py @@ -1,92 +1,92 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import Callable -from typing import Dict -from typing import Optional - -from ppsci.constraint import base -from ppsci.data import dataset - -if TYPE_CHECKING: - from ppsci import loss - - -class SupervisedConstraint(base.Constraint): - """Class for supervised constraint. - - Args: - dataloader_cfg (Dict[str, Any]): Dataloader config. - loss (loss.Loss): Loss functor. - output_expr (Optional[Dict[str, Callable]]): List of label expression. - Defaults to None. - name (str, optional): Name of constraint object. Defaults to "Sup". - - Examples: - >>> import ppsci - >>> bc_sup = ppsci.constraint.SupervisedConstraint( - ... { - ... "dataset": { - ... "name": "IterableCSVDataset", - ... "file_path": "/path/to/file.csv", - ... "input_keys": ("x", "y"), - ... "label_keys": ("u", "v"), - ... }, - ... }, - ... ppsci.loss.MSELoss("mean"), - ... name="bc_sup", - ... 
) # doctest: +SKIP - """ - - def __init__( - self, - dataloader_cfg: Dict[str, Any], - loss: "loss.Loss", - output_expr: Optional[Dict[str, Callable]] = None, - name: str = "Sup", - ): - # build dataset - _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) - - self.input_keys = _dataset.input_keys - self.output_keys = ( - tuple(output_expr.keys()) - if output_expr is not None - else _dataset.label_keys - ) - - self.output_expr = output_expr - if self.output_expr is None: - self.output_expr = { - key: (lambda out, k=key: out[k]) for key in self.output_keys - } - - # construct dataloader with dataset and dataloader_cfg - super().__init__(_dataset, dataloader_cfg, loss, name) - - def __str__(self): - return ", ".join( - [ - self.__class__.__name__, - f"name = {self.name}", - f"input_keys = {self.input_keys}", - f"output_keys = {self.output_keys}", - f"output_expr = {self.output_expr}", - f"loss = {self.loss}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable +from typing import Dict +from typing import Optional + +from ppsci.constraint import base +from ppsci.data import dataset + +if TYPE_CHECKING: + from ppsci import loss + + +class SupervisedConstraint(base.Constraint): + """Class for supervised constraint. + + Args: + dataloader_cfg (Dict[str, Any]): Dataloader config. + loss (loss.Loss): Loss functor. + output_expr (Optional[Dict[str, Callable]]): List of label expression. + Defaults to None. + name (str, optional): Name of constraint object. Defaults to "Sup". + + Examples: + >>> import ppsci + >>> bc_sup = ppsci.constraint.SupervisedConstraint( + ... { + ... "dataset": { + ... "name": "IterableCSVDataset", + ... "file_path": "/path/to/file.csv", + ... "input_keys": ("x", "y"), + ... "label_keys": ("u", "v"), + ... }, + ... }, + ... ppsci.loss.MSELoss("mean"), + ... name="bc_sup", + ... 
) # doctest: +SKIP + """ + + def __init__( + self, + dataloader_cfg: Dict[str, Any], + loss: "loss.Loss", + output_expr: Optional[Dict[str, Callable]] = None, + name: str = "Sup", + ): + # build dataset + _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) + + self.input_keys = _dataset.input_keys + self.output_keys = ( + tuple(output_expr.keys()) + if output_expr is not None + else _dataset.label_keys + ) + + self.output_expr = output_expr + if self.output_expr is None: + self.output_expr = { + key: (lambda out, k=key: out[k]) for key in self.output_keys + } + + # construct dataloader with dataset and dataloader_cfg + super().__init__(_dataset, dataloader_cfg, loss, name) + + def __str__(self): + return ", ".join( + [ + self.__class__.__name__, + f"name = {self.name}", + f"input_keys = {self.input_keys}", + f"output_keys = {self.output_keys}", + f"output_expr = {self.output_expr}", + f"loss = {self.loss}", + ] + ) diff --git a/ppsci/data/__init__.py b/ppsci/data/__init__.py index 9b8d91a749..c5db269a55 100644 --- a/ppsci/data/__init__.py +++ b/ppsci/data/__init__.py @@ -1,209 +1,209 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import random -from functools import partial -from typing import Callable -from typing import Optional - -import numpy as np -import paddle.distributed as dist -from paddle import device -from paddle import io - -from ppsci.data import dataloader -from ppsci.data import dataset -from ppsci.data import process -from ppsci.data.process import batch_transform -from ppsci.data.process import transform -from ppsci.utils import logger - -__all__ = [ - "dataset", - "process", - "dataloader", - "build_dataloader", - "transform", - "batch_transform", -] - - -def worker_init_fn(worker_id: int, num_workers: int, rank: int, base_seed: int) -> None: - """Callback function on each worker subprocess after seeding and before data loading. - - Args: - worker_id (int): Worker id in [0, num_workers - 1]. - num_workers (int): Number of subprocesses to use for data loading. - rank (int): Rank of process in distributed environment. If in non-distributed - environment, it is a constant number `0`. - base_seed (int): Base random seed. - """ - # The seed of each worker equals to: user_seed + num_worker * rank + worker_id - worker_seed = base_seed + num_workers * rank + worker_id - np.random.seed(worker_seed) - random.seed(worker_seed) - - -def build_dataloader(_dataset, cfg): - world_size = dist.get_world_size() - # just return IterableDataset as dataloader - if isinstance(_dataset, io.IterableDataset): - if world_size > 1: - raise ValueError( - f"world_size({world_size}) should be 1 when using IterableDataset." 
- ) - return _dataset - - cfg = copy.deepcopy(cfg) - - # build sampler - sampler_cfg = cfg.pop("sampler", None) - if sampler_cfg is not None: - batch_sampler_cls = sampler_cfg.pop("name") - - if batch_sampler_cls == "BatchSampler": - if world_size > 1: - batch_sampler_cls = "DistributedBatchSampler" - logger.warning( - f"Automatically use 'DistributedBatchSampler' instead of " - f"'BatchSampler' when world_size({world_size}) > 1." - ) - - sampler_cfg["batch_size"] = cfg["batch_size"] - batch_sampler = getattr(io, batch_sampler_cls)(_dataset, **sampler_cfg) - else: - batch_sampler_cls = "BatchSampler" - if world_size > 1: - batch_sampler_cls = "DistributedBatchSampler" - logger.warning( - f"Automatically use 'DistributedBatchSampler' instead of " - f"'BatchSampler' when world_size({world_size}) > 1." - ) - batch_sampler = getattr(io, batch_sampler_cls)( - _dataset, - batch_size=cfg["batch_size"], - shuffle=False, - drop_last=False, - ) - logger.message( - "'shuffle' and 'drop_last' are both set to False in default as sampler config is not specified." - ) - - # build collate_fn if specified - batch_transforms_cfg = cfg.pop("batch_transforms", None) - collate_fn: Optional[Callable] = cfg.pop("collate_fn", None) - if isinstance(batch_transforms_cfg, (list, tuple)): - collate_fn = batch_transform.build_batch_transforms( - batch_transforms_cfg, collate_fn - ) - - # build init function - _DEFAULT_NUM_WORKERS = 1 - _DEFAULT_SEED = 42 - init_fn = partial( - worker_init_fn, - num_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS), - rank=dist.get_rank(), - base_seed=cfg.get("seed", _DEFAULT_SEED), - ) - - # build dataloader - if getattr(_dataset, "use_pgl", False): - # Use special dataloader from "Paddle Graph Learning" toolkit. - try: - from pgl.utils import data as pgl_data - except ModuleNotFoundError as e: - logger.error("Please install pgl with `pip install pgl`.") - raise ModuleNotFoundError(str(e)) - - if collate_fn is None: - collate_fn = batch_transform.default_collate_fn - dataloader_ = pgl_data.Dataloader( - dataset=_dataset, - batch_size=cfg["batch_size"], - drop_last=sampler_cfg.get("drop_last", False), - shuffle=sampler_cfg.get("shuffle", False), - num_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS), - collate_fn=collate_fn, - ) - elif getattr(_dataset, "use_graph_grid_mesh", False): - # Use special dataloader `GridMeshAtmosphericDataset`. - - if collate_fn is None: - collate_fn = batch_transform.default_collate_fn - dataloader_ = io.DataLoader( - dataset=_dataset, - places=device.get_device(), - batch_sampler=batch_sampler, - collate_fn=collate_fn, - num_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS), - use_shared_memory=cfg.get("use_shared_memory", False), - worker_init_fn=init_fn, - ) - else: - if ( - cfg.get("auto_collation", not getattr(_dataset, "batch_index", False)) - is False - ): - if "transforms" in cfg["dataset"] and "auto_collation" not in cfg: - logger.warning( - "'transforms' and batch indexing(auto_collation=False) are both " - "enabled. If you do want to apply transforms to the batch samples, " - "please explicitly set 'auto_collation' to False in dataloader_cfg;" - " otherwise, the 'transforms' will be retained, but batch indexing " - "will be disabled." - ) - else: - # 1. wrap batch_sampler again into BatchSampler for disabling auto collation, - # which can speed up the process of batch samples indexing from dataset. 
See - # details at: https://discuss.pytorch.org/t/efficiency-of-dataloader-and-collate-for-large-array-like-datasets/59569/8 - batch_sampler = io.BatchSampler(sampler=batch_sampler, batch_size=1) - if collate_fn is not None: - raise NotImplementedError( - "Detected collate_fn is not None for 'batch_transforms' might " - "be specified in 'dataloader_cfg', which is not supported yet " - "with 'auto_collation' is False at the same time" - ) - # 2. disable auto collation by given identity collate_fn which return the first - # (also the only) batch data in batch list, or there will be a redundant - # axis at the first dimension returned by dataloader. This step is necessary - # because paddle do not support 'sampler' as instantiation argument of 'io.DataLoader' - collate_fn = lambda batch: batch[0] # noqa: E731 - _DEFAULT_NUM_WORKERS = 0 - logger.info( - "Auto collation is disabled and set num_workers to " - f"{_DEFAULT_NUM_WORKERS} to speed up batch sampling." - ) - - dataloader_ = io.DataLoader( - dataset=_dataset, - places=device.get_device(), - batch_sampler=batch_sampler, - collate_fn=collate_fn, - num_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS), - use_shared_memory=cfg.get("use_shared_memory", False), - worker_init_fn=init_fn, - # TODO: Do not enable 'persistent_workers' below for - # 'IndexError: pop from empty list ...' will be raised in certain cases - # persistent_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS) > 0, - ) - - if len(dataloader_) == 0: - raise ValueError( - f"batch_size({sampler_cfg['batch_size']}) should not bigger than number of " - f"samples({len(_dataset)}) when drop_last is {sampler_cfg.get('drop_last', False)}." - ) - - return dataloader_ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import random +from functools import partial +from typing import Callable +from typing import Optional + +import numpy as np +import paddle.distributed as dist +from paddle import device +from paddle import io + +from ppsci.data import dataloader +from ppsci.data import dataset +from ppsci.data import process +from ppsci.data.process import batch_transform +from ppsci.data.process import transform +from ppsci.utils import logger + +__all__ = [ + "dataset", + "process", + "dataloader", + "build_dataloader", + "transform", + "batch_transform", +] + + +def worker_init_fn(worker_id: int, num_workers: int, rank: int, base_seed: int) -> None: + """Callback function on each worker subprocess after seeding and before data loading. + + Args: + worker_id (int): Worker id in [0, num_workers - 1]. + num_workers (int): Number of subprocesses to use for data loading. + rank (int): Rank of process in distributed environment. If in non-distributed + environment, it is a constant number `0`. + base_seed (int): Base random seed. 
+ """ + # The seed of each worker equals to: user_seed + num_worker * rank + worker_id + worker_seed = base_seed + num_workers * rank + worker_id + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def build_dataloader(_dataset, cfg): + world_size = dist.get_world_size() + # just return IterableDataset as dataloader + if isinstance(_dataset, io.IterableDataset): + if world_size > 1: + raise ValueError( + f"world_size({world_size}) should be 1 when using IterableDataset." + ) + return _dataset + + cfg = copy.deepcopy(cfg) + + # build sampler + sampler_cfg = cfg.pop("sampler", None) + if sampler_cfg is not None: + batch_sampler_cls = sampler_cfg.pop("name") + + if batch_sampler_cls == "BatchSampler": + if world_size > 1: + batch_sampler_cls = "DistributedBatchSampler" + logger.warning( + f"Automatically use 'DistributedBatchSampler' instead of " + f"'BatchSampler' when world_size({world_size}) > 1." + ) + + sampler_cfg["batch_size"] = cfg["batch_size"] + batch_sampler = getattr(io, batch_sampler_cls)(_dataset, **sampler_cfg) + else: + batch_sampler_cls = "BatchSampler" + if world_size > 1: + batch_sampler_cls = "DistributedBatchSampler" + logger.warning( + f"Automatically use 'DistributedBatchSampler' instead of " + f"'BatchSampler' when world_size({world_size}) > 1." + ) + batch_sampler = getattr(io, batch_sampler_cls)( + _dataset, + batch_size=cfg["batch_size"], + shuffle=False, + drop_last=False, + ) + logger.message( + "'shuffle' and 'drop_last' are both set to False in default as sampler config is not specified." + ) + + # build collate_fn if specified + batch_transforms_cfg = cfg.pop("batch_transforms", None) + collate_fn: Optional[Callable] = cfg.pop("collate_fn", None) + if isinstance(batch_transforms_cfg, (list, tuple)): + collate_fn = batch_transform.build_batch_transforms( + batch_transforms_cfg, collate_fn + ) + + # build init function + _DEFAULT_NUM_WORKERS = 1 + _DEFAULT_SEED = 42 + init_fn = partial( + worker_init_fn, + num_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS), + rank=dist.get_rank(), + base_seed=cfg.get("seed", _DEFAULT_SEED), + ) + + # build dataloader + if getattr(_dataset, "use_pgl", False): + # Use special dataloader from "Paddle Graph Learning" toolkit. + try: + from pgl.utils import data as pgl_data + except ModuleNotFoundError as e: + logger.error("Please install pgl with `pip install pgl`.") + raise ModuleNotFoundError(str(e)) + + if collate_fn is None: + collate_fn = batch_transform.default_collate_fn + dataloader_ = pgl_data.Dataloader( + dataset=_dataset, + batch_size=cfg["batch_size"], + drop_last=sampler_cfg.get("drop_last", False), + shuffle=sampler_cfg.get("shuffle", False), + num_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS), + collate_fn=collate_fn, + ) + elif getattr(_dataset, "use_graph_grid_mesh", False): + # Use special dataloader `GridMeshAtmosphericDataset`. + + if collate_fn is None: + collate_fn = batch_transform.default_collate_fn + dataloader_ = io.DataLoader( + dataset=_dataset, + places=device.get_device(), + batch_sampler=batch_sampler, + collate_fn=collate_fn, + num_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS), + use_shared_memory=cfg.get("use_shared_memory", False), + worker_init_fn=init_fn, + ) + else: + if ( + cfg.get("auto_collation", not getattr(_dataset, "batch_index", False)) + is False + ): + if "transforms" in cfg["dataset"] and "auto_collation" not in cfg: + logger.warning( + "'transforms' and batch indexing(auto_collation=False) are both " + "enabled. 
If you do want to apply transforms to the batch samples, " + "please explicitly set 'auto_collation' to False in dataloader_cfg;" + " otherwise, the 'transforms' will be retained, but batch indexing " + "will be disabled." + ) + else: + # 1. wrap batch_sampler again into BatchSampler for disabling auto collation, + # which can speed up the process of batch samples indexing from dataset. See + # details at: https://discuss.pytorch.org/t/efficiency-of-dataloader-and-collate-for-large-array-like-datasets/59569/8 + batch_sampler = io.BatchSampler(sampler=batch_sampler, batch_size=1) + if collate_fn is not None: + raise NotImplementedError( + "Detected collate_fn is not None for 'batch_transforms' might " + "be specified in 'dataloader_cfg', which is not supported yet " + "with 'auto_collation' is False at the same time" + ) + # 2. disable auto collation by given identity collate_fn which return the first + # (also the only) batch data in batch list, or there will be a redundant + # axis at the first dimension returned by dataloader. This step is necessary + # because paddle do not support 'sampler' as instantiation argument of 'io.DataLoader' + collate_fn = lambda batch: batch[0] # noqa: E731 + _DEFAULT_NUM_WORKERS = 0 + logger.info( + "Auto collation is disabled and set num_workers to " + f"{_DEFAULT_NUM_WORKERS} to speed up batch sampling." + ) + + dataloader_ = io.DataLoader( + dataset=_dataset, + places=device.get_device(), + batch_sampler=batch_sampler, + collate_fn=collate_fn, + num_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS), + use_shared_memory=cfg.get("use_shared_memory", False), + worker_init_fn=init_fn, + # TODO: Do not enable 'persistent_workers' below for + # 'IndexError: pop from empty list ...' will be raised in certain cases + # persistent_workers=cfg.get("num_workers", _DEFAULT_NUM_WORKERS) > 0, + ) + + if len(dataloader_) == 0: + raise ValueError( + f"batch_size({sampler_cfg['batch_size']}) should not bigger than number of " + f"samples({len(_dataset)}) when drop_last is {sampler_cfg.get('drop_last', False)}." + ) + + return dataloader_ diff --git a/ppsci/data/dataloader.py b/ppsci/data/dataloader.py index 4c01da873e..ff7868133a 100644 --- a/ppsci/data/dataloader.py +++ b/ppsci/data/dataloader.py @@ -1,47 +1,47 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Union - -from paddle import io - - -class InfiniteDataLoader: - """A wrapper for infinite dataloader. - - Args: - dataloader (Union[io.DataLoader, io.IterableDataset]): A finite and iterable loader or iterable dataset to be wrapped. 
- """ - - def __init__(self, dataloader: Union[io.DataLoader, io.IterableDataset]): - self.dataloader = dataloader - if isinstance(dataloader, io.DataLoader): - self.dataset = dataloader.dataset - elif isinstance(dataloader, io.IterableDataset): - self.dataset = dataloader - else: - raise TypeError( - f"dataloader should be io.DataLoader or io.IterableDataset, but got {type(dataloader)}" - ) - - def __iter__(self): - while True: - dataloader_iter = iter(self.dataloader) - for batch in dataloader_iter: - yield batch - - def __len__(self): - return len(self.dataloader) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Union + +from paddle import io + + +class InfiniteDataLoader: + """A wrapper for infinite dataloader. + + Args: + dataloader (Union[io.DataLoader, io.IterableDataset]): A finite and iterable loader or iterable dataset to be wrapped. + """ + + def __init__(self, dataloader: Union[io.DataLoader, io.IterableDataset]): + self.dataloader = dataloader + if isinstance(dataloader, io.DataLoader): + self.dataset = dataloader.dataset + elif isinstance(dataloader, io.IterableDataset): + self.dataset = dataloader + else: + raise TypeError( + f"dataloader should be io.DataLoader or io.IterableDataset, but got {type(dataloader)}" + ) + + def __iter__(self): + while True: + dataloader_iter = iter(self.dataloader) + for batch in dataloader_iter: + yield batch + + def __len__(self): + return len(self.dataloader) diff --git a/ppsci/data/dataset/__init__.py b/ppsci/data/dataset/__init__.py index 9ece354700..cce79b39d2 100644 --- a/ppsci/data/dataset/__init__.py +++ b/ppsci/data/dataset/__init__.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -116,3 +117,117 @@ def build_dataset(cfg) -> "io.Dataset": logger.debug(str(dataset)) return dataset +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +from typing import TYPE_CHECKING + +from ppsci.data.dataset.airfoil_dataset import MeshAirfoilDataset +from ppsci.data.dataset.array_dataset import ChipHeatDataset +from ppsci.data.dataset.array_dataset import ContinuousNamedArrayDataset +from ppsci.data.dataset.array_dataset import IterableNamedArrayDataset +from ppsci.data.dataset.array_dataset import NamedArrayDataset +from ppsci.data.dataset.atmospheric_dataset import GridMeshAtmosphericDataset +from ppsci.data.dataset.cgcnn_dataset import CIFData as CGCNNDataset +from ppsci.data.dataset.csv_dataset import CSVDataset +from ppsci.data.dataset.csv_dataset import IterableCSVDataset +from ppsci.data.dataset.cylinder_dataset import MeshCylinderDataset +from ppsci.data.dataset.darcyflow_dataset import DarcyFlowDataset +from ppsci.data.dataset.dgmr_dataset import DGMRDataset +from ppsci.data.dataset.enso_dataset import ENSODataset +from ppsci.data.dataset.era5_dataset import ERA5Dataset +from ppsci.data.dataset.era5_dataset import ERA5SampledDataset +from ppsci.data.dataset.ext_moe_enso_dataset import ExtMoEENSODataset +from ppsci.data.dataset.fwi_dataset import FWIDataset +from ppsci.data.dataset.mat_dataset import IterableMatDataset +from ppsci.data.dataset.mat_dataset import MatDataset +from ppsci.data.dataset.moflow_dataset import MOlFLOWDataset +from ppsci.data.dataset.mrms_dataset import MRMSDataset +from ppsci.data.dataset.mrms_dataset import MRMSSampledDataset +from ppsci.data.dataset.npz_dataset import IterableNPZDataset +from ppsci.data.dataset.npz_dataset import NPZDataset +from ppsci.data.dataset.pems_dataset import PEMSDataset +from ppsci.data.dataset.radar_dataset import RadarDataset +from ppsci.data.dataset.sevir_dataset import SEVIRDataset +from ppsci.data.dataset.spherical_swe_dataset import SphericalSWEDataset +from ppsci.data.dataset.trphysx_dataset import CylinderDataset +from ppsci.data.dataset.trphysx_dataset import LorenzDataset +from ppsci.data.dataset.trphysx_dataset import RosslerDataset +from ppsci.data.dataset.vtu_dataset import VtuDataset +from ppsci.data.process import transform +from ppsci.utils import logger + +if TYPE_CHECKING: + from paddle import io + +__all__ = [ + "IterableNamedArrayDataset", + "NamedArrayDataset", + "ContinuousNamedArrayDataset", + "ChipHeatDataset", + "CSVDataset", + "IterableCSVDataset", + "ERA5Dataset", + "ERA5SampledDataset", + "GridMeshAtmosphericDataset", + "IterableMatDataset", + "MatDataset", + "MRMSDataset", + "MRMSSampledDataset", + "IterableNPZDataset", + "NPZDataset", + "PEMSDataset", + "CylinderDataset", + "LorenzDataset", + "RadarDataset", + "RosslerDataset", + "VtuDataset", + "DGMRDataset", + "MeshAirfoilDataset", + "MeshCylinderDataset", + "DarcyFlowDataset", + "SphericalSWEDataset", + "ENSODataset", + "ExtMoEENSODataset", + "SEVIRDataset", + "MOlFLOWDataset", + "build_dataset", + "CGCNNDataset", + "FWIDataset", +] + + +def build_dataset(cfg) -> "io.Dataset": + """Build dataset + + Args: + cfg (List[DictConfig]): Dataset config list. + + Returns: + Dict[str, io.Dataset]: dataset. 
+ """ + cfg = copy.deepcopy(cfg) + + dataset_cls = cfg.pop("name") + if "transforms" in cfg: + cfg["transforms"] = transform.build_transforms(cfg.pop("transforms")) + + dataset = eval(dataset_cls)(**cfg) + + logger.debug(str(dataset)) + + return dataset +>>>>>>> Stashed changes diff --git a/ppsci/data/dataset/airfoil_dataset.py b/ppsci/data/dataset/airfoil_dataset.py index 2a249104f7..40b70b7fe2 100644 --- a/ppsci/data/dataset/airfoil_dataset.py +++ b/ppsci/data/dataset/airfoil_dataset.py @@ -1,241 +1,241 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import os -import pickle -from os import path as osp -from typing import Dict -from typing import List -from typing import Tuple - -import numpy as np -import paddle -from paddle import io - -try: - import pgl -except ModuleNotFoundError: - pass - -SU2_SHAPE_IDS = { - "line": 3, - "triangle": 5, - "quad": 9, -} - - -# HACK: Simplify code below -def _get_mesh_graph( - mesh_filename: str, dtype: np.dtype = np.float32 -) -> Tuple[np.ndarray, np.ndarray, List[List[List[int]]], Dict[str, List[List[int]]]]: - def get_rhs(s: str) -> str: - return s.split("=")[-1] - - marker_dict = {} - with open(mesh_filename) as f: - for line in f: - if line.startswith("NPOIN"): - num_points = int(get_rhs(line)) - mesh_points = [ - [float(p) for p in f.readline().split()[:2]] - for _ in range(num_points) - ] - nodes = np.array(mesh_points, dtype=dtype) - elif line.startswith("NMARK"): - num_markers = int(get_rhs(line)) - for _ in range(num_markers): - line = f.readline() - assert line.startswith("MARKER_TAG") - marker_tag = get_rhs(line).strip() - num_elems = int(get_rhs(f.readline())) - marker_elems = [ - [int(e) for e in f.readline().split()[-2:]] - for _ in range(num_elems) - ] - marker_dict[marker_tag] = marker_elems - elif line.startswith("NELEM"): - edges = [] - triangles = [] - quads = [] - num_edges = int(get_rhs(line)) - for _ in range(num_edges): - elem = [int(p) for p in f.readline().split()] - if elem[0] == SU2_SHAPE_IDS["triangle"]: - n = 3 - triangles.append(elem[1 : 1 + n]) - elif elem[0] == SU2_SHAPE_IDS["quad"]: - n = 4 - quads.append(elem[1 : 1 + n]) - else: - raise NotImplementedError - elem = elem[1 : 1 + n] - edges += [[elem[i], elem[(i + 1) % n]] for i in range(n)] - edges = np.array(edges, dtype=np.compat.long).transpose() - elems = [triangles, quads] - return nodes, edges, elems, marker_dict - - -class MeshAirfoilDataset(io.Dataset): - """Dataset for `MeshAirfoil`. - - Args: - input_keys (Tuple[str, ...]): Name of input data. - label_keys (Tuple[str, ...]): Name of label data. - data_dir (str): Directory of MeshAirfoil data. - mesh_graph_path (str): Path of mesh graph. - transpose_edges (bool, optional): Whether transpose the edges array from (2, num_edges) to (num_edges, 2) for convenient of slicing. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.MeshAirfoilDataset( - ... "input_keys": ("input",), - ... 
"label_keys": ("output",), - ... "data_dir": "/path/to/MeshAirfoilDataset", - ... "mesh_graph_path": "/path/to/file.su2", - ... "transpose_edges": False, - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - use_pgl: bool = True - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - data_dir: str, - mesh_graph_path: str, - transpose_edges: bool = False, - ): - self.input_keys = input_keys - self.label_keys = label_keys - self.data_dir = data_dir - self.file_list = os.listdir(self.data_dir) - self.len = len(self.file_list) - self.mesh_graph = _get_mesh_graph(mesh_graph_path) - - with open(osp.join(osp.dirname(self.data_dir), "train_max_min.pkl"), "rb") as f: - self.normalization_factors = pickle.load(f) - - self.nodes = self.mesh_graph[0] - self.edges = self.mesh_graph[1] - if transpose_edges: - self.edges = self.edges.transpose([1, 0]) - self.elems_list = self.mesh_graph[2] - self.marker_dict = self.mesh_graph[3] - self.node_markers = np.full([self.nodes.shape[0], 1], fill_value=-1) - for i, (marker_tag, marker_elems) in enumerate(self.marker_dict.items()): - for elem in marker_elems: - self.node_markers[elem[0]] = i - self.node_markers[elem[1]] = i - - self.raw_graphs = [self.get(i) for i in range(len(self))] - - def __len__(self): - return self.len - - def __getitem__(self, idx): - return ( - { - self.input_keys[0]: self.raw_graphs[idx], - }, - { - self.label_keys[0]: self.raw_graphs[idx], - }, - None, - ) - - def get(self, idx): - with open(osp.join(self.data_dir, self.file_list[idx]), "rb") as f: - fields = pickle.load(f) - fields = self._preprocess(fields) - aoa, reynolds, mach = self._get_params_from_name(self.file_list[idx]) - # aoa = aoa - mach_or_reynolds = mach if reynolds is None else reynolds - # mach_or_reynolds = mach_or_reynolds - norm_aoa = aoa / 10 - norm_mach_or_reynolds = ( - mach_or_reynolds if reynolds is None else (mach_or_reynolds - 1.5e6) / 1.5e6 - ) - - nodes = np.concatenate( - [ - self.nodes, - np.repeat(a=norm_aoa, repeats=self.nodes.shape[0])[:, np.newaxis], - np.repeat(a=norm_mach_or_reynolds, repeats=self.nodes.shape[0])[ - :, np.newaxis - ], - self.node_markers, - ], - axis=-1, - ).astype(paddle.get_default_dtype()) - - data = pgl.Graph( - num_nodes=nodes.shape[0], - edges=self.edges, - ) - data.x = nodes - data.y = fields - data.pos = self.nodes - data.edge_index = self.edges - - sender = data.x[data.edge_index[0]] - receiver = data.x[data.edge_index[1]] - relation_pos = sender[:, 0:2] - receiver[:, 0:2] - post = np.linalg.norm(relation_pos, ord=2, axis=1, keepdims=True).astype( - paddle.get_default_dtype() - ) - data.edge_attr = post - std_epsilon = [1e-8] - a = np.mean(data.edge_attr, axis=0) - b = data.edge_attr.std(axis=0) - b = np.maximum(b, std_epsilon).astype(paddle.get_default_dtype()) - data.edge_attr = (data.edge_attr - a) / b - data.aoa = aoa - data.norm_aoa = norm_aoa - data.mach_or_reynolds = mach_or_reynolds - data.norm_mach_or_reynolds = norm_mach_or_reynolds - return data - - def _preprocess(self, tensor_list, stack_output=True): - data_max, data_min = self.normalization_factors - normalized_tensors = [] - for i in range(len(tensor_list)): - normalized = (tensor_list[i] - data_min[i]) / ( - data_max[i] - data_min[i] - ) * 2 - 1 - normalized_tensors.append(normalized) - if stack_output: - normalized_tensors = np.stack(normalized_tensors, axis=1) - return normalized_tensors - - def _get_params_from_name(self, filename): - s = 
filename.rsplit(".", 1)[0].split("_") - aoa = np.array(s[s.index("aoa") + 1])[np.newaxis].astype( - paddle.get_default_dtype() - ) - reynolds = s[s.index("re") + 1] - reynolds = ( - np.array(reynolds)[np.newaxis].astype(paddle.get_default_dtype()) - if reynolds != "None" - else None - ) - mach = np.array(s[s.index("mach") + 1])[np.newaxis].astype( - paddle.get_default_dtype() - ) - return aoa, reynolds, mach +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +import pickle +from os import path as osp +from typing import Dict +from typing import List +from typing import Tuple + +import numpy as np +import paddle +from paddle import io + +try: + import pgl +except ModuleNotFoundError: + pass + +SU2_SHAPE_IDS = { + "line": 3, + "triangle": 5, + "quad": 9, +} + + +# HACK: Simplify code below +def _get_mesh_graph( + mesh_filename: str, dtype: np.dtype = np.float32 +) -> Tuple[np.ndarray, np.ndarray, List[List[List[int]]], Dict[str, List[List[int]]]]: + def get_rhs(s: str) -> str: + return s.split("=")[-1] + + marker_dict = {} + with open(mesh_filename) as f: + for line in f: + if line.startswith("NPOIN"): + num_points = int(get_rhs(line)) + mesh_points = [ + [float(p) for p in f.readline().split()[:2]] + for _ in range(num_points) + ] + nodes = np.array(mesh_points, dtype=dtype) + elif line.startswith("NMARK"): + num_markers = int(get_rhs(line)) + for _ in range(num_markers): + line = f.readline() + assert line.startswith("MARKER_TAG") + marker_tag = get_rhs(line).strip() + num_elems = int(get_rhs(f.readline())) + marker_elems = [ + [int(e) for e in f.readline().split()[-2:]] + for _ in range(num_elems) + ] + marker_dict[marker_tag] = marker_elems + elif line.startswith("NELEM"): + edges = [] + triangles = [] + quads = [] + num_edges = int(get_rhs(line)) + for _ in range(num_edges): + elem = [int(p) for p in f.readline().split()] + if elem[0] == SU2_SHAPE_IDS["triangle"]: + n = 3 + triangles.append(elem[1 : 1 + n]) + elif elem[0] == SU2_SHAPE_IDS["quad"]: + n = 4 + quads.append(elem[1 : 1 + n]) + else: + raise NotImplementedError + elem = elem[1 : 1 + n] + edges += [[elem[i], elem[(i + 1) % n]] for i in range(n)] + edges = np.array(edges, dtype=np.compat.long).transpose() + elems = [triangles, quads] + return nodes, edges, elems, marker_dict + + +class MeshAirfoilDataset(io.Dataset): + """Dataset for `MeshAirfoil`. + + Args: + input_keys (Tuple[str, ...]): Name of input data. + label_keys (Tuple[str, ...]): Name of label data. + data_dir (str): Directory of MeshAirfoil data. + mesh_graph_path (str): Path of mesh graph. + transpose_edges (bool, optional): Whether transpose the edges array from (2, num_edges) to (num_edges, 2) for convenient of slicing. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.MeshAirfoilDataset( + ... "input_keys": ("input",), + ... "label_keys": ("output",), + ... "data_dir": "/path/to/MeshAirfoilDataset", + ... 
"mesh_graph_path": "/path/to/file.su2", + ... "transpose_edges": False, + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + use_pgl: bool = True + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + data_dir: str, + mesh_graph_path: str, + transpose_edges: bool = False, + ): + self.input_keys = input_keys + self.label_keys = label_keys + self.data_dir = data_dir + self.file_list = os.listdir(self.data_dir) + self.len = len(self.file_list) + self.mesh_graph = _get_mesh_graph(mesh_graph_path) + + with open(osp.join(osp.dirname(self.data_dir), "train_max_min.pkl"), "rb") as f: + self.normalization_factors = pickle.load(f) + + self.nodes = self.mesh_graph[0] + self.edges = self.mesh_graph[1] + if transpose_edges: + self.edges = self.edges.transpose([1, 0]) + self.elems_list = self.mesh_graph[2] + self.marker_dict = self.mesh_graph[3] + self.node_markers = np.full([self.nodes.shape[0], 1], fill_value=-1) + for i, (marker_tag, marker_elems) in enumerate(self.marker_dict.items()): + for elem in marker_elems: + self.node_markers[elem[0]] = i + self.node_markers[elem[1]] = i + + self.raw_graphs = [self.get(i) for i in range(len(self))] + + def __len__(self): + return self.len + + def __getitem__(self, idx): + return ( + { + self.input_keys[0]: self.raw_graphs[idx], + }, + { + self.label_keys[0]: self.raw_graphs[idx], + }, + None, + ) + + def get(self, idx): + with open(osp.join(self.data_dir, self.file_list[idx]), "rb") as f: + fields = pickle.load(f) + fields = self._preprocess(fields) + aoa, reynolds, mach = self._get_params_from_name(self.file_list[idx]) + # aoa = aoa + mach_or_reynolds = mach if reynolds is None else reynolds + # mach_or_reynolds = mach_or_reynolds + norm_aoa = aoa / 10 + norm_mach_or_reynolds = ( + mach_or_reynolds if reynolds is None else (mach_or_reynolds - 1.5e6) / 1.5e6 + ) + + nodes = np.concatenate( + [ + self.nodes, + np.repeat(a=norm_aoa, repeats=self.nodes.shape[0])[:, np.newaxis], + np.repeat(a=norm_mach_or_reynolds, repeats=self.nodes.shape[0])[ + :, np.newaxis + ], + self.node_markers, + ], + axis=-1, + ).astype(paddle.get_default_dtype()) + + data = pgl.Graph( + num_nodes=nodes.shape[0], + edges=self.edges, + ) + data.x = nodes + data.y = fields + data.pos = self.nodes + data.edge_index = self.edges + + sender = data.x[data.edge_index[0]] + receiver = data.x[data.edge_index[1]] + relation_pos = sender[:, 0:2] - receiver[:, 0:2] + post = np.linalg.norm(relation_pos, ord=2, axis=1, keepdims=True).astype( + paddle.get_default_dtype() + ) + data.edge_attr = post + std_epsilon = [1e-8] + a = np.mean(data.edge_attr, axis=0) + b = data.edge_attr.std(axis=0) + b = np.maximum(b, std_epsilon).astype(paddle.get_default_dtype()) + data.edge_attr = (data.edge_attr - a) / b + data.aoa = aoa + data.norm_aoa = norm_aoa + data.mach_or_reynolds = mach_or_reynolds + data.norm_mach_or_reynolds = norm_mach_or_reynolds + return data + + def _preprocess(self, tensor_list, stack_output=True): + data_max, data_min = self.normalization_factors + normalized_tensors = [] + for i in range(len(tensor_list)): + normalized = (tensor_list[i] - data_min[i]) / ( + data_max[i] - data_min[i] + ) * 2 - 1 + normalized_tensors.append(normalized) + if stack_output: + normalized_tensors = np.stack(normalized_tensors, axis=1) + return normalized_tensors + + def _get_params_from_name(self, filename): + s = filename.rsplit(".", 1)[0].split("_") + aoa = np.array(s[s.index("aoa") + 
1])[np.newaxis].astype( + paddle.get_default_dtype() + ) + reynolds = s[s.index("re") + 1] + reynolds = ( + np.array(reynolds)[np.newaxis].astype(paddle.get_default_dtype()) + if reynolds != "None" + else None + ) + mach = np.array(s[s.index("mach") + 1])[np.newaxis].astype( + paddle.get_default_dtype() + ) + return aoa, reynolds, mach diff --git a/ppsci/data/dataset/array_dataset.py b/ppsci/data/dataset/array_dataset.py index d7cc10455b..6711179453 100644 --- a/ppsci/data/dataset/array_dataset.py +++ b/ppsci/data/dataset/array_dataset.py @@ -1,312 +1,312 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import Optional - -import numpy as np -import paddle -from paddle import io -from paddle import vision - -from ppsci.utils import logger - - -class NamedArrayDataset(io.Dataset): - """Class for Named Array Dataset. - - Args: - input (Dict[str, np.ndarray]): Input dict. - label (Optional[Dict[str, np.ndarray]]): Label dict. Defaults to None. - weight (Optional[Dict[str, np.ndarray]]): Weight dict. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> input = {"x": np.random.randn(100, 1)} - >>> output = {"u": np.random.randn(100, 1)} - >>> weight = {"u": np.random.randn(100, 1)} - >>> dataset = ppsci.data.dataset.NamedArrayDataset(input, output, weight) - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = True - - def __init__( - self, - input: Dict[str, np.ndarray], - label: Optional[Dict[str, np.ndarray]] = None, - weight: Optional[Dict[str, np.ndarray]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input = input - self.label = {} if label is None else label - self.input_keys = tuple(input.keys()) - self.label_keys = tuple(self.label.keys()) - self.weight = {} if weight is None else weight - self.transforms = transforms - self._len = len(next(iter(input.values()))) - for key in input: - if key in self.label and len(input[key]) != len(self.label[key]): - logger.warning( - f"The length of input {key}({len(input[key])}) is not equal to " - f"the length of label {key}({len(self.label[key])})." - ) - - def __getitem__(self, idx): - input_item = {key: value[idx] for key, value in self.input.items()} - label_item = {key: value[idx] for key, value in self.label.items()} - weight_item = {key: value[idx] for key, value in self.weight.items()} - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return (input_item, label_item, weight_item) - - def __len__(self): - return self._len - - -class IterableNamedArrayDataset(io.IterableDataset): - """IterableNamedArrayDataset for full-data loading. - - Args: - input (Dict[str, np.ndarray]): Input dict. 
- label (Optional[Dict[str, np.ndarray]]): Label dict. Defaults to None. - weight (Optional[Dict[str, np.ndarray]]): Weight dict. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> input = {"x": np.random.randn(100, 1)} - >>> label = {"u": np.random.randn(100, 1)} - >>> weight = {"u": np.random.randn(100, 1)} - >>> dataset = ppsci.data.dataset.IterableNamedArrayDataset(input, label, weight) - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - input: Dict[str, np.ndarray], - label: Optional[Dict[str, np.ndarray]] = None, - weight: Optional[Dict[str, np.ndarray]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input = {key: paddle.to_tensor(value) for key, value in input.items()} - self.label = ( - {key: paddle.to_tensor(value) for key, value in label.items()} - if label is not None - else {} - ) - self.input_keys = tuple(input.keys()) - self.label_keys = tuple(self.label.keys()) - self.weight = ( - { - key: paddle.to_tensor(value, paddle.get_default_dtype()) - for key, value in weight.items() - } - if weight is not None - else None - ) - self._len = len(next(iter(self.input.values()))) - self.transforms = transforms - - @property - def num_samples(self): - """Number of samples within current dataset.""" - return self._len - - def __iter__(self): - if callable(self.transforms): - input_, label_, weight_ = self.transforms( - self.input, self.label, self.weight - ) - yield input_, label_, weight_ - else: - yield self.input, self.label, self.weight - - def __len__(self): - return 1 - - -class ContinuousNamedArrayDataset(io.IterableDataset): - """ContinuousNamedArrayDataset for iterable sampling. - - Args: - input (Callable): Function generate input dict. - label (Callable): Function generate label dict. - weight (Optional[Callable]): Function generate weight dict. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> import numpy as np - >>> input = lambda : {"x": np.random.randn(100, 1)} - >>> label = lambda inp: {"u": np.random.randn(100, 1)} - >>> weight = lambda inp, label: {"u": 1 - (label["u"] ** 2)} - >>> dataset = ppsci.data.dataset.ContinuousNamedArrayDataset(input, label, weight) - >>> input_batch, label_batch, weight_batch = next(iter(dataset)) - >>> print(input_batch["x"].shape) - [100, 1] - >>> print(label_batch["u"].shape) - [100, 1] - >>> print(weight_batch["u"].shape) - [100, 1] - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - input: Callable, - label: Callable, - weight: Optional[Callable] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input_fn = input - self.input_keys = tuple(self.input_fn().keys()) - - self.label_fn = label - input_ = self.input_fn() - self.label_keys = tuple(self.label_fn(input_).keys()) - - self.weight_fn = weight - self.transforms = transforms - - @property - def num_samples(self): - """Number of samples within current dataset.""" - raise NotImplementedError( - "ContinuousNamedArrayDataset has no fixed number of samples." 
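# How the three callables of ContinuousNamedArrayDataset compose: `input` takes no
# arguments, `label` receives the generated input dict, and `weight` receives both.
# The lambdas below are illustrative stand-ins, not the real data pipeline.
import numpy as np

input_fn = lambda: {"x": np.random.randn(8, 1)}
label_fn = lambda inp: {"u": np.sin(inp["x"])}
weight_fn = lambda inp, label: {"u": 1.0 - label["u"] ** 2}

inp = input_fn()
lab = label_fn(inp)
wgt = weight_fn(inp, lab)
print(inp["x"].shape, lab["u"].shape, wgt["u"].shape)  # (8, 1) (8, 1) (8, 1)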
- ) - - def __iter__(self): - def to_tensor_dict(_dict): - if _dict is None: - return None - return {k: paddle.to_tensor(v) for k, v in _dict.items()} - - while True: - input_batch = self.input_fn() - label_batch = self.label_fn(input_batch) - if callable(self.weight_fn): - weight_batch = self.weight_fn(input_batch, label_batch) - else: - weight_batch = None - - if callable(self.transforms): - input_batch, label_batch, weight_batch = self.transforms( - input_batch, label_batch, weight_batch - ) - yield to_tensor_dict(input_batch), to_tensor_dict( - label_batch - ), to_tensor_dict(weight_batch) - - def __len__(self): - return 1 - - -class ChipHeatDataset(io.Dataset): - """ChipHeatDataset for data loading of multi-branch DeepONet model. - - Args: - input (Dict[str, np.ndarray]): Input dict. - label (Optional[Dict[str, np.ndarray]]): Label dict. Defaults to None. - index (tuple[str, ...]): Key of input dict. - data_type (str): One of key of input dict. - weight (Optional[Dict[str, np.ndarray]]): Weight dict. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> input = {"x": np.random.randn(100, 1)} - >>> label = {"u": np.random.randn(100, 1)} - >>> index = ('x', 'u', 'bc', 'bc_data') - >>> data_type = 'u' - >>> weight = {"u": np.random.randn(100, 1)} - >>> dataset = ppsci.data.dataset.ChipHeatDataset(input, label, index, data_type, weight) - """ - - def __init__( - self, - input: Dict[str, np.ndarray], - label: Dict[str, np.ndarray], - index: tuple[str, ...], - data_type: str, - weight: Optional[Dict[str, float]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input = input - self.label = label - self.input_keys = tuple(input.keys()) - self.label_keys = tuple(label.keys()) - self.index = index - self.data_type = data_type - self.weight = {} if weight is None else weight - self.transforms = transforms - - def __getitem__(self, idx): - quotient = idx - index_ir = dict() - for i in self.index: - index_ir[i] = 0 - - for i in index_ir: - num = len(self.input[i]) - index_ir[i] = quotient % num - quotient = quotient // num - - input_item = {} - for key in self.input: - if key == "y": - input_item[key] = self.input[key][index_ir["x"]] - elif key == "u_one": - input_item[key] = self.input[key][ - len(self.input[self.data_type]) * index_ir["x"] - + index_ir[self.data_type] - ] - else: - input_item[key] = self.input[key][index_ir[key]] - - label_item = {key: value for key, value in self.label.items()} - weight_item = {key: value for key, value in self.weight.items()} - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - (input_item, label_item, weight_item) - ) - - return (input_item, label_item, weight_item) - - def __len__(self): - _len = 1 - for i in self.index: - _len *= len(self.input[i]) - return _len +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
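# ChipHeatDataset.__getitem__ above decomposes a flat sample index into one sub-index
# per key with a mixed-radix scheme (repeated modulo and floor division). A standalone
# sketch of the same arithmetic; the key names and sizes here are made up:
def decompose_index(idx, sizes):
    out, quotient = {}, idx
    for key, num in sizes.items():
        out[key] = quotient % num
        quotient = quotient // num
    return out

sizes = {"x": 4, "u": 3, "bc": 2}  # hypothetical lengths of self.input[key]
assert decompose_index(0, sizes) == {"x": 0, "u": 0, "bc": 0}
assert decompose_index(5, sizes) == {"x": 1, "u": 1, "bc": 0}   # 5 = 1 + 4 * 1
assert decompose_index(23, sizes) == {"x": 3, "u": 2, "bc": 1}  # last of 4 * 3 * 2 samples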
+ +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import Optional + +import numpy as np +import paddle +from paddle import io +from paddle import vision + +from ppsci.utils import logger + + +class NamedArrayDataset(io.Dataset): + """Class for Named Array Dataset. + + Args: + input (Dict[str, np.ndarray]): Input dict. + label (Optional[Dict[str, np.ndarray]]): Label dict. Defaults to None. + weight (Optional[Dict[str, np.ndarray]]): Weight dict. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> input = {"x": np.random.randn(100, 1)} + >>> output = {"u": np.random.randn(100, 1)} + >>> weight = {"u": np.random.randn(100, 1)} + >>> dataset = ppsci.data.dataset.NamedArrayDataset(input, output, weight) + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = True + + def __init__( + self, + input: Dict[str, np.ndarray], + label: Optional[Dict[str, np.ndarray]] = None, + weight: Optional[Dict[str, np.ndarray]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input = input + self.label = {} if label is None else label + self.input_keys = tuple(input.keys()) + self.label_keys = tuple(self.label.keys()) + self.weight = {} if weight is None else weight + self.transforms = transforms + self._len = len(next(iter(input.values()))) + for key in input: + if key in self.label and len(input[key]) != len(self.label[key]): + logger.warning( + f"The length of input {key}({len(input[key])}) is not equal to " + f"the length of label {key}({len(self.label[key])})." + ) + + def __getitem__(self, idx): + input_item = {key: value[idx] for key, value in self.input.items()} + label_item = {key: value[idx] for key, value in self.label.items()} + weight_item = {key: value[idx] for key, value in self.weight.items()} + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return (input_item, label_item, weight_item) + + def __len__(self): + return self._len + + +class IterableNamedArrayDataset(io.IterableDataset): + """IterableNamedArrayDataset for full-data loading. + + Args: + input (Dict[str, np.ndarray]): Input dict. + label (Optional[Dict[str, np.ndarray]]): Label dict. Defaults to None. + weight (Optional[Dict[str, np.ndarray]]): Weight dict. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> input = {"x": np.random.randn(100, 1)} + >>> label = {"u": np.random.randn(100, 1)} + >>> weight = {"u": np.random.randn(100, 1)} + >>> dataset = ppsci.data.dataset.IterableNamedArrayDataset(input, label, weight) + """ + + # Whether support batch indexing for speeding up fetching process. 
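# The `batch_index` class attribute (True for NamedArrayDataset, False for the iterable
# datasets) marks whether a whole batch of samples can be fetched in one fancy-indexing
# pass over each array rather than one __getitem__ call per sample. A small NumPy
# illustration with synthetic data:
import numpy as np

data = {"x": np.random.randn(100, 1), "y": np.random.randn(100, 2)}
batch_idx = np.array([3, 17, 42, 99])
batch = {key: value[batch_idx] for key, value in data.items()}
print(batch["x"].shape, batch["y"].shape)  # (4, 1) (4, 2)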
+ batch_index: bool = False + + def __init__( + self, + input: Dict[str, np.ndarray], + label: Optional[Dict[str, np.ndarray]] = None, + weight: Optional[Dict[str, np.ndarray]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input = {key: paddle.to_tensor(value) for key, value in input.items()} + self.label = ( + {key: paddle.to_tensor(value) for key, value in label.items()} + if label is not None + else {} + ) + self.input_keys = tuple(input.keys()) + self.label_keys = tuple(self.label.keys()) + self.weight = ( + { + key: paddle.to_tensor(value, paddle.get_default_dtype()) + for key, value in weight.items() + } + if weight is not None + else None + ) + self._len = len(next(iter(self.input.values()))) + self.transforms = transforms + + @property + def num_samples(self): + """Number of samples within current dataset.""" + return self._len + + def __iter__(self): + if callable(self.transforms): + input_, label_, weight_ = self.transforms( + self.input, self.label, self.weight + ) + yield input_, label_, weight_ + else: + yield self.input, self.label, self.weight + + def __len__(self): + return 1 + + +class ContinuousNamedArrayDataset(io.IterableDataset): + """ContinuousNamedArrayDataset for iterable sampling. + + Args: + input (Callable): Function generate input dict. + label (Callable): Function generate label dict. + weight (Optional[Callable]): Function generate weight dict. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> import numpy as np + >>> input = lambda : {"x": np.random.randn(100, 1)} + >>> label = lambda inp: {"u": np.random.randn(100, 1)} + >>> weight = lambda inp, label: {"u": 1 - (label["u"] ** 2)} + >>> dataset = ppsci.data.dataset.ContinuousNamedArrayDataset(input, label, weight) + >>> input_batch, label_batch, weight_batch = next(iter(dataset)) + >>> print(input_batch["x"].shape) + [100, 1] + >>> print(label_batch["u"].shape) + [100, 1] + >>> print(weight_batch["u"].shape) + [100, 1] + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + input: Callable, + label: Callable, + weight: Optional[Callable] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input_fn = input + self.input_keys = tuple(self.input_fn().keys()) + + self.label_fn = label + input_ = self.input_fn() + self.label_keys = tuple(self.label_fn(input_).keys()) + + self.weight_fn = weight + self.transforms = transforms + + @property + def num_samples(self): + """Number of samples within current dataset.""" + raise NotImplementedError( + "ContinuousNamedArrayDataset has no fixed number of samples." + ) + + def __iter__(self): + def to_tensor_dict(_dict): + if _dict is None: + return None + return {k: paddle.to_tensor(v) for k, v in _dict.items()} + + while True: + input_batch = self.input_fn() + label_batch = self.label_fn(input_batch) + if callable(self.weight_fn): + weight_batch = self.weight_fn(input_batch, label_batch) + else: + weight_batch = None + + if callable(self.transforms): + input_batch, label_batch, weight_batch = self.transforms( + input_batch, label_batch, weight_batch + ) + yield to_tensor_dict(input_batch), to_tensor_dict( + label_batch + ), to_tensor_dict(weight_batch) + + def __len__(self): + return 1 + + +class ChipHeatDataset(io.Dataset): + """ChipHeatDataset for data loading of multi-branch DeepONet model. 
+ + Args: + input (Dict[str, np.ndarray]): Input dict. + label (Optional[Dict[str, np.ndarray]]): Label dict. Defaults to None. + index (tuple[str, ...]): Key of input dict. + data_type (str): One of key of input dict. + weight (Optional[Dict[str, np.ndarray]]): Weight dict. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> input = {"x": np.random.randn(100, 1)} + >>> label = {"u": np.random.randn(100, 1)} + >>> index = ('x', 'u', 'bc', 'bc_data') + >>> data_type = 'u' + >>> weight = {"u": np.random.randn(100, 1)} + >>> dataset = ppsci.data.dataset.ChipHeatDataset(input, label, index, data_type, weight) + """ + + def __init__( + self, + input: Dict[str, np.ndarray], + label: Dict[str, np.ndarray], + index: tuple[str, ...], + data_type: str, + weight: Optional[Dict[str, float]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input = input + self.label = label + self.input_keys = tuple(input.keys()) + self.label_keys = tuple(label.keys()) + self.index = index + self.data_type = data_type + self.weight = {} if weight is None else weight + self.transforms = transforms + + def __getitem__(self, idx): + quotient = idx + index_ir = dict() + for i in self.index: + index_ir[i] = 0 + + for i in index_ir: + num = len(self.input[i]) + index_ir[i] = quotient % num + quotient = quotient // num + + input_item = {} + for key in self.input: + if key == "y": + input_item[key] = self.input[key][index_ir["x"]] + elif key == "u_one": + input_item[key] = self.input[key][ + len(self.input[self.data_type]) * index_ir["x"] + + index_ir[self.data_type] + ] + else: + input_item[key] = self.input[key][index_ir[key]] + + label_item = {key: value for key, value in self.label.items()} + weight_item = {key: value for key, value in self.weight.items()} + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + (input_item, label_item, weight_item) + ) + + return (input_item, label_item, weight_item) + + def __len__(self): + _len = 1 + for i in self.index: + _len *= len(self.input[i]) + return _len diff --git a/ppsci/data/dataset/atmospheric_dataset.py b/ppsci/data/dataset/atmospheric_dataset.py index e43138eb94..6e37907b24 100644 --- a/ppsci/data/dataset/atmospheric_dataset.py +++ b/ppsci/data/dataset/atmospheric_dataset.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -1779,3 +1780,1786 @@ def normalize(self, inputs_data, stddev_data, mean_data): def denormalize(self, inputs_data): return inputs_data * self.stacked_targets_stddev + self.stacked_targets_mean +======= +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
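# The `denormalize` shown in the hunk context above undoes a per-channel standardization
# (x * stddev + mean); the matching `normalize` body is not shown in this hunk, so the
# forward direction below is inferred. A self-contained round-trip check on synthetic data:
import numpy as np

x = np.random.randn(16, 5) * 3.0 + 7.0
mean, std = x.mean(axis=0), x.std(axis=0)
x_norm = (x - mean) / std      # assumed normalize direction
x_back = x_norm * std + mean   # denormalize, as in the context lines above
assert np.allclose(x, x_back)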
+ +from typing import List +from typing import NamedTuple +from typing import Optional +from typing import Sequence +from typing import Tuple + +import numpy as np +import paddle +import pandas as pd +import scipy +from paddle import io + +try: + import trimesh + import xarray +except ModuleNotFoundError: + pass + +# https://www.ecmwf.int/en/forecasts/dataset/ecmwf-reanalysis-v5 +PRESSURE_LEVELS_ERA5_37 = ( + 1, + 2, + 3, + 5, + 7, + 10, + 20, + 30, + 50, + 70, + 100, + 125, + 150, + 175, + 200, + 225, + 250, + 300, + 350, + 400, + 450, + 500, + 550, + 600, + 650, + 700, + 750, + 775, + 800, + 825, + 850, + 875, + 900, + 925, + 950, + 975, + 1000, +) + +# https://www.ecmwf.int/en/forecasts/datasets/set-i +PRESSURE_LEVELS_HRES_25 = ( + 1, + 2, + 3, + 5, + 7, + 10, + 20, + 30, + 50, + 70, + 100, + 150, + 200, + 250, + 300, + 400, + 500, + 600, + 700, + 800, + 850, + 900, + 925, + 950, + 1000, +) + +# https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2020MS002203 +PRESSURE_LEVELS_WEATHERBENCH_13 = ( + 50, + 100, + 150, + 200, + 250, + 300, + 400, + 500, + 600, + 700, + 850, + 925, + 1000, +) + +PRESSURE_LEVELS = { + 13: PRESSURE_LEVELS_WEATHERBENCH_13, + 25: PRESSURE_LEVELS_HRES_25, + 37: PRESSURE_LEVELS_ERA5_37, +} + + +TARGET_SURFACE_VARS = ( + "2m_temperature", + "mean_sea_level_pressure", + "10m_v_component_of_wind", + "10m_u_component_of_wind", + "total_precipitation_6hr", +) +TARGET_SURFACE_NO_PRECIP_VARS = ( + "2m_temperature", + "mean_sea_level_pressure", + "10m_v_component_of_wind", + "10m_u_component_of_wind", +) +TARGET_ATMOSPHERIC_VARS = ( + "temperature", + "geopotential", + "u_component_of_wind", + "v_component_of_wind", + "vertical_velocity", + "specific_humidity", +) +TARGET_ATMOSPHERIC_NO_W_VARS = ( + "temperature", + "geopotential", + "u_component_of_wind", + "v_component_of_wind", + "specific_humidity", +) +EXTERNAL_FORCING_VARS = ("toa_incident_solar_radiation",) +GENERATED_FORCING_VARS = ( + "year_progress_sin", + "year_progress_cos", + "day_progress_sin", + "day_progress_cos", +) +FORCING_VARS = EXTERNAL_FORCING_VARS + GENERATED_FORCING_VARS +STATIC_VARS = ( + "geopotential_at_surface", + "land_sea_mask", +) + +TASK_input_variables = ( + TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS + STATIC_VARS +) +TASK_target_variables = TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS +TASK_forcing_variables = FORCING_VARS +TASK_pressure_levels = PRESSURE_LEVELS_ERA5_37 +TASK_input_duration = ("12h",) + +TASK_13_input_variables = ( + TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS + STATIC_VARS +) +TASK_13_target_variables = TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS +TASK_13_forcing_variables = FORCING_VARS +TASK_13_pressure_levels = PRESSURE_LEVELS_WEATHERBENCH_13 +TASK_13_input_duration = ("12h",) + + +TASK_13_PRECIP_OUT_input_variables = ( + TARGET_SURFACE_NO_PRECIP_VARS + TARGET_ATMOSPHERIC_VARS + FORCING_VARS + STATIC_VARS +) +TASK_13_PRECIP_OUT_target_variables = TARGET_SURFACE_VARS + TARGET_ATMOSPHERIC_VARS +TASK_13_PRECIP_OUT_forcing_variables = FORCING_VARS +TASK_13_PRECIP_OUT_pressure_levels = PRESSURE_LEVELS_WEATHERBENCH_13 +TASK_13_PRECIP_OUT_input_duration = ("12h",) + +_SEC_PER_HOUR = 3600 +_HOUR_PER_DAY = 24 +SEC_PER_DAY = _SEC_PER_HOUR * _HOUR_PER_DAY +_AVG_DAY_PER_YEAR = 365.24219 +AVG_SEC_PER_YEAR = SEC_PER_DAY * _AVG_DAY_PER_YEAR + +DAY_PROGRESS = "day_progress" +YEAR_PROGRESS = "year_progress" + + +def stacked_to_dataset( + stacked_array: "xarray.Variable", + template_dataset: "xarray.Dataset", + preserved_dims: Tuple[str, ...] 
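# GENERATED_FORCING_VARS above lists sin/cos encodings of "year progress" and
# "day progress". The helper that actually computes them is not part of this hunk, so
# the following is only an assumed sketch of such a cyclic encoding, reusing the values
# of SEC_PER_DAY and AVG_SEC_PER_YEAR defined above:
import numpy as np

SEC_PER_DAY = 3600 * 24
AVG_SEC_PER_YEAR = SEC_PER_DAY * 365.24219

seconds = np.array([0.0, 6 * 3600.0, 180 * SEC_PER_DAY])
year_progress = (seconds % AVG_SEC_PER_YEAR) / AVG_SEC_PER_YEAR
day_progress = (seconds % SEC_PER_DAY) / SEC_PER_DAY
year_progress_sin = np.sin(2 * np.pi * year_progress)
year_progress_cos = np.cos(2 * np.pi * year_progress)
day_progress_sin = np.sin(2 * np.pi * day_progress)
day_progress_cos = np.cos(2 * np.pi * day_progress)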
= ("batch", "lat", "lon"), +) -> "xarray.Dataset": + """The inverse of dataset_to_stacked. + + Requires a template dataset to demonstrate the variables/shapes/coordinates + required. + All variables must have preserved_dims dimensions. + + Args: + stacked_array: Data in BHWC layout, encoded the same as dataset_to_stacked would if it was asked to encode `template_dataset`. + template_dataset: A template Dataset (or other mapping of DataArrays) demonstrating the shape of output required (variables, shapes, coordinates etc). + preserved_dims: dimensions from the target_template that were not folded in the predictions channels. The preserved_dims need to be a subset of the dims of all the variables of template_dataset. + + Returns: + An xarray.Dataset (or other mapping of DataArrays) with the same shape and type as template_dataset. + """ + unstack_from_channels_sizes = {} + var_names = sorted(template_dataset.keys()) + for name in var_names: + template_var = template_dataset[name] + if not all(dim in template_var.dims for dim in preserved_dims): + raise ValueError( + f"stacked_to_dataset requires all Variables to have {preserved_dims} " + f"dimensions, but found only {template_var.dims}." + ) + unstack_from_channels_sizes[name] = { + dim: size + for dim, size in template_var.sizes.items() + if dim not in preserved_dims + } + + channels = { + name: np.prod(list(unstack_sizes.values()), dtype=np.int64) + for name, unstack_sizes in unstack_from_channels_sizes.items() + } + total_expected_channels = sum(channels.values()) + found_channels = stacked_array.sizes["channels"] + if total_expected_channels != found_channels: + raise ValueError( + f"Expected {total_expected_channels} channels but found " + f"{found_channels}, when trying to convert a stacked array of shape " + f"{stacked_array.sizes} to a dataset of shape {template_dataset}." + ) + + data_vars = {} + index = 0 + for name in var_names: + template_var = template_dataset[name] + var = stacked_array.isel({"channels": slice(index, index + channels[name])}) + index += channels[name] + var = var.unstack({"channels": unstack_from_channels_sizes[name]}) + var = var.transpose(*template_var.dims) + data_vars[name] = xarray.DataArray( + data=var, + coords=template_var.coords, + # This might not always be the same as the name it's keyed under; it + # will refer to the original variable name, whereas the key might be + # some alias e.g. temperature_850 under which it should be logged: + name=template_var.name, + ) + return type(template_dataset)( + data_vars + ) # pytype:disable=not-callable,wrong-arg-count + + +def get_graph_spatial_features( + *, + node_lat: np.ndarray, + node_lon: np.ndarray, + senders: np.ndarray, + receivers: np.ndarray, + add_node_positions: bool, + add_node_latitude: bool, + add_node_longitude: bool, + add_relative_positions: bool, + relative_longitude_local_coordinates: bool, + relative_latitude_local_coordinates: bool, + sine_cosine_encoding: bool = False, + encoding_num_freqs: int = 10, + encoding_multiplicative_factor: float = 1.2, +) -> Tuple[np.ndarray, np.ndarray]: + """Computes spatial features for the nodes. + + Args: + node_lat: Latitudes in the [-90, 90] interval of shape [num_nodes] + node_lon: Longitudes in the [0, 360] interval of shape [num_nodes] + senders: Sender indices of shape [num_edges] + receivers: Receiver indices of shape [num_edges] + add_node_positions: Add unit norm absolute positions. 
+ add_node_latitude: Add a feature for latitude (cos(90 - lat)) + Note even if this is set to False, the model may be able to infer the longitude from relative features, unless `relative_latitude_local_coordinates` is also True, or if there is any bias on the relative edge sizes for different longitudes. + add_node_longitude: Add features for longitude (cos(lon), sin(lon)). + Note even if this is set to False, the model may be able to infer the longitude from relative features, unless `relative_longitude_local_coordinates` is also True, or if there is any bias on the relative edge sizes for different longitudes. + add_relative_positions: Whether to relative positions in R3 to the edges. + relative_longitude_local_coordinates: If True, relative positions are computed in a local space where the receiver is at 0 longitude. + relative_latitude_local_coordinates: If True, relative positions are computed in a local space where the receiver is at 0 latitude. + sine_cosine_encoding: If True, we will transform the node/edge features with sine and cosine functions, similar to NERF. + encoding_num_freqs: frequency parameter + encoding_multiplicative_factor: used for calculating the frequency. + + Returns: + Arrays of shape: [num_nodes, num_features] and [num_edges, num_features]. + with node and edge features. + """ + + num_nodes = node_lat.shape[0] + num_edges = senders.shape[0] + dtype = node_lat.dtype + node_phi, node_theta = lat_lon_deg_to_spherical(node_lat, node_lon) + + # Computing some node features. + node_features = [] + if add_node_positions: + # Already in [-1, 1.] range. + node_features.extend(spherical_to_cartesian(node_phi, node_theta)) + + if add_node_latitude: + # Using the cos of theta. + # From 1. (north pole) to -1 (south pole). + node_features.append(np.cos(node_theta)) + + if add_node_longitude: + # Using the cos and sin, which is already normalized. + node_features.append(np.cos(node_phi)) + node_features.append(np.sin(node_phi)) + + if not node_features: + node_features = np.zeros([num_nodes, 0], dtype=dtype) + else: + node_features = np.stack(node_features, axis=-1) + + # Computing some edge features. + edge_features = [] + + if add_relative_positions: + + relative_position = get_relative_position_in_receiver_local_coordinates( + node_phi=node_phi, + node_theta=node_theta, + senders=senders, + receivers=receivers, + latitude_local_coordinates=relative_latitude_local_coordinates, + longitude_local_coordinates=relative_longitude_local_coordinates, + ) + + # Note this is L2 distance in 3d space, rather than geodesic distance. + relative_edge_distances = np.linalg.norm( + relative_position, axis=-1, keepdims=True + ) + + # Normalize to the maximum edge distance. Note that we expect to always + # have an edge that goes in the opposite direction of any given edge + # so the distribution of relative positions should be symmetric around + # zero. So by scaling by the maximum length, we expect all relative + # positions to fall in the [-1., 1.] interval, and all relative distances + # to fall in the [0., 1.] interval. 
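# A compact sketch of the geometry used above: latitude/longitude are mapped to
# unit-sphere Cartesian coordinates, and relative edge vectors are scaled by the maximum
# edge length so all distances land in [0, 1]. The toy nodes and edges are illustrative.
import numpy as np

lat = np.array([0.0, 0.0, 45.0])
lon = np.array([0.0, 90.0, 0.0])
phi, theta = np.deg2rad(lon), np.deg2rad(90.0 - lat)
pos = np.stack(
    [np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)], axis=-1
)

senders = np.array([0, 1, 2])
receivers = np.array([1, 2, 0])
relative = pos[senders] - pos[receivers]
distances = np.linalg.norm(relative, axis=-1, keepdims=True)
print(distances / distances.max())  # every entry in (0, 1]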
+ max_edge_distance = relative_edge_distances.max() + edge_features.append(relative_edge_distances / max_edge_distance) + edge_features.append(relative_position / max_edge_distance) + + if not edge_features: + edge_features = np.zeros([num_edges, 0], dtype=dtype) + else: + edge_features = np.concatenate(edge_features, axis=-1) + + if sine_cosine_encoding: + + def sine_cosine_transform(x: np.ndarray) -> np.ndarray: + freqs = encoding_multiplicative_factor ** np.arange(encoding_num_freqs) + phases = freqs * x[..., None] + x_sin = np.sin(phases) + x_cos = np.cos(phases) + x_cat = np.concatenate([x_sin, x_cos], axis=-1) + return x_cat.reshape([x.shape[0], -1]) + + node_features = sine_cosine_transform(node_features) + edge_features = sine_cosine_transform(edge_features) + + return node_features, edge_features + + +def lat_lon_to_leading_axes(grid_xarray: "xarray.DataArray") -> "xarray.DataArray": + """Reorders xarray so lat/lon axes come first.""" + # leading + ["lat", "lon"] + trailing + # to + # ["lat", "lon"] + leading + trailing + return grid_xarray.transpose("lat", "lon", ...) + + +def restore_leading_axes(grid_xarray: "xarray.DataArray") -> "xarray.DataArray": + """Reorders xarray so batch/time/level axes come first (if present).""" + + # ["lat", "lon"] + [(batch,) (time,) (level,)] + trailing + # to + # [(batch,) (time,) (level,)] + ["lat", "lon"] + trailing + + input_dims = list(grid_xarray.dims) + output_dims = list(input_dims) + for leading_key in ["level", "time", "batch"]: # reverse order for insert + if leading_key in input_dims: + output_dims.remove(leading_key) + output_dims.insert(0, leading_key) + return grid_xarray.transpose(*output_dims) + + +def lat_lon_deg_to_spherical( + node_lat: np.ndarray, + node_lon: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + phi = np.deg2rad(node_lon) + theta = np.deg2rad(90 - node_lat) + return phi, theta + + +def spherical_to_lat_lon( + phi: np.ndarray, + theta: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + lon = np.mod(np.rad2deg(phi), 360) + lat = 90 - np.rad2deg(theta) + return lat, lon + + +def cartesian_to_spherical( + x: np.ndarray, + y: np.ndarray, + z: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray]: + phi = np.arctan2(y, x) + with np.errstate(invalid="ignore"): # circumventing b/253179568 + theta = np.arccos(z) # Assuming unit radius. + return phi, theta + + +def spherical_to_cartesian( + phi: np.ndarray, theta: np.ndarray +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + # Assuming unit radius. + return (np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)) + + +def get_relative_position_in_receiver_local_coordinates( + node_phi: np.ndarray, + node_theta: np.ndarray, + senders: np.ndarray, + receivers: np.ndarray, + latitude_local_coordinates: bool, + longitude_local_coordinates: bool, +) -> np.ndarray: + """Returns relative position features for the edges. + + The relative positions will be computed in a rotated space for a local + coordinate system as defined by the receiver. The relative positions are + simply obtained by subtracting sender position minues receiver position in + that local coordinate system after the rotation in R^3. + + Args: + node_phi: [num_nodes] with polar angles. + node_theta: [num_nodes] with azimuthal angles. + senders: [num_edges] with indices. + receivers: [num_edges] with indices. + latitude_local_coordinates: Whether to rotate edges such that in the positions are computed such that the receiver is always at latitude 0. 
+ longitude_local_coordinates: Whether to rotate edges such that in the positions are computed such that the receiver is always at longitude 0. + + Returns: + Array of relative positions in R3 [num_edges, 3] + """ + + node_pos = np.stack(spherical_to_cartesian(node_phi, node_theta), axis=-1) + + # No rotation in this case. + if not (latitude_local_coordinates or longitude_local_coordinates): + return node_pos[senders] - node_pos[receivers] + + # Get rotation matrices for the local space space for every node. + rotation_matrices = get_rotation_matrices_to_local_coordinates( + reference_phi=node_phi, + reference_theta=node_theta, + rotate_latitude=latitude_local_coordinates, + rotate_longitude=longitude_local_coordinates, + ) + + # Each edge will be rotated according to the rotation matrix of its receiver + # node. + edge_rotation_matrices = rotation_matrices[receivers] + + # Rotate all nodes to the rotated space of the corresponding edge. + # Note for receivers we can also do the matmul first and the gather second: + # ``` + # receiver_pos_in_rotated_space = rotate_with_matrices( + # rotation_matrices, node_pos)[receivers] + # ``` + # which is more efficient, however, we do gather first to keep it more + # symmetric with the sender computation. + receiver_pos_in_rotated_space = rotate_with_matrices( + edge_rotation_matrices, node_pos[receivers] + ) + sender_pos_in_in_rotated_space = rotate_with_matrices( + edge_rotation_matrices, node_pos[senders] + ) + # Note, here, that because the rotated space is chosen according to the + # receiver, if: + # * latitude_local_coordinates = True: latitude for the receivers will be + # 0, that is the z coordinate will always be 0. + # * longitude_local_coordinates = True: longitude for the receivers will be + # 0, that is the y coordinate will be 0. + + # Now we can just subtract. + # Note we are rotating to a local coordinate system, where the y-z axes are + # parallel to a tangent plane to the sphere, but still remain in a 3d space. + # Note that if both `latitude_local_coordinates` and + # `longitude_local_coordinates` are True, and edges are short, + # then the difference in x coordinate between sender and receiver + # should be small, so we could consider dropping the new x coordinate if + # we wanted to the tangent plane, however in doing so + # we would lose information about the curvature of the mesh, which may be + # important for very coarse meshes. + return sender_pos_in_in_rotated_space - receiver_pos_in_rotated_space + + +def get_rotation_matrices_to_local_coordinates( + reference_phi: np.ndarray, + reference_theta: np.ndarray, + rotate_latitude: bool, + rotate_longitude: bool, +) -> np.ndarray: + """Returns a rotation matrix to rotate to a point based on a reference vector. + + The rotation matrix is build such that, a vector in the + same coordinate system at the reference point that points towards the pole + before the rotation, continues to point towards the pole after the rotation. + + Args: + reference_phi: [leading_axis] Polar angles of the reference. + reference_theta: [leading_axis] Azimuthal angles of the reference. + rotate_latitude: Whether to produce a rotation matrix that would rotate R^3 vectors to zero latitude. + rotate_longitude: Whether to produce a rotation matrix that would rotate R^3 vectors to zero longitude. + + Returns: + Matrices of shape [leading_axis] such that when applied to the reference + position with `rotate_with_matrices(rotation_matrices, reference_pos)` + + * phi goes to 0. if "rotate_longitude" is True. 
+ + * theta goes to np.pi / 2 if "rotate_latitude" is True. + + The rotation consists of: + * rotate_latitude = False, rotate_longitude = True: + Latitude preserving rotation. + * rotate_latitude = True, rotate_longitude = True: + Latitude preserving rotation, followed by longitude preserving rotation. + * rotate_latitude = True, rotate_longitude = False: + Latitude preserving rotation, followed by longitude preserving rotation, and the inverse of the latitude preserving rotation. Note this is computationally different from rotating the longitude only and is. We do it like this, so the polar geodesic curve, continues to be aligned with one of the axis after the rotation. + """ + + if rotate_longitude and rotate_latitude: + + # We first rotate around the z axis "minus the azimuthal angle", to get the + # point with zero longitude + azimuthal_rotation = -reference_phi + + # One then we will do a polar rotation (which can be done along the y + # axis now that we are at longitude 0.), "minus the polar angle plus 2pi" + # to get the point with zero latitude. + polar_rotation = -reference_theta + np.pi / 2 + + return scipy.spatial.transform.Rotation.from_euler( + "zy", np.stack([azimuthal_rotation, polar_rotation], axis=1) + ).as_matrix() + elif rotate_longitude: + # Just like the previous case, but applying only the azimuthal rotation. + azimuthal_rotation = -reference_phi + return scipy.spatial.transform.Rotation.from_euler( + "z", -reference_phi + ).as_matrix() + elif rotate_latitude: + # Just like the first case, but after doing the polar rotation, undoing + # the azimuthal rotation. + azimuthal_rotation = -reference_phi + polar_rotation = -reference_theta + np.pi / 2 + + return scipy.spatial.transform.Rotation.from_euler( + "zyz", + np.stack([azimuthal_rotation, polar_rotation, -azimuthal_rotation], axis=1), + ).as_matrix() + else: + raise ValueError("At least one of longitude and latitude should be rotated.") + + +def rotate_with_matrices( + rotation_matrices: np.ndarray, positions: np.ndarray +) -> np.ndarray: + return np.einsum("bji,bi->bj", rotation_matrices, positions) + + +def get_bipartite_graph_spatial_features( + *, + senders_node_lat: np.ndarray, + senders_node_lon: np.ndarray, + senders: np.ndarray, + receivers_node_lat: np.ndarray, + receivers_node_lon: np.ndarray, + receivers: np.ndarray, + add_node_positions: bool, + add_node_latitude: bool, + add_node_longitude: bool, + add_relative_positions: bool, + edge_normalization_factor: Optional[float] = None, + relative_longitude_local_coordinates: bool, + relative_latitude_local_coordinates: bool, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Computes spatial features for the nodes. + + This function is almost identical to `get_graph_spatial_features`. The only + difference is that sender nodes and receiver nodes can be in different arrays. + This is necessary to enable combination with typed Graph. + + Args: + senders_node_lat: Latitudes in the [-90, 90] interval of shape [num_sender_nodes] + senders_node_lon: Longitudes in the [0, 360] interval of shape [num_sender_nodes] + senders: Sender indices of shape [num_edges], indices in [0, num_sender_nodes) + receivers_node_lat: Latitudes in the [-90, 90] interval of shape [num_receiver_nodes] + receivers_node_lon: Longitudes in the [0, 360] interval of shape [num_receiver_nodes] + receivers: Receiver indices of shape [num_edges], indices in [0, num_receiver_nodes) + add_node_positions: Add unit norm absolute positions. 
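# A quick numeric check of the rotation construction above: the Euler rotation
# ("zy", [-phi, pi/2 - theta]) sends the receiver's position to longitude 0 and
# latitude 0, i.e. to the Cartesian point (1, 0, 0). The sample angles are arbitrary.
import numpy as np
from scipy.spatial.transform import Rotation

lat, lon = 25.0, 40.0
phi, theta = np.deg2rad(lon), np.deg2rad(90.0 - lat)
pos = np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)])

rot = Rotation.from_euler("zy", [-phi, np.pi / 2 - theta]).as_matrix()
print(rot @ pos)  # approximately [1., 0., 0.]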
+ add_node_latitude: Add a feature for latitude (cos(90 - lat)). + Note even ifthis is set to False, the model may be able to infer the longitude from relative features, unless `relative_latitude_local_coordinates` is also True, or if there is any bias on the relative edge sizes for different longitudes. + add_node_longitude: Add features for longitude (cos(lon), sin(lon)). + Note even if this is set to False, the model may be able to infer the longitude from relative features, unless `relative_longitude_local_coordinates` is also True, or if there is any bias on the relative edge sizes for different longitudes. + add_relative_positions: Whether to relative positions in R3 to the edges. + edge_normalization_factor: Allows explicitly controlling edge normalization. If None, defaults to max edge length. This supports using pre-trained model weights with a different graph structure to what it was trained on. + relative_longitude_local_coordinates: If True, relative positions are computed in a local space where the receiver is at 0 longitude. + relative_latitude_local_coordinates: If True, relative positions are computed in a local space where the receiver is at 0 latitude. + + Returns: + Arrays of shape: [num_nodes, num_features] and [num_edges, num_features]. with node and edge features. + """ + + num_senders = senders_node_lat.shape[0] + num_receivers = receivers_node_lat.shape[0] + num_edges = senders.shape[0] + dtype = senders_node_lat.dtype + assert receivers_node_lat.dtype == dtype + senders_node_phi, senders_node_theta = lat_lon_deg_to_spherical( + senders_node_lat, senders_node_lon + ) + receivers_node_phi, receivers_node_theta = lat_lon_deg_to_spherical( + receivers_node_lat, receivers_node_lon + ) + + # Computing some node features. + senders_node_features = [] + receivers_node_features = [] + if add_node_positions: + # Already in [-1, 1.] range. + senders_node_features.extend( + spherical_to_cartesian(senders_node_phi, senders_node_theta) + ) + receivers_node_features.extend( + spherical_to_cartesian(receivers_node_phi, receivers_node_theta) + ) + + if add_node_latitude: + # Using the cos of theta. + # From 1. (north pole) to -1 (south pole). + senders_node_features.append(np.cos(senders_node_theta)) + receivers_node_features.append(np.cos(receivers_node_theta)) + + if add_node_longitude: + # Using the cos and sin, which is already normalized. + senders_node_features.append(np.cos(senders_node_phi)) + senders_node_features.append(np.sin(senders_node_phi)) + + receivers_node_features.append(np.cos(receivers_node_phi)) + receivers_node_features.append(np.sin(receivers_node_phi)) + + if not senders_node_features: + senders_node_features = np.zeros([num_senders, 0], dtype=dtype) + receivers_node_features = np.zeros([num_receivers, 0], dtype=dtype) + else: + senders_node_features = np.stack(senders_node_features, axis=-1) + receivers_node_features = np.stack(receivers_node_features, axis=-1) + + # Computing some edge features. + edge_features = [] + + if add_relative_positions: + + relative_position = ( + get_bipartite_relative_position_in_receiver_local_coordinates( + senders_node_phi=senders_node_phi, + senders_node_theta=senders_node_theta, + receivers_node_phi=receivers_node_phi, + receivers_node_theta=receivers_node_theta, + senders=senders, + receivers=receivers, + latitude_local_coordinates=relative_latitude_local_coordinates, + longitude_local_coordinates=relative_longitude_local_coordinates, + ) + ) + + # Note this is L2 distance in 3d space, rather than geodesic distance. 
+ relative_edge_distances = np.linalg.norm( + relative_position, axis=-1, keepdims=True + ) + + if edge_normalization_factor is None: + # Normalize to the maximum edge distance. Note that we expect to always + # have an edge that goes in the opposite direction of any given edge + # so the distribution of relative positions should be symmetric around + # zero. So by scaling by the maximum length, we expect all relative + # positions to fall in the [-1., 1.] interval, and all relative distances + # to fall in the [0., 1.] interval. + edge_normalization_factor = relative_edge_distances.max() + + edge_features.append(relative_edge_distances / edge_normalization_factor) + edge_features.append(relative_position / edge_normalization_factor) + + if not edge_features: + edge_features = np.zeros([num_edges, 0], dtype=dtype) + else: + edge_features = np.concatenate(edge_features, axis=-1) + + return senders_node_features, receivers_node_features, edge_features + + +def get_bipartite_relative_position_in_receiver_local_coordinates( + senders_node_phi: np.ndarray, + senders_node_theta: np.ndarray, + senders: np.ndarray, + receivers_node_phi: np.ndarray, + receivers_node_theta: np.ndarray, + receivers: np.ndarray, + latitude_local_coordinates: bool, + longitude_local_coordinates: bool, +) -> np.ndarray: + """Returns relative position features for the edges. + + This function is equivalent to + `get_relative_position_in_receiver_local_coordinates`, but adapted to work + with bipartite typed graphs. + + The relative positions will be computed in a rotated space for a local + coordinate system as defined by the receiver. The relative positions are + simply obtained by subtracting sender position minues receiver position in + that local coordinate system after the rotation in R^3. + + Args: + senders_node_phi: [num_sender_nodes] with polar angles. + senders_node_theta: [num_sender_nodes] with azimuthal angles. + senders: [num_edges] with indices into sender nodes. + receivers_node_phi: [num_sender_nodes] with polar angles. + receivers_node_theta: [num_sender_nodes] with azimuthal angles. + receivers: [num_edges] with indices into receiver nodes. + latitude_local_coordinates: Whether to rotate edges such that in the positions are computed such that the receiver is always at latitude 0. + longitude_local_coordinates: Whether to rotate edges such that in the positions are computed such that the receiver is always at longitude 0. + + Returns: + Array of relative positions in R3 [num_edges, 3] + """ + + senders_node_pos = np.stack( + spherical_to_cartesian(senders_node_phi, senders_node_theta), axis=-1 + ) + + receivers_node_pos = np.stack( + spherical_to_cartesian(receivers_node_phi, receivers_node_theta), axis=-1 + ) + + # No rotation in this case. + if not (latitude_local_coordinates or longitude_local_coordinates): + return senders_node_pos[senders] - receivers_node_pos[receivers] + + # Get rotation matrices for the local space space for every receiver node. + receiver_rotation_matrices = get_rotation_matrices_to_local_coordinates( + reference_phi=receivers_node_phi, + reference_theta=receivers_node_theta, + rotate_latitude=latitude_local_coordinates, + rotate_longitude=longitude_local_coordinates, + ) + + # Each edge will be rotated according to the rotation matrix of its receiver + # node. + edge_rotation_matrices = receiver_rotation_matrices[receivers] + + # Rotate all nodes to the rotated space of the corresponding edge. 
+ # Note for receivers we can also do the matmul first and the gather second: + # ``` + # receiver_pos_in_rotated_space = rotate_with_matrices( + # rotation_matrices, node_pos)[receivers] + # ``` + # which is more efficient, however, we do gather first to keep it more + # symmetric with the sender computation. + receiver_pos_in_rotated_space = rotate_with_matrices( + edge_rotation_matrices, receivers_node_pos[receivers] + ) + sender_pos_in_in_rotated_space = rotate_with_matrices( + edge_rotation_matrices, senders_node_pos[senders] + ) + # Note, here, that because the rotated space is chosen according to the + # receiver, if: + # * latitude_local_coordinates = True: latitude for the receivers will be + # 0, that is the z coordinate will always be 0. + # * longitude_local_coordinates = True: longitude for the receivers will be + # 0, that is the y coordinate will be 0. + + # Now we can just subtract. + # Note we are rotating to a local coordinate system, where the y-z axes are + # parallel to a tangent plane to the sphere, but still remain in a 3d space. + # Note that if both `latitude_local_coordinates` and + # `longitude_local_coordinates` are True, and edges are short, + # then the difference in x coordinate between sender and receiver + # should be small, so we could consider dropping the new x coordinate if + # we wanted to the tangent plane, however in doing so + # we would lose information about the curvature of the mesh, which may be + # important for very coarse meshes. + return sender_pos_in_in_rotated_space - receiver_pos_in_rotated_space + + +class GraphGridMesh: + """Graph datatype of GraphCast. + + Args: + mesh_size (int): size of mesh. + radius_query_fraction_edge_length (float): _description_ + mesh2grid_edge_normalization_factor (float): Normalization factor of edge in Mesh2Grid GNN. + resolution (float): resolution of atmospheric data. + mesh2mesh_src_index (np.array, optional): Index of Mesh2Mesh source node. Defaults to None. + mesh2mesh_dst_index (np.array, optional): Index of Mesh2Mesh destination node. Defaults to None. + grid2mesh_src_index (np.array, optional): Index of Grid2Mesh source node. Defaults to None. + grid2mesh_dst_index (np.array, optional): Index of Grid2Mesh destination node. + mesh2grid_src_index (np.array, optional): Index of Mesh2Grid source node. Defaults to None. + mesh2grid_dst_index (np.array, optional): Index of Mesh2Grid destination node. Defaults to None. + mesh_num_nodes (int, optional): Number of mesh nodes. Defaults to None. + grid_num_nodes (int, optional): Number of grid nodes. Defaults to None. + mesh_num_edges (int, optional): Number of mesh edges. Defaults to None. + grid2mesh_num_edges (int, optional): Number of edges in Grid2Mesh GNN. Defaults to None. + mesh2grid_num_edges (int, optional): Number of edges in Mesh2Grid GNN. Defaults to None. + grid_node_feat (np.array, optional): Feature of grid nodes. Defaults to None. + mesh_node_feat (np.array, optional): Feature of mehs nodes. Defaults to None. + mesh_edge_feat (np.array, optional): Feature of mesh edges. Defaults to None. + grid2mesh_edge_feat (np.array, optional): Feature of edges in Grid2Mesh GNN. Defaults to None. + mesh2grid_edge_feat (np.array, optional): Feature of edges in Mesh2Grid GNN. Defaults to None. 
+ """ + + def __init__( + self, + mesh_size: int, + radius_query_fraction_edge_length: float, + mesh2grid_edge_normalization_factor: float, + resolution: float, + mesh2mesh_src_index: np.array = None, + mesh2mesh_dst_index: np.array = None, + grid2mesh_src_index: np.array = None, + grid2mesh_dst_index: np.array = None, + mesh2grid_src_index: np.array = None, + mesh2grid_dst_index: np.array = None, + mesh_num_nodes: int = None, + grid_num_nodes: int = None, + mesh_num_edges: int = None, + grid2mesh_num_edges: np.array = None, + mesh2grid_num_edges: np.array = None, + grid_node_feat: np.array = None, + mesh_node_feat: np.array = None, + mesh_edge_feat: np.array = None, + grid2mesh_edge_feat: np.array = None, + mesh2grid_edge_feat: np.array = None, + ): + self.meshes = get_hierarchy_of_triangular_meshes_for_sphere(mesh_size) + + all_input_vars = [ + mesh2mesh_src_index, + mesh2mesh_dst_index, + grid2mesh_src_index, + grid2mesh_dst_index, + mesh2grid_src_index, + mesh2grid_dst_index, + mesh_num_nodes, + grid_num_nodes, + mesh_num_edges, + grid2mesh_num_edges, + mesh2grid_num_edges, + grid_node_feat, + mesh_node_feat, + mesh_edge_feat, + grid2mesh_edge_feat, + mesh2grid_edge_feat, + ] + should_init = any(var is None for var in all_input_vars) + + if should_init: + self.query_radius = ( + self._get_max_edge_distance(self.finest_mesh) + * radius_query_fraction_edge_length + ) + self._mesh2grid_edge_normalization_factor = ( + mesh2grid_edge_normalization_factor + ) + self._spatial_features_kwargs = dict( + add_node_positions=False, + add_node_latitude=True, + add_node_longitude=True, + add_relative_positions=True, + relative_longitude_local_coordinates=True, + relative_latitude_local_coordinates=True, + ) + + self.init_mesh_properties() + self._init_grid_properties( + grid_lat=np.arange(-90.0, 90.0 + resolution, resolution), + grid_lon=np.arange(0.0, 360.0, resolution), + ) + self._grid2mesh_graph_structure = self._init_grid2mesh_graph() + self._mesh_graph_structure = self._init_mesh_graph() + self._mesh2grid_graph_structure = self._init_mesh2grid_graph() + else: + self.mesh2mesh_src_index = mesh2mesh_src_index + self.mesh2mesh_dst_index = mesh2mesh_dst_index + self.grid2mesh_src_index = grid2mesh_src_index + self.grid2mesh_dst_index = grid2mesh_dst_index + self.mesh2grid_src_index = mesh2grid_src_index + self.mesh2grid_dst_index = mesh2grid_dst_index + + self.mesh_num_nodes = mesh_num_nodes + self.grid_num_nodes = grid_num_nodes + + self.mesh_num_edges = mesh_num_edges + self.grid2mesh_num_edges = grid2mesh_num_edges + self.mesh2grid_num_edges = mesh2grid_num_edges + + self.grid_node_feat = grid_node_feat + self.mesh_node_feat = mesh_node_feat + self.mesh_edge_feat = mesh_edge_feat + self.grid2mesh_edge_feat = grid2mesh_edge_feat + self.mesh2grid_edge_feat = mesh2grid_edge_feat + + def update(self, name, value): + if hasattr(self, name): + setattr(self, name, value) + else: + raise ValueError + + def tensor(self): + self.mesh2mesh_src_index = paddle.to_tensor( + self.mesh2mesh_src_index, dtype=paddle.int64 + ) + + self.mesh2mesh_dst_index = paddle.to_tensor( + self.mesh2mesh_dst_index, dtype=paddle.int64 + ) + self.grid2mesh_src_index = paddle.to_tensor( + self.grid2mesh_src_index, dtype=paddle.int64 + ) + self.grid2mesh_dst_index = paddle.to_tensor( + self.grid2mesh_dst_index, dtype=paddle.int64 + ) + self.mesh2grid_src_index = paddle.to_tensor( + self.mesh2grid_src_index, dtype=paddle.int64 + ) + self.mesh2grid_dst_index = paddle.to_tensor( + self.mesh2grid_dst_index, dtype=paddle.int64 + ) + 
self.grid_node_feat = paddle.to_tensor( + self.grid_node_feat, dtype=paddle.get_default_dtype() + ) + self.mesh_node_feat = paddle.to_tensor( + self.mesh_node_feat, dtype=paddle.get_default_dtype() + ) + self.mesh_edge_feat = paddle.to_tensor( + self.mesh_edge_feat, dtype=paddle.get_default_dtype() + ) + self.grid2mesh_edge_feat = paddle.to_tensor( + self.grid2mesh_edge_feat, dtype=paddle.get_default_dtype() + ) + self.mesh2grid_edge_feat = paddle.to_tensor( + self.mesh2grid_edge_feat, dtype=paddle.get_default_dtype() + ) + return self + + @property + def finest_mesh(self): + return self.meshes[-1] + + def init_mesh_properties(self): + """Inits static properties that have to do with mesh nodes.""" + self.mesh_num_nodes = self.finest_mesh.vertices.shape[0] + mesh_phi, mesh_theta = cartesian_to_spherical( + self.finest_mesh.vertices[:, 0], + self.finest_mesh.vertices[:, 1], + self.finest_mesh.vertices[:, 2], + ) + (mesh_nodes_lat, mesh_nodes_lon) = spherical_to_lat_lon( + phi=mesh_phi, + theta=mesh_theta, + ) + # Convert to f32 to ensure the lat/lon features aren't in f64. + self._mesh_nodes_lat = mesh_nodes_lat.astype(np.float32) + self._mesh_nodes_lon = mesh_nodes_lon.astype(np.float32) + + def _init_grid_properties(self, grid_lat: np.ndarray, grid_lon: np.ndarray): + """Inits static properties that have to do with grid nodes.""" + self._grid_lat = grid_lat.astype(np.float32) + self._grid_lon = grid_lon.astype(np.float32) + # Initialized the counters. + self.grid_num_nodes = grid_lat.shape[0] * grid_lon.shape[0] + + # Initialize lat and lon for the grid. + grid_nodes_lon, grid_nodes_lat = np.meshgrid(grid_lon, grid_lat) + self._grid_nodes_lon = grid_nodes_lon.reshape([-1]).astype(np.float32) + self._grid_nodes_lat = grid_nodes_lat.reshape([-1]).astype(np.float32) + + def _init_grid2mesh_graph(self): + """Build Grid2Mesh graph.""" + + # Create some edges according to distance between mesh and grid nodes. + assert self._grid_lat is not None and self._grid_lon is not None + (grid_indices, mesh_indices) = radius_query_indices( + grid_latitude=self._grid_lat, + grid_longitude=self._grid_lon, + mesh=self.finest_mesh, + radius=self.query_radius, + ) + + # Edges sending info from grid to mesh. + senders = grid_indices + receivers = mesh_indices + + # Precompute structural node and edge features according to config options. + # Structural features are those that depend on the fixed values of the + # latitude and longitudes of the nodes. + ( + senders_node_features, + _, + edge_features, + ) = get_bipartite_graph_spatial_features( + senders_node_lat=self._grid_nodes_lat, + senders_node_lon=self._grid_nodes_lon, + receivers_node_lat=self._mesh_nodes_lat, + receivers_node_lon=self._mesh_nodes_lon, + senders=senders, + receivers=receivers, + edge_normalization_factor=None, + **self._spatial_features_kwargs, + ) + + self.grid_node_feat = np.expand_dims(senders_node_features, axis=1) + + self.grid2mesh_src_index = senders + self.grid2mesh_dst_index = receivers + self.grid2mesh_edge_feat = np.expand_dims(edge_features, axis=1) + self.grid2mesh_num_edges = len(edge_features) + + def _init_mesh_graph(self): + """Build Mesh graph.""" + merged_mesh = merge_meshes(self.meshes) + # Work simply on the mesh edges. + senders, receivers = faces_to_edges(merged_mesh.faces) + # Precompute structural node and edge features according to config options. + # Structural features are those that depend on the fixed values of the + # latitude and longitudes of the nodes. 
+ assert self._mesh_nodes_lat is not None and self._mesh_nodes_lon is not None + node_features, edge_features = get_graph_spatial_features( + node_lat=self._mesh_nodes_lat, + node_lon=self._mesh_nodes_lon, + senders=senders, + receivers=receivers, + **self._spatial_features_kwargs, + ) + + self.mesh_node_feat = np.expand_dims(node_features, axis=1) + self.mesh2mesh_src_index = senders + self.mesh2mesh_dst_index = receivers + self.mesh_edge_feat = np.expand_dims(edge_features, axis=1) + self.mesh_num_edges = len(edge_features) + + def _init_mesh2grid_graph(self): + """Build Mesh2Grid graph.""" + + # Create some edges according to how the grid nodes are contained by + # mesh triangles. + (grid_indices, mesh_indices) = in_mesh_triangle_indices( + grid_latitude=self._grid_lat, + grid_longitude=self._grid_lon, + mesh=self.finest_mesh, + ) + + # Edges sending info from mesh to grid. + senders = mesh_indices + receivers = grid_indices + + # Precompute structural node and edge features according to config options. + assert self._mesh_nodes_lat is not None and self._mesh_nodes_lon is not None + (_, _, edge_features) = get_bipartite_graph_spatial_features( + senders_node_lat=self._mesh_nodes_lat, + senders_node_lon=self._mesh_nodes_lon, + receivers_node_lat=self._grid_nodes_lat, + receivers_node_lon=self._grid_nodes_lon, + senders=senders, + receivers=receivers, + edge_normalization_factor=self._mesh2grid_edge_normalization_factor, + **self._spatial_features_kwargs, + ) + + self.mesh2grid_src_index = senders + self.mesh2grid_dst_index = receivers + self.mesh2grid_edge_feat = np.expand_dims(edge_features, axis=1) + self.mesh2grid_num_edges = len(edge_features) + + @staticmethod + def _get_max_edge_distance(mesh): + senders, receivers = faces_to_edges(mesh.faces) + edge_distances = np.linalg.norm( + mesh.vertices[senders] - mesh.vertices[receivers], axis=-1 + ) + return edge_distances.max() + + def grid_node_outputs_to_prediction( + self, + grid_node_outputs: np.ndarray, + targets_template: "xarray.Dataset", + ) -> "xarray.Dataset": + """[num_grid_nodes, batch, num_outputs] -> xarray.""" + # numpy array with shape [lat_lon_node, batch, channels] + # to xarray `DataArray` (batch, lat, lon, channels) + assert self._grid_lat is not None and self._grid_lon is not None + grid_shape = (self._grid_lat.shape[0], self._grid_lon.shape[0]) + grid_outputs_lat_lon_leading = grid_node_outputs.reshape( + grid_shape + grid_node_outputs.shape[1:] + ) + dims = ("lat", "lon", "batch", "channels") + grid_xarray_lat_lon_leading = xarray.DataArray( + data=grid_outputs_lat_lon_leading, dims=dims + ) + grid_xarray = restore_leading_axes(grid_xarray_lat_lon_leading) + + # xarray `DataArray` (batch, lat, lon, channels) + # to xarray `Dataset` (batch, one time step, lat, lon, level, multiple vars) + return stacked_to_dataset(grid_xarray.variable, targets_template) + + +class TriangularMesh(NamedTuple): + vertices: np.ndarray + faces: np.ndarray + + +def merge_meshes(mesh_list: Sequence[TriangularMesh]) -> TriangularMesh: + for i in range(len(mesh_list) - 1): + mesh_i, mesh_ip1 = mesh_list[i], mesh_list[i + 1] + num_nodes_mesh_i = mesh_i.vertices.shape[0] + assert np.allclose(mesh_i.vertices, mesh_ip1.vertices[:num_nodes_mesh_i]) + + return TriangularMesh( + vertices=mesh_list[-1].vertices, + faces=np.concatenate([mesh.faces for mesh in mesh_list], axis=0), + ) + + +def get_icosahedron(): + phi = (1 + np.sqrt(5)) / 2 + product = [[1.0, phi], [1.0, -phi], [-1.0, phi], [-1.0, -phi]] + vertices = [] + for p in product: + c1 = p[0] 
+ c2 = p[1] + vertices.append((c1, c2, 0.0)) + vertices.append((0.0, c1, c2)) + vertices.append((c2, 0.0, c1)) + + vertices = np.array(vertices, dtype=np.float32) + vertices /= np.linalg.norm([1.0, phi]) + + faces = [ + (0, 1, 2), + (0, 6, 1), + (8, 0, 2), + (8, 4, 0), + (3, 8, 2), + (3, 2, 7), + (7, 2, 1), + (0, 4, 6), + (4, 11, 6), + (6, 11, 5), + (1, 5, 7), + (4, 10, 11), + (4, 8, 10), + (10, 8, 3), + (10, 3, 9), + (11, 10, 9), + (11, 9, 5), + (5, 9, 7), + (9, 3, 7), + (1, 6, 5), + ] + + angle_between_faces = 2 * np.arcsin(phi / np.sqrt(3)) + rotation_angle = (np.pi - angle_between_faces) / 2 + rotation = scipy.spatial.transform.Rotation.from_euler( + seq="y", angles=rotation_angle + ) + rotation_matrix = rotation.as_matrix() + vertices = np.dot(vertices, rotation_matrix) + + return TriangularMesh( + vertices=vertices.astype(np.float32), faces=np.array(faces, dtype=np.int32) + ) + + +def get_hierarchy_of_triangular_meshes_for_sphere( + splits: int, +) -> List[TriangularMesh]: + current_mesh = get_icosahedron() + output_meshes = [current_mesh] + for _ in range(splits): + current_mesh = _two_split_unit_sphere_triangle_faces(current_mesh) + output_meshes.append(current_mesh) + return output_meshes + + +def _two_split_unit_sphere_triangle_faces( + triangular_mesh: TriangularMesh, +) -> TriangularMesh: + """Splits each triangular face into 4 triangles keeping the orientation.""" + new_vertices_builder = _ChildVerticesBuilder(triangular_mesh.vertices) + + new_faces = [] + for ind1, ind2, ind3 in triangular_mesh.faces: + ind12 = new_vertices_builder.get_new_child_vertex_index((ind1, ind2)) + ind23 = new_vertices_builder.get_new_child_vertex_index((ind2, ind3)) + ind31 = new_vertices_builder.get_new_child_vertex_index((ind3, ind1)) + new_faces.extend( + [ + [ind1, ind12, ind31], # 1 + [ind12, ind2, ind23], # 2 + [ind31, ind23, ind3], # 3 + [ind12, ind23, ind31], # 4 + ] + ) + return TriangularMesh( + vertices=new_vertices_builder.get_all_vertices(), + faces=np.array(new_faces, dtype=np.int32), + ) + + +class _ChildVerticesBuilder: + """Bookkeeping of new child vertices added to an existing set of vertices.""" + + def __init__(self, parent_vertices): + self._child_vertices_index_mapping = {} + self._parent_vertices = parent_vertices + # We start with all previous vertices. + self._all_vertices_list = list(parent_vertices) + + def _get_child_vertex_key(self, parent_vertex_indices): + return tuple(sorted(parent_vertex_indices)) + + def _create_child_vertex(self, parent_vertex_indices): + """Creates a new vertex.""" + # Position for new vertex is the middle point, between the parent points, + # projected to unit sphere. + child_vertex_position = self._parent_vertices[list(parent_vertex_indices)].mean( + 0 + ) + child_vertex_position /= np.linalg.norm(child_vertex_position) + + # Add the vertex to the output list. The index for this new vertex will + # match the length of the list before adding it. + child_vertex_key = self._get_child_vertex_key(parent_vertex_indices) + self._child_vertices_index_mapping[child_vertex_key] = len( + self._all_vertices_list + ) + self._all_vertices_list.append(child_vertex_position) + + def get_new_child_vertex_index(self, parent_vertex_indices): + """Returns index for a child vertex, creating it if necessary.""" + # Get the key to see if we already have a new vertex in the middle. 
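+ # Two faces sharing an edge request the same midpoint; because the key is the
+ # sorted tuple of parent indices, both requests resolve to the same entry and
+ # the child vertex is created only once.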
+ child_vertex_key = self._get_child_vertex_key(parent_vertex_indices) + if child_vertex_key not in self._child_vertices_index_mapping: + self._create_child_vertex(parent_vertex_indices) + return self._child_vertices_index_mapping[child_vertex_key] + + def get_all_vertices(self): + """Returns an array with old vertices.""" + return np.array(self._all_vertices_list) + + +def faces_to_edges(faces: np.ndarray): + """Transforms polygonal faces to sender and receiver indices. + + It does so by transforming every face into N_i edges. Such if the triangular + face has indices [0, 1, 2], three edges are added 0->1, 1->2, and 2->0. + + If all faces have consistent orientation, and the surface represented by the + faces is closed, then every edge in a polygon with a certain orientation + is also part of another polygon with the opposite orientation. In this + situation, the edges returned by the method are always bidirectional. + + Args: + faces: Integer array of shape [num_faces, 3]. Contains node indices adjacent to each face. + Returns: + Tuple with sender/receiver indices, each of shape [num_edges=num_faces*3]. + """ + + assert faces.ndim == 2 + assert faces.shape[-1] == 3 + senders = np.concatenate([faces[:, 0], faces[:, 1], faces[:, 2]]) + receivers = np.concatenate([faces[:, 1], faces[:, 2], faces[:, 0]]) + return senders, receivers + + +def _grid_lat_lon_to_coordinates( + grid_latitude: np.ndarray, grid_longitude: np.ndarray +) -> np.ndarray: + """Lat [num_lat] lon [num_lon] to 3d coordinates [num_lat, num_lon, 3].""" + # Convert to spherical coordinates phi and theta defined in the grid. + # Each [num_latitude_points, num_longitude_points] + phi_grid, theta_grid = np.meshgrid( + np.deg2rad(grid_longitude), np.deg2rad(90 - grid_latitude) + ) + + # [num_latitude_points, num_longitude_points, 3] + # Note this assumes unit radius, since for now we model the earth as a + # sphere of unit radius, and keep any vertical dimension as a regular grid. + return np.stack( + [ + np.cos(phi_grid) * np.sin(theta_grid), + np.sin(phi_grid) * np.sin(theta_grid), + np.cos(theta_grid), + ], + axis=-1, + ) + + +def radius_query_indices( + *, + grid_latitude: np.ndarray, + grid_longitude: np.ndarray, + mesh: TriangularMesh, + radius: float, +) -> Tuple[np.ndarray, np.ndarray]: + """Returns mesh-grid edge indices for radius query. + + Args: + grid_latitude: Latitude values for the grid [num_lat_points] + grid_longitude: Longitude values for the grid [num_lon_points] + mesh: Mesh object. + radius: Radius of connectivity in R3. for a sphere of unit radius. + + Returns: + tuple with `grid_indices` and `mesh_indices` indicating edges between the grid and the mesh such that the distances in a straight line (not geodesic) are smaller than or equal to `radius`. + grid_indices: Indices of shape [num_edges], that index into a + [num_lat_points, num_lon_points] grid, after flattening the leading axes. + mesh_indices: Indices of shape [num_edges], that index into mesh.vertices. + """ + + # [num_grid_points=num_lat_points * num_lon_points, 3] + grid_positions = _grid_lat_lon_to_coordinates( + grid_latitude, grid_longitude + ).reshape([-1, 3]) + + # [num_mesh_points, 3] + mesh_positions = mesh.vertices + kd_tree = scipy.spatial.cKDTree(mesh_positions) + + # [num_grid_points, num_mesh_points_per_grid_point] + # Note `num_mesh_points_per_grid_point` is not constant, so this is a list + # of arrays, rather than a 2d array. 
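+ # For each grid point, `query_ball_point` returns the list of mesh-vertex
+ # indices whose straight-line (chordal) distance is within `radius`; the loop
+ # below flattens these ragged lists into flat grid/mesh edge-index arrays.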
+ query_indices = kd_tree.query_ball_point(x=grid_positions, r=radius) + + grid_edge_indices = [] + mesh_edge_indices = [] + for grid_index, mesh_neighbors in enumerate(query_indices): + grid_edge_indices.append(np.repeat(grid_index, len(mesh_neighbors))) + mesh_edge_indices.append(mesh_neighbors) + + # [num_edges] + grid_edge_indices = np.concatenate(grid_edge_indices, axis=0).astype(int) + mesh_edge_indices = np.concatenate(mesh_edge_indices, axis=0).astype(int) + + return grid_edge_indices, mesh_edge_indices + + +def in_mesh_triangle_indices( + *, grid_latitude: np.ndarray, grid_longitude: np.ndarray, mesh: TriangularMesh +) -> tuple[np.ndarray, np.ndarray]: + """Returns mesh-grid edge indices for grid points contained in mesh triangles. + + Args: + grid_latitude: Latitude values for the grid [num_lat_points] + grid_longitude: Longitude values for the grid [num_lon_points] + mesh: Mesh object. + + Returns: + tuple with `grid_indices` and `mesh_indices` indicating edges between the grid and the mesh vertices of the triangle that contain each grid point. The number of edges is always num_lat_points * num_lon_points * 3 + grid_indices: Indices of shape [num_edges], that index into a [num_lat_points, num_lon_points] grid, after flattening the leading axes. + mesh_indices: Indices of shape [num_edges], that index into mesh.vertices. + """ + + # [num_grid_points=num_lat_points * num_lon_points, 3] + grid_positions = _grid_lat_lon_to_coordinates( + grid_latitude, grid_longitude + ).reshape([-1, 3]) + + mesh_trimesh = trimesh.Trimesh(vertices=mesh.vertices, faces=mesh.faces) + + # [num_grid_points] with mesh face indices for each grid point. + _, _, query_face_indices = trimesh.proximity.closest_point( + mesh_trimesh, grid_positions + ) + + # [num_grid_points, 3] with mesh node indices for each grid point. + mesh_edge_indices = mesh.faces[query_face_indices] + + # [num_grid_points, 3] with grid node indices, where every row simply contains + # the row (grid_point) index. + grid_indices = np.arange(grid_positions.shape[0]) + grid_edge_indices = np.tile(grid_indices.reshape([-1, 1]), [1, 3]) + + # Flatten to get a regular list. + # [num_edges=num_grid_points*3] + mesh_edge_indices = mesh_edge_indices.reshape([-1]) + grid_edge_indices = grid_edge_indices.reshape([-1]) + + return grid_edge_indices, mesh_edge_indices + + +def get_year_progress(seconds_since_epoch: np.ndarray) -> np.ndarray: + """Computes year progress for times in seconds. + Args: + seconds_since_epoch: Times in seconds since the "epoch" (the point at which UNIX time starts). + Returns: + Year progress normalized to be in the `[0, 1)` interval for each time point. + """ + # Start with the pure integer division, and then float at the very end. + # We will try to keep as much precision as possible. + years_since_epoch = ( + seconds_since_epoch / SEC_PER_DAY / np.float64(_AVG_DAY_PER_YEAR) + ) + # Note depending on how these ops are down, we may end up with a "weak_type" + # which can cause issues in subtle ways, and hard to track here. + # In any case, casting to float32 should get rid of the weak type. + # [0, 1.) Interval. + return np.mod(years_since_epoch, 1.0).astype(np.float32) + + +def get_day_progress( + seconds_since_epoch: np.ndarray, + longitude: np.ndarray, +) -> np.ndarray: + """Computes day progress for times in seconds at each longitude. + Args: + seconds_since_epoch: 1D array of times in seconds since the 'epoch' (the point at which UNIX time starts). 
+ longitude: 1D array of longitudes at which day progress is computed. + Returns: + 2D array of day progress values normalized to be in the [0, 1) inverval for each time point at each longitude. + """ + # [0.0, 1.0) Interval. + day_progress_greenwich = np.mod(seconds_since_epoch, SEC_PER_DAY) / SEC_PER_DAY + # Offset the day progress to the longitude of each point on Earth. + longitude_offsets = np.deg2rad(longitude) / (2 * np.pi) + day_progress = np.mod( + day_progress_greenwich[..., np.newaxis] + longitude_offsets, 1.0 + ) + return day_progress.astype(np.float32) + + +def datetime_features(seconds_since_epoch, longitude_offsets): + year_progress = get_year_progress(seconds_since_epoch) + day_progress = get_day_progress(seconds_since_epoch, longitude_offsets) + year_progress_phase = year_progress * (2 * np.pi) + day_progress_phase = day_progress * (2 * np.pi) + returned_data = { + "year_progress_sin": np.sin(year_progress_phase), + "year_progress_cos": np.cos(year_progress_phase), + "day_progress_sin": np.sin(day_progress_phase), + "day_progress_cos": np.cos(day_progress_phase), + } + return returned_data + + +def add_var_into_nc_dataset( + nc_dataset, + var_name, + var_value, + var_dims=( + "batch", + "time", + ), +): + new_var = nc_dataset.createVariable(var_name, "f8", var_dims) + new_var[:] = var_value + return nc_dataset + + +def extract_input_target_times( + dataset: "xarray.Dataset", + input_duration: str, + target_lead_times: str, +): + (target_lead_times, target_duration) = _process_target_lead_times_and_get_duration( + target_lead_times + ) + time = dataset.coords["time"] + dataset = dataset.assign_coords(time=time + target_duration - time[-1]) + + targets = dataset.sel({"time": target_lead_times}) + + input_duration = pd.Timedelta(input_duration) + zero = pd.Timedelta(0) + epsilon = pd.Timedelta(1, "ns") + inputs = dataset.sel({"time": slice(-input_duration + epsilon, zero)}) + return inputs, targets + + +def _process_target_lead_times_and_get_duration(target_lead_times: str): + """Returns the minimum duration for the target lead times.""" + if isinstance(target_lead_times, slice): + if target_lead_times.start is None: + target_lead_times = slice( + pd.Timedelta(1, "ns"), target_lead_times.stop, target_lead_times.step + ) + target_duration = pd.Timedelta(target_lead_times.stop) + else: + if not isinstance(target_lead_times, (list, tuple, set)): + target_lead_times = [target_lead_times] + + target_lead_times = [pd.Timedelta(x) for x in target_lead_times] + target_lead_times.sort() + target_duration = target_lead_times[-1] + return target_lead_times, target_duration + + +def variable_to_stacked( + variable: "xarray.Variable", + sizes: "xarray.core.utils.Frozen", + preserved_dims=("batch", "lat", "lon"), +) -> "xarray.Variable": + """Converts an xarray.Variable to preserved_dims + ("channels",). + + Any dimensions other than those included in preserved_dims get stacked into a final "channels" dimension. If any of the preserved_dims are missing then they are added, with the data broadcast/tiled to match the sizes specified in `sizes`. + + Args: + variable: An xarray.Variable. + sizes: Mapping including sizes for any dimensions which are not present in `variable` but are needed for the output. This may be needed for example for a static variable with only ("lat", "lon") dims, or if you want to encode just the latitude coordinates (a variable with dims ("lat",)). + preserved_dims: dimensions of variable to not be folded in channels. 
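+
+ For example (illustrative): a variable with dims ("batch", "lat", "lon",
+ "level") has its "level" axis folded into "channels", while a static
+ variable with only ("lat", "lon") dims is broadcast to include "batch"
+ (taken from `sizes`) and receives a single channel.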
+ + Returns: + An xarray.Variable with dimensions preserved_dims + ("channels",). + """ + stack_to_channels_dims = [d for d in variable.dims if d not in preserved_dims] + if stack_to_channels_dims: + variable = variable.stack(channels=stack_to_channels_dims) + dims = {dim: variable.sizes.get(dim) or sizes[dim] for dim in preserved_dims} + dims["channels"] = variable.sizes.get("channels", 1) + return variable.set_dims(dims) + + +def dataset_to_stacked( + dataset: "xarray.Dataset", + sizes=None, + preserved_dims=("batch", "lat", "lon"), +) -> "xarray.DataArray": + """Converts an xarray.Dataset to a single stacked array. + + This takes each consistuent data_var, converts it into BHWC layout + using `variable_to_stacked`, then concats them all along the channels axis. + + Args: + dataset: An xarray.Dataset. + sizes: Mapping including sizes for any dimensions which are not present in the `dataset` but are needed for the output. See variable_to_stacked. + preserved_dims: dimensions from the dataset that should not be folded in the predictions channels. + + Returns: + An xarray.DataArray with dimensions preserved_dims + ("channels",). Existing coordinates for preserved_dims axes will be preserved, however there will be no coordinates for "channels". + """ + data_vars = [ + variable_to_stacked( + dataset.variables[name], sizes or dataset.sizes, preserved_dims + ) + for name in sorted(dataset.data_vars.keys()) + ] + coords = { + dim: coord for dim, coord in dataset.coords.items() if dim in preserved_dims + } + return xarray.DataArray( + data=xarray.Variable.concat(data_vars, dim="channels"), coords=coords + ) + + +class GridMeshAtmosphericDataset(io.Dataset): + """This class is used to process ERA5 re-analyze data, and is used to generate the dataset generator supported by MindSpore. This class inherits the Data class. + + Args: + input_keys (Tuple[str, ...]): Name of input data. + label_keys (Tuple[str, ...]): Name of label data. + data_path: Path of atmospheric datafile. + mean_path: Path of mean datafile. + stddev_path: Path of standard deviation datafile. + stddev_diffs_path: Path of standard deviation different datafile. + type: Type of GraphCast network. + mesh_size: Size of mesh. + mesh2grid_edge_normalization_factor: Factor of normalization of edges in Mesh2Grid GNN. + radius_query_fraction_edge_length: Length of radius query fraction edges. + resolution: Resolution of atmospheric data. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.GridMeshAtmosphericDataset( + ... "input_keys": ("input",), + ... "label_keys": ("output",), + ... "data_path": "/path/to/file.nc", + ... "mean_path": "/path/to/file.nc", + ... "stddev_path": "/path/to/file.nc", + ... "stddev_diffs_path": "/path/to/file.nc", + ... "type": "graphcast_small", + ... "mesh_size": 5, + ... "mesh2grid_edge_normalization_factor": 0.06, + ... "radius_query_fraction_edge_length": 0.6180338738074472, + ... "resolution": 1, + ... 
) # doctest: +SKIP + """ + + use_graph_grid_mesh: bool = True + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + data_path: str, + mean_path: str, + stddev_path: str, + stddev_diffs_path: str, + type: str, + mesh_size: int, + mesh2grid_edge_normalization_factor: float, + radius_query_fraction_edge_length: float, + resolution: float, + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + if type == "graphcast": + self.input_variables = TASK_input_variables + self.forcing_variables = TASK_forcing_variables + self.target_variables = TASK_target_variables + self.level_variables = PRESSURE_LEVELS[37] + elif type == "graphcast_small": + self.input_variables = TASK_13_input_variables + self.forcing_variables = TASK_13_forcing_variables + self.target_variables = TASK_13_target_variables + self.level_variables = PRESSURE_LEVELS[13] + elif type == "graphcast_operational": + self.input_variables = TASK_13_PRECIP_OUT_input_variables + self.forcing_variables = TASK_13_PRECIP_OUT_forcing_variables + self.target_variables = TASK_13_PRECIP_OUT_target_variables + self.level_variables = PRESSURE_LEVELS[13] + + nc_dataset = xarray.open_dataset(data_path) + + longitude_offsets = nc_dataset.coords["lon"].data + second_since_epoch = ( + nc_dataset.coords["datetime"].data.astype("datetime64[s]").astype(np.int64) + ) + datetime_feats = datetime_features(second_since_epoch, longitude_offsets) + nc_dataset.update( + { + "year_progress_sin": xarray.Variable( + ("batch", "time"), datetime_feats["year_progress_sin"] + ), + "year_progress_cos": xarray.Variable( + ("batch", "time"), datetime_feats["year_progress_cos"] + ), + "day_progress_sin": xarray.Variable( + ("batch", "time", "lon"), datetime_feats["day_progress_sin"] + ), + "day_progress_cos": xarray.Variable( + ("batch", "time", "lon"), datetime_feats["day_progress_cos"] + ), + } + ) + + inputs, targets = extract_input_target_times( + nc_dataset, input_duration="12h", target_lead_times="6h" + ) + + stddev_data = xarray.open_dataset(stddev_path).sel( + level=list(self.level_variables) + ) + stddev_diffs_data = xarray.open_dataset(stddev_diffs_path).sel( + level=list(self.level_variables) + ) + mean_data = xarray.open_dataset(mean_path).sel(level=list(self.level_variables)) + + missing_variables = set(self.target_variables) - set(self.input_variables) + exist_variables = set(self.target_variables) - missing_variables + targets_stddev = stddev_diffs_data[list(exist_variables)] + target_mean = inputs[list(exist_variables)].isel(time=-1) + if missing_variables: + targets_stddev.update({var: stddev_data[var] for var in missing_variables}) + target_mean.update( + {var: mean_data.variables[var] for var in missing_variables} + ) + + stacked_targets_stddev = dataset_to_stacked(targets_stddev, preserved_dims=()) + stacked_targets_mean = dataset_to_stacked(target_mean) + stacked_targets_mean = stacked_targets_mean.transpose("lat", "lon", ...) 
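+
+ # Select the input/forcing/target variables, normalize inputs and forcings
+ # with the per-variable mean/stddev statistics, and stack them into
+ # [lat * lon, batch, channels] arrays; the stacked inputs are then
+ # concatenated onto the grid-node features of the GraphGridMesh built below.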
+ + inputs = inputs[list(self.input_variables)] + forcings = targets[list(self.forcing_variables)] + targets = targets[list(self.target_variables)] + inputs = self.normalize(inputs, stddev_data, mean_data) + forcings = self.normalize(forcings, stddev_data, mean_data) + + self.targets_template = targets + + stacked_inputs = dataset_to_stacked(inputs) + stacked_forcings = dataset_to_stacked(forcings) + stacked_targets = dataset_to_stacked(targets) + stacked_inputs = xarray.concat( + [stacked_inputs, stacked_forcings], dim="channels" + ) + + stacked_inputs = stacked_inputs.transpose("lat", "lon", ...) + stacked_targets = stacked_targets.transpose("lat", "lon", ...) + + lat_dim, lon_dim, batch_dim, feat_dim = stacked_inputs.shape + stacked_inputs = stacked_inputs.data.reshape(lat_dim * lon_dim, batch_dim, -1) + stacked_targets = stacked_targets.data.reshape(lat_dim * lon_dim, batch_dim, -1) + self.stacked_targets_stddev = stacked_targets_stddev.data + self.stacked_targets_mean = stacked_targets_mean.data.reshape( + lat_dim * lon_dim, batch_dim, -1 + ) + + self.input_data = [] + self.target_data = [] + + graph = GraphGridMesh( + mesh_size=mesh_size, + radius_query_fraction_edge_length=radius_query_fraction_edge_length, + mesh2grid_edge_normalization_factor=mesh2grid_edge_normalization_factor, + resolution=resolution, + ) + + graph.grid_node_feat = np.concatenate( + [stacked_inputs, graph.grid_node_feat], axis=-1 + ) + mesh_node_feat = np.zeros([graph.mesh_num_nodes, batch_dim, feat_dim]) + graph.mesh_node_feat = np.concatenate( + [mesh_node_feat, graph.mesh_node_feat], axis=-1 + ) + + self.input_data.append(graph) + self.target_data.append(stacked_targets) + + def __len__(self): + return len(self.input_data) + + def __getitem__(self, idx): + return ( + { + self.input_keys[0]: self.input_data[idx], + }, + { + self.label_keys[0]: self.target_data[idx], + }, + None, + ) + + def normalize(self, inputs_data, stddev_data, mean_data): + for name in list(inputs_data.keys()): + inputs_data[name] = (inputs_data[name] - mean_data[name]) / stddev_data[ + name + ] + return inputs_data + + def denormalize(self, inputs_data): + return inputs_data * self.stacked_targets_stddev + self.stacked_targets_mean +>>>>>>> Stashed changes diff --git a/ppsci/data/dataset/cgcnn_dataset.py b/ppsci/data/dataset/cgcnn_dataset.py index d1d93720fc..8e54e3145a 100644 --- a/ppsci/data/dataset/cgcnn_dataset.py +++ b/ppsci/data/dataset/cgcnn_dataset.py @@ -1,312 +1,312 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import csv -import functools -import json -import os -import random -import warnings -from typing import Tuple - -import numpy as np -import paddle -from paddle import io - -try: - from pymatgen.core.structure import Structure -except ModuleNotFoundError: - pass - - -def collate_pool(dataset_list): - - """ - Collate a list of data and return a batch for predicting crystal properties. 
- - Args: - dataset_list (list): A list of tuples for each data point containing: - - atom_fea (paddle.Tensor): Shape (n_i, atom_fea_len). - - nbr_fea (paddle.Tensor): Shape (n_i, M, nbr_fea_len). - - nbr_fea_idx (paddle.Tensor): Shape (n_i, M). - - target (paddle.Tensor): Shape (1,). - - cif_id (str or int). - - Returns: - tuple: Contains the following: - - batch_atom_fea (paddle.Tensor): Shape (N, orig_atom_fea_len). Atom features from atom type. - - batch_nbr_fea (paddle.Tensor): Shape (N, M, nbr_fea_len). Bond features of each atom's M neighbors. - - batch_nbr_fea_idx (paddle.Tensor): Shape (N, M). Indices of M neighbors of each atom. - - crystal_atom_idx (list): List of paddle.Tensor of length N0. Mapping from the crystal idx to atom idx. - - target (paddle.Tensor): Shape (N, 1). Target value for prediction. - - batch_cif_ids (list): List of CIF IDs. - - Notes: - - N = sum(n_i); N0 = sum(i) - """ - batch_atom_fea, batch_nbr_fea, batch_nbr_fea_idx = [], [], [] - crystal_atom_idx, batch_target = [], [] - batch_cif_ids = [] - base_idx = 0 - for i, item in enumerate(dataset_list): - input: Tuple[np.ndarray, np.ndarray, np.ndarray] = item[0]["i"] - label = item[1]["l"] - id = item[2]["c"] - atom_fea, nbr_fea, nbr_fea_idx = input - target = label - cif_id = id - n_i = atom_fea.shape[0] # number of atoms for this crystal - batch_atom_fea.append(atom_fea) - batch_nbr_fea.append(nbr_fea) - batch_nbr_fea_idx.append(nbr_fea_idx + base_idx) - new_idx = np.arange(n_i, dtype="int64") + int(base_idx) - crystal_atom_idx.append(new_idx) - batch_target.append(target) - batch_cif_ids.append(cif_id) - base_idx += n_i - # Debugging: print shapes of the tensors to ensure they are consistent - # print("Shapes of batch_atom_fea:", [x.shape for x in batch_atom_fea]) - # print("Shapes of batch_nbr_fea:", [x.shape for x in batch_nbr_fea]) - # print("Shapes of batch_nbr_fea_idx:", [x.shape for x in batch_nbr_fea_idx]) - # Ensure all tensors in the lists have consistent shapes before concatenation - batch_atom_fea = np.concatenate(batch_atom_fea, axis=0) - batch_nbr_fea = np.concatenate(batch_nbr_fea, axis=0) - batch_nbr_fea_idx = np.concatenate(batch_nbr_fea_idx, axis=0) - return ( - { - "i": ( - np.array(batch_atom_fea, dtype="float32"), - np.array(batch_nbr_fea, dtype="float32"), - np.array(batch_nbr_fea_idx), - [np.array(crys_idx) for crys_idx in crystal_atom_idx], - ) - }, - {"l": np.array(np.stack(batch_target, axis=0))}, - {"c": batch_cif_ids}, - ) - - -class GaussianDistance(object): - """ - Expands the distance by Gaussian basis. - - Args: - dmin (float): Minimum interatomic distance. - dmax (float): Maximum interatomic distance. - step (float): Step size for the Gaussian filter. - """ - - def __init__(self, dmin, dmax, step, var=None): - assert dmin < dmax - assert dmax - dmin > step - self.filter = np.arange(dmin, dmax + step, step) - if var is None: - var = step - self.var = var - - def expand(self, distances): - """ - Apply Gaussian distance filter to a numpy distance array. - - Args: - distance (np.array): n-dimensional distance matrix of any shape. - - Returns: - np.array: Expanded distance matrix with the last dimension of length len(self.filter). - """ - - return np.exp( - -((distances[..., np.newaxis] - self.filter) ** 2) / self.var**2 - ) - - -class AtomInitializer(object): - """ - Base class for intializing the vector representation for atoms. - - !!! Use one AtomInitializer per dataset !!! 
- """ - - def __init__(self, atom_types): - self.atom_types = set(atom_types) - self._embedding = {} - - def get_atom_fea(self, atom_type): - assert atom_type in self.atom_types - return self._embedding[atom_type] - - def load_state_dict(self, state_dict): - self._embedding = state_dict - self.atom_types = set(self._embedding.keys()) - self._decodedict = { - idx: atom_type for atom_type, idx in self._embedding.items() - } - - def state_dict(self): - return self._embedding - - def decode(self, idx): - if not hasattr(self, "_decodedict"): - self._decodedict = { - idx: atom_type for atom_type, idx in self._embedding.items() - } - return self._decodedict[idx] - - -class AtomCustomJSONInitializer(AtomInitializer): - """ - Initialize atom feature vectors using a JSON file, which is a Python dictionary mapping from element number to a list representing the feature vector of the element. - - Args: - elem_embedding_file (str): The path to the .json file. - """ - - def __init__(self, elem_embedding_file): - with open(elem_embedding_file) as f: - elem_embedding = json.load(f) - elem_embedding = {int(key): value for key, value in elem_embedding.items()} - atom_types = set(elem_embedding.keys()) - super(AtomCustomJSONInitializer, self).__init__(atom_types) - for key, value in elem_embedding.items(): - self._embedding[key] = np.array(value, dtype=float) - - -class CIFData(io.Dataset): - """ - The CIFData dataset is a wrapper for a dataset where the crystal structures - are stored in the form of CIF files. The dataset should have the following - directory structure: - - root_dir - ├── id_prop.csv - ├── atom_init.json - ├── id0.cif - ├── id1.cif - ├── ... - - id_prop.csv: a CSV file with two columns. The first column recodes a - unique ID for each crystal, and the second column recodes the value of - target property. - - atom_init.json: a JSON file that stores the initialization vector for each element. - - ID.cif: a CIF file that recodes the crystal structure, where ID is the - unique ID for the crystal. - - Args - root_dir (str): The path to the root directory of the dataset - max_num_nbr (int): The maximum number of neighbors while constructing the crystal graph - radius (float): The cutoff radius for searching neighbors - dmin (float): The minimum distance for constructing GaussianDistance - step (float): The step size for constructing GaussianDistance - random_seed (int): Random seed for shuffling the dataset - - - Returns - atom_fea (paddle.Tensor): Shape (n_i, atom_fea_len) - nbr_fea (paddle.Tensor): Shape (n_i, M, nbr_fea_len) - nbr_fea_idx (paddle.Tensor): Shape (n_i, M) - target (paddle.Tensor): Shape (1, ) - cif_id (str or int) - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.CGCNNDataset( - ... "file_path": "/path/to/CGCNNDataset", - ... "input_keys": "i", - ... "label_keys": "l", - ... "id_keys": "c", - ... ) # doctest: +SKIP - """ - - def __init__( - self, - root_dir: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - id_keys: Tuple[str, ...], - max_num_nbr: int = 12, - radius: int = 8, - dmin: int = 0, - step: float = 0.2, - random_seed: int = 123, - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - self.id_keys = id_keys - self.root_dir = root_dir - self.max_num_nbr, self.radius = max_num_nbr, radius - assert os.path.exists(root_dir), "root_dir does not exist!" - id_prop_file = os.path.join(self.root_dir, "id_prop.csv") - assert os.path.exists(id_prop_file), "id_prop.csv does not exist!" 
- with open(id_prop_file) as f: - reader = csv.reader(f) - self.id_prop_data = [row for row in reader] - random.seed(random_seed) - random.shuffle(self.id_prop_data) - atom_init_file = os.path.join(self.root_dir, "atom_init.json") - assert os.path.exists(atom_init_file), f"{atom_init_file} does not exist!" - self.ari = AtomCustomJSONInitializer(atom_init_file) - self.gdf = GaussianDistance(dmin=dmin, dmax=self.radius, step=step) - self.raw_data = [self.get(i) for i in range(len(self))] - - def __len__(self): - return len(self.id_prop_data) - - @functools.lru_cache(maxsize=None) # Cache loaded structures - def __getitem__(self, idx): - return ( - {self.input_keys[0]: self.raw_data[idx][0]}, - {self.label_keys[0]: self.raw_data[idx][1]}, - {self.id_keys[0]: self.raw_data[idx][2]}, - ) - - def get(self, idx): - cif_id, target = self.id_prop_data[idx] - crystal = Structure.from_file(os.path.join(self.root_dir, cif_id + ".cif")) - atom_fea = np.vstack( - [ - self.ari.get_atom_fea(crystal[i].specie.number) - for i in range(len(crystal)) - ] - ) - atom_fea = paddle.Tensor(atom_fea) - all_nbrs = crystal.get_all_neighbors(self.radius, include_index=True) - all_nbrs = [sorted(nbrs, key=lambda x: x[1]) for nbrs in all_nbrs] - nbr_fea_idx, nbr_fea = [], [] - for nbr in all_nbrs: - if len(nbr) < self.max_num_nbr: - warnings.warn( - f"{cif_id} not find enough neighbors to build graph. " - "If it happens frequently, consider increase " - "radius." - ) - nbr_fea_idx.append( - list(map(lambda x: x[2], nbr)) + [0] * (self.max_num_nbr - len(nbr)) - ) - nbr_fea.append( - list(map(lambda x: x[1], nbr)) - + [self.radius + 1.0] * (self.max_num_nbr - len(nbr)) - ) - else: - nbr_fea_idx.append(list(map(lambda x: x[2], nbr[: self.max_num_nbr]))) - nbr_fea.append(list(map(lambda x: x[1], nbr[: self.max_num_nbr]))) - nbr_fea_idx, nbr_fea = np.array(nbr_fea_idx), np.array(nbr_fea) - nbr_fea = self.gdf.expand(nbr_fea) - atom_fea = np.array(atom_fea) - nbr_fea = np.array(nbr_fea) - nbr_fea_idx = np.array(nbr_fea_idx, dtype="int64") - target = np.array([float(target)], dtype="float32") - return (atom_fea, nbr_fea, nbr_fea_idx), target, cif_id +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import csv +import functools +import json +import os +import random +import warnings +from typing import Tuple + +import numpy as np +import paddle +from paddle import io + +try: + from pymatgen.core.structure import Structure +except ModuleNotFoundError: + pass + + +def collate_pool(dataset_list): + + """ + Collate a list of data and return a batch for predicting crystal properties. + + Args: + dataset_list (list): A list of tuples for each data point containing: + - atom_fea (paddle.Tensor): Shape (n_i, atom_fea_len). + - nbr_fea (paddle.Tensor): Shape (n_i, M, nbr_fea_len). + - nbr_fea_idx (paddle.Tensor): Shape (n_i, M). + - target (paddle.Tensor): Shape (1,). + - cif_id (str or int). 
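+
+ For example (illustrative): two crystals with 3 and 2 atoms are concatenated
+ into N = 5 atom rows, and the neighbor indices of the second crystal are
+ shifted by base_idx = 3 so that they keep pointing at its own atoms.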
+ + Returns: + tuple: Contains the following: + - batch_atom_fea (paddle.Tensor): Shape (N, orig_atom_fea_len). Atom features from atom type. + - batch_nbr_fea (paddle.Tensor): Shape (N, M, nbr_fea_len). Bond features of each atom's M neighbors. + - batch_nbr_fea_idx (paddle.Tensor): Shape (N, M). Indices of M neighbors of each atom. + - crystal_atom_idx (list): List of paddle.Tensor of length N0. Mapping from the crystal idx to atom idx. + - target (paddle.Tensor): Shape (N, 1). Target value for prediction. + - batch_cif_ids (list): List of CIF IDs. + + Notes: + - N = sum(n_i); N0 = sum(i) + """ + batch_atom_fea, batch_nbr_fea, batch_nbr_fea_idx = [], [], [] + crystal_atom_idx, batch_target = [], [] + batch_cif_ids = [] + base_idx = 0 + for i, item in enumerate(dataset_list): + input: Tuple[np.ndarray, np.ndarray, np.ndarray] = item[0]["i"] + label = item[1]["l"] + id = item[2]["c"] + atom_fea, nbr_fea, nbr_fea_idx = input + target = label + cif_id = id + n_i = atom_fea.shape[0] # number of atoms for this crystal + batch_atom_fea.append(atom_fea) + batch_nbr_fea.append(nbr_fea) + batch_nbr_fea_idx.append(nbr_fea_idx + base_idx) + new_idx = np.arange(n_i, dtype="int64") + int(base_idx) + crystal_atom_idx.append(new_idx) + batch_target.append(target) + batch_cif_ids.append(cif_id) + base_idx += n_i + # Debugging: print shapes of the tensors to ensure they are consistent + # print("Shapes of batch_atom_fea:", [x.shape for x in batch_atom_fea]) + # print("Shapes of batch_nbr_fea:", [x.shape for x in batch_nbr_fea]) + # print("Shapes of batch_nbr_fea_idx:", [x.shape for x in batch_nbr_fea_idx]) + # Ensure all tensors in the lists have consistent shapes before concatenation + batch_atom_fea = np.concatenate(batch_atom_fea, axis=0) + batch_nbr_fea = np.concatenate(batch_nbr_fea, axis=0) + batch_nbr_fea_idx = np.concatenate(batch_nbr_fea_idx, axis=0) + return ( + { + "i": ( + np.array(batch_atom_fea, dtype="float32"), + np.array(batch_nbr_fea, dtype="float32"), + np.array(batch_nbr_fea_idx), + [np.array(crys_idx) for crys_idx in crystal_atom_idx], + ) + }, + {"l": np.array(np.stack(batch_target, axis=0))}, + {"c": batch_cif_ids}, + ) + + +class GaussianDistance(object): + """ + Expands the distance by Gaussian basis. + + Args: + dmin (float): Minimum interatomic distance. + dmax (float): Maximum interatomic distance. + step (float): Step size for the Gaussian filter. + """ + + def __init__(self, dmin, dmax, step, var=None): + assert dmin < dmax + assert dmax - dmin > step + self.filter = np.arange(dmin, dmax + step, step) + if var is None: + var = step + self.var = var + + def expand(self, distances): + """ + Apply Gaussian distance filter to a numpy distance array. + + Args: + distance (np.array): n-dimensional distance matrix of any shape. + + Returns: + np.array: Expanded distance matrix with the last dimension of length len(self.filter). + """ + + return np.exp( + -((distances[..., np.newaxis] - self.filter) ** 2) / self.var**2 + ) + + +class AtomInitializer(object): + """ + Base class for intializing the vector representation for atoms. + + !!! Use one AtomInitializer per dataset !!! 
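+
+ Examples:
+ The snippet below is only an illustrative sketch; the element numbers and
+ two-element feature vectors are toy placeholders, not real embeddings:
+
+ >>> ainit = AtomInitializer(atom_types={1, 6, 8})  # doctest: +SKIP
+ >>> ainit.load_state_dict({1: [0.0, 1.0], 6: [1.0, 0.0], 8: [0.5, 0.5]})  # doctest: +SKIP
+ >>> ainit.get_atom_fea(6)  # doctest: +SKIP
+ [1.0, 0.0]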
+ """ + + def __init__(self, atom_types): + self.atom_types = set(atom_types) + self._embedding = {} + + def get_atom_fea(self, atom_type): + assert atom_type in self.atom_types + return self._embedding[atom_type] + + def load_state_dict(self, state_dict): + self._embedding = state_dict + self.atom_types = set(self._embedding.keys()) + self._decodedict = { + idx: atom_type for atom_type, idx in self._embedding.items() + } + + def state_dict(self): + return self._embedding + + def decode(self, idx): + if not hasattr(self, "_decodedict"): + self._decodedict = { + idx: atom_type for atom_type, idx in self._embedding.items() + } + return self._decodedict[idx] + + +class AtomCustomJSONInitializer(AtomInitializer): + """ + Initialize atom feature vectors using a JSON file, which is a Python dictionary mapping from element number to a list representing the feature vector of the element. + + Args: + elem_embedding_file (str): The path to the .json file. + """ + + def __init__(self, elem_embedding_file): + with open(elem_embedding_file) as f: + elem_embedding = json.load(f) + elem_embedding = {int(key): value for key, value in elem_embedding.items()} + atom_types = set(elem_embedding.keys()) + super(AtomCustomJSONInitializer, self).__init__(atom_types) + for key, value in elem_embedding.items(): + self._embedding[key] = np.array(value, dtype=float) + + +class CIFData(io.Dataset): + """ + The CIFData dataset is a wrapper for a dataset where the crystal structures + are stored in the form of CIF files. The dataset should have the following + directory structure: + + root_dir + ├── id_prop.csv + ├── atom_init.json + ├── id0.cif + ├── id1.cif + ├── ... + + id_prop.csv: a CSV file with two columns. The first column recodes a + unique ID for each crystal, and the second column recodes the value of + target property. + + atom_init.json: a JSON file that stores the initialization vector for each element. + + ID.cif: a CIF file that recodes the crystal structure, where ID is the + unique ID for the crystal. + + Args + root_dir (str): The path to the root directory of the dataset + max_num_nbr (int): The maximum number of neighbors while constructing the crystal graph + radius (float): The cutoff radius for searching neighbors + dmin (float): The minimum distance for constructing GaussianDistance + step (float): The step size for constructing GaussianDistance + random_seed (int): Random seed for shuffling the dataset + + + Returns + atom_fea (paddle.Tensor): Shape (n_i, atom_fea_len) + nbr_fea (paddle.Tensor): Shape (n_i, M, nbr_fea_len) + nbr_fea_idx (paddle.Tensor): Shape (n_i, M) + target (paddle.Tensor): Shape (1, ) + cif_id (str or int) + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.CGCNNDataset( + ... "file_path": "/path/to/CGCNNDataset", + ... "input_keys": "i", + ... "label_keys": "l", + ... "id_keys": "c", + ... ) # doctest: +SKIP + """ + + def __init__( + self, + root_dir: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + id_keys: Tuple[str, ...], + max_num_nbr: int = 12, + radius: int = 8, + dmin: int = 0, + step: float = 0.2, + random_seed: int = 123, + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + self.id_keys = id_keys + self.root_dir = root_dir + self.max_num_nbr, self.radius = max_num_nbr, radius + assert os.path.exists(root_dir), "root_dir does not exist!" + id_prop_file = os.path.join(self.root_dir, "id_prop.csv") + assert os.path.exists(id_prop_file), "id_prop.csv does not exist!" 
+ with open(id_prop_file) as f: + reader = csv.reader(f) + self.id_prop_data = [row for row in reader] + random.seed(random_seed) + random.shuffle(self.id_prop_data) + atom_init_file = os.path.join(self.root_dir, "atom_init.json") + assert os.path.exists(atom_init_file), f"{atom_init_file} does not exist!" + self.ari = AtomCustomJSONInitializer(atom_init_file) + self.gdf = GaussianDistance(dmin=dmin, dmax=self.radius, step=step) + self.raw_data = [self.get(i) for i in range(len(self))] + + def __len__(self): + return len(self.id_prop_data) + + @functools.lru_cache(maxsize=None) # Cache loaded structures + def __getitem__(self, idx): + return ( + {self.input_keys[0]: self.raw_data[idx][0]}, + {self.label_keys[0]: self.raw_data[idx][1]}, + {self.id_keys[0]: self.raw_data[idx][2]}, + ) + + def get(self, idx): + cif_id, target = self.id_prop_data[idx] + crystal = Structure.from_file(os.path.join(self.root_dir, cif_id + ".cif")) + atom_fea = np.vstack( + [ + self.ari.get_atom_fea(crystal[i].specie.number) + for i in range(len(crystal)) + ] + ) + atom_fea = paddle.Tensor(atom_fea) + all_nbrs = crystal.get_all_neighbors(self.radius, include_index=True) + all_nbrs = [sorted(nbrs, key=lambda x: x[1]) for nbrs in all_nbrs] + nbr_fea_idx, nbr_fea = [], [] + for nbr in all_nbrs: + if len(nbr) < self.max_num_nbr: + warnings.warn( + f"{cif_id} not find enough neighbors to build graph. " + "If it happens frequently, consider increase " + "radius." + ) + nbr_fea_idx.append( + list(map(lambda x: x[2], nbr)) + [0] * (self.max_num_nbr - len(nbr)) + ) + nbr_fea.append( + list(map(lambda x: x[1], nbr)) + + [self.radius + 1.0] * (self.max_num_nbr - len(nbr)) + ) + else: + nbr_fea_idx.append(list(map(lambda x: x[2], nbr[: self.max_num_nbr]))) + nbr_fea.append(list(map(lambda x: x[1], nbr[: self.max_num_nbr]))) + nbr_fea_idx, nbr_fea = np.array(nbr_fea_idx), np.array(nbr_fea) + nbr_fea = self.gdf.expand(nbr_fea) + atom_fea = np.array(atom_fea) + nbr_fea = np.array(nbr_fea) + nbr_fea_idx = np.array(nbr_fea_idx, dtype="int64") + target = np.array([float(target)], dtype="float32") + return (atom_fea, nbr_fea, nbr_fea_idx), target, cif_id diff --git a/ppsci/data/dataset/csv_dataset.py b/ppsci/data/dataset/csv_dataset.py index c14bb107da..02b8405115 100644 --- a/ppsci/data/dataset/csv_dataset.py +++ b/ppsci/data/dataset/csv_dataset.py @@ -1,287 +1,287 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle -from paddle import io -from paddle import vision - -from ppsci.utils import misc -from ppsci.utils import reader - - -class CSVDataset(io.Dataset): - """Dataset class for .csv file. - - Args: - file_path (str): CSV file path. - input_keys (Tuple[str, ...]): List of input keys. - label_keys (Tuple[str, ...]): List of label keys. 
- alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. - i.e. {inner_key: outer_key}. Defaults to None. - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of - each constraint variable. Defaults to None. - timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data - in the time dimension. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.CSVDataset( - ... "/path/to/file.csv", - ... ("x",), - ... ("u",), - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = True - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - alias_dict: Optional[Dict[str, str]] = None, - weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, - timestamps: Optional[Tuple[float, ...]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - - # read raw data from file - raw_data = reader.load_csv_file( - file_path, - input_keys + label_keys, - alias_dict, - ) - # filter raw data by given timestamps if specified - if timestamps is not None: - if "t" in raw_data: - # filter data according to given timestamps - raw_time_array = raw_data["t"] - mask = [] - for ti in timestamps: - mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - mask = np.concatenate(mask, 0) - raw_data = raw_data[mask] - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - else: - # repeat data according to given timestamps - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - raw_data = misc.combine_array_with_time(raw_data, timestamps) - self.input_keys = ("t",) + tuple(self.input_keys) - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - - # fetch input data - self.input = { - key: value for key, value in raw_data.items() if key in self.input_keys - } - # fetch label data - self.label = { - key: value for key, value in raw_data.items() if key in self.label_keys - } - - # prepare weights - self.weight = ( - {key: np.ones_like(next(iter(self.label.values()))) for key in self.label} - if weight_dict is not None - else {} - ) - if weight_dict is not None: - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), value - ) - elif callable(value): - func = value - self.weight[key] = func(self.input) - if isinstance(self.weight[key], (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), self.weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - self.transforms = transforms - self._len = len(next(iter(self.input.values()))) - - def __getitem__(self, idx): - input_item = {key: value[idx] for key, value in self.input.items()} - label_item = {key: value[idx] for key, value in self.label.items()} - weight_item = {key: value[idx] for key, value in self.weight.items()} - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return (input_item, label_item, weight_item) - - def 
__len__(self): - return self._len - - -class IterableCSVDataset(io.IterableDataset): - """IterableCSVDataset for full-data loading. - - Args: - file_path (str): CSV file path. - input_keys (Tuple[str, ...]): List of input keys. - label_keys (Tuple[str, ...]): List of label keys. - alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. - Defaults to None. - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of - each constraint variable. Defaults to None. - timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data - in the time dimension. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.IterableCSVDataset( - ... "/path/to/file.csv" - ... ("x",), - ... ("u",), - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - alias_dict: Optional[Dict[str, str]] = None, - weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, - timestamps: Optional[Tuple[float, ...]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - - # read raw data from file - raw_data = reader.load_csv_file( - file_path, - input_keys + label_keys, - alias_dict, - ) - # filter raw data by given timestamps if specified - if timestamps is not None: - if "t" in raw_data: - # filter data according to given timestamps - raw_time_array = raw_data["t"] - mask = [] - for ti in timestamps: - mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - mask = np.concatenate(mask, 0) - raw_data = raw_data[mask] - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - else: - # repeat data according to given timestamps - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - raw_data = misc.combine_array_with_time(raw_data, timestamps) - self.input_keys = ("t",) + tuple(self.input_keys) - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - - # fetch input data - self.input = { - key: value for key, value in raw_data.items() if key in self.input_keys - } - # fetch label data - self.label = { - key: value for key, value in raw_data.items() if key in self.label_keys - } - - # prepare weights - self.weight = ( - {key: np.ones_like(next(iter(self.label.values()))) for key in self.label} - if weight_dict is not None - else {} - ) - if weight_dict is not None: - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), value - ) - elif callable(value): - func = value - self.weight[key] = func(self.input) - if isinstance(self.weight[key], (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), self.weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - self.input = {key: paddle.to_tensor(value) for key, value in self.input.items()} - self.label = {key: paddle.to_tensor(value) for key, value in self.label.items()} - self.weight = { - key: paddle.to_tensor(value) for key, value in self.weight.items() - } - - 
self.transforms = transforms - self._len = len(next(iter(self.input.values()))) - - @property - def num_samples(self): - """Number of samples within current dataset.""" - return self._len - - def __iter__(self): - if callable(self.transforms): - input_, label_, weight_ = self.transforms( - self.input, self.label, self.weight - ) - yield input_, label_, weight_ - else: - yield self.input, self.label, self.weight - - def __len__(self): - return 1 +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import numpy as np +import paddle +from paddle import io +from paddle import vision + +from ppsci.utils import misc +from ppsci.utils import reader + + +class CSVDataset(io.Dataset): + """Dataset class for .csv file. + + Args: + file_path (str): CSV file path. + input_keys (Tuple[str, ...]): List of input keys. + label_keys (Tuple[str, ...]): List of label keys. + alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. + i.e. {inner_key: outer_key}. Defaults to None. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of + each constraint variable. Defaults to None. + timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data + in the time dimension. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.CSVDataset( + ... "/path/to/file.csv", + ... ("x",), + ... ("u",), + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. 
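+ # (when enabled, `idx` in `__getitem__` may be an array of indices; numpy
+ # fancy indexing then returns the whole batch in a single call)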
+ batch_index: bool = True + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + alias_dict: Optional[Dict[str, str]] = None, + weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, + timestamps: Optional[Tuple[float, ...]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + + # read raw data from file + raw_data = reader.load_csv_file( + file_path, + input_keys + label_keys, + alias_dict, + ) + # filter raw data by given timestamps if specified + if timestamps is not None: + if "t" in raw_data: + # filter data according to given timestamps + raw_time_array = raw_data["t"] + mask = [] + for ti in timestamps: + mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + mask = np.concatenate(mask, 0) + raw_data = raw_data[mask] + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + else: + # repeat data according to given timestamps + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + raw_data = misc.combine_array_with_time(raw_data, timestamps) + self.input_keys = ("t",) + tuple(self.input_keys) + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + + # fetch input data + self.input = { + key: value for key, value in raw_data.items() if key in self.input_keys + } + # fetch label data + self.label = { + key: value for key, value in raw_data.items() if key in self.label_keys + } + + # prepare weights + self.weight = ( + {key: np.ones_like(next(iter(self.label.values()))) for key in self.label} + if weight_dict is not None + else {} + ) + if weight_dict is not None: + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), value + ) + elif callable(value): + func = value + self.weight[key] = func(self.input) + if isinstance(self.weight[key], (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), self.weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + self.transforms = transforms + self._len = len(next(iter(self.input.values()))) + + def __getitem__(self, idx): + input_item = {key: value[idx] for key, value in self.input.items()} + label_item = {key: value[idx] for key, value in self.label.items()} + weight_item = {key: value[idx] for key, value in self.weight.items()} + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return (input_item, label_item, weight_item) + + def __len__(self): + return self._len + + +class IterableCSVDataset(io.IterableDataset): + """IterableCSVDataset for full-data loading. + + Args: + file_path (str): CSV file path. + input_keys (Tuple[str, ...]): List of input keys. + label_keys (Tuple[str, ...]): List of label keys. + alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. + Defaults to None. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of + each constraint variable. Defaults to None. + timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data + in the time dimension. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). 
Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.IterableCSVDataset( + ... "/path/to/file.csv" + ... ("x",), + ... ("u",), + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + alias_dict: Optional[Dict[str, str]] = None, + weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, + timestamps: Optional[Tuple[float, ...]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + + # read raw data from file + raw_data = reader.load_csv_file( + file_path, + input_keys + label_keys, + alias_dict, + ) + # filter raw data by given timestamps if specified + if timestamps is not None: + if "t" in raw_data: + # filter data according to given timestamps + raw_time_array = raw_data["t"] + mask = [] + for ti in timestamps: + mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + mask = np.concatenate(mask, 0) + raw_data = raw_data[mask] + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + else: + # repeat data according to given timestamps + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + raw_data = misc.combine_array_with_time(raw_data, timestamps) + self.input_keys = ("t",) + tuple(self.input_keys) + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + + # fetch input data + self.input = { + key: value for key, value in raw_data.items() if key in self.input_keys + } + # fetch label data + self.label = { + key: value for key, value in raw_data.items() if key in self.label_keys + } + + # prepare weights + self.weight = ( + {key: np.ones_like(next(iter(self.label.values()))) for key in self.label} + if weight_dict is not None + else {} + ) + if weight_dict is not None: + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), value + ) + elif callable(value): + func = value + self.weight[key] = func(self.input) + if isinstance(self.weight[key], (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), self.weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + self.input = {key: paddle.to_tensor(value) for key, value in self.input.items()} + self.label = {key: paddle.to_tensor(value) for key, value in self.label.items()} + self.weight = { + key: paddle.to_tensor(value) for key, value in self.weight.items() + } + + self.transforms = transforms + self._len = len(next(iter(self.input.values()))) + + @property + def num_samples(self): + """Number of samples within current dataset.""" + return self._len + + def __iter__(self): + if callable(self.transforms): + input_, label_, weight_ = self.transforms( + self.input, self.label, self.weight + ) + yield input_, label_, weight_ + else: + yield self.input, self.label, self.weight + + def __len__(self): + return 1 diff --git a/ppsci/data/dataset/cylinder_dataset.py b/ppsci/data/dataset/cylinder_dataset.py index 3a49b7d436..a6be86ba9b 100644 --- a/ppsci/data/dataset/cylinder_dataset.py +++ b/ppsci/data/dataset/cylinder_dataset.py @@ -1,215 +1,215 @@ -# Copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import os -from os import path as osp -from typing import Tuple - -import numpy as np -import paddle -from paddle import io - -from ppsci.data.dataset import airfoil_dataset - -try: - import pgl -except ModuleNotFoundError: - pass - -SU2_SHAPE_IDS = { - "line": 3, - "triangle": 5, - "quad": 9, -} - - -class MeshCylinderDataset(io.Dataset): - """Dataset for `MeshCylinder`. - - Args: - input_keys (Tuple[str, ...]): Name of input data. - label_keys (Tuple[str, ...]): Name of label data. - data_dir (str): Directory of MeshCylinder data. - mesh_graph_path (str): Path of mesh graph. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.MeshAirfoilDataset( - ... "input_keys": ("input",), - ... "label_keys": ("output",), - ... "data_dir": "/path/to/MeshAirfoilDataset", - ... "mesh_graph_path": "/path/to/file.su2", - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - use_pgl: bool = True - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - data_dir: str, - mesh_graph_path: str, - ): - self.input_keys = input_keys - self.label_keys = label_keys - self.data_dir = data_dir - self.file_list = os.listdir(self.data_dir) - self.len = len(self.file_list) - self.mesh_graph = airfoil_dataset._get_mesh_graph(mesh_graph_path) - - self.normalization_factors = np.array( - [[978.6001, 48.9258, 24.8404], [-692.3159, -6.9950, -24.8572]], - dtype=paddle.get_default_dtype(), - ) - - self.nodes = self.mesh_graph[0] - self.meshnodes = self.mesh_graph[0] - self.edges = self.mesh_graph[1] - self.elems_list = self.mesh_graph[2] - self.marker_dict = self.mesh_graph[3] - self.bounder = [] - self.node_markers = np.full([self.nodes.shape[0], 1], fill_value=-1) - for i, (marker_tag, marker_elems) in enumerate(self.marker_dict.items()): - for elem in marker_elems: - self.node_markers[elem[0]] = i - self.node_markers[elem[1]] = i - - self.raw_graphs = [self.get(i) for i in range(len(self))] - - def __len__(self): - return self.len - - def __getitem__(self, idx): - return ( - { - self.input_keys[0]: self.raw_graphs[idx], - }, - { - self.label_keys[0]: self.raw_graphs[idx], - }, - None, - ) - - def get(self, idx): - with open(osp.join(self.data_dir, self.file_list[idx]), "r") as f: - field = [] - pos = [] - for line in f.read().splitlines()[1:]: - lines_pos = line.split(",")[1:3] - lines_field = line.split(",")[3:] - numbers_float = list(eval(i) for i in lines_pos) - array = np.array(numbers_float, paddle.get_default_dtype()) - pos.append(array) - numbers_float = list(eval(i) for i in lines_field) - array = np.array(numbers_float, paddle.get_default_dtype()) - field.append(array) - - field = np.stack(field, axis=0) - pos = np.stack(pos, axis=0) - indexlist = [] - for i in range(self.meshnodes.shape[0]): - b = self.meshnodes[i : (i + 1)] - b = np.squeeze(b) - index = np.nonzero( - np.sum((pos == b), 
axis=1, dtype=paddle.get_default_dtype()) - == pos.shape[1] - ) - indexlist.append(index) - indexlist = np.stack(indexlist, axis=0) - indexlist = np.squeeze(indexlist) - fields = field[indexlist] - velocity = self._get_params_from_name(self.file_list[idx]) - - norm_aoa = velocity / 40 - # add physics parameters to graph - nodes = np.concatenate( - [ - self.nodes, - np.repeat(a=norm_aoa, repeats=self.nodes.shape[0])[:, np.newaxis], - self.node_markers, - ], - axis=-1, - ).astype(paddle.get_default_dtype()) - - data = pgl.Graph( - num_nodes=nodes.shape[0], - edges=self.edges, - ) - data.x = nodes - data.y = fields - data.pos = self.nodes - data.edge_index = self.edges - data.velocity = velocity - - sender = data.x[data.edge_index[0]] - receiver = data.x[data.edge_index[1]] - relation_pos = sender[:, 0:2] - receiver[:, 0:2] - post = np.linalg.norm(relation_pos, ord=2, axis=1, keepdims=True).astype( - paddle.get_default_dtype() - ) - data.edge_attr = post - std_epsilon = [1e-8] - a = np.mean(data.edge_attr, axis=0) - b = data.edge_attr.std(axis=0) - b = np.maximum(b, std_epsilon).astype(paddle.get_default_dtype()) - data.edge_attr = (data.edge_attr - a) / b - a = np.mean(data.y, axis=0) - b = data.y.std(axis=0) - b = np.maximum(b, std_epsilon).astype(paddle.get_default_dtype()) - data.y = (data.y - a) / b - data.norm_max = a - data.norm_min = b - - # find the face of the boundary,our cylinder dataset come from fluent solver - with open(osp.join(osp.dirname(self.data_dir), "bounder"), "r") as f: - field = [] - pos = [] - for line in f.read().splitlines()[1:]: - lines_pos = line.split(",")[1:3] - lines_field = line.split(",")[3:] - numbers_float = list(eval(i) for i in lines_pos) - array = np.array(numbers_float, paddle.get_default_dtype()) - pos.append(array) - numbers_float = list(eval(i) for i in lines_field) - array = np.array(numbers_float, paddle.get_default_dtype()) - field.append(array) - - field = np.stack(field, axis=0) - pos = np.stack(pos, axis=0) - - indexlist = [] - for i in range(pos.shape[0]): - b = pos[i : (i + 1)] - b = np.squeeze(b) - index = np.nonzero( - np.sum((self.nodes == b), axis=1, dtype=paddle.get_default_dtype()) - == self.nodes.shape[1] - ) - indexlist.append(index) - - indexlist = np.stack(indexlist, axis=0) - indexlist = np.squeeze(indexlist) - self.bounder = indexlist - return data - - def _get_params_from_name(self, filename): - s = filename.rsplit(".", 1)[0] - reynolds = np.array(s[13:])[np.newaxis].astype(paddle.get_default_dtype()) - return reynolds +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
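MeshCylinderDataset.get() in this file re-orders the solver fields so they follow the mesh-node ordering by matching coordinate rows exactly (np.sum(pos == b, axis=1) == pos.shape[1]). Below is a minimal NumPy sketch of that matching step; meshnodes, pos and field are small hypothetical arrays used only for illustration, not data from the dataset.

import numpy as np

meshnodes = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # mesh-node order
pos = np.array([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])        # file (solver) order
field = np.array([[10.0], [20.0], [30.0]])                  # one value per row of pos

indexlist = []
for node in meshnodes:
    # a row matches when every coordinate is equal, i.e. the row-wise sum
    # of the boolean comparison equals the number of coordinates
    index = np.nonzero(np.sum(pos == node, axis=1) == pos.shape[1])[0]
    indexlist.append(index)
indexlist = np.squeeze(np.stack(indexlist, axis=0))
print(field[indexlist].ravel())  # -> [30. 10. 20.], fields re-ordered to mesh-node order

Note that this relies on the CSV coordinates and the mesh coordinates agreeing exactly; a tolerance-based comparison (e.g. np.isclose) would be the more forgiving variant if the two sources were written with different precision.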
+ +from __future__ import annotations + +import os +from os import path as osp +from typing import Tuple + +import numpy as np +import paddle +from paddle import io + +from ppsci.data.dataset import airfoil_dataset + +try: + import pgl +except ModuleNotFoundError: + pass + +SU2_SHAPE_IDS = { + "line": 3, + "triangle": 5, + "quad": 9, +} + + +class MeshCylinderDataset(io.Dataset): + """Dataset for `MeshCylinder`. + + Args: + input_keys (Tuple[str, ...]): Name of input data. + label_keys (Tuple[str, ...]): Name of label data. + data_dir (str): Directory of MeshCylinder data. + mesh_graph_path (str): Path of mesh graph. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.MeshAirfoilDataset( + ... "input_keys": ("input",), + ... "label_keys": ("output",), + ... "data_dir": "/path/to/MeshAirfoilDataset", + ... "mesh_graph_path": "/path/to/file.su2", + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + use_pgl: bool = True + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + data_dir: str, + mesh_graph_path: str, + ): + self.input_keys = input_keys + self.label_keys = label_keys + self.data_dir = data_dir + self.file_list = os.listdir(self.data_dir) + self.len = len(self.file_list) + self.mesh_graph = airfoil_dataset._get_mesh_graph(mesh_graph_path) + + self.normalization_factors = np.array( + [[978.6001, 48.9258, 24.8404], [-692.3159, -6.9950, -24.8572]], + dtype=paddle.get_default_dtype(), + ) + + self.nodes = self.mesh_graph[0] + self.meshnodes = self.mesh_graph[0] + self.edges = self.mesh_graph[1] + self.elems_list = self.mesh_graph[2] + self.marker_dict = self.mesh_graph[3] + self.bounder = [] + self.node_markers = np.full([self.nodes.shape[0], 1], fill_value=-1) + for i, (marker_tag, marker_elems) in enumerate(self.marker_dict.items()): + for elem in marker_elems: + self.node_markers[elem[0]] = i + self.node_markers[elem[1]] = i + + self.raw_graphs = [self.get(i) for i in range(len(self))] + + def __len__(self): + return self.len + + def __getitem__(self, idx): + return ( + { + self.input_keys[0]: self.raw_graphs[idx], + }, + { + self.label_keys[0]: self.raw_graphs[idx], + }, + None, + ) + + def get(self, idx): + with open(osp.join(self.data_dir, self.file_list[idx]), "r") as f: + field = [] + pos = [] + for line in f.read().splitlines()[1:]: + lines_pos = line.split(",")[1:3] + lines_field = line.split(",")[3:] + numbers_float = list(eval(i) for i in lines_pos) + array = np.array(numbers_float, paddle.get_default_dtype()) + pos.append(array) + numbers_float = list(eval(i) for i in lines_field) + array = np.array(numbers_float, paddle.get_default_dtype()) + field.append(array) + + field = np.stack(field, axis=0) + pos = np.stack(pos, axis=0) + indexlist = [] + for i in range(self.meshnodes.shape[0]): + b = self.meshnodes[i : (i + 1)] + b = np.squeeze(b) + index = np.nonzero( + np.sum((pos == b), axis=1, dtype=paddle.get_default_dtype()) + == pos.shape[1] + ) + indexlist.append(index) + indexlist = np.stack(indexlist, axis=0) + indexlist = np.squeeze(indexlist) + fields = field[indexlist] + velocity = self._get_params_from_name(self.file_list[idx]) + + norm_aoa = velocity / 40 + # add physics parameters to graph + nodes = np.concatenate( + [ + self.nodes, + np.repeat(a=norm_aoa, repeats=self.nodes.shape[0])[:, np.newaxis], + self.node_markers, + ], + axis=-1, + ).astype(paddle.get_default_dtype()) + + data = pgl.Graph( + num_nodes=nodes.shape[0], + 
edges=self.edges, + ) + data.x = nodes + data.y = fields + data.pos = self.nodes + data.edge_index = self.edges + data.velocity = velocity + + sender = data.x[data.edge_index[0]] + receiver = data.x[data.edge_index[1]] + relation_pos = sender[:, 0:2] - receiver[:, 0:2] + post = np.linalg.norm(relation_pos, ord=2, axis=1, keepdims=True).astype( + paddle.get_default_dtype() + ) + data.edge_attr = post + std_epsilon = [1e-8] + a = np.mean(data.edge_attr, axis=0) + b = data.edge_attr.std(axis=0) + b = np.maximum(b, std_epsilon).astype(paddle.get_default_dtype()) + data.edge_attr = (data.edge_attr - a) / b + a = np.mean(data.y, axis=0) + b = data.y.std(axis=0) + b = np.maximum(b, std_epsilon).astype(paddle.get_default_dtype()) + data.y = (data.y - a) / b + data.norm_max = a + data.norm_min = b + + # find the face of the boundary,our cylinder dataset come from fluent solver + with open(osp.join(osp.dirname(self.data_dir), "bounder"), "r") as f: + field = [] + pos = [] + for line in f.read().splitlines()[1:]: + lines_pos = line.split(",")[1:3] + lines_field = line.split(",")[3:] + numbers_float = list(eval(i) for i in lines_pos) + array = np.array(numbers_float, paddle.get_default_dtype()) + pos.append(array) + numbers_float = list(eval(i) for i in lines_field) + array = np.array(numbers_float, paddle.get_default_dtype()) + field.append(array) + + field = np.stack(field, axis=0) + pos = np.stack(pos, axis=0) + + indexlist = [] + for i in range(pos.shape[0]): + b = pos[i : (i + 1)] + b = np.squeeze(b) + index = np.nonzero( + np.sum((self.nodes == b), axis=1, dtype=paddle.get_default_dtype()) + == self.nodes.shape[1] + ) + indexlist.append(index) + + indexlist = np.stack(indexlist, axis=0) + indexlist = np.squeeze(indexlist) + self.bounder = indexlist + return data + + def _get_params_from_name(self, filename): + s = filename.rsplit(".", 1)[0] + reynolds = np.array(s[13:])[np.newaxis].astype(paddle.get_default_dtype()) + return reynolds diff --git a/ppsci/data/dataset/darcyflow_dataset.py b/ppsci/data/dataset/darcyflow_dataset.py index 3e748eb785..637b30ccc0 100644 --- a/ppsci/data/dataset/darcyflow_dataset.py +++ b/ppsci/data/dataset/darcyflow_dataset.py @@ -1,296 +1,296 @@ -from pathlib import Path -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -import paddle -from paddle import io - - -# normalization, pointwise gaussian -class UnitGaussianNormalizer: - def __init__(self, x, eps=1e-7, reduce_dim=[0], verbose=True): - super().__init__() - n_samples, *shape = x.shape - self.sample_shape = shape - self.verbose = verbose - self.reduce_dim = reduce_dim - - # x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T - self.mean = paddle.mean(x, reduce_dim, keepdim=True).squeeze(0) - self.std = paddle.std(x, reduce_dim, keepdim=True).squeeze(0) - self.eps = eps - - if verbose: - print( - f"UnitGaussianNormalizer init on {n_samples}, reducing over {reduce_dim}, samples of shape {shape}." 
- ) - print(f" Mean and std of shape {self.mean.shape}, eps={eps}") - - def encode(self, x): - - x -= self.mean - x /= self.std + self.eps - return x - - def decode(self, x, sample_idx=None): - if sample_idx is None: - std = self.std + self.eps # n - mean = self.mean - else: - if len(self.mean.shape) == len(sample_idx[0].shape): - std = self.std[sample_idx] + self.eps # batch*n - mean = self.mean[sample_idx] - if len(self.mean.shape) > len(sample_idx[0].shape): - std = self.std[:, sample_idx] + self.eps # T*batch*n - mean = self.mean[:, sample_idx] - - # x is in shape of batch*n or T*batch*n - x *= std - x += mean - - return x - - -def get_grid_positional_encoding( - input_tensor, grid_boundaries=[[0, 1], [0, 1]], channel_dim=1 -): - """Appends grid positional encoding to an input tensor, concatenating as additional dimensions along the channels. - - Args: - input_tensor (paddle.Tensor): The input tensor. - grid_boundaries (list, optional): The boundaries of the grid. Defaults to [[0, 1], [0, 1]]. - channel_dim (int, optional): The location of unsqueeze. Defaults to 1. - """ - - shape = list(input_tensor.shape) - if len(shape) == 2: - height, width = shape - else: - _, height, width = shape - - xt = paddle.linspace(grid_boundaries[0][0], grid_boundaries[0][1], height + 1)[:-1] - yt = paddle.linspace(grid_boundaries[1][0], grid_boundaries[1][1], width + 1)[:-1] - - grid_x, grid_y = paddle.meshgrid(xt, yt, indexing="ij") - - if len(shape) == 2: - grid_x = grid_x.unsqueeze(channel_dim) - grid_y = grid_y.unsqueeze(channel_dim) - else: - grid_x = grid_x.unsqueeze(0).unsqueeze(channel_dim) - grid_y = grid_y.unsqueeze(0).unsqueeze(channel_dim) - - return grid_x, grid_y - - -def regular_grid(spatial_dims, grid_boundaries=[[0, 1], [0, 1]]): - """ - Appends grid positional encoding to an input tensor, concatenating as additional dimensions along the channels - """ - height, width = spatial_dims - - xt = paddle.linspace(grid_boundaries[0][0], grid_boundaries[0][1], height + 1)[:-1] - yt = paddle.linspace(grid_boundaries[1][0], grid_boundaries[1][1], width + 1)[:-1] - - grid_x, grid_y = paddle.meshgrid(xt, yt, indexing="ij") - - grid_x = grid_x.tile((1, 1)) - grid_y = grid_y.tile((1, 1)) - - return grid_x, grid_y - - -class PositionalEmbedding2D: - def __init__(self, grid_boundaries=[[0, 1], [0, 1]]): - self.grid_boundaries = grid_boundaries - self._grid = None - self._res = None - - def grid(self, spatial_dims, dtype): - """Grid generates 2D grid needed for pos encoding - and caches the grid associated with MRU resolution - - Args: - spatial_dims (tuple[int,...]): Sizes of spatial resolution. - dtype (str): Dtype to encode data. - - Returns: - paddle.Tensor: Output grids to concatenate - """ - # handle case of multiple train resolutions - if self._grid is None or self._res != spatial_dims: - grid_x, grid_y = regular_grid( - spatial_dims, grid_boundaries=self.grid_boundaries - ) - - grid_x = grid_x.astype(dtype).unsqueeze(0).unsqueeze(0) - grid_y = grid_y.astype(dtype).unsqueeze(0).unsqueeze(0) - self._grid = grid_x, grid_y - self._res = spatial_dims - - return self._grid - - def __call__(self, data): - if data.ndim == 3: - data = data.unsqueeze(0) - x, y = self.grid(data.shape[-2:], data.dtype) - out = paddle.concat( - (data, x.expand([1, -1, -1, -1]), y.expand([1, -1, -1, -1])), axis=1 - ) - return out.squeeze(0) - - -class DarcyFlowDataset(io.Dataset): - """Loads a small Darcy-Flow dataset - - Training contains 1000 samples in resolution 16x16. 
- Testing contains 100 samples at resolution 16x16 and - 50 samples at resolution 32x32. - - Args: - input_keys (Tuple[str, ...]): Input keys, such as ("input",). - label_keys (Tuple[str, ...]): Output keys, such as ("output",). - data_dir (str): The directory to load data from. - weight_dict (Optional[Dict[str, float]], optional): Define the weight of each constraint variable. Defaults to None. - test_resolutions (List[int,...]): The resolutions to test dataset. Default is [16, 32]. - grid_boundaries (List[int,...]): The boundaries of the grid. Default is [[0,1],[0,1]]. - positional_encoding (bool): Whether to use positional encoding. Default is True - encode_input (bool): Whether to encode the input. Default is False - encode_output (bool): Whether to encode the output. Default is True - encoding (str): The type of encoding. Default is 'channel-wise'. - channel_dim (int): The location of unsqueeze. Default is 1. - where to put the channel dimension. Defaults size is batch, channel, height, width - data_split (str): Wether to use training or test dataset. Default is 'train'. - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - data_dir: str, - weight_dict: Optional[Dict[str, float]] = None, - test_resolutions: Tuple[int, ...] = [32], - train_resolution: int = 32, - grid_boundaries: Tuple[Tuple[int, ...], ...] = [[0, 1], [0, 1]], - positional_encoding: bool = True, - encode_input: bool = False, - encode_output: bool = True, - encoding: str = "channel-wise", - channel_dim: int = 1, - data_split: str = "train", - ): - super().__init__() - for res in test_resolutions: - if res not in [16, 32]: - raise ValueError( - f"Only 32 and 64 are supported for test resolution, but got {test_resolutions}" - ) - - self.input_keys = input_keys - self.label_keys = label_keys - self.data_dir = data_dir - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.test_resolutions = test_resolutions - self.train_resolution = train_resolution - self.grid_boundaries = grid_boundaries - self.positional_encoding = positional_encoding - self.encode_input = encode_input - self.encode_output = encode_output - self.encoding = encoding - self.channel_dim = channel_dim - self.data_split = data_split - - # train path - path_train = ( - Path(self.data_dir) - .joinpath(f"darcy_train_{self.train_resolution}.npy") - .as_posix() - ) - self.x_train, self.y_train = self.read_data(path_train) - # test path - path_test_1 = ( - Path(self.data_dir) - .joinpath(f"darcy_test_{self.test_resolutions[0]}.npy") - .as_posix() - ) - self.x_test_1, self.y_test_1 = self.read_data(path_test_1) - path_test_2 = ( - Path(self.data_dir) - .joinpath(f"darcy_test_{self.test_resolutions[1]}.npy") - .as_posix() - ) - self.x_test_2, self.y_test_2 = self.read_data(path_test_2) - - # input encoder - if self.encode_input: - self.input_encoder = self.encode_data(self.x_train) - self.x_train = self.input_encoder.encode(self.x_train) - self.x_test_1 = self.input_encoder.encode(self.x_test_1) - self.x_test_2 = self.input_encoder.encode(self.x_test_2) - else: - self.input_encoder = None - # output encoder - if self.encode_output: - self.output_encoder = self.encode_data(self.y_train) - self.y_train = self.output_encoder.encode(self.y_train) - else: - self.output_encoder = None - - if positional_encoding: - self.transform_x = PositionalEmbedding2D(grid_boundaries) - - def 
read_data(self, path): - # load with numpy - data = np.load(path, allow_pickle=True).item() - x = ( - paddle.to_tensor(data["x"]) - .unsqueeze(self.channel_dim) - .astype("float32") - .clone() - ) - y = paddle.to_tensor(data["y"]).unsqueeze(self.channel_dim).clone() - del data - return x, y - - def encode_data(self, data): - if self.encoding == "channel-wise": - reduce_dims = list(range(data.ndim)) - elif self.encoding == "pixel-wise": - reduce_dims = [0] - input_encoder = UnitGaussianNormalizer(data, reduce_dim=reduce_dims) - return input_encoder - - def __len__(self): - if self.data_split == "train": - return self.x_train.shape[0] - elif self.data_split == "test_16x16": - return self.x_test_1.shape[0] - else: - return self.x_test_2.shape[0] - - def __getitem__(self, index): - if self.data_split == "train": - x = self.x_train[index] - y = self.y_train[index] - - elif self.data_split == "test_16x16": - x = self.x_test_1[index] - y = self.y_test_1[index] - else: - x = self.x_test_2[index] - y = self.y_test_2[index] - - if self.transform_x is not None: - x = self.transform_x(x) - - input_item = {self.input_keys[0]: x} - label_item = {self.label_keys[0]: y} - weight_item = self.weight_dict - - return input_item, label_item, weight_item +from pathlib import Path +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np +import paddle +from paddle import io + + +# normalization, pointwise gaussian +class UnitGaussianNormalizer: + def __init__(self, x, eps=1e-7, reduce_dim=[0], verbose=True): + super().__init__() + n_samples, *shape = x.shape + self.sample_shape = shape + self.verbose = verbose + self.reduce_dim = reduce_dim + + # x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T + self.mean = paddle.mean(x, reduce_dim, keepdim=True).squeeze(0) + self.std = paddle.std(x, reduce_dim, keepdim=True).squeeze(0) + self.eps = eps + + if verbose: + print( + f"UnitGaussianNormalizer init on {n_samples}, reducing over {reduce_dim}, samples of shape {shape}." + ) + print(f" Mean and std of shape {self.mean.shape}, eps={eps}") + + def encode(self, x): + + x -= self.mean + x /= self.std + self.eps + return x + + def decode(self, x, sample_idx=None): + if sample_idx is None: + std = self.std + self.eps # n + mean = self.mean + else: + if len(self.mean.shape) == len(sample_idx[0].shape): + std = self.std[sample_idx] + self.eps # batch*n + mean = self.mean[sample_idx] + if len(self.mean.shape) > len(sample_idx[0].shape): + std = self.std[:, sample_idx] + self.eps # T*batch*n + mean = self.mean[:, sample_idx] + + # x is in shape of batch*n or T*batch*n + x *= std + x += mean + + return x + + +def get_grid_positional_encoding( + input_tensor, grid_boundaries=[[0, 1], [0, 1]], channel_dim=1 +): + """Appends grid positional encoding to an input tensor, concatenating as additional dimensions along the channels. + + Args: + input_tensor (paddle.Tensor): The input tensor. + grid_boundaries (list, optional): The boundaries of the grid. Defaults to [[0, 1], [0, 1]]. + channel_dim (int, optional): The location of unsqueeze. Defaults to 1. 
+ """ + + shape = list(input_tensor.shape) + if len(shape) == 2: + height, width = shape + else: + _, height, width = shape + + xt = paddle.linspace(grid_boundaries[0][0], grid_boundaries[0][1], height + 1)[:-1] + yt = paddle.linspace(grid_boundaries[1][0], grid_boundaries[1][1], width + 1)[:-1] + + grid_x, grid_y = paddle.meshgrid(xt, yt, indexing="ij") + + if len(shape) == 2: + grid_x = grid_x.unsqueeze(channel_dim) + grid_y = grid_y.unsqueeze(channel_dim) + else: + grid_x = grid_x.unsqueeze(0).unsqueeze(channel_dim) + grid_y = grid_y.unsqueeze(0).unsqueeze(channel_dim) + + return grid_x, grid_y + + +def regular_grid(spatial_dims, grid_boundaries=[[0, 1], [0, 1]]): + """ + Appends grid positional encoding to an input tensor, concatenating as additional dimensions along the channels + """ + height, width = spatial_dims + + xt = paddle.linspace(grid_boundaries[0][0], grid_boundaries[0][1], height + 1)[:-1] + yt = paddle.linspace(grid_boundaries[1][0], grid_boundaries[1][1], width + 1)[:-1] + + grid_x, grid_y = paddle.meshgrid(xt, yt, indexing="ij") + + grid_x = grid_x.tile((1, 1)) + grid_y = grid_y.tile((1, 1)) + + return grid_x, grid_y + + +class PositionalEmbedding2D: + def __init__(self, grid_boundaries=[[0, 1], [0, 1]]): + self.grid_boundaries = grid_boundaries + self._grid = None + self._res = None + + def grid(self, spatial_dims, dtype): + """Grid generates 2D grid needed for pos encoding + and caches the grid associated with MRU resolution + + Args: + spatial_dims (tuple[int,...]): Sizes of spatial resolution. + dtype (str): Dtype to encode data. + + Returns: + paddle.Tensor: Output grids to concatenate + """ + # handle case of multiple train resolutions + if self._grid is None or self._res != spatial_dims: + grid_x, grid_y = regular_grid( + spatial_dims, grid_boundaries=self.grid_boundaries + ) + + grid_x = grid_x.astype(dtype).unsqueeze(0).unsqueeze(0) + grid_y = grid_y.astype(dtype).unsqueeze(0).unsqueeze(0) + self._grid = grid_x, grid_y + self._res = spatial_dims + + return self._grid + + def __call__(self, data): + if data.ndim == 3: + data = data.unsqueeze(0) + x, y = self.grid(data.shape[-2:], data.dtype) + out = paddle.concat( + (data, x.expand([1, -1, -1, -1]), y.expand([1, -1, -1, -1])), axis=1 + ) + return out.squeeze(0) + + +class DarcyFlowDataset(io.Dataset): + """Loads a small Darcy-Flow dataset + + Training contains 1000 samples in resolution 16x16. + Testing contains 100 samples at resolution 16x16 and + 50 samples at resolution 32x32. + + Args: + input_keys (Tuple[str, ...]): Input keys, such as ("input",). + label_keys (Tuple[str, ...]): Output keys, such as ("output",). + data_dir (str): The directory to load data from. + weight_dict (Optional[Dict[str, float]], optional): Define the weight of each constraint variable. Defaults to None. + test_resolutions (List[int,...]): The resolutions to test dataset. Default is [16, 32]. + grid_boundaries (List[int,...]): The boundaries of the grid. Default is [[0,1],[0,1]]. + positional_encoding (bool): Whether to use positional encoding. Default is True + encode_input (bool): Whether to encode the input. Default is False + encode_output (bool): Whether to encode the output. Default is True + encoding (str): The type of encoding. Default is 'channel-wise'. + channel_dim (int): The location of unsqueeze. Default is 1. + where to put the channel dimension. Defaults size is batch, channel, height, width + data_split (str): Wether to use training or test dataset. Default is 'train'. 
+ """ + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + data_dir: str, + weight_dict: Optional[Dict[str, float]] = None, + test_resolutions: Tuple[int, ...] = [32], + train_resolution: int = 32, + grid_boundaries: Tuple[Tuple[int, ...], ...] = [[0, 1], [0, 1]], + positional_encoding: bool = True, + encode_input: bool = False, + encode_output: bool = True, + encoding: str = "channel-wise", + channel_dim: int = 1, + data_split: str = "train", + ): + super().__init__() + for res in test_resolutions: + if res not in [16, 32]: + raise ValueError( + f"Only 32 and 64 are supported for test resolution, but got {test_resolutions}" + ) + + self.input_keys = input_keys + self.label_keys = label_keys + self.data_dir = data_dir + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.test_resolutions = test_resolutions + self.train_resolution = train_resolution + self.grid_boundaries = grid_boundaries + self.positional_encoding = positional_encoding + self.encode_input = encode_input + self.encode_output = encode_output + self.encoding = encoding + self.channel_dim = channel_dim + self.data_split = data_split + + # train path + path_train = ( + Path(self.data_dir) + .joinpath(f"darcy_train_{self.train_resolution}.npy") + .as_posix() + ) + self.x_train, self.y_train = self.read_data(path_train) + # test path + path_test_1 = ( + Path(self.data_dir) + .joinpath(f"darcy_test_{self.test_resolutions[0]}.npy") + .as_posix() + ) + self.x_test_1, self.y_test_1 = self.read_data(path_test_1) + path_test_2 = ( + Path(self.data_dir) + .joinpath(f"darcy_test_{self.test_resolutions[1]}.npy") + .as_posix() + ) + self.x_test_2, self.y_test_2 = self.read_data(path_test_2) + + # input encoder + if self.encode_input: + self.input_encoder = self.encode_data(self.x_train) + self.x_train = self.input_encoder.encode(self.x_train) + self.x_test_1 = self.input_encoder.encode(self.x_test_1) + self.x_test_2 = self.input_encoder.encode(self.x_test_2) + else: + self.input_encoder = None + # output encoder + if self.encode_output: + self.output_encoder = self.encode_data(self.y_train) + self.y_train = self.output_encoder.encode(self.y_train) + else: + self.output_encoder = None + + if positional_encoding: + self.transform_x = PositionalEmbedding2D(grid_boundaries) + + def read_data(self, path): + # load with numpy + data = np.load(path, allow_pickle=True).item() + x = ( + paddle.to_tensor(data["x"]) + .unsqueeze(self.channel_dim) + .astype("float32") + .clone() + ) + y = paddle.to_tensor(data["y"]).unsqueeze(self.channel_dim).clone() + del data + return x, y + + def encode_data(self, data): + if self.encoding == "channel-wise": + reduce_dims = list(range(data.ndim)) + elif self.encoding == "pixel-wise": + reduce_dims = [0] + input_encoder = UnitGaussianNormalizer(data, reduce_dim=reduce_dims) + return input_encoder + + def __len__(self): + if self.data_split == "train": + return self.x_train.shape[0] + elif self.data_split == "test_16x16": + return self.x_test_1.shape[0] + else: + return self.x_test_2.shape[0] + + def __getitem__(self, index): + if self.data_split == "train": + x = self.x_train[index] + y = self.y_train[index] + + elif self.data_split == "test_16x16": + x = self.x_test_1[index] + y = self.y_test_1[index] + else: + x = self.x_test_2[index] + y = self.y_test_2[index] + + if self.transform_x is not None: + x = self.transform_x(x) + + 
input_item = {self.input_keys[0]: x} + label_item = {self.label_keys[0]: y} + weight_item = self.weight_dict + + return input_item, label_item, weight_item diff --git a/ppsci/data/dataset/dgmr_dataset.py b/ppsci/data/dataset/dgmr_dataset.py index 8490f679bc..83ba4bee4d 100644 --- a/ppsci/data/dataset/dgmr_dataset.py +++ b/ppsci/data/dataset/dgmr_dataset.py @@ -1,95 +1,95 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import importlib -from typing import Tuple - -import numpy as np -from numpy.random import default_rng -from paddle import io - - -class DGMRDataset(io.Dataset): - """ - Dataset class for DGMR (Deep Generative Model for Radar) model. - This open-sourced UK dataset has been mirrored to HuggingFace Datasets https://huggingface.co/datasets/openclimatefix/nimrod-uk-1km. - If the reader cannot load the dataset from Hugging Face, please manually download it and modify the dataset_path to the local path for loading. - - Args: - input_keys (Tuple[str, ...]): Input keys, such as ("input",). - label_keys (Tuple[str, ...]): Output keys, such as ("output",). - split (str, optional): The split of the dataset, "validation" or "train". Defaults to "validation". - num_input_frames (int, optional): Number of input frames. Defaults to 4. - num_target_frames (int, optional): Number of target frames. Defaults to 18. - dataset_path (str, optional): Path to the dataset. Defaults to "openclimatefix/nimrod-uk-1km". - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.DGMRDataset(("input", ), ("output", )) # doctest: +SKIP - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - split: str = "validation", - num_input_frames: int = 4, - num_target_frames: int = 18, - dataset_path: str = "openclimatefix/nimrod-uk-1km", - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - self.num_input_frames = num_input_frames - self.num_target_frames = num_target_frames - if not importlib.util.find_spec("datasets"): - raise ModuleNotFoundError( - "Please install datasets with `pip install datasets`" - " before exporting onnx model." 
- ) - import datasets - - self.reader = datasets.load_dataset( - dataset_path, "sample", split=split, streaming=True, trust_remote_code=True - ) - self.iter_reader = self.reader - - def __len__(self): - return 1000 - - def __getitem__(self, idx): - try: - row = next(self.iter_reader) - except Exception: - rng = default_rng(42) - self.iter_reader = iter( - self.reader.shuffle( - seed=rng.integers(low=0, high=100000), buffer_size=1000 - ) - ) - row = next(self.iter_reader) - radar_frames = row["radar_frames"] - input_frames = radar_frames[ - -self.num_target_frames - self.num_input_frames : -self.num_target_frames - ] - target_frames = radar_frames[-self.num_target_frames :] - input_item = { - self.input_keys[0]: np.moveaxis(input_frames, [0, 1, 2, 3], [0, 2, 3, 1]) - } - label_item = { - self.label_keys[0]: np.moveaxis(target_frames, [0, 1, 2, 3], [0, 2, 3, 1]) - } - return input_item, label_item +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import importlib +from typing import Tuple + +import numpy as np +from numpy.random import default_rng +from paddle import io + + +class DGMRDataset(io.Dataset): + """ + Dataset class for DGMR (Deep Generative Model for Radar) model. + This open-sourced UK dataset has been mirrored to HuggingFace Datasets https://huggingface.co/datasets/openclimatefix/nimrod-uk-1km. + If the reader cannot load the dataset from Hugging Face, please manually download it and modify the dataset_path to the local path for loading. + + Args: + input_keys (Tuple[str, ...]): Input keys, such as ("input",). + label_keys (Tuple[str, ...]): Output keys, such as ("output",). + split (str, optional): The split of the dataset, "validation" or "train". Defaults to "validation". + num_input_frames (int, optional): Number of input frames. Defaults to 4. + num_target_frames (int, optional): Number of target frames. Defaults to 18. + dataset_path (str, optional): Path to the dataset. Defaults to "openclimatefix/nimrod-uk-1km". + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.DGMRDataset(("input", ), ("output", )) # doctest: +SKIP + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + split: str = "validation", + num_input_frames: int = 4, + num_target_frames: int = 18, + dataset_path: str = "openclimatefix/nimrod-uk-1km", + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + self.num_input_frames = num_input_frames + self.num_target_frames = num_target_frames + if not importlib.util.find_spec("datasets"): + raise ModuleNotFoundError( + "Please install datasets with `pip install datasets`" + " before exporting onnx model." 
+ ) + import datasets + + self.reader = datasets.load_dataset( + dataset_path, "sample", split=split, streaming=True, trust_remote_code=True + ) + self.iter_reader = self.reader + + def __len__(self): + return 1000 + + def __getitem__(self, idx): + try: + row = next(self.iter_reader) + except Exception: + rng = default_rng(42) + self.iter_reader = iter( + self.reader.shuffle( + seed=rng.integers(low=0, high=100000), buffer_size=1000 + ) + ) + row = next(self.iter_reader) + radar_frames = row["radar_frames"] + input_frames = radar_frames[ + -self.num_target_frames - self.num_input_frames : -self.num_target_frames + ] + target_frames = radar_frames[-self.num_target_frames :] + input_item = { + self.input_keys[0]: np.moveaxis(input_frames, [0, 1, 2, 3], [0, 2, 3, 1]) + } + label_item = { + self.label_keys[0]: np.moveaxis(target_frames, [0, 1, 2, 3], [0, 2, 3, 1]) + } + return input_item, label_item diff --git a/ppsci/data/dataset/enso_dataset.py b/ppsci/data/dataset/enso_dataset.py index 601fcec413..9b04c649ce 100644 --- a/ppsci/data/dataset/enso_dataset.py +++ b/ppsci/data/dataset/enso_dataset.py @@ -1,405 +1,405 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import importlib -from pathlib import Path -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -from paddle import io - -NINO_WINDOW_T = 3 # Nino index is the sliding average over sst, window size is 3 -CMIP6_SST_MAX = 10.198975563049316 -CMIP6_SST_MIN = -16.549121856689453 -CMIP5_SST_MAX = 8.991744995117188 -CMIP5_SST_MIN = -9.33076286315918 -CMIP6_NINO_MAX = 4.138188362121582 -CMIP6_NINO_MIN = -3.5832221508026123 -CMIP5_NINO_MAX = 3.8253555297851562 -CMIP5_NINO_MIN = -2.691682815551758 -SST_MAX = max(CMIP6_SST_MAX, CMIP5_SST_MAX) -SST_MIN = min(CMIP6_SST_MIN, CMIP5_SST_MIN) - - -def scale_sst(sst): - return (sst - SST_MIN) / (SST_MAX - SST_MIN) - - -def scale_back_sst(sst): - return (SST_MAX - SST_MIN) * sst + SST_MIN - - -def prepare_inputs_targets( - len_time, input_gap, input_length, pred_shift, pred_length, samples_gap -): - """Prepares the input and target indices for training. - - Args: - len_time (int): The total number of time steps in the dataset. - input_gap (int): Time gaps between two consecutive input frames. - input_length (int): The number of input frames. - pred_shift (int): The lead_time of the last target to be predicted. - pred_length (int): The number of frames to be predicted. - samples_gap (int): Stride of seq sampling. 
- """ - - if pred_shift < pred_length: - raise ValueError("pred_shift should be small than pred_length") - input_span = input_gap * (input_length - 1) + 1 - pred_gap = pred_shift // pred_length - input_ind = np.arange(0, input_span, input_gap) - target_ind = np.arange(0, pred_shift, pred_gap) + input_span + pred_gap - 1 - ind = np.concatenate([input_ind, target_ind]).reshape(1, input_length + pred_length) - max_n_sample = len_time - (input_span + pred_shift - 1) - ind = ind + np.arange(max_n_sample)[:, np.newaxis] @ np.ones( - (1, input_length + pred_length), dtype=int - ) - return ind[::samples_gap] - - -def fold(data, size=36, stride=12): - """Inverse of unfold/sliding window operation - only applicable to the case where the size of the sliding windows is n*stride - - Args: - data (tuple[int,...]): The input data.(N, size, *). - size (int, optional): The size of a single datum.The Defaults to 36. - stride (int, optional): The step.Defaults to 12. - - Returns: - outdata (np.array): (N_, *).N/size is the number/width of sliding blocks - """ - if size % stride != 0: - raise ValueError("size modulo stride should be zero") - times = size // stride - remain = (data.shape[0] - 1) % times - if remain > 0: - ls = list(data[::times]) + [data[-1, -(remain * stride) :]] - outdata = np.concatenate(ls, axis=0) # (36*(151//3+1)+remain*stride, *, 15) - else: - outdata = np.concatenate(data[::times], axis=0) # (36*(151/3+1), *, 15) - assert ( - outdata.shape[0] == size * ((data.shape[0] - 1) // times + 1) + remain * stride - ) - return outdata - - -def data_transform(data, num_years_per_model): - """The transform of the input data. - - Args: - data (Tuple[list,...]): The input data.Shape of (N, 36, *). - num_years_per_model (int): The number of years associated with each model.151/140. - """ - length = data.shape[0] - assert length % num_years_per_model == 0 - num_models = length // num_years_per_model - outdata = np.stack( - np.split(data, length / num_years_per_model, axis=0), axis=-1 - ) # (151, 36, *, 15) - # cmip6sst outdata.shape = (151, 36, 24, 48, 15) = (year, month, lat, lon, model) - # cmip5sst outdata.shape = (140, 36, 24, 48, 17) - # cmip6nino outdata.shape = (151, 36, 15) - # cmip5nino outdata.shape = (140, 36, 17) - outdata = fold(outdata, size=36, stride=12) - # cmip6sst outdata.shape = (1836, 24, 48, 15), 1836 == 151 * 12 + 24 - # cmip5sst outdata.shape = (1704, 24, 48, 17) - # cmip6nino outdata.shape = (1836, 15) - # cmip5nino outdata.shape = (1704, 17) - - # check output data - assert outdata.shape[-1] == num_models - assert not np.any(np.isnan(outdata)) - return outdata - - -def read_raw_data(ds_dir, out_dir=None): - """Read and process raw cmip data from CMIP_train.nc and CMIP_label.nc - - Args: - ds_dir (str): The path of the dataset. - out_dir (str): The path of output. Defaults to None. 
- """ - - import xarray as xr - - train_cmip = xr.open_dataset(Path(ds_dir) / "CMIP_train.nc").transpose( - "year", "month", "lat", "lon" - ) - label_cmip = xr.open_dataset(Path(ds_dir) / "CMIP_label.nc").transpose( - "year", "month" - ) - # train_cmip.sst.values.shape = (4645, 36, 24, 48) - - # select longitudes - lon = train_cmip.lon.values - lon = lon[np.logical_and(lon >= 95, lon <= 330)] - train_cmip = train_cmip.sel(lon=lon) - - cmip6sst = data_transform( - data=train_cmip.sst.values[:2265], num_years_per_model=151 - ) - cmip5sst = data_transform( - data=train_cmip.sst.values[2265:], num_years_per_model=140 - ) - cmip6nino = data_transform( - data=label_cmip.nino.values[:2265], num_years_per_model=151 - ) - cmip5nino = data_transform( - data=label_cmip.nino.values[2265:], num_years_per_model=140 - ) - - # cmip6sst.shape = (1836, 24, 48, 15) - # cmip5sst.shape = (1704, 24, 48, 17) - assert len(cmip6sst.shape) == 4 - assert len(cmip5sst.shape) == 4 - assert len(cmip6nino.shape) == 2 - assert len(cmip5nino.shape) == 2 - # store processed data for faster data access - if out_dir is not None: - ds_cmip6 = xr.Dataset( - { - "sst": (["month", "lat", "lon", "model"], cmip6sst), - "nino": (["month", "model"], cmip6nino), - }, - coords={ - "month": np.repeat( - np.arange(1, 13)[None], cmip6nino.shape[0] // 12, axis=0 - ).flatten(), - "lat": train_cmip.lat.values, - "lon": train_cmip.lon.values, - "model": np.arange(15) + 1, - }, - ) - ds_cmip6.to_netcdf(Path(out_dir) / "cmip6.nc") - ds_cmip5 = xr.Dataset( - { - "sst": (["month", "lat", "lon", "model"], cmip5sst), - "nino": (["month", "model"], cmip5nino), - }, - coords={ - "month": np.repeat( - np.arange(1, 13)[None], cmip5nino.shape[0] // 12, axis=0 - ).flatten(), - "lat": train_cmip.lat.values, - "lon": train_cmip.lon.values, - "model": np.arange(17) + 1, - }, - ) - ds_cmip5.to_netcdf(Path(out_dir) / "cmip5.nc") - train_cmip.close() - label_cmip.close() - return cmip6sst, cmip5sst, cmip6nino, cmip5nino - - -def cat_over_last_dim(data): - """Treat different models (15 from CMIP6, 17 from CMIP5) as batch_size - e.g., cmip6sst.shape = (178, 38, 24, 48, 15), converted_cmip6sst.shape = (2670, 38, 24, 48) - e.g., cmip5sst.shape = (165, 38, 24, 48, 15), converted_cmip6sst.shape = (2475, 38, 24, 48) - """ - - return np.concatenate(np.moveaxis(data, -1, 0), axis=0) - - -class ENSODataset(io.Dataset): - """The El Niño/Southern Oscillation dataset. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - label_keys (Tuple[str, ...]): Name of label keys, such as ("output",). - data_dir (str): The directory of data. - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of each constraint variable. Defaults to None. - in_len (int, optional): The length of input data. Defaults to 12. - out_len (int, optional): The length of out data. Defaults to 26. - in_stride (int, optional): The stride of input data. Defaults to 1. - out_stride (int, optional): The stride of output data. Defaults to 1. - train_samples_gap (int, optional): The stride of sequence sampling during training. Defaults to 10. - e.g., samples_gap = 10, the first seq contains [0, 1, ..., T-1] frame indices, the second seq contains [10, 11, .., T+9] - eval_samples_gap (int, optional): The stride of sequence sampling during eval. Defaults to 11. - normalize_sst (bool, optional): Whether to use normalization. Defaults to True. - batch_size (int, optional): Batch size. Defaults to 1. - num_workers (int, optional): The num of workers. Defaults to 1. 
- training (str, optional): Training pathse. Defaults to "train". - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - data_dir: str, - weight_dict: Optional[Dict[str, float]] = None, - in_len=12, - out_len=26, - in_stride=1, - out_stride=1, - train_samples_gap=10, - eval_samples_gap=11, - normalize_sst=True, - # datamodule_only - batch_size=1, - num_workers=1, - training="train", - ): - super(ENSODataset, self).__init__() - if importlib.util.find_spec("xarray") is None: - raise ModuleNotFoundError( - "To use RadarDataset, please install 'xarray' with: `pip install " - "xarray` first." - ) - self.input_keys = input_keys - self.label_keys = label_keys - self.data_dir = data_dir - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.in_len = in_len - self.out_len = out_len - self.in_stride = in_stride - self.out_stride = out_stride - self.train_samples_gap = train_samples_gap - self.eval_samples_gap = eval_samples_gap - self.normalize_sst = normalize_sst - # datamodule_only - self.batch_size = batch_size - if num_workers != 1: - raise ValueError( - "Current implementation does not support `num_workers != 1`!" - ) - self.num_workers = num_workers - self.training = training - - # pre-data - cmip6sst, cmip5sst, cmip6nino, cmip5nino = read_raw_data(self.data_dir) - # TODO: more flexible train/val/test split - self.sst_train = [cmip6sst, cmip5sst[..., :-2]] - self.nino_train = [cmip6nino, cmip5nino[..., :-2]] - self.sst_eval = [cmip5sst[..., -2:-1]] - self.nino_eval = [cmip5nino[..., -2:-1]] - self.sst_test = [cmip5sst[..., -1:]] - self.nino_test = [cmip5nino[..., -1:]] - - self.sst, self.target_nino = self.create_data() - - def create_data( - self, - ): - if self.training == "train": - sst_cmip6 = self.sst_train[0] - nino_cmip6 = self.nino_train[0] - sst_cmip5 = self.sst_train[1] - nino_cmip5 = self.nino_train[1] - samples_gap = self.train_samples_gap - elif self.training == "eval": - sst_cmip6 = None - nino_cmip6 = None - sst_cmip5 = self.sst_eval[0] - nino_cmip5 = self.nino_eval[0] - samples_gap = self.eval_samples_gap - elif self.training == "test": - sst_cmip6 = None - nino_cmip6 = None - sst_cmip5 = self.sst_test[0] - nino_cmip5 = self.nino_test[0] - samples_gap = self.eval_samples_gap - - # cmip6 (N, *, 15) - # cmip5 (N, *, 17) - sst = [] - target_nino = [] - - nino_idx_slice = slice( - self.in_len, self.in_len + self.out_len - NINO_WINDOW_T + 1 - ) # e.g., 12:36 - if sst_cmip6 is not None: - assert len(sst_cmip6.shape) == 4 - assert len(nino_cmip6.shape) == 2 - idx_sst = prepare_inputs_targets( - len_time=sst_cmip6.shape[0], - input_length=self.in_len, - input_gap=self.in_stride, - pred_shift=self.out_len * self.out_stride, - pred_length=self.out_len, - samples_gap=samples_gap, - ) - - sst.append(cat_over_last_dim(sst_cmip6[idx_sst])) - target_nino.append( - cat_over_last_dim(nino_cmip6[idx_sst[:, nino_idx_slice]]) - ) - if sst_cmip5 is not None: - assert len(sst_cmip5.shape) == 4 - assert len(nino_cmip5.shape) == 2 - idx_sst = prepare_inputs_targets( - len_time=sst_cmip5.shape[0], - input_length=self.in_len, - input_gap=self.in_stride, - pred_shift=self.out_len * self.out_stride, - pred_length=self.out_len, - samples_gap=samples_gap, - ) - sst.append(cat_over_last_dim(sst_cmip5[idx_sst])) - 
target_nino.append( - cat_over_last_dim(nino_cmip5[idx_sst[:, nino_idx_slice]]) - ) - - # sst data containing both the input and target - self.sst = np.concatenate(sst, axis=0) # (N, in_len+out_len, lat, lon) - if self.normalize_sst: - self.sst = scale_sst(self.sst) - # nino data containing the target only - self.target_nino = np.concatenate( - target_nino, axis=0 - ) # (N, out_len+NINO_WINDOW_T-1) - assert self.sst.shape[0] == self.target_nino.shape[0] - assert self.sst.shape[1] == self.in_len + self.out_len - assert self.target_nino.shape[1] == self.out_len - NINO_WINDOW_T + 1 - return self.sst, self.target_nino - - def get_datashape(self): - return {"sst": self.sst.shape, "nino target": self.target_nino.shape} - - def __len__(self): - return self.sst.shape[0] - - def __getitem__(self, idx): - sst_data = self.sst[idx].astype("float32") - sst_data = sst_data[..., np.newaxis] - in_seq = sst_data[: self.in_len, ...] # ( in_len, lat, lon, 1) - target_seq = sst_data[self.in_len :, ...] # ( in_len, lat, lon, 1) - weight_item = self.weight_dict - - if self.training == "train": - input_item = {self.input_keys[0]: in_seq} - label_item = { - self.label_keys[0]: target_seq, - } - - return input_item, label_item, weight_item - else: - input_item = {self.input_keys[0]: in_seq} - label_item = { - self.label_keys[0]: target_seq, - self.label_keys[1]: self.target_nino[idx], - } - - return input_item, label_item, weight_item +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import importlib +from pathlib import Path +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np +from paddle import io + +NINO_WINDOW_T = 3 # Nino index is the sliding average over sst, window size is 3 +CMIP6_SST_MAX = 10.198975563049316 +CMIP6_SST_MIN = -16.549121856689453 +CMIP5_SST_MAX = 8.991744995117188 +CMIP5_SST_MIN = -9.33076286315918 +CMIP6_NINO_MAX = 4.138188362121582 +CMIP6_NINO_MIN = -3.5832221508026123 +CMIP5_NINO_MAX = 3.8253555297851562 +CMIP5_NINO_MIN = -2.691682815551758 +SST_MAX = max(CMIP6_SST_MAX, CMIP5_SST_MAX) +SST_MIN = min(CMIP6_SST_MIN, CMIP5_SST_MIN) + + +def scale_sst(sst): + return (sst - SST_MIN) / (SST_MAX - SST_MIN) + + +def scale_back_sst(sst): + return (SST_MAX - SST_MIN) * sst + SST_MIN + + +def prepare_inputs_targets( + len_time, input_gap, input_length, pred_shift, pred_length, samples_gap +): + """Prepares the input and target indices for training. + + Args: + len_time (int): The total number of time steps in the dataset. + input_gap (int): Time gaps between two consecutive input frames. + input_length (int): The number of input frames. + pred_shift (int): The lead_time of the last target to be predicted. + pred_length (int): The number of frames to be predicted. + samples_gap (int): Stride of seq sampling. 
+ """ + + if pred_shift < pred_length: + raise ValueError("pred_shift should be small than pred_length") + input_span = input_gap * (input_length - 1) + 1 + pred_gap = pred_shift // pred_length + input_ind = np.arange(0, input_span, input_gap) + target_ind = np.arange(0, pred_shift, pred_gap) + input_span + pred_gap - 1 + ind = np.concatenate([input_ind, target_ind]).reshape(1, input_length + pred_length) + max_n_sample = len_time - (input_span + pred_shift - 1) + ind = ind + np.arange(max_n_sample)[:, np.newaxis] @ np.ones( + (1, input_length + pred_length), dtype=int + ) + return ind[::samples_gap] + + +def fold(data, size=36, stride=12): + """Inverse of unfold/sliding window operation + only applicable to the case where the size of the sliding windows is n*stride + + Args: + data (tuple[int,...]): The input data.(N, size, *). + size (int, optional): The size of a single datum.The Defaults to 36. + stride (int, optional): The step.Defaults to 12. + + Returns: + outdata (np.array): (N_, *).N/size is the number/width of sliding blocks + """ + if size % stride != 0: + raise ValueError("size modulo stride should be zero") + times = size // stride + remain = (data.shape[0] - 1) % times + if remain > 0: + ls = list(data[::times]) + [data[-1, -(remain * stride) :]] + outdata = np.concatenate(ls, axis=0) # (36*(151//3+1)+remain*stride, *, 15) + else: + outdata = np.concatenate(data[::times], axis=0) # (36*(151/3+1), *, 15) + assert ( + outdata.shape[0] == size * ((data.shape[0] - 1) // times + 1) + remain * stride + ) + return outdata + + +def data_transform(data, num_years_per_model): + """The transform of the input data. + + Args: + data (Tuple[list,...]): The input data.Shape of (N, 36, *). + num_years_per_model (int): The number of years associated with each model.151/140. + """ + length = data.shape[0] + assert length % num_years_per_model == 0 + num_models = length // num_years_per_model + outdata = np.stack( + np.split(data, length / num_years_per_model, axis=0), axis=-1 + ) # (151, 36, *, 15) + # cmip6sst outdata.shape = (151, 36, 24, 48, 15) = (year, month, lat, lon, model) + # cmip5sst outdata.shape = (140, 36, 24, 48, 17) + # cmip6nino outdata.shape = (151, 36, 15) + # cmip5nino outdata.shape = (140, 36, 17) + outdata = fold(outdata, size=36, stride=12) + # cmip6sst outdata.shape = (1836, 24, 48, 15), 1836 == 151 * 12 + 24 + # cmip5sst outdata.shape = (1704, 24, 48, 17) + # cmip6nino outdata.shape = (1836, 15) + # cmip5nino outdata.shape = (1704, 17) + + # check output data + assert outdata.shape[-1] == num_models + assert not np.any(np.isnan(outdata)) + return outdata + + +def read_raw_data(ds_dir, out_dir=None): + """Read and process raw cmip data from CMIP_train.nc and CMIP_label.nc + + Args: + ds_dir (str): The path of the dataset. + out_dir (str): The path of output. Defaults to None. 
+ """ + + import xarray as xr + + train_cmip = xr.open_dataset(Path(ds_dir) / "CMIP_train.nc").transpose( + "year", "month", "lat", "lon" + ) + label_cmip = xr.open_dataset(Path(ds_dir) / "CMIP_label.nc").transpose( + "year", "month" + ) + # train_cmip.sst.values.shape = (4645, 36, 24, 48) + + # select longitudes + lon = train_cmip.lon.values + lon = lon[np.logical_and(lon >= 95, lon <= 330)] + train_cmip = train_cmip.sel(lon=lon) + + cmip6sst = data_transform( + data=train_cmip.sst.values[:2265], num_years_per_model=151 + ) + cmip5sst = data_transform( + data=train_cmip.sst.values[2265:], num_years_per_model=140 + ) + cmip6nino = data_transform( + data=label_cmip.nino.values[:2265], num_years_per_model=151 + ) + cmip5nino = data_transform( + data=label_cmip.nino.values[2265:], num_years_per_model=140 + ) + + # cmip6sst.shape = (1836, 24, 48, 15) + # cmip5sst.shape = (1704, 24, 48, 17) + assert len(cmip6sst.shape) == 4 + assert len(cmip5sst.shape) == 4 + assert len(cmip6nino.shape) == 2 + assert len(cmip5nino.shape) == 2 + # store processed data for faster data access + if out_dir is not None: + ds_cmip6 = xr.Dataset( + { + "sst": (["month", "lat", "lon", "model"], cmip6sst), + "nino": (["month", "model"], cmip6nino), + }, + coords={ + "month": np.repeat( + np.arange(1, 13)[None], cmip6nino.shape[0] // 12, axis=0 + ).flatten(), + "lat": train_cmip.lat.values, + "lon": train_cmip.lon.values, + "model": np.arange(15) + 1, + }, + ) + ds_cmip6.to_netcdf(Path(out_dir) / "cmip6.nc") + ds_cmip5 = xr.Dataset( + { + "sst": (["month", "lat", "lon", "model"], cmip5sst), + "nino": (["month", "model"], cmip5nino), + }, + coords={ + "month": np.repeat( + np.arange(1, 13)[None], cmip5nino.shape[0] // 12, axis=0 + ).flatten(), + "lat": train_cmip.lat.values, + "lon": train_cmip.lon.values, + "model": np.arange(17) + 1, + }, + ) + ds_cmip5.to_netcdf(Path(out_dir) / "cmip5.nc") + train_cmip.close() + label_cmip.close() + return cmip6sst, cmip5sst, cmip6nino, cmip5nino + + +def cat_over_last_dim(data): + """Treat different models (15 from CMIP6, 17 from CMIP5) as batch_size + e.g., cmip6sst.shape = (178, 38, 24, 48, 15), converted_cmip6sst.shape = (2670, 38, 24, 48) + e.g., cmip5sst.shape = (165, 38, 24, 48, 15), converted_cmip6sst.shape = (2475, 38, 24, 48) + """ + + return np.concatenate(np.moveaxis(data, -1, 0), axis=0) + + +class ENSODataset(io.Dataset): + """The El Niño/Southern Oscillation dataset. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + label_keys (Tuple[str, ...]): Name of label keys, such as ("output",). + data_dir (str): The directory of data. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of each constraint variable. Defaults to None. + in_len (int, optional): The length of input data. Defaults to 12. + out_len (int, optional): The length of out data. Defaults to 26. + in_stride (int, optional): The stride of input data. Defaults to 1. + out_stride (int, optional): The stride of output data. Defaults to 1. + train_samples_gap (int, optional): The stride of sequence sampling during training. Defaults to 10. + e.g., samples_gap = 10, the first seq contains [0, 1, ..., T-1] frame indices, the second seq contains [10, 11, .., T+9] + eval_samples_gap (int, optional): The stride of sequence sampling during eval. Defaults to 11. + normalize_sst (bool, optional): Whether to use normalization. Defaults to True. + batch_size (int, optional): Batch size. Defaults to 1. + num_workers (int, optional): The num of workers. Defaults to 1. 
+ training (str, optional): Training pathse. Defaults to "train". + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + data_dir: str, + weight_dict: Optional[Dict[str, float]] = None, + in_len=12, + out_len=26, + in_stride=1, + out_stride=1, + train_samples_gap=10, + eval_samples_gap=11, + normalize_sst=True, + # datamodule_only + batch_size=1, + num_workers=1, + training="train", + ): + super(ENSODataset, self).__init__() + if importlib.util.find_spec("xarray") is None: + raise ModuleNotFoundError( + "To use RadarDataset, please install 'xarray' with: `pip install " + "xarray` first." + ) + self.input_keys = input_keys + self.label_keys = label_keys + self.data_dir = data_dir + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.in_len = in_len + self.out_len = out_len + self.in_stride = in_stride + self.out_stride = out_stride + self.train_samples_gap = train_samples_gap + self.eval_samples_gap = eval_samples_gap + self.normalize_sst = normalize_sst + # datamodule_only + self.batch_size = batch_size + if num_workers != 1: + raise ValueError( + "Current implementation does not support `num_workers != 1`!" + ) + self.num_workers = num_workers + self.training = training + + # pre-data + cmip6sst, cmip5sst, cmip6nino, cmip5nino = read_raw_data(self.data_dir) + # TODO: more flexible train/val/test split + self.sst_train = [cmip6sst, cmip5sst[..., :-2]] + self.nino_train = [cmip6nino, cmip5nino[..., :-2]] + self.sst_eval = [cmip5sst[..., -2:-1]] + self.nino_eval = [cmip5nino[..., -2:-1]] + self.sst_test = [cmip5sst[..., -1:]] + self.nino_test = [cmip5nino[..., -1:]] + + self.sst, self.target_nino = self.create_data() + + def create_data( + self, + ): + if self.training == "train": + sst_cmip6 = self.sst_train[0] + nino_cmip6 = self.nino_train[0] + sst_cmip5 = self.sst_train[1] + nino_cmip5 = self.nino_train[1] + samples_gap = self.train_samples_gap + elif self.training == "eval": + sst_cmip6 = None + nino_cmip6 = None + sst_cmip5 = self.sst_eval[0] + nino_cmip5 = self.nino_eval[0] + samples_gap = self.eval_samples_gap + elif self.training == "test": + sst_cmip6 = None + nino_cmip6 = None + sst_cmip5 = self.sst_test[0] + nino_cmip5 = self.nino_test[0] + samples_gap = self.eval_samples_gap + + # cmip6 (N, *, 15) + # cmip5 (N, *, 17) + sst = [] + target_nino = [] + + nino_idx_slice = slice( + self.in_len, self.in_len + self.out_len - NINO_WINDOW_T + 1 + ) # e.g., 12:36 + if sst_cmip6 is not None: + assert len(sst_cmip6.shape) == 4 + assert len(nino_cmip6.shape) == 2 + idx_sst = prepare_inputs_targets( + len_time=sst_cmip6.shape[0], + input_length=self.in_len, + input_gap=self.in_stride, + pred_shift=self.out_len * self.out_stride, + pred_length=self.out_len, + samples_gap=samples_gap, + ) + + sst.append(cat_over_last_dim(sst_cmip6[idx_sst])) + target_nino.append( + cat_over_last_dim(nino_cmip6[idx_sst[:, nino_idx_slice]]) + ) + if sst_cmip5 is not None: + assert len(sst_cmip5.shape) == 4 + assert len(nino_cmip5.shape) == 2 + idx_sst = prepare_inputs_targets( + len_time=sst_cmip5.shape[0], + input_length=self.in_len, + input_gap=self.in_stride, + pred_shift=self.out_len * self.out_stride, + pred_length=self.out_len, + samples_gap=samples_gap, + ) + sst.append(cat_over_last_dim(sst_cmip5[idx_sst])) + 
target_nino.append( + cat_over_last_dim(nino_cmip5[idx_sst[:, nino_idx_slice]]) + ) + + # sst data containing both the input and target + self.sst = np.concatenate(sst, axis=0) # (N, in_len+out_len, lat, lon) + if self.normalize_sst: + self.sst = scale_sst(self.sst) + # nino data containing the target only + self.target_nino = np.concatenate( + target_nino, axis=0 + ) # (N, out_len+NINO_WINDOW_T-1) + assert self.sst.shape[0] == self.target_nino.shape[0] + assert self.sst.shape[1] == self.in_len + self.out_len + assert self.target_nino.shape[1] == self.out_len - NINO_WINDOW_T + 1 + return self.sst, self.target_nino + + def get_datashape(self): + return {"sst": self.sst.shape, "nino target": self.target_nino.shape} + + def __len__(self): + return self.sst.shape[0] + + def __getitem__(self, idx): + sst_data = self.sst[idx].astype("float32") + sst_data = sst_data[..., np.newaxis] + in_seq = sst_data[: self.in_len, ...] # ( in_len, lat, lon, 1) + target_seq = sst_data[self.in_len :, ...] # ( in_len, lat, lon, 1) + weight_item = self.weight_dict + + if self.training == "train": + input_item = {self.input_keys[0]: in_seq} + label_item = { + self.label_keys[0]: target_seq, + } + + return input_item, label_item, weight_item + else: + input_item = {self.input_keys[0]: in_seq} + label_item = { + self.label_keys[0]: target_seq, + self.label_keys[1]: self.target_nino[idx], + } + + return input_item, label_item, weight_item diff --git a/ppsci/data/dataset/era5_dataset.py b/ppsci/data/dataset/era5_dataset.py index 75cf89c754..0fdcb653dd 100644 --- a/ppsci/data/dataset/era5_dataset.py +++ b/ppsci/data/dataset/era5_dataset.py @@ -1,249 +1,249 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import glob -from typing import Dict -from typing import Optional -from typing import Tuple - -try: - import h5py -except ModuleNotFoundError: - pass - -import numpy as np -import paddle -from paddle import io -from paddle import vision - - -class ERA5Dataset(io.Dataset): - """Class for ERA5 dataset. - - Args: - file_path (str): Data set path. - input_keys (Tuple[str, ...]): Input keys, such as ("input",). - label_keys (Tuple[str, ...]): Output keys, such as ("output",). - precip_file_path (Optional[str]): Precipitation data set path. Defaults to None. - weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. - vars_channel (Optional[Tuple[int, ...]]): The variable channel index in ERA5 dataset. Defaults to None. - num_label_timestamps (int, optional): Number of timestamp of label. Defaults to 1. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - training (bool, optional): Whether in train mode. Defaults to True. - stride (int, optional): Stride of sampling data. Defaults to 1. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.ERA5Dataset( - ... "file_path": "/path/to/ERA5Dataset", - ... "input_keys": ("input",), - ... 
"label_keys": ("output",), - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - precip_file_path: Optional[str] = None, - weight_dict: Optional[Dict[str, float]] = None, - vars_channel: Optional[Tuple[int, ...]] = None, - num_label_timestamps: int = 1, - transforms: Optional[vision.Compose] = None, - training: bool = True, - stride: int = 1, - ): - super().__init__() - self.file_path = file_path - self.input_keys = input_keys - self.label_keys = label_keys - self.precip_file_path = precip_file_path - - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.vars_channel = list(range(20)) if vars_channel is None else vars_channel - self.num_label_timestamps = num_label_timestamps - self.transforms = transforms - self.training = training - self.stride = stride - - self.files = self.read_data(file_path) - self.n_years = len(self.files) - self.num_samples_per_year = self.files[0].shape[0] - self.num_samples = self.n_years * self.num_samples_per_year - if self.precip_file_path is not None: - self.precip_files = self.read_data(precip_file_path, "tp") - - def read_data(self, path: str, var="fields"): - paths = [path] if path.endswith(".h5") else glob.glob(path + "/*.h5") - paths.sort() - files = [] - for path_ in paths: - _file = h5py.File(path_, "r") - files.append(_file[var]) - return files - - def __len__(self): - return self.num_samples // self.stride - - def __getitem__(self, global_idx): - global_idx *= self.stride - year_idx = global_idx // self.num_samples_per_year - local_idx = global_idx % self.num_samples_per_year - step = 0 if local_idx >= self.num_samples_per_year - 1 else 1 - - if self.num_label_timestamps > 1: - if local_idx >= self.num_samples_per_year - self.num_label_timestamps: - local_idx = self.num_samples_per_year - self.num_label_timestamps - 1 - - input_file = self.files[year_idx] - label_file = ( - self.precip_files[year_idx] - if self.precip_file_path is not None - else input_file - ) - if self.precip_file_path is not None and year_idx == 0 and self.training: - # first year has 2 missing samples in precip (they are first two time points) - lim = self.num_samples_per_year - 2 - local_idx = local_idx % lim - step = 0 if local_idx >= lim - 1 else 1 - input_idx = local_idx + 2 - label_idx = local_idx + step - else: - input_idx, label_idx = local_idx, local_idx + step - - input_item = {self.input_keys[0]: input_file[input_idx, self.vars_channel]} - - label_item = {} - for i in range(self.num_label_timestamps): - if self.precip_file_path is not None: - label_item[self.label_keys[i]] = np.expand_dims( - label_file[label_idx + i], 0 - ) - else: - label_item[self.label_keys[i]] = label_file[ - label_idx + i, self.vars_channel - ] - - weight_shape = [1] * len(next(iter(label_item.values())).shape) - weight_item = { - key: np.full(weight_shape, value, paddle.get_default_dtype()) - for key, value in self.weight_dict.items() - } - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return input_item, label_item, weight_item - - -class ERA5SampledDataset(io.Dataset): - """Class for ERA5 sampled dataset. - - Args: - file_path (str): Data set path. 
- input_keys (Tuple[str, ...]): Input keys, such as ("input",). - label_keys (Tuple[str, ...]): Output keys, such as ("output",). - weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.ERA5SampledDataset( - ... "file_path": "/path/to/ERA5SampledDataset", - ... "input_keys": ("input",), - ... "label_keys": ("output",), - ... ) # doctest: +SKIP - >>> # get the length of the dataset - >>> dataset_size = len(dataset) # doctest: +SKIP - >>> # get the first sample of the data - >>> first_sample = dataset[0] # doctest: +SKIP - >>> print("First sample:", first_sample) # doctest: +SKIP - """ - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - weight_dict: Optional[Dict[str, float]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.file_path = file_path - self.input_keys = input_keys - self.label_keys = label_keys - - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.transforms = transforms - - self.files = self.read_data(file_path) - self.num_samples = len(self.files) - - def read_data(self, path: str): - paths = glob.glob(path + "/*.h5") - paths.sort() - files = [] - for _path in paths: - _file = h5py.File(_path, "r") - files.append(_file) - return files - - def __len__(self): - return self.num_samples - - def __getitem__(self, global_idx): - _file = self.files[global_idx] - - input_item = {} - for key in _file["input_dict"]: - input_item[key] = np.asarray( - _file["input_dict"][key], paddle.get_default_dtype() - ) - - label_item = {} - for key in _file["label_dict"]: - label_item[key] = np.asarray( - _file["label_dict"][key], paddle.get_default_dtype() - ) - - weight_shape = [1] * len(next(iter(label_item.values())).shape) - weight_item = { - key: np.full(weight_shape, value, paddle.get_default_dtype()) - for key, value in self.weight_dict.items() - } - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return input_item, label_item, weight_item +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import glob +from typing import Dict +from typing import Optional +from typing import Tuple + +try: + import h5py +except ModuleNotFoundError: + pass + +import numpy as np +import paddle +from paddle import io +from paddle import vision + + +class ERA5Dataset(io.Dataset): + """Class for ERA5 dataset. + + Args: + file_path (str): Data set path. + input_keys (Tuple[str, ...]): Input keys, such as ("input",). + label_keys (Tuple[str, ...]): Output keys, such as ("output",). 
+ precip_file_path (Optional[str]): Precipitation data set path. Defaults to None. + weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. + vars_channel (Optional[Tuple[int, ...]]): The variable channel index in ERA5 dataset. Defaults to None. + num_label_timestamps (int, optional): Number of timestamp of label. Defaults to 1. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + training (bool, optional): Whether in train mode. Defaults to True. + stride (int, optional): Stride of sampling data. Defaults to 1. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.ERA5Dataset( + ... "file_path": "/path/to/ERA5Dataset", + ... "input_keys": ("input",), + ... "label_keys": ("output",), + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + precip_file_path: Optional[str] = None, + weight_dict: Optional[Dict[str, float]] = None, + vars_channel: Optional[Tuple[int, ...]] = None, + num_label_timestamps: int = 1, + transforms: Optional[vision.Compose] = None, + training: bool = True, + stride: int = 1, + ): + super().__init__() + self.file_path = file_path + self.input_keys = input_keys + self.label_keys = label_keys + self.precip_file_path = precip_file_path + + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.vars_channel = list(range(20)) if vars_channel is None else vars_channel + self.num_label_timestamps = num_label_timestamps + self.transforms = transforms + self.training = training + self.stride = stride + + self.files = self.read_data(file_path) + self.n_years = len(self.files) + self.num_samples_per_year = self.files[0].shape[0] + self.num_samples = self.n_years * self.num_samples_per_year + if self.precip_file_path is not None: + self.precip_files = self.read_data(precip_file_path, "tp") + + def read_data(self, path: str, var="fields"): + paths = [path] if path.endswith(".h5") else glob.glob(path + "/*.h5") + paths.sort() + files = [] + for path_ in paths: + _file = h5py.File(path_, "r") + files.append(_file[var]) + return files + + def __len__(self): + return self.num_samples // self.stride + + def __getitem__(self, global_idx): + global_idx *= self.stride + year_idx = global_idx // self.num_samples_per_year + local_idx = global_idx % self.num_samples_per_year + step = 0 if local_idx >= self.num_samples_per_year - 1 else 1 + + if self.num_label_timestamps > 1: + if local_idx >= self.num_samples_per_year - self.num_label_timestamps: + local_idx = self.num_samples_per_year - self.num_label_timestamps - 1 + + input_file = self.files[year_idx] + label_file = ( + self.precip_files[year_idx] + if self.precip_file_path is not None + else input_file + ) + if self.precip_file_path is not None and year_idx == 0 and self.training: + # first year has 2 missing samples in precip (they are first two time points) + lim = self.num_samples_per_year - 2 + local_idx = local_idx % lim + step = 0 if local_idx >= lim - 1 else 1 + input_idx = local_idx + 2 + label_idx = local_idx + step + else: + input_idx, label_idx = local_idx, local_idx + step + + input_item = {self.input_keys[0]: input_file[input_idx, self.vars_channel]} + + label_item = {} + for i in 
range(self.num_label_timestamps): + if self.precip_file_path is not None: + label_item[self.label_keys[i]] = np.expand_dims( + label_file[label_idx + i], 0 + ) + else: + label_item[self.label_keys[i]] = label_file[ + label_idx + i, self.vars_channel + ] + + weight_shape = [1] * len(next(iter(label_item.values())).shape) + weight_item = { + key: np.full(weight_shape, value, paddle.get_default_dtype()) + for key, value in self.weight_dict.items() + } + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return input_item, label_item, weight_item + + +class ERA5SampledDataset(io.Dataset): + """Class for ERA5 sampled dataset. + + Args: + file_path (str): Data set path. + input_keys (Tuple[str, ...]): Input keys, such as ("input",). + label_keys (Tuple[str, ...]): Output keys, such as ("output",). + weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.ERA5SampledDataset( + ... "file_path": "/path/to/ERA5SampledDataset", + ... "input_keys": ("input",), + ... "label_keys": ("output",), + ... ) # doctest: +SKIP + >>> # get the length of the dataset + >>> dataset_size = len(dataset) # doctest: +SKIP + >>> # get the first sample of the data + >>> first_sample = dataset[0] # doctest: +SKIP + >>> print("First sample:", first_sample) # doctest: +SKIP + """ + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + weight_dict: Optional[Dict[str, float]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.file_path = file_path + self.input_keys = input_keys + self.label_keys = label_keys + + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.transforms = transforms + + self.files = self.read_data(file_path) + self.num_samples = len(self.files) + + def read_data(self, path: str): + paths = glob.glob(path + "/*.h5") + paths.sort() + files = [] + for _path in paths: + _file = h5py.File(_path, "r") + files.append(_file) + return files + + def __len__(self): + return self.num_samples + + def __getitem__(self, global_idx): + _file = self.files[global_idx] + + input_item = {} + for key in _file["input_dict"]: + input_item[key] = np.asarray( + _file["input_dict"][key], paddle.get_default_dtype() + ) + + label_item = {} + for key in _file["label_dict"]: + label_item[key] = np.asarray( + _file["label_dict"][key], paddle.get_default_dtype() + ) + + weight_shape = [1] * len(next(iter(label_item.values())).shape) + weight_item = { + key: np.full(weight_shape, value, paddle.get_default_dtype()) + for key, value in self.weight_dict.items() + } + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return input_item, label_item, weight_item diff --git a/ppsci/data/dataset/ext_moe_enso_dataset.py b/ppsci/data/dataset/ext_moe_enso_dataset.py index 5286b4bfe2..730b1b2a6e 100644 --- a/ppsci/data/dataset/ext_moe_enso_dataset.py +++ b/ppsci/data/dataset/ext_moe_enso_dataset.py @@ -1,406 +1,406 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
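Note on the ERA5Dataset.__getitem__ logic shown earlier in era5_dataset.py: the mapping from a strided global index to per-year input/label indices can be checked in isolation. The sketch below reproduces only that arithmetic; the samples-per-year value is an assumption for illustration (the dataset reads the real value from the first HDF5 file), and the precipitation and multi-timestamp branches are left out.

# Standalone sketch of ERA5Dataset's global-index arithmetic (no h5py/paddle needed).
def era5_index(global_idx, num_samples_per_year=1460, stride=1):
    # num_samples_per_year is an assumed value; the dataset takes it from files[0].shape[0].
    global_idx *= stride
    year_idx = global_idx // num_samples_per_year
    local_idx = global_idx % num_samples_per_year
    # The last sample of a year has no following frame, so the label index stays put.
    step = 0 if local_idx >= num_samples_per_year - 1 else 1
    return year_idx, local_idx, local_idx + step

print(era5_index(0))     # (0, 0, 1): label is the next time step
print(era5_index(1459))  # (0, 1459, 1459): year boundary, step drops to 0
print(era5_index(1460))  # (1, 0, 1): first sample of the second year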
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import importlib -from pathlib import Path -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -from paddle import io - -NINO_WINDOW_T = 3 # Nino index is the sliding average over sst, window size is 3 -CMIP6_SST_MAX = 10.198975563049316 -CMIP6_SST_MIN = -16.549121856689453 -CMIP5_SST_MAX = 8.991744995117188 -CMIP5_SST_MIN = -9.33076286315918 -CMIP6_NINO_MAX = 4.138188362121582 -CMIP6_NINO_MIN = -3.5832221508026123 -CMIP5_NINO_MAX = 3.8253555297851562 -CMIP5_NINO_MIN = -2.691682815551758 -SST_MAX = max(CMIP6_SST_MAX, CMIP5_SST_MAX) -SST_MIN = min(CMIP6_SST_MIN, CMIP5_SST_MIN) - - -def scale_sst(sst): - return (sst - SST_MIN) / (SST_MAX - SST_MIN) - - -def scale_back_sst(sst): - return (SST_MAX - SST_MIN) * sst + SST_MIN - - -def prepare_inputs_targets( - len_time, input_gap, input_length, pred_shift, pred_length, samples_gap -): - """Prepares the input and target indices for training. - - Args: - len_time (int): The total number of time steps in the dataset. - input_gap (int): Time gaps between two consecutive input frames. - input_length (int): The number of input frames. - pred_shift (int): The lead_time of the last target to be predicted. - pred_length (int): The number of frames to be predicted. - samples_gap (int): Stride of seq sampling. - """ - - if pred_shift < pred_length: - raise ValueError("Pred_shift should be small than pred_length") - input_span = input_gap * (input_length - 1) + 1 - pred_gap = pred_shift // pred_length - input_ind = np.arange(0, input_span, input_gap) - target_ind = np.arange(0, pred_shift, pred_gap) + input_span + pred_gap - 1 - ind = np.concatenate([input_ind, target_ind]).reshape(1, input_length + pred_length) - max_n_sample = len_time - (input_span + pred_shift - 1) - ind = ind + np.arange(max_n_sample)[:, np.newaxis] @ np.ones( - (1, input_length + pred_length), dtype=int - ) - return ind[::samples_gap] - - -def fold(data, size=36, stride=12): - """inverse of unfold/sliding window operation - only applicable to the case where the size of the sliding windows is n*stride - - Args: - data (tuple[int,...]): The input data.(N, size, *). - size (int, optional): The size of a single datum.The Defaults to 36. - stride (int, optional): The step.Defaults to 12. - - Returns: - outdata (np.ndarray): (N_, *).N/size is the number/width of sliding blocks - """ - - if size % stride != 0: - raise ValueError("size modulo stride should be zero") - times = size // stride - remain = (data.shape[0] - 1) % times - if remain > 0: - ls = list(data[::times]) + [data[-1, -(remain * stride) :]] - outdata = np.concatenate(ls, axis=0) # (36*(151//3+1)+remain*stride, *, 15) - else: - outdata = np.concatenate(data[::times], axis=0) # (36*(151/3+1), *, 15) - assert ( - outdata.shape[0] == size * ((data.shape[0] - 1) // times + 1) + remain * stride - ) - return outdata - - -def data_transform(data, num_years_per_model): - """The transform of the input data. 
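The index matrix built by prepare_inputs_targets above can be hard to picture from the code alone; the following standalone sketch repeats the same construction with toy parameters chosen for readability (not the dataset defaults of in_len=12 and out_len=26) and prints the resulting windows.

import numpy as np

# Same construction as prepare_inputs_targets above, with toy parameters.
len_time, input_gap, input_length = 20, 1, 3
pred_shift, pred_length, samples_gap = 4, 4, 5

input_span = input_gap * (input_length - 1) + 1               # 3 frames of input
pred_gap = pred_shift // pred_length                          # 1 step between targets
input_ind = np.arange(0, input_span, input_gap)               # [0 1 2]
target_ind = np.arange(0, pred_shift, pred_gap) + input_span + pred_gap - 1  # [3 4 5 6]
ind = np.concatenate([input_ind, target_ind])[None, :]        # one input+target window
max_n_sample = len_time - (input_span + pred_shift - 1)       # 14 valid start positions
ind = ind + np.arange(max_n_sample)[:, None]                  # slide the window over time
print(ind[::samples_gap])
# [[ 0  1  2  3  4  5  6]
#  [ 5  6  7  8  9 10 11]
#  [10 11 12 13 14 15 16]]

Broadcasting with [:, None] stands in for the matmul against a row of ones used in the original function; the resulting index matrix is identical.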
- - Args: - data (Tuple[list,...]): The input data.Shape of (N, 36, *). - num_years_per_model (int): The number of years associated with each model.151/140. - """ - - length = data.shape[0] - assert length % num_years_per_model == 0 - num_models = length // num_years_per_model - outdata = np.stack( - np.split(data, length / num_years_per_model, axis=0), axis=-1 - ) # (151, 36, *, 15) - # cmip6sst outdata.shape = (151, 36, 24, 48, 15) = (year, month, lat, lon, model) - # cmip5sst outdata.shape = (140, 36, 24, 48, 17) - # cmip6nino outdata.shape = (151, 36, 15) - # cmip5nino outdata.shape = (140, 36, 17) - outdata = fold(outdata, size=36, stride=12) - # cmip6sst outdata.shape = (1836, 24, 48, 15), 1836 == 151 * 12 + 24 - # cmip5sst outdata.shape = (1704, 24, 48, 17) - # cmip6nino outdata.shape = (1836, 15) - # cmip5nino outdata.shape = (1704, 17) - - # check output data - assert outdata.shape[-1] == num_models - assert not np.any(np.isnan(outdata)) - return outdata - - -def read_raw_data(ds_dir, out_dir=None): - """read and process raw cmip data from CMIP_train.nc and CMIP_label.nc - - Args: - ds_dir (str): The path of the dataset. - out_dir (str): The path of output. Defaults to None. - """ - import xarray as xr - - train_cmip = xr.open_dataset( - Path(ds_dir) / "CMIP_train.nc", engine="h5netcdf" - ).transpose("year", "month", "lat", "lon") - label_cmip = xr.open_dataset( - Path(ds_dir) / "CMIP_label.nc", engine="h5netcdf" - ).transpose("year", "month") - # train_cmip.sst.values.shape = (4645, 36, 24, 48) - - # select longitudes - lon = train_cmip.lon.values - lon = lon[np.logical_and(lon >= 95, lon <= 330)] - train_cmip = train_cmip.sel(lon=lon) - - cmip6sst = data_transform( - data=train_cmip.sst.values[:2265], num_years_per_model=151 - ) - cmip5sst = data_transform( - data=train_cmip.sst.values[2265:], num_years_per_model=140 - ) - cmip6nino = data_transform( - data=label_cmip.nino.values[:2265], num_years_per_model=151 - ) - cmip5nino = data_transform( - data=label_cmip.nino.values[2265:], num_years_per_model=140 - ) - - # cmip6sst.shape = (1836, 24, 48, 15) - # cmip5sst.shape = (1704, 24, 48, 17) - assert len(cmip6sst.shape) == 4 - assert len(cmip5sst.shape) == 4 - assert len(cmip6nino.shape) == 2 - assert len(cmip5nino.shape) == 2 - # store processed data for faster data access - if out_dir is not None: - ds_cmip6 = xr.Dataset( - { - "sst": (["month", "lat", "lon", "model"], cmip6sst), - "nino": (["month", "model"], cmip6nino), - }, - coords={ - "month": np.repeat( - np.arange(1, 13)[None], cmip6nino.shape[0] // 12, axis=0 - ).flatten(), - "lat": train_cmip.lat.values, - "lon": train_cmip.lon.values, - "model": np.arange(15) + 1, - }, - ) - ds_cmip6.to_netcdf(Path(out_dir) / "cmip6.nc") - ds_cmip5 = xr.Dataset( - { - "sst": (["month", "lat", "lon", "model"], cmip5sst), - "nino": (["month", "model"], cmip5nino), - }, - coords={ - "month": np.repeat( - np.arange(1, 13)[None], cmip5nino.shape[0] // 12, axis=0 - ).flatten(), - "lat": train_cmip.lat.values, - "lon": train_cmip.lon.values, - "model": np.arange(17) + 1, - }, - ) - ds_cmip5.to_netcdf(Path(out_dir) / "cmip5.nc") - train_cmip.close() - label_cmip.close() - return cmip6sst, cmip5sst, cmip6nino, cmip5nino - - -def cat_over_last_dim(data): - """treat different models (15 from CMIP6, 17 from CMIP5) as batch_size - e.g., cmip6sst.shape = (178, 38, 24, 48, 15), converted_cmip6sst.shape = (2670, 38, 24, 48) - e.g., cmip5sst.shape = (165, 38, 24, 48, 15), converted_cmip6sst.shape = (2475, 38, 24, 48) - """ - - return 
np.concatenate(np.moveaxis(data, -1, 0), axis=0) - - -class ExtMoEENSODataset(io.Dataset): - """The El Niño/Southern Oscillation dataset. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - label_keys (Tuple[str, ...]): Name of label keys, such as ("output",). - data_dir (str): The directory of data. - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of each constraint variable. Defaults to None. - in_len (int, optional): The length of input data. Defaults to 12. - out_len (int, optional): The length of out data. Defaults to 26. - in_stride (int, optional): The stride of input data. Defaults to 1. - out_stride (int, optional): The stride of output data. Defaults to 1. - train_samples_gap (int, optional): The stride of sequence sampling during training. Defaults to 10. - e.g., samples_gap = 10, the first seq contains [0, 1, ..., T-1] frame indices, the second seq contains [10, 11, .., T+9] - eval_samples_gap (int, optional): The stride of sequence sampling during eval. Defaults to 11. - normalize_sst (bool, optional): Whether to use normalization. Defaults to True. - batch_size (int, optional): Batch size. Defaults to 1. - num_workers (int, optional): The num of workers. Defaults to 1. - training (str, optional): Training pathse. Defaults to "train". - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - data_dir: str, - weight_dict: Optional[Dict[str, float]] = None, - in_len: int = 12, - out_len: int = 26, - in_stride: int = 1, - out_stride: int = 1, - train_samples_gap: int = 10, - eval_samples_gap: int = 11, - normalize_sst: bool = True, - batch_size: int = 1, - num_workers: int = 1, - training: str = "train", - ): - super(ExtMoEENSODataset, self).__init__() - if importlib.util.find_spec("xarray") is None: - raise ModuleNotFoundError( - "To use RadarDataset, please install 'xarray' with: `pip install " - "xarray` first." - ) - self.input_keys = input_keys - self.label_keys = label_keys - self.data_dir = data_dir - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.in_len = in_len - self.out_len = out_len - self.in_stride = in_stride - self.out_stride = out_stride - self.train_samples_gap = train_samples_gap - self.eval_samples_gap = eval_samples_gap - self.normalize_sst = normalize_sst - # datamodule_only - self.batch_size = batch_size - if num_workers != 1: - raise ValueError( - "Current implementation does not support `num_workers != 1`!" 
- ) - self.num_workers = num_workers - self.training = training - - # pre-data - cmip6sst, cmip5sst, cmip6nino, cmip5nino = read_raw_data(self.data_dir) - # TODO: more flexible train/val/test split - self.sst_train = [cmip6sst, cmip5sst[..., :-2]] - self.nino_train = [cmip6nino, cmip5nino[..., :-2]] - self.sst_eval = [cmip5sst[..., -2:-1]] - self.nino_eval = [cmip5nino[..., -2:-1]] - self.sst_test = [cmip5sst[..., -1:]] - self.nino_test = [cmip5nino[..., -1:]] - - self.sst, self.target_nino = self.create_data() - - def create_data( - self, - ): - if self.training == "train": - sst_cmip6 = self.sst_train[0] - nino_cmip6 = self.nino_train[0] - sst_cmip5 = self.sst_train[1] - nino_cmip5 = self.nino_train[1] - samples_gap = self.train_samples_gap - elif self.training == "eval": - sst_cmip6 = None - nino_cmip6 = None - sst_cmip5 = self.sst_eval[0] - nino_cmip5 = self.nino_eval[0] - samples_gap = self.eval_samples_gap - elif self.training == "test": - sst_cmip6 = None - nino_cmip6 = None - sst_cmip5 = self.sst_test[0] - nino_cmip5 = self.nino_test[0] - samples_gap = self.eval_samples_gap - - # cmip6 (N, *, 15) - # cmip5 (N, *, 17) - sst = [] - target_nino = [] - - nino_idx_slice = slice( - self.in_len, self.in_len + self.out_len - NINO_WINDOW_T + 1 - ) # e.g., 12:36 - if sst_cmip6 is not None: - assert len(sst_cmip6.shape) == 4 - assert len(nino_cmip6.shape) == 2 - idx_sst = prepare_inputs_targets( - len_time=sst_cmip6.shape[0], - input_length=self.in_len, - input_gap=self.in_stride, - pred_shift=self.out_len * self.out_stride, - pred_length=self.out_len, - samples_gap=samples_gap, - ) - - sst.append(cat_over_last_dim(sst_cmip6[idx_sst])) - target_nino.append( - cat_over_last_dim(nino_cmip6[idx_sst[:, nino_idx_slice]]) - ) - if sst_cmip5 is not None: - assert len(sst_cmip5.shape) == 4 - assert len(nino_cmip5.shape) == 2 - idx_sst = prepare_inputs_targets( - len_time=sst_cmip5.shape[0], - input_length=self.in_len, - input_gap=self.in_stride, - pred_shift=self.out_len * self.out_stride, - pred_length=self.out_len, - samples_gap=samples_gap, - ) - sst.append(cat_over_last_dim(sst_cmip5[idx_sst])) - target_nino.append( - cat_over_last_dim(nino_cmip5[idx_sst[:, nino_idx_slice]]) - ) - - # sst data containing both the input and target - self.sst = np.concatenate(sst, axis=0) # (N, in_len+out_len, lat, lon) - if self.normalize_sst: - self.sst = scale_sst(self.sst) - # nino data containing the target only - self.target_nino = np.concatenate( - target_nino, axis=0 - ) # (N, out_len+NINO_WINDOW_T-1) - assert self.sst.shape[0] == self.target_nino.shape[0] - assert self.sst.shape[1] == self.in_len + self.out_len - assert self.target_nino.shape[1] == self.out_len - NINO_WINDOW_T + 1 - - return self.sst, self.target_nino - - def get_datashape(self): - return {"sst": self.sst.shape, "nino target": self.target_nino.shape} - - def __len__(self): - return self.sst.shape[0] - - def __getitem__(self, idx): - sst_data = self.sst[idx].astype("float32") - sst_data = sst_data[..., np.newaxis] - in_seq = sst_data[: self.in_len, ...] # ( in_len, lat, lon, 1) - target_seq = sst_data[self.in_len :, ...] 
# ( in_len, lat, lon, 1) - weight_item = self.weight_dict - - if self.training == "train": - input_item = {self.input_keys[0]: in_seq, "sst_target": target_seq} - label_item = { - self.label_keys[0]: target_seq, - } - - return input_item, label_item, weight_item - else: - input_item = {self.input_keys[0]: in_seq, "sst_target": target_seq} - label_item = { - self.label_keys[0]: target_seq, - self.label_keys[1]: self.target_nino[idx], - } - - return input_item, label_item, weight_item +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import importlib +from pathlib import Path +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np +from paddle import io + +NINO_WINDOW_T = 3 # Nino index is the sliding average over sst, window size is 3 +CMIP6_SST_MAX = 10.198975563049316 +CMIP6_SST_MIN = -16.549121856689453 +CMIP5_SST_MAX = 8.991744995117188 +CMIP5_SST_MIN = -9.33076286315918 +CMIP6_NINO_MAX = 4.138188362121582 +CMIP6_NINO_MIN = -3.5832221508026123 +CMIP5_NINO_MAX = 3.8253555297851562 +CMIP5_NINO_MIN = -2.691682815551758 +SST_MAX = max(CMIP6_SST_MAX, CMIP5_SST_MAX) +SST_MIN = min(CMIP6_SST_MIN, CMIP5_SST_MIN) + + +def scale_sst(sst): + return (sst - SST_MIN) / (SST_MAX - SST_MIN) + + +def scale_back_sst(sst): + return (SST_MAX - SST_MIN) * sst + SST_MIN + + +def prepare_inputs_targets( + len_time, input_gap, input_length, pred_shift, pred_length, samples_gap +): + """Prepares the input and target indices for training. + + Args: + len_time (int): The total number of time steps in the dataset. + input_gap (int): Time gaps between two consecutive input frames. + input_length (int): The number of input frames. + pred_shift (int): The lead_time of the last target to be predicted. + pred_length (int): The number of frames to be predicted. + samples_gap (int): Stride of seq sampling. + """ + + if pred_shift < pred_length: + raise ValueError("Pred_shift should be small than pred_length") + input_span = input_gap * (input_length - 1) + 1 + pred_gap = pred_shift // pred_length + input_ind = np.arange(0, input_span, input_gap) + target_ind = np.arange(0, pred_shift, pred_gap) + input_span + pred_gap - 1 + ind = np.concatenate([input_ind, target_ind]).reshape(1, input_length + pred_length) + max_n_sample = len_time - (input_span + pred_shift - 1) + ind = ind + np.arange(max_n_sample)[:, np.newaxis] @ np.ones( + (1, input_length + pred_length), dtype=int + ) + return ind[::samples_gap] + + +def fold(data, size=36, stride=12): + """inverse of unfold/sliding window operation + only applicable to the case where the size of the sliding windows is n*stride + + Args: + data (tuple[int,...]): The input data.(N, size, *). + size (int, optional): The size of a single datum.The Defaults to 36. + stride (int, optional): The step.Defaults to 12. 
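A quick standalone check of the scale_sst / scale_back_sst pair defined above; the min/max constants are the combined CMIP5/CMIP6 bounds from this file, and the sample values are arbitrary.

import numpy as np

# Combined CMIP5/CMIP6 bounds, as defined in this file.
SST_MAX = max(10.198975563049316, 8.991744995117188)
SST_MIN = min(-16.549121856689453, -9.33076286315918)

def scale_sst(sst):
    return (sst - SST_MIN) / (SST_MAX - SST_MIN)

def scale_back_sst(sst):
    return (SST_MAX - SST_MIN) * sst + SST_MIN

raw = np.array([SST_MIN, 0.0, SST_MAX])
scaled = scale_sst(raw)
print(scaled)                                    # [0.  ~0.619  1.]: values mapped into [0, 1]
print(np.allclose(scale_back_sst(scaled), raw))  # True: the two functions are inverses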
+ + Returns: + outdata (np.ndarray): (N_, *).N/size is the number/width of sliding blocks + """ + + if size % stride != 0: + raise ValueError("size modulo stride should be zero") + times = size // stride + remain = (data.shape[0] - 1) % times + if remain > 0: + ls = list(data[::times]) + [data[-1, -(remain * stride) :]] + outdata = np.concatenate(ls, axis=0) # (36*(151//3+1)+remain*stride, *, 15) + else: + outdata = np.concatenate(data[::times], axis=0) # (36*(151/3+1), *, 15) + assert ( + outdata.shape[0] == size * ((data.shape[0] - 1) // times + 1) + remain * stride + ) + return outdata + + +def data_transform(data, num_years_per_model): + """The transform of the input data. + + Args: + data (Tuple[list,...]): The input data.Shape of (N, 36, *). + num_years_per_model (int): The number of years associated with each model.151/140. + """ + + length = data.shape[0] + assert length % num_years_per_model == 0 + num_models = length // num_years_per_model + outdata = np.stack( + np.split(data, length / num_years_per_model, axis=0), axis=-1 + ) # (151, 36, *, 15) + # cmip6sst outdata.shape = (151, 36, 24, 48, 15) = (year, month, lat, lon, model) + # cmip5sst outdata.shape = (140, 36, 24, 48, 17) + # cmip6nino outdata.shape = (151, 36, 15) + # cmip5nino outdata.shape = (140, 36, 17) + outdata = fold(outdata, size=36, stride=12) + # cmip6sst outdata.shape = (1836, 24, 48, 15), 1836 == 151 * 12 + 24 + # cmip5sst outdata.shape = (1704, 24, 48, 17) + # cmip6nino outdata.shape = (1836, 15) + # cmip5nino outdata.shape = (1704, 17) + + # check output data + assert outdata.shape[-1] == num_models + assert not np.any(np.isnan(outdata)) + return outdata + + +def read_raw_data(ds_dir, out_dir=None): + """read and process raw cmip data from CMIP_train.nc and CMIP_label.nc + + Args: + ds_dir (str): The path of the dataset. + out_dir (str): The path of output. Defaults to None. 
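To make the behaviour of fold above concrete, the sketch below unfolds a short 1-D series into overlapping windows and verifies that fold (same logic, reproduced standalone) recovers the original series; the window size and stride are toy values rather than the 36/12 used for the CMIP data.

import numpy as np

def fold(data, size, stride):
    # Same logic as the fold() in this file, reproduced for a standalone check.
    times = size // stride
    remain = (data.shape[0] - 1) % times
    if remain > 0:
        ls = list(data[::times]) + [data[-1, -(remain * stride):]]
        return np.concatenate(ls, axis=0)
    return np.concatenate(data[::times], axis=0)

x = np.arange(10)
size, stride = 4, 2  # toy values; the dataset uses size=36, stride=12
windows = np.stack([x[i:i + size] for i in range(0, len(x) - size + 1, stride)])
print(windows.shape)                                    # (4, 4): overlapping windows of x
print(np.array_equal(fold(windows, size, stride), x))   # True: fold undoes the unfold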
+ """ + import xarray as xr + + train_cmip = xr.open_dataset( + Path(ds_dir) / "CMIP_train.nc", engine="h5netcdf" + ).transpose("year", "month", "lat", "lon") + label_cmip = xr.open_dataset( + Path(ds_dir) / "CMIP_label.nc", engine="h5netcdf" + ).transpose("year", "month") + # train_cmip.sst.values.shape = (4645, 36, 24, 48) + + # select longitudes + lon = train_cmip.lon.values + lon = lon[np.logical_and(lon >= 95, lon <= 330)] + train_cmip = train_cmip.sel(lon=lon) + + cmip6sst = data_transform( + data=train_cmip.sst.values[:2265], num_years_per_model=151 + ) + cmip5sst = data_transform( + data=train_cmip.sst.values[2265:], num_years_per_model=140 + ) + cmip6nino = data_transform( + data=label_cmip.nino.values[:2265], num_years_per_model=151 + ) + cmip5nino = data_transform( + data=label_cmip.nino.values[2265:], num_years_per_model=140 + ) + + # cmip6sst.shape = (1836, 24, 48, 15) + # cmip5sst.shape = (1704, 24, 48, 17) + assert len(cmip6sst.shape) == 4 + assert len(cmip5sst.shape) == 4 + assert len(cmip6nino.shape) == 2 + assert len(cmip5nino.shape) == 2 + # store processed data for faster data access + if out_dir is not None: + ds_cmip6 = xr.Dataset( + { + "sst": (["month", "lat", "lon", "model"], cmip6sst), + "nino": (["month", "model"], cmip6nino), + }, + coords={ + "month": np.repeat( + np.arange(1, 13)[None], cmip6nino.shape[0] // 12, axis=0 + ).flatten(), + "lat": train_cmip.lat.values, + "lon": train_cmip.lon.values, + "model": np.arange(15) + 1, + }, + ) + ds_cmip6.to_netcdf(Path(out_dir) / "cmip6.nc") + ds_cmip5 = xr.Dataset( + { + "sst": (["month", "lat", "lon", "model"], cmip5sst), + "nino": (["month", "model"], cmip5nino), + }, + coords={ + "month": np.repeat( + np.arange(1, 13)[None], cmip5nino.shape[0] // 12, axis=0 + ).flatten(), + "lat": train_cmip.lat.values, + "lon": train_cmip.lon.values, + "model": np.arange(17) + 1, + }, + ) + ds_cmip5.to_netcdf(Path(out_dir) / "cmip5.nc") + train_cmip.close() + label_cmip.close() + return cmip6sst, cmip5sst, cmip6nino, cmip5nino + + +def cat_over_last_dim(data): + """treat different models (15 from CMIP6, 17 from CMIP5) as batch_size + e.g., cmip6sst.shape = (178, 38, 24, 48, 15), converted_cmip6sst.shape = (2670, 38, 24, 48) + e.g., cmip5sst.shape = (165, 38, 24, 48, 15), converted_cmip6sst.shape = (2475, 38, 24, 48) + """ + + return np.concatenate(np.moveaxis(data, -1, 0), axis=0) + + +class ExtMoEENSODataset(io.Dataset): + """The El Niño/Southern Oscillation dataset. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + label_keys (Tuple[str, ...]): Name of label keys, such as ("output",). + data_dir (str): The directory of data. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of each constraint variable. Defaults to None. + in_len (int, optional): The length of input data. Defaults to 12. + out_len (int, optional): The length of out data. Defaults to 26. + in_stride (int, optional): The stride of input data. Defaults to 1. + out_stride (int, optional): The stride of output data. Defaults to 1. + train_samples_gap (int, optional): The stride of sequence sampling during training. Defaults to 10. + e.g., samples_gap = 10, the first seq contains [0, 1, ..., T-1] frame indices, the second seq contains [10, 11, .., T+9] + eval_samples_gap (int, optional): The stride of sequence sampling during eval. Defaults to 11. + normalize_sst (bool, optional): Whether to use normalization. Defaults to True. + batch_size (int, optional): Batch size. Defaults to 1. 
+ num_workers (int, optional): The num of workers. Defaults to 1. + training (str, optional): Training pathse. Defaults to "train". + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + data_dir: str, + weight_dict: Optional[Dict[str, float]] = None, + in_len: int = 12, + out_len: int = 26, + in_stride: int = 1, + out_stride: int = 1, + train_samples_gap: int = 10, + eval_samples_gap: int = 11, + normalize_sst: bool = True, + batch_size: int = 1, + num_workers: int = 1, + training: str = "train", + ): + super(ExtMoEENSODataset, self).__init__() + if importlib.util.find_spec("xarray") is None: + raise ModuleNotFoundError( + "To use RadarDataset, please install 'xarray' with: `pip install " + "xarray` first." + ) + self.input_keys = input_keys + self.label_keys = label_keys + self.data_dir = data_dir + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.in_len = in_len + self.out_len = out_len + self.in_stride = in_stride + self.out_stride = out_stride + self.train_samples_gap = train_samples_gap + self.eval_samples_gap = eval_samples_gap + self.normalize_sst = normalize_sst + # datamodule_only + self.batch_size = batch_size + if num_workers != 1: + raise ValueError( + "Current implementation does not support `num_workers != 1`!" + ) + self.num_workers = num_workers + self.training = training + + # pre-data + cmip6sst, cmip5sst, cmip6nino, cmip5nino = read_raw_data(self.data_dir) + # TODO: more flexible train/val/test split + self.sst_train = [cmip6sst, cmip5sst[..., :-2]] + self.nino_train = [cmip6nino, cmip5nino[..., :-2]] + self.sst_eval = [cmip5sst[..., -2:-1]] + self.nino_eval = [cmip5nino[..., -2:-1]] + self.sst_test = [cmip5sst[..., -1:]] + self.nino_test = [cmip5nino[..., -1:]] + + self.sst, self.target_nino = self.create_data() + + def create_data( + self, + ): + if self.training == "train": + sst_cmip6 = self.sst_train[0] + nino_cmip6 = self.nino_train[0] + sst_cmip5 = self.sst_train[1] + nino_cmip5 = self.nino_train[1] + samples_gap = self.train_samples_gap + elif self.training == "eval": + sst_cmip6 = None + nino_cmip6 = None + sst_cmip5 = self.sst_eval[0] + nino_cmip5 = self.nino_eval[0] + samples_gap = self.eval_samples_gap + elif self.training == "test": + sst_cmip6 = None + nino_cmip6 = None + sst_cmip5 = self.sst_test[0] + nino_cmip5 = self.nino_test[0] + samples_gap = self.eval_samples_gap + + # cmip6 (N, *, 15) + # cmip5 (N, *, 17) + sst = [] + target_nino = [] + + nino_idx_slice = slice( + self.in_len, self.in_len + self.out_len - NINO_WINDOW_T + 1 + ) # e.g., 12:36 + if sst_cmip6 is not None: + assert len(sst_cmip6.shape) == 4 + assert len(nino_cmip6.shape) == 2 + idx_sst = prepare_inputs_targets( + len_time=sst_cmip6.shape[0], + input_length=self.in_len, + input_gap=self.in_stride, + pred_shift=self.out_len * self.out_stride, + pred_length=self.out_len, + samples_gap=samples_gap, + ) + + sst.append(cat_over_last_dim(sst_cmip6[idx_sst])) + target_nino.append( + cat_over_last_dim(nino_cmip6[idx_sst[:, nino_idx_slice]]) + ) + if sst_cmip5 is not None: + assert len(sst_cmip5.shape) == 4 + assert len(nino_cmip5.shape) == 2 + idx_sst = prepare_inputs_targets( + len_time=sst_cmip5.shape[0], + input_length=self.in_len, + input_gap=self.in_stride, + pred_shift=self.out_len * self.out_stride, + 
pred_length=self.out_len, + samples_gap=samples_gap, + ) + sst.append(cat_over_last_dim(sst_cmip5[idx_sst])) + target_nino.append( + cat_over_last_dim(nino_cmip5[idx_sst[:, nino_idx_slice]]) + ) + + # sst data containing both the input and target + self.sst = np.concatenate(sst, axis=0) # (N, in_len+out_len, lat, lon) + if self.normalize_sst: + self.sst = scale_sst(self.sst) + # nino data containing the target only + self.target_nino = np.concatenate( + target_nino, axis=0 + ) # (N, out_len+NINO_WINDOW_T-1) + assert self.sst.shape[0] == self.target_nino.shape[0] + assert self.sst.shape[1] == self.in_len + self.out_len + assert self.target_nino.shape[1] == self.out_len - NINO_WINDOW_T + 1 + + return self.sst, self.target_nino + + def get_datashape(self): + return {"sst": self.sst.shape, "nino target": self.target_nino.shape} + + def __len__(self): + return self.sst.shape[0] + + def __getitem__(self, idx): + sst_data = self.sst[idx].astype("float32") + sst_data = sst_data[..., np.newaxis] + in_seq = sst_data[: self.in_len, ...] # ( in_len, lat, lon, 1) + target_seq = sst_data[self.in_len :, ...] # ( in_len, lat, lon, 1) + weight_item = self.weight_dict + + if self.training == "train": + input_item = {self.input_keys[0]: in_seq, "sst_target": target_seq} + label_item = { + self.label_keys[0]: target_seq, + } + + return input_item, label_item, weight_item + else: + input_item = {self.input_keys[0]: in_seq, "sst_target": target_seq} + label_item = { + self.label_keys[0]: target_seq, + self.label_keys[1]: self.target_nino[idx], + } + + return input_item, label_item, weight_item diff --git a/ppsci/data/dataset/mat_dataset.py b/ppsci/data/dataset/mat_dataset.py index 609e35aeaa..e319367d8a 100644 --- a/ppsci/data/dataset/mat_dataset.py +++ b/ppsci/data/dataset/mat_dataset.py @@ -1,287 +1,287 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle -from paddle import io -from paddle import vision - -from ppsci.utils import misc -from ppsci.utils import reader - - -class MatDataset(io.Dataset): - """Dataset class for .mat file. - - Args: - file_path (str): Mat file path. - input_keys (Tuple[str, ...]): List of input keys. - label_keys (Tuple[str, ...], optional): List of label keys. Defaults to (). - alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. - i.e. {inner_key: outer_key}. Defaults to None. - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of - each constraint variable. Defaults to None. - timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data - in the time dimension. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. 
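The weight_dict argument of MatDataset accepts either plain floats or callables evaluated on the input dict; below is a minimal sketch of that expansion using toy in-memory arrays instead of a .mat file (key names and values are illustrative, not from any real dataset).

import numpy as np

# Toy stand-ins for arrays that MatDataset would read from a .mat file (illustrative only).
inputs = {"x": np.linspace(0.0, 1.0, 5)}
labels = {"u": np.zeros(5), "v": np.zeros(5)}
weight_dict = {"u": 2.0, "v": lambda d: 1.0 + d["x"]}  # float and callable variants

weights = {key: np.ones_like(next(iter(labels.values()))) for key in labels}
for key, value in weight_dict.items():
    if isinstance(value, (int, float)):
        # Constant weights are broadcast to arrays shaped like the labels.
        weights[key] = np.full_like(next(iter(labels.values())), value)
    elif callable(value):
        # Callables are evaluated on the inputs; scalar results are broadcast as well.
        out = value(inputs)
        weights[key] = (
            np.full_like(next(iter(labels.values())), out)
            if isinstance(out, (int, float))
            else out
        )

print(weights["u"])  # [2. 2. 2. 2. 2.]
print(weights["v"])  # [1.   1.25 1.5  1.75 2.  ]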
- - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.MatDataset( - ... "/path/to/file.mat" - ... ("x",), - ... ("u",), - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = True - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...] = (), - alias_dict: Optional[Dict[str, str]] = None, - weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, - timestamps: Optional[Tuple[float, ...]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - - # read raw data from file - raw_data = reader.load_mat_file( - file_path, - input_keys + label_keys, - alias_dict, - ) - # filter raw data by given timestamps if specified - if timestamps is not None: - if "t" in raw_data: - # filter data according to given timestamps - raw_time_array = raw_data["t"] - mask = [] - for ti in timestamps: - mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - mask = np.concatenate(mask, 0) - raw_data = raw_data[mask] - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - else: - # repeat data according to given timestamps - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - raw_data = misc.combine_array_with_time(raw_data, timestamps) - self.input_keys = ("t",) + tuple(self.input_keys) - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - - # fetch input data - self.input = { - key: value for key, value in raw_data.items() if key in self.input_keys - } - # fetch label data - self.label = { - key: value for key, value in raw_data.items() if key in self.label_keys - } - - # prepare weights - self.weight = ( - {key: np.ones_like(next(iter(self.label.values()))) for key in self.label} - if weight_dict is not None - else {} - ) - if weight_dict is not None: - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), value - ) - elif callable(value): - func = value - self.weight[key] = func(self.input) - if isinstance(self.weight[key], (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), self.weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - self.transforms = transforms - self._len = len(next(iter(self.input.values()))) - - def __getitem__(self, idx): - input_item = {key: value[idx] for key, value in self.input.items()} - label_item = {key: value[idx] for key, value in self.label.items()} - weight_item = {key: value[idx] for key, value in self.weight.items()} - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return (input_item, label_item, weight_item) - - def __len__(self): - return self._len - - -class IterableMatDataset(io.IterableDataset): - """IterableMatDataset for full-data loading. - - Args: - file_path (str): Mat file path. - input_keys (Tuple[str, ...]): List of input keys. - label_keys (Tuple[str, ...], optional): List of label keys. Defaults to (). - alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. - i.e. {inner_key: outer_key}. Defaults to None. 
- weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of - each constraint variable. Defaults to None. - timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data - in the time dimension. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.IterableMatDataset( - ... "/path/to/file.mat" - ... ("x",), - ... ("u",), - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...] = (), - alias_dict: Optional[Dict[str, str]] = None, - weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, - timestamps: Optional[Tuple[float, ...]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - - # read raw data from file - raw_data = reader.load_mat_file( - file_path, - input_keys + label_keys, - alias_dict, - ) - # filter raw data by given timestamps if specified - if timestamps is not None: - if "t" in raw_data: - # filter data according to given timestamps - raw_time_array = raw_data["t"] - mask = [] - for ti in timestamps: - mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - mask = np.concatenate(mask, 0) - raw_data = raw_data[mask] - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - else: - # repeat data according to given timestamps - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - raw_data = misc.combine_array_with_time(raw_data, timestamps) - self.input_keys = ("t",) + tuple(self.input_keys) - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - - # fetch input data - self.input = { - key: value for key, value in raw_data.items() if key in self.input_keys - } - # fetch label data - self.label = { - key: value for key, value in raw_data.items() if key in self.label_keys - } - - # prepare weights - self.weight = ( - {key: np.ones_like(next(iter(self.label.values()))) for key in self.label} - if weight_dict is not None - else {} - ) - if weight_dict is not None: - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), value - ) - elif callable(value): - func = value - self.weight[key] = func(self.input) - if isinstance(self.weight[key], (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), self.weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - self.input = {key: paddle.to_tensor(value) for key, value in self.input.items()} - self.label = {key: paddle.to_tensor(value) for key, value in self.label.items()} - self.weight = { - key: paddle.to_tensor(value) for key, value in self.weight.items() - } - - self.transforms = transforms - self._len = len(next(iter(self.input.values()))) - - @property - def num_samples(self): - """Number of samples within current dataset.""" - return self._len - - def __iter__(self): - if callable(self.transforms): - input_, label_, weight_ = self.transforms( - self.input, self.label, self.weight - ) - yield input_, label_, weight_ - else: - yield 
self.input, self.label, self.weight - - def __len__(self): - return 1 +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import numpy as np +import paddle +from paddle import io +from paddle import vision + +from ppsci.utils import misc +from ppsci.utils import reader + + +class MatDataset(io.Dataset): + """Dataset class for .mat file. + + Args: + file_path (str): Mat file path. + input_keys (Tuple[str, ...]): List of input keys. + label_keys (Tuple[str, ...], optional): List of label keys. Defaults to (). + alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. + i.e. {inner_key: outer_key}. Defaults to None. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of + each constraint variable. Defaults to None. + timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data + in the time dimension. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.MatDataset( + ... "/path/to/file.mat" + ... ("x",), + ... ("u",), + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = True + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...] 
= (), + alias_dict: Optional[Dict[str, str]] = None, + weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, + timestamps: Optional[Tuple[float, ...]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + + # read raw data from file + raw_data = reader.load_mat_file( + file_path, + input_keys + label_keys, + alias_dict, + ) + # filter raw data by given timestamps if specified + if timestamps is not None: + if "t" in raw_data: + # filter data according to given timestamps + raw_time_array = raw_data["t"] + mask = [] + for ti in timestamps: + mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + mask = np.concatenate(mask, 0) + raw_data = raw_data[mask] + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + else: + # repeat data according to given timestamps + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + raw_data = misc.combine_array_with_time(raw_data, timestamps) + self.input_keys = ("t",) + tuple(self.input_keys) + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + + # fetch input data + self.input = { + key: value for key, value in raw_data.items() if key in self.input_keys + } + # fetch label data + self.label = { + key: value for key, value in raw_data.items() if key in self.label_keys + } + + # prepare weights + self.weight = ( + {key: np.ones_like(next(iter(self.label.values()))) for key in self.label} + if weight_dict is not None + else {} + ) + if weight_dict is not None: + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), value + ) + elif callable(value): + func = value + self.weight[key] = func(self.input) + if isinstance(self.weight[key], (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), self.weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + self.transforms = transforms + self._len = len(next(iter(self.input.values()))) + + def __getitem__(self, idx): + input_item = {key: value[idx] for key, value in self.input.items()} + label_item = {key: value[idx] for key, value in self.label.items()} + weight_item = {key: value[idx] for key, value in self.weight.items()} + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return (input_item, label_item, weight_item) + + def __len__(self): + return self._len + + +class IterableMatDataset(io.IterableDataset): + """IterableMatDataset for full-data loading. + + Args: + file_path (str): Mat file path. + input_keys (Tuple[str, ...]): List of input keys. + label_keys (Tuple[str, ...], optional): List of label keys. Defaults to (). + alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. + i.e. {inner_key: outer_key}. Defaults to None. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of + each constraint variable. Defaults to None. + timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data + in the time dimension. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. 
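
# A minimal usage sketch of the MatDataset API documented above, assuming a
# hypothetical file "examples/flow.mat" that stores arrays under the keys "x"
# and "u". The weight_dict entry shows the constant-weight form; a callable
# over the input dict is also accepted, as handled in __init__ below.
import paddle
import ppsci

dataset = ppsci.data.dataset.MatDataset(
    "examples/flow.mat",       # hypothetical .mat file
    input_keys=("x",),
    label_keys=("u",),
    weight_dict={"u": 10.0},   # broadcast to an array shaped like the "u" labels
)
# batch_index is True, so the dataset can be wrapped in a regular DataLoader.
loader = paddle.io.DataLoader(dataset, batch_size=32, shuffle=True)
for input_batch, label_batch, weight_batch in loader:
    pass  # each element is a dict keyed by the corresponding input/label keys
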
+ + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.IterableMatDataset( + ... "/path/to/file.mat" + ... ("x",), + ... ("u",), + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...] = (), + alias_dict: Optional[Dict[str, str]] = None, + weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, + timestamps: Optional[Tuple[float, ...]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + + # read raw data from file + raw_data = reader.load_mat_file( + file_path, + input_keys + label_keys, + alias_dict, + ) + # filter raw data by given timestamps if specified + if timestamps is not None: + if "t" in raw_data: + # filter data according to given timestamps + raw_time_array = raw_data["t"] + mask = [] + for ti in timestamps: + mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + mask = np.concatenate(mask, 0) + raw_data = raw_data[mask] + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + else: + # repeat data according to given timestamps + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + raw_data = misc.combine_array_with_time(raw_data, timestamps) + self.input_keys = ("t",) + tuple(self.input_keys) + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + + # fetch input data + self.input = { + key: value for key, value in raw_data.items() if key in self.input_keys + } + # fetch label data + self.label = { + key: value for key, value in raw_data.items() if key in self.label_keys + } + + # prepare weights + self.weight = ( + {key: np.ones_like(next(iter(self.label.values()))) for key in self.label} + if weight_dict is not None + else {} + ) + if weight_dict is not None: + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), value + ) + elif callable(value): + func = value + self.weight[key] = func(self.input) + if isinstance(self.weight[key], (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), self.weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + self.input = {key: paddle.to_tensor(value) for key, value in self.input.items()} + self.label = {key: paddle.to_tensor(value) for key, value in self.label.items()} + self.weight = { + key: paddle.to_tensor(value) for key, value in self.weight.items() + } + + self.transforms = transforms + self._len = len(next(iter(self.input.values()))) + + @property + def num_samples(self): + """Number of samples within current dataset.""" + return self._len + + def __iter__(self): + if callable(self.transforms): + input_, label_, weight_ = self.transforms( + self.input, self.label, self.weight + ) + yield input_, label_, weight_ + else: + yield self.input, self.label, self.weight + + def __len__(self): + return 1 diff --git a/ppsci/data/dataset/moflow_dataset.py b/ppsci/data/dataset/moflow_dataset.py index ba6e6e07d5..0cc94aba1b 100644 --- a/ppsci/data/dataset/moflow_dataset.py +++ b/ppsci/data/dataset/moflow_dataset.py @@ -1,437 +1,437 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Copyright 2020 Chengxi Zang - -from __future__ import annotations - -import os -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional -from typing import Tuple - -import numpy as np -import pandas as pd -from paddle import io -from tqdm import tqdm - -from ppsci.utils import logger - -try: - from rdkit import Chem - from rdkit.Chem import rdmolops -except ModuleNotFoundError: - pass - - -class MolGraph: - """ - Args: - max_atoms (int): Max number of atoms for each molecule, if the - number of atoms is more than this value, this data is simply - ignored. - Setting negative value indicates no limit for max atoms. - out_size (int): It specifies the size of array returned by - `get_input_features`. - If the number of atoms in the molecule is less than this value, - the returned arrays is padded to have fixed size. - Setting negative value indicates do not pad returned array. - add_Hs (bool): If True, implicit Hs are added. - kekulize (bool): If True, Kekulizes the molecule. - """ - - def __init__(self, max_atoms=-1, out_size=-1, add_Hs=False, kekulize=False): - super(MolGraph, self).__init__() - self.add_Hs = add_Hs - self.kekulize = kekulize - if max_atoms >= 0 and out_size >= 0 and max_atoms > out_size: - raise ValueError( - f"max_atoms {max_atoms} must be less or equal to out_size {out_size}" - ) - self.max_atoms = max_atoms - self.out_size = out_size - - def get_input_features(self, mol): - """ - get input features - Args: - mol (Mol): mol instance - - Returns: - (tuple): (`atom`, `adj`) - - """ - self.type_check_num_atoms(mol, self.max_atoms) - atom_array = self.construct_atomic_number_array(mol, out_size=self.out_size) - adj_array = self.construct_discrete_edge_matrix(mol, out_size=self.out_size) - return atom_array, adj_array - - def prepare_smiles_and_mol(self, mol): - """Prepare `smiles` and `mol` used in following preprocessing. - This method is called before `get_input_features` is called, by parser - class. - This method may be overriden to support custom `smile`/`mol` extraction - Args: - mol (mol): mol instance - - Returns (tuple): (`smiles`, `mol`) - """ - canonical_smiles = Chem.MolToSmiles(mol, isomericSmiles=False, canonical=True) - mol = Chem.MolFromSmiles(canonical_smiles) - if self.add_Hs: - mol = Chem.AddHs(mol) - if self.kekulize: - Chem.Kekulize(mol) - return canonical_smiles, mol - - def get_label(self, mol, label_names=None): - """Extracts label information from a molecule. - This method extracts properties whose keys are - specified by ``label_names`` from a molecule ``mol`` - and returns these values as a list. - The order of the values is same as that of ``label_names``. - If the molecule does not have a - property with some label, this function fills the corresponding - index of the returned list with ``None``. - - Args: - mol (rdkit.Chem.Mol): molecule whose features to be extracted - label_names (None or iterable): list of label names. 
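
# A hedged illustration of MolGraph.get_label (defined in this module): RDKit
# properties that are present come back as strings, missing ones as None. The
# property names "qed" and "logP" are placeholders, not columns guaranteed by
# any dataset.
from rdkit import Chem

mol = Chem.MolFromSmiles("CCO")
mol.SetProp("qed", "0.41")
labels = MolGraph().get_label(mol, label_names=["qed", "logP"])
# labels == ["0.41", None]  ("qed" was set above, "logP" is absent)
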
- - Returns: - list of str: label information. Its length is equal to - that of ``label_names``. If ``label_names`` is ``None``, - this function returns an empty list. - - """ - if label_names is None: - return [] - label_list = [] - for label_name in label_names: - if mol.HasProp(label_name): - label_list.append(mol.GetProp(label_name)) - else: - label_list.append(None) - return label_list - - def type_check_num_atoms(self, mol, num_max_atoms=-1): - """Check number of atoms in `mol` does not exceed `num_max_atoms` - If number of atoms in `mol` exceeds the number `num_max_atoms`, it will - raise `MolGraphError` exception. - - Args: - mol (Mol): - num_max_atoms (int): If negative value is set, not check number of - atoms. - - """ - num_atoms = mol.GetNumAtoms() - if num_max_atoms >= 0 and num_atoms > num_max_atoms: - raise MolGraphError( - f"Number of atoms in mol {num_atoms} exceeds num_max_atoms {num_max_atoms}" - ) - - def construct_atomic_number_array(self, mol, out_size=-1): - """Returns atomic numbers of atoms consisting a molecule. - - Args: - mol (rdkit.Chem.Mol): Input molecule. - out_size (int): The size of returned array. - If this option is negative, it does not take any effect. - Otherwise, it must be larger than the number of atoms - in the input molecules. In that case, the tail of - the array is padded with zeros. - - Returns: - numpy.ndarray: an array consisting of atomic numbers - of atoms in the molecule. - """ - atom_list = [a.GetAtomicNum() for a in mol.GetAtoms()] - n_atom = len(atom_list) - if out_size < 0: - return np.array(atom_list, dtype=np.int32) - elif out_size >= n_atom: - atom_array = np.zeros(out_size, dtype=np.int32) - atom_array[:n_atom] = np.array(atom_list, dtype=np.int32) - return atom_array - else: - raise ValueError( - f"`out_size` (={out_size}) must be negative or larger than or equal to " - f"the number of atoms in the input molecules (={n_atom})." - ) - - def construct_adj_matrix(self, mol, out_size=-1, self_connection=True): - """Returns the adjacent matrix of the given molecule. - - This function returns the adjacent matrix of the given molecule. - Contrary to the specification of - :func:`rdkit.Chem.rdmolops.GetAdjacencyMatrix`, - The diagonal entries of the returned matrix are all-one. - - Args: - mol (rdkit.Chem.Mol): Input molecule. - out_size (int): The size of the returned matrix. - If this option is negative, it does not take any effect. - Otherwise, it must be larger than the number of atoms - in the input molecules. In that case, the adjacent - matrix is expanded and zeros are padded to right - columns and bottom rows. - self_connection (bool): Add self connection or not. - If True, diagonal element of adjacency matrix is filled with 1. - - Returns: - adj_array (numpy.ndarray): The adjacent matrix of the input molecule. - It is 2-dimensional array with shape (atoms1, atoms2), where - atoms1 & atoms2 represent from and to of the edge respectively. - If ``out_size`` is non-negative, the returned - its size is equal to that value. Otherwise, - it is equal to the number of atoms in the the molecule. - """ - adj = rdmolops.GetAdjacencyMatrix(mol) - s0, s1 = tuple(adj.shape) - if s0 != s1: - raise ValueError( - f"The adjacent matrix of the input moleculehas an invalid shape: ({s0}, " - f"{s1}). It must be square." 
- ) - if self_connection: - adj = adj + np.eye(s0) - if out_size < 0: - adj_array = adj.astype(np.float32) - elif out_size >= s0: - adj_array = np.zeros((out_size, out_size), dtype=np.float32) - adj_array[:s0, :s1] = adj - else: - raise ValueError( - f"`out_size` (={out_size}) must be negative or larger than or equal to " - f"the number of atoms in the input molecules (={s0})." - ) - return adj_array - - def construct_discrete_edge_matrix(self, mol, out_size=-1): - """Returns the edge-type dependent adjacency matrix of the given molecule. - - Args: - mol (rdkit.Chem.Mol): Input molecule. - out_size (int): The size of the returned matrix. - If this option is negative, it does not take any effect. - Otherwise, it must be larger than the number of atoms - in the input molecules. In that case, the adjacent - matrix is expanded and zeros are padded to right - columns and bottom rows. - - Returns: - adj_array (numpy.ndarray): The adjacent matrix of the input molecule. - It is 3-dimensional array with shape (edge_type, atoms1, atoms2), - where edge_type represents the bond type, - atoms1 & atoms2 represent from and to of the edge respectively. - If ``out_size`` is non-negative, its size is equal to that value. - Otherwise, it is equal to the number of atoms in the the molecule. - """ - if mol is None: - raise MolGraphError("mol is None") - N = mol.GetNumAtoms() - if out_size < 0: - size = N - elif out_size >= N: - size = out_size - else: - raise ValueError( - f"out_size {out_size} is smaller than number of atoms in mol {N}" - ) - adjs = np.zeros((4, size, size), dtype=np.float32) - bond_type_to_channel = { - Chem.BondType.SINGLE: 0, - Chem.BondType.DOUBLE: 1, - Chem.BondType.TRIPLE: 2, - Chem.BondType.AROMATIC: 3, - } - for bond in mol.GetBonds(): - bond_type = bond.GetBondType() - ch = bond_type_to_channel[bond_type] - i = bond.GetBeginAtomIdx() - j = bond.GetEndAtomIdx() - adjs[ch, i, j] = 1.0 - adjs[ch, j, i] = 1.0 - return adjs - - -class MolGraphError(Exception): - pass - - -class MOlFLOWDataset(io.Dataset): - """Class for moflow qm9 and zinc250k Dataset of a tuple of datasets. - - It combines multiple datasets into one dataset. Each example is represented - by a tuple whose ``i``-th item corresponds to the i-th dataset. - And each ``i``-th dataset is expected to be an instance of numpy.ndarray. - - Args: - file_path (str): Data set path. - data_name (str): Data name, "qm9" or "zinc250k" - valid_idx (List[int, ...]): Data for validate - mode (str): "train" or "eval", output Data - input_keys (Tuple[str, ...]): Input keys, such as ("nodes","edges",). - label_keys (Tuple[str, ...]): labels (str or list or None) . - smiles_col (str): smiles column - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of each constraint variable. Defaults to None. - transform_fn: An optional function applied to an item bofre returning - """ - - # Whether support batch indexing for speeding up fetching process. 
- batch_index: bool = True - - def __init__( - self, - file_path: str, - data_name: str, - valid_idx: List[int, ...], - mode: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - smiles_col: str, - weight_dict: Optional[Dict[str, float]] = None, - transform_fn: Optional[Callable] = None, - ): - super().__init__() - self.file_path = file_path - self.data_name = data_name - self.input_keys = input_keys - self.label_keys = label_keys - self.smiles_col = smiles_col - self.weight_dict = weight_dict - - if data_name == "qm9": - max_atoms = 9 - elif data_name == "zinc250k": - max_atoms = 38 - - self.molgraph = MolGraph(out_size=max_atoms, kekulize=True) - self.logger = logger - # read and deal data from file - inputs, labels = self.load_csv_file(file_path, data_name + ".csv") - train_idx = [t for t in range(len(inputs[0])) if t not in valid_idx] - self.train_idx = train_idx - # data train or test - if mode == "train": - inputs = [ - np.array(list(io.Subset(dataset=in_put, indices=train_idx))) - for in_put in inputs - ] - labels = np.array(list(io.Subset(dataset=labels, indices=train_idx))) - elif mode == "eval": - inputs = [ - np.array(list(io.Subset(dataset=in_put, indices=valid_idx))) - for in_put in inputs - ] - labels = np.array(list(io.Subset(dataset=labels, indices=valid_idx))) - - # fetch input data - self.input = {key: inputs[i] for i, key in enumerate(self.input_keys)} - # fetch label data - self.label = {"label": labels} - - self.logger.message( - f"Dataload finished. MODE {mode}, " - f"inputs {len(next(iter(self.input.values())))}, " - f"labelS {len(next(iter(self.label.values())))}" - ) - - self._length = len(next(iter(self.input.values()))) - self.transform = transform_fn - - def __getitem__(self, index: int): - input_item = {key: value[index] for key, value in self.input.items()} - label_item = {key: value[index] for key, value in self.label.items()} - - if self.transform: - input_item, label_item = self.transform_func(input_item, label_item) - - return (input_item, label_item, {}) - - def __len__(self): - return self._length - - def load_csv_file(self, path: str, name: str): - """Parse DataFrame using `MolGraph` and prepare a dataset instance - Labels are extracted from `labels` columns and input features are - extracted from smiles information in `smiles` column. - """ - file = os.path.join(path, name) - df = pd.read_csv(file, index_col=0) - all_nodes = [] - all_edges = [] - # inputs = [] - - total_count = df.shape[0] - fail_count = 0 - success_count = 0 - if isinstance(self.molgraph, MolGraph): - for smiles in tqdm(df[self.smiles_col], total=df.shape[0]): - try: - mol = Chem.MolFromSmiles(smiles) - if mol is None: - fail_count += 1 - continue - canonical_smiles, mol = self.molgraph.prepare_smiles_and_mol(mol) - nodes, edges = self.molgraph.get_input_features(mol) - - except MolGraphError as e: - fail_count += 1 - self.logger.warning(f"parse(), type: {type(e).__name__}, {e.args}") - continue - except Exception as e: - self.logger.warning(f"parse(), type: {type(e).__name__}, {e.args}") - fail_count += 1 - continue - # raw_data = misc.convert_to_dict(np.array([nodes, edges]), self.input_keys) - - all_nodes.append(nodes) - all_edges.append(edges) - # inputs.append(raw_data) - - success_count += 1 - - labels = np.array( - [*(df[label_col].values for label_col in self.label_keys)] - ).T - result = [np.array(all_nodes), np.array(all_edges)], labels - self.logger.message( - f"Preprocess finished. 
FAIL {fail_count}, " - f"SUCCESS {success_count}, TOTAL {total_count}" - ) - else: - raise NotImplementedError - - return result - - def transform_func(self, data_dict, label_dict): - items = [] - length = len(next(iter(data_dict.values()))) - for idx in range(length): - input_item = [value[idx] for key, value in data_dict.items()] - label_item = [value[idx] for key, value in label_dict.items()] - item = input_item + label_item - if self.transform: - item = self.transform(item) - items.append(item) - items = np.array(items, dtype=object).T - - data_dict = {key: np.stack(items[i], axis=0) for i, key in enumerate(data_dict)} - label_dict = {key: np.vstack(item[2]) for key in label_dict} - - return data_dict, label_dict +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2020 Chengxi Zang + +from __future__ import annotations + +import os +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple + +import numpy as np +import pandas as pd +from paddle import io +from tqdm import tqdm + +from ppsci.utils import logger + +try: + from rdkit import Chem + from rdkit.Chem import rdmolops +except ModuleNotFoundError: + pass + + +class MolGraph: + """ + Args: + max_atoms (int): Max number of atoms for each molecule, if the + number of atoms is more than this value, this data is simply + ignored. + Setting negative value indicates no limit for max atoms. + out_size (int): It specifies the size of array returned by + `get_input_features`. + If the number of atoms in the molecule is less than this value, + the returned arrays is padded to have fixed size. + Setting negative value indicates do not pad returned array. + add_Hs (bool): If True, implicit Hs are added. + kekulize (bool): If True, Kekulizes the molecule. + """ + + def __init__(self, max_atoms=-1, out_size=-1, add_Hs=False, kekulize=False): + super(MolGraph, self).__init__() + self.add_Hs = add_Hs + self.kekulize = kekulize + if max_atoms >= 0 and out_size >= 0 and max_atoms > out_size: + raise ValueError( + f"max_atoms {max_atoms} must be less or equal to out_size {out_size}" + ) + self.max_atoms = max_atoms + self.out_size = out_size + + def get_input_features(self, mol): + """ + get input features + Args: + mol (Mol): mol instance + + Returns: + (tuple): (`atom`, `adj`) + + """ + self.type_check_num_atoms(mol, self.max_atoms) + atom_array = self.construct_atomic_number_array(mol, out_size=self.out_size) + adj_array = self.construct_discrete_edge_matrix(mol, out_size=self.out_size) + return atom_array, adj_array + + def prepare_smiles_and_mol(self, mol): + """Prepare `smiles` and `mol` used in following preprocessing. + This method is called before `get_input_features` is called, by parser + class. 
+ This method may be overriden to support custom `smile`/`mol` extraction + Args: + mol (mol): mol instance + + Returns (tuple): (`smiles`, `mol`) + """ + canonical_smiles = Chem.MolToSmiles(mol, isomericSmiles=False, canonical=True) + mol = Chem.MolFromSmiles(canonical_smiles) + if self.add_Hs: + mol = Chem.AddHs(mol) + if self.kekulize: + Chem.Kekulize(mol) + return canonical_smiles, mol + + def get_label(self, mol, label_names=None): + """Extracts label information from a molecule. + This method extracts properties whose keys are + specified by ``label_names`` from a molecule ``mol`` + and returns these values as a list. + The order of the values is same as that of ``label_names``. + If the molecule does not have a + property with some label, this function fills the corresponding + index of the returned list with ``None``. + + Args: + mol (rdkit.Chem.Mol): molecule whose features to be extracted + label_names (None or iterable): list of label names. + + Returns: + list of str: label information. Its length is equal to + that of ``label_names``. If ``label_names`` is ``None``, + this function returns an empty list. + + """ + if label_names is None: + return [] + label_list = [] + for label_name in label_names: + if mol.HasProp(label_name): + label_list.append(mol.GetProp(label_name)) + else: + label_list.append(None) + return label_list + + def type_check_num_atoms(self, mol, num_max_atoms=-1): + """Check number of atoms in `mol` does not exceed `num_max_atoms` + If number of atoms in `mol` exceeds the number `num_max_atoms`, it will + raise `MolGraphError` exception. + + Args: + mol (Mol): + num_max_atoms (int): If negative value is set, not check number of + atoms. + + """ + num_atoms = mol.GetNumAtoms() + if num_max_atoms >= 0 and num_atoms > num_max_atoms: + raise MolGraphError( + f"Number of atoms in mol {num_atoms} exceeds num_max_atoms {num_max_atoms}" + ) + + def construct_atomic_number_array(self, mol, out_size=-1): + """Returns atomic numbers of atoms consisting a molecule. + + Args: + mol (rdkit.Chem.Mol): Input molecule. + out_size (int): The size of returned array. + If this option is negative, it does not take any effect. + Otherwise, it must be larger than the number of atoms + in the input molecules. In that case, the tail of + the array is padded with zeros. + + Returns: + numpy.ndarray: an array consisting of atomic numbers + of atoms in the molecule. + """ + atom_list = [a.GetAtomicNum() for a in mol.GetAtoms()] + n_atom = len(atom_list) + if out_size < 0: + return np.array(atom_list, dtype=np.int32) + elif out_size >= n_atom: + atom_array = np.zeros(out_size, dtype=np.int32) + atom_array[:n_atom] = np.array(atom_list, dtype=np.int32) + return atom_array + else: + raise ValueError( + f"`out_size` (={out_size}) must be negative or larger than or equal to " + f"the number of atoms in the input molecules (={n_atom})." + ) + + def construct_adj_matrix(self, mol, out_size=-1, self_connection=True): + """Returns the adjacent matrix of the given molecule. + + This function returns the adjacent matrix of the given molecule. + Contrary to the specification of + :func:`rdkit.Chem.rdmolops.GetAdjacencyMatrix`, + The diagonal entries of the returned matrix are all-one. + + Args: + mol (rdkit.Chem.Mol): Input molecule. + out_size (int): The size of the returned matrix. + If this option is negative, it does not take any effect. + Otherwise, it must be larger than the number of atoms + in the input molecules. 
In that case, the adjacent + matrix is expanded and zeros are padded to right + columns and bottom rows. + self_connection (bool): Add self connection or not. + If True, diagonal element of adjacency matrix is filled with 1. + + Returns: + adj_array (numpy.ndarray): The adjacent matrix of the input molecule. + It is 2-dimensional array with shape (atoms1, atoms2), where + atoms1 & atoms2 represent from and to of the edge respectively. + If ``out_size`` is non-negative, the returned + its size is equal to that value. Otherwise, + it is equal to the number of atoms in the the molecule. + """ + adj = rdmolops.GetAdjacencyMatrix(mol) + s0, s1 = tuple(adj.shape) + if s0 != s1: + raise ValueError( + f"The adjacent matrix of the input moleculehas an invalid shape: ({s0}, " + f"{s1}). It must be square." + ) + if self_connection: + adj = adj + np.eye(s0) + if out_size < 0: + adj_array = adj.astype(np.float32) + elif out_size >= s0: + adj_array = np.zeros((out_size, out_size), dtype=np.float32) + adj_array[:s0, :s1] = adj + else: + raise ValueError( + f"`out_size` (={out_size}) must be negative or larger than or equal to " + f"the number of atoms in the input molecules (={s0})." + ) + return adj_array + + def construct_discrete_edge_matrix(self, mol, out_size=-1): + """Returns the edge-type dependent adjacency matrix of the given molecule. + + Args: + mol (rdkit.Chem.Mol): Input molecule. + out_size (int): The size of the returned matrix. + If this option is negative, it does not take any effect. + Otherwise, it must be larger than the number of atoms + in the input molecules. In that case, the adjacent + matrix is expanded and zeros are padded to right + columns and bottom rows. + + Returns: + adj_array (numpy.ndarray): The adjacent matrix of the input molecule. + It is 3-dimensional array with shape (edge_type, atoms1, atoms2), + where edge_type represents the bond type, + atoms1 & atoms2 represent from and to of the edge respectively. + If ``out_size`` is non-negative, its size is equal to that value. + Otherwise, it is equal to the number of atoms in the the molecule. + """ + if mol is None: + raise MolGraphError("mol is None") + N = mol.GetNumAtoms() + if out_size < 0: + size = N + elif out_size >= N: + size = out_size + else: + raise ValueError( + f"out_size {out_size} is smaller than number of atoms in mol {N}" + ) + adjs = np.zeros((4, size, size), dtype=np.float32) + bond_type_to_channel = { + Chem.BondType.SINGLE: 0, + Chem.BondType.DOUBLE: 1, + Chem.BondType.TRIPLE: 2, + Chem.BondType.AROMATIC: 3, + } + for bond in mol.GetBonds(): + bond_type = bond.GetBondType() + ch = bond_type_to_channel[bond_type] + i = bond.GetBeginAtomIdx() + j = bond.GetEndAtomIdx() + adjs[ch, i, j] = 1.0 + adjs[ch, j, i] = 1.0 + return adjs + + +class MolGraphError(Exception): + pass + + +class MOlFLOWDataset(io.Dataset): + """Class for moflow qm9 and zinc250k Dataset of a tuple of datasets. + + It combines multiple datasets into one dataset. Each example is represented + by a tuple whose ``i``-th item corresponds to the i-th dataset. + And each ``i``-th dataset is expected to be an instance of numpy.ndarray. + + Args: + file_path (str): Data set path. + data_name (str): Data name, "qm9" or "zinc250k" + valid_idx (List[int, ...]): Data for validate + mode (str): "train" or "eval", output Data + input_keys (Tuple[str, ...]): Input keys, such as ("nodes","edges",). + label_keys (Tuple[str, ...]): labels (str or list or None) . 
+ smiles_col (str): smiles column + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of each constraint variable. Defaults to None. + transform_fn: An optional function applied to an item bofre returning + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = True + + def __init__( + self, + file_path: str, + data_name: str, + valid_idx: List[int, ...], + mode: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + smiles_col: str, + weight_dict: Optional[Dict[str, float]] = None, + transform_fn: Optional[Callable] = None, + ): + super().__init__() + self.file_path = file_path + self.data_name = data_name + self.input_keys = input_keys + self.label_keys = label_keys + self.smiles_col = smiles_col + self.weight_dict = weight_dict + + if data_name == "qm9": + max_atoms = 9 + elif data_name == "zinc250k": + max_atoms = 38 + + self.molgraph = MolGraph(out_size=max_atoms, kekulize=True) + self.logger = logger + # read and deal data from file + inputs, labels = self.load_csv_file(file_path, data_name + ".csv") + train_idx = [t for t in range(len(inputs[0])) if t not in valid_idx] + self.train_idx = train_idx + # data train or test + if mode == "train": + inputs = [ + np.array(list(io.Subset(dataset=in_put, indices=train_idx))) + for in_put in inputs + ] + labels = np.array(list(io.Subset(dataset=labels, indices=train_idx))) + elif mode == "eval": + inputs = [ + np.array(list(io.Subset(dataset=in_put, indices=valid_idx))) + for in_put in inputs + ] + labels = np.array(list(io.Subset(dataset=labels, indices=valid_idx))) + + # fetch input data + self.input = {key: inputs[i] for i, key in enumerate(self.input_keys)} + # fetch label data + self.label = {"label": labels} + + self.logger.message( + f"Dataload finished. MODE {mode}, " + f"inputs {len(next(iter(self.input.values())))}, " + f"labelS {len(next(iter(self.label.values())))}" + ) + + self._length = len(next(iter(self.input.values()))) + self.transform = transform_fn + + def __getitem__(self, index: int): + input_item = {key: value[index] for key, value in self.input.items()} + label_item = {key: value[index] for key, value in self.label.items()} + + if self.transform: + input_item, label_item = self.transform_func(input_item, label_item) + + return (input_item, label_item, {}) + + def __len__(self): + return self._length + + def load_csv_file(self, path: str, name: str): + """Parse DataFrame using `MolGraph` and prepare a dataset instance + Labels are extracted from `labels` columns and input features are + extracted from smiles information in `smiles` column. 
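
# A hedged construction sketch for the MOlFLOWDataset class defined above. The
# directory, validation indices, label column and SMILES column here are
# placeholders; data_name selects max_atoms (9 for "qm9", 38 for "zinc250k")
# and load_csv_file reads "<file_path>/<data_name>.csv".
from ppsci.data.dataset.moflow_dataset import MOlFLOWDataset

dataset = MOlFLOWDataset(
    file_path="/path/to/qm9_dir",    # placeholder directory containing qm9.csv
    data_name="qm9",
    valid_idx=[0, 1, 2],             # placeholder validation indices
    mode="train",
    input_keys=("nodes", "edges"),
    label_keys=("qed",),             # placeholder label column name
    smiles_col="smiles",             # placeholder SMILES column name
)
nodes_and_edges, label, _ = dataset[0]  # ({"nodes": ..., "edges": ...}, {"label": ...}, {})
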
+ """ + file = os.path.join(path, name) + df = pd.read_csv(file, index_col=0) + all_nodes = [] + all_edges = [] + # inputs = [] + + total_count = df.shape[0] + fail_count = 0 + success_count = 0 + if isinstance(self.molgraph, MolGraph): + for smiles in tqdm(df[self.smiles_col], total=df.shape[0]): + try: + mol = Chem.MolFromSmiles(smiles) + if mol is None: + fail_count += 1 + continue + canonical_smiles, mol = self.molgraph.prepare_smiles_and_mol(mol) + nodes, edges = self.molgraph.get_input_features(mol) + + except MolGraphError as e: + fail_count += 1 + self.logger.warning(f"parse(), type: {type(e).__name__}, {e.args}") + continue + except Exception as e: + self.logger.warning(f"parse(), type: {type(e).__name__}, {e.args}") + fail_count += 1 + continue + # raw_data = misc.convert_to_dict(np.array([nodes, edges]), self.input_keys) + + all_nodes.append(nodes) + all_edges.append(edges) + # inputs.append(raw_data) + + success_count += 1 + + labels = np.array( + [*(df[label_col].values for label_col in self.label_keys)] + ).T + result = [np.array(all_nodes), np.array(all_edges)], labels + self.logger.message( + f"Preprocess finished. FAIL {fail_count}, " + f"SUCCESS {success_count}, TOTAL {total_count}" + ) + else: + raise NotImplementedError + + return result + + def transform_func(self, data_dict, label_dict): + items = [] + length = len(next(iter(data_dict.values()))) + for idx in range(length): + input_item = [value[idx] for key, value in data_dict.items()] + label_item = [value[idx] for key, value in label_dict.items()] + item = input_item + label_item + if self.transform: + item = self.transform(item) + items.append(item) + items = np.array(items, dtype=object).T + + data_dict = {key: np.stack(items[i], axis=0) for i, key in enumerate(data_dict)} + label_dict = {key: np.vstack(item[2]) for key in label_dict} + + return data_dict, label_dict diff --git a/ppsci/data/dataset/mrms_dataset.py b/ppsci/data/dataset/mrms_dataset.py index bee3337f7e..d235d06397 100644 --- a/ppsci/data/dataset/mrms_dataset.py +++ b/ppsci/data/dataset/mrms_dataset.py @@ -1,251 +1,251 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import glob -import os.path as osp -from datetime import datetime -from datetime import timedelta -from typing import Dict -from typing import List -from typing import Optional -from typing import Tuple - -try: - import h5py -except ModuleNotFoundError: - pass -import numpy as np -import paddle -from paddle import io -from paddle import vision - - -class MRMSDataset(io.Dataset): - """Class for MRMS dataset. MRMS day's data is stored in a .h5 file. Each file includes keys "date"/"time_interval"/"dataset". - - Args: - file_path (str): Dataset path. - input_keys (Tuple[str, ...]): Input keys, usually there is only one, such as ("input",). - label_keys (Tuple[str, ...]): Output keys, usually there is only one, such as ("output",). 
- weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. - date_period (Tuple[str,...], optional): Dates of data. Scale is [start_date, end_date] with format "%Y%m%d". Defaults to ("20230101","20230101"). - num_input_timestamps (int, optional): Number of timestamp of input. Defaults to 1. - num_label_timestamps (int, optional): Number of timestamp of label. Defaults to 1. - stride (int, optional): Stride of sampling data. Defaults to 1. - transforms (Optional[vision.Compose]): Composed transform functor(s). Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.MRMSDataset( - ... "file_path": "/path/to/MRMSDataset", - ... "input_keys": ("input",), - ... "label_keys": ("output",), - ... "date_period": ("20230101","20230131"), - ... "num_input_timestamps": 9, - ... "num_label_timestamps": 20, - ... "transforms": transform, - ... "stride": 1, - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - weight_dict: Optional[Dict[str, float]] = None, - date_period: Tuple[str, ...] = ("20230101", "20230101"), - num_input_timestamps: int = 1, - num_label_timestamps: int = 1, - stride: int = 1, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.file_path = file_path - self.input_keys = input_keys - self.label_keys = label_keys - - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.date_list = self._get_date_strs(date_period) - self.num_input_timestamps = num_input_timestamps - self.num_label_timestamps = num_label_timestamps - self.stride = stride - self.transforms = transforms - - self.files = self._read_data(file_path) - self.num_samples_per_day = self.files[0].shape[0] - self.num_samples = self.num_samples_per_day * len(self.date_list) - - def _get_date_strs(self, date_period: Tuple[str, ...]) -> List: - """Get a string list of all dates within given period. - - Args: - date_period (Tuple[str,...]): Dates of data. Scale is [start_date, end_date] with format "%Y%m%d". 
- """ - start_time = datetime.strptime(date_period[0], "%Y%m%d") - end_time = datetime.strptime(date_period[1], "%Y%m%d") - results = [] - current_time = start_time - while current_time <= end_time: - date_str = current_time.strftime("%Y%m%d") - results.append(date_str) - current_time += timedelta(days=1) - return results - - def _read_data(self, path: str): - if path.endswith(".h5"): - paths = [path] - else: - paths = [ - _path - for _path in glob.glob(osp.join(path, "*.h5")) - if _path.split(".h5")[0].split("_")[-1] in self.date_list - ] - assert len(paths) == len( - self.date_list - ), f"Data of {len(self.date_list)} days wanted but only {len(paths)} days be found" - paths.sort() - - files = [h5py.File(_path, "r")["dataset"] for _path in paths] - return files - - def __len__(self): - return ( - self.num_samples // self.stride - - self.num_input_timestamps - - self.num_label_timestamps - + 1 - ) - - def __getitem__(self, global_idx): - global_idx *= self.stride - _samples = np.empty( - ( - self.num_input_timestamps + self.num_label_timestamps, - *self.files[0].shape[1:], - ), - dtype=paddle.get_default_dtype(), - ) - for idx in range(self.num_input_timestamps + self.num_label_timestamps): - sample_idx = global_idx + idx * self.stride - day_idx = sample_idx // self.num_samples_per_day - local_idx = sample_idx % self.num_samples_per_day - _samples[idx] = self.files[day_idx][local_idx] - - input_item = {self.input_keys[0]: _samples[: self.num_input_timestamps]} - label_item = {self.label_keys[0]: _samples[self.num_input_timestamps :]} - - weight_shape = [1] * len(next(iter(label_item.values())).shape) - weight_item = { - key: np.full(weight_shape, value, paddle.get_default_dtype()) - for key, value in self.weight_dict.items() - } - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return input_item, label_item, weight_item - - -class MRMSSampledDataset(io.Dataset): - """Class for MRMS sampled dataset. MRMS one sample's data is stored in a .h5 file. Each file includes keys "date"/"time_interval"/"dataset". - The class just return data by input_item and values of label_item are empty for all label_keys. - - Args: - file_path (str): Dataset path. - input_keys (Tuple[str, ...]): Input keys, such as ("input",). - label_keys (Tuple[str, ...]): Output keys, such as ("output",). - weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. - num_total_timestamps (int, optional): Number of timestamp of input+label. Defaults to 1. - transforms (Optional[vision.Compose]): Composed transform functor(s). Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.MRMSSampledDataset( - ... "file_path": "/path/to/MRMSSampledDataset", - ... "input_keys": ("input",), - ... "label_keys": ("output",), - ... "num_total_timestamps": 29, - ... 
) # doctest: +SKIP - >>> # get the length of the dataset - >>> dataset_size = len(dataset) # doctest: +SKIP - >>> # get the first sample of the data - >>> first_sample = dataset[0] # doctest: +SKIP - >>> print("First sample:", first_sample) # doctest: +SKIP - """ - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - weight_dict: Optional[Dict[str, float]] = None, - num_total_timestamps: int = 1, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.file_path = file_path - self.input_keys = input_keys - self.label_keys = label_keys - - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.num_total_timestamps = num_total_timestamps - self.transforms = transforms - - self.files = self._read_data(file_path) - self.num_samples = len(self.files) - - def _read_data(self, path: str): - paths = glob.glob(osp.join(path, "*.h5")) - paths.sort() - files = [h5py.File(_path, "r")["dataset"] for _path in paths] - return files - - def __len__(self): - return self.num_samples - self.num_total_timestamps + 1 - - def __getitem__(self, global_idx): - _samples = [] - for idx in range(global_idx, global_idx + self.num_total_timestamps): - _samples.append(np.expand_dims(self.files[idx], axis=0)) - - input_item = { - self.input_keys[0]: np.concatenate(_samples, axis=0).astype( - paddle.get_default_dtype() - ) - } - label_item = {} - weight_item = {} - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return input_item, label_item, weight_item +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import glob +import os.path as osp +from datetime import datetime +from datetime import timedelta +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple + +try: + import h5py +except ModuleNotFoundError: + pass +import numpy as np +import paddle +from paddle import io +from paddle import vision + + +class MRMSDataset(io.Dataset): + """Class for MRMS dataset. MRMS day's data is stored in a .h5 file. Each file includes keys "date"/"time_interval"/"dataset". + + Args: + file_path (str): Dataset path. + input_keys (Tuple[str, ...]): Input keys, usually there is only one, such as ("input",). + label_keys (Tuple[str, ...]): Output keys, usually there is only one, such as ("output",). + weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. + date_period (Tuple[str,...], optional): Dates of data. Scale is [start_date, end_date] with format "%Y%m%d". Defaults to ("20230101","20230101"). + num_input_timestamps (int, optional): Number of timestamp of input. Defaults to 1. + num_label_timestamps (int, optional): Number of timestamp of label. Defaults to 1. 
+ stride (int, optional): Stride of sampling data. Defaults to 1. + transforms (Optional[vision.Compose]): Composed transform functor(s). Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.MRMSDataset( + ... "file_path": "/path/to/MRMSDataset", + ... "input_keys": ("input",), + ... "label_keys": ("output",), + ... "date_period": ("20230101","20230131"), + ... "num_input_timestamps": 9, + ... "num_label_timestamps": 20, + ... "transforms": transform, + ... "stride": 1, + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + weight_dict: Optional[Dict[str, float]] = None, + date_period: Tuple[str, ...] = ("20230101", "20230101"), + num_input_timestamps: int = 1, + num_label_timestamps: int = 1, + stride: int = 1, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.file_path = file_path + self.input_keys = input_keys + self.label_keys = label_keys + + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.date_list = self._get_date_strs(date_period) + self.num_input_timestamps = num_input_timestamps + self.num_label_timestamps = num_label_timestamps + self.stride = stride + self.transforms = transforms + + self.files = self._read_data(file_path) + self.num_samples_per_day = self.files[0].shape[0] + self.num_samples = self.num_samples_per_day * len(self.date_list) + + def _get_date_strs(self, date_period: Tuple[str, ...]) -> List: + """Get a string list of all dates within given period. + + Args: + date_period (Tuple[str,...]): Dates of data. Scale is [start_date, end_date] with format "%Y%m%d". 
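
# A small worked example of the date expansion implemented below (the dates are
# arbitrary): both endpoints are included and month boundaries are handled by
# timedelta, so
#
#   dataset._get_date_strs(("20230130", "20230202"))
#   # -> ["20230130", "20230131", "20230201", "20230202"]
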
+ """ + start_time = datetime.strptime(date_period[0], "%Y%m%d") + end_time = datetime.strptime(date_period[1], "%Y%m%d") + results = [] + current_time = start_time + while current_time <= end_time: + date_str = current_time.strftime("%Y%m%d") + results.append(date_str) + current_time += timedelta(days=1) + return results + + def _read_data(self, path: str): + if path.endswith(".h5"): + paths = [path] + else: + paths = [ + _path + for _path in glob.glob(osp.join(path, "*.h5")) + if _path.split(".h5")[0].split("_")[-1] in self.date_list + ] + assert len(paths) == len( + self.date_list + ), f"Data of {len(self.date_list)} days wanted but only {len(paths)} days be found" + paths.sort() + + files = [h5py.File(_path, "r")["dataset"] for _path in paths] + return files + + def __len__(self): + return ( + self.num_samples // self.stride + - self.num_input_timestamps + - self.num_label_timestamps + + 1 + ) + + def __getitem__(self, global_idx): + global_idx *= self.stride + _samples = np.empty( + ( + self.num_input_timestamps + self.num_label_timestamps, + *self.files[0].shape[1:], + ), + dtype=paddle.get_default_dtype(), + ) + for idx in range(self.num_input_timestamps + self.num_label_timestamps): + sample_idx = global_idx + idx * self.stride + day_idx = sample_idx // self.num_samples_per_day + local_idx = sample_idx % self.num_samples_per_day + _samples[idx] = self.files[day_idx][local_idx] + + input_item = {self.input_keys[0]: _samples[: self.num_input_timestamps]} + label_item = {self.label_keys[0]: _samples[self.num_input_timestamps :]} + + weight_shape = [1] * len(next(iter(label_item.values())).shape) + weight_item = { + key: np.full(weight_shape, value, paddle.get_default_dtype()) + for key, value in self.weight_dict.items() + } + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return input_item, label_item, weight_item + + +class MRMSSampledDataset(io.Dataset): + """Class for MRMS sampled dataset. MRMS one sample's data is stored in a .h5 file. Each file includes keys "date"/"time_interval"/"dataset". + The class just return data by input_item and values of label_item are empty for all label_keys. + + Args: + file_path (str): Dataset path. + input_keys (Tuple[str, ...]): Input keys, such as ("input",). + label_keys (Tuple[str, ...]): Output keys, such as ("output",). + weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. + num_total_timestamps (int, optional): Number of timestamp of input+label. Defaults to 1. + transforms (Optional[vision.Compose]): Composed transform functor(s). Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.MRMSSampledDataset( + ... "file_path": "/path/to/MRMSSampledDataset", + ... "input_keys": ("input",), + ... "label_keys": ("output",), + ... "num_total_timestamps": 29, + ... 
) # doctest: +SKIP + >>> # get the length of the dataset + >>> dataset_size = len(dataset) # doctest: +SKIP + >>> # get the first sample of the data + >>> first_sample = dataset[0] # doctest: +SKIP + >>> print("First sample:", first_sample) # doctest: +SKIP + """ + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + weight_dict: Optional[Dict[str, float]] = None, + num_total_timestamps: int = 1, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.file_path = file_path + self.input_keys = input_keys + self.label_keys = label_keys + + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.num_total_timestamps = num_total_timestamps + self.transforms = transforms + + self.files = self._read_data(file_path) + self.num_samples = len(self.files) + + def _read_data(self, path: str): + paths = glob.glob(osp.join(path, "*.h5")) + paths.sort() + files = [h5py.File(_path, "r")["dataset"] for _path in paths] + return files + + def __len__(self): + return self.num_samples - self.num_total_timestamps + 1 + + def __getitem__(self, global_idx): + _samples = [] + for idx in range(global_idx, global_idx + self.num_total_timestamps): + _samples.append(np.expand_dims(self.files[idx], axis=0)) + + input_item = { + self.input_keys[0]: np.concatenate(_samples, axis=0).astype( + paddle.get_default_dtype() + ) + } + label_item = {} + weight_item = {} + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return input_item, label_item, weight_item diff --git a/ppsci/data/dataset/npz_dataset.py b/ppsci/data/dataset/npz_dataset.py index 76d737d021..6df0f58e4e 100644 --- a/ppsci/data/dataset/npz_dataset.py +++ b/ppsci/data/dataset/npz_dataset.py @@ -1,279 +1,279 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle -from paddle import io -from paddle import vision - -from ppsci.utils import misc -from ppsci.utils import reader - - -class NPZDataset(io.Dataset): - """Dataset class for .npz file. - - Args: - file_path (str): Npz file path. - input_keys (Tuple[str, ...]): List of input keys. - label_keys (Tuple[str, ...], optional): List of label keys. Defaults to (). - alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. - i.e. {inner_key: outer_key}. Defaults to None. - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of - each constraint variable. Defaults to None. - timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data - in the time dimension. Defaults to None. 
- transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.NPZDataset( - ... "/path/to/file.npz" - ... ("x",), - ... ("u",), - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = True - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...] = (), - alias_dict: Optional[Dict[str, str]] = None, - weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, - timestamps: Optional[Tuple[float, ...]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - - # read raw data from file - raw_data = reader.load_npz_file( - file_path, - input_keys + label_keys, - alias_dict, - ) - # filter raw data by given timestamps if specified - if timestamps is not None: - if "t" in raw_data: - # filter data according to given timestamps - raw_time_array = raw_data["t"] - mask = [] - for ti in timestamps: - mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - mask = np.concatenate(mask, 0) - raw_data = raw_data[mask] - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - else: - # repeat data according to given timestamps - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - raw_data = misc.combine_array_with_time(raw_data, timestamps) - self.input_keys = ("t",) + tuple(self.input_keys) - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - - # fetch input data - self.input = { - key: value for key, value in raw_data.items() if key in self.input_keys - } - # fetch label data - self.label = { - key: value for key, value in raw_data.items() if key in self.label_keys - } - - # prepare weights - self.weight = {} - if weight_dict is not None: - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), value - ) - elif callable(value): - func = value - self.weight[key] = func(self.input) - if isinstance(self.weight[key], (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), self.weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - self.transforms = transforms - self._len = len(next(iter(self.input.values()))) - - def __getitem__(self, idx): - input_item = {key: value[idx] for key, value in self.input.items()} - label_item = {key: value[idx] for key, value in self.label.items()} - weight_item = {key: value[idx] for key, value in self.weight.items()} - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return (input_item, label_item, weight_item) - - def __len__(self): - return self._len - - -class IterableNPZDataset(io.IterableDataset): - """IterableNPZDataset for full-data loading. - - Args: - file_path (str): Npz file path. - input_keys (Tuple[str, ...]): List of input keys. - label_keys (Tuple[str, ...], optional): List of label keys. Defaults to (). - alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. - i.e. {inner_key: outer_key}. Defaults to None. 
- weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of - each constraint variable. Defaults to None. - timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data - in the time dimension. Defaults to None. - transforms (Optional[vision.Compose]): Compose object contains sample wise - transform(s). Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.IterableNPZDataset( - ... "/path/to/file.npz" - ... ("x",), - ... ("u",), - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...] = (), - alias_dict: Optional[Dict[str, str]] = None, - weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, - timestamps: Optional[Tuple[float, ...]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - - # read raw data from file - raw_data = reader.load_npz_file( - file_path, - input_keys + label_keys, - alias_dict, - ) - # filter raw data by given timestamps if specified - if timestamps is not None: - if "t" in raw_data: - # filter data according to given timestamps - raw_time_array = raw_data["t"] - mask = [] - for ti in timestamps: - mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - mask = np.concatenate(mask, 0) - raw_data = raw_data[mask] - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - else: - # repeat data according to given timestamps - raw_data = misc.convert_to_array( - raw_data, self.input_keys + self.label_keys - ) - raw_data = misc.combine_array_with_time(raw_data, timestamps) - self.input_keys = ("t",) + tuple(self.input_keys) - raw_data = misc.convert_to_dict( - raw_data, self.input_keys + self.label_keys - ) - - # fetch input data - self.input = { - key: value for key, value in raw_data.items() if key in self.input_keys - } - # fetch label data - self.label = { - key: value for key, value in raw_data.items() if key in self.label_keys - } - - # prepare weights - self.weight = {} - if weight_dict is not None: - for key, value in weight_dict.items(): - if isinstance(value, (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), value - ) - elif callable(value): - func = value - self.weight[key] = func(self.input) - if isinstance(self.weight[key], (int, float)): - self.weight[key] = np.full_like( - next(iter(self.label.values())), self.weight[key] - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - self.input = {key: paddle.to_tensor(value) for key, value in self.input.items()} - self.label = {key: paddle.to_tensor(value) for key, value in self.label.items()} - self.weight = { - key: paddle.to_tensor(value) for key, value in self.weight.items() - } - - self.transforms = transforms - self._len = len(next(iter(self.input.values()))) - - @property - def num_samples(self): - """Number of samples within current dataset.""" - return self._len - - def __iter__(self): - if callable(self.transforms): - input_, label_, weight_ = self.transforms( - self.input, self.label, self.weight - ) - yield input_, label_, weight_ - else: - yield self.input, self.label, self.weight - - def __len__(self): - return 1 +# Copyright (c) 2023 PaddlePaddle Authors. 
All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import numpy as np +import paddle +from paddle import io +from paddle import vision + +from ppsci.utils import misc +from ppsci.utils import reader + + +class NPZDataset(io.Dataset): + """Dataset class for .npz file. + + Args: + file_path (str): Npz file path. + input_keys (Tuple[str, ...]): List of input keys. + label_keys (Tuple[str, ...], optional): List of label keys. Defaults to (). + alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. + i.e. {inner_key: outer_key}. Defaults to None. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of + each constraint variable. Defaults to None. + timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data + in the time dimension. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.NPZDataset( + ... "/path/to/file.npz" + ... ("x",), + ... ("u",), + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = True + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...] 
= (), + alias_dict: Optional[Dict[str, str]] = None, + weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, + timestamps: Optional[Tuple[float, ...]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + + # read raw data from file + raw_data = reader.load_npz_file( + file_path, + input_keys + label_keys, + alias_dict, + ) + # filter raw data by given timestamps if specified + if timestamps is not None: + if "t" in raw_data: + # filter data according to given timestamps + raw_time_array = raw_data["t"] + mask = [] + for ti in timestamps: + mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + mask = np.concatenate(mask, 0) + raw_data = raw_data[mask] + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + else: + # repeat data according to given timestamps + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + raw_data = misc.combine_array_with_time(raw_data, timestamps) + self.input_keys = ("t",) + tuple(self.input_keys) + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + + # fetch input data + self.input = { + key: value for key, value in raw_data.items() if key in self.input_keys + } + # fetch label data + self.label = { + key: value for key, value in raw_data.items() if key in self.label_keys + } + + # prepare weights + self.weight = {} + if weight_dict is not None: + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), value + ) + elif callable(value): + func = value + self.weight[key] = func(self.input) + if isinstance(self.weight[key], (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), self.weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + self.transforms = transforms + self._len = len(next(iter(self.input.values()))) + + def __getitem__(self, idx): + input_item = {key: value[idx] for key, value in self.input.items()} + label_item = {key: value[idx] for key, value in self.label.items()} + weight_item = {key: value[idx] for key, value in self.weight.items()} + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return (input_item, label_item, weight_item) + + def __len__(self): + return self._len + + +class IterableNPZDataset(io.IterableDataset): + """IterableNPZDataset for full-data loading. + + Args: + file_path (str): Npz file path. + input_keys (Tuple[str, ...]): List of input keys. + label_keys (Tuple[str, ...], optional): List of label keys. Defaults to (). + alias_dict (Optional[Dict[str, str]]): Dict of alias(es) for input and label keys. + i.e. {inner_key: outer_key}. Defaults to None. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of + each constraint variable. Defaults to None. + timestamps (Optional[Tuple[float, ...]]): The number of repetitions of the data + in the time dimension. Defaults to None. + transforms (Optional[vision.Compose]): Compose object contains sample wise + transform(s). Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.IterableNPZDataset( + ... "/path/to/file.npz" + ... ("x",), + ... ("u",), + ... 
) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...] = (), + alias_dict: Optional[Dict[str, str]] = None, + weight_dict: Optional[Dict[str, Union[Callable, float]]] = None, + timestamps: Optional[Tuple[float, ...]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + + # read raw data from file + raw_data = reader.load_npz_file( + file_path, + input_keys + label_keys, + alias_dict, + ) + # filter raw data by given timestamps if specified + if timestamps is not None: + if "t" in raw_data: + # filter data according to given timestamps + raw_time_array = raw_data["t"] + mask = [] + for ti in timestamps: + mask.append(np.nonzero(np.isclose(raw_time_array, ti).flatten())[0]) + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + mask = np.concatenate(mask, 0) + raw_data = raw_data[mask] + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + else: + # repeat data according to given timestamps + raw_data = misc.convert_to_array( + raw_data, self.input_keys + self.label_keys + ) + raw_data = misc.combine_array_with_time(raw_data, timestamps) + self.input_keys = ("t",) + tuple(self.input_keys) + raw_data = misc.convert_to_dict( + raw_data, self.input_keys + self.label_keys + ) + + # fetch input data + self.input = { + key: value for key, value in raw_data.items() if key in self.input_keys + } + # fetch label data + self.label = { + key: value for key, value in raw_data.items() if key in self.label_keys + } + + # prepare weights + self.weight = {} + if weight_dict is not None: + for key, value in weight_dict.items(): + if isinstance(value, (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), value + ) + elif callable(value): + func = value + self.weight[key] = func(self.input) + if isinstance(self.weight[key], (int, float)): + self.weight[key] = np.full_like( + next(iter(self.label.values())), self.weight[key] + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + self.input = {key: paddle.to_tensor(value) for key, value in self.input.items()} + self.label = {key: paddle.to_tensor(value) for key, value in self.label.items()} + self.weight = { + key: paddle.to_tensor(value) for key, value in self.weight.items() + } + + self.transforms = transforms + self._len = len(next(iter(self.input.values()))) + + @property + def num_samples(self): + """Number of samples within current dataset.""" + return self._len + + def __iter__(self): + if callable(self.transforms): + input_, label_, weight_ = self.transforms( + self.input, self.label, self.weight + ) + yield input_, label_, weight_ + else: + yield self.input, self.label, self.weight + + def __len__(self): + return 1 diff --git a/ppsci/data/dataset/pems_dataset.py b/ppsci/data/dataset/pems_dataset.py index 1e3dde0a3d..d13a2ae325 100644 --- a/ppsci/data/dataset/pems_dataset.py +++ b/ppsci/data/dataset/pems_dataset.py @@ -1,151 +1,151 @@ -import os -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -import pandas as pd -from paddle.io import Dataset -from paddle.vision.transforms import Compose - - -class StandardScaler: - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def transform(self, data): - 
return (data - self.mean) / self.std - - def inverse_transform(self, data): - return (data * self.std) + self.mean - - -def add_window_horizon(data, in_step=12, out_step=12): - length = len(data) - end_index = length - out_step - in_step - X = [] - Y = [] - for i in range(end_index + 1): - X.append(data[i : i + in_step]) - Y.append(data[i + in_step : i + in_step + out_step]) - return X, Y - - -def get_edge_index(file_path, bi=True, reduce="mean"): - TYPE_DICT = {0: np.int64, 1: np.int64, 2: np.float32} - df = pd.read_csv( - os.path.join(file_path, "dist.csv"), - skiprows=1, - header=None, - sep=",", - dtype=TYPE_DICT, - ) - - edge_index = df.loc[:, [0, 1]].values.T - edge_attr = df.loc[:, 2].values - - if bi: - re_edge_index = np.concatenate((edge_index[1:, :], edge_index[:1, :]), axis=0) - edge_index = np.concatenate((edge_index, re_edge_index), axis=-1) - edge_attr = np.concatenate((edge_attr, edge_attr), axis=0) - - num = np.max(edge_index) + 1 - adj = np.zeros((num, num), dtype=np.float32) - - if reduce == "sum": - adj[edge_index[0], edge_index[1]] = 1.0 - elif reduce == "mean": - adj[edge_index[0], edge_index[1]] = 1.0 - adj = adj / adj.sum(axis=-1) - else: - raise ValueError - - return edge_index, edge_attr, adj - - -class PEMSDataset(Dataset): - """Dataset class for PEMSD4 and PEMSD8 dataset. - - Args: - file_path (str): Dataset root path. - split (str): Dataset split label. - input_keys (Tuple[str, ...]): A tuple of input keys. - label_keys (Tuple[str, ...]): A tuple of label keys. - weight_dict (Optional[Dict[str, float]]): Define the weight of each constraint variable. Defaults to None. - transforms (Optional[Compose]): Compose object contains sample wise transform(s). Defaults to None. - norm_input (bool): Whether to normalize the input. Defaults to True. - norm_label (bool): Whether to normalize the output. Defaults to False. - input_len (int): The input timesteps. Defaults to 12. - label_len (int): The output timesteps. Defaults to 12. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.PEMSDataset( - ... "./Data/PEMSD4", - ... "train", - ... ("input",), - ... ("label",), - ... 
) # doctest: +SKIP - """ - - def __init__( - self, - file_path: str, - split: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - weight_dict: Optional[Dict[str, float]] = None, - transforms: Optional[Compose] = None, - norm_input: bool = True, - norm_label: bool = False, - input_len: int = 12, - label_len: int = 12, - ): - super().__init__() - - self.input_keys = input_keys - self.label_keys = label_keys - self.weight_dict = weight_dict - - self.transforms = transforms - self.norm_input = norm_input - self.norm_label = norm_label - - data = np.load(os.path.join(file_path, f"{split}.npy")).astype(np.float32) - - self.mean = np.load(os.path.join(file_path, "mean.npy")).astype(np.float32) - self.std = np.load(os.path.join(file_path, "std.npy")).astype(np.float32) - self.scaler = StandardScaler(self.mean, self.std) - - X, Y = add_window_horizon(data, input_len, label_len) - if norm_input: - X = self.scaler.transform(X) - if norm_label: - Y = self.scaler.transform(Y) - - self._len = X.shape[0] - - self.input = {input_keys[0]: X} - self.label = {label_keys[0]: Y} - - if weight_dict is not None: - self.weight_dict = {key: np.array(1.0) for key in self.label_keys} - self.weight_dict.update(weight_dict) - else: - self.weight = {} - - def __getitem__(self, idx): - input_item = {key: value[idx] for key, value in self.input.items()} - label_item = {key: value[idx] for key, value in self.label.items()} - weight_item = {key: value[idx] for key, value in self.weight.items()} - - if self.transforms is not None: - input_item, label_item, weight_item = self.transforms( - input_item, label_item, weight_item - ) - - return (input_item, label_item, weight_item) - - def __len__(self): - return self._len +import os +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np +import pandas as pd +from paddle.io import Dataset +from paddle.vision.transforms import Compose + + +class StandardScaler: + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def transform(self, data): + return (data - self.mean) / self.std + + def inverse_transform(self, data): + return (data * self.std) + self.mean + + +def add_window_horizon(data, in_step=12, out_step=12): + length = len(data) + end_index = length - out_step - in_step + X = [] + Y = [] + for i in range(end_index + 1): + X.append(data[i : i + in_step]) + Y.append(data[i + in_step : i + in_step + out_step]) + return X, Y + + +def get_edge_index(file_path, bi=True, reduce="mean"): + TYPE_DICT = {0: np.int64, 1: np.int64, 2: np.float32} + df = pd.read_csv( + os.path.join(file_path, "dist.csv"), + skiprows=1, + header=None, + sep=",", + dtype=TYPE_DICT, + ) + + edge_index = df.loc[:, [0, 1]].values.T + edge_attr = df.loc[:, 2].values + + if bi: + re_edge_index = np.concatenate((edge_index[1:, :], edge_index[:1, :]), axis=0) + edge_index = np.concatenate((edge_index, re_edge_index), axis=-1) + edge_attr = np.concatenate((edge_attr, edge_attr), axis=0) + + num = np.max(edge_index) + 1 + adj = np.zeros((num, num), dtype=np.float32) + + if reduce == "sum": + adj[edge_index[0], edge_index[1]] = 1.0 + elif reduce == "mean": + adj[edge_index[0], edge_index[1]] = 1.0 + adj = adj / adj.sum(axis=-1) + else: + raise ValueError + + return edge_index, edge_attr, adj + + +class PEMSDataset(Dataset): + """Dataset class for PEMSD4 and PEMSD8 dataset. + + Args: + file_path (str): Dataset root path. + split (str): Dataset split label. + input_keys (Tuple[str, ...]): A tuple of input keys. 
+ label_keys (Tuple[str, ...]): A tuple of label keys. + weight_dict (Optional[Dict[str, float]]): Define the weight of each constraint variable. Defaults to None. + transforms (Optional[Compose]): Compose object contains sample wise transform(s). Defaults to None. + norm_input (bool): Whether to normalize the input. Defaults to True. + norm_label (bool): Whether to normalize the output. Defaults to False. + input_len (int): The input timesteps. Defaults to 12. + label_len (int): The output timesteps. Defaults to 12. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.PEMSDataset( + ... "./Data/PEMSD4", + ... "train", + ... ("input",), + ... ("label",), + ... ) # doctest: +SKIP + """ + + def __init__( + self, + file_path: str, + split: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + weight_dict: Optional[Dict[str, float]] = None, + transforms: Optional[Compose] = None, + norm_input: bool = True, + norm_label: bool = False, + input_len: int = 12, + label_len: int = 12, + ): + super().__init__() + + self.input_keys = input_keys + self.label_keys = label_keys + self.weight_dict = weight_dict + + self.transforms = transforms + self.norm_input = norm_input + self.norm_label = norm_label + + data = np.load(os.path.join(file_path, f"{split}.npy")).astype(np.float32) + + self.mean = np.load(os.path.join(file_path, "mean.npy")).astype(np.float32) + self.std = np.load(os.path.join(file_path, "std.npy")).astype(np.float32) + self.scaler = StandardScaler(self.mean, self.std) + + X, Y = add_window_horizon(data, input_len, label_len) + if norm_input: + X = self.scaler.transform(X) + if norm_label: + Y = self.scaler.transform(Y) + + self._len = X.shape[0] + + self.input = {input_keys[0]: X} + self.label = {label_keys[0]: Y} + + if weight_dict is not None: + self.weight_dict = {key: np.array(1.0) for key in self.label_keys} + self.weight_dict.update(weight_dict) + else: + self.weight = {} + + def __getitem__(self, idx): + input_item = {key: value[idx] for key, value in self.input.items()} + label_item = {key: value[idx] for key, value in self.label.items()} + weight_item = {key: value[idx] for key, value in self.weight.items()} + + if self.transforms is not None: + input_item, label_item, weight_item = self.transforms( + input_item, label_item, weight_item + ) + + return (input_item, label_item, weight_item) + + def __len__(self): + return self._len diff --git a/ppsci/data/dataset/radar_dataset.py b/ppsci/data/dataset/radar_dataset.py index e484558455..776ba8890a 100644 --- a/ppsci/data/dataset/radar_dataset.py +++ b/ppsci/data/dataset/radar_dataset.py @@ -1,146 +1,146 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -import os -from typing import Dict -from typing import Optional -from typing import Tuple - -try: - import cv2 -except ModuleNotFoundError: - pass - -import importlib - -import numpy as np -import paddle -from paddle import io - - -class RadarDataset(io.Dataset): - """Class for Radar dataset. - - Args: - input_keys (Tuple[str, ...]): Input keys, such as ("input",). - label_keys (Tuple[str, ...]): Output keys, such as ("output",). - image_width (int): Image width. - image_height (int): Image height. - total_length (int): Total length. - dataset_path (str): Dataset path. - data_type (str): Input and output data type. Defaults to paddle.get_default_dtype(). - weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.RadarDataset( - ... "input_keys": ("input",), - ... "label_keys": ("output",), - ... "image_width": 512, - ... "image_height": 512, - ... "total_length": 29, - ... "dataset_path": "datasets/mrms/figure", - ... "data_type": paddle.get_default_dtype(), - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - image_width: int, - image_height: int, - total_length: int, - dataset_path: str, - data_type: str = paddle.get_default_dtype(), - weight_dict: Optional[Dict[str, float]] = None, - ): - super().__init__() - if importlib.util.find_spec("cv2") is None: - raise ModuleNotFoundError( - "To use RadarDataset, please install 'opencv-python' with: `pip install " - "opencv-python` first." - ) - self.input_keys = input_keys - self.label_keys = label_keys - self.img_width = image_width - self.img_height = image_height - self.length = total_length - self.dataset_path = dataset_path - self.data_type = data_type - - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.case_list = [] - name_list = os.listdir(self.dataset_path) - name_list.sort() - for name in name_list: - case = [] - for i in range(29): - case.append( - self.dataset_path - + "/" - + name - + "/" - + name - + "-" - + str(i).zfill(2) - + ".png" - ) - self.case_list.append(case) - - def _load(self, index): - data = [] - for img_path in self.case_list[index]: - img = cv2.imread(img_path, 2) - data.append(np.expand_dims(img, axis=0)) - data = np.concatenate(data, axis=0).astype(self.data_type) / 10.0 - 3.0 - assert data.shape[1] <= 1024 and data.shape[2] <= 1024 - return data - - def __getitem__(self, index): - data = self._load(index)[-self.length :].copy() - mask = np.ones_like(data) - mask[data < 0] = 0 - data[data < 0] = 0 - data = np.clip(data, 0, 128) - vid = np.zeros( - (self.length, self.img_height, self.img_width, 2), dtype=self.data_type - ) - vid[..., 0] = data - vid[..., 1] = mask - - input_item = {self.input_keys[0]: vid} - label_item = {} - weight_item = {} - for key in self.label_keys: - label_item[key] = np.asarray([], paddle.get_default_dtype()) - if len(label_item) > 0: - weight_shape = [1] * len(next(iter(label_item.values())).shape) - weight_item = { - key: np.full(weight_shape, value, paddle.get_default_dtype()) - for key, value in self.weight_dict.items() - } - return input_item, label_item, weight_item - - def __len__(self): - return len(self.case_list) +# Copyright (c) 2023 
PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +from typing import Dict +from typing import Optional +from typing import Tuple + +try: + import cv2 +except ModuleNotFoundError: + pass + +import importlib + +import numpy as np +import paddle +from paddle import io + + +class RadarDataset(io.Dataset): + """Class for Radar dataset. + + Args: + input_keys (Tuple[str, ...]): Input keys, such as ("input",). + label_keys (Tuple[str, ...]): Output keys, such as ("output",). + image_width (int): Image width. + image_height (int): Image height. + total_length (int): Total length. + dataset_path (str): Dataset path. + data_type (str): Input and output data type. Defaults to paddle.get_default_dtype(). + weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.RadarDataset( + ... "input_keys": ("input",), + ... "label_keys": ("output",), + ... "image_width": 512, + ... "image_height": 512, + ... "total_length": 29, + ... "dataset_path": "datasets/mrms/figure", + ... "data_type": paddle.get_default_dtype(), + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + image_width: int, + image_height: int, + total_length: int, + dataset_path: str, + data_type: str = paddle.get_default_dtype(), + weight_dict: Optional[Dict[str, float]] = None, + ): + super().__init__() + if importlib.util.find_spec("cv2") is None: + raise ModuleNotFoundError( + "To use RadarDataset, please install 'opencv-python' with: `pip install " + "opencv-python` first." 
+ ) + self.input_keys = input_keys + self.label_keys = label_keys + self.img_width = image_width + self.img_height = image_height + self.length = total_length + self.dataset_path = dataset_path + self.data_type = data_type + + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.case_list = [] + name_list = os.listdir(self.dataset_path) + name_list.sort() + for name in name_list: + case = [] + for i in range(29): + case.append( + self.dataset_path + + "/" + + name + + "/" + + name + + "-" + + str(i).zfill(2) + + ".png" + ) + self.case_list.append(case) + + def _load(self, index): + data = [] + for img_path in self.case_list[index]: + img = cv2.imread(img_path, 2) + data.append(np.expand_dims(img, axis=0)) + data = np.concatenate(data, axis=0).astype(self.data_type) / 10.0 - 3.0 + assert data.shape[1] <= 1024 and data.shape[2] <= 1024 + return data + + def __getitem__(self, index): + data = self._load(index)[-self.length :].copy() + mask = np.ones_like(data) + mask[data < 0] = 0 + data[data < 0] = 0 + data = np.clip(data, 0, 128) + vid = np.zeros( + (self.length, self.img_height, self.img_width, 2), dtype=self.data_type + ) + vid[..., 0] = data + vid[..., 1] = mask + + input_item = {self.input_keys[0]: vid} + label_item = {} + weight_item = {} + for key in self.label_keys: + label_item[key] = np.asarray([], paddle.get_default_dtype()) + if len(label_item) > 0: + weight_shape = [1] * len(next(iter(label_item.values())).shape) + weight_item = { + key: np.full(weight_shape, value, paddle.get_default_dtype()) + for key, value in self.weight_dict.items() + } + return input_item, label_item, weight_item + + def __len__(self): + return len(self.case_list) diff --git a/ppsci/data/dataset/sevir_dataset.py b/ppsci/data/dataset/sevir_dataset.py index 42ae274c2b..9ef6d89f99 100644 --- a/ppsci/data/dataset/sevir_dataset.py +++ b/ppsci/data/dataset/sevir_dataset.py @@ -1,814 +1,814 @@ -import datetime -import os -from copy import deepcopy -from typing import Dict -from typing import Optional -from typing import Sequence -from typing import Tuple -from typing import Union - -try: - import h5py -except ModuleNotFoundError: - pass -import numpy as np -import paddle -import paddle.nn.functional as F -import pandas as pd -from paddle import io - -# SEVIR Dataset constants -SEVIR_DATA_TYPES = ["vis", "ir069", "ir107", "vil", "lght"] -SEVIR_RAW_DTYPES = { - "vis": np.int16, - "ir069": np.int16, - "ir107": np.int16, - "vil": np.uint8, - "lght": np.int16, -} -LIGHTING_FRAME_TIMES = np.arange(-120.0, 125.0, 5) * 60 -SEVIR_DATA_SHAPE = { - "lght": (48, 48), -} -PREPROCESS_SCALE_SEVIR = { - "vis": 1, # Not utilized in original paper - "ir069": 1 / 1174.68, - "ir107": 1 / 2562.43, - "vil": 1 / 47.54, - "lght": 1 / 0.60517, -} -PREPROCESS_OFFSET_SEVIR = { - "vis": 0, # Not utilized in original paper - "ir069": 3683.58, - "ir107": 1552.80, - "vil": -33.44, - "lght": -0.02990, -} -PREPROCESS_SCALE_01 = { - "vis": 1, - "ir069": 1, - "ir107": 1, - "vil": 1 / 255, # currently the only one implemented - "lght": 1, -} -PREPROCESS_OFFSET_01 = { - "vis": 0, - "ir069": 0, - "ir107": 0, - "vil": 0, # currently the only one implemented - "lght": 0, -} - - -def change_layout_np(data, in_layout="NHWT", out_layout="NHWT", ret_contiguous=False): - # first convert to 'NHWT' - if in_layout == "NHWT": - pass - elif in_layout == "NTHW": - data = np.transpose(data, axes=(0, 2, 3, 1)) - elif 
in_layout == "NWHT": - data = np.transpose(data, axes=(0, 2, 1, 3)) - elif in_layout == "NTCHW": - data = data[:, :, 0, :, :] - data = np.transpose(data, axes=(0, 2, 3, 1)) - elif in_layout == "NTHWC": - data = data[:, :, :, :, 0] - data = np.transpose(data, axes=(0, 2, 3, 1)) - elif in_layout == "NTWHC": - data = data[:, :, :, :, 0] - data = np.transpose(data, axes=(0, 3, 2, 1)) - elif in_layout == "TNHW": - data = np.transpose(data, axes=(1, 2, 3, 0)) - elif in_layout == "TNCHW": - data = data[:, :, 0, :, :] - data = np.transpose(data, axes=(1, 2, 3, 0)) - else: - raise NotImplementedError(f"{in_layout} is invalid.") - - if out_layout == "NHWT": - pass - elif out_layout == "NTHW": - data = np.transpose(data, axes=(0, 3, 1, 2)) - elif out_layout == "NWHT": - data = np.transpose(data, axes=(0, 2, 1, 3)) - elif out_layout == "NTCHW": - data = np.transpose(data, axes=(0, 3, 1, 2)) - data = np.expand_dims(data, axis=2) - elif out_layout == "NTHWC": - data = np.transpose(data, axes=(0, 3, 1, 2)) - data = np.expand_dims(data, axis=-1) - elif out_layout == "NTWHC": - data = np.transpose(data, axes=(0, 3, 2, 1)) - data = np.expand_dims(data, axis=-1) - elif out_layout == "TNHW": - data = np.transpose(data, axes=(3, 0, 1, 2)) - elif out_layout == "TNCHW": - data = np.transpose(data, axes=(3, 0, 1, 2)) - data = np.expand_dims(data, axis=2) - else: - raise NotImplementedError(f"{out_layout} is invalid.") - if ret_contiguous: - data = data.ascontiguousarray() - return data - - -def change_layout_paddle( - data, in_layout="NHWT", out_layout="NHWT", ret_contiguous=False -): - # first convert to 'NHWT' - if in_layout == "NHWT": - pass - elif in_layout == "NTHW": - data = data.transpose(perm=[0, 2, 3, 1]) - elif in_layout == "NTCHW": - data = data[:, :, 0, :, :] - data = data.transpose(perm=[0, 2, 3, 1]) - elif in_layout == "NTHWC": - data = data[:, :, :, :, 0] - data = data.transpose(perm=[0, 2, 3, 1]) - elif in_layout == "TNHW": - data = data.transpose(perm=[1, 2, 3, 0]) - elif in_layout == "TNCHW": - data = data[:, :, 0, :, :] - data = data.transpose(perm=[1, 2, 3, 0]) - else: - raise NotImplementedError(f"{in_layout} is invalid.") - - if out_layout == "NHWT": - pass - elif out_layout == "NTHW": - data = data.transpose(perm=[0, 3, 1, 2]) - elif out_layout == "NTCHW": - data = data.transpose(perm=[0, 3, 1, 2]) - data = paddle.unsqueeze(data, axis=2) - elif out_layout == "NTHWC": - data = data.transpose(perm=[0, 3, 1, 2]) - data = paddle.unsqueeze(data, axis=-1) - elif out_layout == "TNHW": - data = data.transpose(perm=[3, 0, 1, 2]) - elif out_layout == "TNCHW": - data = data.transpose(perm=[3, 0, 1, 2]) - data = paddle.unsqueeze(data, axis=2) - else: - raise NotImplementedError(f"{out_layout} is invalid.") - return data - - -def path_splitall(path): - allparts = [] - while 1: - parts = os.path.split(path) - if parts[0] == path: # sentinel for absolute paths - allparts.insert(0, parts[0]) - break - elif parts[1] == path: # sentinel for relative paths - allparts.insert(0, parts[1]) - break - else: - path = parts[0] - allparts.insert(0, parts[1]) - return allparts - - -class SEVIRDataset(io.Dataset): - """The Storm EVent ImagRy dataset. - - Args: - input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). - label_keys (Tuple[str, ...]): Name of label keys, such as ("output",). - data_dir (str): The path of the dataset. - weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of each constraint variable. Defaults to None. 
- data_types (Sequence[str], optional): A subset of SEVIR_DATA_TYPES. Defaults to [ "vil", ]. - seq_len (int, optional): The length of the data sequences. Should be smaller than the max length raw_seq_len. Defaults to 49. - raw_seq_len (int, optional): The length of the raw data sequences. Defaults to 49. - sample_mode (str, optional): The mode of sampling, eg.'random' or 'sequent'. Defaults to "sequent". - stride (int, optional): Useful when sample_mode == 'sequent' - stride must not be smaller than out_len to prevent data leakage in testing. Defaults to 12. - batch_size (int, optional): The batch size. Defaults to 1. - layout (str, optional): Consists of batch_size 'N', seq_len 'T', channel 'C', height 'H', width 'W' - The layout of sampled data. Raw data layout is 'NHWT'. - valid layout: 'NHWT', 'NTHW', 'NTCHW', 'TNHW', 'TNCHW'. Defaults to "NHWT". - in_len (int, optional): The length of input data. Defaults to 13. - out_len (int, optional): The length of output data. Defaults to 12. - num_shard (int, optional): Split the whole dataset into num_shard parts for distributed training. Defaults to 1. - rank (int, optional): Rank of the current process within num_shard. Defaults to 0. - split_mode (str, optional): If 'ceil', all `num_shard` dataloaders have the same length = ceil(total_len / num_shard). - Different dataloaders may have some duplicated data batches, if the total size of datasets is not divided by num_shard. - if 'floor', all `num_shard` dataloaders have the same length = floor(total_len / num_shard). - The last several data batches may be wasted, if the total size of datasets is not divided by num_shard. - if 'uneven', the last datasets has larger length when the total length is not divided by num_shard. - The uneven split leads to synchronization error in dist.all_reduce() or dist.barrier(). - See related issue: https://github.com/pytorch/pytorch/issues/33148 - Notice: this also affects the behavior of `self.use_up`. Defaults to "uneven". - start_date (datetime.datetime, optional): Start time of SEVIR samples to generate. Defaults to None. - end_date (datetime.datetime, optional): End time of SEVIR samples to generate. Defaults to None. - datetime_filter (function, optional): Mask function applied to time_utc column of catalog (return true to keep the row). - Pass function of the form lambda t : COND(t) - Example: lambda t: np.logical_and(t.dt.hour>=13,t.dt.hour<=21) # Generate only day-time events. Defaults to None. - catalog_filter (function, optional): Function or None or 'default' - Mask function applied to entire catalog dataframe (return true to keep row). - Pass function of the form lambda catalog: COND(catalog) - Example: lambda c: [s[0]=='S' for s in c.id] # Generate only the 'S' events - shuffle (bool, optional): If True, data samples are shuffled before each epoch. Defaults to False. - shuffle_seed (int, optional): Seed to use for shuffling. Defaults to 1. - output_type (np.dtype, optional): The type of generated tensors. Defaults to np.float32. - preprocess (bool, optional): If True, self.preprocess_data_dict(data_dict) is called before each sample generated. Defaults to True. - rescale_method (str, optional): The method of rescale. Defaults to "01". - downsample_dict (Dict[str, Sequence[int]], optional): Downsample_dict.keys() == data_types. - downsample_dict[key] is a Sequence of (t_factor, h_factor, w_factor),representing the downsampling factors of all dimensions. Defaults to None. - verbose (bool, optional): Verbose when opening raw data files. 
Defaults to False. - training (str, optional): Training pathse. Defaults to "train". - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - data_dir: str, - weight_dict: Optional[Dict[str, float]] = None, - data_types: Sequence[str] = [ - "vil", - ], - seq_len: int = 49, - raw_seq_len: int = 49, - sample_mode: str = "sequent", - stride: int = 12, - batch_size: int = 1, - layout: str = "NHWT", - in_len: int = 13, - out_len: int = 12, - num_shard: int = 1, - rank: int = 0, - split_mode: str = "uneven", - start_date: datetime.datetime = None, - end_date: datetime.datetime = None, - datetime_filter=None, - catalog_filter="default", - shuffle: bool = False, - shuffle_seed: int = 1, - output_type=np.float32, - preprocess: bool = True, - rescale_method: str = "01", - downsample_dict: Dict[str, Sequence[int]] = None, - verbose: bool = False, - training="train", - ): - super(SEVIRDataset, self).__init__() - self.input_keys = input_keys - self.label_keys = label_keys - self.data_dir = data_dir - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - # sevir - SEVIR_ROOT_DIR = os.path.join(self.data_dir, "sevir") - sevir_catalog = os.path.join(SEVIR_ROOT_DIR, "CATALOG.csv") - sevir_data_dir = os.path.join(SEVIR_ROOT_DIR, "data") - # sevir-lr - # SEVIR_ROOT_DIR = os.path.join(self.data_dir, "sevir_lr") - # SEVIR_CATALOG = os.path.join(SEVIR_ROOT_DIR, "CATALOG.csv") - # SEVIR_DATA_DIR = os.path.join(SEVIR_ROOT_DIR, "data") - - if data_types is None: - data_types = SEVIR_DATA_TYPES - else: - assert set(data_types).issubset(SEVIR_DATA_TYPES) - - # configs which should not be modified - self._dtypes = SEVIR_RAW_DTYPES - self.lght_frame_times = LIGHTING_FRAME_TIMES - self.data_shape = SEVIR_DATA_SHAPE - - self.raw_seq_len = raw_seq_len - self.seq_len = seq_len - - if seq_len > raw_seq_len: - raise ValueError("seq_len must be small than raw_seq_len") - - if sample_mode not in ["random", "sequent"]: - raise ValueError("sample_mode must be 'random' or 'sequent'.") - - self.sample_mode = sample_mode - self.stride = stride - self.batch_size = batch_size - valid_layout = ("NHWT", "NTHW", "NTCHW", "NTHWC", "TNHW", "TNCHW") - if layout not in valid_layout: - raise ValueError( - f"Invalid layout = {layout}! Must be one of {valid_layout}." - ) - self.layout = layout - self.in_len = in_len - self.out_len = out_len - - self.num_shard = num_shard - self.rank = rank - valid_split_mode = ("ceil", "floor", "uneven") - if split_mode not in valid_split_mode: - raise ValueError( - f"Invalid split_mode: {split_mode}! Must be one of {valid_split_mode}." 
- ) - self.split_mode = split_mode - self._samples = None - self._hdf_files = {} - self.data_types = data_types - if isinstance(sevir_catalog, str): - self.catalog = pd.read_csv( - sevir_catalog, parse_dates=["time_utc"], low_memory=False - ) - else: - self.catalog = sevir_catalog - self.sevir_data_dir = sevir_data_dir - self.datetime_filter = datetime_filter - self.catalog_filter = catalog_filter - self.start_date = start_date - self.end_date = end_date - # train val test split - self.start_date = ( - datetime.datetime(*start_date) if start_date is not None else None - ) - self.end_date = datetime.datetime(*end_date) if end_date is not None else None - - self.shuffle = shuffle - self.shuffle_seed = int(shuffle_seed) - self.output_type = output_type - self.preprocess = preprocess - self.downsample_dict = downsample_dict - self.rescale_method = rescale_method - self.verbose = verbose - - if self.start_date is not None: - self.catalog = self.catalog[self.catalog.time_utc > self.start_date] - if self.end_date is not None: - self.catalog = self.catalog[self.catalog.time_utc <= self.end_date] - if self.datetime_filter: - self.catalog = self.catalog[self.datetime_filter(self.catalog.time_utc)] - - if self.catalog_filter is not None: - if self.catalog_filter == "default": - self.catalog_filter = lambda c: c.pct_missing == 0 - self.catalog = self.catalog[self.catalog_filter(self.catalog)] - - self._compute_samples() - self._open_files(verbose=self.verbose) - - def _compute_samples(self): - """ - Computes the list of samples in catalog to be used. This sets self._samples - """ - # locate all events containing colocated data_types - imgt = self.data_types - imgts = set(imgt) - filtcat = self.catalog[ - np.logical_or.reduce([self.catalog.img_type == i for i in imgt]) - ] - # remove rows missing one or more requested img_types - filtcat = filtcat.groupby("id").filter( - lambda x: imgts.issubset(set(x["img_type"])) - ) - # If there are repeated IDs, remove them (this is a bug in SEVIR) - # TODO: is it necessary to keep one of them instead of deleting them all - filtcat = filtcat.groupby("id").filter(lambda x: x.shape[0] == len(imgt)) - self._samples = filtcat.groupby("id").apply( - lambda df: self._df_to_series(df, imgt) - ) - if self.shuffle: - self.shuffle_samples() - - def shuffle_samples(self): - self._samples = self._samples.sample(frac=1, random_state=self.shuffle_seed) - - def _df_to_series(self, df, imgt): - d = {} - df = df.set_index("img_type") - for i in imgt: - s = df.loc[i] - idx = s.file_index if i != "lght" else s.id - d.update({f"{i}_filename": [s.file_name], f"{i}_index": [idx]}) - - return pd.DataFrame(d) - - def _open_files(self, verbose=True): - """ - Opens HDF files - """ - imgt = self.data_types - hdf_filenames = [] - for t in imgt: - hdf_filenames += list(np.unique(self._samples[f"{t}_filename"].values)) - self._hdf_files = {} - for f in hdf_filenames: - if verbose: - print("Opening HDF5 file for reading", f) - self._hdf_files[f] = h5py.File(self.sevir_data_dir + "/" + f, "r") - - def close(self): - """ - Closes all open file handles - """ - for f in self._hdf_files: - self._hdf_files[f].close() - self._hdf_files = {} - - @property - def num_seq_per_event(self): - return 1 + (self.raw_seq_len - self.seq_len) // self.stride - - @property - def total_num_seq(self): - """ - The total number of sequences within each shard. - Notice that it is not the product of `self.num_seq_per_event` and `self.total_num_event`. 
- """ - return int(self.num_seq_per_event * self.num_event) - - @property - def total_num_event(self): - """ - The total number of events in the whole dataset, before split into different shards. - """ - return int(self._samples.shape[0]) - - @property - def start_event_idx(self): - """ - The event idx used in certain rank should satisfy event_idx >= start_event_idx - """ - return self.total_num_event // self.num_shard * self.rank - - @property - def end_event_idx(self): - """ - The event idx used in certain rank should satisfy event_idx < end_event_idx - - """ - if self.split_mode == "ceil": - _last_start_event_idx = ( - self.total_num_event // self.num_shard * (self.num_shard - 1) - ) - _num_event = self.total_num_event - _last_start_event_idx - return self.start_event_idx + _num_event - elif self.split_mode == "floor": - return self.total_num_event // self.num_shard * (self.rank + 1) - else: # self.split_mode == 'uneven': - if self.rank == self.num_shard - 1: # the last process - return self.total_num_event - else: - return self.total_num_event // self.num_shard * (self.rank + 1) - - @property - def num_event(self): - """ - The number of events split into each rank - """ - return self.end_event_idx - self.start_event_idx - - def __len__(self): - """ - Used only when self.sample_mode == 'sequent' - """ - return self.total_num_seq // self.batch_size - - def _read_data(self, row, data): - """ - Iteratively read data into data dict. Finally data[imgt] gets shape (batch_size, height, width, raw_seq_len). - - Args: - row (Dict,optional): A series with fields IMGTYPE_filename, IMGTYPE_index, IMGTYPE_time_index. - data (Dict,optional): , data[imgt] is a data tensor with shape = (tmp_batch_size, height, width, raw_seq_len). - - Returns: - data (np.array): Updated data. Updated shape = (tmp_batch_size + 1, height, width, raw_seq_len). 
- """ - - imgtyps = np.unique([x.split("_")[0] for x in list(row.keys())]) - for t in imgtyps: - fname = row[f"{t}_filename"] - idx = row[f"{t}_index"] - t_slice = slice(0, None) - # Need to bin lght counts into grid - if t == "lght": - lght_data = self._hdf_files[fname][idx][:] - data_i = self._lght_to_grid(lght_data, t_slice) - else: - data_i = self._hdf_files[fname][t][idx : idx + 1, :, :, t_slice] - data[t] = ( - np.concatenate((data[t], data_i), axis=0) if (t in data) else data_i - ) - return data - - def _lght_to_grid(self, data, t_slice=slice(0, None)): - """ - Converts Nx5 lightning data matrix into a 2D grid of pixel counts - """ - # out_size = (48,48,len(self.lght_frame_times)-1) if isinstance(t_slice,(slice,)) else (48,48) - out_size = ( - (*self.data_shape["lght"], len(self.lght_frame_times)) - if t_slice.stop is None - else (*self.data_shape["lght"], 1) - ) - if data.shape[0] == 0: - return np.zeros((1,) + out_size, dtype=np.float32) - - # filter out points outside the grid - x, y = data[:, 3], data[:, 4] - m = np.logical_and.reduce([x >= 0, x < out_size[0], y >= 0, y < out_size[1]]) - data = data[m, :] - if data.shape[0] == 0: - return np.zeros((1,) + out_size, dtype=np.float32) - - # Filter/separate times - t = data[:, 0] - if t_slice.stop is not None: # select only one time bin - if t_slice.stop > 0: - if t_slice.stop < len(self.lght_frame_times): - tm = np.logical_and( - t >= self.lght_frame_times[t_slice.stop - 1], - t < self.lght_frame_times[t_slice.stop], - ) - else: - tm = t >= self.lght_frame_times[-1] - else: # special case: frame 0 uses lght from frame 1 - tm = np.logical_and( - t >= self.lght_frame_times[0], t < self.lght_frame_times[1] - ) - # tm=np.logical_and( (t>=FRAME_TIMES[t_slice],t self.end_event_idx: - pad_size = event_idx_slice_end - self.end_event_idx - event_idx_slice_end = self.end_event_idx - pd_batch = self._samples.iloc[event_idx:event_idx_slice_end] - data = {} - for index, row in pd_batch.iterrows(): - data = self._read_data(row, data) - if pad_size > 0: - event_batch = [] - for t in self.data_types: - pad_shape = [ - pad_size, - ] + list(data[t].shape[1:]) - data_pad = np.concatenate( - ( - data[t].astype(self.output_type), - np.zeros(pad_shape, dtype=self.output_type), - ), - axis=0, - ) - event_batch.append(data_pad) - else: - event_batch = [data[t].astype(self.output_type) for t in self.data_types] - return event_batch - - def __iter__(self): - return self - - @staticmethod - def preprocess_data_dict( - data_dict, data_types=None, layout="NHWT", rescale="01" - ) -> Dict[str, Union[np.ndarray, paddle.Tensor]]: - """The preprocess of data dict. - - Args: - data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): The dict of data. - data_types (Sequence[str]) : The data types that we want to rescale. This mainly excludes "mask" from preprocessing. - layout (str) : consists of batch_size 'N', seq_len 'T', channel 'C', height 'H', width 'W'. - rescale (str): - 'sevir': use the offsets and scale factors in original implementation. - '01': scale all values to range 0 to 1, currently only supports 'vil'. - - Returns: - data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): preprocessed data. 
- """ - - if rescale == "sevir": - scale_dict = PREPROCESS_SCALE_SEVIR - offset_dict = PREPROCESS_OFFSET_SEVIR - elif rescale == "01": - scale_dict = PREPROCESS_SCALE_01 - offset_dict = PREPROCESS_OFFSET_01 - else: - raise ValueError(f"Invalid rescale option: {rescale}.") - if data_types is None: - data_types = data_dict.keys() - for key, data in data_dict.items(): - if key in data_types: - if isinstance(data, np.ndarray): - data = scale_dict[key] * ( - data.astype(np.float32) + offset_dict[key] - ) - data = change_layout_np( - data=data, in_layout="NHWT", out_layout=layout - ) - elif isinstance(data, paddle.Tensor): - data = scale_dict[key] * (data.astype("float32") + offset_dict[key]) - data = change_layout_paddle( - data=data, in_layout="NHWT", out_layout=layout - ) - data_dict[key] = data - return data_dict - - @staticmethod - def process_data_dict_back(data_dict, data_types=None, rescale="01"): - if rescale == "sevir": - scale_dict = PREPROCESS_SCALE_SEVIR - offset_dict = PREPROCESS_OFFSET_SEVIR - elif rescale == "01": - scale_dict = PREPROCESS_SCALE_01 - offset_dict = PREPROCESS_OFFSET_01 - else: - raise ValueError(f"Invalid rescale option: {rescale}.") - if data_types is None: - data_types = data_dict.keys() - for key in data_types: - data = data_dict[key] - data = data.astype("float32") / scale_dict[key] - offset_dict[key] - data_dict[key] = data - return data_dict - - @staticmethod - def data_dict_to_tensor(data_dict, data_types=None): - """ - Convert each element in data_dict to paddle.Tensor (copy without grad). - """ - ret_dict = {} - if data_types is None: - data_types = data_dict.keys() - for key, data in data_dict.items(): - if key in data_types: - if isinstance(data, paddle.Tensor): - ret_dict[key] = data.detach().clone() - elif isinstance(data, np.ndarray): - ret_dict[key] = paddle.to_tensor(data) - else: - raise ValueError( - f"Invalid data type: {type(data)}. Should be paddle.Tensor or np.ndarray" - ) - else: # key == "mask" - ret_dict[key] = data - return ret_dict - - @staticmethod - def downsample_data_dict( - data_dict, data_types=None, factors_dict=None, layout="NHWT" - ) -> Dict[str, paddle.Tensor]: - """The downsample of data. - - Args: - data_dict (Dict[str, Union[np.array, paddle.Tensor]]): The dict of data. - factors_dict (Optional[Dict[str, Sequence[int]]]):each element `factors` is - a Sequence of int, representing (t_factor, h_factor, w_factor). - - Returns: - downsampled_data_dict (Dict[str, paddle.Tensor]): Modify on a deep copy of - data_dict instead of directly modifying the original data_dict. 
- """ - - if factors_dict is None: - factors_dict = {} - if data_types is None: - data_types = data_dict.keys() - downsampled_data_dict = SEVIRDataset.data_dict_to_tensor( - data_dict=data_dict, data_types=data_types - ) # make a copy - for key, data in data_dict.items(): - factors = factors_dict.get(key, None) - if factors is not None: - downsampled_data_dict[key] = change_layout_paddle( - data=downsampled_data_dict[key], in_layout=layout, out_layout="NTHW" - ) - # downsample t dimension - t_slice = [ - slice(None, None), - ] * 4 - t_slice[1] = slice(None, None, factors[0]) - downsampled_data_dict[key] = downsampled_data_dict[key][tuple(t_slice)] - # downsample spatial dimensions - downsampled_data_dict[key] = F.avg_pool2d( - input=downsampled_data_dict[key], - kernel_size=(factors[1], factors[2]), - ) - - downsampled_data_dict[key] = change_layout_paddle( - data=downsampled_data_dict[key], in_layout="NTHW", out_layout=layout - ) - - return downsampled_data_dict - - def layout_to_in_out_slice( - self, - ): - t_axis = self.layout.find("T") - num_axes = len(self.layout) - in_slice = [ - slice(None, None), - ] * num_axes - out_slice = deepcopy(in_slice) - in_slice[t_axis] = slice(None, self.in_len) - if self.out_len is None: - out_slice[t_axis] = slice(self.in_len, None) - else: - out_slice[t_axis] = slice(self.in_len, self.in_len + self.out_len) - return in_slice, out_slice - - def __getitem__(self, index): - event_idx = (index * self.batch_size) // self.num_seq_per_event - seq_idx = (index * self.batch_size) % self.num_seq_per_event - num_sampled = 0 - sampled_idx_list = [] # list of (event_idx, seq_idx) records - while num_sampled < self.batch_size: - sampled_idx_list.append({"event_idx": event_idx, "seq_idx": seq_idx}) - seq_idx += 1 - if seq_idx >= self.num_seq_per_event: - event_idx += 1 - seq_idx = 0 - num_sampled += 1 - - start_event_idx = sampled_idx_list[0]["event_idx"] - event_batch_size = sampled_idx_list[-1]["event_idx"] - start_event_idx + 1 - - event_batch = self._load_event_batch( - event_idx=start_event_idx, event_batch_size=event_batch_size - ) - ret_dict = {} - for sampled_idx in sampled_idx_list: - batch_slice = [ - sampled_idx["event_idx"] - start_event_idx, - ] # use [] to keepdim - seq_slice = slice( - sampled_idx["seq_idx"] * self.stride, - sampled_idx["seq_idx"] * self.stride + self.seq_len, - ) - for imgt_idx, imgt in enumerate(self.data_types): - sampled_seq = event_batch[imgt_idx][batch_slice, :, :, seq_slice] - if imgt in ret_dict: - ret_dict[imgt] = np.concatenate( - (ret_dict[imgt], sampled_seq), axis=0 - ) - else: - ret_dict.update({imgt: sampled_seq}) - - ret_dict = self.data_dict_to_tensor( - data_dict=ret_dict, data_types=self.data_types - ) - if self.preprocess: - ret_dict = self.preprocess_data_dict( - data_dict=ret_dict, - data_types=self.data_types, - layout=self.layout, - rescale=self.rescale_method, - ) - - if self.downsample_dict is not None: - ret_dict = self.downsample_data_dict( - data_dict=ret_dict, - data_types=self.data_types, - factors_dict=self.downsample_dict, - layout=self.layout, - ) - in_slice, out_slice = self.layout_to_in_out_slice() - data_seq = ret_dict["vil"] - if isinstance(data_seq, paddle.Tensor): - data_seq = data_seq.numpy() - x = data_seq[in_slice[0], in_slice[1], in_slice[2], in_slice[3], in_slice[4]] - y = data_seq[ - out_slice[0], out_slice[1], out_slice[2], out_slice[3], out_slice[4] - ] - - weight_item = self.weight_dict - input_item = {self.input_keys[0]: x} - label_item = { - self.label_keys[0]: y, - } - - return 
input_item, label_item, weight_item +import datetime +import os +from copy import deepcopy +from typing import Dict +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Union + +try: + import h5py +except ModuleNotFoundError: + pass +import numpy as np +import paddle +import paddle.nn.functional as F +import pandas as pd +from paddle import io + +# SEVIR Dataset constants +SEVIR_DATA_TYPES = ["vis", "ir069", "ir107", "vil", "lght"] +SEVIR_RAW_DTYPES = { + "vis": np.int16, + "ir069": np.int16, + "ir107": np.int16, + "vil": np.uint8, + "lght": np.int16, +} +LIGHTING_FRAME_TIMES = np.arange(-120.0, 125.0, 5) * 60 +SEVIR_DATA_SHAPE = { + "lght": (48, 48), +} +PREPROCESS_SCALE_SEVIR = { + "vis": 1, # Not utilized in original paper + "ir069": 1 / 1174.68, + "ir107": 1 / 2562.43, + "vil": 1 / 47.54, + "lght": 1 / 0.60517, +} +PREPROCESS_OFFSET_SEVIR = { + "vis": 0, # Not utilized in original paper + "ir069": 3683.58, + "ir107": 1552.80, + "vil": -33.44, + "lght": -0.02990, +} +PREPROCESS_SCALE_01 = { + "vis": 1, + "ir069": 1, + "ir107": 1, + "vil": 1 / 255, # currently the only one implemented + "lght": 1, +} +PREPROCESS_OFFSET_01 = { + "vis": 0, + "ir069": 0, + "ir107": 0, + "vil": 0, # currently the only one implemented + "lght": 0, +} + + +def change_layout_np(data, in_layout="NHWT", out_layout="NHWT", ret_contiguous=False): + # first convert to 'NHWT' + if in_layout == "NHWT": + pass + elif in_layout == "NTHW": + data = np.transpose(data, axes=(0, 2, 3, 1)) + elif in_layout == "NWHT": + data = np.transpose(data, axes=(0, 2, 1, 3)) + elif in_layout == "NTCHW": + data = data[:, :, 0, :, :] + data = np.transpose(data, axes=(0, 2, 3, 1)) + elif in_layout == "NTHWC": + data = data[:, :, :, :, 0] + data = np.transpose(data, axes=(0, 2, 3, 1)) + elif in_layout == "NTWHC": + data = data[:, :, :, :, 0] + data = np.transpose(data, axes=(0, 3, 2, 1)) + elif in_layout == "TNHW": + data = np.transpose(data, axes=(1, 2, 3, 0)) + elif in_layout == "TNCHW": + data = data[:, :, 0, :, :] + data = np.transpose(data, axes=(1, 2, 3, 0)) + else: + raise NotImplementedError(f"{in_layout} is invalid.") + + if out_layout == "NHWT": + pass + elif out_layout == "NTHW": + data = np.transpose(data, axes=(0, 3, 1, 2)) + elif out_layout == "NWHT": + data = np.transpose(data, axes=(0, 2, 1, 3)) + elif out_layout == "NTCHW": + data = np.transpose(data, axes=(0, 3, 1, 2)) + data = np.expand_dims(data, axis=2) + elif out_layout == "NTHWC": + data = np.transpose(data, axes=(0, 3, 1, 2)) + data = np.expand_dims(data, axis=-1) + elif out_layout == "NTWHC": + data = np.transpose(data, axes=(0, 3, 2, 1)) + data = np.expand_dims(data, axis=-1) + elif out_layout == "TNHW": + data = np.transpose(data, axes=(3, 0, 1, 2)) + elif out_layout == "TNCHW": + data = np.transpose(data, axes=(3, 0, 1, 2)) + data = np.expand_dims(data, axis=2) + else: + raise NotImplementedError(f"{out_layout} is invalid.") + if ret_contiguous: + data = data.ascontiguousarray() + return data + + +def change_layout_paddle( + data, in_layout="NHWT", out_layout="NHWT", ret_contiguous=False +): + # first convert to 'NHWT' + if in_layout == "NHWT": + pass + elif in_layout == "NTHW": + data = data.transpose(perm=[0, 2, 3, 1]) + elif in_layout == "NTCHW": + data = data[:, :, 0, :, :] + data = data.transpose(perm=[0, 2, 3, 1]) + elif in_layout == "NTHWC": + data = data[:, :, :, :, 0] + data = data.transpose(perm=[0, 2, 3, 1]) + elif in_layout == "TNHW": + data = data.transpose(perm=[1, 2, 3, 0]) + elif in_layout == 
"TNCHW": + data = data[:, :, 0, :, :] + data = data.transpose(perm=[1, 2, 3, 0]) + else: + raise NotImplementedError(f"{in_layout} is invalid.") + + if out_layout == "NHWT": + pass + elif out_layout == "NTHW": + data = data.transpose(perm=[0, 3, 1, 2]) + elif out_layout == "NTCHW": + data = data.transpose(perm=[0, 3, 1, 2]) + data = paddle.unsqueeze(data, axis=2) + elif out_layout == "NTHWC": + data = data.transpose(perm=[0, 3, 1, 2]) + data = paddle.unsqueeze(data, axis=-1) + elif out_layout == "TNHW": + data = data.transpose(perm=[3, 0, 1, 2]) + elif out_layout == "TNCHW": + data = data.transpose(perm=[3, 0, 1, 2]) + data = paddle.unsqueeze(data, axis=2) + else: + raise NotImplementedError(f"{out_layout} is invalid.") + return data + + +def path_splitall(path): + allparts = [] + while 1: + parts = os.path.split(path) + if parts[0] == path: # sentinel for absolute paths + allparts.insert(0, parts[0]) + break + elif parts[1] == path: # sentinel for relative paths + allparts.insert(0, parts[1]) + break + else: + path = parts[0] + allparts.insert(0, parts[1]) + return allparts + + +class SEVIRDataset(io.Dataset): + """The Storm EVent ImagRy dataset. + + Args: + input_keys (Tuple[str, ...]): Name of input keys, such as ("input",). + label_keys (Tuple[str, ...]): Name of label keys, such as ("output",). + data_dir (str): The path of the dataset. + weight_dict (Optional[Dict[str, Union[Callable, float]]]): Define the weight of each constraint variable. Defaults to None. + data_types (Sequence[str], optional): A subset of SEVIR_DATA_TYPES. Defaults to [ "vil", ]. + seq_len (int, optional): The length of the data sequences. Should be smaller than the max length raw_seq_len. Defaults to 49. + raw_seq_len (int, optional): The length of the raw data sequences. Defaults to 49. + sample_mode (str, optional): The mode of sampling, eg.'random' or 'sequent'. Defaults to "sequent". + stride (int, optional): Useful when sample_mode == 'sequent' + stride must not be smaller than out_len to prevent data leakage in testing. Defaults to 12. + batch_size (int, optional): The batch size. Defaults to 1. + layout (str, optional): Consists of batch_size 'N', seq_len 'T', channel 'C', height 'H', width 'W' + The layout of sampled data. Raw data layout is 'NHWT'. + valid layout: 'NHWT', 'NTHW', 'NTCHW', 'TNHW', 'TNCHW'. Defaults to "NHWT". + in_len (int, optional): The length of input data. Defaults to 13. + out_len (int, optional): The length of output data. Defaults to 12. + num_shard (int, optional): Split the whole dataset into num_shard parts for distributed training. Defaults to 1. + rank (int, optional): Rank of the current process within num_shard. Defaults to 0. + split_mode (str, optional): If 'ceil', all `num_shard` dataloaders have the same length = ceil(total_len / num_shard). + Different dataloaders may have some duplicated data batches, if the total size of datasets is not divided by num_shard. + if 'floor', all `num_shard` dataloaders have the same length = floor(total_len / num_shard). + The last several data batches may be wasted, if the total size of datasets is not divided by num_shard. + if 'uneven', the last datasets has larger length when the total length is not divided by num_shard. + The uneven split leads to synchronization error in dist.all_reduce() or dist.barrier(). + See related issue: https://github.com/pytorch/pytorch/issues/33148 + Notice: this also affects the behavior of `self.use_up`. Defaults to "uneven". 
+ start_date (datetime.datetime, optional): Start time of SEVIR samples to generate. Defaults to None. + end_date (datetime.datetime, optional): End time of SEVIR samples to generate. Defaults to None. + datetime_filter (function, optional): Mask function applied to time_utc column of catalog (return true to keep the row). + Pass function of the form lambda t : COND(t) + Example: lambda t: np.logical_and(t.dt.hour>=13,t.dt.hour<=21) # Generate only day-time events. Defaults to None. + catalog_filter (function, optional): Function or None or 'default' + Mask function applied to entire catalog dataframe (return true to keep row). + Pass function of the form lambda catalog: COND(catalog) + Example: lambda c: [s[0]=='S' for s in c.id] # Generate only the 'S' events + shuffle (bool, optional): If True, data samples are shuffled before each epoch. Defaults to False. + shuffle_seed (int, optional): Seed to use for shuffling. Defaults to 1. + output_type (np.dtype, optional): The type of generated tensors. Defaults to np.float32. + preprocess (bool, optional): If True, self.preprocess_data_dict(data_dict) is called before each sample generated. Defaults to True. + rescale_method (str, optional): The method of rescale. Defaults to "01". + downsample_dict (Dict[str, Sequence[int]], optional): Downsample_dict.keys() == data_types. + downsample_dict[key] is a Sequence of (t_factor, h_factor, w_factor),representing the downsampling factors of all dimensions. Defaults to None. + verbose (bool, optional): Verbose when opening raw data files. Defaults to False. + training (str, optional): Training pathse. Defaults to "train". + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + data_dir: str, + weight_dict: Optional[Dict[str, float]] = None, + data_types: Sequence[str] = [ + "vil", + ], + seq_len: int = 49, + raw_seq_len: int = 49, + sample_mode: str = "sequent", + stride: int = 12, + batch_size: int = 1, + layout: str = "NHWT", + in_len: int = 13, + out_len: int = 12, + num_shard: int = 1, + rank: int = 0, + split_mode: str = "uneven", + start_date: datetime.datetime = None, + end_date: datetime.datetime = None, + datetime_filter=None, + catalog_filter="default", + shuffle: bool = False, + shuffle_seed: int = 1, + output_type=np.float32, + preprocess: bool = True, + rescale_method: str = "01", + downsample_dict: Dict[str, Sequence[int]] = None, + verbose: bool = False, + training="train", + ): + super(SEVIRDataset, self).__init__() + self.input_keys = input_keys + self.label_keys = label_keys + self.data_dir = data_dir + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + # sevir + SEVIR_ROOT_DIR = os.path.join(self.data_dir, "sevir") + sevir_catalog = os.path.join(SEVIR_ROOT_DIR, "CATALOG.csv") + sevir_data_dir = os.path.join(SEVIR_ROOT_DIR, "data") + # sevir-lr + # SEVIR_ROOT_DIR = os.path.join(self.data_dir, "sevir_lr") + # SEVIR_CATALOG = os.path.join(SEVIR_ROOT_DIR, "CATALOG.csv") + # SEVIR_DATA_DIR = os.path.join(SEVIR_ROOT_DIR, "data") + + if data_types is None: + data_types = SEVIR_DATA_TYPES + else: + assert set(data_types).issubset(SEVIR_DATA_TYPES) + + # configs which should not be modified + self._dtypes = SEVIR_RAW_DTYPES + self.lght_frame_times = LIGHTING_FRAME_TIMES + self.data_shape = SEVIR_DATA_SHAPE + + 
self.raw_seq_len = raw_seq_len + self.seq_len = seq_len + + if seq_len > raw_seq_len: + raise ValueError("seq_len must be small than raw_seq_len") + + if sample_mode not in ["random", "sequent"]: + raise ValueError("sample_mode must be 'random' or 'sequent'.") + + self.sample_mode = sample_mode + self.stride = stride + self.batch_size = batch_size + valid_layout = ("NHWT", "NTHW", "NTCHW", "NTHWC", "TNHW", "TNCHW") + if layout not in valid_layout: + raise ValueError( + f"Invalid layout = {layout}! Must be one of {valid_layout}." + ) + self.layout = layout + self.in_len = in_len + self.out_len = out_len + + self.num_shard = num_shard + self.rank = rank + valid_split_mode = ("ceil", "floor", "uneven") + if split_mode not in valid_split_mode: + raise ValueError( + f"Invalid split_mode: {split_mode}! Must be one of {valid_split_mode}." + ) + self.split_mode = split_mode + self._samples = None + self._hdf_files = {} + self.data_types = data_types + if isinstance(sevir_catalog, str): + self.catalog = pd.read_csv( + sevir_catalog, parse_dates=["time_utc"], low_memory=False + ) + else: + self.catalog = sevir_catalog + self.sevir_data_dir = sevir_data_dir + self.datetime_filter = datetime_filter + self.catalog_filter = catalog_filter + self.start_date = start_date + self.end_date = end_date + # train val test split + self.start_date = ( + datetime.datetime(*start_date) if start_date is not None else None + ) + self.end_date = datetime.datetime(*end_date) if end_date is not None else None + + self.shuffle = shuffle + self.shuffle_seed = int(shuffle_seed) + self.output_type = output_type + self.preprocess = preprocess + self.downsample_dict = downsample_dict + self.rescale_method = rescale_method + self.verbose = verbose + + if self.start_date is not None: + self.catalog = self.catalog[self.catalog.time_utc > self.start_date] + if self.end_date is not None: + self.catalog = self.catalog[self.catalog.time_utc <= self.end_date] + if self.datetime_filter: + self.catalog = self.catalog[self.datetime_filter(self.catalog.time_utc)] + + if self.catalog_filter is not None: + if self.catalog_filter == "default": + self.catalog_filter = lambda c: c.pct_missing == 0 + self.catalog = self.catalog[self.catalog_filter(self.catalog)] + + self._compute_samples() + self._open_files(verbose=self.verbose) + + def _compute_samples(self): + """ + Computes the list of samples in catalog to be used. 
This sets self._samples + """ + # locate all events containing colocated data_types + imgt = self.data_types + imgts = set(imgt) + filtcat = self.catalog[ + np.logical_or.reduce([self.catalog.img_type == i for i in imgt]) + ] + # remove rows missing one or more requested img_types + filtcat = filtcat.groupby("id").filter( + lambda x: imgts.issubset(set(x["img_type"])) + ) + # If there are repeated IDs, remove them (this is a bug in SEVIR) + # TODO: is it necessary to keep one of them instead of deleting them all + filtcat = filtcat.groupby("id").filter(lambda x: x.shape[0] == len(imgt)) + self._samples = filtcat.groupby("id").apply( + lambda df: self._df_to_series(df, imgt) + ) + if self.shuffle: + self.shuffle_samples() + + def shuffle_samples(self): + self._samples = self._samples.sample(frac=1, random_state=self.shuffle_seed) + + def _df_to_series(self, df, imgt): + d = {} + df = df.set_index("img_type") + for i in imgt: + s = df.loc[i] + idx = s.file_index if i != "lght" else s.id + d.update({f"{i}_filename": [s.file_name], f"{i}_index": [idx]}) + + return pd.DataFrame(d) + + def _open_files(self, verbose=True): + """ + Opens HDF files + """ + imgt = self.data_types + hdf_filenames = [] + for t in imgt: + hdf_filenames += list(np.unique(self._samples[f"{t}_filename"].values)) + self._hdf_files = {} + for f in hdf_filenames: + if verbose: + print("Opening HDF5 file for reading", f) + self._hdf_files[f] = h5py.File(self.sevir_data_dir + "/" + f, "r") + + def close(self): + """ + Closes all open file handles + """ + for f in self._hdf_files: + self._hdf_files[f].close() + self._hdf_files = {} + + @property + def num_seq_per_event(self): + return 1 + (self.raw_seq_len - self.seq_len) // self.stride + + @property + def total_num_seq(self): + """ + The total number of sequences within each shard. + Notice that it is not the product of `self.num_seq_per_event` and `self.total_num_event`. + """ + return int(self.num_seq_per_event * self.num_event) + + @property + def total_num_event(self): + """ + The total number of events in the whole dataset, before split into different shards. + """ + return int(self._samples.shape[0]) + + @property + def start_event_idx(self): + """ + The event idx used in certain rank should satisfy event_idx >= start_event_idx + """ + return self.total_num_event // self.num_shard * self.rank + + @property + def end_event_idx(self): + """ + The event idx used in certain rank should satisfy event_idx < end_event_idx + + """ + if self.split_mode == "ceil": + _last_start_event_idx = ( + self.total_num_event // self.num_shard * (self.num_shard - 1) + ) + _num_event = self.total_num_event - _last_start_event_idx + return self.start_event_idx + _num_event + elif self.split_mode == "floor": + return self.total_num_event // self.num_shard * (self.rank + 1) + else: # self.split_mode == 'uneven': + if self.rank == self.num_shard - 1: # the last process + return self.total_num_event + else: + return self.total_num_event // self.num_shard * (self.rank + 1) + + @property + def num_event(self): + """ + The number of events split into each rank + """ + return self.end_event_idx - self.start_event_idx + + def __len__(self): + """ + Used only when self.sample_mode == 'sequent' + """ + return self.total_num_seq // self.batch_size + + def _read_data(self, row, data): + """ + Iteratively read data into data dict. Finally data[imgt] gets shape (batch_size, height, width, raw_seq_len). + + Args: + row (Dict,optional): A series with fields IMGTYPE_filename, IMGTYPE_index, IMGTYPE_time_index. 
+ data (Dict,optional): , data[imgt] is a data tensor with shape = (tmp_batch_size, height, width, raw_seq_len). + + Returns: + data (np.array): Updated data. Updated shape = (tmp_batch_size + 1, height, width, raw_seq_len). + """ + + imgtyps = np.unique([x.split("_")[0] for x in list(row.keys())]) + for t in imgtyps: + fname = row[f"{t}_filename"] + idx = row[f"{t}_index"] + t_slice = slice(0, None) + # Need to bin lght counts into grid + if t == "lght": + lght_data = self._hdf_files[fname][idx][:] + data_i = self._lght_to_grid(lght_data, t_slice) + else: + data_i = self._hdf_files[fname][t][idx : idx + 1, :, :, t_slice] + data[t] = ( + np.concatenate((data[t], data_i), axis=0) if (t in data) else data_i + ) + return data + + def _lght_to_grid(self, data, t_slice=slice(0, None)): + """ + Converts Nx5 lightning data matrix into a 2D grid of pixel counts + """ + # out_size = (48,48,len(self.lght_frame_times)-1) if isinstance(t_slice,(slice,)) else (48,48) + out_size = ( + (*self.data_shape["lght"], len(self.lght_frame_times)) + if t_slice.stop is None + else (*self.data_shape["lght"], 1) + ) + if data.shape[0] == 0: + return np.zeros((1,) + out_size, dtype=np.float32) + + # filter out points outside the grid + x, y = data[:, 3], data[:, 4] + m = np.logical_and.reduce([x >= 0, x < out_size[0], y >= 0, y < out_size[1]]) + data = data[m, :] + if data.shape[0] == 0: + return np.zeros((1,) + out_size, dtype=np.float32) + + # Filter/separate times + t = data[:, 0] + if t_slice.stop is not None: # select only one time bin + if t_slice.stop > 0: + if t_slice.stop < len(self.lght_frame_times): + tm = np.logical_and( + t >= self.lght_frame_times[t_slice.stop - 1], + t < self.lght_frame_times[t_slice.stop], + ) + else: + tm = t >= self.lght_frame_times[-1] + else: # special case: frame 0 uses lght from frame 1 + tm = np.logical_and( + t >= self.lght_frame_times[0], t < self.lght_frame_times[1] + ) + # tm=np.logical_and( (t>=FRAME_TIMES[t_slice],t self.end_event_idx: + pad_size = event_idx_slice_end - self.end_event_idx + event_idx_slice_end = self.end_event_idx + pd_batch = self._samples.iloc[event_idx:event_idx_slice_end] + data = {} + for index, row in pd_batch.iterrows(): + data = self._read_data(row, data) + if pad_size > 0: + event_batch = [] + for t in self.data_types: + pad_shape = [ + pad_size, + ] + list(data[t].shape[1:]) + data_pad = np.concatenate( + ( + data[t].astype(self.output_type), + np.zeros(pad_shape, dtype=self.output_type), + ), + axis=0, + ) + event_batch.append(data_pad) + else: + event_batch = [data[t].astype(self.output_type) for t in self.data_types] + return event_batch + + def __iter__(self): + return self + + @staticmethod + def preprocess_data_dict( + data_dict, data_types=None, layout="NHWT", rescale="01" + ) -> Dict[str, Union[np.ndarray, paddle.Tensor]]: + """The preprocess of data dict. + + Args: + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): The dict of data. + data_types (Sequence[str]) : The data types that we want to rescale. This mainly excludes "mask" from preprocessing. + layout (str) : consists of batch_size 'N', seq_len 'T', channel 'C', height 'H', width 'W'. + rescale (str): + 'sevir': use the offsets and scale factors in original implementation. + '01': scale all values to range 0 to 1, currently only supports 'vil'. + + Returns: + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): preprocessed data. 
+ """ + + if rescale == "sevir": + scale_dict = PREPROCESS_SCALE_SEVIR + offset_dict = PREPROCESS_OFFSET_SEVIR + elif rescale == "01": + scale_dict = PREPROCESS_SCALE_01 + offset_dict = PREPROCESS_OFFSET_01 + else: + raise ValueError(f"Invalid rescale option: {rescale}.") + if data_types is None: + data_types = data_dict.keys() + for key, data in data_dict.items(): + if key in data_types: + if isinstance(data, np.ndarray): + data = scale_dict[key] * ( + data.astype(np.float32) + offset_dict[key] + ) + data = change_layout_np( + data=data, in_layout="NHWT", out_layout=layout + ) + elif isinstance(data, paddle.Tensor): + data = scale_dict[key] * (data.astype("float32") + offset_dict[key]) + data = change_layout_paddle( + data=data, in_layout="NHWT", out_layout=layout + ) + data_dict[key] = data + return data_dict + + @staticmethod + def process_data_dict_back(data_dict, data_types=None, rescale="01"): + if rescale == "sevir": + scale_dict = PREPROCESS_SCALE_SEVIR + offset_dict = PREPROCESS_OFFSET_SEVIR + elif rescale == "01": + scale_dict = PREPROCESS_SCALE_01 + offset_dict = PREPROCESS_OFFSET_01 + else: + raise ValueError(f"Invalid rescale option: {rescale}.") + if data_types is None: + data_types = data_dict.keys() + for key in data_types: + data = data_dict[key] + data = data.astype("float32") / scale_dict[key] - offset_dict[key] + data_dict[key] = data + return data_dict + + @staticmethod + def data_dict_to_tensor(data_dict, data_types=None): + """ + Convert each element in data_dict to paddle.Tensor (copy without grad). + """ + ret_dict = {} + if data_types is None: + data_types = data_dict.keys() + for key, data in data_dict.items(): + if key in data_types: + if isinstance(data, paddle.Tensor): + ret_dict[key] = data.detach().clone() + elif isinstance(data, np.ndarray): + ret_dict[key] = paddle.to_tensor(data) + else: + raise ValueError( + f"Invalid data type: {type(data)}. Should be paddle.Tensor or np.ndarray" + ) + else: # key == "mask" + ret_dict[key] = data + return ret_dict + + @staticmethod + def downsample_data_dict( + data_dict, data_types=None, factors_dict=None, layout="NHWT" + ) -> Dict[str, paddle.Tensor]: + """The downsample of data. + + Args: + data_dict (Dict[str, Union[np.array, paddle.Tensor]]): The dict of data. + factors_dict (Optional[Dict[str, Sequence[int]]]):each element `factors` is + a Sequence of int, representing (t_factor, h_factor, w_factor). + + Returns: + downsampled_data_dict (Dict[str, paddle.Tensor]): Modify on a deep copy of + data_dict instead of directly modifying the original data_dict. 
+ """ + + if factors_dict is None: + factors_dict = {} + if data_types is None: + data_types = data_dict.keys() + downsampled_data_dict = SEVIRDataset.data_dict_to_tensor( + data_dict=data_dict, data_types=data_types + ) # make a copy + for key, data in data_dict.items(): + factors = factors_dict.get(key, None) + if factors is not None: + downsampled_data_dict[key] = change_layout_paddle( + data=downsampled_data_dict[key], in_layout=layout, out_layout="NTHW" + ) + # downsample t dimension + t_slice = [ + slice(None, None), + ] * 4 + t_slice[1] = slice(None, None, factors[0]) + downsampled_data_dict[key] = downsampled_data_dict[key][tuple(t_slice)] + # downsample spatial dimensions + downsampled_data_dict[key] = F.avg_pool2d( + input=downsampled_data_dict[key], + kernel_size=(factors[1], factors[2]), + ) + + downsampled_data_dict[key] = change_layout_paddle( + data=downsampled_data_dict[key], in_layout="NTHW", out_layout=layout + ) + + return downsampled_data_dict + + def layout_to_in_out_slice( + self, + ): + t_axis = self.layout.find("T") + num_axes = len(self.layout) + in_slice = [ + slice(None, None), + ] * num_axes + out_slice = deepcopy(in_slice) + in_slice[t_axis] = slice(None, self.in_len) + if self.out_len is None: + out_slice[t_axis] = slice(self.in_len, None) + else: + out_slice[t_axis] = slice(self.in_len, self.in_len + self.out_len) + return in_slice, out_slice + + def __getitem__(self, index): + event_idx = (index * self.batch_size) // self.num_seq_per_event + seq_idx = (index * self.batch_size) % self.num_seq_per_event + num_sampled = 0 + sampled_idx_list = [] # list of (event_idx, seq_idx) records + while num_sampled < self.batch_size: + sampled_idx_list.append({"event_idx": event_idx, "seq_idx": seq_idx}) + seq_idx += 1 + if seq_idx >= self.num_seq_per_event: + event_idx += 1 + seq_idx = 0 + num_sampled += 1 + + start_event_idx = sampled_idx_list[0]["event_idx"] + event_batch_size = sampled_idx_list[-1]["event_idx"] - start_event_idx + 1 + + event_batch = self._load_event_batch( + event_idx=start_event_idx, event_batch_size=event_batch_size + ) + ret_dict = {} + for sampled_idx in sampled_idx_list: + batch_slice = [ + sampled_idx["event_idx"] - start_event_idx, + ] # use [] to keepdim + seq_slice = slice( + sampled_idx["seq_idx"] * self.stride, + sampled_idx["seq_idx"] * self.stride + self.seq_len, + ) + for imgt_idx, imgt in enumerate(self.data_types): + sampled_seq = event_batch[imgt_idx][batch_slice, :, :, seq_slice] + if imgt in ret_dict: + ret_dict[imgt] = np.concatenate( + (ret_dict[imgt], sampled_seq), axis=0 + ) + else: + ret_dict.update({imgt: sampled_seq}) + + ret_dict = self.data_dict_to_tensor( + data_dict=ret_dict, data_types=self.data_types + ) + if self.preprocess: + ret_dict = self.preprocess_data_dict( + data_dict=ret_dict, + data_types=self.data_types, + layout=self.layout, + rescale=self.rescale_method, + ) + + if self.downsample_dict is not None: + ret_dict = self.downsample_data_dict( + data_dict=ret_dict, + data_types=self.data_types, + factors_dict=self.downsample_dict, + layout=self.layout, + ) + in_slice, out_slice = self.layout_to_in_out_slice() + data_seq = ret_dict["vil"] + if isinstance(data_seq, paddle.Tensor): + data_seq = data_seq.numpy() + x = data_seq[in_slice[0], in_slice[1], in_slice[2], in_slice[3], in_slice[4]] + y = data_seq[ + out_slice[0], out_slice[1], out_slice[2], out_slice[3], out_slice[4] + ] + + weight_item = self.weight_dict + input_item = {self.input_keys[0]: x} + label_item = { + self.label_keys[0]: y, + } + + return 
input_item, label_item, weight_item diff --git a/ppsci/data/dataset/spherical_swe_dataset.py b/ppsci/data/dataset/spherical_swe_dataset.py index 68e29e7883..4246aa8121 100644 --- a/ppsci/data/dataset/spherical_swe_dataset.py +++ b/ppsci/data/dataset/spherical_swe_dataset.py @@ -1,104 +1,104 @@ -from pathlib import Path -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -from paddle import io - - -class SphericalSWEDataset(io.Dataset): - """Loads a Spherical Shallow Water equations dataset - - Training contains 200 samples in resolution 32x64. - Testing contains 50 samples at resolution 32x64 and 50 samples at resolution 64x128. - - Args: - input_keys (Tuple[str, ...]): Input keys, such as ("input",). - label_keys (Tuple[str, ...]): Output keys, such as ("output",). - data_dir (str): The directory to load data from. - weight_dict (Optional[Dict[str, float]], optional): Define the weight of each constraint variable. - Defaults to None. - test_resolutions (Tuple[str, ...], optional): The resolutions to test dataset. Defaults to ["34x64", "64x128"]. - train_resolution (str, optional): The resolutions to train dataset. Defaults to "34x64". - data_split (str, optional): Specify the dataset split, either 'train' , 'test_32x64',or 'test_64x128'. - Defaults to "train". - """ - - def __init__( - self, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - data_dir: str, - weight_dict: Optional[Dict[str, float]] = None, - test_resolutions: Tuple[str, ...] = ["34x64", "64x128"], - train_resolution: str = "34x64", - data_split: str = "train", - ): - super().__init__() - self.input_keys = input_keys - self.label_keys = label_keys - self.data_dir = data_dir - self.weight_dict = {} if weight_dict is None else weight_dict - if weight_dict is not None: - self.weight_dict = {key: 1.0 for key in self.label_keys} - self.weight_dict.update(weight_dict) - - self.test_resolutions = test_resolutions - self.train_resolution = train_resolution - self.data_split = data_split - - # train path - path_train = ( - Path(self.data_dir) - .joinpath(f"train_SWE_{self.train_resolution}.npy") - .as_posix() - ) - self.x_train, self.y_train = self.read_data(path_train) - # test path - path_test_1 = ( - Path(self.data_dir) - .joinpath(f"test_SWE_{self.test_resolutions[0]}.npy") - .as_posix() - ) - self.x_test_1, self.y_test_1 = self.read_data(path_test_1) - path_test_2 = ( - Path(self.data_dir) - .joinpath(f"test_SWE_{self.test_resolutions[1]}.npy") - .as_posix() - ) - self.x_test_2, self.y_test_2 = self.read_data(path_test_2) - - def read_data(self, path): - # load with numpy - data = np.load(path, allow_pickle=True).item() - x = data["x"].astype("float32") - y = data["y"].astype("float32") - del data - return x, y - - def __len__(self): - if self.data_split == "train": - return self.x_train.shape[0] - elif self.data_split == "test_32x64": - return self.x_test_1.shape[0] - else: - return self.x_test_2.shape[0] - - def __getitem__(self, index): - if self.data_split == "train": - x = self.x_train[index] - y = self.y_train[index] - - elif self.data_split == "test_32x64": - x = self.x_test_1[index] - y = self.y_test_1[index] - else: - x = self.x_test_2[index] - y = self.y_test_2[index] - - input_item = {self.input_keys[0]: x} - label_item = {self.label_keys[0]: y} - weight_item = self.weight_dict - - return input_item, label_item, weight_item +from pathlib import Path +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np +from 
paddle import io + + +class SphericalSWEDataset(io.Dataset): + """Loads a Spherical Shallow Water equations dataset + + Training contains 200 samples in resolution 32x64. + Testing contains 50 samples at resolution 32x64 and 50 samples at resolution 64x128. + + Args: + input_keys (Tuple[str, ...]): Input keys, such as ("input",). + label_keys (Tuple[str, ...]): Output keys, such as ("output",). + data_dir (str): The directory to load data from. + weight_dict (Optional[Dict[str, float]], optional): Define the weight of each constraint variable. + Defaults to None. + test_resolutions (Tuple[str, ...], optional): The resolutions to test dataset. Defaults to ["34x64", "64x128"]. + train_resolution (str, optional): The resolutions to train dataset. Defaults to "34x64". + data_split (str, optional): Specify the dataset split, either 'train' , 'test_32x64',or 'test_64x128'. + Defaults to "train". + """ + + def __init__( + self, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + data_dir: str, + weight_dict: Optional[Dict[str, float]] = None, + test_resolutions: Tuple[str, ...] = ["34x64", "64x128"], + train_resolution: str = "34x64", + data_split: str = "train", + ): + super().__init__() + self.input_keys = input_keys + self.label_keys = label_keys + self.data_dir = data_dir + self.weight_dict = {} if weight_dict is None else weight_dict + if weight_dict is not None: + self.weight_dict = {key: 1.0 for key in self.label_keys} + self.weight_dict.update(weight_dict) + + self.test_resolutions = test_resolutions + self.train_resolution = train_resolution + self.data_split = data_split + + # train path + path_train = ( + Path(self.data_dir) + .joinpath(f"train_SWE_{self.train_resolution}.npy") + .as_posix() + ) + self.x_train, self.y_train = self.read_data(path_train) + # test path + path_test_1 = ( + Path(self.data_dir) + .joinpath(f"test_SWE_{self.test_resolutions[0]}.npy") + .as_posix() + ) + self.x_test_1, self.y_test_1 = self.read_data(path_test_1) + path_test_2 = ( + Path(self.data_dir) + .joinpath(f"test_SWE_{self.test_resolutions[1]}.npy") + .as_posix() + ) + self.x_test_2, self.y_test_2 = self.read_data(path_test_2) + + def read_data(self, path): + # load with numpy + data = np.load(path, allow_pickle=True).item() + x = data["x"].astype("float32") + y = data["y"].astype("float32") + del data + return x, y + + def __len__(self): + if self.data_split == "train": + return self.x_train.shape[0] + elif self.data_split == "test_32x64": + return self.x_test_1.shape[0] + else: + return self.x_test_2.shape[0] + + def __getitem__(self, index): + if self.data_split == "train": + x = self.x_train[index] + y = self.y_train[index] + + elif self.data_split == "test_32x64": + x = self.x_test_1[index] + y = self.y_test_1[index] + else: + x = self.x_test_2[index] + y = self.y_test_2[index] + + input_item = {self.input_keys[0]: x} + label_item = {self.label_keys[0]: y} + weight_item = self.weight_dict + + return input_item, label_item, weight_item diff --git a/ppsci/data/dataset/trphysx_dataset.py b/ppsci/data/dataset/trphysx_dataset.py index 3160951530..6006746779 100644 --- a/ppsci/data/dataset/trphysx_dataset.py +++ b/ppsci/data/dataset/trphysx_dataset.py @@ -1,326 +1,326 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [transformer-physx](https://github.com/zabaras/transformer-physx) -""" - -from __future__ import annotations - -import os -from typing import Dict -from typing import Optional -from typing import Tuple - -try: - import h5py -except ModuleNotFoundError: - pass -import numpy as np -import paddle -from paddle import io - -from ppsci.arch import base - - -class LorenzDataset(io.Dataset): - """Dataset for training Lorenz model. - - Args: - file_path (str): Data set path. - input_keys (Tuple[str, ...]): Input keys, such as ("states",). - label_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). - block_size (int): Data block size. - stride (int): Data stride. - ndata (Optional[int]): Number of data series to use. Defaults to None. - weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. - embedding_model (Optional[base.Arch]): Embedding model. Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.LorenzDataset( - ... "file_path": "/path/to/LorenzDataset", - ... "input_keys": ("x",), - ... "label_keys": ("v",), - ... "block_size": 32, - ... "stride": 16, - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - block_size: int, - stride: int, - ndata: Optional[int] = None, - weight_dict: Optional[Dict[str, float]] = None, - embedding_model: Optional[base.Arch] = None, - ): - super().__init__() - if not os.path.exists(file_path): - raise FileNotFoundError( - f"file_path({file_path}) not exists. Please download dataset first. " - "Training: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/lorenz_training_rk.hdf5. " - "Valid: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/lorenz_valid_rk.hdf5." 
- ) - - self.file_path = file_path - self.input_keys = input_keys - self.label_keys = label_keys - - self.block_size = block_size - self.stride = stride - self.ndata = ndata - self.weight_dict = {key: 1.0 for key in self.label_keys} - if weight_dict is not None: - self.weight_dict.update(weight_dict) - - self.data = self.read_data(file_path, block_size, stride) - self.embedding_model = embedding_model - if embedding_model is None: - self.embedding_data = None - else: - embedding_model.eval() - with paddle.no_grad(): - data_tensor = paddle.to_tensor(self.data) - embedding_data_tensor = embedding_model.encoder(data_tensor) - self.embedding_data = embedding_data_tensor.numpy() - - def read_data(self, file_path: str, block_size: int, stride: int): - data = [] - with h5py.File(file_path, "r") as f: - data_num = 0 - for key in f.keys(): - data_series = np.asarray(f[key], dtype=paddle.get_default_dtype()) - for i in range(0, data_series.shape[0] - block_size + 1, stride): - data.append(data_series[i : i + block_size]) - data_num += 1 - if self.ndata is not None and data_num >= self.ndata: - break - return np.asarray(data) - - def __len__(self): - return len(self.data) - - def __getitem__(self, idx): - # when embedding data is None - if self.embedding_data is None: - data_item = self.data[idx] - input_item = {self.input_keys[0]: data_item} - label_item = { - self.label_keys[0]: data_item[1:, :], - self.label_keys[1]: data_item, - } - else: - data_item = self.embedding_data[idx] - input_item = {self.input_keys[0]: data_item[:-1, :]} - label_item = {self.label_keys[0]: data_item[1:, :]} - if len(self.label_keys) == 2: - label_item[self.label_keys[1]] = self.data[idx][1:, :] - - weight_shape = [1] * len(data_item.shape) - weight_item = { - key: np.full(weight_shape, value, paddle.get_default_dtype()) - for key, value in self.weight_dict.items() - } - return (input_item, label_item, weight_item) - - -class RosslerDataset(LorenzDataset): - """Dataset for training Rossler model. - - Args: - file_path (str): Data set path. - input_keys (Tuple[str, ...]): Input keys, such as ("states",). - label_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). - block_size (int): Data block size. - stride (int): Data stride. - ndata (Optional[int]): Number of data series to use. Defaults to None. - weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. - embedding_model (Optional[base.Arch]): Embedding model. Defaults to None. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.RosslerDataset( - ... "file_path": "/path/to/RosslerDataset", - ... "input_keys": ("x",), - ... "label_keys": ("v",), - ... "block_size": 32, - ... "stride": 16, - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - block_size: int, - stride: int, - ndata: Optional[int] = None, - weight_dict: Optional[Dict[str, float]] = None, - embedding_model: Optional[base.Arch] = None, - ): - if not os.path.exists(file_path): - raise FileNotFoundError( - f"file_path({file_path}) not exists. Please download dataset first. " - "Training: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/rossler_training.hdf5. " - "Valid: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/rossler_valid.hdf5." 
- ) - super().__init__( - file_path, - input_keys, - label_keys, - block_size, - stride, - ndata, - weight_dict, - embedding_model, - ) - - -class CylinderDataset(io.Dataset): - """Dataset for training Cylinder model. - - Args: - file_path (str): Data set path. - input_keys (Tuple[str, ...]): Input keys, such as ("states","visc"). - label_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). - block_size (int): Data block size. - stride (int): Data stride. - ndata (Optional[int]): Number of data series to use. Defaults to None. - weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. - embedding_model (Optional[base.Arch]): Embedding model. Defaults to None. - embedding_batch_size (int, optional): The batch size of embedding model. Defaults to 64. - - Examples: - >>> import ppsci - >>> dataset = ppsci.data.dataset.CylinderDataset( - ... "file_path": "/path/to/CylinderDataset", - ... "input_keys": ("x",), - ... "label_keys": ("v",), - ... "block_size": 32, - ... "stride": 16, - ... ) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = False - - def __init__( - self, - file_path: str, - input_keys: Tuple[str, ...], - label_keys: Tuple[str, ...], - block_size: int, - stride: int, - ndata: Optional[int] = None, - weight_dict: Optional[Dict[str, float]] = None, - embedding_model: Optional[base.Arch] = None, - embedding_batch_size: int = 64, - ): - if not os.path.exists(file_path): - raise FileNotFoundError( - f"file_path({file_path}) not exists. Please download dataset first. " - "Training: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/cylinder_training.hdf5. " - "Valid: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/cylinder_valid.hdf5." 
- ) - super().__init__() - self.file_path = file_path - self.input_keys = input_keys - self.label_keys = label_keys - - self.block_size = block_size - self.stride = stride - self.ndata = ndata - self.weight_dict = {key: 1.0 for key in self.label_keys} - if weight_dict is not None: - self.weight_dict.update(weight_dict) - - self.data, self.visc = self.read_data(file_path, block_size, stride) - self.embedding_model = embedding_model - if embedding_model is None: - self.embedding_data = None - else: - embedding_model.eval() - with paddle.no_grad(): - data_tensor = paddle.to_tensor(self.data) - visc_tensor = paddle.to_tensor(self.visc) - embedding_data = [] - for i in range(0, len(data_tensor), embedding_batch_size): - start, end = i, min(i + embedding_batch_size, len(data_tensor)) - embedding_data_batch = embedding_model.encoder( - data_tensor[start:end], visc_tensor[start:end] - ) - embedding_data.append(embedding_data_batch.numpy()) - self.embedding_data = np.concatenate(embedding_data) - - def read_data(self, file_path: str, block_size: int, stride: int): - data = [] - visc = [] - with h5py.File(file_path, "r") as f: - data_num = 0 - for key in f.keys(): - visc0 = 2.0 / float(key) - ux = np.asarray(f[key + "/ux"], dtype=paddle.get_default_dtype()) - uy = np.asarray(f[key + "/uy"], dtype=paddle.get_default_dtype()) - p = np.asarray(f[key + "/p"], dtype=paddle.get_default_dtype()) - data_series = np.stack([ux, uy, p], axis=1) - - for i in range(0, data_series.shape[0] - block_size + 1, stride): - data.append(data_series[i : i + block_size]) - visc.append([visc0]) - - data_num += 1 - if self.ndata is not None and data_num >= self.ndata: - break - - data = np.asarray(data) - visc = np.asarray(visc, dtype=paddle.get_default_dtype()) - return data, visc - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - if self.embedding_data is None: - data_item = self.data[i] - input_item = { - self.input_keys[0]: data_item, - self.input_keys[1]: self.visc[i], - } - label_item = { - self.label_keys[0]: data_item[1:], - self.label_keys[1]: data_item, - } - else: - data_item = self.embedding_data[i] - input_item = {self.input_keys[0]: data_item[:-1, :]} - label_item = {self.label_keys[0]: data_item[1:, :]} - if len(self.label_keys) == 2: - label_item[self.label_keys[1]] = data_item[1:, :] - weight_shape = [1] * len(data_item.shape) - weight_item = { - key: np.full(weight_shape, value, paddle.get_default_dtype()) - for key, value in self.weight_dict.items() - } - return (input_item, label_item, weight_item) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
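The read_data methods of the transformer-physx datasets above cut each trajectory into overlapping windows controlled by block_size and stride. A minimal, self-contained sketch of that windowing on a synthetic array (the 100x3 trajectory and the printed shapes are illustrative, not values from the real HDF5 files):

import numpy as np

# Hypothetical trajectory: 100 time steps of 3 state variables (e.g. Lorenz x/y/z).
data_series = np.random.rand(100, 3).astype("float32")

block_size, stride = 32, 16
blocks = [
    data_series[i : i + block_size]
    for i in range(0, data_series.shape[0] - block_size + 1, stride)
]
# Windows start at t = 0, 16, 32, 48, 64, so 5 blocks of shape (32, 3).
print(len(blocks), blocks[0].shape)  # 5 (32, 3)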
+ +""" +Code below is heavily based on [transformer-physx](https://github.com/zabaras/transformer-physx) +""" + +from __future__ import annotations + +import os +from typing import Dict +from typing import Optional +from typing import Tuple + +try: + import h5py +except ModuleNotFoundError: + pass +import numpy as np +import paddle +from paddle import io + +from ppsci.arch import base + + +class LorenzDataset(io.Dataset): + """Dataset for training Lorenz model. + + Args: + file_path (str): Data set path. + input_keys (Tuple[str, ...]): Input keys, such as ("states",). + label_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). + block_size (int): Data block size. + stride (int): Data stride. + ndata (Optional[int]): Number of data series to use. Defaults to None. + weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. + embedding_model (Optional[base.Arch]): Embedding model. Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.LorenzDataset( + ... "file_path": "/path/to/LorenzDataset", + ... "input_keys": ("x",), + ... "label_keys": ("v",), + ... "block_size": 32, + ... "stride": 16, + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + block_size: int, + stride: int, + ndata: Optional[int] = None, + weight_dict: Optional[Dict[str, float]] = None, + embedding_model: Optional[base.Arch] = None, + ): + super().__init__() + if not os.path.exists(file_path): + raise FileNotFoundError( + f"file_path({file_path}) not exists. Please download dataset first. " + "Training: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/lorenz_training_rk.hdf5. " + "Valid: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/lorenz_valid_rk.hdf5." 
+ ) + + self.file_path = file_path + self.input_keys = input_keys + self.label_keys = label_keys + + self.block_size = block_size + self.stride = stride + self.ndata = ndata + self.weight_dict = {key: 1.0 for key in self.label_keys} + if weight_dict is not None: + self.weight_dict.update(weight_dict) + + self.data = self.read_data(file_path, block_size, stride) + self.embedding_model = embedding_model + if embedding_model is None: + self.embedding_data = None + else: + embedding_model.eval() + with paddle.no_grad(): + data_tensor = paddle.to_tensor(self.data) + embedding_data_tensor = embedding_model.encoder(data_tensor) + self.embedding_data = embedding_data_tensor.numpy() + + def read_data(self, file_path: str, block_size: int, stride: int): + data = [] + with h5py.File(file_path, "r") as f: + data_num = 0 + for key in f.keys(): + data_series = np.asarray(f[key], dtype=paddle.get_default_dtype()) + for i in range(0, data_series.shape[0] - block_size + 1, stride): + data.append(data_series[i : i + block_size]) + data_num += 1 + if self.ndata is not None and data_num >= self.ndata: + break + return np.asarray(data) + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + # when embedding data is None + if self.embedding_data is None: + data_item = self.data[idx] + input_item = {self.input_keys[0]: data_item} + label_item = { + self.label_keys[0]: data_item[1:, :], + self.label_keys[1]: data_item, + } + else: + data_item = self.embedding_data[idx] + input_item = {self.input_keys[0]: data_item[:-1, :]} + label_item = {self.label_keys[0]: data_item[1:, :]} + if len(self.label_keys) == 2: + label_item[self.label_keys[1]] = self.data[idx][1:, :] + + weight_shape = [1] * len(data_item.shape) + weight_item = { + key: np.full(weight_shape, value, paddle.get_default_dtype()) + for key, value in self.weight_dict.items() + } + return (input_item, label_item, weight_item) + + +class RosslerDataset(LorenzDataset): + """Dataset for training Rossler model. + + Args: + file_path (str): Data set path. + input_keys (Tuple[str, ...]): Input keys, such as ("states",). + label_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). + block_size (int): Data block size. + stride (int): Data stride. + ndata (Optional[int]): Number of data series to use. Defaults to None. + weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. + embedding_model (Optional[base.Arch]): Embedding model. Defaults to None. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.RosslerDataset( + ... "file_path": "/path/to/RosslerDataset", + ... "input_keys": ("x",), + ... "label_keys": ("v",), + ... "block_size": 32, + ... "stride": 16, + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + block_size: int, + stride: int, + ndata: Optional[int] = None, + weight_dict: Optional[Dict[str, float]] = None, + embedding_model: Optional[base.Arch] = None, + ): + if not os.path.exists(file_path): + raise FileNotFoundError( + f"file_path({file_path}) not exists. Please download dataset first. " + "Training: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/rossler_training.hdf5. " + "Valid: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/rossler_valid.hdf5." 
+ ) + super().__init__( + file_path, + input_keys, + label_keys, + block_size, + stride, + ndata, + weight_dict, + embedding_model, + ) + + +class CylinderDataset(io.Dataset): + """Dataset for training Cylinder model. + + Args: + file_path (str): Data set path. + input_keys (Tuple[str, ...]): Input keys, such as ("states","visc"). + label_keys (Tuple[str, ...]): Output keys, such as ("pred_states", "recover_states"). + block_size (int): Data block size. + stride (int): Data stride. + ndata (Optional[int]): Number of data series to use. Defaults to None. + weight_dict (Optional[Dict[str, float]]): Weight dictionary. Defaults to None. + embedding_model (Optional[base.Arch]): Embedding model. Defaults to None. + embedding_batch_size (int, optional): The batch size of embedding model. Defaults to 64. + + Examples: + >>> import ppsci + >>> dataset = ppsci.data.dataset.CylinderDataset( + ... "file_path": "/path/to/CylinderDataset", + ... "input_keys": ("x",), + ... "label_keys": ("v",), + ... "block_size": 32, + ... "stride": 16, + ... ) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = False + + def __init__( + self, + file_path: str, + input_keys: Tuple[str, ...], + label_keys: Tuple[str, ...], + block_size: int, + stride: int, + ndata: Optional[int] = None, + weight_dict: Optional[Dict[str, float]] = None, + embedding_model: Optional[base.Arch] = None, + embedding_batch_size: int = 64, + ): + if not os.path.exists(file_path): + raise FileNotFoundError( + f"file_path({file_path}) not exists. Please download dataset first. " + "Training: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/cylinder_training.hdf5. " + "Valid: https://paddle-org.bj.bcebos.com/paddlescience/datasets/transformer_physx/cylinder_valid.hdf5." 
+ ) + super().__init__() + self.file_path = file_path + self.input_keys = input_keys + self.label_keys = label_keys + + self.block_size = block_size + self.stride = stride + self.ndata = ndata + self.weight_dict = {key: 1.0 for key in self.label_keys} + if weight_dict is not None: + self.weight_dict.update(weight_dict) + + self.data, self.visc = self.read_data(file_path, block_size, stride) + self.embedding_model = embedding_model + if embedding_model is None: + self.embedding_data = None + else: + embedding_model.eval() + with paddle.no_grad(): + data_tensor = paddle.to_tensor(self.data) + visc_tensor = paddle.to_tensor(self.visc) + embedding_data = [] + for i in range(0, len(data_tensor), embedding_batch_size): + start, end = i, min(i + embedding_batch_size, len(data_tensor)) + embedding_data_batch = embedding_model.encoder( + data_tensor[start:end], visc_tensor[start:end] + ) + embedding_data.append(embedding_data_batch.numpy()) + self.embedding_data = np.concatenate(embedding_data) + + def read_data(self, file_path: str, block_size: int, stride: int): + data = [] + visc = [] + with h5py.File(file_path, "r") as f: + data_num = 0 + for key in f.keys(): + visc0 = 2.0 / float(key) + ux = np.asarray(f[key + "/ux"], dtype=paddle.get_default_dtype()) + uy = np.asarray(f[key + "/uy"], dtype=paddle.get_default_dtype()) + p = np.asarray(f[key + "/p"], dtype=paddle.get_default_dtype()) + data_series = np.stack([ux, uy, p], axis=1) + + for i in range(0, data_series.shape[0] - block_size + 1, stride): + data.append(data_series[i : i + block_size]) + visc.append([visc0]) + + data_num += 1 + if self.ndata is not None and data_num >= self.ndata: + break + + data = np.asarray(data) + visc = np.asarray(visc, dtype=paddle.get_default_dtype()) + return data, visc + + def __len__(self): + return len(self.data) + + def __getitem__(self, i): + if self.embedding_data is None: + data_item = self.data[i] + input_item = { + self.input_keys[0]: data_item, + self.input_keys[1]: self.visc[i], + } + label_item = { + self.label_keys[0]: data_item[1:], + self.label_keys[1]: data_item, + } + else: + data_item = self.embedding_data[i] + input_item = {self.input_keys[0]: data_item[:-1, :]} + label_item = {self.label_keys[0]: data_item[1:, :]} + if len(self.label_keys) == 2: + label_item[self.label_keys[1]] = data_item[1:, :] + weight_shape = [1] * len(data_item.shape) + weight_item = { + key: np.full(weight_shape, value, paddle.get_default_dtype()) + for key, value in self.weight_dict.items() + } + return (input_item, label_item, weight_item) diff --git a/ppsci/data/dataset/vtu_dataset.py b/ppsci/data/dataset/vtu_dataset.py index fb0c9201b7..8a759d1f93 100644 --- a/ppsci/data/dataset/vtu_dataset.py +++ b/ppsci/data/dataset/vtu_dataset.py @@ -1,106 +1,106 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
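The docstring Examples of LorenzDataset, RosslerDataset and CylinderDataset above write the arguments as "key": value pairs, which is not valid Python call syntax; a keyword-argument call along the lines of the sketch below is presumably what is intended. The file path is a placeholder, and the dataset file must be downloaded first (see the URLs in the FileNotFoundError messages).

import ppsci

dataset = ppsci.data.dataset.LorenzDataset(
    file_path="/path/to/lorenz_training_rk.hdf5",  # hypothetical local path
    input_keys=("states",),
    label_keys=("pred_states", "recover_states"),
    block_size=32,
    stride=16,
)
input_item, label_item, weight_item = dataset[0]
# Each sample is a dict triple; "states" holds one block_size-step window
# of the three Lorenz state variables.
print(input_item["states"].shape)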
- -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -from paddle import io -from paddle import vision - -from ppsci.utils import reader - - -class VtuDataset(io.Dataset): - """Dataset class for .vtu file. - - Args: - file_path (str): *.vtu file path. - input_keys (Optional[Tuple[str, ...]]): Tuple of input keys. Defaults to None. - label_keys (Optional[Tuple[str, ...]]): Tuple of label keys. Defaults to None. - time_step (Optional[int]): Time step with unit second. Defaults to None. - time_index (Optional[Tuple[int, ...]]): Time index tuple in increasing order. - labels (Optional[Dict[str, float]]): Temporary variable for [load_vtk_with_time_file]. - transforms (vision.Compose, optional): Compose object contains sample wise. - transform(s). - - Examples: - >>> from ppsci.data.dataset import VtuDataset - - >>> dataset = VtuDataset(file_path='example.vtu') # doctest: +SKIP - - >>> # get the length of the dataset - >>> dataset_size = len(dataset) # doctest: +SKIP - >>> # get the first sample of the data - >>> first_sample = dataset[0] # doctest: +SKIP - >>> print("First sample:", first_sample) # doctest: +SKIP - """ - - # Whether support batch indexing for speeding up fetching process. - batch_index: bool = True - - def __init__( - self, - file_path: str, - input_keys: Optional[Tuple[str, ...]] = None, - label_keys: Optional[Tuple[str, ...]] = None, - time_step: Optional[int] = None, - time_index: Optional[Tuple[int, ...]] = None, - labels: Optional[Dict[str, float]] = None, - transforms: Optional[vision.Compose] = None, - ): - super().__init__() - - # load data from file - if time_step is not None and time_index is not None: - _input, _label = reader.load_vtk_file( - file_path, time_step, time_index, input_keys, label_keys - ) - _label = {key: _label[key] for key in label_keys} - elif time_step is None and time_index is None: - _input = reader.load_vtk_with_time_file(file_path) - _label = {} - for key, value in labels.items(): - if isinstance(value, (int, float)): - _label[key] = np.full_like( - next(iter(_input.values())), value, "float32" - ) - else: - _label[key] = value - else: - raise ValueError( - "Error, read vtu with time_step and time_index, or neither" - ) - - # transform - _input = transforms(_input) - _label = transforms(_label) - - self.input = _input - self.label = _label - self.input_keys = input_keys - self.label_keys = label_keys - self.transforms = transforms - self.num_samples = len(next(iter(self.input.values()))) - - def __getitem__(self, idx): - input_item = {key: value[idx] for key, value in self.input.items()} - label_item = {key: value[idx] for key, value in self.label.items()} - return (input_item, label_item, {}) - - def __len__(self): - return self.num_samples +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
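When VtuDataset is built without time_step/time_index, scalar entries of labels are expanded with np.full_like so that every sample carries the same constant label. A small sketch of that expansion on made-up data (the five-point "x" field and the label values are assumptions for illustration):

import numpy as np

# Pretend the vtk reader returned five points with a single "x" coordinate field.
_input = {"x": np.linspace(0.0, 0.4, 5, dtype="float32").reshape(5, 1)}
labels = {"p": 0.0, "u": 1.5}

_label = {}
for key, value in labels.items():
    if isinstance(value, (int, float)):
        # Same expansion as in VtuDataset.__init__: one constant value per sample.
        _label[key] = np.full_like(next(iter(_input.values())), value, "float32")
    else:
        _label[key] = value

print(_label["u"].shape, _label["u"][0, 0])  # (5, 1) 1.5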
+ +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np +from paddle import io +from paddle import vision + +from ppsci.utils import reader + + +class VtuDataset(io.Dataset): + """Dataset class for .vtu file. + + Args: + file_path (str): *.vtu file path. + input_keys (Optional[Tuple[str, ...]]): Tuple of input keys. Defaults to None. + label_keys (Optional[Tuple[str, ...]]): Tuple of label keys. Defaults to None. + time_step (Optional[int]): Time step with unit second. Defaults to None. + time_index (Optional[Tuple[int, ...]]): Time index tuple in increasing order. + labels (Optional[Dict[str, float]]): Temporary variable for [load_vtk_with_time_file]. + transforms (vision.Compose, optional): Compose object contains sample wise. + transform(s). + + Examples: + >>> from ppsci.data.dataset import VtuDataset + + >>> dataset = VtuDataset(file_path='example.vtu') # doctest: +SKIP + + >>> # get the length of the dataset + >>> dataset_size = len(dataset) # doctest: +SKIP + >>> # get the first sample of the data + >>> first_sample = dataset[0] # doctest: +SKIP + >>> print("First sample:", first_sample) # doctest: +SKIP + """ + + # Whether support batch indexing for speeding up fetching process. + batch_index: bool = True + + def __init__( + self, + file_path: str, + input_keys: Optional[Tuple[str, ...]] = None, + label_keys: Optional[Tuple[str, ...]] = None, + time_step: Optional[int] = None, + time_index: Optional[Tuple[int, ...]] = None, + labels: Optional[Dict[str, float]] = None, + transforms: Optional[vision.Compose] = None, + ): + super().__init__() + + # load data from file + if time_step is not None and time_index is not None: + _input, _label = reader.load_vtk_file( + file_path, time_step, time_index, input_keys, label_keys + ) + _label = {key: _label[key] for key in label_keys} + elif time_step is None and time_index is None: + _input = reader.load_vtk_with_time_file(file_path) + _label = {} + for key, value in labels.items(): + if isinstance(value, (int, float)): + _label[key] = np.full_like( + next(iter(_input.values())), value, "float32" + ) + else: + _label[key] = value + else: + raise ValueError( + "Error, read vtu with time_step and time_index, or neither" + ) + + # transform + _input = transforms(_input) + _label = transforms(_label) + + self.input = _input + self.label = _label + self.input_keys = input_keys + self.label_keys = label_keys + self.transforms = transforms + self.num_samples = len(next(iter(self.input.values()))) + + def __getitem__(self, idx): + input_item = {key: value[idx] for key, value in self.input.items()} + label_item = {key: value[idx] for key, value in self.label.items()} + return (input_item, label_item, {}) + + def __len__(self): + return self.num_samples diff --git a/ppsci/data/process/__init__.py b/ppsci/data/process/__init__.py index f46c8dd9cf..8d34202962 100644 --- a/ppsci/data/process/__init__.py +++ b/ppsci/data/process/__init__.py @@ -1,21 +1,21 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from ppsci.data.process import batch_transform -from ppsci.data.process import transform - -__all__ = [ - "batch_transform", - "transform", -] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppsci.data.process import batch_transform +from ppsci.data.process import transform + +__all__ = [ + "batch_transform", + "transform", +] diff --git a/ppsci/data/process/batch_transform/__init__.py b/ppsci/data/process/batch_transform/__init__.py index 9e98f39264..171c7a9097 100644 --- a/ppsci/data/process/batch_transform/__init__.py +++ b/ppsci/data/process/batch_transform/__init__.py @@ -1,135 +1,135 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import numbers -from collections.abc import Mapping -from collections.abc import Sequence -from typing import Any -from typing import Callable -from typing import List -from typing import Optional - -import numpy as np -import paddle - -from ppsci.data.process import transform -from ppsci.data.process.batch_transform.preprocess import FunctionalBatchTransform - -try: - import pgl -except ModuleNotFoundError: - pass - - -__all__ = [ - "build_batch_transforms", - "default_collate_fn", - "FunctionalBatchTransform", -] - - -def default_collate_fn(batch: List[Any]) -> Any: - """Default_collate_fn for paddle dataloader. - - NOTE: This `default_collate_fn` is different from official `default_collate_fn` - which specially adapt case where sample is `None` and `pgl.Graph`. - - ref: https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/io/dataloader/collate.py#L25 - - Args: - batch (List[Any]): Batch of samples to be collated. - - Returns: - Any: Collated batch data. 
- """ - sample = batch[0] - if sample is None: - return None - elif isinstance(sample, np.ndarray): - batch = np.stack(batch, axis=0) - return batch - elif isinstance(sample, (paddle.Tensor, paddle.framework.core.eager.Tensor)): - return paddle.stack(batch, axis=0) - elif isinstance(sample, numbers.Number): - batch = np.array(batch) - return batch - elif isinstance(sample, (str, bytes)): - return batch - elif isinstance(sample, Mapping): - return {key: default_collate_fn([d[key] for d in batch]) for key in sample} - elif isinstance(sample, Sequence): - sample_fields_num = len(sample) - if not all(len(sample) == sample_fields_num for sample in iter(batch)): - raise RuntimeError("Fields number not same among samples in a batch") - return [default_collate_fn(fields) for fields in zip(*batch)] - elif str(type(sample)) == "": - # use str(type()) instead of isinstance() in case of pgl is not installed. - graph = pgl.Graph(num_nodes=sample.num_nodes, edges=sample.edges) - graph.x = np.concatenate([g.x for g in batch]) - graph.y = np.concatenate([g.y for g in batch]) - graph.edge_index = np.concatenate([g.edge_index for g in batch], axis=1) - - graph.edge_attr = np.concatenate([g.edge_attr for g in batch]) - graph.pos = np.concatenate([g.pos for g in batch]) - if hasattr(sample, "aoa"): - graph.aoa = np.concatenate([g.aoa for g in batch]) - if hasattr(sample, "mach_or_reynolds"): - graph.mach_or_reynolds = np.concatenate([g.mach_or_reynolds for g in batch]) - graph.tensor() - graph.shape = [len(batch)] - return graph - elif ( - str(type(sample)) - == "" - ): - graph = sample - graph.tensor() - graph.shape = [1] - return graph - raise TypeError( - "batch data can only contains: paddle.Tensor, numpy.ndarray, " - f"dict, list, number, None, pgl.Graph, GraphGridMesh, but got {type(sample)}" - ) - - -def build_transforms(cfg): - if not cfg: - return transform.Compose([]) - cfg = copy.deepcopy(cfg) - - transform_list = [] - for _item in cfg: - transform_cls = next(iter(_item.keys())) - transform_cfg = _item[transform_cls] - transform_obj = eval(transform_cls)(**transform_cfg) - transform_list.append(transform_obj) - - return transform.Compose(transform_list) - - -def build_batch_transforms(cfg, collate_fn: Optional[Callable]): - cfg = copy.deepcopy(cfg) - batch_transforms: Callable[[List[Any]], List[Any]] = build_transforms(cfg) - if collate_fn is None: - collate_fn = default_collate_fn - - def collate_fn_batch_transforms(batch: List[Any]): - # apply batch transform on separate samples - batch = batch_transforms(batch) - - # then collate separate samples into batched data - return collate_fn(batch) - - return collate_fn_batch_transforms +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
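Before the body of default_collate_fn in this file, a simplified, numpy-only sketch of its recursive collation strategy may help: arrays are stacked along a new leading axis, dicts are collated key by key, and plain numbers fall back to np.array. The helper name collate_dicts and the toy batch below are illustrative only; they are not part of ppsci.

from typing import Any, List

import numpy as np


def collate_dicts(batch: List[Any]) -> Any:
    # Recursive sketch: stack arrays, recurse into dicts, wrap numbers.
    sample = batch[0]
    if isinstance(sample, np.ndarray):
        return np.stack(batch, axis=0)
    if isinstance(sample, dict):
        return {key: collate_dicts([item[key] for item in batch]) for key in sample}
    return np.array(batch)


batch = [{"x": np.zeros(3), "y": 1.0}, {"x": np.ones(3), "y": 2.0}]
collated = collate_dicts(batch)
print(collated["x"].shape, collated["y"])  # (2, 3) [1. 2.]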
+ +import copy +import numbers +from collections.abc import Mapping +from collections.abc import Sequence +from typing import Any +from typing import Callable +from typing import List +from typing import Optional + +import numpy as np +import paddle + +from ppsci.data.process import transform +from ppsci.data.process.batch_transform.preprocess import FunctionalBatchTransform + +try: + import pgl +except ModuleNotFoundError: + pass + + +__all__ = [ + "build_batch_transforms", + "default_collate_fn", + "FunctionalBatchTransform", +] + + +def default_collate_fn(batch: List[Any]) -> Any: + """Default_collate_fn for paddle dataloader. + + NOTE: This `default_collate_fn` is different from official `default_collate_fn` + which specially adapt case where sample is `None` and `pgl.Graph`. + + ref: https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/io/dataloader/collate.py#L25 + + Args: + batch (List[Any]): Batch of samples to be collated. + + Returns: + Any: Collated batch data. + """ + sample = batch[0] + if sample is None: + return None + elif isinstance(sample, np.ndarray): + batch = np.stack(batch, axis=0) + return batch + elif isinstance(sample, (paddle.Tensor, paddle.framework.core.eager.Tensor)): + return paddle.stack(batch, axis=0) + elif isinstance(sample, numbers.Number): + batch = np.array(batch) + return batch + elif isinstance(sample, (str, bytes)): + return batch + elif isinstance(sample, Mapping): + return {key: default_collate_fn([d[key] for d in batch]) for key in sample} + elif isinstance(sample, Sequence): + sample_fields_num = len(sample) + if not all(len(sample) == sample_fields_num for sample in iter(batch)): + raise RuntimeError("Fields number not same among samples in a batch") + return [default_collate_fn(fields) for fields in zip(*batch)] + elif str(type(sample)) == "": + # use str(type()) instead of isinstance() in case of pgl is not installed. 
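+        # NOTE: the branch below merges every pgl.Graph in the batch into a single
+        # graph: node/edge arrays are concatenated (edge_index along axis=1), the
+        # optional per-graph fields `aoa` and `mach_or_reynolds` are concatenated
+        # only when present, and `graph.tensor()` converts the merged arrays to
+        # paddle tensors, with `graph.shape = [len(batch)]` recording the batch size.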
+ graph = pgl.Graph(num_nodes=sample.num_nodes, edges=sample.edges) + graph.x = np.concatenate([g.x for g in batch]) + graph.y = np.concatenate([g.y for g in batch]) + graph.edge_index = np.concatenate([g.edge_index for g in batch], axis=1) + + graph.edge_attr = np.concatenate([g.edge_attr for g in batch]) + graph.pos = np.concatenate([g.pos for g in batch]) + if hasattr(sample, "aoa"): + graph.aoa = np.concatenate([g.aoa for g in batch]) + if hasattr(sample, "mach_or_reynolds"): + graph.mach_or_reynolds = np.concatenate([g.mach_or_reynolds for g in batch]) + graph.tensor() + graph.shape = [len(batch)] + return graph + elif ( + str(type(sample)) + == "" + ): + graph = sample + graph.tensor() + graph.shape = [1] + return graph + raise TypeError( + "batch data can only contains: paddle.Tensor, numpy.ndarray, " + f"dict, list, number, None, pgl.Graph, GraphGridMesh, but got {type(sample)}" + ) + + +def build_transforms(cfg): + if not cfg: + return transform.Compose([]) + cfg = copy.deepcopy(cfg) + + transform_list = [] + for _item in cfg: + transform_cls = next(iter(_item.keys())) + transform_cfg = _item[transform_cls] + transform_obj = eval(transform_cls)(**transform_cfg) + transform_list.append(transform_obj) + + return transform.Compose(transform_list) + + +def build_batch_transforms(cfg, collate_fn: Optional[Callable]): + cfg = copy.deepcopy(cfg) + batch_transforms: Callable[[List[Any]], List[Any]] = build_transforms(cfg) + if collate_fn is None: + collate_fn = default_collate_fn + + def collate_fn_batch_transforms(batch: List[Any]): + # apply batch transform on separate samples + batch = batch_transforms(batch) + + # then collate separate samples into batched data + return collate_fn(batch) + + return collate_fn_batch_transforms diff --git a/ppsci/data/process/batch_transform/preprocess.py b/ppsci/data/process/batch_transform/preprocess.py index 62ca5d3be1..cc6e9765ac 100644 --- a/ppsci/data/process/batch_transform/preprocess.py +++ b/ppsci/data/process/batch_transform/preprocess.py @@ -1,74 +1,74 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Any -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional -from typing import Tuple - -import numpy as np - - -class FunctionalBatchTransform: - """Functional data transform class, which allows to use custom data transform function from given transform_func for special cases. - - Args: - transform_func (Callable): Function of batch data transform. - - Examples: - >>> import ppsci - >>> from typing import Tuple, Dict, Optional - >>> def batch_transform_func( - ... data_list: List[ - ... Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Optional[Dict[str, np.ndarray]]] - ... ], - ... ) -> List[Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Optional[Dict[str, np.ndarray]]]]: - ... input_dicts, label_dicts, weight_dicts = zip(*data_list) - ... - ... 
for input_dict in input_dicts: - ... for key in input_dict: - ... input_dict[key] = input_dict[key] * 2 - ... - ... for label_dict in label_dicts: - ... for key in label_dict: - ... label_dict[key] = label_dict[key] + 1.0 - ... - ... return list(zip(input_dicts, label_dicts, weight_dicts)) - ... - >>> # Create a FunctionalBatchTransform object with the batch_transform_func function - >>> transform = ppsci.data.batch_transform.FunctionalBatchTransform(batch_transform_func) - >>> # Define some sample data, labels, and weights - >>> data = [({'x': 1}, {'y': 2}, None), ({'x': 11}, {'y': 22}, None)] - >>> transformed_data = transform(data) - >>> for tuple in transformed_data: - ... print(tuple) - ({'x': 2}, {'y': 3.0}, None) - ({'x': 22}, {'y': 23.0}, None) - """ - - def __init__( - self, - transform_func: Callable[[List[Any]], List[Any]], - ): - self.transform_func = transform_func - - def __call__( - self, - data_list: List[Tuple[Optional[Dict[str, np.ndarray]], ...]], - ) -> List[Tuple[Optional[Dict[str, np.ndarray]], ...]]: - return self.transform_func(data_list) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple + +import numpy as np + + +class FunctionalBatchTransform: + """Functional data transform class, which allows to use custom data transform function from given transform_func for special cases. + + Args: + transform_func (Callable): Function of batch data transform. + + Examples: + >>> import ppsci + >>> from typing import Tuple, Dict, Optional + >>> def batch_transform_func( + ... data_list: List[ + ... Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Optional[Dict[str, np.ndarray]]] + ... ], + ... ) -> List[Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Optional[Dict[str, np.ndarray]]]]: + ... input_dicts, label_dicts, weight_dicts = zip(*data_list) + ... + ... for input_dict in input_dicts: + ... for key in input_dict: + ... input_dict[key] = input_dict[key] * 2 + ... + ... for label_dict in label_dicts: + ... for key in label_dict: + ... label_dict[key] = label_dict[key] + 1.0 + ... + ... return list(zip(input_dicts, label_dicts, weight_dicts)) + ... + >>> # Create a FunctionalBatchTransform object with the batch_transform_func function + >>> transform = ppsci.data.batch_transform.FunctionalBatchTransform(batch_transform_func) + >>> # Define some sample data, labels, and weights + >>> data = [({'x': 1}, {'y': 2}, None), ({'x': 11}, {'y': 22}, None)] + >>> transformed_data = transform(data) + >>> for tuple in transformed_data: + ... 
print(tuple) + ({'x': 2}, {'y': 3.0}, None) + ({'x': 22}, {'y': 23.0}, None) + """ + + def __init__( + self, + transform_func: Callable[[List[Any]], List[Any]], + ): + self.transform_func = transform_func + + def __call__( + self, + data_list: List[Tuple[Optional[Dict[str, np.ndarray]], ...]], + ) -> List[Tuple[Optional[Dict[str, np.ndarray]], ...]]: + return self.transform_func(data_list) diff --git a/ppsci/data/process/transform/__init__.py b/ppsci/data/process/transform/__init__.py index f5a4baa287..803453f5e2 100644 --- a/ppsci/data/process/transform/__init__.py +++ b/ppsci/data/process/transform/__init__.py @@ -1,72 +1,72 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import traceback -from typing import Any -from typing import Tuple - -from paddle import vision - -from ppsci.data.process.transform.preprocess import CropData -from ppsci.data.process.transform.preprocess import FunctionalTransform -from ppsci.data.process.transform.preprocess import Log1p -from ppsci.data.process.transform.preprocess import Normalize -from ppsci.data.process.transform.preprocess import Scale -from ppsci.data.process.transform.preprocess import SqueezeData -from ppsci.data.process.transform.preprocess import Translate - -__all__ = [ - "CropData", - "FunctionalTransform", - "Log1p", - "Normalize", - "Scale", - "SqueezeData", - "Translate", - "build_transforms", -] - - -class Compose(vision.Compose): - """Custom Compose for multiple items in given data.""" - - def __call__(self, *data: Tuple[Any, ...]): - for f in self.transforms: - try: - # NOTE: This is different from vision.Compose to allow receive multiple data items - data = f(*data) - except Exception as e: - stack_info = traceback.format_exc() - print( - f"fail to perform transform [{f}] with error: " - f"{e} and stack:\n{str(stack_info)}" - ) - raise e - return data - - -def build_transforms(cfg): - if not cfg: - return Compose([]) - cfg = copy.deepcopy(cfg) - - transform_list = [] - for _item in cfg: - transform_cls = next(iter(_item.keys())) - transform_cfg = _item[transform_cls] - transform = eval(transform_cls)(**transform_cfg) - transform_list.append(transform) - - return Compose(transform_list) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
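The customized Compose defined for ppsci.data.process.transform in this hunk differs from paddle.vision.Compose in that every transform receives and returns the whole (input, label, weight) tuple. Below is a small self-contained sketch of that calling convention; MultiItemCompose and double_inputs are illustrative names, not ppsci classes.

from typing import Any, Callable, Sequence, Tuple


class MultiItemCompose:
    # Each transform takes the full (input, label, weight) tuple and returns it.
    def __init__(self, transforms: Sequence[Callable]):
        self.transforms = transforms

    def __call__(self, *data: Any) -> Tuple[Any, ...]:
        for f in self.transforms:
            data = f(*data)  # unpack so the transform sees all items at once
        return data


def double_inputs(input_dict, label_dict, weight_dict):
    return {k: v * 2 for k, v in input_dict.items()}, label_dict, weight_dict


compose = MultiItemCompose([double_inputs])
print(compose({"x": 1.0}, {"y": 2.0}, None))  # ({'x': 2.0}, {'y': 2.0}, None)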
+ +import copy +import traceback +from typing import Any +from typing import Tuple + +from paddle import vision + +from ppsci.data.process.transform.preprocess import CropData +from ppsci.data.process.transform.preprocess import FunctionalTransform +from ppsci.data.process.transform.preprocess import Log1p +from ppsci.data.process.transform.preprocess import Normalize +from ppsci.data.process.transform.preprocess import Scale +from ppsci.data.process.transform.preprocess import SqueezeData +from ppsci.data.process.transform.preprocess import Translate + +__all__ = [ + "CropData", + "FunctionalTransform", + "Log1p", + "Normalize", + "Scale", + "SqueezeData", + "Translate", + "build_transforms", +] + + +class Compose(vision.Compose): + """Custom Compose for multiple items in given data.""" + + def __call__(self, *data: Tuple[Any, ...]): + for f in self.transforms: + try: + # NOTE: This is different from vision.Compose to allow receive multiple data items + data = f(*data) + except Exception as e: + stack_info = traceback.format_exc() + print( + f"fail to perform transform [{f}] with error: " + f"{e} and stack:\n{str(stack_info)}" + ) + raise e + return data + + +def build_transforms(cfg): + if not cfg: + return Compose([]) + cfg = copy.deepcopy(cfg) + + transform_list = [] + for _item in cfg: + transform_cls = next(iter(_item.keys())) + transform_cfg = _item[transform_cls] + transform = eval(transform_cls)(**transform_cfg) + transform_list.append(transform) + + return Compose(transform_list) diff --git a/ppsci/data/process/transform/preprocess.py b/ppsci/data/process/transform/preprocess.py index 48f0fa1222..99d7ca4df2 100644 --- a/ppsci/data/process/transform/preprocess.py +++ b/ppsci/data/process/transform/preprocess.py @@ -1,331 +1,331 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import Tuple -from typing import Union - -import numpy as np - - -class Translate: - """Translate class. - - Args: - offset (Dict[str, float]): Shift the input data according to the variable name - and coefficient specified in offset. 
- - Examples: - >>> import ppsci - >>> import numpy as np - - >>> input_dict = {"x": np.array([5.0, 10.0]), "y": np.array([20.0, 40.0])} - >>> label_dict = {"x": np.array([1.0, 2.0]), "y": np.array([3.0, 4.0])} - >>> weight_dict = {"x": np.array([10.0, 20.0]), "y": np.array([30.0, 40.0])} - - >>> translate = ppsci.data.transform.Translate({"x": 1.0, "y": -1.0}) - >>> translated_input_dict, translated_label_dict, translated_weight_dict = translate(input_dict, label_dict, weight_dict) - - >>> print(translated_input_dict) - {'x': array([ 6., 11.]), 'y': array([19., 39.])} - >>> print(translated_label_dict) - {'x': array([1., 2.]), 'y': array([3., 4.])} - >>> print(translated_weight_dict) - {'x': array([10., 20.]), 'y': array([30., 40.])} - """ - - def __init__(self, offset: Dict[str, float]): - self.offset = offset - - def __call__(self, input_dict, label_dict, weight_dict): - input_dict_copy = {**input_dict} - for key in self.offset: - if key in input_dict: - input_dict_copy[key] += self.offset[key] - return input_dict_copy, label_dict, weight_dict - - -class Scale: - """Scale class for data transformation. - - Args: - scale (Dict[str, float]): Scale the input data according to the variable name - and coefficient specified in scale. - - Examples: - >>> import ppsci - >>> translate = ppsci.data.transform.Scale({"x": 1.5, "y": 2.0}) - >>> input_dict = {"x": 10, "y": 20} - >>> label_dict = {"x": 100, "y": 200} - >>> weight_dict = {"x": 1000, "y": 2000} - >>> input_dict_scaled, label_dict_scaled, weight_dict_scaled = translate(input_dict, label_dict, weight_dict) - >>> print(input_dict_scaled) - {'x': 15.0, 'y': 40.0} - >>> print(label_dict_scaled) - {'x': 100, 'y': 200} - >>> print(weight_dict_scaled) - {'x': 1000, 'y': 2000} - """ - - def __init__(self, scale: Dict[str, float]): - self.scale = scale - - def __call__(self, input_dict, label_dict, weight_dict): - input_dict_copy = {**input_dict} - for key in self.scale: - if key in input_dict: - input_dict_copy[key] *= self.scale[key] - return input_dict_copy, label_dict, weight_dict - - -class Normalize: - """Normalize data class. - - NOTE: This transform will modify the input data dict inplace. - - Args: - mean (Union[np.ndarray, Tuple[float, ...]]): Mean of training dataset. - std (Union[np.ndarray, Tuple[float, ...]]): Standard Deviation of training dataset. - apply_keys (Tuple[str, ...], optional): Which data is the normalization method applied to. Defaults to ("input", "label"). - - Examples: - >>> import ppsci - >>> normalize = ppsci.data.transform.Normalize((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)) - >>> input_item = {"data": np.array([1.0, 2.0, 3.0])} - >>> label_item = {"data": np.array([4.0, 5.0, 6.0])} - >>> weight_item = np.array([0.1, 0.2, 0.3]) - >>> normalized_item = normalize(input_item, label_item, weight_item) - >>> print(normalized_item) - ({'data': array([1., 2., 3.])}, {'data': array([4., 5., 6.])}, array([0.1, 0.2, 0.3])) - """ - - def __init__( - self, - mean: Union[np.ndarray, Tuple[float, ...]], - std: Union[np.ndarray, Tuple[float, ...]], - apply_keys: Tuple[str, ...] 
= ("input", "label"), - ): - if len(apply_keys) == 0 or len(set(apply_keys) | {"input", "label"}) > 2: - raise ValueError( - f"apply_keys should be a non empty subset of ('input', 'label'), but got {apply_keys}" - ) - self.mean = mean - self.std = std - self.apply_keys = apply_keys - - def __call__(self, input_item, label_item, weight_item): - if "input" in self.apply_keys: - for key, value in input_item.items(): - input_item[key] = (value - self.mean) / self.std - if "label" in self.apply_keys: - for key, value in label_item.items(): - label_item[key] = (value - self.mean) / self.std - return input_item, label_item, weight_item - - -class Log1p: - """Calculates the natural logarithm of one plus the data, element-wise. - - NOTE: This transform will modify the input data dict inplace. - - Args: - scale (float, optional): Scale data. Defaults to 1.0. - apply_keys (Tuple[str, ...], optional): Which data is the log1p method applied to. Defaults to ("input", "label"). - - Examples: - >>> import ppsci - >>> log1p = ppsci.data.transform.Log1p(1e-5) - >>> input_item = {"data": np.array([1.0, 2.0, 3.0])} - >>> label_item = {"data": np.array([4.0, 5.0, 6.0])} - >>> weight_item = np.array([0.1, 0.2, 0.3]) - >>> input_item_transformed, label_item_transformed, weight_item_transformed = log1p(input_item, label_item, weight_item) - >>> print(input_item_transformed) - {'data': array([11.51293546, 12.20607765, 12.61154109])} - >>> print(label_item_transformed) - {'data': array([12.89922233, 13.12236538, 13.3046866 ])} - >>> print(weight_item_transformed) - [0.1 0.2 0.3] - """ - - def __init__( - self, - scale: float = 1.0, - apply_keys: Tuple[str, ...] = ("input", "label"), - ): - if len(apply_keys) == 0 or len(set(apply_keys) | {"input", "label"}) > 2: - raise ValueError( - f"apply_keys should be a non empty subset of ('input', 'label'), but got {apply_keys}" - ) - self.scale = scale - self.apply_keys = apply_keys - - def __call__(self, input_item, label_item, weight_item): - if "input" in self.apply_keys: - for key, value in input_item.items(): - input_item[key] = np.log1p(value / self.scale) - if "label" in self.apply_keys: - for key, value in label_item.items(): - label_item[key] = np.log1p(value / self.scale) - return input_item, label_item, weight_item - - -class CropData: - """Crop data class. - - This class is used to crop data based on a specified bounding box. - - NOTE: This transform will modify the input data dict inplace. - - Args: - xmin (Tuple[int, ...]): Bottom left corner point, [x0, y0]. - xmax (Tuple[int, ...]): Top right corner point, [x1, y1]. - apply_keys (Tuple[str, ...], optional): Which data is the crop method applied to. Defaults to ("input", "label"). - - Examples: - >>> import ppsci - >>> import numpy as np - >>> crop_data = ppsci.data.transform.CropData((0, 0), (256, 512)) - >>> input_item = {"input": np.zeros((3, 720, 1440))} - >>> label_item = {"label": np.zeros((3, 720, 1440))} - >>> weight_item = {"weight": np.ones((3, 720, 1440))} - >>> input_item, label_item, weight_item = crop_data(input_item, label_item, weight_item) - >>> print(input_item["input"].shape) - (3, 256, 512) - >>> print(label_item["label"].shape) - (3, 256, 512) - """ - - def __init__( - self, - xmin: Tuple[int, ...], - xmax: Tuple[int, ...], - apply_keys: Tuple[str, ...] 
= ("input", "label"), - ): - if len(apply_keys) == 0 or len(set(apply_keys) | {"input", "label"}) > 2: - raise ValueError( - f"apply_keys should be a non empty subset of ('input', 'label'), but got {apply_keys}" - ) - self.xmin = xmin - self.xmax = xmax - self.apply_keys = apply_keys - - def __call__(self, input_item, label_item, weight_item): - if "input" in self.apply_keys: - for key, value in input_item.items(): - input_item[key] = value[ - :, self.xmin[0] : self.xmax[0], self.xmin[1] : self.xmax[1] - ] - if "label" in self.apply_keys: - for key, value in label_item.items(): - label_item[key] = value[ - :, self.xmin[0] : self.xmax[0], self.xmin[1] : self.xmax[1] - ] - return input_item, label_item, weight_item - - -class SqueezeData: - """Squeeze data class. - - NOTE: This transform will modify the input data dict inplace. - - Args: - apply_keys (Tuple[str, ...], optional): Which data is the squeeze method applied to. Defaults to ("input", "label"). - - Examples: - >>> import ppsci - >>> import numpy as np - >>> squeeze_data = ppsci.data.transform.SqueezeData() - >>> input_data = {"input": np.random.rand(10, 224, 224)} - >>> label_data = {"label": np.random.rand(10, 224, 224)} - >>> weight_data = {"weight": np.random.rand(10, 224, 224)} - >>> input_data_squeezed, label_data_squeezed, weight_data_squeezed = squeeze_data(input_data, label_data, weight_data) - """ - - def __init__(self, apply_keys: Tuple[str, ...] = ("input", "label")): - if len(apply_keys) == 0 or len(set(apply_keys) | {"input", "label"}) > 2: - raise ValueError( - f"apply_keys should be a non empty subset of ('input', 'label'), but got {apply_keys}" - ) - self.apply_keys = apply_keys - - def __call__(self, input_item, label_item, weight_item): - if "input" in self.apply_keys: - for key, value in input_item.items(): - if value.ndim == 4: - B, C, H, W = value.shape - input_item[key] = value.reshape((B * C, H, W)) - if value.ndim != 3: - raise ValueError( - f"Only support squeeze data to ndim=3 now, but got ndim={value.ndim}" - ) - if "label" in self.apply_keys: - for key, value in label_item.items(): - if value.ndim == 4: - B, C, H, W = value.shape - label_item[key] = value.reshape((B * C, H, W)) - if value.ndim != 3: - raise ValueError( - f"Only support squeeze data to ndim=3 now, but got ndim={value.ndim}" - ) - return input_item, label_item, weight_item - - -class FunctionalTransform: - """Functional data transform class, which allows to use custom data transform function from given transform_func for special cases. - - Args: - transform_func (Callable): Function of data transform. - - Examples: - >>> # This is the transform_func function. It takes three dictionaries as input: data_dict, label_dict, and weight_dict. - >>> # The function will perform some transformations on the data in data_dict, convert all labels in label_dict to uppercase, - >>> # and modify the weights in weight_dict by dividing each weight by 10. - >>> # Finally, it returns the transformed data, labels, and weights as a tuple. - >>> import ppsci - >>> def transform_func(data_dict, label_dict, weight_dict): - ... for key in data_dict: - ... data_dict[key] = data_dict[key] * 2 - ... for key in label_dict: - ... label_dict[key] = label_dict[key] + 1.0 - ... for key in weight_dict: - ... weight_dict[key] = weight_dict[key] / 10 - ... 
return data_dict, label_dict, weight_dict - >>> transform = ppsci.data.transform.FunctionalTransform(transform_func) - >>> # Define some sample data, labels, and weights - >>> data = {'feature1': np.array([1, 2, 3]), 'feature2': np.array([4, 5, 6])} - >>> label = {'class': 0.0, 'instance': 0.1} - >>> weight = {'weight1': 0.5, 'weight2': 0.5} - >>> # Apply the transform function to the data, labels, and weights using the FunctionalTransform instance - >>> transformed_data = transform(data, label, weight) - >>> print(transformed_data) - ({'feature1': array([2, 4, 6]), 'feature2': array([ 8, 10, 12])}, {'class': 1.0, 'instance': 1.1}, {'weight1': 0.05, 'weight2': 0.05}) - """ - - def __init__( - self, - transform_func: Callable, - ): - self.transform_func = transform_func - - def __call__( - self, *data: Tuple[Dict[str, np.ndarray], ...] - ) -> Tuple[Dict[str, np.ndarray], ...]: - data_dict, label_dict, weight_dict = data - data_dict_copy = {**data_dict} - label_dict_copy = {**label_dict} - weight_dict_copy = {**weight_dict} if weight_dict is not None else {} - return self.transform_func(data_dict_copy, label_dict_copy, weight_dict_copy) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import Tuple +from typing import Union + +import numpy as np + + +class Translate: + """Translate class. + + Args: + offset (Dict[str, float]): Shift the input data according to the variable name + and coefficient specified in offset. + + Examples: + >>> import ppsci + >>> import numpy as np + + >>> input_dict = {"x": np.array([5.0, 10.0]), "y": np.array([20.0, 40.0])} + >>> label_dict = {"x": np.array([1.0, 2.0]), "y": np.array([3.0, 4.0])} + >>> weight_dict = {"x": np.array([10.0, 20.0]), "y": np.array([30.0, 40.0])} + + >>> translate = ppsci.data.transform.Translate({"x": 1.0, "y": -1.0}) + >>> translated_input_dict, translated_label_dict, translated_weight_dict = translate(input_dict, label_dict, weight_dict) + + >>> print(translated_input_dict) + {'x': array([ 6., 11.]), 'y': array([19., 39.])} + >>> print(translated_label_dict) + {'x': array([1., 2.]), 'y': array([3., 4.])} + >>> print(translated_weight_dict) + {'x': array([10., 20.]), 'y': array([30., 40.])} + """ + + def __init__(self, offset: Dict[str, float]): + self.offset = offset + + def __call__(self, input_dict, label_dict, weight_dict): + input_dict_copy = {**input_dict} + for key in self.offset: + if key in input_dict: + input_dict_copy[key] += self.offset[key] + return input_dict_copy, label_dict, weight_dict + + +class Scale: + """Scale class for data transformation. + + Args: + scale (Dict[str, float]): Scale the input data according to the variable name + and coefficient specified in scale. 
+ + Examples: + >>> import ppsci + >>> translate = ppsci.data.transform.Scale({"x": 1.5, "y": 2.0}) + >>> input_dict = {"x": 10, "y": 20} + >>> label_dict = {"x": 100, "y": 200} + >>> weight_dict = {"x": 1000, "y": 2000} + >>> input_dict_scaled, label_dict_scaled, weight_dict_scaled = translate(input_dict, label_dict, weight_dict) + >>> print(input_dict_scaled) + {'x': 15.0, 'y': 40.0} + >>> print(label_dict_scaled) + {'x': 100, 'y': 200} + >>> print(weight_dict_scaled) + {'x': 1000, 'y': 2000} + """ + + def __init__(self, scale: Dict[str, float]): + self.scale = scale + + def __call__(self, input_dict, label_dict, weight_dict): + input_dict_copy = {**input_dict} + for key in self.scale: + if key in input_dict: + input_dict_copy[key] *= self.scale[key] + return input_dict_copy, label_dict, weight_dict + + +class Normalize: + """Normalize data class. + + NOTE: This transform will modify the input data dict inplace. + + Args: + mean (Union[np.ndarray, Tuple[float, ...]]): Mean of training dataset. + std (Union[np.ndarray, Tuple[float, ...]]): Standard Deviation of training dataset. + apply_keys (Tuple[str, ...], optional): Which data is the normalization method applied to. Defaults to ("input", "label"). + + Examples: + >>> import ppsci + >>> normalize = ppsci.data.transform.Normalize((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)) + >>> input_item = {"data": np.array([1.0, 2.0, 3.0])} + >>> label_item = {"data": np.array([4.0, 5.0, 6.0])} + >>> weight_item = np.array([0.1, 0.2, 0.3]) + >>> normalized_item = normalize(input_item, label_item, weight_item) + >>> print(normalized_item) + ({'data': array([1., 2., 3.])}, {'data': array([4., 5., 6.])}, array([0.1, 0.2, 0.3])) + """ + + def __init__( + self, + mean: Union[np.ndarray, Tuple[float, ...]], + std: Union[np.ndarray, Tuple[float, ...]], + apply_keys: Tuple[str, ...] = ("input", "label"), + ): + if len(apply_keys) == 0 or len(set(apply_keys) | {"input", "label"}) > 2: + raise ValueError( + f"apply_keys should be a non empty subset of ('input', 'label'), but got {apply_keys}" + ) + self.mean = mean + self.std = std + self.apply_keys = apply_keys + + def __call__(self, input_item, label_item, weight_item): + if "input" in self.apply_keys: + for key, value in input_item.items(): + input_item[key] = (value - self.mean) / self.std + if "label" in self.apply_keys: + for key, value in label_item.items(): + label_item[key] = (value - self.mean) / self.std + return input_item, label_item, weight_item + + +class Log1p: + """Calculates the natural logarithm of one plus the data, element-wise. + + NOTE: This transform will modify the input data dict inplace. + + Args: + scale (float, optional): Scale data. Defaults to 1.0. + apply_keys (Tuple[str, ...], optional): Which data is the log1p method applied to. Defaults to ("input", "label"). + + Examples: + >>> import ppsci + >>> log1p = ppsci.data.transform.Log1p(1e-5) + >>> input_item = {"data": np.array([1.0, 2.0, 3.0])} + >>> label_item = {"data": np.array([4.0, 5.0, 6.0])} + >>> weight_item = np.array([0.1, 0.2, 0.3]) + >>> input_item_transformed, label_item_transformed, weight_item_transformed = log1p(input_item, label_item, weight_item) + >>> print(input_item_transformed) + {'data': array([11.51293546, 12.20607765, 12.61154109])} + >>> print(label_item_transformed) + {'data': array([12.89922233, 13.12236538, 13.3046866 ])} + >>> print(weight_item_transformed) + [0.1 0.2 0.3] + """ + + def __init__( + self, + scale: float = 1.0, + apply_keys: Tuple[str, ...] 
= ("input", "label"), + ): + if len(apply_keys) == 0 or len(set(apply_keys) | {"input", "label"}) > 2: + raise ValueError( + f"apply_keys should be a non empty subset of ('input', 'label'), but got {apply_keys}" + ) + self.scale = scale + self.apply_keys = apply_keys + + def __call__(self, input_item, label_item, weight_item): + if "input" in self.apply_keys: + for key, value in input_item.items(): + input_item[key] = np.log1p(value / self.scale) + if "label" in self.apply_keys: + for key, value in label_item.items(): + label_item[key] = np.log1p(value / self.scale) + return input_item, label_item, weight_item + + +class CropData: + """Crop data class. + + This class is used to crop data based on a specified bounding box. + + NOTE: This transform will modify the input data dict inplace. + + Args: + xmin (Tuple[int, ...]): Bottom left corner point, [x0, y0]. + xmax (Tuple[int, ...]): Top right corner point, [x1, y1]. + apply_keys (Tuple[str, ...], optional): Which data is the crop method applied to. Defaults to ("input", "label"). + + Examples: + >>> import ppsci + >>> import numpy as np + >>> crop_data = ppsci.data.transform.CropData((0, 0), (256, 512)) + >>> input_item = {"input": np.zeros((3, 720, 1440))} + >>> label_item = {"label": np.zeros((3, 720, 1440))} + >>> weight_item = {"weight": np.ones((3, 720, 1440))} + >>> input_item, label_item, weight_item = crop_data(input_item, label_item, weight_item) + >>> print(input_item["input"].shape) + (3, 256, 512) + >>> print(label_item["label"].shape) + (3, 256, 512) + """ + + def __init__( + self, + xmin: Tuple[int, ...], + xmax: Tuple[int, ...], + apply_keys: Tuple[str, ...] = ("input", "label"), + ): + if len(apply_keys) == 0 or len(set(apply_keys) | {"input", "label"}) > 2: + raise ValueError( + f"apply_keys should be a non empty subset of ('input', 'label'), but got {apply_keys}" + ) + self.xmin = xmin + self.xmax = xmax + self.apply_keys = apply_keys + + def __call__(self, input_item, label_item, weight_item): + if "input" in self.apply_keys: + for key, value in input_item.items(): + input_item[key] = value[ + :, self.xmin[0] : self.xmax[0], self.xmin[1] : self.xmax[1] + ] + if "label" in self.apply_keys: + for key, value in label_item.items(): + label_item[key] = value[ + :, self.xmin[0] : self.xmax[0], self.xmin[1] : self.xmax[1] + ] + return input_item, label_item, weight_item + + +class SqueezeData: + """Squeeze data class. + + NOTE: This transform will modify the input data dict inplace. + + Args: + apply_keys (Tuple[str, ...], optional): Which data is the squeeze method applied to. Defaults to ("input", "label"). + + Examples: + >>> import ppsci + >>> import numpy as np + >>> squeeze_data = ppsci.data.transform.SqueezeData() + >>> input_data = {"input": np.random.rand(10, 224, 224)} + >>> label_data = {"label": np.random.rand(10, 224, 224)} + >>> weight_data = {"weight": np.random.rand(10, 224, 224)} + >>> input_data_squeezed, label_data_squeezed, weight_data_squeezed = squeeze_data(input_data, label_data, weight_data) + """ + + def __init__(self, apply_keys: Tuple[str, ...] 
= ("input", "label")): + if len(apply_keys) == 0 or len(set(apply_keys) | {"input", "label"}) > 2: + raise ValueError( + f"apply_keys should be a non empty subset of ('input', 'label'), but got {apply_keys}" + ) + self.apply_keys = apply_keys + + def __call__(self, input_item, label_item, weight_item): + if "input" in self.apply_keys: + for key, value in input_item.items(): + if value.ndim == 4: + B, C, H, W = value.shape + input_item[key] = value.reshape((B * C, H, W)) + if value.ndim != 3: + raise ValueError( + f"Only support squeeze data to ndim=3 now, but got ndim={value.ndim}" + ) + if "label" in self.apply_keys: + for key, value in label_item.items(): + if value.ndim == 4: + B, C, H, W = value.shape + label_item[key] = value.reshape((B * C, H, W)) + if value.ndim != 3: + raise ValueError( + f"Only support squeeze data to ndim=3 now, but got ndim={value.ndim}" + ) + return input_item, label_item, weight_item + + +class FunctionalTransform: + """Functional data transform class, which allows to use custom data transform function from given transform_func for special cases. + + Args: + transform_func (Callable): Function of data transform. + + Examples: + >>> # This is the transform_func function. It takes three dictionaries as input: data_dict, label_dict, and weight_dict. + >>> # The function will perform some transformations on the data in data_dict, convert all labels in label_dict to uppercase, + >>> # and modify the weights in weight_dict by dividing each weight by 10. + >>> # Finally, it returns the transformed data, labels, and weights as a tuple. + >>> import ppsci + >>> def transform_func(data_dict, label_dict, weight_dict): + ... for key in data_dict: + ... data_dict[key] = data_dict[key] * 2 + ... for key in label_dict: + ... label_dict[key] = label_dict[key] + 1.0 + ... for key in weight_dict: + ... weight_dict[key] = weight_dict[key] / 10 + ... return data_dict, label_dict, weight_dict + >>> transform = ppsci.data.transform.FunctionalTransform(transform_func) + >>> # Define some sample data, labels, and weights + >>> data = {'feature1': np.array([1, 2, 3]), 'feature2': np.array([4, 5, 6])} + >>> label = {'class': 0.0, 'instance': 0.1} + >>> weight = {'weight1': 0.5, 'weight2': 0.5} + >>> # Apply the transform function to the data, labels, and weights using the FunctionalTransform instance + >>> transformed_data = transform(data, label, weight) + >>> print(transformed_data) + ({'feature1': array([2, 4, 6]), 'feature2': array([ 8, 10, 12])}, {'class': 1.0, 'instance': 1.1}, {'weight1': 0.05, 'weight2': 0.05}) + """ + + def __init__( + self, + transform_func: Callable, + ): + self.transform_func = transform_func + + def __call__( + self, *data: Tuple[Dict[str, np.ndarray], ...] + ) -> Tuple[Dict[str, np.ndarray], ...]: + data_dict, label_dict, weight_dict = data + data_dict_copy = {**data_dict} + label_dict_copy = {**label_dict} + weight_dict_copy = {**weight_dict} if weight_dict is not None else {} + return self.transform_func(data_dict_copy, label_dict_copy, weight_dict_copy) diff --git a/ppsci/equation/__init__.py b/ppsci/equation/__init__.py index bcffef4060..cd52c73955 100644 --- a/ppsci/equation/__init__.py +++ b/ppsci/equation/__init__.py @@ -1,76 +1,76 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy - -from ppsci.equation.fpde import FractionalPoisson -from ppsci.equation.ide import Volterra -from ppsci.equation.pde import DETACH_FUNC_NAME -from ppsci.equation.pde import NLSMB -from ppsci.equation.pde import PDE -from ppsci.equation.pde import AllenCahn -from ppsci.equation.pde import Biharmonic -from ppsci.equation.pde import HeatExchanger -from ppsci.equation.pde import Helmholtz -from ppsci.equation.pde import Laplace -from ppsci.equation.pde import LinearElasticity -from ppsci.equation.pde import NavierStokes -from ppsci.equation.pde import NormalDotVec -from ppsci.equation.pde import Poisson -from ppsci.equation.pde import Vibration -from ppsci.utils import logger -from ppsci.utils import misc - -__all__ = [ - "PDE", - "DETACH_FUNC_NAME", - "AllenCahn", - "Biharmonic", - "HeatExchanger", - "Helmholtz", - "Laplace", - "LinearElasticity", - "NavierStokes", - "NormalDotVec", - "Poisson", - "Vibration", - "Volterra", - "NLSMB", - "FractionalPoisson", - "build_equation", -] - - -def build_equation(cfg): - """Build equation(s) - - Args: - cfg (List[DictConfig]): Equation(s) config list. - - Returns: - Dict[str, Equation]: Equation(s) in dict. - """ - if cfg is None: - return None - cfg = copy.deepcopy(cfg) - eq_dict = misc.PrettyOrderedDict() - for _item in cfg: - eq_cls = next(iter(_item.keys())) - eq_cfg = _item[eq_cls] - eq_name = eq_cfg.pop("name", eq_cls) - eq_dict[eq_name] = eval(eq_cls)(**eq_cfg) - - logger.debug(str(eq_dict[eq_name])) - - return eq_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
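build_equation in this file instantiates equation classes from a config list by calling eval() on the class name. Below is a hedged, self-contained sketch of the same config-driven dispatch using an explicit registry instead of eval; DummyPoisson, EQUATION_REGISTRY, and build_equation_sketch are made-up names for illustration, not ppsci API.

import copy
from typing import Any, Callable, Dict, List


class DummyPoisson:
    # Stand-in equation class; the real library provides ppsci.equation.Poisson.
    def __init__(self, dim: int):
        self.dim = dim


EQUATION_REGISTRY: Dict[str, Callable[..., Any]] = {"Poisson": DummyPoisson}


def build_equation_sketch(cfg: List[Dict[str, Dict[str, Any]]]) -> Dict[str, Any]:
    cfg = copy.deepcopy(cfg)
    eq_dict = {}
    for item in cfg:
        eq_cls = next(iter(item.keys()))      # the single key is the class name
        eq_cfg = dict(item[eq_cls])           # remaining entries are constructor kwargs
        eq_name = eq_cfg.pop("name", eq_cls)  # optional custom name for the dict key
        eq_dict[eq_name] = EQUATION_REGISTRY[eq_cls](**eq_cfg)
    return eq_dict


print(build_equation_sketch([{"Poisson": {"dim": 2}}]))

A registry keeps the lookup explicit and avoids eval on config strings; the library's eval-based version is shorter but requires each equation class to be imported into this module's namespace.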
+ +import copy + +from ppsci.equation.fpde import FractionalPoisson +from ppsci.equation.ide import Volterra +from ppsci.equation.pde import DETACH_FUNC_NAME +from ppsci.equation.pde import NLSMB +from ppsci.equation.pde import PDE +from ppsci.equation.pde import AllenCahn +from ppsci.equation.pde import Biharmonic +from ppsci.equation.pde import HeatExchanger +from ppsci.equation.pde import Helmholtz +from ppsci.equation.pde import Laplace +from ppsci.equation.pde import LinearElasticity +from ppsci.equation.pde import NavierStokes +from ppsci.equation.pde import NormalDotVec +from ppsci.equation.pde import Poisson +from ppsci.equation.pde import Vibration +from ppsci.utils import logger +from ppsci.utils import misc + +__all__ = [ + "PDE", + "DETACH_FUNC_NAME", + "AllenCahn", + "Biharmonic", + "HeatExchanger", + "Helmholtz", + "Laplace", + "LinearElasticity", + "NavierStokes", + "NormalDotVec", + "Poisson", + "Vibration", + "Volterra", + "NLSMB", + "FractionalPoisson", + "build_equation", +] + + +def build_equation(cfg): + """Build equation(s) + + Args: + cfg (List[DictConfig]): Equation(s) config list. + + Returns: + Dict[str, Equation]: Equation(s) in dict. + """ + if cfg is None: + return None + cfg = copy.deepcopy(cfg) + eq_dict = misc.PrettyOrderedDict() + for _item in cfg: + eq_cls = next(iter(_item.keys())) + eq_cfg = _item[eq_cls] + eq_name = eq_cfg.pop("name", eq_cls) + eq_dict[eq_name] = eval(eq_cls)(**eq_cfg) + + logger.debug(str(eq_dict[eq_name])) + + return eq_dict diff --git a/ppsci/equation/fpde/__init__.py b/ppsci/equation/fpde/__init__.py index 3e74ec56c7..4d91500065 100644 --- a/ppsci/equation/fpde/__init__.py +++ b/ppsci/equation/fpde/__init__.py @@ -1,19 +1,19 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ppsci.equation.fpde.fractional_poisson import FractionalPoisson - -__all__ = [ - "FractionalPoisson", -] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppsci.equation.fpde.fractional_poisson import FractionalPoisson + +__all__ = [ + "FractionalPoisson", +] diff --git a/ppsci/equation/fpde/fractional_poisson.py b/ppsci/equation/fpde/fractional_poisson.py index 01b6fc929f..c89e95d72b 100644 --- a/ppsci/equation/fpde/fractional_poisson.py +++ b/ppsci/equation/fpde/fractional_poisson.py @@ -1,196 +1,196 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import math -from typing import Tuple - -import numpy as np -import paddle -from paddle import sparse -from scipy import special - -from ppsci import geometry -from ppsci.equation.pde import PDE -from ppsci.utils import misc - - -class FractionalPoisson(PDE): - """(TODO)Docstring of this class will be refined in the future. - - Args: - alpha (float): Alpha. - geom (geometry.Geometry): Computation geometry. - resolution (Tuple[int, ...]): Resolution. - - Examples: - >>> import ppsci - >>> geom_disk = ppsci.geometry.Disk([0, 0], 1) - >>> ALPHA = 0.5 - >>> fpde = ppsci.equation.FractionalPoisson(ALPHA, geom_disk, [8, 100]) - """ - - dtype = paddle.get_default_dtype() - - def __init__( - self, alpha: float, geom: geometry.Geometry, resolution: Tuple[int, ...] - ): - super().__init__() - self.alpha = alpha - self.geom = geom - self.resolution = resolution - self._w_init = self._init_weights() - - def compute_fpde_func(out): - x = paddle.concat((out["x"], out["y"]), axis=1) - y = out["u"] - indices, values, shape = self.int_mat - int_mat = sparse.sparse_coo_tensor( - [[p[0] for p in indices], [p[1] for p in indices]], - values, - shape, - stop_gradient=False, - ) - lhs = sparse.matmul(int_mat, y) - lhs = lhs[:, 0] - lhs *= ( - special.gamma((1 - self.alpha) / 2) - * special.gamma((2 + self.alpha) / 2) - / (2 * np.pi**1.5) - ) - x = x[: paddle.numel(lhs)] - rhs = ( - 2**self.alpha - * special.gamma(2 + self.alpha / 2) - * special.gamma(1 + self.alpha / 2) - * (1 - (1 + self.alpha / 2) * paddle.sum(x**2, axis=1)) - ) - res = lhs - rhs - return res - - self.add_equation("fpde", compute_fpde_func) - - def _init_weights(self): - n = self._dynamic_dist2npts(self.geom.diam) + 1 - w = [1.0] - for j in range(1, n): - w.append(w[-1] * (j - 1 - self.alpha) / j) - return np.array(w, dtype=self.dtype) - - def get_x(self, x_f): - if hasattr(self, "train_x"): - return self.train_x - - self.x0 = x_f - if np.any(self.geom.on_boundary(self.x0)): - raise ValueError("x0 contains boundary points.") - - if self.geom.ndim == 1: - dirns, dirn_w = [-1, 1], [1, 1] - elif self.geom.ndim == 2: - gauss_x, gauss_w = np.polynomial.legendre.leggauss(self.resolution[0]) - gauss_x, gauss_w = gauss_x.astype(self.dtype), gauss_w.astype(self.dtype) - thetas = np.pi * gauss_x + np.pi - dirns = np.vstack((np.cos(thetas), np.sin(thetas))).T - dirn_w = np.pi * gauss_w - elif self.geom.ndim == 3: - gauss_x, gauss_w = np.polynomial.legendre.leggauss(max(self.resolution[:2])) - gauss_x, gauss_w = gauss_x.astype(self.dtype), gauss_w.astype(self.dtype) - thetas = (np.pi * gauss_x[: self.resolution[0]] + np.pi) / 2 - phis = np.pi * gauss_x[: self.resolution[1]] + np.pi - dirns, dirn_w = [], [] - for i in range(self.resolution[0]): - for j in range(self.resolution[1]): - dirns.append( - [ - np.sin(thetas[i]) * np.cos(phis[j]), - np.sin(thetas[i]) * np.sin(phis[j]), - np.cos(thetas[i]), - ] - ) - dirn_w.append(gauss_w[i] * gauss_w[j] * np.sin(thetas[i])) - dirn_w 
= np.pi**2 / 2 * np.array(dirn_w) - - x, self.w = [], [] - for x0i in self.x0: - xi = list( - map( - lambda dirn: self.background_points( - x0i, dirn, self._dynamic_dist2npts, 0 - ), - dirns, - ) - ) - wi = list( - map( - lambda i: dirn_w[i] - * np.linalg.norm(xi[i][1] - xi[i][0]) ** (-self.alpha) - * self.get_weight(len(xi[i]) - 1), - range(len(dirns)), - ) - ) - # first order - # xi, wi = zip(self.modify_first_order(xij, wij) for xij, wij in zip(xi, wi)) - xi, wi = zip(*map(self.modify_first_order, xi, wi)) - # second order - # xi, wi = zip(*map(self.modify_second_order, xi, wi)) - # third order - # xi, wi = zip(*map(self.modify_third_order, xi, wi)) - x.append(np.vstack(xi)) - self.w.append(np.hstack(wi)) - self.x = np.vstack([self.x0] + x) - self.int_mat = self._get_int_matrix(self.x0) - self.train_x = misc.convert_to_dict(self.x, ("x", "y")) - return self.train_x - - def get_weight(self, n): - return self._w_init[: n + 1] - - def background_points(self, x, dirn, dist2npt, shift): - dirn = dirn / np.linalg.norm(dirn) - dx = self.distance2boundary_unitdirn(x, -dirn) - n = max(dist2npt(dx), 1) - h = dx / n - pts = x - np.arange(-shift, n - shift + 1, dtype=self.dtype)[:, None] * h * dirn - return pts - - def distance2boundary_unitdirn(self, x, dirn): - # https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection - xc = x - self.geom.center - xc = xc - ad = np.dot(xc, dirn) - return ( - -ad + (ad**2 - np.sum(xc * xc, axis=-1) + self.geom.radius**2) ** 0.5 - ).astype(self.dtype) - - def modify_first_order(self, x, w): - x = np.vstack(([2 * x[0] - x[1]], x[:-1])) - if not self.geom.is_inside(x[0:1])[0]: - return x[1:], w[1:] - return x, w - - def _dynamic_dist2npts(self, dx): - return int(math.ceil(self.resolution[-1] * dx)) - - def _get_int_matrix(self, x: np.ndarray) -> np.ndarray: - dense_shape = (x.shape[0], self.x.shape[0]) - indices, values = [], [] - beg = x.shape[0] - for i in range(x.shape[0]): - for _ in range(self.w[i].shape[0]): - indices.append([i, beg]) - beg += 1 - values = np.hstack((values, self.w[i])) - return indices, values.astype(self.dtype), dense_shape +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import math +from typing import Tuple + +import numpy as np +import paddle +from paddle import sparse +from scipy import special + +from ppsci import geometry +from ppsci.equation.pde import PDE +from ppsci.utils import misc + + +class FractionalPoisson(PDE): + """(TODO)Docstring of this class will be refined in the future. + + Args: + alpha (float): Alpha. + geom (geometry.Geometry): Computation geometry. + resolution (Tuple[int, ...]): Resolution. + + Examples: + >>> import ppsci + >>> geom_disk = ppsci.geometry.Disk([0, 0], 1) + >>> ALPHA = 0.5 + >>> fpde = ppsci.equation.FractionalPoisson(ALPHA, geom_disk, [8, 100]) + """ + + dtype = paddle.get_default_dtype() + + def __init__( + self, alpha: float, geom: geometry.Geometry, resolution: Tuple[int, ...] 
+ ): + super().__init__() + self.alpha = alpha + self.geom = geom + self.resolution = resolution + self._w_init = self._init_weights() + + def compute_fpde_func(out): + x = paddle.concat((out["x"], out["y"]), axis=1) + y = out["u"] + indices, values, shape = self.int_mat + int_mat = sparse.sparse_coo_tensor( + [[p[0] for p in indices], [p[1] for p in indices]], + values, + shape, + stop_gradient=False, + ) + lhs = sparse.matmul(int_mat, y) + lhs = lhs[:, 0] + lhs *= ( + special.gamma((1 - self.alpha) / 2) + * special.gamma((2 + self.alpha) / 2) + / (2 * np.pi**1.5) + ) + x = x[: paddle.numel(lhs)] + rhs = ( + 2**self.alpha + * special.gamma(2 + self.alpha / 2) + * special.gamma(1 + self.alpha / 2) + * (1 - (1 + self.alpha / 2) * paddle.sum(x**2, axis=1)) + ) + res = lhs - rhs + return res + + self.add_equation("fpde", compute_fpde_func) + + def _init_weights(self): + n = self._dynamic_dist2npts(self.geom.diam) + 1 + w = [1.0] + for j in range(1, n): + w.append(w[-1] * (j - 1 - self.alpha) / j) + return np.array(w, dtype=self.dtype) + + def get_x(self, x_f): + if hasattr(self, "train_x"): + return self.train_x + + self.x0 = x_f + if np.any(self.geom.on_boundary(self.x0)): + raise ValueError("x0 contains boundary points.") + + if self.geom.ndim == 1: + dirns, dirn_w = [-1, 1], [1, 1] + elif self.geom.ndim == 2: + gauss_x, gauss_w = np.polynomial.legendre.leggauss(self.resolution[0]) + gauss_x, gauss_w = gauss_x.astype(self.dtype), gauss_w.astype(self.dtype) + thetas = np.pi * gauss_x + np.pi + dirns = np.vstack((np.cos(thetas), np.sin(thetas))).T + dirn_w = np.pi * gauss_w + elif self.geom.ndim == 3: + gauss_x, gauss_w = np.polynomial.legendre.leggauss(max(self.resolution[:2])) + gauss_x, gauss_w = gauss_x.astype(self.dtype), gauss_w.astype(self.dtype) + thetas = (np.pi * gauss_x[: self.resolution[0]] + np.pi) / 2 + phis = np.pi * gauss_x[: self.resolution[1]] + np.pi + dirns, dirn_w = [], [] + for i in range(self.resolution[0]): + for j in range(self.resolution[1]): + dirns.append( + [ + np.sin(thetas[i]) * np.cos(phis[j]), + np.sin(thetas[i]) * np.sin(phis[j]), + np.cos(thetas[i]), + ] + ) + dirn_w.append(gauss_w[i] * gauss_w[j] * np.sin(thetas[i])) + dirn_w = np.pi**2 / 2 * np.array(dirn_w) + + x, self.w = [], [] + for x0i in self.x0: + xi = list( + map( + lambda dirn: self.background_points( + x0i, dirn, self._dynamic_dist2npts, 0 + ), + dirns, + ) + ) + wi = list( + map( + lambda i: dirn_w[i] + * np.linalg.norm(xi[i][1] - xi[i][0]) ** (-self.alpha) + * self.get_weight(len(xi[i]) - 1), + range(len(dirns)), + ) + ) + # first order + # xi, wi = zip(self.modify_first_order(xij, wij) for xij, wij in zip(xi, wi)) + xi, wi = zip(*map(self.modify_first_order, xi, wi)) + # second order + # xi, wi = zip(*map(self.modify_second_order, xi, wi)) + # third order + # xi, wi = zip(*map(self.modify_third_order, xi, wi)) + x.append(np.vstack(xi)) + self.w.append(np.hstack(wi)) + self.x = np.vstack([self.x0] + x) + self.int_mat = self._get_int_matrix(self.x0) + self.train_x = misc.convert_to_dict(self.x, ("x", "y")) + return self.train_x + + def get_weight(self, n): + return self._w_init[: n + 1] + + def background_points(self, x, dirn, dist2npt, shift): + dirn = dirn / np.linalg.norm(dirn) + dx = self.distance2boundary_unitdirn(x, -dirn) + n = max(dist2npt(dx), 1) + h = dx / n + pts = x - np.arange(-shift, n - shift + 1, dtype=self.dtype)[:, None] * h * dirn + return pts + + def distance2boundary_unitdirn(self, x, dirn): + # https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection + xc = x - 
self.geom.center + xc = xc + ad = np.dot(xc, dirn) + return ( + -ad + (ad**2 - np.sum(xc * xc, axis=-1) + self.geom.radius**2) ** 0.5 + ).astype(self.dtype) + + def modify_first_order(self, x, w): + x = np.vstack(([2 * x[0] - x[1]], x[:-1])) + if not self.geom.is_inside(x[0:1])[0]: + return x[1:], w[1:] + return x, w + + def _dynamic_dist2npts(self, dx): + return int(math.ceil(self.resolution[-1] * dx)) + + def _get_int_matrix(self, x: np.ndarray) -> np.ndarray: + dense_shape = (x.shape[0], self.x.shape[0]) + indices, values = [], [] + beg = x.shape[0] + for i in range(x.shape[0]): + for _ in range(self.w[i].shape[0]): + indices.append([i, beg]) + beg += 1 + values = np.hstack((values, self.w[i])) + return indices, values.astype(self.dtype), dense_shape diff --git a/ppsci/equation/ide/__init__.py b/ppsci/equation/ide/__init__.py index 4d4cab56cb..2e8fec2f1a 100644 --- a/ppsci/equation/ide/__init__.py +++ b/ppsci/equation/ide/__init__.py @@ -1,19 +1,19 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ppsci.equation.ide.volterra import Volterra - -__all__ = [ - "Volterra", -] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppsci.equation.ide.volterra import Volterra + +__all__ = [ + "Volterra", +] diff --git a/ppsci/equation/ide/volterra.py b/ppsci/equation/ide/volterra.py index 77fb6f3173..4b35b945c0 100644 --- a/ppsci/equation/ide/volterra.py +++ b/ppsci/equation/ide/volterra.py @@ -1,127 +1,127 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable - -import numpy as np -import paddle - -from ppsci.equation.pde import PDE - - -class Volterra(PDE): - r"""A second kind of volterra integral equation with Gaussian quadrature algorithm. 
- - $$ - x(t) - f(t)=\int_a^t K(t, s) x(s) d s - $$ - - [Volterra integral equation](https://en.wikipedia.org/wiki/Volterra_integral_equation) - - [Gaussian quadrature](https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval) - - Args: - bound (float): Lower bound `a` for Volterra integral equation. - num_points (int): Sampled points in integral interval. - quad_deg (int): Number of quadrature. - kernel_func (Callable): Kernel func `K(t,s)`. - func (Callable): `x(t) - f(t)` in Volterra integral equation. - - Examples: - >>> import ppsci - >>> import numpy as np - >>> vol_eq = ppsci.equation.Volterra( - ... 0, 12, 20, lambda t, s: np.exp(s - t), lambda out: out["u"], - ... ) - """ - - dtype = paddle.get_default_dtype() - - def __init__( - self, - bound: float, - num_points: int, - quad_deg: int, - kernel_func: Callable, - func: Callable, - ): - super().__init__() - self.bound = bound - self.num_points = num_points - self.quad_deg = quad_deg - self.kernel_func = kernel_func - self.func = func - - self.quad_x, self.quad_w = np.polynomial.legendre.leggauss(quad_deg) - self.quad_x = self.quad_x.astype(Volterra.dtype).reshape([-1, 1]) # [Q, 1] - self.quad_x = paddle.to_tensor(self.quad_x) # [Q, 1] - - self.quad_w = self.quad_w.astype(Volterra.dtype) # [Q, ] - - def compute_volterra_func(out): - x, u = out["x"], out["u"] - lhs = self.func(out) - - int_mat = paddle.to_tensor(self._get_int_matrix(x), stop_gradient=False) - rhs = paddle.mm(int_mat, u) # (N, 1) - - volterra = lhs[: len(rhs)] - rhs - return volterra - - self.add_equation("volterra", compute_volterra_func) - - def get_quad_points(self, t: paddle.Tensor) -> paddle.Tensor: - """Scale and transform quad_x from [-1, 1] to range [a, b]. - - reference: https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval - - Args: - t (paddle.Tensor): Tensor array of upper bounds 't' for integral. - - Returns: - paddle.Tensor: Transformed points in desired range with shape of [N, Q]. - """ - a, b = self.bound, t - return ((b - a) / 2) @ self.quad_x.T + (b + a) / 2 - - def _get_quad_weights(self, t: float) -> np.ndarray: - """Scale weights to range according to given t and lower bound of integral. - - reference: https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval - - Args: - t (float): Array of upper bound 't' for integral. - - Returns: - np.ndarray: Transformed weights in desired range with shape of [Q, ]. - """ - a, b = self.bound, t - return (b - a) / 2 * self.quad_w - - def _get_int_matrix(self, x: np.ndarray) -> np.ndarray: - int_mat = np.zeros( - (self.num_points, self.num_points + (self.num_points * self.quad_deg)), - dtype=Volterra.dtype, - ) - for i in range(self.num_points): - xi = float(x[i]) - beg = self.num_points + self.quad_deg * i - end = self.num_points + self.quad_deg * (i + 1) - K = np.ravel( - self.kernel_func(np.full((self.quad_deg, 1), xi), x[beg:end].numpy()) - ) - int_mat[i, beg:end] = self._get_quad_weights(xi) * K - return int_mat +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable + +import numpy as np +import paddle + +from ppsci.equation.pde import PDE + + +class Volterra(PDE): + r"""A second kind of volterra integral equation with Gaussian quadrature algorithm. + + $$ + x(t) - f(t)=\int_a^t K(t, s) x(s) d s + $$ + + [Volterra integral equation](https://en.wikipedia.org/wiki/Volterra_integral_equation) + + [Gaussian quadrature](https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval) + + Args: + bound (float): Lower bound `a` for Volterra integral equation. + num_points (int): Sampled points in integral interval. + quad_deg (int): Number of quadrature. + kernel_func (Callable): Kernel func `K(t,s)`. + func (Callable): `x(t) - f(t)` in Volterra integral equation. + + Examples: + >>> import ppsci + >>> import numpy as np + >>> vol_eq = ppsci.equation.Volterra( + ... 0, 12, 20, lambda t, s: np.exp(s - t), lambda out: out["u"], + ... ) + """ + + dtype = paddle.get_default_dtype() + + def __init__( + self, + bound: float, + num_points: int, + quad_deg: int, + kernel_func: Callable, + func: Callable, + ): + super().__init__() + self.bound = bound + self.num_points = num_points + self.quad_deg = quad_deg + self.kernel_func = kernel_func + self.func = func + + self.quad_x, self.quad_w = np.polynomial.legendre.leggauss(quad_deg) + self.quad_x = self.quad_x.astype(Volterra.dtype).reshape([-1, 1]) # [Q, 1] + self.quad_x = paddle.to_tensor(self.quad_x) # [Q, 1] + + self.quad_w = self.quad_w.astype(Volterra.dtype) # [Q, ] + + def compute_volterra_func(out): + x, u = out["x"], out["u"] + lhs = self.func(out) + + int_mat = paddle.to_tensor(self._get_int_matrix(x), stop_gradient=False) + rhs = paddle.mm(int_mat, u) # (N, 1) + + volterra = lhs[: len(rhs)] - rhs + return volterra + + self.add_equation("volterra", compute_volterra_func) + + def get_quad_points(self, t: paddle.Tensor) -> paddle.Tensor: + """Scale and transform quad_x from [-1, 1] to range [a, b]. + + reference: https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval + + Args: + t (paddle.Tensor): Tensor array of upper bounds 't' for integral. + + Returns: + paddle.Tensor: Transformed points in desired range with shape of [N, Q]. + """ + a, b = self.bound, t + return ((b - a) / 2) @ self.quad_x.T + (b + a) / 2 + + def _get_quad_weights(self, t: float) -> np.ndarray: + """Scale weights to range according to given t and lower bound of integral. + + reference: https://en.wikipedia.org/wiki/Gaussian_quadrature#Change_of_interval + + Args: + t (float): Array of upper bound 't' for integral. + + Returns: + np.ndarray: Transformed weights in desired range with shape of [Q, ]. 
+ """ + a, b = self.bound, t + return (b - a) / 2 * self.quad_w + + def _get_int_matrix(self, x: np.ndarray) -> np.ndarray: + int_mat = np.zeros( + (self.num_points, self.num_points + (self.num_points * self.quad_deg)), + dtype=Volterra.dtype, + ) + for i in range(self.num_points): + xi = float(x[i]) + beg = self.num_points + self.quad_deg * i + end = self.num_points + self.quad_deg * (i + 1) + K = np.ravel( + self.kernel_func(np.full((self.quad_deg, 1), xi), x[beg:end].numpy()) + ) + int_mat[i, beg:end] = self._get_quad_weights(xi) * K + return int_mat diff --git a/ppsci/equation/pde/__init__.py b/ppsci/equation/pde/__init__.py index 0dbcea2a8f..369074dbfd 100644 --- a/ppsci/equation/pde/__init__.py +++ b/ppsci/equation/pde/__init__.py @@ -1,43 +1,43 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ppsci.equation.pde.allen_cahn import AllenCahn -from ppsci.equation.pde.base import DETACH_FUNC_NAME -from ppsci.equation.pde.base import PDE -from ppsci.equation.pde.biharmonic import Biharmonic -from ppsci.equation.pde.heat_exchanger import HeatExchanger -from ppsci.equation.pde.helmholtz import Helmholtz -from ppsci.equation.pde.laplace import Laplace -from ppsci.equation.pde.linear_elasticity import LinearElasticity -from ppsci.equation.pde.navier_stokes import NavierStokes -from ppsci.equation.pde.nls_m_b import NLSMB -from ppsci.equation.pde.normal_dot_vec import NormalDotVec -from ppsci.equation.pde.poisson import Poisson -from ppsci.equation.pde.viv import Vibration - -__all__ = [ - "PDE", - "DETACH_FUNC_NAME", - "AllenCahn", - "Biharmonic", - "HeatExchanger", - "Helmholtz", - "Laplace", - "LinearElasticity", - "NavierStokes", - "NLSMB", - "NormalDotVec", - "Poisson", - "Vibration", -] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from ppsci.equation.pde.allen_cahn import AllenCahn +from ppsci.equation.pde.base import DETACH_FUNC_NAME +from ppsci.equation.pde.base import PDE +from ppsci.equation.pde.biharmonic import Biharmonic +from ppsci.equation.pde.heat_exchanger import HeatExchanger +from ppsci.equation.pde.helmholtz import Helmholtz +from ppsci.equation.pde.laplace import Laplace +from ppsci.equation.pde.linear_elasticity import LinearElasticity +from ppsci.equation.pde.navier_stokes import NavierStokes +from ppsci.equation.pde.nls_m_b import NLSMB +from ppsci.equation.pde.normal_dot_vec import NormalDotVec +from ppsci.equation.pde.poisson import Poisson +from ppsci.equation.pde.viv import Vibration + +__all__ = [ + "PDE", + "DETACH_FUNC_NAME", + "AllenCahn", + "Biharmonic", + "HeatExchanger", + "Helmholtz", + "Laplace", + "LinearElasticity", + "NavierStokes", + "NLSMB", + "NormalDotVec", + "Poisson", + "Vibration", +] diff --git a/ppsci/equation/pde/allen_cahn.py b/ppsci/equation/pde/allen_cahn.py index 44e0ec899f..f37142310c 100644 --- a/ppsci/equation/pde/allen_cahn.py +++ b/ppsci/equation/pde/allen_cahn.py @@ -1,64 +1,64 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Optional -from typing import Tuple - -from ppsci.autodiff import jacobian -from ppsci.equation.pde import base - - -class AllenCahn(base.PDE): - r"""Class for Allen-Cahn equation. - - $$ - \dfrac{\partial u}{\partial t} - \epsilon^2 \Delta u + 5u^3 - 5u = 0 - $$ - - Args: - eps (float): Represents the characteristicscale of interfacial width, - influencing the thickness and dynamics of phase boundaries. - detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.AllenCahn(eps=0.01) - """ - - def __init__( - self, - eps: float, - detach_keys: Optional[Tuple[str, ...]] = None, - ): - super().__init__() - self.detach_keys = detach_keys - self.eps = eps - # t, x = self.create_symbols("t x") - # invars = (t, x, ) - # u = self.create_function("u", invars) - # allen_cahn = u.diff(t) + 5 * u**3 - 5 * u - 0.0001 * u.diff(x, 2) - - # TODO: Pow(u,3) seems cause slightly larger L2 error than multiply(u*u*u) - def allen_cahn(out): - t, x = out["t"], out["x"] - u = out["u"] - u__t, u__x = jacobian(u, [t, x]) - u__x__x = jacobian(u__x, x) - - return u__t - (self.eps**2) * u__x__x + 5 * u * u * u - 5 * u - - self.add_equation("allen_cahn", allen_cahn) +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from typing import Tuple + +from ppsci.autodiff import jacobian +from ppsci.equation.pde import base + + +class AllenCahn(base.PDE): + r"""Class for Allen-Cahn equation. + + $$ + \dfrac{\partial u}{\partial t} - \epsilon^2 \Delta u + 5u^3 - 5u = 0 + $$ + + Args: + eps (float): Represents the characteristicscale of interfacial width, + influencing the thickness and dynamics of phase boundaries. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.AllenCahn(eps=0.01) + """ + + def __init__( + self, + eps: float, + detach_keys: Optional[Tuple[str, ...]] = None, + ): + super().__init__() + self.detach_keys = detach_keys + self.eps = eps + # t, x = self.create_symbols("t x") + # invars = (t, x, ) + # u = self.create_function("u", invars) + # allen_cahn = u.diff(t) + 5 * u**3 - 5 * u - 0.0001 * u.diff(x, 2) + + # TODO: Pow(u,3) seems cause slightly larger L2 error than multiply(u*u*u) + def allen_cahn(out): + t, x = out["t"], out["x"] + u = out["u"] + u__t, u__x = jacobian(u, [t, x]) + u__x__x = jacobian(u__x, x) + + return u__t - (self.eps**2) * u__x__x + 5 * u * u * u - 5 * u + + self.add_equation("allen_cahn", allen_cahn) diff --git a/ppsci/equation/pde/base.py b/ppsci/equation/pde/base.py index e7fb337819..f60f6640aa 100644 --- a/ppsci/equation/pde/base.py +++ b/ppsci/equation/pde/base.py @@ -1,243 +1,243 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional -from typing import Tuple -from typing import Union - -import paddle -import sympy as sp -from paddle import nn - -DETACH_FUNC_NAME = "detach" - - -class PDE: - """Base class for Partial Differential Equation.""" - - def __init__(self): - super().__init__() - self.equations: Dict[str, Union[Callable, sp.Basic]] = {} - # for PDE which has learnable parameter(s) - self.learnable_parameters = nn.ParameterList() - - self.detach_keys: Optional[Tuple[str, ...]] = None - - @staticmethod - def create_symbols( - symbol_str: str, - ) -> Union[sp.Symbol, Tuple[sp.Symbol, ...]]: - """Create symbolic variables. - - Args: - symbol_str (str): String contains symbols, such as "x", "x y z". - - Returns: - Union[sympy.Symbol, Tuple[sympy.Symbol, ...]]: Created symbol(s). 
- - Examples: - >>> import ppsci - >>> pde = ppsci.equation.PDE() - >>> symbol_x = pde.create_symbols('x') - >>> symbols_xyz = pde.create_symbols('x y z') - >>> print(symbol_x) - x - >>> print(symbols_xyz) - (x, y, z) - """ - return sp.symbols(symbol_str) - - def create_function(self, name: str, invars: Tuple[sp.Symbol, ...]) -> sp.Function: - """Create named function depending on given invars. - - Args: - name (str): Function name. such as "u", "v", and "f". - invars (Tuple[sympy.Symbol, ...]): List of independent variable of function. - - Returns: - sympy.Function: Named sympy function. - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.PDE() - >>> x, y, z = pde.create_symbols('x y z') - >>> u = pde.create_function('u', (x, y)) - >>> f = pde.create_function('f', (x, y, z)) - >>> print(u) - u(x, y) - >>> print(f) - f(x, y, z) - """ - expr = sp.Function(name)(*invars) - - return expr - - def _apply_detach(self): - """ - Wrap detached sub_expr into detach(sub_expr) to prevent gradient back-propagation, only for those items speicified in self.detach_keys. - - NOTE: This function is expected to be called after self.equations is ready in PDE.__init__. - - Examples: - >>> import ppsci - >>> ns = ppsci.equation.NavierStokes(1.0, 1.0, 2, False) - >>> print(ns) - NavierStokes - continuity: Derivative(u(x, y), x) + Derivative(v(x, y), y) - momentum_x: u(x, y)*Derivative(u(x, y), x) + v(x, y)*Derivative(u(x, y), y) + 1.0*Derivative(p(x, y), x) - 1.0*Derivative(u(x, y), (x, 2)) - 1.0*Derivative(u(x, y), (y, 2)) - momentum_y: u(x, y)*Derivative(v(x, y), x) + v(x, y)*Derivative(v(x, y), y) + 1.0*Derivative(p(x, y), y) - 1.0*Derivative(v(x, y), (x, 2)) - 1.0*Derivative(v(x, y), (y, 2)) - >>> detach_keys = ("u", "v__y") - >>> ns = ppsci.equation.NavierStokes(1.0, 1.0, 2, False, detach_keys=detach_keys) - >>> print(ns) - NavierStokes - continuity: detach(Derivative(v(x, y), y)) + Derivative(u(x, y), x) - momentum_x: detach(u(x, y))*Derivative(u(x, y), x) + v(x, y)*Derivative(u(x, y), y) + 1.0*Derivative(p(x, y), x) - 1.0*Derivative(u(x, y), (x, 2)) - 1.0*Derivative(u(x, y), (y, 2)) - momentum_y: detach(u(x, y))*Derivative(v(x, y), x) + detach(Derivative(v(x, y), y))*v(x, y) + 1.0*Derivative(p(x, y), y) - 1.0*Derivative(v(x, y), (x, 2)) - 1.0*Derivative(v(x, y), (y, 2)) - """ - if self.detach_keys is None: - return - - from copy import deepcopy - - from sympy.core.traversal import postorder_traversal - - from ppsci.utils.symbolic import _cvt_to_key - - for name, expr in self.equations.items(): - if not isinstance(expr, sp.Basic): - continue - # only process sympy expression - expr_ = deepcopy(expr) - for item in postorder_traversal(expr): - if _cvt_to_key(item) in self.detach_keys: - # inplace all related sub_expr into detach(sub_expr) - expr_ = expr_.replace(item, sp.Function(DETACH_FUNC_NAME)(item)) - - # remove all detach wrapper for more-than-once wrapped items to prevent duplicated wrapping - expr_ = expr_.replace( - sp.Function(DETACH_FUNC_NAME)( - sp.Function(DETACH_FUNC_NAME)(item) - ), - sp.Function(DETACH_FUNC_NAME)(item), - ) - - # remove unccessary detach wrapping for the first arg of Derivative - for item_ in list(postorder_traversal(expr_)): - if isinstance(item_, sp.Derivative): - if item_.args[0].name == DETACH_FUNC_NAME: - expr_ = expr_.replace( - item_, - sp.Derivative( - item_.args[0].args[0], *item_.args[1:] - ), - ) - - self.equations[name] = expr_ - - def add_equation(self, name: str, equation: Callable): - """Add an equation. 
- - Args: - name (str): Name of equation - equation (Callable): Computation function for equation. - - Examples: - >>> import ppsci - >>> import sympy - >>> pde = ppsci.equation.PDE() - >>> x, y = pde.create_symbols('x y') - >>> u = x**2 + y**2 - >>> equation = sympy.diff(u, x) + sympy.diff(u, y) - >>> pde.add_equation('linear_pde', equation) - >>> print(pde) - PDE - linear_pde: 2*x + 2*y - """ - self.equations.update({name: equation}) - - def parameters(self) -> List[paddle.Tensor]: - """Return learnable parameters contained in PDE. - - Returns: - List[Tensor]: A list of learnable parameters. - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.Vibration(2, -4, 0) - >>> print(pde.parameters()) - [Parameter containing: - Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=False, - -4.), Parameter containing: - Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=False, - 0.)] - """ - return self.learnable_parameters.parameters() - - def state_dict(self) -> Dict[str, paddle.Tensor]: - """Return named learnable parameters in dict. - - Returns: - Dict[str, Tensor]: A dict of states(str) and learnable parameters(Tensor). - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.Vibration(2, -4, 0) - >>> print(pde.state_dict()) - OrderedDict([('0', Parameter containing: - Tensor(shape=[], dtype=float64, place=Place(gpu:0), stop_gradient=False, - -4.)), ('1', Parameter containing: - Tensor(shape=[], dtype=float64, place=Place(gpu:0), stop_gradient=False, - 0.))]) - """ - return self.learnable_parameters.state_dict() - - def set_state_dict( - self, state_dict: Dict[str, paddle.Tensor] - ) -> Tuple[List[str], List[str]]: - """Set state dict from dict. - - Args: - state_dict (Dict[str, paddle.Tensor]): The state dict to be set. - - Returns: - Tuple[List[str], List[str]]: List of missing_keys and unexpected_keys. - Expected to be two empty tuples mostly. - - Examples: - >>> import paddle - >>> import ppsci - >>> paddle.set_default_dtype("float64") - >>> pde = ppsci.equation.Vibration(2, -4, 0) - >>> state = pde.state_dict() - >>> state['0'] = paddle.to_tensor(-3.1) - >>> pde.set_state_dict(state) - ([], []) - >>> print(state) - OrderedDict([('0', Tensor(shape=[], dtype=float64, place=Place(gpu:0), stop_gradient=True, - -3.10000000)), ('1', Parameter containing: - Tensor(shape=[], dtype=float64, place=Place(gpu:0), stop_gradient=False, - 0.))]) - """ - return self.learnable_parameters.set_state_dict(state_dict) - - def __str__(self): - return "\n".join( - [self.__class__.__name__] - + [f" {name}: {eq}" for name, eq in self.equations.items()] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
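The base.PDE interface restated above (create_symbols, create_function, add_equation and _apply_detach) is all that is needed to assemble a new symbolic residual. The sketch below defines a hypothetical 1D convection equation as a subclass; the Convection name and the residual u_t + c*u_x are illustrative assumptions, not part of ppsci.

import ppsci


class Convection(ppsci.equation.PDE):
    """Hypothetical 1D convection residual: du/dt + c * du/dx."""

    def __init__(self, c: float, detach_keys=None):
        super().__init__()
        self.detach_keys = detach_keys
        t, x = self.create_symbols("t x")
        u = self.create_function("u", (t, x))
        self.add_equation("convection", u.diff(t) + c * u.diff(x))
        self._apply_detach()


pde = Convection(1.5)
print(pde)  # prints the class name followed by the assembled sympy expression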
+ +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +import paddle +import sympy as sp +from paddle import nn + +DETACH_FUNC_NAME = "detach" + + +class PDE: + """Base class for Partial Differential Equation.""" + + def __init__(self): + super().__init__() + self.equations: Dict[str, Union[Callable, sp.Basic]] = {} + # for PDE which has learnable parameter(s) + self.learnable_parameters = nn.ParameterList() + + self.detach_keys: Optional[Tuple[str, ...]] = None + + @staticmethod + def create_symbols( + symbol_str: str, + ) -> Union[sp.Symbol, Tuple[sp.Symbol, ...]]: + """Create symbolic variables. + + Args: + symbol_str (str): String contains symbols, such as "x", "x y z". + + Returns: + Union[sympy.Symbol, Tuple[sympy.Symbol, ...]]: Created symbol(s). + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.PDE() + >>> symbol_x = pde.create_symbols('x') + >>> symbols_xyz = pde.create_symbols('x y z') + >>> print(symbol_x) + x + >>> print(symbols_xyz) + (x, y, z) + """ + return sp.symbols(symbol_str) + + def create_function(self, name: str, invars: Tuple[sp.Symbol, ...]) -> sp.Function: + """Create named function depending on given invars. + + Args: + name (str): Function name. such as "u", "v", and "f". + invars (Tuple[sympy.Symbol, ...]): List of independent variable of function. + + Returns: + sympy.Function: Named sympy function. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.PDE() + >>> x, y, z = pde.create_symbols('x y z') + >>> u = pde.create_function('u', (x, y)) + >>> f = pde.create_function('f', (x, y, z)) + >>> print(u) + u(x, y) + >>> print(f) + f(x, y, z) + """ + expr = sp.Function(name)(*invars) + + return expr + + def _apply_detach(self): + """ + Wrap detached sub_expr into detach(sub_expr) to prevent gradient back-propagation, only for those items speicified in self.detach_keys. + + NOTE: This function is expected to be called after self.equations is ready in PDE.__init__. 
+ + Examples: + >>> import ppsci + >>> ns = ppsci.equation.NavierStokes(1.0, 1.0, 2, False) + >>> print(ns) + NavierStokes + continuity: Derivative(u(x, y), x) + Derivative(v(x, y), y) + momentum_x: u(x, y)*Derivative(u(x, y), x) + v(x, y)*Derivative(u(x, y), y) + 1.0*Derivative(p(x, y), x) - 1.0*Derivative(u(x, y), (x, 2)) - 1.0*Derivative(u(x, y), (y, 2)) + momentum_y: u(x, y)*Derivative(v(x, y), x) + v(x, y)*Derivative(v(x, y), y) + 1.0*Derivative(p(x, y), y) - 1.0*Derivative(v(x, y), (x, 2)) - 1.0*Derivative(v(x, y), (y, 2)) + >>> detach_keys = ("u", "v__y") + >>> ns = ppsci.equation.NavierStokes(1.0, 1.0, 2, False, detach_keys=detach_keys) + >>> print(ns) + NavierStokes + continuity: detach(Derivative(v(x, y), y)) + Derivative(u(x, y), x) + momentum_x: detach(u(x, y))*Derivative(u(x, y), x) + v(x, y)*Derivative(u(x, y), y) + 1.0*Derivative(p(x, y), x) - 1.0*Derivative(u(x, y), (x, 2)) - 1.0*Derivative(u(x, y), (y, 2)) + momentum_y: detach(u(x, y))*Derivative(v(x, y), x) + detach(Derivative(v(x, y), y))*v(x, y) + 1.0*Derivative(p(x, y), y) - 1.0*Derivative(v(x, y), (x, 2)) - 1.0*Derivative(v(x, y), (y, 2)) + """ + if self.detach_keys is None: + return + + from copy import deepcopy + + from sympy.core.traversal import postorder_traversal + + from ppsci.utils.symbolic import _cvt_to_key + + for name, expr in self.equations.items(): + if not isinstance(expr, sp.Basic): + continue + # only process sympy expression + expr_ = deepcopy(expr) + for item in postorder_traversal(expr): + if _cvt_to_key(item) in self.detach_keys: + # inplace all related sub_expr into detach(sub_expr) + expr_ = expr_.replace(item, sp.Function(DETACH_FUNC_NAME)(item)) + + # remove all detach wrapper for more-than-once wrapped items to prevent duplicated wrapping + expr_ = expr_.replace( + sp.Function(DETACH_FUNC_NAME)( + sp.Function(DETACH_FUNC_NAME)(item) + ), + sp.Function(DETACH_FUNC_NAME)(item), + ) + + # remove unccessary detach wrapping for the first arg of Derivative + for item_ in list(postorder_traversal(expr_)): + if isinstance(item_, sp.Derivative): + if item_.args[0].name == DETACH_FUNC_NAME: + expr_ = expr_.replace( + item_, + sp.Derivative( + item_.args[0].args[0], *item_.args[1:] + ), + ) + + self.equations[name] = expr_ + + def add_equation(self, name: str, equation: Callable): + """Add an equation. + + Args: + name (str): Name of equation + equation (Callable): Computation function for equation. + + Examples: + >>> import ppsci + >>> import sympy + >>> pde = ppsci.equation.PDE() + >>> x, y = pde.create_symbols('x y') + >>> u = x**2 + y**2 + >>> equation = sympy.diff(u, x) + sympy.diff(u, y) + >>> pde.add_equation('linear_pde', equation) + >>> print(pde) + PDE + linear_pde: 2*x + 2*y + """ + self.equations.update({name: equation}) + + def parameters(self) -> List[paddle.Tensor]: + """Return learnable parameters contained in PDE. + + Returns: + List[Tensor]: A list of learnable parameters. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.Vibration(2, -4, 0) + >>> print(pde.parameters()) + [Parameter containing: + Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=False, + -4.), Parameter containing: + Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=False, + 0.)] + """ + return self.learnable_parameters.parameters() + + def state_dict(self) -> Dict[str, paddle.Tensor]: + """Return named learnable parameters in dict. + + Returns: + Dict[str, Tensor]: A dict of states(str) and learnable parameters(Tensor). 
+ + Examples: + >>> import ppsci + >>> pde = ppsci.equation.Vibration(2, -4, 0) + >>> print(pde.state_dict()) + OrderedDict([('0', Parameter containing: + Tensor(shape=[], dtype=float64, place=Place(gpu:0), stop_gradient=False, + -4.)), ('1', Parameter containing: + Tensor(shape=[], dtype=float64, place=Place(gpu:0), stop_gradient=False, + 0.))]) + """ + return self.learnable_parameters.state_dict() + + def set_state_dict( + self, state_dict: Dict[str, paddle.Tensor] + ) -> Tuple[List[str], List[str]]: + """Set state dict from dict. + + Args: + state_dict (Dict[str, paddle.Tensor]): The state dict to be set. + + Returns: + Tuple[List[str], List[str]]: List of missing_keys and unexpected_keys. + Expected to be two empty tuples mostly. + + Examples: + >>> import paddle + >>> import ppsci + >>> paddle.set_default_dtype("float64") + >>> pde = ppsci.equation.Vibration(2, -4, 0) + >>> state = pde.state_dict() + >>> state['0'] = paddle.to_tensor(-3.1) + >>> pde.set_state_dict(state) + ([], []) + >>> print(state) + OrderedDict([('0', Tensor(shape=[], dtype=float64, place=Place(gpu:0), stop_gradient=True, + -3.10000000)), ('1', Parameter containing: + Tensor(shape=[], dtype=float64, place=Place(gpu:0), stop_gradient=False, + 0.))]) + """ + return self.learnable_parameters.set_state_dict(state_dict) + + def __str__(self): + return "\n".join( + [self.__class__.__name__] + + [f" {name}: {eq}" for name, eq in self.equations.items()] + ) diff --git a/ppsci/equation/pde/biharmonic.py b/ppsci/equation/pde/biharmonic.py index 933888ac60..59980f6b19 100644 --- a/ppsci/equation/pde/biharmonic.py +++ b/ppsci/equation/pde/biharmonic.py @@ -1,74 +1,74 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Optional -from typing import Tuple -from typing import Union - -import sympy - -from ppsci.equation.pde import base - - -class Biharmonic(base.PDE): - r"""Class for biharmonic equation with supporting special load. - - $$ - \nabla^4 \varphi = \dfrac{q}{D} - $$ - - Args: - dim (int): Dimension of equation. - q (Union[float, str, sympy.Basic]): Load. - D (Union[float, str]): Rigidity. - detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. 
- - Examples: - >>> import ppsci - >>> pde = ppsci.equation.Biharmonic(2, -1.0, 1.0) - """ - - def __init__( - self, - dim: int, - q: Union[float, str, sympy.Basic], - D: Union[float, str], - detach_keys: Optional[Tuple[str, ...]] = None, - ): - super().__init__() - self.detach_keys = detach_keys - - invars = self.create_symbols("x y z")[:dim] - u = self.create_function("u", invars) - - if isinstance(q, str): - q = self.create_function("q", invars) - if isinstance(D, str): - D = self.create_function("D", invars) - - self.dim = dim - self.q = q - self.D = D - - biharmonic = -self.q / self.D - for invar_i in invars: - for invar_j in invars: - biharmonic += u.diff(invar_i, 2).diff(invar_j, 2) - - self.add_equation("biharmonic", biharmonic) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from typing import Tuple +from typing import Union + +import sympy + +from ppsci.equation.pde import base + + +class Biharmonic(base.PDE): + r"""Class for biharmonic equation with supporting special load. + + $$ + \nabla^4 \varphi = \dfrac{q}{D} + $$ + + Args: + dim (int): Dimension of equation. + q (Union[float, str, sympy.Basic]): Load. + D (Union[float, str]): Rigidity. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.Biharmonic(2, -1.0, 1.0) + """ + + def __init__( + self, + dim: int, + q: Union[float, str, sympy.Basic], + D: Union[float, str], + detach_keys: Optional[Tuple[str, ...]] = None, + ): + super().__init__() + self.detach_keys = detach_keys + + invars = self.create_symbols("x y z")[:dim] + u = self.create_function("u", invars) + + if isinstance(q, str): + q = self.create_function("q", invars) + if isinstance(D, str): + D = self.create_function("D", invars) + + self.dim = dim + self.q = q + self.D = D + + biharmonic = -self.q / self.D + for invar_i in invars: + for invar_j in invars: + biharmonic += u.diff(invar_i, 2).diff(invar_j, 2) + + self.add_equation("biharmonic", biharmonic) + + self._apply_detach() diff --git a/ppsci/equation/pde/heat_exchanger.py b/ppsci/equation/pde/heat_exchanger.py index c2e0107ff3..f547e1cd97 100644 --- a/ppsci/equation/pde/heat_exchanger.py +++ b/ppsci/equation/pde/heat_exchanger.py @@ -1,94 +1,94 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
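One behavior of biharmonic.py above worth noting: when q or D is given as a string, it is promoted via create_function to an unknown sympy function of the spatial coordinates, so a spatially varying load or rigidity can be resolved by the model instead of being a constant. A brief sketch, with arbitrary parameter values:

import ppsci

# constant load and rigidity: the residual carries the plain number -q/D
pde_const = ppsci.equation.Biharmonic(dim=2, q=-1.0, D=1.0)

# string load: "q" becomes the symbolic function q(x, y) inside the residual
pde_sym = ppsci.equation.Biharmonic(dim=2, q="q", D=1.0)

print(pde_const)
print(pde_sym)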
- -from __future__ import annotations - -from typing import Union - -from ppsci.equation.pde import base - - -class HeatExchanger(base.PDE): - r"""Class for heat exchanger equation. - - $$ - \begin{aligned} - & L\left(\frac{q_m c_p}{v}\right)_{\mathrm{c}} \frac{\partial T_{\mathrm{c}}}{\partial \tau}-L\left(q_m c_p\right)_{\mathrm{c}} \frac{\partial T_{\mathrm{c}}}{\partial x}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{c}}\left(T_{\mathrm{w}}-T_{\mathrm{c}}\right), \\ - & L\left(\frac{q_m c_p}{v}\right)_{\mathrm{h}} \frac{\partial T_{\mathrm{h}}}{\partial \tau}+L\left(q_m c_p\right)_{\mathrm{h}} \frac{\partial T_{\mathrm{h}}}{\partial x}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{h}}\left(T_{\mathrm{w}}-T_{\mathrm{h}}\right), \\ - & \left(M c_p\right)_{\mathrm{w}} \frac{\partial T_{\mathrm{w}}}{\partial \tau}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{h}}\left(T_{\mathrm{h}}-T_{\mathrm{w}}\right)+\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{c}}\left(T_{\mathrm{c}}-T_{\mathrm{w}}\right). - \end{aligned} - $$ - - where: - - - $T$ is temperature, - - $q_m$ is mass flow rate, - - $c_p$ represents specific heat capacity, - - $v$ denotes flow velocity, - - $L$ stands for flow length, - - $\eta_{\mathrm{o}}$ signifies fin surface efficiency, - - $\alpha$ stands for heat transfer coefficient, - - $A$ indicates heat transfer area, - - $M$ represents the mass of the heat transfer structure, - - $\tau$ correspond to time, - - $x$ correspond flow direction, - - Subscripts $\mathrm{h}$, $\mathrm{c}$, and $\mathrm{w}$ denote the hot fluid side, cold fluid side, and heat transfer wall, respectively. - - Args: - alpha_h: $\frac{(\eta_o\alpha A)_h}{L(c_p)_h}$ - alpha_c: $\frac{(\eta_o\alpha A)_c}{L(c_p)_c}$ - v_h: $v_h$ - v_c: $v_c$ - w_h: $\frac{(\eta_o\alpha A)_h}{M(c_p)_w}$ - w_c: $\frac{(\eta_o\alpha A)_c}{M(c_p)_w}$ - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.HeatExchanger(1.0,1.0,1.0,1.0,1.0,1.0) - """ - - def __init__( - self, - alpha_h: Union[float, str], - alpha_c: Union[float, str], - v_h: Union[float, str], - v_c: Union[float, str], - w_h: Union[float, str], - w_c: Union[float, str], - ): - super().__init__() - x, t, qm_h, qm_c = self.create_symbols("x t qm_h qm_c") - - T_h = self.create_function("T_h", (x, t, qm_h)) - T_c = self.create_function("T_c", (x, t, qm_c)) - T_w = self.create_function("T_w", (x, t)) - - T_h_x = T_h.diff(x) - T_h_t = T_h.diff(t) - T_c_x = T_c.diff(x) - T_c_t = T_c.diff(t) - T_w_t = T_w.diff(t) - - beta_h = (alpha_h * v_h) / qm_h - beta_c = (alpha_c * v_c) / qm_c - - heat_boundary = T_h_t + v_h * T_h_x - beta_h * (T_w - T_h) - cold_boundary = T_c_t - v_c * T_c_x - beta_c * (T_w - T_c) - wall = T_w_t - w_h * (T_h - T_w) - w_c * (T_c - T_w) - - self.add_equation("heat_boundary", heat_boundary) - self.add_equation("cold_boundary", cold_boundary) - self.add_equation("wall", wall) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
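Because beta_h and beta_c in heat_exchanger.py above divide by the mass-flow symbols, qm_h and qm_c appear in the residuals as input variables alongside x and t. The short sketch below, with arbitrary coefficient values, instantiates the equation and lists the assembled residuals:

import ppsci

pde = ppsci.equation.HeatExchanger(
    alpha_h=1.0, alpha_c=1.0, v_h=1.0, v_c=1.0, w_h=1.0, w_c=1.0
)
print(list(pde.equations.keys()))  # ['heat_boundary', 'cold_boundary', 'wall']
print(pde)  # sympy residuals containing x, t, qm_h and qm_c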
+ +from __future__ import annotations + +from typing import Union + +from ppsci.equation.pde import base + + +class HeatExchanger(base.PDE): + r"""Class for heat exchanger equation. + + $$ + \begin{aligned} + & L\left(\frac{q_m c_p}{v}\right)_{\mathrm{c}} \frac{\partial T_{\mathrm{c}}}{\partial \tau}-L\left(q_m c_p\right)_{\mathrm{c}} \frac{\partial T_{\mathrm{c}}}{\partial x}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{c}}\left(T_{\mathrm{w}}-T_{\mathrm{c}}\right), \\ + & L\left(\frac{q_m c_p}{v}\right)_{\mathrm{h}} \frac{\partial T_{\mathrm{h}}}{\partial \tau}+L\left(q_m c_p\right)_{\mathrm{h}} \frac{\partial T_{\mathrm{h}}}{\partial x}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{h}}\left(T_{\mathrm{w}}-T_{\mathrm{h}}\right), \\ + & \left(M c_p\right)_{\mathrm{w}} \frac{\partial T_{\mathrm{w}}}{\partial \tau}=\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{h}}\left(T_{\mathrm{h}}-T_{\mathrm{w}}\right)+\left(\eta_{\mathrm{o}} \alpha A\right)_{\mathrm{c}}\left(T_{\mathrm{c}}-T_{\mathrm{w}}\right). + \end{aligned} + $$ + + where: + + - $T$ is temperature, + - $q_m$ is mass flow rate, + - $c_p$ represents specific heat capacity, + - $v$ denotes flow velocity, + - $L$ stands for flow length, + - $\eta_{\mathrm{o}}$ signifies fin surface efficiency, + - $\alpha$ stands for heat transfer coefficient, + - $A$ indicates heat transfer area, + - $M$ represents the mass of the heat transfer structure, + - $\tau$ correspond to time, + - $x$ correspond flow direction, + - Subscripts $\mathrm{h}$, $\mathrm{c}$, and $\mathrm{w}$ denote the hot fluid side, cold fluid side, and heat transfer wall, respectively. + + Args: + alpha_h: $\frac{(\eta_o\alpha A)_h}{L(c_p)_h}$ + alpha_c: $\frac{(\eta_o\alpha A)_c}{L(c_p)_c}$ + v_h: $v_h$ + v_c: $v_c$ + w_h: $\frac{(\eta_o\alpha A)_h}{M(c_p)_w}$ + w_c: $\frac{(\eta_o\alpha A)_c}{M(c_p)_w}$ + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.HeatExchanger(1.0,1.0,1.0,1.0,1.0,1.0) + """ + + def __init__( + self, + alpha_h: Union[float, str], + alpha_c: Union[float, str], + v_h: Union[float, str], + v_c: Union[float, str], + w_h: Union[float, str], + w_c: Union[float, str], + ): + super().__init__() + x, t, qm_h, qm_c = self.create_symbols("x t qm_h qm_c") + + T_h = self.create_function("T_h", (x, t, qm_h)) + T_c = self.create_function("T_c", (x, t, qm_c)) + T_w = self.create_function("T_w", (x, t)) + + T_h_x = T_h.diff(x) + T_h_t = T_h.diff(t) + T_c_x = T_c.diff(x) + T_c_t = T_c.diff(t) + T_w_t = T_w.diff(t) + + beta_h = (alpha_h * v_h) / qm_h + beta_c = (alpha_c * v_c) / qm_c + + heat_boundary = T_h_t + v_h * T_h_x - beta_h * (T_w - T_h) + cold_boundary = T_c_t - v_c * T_c_x - beta_c * (T_w - T_c) + wall = T_w_t - w_h * (T_h - T_w) - w_c * (T_c - T_w) + + self.add_equation("heat_boundary", heat_boundary) + self.add_equation("cold_boundary", cold_boundary) + self.add_equation("wall", wall) + + self._apply_detach() diff --git a/ppsci/equation/pde/helmholtz.py b/ppsci/equation/pde/helmholtz.py index e71fdbe983..8e9b2a50b4 100644 --- a/ppsci/equation/pde/helmholtz.py +++ b/ppsci/equation/pde/helmholtz.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -117,3 +118,100 @@ def helmholtz(data_dict: Dict[str, paddle.Tensor]) -> paddle.Tensor: self.add_equation("helmholtz", helmholtz) self._apply_detach() +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple + +import paddle + +from ppsci.equation.pde import base + + +def hvp_revrev(f: Callable, primals: Tuple[paddle.Tensor, ...]) -> paddle.Tensor: + """Compute the Hessian vector product of f with respect to primals using + double backward trick in reverse mode AD. + + Args: + f (Callable): Function to compute HVP. + primals (Tuple[paddle.Tensor, ...]): Input tensors. + + Returns: + paddle.Tensor: Hessian vector product of f with respect to primals. + """ + # TODO: Merge this option into ppsci.autodiff.ad + g = lambda primals: paddle.incubate.autograd.jvp(f, primals)[1] + tangents_out = paddle.incubate.autograd.jvp(g, primals)[1] + return tangents_out[0] + + +class Helmholtz(base.PDE): + r"""Class for helmholtz equation. + + $$ + \nabla^2 u + k^2 u = f + $$ + + $$ + \text{where } f \text{ is the source term}. + $$ + + Args: + dim (int): Dimension of equation. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.Helmholtz(2, -1.0, 1.0) + """ + + def __init__( + self, + dim: int, + k: float, + detach_keys: Optional[Tuple[str, ...]] = None, + ): + super().__init__() + self.dim = dim + self.k = k + self.detach_keys = detach_keys + + self.model: paddle.nn.Layer + + def helmholtz(data_dict: Dict[str, "paddle.Tensor"]): + x, y, z = ( + data_dict["x"], + data_dict["y"], + data_dict["z"], + ) + + # TODO: Hard code here, for hvp_revrev requires tuple input(s) but not dict + u__x__x = hvp_revrev(lambda x_: self.model.forward_tensor(x_, y, z), (x,)) + u__y__y = hvp_revrev(lambda y_: self.model.forward_tensor(x, y_, z), (y,)) + u__z__z = hvp_revrev(lambda z_: self.model.forward_tensor(x, y, z_), (z,)) + + out = (self.k**2) * data_dict["u"] + u__x__x + u__y__y + u__z__z + return out + + self.add_equation("helmholtz", helmholtz) + + self._apply_detach() +>>>>>>> Stashed changes diff --git a/ppsci/equation/pde/laplace.py b/ppsci/equation/pde/laplace.py index b99d7c8d9a..4d05bad257 100644 --- a/ppsci/equation/pde/laplace.py +++ b/ppsci/equation/pde/laplace.py @@ -1,55 +1,55 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
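The hvp_revrev helper added to helmholtz.py above obtains second derivatives by nesting paddle.incubate.autograd.jvp: the inner call produces a first-derivative function and the outer call differentiates it again. The stand-alone check below repeats that nesting on f(x) = x**3, where, with jvp's default tangent, the result is expected to equal 6*x; the second_derivative helper is a sketch for illustration, not library code.

import paddle


def second_derivative(f, primals):
    # the same double-jvp nesting used by hvp_revrev
    g = lambda p: paddle.incubate.autograd.jvp(f, p)[1]
    return paddle.incubate.autograd.jvp(g, primals)[1]


x = paddle.to_tensor([[1.0], [2.0], [3.0]], stop_gradient=False)
d2 = second_derivative(lambda x_: x_**3, (x,))
print(d2)  # expected: 6 * x, i.e. [[6.], [12.], [18.]]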
- -from __future__ import annotations - -from typing import Optional -from typing import Tuple - -from ppsci.equation.pde import base - - -class Laplace(base.PDE): - r"""Class for laplace equation. - - $$ - \nabla^2 \varphi = 0 - $$ - - Args: - dim (int): Dimension of equation. - detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.Laplace(2) - """ - - def __init__(self, dim: int, detach_keys: Optional[Tuple[str, ...]] = None): - super().__init__() - self.detach_keys = detach_keys - - invars = self.create_symbols("x y z")[:dim] - u = self.create_function("u", invars) - - self.dim = dim - - laplace = 0 - for invar in invars: - laplace += u.diff(invar, 2) - - self.add_equation("laplace", laplace) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from typing import Tuple + +from ppsci.equation.pde import base + + +class Laplace(base.PDE): + r"""Class for laplace equation. + + $$ + \nabla^2 \varphi = 0 + $$ + + Args: + dim (int): Dimension of equation. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.Laplace(2) + """ + + def __init__(self, dim: int, detach_keys: Optional[Tuple[str, ...]] = None): + super().__init__() + self.detach_keys = detach_keys + + invars = self.create_symbols("x y z")[:dim] + u = self.create_function("u", invars) + + self.dim = dim + + laplace = 0 + for invar in invars: + laplace += u.diff(invar, 2) + + self.add_equation("laplace", laplace) + + self._apply_detach() diff --git a/ppsci/equation/pde/linear_elasticity.py b/ppsci/equation/pde/linear_elasticity.py index 289d924899..74a719669d 100644 --- a/ppsci/equation/pde/linear_elasticity.py +++ b/ppsci/equation/pde/linear_elasticity.py @@ -1,184 +1,184 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Optional -from typing import Tuple -from typing import Union - -import sympy as sp - -from ppsci.equation.pde import base - - -class LinearElasticity(base.PDE): - r"""Linear elasticity equations. - - Use either (E, nu) or (lambda_, mu) to define the material properties. 
- - $$ - \begin{cases} - stress\_disp_{xx} = \lambda(\dfrac{\partial u}{\partial x} + \dfrac{\partial v}{\partial y} + \dfrac{\partial w}{\partial z}) + 2\mu \dfrac{\partial u}{\partial x} - \sigma_{xx} \\ - stress\_disp_{yy} = \lambda(\dfrac{\partial u}{\partial x} + \dfrac{\partial v}{\partial y} + \dfrac{\partial w}{\partial z}) + 2\mu \dfrac{\partial v}{\partial y} - \sigma_{yy} \\ - stress\_disp_{zz} = \lambda(\dfrac{\partial u}{\partial x} + \dfrac{\partial v}{\partial y} + \dfrac{\partial w}{\partial z}) + 2\mu \dfrac{\partial w}{\partial z} - \sigma_{zz} \\ - stress\_disp_{xy} = \mu(\dfrac{\partial u}{\partial y} + \dfrac{\partial v}{\partial x}) - \sigma_{xy} \\ - stress\_disp_{xz} = \mu(\dfrac{\partial u}{\partial z} + \dfrac{\partial w}{\partial x}) - \sigma_{xz} \\ - stress\_disp_{yz} = \mu(\dfrac{\partial v}{\partial z} + \dfrac{\partial w}{\partial y}) - \sigma_{yz} \\ - equilibrium_{x} = \rho \dfrac{\partial^2 u}{\partial t^2} - (\dfrac{\partial \sigma_{xx}}{\partial x} + \dfrac{\partial \sigma_{xy}}{\partial y} + \dfrac{\partial \sigma_{xz}}{\partial z}) \\ - equilibrium_{y} = \rho \dfrac{\partial^2 u}{\partial t^2} - (\dfrac{\partial \sigma_{xy}}{\partial x} + \dfrac{\partial \sigma_{yy}}{\partial y} + \dfrac{\partial \sigma_{yz}}{\partial z}) \\ - equilibrium_{z} = \rho \dfrac{\partial^2 u}{\partial t^2} - (\dfrac{\partial \sigma_{xz}}{\partial x} + \dfrac{\partial \sigma_{yz}}{\partial y} + \dfrac{\partial \sigma_{zz}}{\partial z}) \\ - \end{cases} - $$ - - Args: - E (Optional[Union[float, str]]): The Young's modulus. Defaults to None. - nu (Optional[Union[float, str]]): The Poisson's ratio. Defaults to None. - lambda_ (Optional[Union[float, str]]): Lamé's first parameter. Defaults to None. - mu (Optional[Union[float, str]]): Lamé's second parameter (shear modulus). Defaults to None. - rho (Union[float, str], optional): Mass density. Defaults to 1. - dim (int, optional): Dimension of the linear elasticity (2 or 3). Defaults to 3. - time (bool, optional): Whether contains time data. Defaults to False. - detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.LinearElasticity( - ... E=None, nu=None, lambda_=1e4, mu=100, dim=3 - ... 
) - """ - - def __init__( - self, - E: Optional[Union[float, str]] = None, - nu: Optional[Union[float, str]] = None, - lambda_: Optional[Union[float, str]] = None, - mu: Optional[Union[float, str]] = None, - rho: Union[float, str] = 1, - dim: int = 3, - time: bool = False, - detach_keys: Optional[Tuple[str, ...]] = None, - ): - super().__init__() - self.detach_keys = detach_keys - self.dim = dim - self.time = time - - t, x, y, z = self.create_symbols("t x y z") - normal_x, normal_y, normal_z = self.create_symbols("normal_x normal_y normal_z") - invars = (x, y) - if time: - invars = (t,) + invars - if self.dim == 3: - invars += (z,) - - u = self.create_function("u", invars) - v = self.create_function("v", invars) - w = self.create_function("w", invars) if dim == 3 else sp.Number(0) - - sigma_xx = self.create_function("sigma_xx", invars) - sigma_yy = self.create_function("sigma_yy", invars) - sigma_xy = self.create_function("sigma_xy", invars) - sigma_zz = ( - self.create_function("sigma_zz", invars) if dim == 3 else sp.Number(0) - ) - sigma_xz = ( - self.create_function("sigma_xz", invars) if dim == 3 else sp.Number(0) - ) - sigma_yz = ( - self.create_function("sigma_yz", invars) if dim == 3 else sp.Number(0) - ) - - # compute lambda and mu - if lambda_ is None: - if isinstance(nu, str): - nu = self.create_function(nu, invars) - if isinstance(E, str): - E = self.create_function(E, invars) - lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu)) - mu = E / (2 * (1 + nu)) - else: - if isinstance(lambda_, str): - lambda_ = self.create_function(lambda_, invars) - if isinstance(mu, str): - mu = self.create_function(mu, invars) - - if isinstance(rho, str): - rho = self.create_function(rho, invars) - - self.E = E - self.nu = nu - self.lambda_ = lambda_ - self.mu = mu - self.rho = rho - - # compute stress equations - stress_disp_xx = ( - lambda_ * (u.diff(x) + v.diff(y) + w.diff(z)) - + 2 * mu * u.diff(x) - - sigma_xx - ) - stress_disp_yy = ( - lambda_ * (u.diff(x) + v.diff(y) + w.diff(z)) - + 2 * mu * v.diff(y) - - sigma_yy - ) - stress_disp_zz = ( - lambda_ * (u.diff(x) + v.diff(y) + w.diff(z)) - + 2 * mu * w.diff(z) - - sigma_zz - ) - stress_disp_xy = mu * (u.diff(y) + v.diff(x)) - sigma_xy - stress_disp_xz = mu * (u.diff(z) + w.diff(x)) - sigma_xz - stress_disp_yz = mu * (v.diff(z) + w.diff(y)) - sigma_yz - - # compute equilibrium equations - equilibrium_x = rho * ((u.diff(t)).diff(t)) - ( - sigma_xx.diff(x) + sigma_xy.diff(y) + sigma_xz.diff(z) - ) - equilibrium_y = rho * ((v.diff(t)).diff(t)) - ( - sigma_xy.diff(x) + sigma_yy.diff(y) + sigma_yz.diff(z) - ) - equilibrium_z = rho * ((w.diff(t)).diff(t)) - ( - sigma_xz.diff(x) + sigma_yz.diff(y) + sigma_zz.diff(z) - ) - - # compute traction equations - traction_x = normal_x * sigma_xx + normal_y * sigma_xy + normal_z * sigma_xz - traction_y = normal_x * sigma_xy + normal_y * sigma_yy + normal_z * sigma_yz - traction_z = normal_x * sigma_xz + normal_y * sigma_yz + normal_z * sigma_zz - - # add stress equations - self.add_equation("stress_disp_xx", stress_disp_xx) - self.add_equation("stress_disp_yy", stress_disp_yy) - self.add_equation("stress_disp_xy", stress_disp_xy) - if self.dim == 3: - self.add_equation("stress_disp_zz", stress_disp_zz) - self.add_equation("stress_disp_xz", stress_disp_xz) - self.add_equation("stress_disp_yz", stress_disp_yz) - - # add equilibrium equations - self.add_equation("equilibrium_x", equilibrium_x) - self.add_equation("equilibrium_y", equilibrium_y) - if self.dim == 3: - self.add_equation("equilibrium_z", equilibrium_z) - - 
# add traction equations - self.add_equation("traction_x", traction_x) - self.add_equation("traction_y", traction_y) - if self.dim == 3: - self.add_equation("traction_z", traction_z) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from typing import Tuple +from typing import Union + +import sympy as sp + +from ppsci.equation.pde import base + + +class LinearElasticity(base.PDE): + r"""Linear elasticity equations. + + Use either (E, nu) or (lambda_, mu) to define the material properties. + + $$ + \begin{cases} + stress\_disp_{xx} = \lambda(\dfrac{\partial u}{\partial x} + \dfrac{\partial v}{\partial y} + \dfrac{\partial w}{\partial z}) + 2\mu \dfrac{\partial u}{\partial x} - \sigma_{xx} \\ + stress\_disp_{yy} = \lambda(\dfrac{\partial u}{\partial x} + \dfrac{\partial v}{\partial y} + \dfrac{\partial w}{\partial z}) + 2\mu \dfrac{\partial v}{\partial y} - \sigma_{yy} \\ + stress\_disp_{zz} = \lambda(\dfrac{\partial u}{\partial x} + \dfrac{\partial v}{\partial y} + \dfrac{\partial w}{\partial z}) + 2\mu \dfrac{\partial w}{\partial z} - \sigma_{zz} \\ + stress\_disp_{xy} = \mu(\dfrac{\partial u}{\partial y} + \dfrac{\partial v}{\partial x}) - \sigma_{xy} \\ + stress\_disp_{xz} = \mu(\dfrac{\partial u}{\partial z} + \dfrac{\partial w}{\partial x}) - \sigma_{xz} \\ + stress\_disp_{yz} = \mu(\dfrac{\partial v}{\partial z} + \dfrac{\partial w}{\partial y}) - \sigma_{yz} \\ + equilibrium_{x} = \rho \dfrac{\partial^2 u}{\partial t^2} - (\dfrac{\partial \sigma_{xx}}{\partial x} + \dfrac{\partial \sigma_{xy}}{\partial y} + \dfrac{\partial \sigma_{xz}}{\partial z}) \\ + equilibrium_{y} = \rho \dfrac{\partial^2 u}{\partial t^2} - (\dfrac{\partial \sigma_{xy}}{\partial x} + \dfrac{\partial \sigma_{yy}}{\partial y} + \dfrac{\partial \sigma_{yz}}{\partial z}) \\ + equilibrium_{z} = \rho \dfrac{\partial^2 u}{\partial t^2} - (\dfrac{\partial \sigma_{xz}}{\partial x} + \dfrac{\partial \sigma_{yz}}{\partial y} + \dfrac{\partial \sigma_{zz}}{\partial z}) \\ + \end{cases} + $$ + + Args: + E (Optional[Union[float, str]]): The Young's modulus. Defaults to None. + nu (Optional[Union[float, str]]): The Poisson's ratio. Defaults to None. + lambda_ (Optional[Union[float, str]]): Lamé's first parameter. Defaults to None. + mu (Optional[Union[float, str]]): Lamé's second parameter (shear modulus). Defaults to None. + rho (Union[float, str], optional): Mass density. Defaults to 1. + dim (int, optional): Dimension of the linear elasticity (2 or 3). Defaults to 3. + time (bool, optional): Whether contains time data. Defaults to False. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.LinearElasticity( + ... E=None, nu=None, lambda_=1e4, mu=100, dim=3 + ... 
) + """ + + def __init__( + self, + E: Optional[Union[float, str]] = None, + nu: Optional[Union[float, str]] = None, + lambda_: Optional[Union[float, str]] = None, + mu: Optional[Union[float, str]] = None, + rho: Union[float, str] = 1, + dim: int = 3, + time: bool = False, + detach_keys: Optional[Tuple[str, ...]] = None, + ): + super().__init__() + self.detach_keys = detach_keys + self.dim = dim + self.time = time + + t, x, y, z = self.create_symbols("t x y z") + normal_x, normal_y, normal_z = self.create_symbols("normal_x normal_y normal_z") + invars = (x, y) + if time: + invars = (t,) + invars + if self.dim == 3: + invars += (z,) + + u = self.create_function("u", invars) + v = self.create_function("v", invars) + w = self.create_function("w", invars) if dim == 3 else sp.Number(0) + + sigma_xx = self.create_function("sigma_xx", invars) + sigma_yy = self.create_function("sigma_yy", invars) + sigma_xy = self.create_function("sigma_xy", invars) + sigma_zz = ( + self.create_function("sigma_zz", invars) if dim == 3 else sp.Number(0) + ) + sigma_xz = ( + self.create_function("sigma_xz", invars) if dim == 3 else sp.Number(0) + ) + sigma_yz = ( + self.create_function("sigma_yz", invars) if dim == 3 else sp.Number(0) + ) + + # compute lambda and mu + if lambda_ is None: + if isinstance(nu, str): + nu = self.create_function(nu, invars) + if isinstance(E, str): + E = self.create_function(E, invars) + lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu)) + mu = E / (2 * (1 + nu)) + else: + if isinstance(lambda_, str): + lambda_ = self.create_function(lambda_, invars) + if isinstance(mu, str): + mu = self.create_function(mu, invars) + + if isinstance(rho, str): + rho = self.create_function(rho, invars) + + self.E = E + self.nu = nu + self.lambda_ = lambda_ + self.mu = mu + self.rho = rho + + # compute stress equations + stress_disp_xx = ( + lambda_ * (u.diff(x) + v.diff(y) + w.diff(z)) + + 2 * mu * u.diff(x) + - sigma_xx + ) + stress_disp_yy = ( + lambda_ * (u.diff(x) + v.diff(y) + w.diff(z)) + + 2 * mu * v.diff(y) + - sigma_yy + ) + stress_disp_zz = ( + lambda_ * (u.diff(x) + v.diff(y) + w.diff(z)) + + 2 * mu * w.diff(z) + - sigma_zz + ) + stress_disp_xy = mu * (u.diff(y) + v.diff(x)) - sigma_xy + stress_disp_xz = mu * (u.diff(z) + w.diff(x)) - sigma_xz + stress_disp_yz = mu * (v.diff(z) + w.diff(y)) - sigma_yz + + # compute equilibrium equations + equilibrium_x = rho * ((u.diff(t)).diff(t)) - ( + sigma_xx.diff(x) + sigma_xy.diff(y) + sigma_xz.diff(z) + ) + equilibrium_y = rho * ((v.diff(t)).diff(t)) - ( + sigma_xy.diff(x) + sigma_yy.diff(y) + sigma_yz.diff(z) + ) + equilibrium_z = rho * ((w.diff(t)).diff(t)) - ( + sigma_xz.diff(x) + sigma_yz.diff(y) + sigma_zz.diff(z) + ) + + # compute traction equations + traction_x = normal_x * sigma_xx + normal_y * sigma_xy + normal_z * sigma_xz + traction_y = normal_x * sigma_xy + normal_y * sigma_yy + normal_z * sigma_yz + traction_z = normal_x * sigma_xz + normal_y * sigma_yz + normal_z * sigma_zz + + # add stress equations + self.add_equation("stress_disp_xx", stress_disp_xx) + self.add_equation("stress_disp_yy", stress_disp_yy) + self.add_equation("stress_disp_xy", stress_disp_xy) + if self.dim == 3: + self.add_equation("stress_disp_zz", stress_disp_zz) + self.add_equation("stress_disp_xz", stress_disp_xz) + self.add_equation("stress_disp_yz", stress_disp_yz) + + # add equilibrium equations + self.add_equation("equilibrium_x", equilibrium_x) + self.add_equation("equilibrium_y", equilibrium_y) + if self.dim == 3: + self.add_equation("equilibrium_z", equilibrium_z) + + 
# add traction equations + self.add_equation("traction_x", traction_x) + self.add_equation("traction_y", traction_y) + if self.dim == 3: + self.add_equation("traction_z", traction_z) + + self._apply_detach() diff --git a/ppsci/equation/pde/navier_stokes.py b/ppsci/equation/pde/navier_stokes.py index c0d3d193a2..859a356ede 100644 --- a/ppsci/equation/pde/navier_stokes.py +++ b/ppsci/equation/pde/navier_stokes.py @@ -1,151 +1,151 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Optional -from typing import Tuple -from typing import Union - -import sympy as sp -from sympy.parsing import sympy_parser as sp_parser - -from ppsci.equation.pde import base - - -class NavierStokes(base.PDE): - r"""Class for navier-stokes equation. - - $$ - \begin{cases} - \dfrac{\partial u}{\partial x} + \dfrac{\partial v}{\partial y} + \dfrac{\partial w}{\partial z} = 0 \\ - \dfrac{\partial u}{\partial t} + u\dfrac{\partial u}{\partial x} + v\dfrac{\partial u}{\partial y} + w\dfrac{\partial u}{\partial z} = - - \dfrac{1}{\rho}\dfrac{\partial p}{\partial x} - + \nu( - \dfrac{\partial ^2 u}{\partial x ^2} - + \dfrac{\partial ^2 u}{\partial y ^2} - + \dfrac{\partial ^2 u}{\partial z ^2} - ) \\ - \dfrac{\partial v}{\partial t} + u\dfrac{\partial v}{\partial x} + v\dfrac{\partial v}{\partial y} + w\dfrac{\partial v}{\partial z} = - - \dfrac{1}{\rho}\dfrac{\partial p}{\partial y} - + \nu( - \dfrac{\partial ^2 v}{\partial x ^2} - + \dfrac{\partial ^2 v}{\partial y ^2} - + \dfrac{\partial ^2 v}{\partial z ^2} - ) \\ - \dfrac{\partial w}{\partial t} + u\dfrac{\partial w}{\partial x} + v\dfrac{\partial w}{\partial y} + w\dfrac{\partial w}{\partial z} = - - \dfrac{1}{\rho}\dfrac{\partial p}{\partial z} - + \nu( - \dfrac{\partial ^2 w}{\partial x ^2} - + \dfrac{\partial ^2 w}{\partial y ^2} - + \dfrac{\partial ^2 w}{\partial z ^2} - ) \\ - \end{cases} - $$ - - Args: - nu (Union[float, str]): Dynamic viscosity. - rho (Union[float, str]): Density. - dim (int): Dimension of equation. - time (bool): Whether the equation is time-dependent. - detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. 
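As a cross-check of the Navier-Stokes residuals assembled in this file, the following standalone sympy sketch rebuilds the steady 2-D continuity and x-momentum terms with constant nu and rho. It mirrors the symbol names used here for illustration only and is not part of the PaddleScience API.

import sympy as sp

# Steady 2-D sketch with constant viscosity/density (illustrative only).
x, y = sp.symbols("x y")
nu, rho = sp.symbols("nu rho", positive=True)
u = sp.Function("u")(x, y)
v = sp.Function("v")(x, y)
p = sp.Function("p")(x, y)

continuity = u.diff(x) + v.diff(y)
momentum_x = (
    u * u.diff(x)
    + v * u.diff(y)
    - ((nu * u.diff(x)).diff(x) + (nu * u.diff(y)).diff(y))
    + 1 / rho * p.diff(x)
)
# momentum_x is equivalent to u*u_x + v*u_y + p_x/rho - nu*(u_xx + u_yy)
print(sp.simplify(momentum_x))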
- - Examples: - >>> import ppsci - >>> pde = ppsci.equation.NavierStokes(0.1, 1.0, 3, False) - """ - - def __init__( - self, - nu: Union[float, str], - rho: Union[float, str], - dim: int, - time: bool, - detach_keys: Optional[Tuple[str, ...]] = None, - ): - super().__init__() - self.detach_keys = detach_keys - self.dim = dim - self.time = time - - t, x, y, z = self.create_symbols("t x y z") - invars = (x, y) - if time: - invars = (t,) + invars - if dim == 3: - invars += (z,) - - if isinstance(nu, str): - nu = sp_parser.parse_expr(nu) - if isinstance(nu, sp.Symbol): - invars += (nu,) - - if isinstance(rho, str): - rho = sp_parser.parse_expr(rho) - if isinstance(rho, sp.Symbol): - invars += (rho,) - - self.nu = nu - self.rho = rho - - u = self.create_function("u", invars) - v = self.create_function("v", invars) - w = self.create_function("w", invars) if dim == 3 else sp.Number(0) - p = self.create_function("p", invars) - - continuity = u.diff(x) + v.diff(y) + w.diff(z) - momentum_x = ( - u.diff(t) - + u * u.diff(x) - + v * u.diff(y) - + w * u.diff(z) - - ( - (nu * u.diff(x)).diff(x) - + (nu * u.diff(y)).diff(y) - + (nu * u.diff(z)).diff(z) - ) - + 1 / rho * p.diff(x) - ) - momentum_y = ( - v.diff(t) - + u * v.diff(x) - + v * v.diff(y) - + w * v.diff(z) - - ( - (nu * v.diff(x)).diff(x) - + (nu * v.diff(y)).diff(y) - + (nu * v.diff(z)).diff(z) - ) - + 1 / rho * p.diff(y) - ) - momentum_z = ( - w.diff(t) - + u * w.diff(x) - + v * w.diff(y) - + w * w.diff(z) - - ( - (nu * w.diff(x)).diff(x) - + (nu * w.diff(y)).diff(y) - + (nu * w.diff(z)).diff(z) - ) - + 1 / rho * p.diff(z) - ) - self.add_equation("continuity", continuity) - self.add_equation("momentum_x", momentum_x) - self.add_equation("momentum_y", momentum_y) - if self.dim == 3: - self.add_equation("momentum_z", momentum_z) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from typing import Tuple +from typing import Union + +import sympy as sp +from sympy.parsing import sympy_parser as sp_parser + +from ppsci.equation.pde import base + + +class NavierStokes(base.PDE): + r"""Class for navier-stokes equation. 
+ + $$ + \begin{cases} + \dfrac{\partial u}{\partial x} + \dfrac{\partial v}{\partial y} + \dfrac{\partial w}{\partial z} = 0 \\ + \dfrac{\partial u}{\partial t} + u\dfrac{\partial u}{\partial x} + v\dfrac{\partial u}{\partial y} + w\dfrac{\partial u}{\partial z} = + - \dfrac{1}{\rho}\dfrac{\partial p}{\partial x} + + \nu( + \dfrac{\partial ^2 u}{\partial x ^2} + + \dfrac{\partial ^2 u}{\partial y ^2} + + \dfrac{\partial ^2 u}{\partial z ^2} + ) \\ + \dfrac{\partial v}{\partial t} + u\dfrac{\partial v}{\partial x} + v\dfrac{\partial v}{\partial y} + w\dfrac{\partial v}{\partial z} = + - \dfrac{1}{\rho}\dfrac{\partial p}{\partial y} + + \nu( + \dfrac{\partial ^2 v}{\partial x ^2} + + \dfrac{\partial ^2 v}{\partial y ^2} + + \dfrac{\partial ^2 v}{\partial z ^2} + ) \\ + \dfrac{\partial w}{\partial t} + u\dfrac{\partial w}{\partial x} + v\dfrac{\partial w}{\partial y} + w\dfrac{\partial w}{\partial z} = + - \dfrac{1}{\rho}\dfrac{\partial p}{\partial z} + + \nu( + \dfrac{\partial ^2 w}{\partial x ^2} + + \dfrac{\partial ^2 w}{\partial y ^2} + + \dfrac{\partial ^2 w}{\partial z ^2} + ) \\ + \end{cases} + $$ + + Args: + nu (Union[float, str]): Dynamic viscosity. + rho (Union[float, str]): Density. + dim (int): Dimension of equation. + time (bool): Whether the equation is time-dependent. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.NavierStokes(0.1, 1.0, 3, False) + """ + + def __init__( + self, + nu: Union[float, str], + rho: Union[float, str], + dim: int, + time: bool, + detach_keys: Optional[Tuple[str, ...]] = None, + ): + super().__init__() + self.detach_keys = detach_keys + self.dim = dim + self.time = time + + t, x, y, z = self.create_symbols("t x y z") + invars = (x, y) + if time: + invars = (t,) + invars + if dim == 3: + invars += (z,) + + if isinstance(nu, str): + nu = sp_parser.parse_expr(nu) + if isinstance(nu, sp.Symbol): + invars += (nu,) + + if isinstance(rho, str): + rho = sp_parser.parse_expr(rho) + if isinstance(rho, sp.Symbol): + invars += (rho,) + + self.nu = nu + self.rho = rho + + u = self.create_function("u", invars) + v = self.create_function("v", invars) + w = self.create_function("w", invars) if dim == 3 else sp.Number(0) + p = self.create_function("p", invars) + + continuity = u.diff(x) + v.diff(y) + w.diff(z) + momentum_x = ( + u.diff(t) + + u * u.diff(x) + + v * u.diff(y) + + w * u.diff(z) + - ( + (nu * u.diff(x)).diff(x) + + (nu * u.diff(y)).diff(y) + + (nu * u.diff(z)).diff(z) + ) + + 1 / rho * p.diff(x) + ) + momentum_y = ( + v.diff(t) + + u * v.diff(x) + + v * v.diff(y) + + w * v.diff(z) + - ( + (nu * v.diff(x)).diff(x) + + (nu * v.diff(y)).diff(y) + + (nu * v.diff(z)).diff(z) + ) + + 1 / rho * p.diff(y) + ) + momentum_z = ( + w.diff(t) + + u * w.diff(x) + + v * w.diff(y) + + w * w.diff(z) + - ( + (nu * w.diff(x)).diff(x) + + (nu * w.diff(y)).diff(y) + + (nu * w.diff(z)).diff(z) + ) + + 1 / rho * p.diff(z) + ) + self.add_equation("continuity", continuity) + self.add_equation("momentum_x", momentum_x) + self.add_equation("momentum_y", momentum_y) + if self.dim == 3: + self.add_equation("momentum_z", momentum_z) + + self._apply_detach() diff --git a/ppsci/equation/pde/nls_m_b.py b/ppsci/equation/pde/nls_m_b.py index 3db2984268..b2f4a27c13 100644 --- a/ppsci/equation/pde/nls_m_b.py +++ b/ppsci/equation/pde/nls_m_b.py @@ -1,101 +1,101 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
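For the LinearElasticity class earlier in this patch, the (E, nu) branch of its __init__ converts Young's modulus and Poisson's ratio into the Lamé parameters. The short sketch below repeats that conversion in plain Python with hypothetical material values so the two parameterizations can be compared.

# Plain-Python check of the (E, nu) -> (lambda_, mu) conversion used by
# LinearElasticity; the material values below are hypothetical.
E, nu = 2.0e5, 0.3
lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu))   # ~1.15e5
mu = E / (2 * (1 + nu))                        # ~7.69e4
print(lambda_, mu)
# The equivalent direct construction would be:
#   ppsci.equation.LinearElasticity(lambda_=lambda_, mu=mu, dim=3)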
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Optional -from typing import Tuple -from typing import Union - -from ppsci.equation.pde import base - - -class NLSMB(base.PDE): - r"""Class for nonlinear Schrodinger-Maxwell-Bloch equation. - - $$ - \begin{cases} - \dfrac{\partial E}{\partial x} = i \alpha_1 \dfrac{\partial^2 E}{\partial t ^2} - i \alpha_2 |E|^2 E+2 p \\ - \dfrac{\partial p}{\partial t} = 2 i \omega_0 p+2 E \eta \\ - \dfrac{\partial \eta}{\partial t} = -(E p^* + E^* p) - \end{cases} - $$ - - Args: - alpha_1 (Union[float, str]): Group velocity dispersion. - alpha_2 (Union[float, str]): Kerr nonlinearity. - omega_0 (Union[float, str]): The offset of resonance frequency. - time (bool): Whether the equation is time-dependent. - detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.NLSMB(0.5, -1.0, 0.5, True) - """ - - def __init__( - self, - alpha_1: Union[float, str], - alpha_2: Union[float, str], - omega_0: Union[float, str], - time: bool, - detach_keys: Optional[Tuple[str, ...]] = None, - ): - super().__init__() - self.detach_keys = detach_keys - self.time = time - - t, x = self.create_symbols("t x") - invars = (x,) - if time: - invars = (t,) + invars - - self.alpha_1 = alpha_1 - self.alpha_2 = alpha_2 - self.omega_0 = omega_0 - - Eu = self.create_function("Eu", invars) - Ev = self.create_function("Ev", invars) - pu = self.create_function("pu", invars) - pv = self.create_function("pv", invars) - eta = self.create_function("eta", invars) - - pu_t = pu.diff(t) - pv_t = pv.diff(t) - eta_t = eta.diff(t) - - Eu_x = Eu.diff(x) - Ev_x = Ev.diff(x) - - Eu_tt = Eu.diff(t).diff(t) - Ev_tt = Ev.diff(t).diff(t) - - Schrodinger_1 = ( - alpha_1 * Eu_tt - alpha_2 * Eu * (Eu**2 + Ev**2) + 2 * pv - Ev_x - ) - Schrodinger_2 = ( - alpha_1 * Ev_tt - alpha_2 * Ev * (Eu**2 + Ev**2) - 2 * pu + Eu_x - ) - Maxwell_1 = 2 * Ev * eta - pv_t + 2 * pu * omega_0 - Maxwell_2 = -2 * Eu * eta + pu_t + 2 * pv * omega_0 - Bloch = 2 * pv * Ev + 2 * pu * Eu + eta_t - - self.add_equation("Schrodinger_1", Schrodinger_1) - self.add_equation("Schrodinger_2", Schrodinger_2) - self.add_equation("Maxwell_1", Maxwell_1) - self.add_equation("Maxwell_2", Maxwell_2) - self.add_equation("Bloch", Bloch) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
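The two Schrodinger_* residuals built by NLSMB in this file follow from splitting E = Eu + i*Ev and p = pu + i*pv in the first equation of its docstring. The sympy sketch below verifies that algebra; the derivative terms are replaced by plain real symbols (Eu_x, Ev_tt, ...), which is an illustrative simplification rather than how the class itself works.

import sympy as sp

# Real/imaginary split of E_x = i*a1*E_tt - i*a2*|E|^2*E + 2*p, with the
# derivatives represented by stand-in real symbols.
a1, a2, Eu, Ev, pu, pv, Eu_x, Ev_x, Eu_tt, Ev_tt = sp.symbols(
    "a1 a2 Eu Ev pu pv Eu_x Ev_x Eu_tt Ev_tt", real=True
)
E = Eu + sp.I * Ev
residual = (
    sp.I * a1 * (Eu_tt + sp.I * Ev_tt)
    - sp.I * a2 * (Eu**2 + Ev**2) * E
    + 2 * (pu + sp.I * pv)
    - (Eu_x + sp.I * Ev_x)
)
schrodinger_1 = a1 * Eu_tt - a2 * Eu * (Eu**2 + Ev**2) + 2 * pv - Ev_x
schrodinger_2 = a1 * Ev_tt - a2 * Ev * (Eu**2 + Ev**2) - 2 * pu + Eu_x
print(sp.simplify(sp.im(residual) - schrodinger_1))  # 0
print(sp.simplify(sp.re(residual) + schrodinger_2))  # 0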
+ +from __future__ import annotations + +from typing import Optional +from typing import Tuple +from typing import Union + +from ppsci.equation.pde import base + + +class NLSMB(base.PDE): + r"""Class for nonlinear Schrodinger-Maxwell-Bloch equation. + + $$ + \begin{cases} + \dfrac{\partial E}{\partial x} = i \alpha_1 \dfrac{\partial^2 E}{\partial t ^2} - i \alpha_2 |E|^2 E+2 p \\ + \dfrac{\partial p}{\partial t} = 2 i \omega_0 p+2 E \eta \\ + \dfrac{\partial \eta}{\partial t} = -(E p^* + E^* p) + \end{cases} + $$ + + Args: + alpha_1 (Union[float, str]): Group velocity dispersion. + alpha_2 (Union[float, str]): Kerr nonlinearity. + omega_0 (Union[float, str]): The offset of resonance frequency. + time (bool): Whether the equation is time-dependent. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.NLSMB(0.5, -1.0, 0.5, True) + """ + + def __init__( + self, + alpha_1: Union[float, str], + alpha_2: Union[float, str], + omega_0: Union[float, str], + time: bool, + detach_keys: Optional[Tuple[str, ...]] = None, + ): + super().__init__() + self.detach_keys = detach_keys + self.time = time + + t, x = self.create_symbols("t x") + invars = (x,) + if time: + invars = (t,) + invars + + self.alpha_1 = alpha_1 + self.alpha_2 = alpha_2 + self.omega_0 = omega_0 + + Eu = self.create_function("Eu", invars) + Ev = self.create_function("Ev", invars) + pu = self.create_function("pu", invars) + pv = self.create_function("pv", invars) + eta = self.create_function("eta", invars) + + pu_t = pu.diff(t) + pv_t = pv.diff(t) + eta_t = eta.diff(t) + + Eu_x = Eu.diff(x) + Ev_x = Ev.diff(x) + + Eu_tt = Eu.diff(t).diff(t) + Ev_tt = Ev.diff(t).diff(t) + + Schrodinger_1 = ( + alpha_1 * Eu_tt - alpha_2 * Eu * (Eu**2 + Ev**2) + 2 * pv - Ev_x + ) + Schrodinger_2 = ( + alpha_1 * Ev_tt - alpha_2 * Ev * (Eu**2 + Ev**2) - 2 * pu + Eu_x + ) + Maxwell_1 = 2 * Ev * eta - pv_t + 2 * pu * omega_0 + Maxwell_2 = -2 * Eu * eta + pu_t + 2 * pv * omega_0 + Bloch = 2 * pv * Ev + 2 * pu * Eu + eta_t + + self.add_equation("Schrodinger_1", Schrodinger_1) + self.add_equation("Schrodinger_2", Schrodinger_2) + self.add_equation("Maxwell_1", Maxwell_1) + self.add_equation("Maxwell_2", Maxwell_2) + self.add_equation("Bloch", Bloch) + + self._apply_detach() diff --git a/ppsci/equation/pde/normal_dot_vec.py b/ppsci/equation/pde/normal_dot_vec.py index a6f3942eeb..b38b1ca3c6 100644 --- a/ppsci/equation/pde/normal_dot_vec.py +++ b/ppsci/equation/pde/normal_dot_vec.py @@ -1,59 +1,59 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Optional -from typing import Tuple - -from ppsci.equation.pde import base - - -class NormalDotVec(base.PDE): - r"""Normal Dot Vector. - - $$ - \mathbf{n} \cdot \mathbf{v} = 0 - $$ - - Args: - vec_keys (Tuple[str, ...]): Keys for vectors, such as ("u", "v", "w") for - velocity vector. 
- detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.NormalDotVec(("u", "v", "w")) - """ - - def __init__( - self, vec_keys: Tuple[str, ...], detach_keys: Optional[Tuple[str, ...]] = None - ): - super().__init__() - self.detach_keys = detach_keys - if not vec_keys: - raise ValueError(f"len(vec_keys)({len(vec_keys)}) should be larger than 0.") - - self.vec_keys = vec_keys - vec_vars = self.create_symbols(" ".join(vec_keys)) - normals = self.create_symbols("normal_x normal_y normal_z") - - normal_dot_vec = 0 - for (normal, vec) in zip(normals, vec_vars): - normal_dot_vec += normal * vec - - self.add_equation("normal_dot_vec", normal_dot_vec) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from typing import Tuple + +from ppsci.equation.pde import base + + +class NormalDotVec(base.PDE): + r"""Normal Dot Vector. + + $$ + \mathbf{n} \cdot \mathbf{v} = 0 + $$ + + Args: + vec_keys (Tuple[str, ...]): Keys for vectors, such as ("u", "v", "w") for + velocity vector. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.NormalDotVec(("u", "v", "w")) + """ + + def __init__( + self, vec_keys: Tuple[str, ...], detach_keys: Optional[Tuple[str, ...]] = None + ): + super().__init__() + self.detach_keys = detach_keys + if not vec_keys: + raise ValueError(f"len(vec_keys)({len(vec_keys)}) should be larger than 0.") + + self.vec_keys = vec_keys + vec_vars = self.create_symbols(" ".join(vec_keys)) + normals = self.create_symbols("normal_x normal_y normal_z") + + normal_dot_vec = 0 + for (normal, vec) in zip(normals, vec_vars): + normal_dot_vec += normal * vec + + self.add_equation("normal_dot_vec", normal_dot_vec) + + self._apply_detach() diff --git a/ppsci/equation/pde/poisson.py b/ppsci/equation/pde/poisson.py index 4f9551a23a..b31ce1380d 100644 --- a/ppsci/equation/pde/poisson.py +++ b/ppsci/equation/pde/poisson.py @@ -1,53 +1,53 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Optional -from typing import Tuple - -from ppsci.equation.pde import base - - -class Poisson(base.PDE): - r"""Class for poisson equation. 
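For the NormalDotVec class above, the registered expression for the documented keys ("u", "v", "w") is simply the dot product of the velocity with the boundary normal; a minimal sympy sketch, not a PaddleScience call:

import sympy as sp

# normal_dot_vec for vec_keys=("u", "v", "w"): the n . v scalar used in
# boundary residuals such as no-penetration conditions.
u, v, w, nx, ny, nz = sp.symbols("u v w normal_x normal_y normal_z")
normal_dot_vec = nx * u + ny * v + nz * w
print(normal_dot_vec)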
- - $$ - \nabla^2 \varphi = C - $$ - - Args: - dim (int): Dimension of equation. - detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. - Defaults to None. - - Examples: - >>> import ppsci - >>> pde = ppsci.equation.Poisson(2) - """ - - def __init__(self, dim: int, detach_keys: Optional[Tuple[str, ...]] = None): - super().__init__() - self.detach_keys = detach_keys - invars = self.create_symbols("x y z")[:dim] - p = self.create_function("p", invars) - self.dim = dim - - poisson = 0 - for invar in invars: - poisson += p.diff(invar, 2) - - self.add_equation("poisson", poisson) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from typing import Tuple + +from ppsci.equation.pde import base + + +class Poisson(base.PDE): + r"""Class for poisson equation. + + $$ + \nabla^2 \varphi = C + $$ + + Args: + dim (int): Dimension of equation. + detach_keys (Optional[Tuple[str, ...]]): Keys used for detach during computing. + Defaults to None. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.Poisson(2) + """ + + def __init__(self, dim: int, detach_keys: Optional[Tuple[str, ...]] = None): + super().__init__() + self.detach_keys = detach_keys + invars = self.create_symbols("x y z")[:dim] + p = self.create_function("p", invars) + self.dim = dim + + poisson = 0 + for invar in invars: + poisson += p.diff(invar, 2) + + self.add_equation("poisson", poisson) + + self._apply_detach() diff --git a/ppsci/equation/pde/viv.py b/ppsci/equation/pde/viv.py index c3d85895f1..d3e9b7a29d 100644 --- a/ppsci/equation/pde/viv.py +++ b/ppsci/equation/pde/viv.py @@ -1,64 +1,64 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import paddle -import sympy as sp -from paddle.nn import initializer - -from ppsci.equation.pde import base - - -class Vibration(base.PDE): - r"""Vortex induced vibration equation. - - $$ - \rho \dfrac{\partial^2 \eta}{\partial t^2} + e^{k1} \dfrac{\partial \eta}{\partial t} + e^{k2} \eta = f - $$ - - Args: - rho (float): Generalized mass. - k1 (float): Learnable parameter for modal damping. - k2 (float): Learnable parameter for generalized stiffness. 
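The Vibration class here registers a single residual named "f"; the sympy sketch below shows the same expression in isolation. Note that k1 and k2 enter through exp(), so the effective damping and stiffness stay positive while the raw parameters are trained.

import sympy as sp

# Standalone form of the residual registered as "f" by Vibration:
# rho * eta_tt + exp(k1) * eta_t + exp(k2) * eta (illustrative only).
t_f, k1, k2, rho = sp.symbols("t_f k1 k2 rho")
eta = sp.Function("eta")(t_f)
f = rho * eta.diff(t_f, 2) + sp.exp(k1) * eta.diff(t_f) + sp.exp(k2) * eta
print(f)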
- - Examples: - >>> import ppsci - >>> pde = ppsci.equation.Vibration(1.0, 4.0, -1.0) - """ - - def __init__(self, rho: float, k1: float, k2: float): - super().__init__() - self.rho = rho - self.k1 = paddle.create_parameter( - shape=[], - dtype=paddle.get_default_dtype(), - default_initializer=initializer.Constant(k1), - ) - self.k2 = paddle.create_parameter( - shape=[], - dtype=paddle.get_default_dtype(), - default_initializer=initializer.Constant(k2), - ) - self.learnable_parameters.append(self.k1) - self.learnable_parameters.append(self.k2) - - t_f = self.create_symbols("t_f") - eta = self.create_function("eta", (t_f,)) - k1 = self.create_symbols(self.k1.name) - k2 = self.create_symbols(self.k2.name) - f = self.rho * eta.diff(t_f, 2) + sp.exp(k1) * eta.diff(t_f) + sp.exp(k2) * eta - self.add_equation("f", f) - - self._apply_detach() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import paddle +import sympy as sp +from paddle.nn import initializer + +from ppsci.equation.pde import base + + +class Vibration(base.PDE): + r"""Vortex induced vibration equation. + + $$ + \rho \dfrac{\partial^2 \eta}{\partial t^2} + e^{k1} \dfrac{\partial \eta}{\partial t} + e^{k2} \eta = f + $$ + + Args: + rho (float): Generalized mass. + k1 (float): Learnable parameter for modal damping. + k2 (float): Learnable parameter for generalized stiffness. + + Examples: + >>> import ppsci + >>> pde = ppsci.equation.Vibration(1.0, 4.0, -1.0) + """ + + def __init__(self, rho: float, k1: float, k2: float): + super().__init__() + self.rho = rho + self.k1 = paddle.create_parameter( + shape=[], + dtype=paddle.get_default_dtype(), + default_initializer=initializer.Constant(k1), + ) + self.k2 = paddle.create_parameter( + shape=[], + dtype=paddle.get_default_dtype(), + default_initializer=initializer.Constant(k2), + ) + self.learnable_parameters.append(self.k1) + self.learnable_parameters.append(self.k2) + + t_f = self.create_symbols("t_f") + eta = self.create_function("eta", (t_f,)) + k1 = self.create_symbols(self.k1.name) + k2 = self.create_symbols(self.k2.name) + f = self.rho * eta.diff(t_f, 2) + sp.exp(k1) * eta.diff(t_f) + sp.exp(k2) * eta + self.add_equation("f", f) + + self._apply_detach() diff --git a/ppsci/experimental/__init__.py b/ppsci/experimental/__init__.py index 842f19428a..3b40e9a81b 100644 --- a/ppsci/experimental/__init__.py +++ b/ppsci/experimental/__init__.py @@ -1,37 +1,37 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This module is for experimental API -""" - -from ppsci.experimental.math_module import bessel_i0 -from ppsci.experimental.math_module import bessel_i0e -from ppsci.experimental.math_module import bessel_i1 -from ppsci.experimental.math_module import bessel_i1e -from ppsci.experimental.math_module import fractional_diff -from ppsci.experimental.math_module import gaussian_integrate -from ppsci.experimental.math_module import montecarlo_integrate -from ppsci.experimental.math_module import trapezoid_integrate - -__all__ = [ - "bessel_i0", - "bessel_i0e", - "bessel_i1", - "bessel_i1e", - "fractional_diff", - "gaussian_integrate", - "trapezoid_integrate", - "montecarlo_integrate", -] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module is for experimental API +""" + +from ppsci.experimental.math_module import bessel_i0 +from ppsci.experimental.math_module import bessel_i0e +from ppsci.experimental.math_module import bessel_i1 +from ppsci.experimental.math_module import bessel_i1e +from ppsci.experimental.math_module import fractional_diff +from ppsci.experimental.math_module import gaussian_integrate +from ppsci.experimental.math_module import montecarlo_integrate +from ppsci.experimental.math_module import trapezoid_integrate + +__all__ = [ + "bessel_i0", + "bessel_i0e", + "bessel_i1", + "bessel_i1e", + "fractional_diff", + "gaussian_integrate", + "trapezoid_integrate", + "montecarlo_integrate", +] diff --git a/ppsci/experimental/math_module.py b/ppsci/experimental/math_module.py index 9f42421bfa..bc4fdfe411 100644 --- a/ppsci/experimental/math_module.py +++ b/ppsci/experimental/math_module.py @@ -1,646 +1,646 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import functools -from typing import Any -from typing import Callable -from typing import List -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle -from typing_extensions import Literal - - -def bessel_i0(x: paddle.Tensor) -> paddle.Tensor: - """Zero-order modified Bézier curve functions of the first kind. - - Args: - x (paddle.Tensor): Input data of the formula. 
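bessel_i0/i0e/i1/i1e here are thin wrappers over paddle's modified Bessel functions of the first kind. The exponentially scaled variants satisfy i0e(x) = exp(-|x|) * i0(x), and likewise for i1e/i1; the small check below verifies this with the same paddle ops the module already uses (it assumes a working paddle install).

import paddle

# Scaling relation between the plain and exponentially scaled wrappers.
x = paddle.to_tensor([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32")
print(paddle.allclose(paddle.i0e(x), paddle.exp(-paddle.abs(x)) * paddle.i0(x)))
print(paddle.allclose(paddle.i1e(x), paddle.exp(-paddle.abs(x)) * paddle.i1(x)))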
- - Examples: - >>> import paddle - >>> import ppsci - >>> res = ppsci.experimental.bessel_i0(paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")) - """ - return paddle.i0(x) - - -def bessel_i0e(x: paddle.Tensor) -> paddle.Tensor: - """Exponentially scaled zero-order modified Bézier curve functions of the first kind. - - Args: - x (paddle.Tensor): Input data of the formula. - - Examples: - >>> import paddle - >>> import ppsci - >>> res = ppsci.experimental.bessel_i0e(paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")) - """ - return paddle.i0e(x) - - -def bessel_i1(x: paddle.Tensor) -> paddle.Tensor: - """First-order modified Bézier curve functions of the first kind. - - Args: - x (paddle.Tensor): Input data of the formula. - - Examples: - >>> import paddle - >>> import ppsci - >>> res = ppsci.experimental.bessel_i1(paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")) - """ - return paddle.i1(x) - - -def bessel_i1e(x: paddle.Tensor) -> paddle.Tensor: - """Exponentially scaled first-order modified Bézier curve functions of the first kind. - - Args: - x (paddle.Tensor): Input data of the formula. - - Examples: - >>> import paddle - >>> import ppsci - >>> res = ppsci.experimental.bessel_i1e(paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32")) - """ - return paddle.i1e(x) - - -def expand_func_values_and_squeeze_integral(f: Callable): - """This decorator ensures that the trailing dimension of integrands is indeed the integrand dimension. - This is pertinent in the 1d case when the sampled values are often of shape `(N,)`. Then, to maintain backward - consistency, we squeeze the result in the 1d case so it does not have any trailing dimensions. - - Args: - f (Callable): The wrapped function. - """ - - @functools.wraps(f) - def wrap(*args, **kwargs): - # i.e we only have one dimension, or the second dimension (that of the integrand) is 1 - is_1d = len(args[0].shape) == 1 or ( - len(args[0].shape) == 2 and args[0].shape[1] == 1 - ) - if is_1d: - return paddle.squeeze( - f(paddle.unsqueeze(args[0], axis=1), *args[1:], **kwargs) - ) - return f(*args, **kwargs) - - return wrap - - -def gaussian_integrate( - fn: Callable[[Any], paddle.Tensor], - dim: int, - N: int, - integration_domains: List[List[float]], - dtype: Literal["float32", "float64"] = "float64", -) -> paddle.Tensor: - """Integrate given function using gaussian quadrature. - - Args: - fn (Callable[[Any], paddle.Tensor]): Function to be integrated. - dim (int): Dimensionality of the integrand. - N (int): Number of dicretization points. - integration_domains (List[List[float]]): Intergration domains. - dtype (Literal["float32", "float64"], optional): Dtype used during computation. Defaults to "float64". - - Returns: - paddle.Tensor: Integral result. - - Examples: - >>> import numpy as np - >>> import paddle - >>> import ppsci.experimental - >>> func = lambda x: paddle.sin(x) - >>> dim = 1 - >>> N = 500 - >>> integration_domains = [[0, np.pi]] - >>> result = ppsci.experimental.gaussian_integrate(func, dim, N, integration_domains) - >>> np.testing.assert_allclose(float(result), 2.0, 1e-6) - >>> print(float(result)) - 1.9999999999999576 - """ - - def _compatible_meshgrid(*args: paddle.Tensor, **kwargs: paddle.Tensor): - # TODO(HydrogenSulfate): paddle.meshgrid do not support single Tensor, - # which will be fixed in paddle framework. 
- if len(args) == 1: - return args - else: - return paddle.meshgrid(*args, **kwargs) - - def _roots(N: int) -> np.ndarray: - return np.polynomial.legendre.leggauss(N)[0] - - def _calculate_grid( - N: int, - integration_domains: paddle.Tensor, - ) -> Tuple[paddle.Tensor, paddle.Tensor, int]: - """Calculate grid points, widths and N per dim - - Args: - N (int): Number of points. - integration_domain (paddle.Tensor): Integration domain. - - Returns: - Tuple[paddle.Tensor, paddle.Tensor, int]: Grid points, grid widths and - Number of grid slices per dimension. - """ - # Create grid and assemble evaluation points - grid_1d = [] - _dim = integration_domains.shape[0] - n_per_dim = int(N ** (1.0 / _dim) + 1e-8) - - # Determine for each dimension grid points and mesh width - def _resize_roots( - integration_domain: Tuple[float, float], roots: np.ndarray - ): # scale from [-1,1] to [a,b] - a = integration_domain[0] - b = integration_domain[1] - return ((b - a) / 2) * roots + ((a + b) / 2) - - for dim in range(_dim): - grid_1d.append(_resize_roots(integration_domains[dim], _roots(n_per_dim))) - h = paddle.stack([grid_1d[dim][1] - grid_1d[dim][0] for dim in range(_dim)]) - - # Get grid points - points = _compatible_meshgrid(*grid_1d) - points = paddle.stack([mg.reshape([-1]) for mg in points], axis=1) - - return points, h, n_per_dim - - def _evaluate_integrand(fn, points, weights=None, fn_args=None) -> paddle.Tensor: - """Evaluate the integrand function at the passed points. - - Args: - fn (function): Integrand function. - points (paddle.Tensor): Integration points. - weights (paddle.Tensor, optional): Integration weights. Defaults to None. - fn_args (list or tuple, optional): Any arguments required by the function. Defaults to None. - - Returns: - paddle.Tensor: Integral result. - """ - if fn_args is None: - fn_args = () - - result = fn(points, *fn_args) - if not str(result.dtype).endswith(dtype): - result = result.astype(dtype) - - if result.shape[0] != points.shape[0]: - raise ValueError( - f"The passed function was given {points.shape[0]} points but only returned {result.shape[0]} value(s)." - f"Please ensure that your function is vectorized, i.e. can be called with multiple evaluation points at once. It should return a tensor " - f"where first dimension matches length of passed elements. " - ) - - if weights is not None: - if ( - len(result.shape) > 1 - ): # if the the integrand is multi-dimensional, we need to reshape/repeat weights so they can be broadcast in the *= - integrand_shape = result.shape[1:] - weights = paddle.repeat_interleave( - paddle.unsqueeze(weights, axis=1), np.prod(integrand_shape) - ).reshape((weights.shape[0], *(integrand_shape))) - result *= weights - - return result - - def _weights(N, dim): - """Return the weights, broadcast across the dimensions, generated from the polynomial of choice. - - Args: - N (int): Number of nodes. - dim (int): Number of dimensions. - - Returns: - paddle.Tensor: Integration weights. 
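The grid and weight construction in _calculate_grid and _weights is standard Gauss-Legendre quadrature rescaled from [-1, 1] to each [a, b]. The numpy-only sketch below reproduces the documented 1-D example (integral of sin over [0, pi], expected value 2); it is a standalone illustration, not a call into this module.

import numpy as np

# 1-D Gauss-Legendre quadrature of sin on [0, pi]; the 0.5 * (b - a) factor is
# the same rescaling applied in _apply_composite_rule.
a, b, n = 0.0, np.pi, 50
nodes, weights = np.polynomial.legendre.leggauss(n)
x = 0.5 * (b - a) * nodes + 0.5 * (a + b)
print(0.5 * (b - a) * np.sum(weights * np.sin(x)))  # ~2.0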
- """ - weights = paddle.to_tensor(np.polynomial.legendre.leggauss(N)[1], dtype=dtype) - return paddle.prod( - paddle.stack(_compatible_meshgrid(*([weights] * dim)), axis=0), - axis=0, - ).reshape([-1]) - - def _apply_composite_rule(cur_dim_areas, dim, hs, domain): - """Apply "composite" rule for gaussian integrals - - cur_dim_areas will contain the areas per dimension - """ - # We collapse dimension by dimension - for cur_dim in range(dim): - cur_dim_areas = ( - 0.5 - * (domain[cur_dim][1] - domain[cur_dim][0]) - * paddle.sum( - cur_dim_areas, axis=len(cur_dim_areas.shape) - 1, dtype=dtype - ) - ) - return cur_dim_areas - - @expand_func_values_and_squeeze_integral - def _calculate_result( - function_values: paddle.Tensor, - dim: int, - n_per_dim: int, - hs: paddle.Tensor, - integration_domains: paddle.Tensor, - ) -> paddle.Tensor: - """Apply the "composite rule" to calculate a result from the evaluated integrand. - - Args: - function_values (paddle.Tensor): Output of the integrand. - dim (int): Dimensionality. - n_per_dim (int): Number of grid slices per dimension. - hs (paddle.Tensor): Distances between grid slices for each dimension. - - Returns: - paddle.Tensor: Quadrature result. - """ - # Reshape the output to be [integrand_dim,N,N,...] points instead of [integrand_dim,dim*N] points - integrand_shape = function_values.shape[1:] - dim_shape = [n_per_dim] * dim - new_shape = [*integrand_shape, *dim_shape] - - perm = list(range(len(function_values.shape))) - if len(perm) >= 2: - perm.append(perm.pop(0)) - reshaped_function_values = paddle.transpose(function_values, perm) - reshaped_function_values = reshaped_function_values.reshape(new_shape) - - assert new_shape == list( - reshaped_function_values.shape - ), f"reshaping produced shape {reshaped_function_values.shape}, expected shape was {new_shape}" - - result = _apply_composite_rule( - reshaped_function_values, dim, hs, integration_domains - ) - return result - - assert dtype in [ - "float32", - "float64", - ], f"dtype must be either 'float32' or 'float64', but got {dtype}" - - neg = False - for i, (a, b) in enumerate(integration_domains): - if a > b: - neg = not neg - integration_domains[i] = [b, a] - - integration_domains = paddle.to_tensor( - integration_domains, - dtype=dtype, - ) - - if integration_domains.shape[0] != dim: - raise ValueError( - f"The number of integration domain({integration_domains.shape[0]}) " - f"must be equal to the given 'dim'({dim})." - ) - if integration_domains.shape[1] != 2: - raise ValueError( - f"integration_domain should be in format of [[a_1, b_1], [a_2, b_2], ..., " - f"[a_dim, b_dim]], but got each range of integration is {integration_domains[0]}" - ) - grid_points, hs, n_per_dim = _calculate_grid(N, integration_domains) - - function_values = _evaluate_integrand( - fn, grid_points, weights=_weights(n_per_dim, dim) - ) - - result = _calculate_result(function_values, dim, n_per_dim, hs, integration_domains) - return result if (not neg) else -result - - -def fractional_diff( - func: Callable, alpha: float, a: float, t: float, h: float, dtype="float64" -) -> paddle.Tensor: - r"""Compute fractional derivative of given function at point t with fractional order - alpha using [Caputo derivative of fractional](https://en.wikipedia.org/wiki/Fractional_calculus#Caputo_fractional_derivative). - - $$ - D_t^\alpha f(t)=\frac{1}{\Gamma(n-\alpha)} \int_0^t \frac{f^{(n)}(s)}{(t-s)^{\alpha+1-n}} d s . - $$ - - $$ - s.t. 0 \lt \alpha \lt 1 . 
- $$ - - Args: - func (Callable): Function to compute the fractional derivative of. - alpha (float): Fractional order. - t (float): Point to compute the fractional derivative at. - a (float): Start point of the fractional integral. - h (float): Step size for finite difference. - dtype (str, optional): Data dtype during computation. Defaults to "float64". - - Returns: - paddle.Tensor: Fractional derivative result of the function at t. - - Examples: - >>> from ppsci.experimental import fractional_diff - >>> import numpy as np - >>> # define f(x) = x^2 - >>> def f(x): - ... return x * x - >>> # compute 0.5-order fractional derivative of f(x) at t=1.0 with step size h=1e-6 - >>> res = fractional_diff(f, alpha=0.5, a=0, t=1.0, h=1e-6, dtype="float64") - >>> np.testing.assert_allclose(float(res), 1.503547, 1e-6) - """ - - if not (0 < alpha < 1): - raise NotImplementedError( - f"Given alpha should be in range (0, 1), but got {alpha}" - ) - - def _finite_derivative( - func: Callable, x: paddle.Tensor, dx: float - ) -> paddle.Tensor: - """Compute the finite difference of a function at x using centered difference. - - Args: - func (Callable): Function to compute the finite difference of. - x (paddle.Tensor): Point to compute the finite difference at. - dx (float): Delta to use for the finite difference. - - Returns: - paddle.Tensor: First-order Finite difference of the function at x. - """ - return (func(x + dx) - func(x - dx)) / (2 * dx) - - def int_func(s): - return _finite_derivative(func, s, dx=h) / (t - s) ** (alpha) - - result = ( - 1.0 / paddle.exp(paddle.lgamma(paddle.to_tensor(1.0 - alpha, dtype=dtype))) - ) * gaussian_integrate( - int_func, dim=1, N=2**10 + 1, integration_domains=[[a, t]], dtype=dtype - ) - return result - - -def trapezoid_integrate( - y: paddle.Tensor, - x: paddle.Tensor = None, - dx: float = None, - axis: int = -1, - mode: Literal["sum", "cumsum"] = "sum", -) -> paddle.Tensor: - """ - Integrate along the given axis using the composite trapezoidal rule. Use the sum method. - - Args: - y (paddle.Tensor): Input to be integrated. - x (paddle.Tensor, optional): The sample points corresponding to the input samples. its shape should be - (1) input.shape; (2) the input.shape[axis] if axis is not default. Defaults to None. - dx (float, optional): The sample points are assumed to be evenly spaced and it is the spacing between sample points. - If 'x' and 'dx' are both default, 'dx' is set to 1 by default. Defaults to None. - axis (int, optional): The axis along which to integrate. Defaults to -1. - mode (Literal["sum", "cumsum"], optional): Which type cumulative sum function used. Defaults to "sum". - - Returns: - paddle.Tensor: Integral result. If dim of input is N, return is N-1 dim. - - Examples: - >>> import paddle - >>> import ppsci - >>> y = paddle.to_tensor([[0, 1, 2], [3, 4, 5]], dtype="float32") - >>> res = ppsci.experimental.trapezoid_integrate(y) - >>> print(res) - Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [2., 8.]) - >>> res = ppsci.experimental.trapezoid_integrate(y, mode="cumsum") - >>> print(res) - Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [[0.50000000, 2. ], - [3.50000000, 8. ]]) - >>> res = ppsci.experimental.trapezoid_integrate( - ... y, x=paddle.to_tensor([[0, 1, 2], [3, 4, 5]], dtype="float32") - ... ) - >>> print(res) - Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [2., 8.]) - >>> res = ppsci.experimental.trapezoid_integrate( - ... 
y, x=paddle.to_tensor([0, 1], dtype="float32"), axis=0 - ... ) - >>> print(res) - Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [1.50000000, 2.50000000, 3.50000000]) - >>> res = ppsci.experimental.trapezoid_integrate( - ... y, x=paddle.to_tensor([0, 1, 2], dtype="float32"), axis=1 - ... ) - >>> print(res) - Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [2., 8.]) - >>> res = ppsci.experimental.trapezoid_integrate(y, dx=2) - >>> print(res) - Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [4. , 16.]) - """ - if mode == "sum": - return paddle.trapezoid(y, x, dx, axis) - elif mode == "cumsum": - return paddle.cumulative_trapezoid(y, x, dx, axis) - else: - raise ValueError(f'mode should be "sum" or "cumsum", but got {mode}') - - -def montecarlo_integrate( - fn: Callable, - dim: int, - N: int = 1000, - integration_domain: Union[List[List[float]], paddle.Tensor] = None, - seed: int = None, -) -> paddle.Tensor: - """Integrates the passed function on the passed domain using vanilla Monte - Carlo Integration. - - Args: - fn (Callable): The function to integrate over. - dim (int): Dimensionality of the function's domain over which to - integrate. - N (Optional[int]): Number of sample points to use for the integration. - Defaults to 1000. - integration_domain (Union[List[List[float]], paddle.Tensor]): Integration - domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. - seed (Optional[int]): Random number generation seed to the sampling - point creation, only set if provided. Defaults to None. - - Raises: - ValueError: If len(integration_domain) != dim - - Returns: - paddle.Tensor: Integral result. - - Examples: - >>> import paddle - >>> import ppsci - - >>> _ = paddle.seed(1024) - >>> # The function we want to integrate, in this example - >>> # f(x0,x1) = sin(x0) + e^x1 for x0=[0,1] and x1=[-1,1] - >>> # Note that the function needs to support multiple evaluations at once (first - >>> # dimension of x here) - >>> # Expected result here is ~3.2698 - >>> def some_function(x): - ... return paddle.sin(x[:, 0]) + paddle.exp(x[:, 1]) - - >>> # Compute the function integral by sampling 10000 points over domain - >>> integral_value = ppsci.experimental.montecarlo_integrate( - ... some_function, - ... dim=2, - ... N=10000, - ... integration_domain=[[0, 1], [-1, 1]], - ... ) - - >>> print(integral_value) - Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 3.25152588) - """ - - @expand_func_values_and_squeeze_integral - def calculate_result(function_values, integration_domain): - """Calculate an integral result from the function evaluations - - Args: - function_values (paddle.Tensor): Output of the integrand - integration_domain (paddle.Tensor): Integration domain - - Returns: - Quadrature result - """ - scales = integration_domain[:, 1] - integration_domain[:, 0] - volume = paddle.prod(scales) - - # Integral = V / N * sum(func values) - N = function_values.shape[0] - integral = volume * paddle.sum(function_values, axis=0) / N - return integral - - def calculate_sample_points( - N: int, integration_domain: paddle.Tensor, seed: Optional[int] = None - ): - """Calculate random points for the integrand evaluation. - - Args: - N (int): Number of points - integration_domain (paddle.Tensor): Integration domain. - seed (int, optional): Random number generation seed for the sampling point creation, only set if provided. Defaults to None. - Returns: - Sample points. 
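montecarlo_integrate implements the plain mean-value estimator: sample uniformly in the box, average the integrand, multiply by the box volume. A minimal paddle sketch for the documented example integrand follows; the exact value is about 3.2698, and any single run carries sampling noise.

import paddle

# Mean-value Monte Carlo for f(x0, x1) = sin(x0) + exp(x1) on [0, 1] x [-1, 1].
N = 100000
x0 = paddle.uniform([N], min=0.0, max=1.0)
x1 = paddle.uniform([N], min=-1.0, max=1.0)
volume = (1.0 - 0.0) * (1.0 - (-1.0))
print(float(volume * paddle.mean(paddle.sin(x0) + paddle.exp(x1))))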
- """ - dim = integration_domain.shape[0] - domain_starts = integration_domain[:, 0] - domain_sizes = integration_domain[:, 1] - domain_starts - # Scale and translate random numbers via broadcasting - return ( - paddle.uniform( - shape=[N, dim], - dtype=domain_sizes.dtype, - min=0.0, - max=1.0, - seed=seed or 0, - ) - * domain_sizes - + domain_starts - ) - - if dim is not None: - if dim < 1: - raise ValueError("Dimension needs to be 1 or larger.") - if N is not None: - if N < 1 or type(N) is not int: - raise ValueError("N has to be a positive integer.") - - integration_domain = _setup_integration_domain(dim, integration_domain) - sample_points = calculate_sample_points(N, integration_domain, seed) - function_values, _ = _evaluate_integrand(fn, sample_points) - return calculate_result(function_values, integration_domain) - - -def _setup_integration_domain( - dim: int, integration_domain: Union[List[List[float]], paddle.Tensor] -) -> paddle.Tensor: - """Sets up the integration domain if unspecified by the user. - Args: - dim (int): Dimensionality of the integration domain. - integration_domain (List or Tensor): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. - - Returns: - Integration domain. - """ - # If no integration_domain is specified, create [-1,1]^d bounds - if integration_domain is None: - integration_domain = [[-1.0, 1.0]] * dim - - integration_domain = [[float(b) for b in bounds] for bounds in integration_domain] - - integration_domain = paddle.to_tensor(integration_domain) - - if tuple(integration_domain.shape) != (dim, 2): - raise ValueError( - "The integration domain has an unexpected shape. " - f"Expected {(dim, 2)}, got {integration_domain.shape}" - ) - return integration_domain - - -def _evaluate_integrand(fn, points, weights=None, args=None): - """Evaluate the integrand function at the passed points. - - Args: - fn (Callable): Integrand function. - points (paddle.Tensor): Integration points. - weights (Optional[paddle.Tensor]): Integration weights. Defaults to None. - args (Optional[List, Tuple]): Any arguments required by the function. Defaults to None. - - Returns: - padlde.Tensor: Integrand function output. - int: Number of evaluated points. - """ - num_points = points.shape[0] - - if args is None: - args = () - - result = fn(points, *args) - num_results = result.shape[0] - if num_results != num_points: - raise ValueError( - f"The passed function was given {num_points} points but only returned {num_results} value(s)." - f"Please ensure that your function is vectorized, i.e. can be called with multiple evaluation points at once. It should return a tensor " - f"where first dimension matches length of passed elements. " - ) - - if weights is not None: - if ( - len(result.shape) > 1 - ): # if the the integrand is multi-dimensional, we need to reshape/repeat weights so they can be broadcast in the *= - integrand_shape = paddle.to_tensor(result.shape[1:]) - weights = paddle.tile( - paddle.unsqueeze(weights, axis=1), paddle.prod(integrand_shape) - ).reshape((weights.shape[0], *(integrand_shape))) - result *= weights - - return result, num_points +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
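# Illustrative sketch: the vanilla Monte Carlo estimator used by montecarlo_integrate is
# V / N * sum(f(x_i)), with x_i drawn uniformly from the integration box by scaling and
# translating unit-uniform samples. NumPy-only version of the sin(x0) + exp(x1) example
# (exact value ~3.2698):
import numpy as np

rng = np.random.default_rng(0)
domain = np.array([[0.0, 1.0], [-1.0, 1.0]])
starts, sizes = domain[:, 0], domain[:, 1] - domain[:, 0]

n = 100_000
x = starts + sizes * rng.random((n, domain.shape[0]))  # scale unit samples into the box
f = np.sin(x[:, 0]) + np.exp(x[:, 1])
print(np.prod(sizes) * f.mean())                       # ~3.27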
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import functools
+from typing import Any
+from typing import Callable
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import numpy as np
+import paddle
+from typing_extensions import Literal
+
+
+def bessel_i0(x: paddle.Tensor) -> paddle.Tensor:
+    """Zero-order modified Bessel function of the first kind.
+
+    Args:
+        x (paddle.Tensor): Input data of the formula.
+
+    Examples:
+        >>> import paddle
+        >>> import ppsci
+        >>> res = ppsci.experimental.bessel_i0(paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32"))
+    """
+    return paddle.i0(x)
+
+
+def bessel_i0e(x: paddle.Tensor) -> paddle.Tensor:
+    """Exponentially scaled zero-order modified Bessel function of the first kind.
+
+    Args:
+        x (paddle.Tensor): Input data of the formula.
+
+    Examples:
+        >>> import paddle
+        >>> import ppsci
+        >>> res = ppsci.experimental.bessel_i0e(paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32"))
+    """
+    return paddle.i0e(x)
+
+
+def bessel_i1(x: paddle.Tensor) -> paddle.Tensor:
+    """First-order modified Bessel function of the first kind.
+
+    Args:
+        x (paddle.Tensor): Input data of the formula.
+
+    Examples:
+        >>> import paddle
+        >>> import ppsci
+        >>> res = ppsci.experimental.bessel_i1(paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32"))
+    """
+    return paddle.i1(x)
+
+
+def bessel_i1e(x: paddle.Tensor) -> paddle.Tensor:
+    """Exponentially scaled first-order modified Bessel function of the first kind.
+
+    Args:
+        x (paddle.Tensor): Input data of the formula.
+
+    Examples:
+        >>> import paddle
+        >>> import ppsci
+        >>> res = ppsci.experimental.bessel_i1e(paddle.to_tensor([0, 1, 2, 3, 4], dtype="float32"))
+    """
+    return paddle.i1e(x)
+
+
+def expand_func_values_and_squeeze_integral(f: Callable):
+    """This decorator ensures that the trailing dimension of integrands is indeed the integrand dimension.
+    This is pertinent in the 1d case when the sampled values are often of shape `(N,)`. Then, to maintain backward
+    consistency, we squeeze the result in the 1d case so it does not have any trailing dimensions.
+
+    Args:
+        f (Callable): The wrapped function.
+    """
+
+    @functools.wraps(f)
+    def wrap(*args, **kwargs):
+        # i.e. we only have one dimension, or the second dimension (that of the integrand) is 1
+        is_1d = len(args[0].shape) == 1 or (
+            len(args[0].shape) == 2 and args[0].shape[1] == 1
+        )
+        if is_1d:
+            return paddle.squeeze(
+                f(paddle.unsqueeze(args[0], axis=1), *args[1:], **kwargs)
+            )
+        return f(*args, **kwargs)
+
+    return wrap
+
+
+def gaussian_integrate(
+    fn: Callable[[Any], paddle.Tensor],
+    dim: int,
+    N: int,
+    integration_domains: List[List[float]],
+    dtype: Literal["float32", "float64"] = "float64",
+) -> paddle.Tensor:
+    """Integrate given function using Gaussian (Gauss-Legendre) quadrature.
+
+    Args:
+        fn (Callable[[Any], paddle.Tensor]): Function to be integrated.
+        dim (int): Dimensionality of the integrand.
+        N (int): Number of discretization points.
+        integration_domains (List[List[float]]): Integration domains.
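# Illustrative sketch: the four wrappers above expose paddle.i0/i0e/i1/i1e, i.e. the
# modified Bessel functions of the first kind and their exponentially scaled variants.
# Optional cross-check against SciPy (not a dependency of this module):
import numpy as np
from scipy import special

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
np.testing.assert_allclose(special.i0e(x), np.exp(-np.abs(x)) * special.i0(x))
np.testing.assert_allclose(special.i1e(x), np.exp(-np.abs(x)) * special.i1(x))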
+ dtype (Literal["float32", "float64"], optional): Dtype used during computation. Defaults to "float64". + + Returns: + paddle.Tensor: Integral result. + + Examples: + >>> import numpy as np + >>> import paddle + >>> import ppsci.experimental + >>> func = lambda x: paddle.sin(x) + >>> dim = 1 + >>> N = 500 + >>> integration_domains = [[0, np.pi]] + >>> result = ppsci.experimental.gaussian_integrate(func, dim, N, integration_domains) + >>> np.testing.assert_allclose(float(result), 2.0, 1e-6) + >>> print(float(result)) + 1.9999999999999576 + """ + + def _compatible_meshgrid(*args: paddle.Tensor, **kwargs: paddle.Tensor): + # TODO(HydrogenSulfate): paddle.meshgrid do not support single Tensor, + # which will be fixed in paddle framework. + if len(args) == 1: + return args + else: + return paddle.meshgrid(*args, **kwargs) + + def _roots(N: int) -> np.ndarray: + return np.polynomial.legendre.leggauss(N)[0] + + def _calculate_grid( + N: int, + integration_domains: paddle.Tensor, + ) -> Tuple[paddle.Tensor, paddle.Tensor, int]: + """Calculate grid points, widths and N per dim + + Args: + N (int): Number of points. + integration_domain (paddle.Tensor): Integration domain. + + Returns: + Tuple[paddle.Tensor, paddle.Tensor, int]: Grid points, grid widths and + Number of grid slices per dimension. + """ + # Create grid and assemble evaluation points + grid_1d = [] + _dim = integration_domains.shape[0] + n_per_dim = int(N ** (1.0 / _dim) + 1e-8) + + # Determine for each dimension grid points and mesh width + def _resize_roots( + integration_domain: Tuple[float, float], roots: np.ndarray + ): # scale from [-1,1] to [a,b] + a = integration_domain[0] + b = integration_domain[1] + return ((b - a) / 2) * roots + ((a + b) / 2) + + for dim in range(_dim): + grid_1d.append(_resize_roots(integration_domains[dim], _roots(n_per_dim))) + h = paddle.stack([grid_1d[dim][1] - grid_1d[dim][0] for dim in range(_dim)]) + + # Get grid points + points = _compatible_meshgrid(*grid_1d) + points = paddle.stack([mg.reshape([-1]) for mg in points], axis=1) + + return points, h, n_per_dim + + def _evaluate_integrand(fn, points, weights=None, fn_args=None) -> paddle.Tensor: + """Evaluate the integrand function at the passed points. + + Args: + fn (function): Integrand function. + points (paddle.Tensor): Integration points. + weights (paddle.Tensor, optional): Integration weights. Defaults to None. + fn_args (list or tuple, optional): Any arguments required by the function. Defaults to None. + + Returns: + paddle.Tensor: Integral result. + """ + if fn_args is None: + fn_args = () + + result = fn(points, *fn_args) + if not str(result.dtype).endswith(dtype): + result = result.astype(dtype) + + if result.shape[0] != points.shape[0]: + raise ValueError( + f"The passed function was given {points.shape[0]} points but only returned {result.shape[0]} value(s)." + f"Please ensure that your function is vectorized, i.e. can be called with multiple evaluation points at once. It should return a tensor " + f"where first dimension matches length of passed elements. 
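# Illustrative sketch: the grid construction above is Gauss-Legendre quadrature, with
# leggauss nodes/weights mapped from [-1, 1] onto [a, b]. NumPy-only 1-D version of the
# sin(x) over [0, pi] docstring example:
import numpy as np

a, b, n = 0.0, np.pi, 20
nodes, weights = np.polynomial.legendre.leggauss(n)
s = 0.5 * (b - a) * nodes + 0.5 * (a + b)            # same affine map as _resize_roots
print(0.5 * (b - a) * np.sum(weights * np.sin(s)))   # ~2.0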
" + ) + + if weights is not None: + if ( + len(result.shape) > 1 + ): # if the the integrand is multi-dimensional, we need to reshape/repeat weights so they can be broadcast in the *= + integrand_shape = result.shape[1:] + weights = paddle.repeat_interleave( + paddle.unsqueeze(weights, axis=1), np.prod(integrand_shape) + ).reshape((weights.shape[0], *(integrand_shape))) + result *= weights + + return result + + def _weights(N, dim): + """Return the weights, broadcast across the dimensions, generated from the polynomial of choice. + + Args: + N (int): Number of nodes. + dim (int): Number of dimensions. + + Returns: + paddle.Tensor: Integration weights. + """ + weights = paddle.to_tensor(np.polynomial.legendre.leggauss(N)[1], dtype=dtype) + return paddle.prod( + paddle.stack(_compatible_meshgrid(*([weights] * dim)), axis=0), + axis=0, + ).reshape([-1]) + + def _apply_composite_rule(cur_dim_areas, dim, hs, domain): + """Apply "composite" rule for gaussian integrals + + cur_dim_areas will contain the areas per dimension + """ + # We collapse dimension by dimension + for cur_dim in range(dim): + cur_dim_areas = ( + 0.5 + * (domain[cur_dim][1] - domain[cur_dim][0]) + * paddle.sum( + cur_dim_areas, axis=len(cur_dim_areas.shape) - 1, dtype=dtype + ) + ) + return cur_dim_areas + + @expand_func_values_and_squeeze_integral + def _calculate_result( + function_values: paddle.Tensor, + dim: int, + n_per_dim: int, + hs: paddle.Tensor, + integration_domains: paddle.Tensor, + ) -> paddle.Tensor: + """Apply the "composite rule" to calculate a result from the evaluated integrand. + + Args: + function_values (paddle.Tensor): Output of the integrand. + dim (int): Dimensionality. + n_per_dim (int): Number of grid slices per dimension. + hs (paddle.Tensor): Distances between grid slices for each dimension. + + Returns: + paddle.Tensor: Quadrature result. + """ + # Reshape the output to be [integrand_dim,N,N,...] points instead of [integrand_dim,dim*N] points + integrand_shape = function_values.shape[1:] + dim_shape = [n_per_dim] * dim + new_shape = [*integrand_shape, *dim_shape] + + perm = list(range(len(function_values.shape))) + if len(perm) >= 2: + perm.append(perm.pop(0)) + reshaped_function_values = paddle.transpose(function_values, perm) + reshaped_function_values = reshaped_function_values.reshape(new_shape) + + assert new_shape == list( + reshaped_function_values.shape + ), f"reshaping produced shape {reshaped_function_values.shape}, expected shape was {new_shape}" + + result = _apply_composite_rule( + reshaped_function_values, dim, hs, integration_domains + ) + return result + + assert dtype in [ + "float32", + "float64", + ], f"dtype must be either 'float32' or 'float64', but got {dtype}" + + neg = False + for i, (a, b) in enumerate(integration_domains): + if a > b: + neg = not neg + integration_domains[i] = [b, a] + + integration_domains = paddle.to_tensor( + integration_domains, + dtype=dtype, + ) + + if integration_domains.shape[0] != dim: + raise ValueError( + f"The number of integration domain({integration_domains.shape[0]}) " + f"must be equal to the given 'dim'({dim})." 
+ ) + if integration_domains.shape[1] != 2: + raise ValueError( + f"integration_domain should be in format of [[a_1, b_1], [a_2, b_2], ..., " + f"[a_dim, b_dim]], but got each range of integration is {integration_domains[0]}" + ) + grid_points, hs, n_per_dim = _calculate_grid(N, integration_domains) + + function_values = _evaluate_integrand( + fn, grid_points, weights=_weights(n_per_dim, dim) + ) + + result = _calculate_result(function_values, dim, n_per_dim, hs, integration_domains) + return result if (not neg) else -result + + +def fractional_diff( + func: Callable, alpha: float, a: float, t: float, h: float, dtype="float64" +) -> paddle.Tensor: + r"""Compute fractional derivative of given function at point t with fractional order + alpha using [Caputo derivative of fractional](https://en.wikipedia.org/wiki/Fractional_calculus#Caputo_fractional_derivative). + + $$ + D_t^\alpha f(t)=\frac{1}{\Gamma(n-\alpha)} \int_0^t \frac{f^{(n)}(s)}{(t-s)^{\alpha+1-n}} d s . + $$ + + $$ + s.t. 0 \lt \alpha \lt 1 . + $$ + + Args: + func (Callable): Function to compute the fractional derivative of. + alpha (float): Fractional order. + t (float): Point to compute the fractional derivative at. + a (float): Start point of the fractional integral. + h (float): Step size for finite difference. + dtype (str, optional): Data dtype during computation. Defaults to "float64". + + Returns: + paddle.Tensor: Fractional derivative result of the function at t. + + Examples: + >>> from ppsci.experimental import fractional_diff + >>> import numpy as np + >>> # define f(x) = x^2 + >>> def f(x): + ... return x * x + >>> # compute 0.5-order fractional derivative of f(x) at t=1.0 with step size h=1e-6 + >>> res = fractional_diff(f, alpha=0.5, a=0, t=1.0, h=1e-6, dtype="float64") + >>> np.testing.assert_allclose(float(res), 1.503547, 1e-6) + """ + + if not (0 < alpha < 1): + raise NotImplementedError( + f"Given alpha should be in range (0, 1), but got {alpha}" + ) + + def _finite_derivative( + func: Callable, x: paddle.Tensor, dx: float + ) -> paddle.Tensor: + """Compute the finite difference of a function at x using centered difference. + + Args: + func (Callable): Function to compute the finite difference of. + x (paddle.Tensor): Point to compute the finite difference at. + dx (float): Delta to use for the finite difference. + + Returns: + paddle.Tensor: First-order Finite difference of the function at x. + """ + return (func(x + dx) - func(x - dx)) / (2 * dx) + + def int_func(s): + return _finite_derivative(func, s, dx=h) / (t - s) ** (alpha) + + result = ( + 1.0 / paddle.exp(paddle.lgamma(paddle.to_tensor(1.0 - alpha, dtype=dtype))) + ) * gaussian_integrate( + int_func, dim=1, N=2**10 + 1, integration_domains=[[a, t]], dtype=dtype + ) + return result + + +def trapezoid_integrate( + y: paddle.Tensor, + x: paddle.Tensor = None, + dx: float = None, + axis: int = -1, + mode: Literal["sum", "cumsum"] = "sum", +) -> paddle.Tensor: + """ + Integrate along the given axis using the composite trapezoidal rule. Use the sum method. + + Args: + y (paddle.Tensor): Input to be integrated. + x (paddle.Tensor, optional): The sample points corresponding to the input samples. its shape should be + (1) input.shape; (2) the input.shape[axis] if axis is not default. Defaults to None. + dx (float, optional): The sample points are assumed to be evenly spaced and it is the spacing between sample points. + If 'x' and 'dx' are both default, 'dx' is set to 1 by default. Defaults to None. + axis (int, optional): The axis along which to integrate. 
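# Illustrative sketch: the _finite_derivative helper used by fractional_diff is a
# centered difference, whose error shrinks like h**2. Quick NumPy check on f = sin:
import numpy as np

x0 = 1.3
for h in (1e-2, 1e-3, 1e-4):
    approx = (np.sin(x0 + h) - np.sin(x0 - h)) / (2 * h)
    print(h, abs(approx - np.cos(x0)))  # error drops roughly 100x each time h shrinks 10x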
Defaults to -1. + mode (Literal["sum", "cumsum"], optional): Which type cumulative sum function used. Defaults to "sum". + + Returns: + paddle.Tensor: Integral result. If dim of input is N, return is N-1 dim. + + Examples: + >>> import paddle + >>> import ppsci + >>> y = paddle.to_tensor([[0, 1, 2], [3, 4, 5]], dtype="float32") + >>> res = ppsci.experimental.trapezoid_integrate(y) + >>> print(res) + Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [2., 8.]) + >>> res = ppsci.experimental.trapezoid_integrate(y, mode="cumsum") + >>> print(res) + Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [[0.50000000, 2. ], + [3.50000000, 8. ]]) + >>> res = ppsci.experimental.trapezoid_integrate( + ... y, x=paddle.to_tensor([[0, 1, 2], [3, 4, 5]], dtype="float32") + ... ) + >>> print(res) + Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [2., 8.]) + >>> res = ppsci.experimental.trapezoid_integrate( + ... y, x=paddle.to_tensor([0, 1], dtype="float32"), axis=0 + ... ) + >>> print(res) + Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [1.50000000, 2.50000000, 3.50000000]) + >>> res = ppsci.experimental.trapezoid_integrate( + ... y, x=paddle.to_tensor([0, 1, 2], dtype="float32"), axis=1 + ... ) + >>> print(res) + Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [2., 8.]) + >>> res = ppsci.experimental.trapezoid_integrate(y, dx=2) + >>> print(res) + Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [4. , 16.]) + """ + if mode == "sum": + return paddle.trapezoid(y, x, dx, axis) + elif mode == "cumsum": + return paddle.cumulative_trapezoid(y, x, dx, axis) + else: + raise ValueError(f'mode should be "sum" or "cumsum", but got {mode}') + + +def montecarlo_integrate( + fn: Callable, + dim: int, + N: int = 1000, + integration_domain: Union[List[List[float]], paddle.Tensor] = None, + seed: int = None, +) -> paddle.Tensor: + """Integrates the passed function on the passed domain using vanilla Monte + Carlo Integration. + + Args: + fn (Callable): The function to integrate over. + dim (int): Dimensionality of the function's domain over which to + integrate. + N (Optional[int]): Number of sample points to use for the integration. + Defaults to 1000. + integration_domain (Union[List[List[float]], paddle.Tensor]): Integration + domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. + seed (Optional[int]): Random number generation seed to the sampling + point creation, only set if provided. Defaults to None. + + Raises: + ValueError: If len(integration_domain) != dim + + Returns: + paddle.Tensor: Integral result. + + Examples: + >>> import paddle + >>> import ppsci + + >>> _ = paddle.seed(1024) + >>> # The function we want to integrate, in this example + >>> # f(x0,x1) = sin(x0) + e^x1 for x0=[0,1] and x1=[-1,1] + >>> # Note that the function needs to support multiple evaluations at once (first + >>> # dimension of x here) + >>> # Expected result here is ~3.2698 + >>> def some_function(x): + ... return paddle.sin(x[:, 0]) + paddle.exp(x[:, 1]) + + >>> # Compute the function integral by sampling 10000 points over domain + >>> integral_value = ppsci.experimental.montecarlo_integrate( + ... some_function, + ... dim=2, + ... N=10000, + ... integration_domain=[[0, 1], [-1, 1]], + ... 
) + + >>> print(integral_value) + Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 3.25152588) + """ + + @expand_func_values_and_squeeze_integral + def calculate_result(function_values, integration_domain): + """Calculate an integral result from the function evaluations + + Args: + function_values (paddle.Tensor): Output of the integrand + integration_domain (paddle.Tensor): Integration domain + + Returns: + Quadrature result + """ + scales = integration_domain[:, 1] - integration_domain[:, 0] + volume = paddle.prod(scales) + + # Integral = V / N * sum(func values) + N = function_values.shape[0] + integral = volume * paddle.sum(function_values, axis=0) / N + return integral + + def calculate_sample_points( + N: int, integration_domain: paddle.Tensor, seed: Optional[int] = None + ): + """Calculate random points for the integrand evaluation. + + Args: + N (int): Number of points + integration_domain (paddle.Tensor): Integration domain. + seed (int, optional): Random number generation seed for the sampling point creation, only set if provided. Defaults to None. + Returns: + Sample points. + """ + dim = integration_domain.shape[0] + domain_starts = integration_domain[:, 0] + domain_sizes = integration_domain[:, 1] - domain_starts + # Scale and translate random numbers via broadcasting + return ( + paddle.uniform( + shape=[N, dim], + dtype=domain_sizes.dtype, + min=0.0, + max=1.0, + seed=seed or 0, + ) + * domain_sizes + + domain_starts + ) + + if dim is not None: + if dim < 1: + raise ValueError("Dimension needs to be 1 or larger.") + if N is not None: + if N < 1 or type(N) is not int: + raise ValueError("N has to be a positive integer.") + + integration_domain = _setup_integration_domain(dim, integration_domain) + sample_points = calculate_sample_points(N, integration_domain, seed) + function_values, _ = _evaluate_integrand(fn, sample_points) + return calculate_result(function_values, integration_domain) + + +def _setup_integration_domain( + dim: int, integration_domain: Union[List[List[float]], paddle.Tensor] +) -> paddle.Tensor: + """Sets up the integration domain if unspecified by the user. + Args: + dim (int): Dimensionality of the integration domain. + integration_domain (List or Tensor): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. + + Returns: + Integration domain. + """ + # If no integration_domain is specified, create [-1,1]^d bounds + if integration_domain is None: + integration_domain = [[-1.0, 1.0]] * dim + + integration_domain = [[float(b) for b in bounds] for bounds in integration_domain] + + integration_domain = paddle.to_tensor(integration_domain) + + if tuple(integration_domain.shape) != (dim, 2): + raise ValueError( + "The integration domain has an unexpected shape. " + f"Expected {(dim, 2)}, got {integration_domain.shape}" + ) + return integration_domain + + +def _evaluate_integrand(fn, points, weights=None, args=None): + """Evaluate the integrand function at the passed points. + + Args: + fn (Callable): Integrand function. + points (paddle.Tensor): Integration points. + weights (Optional[paddle.Tensor]): Integration weights. Defaults to None. + args (Optional[List, Tuple]): Any arguments required by the function. Defaults to None. + + Returns: + padlde.Tensor: Integrand function output. + int: Number of evaluated points. 
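# Illustrative sketch: with uniform sampling as above, the Monte Carlo error decays
# roughly like 1/sqrt(N). NumPy-only check on the integral of x^2 over [0, 1] (= 1/3);
# for a single seed the decrease is only approximate, not monotone:
import numpy as np

rng = np.random.default_rng(42)
for n in (1_000, 10_000, 100_000, 1_000_000):
    x = rng.random(n)
    print(n, abs((x ** 2).mean() - 1.0 / 3.0))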
+ """ + num_points = points.shape[0] + + if args is None: + args = () + + result = fn(points, *args) + num_results = result.shape[0] + if num_results != num_points: + raise ValueError( + f"The passed function was given {num_points} points but only returned {num_results} value(s)." + f"Please ensure that your function is vectorized, i.e. can be called with multiple evaluation points at once. It should return a tensor " + f"where first dimension matches length of passed elements. " + ) + + if weights is not None: + if ( + len(result.shape) > 1 + ): # if the the integrand is multi-dimensional, we need to reshape/repeat weights so they can be broadcast in the *= + integrand_shape = paddle.to_tensor(result.shape[1:]) + weights = paddle.tile( + paddle.unsqueeze(weights, axis=1), paddle.prod(integrand_shape) + ).reshape((weights.shape[0], *(integrand_shape))) + result *= weights + + return result, num_points diff --git a/ppsci/geometry/__init__.py b/ppsci/geometry/__init__.py index 30b4ad0859..2e381334d9 100644 --- a/ppsci/geometry/__init__.py +++ b/ppsci/geometry/__init__.py @@ -1,83 +1,83 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy - -from ppsci.geometry.geometry import Geometry -from ppsci.geometry.geometry_1d import Interval -from ppsci.geometry.geometry_2d import Disk -from ppsci.geometry.geometry_2d import Polygon -from ppsci.geometry.geometry_2d import Rectangle -from ppsci.geometry.geometry_2d import Triangle -from ppsci.geometry.geometry_3d import Cuboid -from ppsci.geometry.geometry_3d import Sphere -from ppsci.geometry.geometry_nd import Hypercube -from ppsci.geometry.geometry_nd import Hypersphere -from ppsci.geometry.mesh import Mesh -from ppsci.geometry.mesh import SDFMesh -from ppsci.geometry.pointcloud import PointCloud -from ppsci.geometry.timedomain import TimeDomain -from ppsci.geometry.timedomain import TimeXGeometry -from ppsci.utils import logger -from ppsci.utils import misc - -__all__ = [ - "build_geometry", - "Cuboid", - "Disk", - "Geometry", - "Hypercube", - "Hypersphere", - "Interval", - "Mesh", - "SDFMesh", - "Polygon", - "Rectangle", - "Sphere", - "TimeDomain", - "TimeXGeometry", - "Triangle", - "PointCloud", -] - - -def build_geometry(cfg): - """Build geometry(ies) - - Args: - cfg (List[DictConfig]): Geometry config list. - - Returns: - Dict[str, Geometry]: Geometry(ies) in dict. 
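# Illustrative sketch: the weight handling in _evaluate_integrand repeats the per-point
# quadrature weights across the integrand's trailing dimensions so that an elementwise
# multiply applies the same weight to every output component. NumPy equivalent:
import numpy as np

n_points, out_dim = 5, 3
values = np.arange(n_points * out_dim, dtype=float).reshape(n_points, out_dim)
weights = np.linspace(0.1, 0.5, n_points)
w_full = np.repeat(weights[:, None], out_dim, axis=1)  # shape (5, 3)
assert np.allclose(values * w_full, values * weights[:, None])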
- """ - if cfg is None: - return None - cfg = copy.deepcopy(cfg) - - geom_dict = misc.PrettyOrderedDict() - for _item in cfg: - geom_cls = next(iter(_item.keys())) - geom_cfg = _item[geom_cls] - geom_name = geom_cfg.pop("name", geom_cls) - if geom_cls == "TimeXGeometry": - time_cfg = geom_cfg.pop("TimeDomain") - geom_cls = next(iter(geom_cfg.keys())) - geom_dict[geom_name] = TimeXGeometry( - TimeDomain(**time_cfg), eval(geom_cls)(**geom_cfg[geom_cls]) - ) - else: - geom_dict[geom_name] = eval(geom_cls)(**geom_cfg) - - logger.debug(str(geom_dict[geom_name])) - return geom_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from ppsci.geometry.geometry import Geometry +from ppsci.geometry.geometry_1d import Interval +from ppsci.geometry.geometry_2d import Disk +from ppsci.geometry.geometry_2d import Polygon +from ppsci.geometry.geometry_2d import Rectangle +from ppsci.geometry.geometry_2d import Triangle +from ppsci.geometry.geometry_3d import Cuboid +from ppsci.geometry.geometry_3d import Sphere +from ppsci.geometry.geometry_nd import Hypercube +from ppsci.geometry.geometry_nd import Hypersphere +from ppsci.geometry.mesh import Mesh +from ppsci.geometry.mesh import SDFMesh +from ppsci.geometry.pointcloud import PointCloud +from ppsci.geometry.timedomain import TimeDomain +from ppsci.geometry.timedomain import TimeXGeometry +from ppsci.utils import logger +from ppsci.utils import misc + +__all__ = [ + "build_geometry", + "Cuboid", + "Disk", + "Geometry", + "Hypercube", + "Hypersphere", + "Interval", + "Mesh", + "SDFMesh", + "Polygon", + "Rectangle", + "Sphere", + "TimeDomain", + "TimeXGeometry", + "Triangle", + "PointCloud", +] + + +def build_geometry(cfg): + """Build geometry(ies) + + Args: + cfg (List[DictConfig]): Geometry config list. + + Returns: + Dict[str, Geometry]: Geometry(ies) in dict. + """ + if cfg is None: + return None + cfg = copy.deepcopy(cfg) + + geom_dict = misc.PrettyOrderedDict() + for _item in cfg: + geom_cls = next(iter(_item.keys())) + geom_cfg = _item[geom_cls] + geom_name = geom_cfg.pop("name", geom_cls) + if geom_cls == "TimeXGeometry": + time_cfg = geom_cfg.pop("TimeDomain") + geom_cls = next(iter(geom_cfg.keys())) + geom_dict[geom_name] = TimeXGeometry( + TimeDomain(**time_cfg), eval(geom_cls)(**geom_cfg[geom_cls]) + ) + else: + geom_dict[geom_name] = eval(geom_cls)(**geom_cfg) + + logger.debug(str(geom_dict[geom_name])) + return geom_dict diff --git a/ppsci/geometry/csg.py b/ppsci/geometry/csg.py index 87534bedd6..5bc266f6d8 100644 --- a/ppsci/geometry/csg.py +++ b/ppsci/geometry/csg.py @@ -1,337 +1,337 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
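# Illustrative sketch: build_geometry walks a list of single-key dicts, where the key is
# a geometry class name and the value holds its keyword arguments plus an optional
# "name". A hypothetical config follows; the keyword names (xmin/xmax, t0/t1) are
# assumptions for illustration, not taken from this diff:
cfg = [
    {"Rectangle": {"name": "rect", "xmin": (0.0, 0.0), "xmax": (1.0, 1.0)}},
    {
        "TimeXGeometry": {
            "name": "time_rect",
            "TimeDomain": {"t0": 0.0, "t1": 1.0},
            "Rectangle": {"xmin": (0.0, 0.0), "xmax": (1.0, 1.0)},
        }
    },
]
# build_geometry(cfg) would then return a dict keyed by "rect" and "time_rect".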
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" - -from __future__ import annotations - -import numpy as np -import paddle - -from ppsci.geometry import geometry - - -class CSGUnion(geometry.Geometry): - """Construct an object by CSG Union(except for Mesh).""" - - def __init__(self, geom1, geom2): - if geom1.ndim != geom2.ndim: - raise ValueError( - f"{geom1}.ndim({geom1.ndim}) should be equal to " - f"{geom2}.ndim({geom1.ndim})" - ) - super().__init__( - geom1.ndim, - ( - np.minimum(geom1.bbox[0], geom2.bbox[0]), - np.maximum(geom1.bbox[1], geom2.bbox[1]), - ), - geom1.diam + geom2.diam, - ) - self.geom1 = geom1 - self.geom2 = geom2 - - def is_inside(self, x): - return np.logical_or(self.geom1.is_inside(x), self.geom2.is_inside(x)) - - def on_boundary(self, x): - return np.logical_or( - np.logical_and(self.geom1.on_boundary(x), ~self.geom2.is_inside(x)), - np.logical_and(self.geom2.on_boundary(x), ~self.geom1.is_inside(x)), - ) - - def boundary_normal(self, x): - return np.logical_and(self.geom1.on_boundary(x), ~self.geom2.is_inside(x))[ - :, np.newaxis - ] * self.geom1.boundary_normal(x) + np.logical_and( - self.geom2.on_boundary(x), ~self.geom1.is_inside(x) - )[ - :, np.newaxis - ] * self.geom2.boundary_normal( - x - ) - - def random_points(self, n, random="pseudo"): - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size = 0 - while _size < n: - points = ( - np.random.rand(n, self.ndim) * (self.bbox[1] - self.bbox[0]) - + self.bbox[0] - ) - points = points[self.is_inside(points)] - - if len(points) > n - _size: - points = points[: n - _size] - x[_size : _size + len(points)] = points - _size += len(points) - return x - - def random_boundary_points(self, n, random="pseudo"): - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size = 0 - while _size < n: - geom1_boundary_points = self.geom1.random_boundary_points(n, random=random) - geom1_boundary_points = geom1_boundary_points[ - ~self.geom2.is_inside(geom1_boundary_points) - ] - - geom2_boundary_points = self.geom2.random_boundary_points(n, random=random) - geom2_boundary_points = geom2_boundary_points[ - ~self.geom1.is_inside(geom2_boundary_points) - ] - - points = np.concatenate((geom1_boundary_points, geom2_boundary_points)) - points = np.random.permutation(points) - - if len(points) > n - _size: - points = points[: n - _size] - x[_size : _size + len(points)] = points - _size += len(points) - return x - - def periodic_point(self, x, component): - x = np.copy(x) - on_boundary_geom1 = np.logical_and( - self.geom1.on_boundary(x), ~self.geom2.is_inside(x) - ) - x[on_boundary_geom1] = self.geom1.periodic_point(x, component)[ - on_boundary_geom1 - ] - on_boundary_geom2 = np.logical_and( - self.geom2.on_boundary(x), ~self.geom1.is_inside(x) - ) - x[on_boundary_geom2] = self.geom2.periodic_point(x, component)[ - on_boundary_geom2 - ] - return x - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field of CSG union of two geometries. 
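# Illustrative sketch: the sdf_func methods of these CSG classes combine two signed
# distance fields with min/max, using the usual convention that the SDF is negative
# inside a shape. NumPy example with two unit circles:
import numpy as np

def circle_sdf(p, center, r):
    return np.linalg.norm(p - np.asarray(center), axis=-1) - r

p = np.array([[0.0, 0.0], [1.0, 0.0], [3.0, 0.0]])
d1 = circle_sdf(p, (0.0, 0.0), 1.0)
d2 = circle_sdf(p, (1.0, 0.0), 1.0)
print(np.minimum(d1, d2))   # union
print(np.maximum(d1, -d2))  # difference (geom1 minus geom2)
print(np.maximum(d1, d2))   # intersection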
- ref: https://iquilezles.org/articles/distfunctions/ - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF - value, the shape is [N, D]. - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. - """ - sdf1 = self.geom1.sdf_func(points) - sdf2 = self.geom2.sdf_func(points) - return np.minimum(sdf1, sdf2) - - -class CSGDifference(geometry.Geometry): - """Construct an object by CSG Difference.""" - - def __init__(self, geom1, geom2): - if geom1.ndim != geom2.ndim: - raise ValueError( - f"{geom1}.ndim({geom1.ndim}) should be equal to " - f"{geom2}.ndim({geom1.ndim})." - ) - super().__init__(geom1.ndim, geom1.bbox, geom1.diam) - self.geom1 = geom1 - self.geom2 = geom2 - - def is_inside(self, x): - return np.logical_and(self.geom1.is_inside(x), ~self.geom2.is_inside(x)) - - def on_boundary(self, x): - return np.logical_or( - np.logical_and(self.geom1.on_boundary(x), ~self.geom2.is_inside(x)), - np.logical_and(self.geom1.is_inside(x), self.geom2.on_boundary(x)), - ) - - def boundary_normal(self, x): - return np.logical_and(self.geom1.on_boundary(x), ~self.geom2.is_inside(x))[ - :, np.newaxis - ] * self.geom1.boundary_normal(x) + np.logical_and( - self.geom1.is_inside(x), self.geom2.on_boundary(x) - )[ - :, np.newaxis - ] * -self.geom2.boundary_normal( - x - ) - - def random_points(self, n, random="pseudo"): - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size = 0 - while _size < n: - tmp = self.geom1.random_points(n, random=random) - tmp = tmp[~self.geom2.is_inside(tmp)] - - if len(tmp) > n - _size: - tmp = tmp[: n - _size] - x[_size : _size + len(tmp)] = tmp - _size += len(tmp) - return x - - def random_boundary_points(self, n, random="pseudo"): - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size = 0 - while _size < n: - geom1_boundary_points = self.geom1.random_boundary_points(n, random=random) - geom1_boundary_points = geom1_boundary_points[ - ~self.geom2.is_inside(geom1_boundary_points) - ] - - geom2_boundary_points = self.geom2.random_boundary_points(n, random=random) - geom2_boundary_points = geom2_boundary_points[ - self.geom1.is_inside(geom2_boundary_points) - ] - - points = np.concatenate((geom1_boundary_points, geom2_boundary_points)) - points = np.random.permutation(points) - - if len(points) > n - _size: - points = points[: n - _size] - x[_size : _size + len(points)] = points - _size += len(points) - return x - - def periodic_point(self, x, component): - x = np.copy(x) - on_boundary_geom1 = np.logical_and( - self.geom1.on_boundary(x), ~self.geom2.is_inside(x) - ) - x[on_boundary_geom1] = self.geom1.periodic_point(x, component)[ - on_boundary_geom1 - ] - return x - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field of CSG difference of two geometries. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF - value, the shape is [N, D]. - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. 
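# Illustrative sketch: random_points above is rejection sampling -- draw candidates in
# the bounding box, keep those the membership test accepts, and repeat until n points
# are collected. Minimal NumPy version (function names are made up for illustration):
import numpy as np

def sample_inside(is_inside, lo, hi, n, rng=None):
    rng = rng or np.random.default_rng(0)
    out, size = np.empty((n, len(lo))), 0
    while size < n:
        cand = lo + (hi - lo) * rng.random((n, len(lo)))
        cand = cand[is_inside(cand)][: n - size]   # keep accepted candidates only
        out[size : size + len(cand)] = cand
        size += len(cand)
    return out

def in_unit_disk(x):                               # unit disk inside [-1, 1]^2
    return (x ** 2).sum(axis=1) < 1.0

pts = sample_inside(in_unit_disk, np.array([-1.0, -1.0]), np.array([1.0, 1.0]), 100)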
- """ - sdf1 = self.geom1.sdf_func(points) - sdf2 = self.geom2.sdf_func(points) - return np.maximum(sdf1, -sdf2) - - -class CSGIntersection(geometry.Geometry): - """Construct an object by CSG Intersection.""" - - def __init__(self, geom1, geom2): - if geom1.ndim != geom2.ndim: - raise ValueError( - f"{geom1}.ndim({geom1.ndim}) should be equal to " - f"{geom2}.ndim({geom1.ndim})" - ) - super().__init__( - geom1.ndim, - ( - np.maximum(geom1.bbox[0], geom2.bbox[0]), - np.minimum(geom1.bbox[1], geom2.bbox[1]), - ), - min(geom1.diam, geom2.diam), - ) - self.geom1 = geom1 - self.geom2 = geom2 - - def is_inside(self, x): - return np.logical_and(self.geom1.is_inside(x), self.geom2.is_inside(x)) - - def on_boundary(self, x): - return np.logical_or( - np.logical_and(self.geom1.on_boundary(x), self.geom2.is_inside(x)), - np.logical_and(self.geom1.is_inside(x), self.geom2.on_boundary(x)), - ) - - def boundary_normal(self, x): - return np.logical_and(self.geom1.on_boundary(x), self.geom2.is_inside(x))[ - :, np.newaxis - ] * self.geom1.boundary_normal(x) + np.logical_and( - self.geom1.is_inside(x), self.geom2.on_boundary(x) - )[ - :, np.newaxis - ] * self.geom2.boundary_normal( - x - ) - - def random_points(self, n, random="pseudo"): - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size = 0 - while _size < n: - points = self.geom1.random_points(n, random=random) - points = points[self.geom2.is_inside(points)] - - if len(points) > n - _size: - points = points[: n - _size] - x[_size : _size + len(points)] = points - _size += len(points) - return x - - def random_boundary_points(self, n, random="pseudo"): - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size = 0 - while _size < n: - geom1_boundary_points = self.geom1.random_boundary_points(n, random=random) - geom1_boundary_points = geom1_boundary_points[ - self.geom2.is_inside(geom1_boundary_points) - ] - - geom2_boundary_points = self.geom2.random_boundary_points(n, random=random) - geom2_boundary_points = geom2_boundary_points[ - self.geom1.is_inside(geom2_boundary_points) - ] - - points = np.concatenate((geom1_boundary_points, geom2_boundary_points)) - points = np.random.permutation(points) - - if len(points) > n - _size: - points = points[: n - _size] - x[_size : _size + len(points)] = points - _size += len(points) - return x - - def periodic_point(self, x, component): - x = np.copy(x) - on_boundary_geom1 = np.logical_and( - self.geom1.on_boundary(x), self.geom2.is_inside(x) - ) - x[on_boundary_geom1] = self.geom1.periodic_point(x, component)[ - on_boundary_geom1 - ] - on_boundary_geom2 = np.logical_and( - self.geom2.on_boundary(x), self.geom1.is_inside(x) - ) - x[on_boundary_geom2] = self.geom2.periodic_point(x, component)[ - on_boundary_geom2 - ] - return x - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field of CSG intersection of two geometries. - ref: https://iquilezles.org/articles/distfunctions/ - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF - value the shape is [N, D]. - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. - """ - sdf1 = self.geom1.sdf_func(points) - sdf2 = self.geom2.sdf_func(points) - return np.maximum(sdf1, sdf2) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" + +from __future__ import annotations + +import numpy as np +import paddle + +from ppsci.geometry import geometry + + +class CSGUnion(geometry.Geometry): + """Construct an object by CSG Union(except for Mesh).""" + + def __init__(self, geom1, geom2): + if geom1.ndim != geom2.ndim: + raise ValueError( + f"{geom1}.ndim({geom1.ndim}) should be equal to " + f"{geom2}.ndim({geom1.ndim})" + ) + super().__init__( + geom1.ndim, + ( + np.minimum(geom1.bbox[0], geom2.bbox[0]), + np.maximum(geom1.bbox[1], geom2.bbox[1]), + ), + geom1.diam + geom2.diam, + ) + self.geom1 = geom1 + self.geom2 = geom2 + + def is_inside(self, x): + return np.logical_or(self.geom1.is_inside(x), self.geom2.is_inside(x)) + + def on_boundary(self, x): + return np.logical_or( + np.logical_and(self.geom1.on_boundary(x), ~self.geom2.is_inside(x)), + np.logical_and(self.geom2.on_boundary(x), ~self.geom1.is_inside(x)), + ) + + def boundary_normal(self, x): + return np.logical_and(self.geom1.on_boundary(x), ~self.geom2.is_inside(x))[ + :, np.newaxis + ] * self.geom1.boundary_normal(x) + np.logical_and( + self.geom2.on_boundary(x), ~self.geom1.is_inside(x) + )[ + :, np.newaxis + ] * self.geom2.boundary_normal( + x + ) + + def random_points(self, n, random="pseudo"): + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size = 0 + while _size < n: + points = ( + np.random.rand(n, self.ndim) * (self.bbox[1] - self.bbox[0]) + + self.bbox[0] + ) + points = points[self.is_inside(points)] + + if len(points) > n - _size: + points = points[: n - _size] + x[_size : _size + len(points)] = points + _size += len(points) + return x + + def random_boundary_points(self, n, random="pseudo"): + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size = 0 + while _size < n: + geom1_boundary_points = self.geom1.random_boundary_points(n, random=random) + geom1_boundary_points = geom1_boundary_points[ + ~self.geom2.is_inside(geom1_boundary_points) + ] + + geom2_boundary_points = self.geom2.random_boundary_points(n, random=random) + geom2_boundary_points = geom2_boundary_points[ + ~self.geom1.is_inside(geom2_boundary_points) + ] + + points = np.concatenate((geom1_boundary_points, geom2_boundary_points)) + points = np.random.permutation(points) + + if len(points) > n - _size: + points = points[: n - _size] + x[_size : _size + len(points)] = points + _size += len(points) + return x + + def periodic_point(self, x, component): + x = np.copy(x) + on_boundary_geom1 = np.logical_and( + self.geom1.on_boundary(x), ~self.geom2.is_inside(x) + ) + x[on_boundary_geom1] = self.geom1.periodic_point(x, component)[ + on_boundary_geom1 + ] + on_boundary_geom2 = np.logical_and( + self.geom2.on_boundary(x), ~self.geom1.is_inside(x) + ) + x[on_boundary_geom2] = self.geom2.periodic_point(x, component)[ + on_boundary_geom2 + ] + return x + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field of CSG union of two geometries. 
+ ref: https://iquilezles.org/articles/distfunctions/ + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF + value, the shape is [N, D]. + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + """ + sdf1 = self.geom1.sdf_func(points) + sdf2 = self.geom2.sdf_func(points) + return np.minimum(sdf1, sdf2) + + +class CSGDifference(geometry.Geometry): + """Construct an object by CSG Difference.""" + + def __init__(self, geom1, geom2): + if geom1.ndim != geom2.ndim: + raise ValueError( + f"{geom1}.ndim({geom1.ndim}) should be equal to " + f"{geom2}.ndim({geom1.ndim})." + ) + super().__init__(geom1.ndim, geom1.bbox, geom1.diam) + self.geom1 = geom1 + self.geom2 = geom2 + + def is_inside(self, x): + return np.logical_and(self.geom1.is_inside(x), ~self.geom2.is_inside(x)) + + def on_boundary(self, x): + return np.logical_or( + np.logical_and(self.geom1.on_boundary(x), ~self.geom2.is_inside(x)), + np.logical_and(self.geom1.is_inside(x), self.geom2.on_boundary(x)), + ) + + def boundary_normal(self, x): + return np.logical_and(self.geom1.on_boundary(x), ~self.geom2.is_inside(x))[ + :, np.newaxis + ] * self.geom1.boundary_normal(x) + np.logical_and( + self.geom1.is_inside(x), self.geom2.on_boundary(x) + )[ + :, np.newaxis + ] * -self.geom2.boundary_normal( + x + ) + + def random_points(self, n, random="pseudo"): + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size = 0 + while _size < n: + tmp = self.geom1.random_points(n, random=random) + tmp = tmp[~self.geom2.is_inside(tmp)] + + if len(tmp) > n - _size: + tmp = tmp[: n - _size] + x[_size : _size + len(tmp)] = tmp + _size += len(tmp) + return x + + def random_boundary_points(self, n, random="pseudo"): + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size = 0 + while _size < n: + geom1_boundary_points = self.geom1.random_boundary_points(n, random=random) + geom1_boundary_points = geom1_boundary_points[ + ~self.geom2.is_inside(geom1_boundary_points) + ] + + geom2_boundary_points = self.geom2.random_boundary_points(n, random=random) + geom2_boundary_points = geom2_boundary_points[ + self.geom1.is_inside(geom2_boundary_points) + ] + + points = np.concatenate((geom1_boundary_points, geom2_boundary_points)) + points = np.random.permutation(points) + + if len(points) > n - _size: + points = points[: n - _size] + x[_size : _size + len(points)] = points + _size += len(points) + return x + + def periodic_point(self, x, component): + x = np.copy(x) + on_boundary_geom1 = np.logical_and( + self.geom1.on_boundary(x), ~self.geom2.is_inside(x) + ) + x[on_boundary_geom1] = self.geom1.periodic_point(x, component)[ + on_boundary_geom1 + ] + return x + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field of CSG difference of two geometries. + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF + value, the shape is [N, D]. + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. 
+ """ + sdf1 = self.geom1.sdf_func(points) + sdf2 = self.geom2.sdf_func(points) + return np.maximum(sdf1, -sdf2) + + +class CSGIntersection(geometry.Geometry): + """Construct an object by CSG Intersection.""" + + def __init__(self, geom1, geom2): + if geom1.ndim != geom2.ndim: + raise ValueError( + f"{geom1}.ndim({geom1.ndim}) should be equal to " + f"{geom2}.ndim({geom1.ndim})" + ) + super().__init__( + geom1.ndim, + ( + np.maximum(geom1.bbox[0], geom2.bbox[0]), + np.minimum(geom1.bbox[1], geom2.bbox[1]), + ), + min(geom1.diam, geom2.diam), + ) + self.geom1 = geom1 + self.geom2 = geom2 + + def is_inside(self, x): + return np.logical_and(self.geom1.is_inside(x), self.geom2.is_inside(x)) + + def on_boundary(self, x): + return np.logical_or( + np.logical_and(self.geom1.on_boundary(x), self.geom2.is_inside(x)), + np.logical_and(self.geom1.is_inside(x), self.geom2.on_boundary(x)), + ) + + def boundary_normal(self, x): + return np.logical_and(self.geom1.on_boundary(x), self.geom2.is_inside(x))[ + :, np.newaxis + ] * self.geom1.boundary_normal(x) + np.logical_and( + self.geom1.is_inside(x), self.geom2.on_boundary(x) + )[ + :, np.newaxis + ] * self.geom2.boundary_normal( + x + ) + + def random_points(self, n, random="pseudo"): + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size = 0 + while _size < n: + points = self.geom1.random_points(n, random=random) + points = points[self.geom2.is_inside(points)] + + if len(points) > n - _size: + points = points[: n - _size] + x[_size : _size + len(points)] = points + _size += len(points) + return x + + def random_boundary_points(self, n, random="pseudo"): + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size = 0 + while _size < n: + geom1_boundary_points = self.geom1.random_boundary_points(n, random=random) + geom1_boundary_points = geom1_boundary_points[ + self.geom2.is_inside(geom1_boundary_points) + ] + + geom2_boundary_points = self.geom2.random_boundary_points(n, random=random) + geom2_boundary_points = geom2_boundary_points[ + self.geom1.is_inside(geom2_boundary_points) + ] + + points = np.concatenate((geom1_boundary_points, geom2_boundary_points)) + points = np.random.permutation(points) + + if len(points) > n - _size: + points = points[: n - _size] + x[_size : _size + len(points)] = points + _size += len(points) + return x + + def periodic_point(self, x, component): + x = np.copy(x) + on_boundary_geom1 = np.logical_and( + self.geom1.on_boundary(x), self.geom2.is_inside(x) + ) + x[on_boundary_geom1] = self.geom1.periodic_point(x, component)[ + on_boundary_geom1 + ] + on_boundary_geom2 = np.logical_and( + self.geom2.on_boundary(x), self.geom1.is_inside(x) + ) + x[on_boundary_geom2] = self.geom2.periodic_point(x, component)[ + on_boundary_geom2 + ] + return x + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field of CSG intersection of two geometries. + ref: https://iquilezles.org/articles/distfunctions/ + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF + value the shape is [N, D]. + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + """ + sdf1 = self.geom1.sdf_func(points) + sdf2 = self.geom2.sdf_func(points) + return np.maximum(sdf1, sdf2) diff --git a/ppsci/geometry/geometry.py b/ppsci/geometry/geometry.py index a06168488a..138c6d78a3 100644 --- a/ppsci/geometry/geometry.py +++ b/ppsci/geometry/geometry.py @@ -1,706 +1,706 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" -from __future__ import annotations - -import abc -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -import paddle -from typing_extensions import Literal - -from ppsci.utils import logger -from ppsci.utils import misc - - -class Geometry: - """Base class for geometry. - - Args: - ndim (int): Number of geometry dimension. - bbox (Tuple[np.ndarray, np.ndarray]): Bounding box of upper and lower. - diam (float): Diameter of geometry. - """ - - def __init__(self, ndim: int, bbox: Tuple[np.ndarray, np.ndarray], diam: float): - self.ndim = ndim - self.bbox = bbox - self.diam = min(diam, np.linalg.norm(bbox[1] - bbox[0])) - - @property - def dim_keys(self): - return ("x", "y", "z")[: self.ndim] - - @abc.abstractmethod - def is_inside(self, x: np.ndarray) -> np.ndarray: - """Returns a boolean array where x is inside the geometry. - - Args: - x (np.ndarray): Points to check if inside the geometry. The shape is [N, D], - where D is the number of dimension of geometry. - - Returns: - np.ndarray: Boolean array where x is inside the geometry. The shape is [N]. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval = ppsci.geometry.Interval(0, 1) - >>> x = np.array([[0], [0.5], [1.5]]) - >>> interval.is_inside(x) - array([ True, True, False]) - >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> x = np.array([[0.0, 0.0], [0.5, 0.5], [1.5, 1.5]]) - >>> rectangle.is_inside(x) - array([ True, True, False]) - >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - >>> x = np.array([[0, 0, 0], [0.5, 0.5, 0.5], [1.5, 1.5, 1.5]]) - >>> cuboid.is_inside(x) - array([ True, True, False]) - """ - - @abc.abstractmethod - def on_boundary(self, x: np.ndarray) -> np.ndarray: - """Returns a boolean array where x is on geometry boundary. - - Args: - x (np.ndarray): Points to check if on the geometry boundary. The shape is [N, D], - where D is the number of dimension of geometry. - - Returns: - np.ndarray: Boolean array where x is on the geometry boundary. The shape is [N]. 
- - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval = ppsci.geometry.Interval(0, 1) - >>> x = np.array([[0], [0.5], [1.5]]) - >>> interval.on_boundary(x) - array([ True, False, False]) - >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> x = np.array([[0, 0], [0.5, 0.5], [1, 1.5]]) - >>> rectangle.on_boundary(x) - array([ True, False, False]) - >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - >>> x = np.array([[0, 0, 0], [0.5, 0.5, 0.5], [1, 1, 1.5]]) - >>> cuboid.on_boundary(x) - array([ True, False, False]) - """ - - def boundary_normal(self, x): - """Compute the unit normal at x.""" - raise NotImplementedError(f"{self}.boundary_normal is not implemented") - - def uniform_points(self, n: int, boundary: bool = True) -> np.ndarray: - """Compute the equi-spaced points in the geometry. - - Warings: - This function is not implemented, please use random_points instead. - - Args: - n (int): Number of points. - boundary (bool): Include boundary points. Defaults to True. - - Returns: - np.ndarray: Random points in the geometry. The shape is [N, D]. - """ - logger.warning( - f"{self}.uniform_points not implemented. " f"Use random_points instead." - ) - return self.random_points(n) - - def sample_interior( - self, - n: int, - random: Literal["pseudo", "Halton", "LHS"] = "pseudo", - criteria: Optional[Callable[..., np.ndarray]] = None, - evenly: bool = False, - compute_sdf_derivatives: bool = False, - ) -> Dict[str, np.ndarray]: - """Sample random points in the geometry and return those meet criteria. - - Args: - n (int): Number of points. - random (Literal["pseudo", "Halton", "LHS"]): Random method. Defaults to "pseudo". - pseudo: Pseudo random. - Halton: Halton sequence. - LHS: Latin Hypercube Sampling. - criteria (Optional[Callable[..., np.ndarray]]): Criteria function. Given - coords from differnet dimension and return a boolean array with shape [n,]. - Defaults to None. - evenly (bool): Evenly sample points. Defaults to False. - compute_sdf_derivatives (bool): Compute SDF derivatives. Defaults to False. - - Returns: - Dict[str, np.ndarray]: Random points in the geometry. The shape is [N, D]. - their signed distance function. The shape is [N, 1]. - their derivatives of SDF(optional). The shape is [N, D]. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> np.random.seed(42) - >>> interval = ppsci.geometry.Interval(0, 1) - >>> interval.sample_interior(2) - {'x': array([[0.37454012], - [0.9507143 ]], dtype=float32), 'sdf': array([[0.37454012], - [0.04928571]], dtype=float32)} - >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> rectangle.sample_interior(2, "pseudo", None, False, True) - {'x': array([[0.7319939 ], - [0.15601864]], dtype=float32), 'y': array([[0.5986585 ], - [0.15599452]], dtype=float32), 'sdf': array([[0.2680061 ], - [0.15599453]], dtype=float32), 'sdf__x': array([[-1.0001659 ], - [ 0.25868416]], dtype=float32), 'sdf__y': array([[-0. 
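# Illustrative sketch: the `criteria` argument described above is a callable that
# receives one array per coordinate and returns a boolean mask, e.g. keeping only
# samples from the left half of a 2-D geometry. The commented usage line is indicative
# only:
import numpy as np

def left_half(x, y):
    return (x < 0.5).flatten()

print(left_half(np.array([[0.2], [0.7]]), np.array([[0.1], [0.9]])))  # [ True False]
# rectangle.sample_interior(1000, criteria=left_half)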
], - [ 0.74118376]], dtype=float32)} - >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - >>> cuboid.sample_interior(2, "pseudo", None, True, True) - {'x': array([[0.], - [0.]], dtype=float32), 'y': array([[0.], - [0.]], dtype=float32), 'z': array([[0.], - [1.]], dtype=float32), 'sdf': array([[0.], - [0.]], dtype=float32), 'sdf__x': array([[0.50008297], - [0.50008297]], dtype=float32), 'sdf__y': array([[0.50008297], - [0.50008297]], dtype=float32), 'sdf__z': array([[ 0.50008297], - [-0.49948692]], dtype=float32)} - """ - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size, _ntry, _nsuc = 0, 0, 0 - while _size < n: - if evenly: - points = self.uniform_points(n) - else: - if misc.typename(self) == "TimeXGeometry": - points = self.random_points(n, random, criteria) - else: - points = self.random_points(n, random) - - if criteria is not None: - criteria_mask = criteria(*np.split(points, self.ndim, axis=1)).flatten() - points = points[criteria_mask] - - if len(points) > n - _size: - points = points[: n - _size] - x[_size : _size + len(points)] = points - - _size += len(points) - _ntry += 1 - if len(points) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample interior points failed, " - "please check correctness of geometry and given criteria." - ) - - # if sdf_func added, return x_dict and sdf_dict, else, only return the x_dict - if hasattr(self, "sdf_func"): - sdf = -self.sdf_func(x) - sdf_dict = misc.convert_to_dict(sdf, ("sdf",)) - sdf_derives_dict = {} - if compute_sdf_derivatives: - sdf_derives = -self.sdf_derivatives(x) - sdf_derives_dict = misc.convert_to_dict( - sdf_derives, tuple(f"sdf__{key}" for key in self.dim_keys) - ) - else: - sdf_dict = {} - sdf_derives_dict = {} - x_dict = misc.convert_to_dict(x, self.dim_keys) - - return {**x_dict, **sdf_dict, **sdf_derives_dict} - - def sample_boundary( - self, - n: int, - random: Literal["pseudo", "Halton", "LHS"] = "pseudo", - criteria: Optional[Callable[..., np.ndarray]] = None, - evenly: bool = False, - ) -> Dict[str, np.ndarray]: - """Compute the random points in the geometry and return those meet criteria. - - Args: - n (int): Number of points. - random (Literal["pseudo", "Halton", "LHS"]): Random method. Defaults to "pseudo". - pseudo: Pseudo random. - Halton: Halton sequence. - LHS: Latin Hypercube Sampling. - criteria (Optional[Callable[..., np.ndarray]]): Criteria function. Given - coords from differnet dimension and return a boolean array with shape [n,]. - Defaults to None. - evenly (bool): Evenly sample points. Defaults to False. - - Returns: - Dict[str, np.ndarray]: Random points in the geometry. The shape is [N, D]. - their normal vectors. The shape is [N, D]. - their area. 
The shape is [N, 1].(only if the geometry is a mesh) - - Examples: - >>> import numpy as np - >>> import ppsci - >>> np.random.seed(42) - >>> interval = ppsci.geometry.Interval(0, 1) - >>> interval.sample_boundary(2) - {'x': array([[0.], - [1.]], dtype=float32), 'normal_x': array([[-1.], - [ 1.]], dtype=float32)} - >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> rectangle.sample_boundary(2) - {'x': array([[1.], - [0.]], dtype=float32), 'y': array([[0.49816048], - [0.19714284]], dtype=float32), 'normal_x': array([[ 1.], - [-1.]], dtype=float32), 'normal_y': array([[0.], - [0.]], dtype=float32)} - >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - >>> cuboid.sample_boundary(2) - {'x': array([[0.83244264], - [0.18182497]], dtype=float32), 'y': array([[0.21233912], - [0.1834045 ]], dtype=float32), 'z': array([[0.], - [1.]], dtype=float32), 'normal_x': array([[0.], - [0.]], dtype=float32), 'normal_y': array([[0.], - [0.]], dtype=float32), 'normal_z': array([[-1.], - [ 1.]], dtype=float32)} - """ - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size, _ntry, _nsuc = 0, 0, 0 - while _size < n: - if evenly: - if ( - misc.typename(self) == "TimeXGeometry" - and misc.typename(self.geometry) == "Mesh" - ): - points, normal, area = self.uniform_boundary_points(n) - else: - points = self.uniform_boundary_points(n) - else: - if ( - misc.typename(self) == "TimeXGeometry" - and misc.typename(self.geometry) == "Mesh" - ): - points, normal, area = self.random_boundary_points(n, random) - else: - if misc.typename(self) == "TimeXGeometry": - points = self.random_boundary_points(n, random, criteria) - else: - points = self.random_boundary_points(n, random) - - if criteria is not None: - criteria_mask = criteria(*np.split(points, self.ndim, axis=1)).flatten() - points = points[criteria_mask] - - if len(points) > n - _size: - points = points[: n - _size] - x[_size : _size + len(points)] = points - - _size += len(points) - _ntry += 1 - if len(points) > 0: - _nsuc += 1 - - if _ntry >= 10000 and _nsuc == 0: - raise ValueError( - "Sample boundary points failed, " - "please check correctness of geometry and given criteria." - ) - - if not ( - misc.typename(self) == "TimeXGeometry" - and misc.typename(self.geometry) == "Mesh" - ): - normal = self.boundary_normal(x) - - normal_dict = misc.convert_to_dict( - normal[:, 1:] if "t" in self.dim_keys else normal, - [f"normal_{key}" for key in self.dim_keys if key != "t"], - ) - x_dict = misc.convert_to_dict(x, self.dim_keys) - if ( - misc.typename(self) == "TimeXGeometry" - and misc.typename(self.geometry) == "Mesh" - ): - area_dict = misc.convert_to_dict(area[:, 1:], ["area"]) - return {**x_dict, **normal_dict, **area_dict} - - return {**x_dict, **normal_dict} - - @abc.abstractmethod - def random_points( - self, n: int, random: Literal["pseudo", "Halton", "LHS"] = "pseudo" - ) -> np.ndarray: - """Compute the random points in the geometry. - - Args: - n (int): Number of points. - random (Literal["pseudo", "Halton", "LHS"]): Random method. Defaults to "pseudo". - pseudo: Pseudo random. - Halton: Halton sequence. - LHS: Latin Hypercube Sampling. - - Returns: - np.ndarray: Random points in the geometry. The shape is [N, D]. 
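# A minimal usage sketch, assuming `ppsci` is importable: how the dicts returned by the
# sample_interior()/sample_boundary() methods documented above might be consumed. Only the
# keys named in the docstrings ("x", "y", "sdf", "sdf__*", "normal_*") are used.
import numpy as np
import ppsci

np.random.seed(42)
rect = ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))

interior = rect.sample_interior(128, random="pseudo", compute_sdf_derivatives=True)
boundary = rect.sample_boundary(64, random="pseudo")

# one [N, 1] array per dim key, plus "sdf" (and "sdf__x"/"sdf__y" when requested)
coords = np.concatenate([interior["x"], interior["y"]], axis=1)
# "sdf" is stored as -sdf_func(x), so it is already >= 0 for interior points (see doctest above)
weights = interior["sdf"]
# the boundary dict carries unit normals per dim key
normals = np.concatenate([boundary["normal_x"], boundary["normal_y"]], axis=1)
print(coords.shape, weights.shape, normals.shape)  # (128, 2) (128, 1) (64, 2)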
- - Examples: - >>> import numpy as np - >>> import ppsci - >>> np.random.seed(42) - >>> interval = ppsci.geometry.Interval(0, 1) - >>> interval.random_points(2) - array([[0.37454012], - [0.9507143 ]], dtype=float32) - >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> rectangle.random_points(2) - array([[0.7319939 , 0.5986585 ], - [0.15601864, 0.15599452]], dtype=float32) - >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - >>> cuboid.random_points(2) - array([[0.05808361, 0.8661761 , 0.601115 ], - [0.7080726 , 0.02058449, 0.96990985]], dtype=float32) - """ - - def uniform_boundary_points(self, n: int) -> np.ndarray: - """Compute the equi-spaced points on the boundary(not implemented). - - Warings: - This function is not implemented, please use random_boundary_points instead. - - Args: - n (int): Number of points. - - Returns: - np.ndarray: Random points on the boundary. The shape is [N, D]. - """ - logger.warning( - f"{self}.uniform_boundary_points not implemented. " - f"Use random_boundary_points instead." - ) - return self.random_boundary_points(n) - - @abc.abstractmethod - def random_boundary_points( - self, n: int, random: Literal["pseudo", "Halton", "LHS"] = "pseudo" - ) -> np.ndarray: - """Compute the random points on the boundary. - - Args: - n (int): Number of points. - random (Literal["pseudo", "Halton", "LHS"]): Random method. Defaults to "pseudo". - pseudo: Pseudo random. - Halton: Halton sequence. - LHS: Latin Hypercube Sampling. - - Returns: - np.ndarray: Random points on the boundary. The shape is [N, D]. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> np.random.seed(42) - >>> interval = ppsci.geometry.Interval(0, 1) - >>> interval.random_boundary_points(2) - array([[0.], - [1.]], dtype=float32) - >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> rectangle.random_boundary_points(2) - array([[1. , 0.49816048], - [0. , 0.19714284]], dtype=float32) - >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - >>> cuboid.random_boundary_points(2) - array([[0.83244264, 0.21233912, 0. ], - [0.18182497, 0.1834045 , 1. ]], dtype=float32) - """ - - def periodic_point(self, x: np.ndarray, component: int): - """Compute the periodic image of x(not implemented). - - Warings: - This function is not implemented. - """ - raise NotImplementedError(f"{self}.periodic_point to be implemented") - - def sdf_derivatives(self, x: np.ndarray, epsilon: float = 1e-4) -> np.ndarray: - """Compute derivatives of SDF function. - - Args: - x (np.ndarray): Points for computing SDF derivatives using central - difference. The shape is [N, D], D is the number of dimension of - geometry. - epsilon (float): Derivative step. Defaults to 1e-4. - - Returns: - np.ndarray: Derivatives of corresponding SDF function. - The shape is [N, D]. D is the number of dimension of geometry. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval = ppsci.geometry.Interval(0, 1) - >>> x = np.array([[0], [0.5], [1.5]]) - >>> interval.sdf_derivatives(x) - array([[-1.], - [ 0.], - [ 1.]]) - >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> x = np.array([[0.0, 0.0], [0.5, 0.5], [1.5, 1.5]]) - >>> rectangle.sdf_derivatives(x) - array([[-0.5 , -0.5 ], - [ 0. , 0. ], - [ 0.70710678, 0.70710678]]) - >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - >>> x = np.array([[0, 0, 0], [0.5, 0.5, 0.5], [1, 1, 1]]) - >>> cuboid.sdf_derivatives(x) - array([[-0.5, -0.5, -0.5], - [ 0. , 0. , 0. 
], - [ 0.5, 0.5, 0.5]]) - """ - if not hasattr(self, "sdf_func"): - raise NotImplementedError( - f"{misc.typename(self)}.sdf_func should be implemented " - "when using 'sdf_derivatives'." - ) - # Only compute sdf derivatives for those already implement `sdf_func` method. - sdf_derives = np.empty_like(x) - for i in range(self.ndim): - h = np.zeros_like(x) - h[:, i] += epsilon / 2 - derives_at_i = (self.sdf_func(x + h) - self.sdf_func(x - h)) / epsilon - sdf_derives[:, i : i + 1] = derives_at_i - return sdf_derives - - def union(self, other: "Geometry") -> "Geometry": - """CSG Union. - - Args: - other (Geometry): The other geometry. - - Returns: - Geometry: The union of two geometries. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval1 = ppsci.geometry.Interval(0, 1) - >>> interval2 = ppsci.geometry.Interval(0.5, 1.5) - >>> union = interval1.union(interval2) - >>> union.bbox - (array([[0.]]), array([[1.5]])) - >>> rectangle1 = ppsci.geometry.Rectangle((0, 0), (2, 3)) - >>> rectangle2 = ppsci.geometry.Rectangle((0, 0), (3, 2)) - >>> union = rectangle1.union(rectangle2) - >>> union.bbox - (array([0., 0.], dtype=float32), array([3., 3.], dtype=float32)) - >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) - >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) - >>> union = cuboid1 | cuboid2 - >>> union.bbox - (array([0., 0., 0.], dtype=float32), array([2., 2., 2.], dtype=float32)) - """ - from ppsci.geometry import csg - - return csg.CSGUnion(self, other) - - def __or__(self, other: "Geometry") -> "Geometry": - """CSG Union. - - Args: - other (Geometry): The other geometry. - - Returns: - Geometry: The union of two geometries. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval1 = ppsci.geometry.Interval(0, 1) - >>> interval2 = ppsci.geometry.Interval(0.5, 1.5) - >>> union = interval1.__or__(interval2) - >>> union.bbox - (array([[0.]]), array([[1.5]])) - >>> rectangle1 = ppsci.geometry.Rectangle((0, 0), (2, 3)) - >>> rectangle2 = ppsci.geometry.Rectangle((0, 0), (3, 2)) - >>> union = rectangle1.__or__(rectangle2) - >>> union.bbox - (array([0., 0.], dtype=float32), array([3., 3.], dtype=float32)) - >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) - >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) - >>> union = cuboid1 | cuboid2 - >>> union.bbox - (array([0., 0., 0.], dtype=float32), array([2., 2., 2.], dtype=float32)) - """ - from ppsci.geometry import csg - - return csg.CSGUnion(self, other) - - def difference(self, other: "Geometry") -> "Geometry": - """CSG Difference. - - Args: - other (Geometry): The other geometry. - - Returns: - Geometry: The difference of two geometries. 
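# A standalone sketch of the central-difference scheme used by sdf_derivatives() above,
# written against a plain NumPy callable so the epsilon/2 offsets are easy to follow.
import numpy as np

def central_diff_sdf_grad(sdf, x: np.ndarray, epsilon: float = 1e-4) -> np.ndarray:
    """Approximate d(sdf)/dx_i via (sdf(x + h_i/2) - sdf(x - h_i/2)) / epsilon."""
    grad = np.empty_like(x)
    for i in range(x.shape[1]):
        h = np.zeros_like(x)
        h[:, i] = epsilon / 2
        grad[:, i : i + 1] = (sdf(x + h) - sdf(x - h)) / epsilon
    return grad

# e.g. signed distance to the unit circle (positive inside); the gradient is the unit
# direction of increasing signed distance:
circle_sdf = lambda p: 1.0 - np.linalg.norm(p, axis=1, keepdims=True)
print(central_diff_sdf_grad(circle_sdf, np.array([[0.5, 0.0], [0.0, -0.25]])))
# approximately [[-1., 0.], [0., 1.]]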
- - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval1 = ppsci.geometry.Interval(0.0, 2.0) - >>> interval2 = ppsci.geometry.Interval(1.0, 3.0) - >>> difference = interval1.difference(interval2) - >>> difference.bbox - (array([[0.]]), array([[2.]])) - >>> rectangle1 = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 3.0)) - >>> rectangle2 = ppsci.geometry.Rectangle((1.0, 1.0), (2.0, 2.0)) - >>> difference = rectangle1.difference(rectangle2) - >>> difference.bbox - (array([0., 0.], dtype=float32), array([2., 3.], dtype=float32)) - >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) - >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) - >>> difference = cuboid1 - cuboid2 - >>> difference.bbox - (array([0., 0., 0.], dtype=float32), array([1., 2., 2.], dtype=float32)) - """ - from ppsci.geometry import csg - - return csg.CSGDifference(self, other) - - def __sub__(self, other: "Geometry") -> "Geometry": - """CSG Difference. - - Args: - other (Geometry): The other geometry. - - Returns: - Geometry: The difference of two geometries. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval1 = ppsci.geometry.Interval(0.0, 2.0) - >>> interval2 = ppsci.geometry.Interval(1.0, 3.0) - >>> difference = interval1.__sub__(interval2) - >>> difference.bbox - (array([[0.]]), array([[2.]])) - >>> rectangle1 = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 3.0)) - >>> rectangle2 = ppsci.geometry.Rectangle((1.0, 1.0), (2.0, 2.0)) - >>> difference = rectangle1.__sub__(rectangle2) - >>> difference.bbox - (array([0., 0.], dtype=float32), array([2., 3.], dtype=float32)) - >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) - >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) - >>> difference = cuboid1 - cuboid2 - >>> difference.bbox - (array([0., 0., 0.], dtype=float32), array([1., 2., 2.], dtype=float32)) - """ - from ppsci.geometry import csg - - return csg.CSGDifference(self, other) - - def intersection(self, other: "Geometry") -> "Geometry": - """CSG Intersection. - - Args: - other (Geometry): The other geometry. - - Returns: - Geometry: The intersection of two geometries. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval1 = ppsci.geometry.Interval(0.0, 1.0) - >>> interval2 = ppsci.geometry.Interval(0.5, 1.5) - >>> intersection = interval1.intersection(interval2) - >>> intersection.bbox - (array([[0.5]]), array([[1.]])) - >>> rectangle1 = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 3.0)) - >>> rectangle2 = ppsci.geometry.Rectangle((0.0, 0.0), (3.0, 2.0)) - >>> intersection = rectangle1.intersection(rectangle2) - >>> intersection.bbox - (array([0., 0.], dtype=float32), array([2., 2.], dtype=float32)) - >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) - >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) - >>> intersection = cuboid1 & cuboid2 - >>> intersection.bbox - (array([0., 0., 0.], dtype=float32), array([1., 1., 1.], dtype=float32)) - """ - from ppsci.geometry import csg - - return csg.CSGIntersection(self, other) - - def __and__(self, other: "Geometry") -> "Geometry": - """CSG Intersection. - - Args: - other (Geometry): The other geometry. - - Returns: - Geometry: The intersection of two geometries. 
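# A minimal sketch of composing geometries with the CSG operations defined here
# (union/|, difference/-, intersection/&). Disk and Rectangle are the 2D primitives that
# appear later in this patch; it is assumed the composed result is sampled through the
# same base-class interface as a primitive geometry.
import ppsci

plate = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 1.0))
hole = ppsci.geometry.Disk((1.0, 0.5), 0.25)

plate_with_hole = plate - hole                                               # CSGDifference
domain = plate_with_hole | ppsci.geometry.Rectangle((2.0, 0.0), (3.0, 1.0))  # CSGUnion
overlap = plate & ppsci.geometry.Rectangle((1.0, 0.0), (3.0, 1.0))           # CSGIntersection

print(domain.bbox)                      # bounding box of the composed geometry
samples = domain.sample_interior(256)   # dict of coordinates, as for a primitive shape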
- - Examples: - >>> import numpy as np - >>> import ppsci - >>> interval1 = ppsci.geometry.Interval(0.0, 1.0) - >>> interval2 = ppsci.geometry.Interval(0.5, 1.5) - >>> intersection = interval1.__and__(interval2) - >>> intersection.bbox - (array([[0.5]]), array([[1.]])) - >>> rectangle1 = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 3.0)) - >>> rectangle2 = ppsci.geometry.Rectangle((0.0, 0.0), (3.0, 2.0)) - >>> intersection = rectangle1.__and__(rectangle2) - >>> intersection.bbox - (array([0., 0.], dtype=float32), array([2., 2.], dtype=float32)) - >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) - >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) - >>> intersection = cuboid1 & cuboid2 - >>> intersection.bbox - (array([0., 0., 0.], dtype=float32), array([1., 1., 1.], dtype=float32)) - """ - from ppsci.geometry import csg - - return csg.CSGIntersection(self, other) - - def __str__(self) -> str: - """Return the name of class. - - Returns: - str: Meta information of geometry. - - Examples: - >>> import ppsci - >>> interval = ppsci.geometry.Interval(0, 1) - >>> interval.__str__() - "Interval, ndim = 1, bbox = (array([[0]]), array([[1]])), diam = 1, dim_keys = ('x',)" - >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> rectangle.__str__() - "Rectangle, ndim = 2, bbox = (array([0., 0.], dtype=float32), array([1., 1.], dtype=float32)), diam = 1.4142135381698608, dim_keys = ('x', 'y')" - >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - >>> cuboid.__str__() - "Cuboid, ndim = 3, bbox = (array([0., 0., 0.], dtype=float32), array([1., 1., 1.], dtype=float32)), diam = 1.7320507764816284, dim_keys = ('x', 'y', 'z')" - """ - return ", ".join( - [ - self.__class__.__name__, - f"ndim = {self.ndim}", - f"bbox = {self.bbox}", - f"diam = {self.diam}", - f"dim_keys = {self.dim_keys}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" +from __future__ import annotations + +import abc +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np +import paddle +from typing_extensions import Literal + +from ppsci.utils import logger +from ppsci.utils import misc + + +class Geometry: + """Base class for geometry. + + Args: + ndim (int): Number of geometry dimension. + bbox (Tuple[np.ndarray, np.ndarray]): Bounding box of upper and lower. + diam (float): Diameter of geometry. + """ + + def __init__(self, ndim: int, bbox: Tuple[np.ndarray, np.ndarray], diam: float): + self.ndim = ndim + self.bbox = bbox + self.diam = min(diam, np.linalg.norm(bbox[1] - bbox[0])) + + @property + def dim_keys(self): + return ("x", "y", "z")[: self.ndim] + + @abc.abstractmethod + def is_inside(self, x: np.ndarray) -> np.ndarray: + """Returns a boolean array where x is inside the geometry. 
+ + Args: + x (np.ndarray): Points to check if inside the geometry. The shape is [N, D], + where D is the number of dimension of geometry. + + Returns: + np.ndarray: Boolean array where x is inside the geometry. The shape is [N]. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval = ppsci.geometry.Interval(0, 1) + >>> x = np.array([[0], [0.5], [1.5]]) + >>> interval.is_inside(x) + array([ True, True, False]) + >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> x = np.array([[0.0, 0.0], [0.5, 0.5], [1.5, 1.5]]) + >>> rectangle.is_inside(x) + array([ True, True, False]) + >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + >>> x = np.array([[0, 0, 0], [0.5, 0.5, 0.5], [1.5, 1.5, 1.5]]) + >>> cuboid.is_inside(x) + array([ True, True, False]) + """ + + @abc.abstractmethod + def on_boundary(self, x: np.ndarray) -> np.ndarray: + """Returns a boolean array where x is on geometry boundary. + + Args: + x (np.ndarray): Points to check if on the geometry boundary. The shape is [N, D], + where D is the number of dimension of geometry. + + Returns: + np.ndarray: Boolean array where x is on the geometry boundary. The shape is [N]. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval = ppsci.geometry.Interval(0, 1) + >>> x = np.array([[0], [0.5], [1.5]]) + >>> interval.on_boundary(x) + array([ True, False, False]) + >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> x = np.array([[0, 0], [0.5, 0.5], [1, 1.5]]) + >>> rectangle.on_boundary(x) + array([ True, False, False]) + >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + >>> x = np.array([[0, 0, 0], [0.5, 0.5, 0.5], [1, 1, 1.5]]) + >>> cuboid.on_boundary(x) + array([ True, False, False]) + """ + + def boundary_normal(self, x): + """Compute the unit normal at x.""" + raise NotImplementedError(f"{self}.boundary_normal is not implemented") + + def uniform_points(self, n: int, boundary: bool = True) -> np.ndarray: + """Compute the equi-spaced points in the geometry. + + Warings: + This function is not implemented, please use random_points instead. + + Args: + n (int): Number of points. + boundary (bool): Include boundary points. Defaults to True. + + Returns: + np.ndarray: Random points in the geometry. The shape is [N, D]. + """ + logger.warning( + f"{self}.uniform_points not implemented. " f"Use random_points instead." + ) + return self.random_points(n) + + def sample_interior( + self, + n: int, + random: Literal["pseudo", "Halton", "LHS"] = "pseudo", + criteria: Optional[Callable[..., np.ndarray]] = None, + evenly: bool = False, + compute_sdf_derivatives: bool = False, + ) -> Dict[str, np.ndarray]: + """Sample random points in the geometry and return those meet criteria. + + Args: + n (int): Number of points. + random (Literal["pseudo", "Halton", "LHS"]): Random method. Defaults to "pseudo". + pseudo: Pseudo random. + Halton: Halton sequence. + LHS: Latin Hypercube Sampling. + criteria (Optional[Callable[..., np.ndarray]]): Criteria function. Given + coords from differnet dimension and return a boolean array with shape [n,]. + Defaults to None. + evenly (bool): Evenly sample points. Defaults to False. + compute_sdf_derivatives (bool): Compute SDF derivatives. Defaults to False. + + Returns: + Dict[str, np.ndarray]: Random points in the geometry. The shape is [N, D]. + their signed distance function. The shape is [N, 1]. + their derivatives of SDF(optional). The shape is [N, D]. 
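# A small sketch of the `criteria` callable accepted above: it receives one array per
# coordinate (the result of np.split along axis=1) and returns a boolean mask, which the
# sampler flattens to shape [n,].
import numpy as np
import ppsci

rect = ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0))

def below_diagonal(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    # keep only points in the lower-left triangle of the unit square
    return (x + y) < 1.0

samples = rect.sample_interior(100, random="pseudo", criteria=below_diagonal)
assert np.all(samples["x"] + samples["y"] < 1.0)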
+ + Examples: + >>> import numpy as np + >>> import ppsci + >>> np.random.seed(42) + >>> interval = ppsci.geometry.Interval(0, 1) + >>> interval.sample_interior(2) + {'x': array([[0.37454012], + [0.9507143 ]], dtype=float32), 'sdf': array([[0.37454012], + [0.04928571]], dtype=float32)} + >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> rectangle.sample_interior(2, "pseudo", None, False, True) + {'x': array([[0.7319939 ], + [0.15601864]], dtype=float32), 'y': array([[0.5986585 ], + [0.15599452]], dtype=float32), 'sdf': array([[0.2680061 ], + [0.15599453]], dtype=float32), 'sdf__x': array([[-1.0001659 ], + [ 0.25868416]], dtype=float32), 'sdf__y': array([[-0. ], + [ 0.74118376]], dtype=float32)} + >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + >>> cuboid.sample_interior(2, "pseudo", None, True, True) + {'x': array([[0.], + [0.]], dtype=float32), 'y': array([[0.], + [0.]], dtype=float32), 'z': array([[0.], + [1.]], dtype=float32), 'sdf': array([[0.], + [0.]], dtype=float32), 'sdf__x': array([[0.50008297], + [0.50008297]], dtype=float32), 'sdf__y': array([[0.50008297], + [0.50008297]], dtype=float32), 'sdf__z': array([[ 0.50008297], + [-0.49948692]], dtype=float32)} + """ + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size, _ntry, _nsuc = 0, 0, 0 + while _size < n: + if evenly: + points = self.uniform_points(n) + else: + if misc.typename(self) == "TimeXGeometry": + points = self.random_points(n, random, criteria) + else: + points = self.random_points(n, random) + + if criteria is not None: + criteria_mask = criteria(*np.split(points, self.ndim, axis=1)).flatten() + points = points[criteria_mask] + + if len(points) > n - _size: + points = points[: n - _size] + x[_size : _size + len(points)] = points + + _size += len(points) + _ntry += 1 + if len(points) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample interior points failed, " + "please check correctness of geometry and given criteria." + ) + + # if sdf_func added, return x_dict and sdf_dict, else, only return the x_dict + if hasattr(self, "sdf_func"): + sdf = -self.sdf_func(x) + sdf_dict = misc.convert_to_dict(sdf, ("sdf",)) + sdf_derives_dict = {} + if compute_sdf_derivatives: + sdf_derives = -self.sdf_derivatives(x) + sdf_derives_dict = misc.convert_to_dict( + sdf_derives, tuple(f"sdf__{key}" for key in self.dim_keys) + ) + else: + sdf_dict = {} + sdf_derives_dict = {} + x_dict = misc.convert_to_dict(x, self.dim_keys) + + return {**x_dict, **sdf_dict, **sdf_derives_dict} + + def sample_boundary( + self, + n: int, + random: Literal["pseudo", "Halton", "LHS"] = "pseudo", + criteria: Optional[Callable[..., np.ndarray]] = None, + evenly: bool = False, + ) -> Dict[str, np.ndarray]: + """Compute the random points in the geometry and return those meet criteria. + + Args: + n (int): Number of points. + random (Literal["pseudo", "Halton", "LHS"]): Random method. Defaults to "pseudo". + pseudo: Pseudo random. + Halton: Halton sequence. + LHS: Latin Hypercube Sampling. + criteria (Optional[Callable[..., np.ndarray]]): Criteria function. Given + coords from differnet dimension and return a boolean array with shape [n,]. + Defaults to None. + evenly (bool): Evenly sample points. Defaults to False. + + Returns: + Dict[str, np.ndarray]: Random points in the geometry. The shape is [N, D]. + their normal vectors. The shape is [N, D]. + their area. 
The shape is [N, 1].(only if the geometry is a mesh) + + Examples: + >>> import numpy as np + >>> import ppsci + >>> np.random.seed(42) + >>> interval = ppsci.geometry.Interval(0, 1) + >>> interval.sample_boundary(2) + {'x': array([[0.], + [1.]], dtype=float32), 'normal_x': array([[-1.], + [ 1.]], dtype=float32)} + >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> rectangle.sample_boundary(2) + {'x': array([[1.], + [0.]], dtype=float32), 'y': array([[0.49816048], + [0.19714284]], dtype=float32), 'normal_x': array([[ 1.], + [-1.]], dtype=float32), 'normal_y': array([[0.], + [0.]], dtype=float32)} + >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + >>> cuboid.sample_boundary(2) + {'x': array([[0.83244264], + [0.18182497]], dtype=float32), 'y': array([[0.21233912], + [0.1834045 ]], dtype=float32), 'z': array([[0.], + [1.]], dtype=float32), 'normal_x': array([[0.], + [0.]], dtype=float32), 'normal_y': array([[0.], + [0.]], dtype=float32), 'normal_z': array([[-1.], + [ 1.]], dtype=float32)} + """ + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size, _ntry, _nsuc = 0, 0, 0 + while _size < n: + if evenly: + if ( + misc.typename(self) == "TimeXGeometry" + and misc.typename(self.geometry) == "Mesh" + ): + points, normal, area = self.uniform_boundary_points(n) + else: + points = self.uniform_boundary_points(n) + else: + if ( + misc.typename(self) == "TimeXGeometry" + and misc.typename(self.geometry) == "Mesh" + ): + points, normal, area = self.random_boundary_points(n, random) + else: + if misc.typename(self) == "TimeXGeometry": + points = self.random_boundary_points(n, random, criteria) + else: + points = self.random_boundary_points(n, random) + + if criteria is not None: + criteria_mask = criteria(*np.split(points, self.ndim, axis=1)).flatten() + points = points[criteria_mask] + + if len(points) > n - _size: + points = points[: n - _size] + x[_size : _size + len(points)] = points + + _size += len(points) + _ntry += 1 + if len(points) > 0: + _nsuc += 1 + + if _ntry >= 10000 and _nsuc == 0: + raise ValueError( + "Sample boundary points failed, " + "please check correctness of geometry and given criteria." + ) + + if not ( + misc.typename(self) == "TimeXGeometry" + and misc.typename(self.geometry) == "Mesh" + ): + normal = self.boundary_normal(x) + + normal_dict = misc.convert_to_dict( + normal[:, 1:] if "t" in self.dim_keys else normal, + [f"normal_{key}" for key in self.dim_keys if key != "t"], + ) + x_dict = misc.convert_to_dict(x, self.dim_keys) + if ( + misc.typename(self) == "TimeXGeometry" + and misc.typename(self.geometry) == "Mesh" + ): + area_dict = misc.convert_to_dict(area[:, 1:], ["area"]) + return {**x_dict, **normal_dict, **area_dict} + + return {**x_dict, **normal_dict} + + @abc.abstractmethod + def random_points( + self, n: int, random: Literal["pseudo", "Halton", "LHS"] = "pseudo" + ) -> np.ndarray: + """Compute the random points in the geometry. + + Args: + n (int): Number of points. + random (Literal["pseudo", "Halton", "LHS"]): Random method. Defaults to "pseudo". + pseudo: Pseudo random. + Halton: Halton sequence. + LHS: Latin Hypercube Sampling. + + Returns: + np.ndarray: Random points in the geometry. The shape is [N, D]. 
+ + Examples: + >>> import numpy as np + >>> import ppsci + >>> np.random.seed(42) + >>> interval = ppsci.geometry.Interval(0, 1) + >>> interval.random_points(2) + array([[0.37454012], + [0.9507143 ]], dtype=float32) + >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> rectangle.random_points(2) + array([[0.7319939 , 0.5986585 ], + [0.15601864, 0.15599452]], dtype=float32) + >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + >>> cuboid.random_points(2) + array([[0.05808361, 0.8661761 , 0.601115 ], + [0.7080726 , 0.02058449, 0.96990985]], dtype=float32) + """ + + def uniform_boundary_points(self, n: int) -> np.ndarray: + """Compute the equi-spaced points on the boundary(not implemented). + + Warings: + This function is not implemented, please use random_boundary_points instead. + + Args: + n (int): Number of points. + + Returns: + np.ndarray: Random points on the boundary. The shape is [N, D]. + """ + logger.warning( + f"{self}.uniform_boundary_points not implemented. " + f"Use random_boundary_points instead." + ) + return self.random_boundary_points(n) + + @abc.abstractmethod + def random_boundary_points( + self, n: int, random: Literal["pseudo", "Halton", "LHS"] = "pseudo" + ) -> np.ndarray: + """Compute the random points on the boundary. + + Args: + n (int): Number of points. + random (Literal["pseudo", "Halton", "LHS"]): Random method. Defaults to "pseudo". + pseudo: Pseudo random. + Halton: Halton sequence. + LHS: Latin Hypercube Sampling. + + Returns: + np.ndarray: Random points on the boundary. The shape is [N, D]. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> np.random.seed(42) + >>> interval = ppsci.geometry.Interval(0, 1) + >>> interval.random_boundary_points(2) + array([[0.], + [1.]], dtype=float32) + >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> rectangle.random_boundary_points(2) + array([[1. , 0.49816048], + [0. , 0.19714284]], dtype=float32) + >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + >>> cuboid.random_boundary_points(2) + array([[0.83244264, 0.21233912, 0. ], + [0.18182497, 0.1834045 , 1. ]], dtype=float32) + """ + + def periodic_point(self, x: np.ndarray, component: int): + """Compute the periodic image of x(not implemented). + + Warings: + This function is not implemented. + """ + raise NotImplementedError(f"{self}.periodic_point to be implemented") + + def sdf_derivatives(self, x: np.ndarray, epsilon: float = 1e-4) -> np.ndarray: + """Compute derivatives of SDF function. + + Args: + x (np.ndarray): Points for computing SDF derivatives using central + difference. The shape is [N, D], D is the number of dimension of + geometry. + epsilon (float): Derivative step. Defaults to 1e-4. + + Returns: + np.ndarray: Derivatives of corresponding SDF function. + The shape is [N, D]. D is the number of dimension of geometry. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval = ppsci.geometry.Interval(0, 1) + >>> x = np.array([[0], [0.5], [1.5]]) + >>> interval.sdf_derivatives(x) + array([[-1.], + [ 0.], + [ 1.]]) + >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> x = np.array([[0.0, 0.0], [0.5, 0.5], [1.5, 1.5]]) + >>> rectangle.sdf_derivatives(x) + array([[-0.5 , -0.5 ], + [ 0. , 0. ], + [ 0.70710678, 0.70710678]]) + >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + >>> x = np.array([[0, 0, 0], [0.5, 0.5, 0.5], [1, 1, 1]]) + >>> cuboid.sdf_derivatives(x) + array([[-0.5, -0.5, -0.5], + [ 0. , 0. , 0. 
], + [ 0.5, 0.5, 0.5]]) + """ + if not hasattr(self, "sdf_func"): + raise NotImplementedError( + f"{misc.typename(self)}.sdf_func should be implemented " + "when using 'sdf_derivatives'." + ) + # Only compute sdf derivatives for those already implement `sdf_func` method. + sdf_derives = np.empty_like(x) + for i in range(self.ndim): + h = np.zeros_like(x) + h[:, i] += epsilon / 2 + derives_at_i = (self.sdf_func(x + h) - self.sdf_func(x - h)) / epsilon + sdf_derives[:, i : i + 1] = derives_at_i + return sdf_derives + + def union(self, other: "Geometry") -> "Geometry": + """CSG Union. + + Args: + other (Geometry): The other geometry. + + Returns: + Geometry: The union of two geometries. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval1 = ppsci.geometry.Interval(0, 1) + >>> interval2 = ppsci.geometry.Interval(0.5, 1.5) + >>> union = interval1.union(interval2) + >>> union.bbox + (array([[0.]]), array([[1.5]])) + >>> rectangle1 = ppsci.geometry.Rectangle((0, 0), (2, 3)) + >>> rectangle2 = ppsci.geometry.Rectangle((0, 0), (3, 2)) + >>> union = rectangle1.union(rectangle2) + >>> union.bbox + (array([0., 0.], dtype=float32), array([3., 3.], dtype=float32)) + >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) + >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) + >>> union = cuboid1 | cuboid2 + >>> union.bbox + (array([0., 0., 0.], dtype=float32), array([2., 2., 2.], dtype=float32)) + """ + from ppsci.geometry import csg + + return csg.CSGUnion(self, other) + + def __or__(self, other: "Geometry") -> "Geometry": + """CSG Union. + + Args: + other (Geometry): The other geometry. + + Returns: + Geometry: The union of two geometries. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval1 = ppsci.geometry.Interval(0, 1) + >>> interval2 = ppsci.geometry.Interval(0.5, 1.5) + >>> union = interval1.__or__(interval2) + >>> union.bbox + (array([[0.]]), array([[1.5]])) + >>> rectangle1 = ppsci.geometry.Rectangle((0, 0), (2, 3)) + >>> rectangle2 = ppsci.geometry.Rectangle((0, 0), (3, 2)) + >>> union = rectangle1.__or__(rectangle2) + >>> union.bbox + (array([0., 0.], dtype=float32), array([3., 3.], dtype=float32)) + >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) + >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) + >>> union = cuboid1 | cuboid2 + >>> union.bbox + (array([0., 0., 0.], dtype=float32), array([2., 2., 2.], dtype=float32)) + """ + from ppsci.geometry import csg + + return csg.CSGUnion(self, other) + + def difference(self, other: "Geometry") -> "Geometry": + """CSG Difference. + + Args: + other (Geometry): The other geometry. + + Returns: + Geometry: The difference of two geometries. 
+ + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval1 = ppsci.geometry.Interval(0.0, 2.0) + >>> interval2 = ppsci.geometry.Interval(1.0, 3.0) + >>> difference = interval1.difference(interval2) + >>> difference.bbox + (array([[0.]]), array([[2.]])) + >>> rectangle1 = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 3.0)) + >>> rectangle2 = ppsci.geometry.Rectangle((1.0, 1.0), (2.0, 2.0)) + >>> difference = rectangle1.difference(rectangle2) + >>> difference.bbox + (array([0., 0.], dtype=float32), array([2., 3.], dtype=float32)) + >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) + >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) + >>> difference = cuboid1 - cuboid2 + >>> difference.bbox + (array([0., 0., 0.], dtype=float32), array([1., 2., 2.], dtype=float32)) + """ + from ppsci.geometry import csg + + return csg.CSGDifference(self, other) + + def __sub__(self, other: "Geometry") -> "Geometry": + """CSG Difference. + + Args: + other (Geometry): The other geometry. + + Returns: + Geometry: The difference of two geometries. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval1 = ppsci.geometry.Interval(0.0, 2.0) + >>> interval2 = ppsci.geometry.Interval(1.0, 3.0) + >>> difference = interval1.__sub__(interval2) + >>> difference.bbox + (array([[0.]]), array([[2.]])) + >>> rectangle1 = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 3.0)) + >>> rectangle2 = ppsci.geometry.Rectangle((1.0, 1.0), (2.0, 2.0)) + >>> difference = rectangle1.__sub__(rectangle2) + >>> difference.bbox + (array([0., 0.], dtype=float32), array([2., 3.], dtype=float32)) + >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) + >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) + >>> difference = cuboid1 - cuboid2 + >>> difference.bbox + (array([0., 0., 0.], dtype=float32), array([1., 2., 2.], dtype=float32)) + """ + from ppsci.geometry import csg + + return csg.CSGDifference(self, other) + + def intersection(self, other: "Geometry") -> "Geometry": + """CSG Intersection. + + Args: + other (Geometry): The other geometry. + + Returns: + Geometry: The intersection of two geometries. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval1 = ppsci.geometry.Interval(0.0, 1.0) + >>> interval2 = ppsci.geometry.Interval(0.5, 1.5) + >>> intersection = interval1.intersection(interval2) + >>> intersection.bbox + (array([[0.5]]), array([[1.]])) + >>> rectangle1 = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 3.0)) + >>> rectangle2 = ppsci.geometry.Rectangle((0.0, 0.0), (3.0, 2.0)) + >>> intersection = rectangle1.intersection(rectangle2) + >>> intersection.bbox + (array([0., 0.], dtype=float32), array([2., 2.], dtype=float32)) + >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) + >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) + >>> intersection = cuboid1 & cuboid2 + >>> intersection.bbox + (array([0., 0., 0.], dtype=float32), array([1., 1., 1.], dtype=float32)) + """ + from ppsci.geometry import csg + + return csg.CSGIntersection(self, other) + + def __and__(self, other: "Geometry") -> "Geometry": + """CSG Intersection. + + Args: + other (Geometry): The other geometry. + + Returns: + Geometry: The intersection of two geometries. 
+ + Examples: + >>> import numpy as np + >>> import ppsci + >>> interval1 = ppsci.geometry.Interval(0.0, 1.0) + >>> interval2 = ppsci.geometry.Interval(0.5, 1.5) + >>> intersection = interval1.__and__(interval2) + >>> intersection.bbox + (array([[0.5]]), array([[1.]])) + >>> rectangle1 = ppsci.geometry.Rectangle((0.0, 0.0), (2.0, 3.0)) + >>> rectangle2 = ppsci.geometry.Rectangle((0.0, 0.0), (3.0, 2.0)) + >>> intersection = rectangle1.__and__(rectangle2) + >>> intersection.bbox + (array([0., 0.], dtype=float32), array([2., 2.], dtype=float32)) + >>> cuboid1 = ppsci.geometry.Cuboid((0, 0, 0), (1, 2, 2)) + >>> cuboid2 = ppsci.geometry.Cuboid((0, 0, 0), (2, 1, 1)) + >>> intersection = cuboid1 & cuboid2 + >>> intersection.bbox + (array([0., 0., 0.], dtype=float32), array([1., 1., 1.], dtype=float32)) + """ + from ppsci.geometry import csg + + return csg.CSGIntersection(self, other) + + def __str__(self) -> str: + """Return the name of class. + + Returns: + str: Meta information of geometry. + + Examples: + >>> import ppsci + >>> interval = ppsci.geometry.Interval(0, 1) + >>> interval.__str__() + "Interval, ndim = 1, bbox = (array([[0]]), array([[1]])), diam = 1, dim_keys = ('x',)" + >>> rectangle = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> rectangle.__str__() + "Rectangle, ndim = 2, bbox = (array([0., 0.], dtype=float32), array([1., 1.], dtype=float32)), diam = 1.4142135381698608, dim_keys = ('x', 'y')" + >>> cuboid = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + >>> cuboid.__str__() + "Cuboid, ndim = 3, bbox = (array([0., 0., 0.], dtype=float32), array([1., 1., 1.], dtype=float32)), diam = 1.7320507764816284, dim_keys = ('x', 'y', 'z')" + """ + return ", ".join( + [ + self.__class__.__name__, + f"ndim = {self.ndim}", + f"bbox = {self.bbox}", + f"diam = {self.diam}", + f"dim_keys = {self.dim_keys}", + ] + ) diff --git a/ppsci/geometry/geometry_1d.py b/ppsci/geometry/geometry_1d.py index d5de01fe56..f83418dd9b 100644 --- a/ppsci/geometry/geometry_1d.py +++ b/ppsci/geometry/geometry_1d.py @@ -1,119 +1,119 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" - -from __future__ import annotations - -import numpy as np -import paddle - -from ppsci.geometry import geometry -from ppsci.geometry.sampler import sample -from ppsci.utils import misc - - -class Interval(geometry.Geometry): - """Class for interval. - - Args: - l (float): Left position of interval. - r (float): Right position of interval. 
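# A minimal usage sketch for the Interval geometry whose methods follow below, assuming
# `ppsci` is importable; the expected values mirror the method definitions in this file.
import numpy as np
import ppsci

geom = ppsci.geometry.Interval(-1.0, 1.0)
pts = geom.uniform_points(5)            # [[-1.], [-0.5], [0.], [0.5], [1.]]
ends = geom.uniform_boundary_points(2)  # [[-1.], [1.]]
normals = geom.boundary_normal(ends)    # [[-1.], [1.]]: -1 at the left end, +1 at the right
sdf = geom.sdf_func(np.array([[0.0], [1.0], [2.0]]))  # [[-1.], [0.], [1.]]; negative inside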
- - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Interval(-1, 1) - """ - - def __init__(self, l: float, r: float): - super().__init__(1, (np.array([[l]]), np.array([[r]])), r - l) - self.l = l - self.r = r - - def is_inside(self, x: np.ndarray): - return ((self.l <= x) & (x <= self.r)).flatten() - - def on_boundary(self, x: np.ndarray): - return (np.isclose(x, self.l) | np.isclose(x, self.r)).flatten() - - def boundary_normal(self, x: np.ndarray): - return -np.isclose(x, self.l).astype(paddle.get_default_dtype()) + np.isclose( - x, self.r - ).astype(paddle.get_default_dtype()) - - def uniform_points(self, n: int, boundary: bool = True): - if boundary: - return np.linspace( - self.l, self.r, n, dtype=paddle.get_default_dtype() - ).reshape([-1, 1]) - return np.linspace( - self.l, self.r, n + 1, endpoint=False, dtype=paddle.get_default_dtype() - )[1:].reshape([-1, 1]) - - def random_points(self, n: int, random: str = "pseudo"): - x = sample(n, 1, random) - return (self.l + x * self.diam).astype(paddle.get_default_dtype()) - - def uniform_boundary_points(self, n: int): - if n == 1: - return np.array([[self.l]], dtype=paddle.get_default_dtype()) - xl = np.full([n // 2, 1], self.l, dtype=paddle.get_default_dtype()) - xr = np.full([n - n // 2, 1], self.r, dtype=paddle.get_default_dtype()) - return np.concatenate((xl, xr), axis=0) - - def random_boundary_points(self, n: int, random: str = "pseudo"): - if n == 2: - return np.array([[self.l], [self.r]], dtype=paddle.get_default_dtype()) - return ( - np.random.choice([self.l, self.r], n) - .reshape([-1, 1]) - .astype(paddle.get_default_dtype()) - ) - - def periodic_point(self, x: np.ndarray, component: int = 0): - x_array = misc.convert_to_array(x, self.dim_keys) - periodic_x = x_array - periodic_x[np.isclose(x_array, self.l)] = self.r - periodic_x[np.isclose(x_array, self.r)] = self.l - periodic_x_normal = self.boundary_normal(periodic_x) - - periodic_x = misc.convert_to_dict(periodic_x, self.dim_keys) - periodic_x_normal = misc.convert_to_dict( - periodic_x_normal, [f"normal_{k}" for k in self.dim_keys] - ) - return {**periodic_x, **periodic_x_normal} - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape is [N, 1] - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. - - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. - """ - if points.shape[1] != self.ndim: - raise ValueError( - f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" - ) - return -((self.r - self.l) / 2 - np.abs(points - (self.l + self.r) / 2)) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" + +from __future__ import annotations + +import numpy as np +import paddle + +from ppsci.geometry import geometry +from ppsci.geometry.sampler import sample +from ppsci.utils import misc + + +class Interval(geometry.Geometry): + """Class for interval. + + Args: + l (float): Left position of interval. + r (float): Right position of interval. + + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.Interval(-1, 1) + """ + + def __init__(self, l: float, r: float): + super().__init__(1, (np.array([[l]]), np.array([[r]])), r - l) + self.l = l + self.r = r + + def is_inside(self, x: np.ndarray): + return ((self.l <= x) & (x <= self.r)).flatten() + + def on_boundary(self, x: np.ndarray): + return (np.isclose(x, self.l) | np.isclose(x, self.r)).flatten() + + def boundary_normal(self, x: np.ndarray): + return -np.isclose(x, self.l).astype(paddle.get_default_dtype()) + np.isclose( + x, self.r + ).astype(paddle.get_default_dtype()) + + def uniform_points(self, n: int, boundary: bool = True): + if boundary: + return np.linspace( + self.l, self.r, n, dtype=paddle.get_default_dtype() + ).reshape([-1, 1]) + return np.linspace( + self.l, self.r, n + 1, endpoint=False, dtype=paddle.get_default_dtype() + )[1:].reshape([-1, 1]) + + def random_points(self, n: int, random: str = "pseudo"): + x = sample(n, 1, random) + return (self.l + x * self.diam).astype(paddle.get_default_dtype()) + + def uniform_boundary_points(self, n: int): + if n == 1: + return np.array([[self.l]], dtype=paddle.get_default_dtype()) + xl = np.full([n // 2, 1], self.l, dtype=paddle.get_default_dtype()) + xr = np.full([n - n // 2, 1], self.r, dtype=paddle.get_default_dtype()) + return np.concatenate((xl, xr), axis=0) + + def random_boundary_points(self, n: int, random: str = "pseudo"): + if n == 2: + return np.array([[self.l], [self.r]], dtype=paddle.get_default_dtype()) + return ( + np.random.choice([self.l, self.r], n) + .reshape([-1, 1]) + .astype(paddle.get_default_dtype()) + ) + + def periodic_point(self, x: np.ndarray, component: int = 0): + x_array = misc.convert_to_array(x, self.dim_keys) + periodic_x = x_array + periodic_x[np.isclose(x_array, self.l)] = self.r + periodic_x[np.isclose(x_array, self.r)] = self.l + periodic_x_normal = self.boundary_normal(periodic_x) + + periodic_x = misc.convert_to_dict(periodic_x, self.dim_keys) + periodic_x_normal = misc.convert_to_dict( + periodic_x_normal, [f"normal_{k}" for k in self.dim_keys] + ) + return {**periodic_x, **periodic_x_normal} + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF value, + the shape is [N, 1] + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + + NOTE: This function usually returns ndarray with negative values, because + according to the definition of SDF, the SDF value of the coordinate point inside + the object(interior points) is negative, the outside is positive, and the edge + is 0. Therefore, when used for weighting, a negative sign is often added before + the result of this function. 
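# A short sketch of Interval.periodic_point() as defined above. Input and output are dicts
# keyed by dim_keys ("x"), matching the convert_to_array/convert_to_dict calls in the
# implementation; this calling convention is inferred from that code rather than from a
# documented API.
import numpy as np
import ppsci

geom = ppsci.geometry.Interval(0.0, 1.0)
mapped = geom.periodic_point({"x": np.array([[0.0]])}, component=0)
print(mapped["x"])         # [[1.]]  -> the left end is mapped onto the right end
print(mapped["normal_x"])  # [[1.]]  -> outward normal at the mapped location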
+ """ + if points.shape[1] != self.ndim: + raise ValueError( + f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" + ) + return -((self.r - self.l) / 2 - np.abs(points - (self.l + self.r) / 2)) diff --git a/ppsci/geometry/geometry_2d.py b/ppsci/geometry/geometry_2d.py index 2df6293b27..d18981d98a 100644 --- a/ppsci/geometry/geometry_2d.py +++ b/ppsci/geometry/geometry_2d.py @@ -1,706 +1,706 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" - -from __future__ import annotations - -from typing import Tuple - -import numpy as np -import paddle -from scipy import spatial - -from ppsci.geometry import geometry -from ppsci.geometry import geometry_nd -from ppsci.geometry import sampler - - -class Disk(geometry.Geometry): - """Class for disk geometry - - Args: - center (Tuple[float, float]): Center point of disk [x0, y0]. - radius (float): Radius of disk. - - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Disk((0.0, 0.0), 1.0) - """ - - def __init__(self, center: Tuple[float, float], radius: float): - self.center = np.array(center, dtype=paddle.get_default_dtype()) - self.radius = radius - super().__init__(2, (self.center - radius, self.center + radius), 2 * radius) - - def is_inside(self, x): - return np.linalg.norm(x - self.center, axis=1) <= self.radius - - def on_boundary(self, x): - return np.isclose(np.linalg.norm(x - self.center, axis=1), self.radius) - - def boundary_normal(self, x): - ox = x - self.center - ox_len = np.linalg.norm(ox, axis=1, keepdims=True) - ox = (ox / ox_len) * np.isclose(ox_len, self.radius).astype( - paddle.get_default_dtype() - ) - return ox - - def random_points(self, n, random="pseudo"): - # http://mathworld.wolfram.com/DiskPointPicking.html - rng = sampler.sample(n, 2, random) - r, theta = rng[:, 0], 2 * np.pi * rng[:, 1] - x = np.sqrt(r) * np.cos(theta) - y = np.sqrt(r) * np.sin(theta) - return self.radius * np.stack((x, y), axis=1) + self.center - - def uniform_boundary_points(self, n): - theta = np.linspace( - 0, 2 * np.pi, num=n, endpoint=False, dtype=paddle.get_default_dtype() - ) - X = np.stack((np.cos(theta), np.sin(theta)), axis=1) - return self.radius * X + self.center - - def random_boundary_points(self, n, random="pseudo"): - theta = 2 * np.pi * sampler.sample(n, 1, random) - X = np.concatenate((np.cos(theta), np.sin(theta)), axis=1) - return self.radius * X + self.center - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape is [N, 2] - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. 
- - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. - """ - if points.shape[1] != self.ndim: - raise ValueError( - f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" - ) - sdf = self.radius - np.linalg.norm(points - self.center, axis=1) - sdf = -sdf[..., np.newaxis] - return sdf - - -class Rectangle(geometry_nd.Hypercube): - """Class for rectangle geometry - - Args: - xmin (Tuple[float, float]): Bottom left corner point, [x0, y0]. - xmax (Tuple[float, float]): Top right corner point, [x1, y1]. - - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0)) - """ - - def __init__(self, xmin, xmax): - super().__init__(xmin, xmax) - self.perimeter = 2 * np.sum(self.xmax - self.xmin) - self.area = np.prod(self.xmax - self.xmin) - - def uniform_boundary_points(self, n): - nx, ny = np.ceil(n / self.perimeter * (self.xmax - self.xmin)).astype(int) - bottom = np.hstack( - ( - np.linspace( - self.xmin[0], - self.xmax[0], - nx, - endpoint=False, - dtype=paddle.get_default_dtype(), - ).reshape([nx, 1]), - np.full([nx, 1], self.xmin[1], dtype=paddle.get_default_dtype()), - ) - ) - right = np.hstack( - ( - np.full([ny, 1], self.xmax[0], dtype=paddle.get_default_dtype()), - np.linspace( - self.xmin[1], - self.xmax[1], - ny, - endpoint=False, - dtype=paddle.get_default_dtype(), - ).reshape([ny, 1]), - ) - ) - top = np.hstack( - ( - np.linspace( - self.xmin[0], self.xmax[0], nx + 1, dtype=paddle.get_default_dtype() - )[1:].reshape([nx, 1]), - np.full([nx, 1], self.xmax[1], dtype=paddle.get_default_dtype()), - ) - ) - left = np.hstack( - ( - np.full([ny, 1], self.xmin[0], dtype=paddle.get_default_dtype()), - np.linspace( - self.xmin[1], self.xmax[1], ny + 1, dtype=paddle.get_default_dtype() - )[1:].reshape([ny, 1]), - ) - ) - x = np.vstack((bottom, right, top, left)) - if len(x) > n: - x = x[0:n] - return x - - def random_boundary_points(self, n, random="pseudo"): - l1 = self.xmax[0] - self.xmin[0] - l2 = l1 + self.xmax[1] - self.xmin[1] - l3 = l2 + l1 - u = np.ravel(sampler.sample(n + 10, 1, random)) - # Remove the possible points very close to the corners - u = u[~np.isclose(u, l1 / self.perimeter)] - u = u[~np.isclose(u, l3 / self.perimeter)] - u = u[0:n] - - u *= self.perimeter - x = [] - for l in u: - if l < l1: - x.append([self.xmin[0] + l, self.xmin[1]]) - elif l < l2: - x.append([self.xmax[0], self.xmin[1] + (l - l1)]) - elif l < l3: - x.append([self.xmax[0] - (l - l2), self.xmax[1]]) - else: - x.append([self.xmin[0], self.xmax[1] - (l - l3)]) - return np.vstack(x) - - @staticmethod - def is_valid(vertices): - """Check if the geometry is a Rectangle.""" - return ( - len(vertices) == 4 - and np.isclose(np.prod(vertices[1] - vertices[0]), 0) - and np.isclose(np.prod(vertices[2] - vertices[1]), 0) - and np.isclose(np.prod(vertices[3] - vertices[2]), 0) - and np.isclose(np.prod(vertices[0] - vertices[3]), 0) - ) - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape of the array is [N, 2]. - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. 
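# A standalone NumPy sketch of the standard axis-aligned box SDF that Rectangle.sdf_func()
# implements just below (positive outside, negative inside, zero on the edge).
import numpy as np

def box_sdf(points: np.ndarray, xmin, xmax) -> np.ndarray:
    xmin, xmax = np.asarray(xmin, dtype=float), np.asarray(xmax, dtype=float)
    center, half = (xmin + xmax) / 2, (xmax - xmin) / 2
    d = np.abs(points - center) - half                  # per-axis distance to the faces
    outside = np.linalg.norm(np.maximum(d, 0.0), axis=1)
    inside = np.minimum(np.max(d, axis=1), 0.0)
    return (outside + inside).reshape(-1, 1)

print(box_sdf(np.array([[0.5, 0.5], [2.0, 0.5], [1.0, 1.0]]), (0.0, 0.0), (1.0, 1.0)))
# [[-0.5], [1.], [0.]] : the centre, one unit right of the square, and a corner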
- - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. - """ - if points.shape[1] != self.ndim: - raise ValueError( - f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" - ) - center = (self.xmin + self.xmax) / 2 - dist_to_boundary = ( - np.abs(points - center) - np.array([self.xmax - self.xmin]) / 2 - ) - return ( - np.linalg.norm(np.maximum(dist_to_boundary, 0), axis=1) - + np.minimum(np.max(dist_to_boundary, axis=1), 0) - ).reshape(-1, 1) - - -class Triangle(geometry.Geometry): - """Class for Triangle - - The order of vertices can be in a clockwise or counterclockwise direction. The - vertices will be re-ordered in counterclockwise (right hand rule). - - Args: - x1 (Tuple[float, float]): First point of Triangle [x0, y0]. - x2 (Tuple[float, float]): Second point of Triangle [x1, y1]. - x3 (Tuple[float, float]): Third point of Triangle [x2, y2]. - - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Triangle((0, 0), (1, 0), (0, 1)) - """ - - def __init__(self, x1, x2, x3): - self.area = polygon_signed_area([x1, x2, x3]) - # Clockwise - if self.area < 0: - self.area = -self.area - x2, x3 = x3, x2 - - self.x1 = np.array(x1, dtype=paddle.get_default_dtype()) - self.x2 = np.array(x2, dtype=paddle.get_default_dtype()) - self.x3 = np.array(x3, dtype=paddle.get_default_dtype()) - - self.v12 = self.x2 - self.x1 - self.v23 = self.x3 - self.x2 - self.v31 = self.x1 - self.x3 - self.l12 = np.linalg.norm(self.v12) - self.l23 = np.linalg.norm(self.v23) - self.l31 = np.linalg.norm(self.v31) - self.n12 = self.v12 / self.l12 - self.n23 = self.v23 / self.l23 - self.n31 = self.v31 / self.l31 - self.n12_normal = clockwise_rotation_90(self.n12) - self.n23_normal = clockwise_rotation_90(self.n23) - self.n31_normal = clockwise_rotation_90(self.n31) - self.perimeter = self.l12 + self.l23 + self.l31 - - super().__init__( - 2, - (np.minimum(x1, np.minimum(x2, x3)), np.maximum(x1, np.maximum(x2, x3))), - self.l12 - * self.l23 - * self.l31 - / ( - self.perimeter - * (self.l12 + self.l23 - self.l31) - * (self.l23 + self.l31 - self.l12) - * (self.l31 + self.l12 - self.l23) - ) - ** 0.5, - ) - - def is_inside(self, x): - # https://stackoverflow.com/a/2049593/12679294 - _sign = np.stack( - [ - np.cross(self.v12, x - self.x1), - np.cross(self.v23, x - self.x2), - np.cross(self.v31, x - self.x3), - ], - axis=1, - ) - return ~(np.any(_sign > 0, axis=-1) & np.any(_sign < 0, axis=-1)) - - def on_boundary(self, x): - l1 = np.linalg.norm(x - self.x1, axis=-1) - l2 = np.linalg.norm(x - self.x2, axis=-1) - l3 = np.linalg.norm(x - self.x3, axis=-1) - return np.any( - np.isclose( - [l1 + l2 - self.l12, l2 + l3 - self.l23, l3 + l1 - self.l31], - 0, - atol=1e-6, - ), - axis=0, - ) - - def boundary_normal(self, x): - l1 = np.linalg.norm(x - self.x1, axis=-1, keepdims=True) - l2 = np.linalg.norm(x - self.x2, axis=-1, keepdims=True) - l3 = np.linalg.norm(x - self.x3, axis=-1, keepdims=True) - on12 = np.isclose(l1 + l2, self.l12) - on23 = np.isclose(l2 + l3, self.l23) - on31 = np.isclose(l3 + l1, self.l31) - # Check points on the vertexes - if np.any(np.count_nonzero(np.hstack([on12, on23, on31]), axis=-1) > 1): - raise ValueError( - "{}.boundary_normal do not accept points on the vertexes.".format( - 
self.__class__.__name__ - ) - ) - return self.n12_normal * on12 + self.n23_normal * on23 + self.n31_normal * on31 - - def random_points(self, n, random="pseudo"): - # There are two methods for triangle point picking. - # Method 1 (used here): - # - https://math.stackexchange.com/questions/18686/uniform-random-point-in-triangle - # Method 2: - # - http://mathworld.wolfram.com/TrianglePointPicking.html - # - https://hbfs.wordpress.com/2010/10/05/random-points-in-a-triangle-generating-random-sequences-ii/ - # - https://stackoverflow.com/questions/19654251/random-point-inside-triangle-inside-java - sqrt_r1 = np.sqrt(np.random.rand(n, 1)) - r2 = np.random.rand(n, 1) - return ( - (1 - sqrt_r1) * self.x1 - + sqrt_r1 * (1 - r2) * self.x2 - + r2 * sqrt_r1 * self.x3 - ) - - def uniform_boundary_points(self, n): - density = n / self.perimeter - x12 = ( - np.linspace( - 0, - 1, - num=int(np.ceil(density * self.l12)), - endpoint=False, - dtype=paddle.get_default_dtype(), - )[:, None] - * self.v12 - + self.x1 - ) - x23 = ( - np.linspace( - 0, - 1, - num=int(np.ceil(density * self.l23)), - endpoint=False, - dtype=paddle.get_default_dtype(), - )[:, None] - * self.v23 - + self.x2 - ) - x31 = ( - np.linspace( - 0, - 1, - num=int(np.ceil(density * self.l31)), - endpoint=False, - dtype=paddle.get_default_dtype(), - )[:, None] - * self.v31 - + self.x3 - ) - x = np.vstack((x12, x23, x31)) - if len(x) > n: - x = x[0:n] - return x - - def random_boundary_points(self, n, random="pseudo"): - u = np.ravel(sampler.sample(n + 2, 1, random)) - # Remove the possible points very close to the corners - u = u[np.logical_not(np.isclose(u, self.l12 / self.perimeter))] - u = u[np.logical_not(np.isclose(u, (self.l12 + self.l23) / self.perimeter))] - u = u[:n] - - u *= self.perimeter - x = [] - for l in u: - if l < self.l12: - x.append(l * self.n12 + self.x1) - elif l < self.l12 + self.l23: - x.append((l - self.l12) * self.n23 + self.x2) - else: - x.append((l - self.l12 - self.l23) * self.n31 + self.x3) - return np.vstack(x) - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape of the array is [N, 2]. - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. - - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. 
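# A quick numerical check of the square-root sampling trick used by Triangle.random_points()
# above: (1 - sqrt(r1)) * x1 + sqrt(r1) * (1 - r2) * x2 + sqrt(r1) * r2 * x3 with
# r1, r2 ~ U(0, 1) is uniformly distributed over the triangle (x1, x2, x3).
import numpy as np

rng = np.random.default_rng(0)
x1, x2, x3 = np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([0.0, 1.0])
sqrt_r1, r2 = np.sqrt(rng.random((1000, 1))), rng.random((1000, 1))
pts = (1 - sqrt_r1) * x1 + sqrt_r1 * (1 - r2) * x2 + sqrt_r1 * r2 * x3
# every sample lies in the right triangle: x >= 0, y >= 0, x + y <= 1
assert np.all(pts >= 0.0) and np.all(pts.sum(axis=1) <= 1.0 + 1e-12)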
- """ - if points.shape[1] != self.ndim: - raise ValueError( - f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" - ) - v1p = points - self.x1 # v1p: vector from x1 to points - v2p = points - self.x2 - v3p = points - self.x3 - # vv12_p: vertical vector of points to v12(If the vertical point is in the extension of v12, - # the vector will be the vector from x1 to points) - vv12_p = ( - self.v12 - * np.clip(np.dot(v1p, self.v12.reshape(2, -1)) / self.l12**2, 0, 1) - - v1p - ) - vv23_p = ( - self.v23 - * np.clip(np.dot(v2p, self.v23.reshape(2, -1)) / self.l23**2, 0, 1) - - v2p - ) - vv31_p = ( - self.v31 - * np.clip(np.dot(v3p, self.v31.reshape(2, -1)) / self.l31**2, 0, 1) - - v3p - ) - is_inside = self.is_inside(points).reshape(-1, 1) * 2 - 1 - len_vv12_p = np.linalg.norm(vv12_p, axis=1, keepdims=True) - len_vv23_p = np.linalg.norm(vv23_p, axis=1, keepdims=True) - len_vv31_p = np.linalg.norm(vv31_p, axis=1, keepdims=True) - mini_dist = np.minimum(np.minimum(len_vv12_p, len_vv23_p), len_vv31_p) - return is_inside * mini_dist - - -class Polygon(geometry.Geometry): - """Class for simple polygon. - - Args: - vertices (Tuple[Tuple[float, float], ...]): The order of vertices can be in a - clockwise or counter-clockwise direction. The vertices will be re-ordered in - counterclockwise (right hand rule). - - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Polygon(((0, 0), (1, 0), (2, 1), (2, 2), (0, 2))) - """ - - def __init__(self, vertices): - self.vertices = np.array(vertices, dtype=paddle.get_default_dtype()) - if len(vertices) == 3: - raise ValueError("The polygon is a triangle. Use Triangle instead.") - if Rectangle.is_valid(self.vertices): - raise ValueError("The polygon is a rectangle. Use Rectangle instead.") - - self.area = polygon_signed_area(self.vertices) - # Clockwise - if self.area < 0: - self.area = -self.area - self.vertices = np.flipud(self.vertices) - - self.diagonals = spatial.distance.squareform( - spatial.distance.pdist(self.vertices) - ) - super().__init__( - 2, - (np.amin(self.vertices, axis=0), np.amax(self.vertices, axis=0)), - np.max(self.diagonals), - ) - self.nvertices = len(self.vertices) - self.perimeter = np.sum( - [self.diagonals[i, i + 1] for i in range(-1, self.nvertices - 1)] - ) - self.bbox = np.array( - [np.min(self.vertices, axis=0), np.max(self.vertices, axis=0)], - dtype=paddle.get_default_dtype(), - ) - - self.segments = self.vertices[1:] - self.vertices[:-1] - self.segments = np.vstack((self.vertices[0] - self.vertices[-1], self.segments)) - self.normal = clockwise_rotation_90(self.segments.T).T - self.normal = self.normal / np.linalg.norm(self.normal, axis=1).reshape(-1, 1) - - def is_inside(self, x): - def wn_PnPoly(P, V): - """Winding number algorithm. - - https://en.wikipedia.org/wiki/Point_in_polygon - http://geomalgorithms.com/a03-_inclusion.html - - Args: - P: A point. - V: Vertex points of a polygon. - - Returns: - wn: Winding number (=0 only if P is outside polygon). 
- """ - wn = np.zeros(len(P)) # Winding number counter - - # Repeat the first vertex at end - # Loop through all edges of the polygon - for i in range(-1, self.nvertices - 1): # Edge from V[i] to V[i+1] - tmp = np.all( - np.hstack( - [ - V[i, 1] <= P[:, 1:2], # Start y <= P[1] - V[i + 1, 1] > P[:, 1:2], # An upward crossing - is_left(V[i], V[i + 1], P) > 0, # P left of edge - ] - ), - axis=-1, - ) - wn[tmp] += 1 # Have a valid up intersect - tmp = np.all( - np.hstack( - [ - V[i, 1] > P[:, 1:2], # Start y > P[1] - V[i + 1, 1] <= P[:, 1:2], # A downward crossing - is_left(V[i], V[i + 1], P) < 0, # P right of edge - ] - ), - axis=-1, - ) - wn[tmp] -= 1 # Have a valid down intersect - return wn - - return wn_PnPoly(x, self.vertices) != 0 - - def on_boundary(self, x): - _on = np.zeros(shape=len(x), dtype=np.int) - for i in range(-1, self.nvertices - 1): - l1 = np.linalg.norm(self.vertices[i] - x, axis=-1) - l2 = np.linalg.norm(self.vertices[i + 1] - x, axis=-1) - _on[np.isclose(l1 + l2, self.diagonals[i, i + 1])] += 1 - return _on > 0 - - def random_points(self, n, random="pseudo"): - x = np.empty((0, 2), dtype=paddle.get_default_dtype()) - vbbox = self.bbox[1] - self.bbox[0] - while len(x) < n: - x_new = sampler.sample(n, 2, "pseudo") * vbbox + self.bbox[0] - x = np.vstack((x, x_new[self.is_inside(x_new)])) - return x[:n] - - def uniform_boundary_points(self, n): - density = n / self.perimeter - x = [] - for i in range(-1, self.nvertices - 1): - x.append( - np.linspace( - 0, - 1, - num=int(np.ceil(density * self.diagonals[i, i + 1])), - endpoint=False, - dtype=paddle.get_default_dtype(), - )[:, None] - * (self.vertices[i + 1] - self.vertices[i]) - + self.vertices[i] - ) - x = np.vstack(x) - if len(x) > n: - x = x[0:n] - return x - - def random_boundary_points(self, n, random="pseudo"): - u = np.ravel(sampler.sample(n + self.nvertices, 1, random)) - # Remove the possible points very close to the corners - l = 0 - for i in range(0, self.nvertices - 1): - l += self.diagonals[i, i + 1] - u = u[np.logical_not(np.isclose(u, l / self.perimeter))] - u = u[:n] - u *= self.perimeter - u.sort() - - x = [] - i = -1 - l0 = 0 - l1 = l0 + self.diagonals[i, i + 1] - v = (self.vertices[i + 1] - self.vertices[i]) / self.diagonals[i, i + 1] - for l in u: - if l > l1: - i += 1 - l0, l1 = l1, l1 + self.diagonals[i, i + 1] - v = (self.vertices[i + 1] - self.vertices[i]) / self.diagonals[i, i + 1] - x.append((l - l0) * v + self.vertices[i]) - return np.vstack(x) - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape is [N, 2] - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. - - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. 
- """ - if points.shape[1] != self.ndim: - raise ValueError( - f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" - ) - sdf_value = np.empty((points.shape[0], 1), dtype=paddle.get_default_dtype()) - for n in range(points.shape[0]): - distance = np.dot( - points[n] - self.vertices[0], points[n] - self.vertices[0] - ) - inside_tag = 1.0 - for i in range(self.vertices.shape[0]): - j = (self.vertices.shape[0] - 1) if i == 0 else (i - 1) - # Calculate the shortest distance from point P to each edge. - vector_ij = self.vertices[j] - self.vertices[i] - vector_in = points[n] - self.vertices[i] - distance_vector = vector_in - vector_ij * np.clip( - np.dot(vector_in, vector_ij) / np.dot(vector_ij, vector_ij), - 0.0, - 1.0, - ) - distance = np.minimum( - distance, np.dot(distance_vector, distance_vector) - ) - # Calculate the inside and outside using the Odd-even rule - odd_even_rule_number = np.array( - [ - points[n][1] >= self.vertices[i][1], - points[n][1] < self.vertices[j][1], - vector_ij[0] * vector_in[1] > vector_ij[1] * vector_in[0], - ] - ) - if odd_even_rule_number.all() or np.all(~odd_even_rule_number): - inside_tag *= -1.0 - sdf_value[n] = inside_tag * np.sqrt(distance) - return -sdf_value - - -def polygon_signed_area(vertices): - """The (signed) area of a simple polygon. - - If the vertices are in the counterclockwise direction, then the area is positive; if - they are in the clockwise direction, the area is negative. - - Shoelace formula: https://en.wikipedia.org/wiki/Shoelace_formula - - Args: - vertices (np.ndarray): Polygon vertices with shape of [N, 2]. - - Returns: - float: The (signed) area of a simple polygon. - """ - x, y = zip(*vertices) - x = np.array(list(x) + [x[0]], dtype=paddle.get_default_dtype()) - y = np.array(list(y) + [y[0]], dtype=paddle.get_default_dtype()) - return 0.5 * (np.sum(x[:-1] * y[1:]) - np.sum(x[1:] * y[:-1])) - - -def clockwise_rotation_90(v): - """Rotate a vector of 90 degrees clockwise about the origin. - - Args: - v (np.ndarray): Vector with shape of [2, N]. - - Returns: - np.ndarray: Rotated vector with shape of [2, N]. - """ - return np.array([v[1], -v[0]], dtype=paddle.get_default_dtype()) - - -def is_left(P0, P1, P2): - """Test if a point is Left|On|Right of an infinite line. - - See: the January 2001 Algorithm "Area of 2D and 3D Triangles and Polygons". - - Args: - P0 (np.ndarray): One point in the line. - P1 (np.ndarray): One point in the line. - P2 (np.ndarray): A array of point to be tested with shape of [N, 2]. - - Returns: - np.ndarray: >0 if P2 left of the line through P0 and P1, =0 if P2 on the line, <0 if P2 - right of the line. - """ - return np.cross(P1 - P0, P2 - P0, axis=-1).reshape((-1, 1)) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" + +from __future__ import annotations + +from typing import Tuple + +import numpy as np +import paddle +from scipy import spatial + +from ppsci.geometry import geometry +from ppsci.geometry import geometry_nd +from ppsci.geometry import sampler + + +class Disk(geometry.Geometry): + """Class for disk geometry + + Args: + center (Tuple[float, float]): Center point of disk [x0, y0]. + radius (float): Radius of disk. + + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.Disk((0.0, 0.0), 1.0) + """ + + def __init__(self, center: Tuple[float, float], radius: float): + self.center = np.array(center, dtype=paddle.get_default_dtype()) + self.radius = radius + super().__init__(2, (self.center - radius, self.center + radius), 2 * radius) + + def is_inside(self, x): + return np.linalg.norm(x - self.center, axis=1) <= self.radius + + def on_boundary(self, x): + return np.isclose(np.linalg.norm(x - self.center, axis=1), self.radius) + + def boundary_normal(self, x): + ox = x - self.center + ox_len = np.linalg.norm(ox, axis=1, keepdims=True) + ox = (ox / ox_len) * np.isclose(ox_len, self.radius).astype( + paddle.get_default_dtype() + ) + return ox + + def random_points(self, n, random="pseudo"): + # http://mathworld.wolfram.com/DiskPointPicking.html + rng = sampler.sample(n, 2, random) + r, theta = rng[:, 0], 2 * np.pi * rng[:, 1] + x = np.sqrt(r) * np.cos(theta) + y = np.sqrt(r) * np.sin(theta) + return self.radius * np.stack((x, y), axis=1) + self.center + + def uniform_boundary_points(self, n): + theta = np.linspace( + 0, 2 * np.pi, num=n, endpoint=False, dtype=paddle.get_default_dtype() + ) + X = np.stack((np.cos(theta), np.sin(theta)), axis=1) + return self.radius * X + self.center + + def random_boundary_points(self, n, random="pseudo"): + theta = 2 * np.pi * sampler.sample(n, 1, random) + X = np.concatenate((np.cos(theta), np.sin(theta)), axis=1) + return self.radius * X + self.center + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field. + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF value, + the shape is [N, 2] + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + + NOTE: This function usually returns ndarray with negative values, because + according to the definition of SDF, the SDF value of the coordinate point inside + the object(interior points) is negative, the outside is positive, and the edge + is 0. Therefore, when used for weighting, a negative sign is often added before + the result of this function. + """ + if points.shape[1] != self.ndim: + raise ValueError( + f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" + ) + sdf = self.radius - np.linalg.norm(points - self.center, axis=1) + sdf = -sdf[..., np.newaxis] + return sdf + + +class Rectangle(geometry_nd.Hypercube): + """Class for rectangle geometry + + Args: + xmin (Tuple[float, float]): Bottom left corner point, [x0, y0]. + xmax (Tuple[float, float]): Top right corner point, [x1, y1]. 
+ + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.Rectangle((0.0, 0.0), (1.0, 1.0)) + """ + + def __init__(self, xmin, xmax): + super().__init__(xmin, xmax) + self.perimeter = 2 * np.sum(self.xmax - self.xmin) + self.area = np.prod(self.xmax - self.xmin) + + def uniform_boundary_points(self, n): + nx, ny = np.ceil(n / self.perimeter * (self.xmax - self.xmin)).astype(int) + bottom = np.hstack( + ( + np.linspace( + self.xmin[0], + self.xmax[0], + nx, + endpoint=False, + dtype=paddle.get_default_dtype(), + ).reshape([nx, 1]), + np.full([nx, 1], self.xmin[1], dtype=paddle.get_default_dtype()), + ) + ) + right = np.hstack( + ( + np.full([ny, 1], self.xmax[0], dtype=paddle.get_default_dtype()), + np.linspace( + self.xmin[1], + self.xmax[1], + ny, + endpoint=False, + dtype=paddle.get_default_dtype(), + ).reshape([ny, 1]), + ) + ) + top = np.hstack( + ( + np.linspace( + self.xmin[0], self.xmax[0], nx + 1, dtype=paddle.get_default_dtype() + )[1:].reshape([nx, 1]), + np.full([nx, 1], self.xmax[1], dtype=paddle.get_default_dtype()), + ) + ) + left = np.hstack( + ( + np.full([ny, 1], self.xmin[0], dtype=paddle.get_default_dtype()), + np.linspace( + self.xmin[1], self.xmax[1], ny + 1, dtype=paddle.get_default_dtype() + )[1:].reshape([ny, 1]), + ) + ) + x = np.vstack((bottom, right, top, left)) + if len(x) > n: + x = x[0:n] + return x + + def random_boundary_points(self, n, random="pseudo"): + l1 = self.xmax[0] - self.xmin[0] + l2 = l1 + self.xmax[1] - self.xmin[1] + l3 = l2 + l1 + u = np.ravel(sampler.sample(n + 10, 1, random)) + # Remove the possible points very close to the corners + u = u[~np.isclose(u, l1 / self.perimeter)] + u = u[~np.isclose(u, l3 / self.perimeter)] + u = u[0:n] + + u *= self.perimeter + x = [] + for l in u: + if l < l1: + x.append([self.xmin[0] + l, self.xmin[1]]) + elif l < l2: + x.append([self.xmax[0], self.xmin[1] + (l - l1)]) + elif l < l3: + x.append([self.xmax[0] - (l - l2), self.xmax[1]]) + else: + x.append([self.xmin[0], self.xmax[1] - (l - l3)]) + return np.vstack(x) + + @staticmethod + def is_valid(vertices): + """Check if the geometry is a Rectangle.""" + return ( + len(vertices) == 4 + and np.isclose(np.prod(vertices[1] - vertices[0]), 0) + and np.isclose(np.prod(vertices[2] - vertices[1]), 0) + and np.isclose(np.prod(vertices[3] - vertices[2]), 0) + and np.isclose(np.prod(vertices[0] - vertices[3]), 0) + ) + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field. + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF value, + the shape of the array is [N, 2]. + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + + NOTE: This function usually returns ndarray with negative values, because + according to the definition of SDF, the SDF value of the coordinate point inside + the object(interior points) is negative, the outside is positive, and the edge + is 0. Therefore, when used for weighting, a negative sign is often added before + the result of this function. 
+ """ + if points.shape[1] != self.ndim: + raise ValueError( + f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" + ) + center = (self.xmin + self.xmax) / 2 + dist_to_boundary = ( + np.abs(points - center) - np.array([self.xmax - self.xmin]) / 2 + ) + return ( + np.linalg.norm(np.maximum(dist_to_boundary, 0), axis=1) + + np.minimum(np.max(dist_to_boundary, axis=1), 0) + ).reshape(-1, 1) + + +class Triangle(geometry.Geometry): + """Class for Triangle + + The order of vertices can be in a clockwise or counterclockwise direction. The + vertices will be re-ordered in counterclockwise (right hand rule). + + Args: + x1 (Tuple[float, float]): First point of Triangle [x0, y0]. + x2 (Tuple[float, float]): Second point of Triangle [x1, y1]. + x3 (Tuple[float, float]): Third point of Triangle [x2, y2]. + + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.Triangle((0, 0), (1, 0), (0, 1)) + """ + + def __init__(self, x1, x2, x3): + self.area = polygon_signed_area([x1, x2, x3]) + # Clockwise + if self.area < 0: + self.area = -self.area + x2, x3 = x3, x2 + + self.x1 = np.array(x1, dtype=paddle.get_default_dtype()) + self.x2 = np.array(x2, dtype=paddle.get_default_dtype()) + self.x3 = np.array(x3, dtype=paddle.get_default_dtype()) + + self.v12 = self.x2 - self.x1 + self.v23 = self.x3 - self.x2 + self.v31 = self.x1 - self.x3 + self.l12 = np.linalg.norm(self.v12) + self.l23 = np.linalg.norm(self.v23) + self.l31 = np.linalg.norm(self.v31) + self.n12 = self.v12 / self.l12 + self.n23 = self.v23 / self.l23 + self.n31 = self.v31 / self.l31 + self.n12_normal = clockwise_rotation_90(self.n12) + self.n23_normal = clockwise_rotation_90(self.n23) + self.n31_normal = clockwise_rotation_90(self.n31) + self.perimeter = self.l12 + self.l23 + self.l31 + + super().__init__( + 2, + (np.minimum(x1, np.minimum(x2, x3)), np.maximum(x1, np.maximum(x2, x3))), + self.l12 + * self.l23 + * self.l31 + / ( + self.perimeter + * (self.l12 + self.l23 - self.l31) + * (self.l23 + self.l31 - self.l12) + * (self.l31 + self.l12 - self.l23) + ) + ** 0.5, + ) + + def is_inside(self, x): + # https://stackoverflow.com/a/2049593/12679294 + _sign = np.stack( + [ + np.cross(self.v12, x - self.x1), + np.cross(self.v23, x - self.x2), + np.cross(self.v31, x - self.x3), + ], + axis=1, + ) + return ~(np.any(_sign > 0, axis=-1) & np.any(_sign < 0, axis=-1)) + + def on_boundary(self, x): + l1 = np.linalg.norm(x - self.x1, axis=-1) + l2 = np.linalg.norm(x - self.x2, axis=-1) + l3 = np.linalg.norm(x - self.x3, axis=-1) + return np.any( + np.isclose( + [l1 + l2 - self.l12, l2 + l3 - self.l23, l3 + l1 - self.l31], + 0, + atol=1e-6, + ), + axis=0, + ) + + def boundary_normal(self, x): + l1 = np.linalg.norm(x - self.x1, axis=-1, keepdims=True) + l2 = np.linalg.norm(x - self.x2, axis=-1, keepdims=True) + l3 = np.linalg.norm(x - self.x3, axis=-1, keepdims=True) + on12 = np.isclose(l1 + l2, self.l12) + on23 = np.isclose(l2 + l3, self.l23) + on31 = np.isclose(l3 + l1, self.l31) + # Check points on the vertexes + if np.any(np.count_nonzero(np.hstack([on12, on23, on31]), axis=-1) > 1): + raise ValueError( + "{}.boundary_normal do not accept points on the vertexes.".format( + self.__class__.__name__ + ) + ) + return self.n12_normal * on12 + self.n23_normal * on23 + self.n31_normal * on31 + + def random_points(self, n, random="pseudo"): + # There are two methods for triangle point picking. 
+ # Method 1 (used here): + # - https://math.stackexchange.com/questions/18686/uniform-random-point-in-triangle + # Method 2: + # - http://mathworld.wolfram.com/TrianglePointPicking.html + # - https://hbfs.wordpress.com/2010/10/05/random-points-in-a-triangle-generating-random-sequences-ii/ + # - https://stackoverflow.com/questions/19654251/random-point-inside-triangle-inside-java + sqrt_r1 = np.sqrt(np.random.rand(n, 1)) + r2 = np.random.rand(n, 1) + return ( + (1 - sqrt_r1) * self.x1 + + sqrt_r1 * (1 - r2) * self.x2 + + r2 * sqrt_r1 * self.x3 + ) + + def uniform_boundary_points(self, n): + density = n / self.perimeter + x12 = ( + np.linspace( + 0, + 1, + num=int(np.ceil(density * self.l12)), + endpoint=False, + dtype=paddle.get_default_dtype(), + )[:, None] + * self.v12 + + self.x1 + ) + x23 = ( + np.linspace( + 0, + 1, + num=int(np.ceil(density * self.l23)), + endpoint=False, + dtype=paddle.get_default_dtype(), + )[:, None] + * self.v23 + + self.x2 + ) + x31 = ( + np.linspace( + 0, + 1, + num=int(np.ceil(density * self.l31)), + endpoint=False, + dtype=paddle.get_default_dtype(), + )[:, None] + * self.v31 + + self.x3 + ) + x = np.vstack((x12, x23, x31)) + if len(x) > n: + x = x[0:n] + return x + + def random_boundary_points(self, n, random="pseudo"): + u = np.ravel(sampler.sample(n + 2, 1, random)) + # Remove the possible points very close to the corners + u = u[np.logical_not(np.isclose(u, self.l12 / self.perimeter))] + u = u[np.logical_not(np.isclose(u, (self.l12 + self.l23) / self.perimeter))] + u = u[:n] + + u *= self.perimeter + x = [] + for l in u: + if l < self.l12: + x.append(l * self.n12 + self.x1) + elif l < self.l12 + self.l23: + x.append((l - self.l12) * self.n23 + self.x2) + else: + x.append((l - self.l12 - self.l23) * self.n31 + self.x3) + return np.vstack(x) + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field. + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF value, + the shape of the array is [N, 2]. + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + + NOTE: This function usually returns ndarray with negative values, because + according to the definition of SDF, the SDF value of the coordinate point inside + the object(interior points) is negative, the outside is positive, and the edge + is 0. Therefore, when used for weighting, a negative sign is often added before + the result of this function. 
+ """ + if points.shape[1] != self.ndim: + raise ValueError( + f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" + ) + v1p = points - self.x1 # v1p: vector from x1 to points + v2p = points - self.x2 + v3p = points - self.x3 + # vv12_p: vertical vector of points to v12(If the vertical point is in the extension of v12, + # the vector will be the vector from x1 to points) + vv12_p = ( + self.v12 + * np.clip(np.dot(v1p, self.v12.reshape(2, -1)) / self.l12**2, 0, 1) + - v1p + ) + vv23_p = ( + self.v23 + * np.clip(np.dot(v2p, self.v23.reshape(2, -1)) / self.l23**2, 0, 1) + - v2p + ) + vv31_p = ( + self.v31 + * np.clip(np.dot(v3p, self.v31.reshape(2, -1)) / self.l31**2, 0, 1) + - v3p + ) + is_inside = self.is_inside(points).reshape(-1, 1) * 2 - 1 + len_vv12_p = np.linalg.norm(vv12_p, axis=1, keepdims=True) + len_vv23_p = np.linalg.norm(vv23_p, axis=1, keepdims=True) + len_vv31_p = np.linalg.norm(vv31_p, axis=1, keepdims=True) + mini_dist = np.minimum(np.minimum(len_vv12_p, len_vv23_p), len_vv31_p) + return is_inside * mini_dist + + +class Polygon(geometry.Geometry): + """Class for simple polygon. + + Args: + vertices (Tuple[Tuple[float, float], ...]): The order of vertices can be in a + clockwise or counter-clockwise direction. The vertices will be re-ordered in + counterclockwise (right hand rule). + + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.Polygon(((0, 0), (1, 0), (2, 1), (2, 2), (0, 2))) + """ + + def __init__(self, vertices): + self.vertices = np.array(vertices, dtype=paddle.get_default_dtype()) + if len(vertices) == 3: + raise ValueError("The polygon is a triangle. Use Triangle instead.") + if Rectangle.is_valid(self.vertices): + raise ValueError("The polygon is a rectangle. Use Rectangle instead.") + + self.area = polygon_signed_area(self.vertices) + # Clockwise + if self.area < 0: + self.area = -self.area + self.vertices = np.flipud(self.vertices) + + self.diagonals = spatial.distance.squareform( + spatial.distance.pdist(self.vertices) + ) + super().__init__( + 2, + (np.amin(self.vertices, axis=0), np.amax(self.vertices, axis=0)), + np.max(self.diagonals), + ) + self.nvertices = len(self.vertices) + self.perimeter = np.sum( + [self.diagonals[i, i + 1] for i in range(-1, self.nvertices - 1)] + ) + self.bbox = np.array( + [np.min(self.vertices, axis=0), np.max(self.vertices, axis=0)], + dtype=paddle.get_default_dtype(), + ) + + self.segments = self.vertices[1:] - self.vertices[:-1] + self.segments = np.vstack((self.vertices[0] - self.vertices[-1], self.segments)) + self.normal = clockwise_rotation_90(self.segments.T).T + self.normal = self.normal / np.linalg.norm(self.normal, axis=1).reshape(-1, 1) + + def is_inside(self, x): + def wn_PnPoly(P, V): + """Winding number algorithm. + + https://en.wikipedia.org/wiki/Point_in_polygon + http://geomalgorithms.com/a03-_inclusion.html + + Args: + P: A point. + V: Vertex points of a polygon. + + Returns: + wn: Winding number (=0 only if P is outside polygon). 
+            """
+            wn = np.zeros(len(P))  # Winding number counter
+
+            # Repeat the first vertex at end
+            # Loop through all edges of the polygon
+            for i in range(-1, self.nvertices - 1):  # Edge from V[i] to V[i+1]
+                tmp = np.all(
+                    np.hstack(
+                        [
+                            V[i, 1] <= P[:, 1:2],  # Start y <= P[1]
+                            V[i + 1, 1] > P[:, 1:2],  # An upward crossing
+                            is_left(V[i], V[i + 1], P) > 0,  # P left of edge
+                        ]
+                    ),
+                    axis=-1,
+                )
+                wn[tmp] += 1  # Have a valid up intersect
+                tmp = np.all(
+                    np.hstack(
+                        [
+                            V[i, 1] > P[:, 1:2],  # Start y > P[1]
+                            V[i + 1, 1] <= P[:, 1:2],  # A downward crossing
+                            is_left(V[i], V[i + 1], P) < 0,  # P right of edge
+                        ]
+                    ),
+                    axis=-1,
+                )
+                wn[tmp] -= 1  # Have a valid down intersect
+            return wn
+
+        return wn_PnPoly(x, self.vertices) != 0
+
+    def on_boundary(self, x):
+        _on = np.zeros(shape=len(x), dtype=int)
+        for i in range(-1, self.nvertices - 1):
+            l1 = np.linalg.norm(self.vertices[i] - x, axis=-1)
+            l2 = np.linalg.norm(self.vertices[i + 1] - x, axis=-1)
+            _on[np.isclose(l1 + l2, self.diagonals[i, i + 1])] += 1
+        return _on > 0
+
+    def random_points(self, n, random="pseudo"):
+        x = np.empty((0, 2), dtype=paddle.get_default_dtype())
+        vbbox = self.bbox[1] - self.bbox[0]
+        while len(x) < n:
+            x_new = sampler.sample(n, 2, "pseudo") * vbbox + self.bbox[0]
+            x = np.vstack((x, x_new[self.is_inside(x_new)]))
+        return x[:n]
+
+    def uniform_boundary_points(self, n):
+        density = n / self.perimeter
+        x = []
+        for i in range(-1, self.nvertices - 1):
+            x.append(
+                np.linspace(
+                    0,
+                    1,
+                    num=int(np.ceil(density * self.diagonals[i, i + 1])),
+                    endpoint=False,
+                    dtype=paddle.get_default_dtype(),
+                )[:, None]
+                * (self.vertices[i + 1] - self.vertices[i])
+                + self.vertices[i]
+            )
+        x = np.vstack(x)
+        if len(x) > n:
+            x = x[0:n]
+        return x
+
+    def random_boundary_points(self, n, random="pseudo"):
+        u = np.ravel(sampler.sample(n + self.nvertices, 1, random))
+        # Remove the possible points very close to the corners
+        l = 0
+        for i in range(0, self.nvertices - 1):
+            l += self.diagonals[i, i + 1]
+            u = u[np.logical_not(np.isclose(u, l / self.perimeter))]
+        u = u[:n]
+        u *= self.perimeter
+        u.sort()
+
+        x = []
+        i = -1
+        l0 = 0
+        l1 = l0 + self.diagonals[i, i + 1]
+        v = (self.vertices[i + 1] - self.vertices[i]) / self.diagonals[i, i + 1]
+        for l in u:
+            if l > l1:
+                i += 1
+                l0, l1 = l1, l1 + self.diagonals[i, i + 1]
+                v = (self.vertices[i + 1] - self.vertices[i]) / self.diagonals[i, i + 1]
+            x.append((l - l0) * v + self.vertices[i])
+        return np.vstack(x)
+
+    def sdf_func(self, points: np.ndarray) -> np.ndarray:
+        """Compute signed distance field.
+
+        Args:
+            points (np.ndarray): The coordinate points used to calculate the SDF value,
+                the shape is [N, 2]
+        Returns:
+            np.ndarray: SDF values of input points without squared, the shape is [N, 1].
+
+        NOTE: This function usually returns ndarray with negative values, because
+        according to the definition of SDF, the SDF value of the coordinate point inside
+        the object(interior points) is negative, the outside is positive, and the edge
+        is 0. Therefore, when used for weighting, a negative sign is often added before
+        the result of this function.
+ """ + if points.shape[1] != self.ndim: + raise ValueError( + f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" + ) + sdf_value = np.empty((points.shape[0], 1), dtype=paddle.get_default_dtype()) + for n in range(points.shape[0]): + distance = np.dot( + points[n] - self.vertices[0], points[n] - self.vertices[0] + ) + inside_tag = 1.0 + for i in range(self.vertices.shape[0]): + j = (self.vertices.shape[0] - 1) if i == 0 else (i - 1) + # Calculate the shortest distance from point P to each edge. + vector_ij = self.vertices[j] - self.vertices[i] + vector_in = points[n] - self.vertices[i] + distance_vector = vector_in - vector_ij * np.clip( + np.dot(vector_in, vector_ij) / np.dot(vector_ij, vector_ij), + 0.0, + 1.0, + ) + distance = np.minimum( + distance, np.dot(distance_vector, distance_vector) + ) + # Calculate the inside and outside using the Odd-even rule + odd_even_rule_number = np.array( + [ + points[n][1] >= self.vertices[i][1], + points[n][1] < self.vertices[j][1], + vector_ij[0] * vector_in[1] > vector_ij[1] * vector_in[0], + ] + ) + if odd_even_rule_number.all() or np.all(~odd_even_rule_number): + inside_tag *= -1.0 + sdf_value[n] = inside_tag * np.sqrt(distance) + return -sdf_value + + +def polygon_signed_area(vertices): + """The (signed) area of a simple polygon. + + If the vertices are in the counterclockwise direction, then the area is positive; if + they are in the clockwise direction, the area is negative. + + Shoelace formula: https://en.wikipedia.org/wiki/Shoelace_formula + + Args: + vertices (np.ndarray): Polygon vertices with shape of [N, 2]. + + Returns: + float: The (signed) area of a simple polygon. + """ + x, y = zip(*vertices) + x = np.array(list(x) + [x[0]], dtype=paddle.get_default_dtype()) + y = np.array(list(y) + [y[0]], dtype=paddle.get_default_dtype()) + return 0.5 * (np.sum(x[:-1] * y[1:]) - np.sum(x[1:] * y[:-1])) + + +def clockwise_rotation_90(v): + """Rotate a vector of 90 degrees clockwise about the origin. + + Args: + v (np.ndarray): Vector with shape of [2, N]. + + Returns: + np.ndarray: Rotated vector with shape of [2, N]. + """ + return np.array([v[1], -v[0]], dtype=paddle.get_default_dtype()) + + +def is_left(P0, P1, P2): + """Test if a point is Left|On|Right of an infinite line. + + See: the January 2001 Algorithm "Area of 2D and 3D Triangles and Polygons". + + Args: + P0 (np.ndarray): One point in the line. + P1 (np.ndarray): One point in the line. + P2 (np.ndarray): A array of point to be tested with shape of [N, 2]. + + Returns: + np.ndarray: >0 if P2 left of the line through P0 and P1, =0 if P2 on the line, <0 if P2 + right of the line. + """ + return np.cross(P1 - P0, P2 - P0, axis=-1).reshape((-1, 1)) diff --git a/ppsci/geometry/geometry_3d.py b/ppsci/geometry/geometry_3d.py index 8af958b1b9..3bbf6a12c5 100644 --- a/ppsci/geometry/geometry_3d.py +++ b/ppsci/geometry/geometry_3d.py @@ -1,203 +1,203 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" - -from __future__ import annotations - -import itertools -from typing import Tuple - -import numpy as np -import paddle - -from ppsci.geometry import geometry_2d -from ppsci.geometry import geometry_nd - - -class Cuboid(geometry_nd.Hypercube): - """Class for Cuboid - - Args: - xmin (Tuple[float, float, float]): Bottom left corner point [x0, y0, z0]. - xmax (Tuple[float, float, float]): Top right corner point [x1, y1, z1]. - - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) - """ - - def __init__( - self, xmin: Tuple[float, float, float], xmax: Tuple[float, float, float] - ): - super().__init__(xmin, xmax) - dx = self.xmax - self.xmin - self.area = 2 * np.sum(dx * np.roll(dx, 2)) - - def random_boundary_points(self, n, random="pseudo"): - pts = [] - density = n / self.area - rect = geometry_2d.Rectangle(self.xmin[:-1], self.xmax[:-1]) - for z in [self.xmin[-1], self.xmax[-1]]: - u = rect.random_points(int(np.ceil(density * rect.area)), random=random) - pts.append( - np.hstack( - (u, np.full((len(u), 1), z, dtype=paddle.get_default_dtype())) - ) - ) - rect = geometry_2d.Rectangle(self.xmin[::2], self.xmax[::2]) - for y in [self.xmin[1], self.xmax[1]]: - u = rect.random_points(int(np.ceil(density * rect.area)), random=random) - pts.append( - np.hstack( - ( - u[:, 0:1], - np.full((len(u), 1), y, dtype=paddle.get_default_dtype()), - u[:, 1:], - ) - ) - ) - rect = geometry_2d.Rectangle(self.xmin[1:], self.xmax[1:]) - for x in [self.xmin[0], self.xmax[0]]: - u = rect.random_points(int(np.ceil(density * rect.area)), random=random) - pts.append( - np.hstack( - (np.full((len(u), 1), x, dtype=paddle.get_default_dtype()), u) - ) - ) - pts = np.vstack(pts) - if len(pts) > n: - return pts[np.random.choice(len(pts), size=n, replace=False)] - return pts - - def uniform_boundary_points(self, n): - h = (self.area / n) ** 0.5 - nx, ny, nz = np.ceil((self.xmax - self.xmin) / h).astype(int) + 1 - x = np.linspace( - self.xmin[0], self.xmax[0], num=nx, dtype=paddle.get_default_dtype() - ) - y = np.linspace( - self.xmin[1], self.xmax[1], num=ny, dtype=paddle.get_default_dtype() - ) - z = np.linspace( - self.xmin[2], self.xmax[2], num=nz, dtype=paddle.get_default_dtype() - ) - - pts = [] - for v in [self.xmin[-1], self.xmax[-1]]: - u = list(itertools.product(x, y)) - pts.append( - np.hstack( - (u, np.full((len(u), 1), v, dtype=paddle.get_default_dtype())) - ) - ) - if nz > 2: - for v in [self.xmin[1], self.xmax[1]]: - u = np.array( - list(itertools.product(x, z[1:-1])), - dtype=paddle.get_default_dtype(), - ) - pts.append( - np.hstack( - ( - u[:, 0:1], - np.full((len(u), 1), v, dtype=paddle.get_default_dtype()), - u[:, 1:], - ) - ) - ) - if ny > 2 and nz > 2: - for v in [self.xmin[0], self.xmax[0]]: - u = list(itertools.product(y[1:-1], z[1:-1])) - pts.append( - np.hstack( - (np.full((len(u), 1), v, dtype=paddle.get_default_dtype()), u) - ) - ) - pts = np.vstack(pts) - if len(pts) > n: - return pts[np.random.choice(len(pts), size=n, replace=False)] - return pts - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape is [N, 3] - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. 
- - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. - """ - if points.shape[1] != self.ndim: - raise ValueError( - f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" - ) - sdf = ( - ((self.xmax - self.xmin) / 2 - abs(points - (self.xmin + self.xmax) / 2)) - ).min(axis=1) - sdf = -sdf[..., np.newaxis] - return sdf - - -class Sphere(geometry_nd.Hypersphere): - """Class for Sphere - - Args: - center (Tuple[float, float, float]): Center of the sphere [x0, y0, z0]. - radius (float): Radius of the sphere. - """ - - def __init__(self, center, radius): - super().__init__(center, radius) - - def uniform_boundary_points(self, n: int): - nl = np.arange(1, n + 1).astype(paddle.get_default_dtype()) - g = (np.sqrt(5) - 1) / 2 - z = (2 * nl - 1) / n - 1 - x = np.sqrt(1 - z**2) * np.cos(2 * np.pi * nl * g) - y = np.sqrt(1 - z**2) * np.sin(2 * np.pi * nl * g) - return np.stack((x, y, z), axis=-1) - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape is [N, 3] - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. - - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. - """ - if points.shape[1] != self.ndim: - raise ValueError( - f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" - ) - sdf = self.radius - (((points - self.center) ** 2).sum(axis=1)) ** 0.5 - sdf = -sdf[..., np.newaxis] - return sdf +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" + +from __future__ import annotations + +import itertools +from typing import Tuple + +import numpy as np +import paddle + +from ppsci.geometry import geometry_2d +from ppsci.geometry import geometry_nd + + +class Cuboid(geometry_nd.Hypercube): + """Class for Cuboid + + Args: + xmin (Tuple[float, float, float]): Bottom left corner point [x0, y0, z0]. + xmax (Tuple[float, float, float]): Top right corner point [x1, y1, z1]. 
+ + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.Cuboid((0, 0, 0), (1, 1, 1)) + """ + + def __init__( + self, xmin: Tuple[float, float, float], xmax: Tuple[float, float, float] + ): + super().__init__(xmin, xmax) + dx = self.xmax - self.xmin + self.area = 2 * np.sum(dx * np.roll(dx, 2)) + + def random_boundary_points(self, n, random="pseudo"): + pts = [] + density = n / self.area + rect = geometry_2d.Rectangle(self.xmin[:-1], self.xmax[:-1]) + for z in [self.xmin[-1], self.xmax[-1]]: + u = rect.random_points(int(np.ceil(density * rect.area)), random=random) + pts.append( + np.hstack( + (u, np.full((len(u), 1), z, dtype=paddle.get_default_dtype())) + ) + ) + rect = geometry_2d.Rectangle(self.xmin[::2], self.xmax[::2]) + for y in [self.xmin[1], self.xmax[1]]: + u = rect.random_points(int(np.ceil(density * rect.area)), random=random) + pts.append( + np.hstack( + ( + u[:, 0:1], + np.full((len(u), 1), y, dtype=paddle.get_default_dtype()), + u[:, 1:], + ) + ) + ) + rect = geometry_2d.Rectangle(self.xmin[1:], self.xmax[1:]) + for x in [self.xmin[0], self.xmax[0]]: + u = rect.random_points(int(np.ceil(density * rect.area)), random=random) + pts.append( + np.hstack( + (np.full((len(u), 1), x, dtype=paddle.get_default_dtype()), u) + ) + ) + pts = np.vstack(pts) + if len(pts) > n: + return pts[np.random.choice(len(pts), size=n, replace=False)] + return pts + + def uniform_boundary_points(self, n): + h = (self.area / n) ** 0.5 + nx, ny, nz = np.ceil((self.xmax - self.xmin) / h).astype(int) + 1 + x = np.linspace( + self.xmin[0], self.xmax[0], num=nx, dtype=paddle.get_default_dtype() + ) + y = np.linspace( + self.xmin[1], self.xmax[1], num=ny, dtype=paddle.get_default_dtype() + ) + z = np.linspace( + self.xmin[2], self.xmax[2], num=nz, dtype=paddle.get_default_dtype() + ) + + pts = [] + for v in [self.xmin[-1], self.xmax[-1]]: + u = list(itertools.product(x, y)) + pts.append( + np.hstack( + (u, np.full((len(u), 1), v, dtype=paddle.get_default_dtype())) + ) + ) + if nz > 2: + for v in [self.xmin[1], self.xmax[1]]: + u = np.array( + list(itertools.product(x, z[1:-1])), + dtype=paddle.get_default_dtype(), + ) + pts.append( + np.hstack( + ( + u[:, 0:1], + np.full((len(u), 1), v, dtype=paddle.get_default_dtype()), + u[:, 1:], + ) + ) + ) + if ny > 2 and nz > 2: + for v in [self.xmin[0], self.xmax[0]]: + u = list(itertools.product(y[1:-1], z[1:-1])) + pts.append( + np.hstack( + (np.full((len(u), 1), v, dtype=paddle.get_default_dtype()), u) + ) + ) + pts = np.vstack(pts) + if len(pts) > n: + return pts[np.random.choice(len(pts), size=n, replace=False)] + return pts + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field. + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF value, + the shape is [N, 3] + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + + NOTE: This function usually returns ndarray with negative values, because + according to the definition of SDF, the SDF value of the coordinate point inside + the object(interior points) is negative, the outside is positive, and the edge + is 0. Therefore, when used for weighting, a negative sign is often added before + the result of this function. 
+ """ + if points.shape[1] != self.ndim: + raise ValueError( + f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" + ) + sdf = ( + ((self.xmax - self.xmin) / 2 - abs(points - (self.xmin + self.xmax) / 2)) + ).min(axis=1) + sdf = -sdf[..., np.newaxis] + return sdf + + +class Sphere(geometry_nd.Hypersphere): + """Class for Sphere + + Args: + center (Tuple[float, float, float]): Center of the sphere [x0, y0, z0]. + radius (float): Radius of the sphere. + """ + + def __init__(self, center, radius): + super().__init__(center, radius) + + def uniform_boundary_points(self, n: int): + nl = np.arange(1, n + 1).astype(paddle.get_default_dtype()) + g = (np.sqrt(5) - 1) / 2 + z = (2 * nl - 1) / n - 1 + x = np.sqrt(1 - z**2) * np.cos(2 * np.pi * nl * g) + y = np.sqrt(1 - z**2) * np.sin(2 * np.pi * nl * g) + return np.stack((x, y, z), axis=-1) + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field. + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF value, + the shape is [N, 3] + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + + NOTE: This function usually returns ndarray with negative values, because + according to the definition of SDF, the SDF value of the coordinate point inside + the object(interior points) is negative, the outside is positive, and the edge + is 0. Therefore, when used for weighting, a negative sign is often added before + the result of this function. + """ + if points.shape[1] != self.ndim: + raise ValueError( + f"Shape of given points should be [*, {self.ndim}], but got {points.shape}" + ) + sdf = self.radius - (((points - self.center) ** 2).sum(axis=1)) ** 0.5 + sdf = -sdf[..., np.newaxis] + return sdf diff --git a/ppsci/geometry/geometry_nd.py b/ppsci/geometry/geometry_nd.py index 84a8a3edbc..8b99c2d0e9 100644 --- a/ppsci/geometry/geometry_nd.py +++ b/ppsci/geometry/geometry_nd.py @@ -1,196 +1,196 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" -from __future__ import annotations - -import itertools -from typing import Tuple - -import numpy as np -import paddle -from scipy import stats -from sklearn import preprocessing - -from ppsci.geometry import geometry -from ppsci.geometry import sampler -from ppsci.utils import misc - - -class Hypercube(geometry.Geometry): - """Multi-dimensional hyper cube. - - Args: - xmin (Tuple[float, ...]): Lower corner point. - xmax (Tuple[float, ...]): Upper corner point. 
- - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Hypercube((0, 0, 0, 0), (1, 1, 1, 1)) - """ - - def __init__(self, xmin: Tuple[float, ...], xmax: Tuple[float, ...]): - if len(xmin) != len(xmax): - raise ValueError("Dimensions of xmin and xmax do not match.") - - self.xmin = np.array(xmin, dtype=paddle.get_default_dtype()) - self.xmax = np.array(xmax, dtype=paddle.get_default_dtype()) - if np.any(self.xmin >= self.xmax): - raise ValueError("xmin >= xmax") - - self.side_length = self.xmax - self.xmin - super().__init__( - len(xmin), (self.xmin, self.xmax), np.linalg.norm(self.side_length) - ) - self.volume = np.prod(self.side_length, dtype=paddle.get_default_dtype()) - - def is_inside(self, x): - return np.logical_and( - np.all(x >= self.xmin, axis=-1), np.all(x <= self.xmax, axis=-1) - ) - - def on_boundary(self, x): - _on_boundary = np.logical_or( - np.any(np.isclose(x, self.xmin), axis=-1), - np.any(np.isclose(x, self.xmax), axis=-1), - ) - return np.logical_and(self.is_inside(x), _on_boundary) - - def boundary_normal(self, x): - _n = -np.isclose(x, self.xmin).astype(paddle.get_default_dtype()) + np.isclose( - x, self.xmax - ) - # For vertices, the normal is averaged for all directions - idx = np.count_nonzero(_n, axis=-1) > 1 - if np.any(idx): - l = np.linalg.norm(_n[idx], axis=-1, keepdims=True) - _n[idx] /= l - return _n - - def uniform_points(self, n, boundary=True): - dx = (self.volume / n) ** (1 / self.ndim) - xi = [] - for i in range(self.ndim): - ni = int(np.ceil(self.side_length[i] / dx)) - if boundary: - xi.append( - np.linspace( - self.xmin[i], - self.xmax[i], - num=ni, - dtype=paddle.get_default_dtype(), - ) - ) - else: - xi.append( - np.linspace( - self.xmin[i], - self.xmax[i], - num=ni + 1, - endpoint=False, - dtype=paddle.get_default_dtype(), - )[1:] - ) - x = np.array(list(itertools.product(*xi)), dtype=paddle.get_default_dtype()) - if len(x) > n: - x = x[0:n] - return x - - def random_points(self, n, random="pseudo"): - x = sampler.sample(n, self.ndim, random) - # print(f"Hypercube's range: {self.__class__.__name__}", self.xmin, self.xmax) - return (self.xmax - self.xmin) * x + self.xmin - - def random_boundary_points(self, n, random="pseudo"): - x = sampler.sample(n, self.ndim, random) - # Randomly pick a dimension - rand_dim = np.random.randint(self.ndim, size=n) - # Replace value of the randomly picked dimension with the nearest boundary value (0 or 1) - x[np.arange(n), rand_dim] = np.round(x[np.arange(n), rand_dim]) - return (self.xmax - self.xmin) * x + self.xmin - - def periodic_point(self, x, component): - y = misc.convert_to_array(x, self.dim_keys) - _on_xmin = np.isclose(y[:, component], self.xmin[component]) - _on_xmax = np.isclose(y[:, component], self.xmax[component]) - y[:, component][_on_xmin] = self.xmax[component] - y[:, component][_on_xmax] = self.xmin[component] - y_normal = self.boundary_normal(y) - - y = misc.convert_to_dict(y, self.dim_keys) - y_normal = misc.convert_to_dict( - y_normal, [f"normal_{k}" for k in self.dim_keys] - ) - return {**y, **y_normal} - - -class Hypersphere(geometry.Geometry): - """Multi-dimensional hyper sphere. - - Args: - center (Tuple[float, ...]): Center point coordinate. - radius (Tuple[float, ...]): Radius along each dimension. 
- - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Hypersphere((0, 0, 0, 0), 1.0) - """ - - def __init__(self, center, radius): - self.center = np.array(center, dtype=paddle.get_default_dtype()) - self.radius = radius - super().__init__( - len(center), (self.center - radius, self.center + radius), 2 * radius - ) - - self._r2 = radius**2 - - def is_inside(self, x): - return np.linalg.norm(x - self.center, axis=-1) <= self.radius - - def on_boundary(self, x): - return np.isclose(np.linalg.norm(x - self.center, axis=-1), self.radius) - - def boundary_normal(self, x): - _n = x - self.center - l = np.linalg.norm(_n, axis=-1, keepdims=True) - _n = _n / l * np.isclose(l, self.radius) - return _n - - def random_points(self, n, random="pseudo"): - # https://math.stackexchange.com/questions/87230/picking-random-points-in-the-volume-of-sphere-with-uniform-probability - if random == "pseudo": - U = np.random.rand(n, 1).astype(paddle.get_default_dtype()) - X = np.random.normal(size=(n, self.ndim)).astype(paddle.get_default_dtype()) - else: - rng = sampler.sample(n, self.ndim + 1, random) - U, X = rng[:, 0:1], rng[:, 1:] # Error if X = [0, 0, ...] - X = stats.norm.ppf(X).astype(paddle.get_default_dtype()) - X = preprocessing.normalize(X) - X = U ** (1 / self.ndim) * X - return self.radius * X + self.center - - def random_boundary_points(self, n, random="pseudo"): - # http://mathworld.wolfram.com/HyperspherePointPicking.html - if random == "pseudo": - X = np.random.normal(size=(n, self.ndim)).astype(paddle.get_default_dtype()) - else: - U = sampler.sample( - n, self.ndim, random - ) # Error for [0, 0, ...] or [0.5, 0.5, ...] - X = stats.norm.ppf(U).astype(paddle.get_default_dtype()) - X = preprocessing.normalize(X) - return self.radius * X + self.center +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" +from __future__ import annotations + +import itertools +from typing import Tuple + +import numpy as np +import paddle +from scipy import stats +from sklearn import preprocessing + +from ppsci.geometry import geometry +from ppsci.geometry import sampler +from ppsci.utils import misc + + +class Hypercube(geometry.Geometry): + """Multi-dimensional hyper cube. + + Args: + xmin (Tuple[float, ...]): Lower corner point. + xmax (Tuple[float, ...]): Upper corner point. 
+ + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.Hypercube((0, 0, 0, 0), (1, 1, 1, 1)) + """ + + def __init__(self, xmin: Tuple[float, ...], xmax: Tuple[float, ...]): + if len(xmin) != len(xmax): + raise ValueError("Dimensions of xmin and xmax do not match.") + + self.xmin = np.array(xmin, dtype=paddle.get_default_dtype()) + self.xmax = np.array(xmax, dtype=paddle.get_default_dtype()) + if np.any(self.xmin >= self.xmax): + raise ValueError("xmin >= xmax") + + self.side_length = self.xmax - self.xmin + super().__init__( + len(xmin), (self.xmin, self.xmax), np.linalg.norm(self.side_length) + ) + self.volume = np.prod(self.side_length, dtype=paddle.get_default_dtype()) + + def is_inside(self, x): + return np.logical_and( + np.all(x >= self.xmin, axis=-1), np.all(x <= self.xmax, axis=-1) + ) + + def on_boundary(self, x): + _on_boundary = np.logical_or( + np.any(np.isclose(x, self.xmin), axis=-1), + np.any(np.isclose(x, self.xmax), axis=-1), + ) + return np.logical_and(self.is_inside(x), _on_boundary) + + def boundary_normal(self, x): + _n = -np.isclose(x, self.xmin).astype(paddle.get_default_dtype()) + np.isclose( + x, self.xmax + ) + # For vertices, the normal is averaged for all directions + idx = np.count_nonzero(_n, axis=-1) > 1 + if np.any(idx): + l = np.linalg.norm(_n[idx], axis=-1, keepdims=True) + _n[idx] /= l + return _n + + def uniform_points(self, n, boundary=True): + dx = (self.volume / n) ** (1 / self.ndim) + xi = [] + for i in range(self.ndim): + ni = int(np.ceil(self.side_length[i] / dx)) + if boundary: + xi.append( + np.linspace( + self.xmin[i], + self.xmax[i], + num=ni, + dtype=paddle.get_default_dtype(), + ) + ) + else: + xi.append( + np.linspace( + self.xmin[i], + self.xmax[i], + num=ni + 1, + endpoint=False, + dtype=paddle.get_default_dtype(), + )[1:] + ) + x = np.array(list(itertools.product(*xi)), dtype=paddle.get_default_dtype()) + if len(x) > n: + x = x[0:n] + return x + + def random_points(self, n, random="pseudo"): + x = sampler.sample(n, self.ndim, random) + # print(f"Hypercube's range: {self.__class__.__name__}", self.xmin, self.xmax) + return (self.xmax - self.xmin) * x + self.xmin + + def random_boundary_points(self, n, random="pseudo"): + x = sampler.sample(n, self.ndim, random) + # Randomly pick a dimension + rand_dim = np.random.randint(self.ndim, size=n) + # Replace value of the randomly picked dimension with the nearest boundary value (0 or 1) + x[np.arange(n), rand_dim] = np.round(x[np.arange(n), rand_dim]) + return (self.xmax - self.xmin) * x + self.xmin + + def periodic_point(self, x, component): + y = misc.convert_to_array(x, self.dim_keys) + _on_xmin = np.isclose(y[:, component], self.xmin[component]) + _on_xmax = np.isclose(y[:, component], self.xmax[component]) + y[:, component][_on_xmin] = self.xmax[component] + y[:, component][_on_xmax] = self.xmin[component] + y_normal = self.boundary_normal(y) + + y = misc.convert_to_dict(y, self.dim_keys) + y_normal = misc.convert_to_dict( + y_normal, [f"normal_{k}" for k in self.dim_keys] + ) + return {**y, **y_normal} + + +class Hypersphere(geometry.Geometry): + """Multi-dimensional hyper sphere. + + Args: + center (Tuple[float, ...]): Center point coordinate. + radius (Tuple[float, ...]): Radius along each dimension. 
+ + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.Hypersphere((0, 0, 0, 0), 1.0) + """ + + def __init__(self, center, radius): + self.center = np.array(center, dtype=paddle.get_default_dtype()) + self.radius = radius + super().__init__( + len(center), (self.center - radius, self.center + radius), 2 * radius + ) + + self._r2 = radius**2 + + def is_inside(self, x): + return np.linalg.norm(x - self.center, axis=-1) <= self.radius + + def on_boundary(self, x): + return np.isclose(np.linalg.norm(x - self.center, axis=-1), self.radius) + + def boundary_normal(self, x): + _n = x - self.center + l = np.linalg.norm(_n, axis=-1, keepdims=True) + _n = _n / l * np.isclose(l, self.radius) + return _n + + def random_points(self, n, random="pseudo"): + # https://math.stackexchange.com/questions/87230/picking-random-points-in-the-volume-of-sphere-with-uniform-probability + if random == "pseudo": + U = np.random.rand(n, 1).astype(paddle.get_default_dtype()) + X = np.random.normal(size=(n, self.ndim)).astype(paddle.get_default_dtype()) + else: + rng = sampler.sample(n, self.ndim + 1, random) + U, X = rng[:, 0:1], rng[:, 1:] # Error if X = [0, 0, ...] + X = stats.norm.ppf(X).astype(paddle.get_default_dtype()) + X = preprocessing.normalize(X) + X = U ** (1 / self.ndim) * X + return self.radius * X + self.center + + def random_boundary_points(self, n, random="pseudo"): + # http://mathworld.wolfram.com/HyperspherePointPicking.html + if random == "pseudo": + X = np.random.normal(size=(n, self.ndim)).astype(paddle.get_default_dtype()) + else: + U = sampler.sample( + n, self.ndim, random + ) # Error for [0, 0, ...] or [0.5, 0.5, ...] + X = stats.norm.ppf(U).astype(paddle.get_default_dtype()) + X = preprocessing.normalize(X) + return self.radius * X + self.center diff --git a/ppsci/geometry/inflation.py b/ppsci/geometry/inflation.py index 198a7cd223..56f1572bb4 100644 --- a/ppsci/geometry/inflation.py +++ b/ppsci/geometry/inflation.py @@ -1,192 +1,192 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import numpy as np -import paddle - -from ppsci.utils import checker - -if not checker.dynamic_import_to_globals(["pymesh", "open3d"]): - raise ModuleNotFoundError - -__all__ = [ - "pymesh_inflation", -] - - -def open3d_inflation( - mesh: open3d.geometry.TriangleMesh, distance: float, direction: int = 1 -) -> open3d.geometry.TriangleMesh: - """Inflate mesh geometry. - - Args: - mesh (open3d.geometry.TriangleMesh): Open3D mesh object. - distance (float): Distance along exterior normal to inflate. - direction (int): 1 for exterior normal, -1 for interior normal. Defaults to 1. - - Returns: - open3d.geometry.TriangleMesh: Inflated mesh. 
- """ - mesh.remove_duplicated_vertices() - mesh.remove_degenerate_triangles() - mesh.remove_duplicated_triangles() - mesh.remove_unreferenced_vertices() - triangles = np.asarray(mesh.triangles) - points = np.asarray(mesh.vertices) - - remove_ids = [] - for i, point in enumerate(points): - boolean_index = np.argwhere(triangles == i)[:, 0] - if len(boolean_index) < 3: - remove_ids.append(i) - mesh.remove_vertices_by_index(remove_ids) - - points = np.asarray(mesh.vertices, dtype=paddle.get_default_dtype()) - mesh.compute_triangle_normals() - normals = np.asarray(mesh.triangle_normals, dtype=paddle.get_default_dtype()) - mesh.orient_triangles() - triangles = np.asarray(mesh.triangles, dtype=paddle.get_default_dtype()) - new_points = [] - for i, point in enumerate(points): - boolean_index = np.argwhere(triangles == i)[:, 0] - normal = normals[boolean_index] * direction - d = np.ones(len(normal), dtype=paddle.get_default_dtype()) * distance - - new_point = np.linalg.lstsq(normal, d, rcond=None)[0].squeeze() - new_point = point + new_point - if np.linalg.norm(new_point - point) > distance * 2: - # TODO : Find a better way to solve the bad inflation - new_point = point + distance * normal.mean(axis=0) - - new_points.append(new_point) - - new_points = np.array(new_points, dtype=paddle.get_default_dtype()) - new_mesh = open3d.geometry.TriangleMesh( - open3d.utility.Vector3dVector(new_points), - open3d.utility.Vector3iVector(triangles), - ) - - new_mesh.remove_duplicated_vertices() - new_mesh.remove_degenerate_triangles() - new_mesh.remove_duplicated_triangles() - new_mesh.remove_unreferenced_vertices() - new_mesh.compute_triangle_normals() - return new_mesh - - -def pymesh_inflation(mesh: pymesh.Mesh, distance: float) -> pymesh.Mesh: - """Inflate mesh by distance. - - Args: - mesh (pymesh.Mesh): PyMesh object. - distance (float): Inflation distance. - - Returns: - pymesh.Mesh: Inflated mesh. - """ - vertices = np.array(mesh.vertices, dtype=paddle.get_default_dtype()) - faces = np.array(mesh.faces) - open3d_mesh = open3d.geometry.TriangleMesh( - open3d.utility.Vector3dVector(vertices), open3d.utility.Vector3iVector(faces) - ) - inflated_open3d_mesh = open3d_inflation( - open3d_mesh, abs(distance), 1.0 if distance >= 0.0 else -1.0 - ) - vertices = np.array(inflated_open3d_mesh.vertices, dtype=paddle.get_default_dtype()) - faces = np.array(inflated_open3d_mesh.triangles) - inflated_pymesh = pymesh.form_mesh(vertices, faces) - return inflated_pymesh - - -def offset(mesh, distance) -> open3d.geometry.TriangleMesh: - """Offset the 2D mesh - - Args: - mesh (open3d.geometry.TriangleMesh): The mesh to be offset. - distance (float): The distance to offset. - - Returns: - open3d.geometry.TriangleMesh: Result mesh. 
- """ - # check if the mesh is 2D - mesh.compute_triangle_normals() - normals = np.asarray(mesh.triangle_normals, dtype=paddle.get_default_dtype()) - if not np.allclose(normals[:, :-1], 0): - raise ValueError("The mesh is not 2D") - - mesh.remove_duplicated_vertices() - mesh.remove_degenerate_triangles() - mesh.remove_duplicated_triangles() - mesh.remove_unreferenced_vertices() - triangles = np.asarray(mesh.triangles, dtype=paddle.get_default_dtype()) - - edges = np.vstack( - [triangles[:, [0, 1]], triangles[:, [1, 2]], triangles[:, [2, 0]]] - ) - edges = set(map(tuple, edges)) - edges = np.array(list(edges)) - - vertices = np.asarray(mesh.vertices, dtype=paddle.get_default_dtype())[:, :-1] - edges_in_triangle = np.array( - [ - np.intersect1d( - np.argwhere(triangles == edge[0])[:, 0], - np.argwhere(triangles == edge[1])[:, 0], - ) - for edge in edges - ], - dtype=object, - ) - surface_edges = edges[[len(i) == 1 for i in edges_in_triangle]] - edges_in_triangle = [i for i in edges_in_triangle if len(i) == 1] - - edges_normals = [] - for edge, triangle in zip(surface_edges, edges_in_triangle): - triangle = triangles[triangle].squeeze() - other_point = vertices[np.setdiff1d(triangle, edge)].squeeze() - edge = vertices[edge] - u = (other_point[0] - edge[0][0]) * (edge[0][0] - edge[1][0]) + ( - other_point[1] - edge[0][1] - ) * (edge[0][1] - edge[1][1]) - u = u / np.sum((edge[0] - edge[1]) ** 2) - edge_normal = edge[0] + u * (edge[0] - edge[1]) - edge_normal = edge_normal - other_point - edges_normals.append(edge_normal) - - edges_normals = np.array(edges_normals, dtype=paddle.get_default_dtype()) - edges_normals = edges_normals / np.linalg.norm(edges_normals, axis=1)[:, None] - - new_mesh = open3d.geometry.TriangleMesh() - new_vertices = [] - for point in set(surface_edges.reshape(-1)): - index = np.argwhere(surface_edges == point)[:, 0] - normal = edges_normals[index] - d = np.ones(len(index), dtype=paddle.get_default_dtype()) * distance - new_point = np.linalg.lstsq(normal, d, rcond=None)[0] - new_point = vertices[point] + new_point - new_vertices.append(new_point) - - new_vertices = np.hstack( - ( - np.array(new_vertices, dtype=paddle.get_default_dtype()), - np.zeros((len(new_vertices), 1), dtype=paddle.get_default_dtype()), - ) - ) - new_mesh.vertices = open3d.utility.Vector3dVector(new_vertices) - new_mesh.triangles = open3d.utility.Vector3iVector(triangles) - new_mesh.compute_triangle_normals() - new_mesh.compute_vertex_normals() - return new_mesh +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import numpy as np +import paddle + +from ppsci.utils import checker + +if not checker.dynamic_import_to_globals(["pymesh", "open3d"]): + raise ModuleNotFoundError + +__all__ = [ + "pymesh_inflation", +] + + +def open3d_inflation( + mesh: open3d.geometry.TriangleMesh, distance: float, direction: int = 1 +) -> open3d.geometry.TriangleMesh: + """Inflate mesh geometry. 
+ + Args: + mesh (open3d.geometry.TriangleMesh): Open3D mesh object. + distance (float): Distance along exterior normal to inflate. + direction (int): 1 for exterior normal, -1 for interior normal. Defaults to 1. + + Returns: + open3d.geometry.TriangleMesh: Inflated mesh. + """ + mesh.remove_duplicated_vertices() + mesh.remove_degenerate_triangles() + mesh.remove_duplicated_triangles() + mesh.remove_unreferenced_vertices() + triangles = np.asarray(mesh.triangles) + points = np.asarray(mesh.vertices) + + remove_ids = [] + for i, point in enumerate(points): + boolean_index = np.argwhere(triangles == i)[:, 0] + if len(boolean_index) < 3: + remove_ids.append(i) + mesh.remove_vertices_by_index(remove_ids) + + points = np.asarray(mesh.vertices, dtype=paddle.get_default_dtype()) + mesh.compute_triangle_normals() + normals = np.asarray(mesh.triangle_normals, dtype=paddle.get_default_dtype()) + mesh.orient_triangles() + triangles = np.asarray(mesh.triangles, dtype=paddle.get_default_dtype()) + new_points = [] + for i, point in enumerate(points): + boolean_index = np.argwhere(triangles == i)[:, 0] + normal = normals[boolean_index] * direction + d = np.ones(len(normal), dtype=paddle.get_default_dtype()) * distance + + new_point = np.linalg.lstsq(normal, d, rcond=None)[0].squeeze() + new_point = point + new_point + if np.linalg.norm(new_point - point) > distance * 2: + # TODO : Find a better way to solve the bad inflation + new_point = point + distance * normal.mean(axis=0) + + new_points.append(new_point) + + new_points = np.array(new_points, dtype=paddle.get_default_dtype()) + new_mesh = open3d.geometry.TriangleMesh( + open3d.utility.Vector3dVector(new_points), + open3d.utility.Vector3iVector(triangles), + ) + + new_mesh.remove_duplicated_vertices() + new_mesh.remove_degenerate_triangles() + new_mesh.remove_duplicated_triangles() + new_mesh.remove_unreferenced_vertices() + new_mesh.compute_triangle_normals() + return new_mesh + + +def pymesh_inflation(mesh: pymesh.Mesh, distance: float) -> pymesh.Mesh: + """Inflate mesh by distance. + + Args: + mesh (pymesh.Mesh): PyMesh object. + distance (float): Inflation distance. + + Returns: + pymesh.Mesh: Inflated mesh. + """ + vertices = np.array(mesh.vertices, dtype=paddle.get_default_dtype()) + faces = np.array(mesh.faces) + open3d_mesh = open3d.geometry.TriangleMesh( + open3d.utility.Vector3dVector(vertices), open3d.utility.Vector3iVector(faces) + ) + inflated_open3d_mesh = open3d_inflation( + open3d_mesh, abs(distance), 1.0 if distance >= 0.0 else -1.0 + ) + vertices = np.array(inflated_open3d_mesh.vertices, dtype=paddle.get_default_dtype()) + faces = np.array(inflated_open3d_mesh.triangles) + inflated_pymesh = pymesh.form_mesh(vertices, faces) + return inflated_pymesh + + +def offset(mesh, distance) -> open3d.geometry.TriangleMesh: + """Offset the 2D mesh + + Args: + mesh (open3d.geometry.TriangleMesh): The mesh to be offset. + distance (float): The distance to offset. + + Returns: + open3d.geometry.TriangleMesh: Result mesh. 
+ """ + # check if the mesh is 2D + mesh.compute_triangle_normals() + normals = np.asarray(mesh.triangle_normals, dtype=paddle.get_default_dtype()) + if not np.allclose(normals[:, :-1], 0): + raise ValueError("The mesh is not 2D") + + mesh.remove_duplicated_vertices() + mesh.remove_degenerate_triangles() + mesh.remove_duplicated_triangles() + mesh.remove_unreferenced_vertices() + triangles = np.asarray(mesh.triangles, dtype=paddle.get_default_dtype()) + + edges = np.vstack( + [triangles[:, [0, 1]], triangles[:, [1, 2]], triangles[:, [2, 0]]] + ) + edges = set(map(tuple, edges)) + edges = np.array(list(edges)) + + vertices = np.asarray(mesh.vertices, dtype=paddle.get_default_dtype())[:, :-1] + edges_in_triangle = np.array( + [ + np.intersect1d( + np.argwhere(triangles == edge[0])[:, 0], + np.argwhere(triangles == edge[1])[:, 0], + ) + for edge in edges + ], + dtype=object, + ) + surface_edges = edges[[len(i) == 1 for i in edges_in_triangle]] + edges_in_triangle = [i for i in edges_in_triangle if len(i) == 1] + + edges_normals = [] + for edge, triangle in zip(surface_edges, edges_in_triangle): + triangle = triangles[triangle].squeeze() + other_point = vertices[np.setdiff1d(triangle, edge)].squeeze() + edge = vertices[edge] + u = (other_point[0] - edge[0][0]) * (edge[0][0] - edge[1][0]) + ( + other_point[1] - edge[0][1] + ) * (edge[0][1] - edge[1][1]) + u = u / np.sum((edge[0] - edge[1]) ** 2) + edge_normal = edge[0] + u * (edge[0] - edge[1]) + edge_normal = edge_normal - other_point + edges_normals.append(edge_normal) + + edges_normals = np.array(edges_normals, dtype=paddle.get_default_dtype()) + edges_normals = edges_normals / np.linalg.norm(edges_normals, axis=1)[:, None] + + new_mesh = open3d.geometry.TriangleMesh() + new_vertices = [] + for point in set(surface_edges.reshape(-1)): + index = np.argwhere(surface_edges == point)[:, 0] + normal = edges_normals[index] + d = np.ones(len(index), dtype=paddle.get_default_dtype()) * distance + new_point = np.linalg.lstsq(normal, d, rcond=None)[0] + new_point = vertices[point] + new_point + new_vertices.append(new_point) + + new_vertices = np.hstack( + ( + np.array(new_vertices, dtype=paddle.get_default_dtype()), + np.zeros((len(new_vertices), 1), dtype=paddle.get_default_dtype()), + ) + ) + new_mesh.vertices = open3d.utility.Vector3dVector(new_vertices) + new_mesh.triangles = open3d.utility.Vector3iVector(triangles) + new_mesh.compute_triangle_normals() + new_mesh.compute_vertex_normals() + return new_mesh diff --git a/ppsci/geometry/mesh.py b/ppsci/geometry/mesh.py index 8a363ca36b..e991c68f1b 100644 --- a/ppsci/geometry/mesh.py +++ b/ppsci/geometry/mesh.py @@ -1,1392 +1,1392 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
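# Illustrative usage sketch (not from the patch itself): one way `pymesh_inflation`
# from ppsci/geometry/inflation.py above might be called. It assumes pymesh and
# open3d are importable; the unit-box mesh below is a hypothetical example input.
import numpy as np
import pymesh

from ppsci.geometry.inflation import pymesh_inflation

box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1]))
inflated = pymesh_inflation(box, 0.1)   # positive distance inflates outward along face normals
deflated = pymesh_inflation(box, -0.1)  # negative distance inflates inward
print(inflated.vertices.shape, inflated.faces.shape)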
- -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle - -try: - from stl import mesh as np_mesh_module -except ModuleNotFoundError: - pass -except ImportError: - pass - -from typing_extensions import Literal - -from ppsci.geometry import geometry -from ppsci.geometry import geometry_3d -from ppsci.geometry import sampler -from ppsci.geometry import sdf as sdf_module -from ppsci.utils import checker -from ppsci.utils import misc - -if TYPE_CHECKING: - import pymesh - - -class Mesh(geometry.Geometry): - """Class for mesh geometry. - - Args: - mesh (Union[str, Mesh]): Mesh file path or mesh object, such as "/path/to/mesh.stl". - - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.Mesh("/path/to/mesh.stl") # doctest: +SKIP - """ - - def __init__(self, mesh: Union["pymesh.Mesh", str]): - # check if pymesh is installed when using Mesh Class - if not checker.dynamic_import_to_globals(["pymesh"]): - raise ImportError( - "Could not import pymesh python package." - "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." - ) - import pymesh - - if isinstance(mesh, str): - self.py_mesh = pymesh.meshio.load_mesh(mesh) - elif isinstance(mesh, pymesh.Mesh): - self.py_mesh = mesh - else: - raise ValueError("arg `mesh` should be path string or `pymesh.Mesh`") - - self.init_mesh() - - @classmethod - def from_pymesh(cls, mesh: "pymesh.Mesh") -> "Mesh": - """Instantiate Mesh object with given PyMesh object. - - Args: - mesh (pymesh.Mesh): PyMesh object. - - Returns: - Mesh: Instantiated ppsci.geometry.Mesh object. - - Examples: - >>> import ppsci - >>> import pymesh # doctest: +SKIP - >>> import numpy as np # doctest: +SKIP - >>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP - >>> mesh = ppsci.geometry.Mesh.from_pymesh(box) # doctest: +SKIP - >>> print(mesh.vertices) # doctest: +SKIP - [[0. 0. 0.] - [1. 0. 0.] - [1. 1. 0.] - [0. 1. 0.] - [0. 0. 1.] - [1. 0. 1.] - [1. 1. 1.] - [0. 1. 1.]] - """ - # check if pymesh is installed when using Mesh Class - if not checker.dynamic_import_to_globals(["pymesh"]): - raise ImportError( - "Could not import pymesh python package." - "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." - ) - import pymesh - - if isinstance(mesh, pymesh.Mesh): - return cls(mesh) - else: - raise ValueError( - f"arg `mesh` should be type of `pymesh.Mesh`, but got {type(mesh)}" - ) - - def init_mesh(self): - """Initialize necessary variables for mesh""" - if "face_normal" not in self.py_mesh.get_attribute_names(): - self.py_mesh.add_attribute("face_normal") - self.face_normal = self.py_mesh.get_attribute("face_normal").reshape([-1, 3]) - - if not checker.dynamic_import_to_globals(["open3d"]): - raise ImportError( - "Could not import open3d python package. " - "Please install it with `pip install open3d`." 
- ) - import open3d - - self.open3d_mesh = open3d.geometry.TriangleMesh( - open3d.utility.Vector3dVector(np.array(self.py_mesh.vertices)), - open3d.utility.Vector3iVector(np.array(self.py_mesh.faces)), - ) - self.open3d_mesh.compute_vertex_normals() - - self.vertices = self.py_mesh.vertices - self.faces = self.py_mesh.faces - self.vectors = self.vertices[self.faces] - super().__init__( - self.vertices.shape[-1], - (np.amin(self.vertices, axis=0), np.amax(self.vertices, axis=0)), - np.inf, - ) - self.v0 = self.vectors[:, 0] - self.v1 = self.vectors[:, 1] - self.v2 = self.vectors[:, 2] - self.num_vertices = self.py_mesh.num_vertices - self.num_faces = self.py_mesh.num_faces - - if not checker.dynamic_import_to_globals(["pysdf"]): - raise ImportError( - "Could not import pysdf python package. " - "Please install open3d with `pip install pysdf`." - ) - import pysdf - - self.pysdf = pysdf.SDF(self.vertices, self.faces) - self.bounds = ( - ((np.min(self.vectors[:, :, 0])), np.max(self.vectors[:, :, 0])), - ((np.min(self.vectors[:, :, 1])), np.max(self.vectors[:, :, 1])), - ((np.min(self.vectors[:, :, 2])), np.max(self.vectors[:, :, 2])), - ) - - def sdf_func(self, points: np.ndarray) -> np.ndarray: - """Compute signed distance field. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape is [N, 3] - - Returns: - np.ndarray: SDF values of input points without squared, the shape is [N, 1]. - - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. - """ - if not checker.dynamic_import_to_globals(["pymesh"]): - raise ImportError( - "Could not import pymesh python package." - "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." - ) - import pymesh - - sdf, _, _, _ = pymesh.signed_distance_to_mesh(self.py_mesh, points) - sdf = sdf[..., np.newaxis].astype(paddle.get_default_dtype()) - return sdf - - def is_inside(self, x): - # NOTE: point on boundary is included - return self.pysdf.contains(x) - - def on_boundary(self, x): - return np.isclose(self.sdf_func(x), 0.0).ravel() - - def translate(self, translation: np.ndarray, relative: bool = True) -> "Mesh": - """Translate by given offsets. - - NOTE: This API generate a completely new Mesh object with translated geometry, - without modifying original Mesh object inplace. - - Args: - translation (np.ndarray): Translation offsets, numpy array of shape (3,): - [offset_x, offset_y, offset_z]. - relative (bool, optional): Whether translate relatively. Defaults to True. - - Returns: - Mesh: Translated Mesh object. - - Examples: - >>> import ppsci - >>> import pymesh # doctest: +SKIP - >>> import numpy as np - >>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP - >>> mesh = ppsci.geometry.Mesh(box) # doctest: +SKIP - >>> print(mesh.vertices) # doctest: +SKIP - [[0. 0. 0.] - [1. 0. 0.] - [1. 1. 0.] - [0. 1. 0.] - [0. 0. 1.] - [1. 0. 1.] - [1. 1. 1.] - [0. 1. 1.]] - >>> print(mesh.translate((-0.5, 0, 0.5), False).vertices) # the center is moved to the translation vector. # doctest: +SKIP - [[-1. -0.5 0. ] - [ 0. -0.5 0. ] - [ 0. 0.5 0. ] - [-1. 0.5 0. ] - [-1. -0.5 1. ] - [ 0. -0.5 1. ] - [ 0. 0.5 1. ] - [-1. 0.5 1. 
]] - >>> print(mesh.translate((-0.5, 0, 0.5), True).vertices) # the translation vector is directly added to the geometry coordinates # doctest: +SKIP - [[-0.5 0. 0.5] - [ 0.5 0. 0.5] - [ 0.5 1. 0.5] - [-0.5 1. 0.5] - [-0.5 0. 1.5] - [ 0.5 0. 1.5] - [ 0.5 1. 1.5] - [-0.5 1. 1.5]] - """ - vertices = np.array(self.vertices, dtype=paddle.get_default_dtype()) - faces = np.array(self.faces) - - if not checker.dynamic_import_to_globals(("open3d", "pymesh")): - raise ImportError( - "Could not import open3d and pymesh python package. " - "Please install open3d with `pip install open3d` and " - "pymesh as https://paddlescience-docs.readthedocs.io/zh/latest/zh/install_setup/#__tabbed_4_1" - ) - import open3d # isort:skip - import pymesh # isort:skip - - open3d_mesh = open3d.geometry.TriangleMesh( - open3d.utility.Vector3dVector(vertices), - open3d.utility.Vector3iVector(faces), - ) - open3d_mesh = open3d_mesh.translate(translation, relative) - translated_mesh = pymesh.form_mesh( - np.asarray(open3d_mesh.vertices, dtype=paddle.get_default_dtype()), faces - ) - # Generate a new Mesh object using class method - return Mesh.from_pymesh(translated_mesh) - - def scale( - self, scale: float, center: Tuple[float, float, float] = (0, 0, 0) - ) -> "Mesh": - """Scale by given scale coefficient and center coordinate. - - NOTE: This API generate a completely new Mesh object with scaled geometry, - without modifying original Mesh object inplace. - - Args: - scale (float): Scale coefficient. - center (Tuple[float,float,float], optional): Center coordinate, [x, y, z]. - Defaults to (0, 0, 0). - - Returns: - Mesh: Scaled Mesh object. - - Examples: - >>> import ppsci - >>> import pymesh # doctest: +SKIP - >>> import numpy as np - >>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP - >>> mesh = ppsci.geometry.Mesh(box) # doctest: +SKIP - >>> print(mesh.vertices) # doctest: +SKIP - [[0. 0. 0.] - [1. 0. 0.] - [1. 1. 0.] - [0. 1. 0.] - [0. 0. 1.] - [1. 0. 1.] - [1. 1. 1.] - [0. 1. 1.]] - >>> mesh = mesh.scale(2, (0.25, 0.5, 0.75)) # doctest: +SKIP - >>> print(mesh.vertices) # doctest: +SKIP - [[-0.25 -0.5 -0.75] - [ 1.75 -0.5 -0.75] - [ 1.75 1.5 -0.75] - [-0.25 1.5 -0.75] - [-0.25 -0.5 1.25] - [ 1.75 -0.5 1.25] - [ 1.75 1.5 1.25] - [-0.25 1.5 1.25]] - """ - vertices = np.array(self.vertices, dtype=paddle.get_default_dtype()) - faces = np.array(self.faces, dtype=paddle.get_default_dtype()) - - if not checker.dynamic_import_to_globals(("open3d", "pymesh")): - raise ImportError( - "Could not import open3d and pymesh python package. " - "Please install open3d with `pip install open3d` and " - "pymesh as https://pymesh.readthedocs.io/en/latest/installation.html." 
- ) - import open3d # isort:skip - import pymesh # isort:skip - - open3d_mesh = open3d.geometry.TriangleMesh( - open3d.utility.Vector3dVector(vertices), - open3d.utility.Vector3iVector(faces), - ) - open3d_mesh = open3d_mesh.scale(scale, center) - scaled_pymesh = pymesh.form_mesh( - np.asarray(open3d_mesh.vertices, dtype=paddle.get_default_dtype()), faces - ) - # Generate a new Mesh object using class method - return Mesh.from_pymesh(scaled_pymesh) - - def uniform_boundary_points(self, n: int): - """Compute the equi-spaced points on the boundary.""" - return self.pysdf.sample_surface(n) - - def inflated_random_points(self, n, distance, random="pseudo", criteria=None): - if not isinstance(n, (tuple, list)): - n = [n] - if not isinstance(distance, (tuple, list)): - distance = [distance] - if len(n) != len(distance): - raise ValueError( - f"len(n)({len(n)}) should be equal to len(distance)({len(distance)})" - ) - - from ppsci.geometry import inflation - - all_points = [] - all_areas = [] - for _n, _dist in zip(n, distance): - inflated_mesh = Mesh(inflation.pymesh_inflation(self.py_mesh, _dist)) - points, areas = inflated_mesh.random_points(_n, random, criteria) - all_points.append(points) - all_areas.append(areas) - - all_points = np.concatenate(all_points, axis=0) - all_areas = np.concatenate(all_areas, axis=0) - return all_points, all_areas - - def _approximate_area( - self, - random: Literal["pseudo"] = "pseudo", - criteria: Optional[Callable] = None, - n_appr: int = 10000, - ) -> float: - """Approximate area with given `criteria` and `n_appr` points by Monte Carlo - algorithm. - - Args: - random (str, optional): Random method. Defaults to "pseudo". - criteria (Optional[Callable]): Criteria function. Defaults to None. - n_appr (int): Number of points for approximating area. Defaults to 10000. - - Returns: - float: Approximation area with given criteria. 
- """ - triangle_areas = area_of_triangles(self.v0, self.v1, self.v2) - triangle_probabilities = triangle_areas / np.linalg.norm(triangle_areas, ord=1) - triangle_index = np.arange(triangle_probabilities.shape[0]) - npoint_per_triangle = np.random.choice( - triangle_index, n_appr, p=triangle_probabilities - ) - npoint_per_triangle, _ = np.histogram( - npoint_per_triangle, - np.arange(triangle_probabilities.shape[0] + 1) - 0.5, - ) - - appr_areas = [] - if criteria is not None: - aux_points = [] - - for i, npoint in enumerate(npoint_per_triangle): - if npoint == 0: - continue - # sample points for computing criteria mask if criteria is given - if criteria is not None: - points_at_triangle_i = sample_in_triangle( - self.v0[i], self.v1[i], self.v2[i], npoint, random - ) - aux_points.append(points_at_triangle_i) - - appr_areas.append( - np.full( - (npoint, 1), triangle_areas[i] / npoint, paddle.get_default_dtype() - ) - ) - appr_areas = np.concatenate(appr_areas, axis=0) # [n_appr, 1] - - # set invalid area to 0 by computing criteria mask with auxiliary points - if criteria is not None: - aux_points = np.concatenate(aux_points, axis=0) # [n_appr, 3] - criteria_mask = criteria(*np.split(aux_points, self.ndim, 1)) - appr_areas *= criteria_mask - return appr_areas.sum() - - def random_boundary_points(self, n, random="pseudo"): - triangle_area = area_of_triangles(self.v0, self.v1, self.v2) - triangle_prob = triangle_area / np.linalg.norm(triangle_area, ord=1) - npoint_per_triangle = np.random.choice( - np.arange(len(triangle_prob)), n, p=triangle_prob - ) - npoint_per_triangle, _ = np.histogram( - npoint_per_triangle, np.arange(len(triangle_prob) + 1) - 0.5 - ) - - points = [] - normal = [] - areas = [] - for i, npoint in enumerate(npoint_per_triangle): - if npoint == 0: - continue - points_at_triangle_i = sample_in_triangle( - self.v0[i], self.v1[i], self.v2[i], npoint, random - ) - normal_at_triangle_i = np.tile(self.face_normal[i], (npoint, 1)).astype( - paddle.get_default_dtype() - ) - areas_at_triangle_i = np.full( - (npoint, 1), - triangle_area[i] / npoint, - dtype=paddle.get_default_dtype(), - ) - - points.append(points_at_triangle_i) - normal.append(normal_at_triangle_i) - areas.append(areas_at_triangle_i) - - points = np.concatenate(points, axis=0) - normal = np.concatenate(normal, axis=0) - areas = np.concatenate(areas, axis=0) - - return points, normal, areas - - def sample_boundary( - self, - n: int, - random: Literal["pseudo"] = "pseudo", - criteria: Optional[Callable[..., np.ndarray]] = None, - evenly: bool = False, - inflation_dist: Union[float, Tuple[float, ...]] = None, - ) -> Dict[str, np.ndarray]: - # TODO(sensen): Support for time-dependent points(repeat data in time) - if inflation_dist is not None: - if not isinstance(n, (tuple, list)): - n = [n] - if not isinstance(inflation_dist, (tuple, list)): - inflation_dist = [inflation_dist] - if len(n) != len(inflation_dist): - raise ValueError( - f"len(n)({len(n)}) should be equal to len(inflation_dist)({len(inflation_dist)})" - ) - - from ppsci.geometry import inflation - - inflated_data_dict = {} - for _n, _dist in zip(n, inflation_dist): - # 1. manually inflate mesh at first - inflated_mesh = Mesh(inflation.pymesh_inflation(self.py_mesh, _dist)) - # 2. 
compute all data by sample_boundary with `inflation_dist=None` - data_dict = inflated_mesh.sample_boundary( - _n, - random, - criteria, - evenly, - inflation_dist=None, - ) - for key, value in data_dict.items(): - if key not in inflated_data_dict: - inflated_data_dict[key] = value - else: - inflated_data_dict[key] = np.concatenate( - (inflated_data_dict[key], value), axis=0 - ) - return inflated_data_dict - else: - if evenly: - raise ValueError( - "Can't sample evenly on mesh now, please set evenly=False." - ) - _size, _ntry, _nsuc = 0, 0, 0 - all_points = [] - all_normal = [] - while _size < n: - points, normal, _ = self.random_boundary_points(n, random) - if criteria is not None: - criteria_mask = criteria( - *np.split(points, self.ndim, axis=1) - ).ravel() - points = points[criteria_mask] - normal = normal[criteria_mask] - - if len(points) > n - _size: - points = points[: n - _size] - normal = normal[: n - _size] - - all_points.append(points) - all_normal.append(normal) - - _size += len(points) - _ntry += 1 - if len(points) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample boundary points failed, " - "please check correctness of geometry and given criteria." - ) - - all_points = np.concatenate(all_points, axis=0) - all_normal = np.concatenate(all_normal, axis=0) - appr_area = self._approximate_area(random, criteria) - all_areas = np.full((n, 1), appr_area / n, paddle.get_default_dtype()) - - x_dict = misc.convert_to_dict(all_points, self.dim_keys) - normal_dict = misc.convert_to_dict( - all_normal, [f"normal_{key}" for key in self.dim_keys if key != "t"] - ) - area_dict = misc.convert_to_dict(all_areas, ["area"]) - return {**x_dict, **normal_dict, **area_dict} - - def random_points(self, n, random="pseudo", criteria=None): - _size = 0 - all_points = [] - cuboid = geometry_3d.Cuboid( - [bound[0] for bound in self.bounds], - [bound[1] for bound in self.bounds], - ) - _nsample, _nvalid = 0, 0 - while _size < n: - random_points = cuboid.random_points(n, random) - valid_mask = self.is_inside(random_points) - - if criteria: - valid_mask &= criteria( - *np.split(random_points, self.ndim, axis=1) - ).ravel() - valid_points = random_points[valid_mask] - _nvalid += len(valid_points) - - if len(valid_points) > n - _size: - valid_points = valid_points[: n - _size] - - all_points.append(valid_points) - _size += len(valid_points) - _nsample += n - - all_points = np.concatenate(all_points, axis=0) - cuboid_volume = np.prod([b[1] - b[0] for b in self.bounds]) - all_areas = np.full( - (n, 1), cuboid_volume * (_nvalid / _nsample) / n, paddle.get_default_dtype() - ) - return all_points, all_areas - - def sample_interior( - self, - n: int, - random: Literal["pseudo"] = "pseudo", - criteria: Optional[Callable[..., np.ndarray]] = None, - evenly: bool = False, - compute_sdf_derivatives: bool = False, - ): - """Sample random points in the geometry and return those meet criteria.""" - if evenly: - # TODO(sensen): Implement uniform sample for mesh interior. - raise NotImplementedError( - "uniformly sample for interior in mesh is not support yet, " - "you may need to set evenly=False in config dict of constraint" - ) - points, areas = self.random_points(n, random, criteria) - - x_dict = misc.convert_to_dict(points, self.dim_keys) - area_dict = misc.convert_to_dict(areas, ("area",)) - - # NOTE: add negative to the sdf values because weight should be positive. 
- sdf = -self.sdf_func(points) - sdf_dict = misc.convert_to_dict(sdf, ("sdf",)) - - sdf_derives_dict = {} - if compute_sdf_derivatives: - sdf_derives = -self.sdf_derivatives(points) - sdf_derives_dict = misc.convert_to_dict( - sdf_derives, tuple(f"sdf__{key}" for key in self.dim_keys) - ) - - return {**x_dict, **area_dict, **sdf_dict, **sdf_derives_dict} - - def union(self, other: "Mesh"): - if not checker.dynamic_import_to_globals(["pymesh"]): - raise ImportError( - "Could not import pymesh python package. " - "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." - ) - import pymesh - - csg = pymesh.CSGTree( - {"union": [{"mesh": self.py_mesh}, {"mesh": other.py_mesh}]} - ) - return Mesh(csg.mesh) - - def __or__(self, other: "Mesh"): - return self.union(other) - - def __add__(self, other: "Mesh"): - return self.union(other) - - def difference(self, other: "Mesh"): - if not checker.dynamic_import_to_globals(["pymesh"]): - raise ImportError( - "Could not import pymesh python package. " - "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." - ) - import pymesh - - csg = pymesh.CSGTree( - {"difference": [{"mesh": self.py_mesh}, {"mesh": other.py_mesh}]} - ) - return Mesh(csg.mesh) - - def __sub__(self, other: "Mesh"): - return self.difference(other) - - def intersection(self, other: "Mesh"): - if not checker.dynamic_import_to_globals(["pymesh"]): - raise ImportError( - "Could not import pymesh python package. " - "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." - ) - import pymesh - - csg = pymesh.CSGTree( - {"intersection": [{"mesh": self.py_mesh}, {"mesh": other.py_mesh}]} - ) - return Mesh(csg.mesh) - - def __and__(self, other: "Mesh"): - return self.intersection(other) - - def __str__(self) -> str: - """Return the name of class""" - return ", ".join( - [ - self.__class__.__name__, - f"num_vertices = {self.num_vertices}", - f"num_faces = {self.num_faces}", - f"bounds = {self.bounds}", - f"dim_keys = {self.dim_keys}", - ] - ) - - -class SDFMesh(geometry.Geometry): - """Class for SDF geometry, a kind of implicit surface mesh. - - Args: - vectors (np.ndarray): Vectors of triangles of mesh with shape [M, 3, 3]. - normals (np.ndarray): Unit normals of each triangle face with shape [M, 3]. - sdf_func (Callable[[np.ndarray, bool], np.ndarray]): Signed distance function - of the triangle mesh. 
- - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.SDFMesh.from_stl("/path/to/mesh.stl") # doctest: +SKIP - """ - - eps = 1e-6 - - def __init__( - self, - vectors: np.ndarray, - normals: np.ndarray, - sdf_func: Callable[[np.ndarray, bool], np.ndarray], - ): - if vectors.shape[1:] != (3, 3): - raise ValueError( - f"The shape of `vectors` must be [M, 3, 3], but got {vectors.shape}" - ) - if normals.shape[1] != 3: - raise ValueError( - f"The shape of `normals` must be [M, 3], but got {normals.shape}" - ) - self.vectors = vectors - self.face_normal = normals - self.sdf_func = sdf_func # overwrite sdf_func - self.bounds = ( - ((np.min(self.vectors[:, :, 0])), np.max(self.vectors[:, :, 0])), - ((np.min(self.vectors[:, :, 1])), np.max(self.vectors[:, :, 1])), - ((np.min(self.vectors[:, :, 2])), np.max(self.vectors[:, :, 2])), - ) - self.ndim = 3 - super().__init__( - self.vectors.shape[-1], - (np.amin(self.vectors, axis=(0, 1)), np.amax(self.vectors, axis=(0, 1))), - np.inf, - ) - - @property - def v0(self) -> np.ndarray: - return self.vectors[:, 0] - - @property - def v1(self) -> np.ndarray: - return self.vectors[:, 1] - - @property - def v2(self) -> np.ndarray: - return self.vectors[:, 2] - - @classmethod - def from_stl(cls, mesh_file: str) -> "SDFMesh": - """Instantiate SDFMesh from given mesh file. - - Args: - mesh_file (str): Path to triangle mesh file. - - Returns: - SDFMesh: Instantiated ppsci.geometry.SDFMesh object. - - Examples: - >>> import ppsci - >>> import pymesh # doctest: +SKIP - >>> import numpy as np # doctest: +SKIP - >>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP - >>> pymesh.save_mesh("box.stl", box) # doctest: +SKIP - >>> mesh = ppsci.geometry.SDFMesh.from_stl("box.stl") # doctest: +SKIP - >>> print(sdfmesh.vectors.shape) # doctest: +SKIP - (12, 3, 3) - """ - # check if pymesh is installed when using Mesh Class - if not checker.dynamic_import_to_globals(["stl"]): - raise ImportError( - "Could not import stl python package. " - "Please install numpy-stl with: pip install 'numpy-stl>=2.16,<2.17'" - ) - - np_mesh_obj = np_mesh_module.Mesh.from_file(mesh_file) - return cls( - np_mesh_obj.vectors, - np_mesh_obj.get_unit_normals(), - make_sdf(np_mesh_obj.vectors), - ) - - def sdf_func( - self, points: np.ndarray, compute_sdf_derivatives: bool = False - ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: - """Compute signed distance field. - - Args: - points (np.ndarray): The coordinate points used to calculate the SDF value, - the shape is [N, 3] - compute_sdf_derivatives (bool): Whether to compute SDF derivatives. - Defaults to False. - - Returns: - Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: - If compute_sdf_derivatives is True, then return both SDF values([N, 1]) - and their derivatives([N, 3]); otherwise only return SDF values([N, 1]). - - NOTE: This function usually returns ndarray with negative values, because - according to the definition of SDF, the SDF value of the coordinate point inside - the object(interior points) is negative, the outside is positive, and the edge - is 0. Therefore, when used for weighting, a negative sign is often added before - the result of this function. 
- """ - # normalize triangles - x_min, y_min, z_min = np.min(points, axis=0) - x_max, y_max, z_max = np.max(points, axis=0) - max_dis = max(max((x_max - x_min), (y_max - y_min)), (z_max - z_min)) - store_triangles = np.array(self.vectors, dtype=np.float64) - store_triangles[:, :, 0] -= x_min - store_triangles[:, :, 1] -= y_min - store_triangles[:, :, 2] -= z_min - store_triangles *= 1 / max_dis - store_triangles = store_triangles.reshape([-1, 3]) - - # normalize query points - points = points.copy() - points[:, 0] -= x_min - points[:, 1] -= y_min - points[:, 2] -= z_min - points *= 1 / max_dis - points = points.astype(np.float64).ravel() - - # compute sdf values for query points - sdf = sdf_module.signed_distance_field( - store_triangles, - np.arange((store_triangles.shape[0])), - points, - include_hit_points=compute_sdf_derivatives, - ) - if compute_sdf_derivatives: - sdf, hit_points = sdf - - sdf = sdf.numpy() # [N] - sdf = np.expand_dims(max_dis * sdf, axis=1) # [N, 1] - - if compute_sdf_derivatives: - hit_points = hit_points.numpy() # [N, 3] - # Gradient of SDF is the unit vector from the query point to the hit point. - sdf_derives = hit_points - points - sdf_derives /= np.linalg.norm(sdf_derives, axis=1, keepdims=True) - return sdf, sdf_derives - - return sdf - - def is_inside(self, x): - # NOTE: point on boundary is included - return np.less(self.sdf_func(x), 0.0).ravel() - - def on_boundary(self, x: np.ndarray, normal: np.ndarray) -> np.ndarray: - x_plus = x + self.eps * normal - x_minus = x - self.eps * normal - - sdf_x_plus = self.sdf_func(x_plus) - sdf_x_minus = self.sdf_func(x_minus) - mask_on_boundary = np.less_equal(sdf_x_plus * sdf_x_minus, 0) - return mask_on_boundary.ravel() - - def translate(self, translation: np.ndarray) -> "SDFMesh": - """Translate by given offsets. - - NOTE: This API generate a completely new Mesh object with translated geometry, - without modifying original Mesh object inplace. - - Args: - translation (np.ndarray): Translation offsets, numpy array of shape (3,): - [offset_x, offset_y, offset_z]. - - Returns: - Mesh: Translated Mesh object. - - Examples: - >>> import ppsci - >>> import pymesh # doctest: +SKIP - >>> mesh = ppsci.geometry.SDFMesh.from_stl('/path/to/mesh.stl') # doctest: +SKIP - >>> mesh = mesh.translate(np.array([1, -1, 2])) # doctest: +SKIP - """ - new_vectors = self.vectors + translation.reshape([1, 1, 3]) - - return SDFMesh( - new_vectors, - self.face_normal, - make_sdf(new_vectors), - ) - - def scale(self, scale: float) -> "SDFMesh": - """Scale by given scale coefficient and center coordinate. - - NOTE: This API generate a completely new Mesh object with scaled geometry, - without modifying original Mesh object inplace. - - Args: - scale (float): Scale coefficient. - - Returns: - Mesh: Scaled Mesh object. - - Examples: - >>> import ppsci - >>> import pymesh # doctest: +SKIP - >>> mesh = ppsci.geometry.SDFMesh.from_stl('/path/to/mesh.stl') # doctest: +SKIP - >>> mesh = mesh.scale(np.array([1.3, 1.5, 2.0])) # doctest: +SKIP - """ - new_vectors = self.vectors * scale - return SDFMesh( - new_vectors, - self.face_normal, - make_sdf(new_vectors), - ) - - def uniform_boundary_points(self, n: int): - """Compute the equi-spaced points on the boundary.""" - raise NotImplementedError( - "'uniform_boundary_points' is not available in SDFMesh." - ) - - def inflated_random_points(self, n, distance, random="pseudo", criteria=None): - raise NotImplementedError( - "'inflated_random_points' is not available in SDFMesh." 
- ) - - def _approximate_area( - self, - random: Literal["pseudo"] = "pseudo", - criteria: Optional[Callable] = None, - n_appr: int = 10000, - ) -> float: - """Approximate area with given `criteria` and `n_appr` points by Monte Carlo - algorithm. - - Args: - random (str, optional): Random method. Defaults to "pseudo". - criteria (Optional[Callable]): Criteria function. Defaults to None. - n_appr (int): Number of points for approximating area. Defaults to 10000. - - Returns: - float: Approximation area with given criteria. - """ - triangle_areas = area_of_triangles(self.v0, self.v1, self.v2) - triangle_probabilities = triangle_areas / np.linalg.norm(triangle_areas, ord=1) - triangle_index = np.arange(triangle_probabilities.shape[0]) - npoint_per_triangle = np.random.choice( - triangle_index, n_appr, p=triangle_probabilities - ) - npoint_per_triangle, _ = np.histogram( - npoint_per_triangle, - np.arange(triangle_probabilities.shape[0] + 1) - 0.5, - ) - - aux_points = [] - aux_normals = [] - appr_areas = [] - - for i, npoint in enumerate(npoint_per_triangle): - if npoint == 0: - continue - # sample points for computing criteria mask if criteria is given - points_at_triangle_i = sample_in_triangle( - self.v0[i], self.v1[i], self.v2[i], npoint, random - ) - normal_at_triangle_i = np.tile( - self.face_normal[i].reshape(1, 3), (npoint, 1) - ) - aux_points.append(points_at_triangle_i) - aux_normals.append(normal_at_triangle_i) - appr_areas.append( - np.full( - (npoint, 1), triangle_areas[i] / npoint, paddle.get_default_dtype() - ) - ) - - aux_points = np.concatenate(aux_points, axis=0) # [n_appr, 3] - aux_normals = np.concatenate(aux_normals, axis=0) # [n_appr, 3] - appr_areas = np.concatenate(appr_areas, axis=0) # [n_appr, 1] - valid_mask = self.on_boundary(aux_points, aux_normals)[:, None] - # set invalid area to 0 by computing criteria mask with auxiliary points - if criteria is not None: - criteria_mask = criteria(*np.split(aux_points, self.ndim, 1)) - assert valid_mask.shape == criteria_mask.shape - valid_mask = np.logical_and(valid_mask, criteria_mask) - - appr_areas *= valid_mask - - return appr_areas.sum() - - def random_boundary_points( - self, n, random="pseudo" - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - triangle_area = area_of_triangles(self.v0, self.v1, self.v2) - triangle_prob = triangle_area / np.linalg.norm(triangle_area, ord=1) - npoint_per_triangle = np.random.choice( - np.arange(len(triangle_prob)), n, p=triangle_prob - ) - npoint_per_triangle, _ = np.histogram( - npoint_per_triangle, np.arange(len(triangle_prob) + 1) - 0.5 - ) - - points = [] - normal = [] - areas = [] - for i, npoint in enumerate(npoint_per_triangle): - if npoint == 0: - continue - points_at_triangle_i = sample_in_triangle( - self.v0[i], self.v1[i], self.v2[i], npoint, random - ) - normal_at_triangle_i = np.tile(self.face_normal[i], (npoint, 1)).astype( - paddle.get_default_dtype() - ) - areas_at_triangle_i = np.full( - (npoint, 1), - triangle_area[i] / npoint, - dtype=paddle.get_default_dtype(), - ) - - points.append(points_at_triangle_i) - normal.append(normal_at_triangle_i) - areas.append(areas_at_triangle_i) - - points = np.concatenate(points, axis=0) - normal = np.concatenate(normal, axis=0) - areas = np.concatenate(areas, axis=0) - - return points, normal, areas - - def sample_boundary( - self, - n: int, - random: Literal["pseudo"] = "pseudo", - criteria: Optional[Callable[..., np.ndarray]] = None, - evenly: bool = False, - inflation_dist: Union[float, Tuple[float, ...]] = None, - ) -> Dict[str, 
np.ndarray]: - # TODO(sensen): Support for time-dependent points(repeat data in time) - if inflation_dist is not None: - raise NotImplementedError("Not implemented yet") - else: - if evenly: - raise ValueError( - "Can't sample evenly on mesh now, please set evenly=False." - ) - _size, _ntry, _nsuc = 0, 0, 0 - all_points = [] - all_normal = [] - while _size < n: - points, normal, _ = self.random_boundary_points(n, random) - valid_mask = self.on_boundary(points, normal) - - if criteria is not None: - criteria_mask = criteria( - *np.split(points, self.ndim, axis=1) - ).ravel() - assert valid_mask.shape == criteria_mask.shape - valid_mask = np.logical_and(valid_mask, criteria_mask) - - points = points[valid_mask] - normal = normal[valid_mask] - - if len(points) > n - _size: - points = points[: n - _size] - normal = normal[: n - _size] - - all_points.append(points) - all_normal.append(normal) - - _size += len(points) - _ntry += 1 - if len(points) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample boundary points failed, " - "please check correctness of geometry and given criteria." - ) - - all_points = np.concatenate(all_points, axis=0) - all_normal = np.concatenate(all_normal, axis=0) - _appr_area = self._approximate_area(random, criteria) - all_areas = np.full((n, 1), _appr_area / n, paddle.get_default_dtype()) - - x_dict = misc.convert_to_dict(all_points, self.dim_keys) - normal_dict = misc.convert_to_dict( - all_normal, [f"normal_{key}" for key in self.dim_keys if key != "t"] - ) - area_dict = misc.convert_to_dict(all_areas, ["area"]) - return {**x_dict, **normal_dict, **area_dict} - - def random_points(self, n, random="pseudo", criteria=None): - _size = 0 - all_points = [] - cuboid = geometry_3d.Cuboid( - [bound[0] for bound in self.bounds], - [bound[1] for bound in self.bounds], - ) - _nsample, _nvalid = 0, 0 - while _size < n: - random_points = cuboid.random_points(n, random) - valid_mask = self.is_inside(random_points) - - if criteria: - criteria_mask = criteria( - *np.split(random_points, self.ndim, axis=1) - ).ravel() - assert valid_mask.shape == criteria_mask.shape - valid_mask = np.logical_and(valid_mask, criteria_mask) - - valid_points = random_points[valid_mask] - _nvalid += len(valid_points) - - if len(valid_points) > n - _size: - valid_points = valid_points[: n - _size] - - all_points.append(valid_points) - _size += len(valid_points) - _nsample += n - - all_points = np.concatenate(all_points, axis=0) - cuboid_volume = np.prod([b[1] - b[0] for b in self.bounds]) - all_areas = np.full( - (n, 1), cuboid_volume * (_nvalid / _nsample) / n, paddle.get_default_dtype() - ) - return all_points, all_areas - - def sample_interior( - self, - n: int, - random: Literal["pseudo"] = "pseudo", - criteria: Optional[Callable[..., np.ndarray]] = None, - evenly: bool = False, - compute_sdf_derivatives: bool = False, - ): - """Sample random points in the geometry and return those meet criteria.""" - if evenly: - # TODO(sensen): Implement uniform sample for mesh interior. - raise NotImplementedError( - "uniformly sample for interior in mesh is not support yet, " - "you may need to set evenly=False in config dict of constraint" - ) - points, areas = self.random_points(n, random, criteria) - - x_dict = misc.convert_to_dict(points, self.dim_keys) - area_dict = misc.convert_to_dict(areas, ("area",)) - - sdf = self.sdf_func(points, compute_sdf_derivatives) - if compute_sdf_derivatives: - sdf, sdf_derives = sdf - - # NOTE: Negate sdf because weight should be positive. 
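# Worked example of the sign convention noted above (hypothetical values): a point
# lying 0.2 inside the geometry has raw SDF -0.2, a point 0.05 outside has +0.05, so
#   sdf = np.array([[-0.2], [0.05]]);  -sdf == array([[ 0.2 ], [-0.05]])
# i.e. interior points receive positive weights that grow with depth inside the geometry.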
- sdf_dict = misc.convert_to_dict(-sdf, ("sdf",)) - - sdf_derives_dict = {} - if compute_sdf_derivatives: - # NOTE: Negate sdf derivatives - sdf_derives_dict = misc.convert_to_dict( - -sdf_derives, tuple(f"sdf__{key}" for key in self.dim_keys) - ) - - return {**x_dict, **area_dict, **sdf_dict, **sdf_derives_dict} - - def union(self, other: "SDFMesh"): - new_vectors = np.concatenate([self.vectors, other.vectors], axis=0) - new_normals = np.concatenate([self.face_normal, other.face_normal], axis=0) - - def make_union_new_sdf(sdf_func1, sdf_func2): - def new_sdf_func(points: np.ndarray, compute_sdf_derivatives: bool = False): - # Invert definition of sdf to make boolean operation accurate - # see: https://iquilezles.org/articles/interiordistance/ - sdf_self = sdf_func1(points, compute_sdf_derivatives) - sdf_other = sdf_func2(points, compute_sdf_derivatives) - if compute_sdf_derivatives: - sdf_self, sdf_derives_self = sdf_self - sdf_other, sdf_derives_other = sdf_other - - computed_sdf = -np.maximum(-sdf_self, -sdf_other) - - if compute_sdf_derivatives: - computed_sdf_derives = -np.where( - sdf_self < sdf_other, - sdf_derives_self, - sdf_derives_other, - ) - return computed_sdf, computed_sdf_derives - - return computed_sdf - - return new_sdf_func - - return SDFMesh( - new_vectors, - new_normals, - make_union_new_sdf(self.sdf_func, other.sdf_func), - ) - - def __or__(self, other: "SDFMesh"): - return self.union(other) - - def __add__(self, other: "SDFMesh"): - return self.union(other) - - def difference(self, other: "SDFMesh"): - new_vectors = np.concatenate([self.vectors, other.vectors], axis=0) - new_normals = np.concatenate([self.face_normal, -other.face_normal], axis=0) - - def make_difference_new_sdf(sdf_func1, sdf_func2): - def new_sdf_func(points: np.ndarray, compute_sdf_derivatives: bool = False): - # Invert definition of sdf to make boolean operation accurate - # see: https://iquilezles.org/articles/interiordistance/ - sdf_self = sdf_func1(points, compute_sdf_derivatives) - sdf_other = sdf_func2(points, compute_sdf_derivatives) - if compute_sdf_derivatives: - sdf_self, sdf_derives_self = sdf_self - sdf_other, sdf_derives_other = sdf_other - - computed_sdf = -np.minimum(-sdf_self, sdf_other) - - if compute_sdf_derivatives: - computed_sdf_derives = np.where( - -sdf_self < sdf_other, - -sdf_derives_self, - sdf_derives_other, - ) - return computed_sdf, computed_sdf_derives - - return computed_sdf - - return new_sdf_func - - return SDFMesh( - new_vectors, - new_normals, - make_difference_new_sdf(self.sdf_func, other.sdf_func), - ) - - def __sub__(self, other: "SDFMesh"): - return self.difference(other) - - def intersection(self, other: "SDFMesh"): - new_vectors = np.concatenate([self.vectors, other.vectors], axis=0) - new_normals = np.concatenate([self.face_normal, other.face_normal], axis=0) - - def make_intersection_new_sdf(sdf_func1, sdf_func2): - def new_sdf_func(points: np.ndarray, compute_sdf_derivatives: bool = False): - # Invert definition of sdf to make boolean operation accurate - # see: https://iquilezles.org/articles/interiordistance/ - sdf_self = sdf_func1(points, compute_sdf_derivatives) - sdf_other = sdf_func2(points, compute_sdf_derivatives) - if compute_sdf_derivatives: - sdf_self, sdf_derives_self = sdf_self - sdf_other, sdf_derives_other = sdf_other - - computed_sdf = -np.minimum(-sdf_self, -sdf_other) - - if compute_sdf_derivatives: - computed_sdf_derives = np.where( - sdf_self > sdf_other, - -sdf_derives_self, - -sdf_derives_other, - ) - return computed_sdf, 
computed_sdf_derives - - return computed_sdf - - return new_sdf_func - - return SDFMesh( - new_vectors, - new_normals, - make_intersection_new_sdf(self.sdf_func, other.sdf_func), - ) - - def __and__(self, other: "SDFMesh"): - return self.intersection(other) - - def __str__(self) -> str: - """Return the name of class""" - return ", ".join( - [ - self.__class__.__name__, - f"num_faces = {self.vectors.shape[0]}", - f"bounds = {self.bounds}", - f"dim_keys = {self.dim_keys}", - ] - ) - - -def area_of_triangles(v0, v1, v2): - """Ref https://math.stackexchange.com/questions/128991/how-to-calculate-the-area-of-a-3d-triangle - - Args: - v0 (np.ndarray): Coordinates of the first vertex of the triangle surface with shape of [N, 3]. - v1 (np.ndarray): Coordinates of the second vertex of the triangle surface with shape of [N, 3]. - v2 (np.ndarray): Coordinates of the third vertex of the triangle surface with shape of [N, 3]. - - Returns: - np.ndarray: Area of each triangle with shape of [N, ]. - """ - a = np.sqrt( - (v0[:, 0] - v1[:, 0]) ** 2 - + (v0[:, 1] - v1[:, 1]) ** 2 - + (v0[:, 2] - v1[:, 2]) ** 2 - + 1e-10 - ) - b = np.sqrt( - (v1[:, 0] - v2[:, 0]) ** 2 - + (v1[:, 1] - v2[:, 1]) ** 2 - + (v1[:, 2] - v2[:, 2]) ** 2 - + 1e-10 - ) - c = np.sqrt( - (v0[:, 0] - v2[:, 0]) ** 2 - + (v0[:, 1] - v2[:, 1]) ** 2 - + (v0[:, 2] - v2[:, 2]) ** 2 - + 1e-10 - ) - p = (a + b + c) / 2 - area = np.sqrt(p * (p - a) * (p - b) * (p - c) + 1e-10) - return area - - -def sample_in_triangle(v0, v1, v2, n, random="pseudo", criteria=None): - """ - Uniformly sample n points in an 3D triangle defined by 3 vertices v0, v1, v2 - https://math.stackexchange.com/questions/18686/uniform-random-point-in-triangle - - Args: - v0 (np.ndarray): Coordinates of the first vertex of an triangle with shape of [3, ]. - v1 (np.ndarray): Coordinates of the second vertex of an triangle with shape of [3, ]. - v2 (np.ndarray): Coordinates of the third vertex of an triangle with shape of [3, ]. - n (int): Number of points to be sampled. - - Returns: - np.ndarray: Coordinates of sampled n points with shape of [n, 3]. 
- """ - xs, ys, zs = [], [], [] - _size = 0 - while _size < n: - r1 = sampler.sample(n, 1, random).ravel() - r2 = sampler.sample(n, 1, random).ravel() - s1 = np.sqrt(r1) - x = v0[0] * (1.0 - s1) + v1[0] * (1.0 - r2) * s1 + v2[0] * r2 * s1 - y = v0[1] * (1.0 - s1) + v1[1] * (1.0 - r2) * s1 + v2[1] * r2 * s1 - z = v0[2] * (1.0 - s1) + v1[2] * (1.0 - r2) * s1 + v2[2] * r2 * s1 - - if criteria is not None: - criteria_mask = criteria(x, y, z).ravel() - x = x[criteria_mask] - y = y[criteria_mask] - z = z[criteria_mask] - - if len(x) > n - _size: - x = x[: n - _size] - y = y[: n - _size] - z = z[: n - _size] - - xs.append(x) - ys.append(y) - zs.append(z) - _size += len(x) - - xs = np.concatenate(xs, axis=0) - ys = np.concatenate(ys, axis=0) - zs = np.concatenate(zs, axis=0) - - return np.stack([xs, ys, zs], axis=1) - - -def make_sdf(vectors: np.ndarray): - def sdf_func(points: np.ndarray, compute_sdf_derivatives=False): - points = points.copy() - x_min, y_min, z_min = np.min(points, axis=0) - x_max, y_max, z_max = np.max(points, axis=0) - max_dis = max(max((x_max - x_min), (y_max - y_min)), (z_max - z_min)) - store_triangles = vectors.copy() - store_triangles[:, :, 0] -= x_min - store_triangles[:, :, 1] -= y_min - store_triangles[:, :, 2] -= z_min - store_triangles *= 1 / max_dis - store_triangles = store_triangles.reshape([-1, 3]) - points[:, 0] -= x_min - points[:, 1] -= y_min - points[:, 2] -= z_min - points *= 1 / max_dis - points = points.astype(np.float64).ravel() - - # compute sdf values - sdf = sdf_module.signed_distance_field( - store_triangles, - np.arange((store_triangles.shape[0])), - points, - include_hit_points=compute_sdf_derivatives, - ) - if compute_sdf_derivatives: - sdf, sdf_derives = sdf - - sdf = sdf.numpy() - sdf = np.expand_dims(max_dis * sdf, axis=1) - - if compute_sdf_derivatives: - sdf_derives = sdf_derives.numpy().reshape(-1) - sdf_derives = -(sdf_derives - points) - sdf_derives = np.reshape(sdf_derives, (sdf_derives.shape[0] // 3, 3)) - sdf_derives = sdf_derives / np.linalg.norm( - sdf_derives, axis=1, keepdims=True - ) - return sdf, sdf_derives - - return sdf - - return sdf_func +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import numpy as np +import paddle + +try: + from stl import mesh as np_mesh_module +except ModuleNotFoundError: + pass +except ImportError: + pass + +from typing_extensions import Literal + +from ppsci.geometry import geometry +from ppsci.geometry import geometry_3d +from ppsci.geometry import sampler +from ppsci.geometry import sdf as sdf_module +from ppsci.utils import checker +from ppsci.utils import misc + +if TYPE_CHECKING: + import pymesh + + +class Mesh(geometry.Geometry): + """Class for mesh geometry. 
+
+    Args:
+        mesh (Union[str, pymesh.Mesh]): Mesh file path or mesh object, such as "/path/to/mesh.stl".
+
+    Examples:
+        >>> import ppsci
+        >>> geom = ppsci.geometry.Mesh("/path/to/mesh.stl")  # doctest: +SKIP
+    """
+
+    def __init__(self, mesh: Union["pymesh.Mesh", str]):
+        # check if pymesh is installed when using Mesh Class
+        if not checker.dynamic_import_to_globals(["pymesh"]):
+            raise ImportError(
+                "Could not import pymesh python package. "
+                "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html."
+            )
+        import pymesh
+
+        if isinstance(mesh, str):
+            self.py_mesh = pymesh.meshio.load_mesh(mesh)
+        elif isinstance(mesh, pymesh.Mesh):
+            self.py_mesh = mesh
+        else:
+            raise ValueError("arg `mesh` should be path string or `pymesh.Mesh`")
+
+        self.init_mesh()
+
+    @classmethod
+    def from_pymesh(cls, mesh: "pymesh.Mesh") -> "Mesh":
+        """Instantiate Mesh object with given PyMesh object.
+
+        Args:
+            mesh (pymesh.Mesh): PyMesh object.
+
+        Returns:
+            Mesh: Instantiated ppsci.geometry.Mesh object.
+
+        Examples:
+            >>> import ppsci
+            >>> import pymesh  # doctest: +SKIP
+            >>> import numpy as np  # doctest: +SKIP
+            >>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1]))  # doctest: +SKIP
+            >>> mesh = ppsci.geometry.Mesh.from_pymesh(box)  # doctest: +SKIP
+            >>> print(mesh.vertices)  # doctest: +SKIP
+            [[0. 0. 0.]
+             [1. 0. 0.]
+             [1. 1. 0.]
+             [0. 1. 0.]
+             [0. 0. 1.]
+             [1. 0. 1.]
+             [1. 1. 1.]
+             [0. 1. 1.]]
+        """
+        # check if pymesh is installed when using Mesh Class
+        if not checker.dynamic_import_to_globals(["pymesh"]):
+            raise ImportError(
+                "Could not import pymesh python package. "
+                "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html."
+            )
+        import pymesh
+
+        if isinstance(mesh, pymesh.Mesh):
+            return cls(mesh)
+        else:
+            raise ValueError(
+                f"arg `mesh` should be type of `pymesh.Mesh`, but got {type(mesh)}"
+            )
+
+    def init_mesh(self):
+        """Initialize necessary variables for mesh."""
+        if "face_normal" not in self.py_mesh.get_attribute_names():
+            self.py_mesh.add_attribute("face_normal")
+        self.face_normal = self.py_mesh.get_attribute("face_normal").reshape([-1, 3])
+
+        if not checker.dynamic_import_to_globals(["open3d"]):
+            raise ImportError(
+                "Could not import open3d python package. "
+                "Please install it with `pip install open3d`."
+            )
+        import open3d
+
+        self.open3d_mesh = open3d.geometry.TriangleMesh(
+            open3d.utility.Vector3dVector(np.array(self.py_mesh.vertices)),
+            open3d.utility.Vector3iVector(np.array(self.py_mesh.faces)),
+        )
+        self.open3d_mesh.compute_vertex_normals()
+
+        self.vertices = self.py_mesh.vertices
+        self.faces = self.py_mesh.faces
+        self.vectors = self.vertices[self.faces]
+        super().__init__(
+            self.vertices.shape[-1],
+            (np.amin(self.vertices, axis=0), np.amax(self.vertices, axis=0)),
+            np.inf,
+        )
+        self.v0 = self.vectors[:, 0]
+        self.v1 = self.vectors[:, 1]
+        self.v2 = self.vectors[:, 2]
+        self.num_vertices = self.py_mesh.num_vertices
+        self.num_faces = self.py_mesh.num_faces
+
+        if not checker.dynamic_import_to_globals(["pysdf"]):
+            raise ImportError(
+                "Could not import pysdf python package. "
+                "Please install pysdf with `pip install pysdf`."
+ ) + import pysdf + + self.pysdf = pysdf.SDF(self.vertices, self.faces) + self.bounds = ( + ((np.min(self.vectors[:, :, 0])), np.max(self.vectors[:, :, 0])), + ((np.min(self.vectors[:, :, 1])), np.max(self.vectors[:, :, 1])), + ((np.min(self.vectors[:, :, 2])), np.max(self.vectors[:, :, 2])), + ) + + def sdf_func(self, points: np.ndarray) -> np.ndarray: + """Compute signed distance field. + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF value, + the shape is [N, 3] + + Returns: + np.ndarray: SDF values of input points without squared, the shape is [N, 1]. + + NOTE: This function usually returns ndarray with negative values, because + according to the definition of SDF, the SDF value of the coordinate point inside + the object(interior points) is negative, the outside is positive, and the edge + is 0. Therefore, when used for weighting, a negative sign is often added before + the result of this function. + """ + if not checker.dynamic_import_to_globals(["pymesh"]): + raise ImportError( + "Could not import pymesh python package." + "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." + ) + import pymesh + + sdf, _, _, _ = pymesh.signed_distance_to_mesh(self.py_mesh, points) + sdf = sdf[..., np.newaxis].astype(paddle.get_default_dtype()) + return sdf + + def is_inside(self, x): + # NOTE: point on boundary is included + return self.pysdf.contains(x) + + def on_boundary(self, x): + return np.isclose(self.sdf_func(x), 0.0).ravel() + + def translate(self, translation: np.ndarray, relative: bool = True) -> "Mesh": + """Translate by given offsets. + + NOTE: This API generate a completely new Mesh object with translated geometry, + without modifying original Mesh object inplace. + + Args: + translation (np.ndarray): Translation offsets, numpy array of shape (3,): + [offset_x, offset_y, offset_z]. + relative (bool, optional): Whether translate relatively. Defaults to True. + + Returns: + Mesh: Translated Mesh object. + + Examples: + >>> import ppsci + >>> import pymesh # doctest: +SKIP + >>> import numpy as np + >>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP + >>> mesh = ppsci.geometry.Mesh(box) # doctest: +SKIP + >>> print(mesh.vertices) # doctest: +SKIP + [[0. 0. 0.] + [1. 0. 0.] + [1. 1. 0.] + [0. 1. 0.] + [0. 0. 1.] + [1. 0. 1.] + [1. 1. 1.] + [0. 1. 1.]] + >>> print(mesh.translate((-0.5, 0, 0.5), False).vertices) # the center is moved to the translation vector. # doctest: +SKIP + [[-1. -0.5 0. ] + [ 0. -0.5 0. ] + [ 0. 0.5 0. ] + [-1. 0.5 0. ] + [-1. -0.5 1. ] + [ 0. -0.5 1. ] + [ 0. 0.5 1. ] + [-1. 0.5 1. ]] + >>> print(mesh.translate((-0.5, 0, 0.5), True).vertices) # the translation vector is directly added to the geometry coordinates # doctest: +SKIP + [[-0.5 0. 0.5] + [ 0.5 0. 0.5] + [ 0.5 1. 0.5] + [-0.5 1. 0.5] + [-0.5 0. 1.5] + [ 0.5 0. 1.5] + [ 0.5 1. 1.5] + [-0.5 1. 1.5]] + """ + vertices = np.array(self.vertices, dtype=paddle.get_default_dtype()) + faces = np.array(self.faces) + + if not checker.dynamic_import_to_globals(("open3d", "pymesh")): + raise ImportError( + "Could not import open3d and pymesh python package. 
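# --- Illustrative sketch (not part of the patch) -------------------------------
# A minimal, numpy-only sketch of the sign convention documented for `sdf_func`
# above: the SDF is negative inside the geometry, positive outside, and ~0 on the
# boundary, so it is negated wherever a positive interior weight is needed.
# The analytic `sphere_sdf` helper below is a hypothetical stand-in for a real
# mesh SDF and is not part of ppsci.
import numpy as np

def sphere_sdf(points: np.ndarray, radius: float = 1.0) -> np.ndarray:
    # Signed distance to a unit sphere at the origin, shape [N, 1].
    return np.linalg.norm(points, axis=1, keepdims=True) - radius

points = np.random.uniform(-2.0, 2.0, size=(1000, 3))
sdf = sphere_sdf(points)
inside_mask = (sdf < 0.0).ravel()   # interior points have negative SDF
weights = -sdf[inside_mask]         # negate to obtain positive interior weights
assert (weights > 0.0).all()
# -------------------------------------------------------------------------------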
" + "Please install open3d with `pip install open3d` and " + "pymesh as https://paddlescience-docs.readthedocs.io/zh/latest/zh/install_setup/#__tabbed_4_1" + ) + import open3d # isort:skip + import pymesh # isort:skip + + open3d_mesh = open3d.geometry.TriangleMesh( + open3d.utility.Vector3dVector(vertices), + open3d.utility.Vector3iVector(faces), + ) + open3d_mesh = open3d_mesh.translate(translation, relative) + translated_mesh = pymesh.form_mesh( + np.asarray(open3d_mesh.vertices, dtype=paddle.get_default_dtype()), faces + ) + # Generate a new Mesh object using class method + return Mesh.from_pymesh(translated_mesh) + + def scale( + self, scale: float, center: Tuple[float, float, float] = (0, 0, 0) + ) -> "Mesh": + """Scale by given scale coefficient and center coordinate. + + NOTE: This API generate a completely new Mesh object with scaled geometry, + without modifying original Mesh object inplace. + + Args: + scale (float): Scale coefficient. + center (Tuple[float,float,float], optional): Center coordinate, [x, y, z]. + Defaults to (0, 0, 0). + + Returns: + Mesh: Scaled Mesh object. + + Examples: + >>> import ppsci + >>> import pymesh # doctest: +SKIP + >>> import numpy as np + >>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP + >>> mesh = ppsci.geometry.Mesh(box) # doctest: +SKIP + >>> print(mesh.vertices) # doctest: +SKIP + [[0. 0. 0.] + [1. 0. 0.] + [1. 1. 0.] + [0. 1. 0.] + [0. 0. 1.] + [1. 0. 1.] + [1. 1. 1.] + [0. 1. 1.]] + >>> mesh = mesh.scale(2, (0.25, 0.5, 0.75)) # doctest: +SKIP + >>> print(mesh.vertices) # doctest: +SKIP + [[-0.25 -0.5 -0.75] + [ 1.75 -0.5 -0.75] + [ 1.75 1.5 -0.75] + [-0.25 1.5 -0.75] + [-0.25 -0.5 1.25] + [ 1.75 -0.5 1.25] + [ 1.75 1.5 1.25] + [-0.25 1.5 1.25]] + """ + vertices = np.array(self.vertices, dtype=paddle.get_default_dtype()) + faces = np.array(self.faces, dtype=paddle.get_default_dtype()) + + if not checker.dynamic_import_to_globals(("open3d", "pymesh")): + raise ImportError( + "Could not import open3d and pymesh python package. " + "Please install open3d with `pip install open3d` and " + "pymesh as https://pymesh.readthedocs.io/en/latest/installation.html." 
+ ) + import open3d # isort:skip + import pymesh # isort:skip + + open3d_mesh = open3d.geometry.TriangleMesh( + open3d.utility.Vector3dVector(vertices), + open3d.utility.Vector3iVector(faces), + ) + open3d_mesh = open3d_mesh.scale(scale, center) + scaled_pymesh = pymesh.form_mesh( + np.asarray(open3d_mesh.vertices, dtype=paddle.get_default_dtype()), faces + ) + # Generate a new Mesh object using class method + return Mesh.from_pymesh(scaled_pymesh) + + def uniform_boundary_points(self, n: int): + """Compute the equi-spaced points on the boundary.""" + return self.pysdf.sample_surface(n) + + def inflated_random_points(self, n, distance, random="pseudo", criteria=None): + if not isinstance(n, (tuple, list)): + n = [n] + if not isinstance(distance, (tuple, list)): + distance = [distance] + if len(n) != len(distance): + raise ValueError( + f"len(n)({len(n)}) should be equal to len(distance)({len(distance)})" + ) + + from ppsci.geometry import inflation + + all_points = [] + all_areas = [] + for _n, _dist in zip(n, distance): + inflated_mesh = Mesh(inflation.pymesh_inflation(self.py_mesh, _dist)) + points, areas = inflated_mesh.random_points(_n, random, criteria) + all_points.append(points) + all_areas.append(areas) + + all_points = np.concatenate(all_points, axis=0) + all_areas = np.concatenate(all_areas, axis=0) + return all_points, all_areas + + def _approximate_area( + self, + random: Literal["pseudo"] = "pseudo", + criteria: Optional[Callable] = None, + n_appr: int = 10000, + ) -> float: + """Approximate area with given `criteria` and `n_appr` points by Monte Carlo + algorithm. + + Args: + random (str, optional): Random method. Defaults to "pseudo". + criteria (Optional[Callable]): Criteria function. Defaults to None. + n_appr (int): Number of points for approximating area. Defaults to 10000. + + Returns: + float: Approximation area with given criteria. 
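# --- Illustrative sketch (not part of the patch) -------------------------------
# A numpy-only sketch of the Monte Carlo area approximation described for
# `_approximate_area` above: draw per-triangle sample counts proportional to
# triangle area, weight each sampled point by area_i / n_i, and zero out points
# rejected by `criteria`. The two triangles (a unit square in the z=0 plane) and
# the `criteria` lambda are illustrative assumptions.
import numpy as np

def triangle_areas(v0, v1, v2):
    # Half the norm of the cross product of two edge vectors, shape [M].
    return 0.5 * np.linalg.norm(np.cross(v1 - v0, v2 - v0), axis=1)

v0 = np.array([[0, 0, 0], [0, 0, 0]], dtype=float)
v1 = np.array([[1, 0, 0], [1, 1, 0]], dtype=float)
v2 = np.array([[1, 1, 0], [0, 1, 0]], dtype=float)

areas = triangle_areas(v0, v1, v2)          # [0.5, 0.5]
probs = areas / areas.sum()
n_appr = 10000
chosen = np.random.choice(len(probs), n_appr, p=probs)
counts, _ = np.histogram(chosen, np.arange(len(probs) + 1) - 0.5)

points, point_weights = [], []
for i, n_i in enumerate(counts):
    if n_i == 0:
        continue
    # Uniform barycentric sampling inside triangle i (sqrt trick).
    r1, r2 = np.random.rand(n_i), np.random.rand(n_i)
    s1 = np.sqrt(r1)
    p = (v0[i] * (1 - s1)[:, None]
         + v1[i] * ((1 - r2) * s1)[:, None]
         + v2[i] * (r2 * s1)[:, None])
    points.append(p)
    point_weights.append(np.full((n_i,), areas[i] / n_i))

points = np.concatenate(points)
point_weights = np.concatenate(point_weights)
criteria = lambda x, y, z: x < 0.5          # keep the left half of the square
mask = criteria(points[:, 0], points[:, 1], points[:, 2])
approx_area = (point_weights * mask).sum()  # ~0.5 for the unit square
# -------------------------------------------------------------------------------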
+ """ + triangle_areas = area_of_triangles(self.v0, self.v1, self.v2) + triangle_probabilities = triangle_areas / np.linalg.norm(triangle_areas, ord=1) + triangle_index = np.arange(triangle_probabilities.shape[0]) + npoint_per_triangle = np.random.choice( + triangle_index, n_appr, p=triangle_probabilities + ) + npoint_per_triangle, _ = np.histogram( + npoint_per_triangle, + np.arange(triangle_probabilities.shape[0] + 1) - 0.5, + ) + + appr_areas = [] + if criteria is not None: + aux_points = [] + + for i, npoint in enumerate(npoint_per_triangle): + if npoint == 0: + continue + # sample points for computing criteria mask if criteria is given + if criteria is not None: + points_at_triangle_i = sample_in_triangle( + self.v0[i], self.v1[i], self.v2[i], npoint, random + ) + aux_points.append(points_at_triangle_i) + + appr_areas.append( + np.full( + (npoint, 1), triangle_areas[i] / npoint, paddle.get_default_dtype() + ) + ) + appr_areas = np.concatenate(appr_areas, axis=0) # [n_appr, 1] + + # set invalid area to 0 by computing criteria mask with auxiliary points + if criteria is not None: + aux_points = np.concatenate(aux_points, axis=0) # [n_appr, 3] + criteria_mask = criteria(*np.split(aux_points, self.ndim, 1)) + appr_areas *= criteria_mask + return appr_areas.sum() + + def random_boundary_points(self, n, random="pseudo"): + triangle_area = area_of_triangles(self.v0, self.v1, self.v2) + triangle_prob = triangle_area / np.linalg.norm(triangle_area, ord=1) + npoint_per_triangle = np.random.choice( + np.arange(len(triangle_prob)), n, p=triangle_prob + ) + npoint_per_triangle, _ = np.histogram( + npoint_per_triangle, np.arange(len(triangle_prob) + 1) - 0.5 + ) + + points = [] + normal = [] + areas = [] + for i, npoint in enumerate(npoint_per_triangle): + if npoint == 0: + continue + points_at_triangle_i = sample_in_triangle( + self.v0[i], self.v1[i], self.v2[i], npoint, random + ) + normal_at_triangle_i = np.tile(self.face_normal[i], (npoint, 1)).astype( + paddle.get_default_dtype() + ) + areas_at_triangle_i = np.full( + (npoint, 1), + triangle_area[i] / npoint, + dtype=paddle.get_default_dtype(), + ) + + points.append(points_at_triangle_i) + normal.append(normal_at_triangle_i) + areas.append(areas_at_triangle_i) + + points = np.concatenate(points, axis=0) + normal = np.concatenate(normal, axis=0) + areas = np.concatenate(areas, axis=0) + + return points, normal, areas + + def sample_boundary( + self, + n: int, + random: Literal["pseudo"] = "pseudo", + criteria: Optional[Callable[..., np.ndarray]] = None, + evenly: bool = False, + inflation_dist: Union[float, Tuple[float, ...]] = None, + ) -> Dict[str, np.ndarray]: + # TODO(sensen): Support for time-dependent points(repeat data in time) + if inflation_dist is not None: + if not isinstance(n, (tuple, list)): + n = [n] + if not isinstance(inflation_dist, (tuple, list)): + inflation_dist = [inflation_dist] + if len(n) != len(inflation_dist): + raise ValueError( + f"len(n)({len(n)}) should be equal to len(inflation_dist)({len(inflation_dist)})" + ) + + from ppsci.geometry import inflation + + inflated_data_dict = {} + for _n, _dist in zip(n, inflation_dist): + # 1. manually inflate mesh at first + inflated_mesh = Mesh(inflation.pymesh_inflation(self.py_mesh, _dist)) + # 2. 
compute all data by sample_boundary with `inflation_dist=None` + data_dict = inflated_mesh.sample_boundary( + _n, + random, + criteria, + evenly, + inflation_dist=None, + ) + for key, value in data_dict.items(): + if key not in inflated_data_dict: + inflated_data_dict[key] = value + else: + inflated_data_dict[key] = np.concatenate( + (inflated_data_dict[key], value), axis=0 + ) + return inflated_data_dict + else: + if evenly: + raise ValueError( + "Can't sample evenly on mesh now, please set evenly=False." + ) + _size, _ntry, _nsuc = 0, 0, 0 + all_points = [] + all_normal = [] + while _size < n: + points, normal, _ = self.random_boundary_points(n, random) + if criteria is not None: + criteria_mask = criteria( + *np.split(points, self.ndim, axis=1) + ).ravel() + points = points[criteria_mask] + normal = normal[criteria_mask] + + if len(points) > n - _size: + points = points[: n - _size] + normal = normal[: n - _size] + + all_points.append(points) + all_normal.append(normal) + + _size += len(points) + _ntry += 1 + if len(points) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample boundary points failed, " + "please check correctness of geometry and given criteria." + ) + + all_points = np.concatenate(all_points, axis=0) + all_normal = np.concatenate(all_normal, axis=0) + appr_area = self._approximate_area(random, criteria) + all_areas = np.full((n, 1), appr_area / n, paddle.get_default_dtype()) + + x_dict = misc.convert_to_dict(all_points, self.dim_keys) + normal_dict = misc.convert_to_dict( + all_normal, [f"normal_{key}" for key in self.dim_keys if key != "t"] + ) + area_dict = misc.convert_to_dict(all_areas, ["area"]) + return {**x_dict, **normal_dict, **area_dict} + + def random_points(self, n, random="pseudo", criteria=None): + _size = 0 + all_points = [] + cuboid = geometry_3d.Cuboid( + [bound[0] for bound in self.bounds], + [bound[1] for bound in self.bounds], + ) + _nsample, _nvalid = 0, 0 + while _size < n: + random_points = cuboid.random_points(n, random) + valid_mask = self.is_inside(random_points) + + if criteria: + valid_mask &= criteria( + *np.split(random_points, self.ndim, axis=1) + ).ravel() + valid_points = random_points[valid_mask] + _nvalid += len(valid_points) + + if len(valid_points) > n - _size: + valid_points = valid_points[: n - _size] + + all_points.append(valid_points) + _size += len(valid_points) + _nsample += n + + all_points = np.concatenate(all_points, axis=0) + cuboid_volume = np.prod([b[1] - b[0] for b in self.bounds]) + all_areas = np.full( + (n, 1), cuboid_volume * (_nvalid / _nsample) / n, paddle.get_default_dtype() + ) + return all_points, all_areas + + def sample_interior( + self, + n: int, + random: Literal["pseudo"] = "pseudo", + criteria: Optional[Callable[..., np.ndarray]] = None, + evenly: bool = False, + compute_sdf_derivatives: bool = False, + ): + """Sample random points in the geometry and return those meet criteria.""" + if evenly: + # TODO(sensen): Implement uniform sample for mesh interior. + raise NotImplementedError( + "uniformly sample for interior in mesh is not support yet, " + "you may need to set evenly=False in config dict of constraint" + ) + points, areas = self.random_points(n, random, criteria) + + x_dict = misc.convert_to_dict(points, self.dim_keys) + area_dict = misc.convert_to_dict(areas, ("area",)) + + # NOTE: add negative to the sdf values because weight should be positive. 
+ sdf = -self.sdf_func(points) + sdf_dict = misc.convert_to_dict(sdf, ("sdf",)) + + sdf_derives_dict = {} + if compute_sdf_derivatives: + sdf_derives = -self.sdf_derivatives(points) + sdf_derives_dict = misc.convert_to_dict( + sdf_derives, tuple(f"sdf__{key}" for key in self.dim_keys) + ) + + return {**x_dict, **area_dict, **sdf_dict, **sdf_derives_dict} + + def union(self, other: "Mesh"): + if not checker.dynamic_import_to_globals(["pymesh"]): + raise ImportError( + "Could not import pymesh python package. " + "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." + ) + import pymesh + + csg = pymesh.CSGTree( + {"union": [{"mesh": self.py_mesh}, {"mesh": other.py_mesh}]} + ) + return Mesh(csg.mesh) + + def __or__(self, other: "Mesh"): + return self.union(other) + + def __add__(self, other: "Mesh"): + return self.union(other) + + def difference(self, other: "Mesh"): + if not checker.dynamic_import_to_globals(["pymesh"]): + raise ImportError( + "Could not import pymesh python package. " + "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." + ) + import pymesh + + csg = pymesh.CSGTree( + {"difference": [{"mesh": self.py_mesh}, {"mesh": other.py_mesh}]} + ) + return Mesh(csg.mesh) + + def __sub__(self, other: "Mesh"): + return self.difference(other) + + def intersection(self, other: "Mesh"): + if not checker.dynamic_import_to_globals(["pymesh"]): + raise ImportError( + "Could not import pymesh python package. " + "Please install it as https://pymesh.readthedocs.io/en/latest/installation.html." + ) + import pymesh + + csg = pymesh.CSGTree( + {"intersection": [{"mesh": self.py_mesh}, {"mesh": other.py_mesh}]} + ) + return Mesh(csg.mesh) + + def __and__(self, other: "Mesh"): + return self.intersection(other) + + def __str__(self) -> str: + """Return the name of class""" + return ", ".join( + [ + self.__class__.__name__, + f"num_vertices = {self.num_vertices}", + f"num_faces = {self.num_faces}", + f"bounds = {self.bounds}", + f"dim_keys = {self.dim_keys}", + ] + ) + + +class SDFMesh(geometry.Geometry): + """Class for SDF geometry, a kind of implicit surface mesh. + + Args: + vectors (np.ndarray): Vectors of triangles of mesh with shape [M, 3, 3]. + normals (np.ndarray): Unit normals of each triangle face with shape [M, 3]. + sdf_func (Callable[[np.ndarray, bool], np.ndarray]): Signed distance function + of the triangle mesh. 
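# --- Illustrative sketch (not part of the patch) -------------------------------
# A numpy-only sketch of the interior sampling strategy used by `random_points`
# above: draw candidates in the axis-aligned bounding box, reject those outside
# the geometry, and give each kept point a volume weight of
# bbox_volume * acceptance_ratio / n. The analytic unit-sphere test below is a
# hypothetical stand-in for `is_inside`.
import numpy as np

def is_inside(points: np.ndarray) -> np.ndarray:
    return np.linalg.norm(points, axis=1) <= 1.0   # unit sphere

bounds = [(-1.0, 1.0)] * 3
n = 5000
kept, n_sampled, n_valid = [], 0, 0
while sum(len(k) for k in kept) < n:
    cand = np.random.uniform(
        [b[0] for b in bounds], [b[1] for b in bounds], (n, 3)
    )
    valid = cand[is_inside(cand)]
    n_sampled += n
    n_valid += len(valid)
    kept.append(valid)

points = np.concatenate(kept)[:n]
bbox_volume = np.prod([b[1] - b[0] for b in bounds])            # 8.0
weights = np.full((n, 1), bbox_volume * (n_valid / n_sampled) / n)
# weights.sum() approximates the sphere volume 4/3 * pi ~ 4.19
# -------------------------------------------------------------------------------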
+ + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.SDFMesh.from_stl("/path/to/mesh.stl") # doctest: +SKIP + """ + + eps = 1e-6 + + def __init__( + self, + vectors: np.ndarray, + normals: np.ndarray, + sdf_func: Callable[[np.ndarray, bool], np.ndarray], + ): + if vectors.shape[1:] != (3, 3): + raise ValueError( + f"The shape of `vectors` must be [M, 3, 3], but got {vectors.shape}" + ) + if normals.shape[1] != 3: + raise ValueError( + f"The shape of `normals` must be [M, 3], but got {normals.shape}" + ) + self.vectors = vectors + self.face_normal = normals + self.sdf_func = sdf_func # overwrite sdf_func + self.bounds = ( + ((np.min(self.vectors[:, :, 0])), np.max(self.vectors[:, :, 0])), + ((np.min(self.vectors[:, :, 1])), np.max(self.vectors[:, :, 1])), + ((np.min(self.vectors[:, :, 2])), np.max(self.vectors[:, :, 2])), + ) + self.ndim = 3 + super().__init__( + self.vectors.shape[-1], + (np.amin(self.vectors, axis=(0, 1)), np.amax(self.vectors, axis=(0, 1))), + np.inf, + ) + + @property + def v0(self) -> np.ndarray: + return self.vectors[:, 0] + + @property + def v1(self) -> np.ndarray: + return self.vectors[:, 1] + + @property + def v2(self) -> np.ndarray: + return self.vectors[:, 2] + + @classmethod + def from_stl(cls, mesh_file: str) -> "SDFMesh": + """Instantiate SDFMesh from given mesh file. + + Args: + mesh_file (str): Path to triangle mesh file. + + Returns: + SDFMesh: Instantiated ppsci.geometry.SDFMesh object. + + Examples: + >>> import ppsci + >>> import pymesh # doctest: +SKIP + >>> import numpy as np # doctest: +SKIP + >>> box = pymesh.generate_box_mesh(np.array([0, 0, 0]), np.array([1, 1, 1])) # doctest: +SKIP + >>> pymesh.save_mesh("box.stl", box) # doctest: +SKIP + >>> mesh = ppsci.geometry.SDFMesh.from_stl("box.stl") # doctest: +SKIP + >>> print(sdfmesh.vectors.shape) # doctest: +SKIP + (12, 3, 3) + """ + # check if pymesh is installed when using Mesh Class + if not checker.dynamic_import_to_globals(["stl"]): + raise ImportError( + "Could not import stl python package. " + "Please install numpy-stl with: pip install 'numpy-stl>=2.16,<2.17'" + ) + + np_mesh_obj = np_mesh_module.Mesh.from_file(mesh_file) + return cls( + np_mesh_obj.vectors, + np_mesh_obj.get_unit_normals(), + make_sdf(np_mesh_obj.vectors), + ) + + def sdf_func( + self, points: np.ndarray, compute_sdf_derivatives: bool = False + ) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: + """Compute signed distance field. + + Args: + points (np.ndarray): The coordinate points used to calculate the SDF value, + the shape is [N, 3] + compute_sdf_derivatives (bool): Whether to compute SDF derivatives. + Defaults to False. + + Returns: + Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]: + If compute_sdf_derivatives is True, then return both SDF values([N, 1]) + and their derivatives([N, 3]); otherwise only return SDF values([N, 1]). + + NOTE: This function usually returns ndarray with negative values, because + according to the definition of SDF, the SDF value of the coordinate point inside + the object(interior points) is negative, the outside is positive, and the edge + is 0. Therefore, when used for weighting, a negative sign is often added before + the result of this function. 
+ """ + # normalize triangles + x_min, y_min, z_min = np.min(points, axis=0) + x_max, y_max, z_max = np.max(points, axis=0) + max_dis = max(max((x_max - x_min), (y_max - y_min)), (z_max - z_min)) + store_triangles = np.array(self.vectors, dtype=np.float64) + store_triangles[:, :, 0] -= x_min + store_triangles[:, :, 1] -= y_min + store_triangles[:, :, 2] -= z_min + store_triangles *= 1 / max_dis + store_triangles = store_triangles.reshape([-1, 3]) + + # normalize query points + points = points.copy() + points[:, 0] -= x_min + points[:, 1] -= y_min + points[:, 2] -= z_min + points *= 1 / max_dis + points = points.astype(np.float64).ravel() + + # compute sdf values for query points + sdf = sdf_module.signed_distance_field( + store_triangles, + np.arange((store_triangles.shape[0])), + points, + include_hit_points=compute_sdf_derivatives, + ) + if compute_sdf_derivatives: + sdf, hit_points = sdf + + sdf = sdf.numpy() # [N] + sdf = np.expand_dims(max_dis * sdf, axis=1) # [N, 1] + + if compute_sdf_derivatives: + hit_points = hit_points.numpy() # [N, 3] + # Gradient of SDF is the unit vector from the query point to the hit point. + sdf_derives = hit_points - points + sdf_derives /= np.linalg.norm(sdf_derives, axis=1, keepdims=True) + return sdf, sdf_derives + + return sdf + + def is_inside(self, x): + # NOTE: point on boundary is included + return np.less(self.sdf_func(x), 0.0).ravel() + + def on_boundary(self, x: np.ndarray, normal: np.ndarray) -> np.ndarray: + x_plus = x + self.eps * normal + x_minus = x - self.eps * normal + + sdf_x_plus = self.sdf_func(x_plus) + sdf_x_minus = self.sdf_func(x_minus) + mask_on_boundary = np.less_equal(sdf_x_plus * sdf_x_minus, 0) + return mask_on_boundary.ravel() + + def translate(self, translation: np.ndarray) -> "SDFMesh": + """Translate by given offsets. + + NOTE: This API generate a completely new Mesh object with translated geometry, + without modifying original Mesh object inplace. + + Args: + translation (np.ndarray): Translation offsets, numpy array of shape (3,): + [offset_x, offset_y, offset_z]. + + Returns: + Mesh: Translated Mesh object. + + Examples: + >>> import ppsci + >>> import pymesh # doctest: +SKIP + >>> mesh = ppsci.geometry.SDFMesh.from_stl('/path/to/mesh.stl') # doctest: +SKIP + >>> mesh = mesh.translate(np.array([1, -1, 2])) # doctest: +SKIP + """ + new_vectors = self.vectors + translation.reshape([1, 1, 3]) + + return SDFMesh( + new_vectors, + self.face_normal, + make_sdf(new_vectors), + ) + + def scale(self, scale: float) -> "SDFMesh": + """Scale by given scale coefficient and center coordinate. + + NOTE: This API generate a completely new Mesh object with scaled geometry, + without modifying original Mesh object inplace. + + Args: + scale (float): Scale coefficient. + + Returns: + Mesh: Scaled Mesh object. + + Examples: + >>> import ppsci + >>> import pymesh # doctest: +SKIP + >>> mesh = ppsci.geometry.SDFMesh.from_stl('/path/to/mesh.stl') # doctest: +SKIP + >>> mesh = mesh.scale(np.array([1.3, 1.5, 2.0])) # doctest: +SKIP + """ + new_vectors = self.vectors * scale + return SDFMesh( + new_vectors, + self.face_normal, + make_sdf(new_vectors), + ) + + def uniform_boundary_points(self, n: int): + """Compute the equi-spaced points on the boundary.""" + raise NotImplementedError( + "'uniform_boundary_points' is not available in SDFMesh." + ) + + def inflated_random_points(self, n, distance, random="pseudo", criteria=None): + raise NotImplementedError( + "'inflated_random_points' is not available in SDFMesh." 
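# --- Illustrative sketch (not part of the patch) -------------------------------
# A numpy-only sketch of the `on_boundary` test above: probe the SDF a small step
# along +/- the surface normal; the two probes straddle the zero level set (their
# product is <= 0) exactly when the query point lies on the boundary. The analytic
# sphere SDF and normals below are illustrative assumptions.
import numpy as np

eps = 1e-6

def sphere_sdf(points: np.ndarray) -> np.ndarray:
    return np.linalg.norm(points, axis=1, keepdims=True) - 1.0

# Points on the unit sphere with outward normals, plus one interior point.
surface = np.random.randn(100, 3)
surface /= np.linalg.norm(surface, axis=1, keepdims=True)
normals = surface.copy()

plus, minus = surface + eps * normals, surface - eps * normals
on_boundary = (sphere_sdf(plus) * sphere_sdf(minus) <= 0).ravel()
assert on_boundary.all()

interior = np.zeros((1, 3))
interior_normal = np.array([[0.0, 0.0, 1.0]])
prod = sphere_sdf(interior + eps * interior_normal) * sphere_sdf(interior - eps * interior_normal)
assert (prod > 0).all()   # interior points are not flagged as boundary
# -------------------------------------------------------------------------------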
+ ) + + def _approximate_area( + self, + random: Literal["pseudo"] = "pseudo", + criteria: Optional[Callable] = None, + n_appr: int = 10000, + ) -> float: + """Approximate area with given `criteria` and `n_appr` points by Monte Carlo + algorithm. + + Args: + random (str, optional): Random method. Defaults to "pseudo". + criteria (Optional[Callable]): Criteria function. Defaults to None. + n_appr (int): Number of points for approximating area. Defaults to 10000. + + Returns: + float: Approximation area with given criteria. + """ + triangle_areas = area_of_triangles(self.v0, self.v1, self.v2) + triangle_probabilities = triangle_areas / np.linalg.norm(triangle_areas, ord=1) + triangle_index = np.arange(triangle_probabilities.shape[0]) + npoint_per_triangle = np.random.choice( + triangle_index, n_appr, p=triangle_probabilities + ) + npoint_per_triangle, _ = np.histogram( + npoint_per_triangle, + np.arange(triangle_probabilities.shape[0] + 1) - 0.5, + ) + + aux_points = [] + aux_normals = [] + appr_areas = [] + + for i, npoint in enumerate(npoint_per_triangle): + if npoint == 0: + continue + # sample points for computing criteria mask if criteria is given + points_at_triangle_i = sample_in_triangle( + self.v0[i], self.v1[i], self.v2[i], npoint, random + ) + normal_at_triangle_i = np.tile( + self.face_normal[i].reshape(1, 3), (npoint, 1) + ) + aux_points.append(points_at_triangle_i) + aux_normals.append(normal_at_triangle_i) + appr_areas.append( + np.full( + (npoint, 1), triangle_areas[i] / npoint, paddle.get_default_dtype() + ) + ) + + aux_points = np.concatenate(aux_points, axis=0) # [n_appr, 3] + aux_normals = np.concatenate(aux_normals, axis=0) # [n_appr, 3] + appr_areas = np.concatenate(appr_areas, axis=0) # [n_appr, 1] + valid_mask = self.on_boundary(aux_points, aux_normals)[:, None] + # set invalid area to 0 by computing criteria mask with auxiliary points + if criteria is not None: + criteria_mask = criteria(*np.split(aux_points, self.ndim, 1)) + assert valid_mask.shape == criteria_mask.shape + valid_mask = np.logical_and(valid_mask, criteria_mask) + + appr_areas *= valid_mask + + return appr_areas.sum() + + def random_boundary_points( + self, n, random="pseudo" + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + triangle_area = area_of_triangles(self.v0, self.v1, self.v2) + triangle_prob = triangle_area / np.linalg.norm(triangle_area, ord=1) + npoint_per_triangle = np.random.choice( + np.arange(len(triangle_prob)), n, p=triangle_prob + ) + npoint_per_triangle, _ = np.histogram( + npoint_per_triangle, np.arange(len(triangle_prob) + 1) - 0.5 + ) + + points = [] + normal = [] + areas = [] + for i, npoint in enumerate(npoint_per_triangle): + if npoint == 0: + continue + points_at_triangle_i = sample_in_triangle( + self.v0[i], self.v1[i], self.v2[i], npoint, random + ) + normal_at_triangle_i = np.tile(self.face_normal[i], (npoint, 1)).astype( + paddle.get_default_dtype() + ) + areas_at_triangle_i = np.full( + (npoint, 1), + triangle_area[i] / npoint, + dtype=paddle.get_default_dtype(), + ) + + points.append(points_at_triangle_i) + normal.append(normal_at_triangle_i) + areas.append(areas_at_triangle_i) + + points = np.concatenate(points, axis=0) + normal = np.concatenate(normal, axis=0) + areas = np.concatenate(areas, axis=0) + + return points, normal, areas + + def sample_boundary( + self, + n: int, + random: Literal["pseudo"] = "pseudo", + criteria: Optional[Callable[..., np.ndarray]] = None, + evenly: bool = False, + inflation_dist: Union[float, Tuple[float, ...]] = None, + ) -> Dict[str, 
np.ndarray]: + # TODO(sensen): Support for time-dependent points(repeat data in time) + if inflation_dist is not None: + raise NotImplementedError("Not implemented yet") + else: + if evenly: + raise ValueError( + "Can't sample evenly on mesh now, please set evenly=False." + ) + _size, _ntry, _nsuc = 0, 0, 0 + all_points = [] + all_normal = [] + while _size < n: + points, normal, _ = self.random_boundary_points(n, random) + valid_mask = self.on_boundary(points, normal) + + if criteria is not None: + criteria_mask = criteria( + *np.split(points, self.ndim, axis=1) + ).ravel() + assert valid_mask.shape == criteria_mask.shape + valid_mask = np.logical_and(valid_mask, criteria_mask) + + points = points[valid_mask] + normal = normal[valid_mask] + + if len(points) > n - _size: + points = points[: n - _size] + normal = normal[: n - _size] + + all_points.append(points) + all_normal.append(normal) + + _size += len(points) + _ntry += 1 + if len(points) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample boundary points failed, " + "please check correctness of geometry and given criteria." + ) + + all_points = np.concatenate(all_points, axis=0) + all_normal = np.concatenate(all_normal, axis=0) + _appr_area = self._approximate_area(random, criteria) + all_areas = np.full((n, 1), _appr_area / n, paddle.get_default_dtype()) + + x_dict = misc.convert_to_dict(all_points, self.dim_keys) + normal_dict = misc.convert_to_dict( + all_normal, [f"normal_{key}" for key in self.dim_keys if key != "t"] + ) + area_dict = misc.convert_to_dict(all_areas, ["area"]) + return {**x_dict, **normal_dict, **area_dict} + + def random_points(self, n, random="pseudo", criteria=None): + _size = 0 + all_points = [] + cuboid = geometry_3d.Cuboid( + [bound[0] for bound in self.bounds], + [bound[1] for bound in self.bounds], + ) + _nsample, _nvalid = 0, 0 + while _size < n: + random_points = cuboid.random_points(n, random) + valid_mask = self.is_inside(random_points) + + if criteria: + criteria_mask = criteria( + *np.split(random_points, self.ndim, axis=1) + ).ravel() + assert valid_mask.shape == criteria_mask.shape + valid_mask = np.logical_and(valid_mask, criteria_mask) + + valid_points = random_points[valid_mask] + _nvalid += len(valid_points) + + if len(valid_points) > n - _size: + valid_points = valid_points[: n - _size] + + all_points.append(valid_points) + _size += len(valid_points) + _nsample += n + + all_points = np.concatenate(all_points, axis=0) + cuboid_volume = np.prod([b[1] - b[0] for b in self.bounds]) + all_areas = np.full( + (n, 1), cuboid_volume * (_nvalid / _nsample) / n, paddle.get_default_dtype() + ) + return all_points, all_areas + + def sample_interior( + self, + n: int, + random: Literal["pseudo"] = "pseudo", + criteria: Optional[Callable[..., np.ndarray]] = None, + evenly: bool = False, + compute_sdf_derivatives: bool = False, + ): + """Sample random points in the geometry and return those meet criteria.""" + if evenly: + # TODO(sensen): Implement uniform sample for mesh interior. + raise NotImplementedError( + "uniformly sample for interior in mesh is not support yet, " + "you may need to set evenly=False in config dict of constraint" + ) + points, areas = self.random_points(n, random, criteria) + + x_dict = misc.convert_to_dict(points, self.dim_keys) + area_dict = misc.convert_to_dict(areas, ("area",)) + + sdf = self.sdf_func(points, compute_sdf_derivatives) + if compute_sdf_derivatives: + sdf, sdf_derives = sdf + + # NOTE: Negate sdf because weight should be positive. 
+ sdf_dict = misc.convert_to_dict(-sdf, ("sdf",)) + + sdf_derives_dict = {} + if compute_sdf_derivatives: + # NOTE: Negate sdf derivatives + sdf_derives_dict = misc.convert_to_dict( + -sdf_derives, tuple(f"sdf__{key}" for key in self.dim_keys) + ) + + return {**x_dict, **area_dict, **sdf_dict, **sdf_derives_dict} + + def union(self, other: "SDFMesh"): + new_vectors = np.concatenate([self.vectors, other.vectors], axis=0) + new_normals = np.concatenate([self.face_normal, other.face_normal], axis=0) + + def make_union_new_sdf(sdf_func1, sdf_func2): + def new_sdf_func(points: np.ndarray, compute_sdf_derivatives: bool = False): + # Invert definition of sdf to make boolean operation accurate + # see: https://iquilezles.org/articles/interiordistance/ + sdf_self = sdf_func1(points, compute_sdf_derivatives) + sdf_other = sdf_func2(points, compute_sdf_derivatives) + if compute_sdf_derivatives: + sdf_self, sdf_derives_self = sdf_self + sdf_other, sdf_derives_other = sdf_other + + computed_sdf = -np.maximum(-sdf_self, -sdf_other) + + if compute_sdf_derivatives: + computed_sdf_derives = -np.where( + sdf_self < sdf_other, + sdf_derives_self, + sdf_derives_other, + ) + return computed_sdf, computed_sdf_derives + + return computed_sdf + + return new_sdf_func + + return SDFMesh( + new_vectors, + new_normals, + make_union_new_sdf(self.sdf_func, other.sdf_func), + ) + + def __or__(self, other: "SDFMesh"): + return self.union(other) + + def __add__(self, other: "SDFMesh"): + return self.union(other) + + def difference(self, other: "SDFMesh"): + new_vectors = np.concatenate([self.vectors, other.vectors], axis=0) + new_normals = np.concatenate([self.face_normal, -other.face_normal], axis=0) + + def make_difference_new_sdf(sdf_func1, sdf_func2): + def new_sdf_func(points: np.ndarray, compute_sdf_derivatives: bool = False): + # Invert definition of sdf to make boolean operation accurate + # see: https://iquilezles.org/articles/interiordistance/ + sdf_self = sdf_func1(points, compute_sdf_derivatives) + sdf_other = sdf_func2(points, compute_sdf_derivatives) + if compute_sdf_derivatives: + sdf_self, sdf_derives_self = sdf_self + sdf_other, sdf_derives_other = sdf_other + + computed_sdf = -np.minimum(-sdf_self, sdf_other) + + if compute_sdf_derivatives: + computed_sdf_derives = np.where( + -sdf_self < sdf_other, + -sdf_derives_self, + sdf_derives_other, + ) + return computed_sdf, computed_sdf_derives + + return computed_sdf + + return new_sdf_func + + return SDFMesh( + new_vectors, + new_normals, + make_difference_new_sdf(self.sdf_func, other.sdf_func), + ) + + def __sub__(self, other: "SDFMesh"): + return self.difference(other) + + def intersection(self, other: "SDFMesh"): + new_vectors = np.concatenate([self.vectors, other.vectors], axis=0) + new_normals = np.concatenate([self.face_normal, other.face_normal], axis=0) + + def make_intersection_new_sdf(sdf_func1, sdf_func2): + def new_sdf_func(points: np.ndarray, compute_sdf_derivatives: bool = False): + # Invert definition of sdf to make boolean operation accurate + # see: https://iquilezles.org/articles/interiordistance/ + sdf_self = sdf_func1(points, compute_sdf_derivatives) + sdf_other = sdf_func2(points, compute_sdf_derivatives) + if compute_sdf_derivatives: + sdf_self, sdf_derives_self = sdf_self + sdf_other, sdf_derives_other = sdf_other + + computed_sdf = -np.minimum(-sdf_self, -sdf_other) + + if compute_sdf_derivatives: + computed_sdf_derives = np.where( + sdf_self > sdf_other, + -sdf_derives_self, + -sdf_derives_other, + ) + return computed_sdf, 
computed_sdf_derives + + return computed_sdf + + return new_sdf_func + + return SDFMesh( + new_vectors, + new_normals, + make_intersection_new_sdf(self.sdf_func, other.sdf_func), + ) + + def __and__(self, other: "SDFMesh"): + return self.intersection(other) + + def __str__(self) -> str: + """Return the name of class""" + return ", ".join( + [ + self.__class__.__name__, + f"num_faces = {self.vectors.shape[0]}", + f"bounds = {self.bounds}", + f"dim_keys = {self.dim_keys}", + ] + ) + + +def area_of_triangles(v0, v1, v2): + """Ref https://math.stackexchange.com/questions/128991/how-to-calculate-the-area-of-a-3d-triangle + + Args: + v0 (np.ndarray): Coordinates of the first vertex of the triangle surface with shape of [N, 3]. + v1 (np.ndarray): Coordinates of the second vertex of the triangle surface with shape of [N, 3]. + v2 (np.ndarray): Coordinates of the third vertex of the triangle surface with shape of [N, 3]. + + Returns: + np.ndarray: Area of each triangle with shape of [N, ]. + """ + a = np.sqrt( + (v0[:, 0] - v1[:, 0]) ** 2 + + (v0[:, 1] - v1[:, 1]) ** 2 + + (v0[:, 2] - v1[:, 2]) ** 2 + + 1e-10 + ) + b = np.sqrt( + (v1[:, 0] - v2[:, 0]) ** 2 + + (v1[:, 1] - v2[:, 1]) ** 2 + + (v1[:, 2] - v2[:, 2]) ** 2 + + 1e-10 + ) + c = np.sqrt( + (v0[:, 0] - v2[:, 0]) ** 2 + + (v0[:, 1] - v2[:, 1]) ** 2 + + (v0[:, 2] - v2[:, 2]) ** 2 + + 1e-10 + ) + p = (a + b + c) / 2 + area = np.sqrt(p * (p - a) * (p - b) * (p - c) + 1e-10) + return area + + +def sample_in_triangle(v0, v1, v2, n, random="pseudo", criteria=None): + """ + Uniformly sample n points in an 3D triangle defined by 3 vertices v0, v1, v2 + https://math.stackexchange.com/questions/18686/uniform-random-point-in-triangle + + Args: + v0 (np.ndarray): Coordinates of the first vertex of an triangle with shape of [3, ]. + v1 (np.ndarray): Coordinates of the second vertex of an triangle with shape of [3, ]. + v2 (np.ndarray): Coordinates of the third vertex of an triangle with shape of [3, ]. + n (int): Number of points to be sampled. + + Returns: + np.ndarray: Coordinates of sampled n points with shape of [n, 3]. 
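# --- Illustrative sketch (not part of the patch) -------------------------------
# A numpy-only sketch of the SDF boolean rules implemented by `union`,
# `difference` and `intersection` above. With the negative-inside convention,
# `-np.maximum(-a, -b)` reduces to min(a, b) (union), `-np.minimum(-a, -b)` to
# max(a, b) (intersection), and `-np.minimum(-a, b)` to max(a, -b) (difference).
# The two analytic sphere SDFs below are illustrative assumptions.
import numpy as np

def sphere_sdf(points, center, radius):
    return np.linalg.norm(points - center, axis=1, keepdims=True) - radius

points = np.random.uniform(-2.0, 2.0, size=(2000, 3))
a = sphere_sdf(points, np.array([-0.5, 0.0, 0.0]), 1.0)
b = sphere_sdf(points, np.array([0.5, 0.0, 0.0]), 1.0)

sdf_union = -np.maximum(-a, -b)          # == np.minimum(a, b)
sdf_intersection = -np.minimum(-a, -b)   # == np.maximum(a, b)
sdf_difference = -np.minimum(-a, b)      # == np.maximum(a, -b), i.e. A minus B

assert np.allclose(sdf_union, np.minimum(a, b))
assert np.allclose(sdf_intersection, np.maximum(a, b))
assert np.allclose(sdf_difference, np.maximum(a, -b))
# -------------------------------------------------------------------------------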
+ """ + xs, ys, zs = [], [], [] + _size = 0 + while _size < n: + r1 = sampler.sample(n, 1, random).ravel() + r2 = sampler.sample(n, 1, random).ravel() + s1 = np.sqrt(r1) + x = v0[0] * (1.0 - s1) + v1[0] * (1.0 - r2) * s1 + v2[0] * r2 * s1 + y = v0[1] * (1.0 - s1) + v1[1] * (1.0 - r2) * s1 + v2[1] * r2 * s1 + z = v0[2] * (1.0 - s1) + v1[2] * (1.0 - r2) * s1 + v2[2] * r2 * s1 + + if criteria is not None: + criteria_mask = criteria(x, y, z).ravel() + x = x[criteria_mask] + y = y[criteria_mask] + z = z[criteria_mask] + + if len(x) > n - _size: + x = x[: n - _size] + y = y[: n - _size] + z = z[: n - _size] + + xs.append(x) + ys.append(y) + zs.append(z) + _size += len(x) + + xs = np.concatenate(xs, axis=0) + ys = np.concatenate(ys, axis=0) + zs = np.concatenate(zs, axis=0) + + return np.stack([xs, ys, zs], axis=1) + + +def make_sdf(vectors: np.ndarray): + def sdf_func(points: np.ndarray, compute_sdf_derivatives=False): + points = points.copy() + x_min, y_min, z_min = np.min(points, axis=0) + x_max, y_max, z_max = np.max(points, axis=0) + max_dis = max(max((x_max - x_min), (y_max - y_min)), (z_max - z_min)) + store_triangles = vectors.copy() + store_triangles[:, :, 0] -= x_min + store_triangles[:, :, 1] -= y_min + store_triangles[:, :, 2] -= z_min + store_triangles *= 1 / max_dis + store_triangles = store_triangles.reshape([-1, 3]) + points[:, 0] -= x_min + points[:, 1] -= y_min + points[:, 2] -= z_min + points *= 1 / max_dis + points = points.astype(np.float64).ravel() + + # compute sdf values + sdf = sdf_module.signed_distance_field( + store_triangles, + np.arange((store_triangles.shape[0])), + points, + include_hit_points=compute_sdf_derivatives, + ) + if compute_sdf_derivatives: + sdf, sdf_derives = sdf + + sdf = sdf.numpy() + sdf = np.expand_dims(max_dis * sdf, axis=1) + + if compute_sdf_derivatives: + sdf_derives = sdf_derives.numpy().reshape(-1) + sdf_derives = -(sdf_derives - points) + sdf_derives = np.reshape(sdf_derives, (sdf_derives.shape[0] // 3, 3)) + sdf_derives = sdf_derives / np.linalg.norm( + sdf_derives, axis=1, keepdims=True + ) + return sdf, sdf_derives + + return sdf + + return sdf_func diff --git a/ppsci/geometry/pointcloud.py b/ppsci/geometry/pointcloud.py index ae4d4fb4cc..e5656a5e54 100644 --- a/ppsci/geometry/pointcloud.py +++ b/ppsci/geometry/pointcloud.py @@ -1,312 +1,312 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np - -from ppsci.geometry import geometry -from ppsci.utils import misc - - -class PointCloud(geometry.Geometry): - """Class for point cloud geometry, i.e. a set of points from given file or array. - - Args: - interior (Dict[str, np.ndarray]): Filepath or dict data, which store interior points of a point cloud, such as {"x": np.ndarray, "y": np.ndarray}. - coord_keys (Tuple[str, ...]): Tuple of coordinate keys, such as ("x", "y"). 
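# --- Illustrative sketch (not part of the patch) -------------------------------
# A numpy-only sanity check of the sqrt-based barycentric sampling used in
# `sample_in_triangle` above: for uniform samples, the empirical mean should
# approach the triangle centroid (v0 + v1 + v2) / 3. The triangle vertices below
# are arbitrary illustrative values.
import numpy as np

v0 = np.array([0.0, 0.0, 0.0])
v1 = np.array([2.0, 0.0, 0.0])
v2 = np.array([0.0, 1.0, 3.0])

n = 200000
r1, r2 = np.random.rand(n), np.random.rand(n)
s1 = np.sqrt(r1)
samples = (v0 * (1.0 - s1)[:, None]
           + v1 * ((1.0 - r2) * s1)[:, None]
           + v2 * (r2 * s1)[:, None])

centroid = (v0 + v1 + v2) / 3.0
assert np.allclose(samples.mean(axis=0), centroid, atol=2e-2)
# -------------------------------------------------------------------------------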
- boundary (Dict[str, np.ndarray]): Boundary points of a point cloud. Defaults to None. - boundary_normal (Dict[str, np.ndarray]): Boundary normal points of a point cloud. Defaults to None. - - Examples: - >>> import ppsci - >>> import numpy as np - >>> interior_points = {"x": np.linspace(-1, 1, dtype="float32").reshape((-1, 1))} - >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) - """ - - def __init__( - self, - interior: Dict[str, np.ndarray], - coord_keys: Tuple[str, ...], - boundary: Optional[Dict[str, np.ndarray]] = None, - boundary_normal: Optional[Dict[str, np.ndarray]] = None, - ): - # Interior points - self.interior = misc.convert_to_array(interior, coord_keys) - self.len = self.interior.shape[0] - - # Boundary points - self.boundary = boundary - if self.boundary is not None: - self.boundary = misc.convert_to_array(self.boundary, coord_keys) - - # Boundary normal points - self.normal = boundary_normal - if self.normal is not None: - self.normal = misc.convert_to_array( - self.normal, tuple(f"{key}_normal" for key in coord_keys) - ) - if list(self.normal.shape) != list(self.boundary.shape): - raise ValueError( - f"boundary's shape({self.boundary.shape}) must equal " - f"to normal's shape({self.normal.shape})" - ) - - self.input_keys = coord_keys - super().__init__( - len(coord_keys), - (np.amin(self.interior, axis=0), np.amax(self.interior, axis=0)), - np.inf, - ) - - @property - def dim_keys(self): - return self.input_keys - - def is_inside(self, x): - # NOTE: point on boundary is included - return ( - np.isclose((x[:, None, :] - self.interior[None, :, :]), 0, atol=1e-6) - .all(axis=2) - .any(axis=1) - ) - - def on_boundary(self, x): - if not self.boundary: - raise ValueError( - "self.boundary must be initialized" " when call 'on_boundary' function" - ) - return ( - np.isclose( - (x[:, None, :] - self.boundary[None, :, :]), - 0, - atol=1e-6, - ) - .all(axis=2) - .any(axis=1) - ) - - def translate(self, translation: np.ndarray) -> "PointCloud": - """ - Translate the geometry by the given offset. - - Args: - translation (np.ndarray): Translation offset.The shape of translation must be the same as the shape of the interior points. - - Returns: - PointCloud: Translated point cloud. - - Examples: - >>> import ppsci - >>> import numpy as np - >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} - >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) - >>> translation = np.array([1.0]) - >>> print(geom.translate(translation).interior) - [[1. ] - [1.5] - [2. ] - [2.5] - [3. ]] - >>> interior_points_2d = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1)), - ... "y": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} - >>> geom_2d = ppsci.geometry.PointCloud(interior_points_2d, ("x", "y")) - >>> translation_2d = np.array([1.0, 3.0]) - >>> print(geom_2d.translate(translation_2d).interior) - [[1. 3. ] - [1.5 3.5] - [2. 4. ] - [2.5 4.5] - [3. 5. ]] - """ - for i, offset in enumerate(translation): - self.interior[:, i] += offset - if self.boundary: - self.boundary += offset - return self - - def scale(self, scale: np.ndarray) -> "PointCloud": - """ - Scale the geometry by the given factor. - - Args: - scale (np.ndarray): Scale factor.The shape of scale must be the same as the shape of the interior points. - - Returns: - PointCloud: Scaled point cloud. 
- - Examples: - >>> import ppsci - >>> import numpy as np - >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} - >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) - >>> scale = np.array([2.0]) - >>> print(geom.scale(scale).interior) - [[0.] - [1.] - [2.] - [3.] - [4.]] - >>> interior_points_2d = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1)), - ... "y": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} - >>> geom_2d = ppsci.geometry.PointCloud(interior_points_2d, ("x", "y")) - >>> scale_2d = np.array([2.0, 0.5]) - >>> print(geom_2d.scale(scale_2d).interior) - [[0. 0. ] - [1. 0.25] - [2. 0.5 ] - [3. 0.75] - [4. 1. ]] - """ - for i, _scale in enumerate(scale): - self.interior[:, i] *= _scale - if self.boundary: - self.boundary[:, i] *= _scale - if self.normal: - self.normal[:, i] *= _scale - return self - - def uniform_boundary_points(self, n: int): - """Compute the equi-spaced points on the boundary.""" - raise NotImplementedError( - "PointCloud do not have 'uniform_boundary_points' method" - ) - - def random_boundary_points(self, n: int, random: str = "pseudo") -> np.ndarray: - """Randomly sample points on the boundary. - - Args: - n (int): Number of sample points. - random (str): Random method. Defaults to "pseudo". - - Returns: - np.ndarray: Randomly sampled points on the boundary.The shape of the returned array is (n, ndim). - - Examples: - >>> import ppsci - >>> import numpy as np - >>> np.random.seed(0) - >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} - >>> boundary_points = {"x": np.array([0.0, 2.0], dtype="float32").reshape((-1, 1))} - >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",), boundary_points) - >>> print(geom.random_boundary_points(1)) - [[2.]] - """ - assert self.boundary is not None, ( - "boundary points can't be empty when call " - "'random_boundary_points' method" - ) - assert n <= len(self.boundary), ( - f"number of sample points({n}) " - f"can't be more than that in boundary({len(self.boundary)})" - ) - return self.boundary[ - np.random.choice(len(self.boundary), size=n, replace=False) - ] - - def random_points(self, n: int, random: str = "pseudo") -> np.ndarray: - """Randomly sample points in the geometry. - - Args: - n (int): Number of sample points. - random (str): Random method. Defaults to "pseudo". - - Returns: - np.ndarray: Randomly sampled points in the geometry.The shape of the returned array is (n, ndim). - - Examples: - >>> import ppsci - >>> import numpy as np - >>> np.random.seed(0) - >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} - >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) - >>> print(geom.random_points(2)) - [[1.] - [0.]] - """ - assert n <= len(self.interior), ( - f"number of sample points({n}) " - f"can't be more than that in points({len(self.interior)})" - ) - return self.interior[ - np.random.choice(len(self.interior), size=n, replace=False) - ] - - def uniform_points(self, n: int, boundary: bool = True) -> np.ndarray: - """Compute the equi-spaced points in the geometry. - - Args: - n (int): Number of sample points. - boundary (bool): Whether to include boundary points. Defaults to True. - - Returns: - np.ndarray: Equi-spaced points in the geometry.The shape of the returned array is (n, ndim). 
- - Examples: - >>> import ppsci - >>> import numpy as np - >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} - >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) - >>> print(geom.uniform_points(2)) - [[0. ] - [0.5]] - """ - return self.interior[:n] - - def union(self, other): - raise NotImplementedError( - "Union operation for PointCloud is not supported yet." - ) - - def __or__(self, other): - raise NotImplementedError( - "Union operation for PointCloud is not supported yet." - ) - - def difference(self, other): - raise NotImplementedError( - "Subtraction operation for PointCloud is not supported yet." - ) - - def __sub__(self, other): - raise NotImplementedError( - "Subtraction operation for PointCloud is not supported yet." - ) - - def intersection(self, other): - raise NotImplementedError( - "Intersection operation for PointCloud is not supported yet." - ) - - def __and__(self, other): - raise NotImplementedError( - "Intersection operation for PointCloud is not supported yet." - ) - - def __str__(self) -> str: - """Return the name of class.""" - return ", ".join( - [ - self.__class__.__name__, - f"num_points = {len(self.interior)}", - f"ndim = {self.ndim}", - f"bbox = {self.bbox}", - f"dim_keys = {self.dim_keys}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np + +from ppsci.geometry import geometry +from ppsci.utils import misc + + +class PointCloud(geometry.Geometry): + """Class for point cloud geometry, i.e. a set of points from given file or array. + + Args: + interior (Dict[str, np.ndarray]): Filepath or dict data, which store interior points of a point cloud, such as {"x": np.ndarray, "y": np.ndarray}. + coord_keys (Tuple[str, ...]): Tuple of coordinate keys, such as ("x", "y"). + boundary (Dict[str, np.ndarray]): Boundary points of a point cloud. Defaults to None. + boundary_normal (Dict[str, np.ndarray]): Boundary normal points of a point cloud. Defaults to None. 
+ + Examples: + >>> import ppsci + >>> import numpy as np + >>> interior_points = {"x": np.linspace(-1, 1, dtype="float32").reshape((-1, 1))} + >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) + """ + + def __init__( + self, + interior: Dict[str, np.ndarray], + coord_keys: Tuple[str, ...], + boundary: Optional[Dict[str, np.ndarray]] = None, + boundary_normal: Optional[Dict[str, np.ndarray]] = None, + ): + # Interior points + self.interior = misc.convert_to_array(interior, coord_keys) + self.len = self.interior.shape[0] + + # Boundary points + self.boundary = boundary + if self.boundary is not None: + self.boundary = misc.convert_to_array(self.boundary, coord_keys) + + # Boundary normal points + self.normal = boundary_normal + if self.normal is not None: + self.normal = misc.convert_to_array( + self.normal, tuple(f"{key}_normal" for key in coord_keys) + ) + if list(self.normal.shape) != list(self.boundary.shape): + raise ValueError( + f"boundary's shape({self.boundary.shape}) must equal " + f"to normal's shape({self.normal.shape})" + ) + + self.input_keys = coord_keys + super().__init__( + len(coord_keys), + (np.amin(self.interior, axis=0), np.amax(self.interior, axis=0)), + np.inf, + ) + + @property + def dim_keys(self): + return self.input_keys + + def is_inside(self, x): + # NOTE: point on boundary is included + return ( + np.isclose((x[:, None, :] - self.interior[None, :, :]), 0, atol=1e-6) + .all(axis=2) + .any(axis=1) + ) + + def on_boundary(self, x): + if not self.boundary: + raise ValueError( + "self.boundary must be initialized" " when call 'on_boundary' function" + ) + return ( + np.isclose( + (x[:, None, :] - self.boundary[None, :, :]), + 0, + atol=1e-6, + ) + .all(axis=2) + .any(axis=1) + ) + + def translate(self, translation: np.ndarray) -> "PointCloud": + """ + Translate the geometry by the given offset. + + Args: + translation (np.ndarray): Translation offset.The shape of translation must be the same as the shape of the interior points. + + Returns: + PointCloud: Translated point cloud. + + Examples: + >>> import ppsci + >>> import numpy as np + >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} + >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) + >>> translation = np.array([1.0]) + >>> print(geom.translate(translation).interior) + [[1. ] + [1.5] + [2. ] + [2.5] + [3. ]] + >>> interior_points_2d = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1)), + ... "y": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} + >>> geom_2d = ppsci.geometry.PointCloud(interior_points_2d, ("x", "y")) + >>> translation_2d = np.array([1.0, 3.0]) + >>> print(geom_2d.translate(translation_2d).interior) + [[1. 3. ] + [1.5 3.5] + [2. 4. ] + [2.5 4.5] + [3. 5. ]] + """ + for i, offset in enumerate(translation): + self.interior[:, i] += offset + if self.boundary: + self.boundary += offset + return self + + def scale(self, scale: np.ndarray) -> "PointCloud": + """ + Scale the geometry by the given factor. + + Args: + scale (np.ndarray): Scale factor.The shape of scale must be the same as the shape of the interior points. + + Returns: + PointCloud: Scaled point cloud. + + Examples: + >>> import ppsci + >>> import numpy as np + >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} + >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) + >>> scale = np.array([2.0]) + >>> print(geom.scale(scale).interior) + [[0.] + [1.] + [2.] + [3.] 
+ [4.]] + >>> interior_points_2d = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1)), + ... "y": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} + >>> geom_2d = ppsci.geometry.PointCloud(interior_points_2d, ("x", "y")) + >>> scale_2d = np.array([2.0, 0.5]) + >>> print(geom_2d.scale(scale_2d).interior) + [[0. 0. ] + [1. 0.25] + [2. 0.5 ] + [3. 0.75] + [4. 1. ]] + """ + for i, _scale in enumerate(scale): + self.interior[:, i] *= _scale + if self.boundary: + self.boundary[:, i] *= _scale + if self.normal: + self.normal[:, i] *= _scale + return self + + def uniform_boundary_points(self, n: int): + """Compute the equi-spaced points on the boundary.""" + raise NotImplementedError( + "PointCloud do not have 'uniform_boundary_points' method" + ) + + def random_boundary_points(self, n: int, random: str = "pseudo") -> np.ndarray: + """Randomly sample points on the boundary. + + Args: + n (int): Number of sample points. + random (str): Random method. Defaults to "pseudo". + + Returns: + np.ndarray: Randomly sampled points on the boundary.The shape of the returned array is (n, ndim). + + Examples: + >>> import ppsci + >>> import numpy as np + >>> np.random.seed(0) + >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} + >>> boundary_points = {"x": np.array([0.0, 2.0], dtype="float32").reshape((-1, 1))} + >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",), boundary_points) + >>> print(geom.random_boundary_points(1)) + [[2.]] + """ + assert self.boundary is not None, ( + "boundary points can't be empty when call " + "'random_boundary_points' method" + ) + assert n <= len(self.boundary), ( + f"number of sample points({n}) " + f"can't be more than that in boundary({len(self.boundary)})" + ) + return self.boundary[ + np.random.choice(len(self.boundary), size=n, replace=False) + ] + + def random_points(self, n: int, random: str = "pseudo") -> np.ndarray: + """Randomly sample points in the geometry. + + Args: + n (int): Number of sample points. + random (str): Random method. Defaults to "pseudo". + + Returns: + np.ndarray: Randomly sampled points in the geometry.The shape of the returned array is (n, ndim). + + Examples: + >>> import ppsci + >>> import numpy as np + >>> np.random.seed(0) + >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} + >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) + >>> print(geom.random_points(2)) + [[1.] + [0.]] + """ + assert n <= len(self.interior), ( + f"number of sample points({n}) " + f"can't be more than that in points({len(self.interior)})" + ) + return self.interior[ + np.random.choice(len(self.interior), size=n, replace=False) + ] + + def uniform_points(self, n: int, boundary: bool = True) -> np.ndarray: + """Compute the equi-spaced points in the geometry. + + Args: + n (int): Number of sample points. + boundary (bool): Whether to include boundary points. Defaults to True. + + Returns: + np.ndarray: Equi-spaced points in the geometry.The shape of the returned array is (n, ndim). + + Examples: + >>> import ppsci + >>> import numpy as np + >>> interior_points = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))} + >>> geom = ppsci.geometry.PointCloud(interior_points, ("x",)) + >>> print(geom.uniform_points(2)) + [[0. ] + [0.5]] + """ + return self.interior[:n] + + def union(self, other): + raise NotImplementedError( + "Union operation for PointCloud is not supported yet." 
+ ) + + def __or__(self, other): + raise NotImplementedError( + "Union operation for PointCloud is not supported yet." + ) + + def difference(self, other): + raise NotImplementedError( + "Subtraction operation for PointCloud is not supported yet." + ) + + def __sub__(self, other): + raise NotImplementedError( + "Subtraction operation for PointCloud is not supported yet." + ) + + def intersection(self, other): + raise NotImplementedError( + "Intersection operation for PointCloud is not supported yet." + ) + + def __and__(self, other): + raise NotImplementedError( + "Intersection operation for PointCloud is not supported yet." + ) + + def __str__(self) -> str: + """Return the name of class.""" + return ", ".join( + [ + self.__class__.__name__, + f"num_points = {len(self.interior)}", + f"ndim = {self.ndim}", + f"bbox = {self.bbox}", + f"dim_keys = {self.dim_keys}", + ] + ) diff --git a/ppsci/geometry/sampler.py b/ppsci/geometry/sampler.py index a6de5015ff..c207e3a12a 100644 --- a/ppsci/geometry/sampler.py +++ b/ppsci/geometry/sampler.py @@ -1,92 +1,92 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" - -from __future__ import annotations - -import numpy as np -import paddle -import skopt -from typing_extensions import Literal - - -def sample( - n_samples: int, ndim: int, method: Literal["pseudo", "Halton", "LHS"] = "pseudo" -) -> np.ndarray: - """Generate pseudorandom or quasi-random samples in [0, 1]^ndim. - - Args: - n_samples (int): The number of samples. - ndim (int): Number of dimension. - method (str): One of the following: "pseudo" (pseudorandom), "LHS" (Latin - hypercube sampling), "Halton" (Halton sequence), "Hammersley" (Hammersley - sequence), or "Sobol" (Sobol sequence). - - Returns: - np.ndarray: Generated random samples with shape of [n_samples, ndim]. - """ - if method == "pseudo": - return pseudorandom(n_samples, ndim) - if method in ["LHS", "Halton", "Hammersley", "Sobol"]: - return quasirandom(n_samples, ndim, method) - raise ValueError(f"Sampling method({method}) is not available.") - - -def pseudorandom(n_samples: int, ndim: int) -> np.ndarray: - """Pseudo random.""" - # If random seed is set, then the rng based code always returns the same random - # number, which may not be what we expect. - # rng = np.random.default_rng(config.random_seed) - # return rng.random(size=(n_samples, ndim), dtype=dtype=paddle.get_default_dtype()) - return np.random.random(size=(n_samples, ndim)).astype( - dtype=paddle.get_default_dtype() - ) - - -def quasirandom( - n_samples: int, ndim: int, method: Literal["pseudo", "LHS"] -) -> np.ndarray: - """Quasi random""" - # Certain points should be removed: - # - Boundary points such as [..., 0, ...] - # - Special points [0, 0, 0, ...] 
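# --- Illustrative sketch (not part of the patch) -------------------------------
# A short usage sketch of the PointCloud API shown above, mirroring its doctests:
# interior points are passed as a dict keyed by coordinate names, and optional
# boundary points / normals use the same keys (normals keyed as "<key>_normal").
# The 1D arrays below are illustrative values; requires ppsci to be installed.
import numpy as np
import ppsci

interior = {"x": np.linspace(0, 2, 5, dtype="float32").reshape((-1, 1))}
boundary = {"x": np.array([0.0, 2.0], dtype="float32").reshape((-1, 1))}
normals = {"x_normal": np.array([-1.0, 1.0], dtype="float32").reshape((-1, 1))}

geom = ppsci.geometry.PointCloud(interior, ("x",), boundary, normals)
interior_samples = geom.random_points(3)           # subset of the interior points
boundary_samples = geom.random_boundary_points(1)  # subset of the boundary points
# -------------------------------------------------------------------------------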
and [0.5, 0.5, 0.5, ...], which cause error in - # Hypersphere.random_points() and Hypersphere.random_boundary_points() - skip = 0 - if method == "LHS": - sampler = skopt.sampler.Lhs() - elif method == "Halton": - # 1st point: [0, 0, ...] - sampler = skopt.sampler.Halton(min_skip=1, max_skip=1) - elif method == "Hammersley": - # 1st point: [0, 0, ...] - if ndim == 1: - sampler = skopt.sampler.Hammersly(min_skip=1, max_skip=1) - else: - sampler = skopt.sampler.Hammersly() - skip = 1 - elif method == "Sobol": - # 1st point: [0, 0, ...], 2nd point: [0.5, 0.5, ...] - sampler = skopt.sampler.Sobol(randomize=False) - if ndim < 3: - skip = 1 - else: - skip = 2 - space = [(0.0, 1.0)] * ndim - return np.asarray( - sampler.generate(space, n_samples + skip)[skip:], - dtype=paddle.get_default_dtype(), - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" + +from __future__ import annotations + +import numpy as np +import paddle +import skopt +from typing_extensions import Literal + + +def sample( + n_samples: int, ndim: int, method: Literal["pseudo", "Halton", "LHS"] = "pseudo" +) -> np.ndarray: + """Generate pseudorandom or quasi-random samples in [0, 1]^ndim. + + Args: + n_samples (int): The number of samples. + ndim (int): Number of dimension. + method (str): One of the following: "pseudo" (pseudorandom), "LHS" (Latin + hypercube sampling), "Halton" (Halton sequence), "Hammersley" (Hammersley + sequence), or "Sobol" (Sobol sequence). + + Returns: + np.ndarray: Generated random samples with shape of [n_samples, ndim]. + """ + if method == "pseudo": + return pseudorandom(n_samples, ndim) + if method in ["LHS", "Halton", "Hammersley", "Sobol"]: + return quasirandom(n_samples, ndim, method) + raise ValueError(f"Sampling method({method}) is not available.") + + +def pseudorandom(n_samples: int, ndim: int) -> np.ndarray: + """Pseudo random.""" + # If random seed is set, then the rng based code always returns the same random + # number, which may not be what we expect. + # rng = np.random.default_rng(config.random_seed) + # return rng.random(size=(n_samples, ndim), dtype=dtype=paddle.get_default_dtype()) + return np.random.random(size=(n_samples, ndim)).astype( + dtype=paddle.get_default_dtype() + ) + + +def quasirandom( + n_samples: int, ndim: int, method: Literal["pseudo", "LHS"] +) -> np.ndarray: + """Quasi random""" + # Certain points should be removed: + # - Boundary points such as [..., 0, ...] + # - Special points [0, 0, 0, ...] and [0.5, 0.5, 0.5, ...], which cause error in + # Hypersphere.random_points() and Hypersphere.random_boundary_points() + skip = 0 + if method == "LHS": + sampler = skopt.sampler.Lhs() + elif method == "Halton": + # 1st point: [0, 0, ...] + sampler = skopt.sampler.Halton(min_skip=1, max_skip=1) + elif method == "Hammersley": + # 1st point: [0, 0, ...] 
+ if ndim == 1: + sampler = skopt.sampler.Hammersly(min_skip=1, max_skip=1) + else: + sampler = skopt.sampler.Hammersly() + skip = 1 + elif method == "Sobol": + # 1st point: [0, 0, ...], 2nd point: [0.5, 0.5, ...] + sampler = skopt.sampler.Sobol(randomize=False) + if ndim < 3: + skip = 1 + else: + skip = 2 + space = [(0.0, 1.0)] * ndim + return np.asarray( + sampler.generate(space, n_samples + skip)[skip:], + dtype=paddle.get_default_dtype(), + ) diff --git a/ppsci/geometry/sdf.py b/ppsci/geometry/sdf.py index bb3e260540..82dcec4de1 100644 --- a/ppsci/geometry/sdf.py +++ b/ppsci/geometry/sdf.py @@ -1,198 +1,198 @@ -# SPDX-FileCopyrightText: Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. -# SPDX-FileCopyrightText: All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ruff: noqa: F401 - -# modified from: https://github.com/NVIDIA/modulus/blob/main/modulus/utils/sdf.py - -from __future__ import annotations - -import importlib.util -from typing import Tuple -from typing import overload - -from numpy import ndarray - -try: - import warp as wp - - @wp.kernel - def _bvh_query_distance( - mesh: wp.uint64, - points: wp.array(dtype=wp.vec3f), - max_dist: wp.float32, - sdf: wp.array(dtype=wp.float32), - sdf_hit_point: wp.array(dtype=wp.vec3f), - sdf_hit_point_id: wp.array(dtype=wp.int32), - ): - - """ - Computes the signed distance from each point in the given array `points` - to the mesh represented by `mesh`,within the maximum distance `max_dist`, - and stores the result in the array `sdf`. - - Parameters: - mesh (wp.uint64): The identifier of the mesh. - points (wp.array): An array of 3D points for which to compute the - signed distance. - max_dist (wp.float32): The maximum distance within which to search - for the closest point on the mesh. - sdf (wp.array): An array to store the computed signed distances. - sdf_hit_point (wp.array): An array to store the computed hit points. - sdf_hit_point_id (wp.array): An array to store the computed hit point ids. - - Returns: - None - """ - tid = wp.tid() - - res = wp.mesh_query_point_sign_normal(mesh, points[tid], max_dist) - - mesh_ = wp.mesh_get(mesh) - - p0 = mesh_.points[mesh_.indices[3 * res.face + 0]] - p1 = mesh_.points[mesh_.indices[3 * res.face + 1]] - p2 = mesh_.points[mesh_.indices[3 * res.face + 2]] - - p_closest = res.u * p0 + res.v * p1 + (1.0 - res.u - res.v) * p2 - - sdf[tid] = res.sign * wp.abs(wp.length(points[tid] - p_closest)) - sdf_hit_point[tid] = p_closest - sdf_hit_point_id[tid] = res.face - -except ModuleNotFoundError: - pass -except Exception: - raise - - -@overload -def signed_distance_field( - mesh_vertices: list[tuple[float, float, float]], - mesh_indices: ndarray, - input_points: list[tuple[float, float, float]], - max_dist: float = 1e8, - include_hit_points: bool = False, - include_hit_points_id: bool = False, -) -> wp.array: - ... 
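For reference, a minimal usage sketch of the sample() helper from ppsci/geometry/sampler.py above (illustrative only, not part of the patch; it assumes paddle and scikit-optimize are installed, and the sample sizes and variable names are arbitrary):

import numpy as np
from ppsci.geometry import sampler

# pseudo-random uniform samples in [0, 1]^2, cast to paddle's default
# dtype (float32 unless configured otherwise)
x = sampler.sample(n_samples=8, ndim=2, method="pseudo")
print(x.shape, x.dtype)  # (8, 2) float32

# quasi-random sequences are generated through skopt; the degenerate
# first point(s) such as [0, 0, ...] are skipped as noted in the comments
h = sampler.sample(8, 2, "Halton")
assert h.shape == (8, 2) and np.all((h >= 0.0) & (h <= 1.0))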
- - -@overload -def signed_distance_field( - mesh_vertices: list[tuple[float, float, float]], - mesh_indices: ndarray, - input_points: list[tuple[float, float, float]], - max_dist: float = 1e8, - include_hit_points: bool = True, - include_hit_points_id: bool = False, -) -> Tuple[wp.array, wp.array]: - ... - - -@overload -def signed_distance_field( - mesh_vertices: list[tuple[float, float, float]], - mesh_indices: ndarray, - input_points: list[tuple[float, float, float]], - max_dist: float = 1e8, - include_hit_points: bool = False, - include_hit_points_id: bool = True, -) -> Tuple[wp.array, wp.array]: - ... - - -@overload -def signed_distance_field( - mesh_vertices: list[tuple[float, float, float]], - mesh_indices: ndarray, - input_points: list[tuple[float, float, float]], - max_dist: float = 1e8, - include_hit_points: bool = True, - include_hit_points_id: bool = True, -) -> Tuple[wp.array, wp.array, wp.array]: - ... - - -def signed_distance_field( - mesh_vertices: list[tuple[float, float, float]], - mesh_indices: ndarray, - input_points: list[tuple[float, float, float]], - max_dist: float = 1e8, - include_hit_points: bool = False, - include_hit_points_id: bool = False, -) -> wp.array: - """ - Computes the signed distance field (SDF) for a given mesh and input points. - - Args: - mesh_vertices (list[tuple[float, float, float]]): List of vertices defining the mesh. - mesh_indices (list[tuple[int, int, int]]): List of indices defining the triangles of the mesh. - input_points (list[tuple[float, float, float]]): List of input points for which to compute the SDF. - max_dist (float, optional): Maximum distance within which to search for - the closest point on the mesh. Default is 1e8. - include_hit_points (bool, optional): Whether to include hit points in - the output. Default is False. - include_hit_points_id (bool, optional): Whether to include hit point - IDs in the output. Default is False. - - Returns: - wp.array: An array containing the computed signed distance field. - - Example: - >>> mesh_vertices = [(0, 0, 0), (1, 0, 0), (0, 1, 0)] - >>> mesh_indices = np.array((0, 1, 2)) - >>> input_points = [(0.5, 0.5, 0.5)] - >>> signed_distance_field(mesh_vertices, mesh_indices, input_points).numpy() - Module modulus.utils.sdf load on device 'cuda:0' took ... - array([0.5], dtype=float32) - """ - if not importlib.util.find_spec("warp"): - raise ModuleNotFoundError("Please install warp with: pip install warp-lang") - - wp.init() - mesh = wp.Mesh( - wp.array(mesh_vertices, dtype=wp.vec3), - wp.array(mesh_indices, dtype=wp.int32), - ) - - sdf_points = wp.array(input_points, dtype=wp.vec3) - - sdf = wp.zeros(shape=sdf_points.shape, dtype=wp.float32) - sdf_hit_point = wp.zeros(shape=sdf_points.shape, dtype=wp.vec3f) - sdf_hit_point_id = wp.zeros(shape=sdf_points.shape, dtype=wp.int32) - - wp.launch( - kernel=_bvh_query_distance, - dim=len(sdf_points), - inputs=[ - mesh.id, - sdf_points, - max_dist, - sdf, - sdf_hit_point, - sdf_hit_point_id, - ], - ) - - if include_hit_points and include_hit_points_id: - return (sdf, sdf_hit_point, sdf_hit_point_id) - elif include_hit_points: - return (sdf, sdf_hit_point) - elif include_hit_points_id: - return (sdf, sdf_hit_point_id) - else: - return sdf +# SPDX-FileCopyrightText: Copyright (c) 2023 - 2024 NVIDIA CORPORATION & AFFILIATES. +# SPDX-FileCopyrightText: All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ruff: noqa: F401 + +# modified from: https://github.com/NVIDIA/modulus/blob/main/modulus/utils/sdf.py + +from __future__ import annotations + +import importlib.util +from typing import Tuple +from typing import overload + +from numpy import ndarray + +try: + import warp as wp + + @wp.kernel + def _bvh_query_distance( + mesh: wp.uint64, + points: wp.array(dtype=wp.vec3f), + max_dist: wp.float32, + sdf: wp.array(dtype=wp.float32), + sdf_hit_point: wp.array(dtype=wp.vec3f), + sdf_hit_point_id: wp.array(dtype=wp.int32), + ): + + """ + Computes the signed distance from each point in the given array `points` + to the mesh represented by `mesh`,within the maximum distance `max_dist`, + and stores the result in the array `sdf`. + + Parameters: + mesh (wp.uint64): The identifier of the mesh. + points (wp.array): An array of 3D points for which to compute the + signed distance. + max_dist (wp.float32): The maximum distance within which to search + for the closest point on the mesh. + sdf (wp.array): An array to store the computed signed distances. + sdf_hit_point (wp.array): An array to store the computed hit points. + sdf_hit_point_id (wp.array): An array to store the computed hit point ids. + + Returns: + None + """ + tid = wp.tid() + + res = wp.mesh_query_point_sign_normal(mesh, points[tid], max_dist) + + mesh_ = wp.mesh_get(mesh) + + p0 = mesh_.points[mesh_.indices[3 * res.face + 0]] + p1 = mesh_.points[mesh_.indices[3 * res.face + 1]] + p2 = mesh_.points[mesh_.indices[3 * res.face + 2]] + + p_closest = res.u * p0 + res.v * p1 + (1.0 - res.u - res.v) * p2 + + sdf[tid] = res.sign * wp.abs(wp.length(points[tid] - p_closest)) + sdf_hit_point[tid] = p_closest + sdf_hit_point_id[tid] = res.face + +except ModuleNotFoundError: + pass +except Exception: + raise + + +@overload +def signed_distance_field( + mesh_vertices: list[tuple[float, float, float]], + mesh_indices: ndarray, + input_points: list[tuple[float, float, float]], + max_dist: float = 1e8, + include_hit_points: bool = False, + include_hit_points_id: bool = False, +) -> wp.array: + ... + + +@overload +def signed_distance_field( + mesh_vertices: list[tuple[float, float, float]], + mesh_indices: ndarray, + input_points: list[tuple[float, float, float]], + max_dist: float = 1e8, + include_hit_points: bool = True, + include_hit_points_id: bool = False, +) -> Tuple[wp.array, wp.array]: + ... + + +@overload +def signed_distance_field( + mesh_vertices: list[tuple[float, float, float]], + mesh_indices: ndarray, + input_points: list[tuple[float, float, float]], + max_dist: float = 1e8, + include_hit_points: bool = False, + include_hit_points_id: bool = True, +) -> Tuple[wp.array, wp.array]: + ... + + +@overload +def signed_distance_field( + mesh_vertices: list[tuple[float, float, float]], + mesh_indices: ndarray, + input_points: list[tuple[float, float, float]], + max_dist: float = 1e8, + include_hit_points: bool = True, + include_hit_points_id: bool = True, +) -> Tuple[wp.array, wp.array, wp.array]: + ... 
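The overloads above only encode how the two include_* flags change the shape of the returned value. A minimal usage sketch (illustrative only, mirroring the docstring example further below; it assumes the optional warp-lang dependency is installed, and the vertex and query values are arbitrary):

import numpy as np
from ppsci.geometry.sdf import signed_distance_field

# one triangle in the z = 0 plane; indices are flat, three per triangle
mesh_vertices = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
mesh_indices = np.array((0, 1, 2))
query_points = [(0.5, 0.5, 0.5)]

# default call: only the signed distances come back
sdf = signed_distance_field(mesh_vertices, mesh_indices, query_points)
print(sdf.numpy())  # e.g. array([0.5], dtype=float32)

# with both flags set, hit points and hit-point (face) ids are appended,
# matching the last overload above
sdf, hit_points, hit_ids = signed_distance_field(
    mesh_vertices,
    mesh_indices,
    query_points,
    include_hit_points=True,
    include_hit_points_id=True,
)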
+ + +def signed_distance_field( + mesh_vertices: list[tuple[float, float, float]], + mesh_indices: ndarray, + input_points: list[tuple[float, float, float]], + max_dist: float = 1e8, + include_hit_points: bool = False, + include_hit_points_id: bool = False, +) -> wp.array: + """ + Computes the signed distance field (SDF) for a given mesh and input points. + + Args: + mesh_vertices (list[tuple[float, float, float]]): List of vertices defining the mesh. + mesh_indices (list[tuple[int, int, int]]): List of indices defining the triangles of the mesh. + input_points (list[tuple[float, float, float]]): List of input points for which to compute the SDF. + max_dist (float, optional): Maximum distance within which to search for + the closest point on the mesh. Default is 1e8. + include_hit_points (bool, optional): Whether to include hit points in + the output. Default is False. + include_hit_points_id (bool, optional): Whether to include hit point + IDs in the output. Default is False. + + Returns: + wp.array: An array containing the computed signed distance field. + + Example: + >>> mesh_vertices = [(0, 0, 0), (1, 0, 0), (0, 1, 0)] + >>> mesh_indices = np.array((0, 1, 2)) + >>> input_points = [(0.5, 0.5, 0.5)] + >>> signed_distance_field(mesh_vertices, mesh_indices, input_points).numpy() + Module modulus.utils.sdf load on device 'cuda:0' took ... + array([0.5], dtype=float32) + """ + if not importlib.util.find_spec("warp"): + raise ModuleNotFoundError("Please install warp with: pip install warp-lang") + + wp.init() + mesh = wp.Mesh( + wp.array(mesh_vertices, dtype=wp.vec3), + wp.array(mesh_indices, dtype=wp.int32), + ) + + sdf_points = wp.array(input_points, dtype=wp.vec3) + + sdf = wp.zeros(shape=sdf_points.shape, dtype=wp.float32) + sdf_hit_point = wp.zeros(shape=sdf_points.shape, dtype=wp.vec3f) + sdf_hit_point_id = wp.zeros(shape=sdf_points.shape, dtype=wp.int32) + + wp.launch( + kernel=_bvh_query_distance, + dim=len(sdf_points), + inputs=[ + mesh.id, + sdf_points, + max_dist, + sdf, + sdf_hit_point, + sdf_hit_point_id, + ], + ) + + if include_hit_points and include_hit_points_id: + return (sdf, sdf_hit_point, sdf_hit_point_id) + elif include_hit_points: + return (sdf, sdf_hit_point) + elif include_hit_points_id: + return (sdf, sdf_hit_point_id) + else: + return sdf diff --git a/ppsci/geometry/timedomain.py b/ppsci/geometry/timedomain.py index 909944a9b4..c5a25250d9 100644 --- a/ppsci/geometry/timedomain.py +++ b/ppsci/geometry/timedomain.py @@ -1,793 +1,793 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) -""" - -from __future__ import annotations - -import itertools -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np -import paddle - -from ppsci.geometry import geometry -from ppsci.geometry import geometry_1d -from ppsci.geometry import geometry_2d -from ppsci.geometry import geometry_3d -from ppsci.geometry import geometry_nd -from ppsci.geometry import mesh -from ppsci.utils import misc - - -class TimeDomain(geometry_1d.Interval): - """Class for timedomain, an special interval geometry. - - Args: - t0 (float): Start of time. - t1 (float): End of time. - time_step (Optional[float]): Step interval of time. Defaults to None. - timestamps (Optional[Tuple[float, ...]]): List of timestamps. - Defaults to None. - - Examples: - >>> import ppsci - >>> geom = ppsci.geometry.TimeDomain(0, 1) - """ - - def __init__( - self, - t0: float, - t1: float, - time_step: Optional[float] = None, - timestamps: Optional[Tuple[float, ...]] = None, - ): - super().__init__(t0, t1) - self.t0 = t0 - self.t1 = t1 - self.time_step = time_step - if timestamps is None: - self.timestamps = None - else: - self.timestamps = np.array( - timestamps, dtype=paddle.get_default_dtype() - ).reshape([-1]) - if time_step is not None: - if time_step <= 0: - raise ValueError(f"time_step({time_step}) must be larger than 0.") - self.num_timestamps = int(np.ceil((t1 - t0) / time_step)) + 1 - elif timestamps is not None: - self.num_timestamps = len(timestamps) - - def on_initial(self, t: np.ndarray) -> np.ndarray: - """Check if a specific time is on the initial time point. - - Args: - t (np.ndarray): The time to be checked. - - Returns: - np.ndarray: Bool numpy array of whether the specific time is on the initial time point. - - Examples: - >>> import paddle - >>> import ppsci - >>> geom = ppsci.geometry.TimeDomain(0, 1) - >>> T = [0, 0.01, 0.126, 0.2, 0.3] - >>> check = geom.on_initial(T) - >>> print(check) - [ True False False False False] - """ - return np.isclose(t, self.t0).flatten() - - -class TimeXGeometry(geometry.Geometry): - """Class for combination of time and geometry. - - Args: - timedomain (TimeDomain): TimeDomain object. - geometry (geometry.Geometry): Geometry object. - - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - """ - - def __init__(self, timedomain: TimeDomain, geometry: geometry.Geometry): - self.timedomain = timedomain - self.geometry = geometry - self.ndim = geometry.ndim + timedomain.ndim - - @property - def dim_keys(self): - return ("t",) + self.geometry.dim_keys - - def on_boundary(self, x): - # [N, ndim(txyz)] - return self.geometry.on_boundary(x[:, 1:]) - - def on_initial(self, x): - # [N, 1(t)] - return self.timedomain.on_initial(x[:, :1]) - - def boundary_normal(self, x): - # x: [N, ndim(txyz)] - normal = self.geometry.boundary_normal(x[:, 1:]) - return np.hstack((x[:, :1], normal)) - - def uniform_points(self, n: int, boundary: bool = True) -> np.ndarray: - """Uniform points on the spatial-temporal domain. - Geometry volume ~ bbox. - Time volume ~ diam. - - Args: - n (int): The total number of sample points to be generated. - boundary (bool): Indicates whether boundary points are included, default is True. 
- - Returns: - np.ndarray: a set of spatial-temporal coordinate points 'tx' that represent sample points evenly distributed within the spatial-temporal domain. - - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1, 0.001) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - >>> ts = time_geom.uniform_points(1000) - >>> print(ts.shape) - (1000, 3) - """ - if self.timedomain.time_step is not None: - # exclude start time t0 - nt = int(np.ceil(self.timedomain.diam / self.timedomain.time_step)) - nx = int(np.ceil(n / nt)) - elif self.timedomain.timestamps is not None: - # exclude start time t0 - nt = self.timedomain.num_timestamps - 1 - nx = int(np.ceil(n / nt)) - else: - nx = int( - np.ceil( - ( - n - * np.prod(self.geometry.bbox[1] - self.geometry.bbox[0]) - / self.timedomain.diam - ) - ** 0.5 - ) - ) - nt = int(np.ceil(n / nx)) - x = self.geometry.uniform_points(nx, boundary=boundary) - nx = len(x) - if boundary and ( - self.timedomain.time_step is None and self.timedomain.timestamps is None - ): - t = self.timedomain.uniform_points(nt, boundary=True) - else: - if self.timedomain.time_step is not None: - t = np.linspace( - self.timedomain.t1, - self.timedomain.t0, - num=nt, - endpoint=boundary, - dtype=paddle.get_default_dtype(), - )[:, None][::-1] - else: - t = self.timedomain.timestamps[1:] - tx = [] - for ti in t: - tx.append( - np.hstack((np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x)) - ) - tx = np.vstack(tx) - if len(tx) > n: - tx = tx[:n] - return tx - - def random_points( - self, n: int, random: str = "pseudo", criteria: Optional[Callable] = None - ) -> np.ndarray: - """Generate random points on the spatial-temporal domain. - - Args: - n (int): The total number of random points to generate. - random (str): Specifies the way to generate random points, default is "pseudo" , which means that a pseudo-random number generator is used. - criteria (Optional[Callable]): A method that filters on the generated random points. Defaults to None. - - Returns: - np.ndarray: A set of random spatial-temporal points. - - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1, 0.001) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - >>> ts = time_geom.random_points(1000) - >>> print(ts.shape) - (1000, 3) - """ - if self.timedomain.time_step is None and self.timedomain.timestamps is None: - raise ValueError("Either time_step or timestamps must be provided.") - # time evenly and geometry random, if time_step if specified - if self.timedomain.time_step is not None: - nt = int(np.ceil(self.timedomain.diam / self.timedomain.time_step)) - t = np.linspace( - self.timedomain.t1, - self.timedomain.t0, - num=nt, - endpoint=False, - dtype=paddle.get_default_dtype(), - )[:, None][ - ::-1 - ] # [nt, 1] - # 1. 
sample nx points in static geometry with criteria - nx = int(np.ceil(n / nt)) - _size, _ntry, _nsuc = 0, 0, 0 - x = np.empty( - shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype() - ) - while _size < nx: - _x = self.geometry.random_points(nx, random) - if criteria is not None: - # fix arg 't' to None in criteria there - criteria_mask = criteria( - None, *np.split(_x, self.geometry.ndim, axis=1) - ).flatten() - _x = _x[criteria_mask] - if len(_x) > nx - _size: - _x = _x[: nx - _size] - x[_size : _size + len(_x)] = _x - - _size += len(_x) - _ntry += 1 - if len(_x) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample points failed, " - "please check correctness of geometry and given criteria." - ) - - # 2. repeat spatial points along time - tx = [] - for ti in t: - tx.append( - np.hstack( - (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x) - ) - ) - tx = np.vstack(tx) - if len(tx) > n: - tx = tx[:n] - return tx - elif self.timedomain.timestamps is not None: - nt = self.timedomain.num_timestamps - 1 - t = self.timedomain.timestamps[1:] - nx = int(np.ceil(n / nt)) - - _size, _ntry, _nsuc = 0, 0, 0 - x = np.empty( - shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype() - ) - while _size < nx: - _x = self.geometry.random_points(nx, random) - if criteria is not None: - # fix arg 't' to None in criteria there - criteria_mask = criteria( - None, *np.split(_x, self.geometry.ndim, axis=1) - ).flatten() - _x = _x[criteria_mask] - if len(_x) > nx - _size: - _x = _x[: nx - _size] - x[_size : _size + len(_x)] = _x - - _size += len(_x) - _ntry += 1 - if len(_x) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample interior points failed, " - "please check correctness of geometry and given criteria." - ) - - tx = [] - for ti in t: - tx.append( - np.hstack( - (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x) - ) - ) - tx = np.vstack(tx) - if len(tx) > n: - tx = tx[:n] - return tx - - if isinstance(self.geometry, geometry_1d.Interval): - geom = geometry_2d.Rectangle( - [self.timedomain.t0, self.geometry.l], - [self.timedomain.t1, self.geometry.r], - ) - return geom.random_points(n, random=random) - - if isinstance(self.geometry, geometry_2d.Rectangle): - geom = geometry_3d.Cuboid( - [self.timedomain.t0, self.geometry.xmin[0], self.geometry.xmin[1]], - [self.timedomain.t1, self.geometry.xmax[0], self.geometry.xmax[1]], - ) - return geom.random_points(n, random=random) - - if isinstance(self.geometry, (geometry_3d.Cuboid, geometry_nd.Hypercube)): - geom = geometry_nd.Hypercube( - np.append(self.timedomain.t0, self.geometry.xmin), - np.append(self.timedomain.t1, self.geometry.xmax), - ) - return geom.random_points(n, random=random) - - x = self.geometry.random_points(n, random=random) - t = self.timedomain.random_points(n, random=random) - t = np.random.permutation(t) - return np.hstack((t, x)) - - def uniform_boundary_points( - self, n: int, criteria: Optional[Callable] = None - ) -> np.ndarray: - """Uniform boundary points on the spatial-temporal domain. - Geometry surface area ~ bbox. - Time surface area ~ diam. - - Args: - n (int): The total number of boundary points on the spatial-temporal domain to be generated that are evenly distributed across geometry boundaries. - criteria (Optional[Callable]): Used to filter the generated boundary points, only points that meet certain conditions are retained. Default is None. 
- - Returns: - np.ndarray: A set of point coordinates evenly distributed across geometry boundaries on the spatial-temporal domain. - - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - >>> ts = time_geom.uniform_boundary_points(1000) - >>> print(ts.shape) - (1000, 3) - """ - if self.geometry.ndim == 1: - nx = 2 - else: - s = 2 * sum( - map( - lambda l: l[0] * l[1], - itertools.combinations( - self.geometry.bbox[1] - self.geometry.bbox[0], 2 - ), - ) - ) - nx = int((n * s / self.timedomain.diam) ** 0.5) - nt = int(np.ceil(n / nx)) - - _size, _ntry, _nsuc = 0, 0, 0 - x = np.empty(shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype()) - while _size < nx: - _x = self.geometry.uniform_boundary_points(nx) - if criteria is not None: - # fix arg 't' to None in criteria there - criteria_mask = criteria( - None, *np.split(_x, self.geometry.ndim, axis=1) - ).flatten() - _x = _x[criteria_mask] - if len(_x) > nx - _size: - _x = _x[: nx - _size] - x[_size : _size + len(_x)] = _x - - _size += len(_x) - _ntry += 1 - if len(_x) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample boundary points failed, " - "please check correctness of geometry and given criteria." - ) - - nx = len(x) - t = np.linspace( - self.timedomain.t1, - self.timedomain.t0, - num=nt, - endpoint=False, - dtype=paddle.get_default_dtype(), - )[:, None][::-1] - tx = [] - for ti in t: - tx.append( - np.hstack((np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x)) - ) - tx = np.vstack(tx) - if len(tx) > n: - tx = tx[:n] - return tx - - def random_boundary_points( - self, n: int, random: str = "pseudo", criteria: Optional[Callable] = None - ) -> np.ndarray: - """Random boundary points on the spatial-temporal domain. - - Args: - n (int): The total number of spatial-temporal points generated on a given geometry boundary. - random (str): Controls the way to generate random points. Default is "pseudo". - criteria (Optional[Callable]): Used to filter the generated boundary points, only points that meet certain conditions are retained. Default is None. - - Returns: - np.ndarray: A set of point coordinates randomly distributed across geometry boundaries on the spatial-temporal domain. 
- - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1, 0.001) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - >>> ts = time_geom.random_boundary_points(1000) - >>> print(ts.shape) - (1000, 3) - """ - if self.timedomain.time_step is None and self.timedomain.timestamps is None: - raise ValueError("Either time_step or timestamps must be provided.") - if self.timedomain.time_step is not None: - # exclude start time t0 - nt = int(np.ceil(self.timedomain.diam / self.timedomain.time_step)) - t = np.linspace( - self.timedomain.t1, - self.timedomain.t0, - num=nt, - endpoint=False, - dtype=paddle.get_default_dtype(), - )[:, None][::-1] - nx = int(np.ceil(n / nt)) - - if isinstance(self.geometry, mesh.Mesh): - x, _n, a = self.geometry.random_boundary_points(nx, random=random) - else: - _size, _ntry, _nsuc = 0, 0, 0 - x = np.empty( - shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype() - ) - while _size < nx: - _x = self.geometry.random_boundary_points(nx, random) - if criteria is not None: - # fix arg 't' to None in criteria there - criteria_mask = criteria( - None, *np.split(_x, self.geometry.ndim, axis=1) - ).flatten() - _x = _x[criteria_mask] - if len(_x) > nx - _size: - _x = _x[: nx - _size] - x[_size : _size + len(_x)] = _x - - _size += len(_x) - _ntry += 1 - if len(_x) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample boundary points failed, " - "please check correctness of geometry and given criteria." - ) - - t_x = [] - if isinstance(self.geometry, mesh.Mesh): - t_normal = [] - t_area = [] - - for ti in t: - t_x.append( - np.hstack( - (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x) - ) - ) - if isinstance(self.geometry, mesh.Mesh): - t_normal.append( - np.hstack( - (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), _n) - ) - ) - t_area.append( - np.hstack( - (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), a) - ) - ) - - t_x = np.vstack(t_x) - if isinstance(self.geometry, mesh.Mesh): - t_normal = np.vstack(t_normal) - t_area = np.vstack(t_area) - - if len(t_x) > n: - t_x = t_x[:n] - if isinstance(self.geometry, mesh.Mesh): - t_normal = t_normal[:n] - t_area = t_area[:n] - - if isinstance(self.geometry, mesh.Mesh): - return t_x, t_normal, t_area - else: - return t_x - elif self.timedomain.timestamps is not None: - # exclude start time t0 - nt = self.timedomain.num_timestamps - 1 - t = self.timedomain.timestamps[1:] - nx = int(np.ceil(n / nt)) - - if isinstance(self.geometry, mesh.Mesh): - x, _n, a = self.geometry.random_boundary_points(nx, random=random) - else: - _size, _ntry, _nsuc = 0, 0, 0 - x = np.empty( - shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype() - ) - while _size < nx: - _x = self.geometry.random_boundary_points(nx, random) - if criteria is not None: - # fix arg 't' to None in criteria there - criteria_mask = criteria( - None, *np.split(_x, self.geometry.ndim, axis=1) - ).flatten() - _x = _x[criteria_mask] - if len(_x) > nx - _size: - _x = _x[: nx - _size] - x[_size : _size + len(_x)] = _x - - _size += len(_x) - _ntry += 1 - if len(_x) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample boundary points failed, " - "please check correctness of geometry and given criteria." 
- ) - - t_x = [] - if isinstance(self.geometry, mesh.Mesh): - t_normal = [] - t_area = [] - - for ti in t: - t_x.append( - np.hstack( - (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x) - ) - ) - if isinstance(self.geometry, mesh.Mesh): - t_normal.append( - np.hstack( - (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), _n) - ) - ) - t_area.append( - np.hstack( - (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), a) - ) - ) - - t_x = np.vstack(t_x) - if isinstance(self.geometry, mesh.Mesh): - t_normal = np.vstack(t_normal) - t_area = np.vstack(t_area) - - if len(t_x) > n: - t_x = t_x[:n] - if isinstance(self.geometry, mesh.Mesh): - t_normal = t_normal[:n] - t_area = t_area[:n] - - if isinstance(self.geometry, mesh.Mesh): - return t_x, t_normal, t_area - else: - return t_x - else: - if isinstance(self.geometry, mesh.Mesh): - x, _n, a = self.geometry.random_boundary_points(n, random=random) - else: - x = self.geometry.random_boundary_points(n, random=random) - - t = self.timedomain.random_points(n, random=random) - t = np.random.permutation(t) - - t_x = np.hstack((t, x)) - - if isinstance(self.geometry, mesh.Mesh): - t_normal = np.hstack((_n, t)) - t_area = np.hstack((_n, t)) - return t_x, t_normal, t_area - else: - return t_x - - def uniform_initial_points(self, n: int) -> np.ndarray: - """Generate evenly distributed point coordinates on the spatial-temporal domain at the initial moment. - - Args: - n (int): The total number of generated points. - - Returns: - np.ndarray: A set of point coordinates evenly distributed on the spatial-temporal domain at the initial moment. - - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - >>> ts = time_geom.uniform_initial_points(1000) - >>> print(ts.shape) - (1000, 3) - """ - x = self.geometry.uniform_points(n, True) - t = self.timedomain.t0 - if len(x) > n: - x = x[:n] - return np.hstack((np.full([n, 1], t, dtype=paddle.get_default_dtype()), x)) - - def random_initial_points(self, n: int, random: str = "pseudo") -> np.ndarray: - """Generate randomly distributed point coordinates on the spatial-temporal domain at the initial moment. - - Args: - n (int): The total number of generated points. - random (str): Controls the way to generate random points. Default is "pseudo". - - Returns: - np.ndarray: A set of point coordinates randomly distributed on the spatial-temporal domain at the initial moment. - - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - >>> ts = time_geom.random_initial_points(1000) - >>> print(ts.shape) - (1000, 3) - """ - x = self.geometry.random_points(n, random=random) - t = self.timedomain.t0 - return np.hstack((np.full([n, 1], t, dtype=paddle.get_default_dtype()), x)) - - def periodic_point( - self, x: Dict[str, np.ndarray], component: int - ) -> Dict[str, np.ndarray]: - """Process given point coordinates to satisfy the periodic boundary conditions of the geometry. - - Args: - x (Dict[str, np.ndarray]): Contains the coordinates and timestamps of the points. It represents the coordinates of the point to be processed. - component (int): Specifies the components or dimensions of specific spatial coordinates that are periodically processed. 
- - Returns: - Dict[str, np.ndarray] : contains the original timestamps and the coordinates of the spatial point after periodic processing. - - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1, 0.1) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - >>> ts = time_geom.sample_boundary(1000) - >>> result = time_geom.periodic_point(ts, 0) - >>> for k,v in result.items(): - ... print(k, v.shape) - t (1000, 1) - x (1000, 1) - y (1000, 1) - normal_x (1000, 1) - normal_y (1000, 1) - """ - xp = self.geometry.periodic_point(x, component) - txp = {"t": x["t"], **xp} - return txp - - def sample_initial_interior( - self, - n: int, - random: str = "pseudo", - criteria: Optional[Callable] = None, - evenly: bool = False, - compute_sdf_derivatives: bool = False, - ) -> Dict[str, np.ndarray]: - """Sample random points in the time-geometry and return those meet criteria. - - Args: - n (int): The total number of interior points generated. - random (str): The method used to specify the initial point of generation. Default is "pseudo". - criteria (Optional[Callable]): Used to filter the generated interior points, only points that meet certain conditions are retained. Default is None. - evenly (bool): Indicates whether the initial points are generated evenly. Default is False. - compute_sdf_derivatives (bool): Indicates whether to calculate the derivative of signed distance function or not. Default is False. - - Returns: - np.ndarray: Contains the coordinates of the initial internal point generated, as well as the potentially computed signed distance function and its derivative. - - Examples: - >>> import ppsci - >>> timedomain = ppsci.geometry.TimeDomain(0, 1) - >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) - >>> ts = time_geom.sample_initial_interior(1000) - >>> for k,v in ts.items(): - ... print(k, v.shape) - t (1000, 1) - x (1000, 1) - y (1000, 1) - sdf (1000, 1) - """ - x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) - _size, _ntry, _nsuc = 0, 0, 0 - while _size < n: - if evenly: - points = self.uniform_initial_points(n) - else: - points = self.random_initial_points(n, random) - - if criteria is not None: - criteria_mask = criteria(*np.split(points, self.ndim, axis=1)).flatten() - points = points[criteria_mask] - - if len(points) > n - _size: - points = points[: n - _size] - x[_size : _size + len(points)] = points - - _size += len(points) - _ntry += 1 - if len(points) > 0: - _nsuc += 1 - - if _ntry >= 1000 and _nsuc == 0: - raise ValueError( - "Sample initial interior points failed, " - "please check correctness of geometry and given criteria." 
- ) - - # if sdf_func added, return x_dict and sdf_dict, else, only return the x_dict - if hasattr(self.geometry, "sdf_func"): - # compute sdf excluding time t - sdf = -self.geometry.sdf_func(x[..., 1:]) - sdf_dict = misc.convert_to_dict(sdf, ("sdf",)) - sdf_derives_dict = {} - if compute_sdf_derivatives: - # compute sdf derivatives excluding time t - sdf_derives = -self.geometry.sdf_derivatives(x[..., 1:]) - sdf_derives_dict = misc.convert_to_dict( - sdf_derives, tuple(f"sdf__{key}" for key in self.geometry.dim_keys) - ) - else: - sdf_dict = {} - sdf_derives_dict = {} - x_dict = misc.convert_to_dict(x, self.dim_keys) - - return {**x_dict, **sdf_dict, **sdf_derives_dict} - - def __str__(self) -> str: - """Return the name of class""" - return ", ".join( - [ - self.__class__.__name__, - f"ndim = {self.ndim}", - f"bbox = (time){self.timedomain.bbox} x (space){self.geometry.bbox}", - f"diam = (time){self.timedomain.diam} x (space){self.geometry.diam}", - f"dim_keys = {self.dim_keys}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Code below is heavily based on [https://github.com/lululxvi/deepxde](https://github.com/lululxvi/deepxde) +""" + +from __future__ import annotations + +import itertools +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np +import paddle + +from ppsci.geometry import geometry +from ppsci.geometry import geometry_1d +from ppsci.geometry import geometry_2d +from ppsci.geometry import geometry_3d +from ppsci.geometry import geometry_nd +from ppsci.geometry import mesh +from ppsci.utils import misc + + +class TimeDomain(geometry_1d.Interval): + """Class for timedomain, an special interval geometry. + + Args: + t0 (float): Start of time. + t1 (float): End of time. + time_step (Optional[float]): Step interval of time. Defaults to None. + timestamps (Optional[Tuple[float, ...]]): List of timestamps. + Defaults to None. + + Examples: + >>> import ppsci + >>> geom = ppsci.geometry.TimeDomain(0, 1) + """ + + def __init__( + self, + t0: float, + t1: float, + time_step: Optional[float] = None, + timestamps: Optional[Tuple[float, ...]] = None, + ): + super().__init__(t0, t1) + self.t0 = t0 + self.t1 = t1 + self.time_step = time_step + if timestamps is None: + self.timestamps = None + else: + self.timestamps = np.array( + timestamps, dtype=paddle.get_default_dtype() + ).reshape([-1]) + if time_step is not None: + if time_step <= 0: + raise ValueError(f"time_step({time_step}) must be larger than 0.") + self.num_timestamps = int(np.ceil((t1 - t0) / time_step)) + 1 + elif timestamps is not None: + self.num_timestamps = len(timestamps) + + def on_initial(self, t: np.ndarray) -> np.ndarray: + """Check if a specific time is on the initial time point. + + Args: + t (np.ndarray): The time to be checked. + + Returns: + np.ndarray: Bool numpy array of whether the specific time is on the initial time point. 
+ + Examples: + >>> import paddle + >>> import ppsci + >>> geom = ppsci.geometry.TimeDomain(0, 1) + >>> T = [0, 0.01, 0.126, 0.2, 0.3] + >>> check = geom.on_initial(T) + >>> print(check) + [ True False False False False] + """ + return np.isclose(t, self.t0).flatten() + + +class TimeXGeometry(geometry.Geometry): + """Class for combination of time and geometry. + + Args: + timedomain (TimeDomain): TimeDomain object. + geometry (geometry.Geometry): Geometry object. + + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + """ + + def __init__(self, timedomain: TimeDomain, geometry: geometry.Geometry): + self.timedomain = timedomain + self.geometry = geometry + self.ndim = geometry.ndim + timedomain.ndim + + @property + def dim_keys(self): + return ("t",) + self.geometry.dim_keys + + def on_boundary(self, x): + # [N, ndim(txyz)] + return self.geometry.on_boundary(x[:, 1:]) + + def on_initial(self, x): + # [N, 1(t)] + return self.timedomain.on_initial(x[:, :1]) + + def boundary_normal(self, x): + # x: [N, ndim(txyz)] + normal = self.geometry.boundary_normal(x[:, 1:]) + return np.hstack((x[:, :1], normal)) + + def uniform_points(self, n: int, boundary: bool = True) -> np.ndarray: + """Uniform points on the spatial-temporal domain. + Geometry volume ~ bbox. + Time volume ~ diam. + + Args: + n (int): The total number of sample points to be generated. + boundary (bool): Indicates whether boundary points are included, default is True. + + Returns: + np.ndarray: a set of spatial-temporal coordinate points 'tx' that represent sample points evenly distributed within the spatial-temporal domain. + + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1, 0.001) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + >>> ts = time_geom.uniform_points(1000) + >>> print(ts.shape) + (1000, 3) + """ + if self.timedomain.time_step is not None: + # exclude start time t0 + nt = int(np.ceil(self.timedomain.diam / self.timedomain.time_step)) + nx = int(np.ceil(n / nt)) + elif self.timedomain.timestamps is not None: + # exclude start time t0 + nt = self.timedomain.num_timestamps - 1 + nx = int(np.ceil(n / nt)) + else: + nx = int( + np.ceil( + ( + n + * np.prod(self.geometry.bbox[1] - self.geometry.bbox[0]) + / self.timedomain.diam + ) + ** 0.5 + ) + ) + nt = int(np.ceil(n / nx)) + x = self.geometry.uniform_points(nx, boundary=boundary) + nx = len(x) + if boundary and ( + self.timedomain.time_step is None and self.timedomain.timestamps is None + ): + t = self.timedomain.uniform_points(nt, boundary=True) + else: + if self.timedomain.time_step is not None: + t = np.linspace( + self.timedomain.t1, + self.timedomain.t0, + num=nt, + endpoint=boundary, + dtype=paddle.get_default_dtype(), + )[:, None][::-1] + else: + t = self.timedomain.timestamps[1:] + tx = [] + for ti in t: + tx.append( + np.hstack((np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x)) + ) + tx = np.vstack(tx) + if len(tx) > n: + tx = tx[:n] + return tx + + def random_points( + self, n: int, random: str = "pseudo", criteria: Optional[Callable] = None + ) -> np.ndarray: + """Generate random points on the spatial-temporal domain. + + Args: + n (int): The total number of random points to generate. 
+ random (str): Specifies the way to generate random points, default is "pseudo" , which means that a pseudo-random number generator is used. + criteria (Optional[Callable]): A method that filters on the generated random points. Defaults to None. + + Returns: + np.ndarray: A set of random spatial-temporal points. + + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1, 0.001) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + >>> ts = time_geom.random_points(1000) + >>> print(ts.shape) + (1000, 3) + """ + if self.timedomain.time_step is None and self.timedomain.timestamps is None: + raise ValueError("Either time_step or timestamps must be provided.") + # time evenly and geometry random, if time_step if specified + if self.timedomain.time_step is not None: + nt = int(np.ceil(self.timedomain.diam / self.timedomain.time_step)) + t = np.linspace( + self.timedomain.t1, + self.timedomain.t0, + num=nt, + endpoint=False, + dtype=paddle.get_default_dtype(), + )[:, None][ + ::-1 + ] # [nt, 1] + # 1. sample nx points in static geometry with criteria + nx = int(np.ceil(n / nt)) + _size, _ntry, _nsuc = 0, 0, 0 + x = np.empty( + shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype() + ) + while _size < nx: + _x = self.geometry.random_points(nx, random) + if criteria is not None: + # fix arg 't' to None in criteria there + criteria_mask = criteria( + None, *np.split(_x, self.geometry.ndim, axis=1) + ).flatten() + _x = _x[criteria_mask] + if len(_x) > nx - _size: + _x = _x[: nx - _size] + x[_size : _size + len(_x)] = _x + + _size += len(_x) + _ntry += 1 + if len(_x) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample points failed, " + "please check correctness of geometry and given criteria." + ) + + # 2. repeat spatial points along time + tx = [] + for ti in t: + tx.append( + np.hstack( + (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x) + ) + ) + tx = np.vstack(tx) + if len(tx) > n: + tx = tx[:n] + return tx + elif self.timedomain.timestamps is not None: + nt = self.timedomain.num_timestamps - 1 + t = self.timedomain.timestamps[1:] + nx = int(np.ceil(n / nt)) + + _size, _ntry, _nsuc = 0, 0, 0 + x = np.empty( + shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype() + ) + while _size < nx: + _x = self.geometry.random_points(nx, random) + if criteria is not None: + # fix arg 't' to None in criteria there + criteria_mask = criteria( + None, *np.split(_x, self.geometry.ndim, axis=1) + ).flatten() + _x = _x[criteria_mask] + if len(_x) > nx - _size: + _x = _x[: nx - _size] + x[_size : _size + len(_x)] = _x + + _size += len(_x) + _ntry += 1 + if len(_x) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample interior points failed, " + "please check correctness of geometry and given criteria." 
+ ) + + tx = [] + for ti in t: + tx.append( + np.hstack( + (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x) + ) + ) + tx = np.vstack(tx) + if len(tx) > n: + tx = tx[:n] + return tx + + if isinstance(self.geometry, geometry_1d.Interval): + geom = geometry_2d.Rectangle( + [self.timedomain.t0, self.geometry.l], + [self.timedomain.t1, self.geometry.r], + ) + return geom.random_points(n, random=random) + + if isinstance(self.geometry, geometry_2d.Rectangle): + geom = geometry_3d.Cuboid( + [self.timedomain.t0, self.geometry.xmin[0], self.geometry.xmin[1]], + [self.timedomain.t1, self.geometry.xmax[0], self.geometry.xmax[1]], + ) + return geom.random_points(n, random=random) + + if isinstance(self.geometry, (geometry_3d.Cuboid, geometry_nd.Hypercube)): + geom = geometry_nd.Hypercube( + np.append(self.timedomain.t0, self.geometry.xmin), + np.append(self.timedomain.t1, self.geometry.xmax), + ) + return geom.random_points(n, random=random) + + x = self.geometry.random_points(n, random=random) + t = self.timedomain.random_points(n, random=random) + t = np.random.permutation(t) + return np.hstack((t, x)) + + def uniform_boundary_points( + self, n: int, criteria: Optional[Callable] = None + ) -> np.ndarray: + """Uniform boundary points on the spatial-temporal domain. + Geometry surface area ~ bbox. + Time surface area ~ diam. + + Args: + n (int): The total number of boundary points on the spatial-temporal domain to be generated that are evenly distributed across geometry boundaries. + criteria (Optional[Callable]): Used to filter the generated boundary points, only points that meet certain conditions are retained. Default is None. + + Returns: + np.ndarray: A set of point coordinates evenly distributed across geometry boundaries on the spatial-temporal domain. + + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + >>> ts = time_geom.uniform_boundary_points(1000) + >>> print(ts.shape) + (1000, 3) + """ + if self.geometry.ndim == 1: + nx = 2 + else: + s = 2 * sum( + map( + lambda l: l[0] * l[1], + itertools.combinations( + self.geometry.bbox[1] - self.geometry.bbox[0], 2 + ), + ) + ) + nx = int((n * s / self.timedomain.diam) ** 0.5) + nt = int(np.ceil(n / nx)) + + _size, _ntry, _nsuc = 0, 0, 0 + x = np.empty(shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype()) + while _size < nx: + _x = self.geometry.uniform_boundary_points(nx) + if criteria is not None: + # fix arg 't' to None in criteria there + criteria_mask = criteria( + None, *np.split(_x, self.geometry.ndim, axis=1) + ).flatten() + _x = _x[criteria_mask] + if len(_x) > nx - _size: + _x = _x[: nx - _size] + x[_size : _size + len(_x)] = _x + + _size += len(_x) + _ntry += 1 + if len(_x) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample boundary points failed, " + "please check correctness of geometry and given criteria." + ) + + nx = len(x) + t = np.linspace( + self.timedomain.t1, + self.timedomain.t0, + num=nt, + endpoint=False, + dtype=paddle.get_default_dtype(), + )[:, None][::-1] + tx = [] + for ti in t: + tx.append( + np.hstack((np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x)) + ) + tx = np.vstack(tx) + if len(tx) > n: + tx = tx[:n] + return tx + + def random_boundary_points( + self, n: int, random: str = "pseudo", criteria: Optional[Callable] = None + ) -> np.ndarray: + """Random boundary points on the spatial-temporal domain. 
+ + Args: + n (int): The total number of spatial-temporal points generated on a given geometry boundary. + random (str): Controls the way to generate random points. Default is "pseudo". + criteria (Optional[Callable]): Used to filter the generated boundary points, only points that meet certain conditions are retained. Default is None. + + Returns: + np.ndarray: A set of point coordinates randomly distributed across geometry boundaries on the spatial-temporal domain. + + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1, 0.001) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + >>> ts = time_geom.random_boundary_points(1000) + >>> print(ts.shape) + (1000, 3) + """ + if self.timedomain.time_step is None and self.timedomain.timestamps is None: + raise ValueError("Either time_step or timestamps must be provided.") + if self.timedomain.time_step is not None: + # exclude start time t0 + nt = int(np.ceil(self.timedomain.diam / self.timedomain.time_step)) + t = np.linspace( + self.timedomain.t1, + self.timedomain.t0, + num=nt, + endpoint=False, + dtype=paddle.get_default_dtype(), + )[:, None][::-1] + nx = int(np.ceil(n / nt)) + + if isinstance(self.geometry, mesh.Mesh): + x, _n, a = self.geometry.random_boundary_points(nx, random=random) + else: + _size, _ntry, _nsuc = 0, 0, 0 + x = np.empty( + shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype() + ) + while _size < nx: + _x = self.geometry.random_boundary_points(nx, random) + if criteria is not None: + # fix arg 't' to None in criteria there + criteria_mask = criteria( + None, *np.split(_x, self.geometry.ndim, axis=1) + ).flatten() + _x = _x[criteria_mask] + if len(_x) > nx - _size: + _x = _x[: nx - _size] + x[_size : _size + len(_x)] = _x + + _size += len(_x) + _ntry += 1 + if len(_x) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample boundary points failed, " + "please check correctness of geometry and given criteria." 
+ ) + + t_x = [] + if isinstance(self.geometry, mesh.Mesh): + t_normal = [] + t_area = [] + + for ti in t: + t_x.append( + np.hstack( + (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x) + ) + ) + if isinstance(self.geometry, mesh.Mesh): + t_normal.append( + np.hstack( + (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), _n) + ) + ) + t_area.append( + np.hstack( + (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), a) + ) + ) + + t_x = np.vstack(t_x) + if isinstance(self.geometry, mesh.Mesh): + t_normal = np.vstack(t_normal) + t_area = np.vstack(t_area) + + if len(t_x) > n: + t_x = t_x[:n] + if isinstance(self.geometry, mesh.Mesh): + t_normal = t_normal[:n] + t_area = t_area[:n] + + if isinstance(self.geometry, mesh.Mesh): + return t_x, t_normal, t_area + else: + return t_x + elif self.timedomain.timestamps is not None: + # exclude start time t0 + nt = self.timedomain.num_timestamps - 1 + t = self.timedomain.timestamps[1:] + nx = int(np.ceil(n / nt)) + + if isinstance(self.geometry, mesh.Mesh): + x, _n, a = self.geometry.random_boundary_points(nx, random=random) + else: + _size, _ntry, _nsuc = 0, 0, 0 + x = np.empty( + shape=(nx, self.geometry.ndim), dtype=paddle.get_default_dtype() + ) + while _size < nx: + _x = self.geometry.random_boundary_points(nx, random) + if criteria is not None: + # fix arg 't' to None in criteria there + criteria_mask = criteria( + None, *np.split(_x, self.geometry.ndim, axis=1) + ).flatten() + _x = _x[criteria_mask] + if len(_x) > nx - _size: + _x = _x[: nx - _size] + x[_size : _size + len(_x)] = _x + + _size += len(_x) + _ntry += 1 + if len(_x) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample boundary points failed, " + "please check correctness of geometry and given criteria." + ) + + t_x = [] + if isinstance(self.geometry, mesh.Mesh): + t_normal = [] + t_area = [] + + for ti in t: + t_x.append( + np.hstack( + (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), x) + ) + ) + if isinstance(self.geometry, mesh.Mesh): + t_normal.append( + np.hstack( + (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), _n) + ) + ) + t_area.append( + np.hstack( + (np.full([nx, 1], ti, dtype=paddle.get_default_dtype()), a) + ) + ) + + t_x = np.vstack(t_x) + if isinstance(self.geometry, mesh.Mesh): + t_normal = np.vstack(t_normal) + t_area = np.vstack(t_area) + + if len(t_x) > n: + t_x = t_x[:n] + if isinstance(self.geometry, mesh.Mesh): + t_normal = t_normal[:n] + t_area = t_area[:n] + + if isinstance(self.geometry, mesh.Mesh): + return t_x, t_normal, t_area + else: + return t_x + else: + if isinstance(self.geometry, mesh.Mesh): + x, _n, a = self.geometry.random_boundary_points(n, random=random) + else: + x = self.geometry.random_boundary_points(n, random=random) + + t = self.timedomain.random_points(n, random=random) + t = np.random.permutation(t) + + t_x = np.hstack((t, x)) + + if isinstance(self.geometry, mesh.Mesh): + t_normal = np.hstack((_n, t)) + t_area = np.hstack((_n, t)) + return t_x, t_normal, t_area + else: + return t_x + + def uniform_initial_points(self, n: int) -> np.ndarray: + """Generate evenly distributed point coordinates on the spatial-temporal domain at the initial moment. + + Args: + n (int): The total number of generated points. + + Returns: + np.ndarray: A set of point coordinates evenly distributed on the spatial-temporal domain at the initial moment. 
+ + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + >>> ts = time_geom.uniform_initial_points(1000) + >>> print(ts.shape) + (1000, 3) + """ + x = self.geometry.uniform_points(n, True) + t = self.timedomain.t0 + if len(x) > n: + x = x[:n] + return np.hstack((np.full([n, 1], t, dtype=paddle.get_default_dtype()), x)) + + def random_initial_points(self, n: int, random: str = "pseudo") -> np.ndarray: + """Generate randomly distributed point coordinates on the spatial-temporal domain at the initial moment. + + Args: + n (int): The total number of generated points. + random (str): Controls the way to generate random points. Default is "pseudo". + + Returns: + np.ndarray: A set of point coordinates randomly distributed on the spatial-temporal domain at the initial moment. + + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + >>> ts = time_geom.random_initial_points(1000) + >>> print(ts.shape) + (1000, 3) + """ + x = self.geometry.random_points(n, random=random) + t = self.timedomain.t0 + return np.hstack((np.full([n, 1], t, dtype=paddle.get_default_dtype()), x)) + + def periodic_point( + self, x: Dict[str, np.ndarray], component: int + ) -> Dict[str, np.ndarray]: + """Process given point coordinates to satisfy the periodic boundary conditions of the geometry. + + Args: + x (Dict[str, np.ndarray]): Contains the coordinates and timestamps of the points. It represents the coordinates of the point to be processed. + component (int): Specifies the components or dimensions of specific spatial coordinates that are periodically processed. + + Returns: + Dict[str, np.ndarray] : contains the original timestamps and the coordinates of the spatial point after periodic processing. + + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1, 0.1) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + >>> ts = time_geom.sample_boundary(1000) + >>> result = time_geom.periodic_point(ts, 0) + >>> for k,v in result.items(): + ... print(k, v.shape) + t (1000, 1) + x (1000, 1) + y (1000, 1) + normal_x (1000, 1) + normal_y (1000, 1) + """ + xp = self.geometry.periodic_point(x, component) + txp = {"t": x["t"], **xp} + return txp + + def sample_initial_interior( + self, + n: int, + random: str = "pseudo", + criteria: Optional[Callable] = None, + evenly: bool = False, + compute_sdf_derivatives: bool = False, + ) -> Dict[str, np.ndarray]: + """Sample random points in the time-geometry and return those meet criteria. + + Args: + n (int): The total number of interior points generated. + random (str): The method used to specify the initial point of generation. Default is "pseudo". + criteria (Optional[Callable]): Used to filter the generated interior points, only points that meet certain conditions are retained. Default is None. + evenly (bool): Indicates whether the initial points are generated evenly. Default is False. + compute_sdf_derivatives (bool): Indicates whether to calculate the derivative of signed distance function or not. Default is False. + + Returns: + np.ndarray: Contains the coordinates of the initial internal point generated, as well as the potentially computed signed distance function and its derivative. 
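The existing example below only exercises the default call of sample_initial_interior; when compute_sdf_derivatives is enabled, the implementation below adds one entry per spatial key following its f"sdf__{key}" naming. A sketch under that assumption:

import ppsci

# Sketch: same geometry as the example below, but also requesting SDF derivatives.
timedomain = ppsci.geometry.TimeDomain(0, 1)
geom = ppsci.geometry.Rectangle((0, 0), (1, 1))
time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom)
ts = time_geom.sample_initial_interior(1000, compute_sdf_derivatives=True)
# Expected keys for this 2D geometry: "t", "x", "y", "sdf", "sdf__x", "sdf__y".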
+ + Examples: + >>> import ppsci + >>> timedomain = ppsci.geometry.TimeDomain(0, 1) + >>> geom = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> time_geom = ppsci.geometry.TimeXGeometry(timedomain, geom) + >>> ts = time_geom.sample_initial_interior(1000) + >>> for k,v in ts.items(): + ... print(k, v.shape) + t (1000, 1) + x (1000, 1) + y (1000, 1) + sdf (1000, 1) + """ + x = np.empty(shape=(n, self.ndim), dtype=paddle.get_default_dtype()) + _size, _ntry, _nsuc = 0, 0, 0 + while _size < n: + if evenly: + points = self.uniform_initial_points(n) + else: + points = self.random_initial_points(n, random) + + if criteria is not None: + criteria_mask = criteria(*np.split(points, self.ndim, axis=1)).flatten() + points = points[criteria_mask] + + if len(points) > n - _size: + points = points[: n - _size] + x[_size : _size + len(points)] = points + + _size += len(points) + _ntry += 1 + if len(points) > 0: + _nsuc += 1 + + if _ntry >= 1000 and _nsuc == 0: + raise ValueError( + "Sample initial interior points failed, " + "please check correctness of geometry and given criteria." + ) + + # if sdf_func added, return x_dict and sdf_dict, else, only return the x_dict + if hasattr(self.geometry, "sdf_func"): + # compute sdf excluding time t + sdf = -self.geometry.sdf_func(x[..., 1:]) + sdf_dict = misc.convert_to_dict(sdf, ("sdf",)) + sdf_derives_dict = {} + if compute_sdf_derivatives: + # compute sdf derivatives excluding time t + sdf_derives = -self.geometry.sdf_derivatives(x[..., 1:]) + sdf_derives_dict = misc.convert_to_dict( + sdf_derives, tuple(f"sdf__{key}" for key in self.geometry.dim_keys) + ) + else: + sdf_dict = {} + sdf_derives_dict = {} + x_dict = misc.convert_to_dict(x, self.dim_keys) + + return {**x_dict, **sdf_dict, **sdf_derives_dict} + + def __str__(self) -> str: + """Return the name of class""" + return ", ".join( + [ + self.__class__.__name__, + f"ndim = {self.ndim}", + f"bbox = (time){self.timedomain.bbox} x (space){self.geometry.bbox}", + f"diam = (time){self.timedomain.diam} x (space){self.geometry.diam}", + f"dim_keys = {self.dim_keys}", + ] + ) diff --git a/ppsci/loss/__init__.py b/ppsci/loss/__init__.py index d9502e8248..40955ecd6d 100644 --- a/ppsci/loss/__init__.py +++ b/ppsci/loss/__init__.py @@ -1,67 +1,67 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy - -from ppsci.loss import mtl -from ppsci.loss.base import Loss -from ppsci.loss.chamfer import ChamferLoss -from ppsci.loss.func import FunctionalLoss -from ppsci.loss.integral import IntegralLoss -from ppsci.loss.kl import KLLoss -from ppsci.loss.l1 import L1Loss -from ppsci.loss.l1 import PeriodicL1Loss -from ppsci.loss.l2 import L2Loss -from ppsci.loss.l2 import L2RelLoss -from ppsci.loss.l2 import PeriodicL2Loss -from ppsci.loss.mae import MAELoss -from ppsci.loss.mse import CausalMSELoss -from ppsci.loss.mse import MSELoss -from ppsci.loss.mse import MSELossWithL2Decay -from ppsci.loss.mse import PeriodicMSELoss - -__all__ = [ - "Loss", - "FunctionalLoss", - "IntegralLoss", - "L1Loss", - "PeriodicL1Loss", - "L2Loss", - "L2RelLoss", - "PeriodicL2Loss", - "MAELoss", - "CausalMSELoss", - "ChamferLoss", - "MSELoss", - "MSELossWithL2Decay", - "PeriodicMSELoss", - "KLLoss", - "mtl", -] - - -def build_loss(cfg): - """Build loss. - - Args: - cfg (DictConfig): Loss config. - - Returns: - Loss: Callable loss object. - """ - cfg = copy.deepcopy(cfg) - - loss_cls = cfg.pop("name") - loss = eval(loss_cls)(**cfg) - return loss +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from ppsci.loss import mtl +from ppsci.loss.base import Loss +from ppsci.loss.chamfer import ChamferLoss +from ppsci.loss.func import FunctionalLoss +from ppsci.loss.integral import IntegralLoss +from ppsci.loss.kl import KLLoss +from ppsci.loss.l1 import L1Loss +from ppsci.loss.l1 import PeriodicL1Loss +from ppsci.loss.l2 import L2Loss +from ppsci.loss.l2 import L2RelLoss +from ppsci.loss.l2 import PeriodicL2Loss +from ppsci.loss.mae import MAELoss +from ppsci.loss.mse import CausalMSELoss +from ppsci.loss.mse import MSELoss +from ppsci.loss.mse import MSELossWithL2Decay +from ppsci.loss.mse import PeriodicMSELoss + +__all__ = [ + "Loss", + "FunctionalLoss", + "IntegralLoss", + "L1Loss", + "PeriodicL1Loss", + "L2Loss", + "L2RelLoss", + "PeriodicL2Loss", + "MAELoss", + "CausalMSELoss", + "ChamferLoss", + "MSELoss", + "MSELossWithL2Decay", + "PeriodicMSELoss", + "KLLoss", + "mtl", +] + + +def build_loss(cfg): + """Build loss. + + Args: + cfg (DictConfig): Loss config. + + Returns: + Loss: Callable loss object. + """ + cfg = copy.deepcopy(cfg) + + loss_cls = cfg.pop("name") + loss = eval(loss_cls)(**cfg) + return loss diff --git a/ppsci/loss/base.py b/ppsci/loss/base.py index 378013bb9e..48ea76e176 100644 --- a/ppsci/loss/base.py +++ b/ppsci/loss/base.py @@ -1,38 +1,38 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
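The build_loss helper at the end of the ppsci/loss/__init__.py hunk above constructs a loss object from a config mapping by popping its "name" entry and instantiating the matching class with the remaining keys. A minimal sketch; in the framework the config is normally a Hydra/omegaconf DictConfig, and a plain dict plus MSELoss keyword arguments are used here only as assumptions for brevity:

import ppsci

# Sketch: config-driven construction as performed by build_loss above.
cfg = {"name": "MSELoss", "reduction": "mean"}
loss = ppsci.loss.build_loss(cfg)  # pops "name", then calls MSELoss(reduction="mean")
print(loss)                        # prints something like "MSELoss(reduction=mean, weight=None)"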
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Union - -from paddle import nn -from typing_extensions import Literal - - -class Loss(nn.Layer): - """Base class for loss.""" - - def __init__( - self, - reduction: Literal["mean", "sum"], - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - super().__init__() - self.reduction = reduction - self.weight = weight - - def __str__(self): - return f"{self.__class__.__name__}(reduction={self.reduction}, weight={self.weight})" +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Union + +from paddle import nn +from typing_extensions import Literal + + +class Loss(nn.Layer): + """Base class for loss.""" + + def __init__( + self, + reduction: Literal["mean", "sum"], + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + super().__init__() + self.reduction = reduction + self.weight = weight + + def __str__(self): + return f"{self.__class__.__name__}(reduction={self.reduction}, weight={self.weight})" diff --git a/ppsci/loss/chamfer.py b/ppsci/loss/chamfer.py index 8740f2a18b..63ae64f5c5 100644 --- a/ppsci/loss/chamfer.py +++ b/ppsci/loss/chamfer.py @@ -1,92 +1,92 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Union - -import paddle - -from ppsci.loss import base - - -class ChamferLoss(base.Loss): - r"""Class for Chamfe distance loss. - - $$ - L = \dfrac{1}{S_1} \sum_{x \in S_1} \min_{y \in S_2} \Vert x - y \Vert_2^2 + \dfrac{1}{S_2} \sum_{y \in S_2} \min_{x \in S_1} \Vert y - x \Vert_2^2 - $$ - - $$ - \text{where } S_1 \text{ and } S_2 \text{ is the coordinate matrix of two point clouds}. - $$ - - Args: - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. 
- - Examples: - >>> import paddle - >>> from ppsci.loss import ChamferLoss - >>> _ = paddle.seed(42) - >>> batch_point_cloud1 = paddle.rand([2, 100, 3]) - >>> batch_point_cloud2 = paddle.rand([2, 50, 3]) - >>> output_dict = {"s1": batch_point_cloud1} - >>> label_dict = {"s1": batch_point_cloud2} - >>> weight = {"s1": 0.8} - >>> loss = ChamferLoss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'s1': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.04415882)} - """ - - def __init__( - self, - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - super().__init__("mean", weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - s1 = output_dict[key] - s2 = label_dict[key] - N1, N2 = s1.shape[1], s2.shape[1] - - # [B, N1, N2, 3] - s1_expand = paddle.expand(s1.reshape([-1, N1, 1, 3]), shape=[-1, N1, N2, 3]) - # [B, N1, N2, 3] - s2_expand = paddle.expand(s2.reshape([-1, 1, N2, 3]), shape=[-1, N1, N2, 3]) - - dis = ((s1_expand - s2_expand) ** 2).sum(axis=3) # [B, N1, N2] - loss_s12 = dis.min(axis=2) # [B, N1] - loss_s21 = dis.min(axis=1) # [B, N2] - loss = loss_s12.mean() + loss_s21.mean() - - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Union + +import paddle + +from ppsci.loss import base + + +class ChamferLoss(base.Loss): + r"""Class for Chamfe distance loss. + + $$ + L = \dfrac{1}{S_1} \sum_{x \in S_1} \min_{y \in S_2} \Vert x - y \Vert_2^2 + \dfrac{1}{S_2} \sum_{y \in S_2} \min_{x \in S_1} \Vert y - x \Vert_2^2 + $$ + + $$ + \text{where } S_1 \text{ and } S_2 \text{ is the coordinate matrix of two point clouds}. + $$ + + Args: + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. 
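A worked micro-example of the distance computation in ChamferLoss.forward above, written with unsqueeze-based broadcasting (numerically equivalent to the reshape/expand used in the class) so the [B, N1, N2] pairwise matrix and the two directional means are easy to follow:

import paddle

s1 = paddle.to_tensor([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]])  # [B=1, N1=2, 3]
s2 = paddle.to_tensor([[[0.0, 0.0, 0.0]]])                   # [B=1, N2=1, 3]

dis = ((s1.unsqueeze(2) - s2.unsqueeze(1)) ** 2).sum(axis=3)  # [1, 2, 1] squared distances
loss = dis.min(axis=2).mean() + dis.min(axis=1).mean()
print(float(loss))  # 0.5: mean nearest-neighbour distance in each direction, summed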
+ + Examples: + >>> import paddle + >>> from ppsci.loss import ChamferLoss + >>> _ = paddle.seed(42) + >>> batch_point_cloud1 = paddle.rand([2, 100, 3]) + >>> batch_point_cloud2 = paddle.rand([2, 50, 3]) + >>> output_dict = {"s1": batch_point_cloud1} + >>> label_dict = {"s1": batch_point_cloud2} + >>> weight = {"s1": 0.8} + >>> loss = ChamferLoss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'s1': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.04415882)} + """ + + def __init__( + self, + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + super().__init__("mean", weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + s1 = output_dict[key] + s2 = label_dict[key] + N1, N2 = s1.shape[1], s2.shape[1] + + # [B, N1, N2, 3] + s1_expand = paddle.expand(s1.reshape([-1, N1, 1, 3]), shape=[-1, N1, N2, 3]) + # [B, N1, N2, 3] + s2_expand = paddle.expand(s2.reshape([-1, 1, N2, 3]), shape=[-1, N1, N2, 3]) + + dis = ((s1_expand - s2_expand) ** 2).sum(axis=3) # [B, N1, N2] + loss_s12 = dis.min(axis=2) # [B, N1] + loss_s21 = dis.min(axis=1) # [B, N2] + loss = loss_s12.mean() + loss_s21.mean() + + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses diff --git a/ppsci/loss/func.py b/ppsci/loss/func.py index 49992c5f7f..f00919db49 100644 --- a/ppsci/loss/func.py +++ b/ppsci/loss/func.py @@ -1,94 +1,94 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Union - -import paddle - -from ppsci.loss import base - - -class FunctionalLoss(base.Loss): - r"""Functional loss class, which allows to use custom loss computing function from given loss_expr for complex computation cases. - - $$ - L = f(\mathbf{x}, \mathbf{y}) - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - Args: - loss_expr (Callable[..., paddle.Tensor]): Function for custom loss computation. - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import FunctionalLoss - >>> import paddle.nn.functional as F - >>> def mse_sum_loss(output_dict, label_dict, weight_dict=None): - ... losses = 0 - ... for key in output_dict.keys(): - ... loss = F.mse_loss(output_dict[key], label_dict[key], "sum") - ... if weight_dict: - ... loss *= weight_dict[key] - ... losses += loss - ... return {"mse_loss": losses} - >>> loss = FunctionalLoss(mse_sum_loss) - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 
'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> weight_dict = {'u': 0.8, 'v': 0.2} - >>> result = loss(output_dict, label_dict, weight_dict) - >>> print(result) - {'mse_loss': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 17.89600182)} - """ - - def __init__( - self, - loss_expr: Callable[..., paddle.Tensor], - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - super().__init__(None, weight) - self.loss_expr = loss_expr - - def forward( - self, output_dict, label_dict=None, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = self.loss_expr(output_dict, label_dict, weight_dict) - - assert isinstance(losses, dict), ( - "Loss computed by custom function should be type of 'dict', " - f"but got {type(losses)}." - " Please check the return type of custom loss function." - ) - - for key in losses: - assert isinstance( - losses[key], (paddle.Tensor, paddle.static.Variable, paddle.pir.Value) - ), ( - "Loss computed by custom function should be type of 'paddle.Tensor', " - f"'paddle.static.Variable' or 'paddle.pir.Value', but got {type(losses[key])}." - " Please check the return type of custom loss function." - ) - - return losses +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Union + +import paddle + +from ppsci.loss import base + + +class FunctionalLoss(base.Loss): + r"""Functional loss class, which allows to use custom loss computing function from given loss_expr for complex computation cases. + + $$ + L = f(\mathbf{x}, \mathbf{y}) + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + Args: + loss_expr (Callable[..., paddle.Tensor]): Function for custom loss computation. + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import FunctionalLoss + >>> import paddle.nn.functional as F + >>> def mse_sum_loss(output_dict, label_dict, weight_dict=None): + ... losses = 0 + ... for key in output_dict.keys(): + ... loss = F.mse_loss(output_dict[key], label_dict[key], "sum") + ... if weight_dict: + ... loss *= weight_dict[key] + ... losses += loss + ... return {"mse_loss": losses} + >>> loss = FunctionalLoss(mse_sum_loss) + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> weight_dict = {'u': 0.8, 'v': 0.2} + >>> result = loss(output_dict, label_dict, weight_dict) + >>> print(result) + {'mse_loss': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 17.89600182)} + """ + + def __init__( + self, + loss_expr: Callable[..., paddle.Tensor], + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + super().__init__(None, weight) + self.loss_expr = loss_expr + + def forward( + self, output_dict, label_dict=None, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = self.loss_expr(output_dict, label_dict, weight_dict) + + assert isinstance(losses, dict), ( + "Loss computed by custom function should be type of 'dict', " + f"but got {type(losses)}." + " Please check the return type of custom loss function." + ) + + for key in losses: + assert isinstance( + losses[key], (paddle.Tensor, paddle.static.Variable, paddle.pir.Value) + ), ( + "Loss computed by custom function should be type of 'paddle.Tensor', " + f"'paddle.static.Variable' or 'paddle.pir.Value', but got {type(losses[key])}." + " Please check the return type of custom loss function." + ) + + return losses diff --git a/ppsci/loss/integral.py b/ppsci/loss/integral.py index 74223c73fc..b9dd7d79f7 100644 --- a/ppsci/loss/integral.py +++ b/ppsci/loss/integral.py @@ -1,112 +1,112 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Dict -from typing import Optional -from typing import Union - -import paddle.nn.functional as F -from typing_extensions import Literal - -from ppsci.loss import base - -if TYPE_CHECKING: - import paddle - - -class IntegralLoss(base.Loss): - r"""Class for integral loss with Monte-Carlo integration algorithm. - - $$ - L = - \begin{cases} - \dfrac{1}{N} \Vert \displaystyle\sum_{i=1}^{M}{\mathbf{s}_i \cdot \mathbf{x}_i} - \mathbf{y} \Vert_2^2, & \text{if reduction='mean'} \\ - \Vert \displaystyle\sum_{i=0}^{M}{\mathbf{s}_i \cdot \mathbf{x}_i} - \mathbf{y} \Vert_2^2, & \text{if reduction='sum'} - \end{cases} - $$ - - $$ - \mathbf{x}, \mathbf{s} \in \mathcal{R}^{M \times N}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - Args: - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import IntegralLoss - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]), - ... 'area': paddle.to_tensor([[0.01, 0.02, 0.03], [0.01, 0.02, 0.03]])} - >>> label_dict = {'u': paddle.to_tensor([-1.8, 0.0]), - ... 
'v': paddle.to_tensor([0.1, 0.1])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> loss = IntegralLoss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 1.40780795), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.00131200)} - - >>> loss = IntegralLoss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 2.81561589), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.00262400)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - loss = F.mse_loss( - (output_dict[key] * output_dict["area"]).sum(axis=1), - label_dict[key], - "none", - ) - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Dict +from typing import Optional +from typing import Union + +import paddle.nn.functional as F +from typing_extensions import Literal + +from ppsci.loss import base + +if TYPE_CHECKING: + import paddle + + +class IntegralLoss(base.Loss): + r"""Class for integral loss with Monte-Carlo integration algorithm. + + $$ + L = + \begin{cases} + \dfrac{1}{N} \Vert \displaystyle\sum_{i=1}^{M}{\mathbf{s}_i \cdot \mathbf{x}_i} - \mathbf{y} \Vert_2^2, & \text{if reduction='mean'} \\ + \Vert \displaystyle\sum_{i=0}^{M}{\mathbf{s}_i \cdot \mathbf{x}_i} - \mathbf{y} \Vert_2^2, & \text{if reduction='sum'} + \end{cases} + $$ + + $$ + \mathbf{x}, \mathbf{s} \in \mathcal{R}^{M \times N}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + Args: + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import IntegralLoss + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]), + ... 
'area': paddle.to_tensor([[0.01, 0.02, 0.03], [0.01, 0.02, 0.03]])} + >>> label_dict = {'u': paddle.to_tensor([-1.8, 0.0]), + ... 'v': paddle.to_tensor([0.1, 0.1])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> loss = IntegralLoss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 1.40780795), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.00131200)} + + >>> loss = IntegralLoss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 2.81561589), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.00262400)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + loss = F.mse_loss( + (output_dict[key] * output_dict["area"]).sum(axis=1), + label_dict[key], + "none", + ) + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses diff --git a/ppsci/loss/kl.py b/ppsci/loss/kl.py index c07c3ed2c6..3ef87d367e 100644 --- a/ppsci/loss/kl.py +++ b/ppsci/loss/kl.py @@ -1,51 +1,51 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Union - -import paddle -from typing_extensions import Literal - -from ppsci.loss import base - - -class KLLoss(base.Loss): - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict=None, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - mu, log_sigma = output_dict["mu"], output_dict["log_sigma"] - - base = paddle.exp(2.0 * log_sigma) + paddle.pow(mu, 2) - 1.0 - 2.0 * log_sigma - loss = 0.5 * paddle.sum(base) / mu.shape[0] - - losses["kl_loss"] = loss - - return loss +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
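KLLoss.forward in the hunk above fills losses["kl_loss"] but returns the bare scalar tensor, even though its signature declares Dict[str, paddle.Tensor] and every other loss in this package returns the dict. A standalone sketch of the same computation with the dict actually returned, kept as a plain function to stay self-contained:

import paddle

def kl_loss(output_dict):
    # Same computation as KLLoss.forward above, but returning the dict so the
    # result matches the declared Dict[str, paddle.Tensor] return type.
    mu, log_sigma = output_dict["mu"], output_dict["log_sigma"]
    base = paddle.exp(2.0 * log_sigma) + paddle.pow(mu, 2) - 1.0 - 2.0 * log_sigma
    return {"kl_loss": 0.5 * paddle.sum(base) / mu.shape[0]}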
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Union + +import paddle +from typing_extensions import Literal + +from ppsci.loss import base + + +class KLLoss(base.Loss): + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict=None, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + mu, log_sigma = output_dict["mu"], output_dict["log_sigma"] + + base = paddle.exp(2.0 * log_sigma) + paddle.pow(mu, 2) - 1.0 - 2.0 * log_sigma + loss = 0.5 * paddle.sum(base) / mu.shape[0] + + losses["kl_loss"] = loss + + return loss diff --git a/ppsci/loss/l1.py b/ppsci/loss/l1.py index 3edbc2e102..0b68aaaebe 100644 --- a/ppsci/loss/l1.py +++ b/ppsci/loss/l1.py @@ -1,219 +1,219 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Dict -from typing import Optional -from typing import Union - -import paddle.nn.functional as F -from typing_extensions import Literal - -from ppsci.loss import base - -if TYPE_CHECKING: - - import paddle - - -class L1Loss(base.Loss): - r"""Class for l1 loss. - - $$ - L = \Vert \mathbf{x} - \mathbf{y} \Vert_1 - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - when `reduction` is set to "mean" - - $$ - L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_1 \right) - $$ - - when `reduction` is set to "sum" - - $$ - L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_1 \right) - $$ - - Args: - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import L1Loss - >>> output_dict = {"u": paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... "v": paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {"u": paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 
"v": paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> weight = {"u": 0.8, "v": 0.2} - >>> loss = L1Loss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 3.), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.35999998)} - - >>> loss = L1Loss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 6.), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.71999997)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - loss = F.l1_loss(output_dict[key], label_dict[key], "none") - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - - if "area" in output_dict: - loss *= output_dict["area"] - - loss = loss.sum(axis=1) - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses - - -class PeriodicL1Loss(base.Loss): - r"""Class for periodic l1 loss. - - $$ - L = \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 - $$ - - $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output, - $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output. - - when `reduction` is set to "mean" - - $$ - L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right) - $$ - - when `reduction` is set to "sum" - - $$ - L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right) - $$ - - Args: - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import PeriodicL1Loss - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]), - ... 
'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> loss = PeriodicL1Loss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 3.35999990), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.83999997)} - - >>> loss = PeriodicL1Loss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 3.35999990), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.83999997)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - n_output = len(output_dict[key]) - if n_output % 2 > 0: - raise ValueError( - f"Length of output({n_output}) of key({key}) should be even." - ) - - n_output //= 2 - loss = F.l1_loss( - output_dict[key][:n_output], output_dict[key][n_output:], "none" - ) - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - if "area" in output_dict: - loss *= output_dict["area"] - - loss = loss.sum(axis=1) - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Dict +from typing import Optional +from typing import Union + +import paddle.nn.functional as F +from typing_extensions import Literal + +from ppsci.loss import base + +if TYPE_CHECKING: + + import paddle + + +class L1Loss(base.Loss): + r"""Class for l1 loss. + + $$ + L = \Vert \mathbf{x} - \mathbf{y} \Vert_1 + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + when `reduction` is set to "mean" + + $$ + L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_1 \right) + $$ + + when `reduction` is set to "sum" + + $$ + L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_1 \right) + $$ + + Args: + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import L1Loss + >>> output_dict = {"u": paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 
"v": paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {"u": paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... "v": paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> weight = {"u": 0.8, "v": 0.2} + >>> loss = L1Loss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 3.), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.35999998)} + + >>> loss = L1Loss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 6.), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.71999997)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + loss = F.l1_loss(output_dict[key], label_dict[key], "none") + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + + if "area" in output_dict: + loss *= output_dict["area"] + + loss = loss.sum(axis=1) + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses + + +class PeriodicL1Loss(base.Loss): + r"""Class for periodic l1 loss. + + $$ + L = \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 + $$ + + $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output, + $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output. + + when `reduction` is set to "mean" + + $$ + L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right) + $$ + + when `reduction` is set to "sum" + + $$ + L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_1 \right) + $$ + + Args: + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import PeriodicL1Loss + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> loss = PeriodicL1Loss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 3.35999990), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.83999997)} + + >>> loss = PeriodicL1Loss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 3.35999990), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.83999997)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + n_output = len(output_dict[key]) + if n_output % 2 > 0: + raise ValueError( + f"Length of output({n_output}) of key({key}) should be even." + ) + + n_output //= 2 + loss = F.l1_loss( + output_dict[key][:n_output], output_dict[key][n_output:], "none" + ) + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + if "area" in output_dict: + loss *= output_dict["area"] + + loss = loss.sum(axis=1) + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses diff --git a/ppsci/loss/l2.py b/ppsci/loss/l2.py index 7b65a937c6..4118797080 100644 --- a/ppsci/loss/l2.py +++ b/ppsci/loss/l2.py @@ -1,310 +1,310 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Union - -import paddle -import paddle.nn.functional as F -from typing_extensions import Literal - -from ppsci.loss import base - - -class L2Loss(base.Loss): - r"""Class for l2 loss. - - $$ - L =\Vert \mathbf{x} - \mathbf{y} \Vert_2 - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - when `reduction` is set to "mean" - - $$ - L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right) - $$ - - when `reduction` is set to "sum" - - $$ - L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right) - $$ - - Args: - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. 
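Every loss in this package accepts weight either as a single float applied to all keys or as a per-key dict, as handled at the end of each forward() above; the doctests in these files exercise only the dict form, so a brief sketch of both (output values not asserted):

import paddle
from ppsci.loss import L2Loss

out = {"u": paddle.to_tensor([[0.5, 0.9]])}
lab = {"u": paddle.to_tensor([[0.0, 1.0]])}

print(L2Loss(weight=0.5)(out, lab))         # one float scales the loss of every key
print(L2Loss(weight={"u": 0.5})(out, lab))  # per-key dict, applied only where the key is present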
- - Examples: - >>> import paddle - >>> from ppsci.loss import L2Loss - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> loss = L2Loss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 2.52735591), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.26148924)} - >>> loss = L2Loss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 5.05471182), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.52297848)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - loss = F.mse_loss(output_dict[key], label_dict[key], "none") - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - - if "area" in output_dict: - loss *= output_dict["area"] - - loss = loss.sum(axis=1).sqrt() - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses - - -class PeriodicL2Loss(base.Loss): - r"""Class for Periodic l2 loss. - - $$ - L = \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 - $$ - - $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output, - $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output. - - when `reduction` is set to "mean" - - $$ - L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right) - $$ - - when `reduction` is set to "sum" - - $$ - L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right) - $$ - - Args: - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import PeriodicL2Loss - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]), - ... 
'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> loss = PeriodicL2Loss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 2.14065409), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.53516352)} - - >>> loss = PeriodicL2Loss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 2.14065409), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.53516352)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - n_output = len(output_dict[key]) - if n_output % 2 > 0: - raise ValueError( - f"Length of output({n_output}) of key({key}) should be even." - ) - n_output //= 2 - - loss = F.mse_loss( - output_dict[key][:n_output], output_dict[key][n_output:], "none" - ) - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - - if "area" in output_dict: - loss *= output_dict["area"] - - loss = loss.sum(axis=1).sqrt() - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses - - -class L2RelLoss(base.Loss): - r"""Class for l2 relative loss. - - $$ - L = \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - when `reduction` is set to "mean" - - $$ - L = MEAN \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right) - $$ - - when `reduction` is set to "sum" - - $$ - L = SUM \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right) - $$ - - Args: - reduction (Literal["mean", "sum"], optional): Specifies the reduction to apply to the output: 'mean' | 'sum'. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import L2RelLoss - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> loss = L2RelLoss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 1.08776188), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 1.84900820)} - - >>> loss = L2RelLoss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 2.17552376), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 3.69801641)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def rel_loss(self, x, y): - batch_size = x.shape[0] - x_ = x.reshape((batch_size, -1)) - y_ = y.reshape((batch_size, -1)) - diff_norms = paddle.norm(x_ - y_, p=2, axis=1) - y_norms = paddle.norm(y_, p=2, axis=1) - return diff_norms / y_norms - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - loss = self.rel_loss(output_dict[key], label_dict[key]) - if weight_dict: - loss *= weight_dict[key] - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - - if isinstance(self.weight, float): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Union + +import paddle +import paddle.nn.functional as F +from typing_extensions import Literal + +from ppsci.loss import base + + +class L2Loss(base.Loss): + r"""Class for l2 loss. + + $$ + L =\Vert \mathbf{x} - \mathbf{y} \Vert_2 + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + when `reduction` is set to "mean" + + $$ + L = MEAN \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right) + $$ + + when `reduction` is set to "sum" + + $$ + L = SUM \left( \Vert \mathbf{x} - \mathbf{y} \Vert_2 \right) + $$ + + Args: + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import L2Loss + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> loss = L2Loss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 2.52735591), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.26148924)} + >>> loss = L2Loss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 5.05471182), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.52297848)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + loss = F.mse_loss(output_dict[key], label_dict[key], "none") + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + + if "area" in output_dict: + loss *= output_dict["area"] + + loss = loss.sum(axis=1).sqrt() + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses + + +class PeriodicL2Loss(base.Loss): + r"""Class for Periodic l2 loss. + + $$ + L = \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 + $$ + + $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output, + $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output. + + when `reduction` is set to "mean" + + $$ + L = MEAN \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right) + $$ + + when `reduction` is set to "sum" + + $$ + L = SUM \left( \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2 \right) + $$ + + Args: + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import PeriodicL2Loss + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 2.2, 0.9], [1.1, 0.8, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 0.0, 1.0], [-0.2, 0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> loss = PeriodicL2Loss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 2.14065409), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.53516352)} + + >>> loss = PeriodicL2Loss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 2.14065409), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.53516352)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + n_output = len(output_dict[key]) + if n_output % 2 > 0: + raise ValueError( + f"Length of output({n_output}) of key({key}) should be even." + ) + n_output //= 2 + + loss = F.mse_loss( + output_dict[key][:n_output], output_dict[key][n_output:], "none" + ) + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + + if "area" in output_dict: + loss *= output_dict["area"] + + loss = loss.sum(axis=1).sqrt() + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses + + +class L2RelLoss(base.Loss): + r"""Class for l2 relative loss. + + $$ + L = \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + when `reduction` is set to "mean" + + $$ + L = MEAN \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right) + $$ + + when `reduction` is set to "sum" + + $$ + L = SUM \left( \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\Vert \mathbf{y} \Vert_2} \right) + $$ + + Args: + reduction (Literal["mean", "sum"], optional): Specifies the reduction to apply to the output: 'mean' | 'sum'. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import L2RelLoss + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> loss = L2RelLoss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 1.08776188), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 1.84900820)} + + >>> loss = L2RelLoss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 2.17552376), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 3.69801641)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def rel_loss(self, x, y): + batch_size = x.shape[0] + x_ = x.reshape((batch_size, -1)) + y_ = y.reshape((batch_size, -1)) + diff_norms = paddle.norm(x_ - y_, p=2, axis=1) + y_norms = paddle.norm(y_, p=2, axis=1) + return diff_norms / y_norms + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + loss = self.rel_loss(output_dict[key], label_dict[key]) + if weight_dict: + loss *= weight_dict[key] + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + + if isinstance(self.weight, float): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses diff --git a/ppsci/loss/mae.py b/ppsci/loss/mae.py index ff7869535c..5c57a59db1 100644 --- a/ppsci/loss/mae.py +++ b/ppsci/loss/mae.py @@ -1,109 +1,109 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Dict -from typing import Optional -from typing import Union - -import paddle.nn.functional as F -from typing_extensions import Literal - -from ppsci.loss import base - -if TYPE_CHECKING: - import paddle - - -class MAELoss(base.Loss): - r"""Class for mean absolute error loss. - - $$ - L = - \begin{cases} - \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_1, & \text{if reduction='mean'} \\ - \Vert {\mathbf{x}-\mathbf{y}} \Vert_1, & \text{if reduction='sum'} - \end{cases} - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - Args: - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import MAELoss - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 
'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> loss = MAELoss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 1.50000000), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.17999999)} - - >>> loss = MAELoss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 6.), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.71999997)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - loss = F.l1_loss(output_dict[key], label_dict[key], "none") - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - - if "area" in output_dict: - loss *= output_dict["area"] - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Dict +from typing import Optional +from typing import Union + +import paddle.nn.functional as F +from typing_extensions import Literal + +from ppsci.loss import base + +if TYPE_CHECKING: + import paddle + + +class MAELoss(base.Loss): + r"""Class for mean absolute error loss. + + $$ + L = + \begin{cases} + \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_1, & \text{if reduction='mean'} \\ + \Vert {\mathbf{x}-\mathbf{y}} \Vert_1, & \text{if reduction='sum'} + \end{cases} + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + Args: + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import MAELoss + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> loss = MAELoss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 1.50000000), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.17999999)} + + >>> loss = MAELoss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 6.), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.71999997)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + loss = F.l1_loss(output_dict[key], label_dict[key], "none") + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + + if "area" in output_dict: + loss *= output_dict["area"] + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses diff --git a/ppsci/loss/mse.py b/ppsci/loss/mse.py index 184f11cc40..628db8a5bc 100644 --- a/ppsci/loss/mse.py +++ b/ppsci/loss/mse.py @@ -1,355 +1,355 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Union - -import paddle -import paddle.nn.functional as F -from typing_extensions import Literal - -from ppsci.loss import base - - -class MSELoss(base.Loss): - r"""Class for mean squared error loss. - - $$ - L = - \begin{cases} - \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2, & \text{if reduction='mean'} \\ - \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2, & \text{if reduction='sum'} - \end{cases} - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - Args: - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import MSELoss - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> loss = MSELoss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 4.28600025), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.18800001)} - - >>> loss = MSELoss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 17.14400101), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.75200003)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - loss = F.mse_loss(output_dict[key], label_dict[key], "none") - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - - if "area" in output_dict: - loss *= output_dict["area"] - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses - - -class CausalMSELoss(base.Loss): - r"""Class for mean squared error loss. - - $$ - L = \frac{1}{M} \displaystyle\sum_{i=1}^M{w_i} \mathcal{L}_r^i, - $$ - - where $w_i=\exp (-\epsilon \displaystyle\sum_{k=1}^{i-1} \mathcal{L}_r^k), i=2,3, \ldots, M.$ - - Args: - n_chunks (int): $M$, Number of split time windows. - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - tol (float, optional): Causal tolerance, i.e. $\epsilon$ in paper. Defaults to 1.0. 
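# Illustrative sketch (not part of the patch): what L2RelLoss.rel_loss in ppsci/loss/l2.py
# above computes for the 'u' entry of its docstring example -- a per-sample ratio of L2
# norms, reduced by "mean" and scaled by the per-key weight 0.8. Tensors copied from that example.
import paddle

x = paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])    # output_dict['u']
y = paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]])   # label_dict['u']
ratio = paddle.norm(x - y, p=2, axis=1) / paddle.norm(y, p=2, axis=1)
print(float(ratio.mean() * 0.8))                   # ~1.0878, matching 1.08776188 in the docstring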
- - Examples: - >>> import paddle - >>> from ppsci.loss import CausalMSELoss - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9, 1.0], [1.1, -1.3, 0.0]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0, -0.1], [-0.2, 2.5, 2.0]])} - >>> loss = CausalMSELoss(n_chunks=3) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.96841478)} - """ - - def __init__( - self, - n_chunks: int, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - tol: float = 1.0, - ): - if n_chunks <= 0: - raise ValueError(f"n_chunks should be positive, but got {n_chunks}") - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - self.n_chunks = n_chunks - self.tol = tol - self.register_buffer( - "acc_mat", paddle.tril(paddle.ones([n_chunks, n_chunks]), -1) - ) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - loss = F.mse_loss(output_dict[key], label_dict[key], "none") - if weight_dict and key in weight_dict: - loss *= weight_dict[key] - - if "area" in output_dict: - loss *= output_dict["area"] - - # causal weighting - loss_t = loss.reshape([self.n_chunks, -1]) # [nt, nx] - weight_t = paddle.exp( - -self.tol * (self.acc_mat @ loss_t.mean(-1, keepdim=True)) - ) # [nt, nt] x [nt, 1] ==> [nt, 1] - assert weight_t.shape[0] == self.n_chunks - loss = loss_t * weight_t.detach() - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses - - -class MSELossWithL2Decay(MSELoss): - r"""MSELoss with L2 decay. - - $$ - L = - \begin{cases} - \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2 + \displaystyle\sum_{i=1}^{M}{\Vert \mathbf{K_i} \Vert_F^2}, & \text{if reduction='mean'} \\ - \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2 + \displaystyle\sum_{i=1}^{M}{\Vert \mathbf{K_i} \Vert_F^2}, & \text{if reduction='sum'} - \end{cases} - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}, \mathbf{K_i} \in \mathcal{R}^{O_i \times P_i} - $$ - - $M$ is the number of which apply regularization on. - - Args: - reduction (Literal["mean", "sum"], optional): Specifies the reduction to apply to the output: 'mean' | 'sum'. Defaults to "mean". - regularization_dict (Optional[Dict[str, float]]): Regularization dictionary. Defaults to None. - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Raises: - ValueError: reduction should be 'mean' or 'sum'. - - Examples: - >>> import paddle - >>> from ppsci.loss import MSELossWithL2Decay - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> regularization_dict = {'u': 2.0} - >>> loss = MSELossWithL2Decay(regularization_dict=regularization_dict, weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 7.91999960), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.18800001)} - - >>> regularization_dict = {'v': 1.0} - >>> loss = MSELossWithL2Decay(reduction="sum", regularization_dict=regularization_dict, weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 17.14400101), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 3.95999980)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - regularization_dict: Optional[Dict[str, float]] = None, - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - self.regularization_dict = regularization_dict - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = super().forward(output_dict, label_dict, weight_dict) - - if self.regularization_dict is not None: - for reg_key, reg_weight in self.regularization_dict.items(): - loss = output_dict[reg_key].pow(2).sum() - losses[reg_key] = loss * reg_weight - - return losses - - -class PeriodicMSELoss(base.Loss): - r"""Class for periodic mean squared error loss. - - $$ - L = - \begin{cases} - \dfrac{1}{N} \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2^2, & \text{if reduction='mean'} \\ - \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2^2, & \text{if reduction='sum'} - \end{cases} - $$ - - $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output, - $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output. - - Args: - reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". - weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import PeriodicMSELoss - - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> weight = {'u': 0.8, 'v': 0.2} - >>> loss = PeriodicMSELoss(weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 2.07999969), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.51999992)} - - >>> loss = PeriodicMSELoss(reduction="sum", weight=weight) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 4.15999937), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 1.03999984)} - """ - - def __init__( - self, - reduction: Literal["mean", "sum"] = "mean", - weight: Optional[Union[float, Dict[str, float]]] = None, - ): - if reduction not in ["mean", "sum"]: - raise ValueError( - f"reduction should be 'mean' or 'sum', but got {reduction}" - ) - super().__init__(reduction, weight) - - def forward( - self, output_dict, label_dict, weight_dict=None - ) -> Dict[str, "paddle.Tensor"]: - losses = {} - - for key in label_dict: - n_output = len(output_dict[key]) - if n_output % 2 > 0: - raise ValueError( - f"Length of output({n_output}) of key({key}) should be even." - ) - - n_output //= 2 - loss = F.mse_loss( - output_dict[key][:n_output], output_dict[key][n_output:], "none" - ) - if weight_dict: - loss *= weight_dict[key] - if "area" in output_dict: - loss *= output_dict["area"] - - if self.reduction == "sum": - loss = loss.sum() - elif self.reduction == "mean": - loss = loss.mean() - - if isinstance(self.weight, (float, int)): - loss *= self.weight - elif isinstance(self.weight, dict) and key in self.weight: - loss *= self.weight[key] - - losses[key] = loss - - return losses +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Union + +import paddle +import paddle.nn.functional as F +from typing_extensions import Literal + +from ppsci.loss import base + + +class MSELoss(base.Loss): + r"""Class for mean squared error loss. + + $$ + L = + \begin{cases} + \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2, & \text{if reduction='mean'} \\ + \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2, & \text{if reduction='sum'} + \end{cases} + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + Args: + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import MSELoss + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> loss = MSELoss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 4.28600025), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.18800001)} + + >>> loss = MSELoss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 17.14400101), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.75200003)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + loss = F.mse_loss(output_dict[key], label_dict[key], "none") + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + + if "area" in output_dict: + loss *= output_dict["area"] + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses + + +class CausalMSELoss(base.Loss): + r"""Class for mean squared error loss. + + $$ + L = \frac{1}{M} \displaystyle\sum_{i=1}^M{w_i} \mathcal{L}_r^i, + $$ + + where $w_i=\exp (-\epsilon \displaystyle\sum_{k=1}^{i-1} \mathcal{L}_r^k), i=2,3, \ldots, M.$ + + Args: + n_chunks (int): $M$, Number of split time windows. + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + tol (float, optional): Causal tolerance, i.e. $\epsilon$ in paper. Defaults to 1.0. 
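# Illustrative sketch (not part of the patch): how CausalMSELoss above turns per-chunk
# residuals into causal weights w_i = exp(-tol * sum_{k<i} L_k) via the strictly
# lower-triangular acc_mat buffer. `chunk_mse` is a hypothetical [n_chunks, 1] tensor of
# mean residuals per time window, standing in for loss_t.mean(-1, keepdim=True).
import paddle

n_chunks, tol = 3, 1.0
chunk_mse = paddle.to_tensor([[0.4], [0.9], [2.5]])
acc_mat = paddle.tril(paddle.ones([n_chunks, n_chunks]), -1)   # strictly lower triangular
w = paddle.exp(-tol * (acc_mat @ chunk_mse))                   # w_0 = 1, later windows decay
loss = (chunk_mse * w.detach()).mean()                         # reduction="mean"
print(w.numpy().ravel(), float(loss))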
+ + Examples: + >>> import paddle + >>> from ppsci.loss import CausalMSELoss + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9, 1.0], [1.1, -1.3, 0.0]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0, -0.1], [-0.2, 2.5, 2.0]])} + >>> loss = CausalMSELoss(n_chunks=3) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.96841478)} + """ + + def __init__( + self, + n_chunks: int, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + tol: float = 1.0, + ): + if n_chunks <= 0: + raise ValueError(f"n_chunks should be positive, but got {n_chunks}") + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + self.n_chunks = n_chunks + self.tol = tol + self.register_buffer( + "acc_mat", paddle.tril(paddle.ones([n_chunks, n_chunks]), -1) + ) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + loss = F.mse_loss(output_dict[key], label_dict[key], "none") + if weight_dict and key in weight_dict: + loss *= weight_dict[key] + + if "area" in output_dict: + loss *= output_dict["area"] + + # causal weighting + loss_t = loss.reshape([self.n_chunks, -1]) # [nt, nx] + weight_t = paddle.exp( + -self.tol * (self.acc_mat @ loss_t.mean(-1, keepdim=True)) + ) # [nt, nt] x [nt, 1] ==> [nt, 1] + assert weight_t.shape[0] == self.n_chunks + loss = loss_t * weight_t.detach() + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses + + +class MSELossWithL2Decay(MSELoss): + r"""MSELoss with L2 decay. + + $$ + L = + \begin{cases} + \dfrac{1}{N} \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2 + \displaystyle\sum_{i=1}^{M}{\Vert \mathbf{K_i} \Vert_F^2}, & \text{if reduction='mean'} \\ + \Vert {\mathbf{x}-\mathbf{y}} \Vert_2^2 + \displaystyle\sum_{i=1}^{M}{\Vert \mathbf{K_i} \Vert_F^2}, & \text{if reduction='sum'} + \end{cases} + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N}, \mathbf{K_i} \in \mathcal{R}^{O_i \times P_i} + $$ + + $M$ is the number of which apply regularization on. + + Args: + reduction (Literal["mean", "sum"], optional): Specifies the reduction to apply to the output: 'mean' | 'sum'. Defaults to "mean". + regularization_dict (Optional[Dict[str, float]]): Regularization dictionary. Defaults to None. + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Raises: + ValueError: reduction should be 'mean' or 'sum'. + + Examples: + >>> import paddle + >>> from ppsci.loss import MSELossWithL2Decay + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> regularization_dict = {'u': 2.0} + >>> loss = MSELossWithL2Decay(regularization_dict=regularization_dict, weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 7.91999960), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.18800001)} + + >>> regularization_dict = {'v': 1.0} + >>> loss = MSELossWithL2Decay(reduction="sum", regularization_dict=regularization_dict, weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 17.14400101), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 3.95999980)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + regularization_dict: Optional[Dict[str, float]] = None, + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + self.regularization_dict = regularization_dict + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = super().forward(output_dict, label_dict, weight_dict) + + if self.regularization_dict is not None: + for reg_key, reg_weight in self.regularization_dict.items(): + loss = output_dict[reg_key].pow(2).sum() + losses[reg_key] = loss * reg_weight + + return losses + + +class PeriodicMSELoss(base.Loss): + r"""Class for periodic mean squared error loss. + + $$ + L = + \begin{cases} + \dfrac{1}{N} \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2^2, & \text{if reduction='mean'} \\ + \Vert \mathbf{x_l}-\mathbf{x_r} \Vert_2^2, & \text{if reduction='sum'} + \end{cases} + $$ + + $\mathbf{x_l} \in \mathcal{R}^{N}$ is the first half of batch output, + $\mathbf{x_r} \in \mathcal{R}^{N}$ is the second half of batch output. + + Args: + reduction (Literal["mean", "sum"], optional): Reduction method. Defaults to "mean". + weight (Optional[Union[float, Dict[str, float]]]): Weight for loss. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import PeriodicMSELoss + + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> weight = {'u': 0.8, 'v': 0.2} + >>> loss = PeriodicMSELoss(weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 2.07999969), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.51999992)} + + >>> loss = PeriodicMSELoss(reduction="sum", weight=weight) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 4.15999937), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 1.03999984)} + """ + + def __init__( + self, + reduction: Literal["mean", "sum"] = "mean", + weight: Optional[Union[float, Dict[str, float]]] = None, + ): + if reduction not in ["mean", "sum"]: + raise ValueError( + f"reduction should be 'mean' or 'sum', but got {reduction}" + ) + super().__init__(reduction, weight) + + def forward( + self, output_dict, label_dict, weight_dict=None + ) -> Dict[str, "paddle.Tensor"]: + losses = {} + + for key in label_dict: + n_output = len(output_dict[key]) + if n_output % 2 > 0: + raise ValueError( + f"Length of output({n_output}) of key({key}) should be even." + ) + + n_output //= 2 + loss = F.mse_loss( + output_dict[key][:n_output], output_dict[key][n_output:], "none" + ) + if weight_dict: + loss *= weight_dict[key] + if "area" in output_dict: + loss *= output_dict["area"] + + if self.reduction == "sum": + loss = loss.sum() + elif self.reduction == "mean": + loss = loss.mean() + + if isinstance(self.weight, (float, int)): + loss *= self.weight + elif isinstance(self.weight, dict) and key in self.weight: + loss *= self.weight[key] + + losses[key] = loss + + return losses diff --git a/ppsci/loss/mtl/__init__.py b/ppsci/loss/mtl/__init__.py index 3bff2aaa7f..f85ad34fd9 100644 --- a/ppsci/loss/mtl/__init__.py +++ b/ppsci/loss/mtl/__init__.py @@ -1,49 +1,49 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy - -from ppsci.loss.mtl.agda import AGDA -from ppsci.loss.mtl.base import LossAggregator -from ppsci.loss.mtl.grad_norm import GradNorm -from ppsci.loss.mtl.ntk import NTK -from ppsci.loss.mtl.pcgrad import PCGrad -from ppsci.loss.mtl.relobralo import Relobralo -from ppsci.loss.mtl.sum import Sum - -__all__ = [ - "AGDA", - "GradNorm", - "LossAggregator", - "PCGrad", - "Relobralo", - "Sum", - "NTK", -] - - -def build_mtl_aggregator(cfg): - """Build loss aggregator with multi-task learning method. - - Args: - cfg (DictConfig): Aggregator config. - - Returns: - Loss: Callable loss aggregator object. - """ - cfg = copy.deepcopy(cfg) - - aggregator_cls = cfg.pop("name") - aggregator = eval(aggregator_cls)(**cfg) - return aggregator +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
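# Illustrative sketch (not part of the patch): PeriodicMSELoss above compares the first and
# second halves of the batch; this reproduces the 'u' value of its docstring example
# (weight 0.8, reduction "mean"). Note that label_dict is not used by this loss.
import paddle

out = paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])        # output_dict['u']
n = out.shape[0] // 2
print(float(((out[:n] - out[n:]) ** 2).mean() * 0.8))     # ~2.08, matching 2.07999969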
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from ppsci.loss.mtl.agda import AGDA +from ppsci.loss.mtl.base import LossAggregator +from ppsci.loss.mtl.grad_norm import GradNorm +from ppsci.loss.mtl.ntk import NTK +from ppsci.loss.mtl.pcgrad import PCGrad +from ppsci.loss.mtl.relobralo import Relobralo +from ppsci.loss.mtl.sum import Sum + +__all__ = [ + "AGDA", + "GradNorm", + "LossAggregator", + "PCGrad", + "Relobralo", + "Sum", + "NTK", +] + + +def build_mtl_aggregator(cfg): + """Build loss aggregator with multi-task learning method. + + Args: + cfg (DictConfig): Aggregator config. + + Returns: + Loss: Callable loss aggregator object. + """ + cfg = copy.deepcopy(cfg) + + aggregator_cls = cfg.pop("name") + aggregator = eval(aggregator_cls)(**cfg) + return aggregator diff --git a/ppsci/loss/mtl/agda.py b/ppsci/loss/mtl/agda.py index d7ea3b43f5..832aca4bca 100644 --- a/ppsci/loss/mtl/agda.py +++ b/ppsci/loss/mtl/agda.py @@ -1,161 +1,161 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import ClassVar -from typing import List - -import paddle -from paddle import nn - -from ppsci.loss.mtl import base - - -class AGDA(base.LossAggregator): - r""" - **A**daptive **G**radient **D**escent **A**lgorithm - - [Physics-informed neural network based on a new adaptive gradient descent algorithm for solving partial differential equations of flow problems](https://pubs.aip.org/aip/pof/article-abstract/35/6/063608/2899773/Physics-informed-neural-network-based-on-a-new) - - NOTE: This loss aggregator is only suitable for two-task learning and the first task loss must be PDE loss. - - Attributes: - should_persist(bool): Whether to persist the loss aggregator when saving. - Those loss aggregators with parameters and/or buffers should be persisted. - - Args: - model (nn.Layer): Training model. - M (int, optional): Smoothing period. Defaults to 100. - gamma (float, optional): Smooth factor. Defaults to 0.999. - - Examples: - >>> import paddle - >>> from ppsci.loss import mtl - >>> model = paddle.nn.Linear(3, 4) - >>> loss_aggregator = mtl.AGDA(model) - >>> for i in range(5): - ... x1 = paddle.randn([8, 3]) - ... x2 = paddle.randn([8, 3]) - ... y1 = model(x1) - ... y2 = model(x2) - ... pde_loss = paddle.sum(y1) - ... bc_loss = paddle.sum((y2 - 2) ** 2) - ... 
loss_aggregator({'pde_loss': pde_loss, 'bc_loss': bc_loss}).backward() - """ - should_persist: ClassVar[bool] = False - - def __init__(self, model: nn.Layer, M: int = 100, gamma: float = 0.999) -> None: - super().__init__(model) - self.M = M - self.gamma = gamma - self.Lf_smooth = 0 - self.Lu_smooth = 0 - self.Lf_tilde_acc = 0.0 - self.Lu_tilde_acc = 0.0 - - def __call__(self, losses, step: int = 0) -> "AGDA": - if len(losses) != 2: - raise ValueError( - f"Number of losses(tasks) for AGDA shoule be 2, but got {len(losses)}" - ) - return super().__call__(losses, step) - - def backward(self) -> None: - grads_list = self._compute_grads() - with paddle.no_grad(): - refined_grads = self._refine_grads(grads_list) - self._set_grads(refined_grads) - - def _compute_grads(self) -> List[paddle.Tensor]: - # compute all gradients derived by each loss - grads_list = [] # num_params x num_losses - for key in self.losses: - # backward with current loss - self.losses[key].backward() - grads_list.append( - paddle.concat( - [ - param.grad.clone().reshape([-1]) - for param in self.model.parameters() - if param.grad is not None - ], - axis=0, - ) - ) - # clear gradients for current loss for not affecting other loss - self.model.clear_gradients() - - return grads_list - - def _refine_grads(self, grads_list: List[paddle.Tensor]) -> List[paddle.Tensor]: - # compute moving average of L^smooth_i(n) - eq.(16) - losses_seq = list(self.losses.values()) - self.Lf_smooth = ( - self.gamma * self.Lf_smooth + (1 - self.gamma) * losses_seq[0].item() - ) - self.Lu_smooth = ( - self.gamma * self.Lu_smooth + (1 - self.gamma) * losses_seq[1].item() - ) - - # compute L^smooth_i(kM) - eq.(17) - if self.step % self.M == 0: - Lf_smooth_kM = self.Lf_smooth - Lu_smooth_kM = self.Lu_smooth - Lf_tilde = self.Lf_smooth / Lf_smooth_kM - Lu_tilde = self.Lu_smooth / Lu_smooth_kM - - # compute r_i(n) - eq.(18) - self.Lf_tilde_acc += Lf_tilde - self.Lu_tilde_acc += Lu_tilde - rf = Lf_tilde / self.Lf_tilde_acc - ru = Lu_tilde / self.Lu_tilde_acc - - # compute E(g(n)) - step1(1) - gf_magn = (grads_list[0] * grads_list[0]).sum().sqrt() - gu_magn = (grads_list[1] * grads_list[1]).sum().sqrt() - Eg = (gf_magn + gu_magn) / 2 - - # compute \omega_f(n) - step1(2) - omega_f = (rf * (Eg - gf_magn) + gf_magn) / gf_magn - omega_u = (ru * (Eg - gu_magn) + gu_magn) / gu_magn - - # compute g_bar(n) - step1(3) - gf_bar = omega_f * grads_list[0] - gu_bar = omega_u * grads_list[1] - - # compute gradient projection - step2(1) - dot_product = (gf_bar * gu_bar).sum() - if dot_product < 0: - gu_bar = gu_bar - (dot_product / (gf_bar * gf_bar).sum()) * gf_bar - grads_list = [gf_bar, gu_bar] - - proj_grads: List[paddle.Tensor] = [] - for j in range(len(self.losses)): - start_idx = 0 - for idx, var in enumerate(self.model.parameters()): - grad_shape = var.shape - flatten_dim = var.numel() - refined_grad = grads_list[j][start_idx : start_idx + flatten_dim] - refined_grad = paddle.reshape(refined_grad, grad_shape) - if len(proj_grads) < self.param_num: - proj_grads.append(refined_grad) - else: - proj_grads[idx] += refined_grad - start_idx += flatten_dim - return proj_grads - - def _set_grads(self, grads_list: List[paddle.Tensor]) -> None: - for i, param in enumerate(self.model.parameters()): - param.grad = grads_list[i] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import ClassVar +from typing import List + +import paddle +from paddle import nn + +from ppsci.loss.mtl import base + + +class AGDA(base.LossAggregator): + r""" + **A**daptive **G**radient **D**escent **A**lgorithm + + [Physics-informed neural network based on a new adaptive gradient descent algorithm for solving partial differential equations of flow problems](https://pubs.aip.org/aip/pof/article-abstract/35/6/063608/2899773/Physics-informed-neural-network-based-on-a-new) + + NOTE: This loss aggregator is only suitable for two-task learning and the first task loss must be PDE loss. + + Attributes: + should_persist(bool): Whether to persist the loss aggregator when saving. + Those loss aggregators with parameters and/or buffers should be persisted. + + Args: + model (nn.Layer): Training model. + M (int, optional): Smoothing period. Defaults to 100. + gamma (float, optional): Smooth factor. Defaults to 0.999. + + Examples: + >>> import paddle + >>> from ppsci.loss import mtl + >>> model = paddle.nn.Linear(3, 4) + >>> loss_aggregator = mtl.AGDA(model) + >>> for i in range(5): + ... x1 = paddle.randn([8, 3]) + ... x2 = paddle.randn([8, 3]) + ... y1 = model(x1) + ... y2 = model(x2) + ... pde_loss = paddle.sum(y1) + ... bc_loss = paddle.sum((y2 - 2) ** 2) + ... loss_aggregator({'pde_loss': pde_loss, 'bc_loss': bc_loss}).backward() + """ + should_persist: ClassVar[bool] = False + + def __init__(self, model: nn.Layer, M: int = 100, gamma: float = 0.999) -> None: + super().__init__(model) + self.M = M + self.gamma = gamma + self.Lf_smooth = 0 + self.Lu_smooth = 0 + self.Lf_tilde_acc = 0.0 + self.Lu_tilde_acc = 0.0 + + def __call__(self, losses, step: int = 0) -> "AGDA": + if len(losses) != 2: + raise ValueError( + f"Number of losses(tasks) for AGDA shoule be 2, but got {len(losses)}" + ) + return super().__call__(losses, step) + + def backward(self) -> None: + grads_list = self._compute_grads() + with paddle.no_grad(): + refined_grads = self._refine_grads(grads_list) + self._set_grads(refined_grads) + + def _compute_grads(self) -> List[paddle.Tensor]: + # compute all gradients derived by each loss + grads_list = [] # num_params x num_losses + for key in self.losses: + # backward with current loss + self.losses[key].backward() + grads_list.append( + paddle.concat( + [ + param.grad.clone().reshape([-1]) + for param in self.model.parameters() + if param.grad is not None + ], + axis=0, + ) + ) + # clear gradients for current loss for not affecting other loss + self.model.clear_gradients() + + return grads_list + + def _refine_grads(self, grads_list: List[paddle.Tensor]) -> List[paddle.Tensor]: + # compute moving average of L^smooth_i(n) - eq.(16) + losses_seq = list(self.losses.values()) + self.Lf_smooth = ( + self.gamma * self.Lf_smooth + (1 - self.gamma) * losses_seq[0].item() + ) + self.Lu_smooth = ( + self.gamma * self.Lu_smooth + (1 - self.gamma) * losses_seq[1].item() + ) + + # compute L^smooth_i(kM) - eq.(17) + if self.step % self.M == 0: + Lf_smooth_kM = self.Lf_smooth + Lu_smooth_kM = self.Lu_smooth + Lf_tilde = self.Lf_smooth / 
Lf_smooth_kM + Lu_tilde = self.Lu_smooth / Lu_smooth_kM + + # compute r_i(n) - eq.(18) + self.Lf_tilde_acc += Lf_tilde + self.Lu_tilde_acc += Lu_tilde + rf = Lf_tilde / self.Lf_tilde_acc + ru = Lu_tilde / self.Lu_tilde_acc + + # compute E(g(n)) - step1(1) + gf_magn = (grads_list[0] * grads_list[0]).sum().sqrt() + gu_magn = (grads_list[1] * grads_list[1]).sum().sqrt() + Eg = (gf_magn + gu_magn) / 2 + + # compute \omega_f(n) - step1(2) + omega_f = (rf * (Eg - gf_magn) + gf_magn) / gf_magn + omega_u = (ru * (Eg - gu_magn) + gu_magn) / gu_magn + + # compute g_bar(n) - step1(3) + gf_bar = omega_f * grads_list[0] + gu_bar = omega_u * grads_list[1] + + # compute gradient projection - step2(1) + dot_product = (gf_bar * gu_bar).sum() + if dot_product < 0: + gu_bar = gu_bar - (dot_product / (gf_bar * gf_bar).sum()) * gf_bar + grads_list = [gf_bar, gu_bar] + + proj_grads: List[paddle.Tensor] = [] + for j in range(len(self.losses)): + start_idx = 0 + for idx, var in enumerate(self.model.parameters()): + grad_shape = var.shape + flatten_dim = var.numel() + refined_grad = grads_list[j][start_idx : start_idx + flatten_dim] + refined_grad = paddle.reshape(refined_grad, grad_shape) + if len(proj_grads) < self.param_num: + proj_grads.append(refined_grad) + else: + proj_grads[idx] += refined_grad + start_idx += flatten_dim + return proj_grads + + def _set_grads(self, grads_list: List[paddle.Tensor]) -> None: + for i, param in enumerate(self.model.parameters()): + param.grad = grads_list[i] diff --git a/ppsci/loss/mtl/base.py b/ppsci/loss/mtl/base.py index eec88c9c00..d97d019609 100644 --- a/ppsci/loss/mtl/base.py +++ b/ppsci/loss/mtl/base.py @@ -1,68 +1,68 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import ClassVar -from typing import Dict -from typing import Union - -from paddle import nn - -if TYPE_CHECKING: - import paddle - - -class LossAggregator(nn.Layer): - """Base class of loss aggregator mainly for multitask learning. - - Attributes: - should_persist(bool): Whether to persist the loss aggregator when saving. - Those loss aggregators with parameters and/or buffers should be persisted. - - Args: - model (nn.Layer): Training model. 
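# Illustrative sketch (not part of the patch): AGDA's step-2 projection as written in
# _refine_grads above -- when the (scaled) PDE and BC/data gradients conflict, the BC/data
# gradient is projected onto the plane orthogonal to the PDE gradient. Toy 2-D gradients only.
import paddle

gf = paddle.to_tensor([1.0, 0.0])    # gradient driven by the PDE loss
gu = paddle.to_tensor([-1.0, 1.0])   # gradient driven by the BC/data loss
dot = (gf * gu).sum()
if dot < 0:                          # conflicting directions
    gu = gu - (dot / (gf * gf).sum()) * gf
print(gu.numpy())                    # -> [0., 1.]: the component opposing gf is removed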
- """ - - should_persist: ClassVar[bool] = False - - def __init__(self, model: nn.Layer) -> None: - super().__init__() - self.model = model - self.step = 0 - self.param_num = 0 - for param in self.model.parameters(): - if not param.stop_gradient: - self.param_num += 1 - - def forward( - self, losses: Dict[str, "paddle.Tensor"], step: int = 0 - ) -> Union["paddle.Tensor", "LossAggregator"]: - self.losses = losses - self.loss_num = len(losses) - self.step = step - return self - - def backward(self) -> None: - raise NotImplementedError( - f"'backward' should be implemented in subclass {self.__class__.__name__}" - ) - - def state_dict(self): - agg_state = super().state_dict() - model_state = self.model.state_dict() - # remove model parameters from state dict for already in pdparams - agg_state = {k: v for k, v in agg_state.items() if k not in model_state} - return agg_state +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import ClassVar +from typing import Dict +from typing import Union + +from paddle import nn + +if TYPE_CHECKING: + import paddle + + +class LossAggregator(nn.Layer): + """Base class of loss aggregator mainly for multitask learning. + + Attributes: + should_persist(bool): Whether to persist the loss aggregator when saving. + Those loss aggregators with parameters and/or buffers should be persisted. + + Args: + model (nn.Layer): Training model. + """ + + should_persist: ClassVar[bool] = False + + def __init__(self, model: nn.Layer) -> None: + super().__init__() + self.model = model + self.step = 0 + self.param_num = 0 + for param in self.model.parameters(): + if not param.stop_gradient: + self.param_num += 1 + + def forward( + self, losses: Dict[str, "paddle.Tensor"], step: int = 0 + ) -> Union["paddle.Tensor", "LossAggregator"]: + self.losses = losses + self.loss_num = len(losses) + self.step = step + return self + + def backward(self) -> None: + raise NotImplementedError( + f"'backward' should be implemented in subclass {self.__class__.__name__}" + ) + + def state_dict(self): + agg_state = super().state_dict() + model_state = self.model.state_dict() + # remove model parameters from state dict for already in pdparams + agg_state = {k: v for k, v in agg_state.items() if k not in model_state} + return agg_state diff --git a/ppsci/loss/mtl/grad_norm.py b/ppsci/loss/mtl/grad_norm.py index 309d3c257c..e67a344ce1 100644 --- a/ppsci/loss/mtl/grad_norm.py +++ b/ppsci/loss/mtl/grad_norm.py @@ -1,145 +1,145 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import ClassVar -from typing import Dict -from typing import List - -import paddle -from paddle import nn - -from ppsci.loss.mtl import base - -# from ppsci.utils import logger - - -class GradNorm(base.LossAggregator): - r"""GradNorm loss weighting algorithm. - - reference: [https://github.com/PredictiveIntelligenceLab/jaxpi/blob/main/jaxpi/models.py#L132-L146](https://github.com/PredictiveIntelligenceLab/jaxpi/blob/main/jaxpi/models.py#L132-L146) - - $$ - \begin{align*} - L^t &= \sum_{i=1}^{N}{\tilde{w}_i^t\cdot L_i^t}, \\ - \text{where } \\ - \tilde{w}_i^0&=1, \\ - \tilde{w}_i^t&=\tilde{w}_i^{t-1}\cdot m+w_i^t\cdot (1-m), t\ge1\\ - w_i^t&=\dfrac{\overline{\Vert \nabla_{\theta}{L_i^t} \Vert_2}}{\Vert \nabla_{\theta}{L_i^t} \Vert_2}, \\ - \overline{\Vert \nabla_{\theta}{L_i^t} \Vert_2}&=\dfrac{1}{N}\sum_{i=1}^N{\Vert \nabla_{\theta}{L_i^t} \Vert_2}, \\ - &t \text{ is the training step started from 0}. - \end{align*} - $$ - - Attributes: - should_persist(bool): Whether to persist the loss aggregator when saving. - Those loss aggregators with parameters and/or buffers should be persisted. - - Args: - model (nn.Layer): Training model. - num_losses (int, optional): Number of losses. Defaults to 1. - update_freq (int, optional): Weight updating frequency. Defaults to 1000. - momentum (float, optional): Momentum $m$ for moving weight. Defaults to 0.9. - init_weights (List[float]): Initial weights list. Defaults to None. - - Examples: - >>> import paddle - >>> from ppsci.loss import mtl - >>> model = paddle.nn.Linear(3, 4) - >>> loss_aggregator = mtl.GradNorm(model, num_losses=2) - >>> for i in range(5): - ... x1 = paddle.randn([8, 3]) - ... x2 = paddle.randn([8, 3]) - ... y1 = model(x1) - ... y2 = model(x2) - ... loss1 = paddle.sum(y1) - ... loss2 = paddle.sum((y2 - 2) ** 2) - ... loss_aggregator({'loss1': loss1, 'loss2': loss2}).backward() - """ - should_persist: ClassVar[bool] = True - weight: paddle.Tensor - - def __init__( - self, - model: nn.Layer, - num_losses: int = 1, - update_freq: int = 1000, - momentum: float = 0.9, - init_weights: List[float] = None, - ) -> None: - super().__init__(model) - self.step = 0 - self.num_losses = num_losses - self.update_freq = update_freq - self.momentum = momentum - if init_weights is not None and num_losses != len(init_weights): - raise ValueError( - f"Length of init_weights({len(init_weights)}) should be equal to " - f"num_losses({num_losses})." 
- ) - self.register_buffer( - "weight", - paddle.to_tensor(init_weights, dtype="float32") - if init_weights is not None - else paddle.ones([num_losses]), - ) - - def _compute_weight(self, losses: List["paddle.Tensor"]) -> List["paddle.Tensor"]: - grad_norms = [] - for loss in losses: - loss.backward(retain_graph=True) # NOTE: Keep graph for loss backward - with paddle.no_grad(): - grad_vector = paddle.concat( - [ - p.grad.reshape([-1]) - for p in self.model.parameters() - if p.grad is not None - ] - ) - grad_norms.append(paddle.linalg.norm(grad_vector, p=2)) - self.model.clear_gradients() - - mean_grad_norm = paddle.mean(paddle.stack(grad_norms)) - weight = [(mean_grad_norm / x) for x in grad_norms] - - return weight - - def __call__( - self, losses: Dict[str, "paddle.Tensor"], step: int = 0 - ) -> "paddle.Tensor": - assert len(losses) == self.num_losses, ( - f"Length of given losses({len(losses)}) should be equal to " - f"num_losses({self.num_losses})." - ) - self.step = step - - # compute current loss with moving weights - loss = 0.0 - for i, key in enumerate(losses): - if i == 0: - loss = self.weight[i] * losses[key] - else: - loss += self.weight[i] * losses[key] - - # update moving weights every 'update_freq' steps - if self.step % self.update_freq == 0: - weight = self._compute_weight(list(losses.values())) - for i in range(self.num_losses): - self.weight[i].set_value( - self.momentum * self.weight[i] + (1 - self.momentum) * weight[i] - ) - # logger.message(f"weight at step {self.step}: {self.weight.numpy()}") - - return loss +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import ClassVar +from typing import Dict +from typing import List + +import paddle +from paddle import nn + +from ppsci.loss.mtl import base + +# from ppsci.utils import logger + + +class GradNorm(base.LossAggregator): + r"""GradNorm loss weighting algorithm. + + reference: [https://github.com/PredictiveIntelligenceLab/jaxpi/blob/main/jaxpi/models.py#L132-L146](https://github.com/PredictiveIntelligenceLab/jaxpi/blob/main/jaxpi/models.py#L132-L146) + + $$ + \begin{align*} + L^t &= \sum_{i=1}^{N}{\tilde{w}_i^t\cdot L_i^t}, \\ + \text{where } \\ + \tilde{w}_i^0&=1, \\ + \tilde{w}_i^t&=\tilde{w}_i^{t-1}\cdot m+w_i^t\cdot (1-m), t\ge1\\ + w_i^t&=\dfrac{\overline{\Vert \nabla_{\theta}{L_i^t} \Vert_2}}{\Vert \nabla_{\theta}{L_i^t} \Vert_2}, \\ + \overline{\Vert \nabla_{\theta}{L_i^t} \Vert_2}&=\dfrac{1}{N}\sum_{i=1}^N{\Vert \nabla_{\theta}{L_i^t} \Vert_2}, \\ + &t \text{ is the training step started from 0}. + \end{align*} + $$ + + Attributes: + should_persist(bool): Whether to persist the loss aggregator when saving. + Those loss aggregators with parameters and/or buffers should be persisted. + + Args: + model (nn.Layer): Training model. + num_losses (int, optional): Number of losses. Defaults to 1. + update_freq (int, optional): Weight updating frequency. Defaults to 1000. 
+ momentum (float, optional): Momentum $m$ for moving weight. Defaults to 0.9. + init_weights (List[float]): Initial weights list. Defaults to None. + + Examples: + >>> import paddle + >>> from ppsci.loss import mtl + >>> model = paddle.nn.Linear(3, 4) + >>> loss_aggregator = mtl.GradNorm(model, num_losses=2) + >>> for i in range(5): + ... x1 = paddle.randn([8, 3]) + ... x2 = paddle.randn([8, 3]) + ... y1 = model(x1) + ... y2 = model(x2) + ... loss1 = paddle.sum(y1) + ... loss2 = paddle.sum((y2 - 2) ** 2) + ... loss_aggregator({'loss1': loss1, 'loss2': loss2}).backward() + """ + should_persist: ClassVar[bool] = True + weight: paddle.Tensor + + def __init__( + self, + model: nn.Layer, + num_losses: int = 1, + update_freq: int = 1000, + momentum: float = 0.9, + init_weights: List[float] = None, + ) -> None: + super().__init__(model) + self.step = 0 + self.num_losses = num_losses + self.update_freq = update_freq + self.momentum = momentum + if init_weights is not None and num_losses != len(init_weights): + raise ValueError( + f"Length of init_weights({len(init_weights)}) should be equal to " + f"num_losses({num_losses})." + ) + self.register_buffer( + "weight", + paddle.to_tensor(init_weights, dtype="float32") + if init_weights is not None + else paddle.ones([num_losses]), + ) + + def _compute_weight(self, losses: List["paddle.Tensor"]) -> List["paddle.Tensor"]: + grad_norms = [] + for loss in losses: + loss.backward(retain_graph=True) # NOTE: Keep graph for loss backward + with paddle.no_grad(): + grad_vector = paddle.concat( + [ + p.grad.reshape([-1]) + for p in self.model.parameters() + if p.grad is not None + ] + ) + grad_norms.append(paddle.linalg.norm(grad_vector, p=2)) + self.model.clear_gradients() + + mean_grad_norm = paddle.mean(paddle.stack(grad_norms)) + weight = [(mean_grad_norm / x) for x in grad_norms] + + return weight + + def __call__( + self, losses: Dict[str, "paddle.Tensor"], step: int = 0 + ) -> "paddle.Tensor": + assert len(losses) == self.num_losses, ( + f"Length of given losses({len(losses)}) should be equal to " + f"num_losses({self.num_losses})." + ) + self.step = step + + # compute current loss with moving weights + loss = 0.0 + for i, key in enumerate(losses): + if i == 0: + loss = self.weight[i] * losses[key] + else: + loss += self.weight[i] * losses[key] + + # update moving weights every 'update_freq' steps + if self.step % self.update_freq == 0: + weight = self._compute_weight(list(losses.values())) + for i in range(self.num_losses): + self.weight[i].set_value( + self.momentum * self.weight[i] + (1 - self.momentum) * weight[i] + ) + # logger.message(f"weight at step {self.step}: {self.weight.numpy()}") + + return loss diff --git a/ppsci/loss/mtl/ntk.py b/ppsci/loss/mtl/ntk.py index b2dab91fc7..561a4aae4a 100644 --- a/ppsci/loss/mtl/ntk.py +++ b/ppsci/loss/mtl/ntk.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -116,3 +117,91 @@ def __call__( self.weight[i].set_value(computed_weight[i]) return loss +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
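To make the GradNorm update above concrete: at an update step each loss gets the instantaneous weight w_i = mean(||grad L||) / ||grad L_i||, and the stored weights follow an exponential moving average refreshed every update_freq steps. A numpy sketch of just that arithmetic, where the gradient norms are made-up stand-ins for the values _compute_weight derives from the model:

import numpy as np

momentum, update_freq, num_losses = 0.9, 1000, 3
weight = np.ones(num_losses)                      # \tilde{w}^0 = 1

for step in range(3 * update_freq + 1):
    if step % update_freq == 0:
        # stand-in per-loss gradient norms ||grad L_i||_2 at this step
        grad_norms = np.array([4.0, 1.0, 0.5]) * np.random.uniform(0.5, 1.5)
        w_inst = grad_norms.mean() / grad_norms   # losses with small gradients get boosted
        weight = momentum * weight + (1 - momentum) * w_inst

# "weight" multiplies each loss when forming the total training loss; note that
# w_inst * grad_norms is constant, i.e. the weighted gradient norms are equalized.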
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import ClassVar +from typing import List + +import paddle + +from ppsci.loss.mtl import base + +if TYPE_CHECKING: + from paddle import nn + + +class NTK(base.LossAggregator): + should_persist: ClassVar[bool] = True + + def __init__( + self, + model: nn.Layer, + num_losses: int = 1, + update_freq: int = 1000, + ) -> None: + super().__init__(model) + self.step = 0 + self.num_losses = num_losses + self.update_freq = update_freq + self.register_buffer("weight", paddle.ones([num_losses])) + + def _compute_weight(self, losses): + ntk_sum = 0 + ntk_value = [] + for loss in losses: + loss.backward(retain_graph=True) # NOTE: Keep graph for loss backward + with paddle.no_grad(): + grad = paddle.concat( + [ + p.grad.reshape([-1]) + for p in self.model.parameters() + if p.grad is not None + ] + ) + ntk_value.append( + paddle.sqrt( + paddle.sum(grad.detach() ** 2), + ) + ) + + ntk_sum += paddle.sum(paddle.stack(ntk_value, axis=0)) + ntk_weight = [(ntk_sum / x) for x in ntk_value] + + return ntk_weight + + def __call__(self, losses: List["paddle.Tensor"], step: int = 0) -> "paddle.Tensor": + assert len(losses) == self.num_losses, ( + f"Length of given losses({len(losses)}) should be equal to " + f"num_losses({self.num_losses})." + ) + self.step = step + + # compute current loss with moving weights + loss = self.weight[0] * losses[0] + for i in range(1, len(losses)): + loss += self.weight[i] * losses[i] + + # update moving weights every 'update_freq' steps + if self.step % self.update_freq == 0: + computed_weight = self._compute_weight(losses) + for i in range(self.num_losses): + self.weight[i].set_value(computed_weight[i]) + + return loss +>>>>>>> Stashed changes diff --git a/ppsci/loss/mtl/pcgrad.py b/ppsci/loss/mtl/pcgrad.py index 45b5923110..a0cae52625 100644 --- a/ppsci/loss/mtl/pcgrad.py +++ b/ppsci/loss/mtl/pcgrad.py @@ -1,124 +1,124 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
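The NTK aggregator above reduces each loss to the l2 norm of its parameter gradient and weights it by the total of those norms divided by its own norm; compared with GradNorm the normalizer is the sum rather than the mean, and the weights are overwritten directly instead of being blended with a momentum term. A small numpy illustration with made-up norms:

import numpy as np

ntk_value = np.array([3.0, 1.0, 0.5])   # made-up sqrt(sum(grad_i ** 2)) per loss

ntk_sum = ntk_value.sum()               # 4.5
ntk_weight = ntk_sum / ntk_value        # [1.5, 4.5, 9.0]

# as in __call__ above, the total loss would be sum_i ntk_weight[i] * losses[i]
print(ntk_weight)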
- -from __future__ import annotations - -from typing import ClassVar -from typing import List - -import numpy as np -import paddle -from paddle import nn - -from ppsci.loss.mtl import base - - -class PCGrad(base.LossAggregator): - r""" - **P**rojecting **C**onflicting Gradients - - [Gradient Surgery for Multi-Task Learning](https://papers.nips.cc/paper/2020/hash/3fe78a8acf5fda99de95303940a2420c-Abstract.html) - - Code reference: [https://github.com/tianheyu927/PCGrad/blob/master/PCGrad_tf.py](https://github.com/tianheyu927/PCGrad/blob/master/PCGrad_tf.py) - - Attributes: - should_persist(bool): Whether to persist the loss aggregator when saving. - Those loss aggregators with parameters and/or buffers should be persisted. - - Args: - model (nn.Layer): Training model. - - Examples: - >>> import paddle - >>> from ppsci.loss import mtl - >>> model = paddle.nn.Linear(3, 4) - >>> loss_aggregator = mtl.PCGrad(model) - >>> for i in range(5): - ... x1 = paddle.randn([8, 3]) - ... x2 = paddle.randn([8, 3]) - ... y1 = model(x1) - ... y2 = model(x2) - ... loss1 = paddle.sum(y1) - ... loss2 = paddle.sum((y2 - 2) ** 2) - ... loss_aggregator({'loss1': loss1, 'loss2': loss2}).backward() - """ - should_persist: ClassVar[bool] = False - - def __init__(self, model: nn.Layer) -> None: - super().__init__(model) - self._zero = paddle.zeros([]) - - def backward(self) -> None: - # shuffle order of losses - keys = list(self.losses.keys()) - np.random.shuffle(keys) - self.losses = {key: self.losses[key] for key in keys} - - grads_list = self._compute_grads() - with paddle.no_grad(): - refined_grads = self._refine_grads(grads_list) - self._set_grads(refined_grads) - - def _compute_grads(self) -> List[paddle.Tensor]: - # compute all gradients derived by each loss - grads_list = [] # num_params x num_losses - for key in self.losses: - # backward with current loss - self.losses[key].backward() - grads_list.append( - paddle.concat( - [ - param.grad.clone().reshape([-1]) - for param in self.model.parameters() - if param.grad is not None - ], - axis=0, - ) - ) - # clear gradients for current loss for not affecting other loss - self.model.clear_gradients() - - return grads_list - - def _refine_grads(self, grads_list: List[paddle.Tensor]) -> List[paddle.Tensor]: - def proj_grad(grad: paddle.Tensor): - for k in range(self.loss_num): - inner_product = paddle.sum(grad * grads_list[k]) - proj_direction = inner_product / paddle.sum( - grads_list[k] * grads_list[k] - ) - grad = grad - paddle.minimum(proj_direction, self._zero) * grads_list[k] - return grad - - grads_list = [proj_grad(grad) for grad in grads_list] - - # Unpack flattened projected gradients back to their original shapes. - proj_grads: List[paddle.Tensor] = [] - for j in range(self.loss_num): - start_idx = 0 - for idx, var in enumerate(self.model.parameters()): - grad_shape = var.shape - flatten_dim = var.numel() - refined_grad = grads_list[j][start_idx : start_idx + flatten_dim] - refined_grad = paddle.reshape(refined_grad, grad_shape) - if len(proj_grads) < self.param_num: - proj_grads.append(refined_grad) - else: - proj_grads[idx] += refined_grad - start_idx += flatten_dim - return proj_grads - - def _set_grads(self, grads_list: List[paddle.Tensor]) -> None: - for i, param in enumerate(self.model.parameters()): - param.grad = grads_list[i] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
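The surgery performed in _refine_grads above removes, from each task gradient, the component that points against another task's gradient; non-conflicting pairs (non-negative inner product) are left untouched. A numpy check of that rule on two hand-picked 2-D gradients:

import numpy as np

def project(g, others):
    # g <- g - min(<g, g_k> / <g_k, g_k>, 0) * g_k, applied for every other task k
    for g_k in others:
        coef = min(np.dot(g, g_k) / np.dot(g_k, g_k), 0.0)
        g = g - coef * g_k
    return g

g1 = np.array([1.0, 1.0])
g2 = np.array([-1.0, 0.5])                 # conflicts with g1: dot(g1, g2) = -0.5 < 0

g1_proj = project(g1, [g2])                # -> [0.6, 1.2]
print(g1_proj, np.dot(g1_proj, g2))        # projected gradient is now orthogonal to g2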
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import ClassVar +from typing import List + +import numpy as np +import paddle +from paddle import nn + +from ppsci.loss.mtl import base + + +class PCGrad(base.LossAggregator): + r""" + **P**rojecting **C**onflicting Gradients + + [Gradient Surgery for Multi-Task Learning](https://papers.nips.cc/paper/2020/hash/3fe78a8acf5fda99de95303940a2420c-Abstract.html) + + Code reference: [https://github.com/tianheyu927/PCGrad/blob/master/PCGrad_tf.py](https://github.com/tianheyu927/PCGrad/blob/master/PCGrad_tf.py) + + Attributes: + should_persist(bool): Whether to persist the loss aggregator when saving. + Those loss aggregators with parameters and/or buffers should be persisted. + + Args: + model (nn.Layer): Training model. + + Examples: + >>> import paddle + >>> from ppsci.loss import mtl + >>> model = paddle.nn.Linear(3, 4) + >>> loss_aggregator = mtl.PCGrad(model) + >>> for i in range(5): + ... x1 = paddle.randn([8, 3]) + ... x2 = paddle.randn([8, 3]) + ... y1 = model(x1) + ... y2 = model(x2) + ... loss1 = paddle.sum(y1) + ... loss2 = paddle.sum((y2 - 2) ** 2) + ... loss_aggregator({'loss1': loss1, 'loss2': loss2}).backward() + """ + should_persist: ClassVar[bool] = False + + def __init__(self, model: nn.Layer) -> None: + super().__init__(model) + self._zero = paddle.zeros([]) + + def backward(self) -> None: + # shuffle order of losses + keys = list(self.losses.keys()) + np.random.shuffle(keys) + self.losses = {key: self.losses[key] for key in keys} + + grads_list = self._compute_grads() + with paddle.no_grad(): + refined_grads = self._refine_grads(grads_list) + self._set_grads(refined_grads) + + def _compute_grads(self) -> List[paddle.Tensor]: + # compute all gradients derived by each loss + grads_list = [] # num_params x num_losses + for key in self.losses: + # backward with current loss + self.losses[key].backward() + grads_list.append( + paddle.concat( + [ + param.grad.clone().reshape([-1]) + for param in self.model.parameters() + if param.grad is not None + ], + axis=0, + ) + ) + # clear gradients for current loss for not affecting other loss + self.model.clear_gradients() + + return grads_list + + def _refine_grads(self, grads_list: List[paddle.Tensor]) -> List[paddle.Tensor]: + def proj_grad(grad: paddle.Tensor): + for k in range(self.loss_num): + inner_product = paddle.sum(grad * grads_list[k]) + proj_direction = inner_product / paddle.sum( + grads_list[k] * grads_list[k] + ) + grad = grad - paddle.minimum(proj_direction, self._zero) * grads_list[k] + return grad + + grads_list = [proj_grad(grad) for grad in grads_list] + + # Unpack flattened projected gradients back to their original shapes. 
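# The unpacking step that follows walks model.parameters() in order and slices the
# flat projected-gradient vector by each parameter's element count, restoring the
# original shapes. A standalone numpy sketch of that bookkeeping (the two parameter
# shapes below are hypothetical, chosen only to make the slicing visible):
import numpy as np

param_shapes = [(3, 2), (4,)]                       # hypothetical parameter shapes
flat = np.arange(10, dtype=np.float64)              # flattened gradient (6 + 4 elements)

grads, start = [], 0
for shape in param_shapes:
    n = int(np.prod(shape))
    grads.append(flat[start:start + n].reshape(shape))
    start += n

print([g.shape for g in grads])                     # [(3, 2), (4,)]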
+ proj_grads: List[paddle.Tensor] = [] + for j in range(self.loss_num): + start_idx = 0 + for idx, var in enumerate(self.model.parameters()): + grad_shape = var.shape + flatten_dim = var.numel() + refined_grad = grads_list[j][start_idx : start_idx + flatten_dim] + refined_grad = paddle.reshape(refined_grad, grad_shape) + if len(proj_grads) < self.param_num: + proj_grads.append(refined_grad) + else: + proj_grads[idx] += refined_grad + start_idx += flatten_dim + return proj_grads + + def _set_grads(self, grads_list: List[paddle.Tensor]) -> None: + for i, param in enumerate(self.model.parameters()): + param.grad = grads_list[i] diff --git a/ppsci/loss/mtl/relobralo.py b/ppsci/loss/mtl/relobralo.py index 02ec8f1339..3beeb9b16e 100644 --- a/ppsci/loss/mtl/relobralo.py +++ b/ppsci/loss/mtl/relobralo.py @@ -1,127 +1,127 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import ClassVar -from typing import Dict - -import paddle -from paddle import nn - - -class Relobralo(nn.Layer): - r""" - **Re**lative **Lo**ss **B**alancing with **Ra**ndom **Lo**okback - - [Multi-Objective Loss Balancing for Physics-Informed Deep Learning](https://arxiv.org/abs/2110.09813) - - Attributes: - should_persist(bool): Whether to persist the loss aggregator when saving. - Those loss aggregators with parameters and/or buffers should be persisted. - - Args: - num_losses (int): Number of losses. - alpha (float, optional): Ability for remembering past in paper. Defaults to 0.95. - beta (float, optional): Parameter for generating $\rho$ from bernoulli distribution, - and $E[\rho](=\beta)$ should be close to 1. Defaults to 0.99. - tau (float, optional): Temperature factor. Equivalent to softmax when $\tau$=1.0, - equivalent to argmax when $\tau$=0. Defaults to 1.0. - eps (float, optional): $\epsilon$ to avoid divided by 0 in losses. Defaults to 1e-8. - - Examples: - >>> import paddle - >>> from ppsci.loss import mtl - >>> model = paddle.nn.Linear(3, 4) - >>> loss_aggregator = mtl.Relobralo(num_losses=2) - >>> for i in range(5): - ... x1 = paddle.randn([8, 3]) - ... x2 = paddle.randn([8, 3]) - ... y1 = model(x1) - ... y2 = model(x2) - ... loss1 = paddle.sum(y1) - ... loss2 = paddle.sum((y2 - 2) ** 2) - ... 
loss_aggregator({'loss1': loss1, 'loss2': loss2}).backward() - """ - should_persist: ClassVar[bool] = True - - def __init__( - self, - num_losses: int, - alpha: float = 0.95, - beta: float = 0.99, - tau: float = 1.0, - eps: float = 1e-8, - ) -> None: - super().__init__() - self.step = 0 - self.num_losses: int = num_losses - self.alpha: float = alpha - self.beta: float = beta - self.tau: float = tau - self.eps: float = eps - self.register_buffer("losses_init", paddle.zeros([self.num_losses])) - self.register_buffer("losses_prev", paddle.zeros([self.num_losses])) - self.register_buffer("lmbda", paddle.ones([self.num_losses])) - - def _softmax(self, vec: "paddle.Tensor") -> "paddle.Tensor": - max_item = vec.max() - result = paddle.exp(vec - max_item) / paddle.exp(vec - max_item).sum() - return result - - def _compute_bal( - self, losses_vec1: "paddle.Tensor", losses_vec2: "paddle.Tensor" - ) -> "paddle.Tensor": - return self.num_losses * ( - self._softmax(losses_vec1 / (self.tau * losses_vec2 + self.eps)) - ) - - def __call__( - self, losses: Dict[str, "paddle.Tensor"], step: int = 0 - ) -> "paddle.Tensor": - assert len(losses) == self.num_losses, ( - f"Length of given losses({len(losses)}) should be equal to " - f"num_losses({self.num_losses})." - ) - self.step = step - losses_stacked = paddle.stack(list(losses.values())) # [num_losses, ] - - if self.step == 0: - loss = losses_stacked.sum() - with paddle.no_grad(): - paddle.assign(losses_stacked.detach(), self.losses_init) - else: - with paddle.no_grad(): - # 1. update lambda_hist - rho = paddle.bernoulli(paddle.to_tensor(self.beta)) - lmbda_hist = rho * self.lmbda + (1 - rho) * self._compute_bal( - losses_stacked, self.losses_init - ) - - # 2. update lambda - paddle.assign( - self.alpha * lmbda_hist - + (1 - self.alpha) - * self._compute_bal(losses_stacked, self.losses_prev), - self.lmbda, - ) - - # 3. compute reweighted total loss with lambda - loss = (losses_stacked * self.lmbda).sum() - - # update losses_prev at the end of each step - with paddle.no_grad(): - paddle.assign(losses_stacked.detach(), self.losses_prev) - - return loss +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import ClassVar +from typing import Dict + +import paddle +from paddle import nn + + +class Relobralo(nn.Layer): + r""" + **Re**lative **Lo**ss **B**alancing with **Ra**ndom **Lo**okback + + [Multi-Objective Loss Balancing for Physics-Informed Deep Learning](https://arxiv.org/abs/2110.09813) + + Attributes: + should_persist(bool): Whether to persist the loss aggregator when saving. + Those loss aggregators with parameters and/or buffers should be persisted. + + Args: + num_losses (int): Number of losses. + alpha (float, optional): Ability for remembering past in paper. Defaults to 0.95. + beta (float, optional): Parameter for generating $\rho$ from bernoulli distribution, + and $E[\rho](=\beta)$ should be close to 1. Defaults to 0.99. 
+ tau (float, optional): Temperature factor. Equivalent to softmax when $\tau$=1.0, + equivalent to argmax when $\tau$=0. Defaults to 1.0. + eps (float, optional): $\epsilon$ to avoid divided by 0 in losses. Defaults to 1e-8. + + Examples: + >>> import paddle + >>> from ppsci.loss import mtl + >>> model = paddle.nn.Linear(3, 4) + >>> loss_aggregator = mtl.Relobralo(num_losses=2) + >>> for i in range(5): + ... x1 = paddle.randn([8, 3]) + ... x2 = paddle.randn([8, 3]) + ... y1 = model(x1) + ... y2 = model(x2) + ... loss1 = paddle.sum(y1) + ... loss2 = paddle.sum((y2 - 2) ** 2) + ... loss_aggregator({'loss1': loss1, 'loss2': loss2}).backward() + """ + should_persist: ClassVar[bool] = True + + def __init__( + self, + num_losses: int, + alpha: float = 0.95, + beta: float = 0.99, + tau: float = 1.0, + eps: float = 1e-8, + ) -> None: + super().__init__() + self.step = 0 + self.num_losses: int = num_losses + self.alpha: float = alpha + self.beta: float = beta + self.tau: float = tau + self.eps: float = eps + self.register_buffer("losses_init", paddle.zeros([self.num_losses])) + self.register_buffer("losses_prev", paddle.zeros([self.num_losses])) + self.register_buffer("lmbda", paddle.ones([self.num_losses])) + + def _softmax(self, vec: "paddle.Tensor") -> "paddle.Tensor": + max_item = vec.max() + result = paddle.exp(vec - max_item) / paddle.exp(vec - max_item).sum() + return result + + def _compute_bal( + self, losses_vec1: "paddle.Tensor", losses_vec2: "paddle.Tensor" + ) -> "paddle.Tensor": + return self.num_losses * ( + self._softmax(losses_vec1 / (self.tau * losses_vec2 + self.eps)) + ) + + def __call__( + self, losses: Dict[str, "paddle.Tensor"], step: int = 0 + ) -> "paddle.Tensor": + assert len(losses) == self.num_losses, ( + f"Length of given losses({len(losses)}) should be equal to " + f"num_losses({self.num_losses})." + ) + self.step = step + losses_stacked = paddle.stack(list(losses.values())) # [num_losses, ] + + if self.step == 0: + loss = losses_stacked.sum() + with paddle.no_grad(): + paddle.assign(losses_stacked.detach(), self.losses_init) + else: + with paddle.no_grad(): + # 1. update lambda_hist + rho = paddle.bernoulli(paddle.to_tensor(self.beta)) + lmbda_hist = rho * self.lmbda + (1 - rho) * self._compute_bal( + losses_stacked, self.losses_init + ) + + # 2. update lambda + paddle.assign( + self.alpha * lmbda_hist + + (1 - self.alpha) + * self._compute_bal(losses_stacked, self.losses_prev), + self.lmbda, + ) + + # 3. compute reweighted total loss with lambda + loss = (losses_stacked * self.lmbda).sum() + + # update losses_prev at the end of each step + with paddle.no_grad(): + paddle.assign(losses_stacked.detach(), self.losses_prev) + + return loss diff --git a/ppsci/loss/mtl/sum.py b/ppsci/loss/mtl/sum.py index d2c9a7bd50..539abf9d74 100644 --- a/ppsci/loss/mtl/sum.py +++ b/ppsci/loss/mtl/sum.py @@ -1,60 +1,60 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
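Relobralo above rescales each loss with a temperature-softmax of how much it has grown relative to a reference: normally the previous step's losses, and with probability 1 - beta the losses of step 0 (the random lookback). A numpy sketch of one lambda update under those rules, with made-up loss values:

import numpy as np

def softmax(v):
    e = np.exp(v - v.max())
    return e / e.sum()

def balance(cur, ref, tau=1.0, eps=1e-8):
    # num_losses * softmax(cur / (tau * ref + eps)), as in _compute_bal above
    return len(cur) * softmax(cur / (tau * ref + eps))

alpha, beta = 0.95, 0.99
losses_init = np.array([1.0, 2.0])      # made-up losses at step 0
losses_prev = np.array([0.8, 1.9])      # made-up losses at the previous step
losses_now = np.array([0.5, 1.8])
lmbda = np.ones(2)

rho = float(np.random.rand() < beta)    # 1 -> keep history, 0 -> look back to step 0
lmbda_hist = rho * lmbda + (1 - rho) * balance(losses_now, losses_init)
lmbda = alpha * lmbda_hist + (1 - alpha) * balance(losses_now, losses_prev)

total = (losses_now * lmbda).sum()      # reweighted training loss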
- -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Dict - -if TYPE_CHECKING: - import paddle - -from typing import ClassVar - -from ppsci.loss.mtl.base import LossAggregator - - -class Sum(LossAggregator): - r""" - **Default loss aggregator** which do simple summation for given losses as below. - - $$ - loss = \sum_i^N losses_i - $$ - - Attributes: - should_persist(bool): Whether to persist the loss aggregator when saving. - Those loss aggregators with parameters and/or buffers should be persisted. - """ - should_persist: ClassVar[bool] = False - - def __init__(self) -> None: - self.step = 0 - - def __call__( - self, losses: Dict[str, "paddle.Tensor"], step: int = 0 - ) -> "paddle.Tensor": - assert ( - len(losses) > 0 - ), f"Number of given losses({len(losses)}) can not be empty." - self.step = step - - total_loss = 0.0 - for i, key in enumerate(losses): - if i == 0: - total_loss = losses[key] - else: - total_loss += losses[key] - - return total_loss +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Dict + +if TYPE_CHECKING: + import paddle + +from typing import ClassVar + +from ppsci.loss.mtl.base import LossAggregator + + +class Sum(LossAggregator): + r""" + **Default loss aggregator** which do simple summation for given losses as below. + + $$ + loss = \sum_i^N losses_i + $$ + + Attributes: + should_persist(bool): Whether to persist the loss aggregator when saving. + Those loss aggregators with parameters and/or buffers should be persisted. + """ + should_persist: ClassVar[bool] = False + + def __init__(self) -> None: + self.step = 0 + + def __call__( + self, losses: Dict[str, "paddle.Tensor"], step: int = 0 + ) -> "paddle.Tensor": + assert ( + len(losses) > 0 + ), f"Number of given losses({len(losses)}) can not be empty." + self.step = step + + total_loss = 0.0 + for i, key in enumerate(losses): + if i == 0: + total_loss = losses[key] + else: + total_loss += losses[key] + + return total_loss diff --git a/ppsci/metric/__init__.py b/ppsci/metric/__init__.py index 0a1d069aa9..9d521acd1d 100644 --- a/ppsci/metric/__init__.py +++ b/ppsci/metric/__init__.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -61,3 +62,64 @@ def build_metric(cfg): metric = eval(metric_cls)(**metric_cfg) metric_dict[metric_cls] = metric return metric_dict +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
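build_metric in ppsci/metric/__init__.py consumes a list of one-key mappings: the key names a metric class exported by ppsci.metric and the value holds its keyword arguments. A minimal sketch of that layout, using plain Python dicts as a stand-in for the DictConfig objects named in the docstring (an assumption about how it is usually driven):

import ppsci

cfg = [
    {"MAE": {"keep_batch": False}},
    {"MSE": {"keep_batch": False}},
]

metric_dict = ppsci.metric.build_metric(cfg)
print(list(metric_dict.keys()))   # ["MAE", "MSE"]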
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from ppsci.metric.anomaly_coef import LatitudeWeightedACC +from ppsci.metric.base import Metric +from ppsci.metric.func import FunctionalMetric +from ppsci.metric.l2_rel import L2Rel +from ppsci.metric.l2_rel import MeanL2Rel +from ppsci.metric.mae import MAE +from ppsci.metric.mse import MSE +from ppsci.metric.rmse import RMSE +from ppsci.metric.rmse import LatitudeWeightedRMSE +from ppsci.utils import misc + +__all__ = [ + "LatitudeWeightedACC", + "Metric", + "FunctionalMetric", + "L2Rel", + "MeanL2Rel", + "MAE", + "MSE", + "RMSE", + "LatitudeWeightedRMSE", + "build_metric", +] + + +def build_metric(cfg): + """Build metric. + + Args: + cfg (List[DictConfig]): List of metric config. + + Returns: + Dict[str, Metric]: Dict of callable metric object. + """ + cfg = copy.deepcopy(cfg) + + metric_dict = misc.PrettyOrderedDict() + for _item in cfg: + metric_cls = next(iter(_item.keys())) + metric_cfg = _item.pop(metric_cls) + metric = eval(metric_cls)(**metric_cfg) + metric_dict[metric_cls] = metric + return metric_dict +>>>>>>> Stashed changes diff --git a/ppsci/metric/anomaly_coef.py b/ppsci/metric/anomaly_coef.py index 33633228ca..7b6b640410 100644 --- a/ppsci/metric/anomaly_coef.py +++ b/ppsci/metric/anomaly_coef.py @@ -1,122 +1,122 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle - -from ppsci.metric import base - - -class LatitudeWeightedACC(base.Metric): - r"""Latitude weighted anomaly correlation coefficient. - - $$ - metric = - \dfrac{\sum\limits_{m,n}{L_mX_{mn}Y_{mn}}}{\sqrt{\sum\limits_{m,n}{L_mX_{mn}^{2}}\sum\limits_{m,n}{L_mY_{mn}^{2}}}} - $$ - - $$ - L_m = N_{lat}\dfrac{\cos(lat_m)}{\sum\limits_{j=1}^{N_{lat}}\cos(lat_j)} - $$ - - $lat_m$ is the latitude at m. - $N_{lat}$ is the number of latitude set by `num_lat`. - - Args: - num_lat (int): Number of latitude. - mean (Optional[Union[np.array, Tuple[float, ...]]]): Mean of training data. Defaults to None. - keep_batch (bool, optional): Whether keep batch axis. Defaults to False. - variable_dict (Optional[Dict[str, int]]): Variable dictionary, the key is the name of a variable and - the value is its index. Defaults to None. - unlog (bool, optional): Whether calculate expm1 for all elements in the array. Defaults to False. - scale (float, optional): The scale value used after expm1. Defaults to 1e-5. 
- - Examples: - >>> import numpy as np - >>> import ppsci - >>> mean = np.random.randn(20, 720, 1440) - >>> metric = ppsci.metric.LatitudeWeightedACC(720, mean=mean) - """ - - def __init__( - self, - num_lat: int, - mean: Optional[Union[np.array, Tuple[float, ...]]], - keep_batch: bool = False, - variable_dict: Optional[Dict[str, int]] = None, - unlog: bool = False, - scale: float = 1e-5, - ): - super().__init__(keep_batch) - self.num_lat = num_lat - self.mean = ( - None if mean is None else paddle.to_tensor(mean, paddle.get_default_dtype()) - ) - self.variable_dict = variable_dict - self.unlog = unlog - self.scale = scale - - self.weight = self.get_latitude_weight(num_lat) - - def get_latitude_weight(self, num_lat: int = 720): - lat_t = paddle.linspace(start=0, stop=1, num=num_lat) - lat_t = paddle.cos(3.1416 * (0.5 - lat_t)) - weight = num_lat * lat_t / paddle.sum(lat_t) - weight = weight.reshape((1, 1, -1, 1)) - return weight - - def scale_expm1(self, x: paddle.Tensor): - return self.scale * paddle.expm1(x) - - @paddle.no_grad() - def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: - metric_dict = {} - - for key in label_dict: - output = ( - self.scale_expm1(output_dict[key]) if self.unlog else output_dict[key] - ) - label = self.scale_expm1(label_dict[key]) if self.unlog else label_dict[key] - - if self.mean is not None: - output = output - self.mean - label = label - self.mean - - rmse = paddle.sum( - self.weight * output * label, axis=(-1, -2) - ) / paddle.sqrt( - paddle.sum(self.weight * output**2, axis=(-1, -2)) - * paddle.sum(self.weight * label**2, axis=(-1, -2)) - ) - - if self.variable_dict is not None: - for variable_name, idx in self.variable_dict.items(): - if self.keep_batch: - metric_dict[f"{key}.{variable_name}"] = rmse[:, idx] - else: - metric_dict[f"{key}.{variable_name}"] = rmse[:, idx].mean() - else: - if self.keep_batch: - metric_dict[key] = rmse.mean(axis=1) - else: - metric_dict[key] = rmse.mean() - - return metric_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import numpy as np +import paddle + +from ppsci.metric import base + + +class LatitudeWeightedACC(base.Metric): + r"""Latitude weighted anomaly correlation coefficient. + + $$ + metric = + \dfrac{\sum\limits_{m,n}{L_mX_{mn}Y_{mn}}}{\sqrt{\sum\limits_{m,n}{L_mX_{mn}^{2}}\sum\limits_{m,n}{L_mY_{mn}^{2}}}} + $$ + + $$ + L_m = N_{lat}\dfrac{\cos(lat_m)}{\sum\limits_{j=1}^{N_{lat}}\cos(lat_j)} + $$ + + $lat_m$ is the latitude at m. + $N_{lat}$ is the number of latitude set by `num_lat`. + + Args: + num_lat (int): Number of latitude. + mean (Optional[Union[np.array, Tuple[float, ...]]]): Mean of training data. Defaults to None. + keep_batch (bool, optional): Whether keep batch axis. Defaults to False. 
+ variable_dict (Optional[Dict[str, int]]): Variable dictionary, the key is the name of a variable and + the value is its index. Defaults to None. + unlog (bool, optional): Whether calculate expm1 for all elements in the array. Defaults to False. + scale (float, optional): The scale value used after expm1. Defaults to 1e-5. + + Examples: + >>> import numpy as np + >>> import ppsci + >>> mean = np.random.randn(20, 720, 1440) + >>> metric = ppsci.metric.LatitudeWeightedACC(720, mean=mean) + """ + + def __init__( + self, + num_lat: int, + mean: Optional[Union[np.array, Tuple[float, ...]]], + keep_batch: bool = False, + variable_dict: Optional[Dict[str, int]] = None, + unlog: bool = False, + scale: float = 1e-5, + ): + super().__init__(keep_batch) + self.num_lat = num_lat + self.mean = ( + None if mean is None else paddle.to_tensor(mean, paddle.get_default_dtype()) + ) + self.variable_dict = variable_dict + self.unlog = unlog + self.scale = scale + + self.weight = self.get_latitude_weight(num_lat) + + def get_latitude_weight(self, num_lat: int = 720): + lat_t = paddle.linspace(start=0, stop=1, num=num_lat) + lat_t = paddle.cos(3.1416 * (0.5 - lat_t)) + weight = num_lat * lat_t / paddle.sum(lat_t) + weight = weight.reshape((1, 1, -1, 1)) + return weight + + def scale_expm1(self, x: paddle.Tensor): + return self.scale * paddle.expm1(x) + + @paddle.no_grad() + def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: + metric_dict = {} + + for key in label_dict: + output = ( + self.scale_expm1(output_dict[key]) if self.unlog else output_dict[key] + ) + label = self.scale_expm1(label_dict[key]) if self.unlog else label_dict[key] + + if self.mean is not None: + output = output - self.mean + label = label - self.mean + + rmse = paddle.sum( + self.weight * output * label, axis=(-1, -2) + ) / paddle.sqrt( + paddle.sum(self.weight * output**2, axis=(-1, -2)) + * paddle.sum(self.weight * label**2, axis=(-1, -2)) + ) + + if self.variable_dict is not None: + for variable_name, idx in self.variable_dict.items(): + if self.keep_batch: + metric_dict[f"{key}.{variable_name}"] = rmse[:, idx] + else: + metric_dict[f"{key}.{variable_name}"] = rmse[:, idx].mean() + else: + if self.keep_batch: + metric_dict[key] = rmse.mean(axis=1) + else: + metric_dict[key] = rmse.mean() + + return metric_dict diff --git a/ppsci/metric/base.py b/ppsci/metric/base.py index 750e629882..bf8a8a2a35 100644 --- a/ppsci/metric/base.py +++ b/ppsci/metric/base.py @@ -1,25 +1,25 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from paddle import nn - - -class Metric(nn.Layer): - """Base class for metric.""" - - def __init__(self, keep_batch: bool = False): - super().__init__() - self.keep_batch = keep_batch +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
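The latitude weighting and the ACC formula above can be reproduced in a few lines of numpy; a tiny grid replaces the 720 x 1440 grid of the docstring example, and the anomaly fields are random stand-ins:

import numpy as np

def latitude_weight(num_lat):
    # L_m = num_lat * cos(lat_m) / sum_j cos(lat_j), latitudes sampled uniformly pole to pole
    lat = np.cos(np.pi * (0.5 - np.linspace(0.0, 1.0, num_lat)))
    return num_lat * lat / lat.sum()

num_lat, num_lon = 8, 16                       # tiny grid instead of 720 x 1440
w = latitude_weight(num_lat).reshape(num_lat, 1)

x = np.random.randn(num_lat, num_lon)          # anomaly: prediction minus mean
y = np.random.randn(num_lat, num_lon)          # anomaly: label minus mean

acc = (w * x * y).sum() / np.sqrt((w * x ** 2).sum() * (w * y ** 2).sum())
print(acc)                                     # in [-1, 1]; 1 when the anomaly patterns coincide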
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from paddle import nn + + +class Metric(nn.Layer): + """Base class for metric.""" + + def __init__(self, keep_batch: bool = False): + super().__init__() + self.keep_batch = keep_batch diff --git a/ppsci/metric/func.py b/ppsci/metric/func.py index bee646b656..14fe8e2f0f 100644 --- a/ppsci/metric/func.py +++ b/ppsci/metric/func.py @@ -1,66 +1,66 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Callable -from typing import Dict - -from ppsci.metric import base - -if TYPE_CHECKING: - import paddle - - -class FunctionalMetric(base.Metric): - r"""Functional metric class, which allows to use custom metric computing function from given metric_expr for complex computation cases. - - Args: - metric_expr (Callable): Expression of metric calculation. - keep_batch (bool, optional): Whether keep batch axis. Defaults to False. - - Examples: - >>> import paddle - >>> from ppsci.metric import FunctionalMetric - >>> def metric_expr(output_dict, *args): - ... rel_l2 = 0 - ... for key in output_dict: - ... length = int(len(output_dict[key])/2) - ... out_dict = output_dict[key][:length] - ... label_dict = output_dict[key][length:] - ... rel_l2 += paddle.norm(out_dict - label_dict) / paddle.norm(label_dict) - ... return {"rel_l2": rel_l2} - >>> metric_dict = FunctionalMetric(metric_expr) - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3], [-0.2, 1.5], [-0.1, -0.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3], [-1.8, 1.0], [-0.2, 2.5]])} - >>> result = metric_dict(output_dict) - >>> print(result) - {'rel_l2': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 2.59985542)} - """ - - def __init__( - self, - metric_expr: Callable[ - [Dict["str", "paddle.Tensor"], Dict["str", "paddle.Tensor"]], - Dict["str", "paddle.Tensor"], - ], - keep_batch: bool = False, - ): - super().__init__(keep_batch) - self.metric_expr = metric_expr - - def forward(self, output_dict, label_dict=None) -> Dict[str, "paddle.Tensor"]: - return self.metric_expr(output_dict, label_dict) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
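Every metric below subclasses the Metric base just shown: forward() takes an output dict and a label dict and returns a dict of tensors keyed by variable name, reduced over the batch unless keep_batch is set. A hypothetical subclass as a sketch of that contract; MaxAbsError is illustrative and not part of ppsci:

from typing import Dict

import paddle

from ppsci.metric import base


class MaxAbsError(base.Metric):
    def __init__(self, keep_batch: bool = False):
        super().__init__(keep_batch)

    @paddle.no_grad()
    def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]:
        metric_dict = {}
        for key in label_dict:
            err = paddle.abs(output_dict[key] - label_dict[key])
            if self.keep_batch:
                # reduce every axis except the batch axis, mirroring MAE/MSE below
                metric_dict[key] = err.max(axis=list(range(1, err.ndim)))
            else:
                metric_dict[key] = err.max()
        return metric_dict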
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Callable +from typing import Dict + +from ppsci.metric import base + +if TYPE_CHECKING: + import paddle + + +class FunctionalMetric(base.Metric): + r"""Functional metric class, which allows to use custom metric computing function from given metric_expr for complex computation cases. + + Args: + metric_expr (Callable): Expression of metric calculation. + keep_batch (bool, optional): Whether keep batch axis. Defaults to False. + + Examples: + >>> import paddle + >>> from ppsci.metric import FunctionalMetric + >>> def metric_expr(output_dict, *args): + ... rel_l2 = 0 + ... for key in output_dict: + ... length = int(len(output_dict[key])/2) + ... out_dict = output_dict[key][:length] + ... label_dict = output_dict[key][length:] + ... rel_l2 += paddle.norm(out_dict - label_dict) / paddle.norm(label_dict) + ... return {"rel_l2": rel_l2} + >>> metric_dict = FunctionalMetric(metric_expr) + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3], [-0.2, 1.5], [-0.1, -0.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3], [-1.8, 1.0], [-0.2, 2.5]])} + >>> result = metric_dict(output_dict) + >>> print(result) + {'rel_l2': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 2.59985542)} + """ + + def __init__( + self, + metric_expr: Callable[ + [Dict["str", "paddle.Tensor"], Dict["str", "paddle.Tensor"]], + Dict["str", "paddle.Tensor"], + ], + keep_batch: bool = False, + ): + super().__init__(keep_batch) + self.metric_expr = metric_expr + + def forward(self, output_dict, label_dict=None) -> Dict[str, "paddle.Tensor"]: + return self.metric_expr(output_dict, label_dict) diff --git a/ppsci/metric/l2_rel.py b/ppsci/metric/l2_rel.py index 2a64e9befc..67dc0f6da8 100644 --- a/ppsci/metric/l2_rel.py +++ b/ppsci/metric/l2_rel.py @@ -1,139 +1,139 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict - -import numpy as np -import paddle - -from ppsci.metric import base - - -class L2Rel(base.Metric): - r"""Class for l2 relative error. - - NOTE: This metric API is slightly different from `MeanL2Rel`, difference is as below: - - - `L2Rel` regards the input sample as a whole and calculates the l2 relative error of the whole; - - `MeanL2Rel` will calculate L2Rel separately for each input sample and return the average of l2 relative error for all samples. 
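The values in the surrounding doctests can be reproduced with numpy, which also makes the L2Rel / MeanL2Rel distinction concrete: one norm over the whole batch versus one norm per sample followed by a batch mean.

import numpy as np

x = np.array([[0.5, 0.9], [1.1, -1.3]])    # predictions for 'u' (2 samples)
y = np.array([[-1.8, 1.0], [-0.2, 2.5]])   # labels for 'u'

# L2Rel: the whole batch is treated as a single vector
whole = np.linalg.norm(x - y) / np.linalg.norm(y)             # ~1.4266

# MeanL2Rel: one relative error per sample, then the batch mean
per_sample = np.linalg.norm(x - y, axis=1) / np.linalg.norm(y, axis=1)
print(whole, per_sample, per_sample.mean())                   # ~1.4266, [~1.1180, ~1.6014], ~1.3597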
- - $$ - metric = \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\max(\Vert \mathbf{y} \Vert_2, \epsilon)} - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - Args: - keep_batch (bool, optional): Whether keep batch axis. Defaults to False. - - Examples: - >>> import paddle - >>> from ppsci.metric import L2Rel - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> loss = L2Rel() - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 1.42658269), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 9.69535923)} - """ - - # NOTE: Avoid divide by zero in result - # see https://github.com/scikit-learn/scikit-learn/pull/15007 - EPS: float = np.finfo(np.float32).eps - - def __init__(self, keep_batch: bool = False): - if keep_batch: - raise ValueError(f"keep_batch should be False, but got {keep_batch}.") - super().__init__(keep_batch) - - @paddle.no_grad() - def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: - metric_dict = {} - for key in label_dict: - rel_l2 = paddle.norm(label_dict[key] - output_dict[key], p=2) / paddle.norm( - label_dict[key], p=2 - ).clip(min=self.EPS) - metric_dict[key] = rel_l2 - - return metric_dict - - -class MeanL2Rel(base.Metric): - r"""Class for mean l2 relative error. - - NOTE: This metric API is slightly different from `L2Rel`, difference is as below: - - - `MeanL2Rel` will calculate L2Rel separately for each input sample and return the average of l2 relative error for all samples. - - `L2Rel` regards the input sample as a whole and calculates the l2 relative error of the whole; - - $$ - metric = \dfrac{1}{M} \sum_{i=1}^{M}\dfrac{\Vert \mathbf{x_i} - \mathbf{y_i} \Vert_2}{\max(\Vert \mathbf{y_i} \Vert_2, \epsilon) } - $$ - - $$ - \mathbf{x_i}, \mathbf{y_i} \in \mathcal{R}^{N} - $$ - - Args: - keep_batch (bool, optional): Whether keep batch axis. Defaults to False. - - Examples: - >>> import paddle - >>> from ppsci.metric import MeanL2Rel - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> loss = MeanL2Rel() - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 1.35970235), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 9.24504089)} - >>> loss = MeanL2Rel(keep_batch=True) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [1.11803389, 1.60137081]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [6.32455540 , 12.16552544])} - """ - - # NOTE: Avoid divide by zero in result - # see https://github.com/scikit-learn/scikit-learn/pull/15007 - EPS: float = np.finfo(np.float32).eps - - def __init__(self, keep_batch: bool = False): - super().__init__(keep_batch) - - @paddle.no_grad() - def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: - metric_dict = {} - for key in label_dict: - rel_l2 = paddle.norm( - label_dict[key] - output_dict[key], p=2, axis=1 - ) / paddle.norm(label_dict[key], p=2, axis=1).clip(min=self.EPS) - if self.keep_batch: - metric_dict[key] = rel_l2 - else: - metric_dict[key] = rel_l2.mean() - - return metric_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict + +import numpy as np +import paddle + +from ppsci.metric import base + + +class L2Rel(base.Metric): + r"""Class for l2 relative error. + + NOTE: This metric API is slightly different from `MeanL2Rel`, difference is as below: + + - `L2Rel` regards the input sample as a whole and calculates the l2 relative error of the whole; + - `MeanL2Rel` will calculate L2Rel separately for each input sample and return the average of l2 relative error for all samples. + + $$ + metric = \dfrac{\Vert \mathbf{x} - \mathbf{y} \Vert_2}{\max(\Vert \mathbf{y} \Vert_2, \epsilon)} + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + Args: + keep_batch (bool, optional): Whether keep batch axis. Defaults to False. + + Examples: + >>> import paddle + >>> from ppsci.metric import L2Rel + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> loss = L2Rel() + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 1.42658269), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 9.69535923)} + """ + + # NOTE: Avoid divide by zero in result + # see https://github.com/scikit-learn/scikit-learn/pull/15007 + EPS: float = np.finfo(np.float32).eps + + def __init__(self, keep_batch: bool = False): + if keep_batch: + raise ValueError(f"keep_batch should be False, but got {keep_batch}.") + super().__init__(keep_batch) + + @paddle.no_grad() + def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: + metric_dict = {} + for key in label_dict: + rel_l2 = paddle.norm(label_dict[key] - output_dict[key], p=2) / paddle.norm( + label_dict[key], p=2 + ).clip(min=self.EPS) + metric_dict[key] = rel_l2 + + return metric_dict + + +class MeanL2Rel(base.Metric): + r"""Class for mean l2 relative error. + + NOTE: This metric API is slightly different from `L2Rel`, difference is as below: + + - `MeanL2Rel` will calculate L2Rel separately for each input sample and return the average of l2 relative error for all samples. + - `L2Rel` regards the input sample as a whole and calculates the l2 relative error of the whole; + + $$ + metric = \dfrac{1}{M} \sum_{i=1}^{M}\dfrac{\Vert \mathbf{x_i} - \mathbf{y_i} \Vert_2}{\max(\Vert \mathbf{y_i} \Vert_2, \epsilon) } + $$ + + $$ + \mathbf{x_i}, \mathbf{y_i} \in \mathcal{R}^{N} + $$ + + Args: + keep_batch (bool, optional): Whether keep batch axis. Defaults to False. + + Examples: + >>> import paddle + >>> from ppsci.metric import MeanL2Rel + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> loss = MeanL2Rel() + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 1.35970235), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 9.24504089)} + >>> loss = MeanL2Rel(keep_batch=True) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [1.11803389, 1.60137081]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [6.32455540 , 12.16552544])} + """ + + # NOTE: Avoid divide by zero in result + # see https://github.com/scikit-learn/scikit-learn/pull/15007 + EPS: float = np.finfo(np.float32).eps + + def __init__(self, keep_batch: bool = False): + super().__init__(keep_batch) + + @paddle.no_grad() + def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: + metric_dict = {} + for key in label_dict: + rel_l2 = paddle.norm( + label_dict[key] - output_dict[key], p=2, axis=1 + ) / paddle.norm(label_dict[key], p=2, axis=1).clip(min=self.EPS) + if self.keep_batch: + metric_dict[key] = rel_l2 + else: + metric_dict[key] = rel_l2.mean() + + return metric_dict diff --git a/ppsci/metric/mae.py b/ppsci/metric/mae.py index 3b6ebdedbb..38a4f59b25 100644 --- a/ppsci/metric/mae.py +++ b/ppsci/metric/mae.py @@ -1,73 +1,73 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict - -import paddle -import paddle.nn.functional as F - -from ppsci.metric import base - - -class MAE(base.Metric): - r"""Mean absolute error. - - $$ - metric = \dfrac{1}{N} \Vert \mathbf{x} - \mathbf{y} \Vert_1 - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - Args: - keep_batch (bool, optional): Whether keep batch axis. Defaults to False. - - Examples: - >>> import paddle - >>> from ppsci.metric import MAE - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> loss = MAE() - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 1.87500000), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.89999998)} - >>> loss = MAE(keep_batch=True) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [1.20000005, 2.54999995]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [0.59999996, 1.20000005])} - """ - - def __init__(self, keep_batch: bool = False): - super().__init__(keep_batch) - - @paddle.no_grad() - def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: - metric_dict = {} - for key in label_dict: - mae = F.l1_loss(output_dict[key], label_dict[key], "none") - if self.keep_batch: - metric_dict[key] = mae.mean(axis=tuple(range(1, mae.ndim))) - else: - metric_dict[key] = mae.mean() - - return metric_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict + +import paddle +import paddle.nn.functional as F + +from ppsci.metric import base + + +class MAE(base.Metric): + r"""Mean absolute error. + + $$ + metric = \dfrac{1}{N} \Vert \mathbf{x} - \mathbf{y} \Vert_1 + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + Args: + keep_batch (bool, optional): Whether keep batch axis. Defaults to False. + + Examples: + >>> import paddle + >>> from ppsci.metric import MAE + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 
'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> loss = MAE() + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 1.87500000), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.89999998)} + >>> loss = MAE(keep_batch=True) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [1.20000005, 2.54999995]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [0.59999996, 1.20000005])} + """ + + def __init__(self, keep_batch: bool = False): + super().__init__(keep_batch) + + @paddle.no_grad() + def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: + metric_dict = {} + for key in label_dict: + mae = F.l1_loss(output_dict[key], label_dict[key], "none") + if self.keep_batch: + metric_dict[key] = mae.mean(axis=tuple(range(1, mae.ndim))) + else: + metric_dict[key] = mae.mean() + + return metric_dict diff --git a/ppsci/metric/mse.py b/ppsci/metric/mse.py index 9e47a7cf5a..7b8370fb24 100644 --- a/ppsci/metric/mse.py +++ b/ppsci/metric/mse.py @@ -1,73 +1,73 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict - -import paddle -import paddle.nn.functional as F - -from ppsci.metric import base - - -class MSE(base.Metric): - r"""Mean square error - - $$ - metric = \dfrac{1}{N} \Vert \mathbf{x} - \mathbf{y} \Vert_2^2 - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - Args: - keep_batch (bool, optional): Whether keep batch axis. Defaults to False. - - Examples: - >>> import paddle - >>> from ppsci.metric import MSE - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> loss = MSE() - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 5.35750008), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.94000000)} - >>> loss = MSE(keep_batch=True) - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [2.65000010, 8.06499958]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, - [0.39999998, 1.48000002])} - """ - - def __init__(self, keep_batch: bool = False): - super().__init__(keep_batch) - - @paddle.no_grad() - def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: - metric_dict = {} - for key in label_dict: - mse = F.mse_loss(output_dict[key], label_dict[key], "none") - if self.keep_batch: - metric_dict[key] = mse.mean(axis=tuple(range(1, mse.ndim))) - else: - metric_dict[key] = mse.mean() - - return metric_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Dict + +import paddle +import paddle.nn.functional as F + +from ppsci.metric import base + + +class MSE(base.Metric): + r"""Mean square error + + $$ + metric = \dfrac{1}{N} \Vert \mathbf{x} - \mathbf{y} \Vert_2^2 + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + Args: + keep_batch (bool, optional): Whether keep batch axis. Defaults to False. + + Examples: + >>> import paddle + >>> from ppsci.metric import MSE + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 
'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> loss = MSE() + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 5.35750008), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.94000000)} + >>> loss = MSE(keep_batch=True) + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [2.65000010, 8.06499958]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True, + [0.39999998, 1.48000002])} + """ + + def __init__(self, keep_batch: bool = False): + super().__init__(keep_batch) + + @paddle.no_grad() + def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: + metric_dict = {} + for key in label_dict: + mse = F.mse_loss(output_dict[key], label_dict[key], "none") + if self.keep_batch: + metric_dict[key] = mse.mean(axis=tuple(range(1, mse.ndim))) + else: + metric_dict[key] = mse.mean() + + return metric_dict diff --git a/ppsci/metric/rmse.py b/ppsci/metric/rmse.py index 55be8e9102..5d56fcfa6b 100644 --- a/ppsci/metric/rmse.py +++ b/ppsci/metric/rmse.py @@ -1,155 +1,155 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle -import paddle.nn.functional as F - -from ppsci.metric import base - - -class RMSE(base.Metric): - r"""Root mean square error - - $$ - metric = \sqrt{\dfrac{1}{N} \Vert \mathbf{x} - \mathbf{y} \Vert_2^2} - $$ - - $$ - \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} - $$ - - Args: - keep_batch (bool, optional): Whether keep batch axis. Defaults to False. - - Examples: - >>> import paddle - >>> from ppsci.metric import RMSE - >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), - ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} - >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), - ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} - >>> loss = RMSE() - >>> result = loss(output_dict, label_dict) - >>> print(result) - {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 2.31462741), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, - 0.96953595)} - """ - - def __init__(self, keep_batch: bool = False): - if keep_batch: - raise ValueError(f"keep_batch should be False, but got {keep_batch}.") - super().__init__(keep_batch) - - @paddle.no_grad() - def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: - metric_dict = {} - for key in label_dict: - rmse = F.mse_loss(output_dict[key], label_dict[key], "mean") ** 0.5 - metric_dict[key] = rmse - - return metric_dict - - -class LatitudeWeightedRMSE(base.Metric): - r"""Latitude weighted root mean square error. 
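The MAE and MSE forwards above share one reduction convention: with keep_batch=True the elementwise loss is averaged over every axis except the batch axis, otherwise over everything. A short sketch of the two reductions on a (batch, feature) tensor, using the same F.mse_loss call as the class above.

import paddle
import paddle.nn.functional as F

pred = paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])
label = paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]])

elementwise = F.mse_loss(pred, label, "none")  # same shape as the inputs

per_sample = elementwise.mean(axis=tuple(range(1, elementwise.ndim)))  # keep_batch=True
overall = elementwise.mean()                                           # keep_batch=False

print(per_sample.shape, overall.shape)  # [2] and [] (a scalar)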
- - $$ - metric =\sqrt{\dfrac{1}{MN}\sum\limits_{m=1}^{M}\sum\limits_{n=1}^{N}L_m(X_{mn}-Y_{mn})^{2}} - $$ - - $$ - L_m = N_{lat}\dfrac{\cos(lat_m)}{\sum\limits_{j=1}^{N_{lat}}\cos(lat_j)} - $$ - - $lat_m$ is the latitude at m. - $N_{lat}$ is the number of latitude set by `num_lat`. - - Args: - num_lat (int): Number of latitude. - std (Optional[Union[np.array, Tuple[float, ...]]]): Standard Deviation of training dataset. Defaults to None. - keep_batch (bool, optional): Whether keep batch axis. Defaults to False. - variable_dict (Optional[Dict[str, int]]): Variable dictionary, the key is the name of a variable and - the value is its index. Defaults to None. - unlog (bool, optional): Whether calculate expm1 for all elements in the array. Defaults to False. - scale (float, optional): The scale value used after expm1. Defaults to 1e-5. - - Examples: - >>> import numpy as np - >>> import ppsci - >>> std = np.random.randn(20, 1, 1) - >>> metric = ppsci.metric.LatitudeWeightedRMSE(720, std=std) - """ - - def __init__( - self, - num_lat: int, - std: Optional[Union[np.array, Tuple[float, ...]]] = None, - keep_batch: bool = False, - variable_dict: Dict[str, int] = None, - unlog: bool = False, - scale: float = 1e-5, - ): - super().__init__(keep_batch) - self.num_lat = num_lat - self.std = ( - None - if std is None - else paddle.to_tensor(std, paddle.get_default_dtype()).reshape((1, -1)) - ) - self.variable_dict = variable_dict - self.unlog = unlog - self.scale = scale - self.weight = self.get_latitude_weight(num_lat) - - def get_latitude_weight(self, num_lat: int = 720): - lat_t = paddle.linspace(start=0, stop=1, num=num_lat) - lat_t = paddle.cos(3.1416 * (0.5 - lat_t)) - weight = num_lat * lat_t / paddle.sum(lat_t) - weight = weight.reshape((1, 1, -1, 1)) - return weight - - def scale_expm1(self, x: paddle.Tensor): - return self.scale * paddle.expm1(x) - - @paddle.no_grad() - def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: - metric_dict = {} - for key in label_dict: - output = ( - self.scale_expm1(output_dict[key]) if self.unlog else output_dict[key] - ) - label = self.scale_expm1(label_dict[key]) if self.unlog else label_dict[key] - - mse = F.mse_loss(output, label, "none") - rmse = (mse * self.weight).mean(axis=(-1, -2)) ** 0.5 - if self.std is not None: - rmse = rmse * self.std - if self.variable_dict is not None: - for variable_name, idx in self.variable_dict.items(): - metric_dict[f"{key}.{variable_name}"] = ( - rmse[:, idx] if self.keep_batch else rmse[:, idx].mean() - ) - else: - metric_dict[key] = rmse.mean(axis=1) if self.keep_batch else rmse.mean() - - return metric_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
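get_latitude_weight above builds cosine-of-latitude weights normalized so that their mean is 1 (they sum to num_lat), which keeps the weighted MSE on the same overall scale as an unweighted one. A standalone sketch of that computation with a small illustrative num_lat; note the code uses 3.1416 as an approximation of pi.

import paddle

num_lat = 8  # illustrative; the docstring example above uses 720
lat_t = paddle.linspace(start=0, stop=1, num=num_lat)
lat_t = paddle.cos(3.1416 * (0.5 - lat_t))    # cos(latitude): equator weighted most, poles least
weight = num_lat * lat_t / paddle.sum(lat_t)  # normalized so the mean weight is 1

print(float(weight.mean()))                   # 1.0 up to floating point
print(weight.reshape((1, 1, -1, 1)).shape)    # broadcastable over (batch, var, lat, lon)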
+ +from __future__ import annotations + +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import numpy as np +import paddle +import paddle.nn.functional as F + +from ppsci.metric import base + + +class RMSE(base.Metric): + r"""Root mean square error + + $$ + metric = \sqrt{\dfrac{1}{N} \Vert \mathbf{x} - \mathbf{y} \Vert_2^2} + $$ + + $$ + \mathbf{x}, \mathbf{y} \in \mathcal{R}^{N} + $$ + + Args: + keep_batch (bool, optional): Whether keep batch axis. Defaults to False. + + Examples: + >>> import paddle + >>> from ppsci.metric import RMSE + >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]), + ... 'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])} + >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]), + ... 'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])} + >>> loss = RMSE() + >>> result = loss(output_dict, label_dict) + >>> print(result) + {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 2.31462741), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True, + 0.96953595)} + """ + + def __init__(self, keep_batch: bool = False): + if keep_batch: + raise ValueError(f"keep_batch should be False, but got {keep_batch}.") + super().__init__(keep_batch) + + @paddle.no_grad() + def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: + metric_dict = {} + for key in label_dict: + rmse = F.mse_loss(output_dict[key], label_dict[key], "mean") ** 0.5 + metric_dict[key] = rmse + + return metric_dict + + +class LatitudeWeightedRMSE(base.Metric): + r"""Latitude weighted root mean square error. + + $$ + metric =\sqrt{\dfrac{1}{MN}\sum\limits_{m=1}^{M}\sum\limits_{n=1}^{N}L_m(X_{mn}-Y_{mn})^{2}} + $$ + + $$ + L_m = N_{lat}\dfrac{\cos(lat_m)}{\sum\limits_{j=1}^{N_{lat}}\cos(lat_j)} + $$ + + $lat_m$ is the latitude at m. + $N_{lat}$ is the number of latitude set by `num_lat`. + + Args: + num_lat (int): Number of latitude. + std (Optional[Union[np.array, Tuple[float, ...]]]): Standard Deviation of training dataset. Defaults to None. + keep_batch (bool, optional): Whether keep batch axis. Defaults to False. + variable_dict (Optional[Dict[str, int]]): Variable dictionary, the key is the name of a variable and + the value is its index. Defaults to None. + unlog (bool, optional): Whether calculate expm1 for all elements in the array. Defaults to False. + scale (float, optional): The scale value used after expm1. Defaults to 1e-5. 
+ + Examples: + >>> import numpy as np + >>> import ppsci + >>> std = np.random.randn(20, 1, 1) + >>> metric = ppsci.metric.LatitudeWeightedRMSE(720, std=std) + """ + + def __init__( + self, + num_lat: int, + std: Optional[Union[np.array, Tuple[float, ...]]] = None, + keep_batch: bool = False, + variable_dict: Dict[str, int] = None, + unlog: bool = False, + scale: float = 1e-5, + ): + super().__init__(keep_batch) + self.num_lat = num_lat + self.std = ( + None + if std is None + else paddle.to_tensor(std, paddle.get_default_dtype()).reshape((1, -1)) + ) + self.variable_dict = variable_dict + self.unlog = unlog + self.scale = scale + self.weight = self.get_latitude_weight(num_lat) + + def get_latitude_weight(self, num_lat: int = 720): + lat_t = paddle.linspace(start=0, stop=1, num=num_lat) + lat_t = paddle.cos(3.1416 * (0.5 - lat_t)) + weight = num_lat * lat_t / paddle.sum(lat_t) + weight = weight.reshape((1, 1, -1, 1)) + return weight + + def scale_expm1(self, x: paddle.Tensor): + return self.scale * paddle.expm1(x) + + @paddle.no_grad() + def forward(self, output_dict, label_dict) -> Dict[str, "paddle.Tensor"]: + metric_dict = {} + for key in label_dict: + output = ( + self.scale_expm1(output_dict[key]) if self.unlog else output_dict[key] + ) + label = self.scale_expm1(label_dict[key]) if self.unlog else label_dict[key] + + mse = F.mse_loss(output, label, "none") + rmse = (mse * self.weight).mean(axis=(-1, -2)) ** 0.5 + if self.std is not None: + rmse = rmse * self.std + if self.variable_dict is not None: + for variable_name, idx in self.variable_dict.items(): + metric_dict[f"{key}.{variable_name}"] = ( + rmse[:, idx] if self.keep_batch else rmse[:, idx].mean() + ) + else: + metric_dict[key] = rmse.mean(axis=1) if self.keep_batch else rmse.mean() + + return metric_dict diff --git a/ppsci/optimizer/__init__.py b/ppsci/optimizer/__init__.py index c03b0717ee..41ebb0d468 100644 --- a/ppsci/optimizer/__init__.py +++ b/ppsci/optimizer/__init__.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -82,3 +83,87 @@ def build_optimizer(cfg, model_list, epochs, iters_per_epoch): if isinstance(lr_scheduler, float): return optimizer, None return optimizer, lr_scheduler +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from ppsci.optimizer import lr_scheduler +from ppsci.optimizer.optimizer import LBFGS +from ppsci.optimizer.optimizer import SGD +from ppsci.optimizer.optimizer import Adam +from ppsci.optimizer.optimizer import AdamW +from ppsci.optimizer.optimizer import Momentum +from ppsci.optimizer.optimizer import OptimizerList +from ppsci.optimizer.optimizer import RMSProp + +__all__ = [ + "LBFGS", + "SGD", + "Adam", + "AdamW", + "Momentum", + "RMSProp", + "OptimizerList", + "lr_scheduler", +] + + +def build_lr_scheduler(cfg, epochs, iters_per_epoch): + """Build learning rate scheduler. 
+ + Args: + cfg (DictConfig): Learning rate scheduler config. + epochs (int): Total epochs. + iters_per_epoch (int): Number of iterations of one epoch. + + Returns: + LRScheduler: Learning rate scheduler. + """ + cfg = copy.deepcopy(cfg) + cfg.update({"epochs": epochs, "iters_per_epoch": iters_per_epoch}) + lr_scheduler_cls = cfg.pop("name") + lr_scheduler_ = eval(lr_scheduler_cls)(**cfg) + return lr_scheduler_() + + +def build_optimizer(cfg, model_list, epochs, iters_per_epoch): + """Build optimizer and learning rate scheduler + + Args: + cfg (DictConfig): Learning rate scheduler config. + model_list (Tuple[nn.Layer, ...]): Tuple of model(s). + epochs (int): Total epochs. + iters_per_epoch (int): Number of iterations of one epoch. + + Returns: + Optimizer, LRScheduler: Optimizer and learning rate scheduler. + """ + # build lr_scheduler + cfg = copy.deepcopy(cfg) + lr_cfg = cfg.pop("lr") + if isinstance(lr_cfg, float): + lr_scheduler = lr_cfg + else: + lr_scheduler = build_lr_scheduler(lr_cfg, epochs, iters_per_epoch) + + # build optimizer + opt_cls = cfg.pop("name") + optimizer = eval(opt_cls)(learning_rate=lr_scheduler, **cfg)(model_list) + + if isinstance(lr_scheduler, float): + return optimizer, None + return optimizer, lr_scheduler +>>>>>>> Stashed changes diff --git a/ppsci/optimizer/lr_scheduler.py b/ppsci/optimizer/lr_scheduler.py index cad7da3503..d166890725 100644 --- a/ppsci/optimizer/lr_scheduler.py +++ b/ppsci/optimizer/lr_scheduler.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -909,3 +910,848 @@ def __getitem__(self, idx): def __setitem__(self, idx, sch): raise NotImplementedError("Can not modify any item in SchedulerList.") +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import abc +import math +from typing import Callable +from typing import List +from typing import Tuple +from typing import Union + +from paddle.optimizer import lr + +from ppsci.utils import logger + +__all__ = [ + "Linear", + "Cosine", + "Step", + "Piecewise", + "MultiStepDecay", + "ExponentialDecay", + "CosineWarmRestarts", + "OneCycleLR", + "LambdaDecay", +] + + +class LRBase: + """Base class for custom learning rates. + + Args: + epochs (int): Total epoch(s). + iters_per_epoch (int): Number of iterations within an epoch. + learning_rate (float): Learning rate. + warmup_epoch (int): Number of warmup epochs. + warmup_start_lr (float): Start learning rate within warmup. + last_epoch (int): Last epoch. + by_epoch (bool): Learning rate decays by epoch when by_epoch is True, else by iter. + verbose (bool): If True, prints a message to stdout for each update. Defaults to False. 
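build_optimizer above is config-driven: it pops "name" to pick an optimizer class and treats "lr" either as a constant float or as a scheduler sub-config handed to build_lr_scheduler. A hedged usage sketch in which a plain Python dict stands in for the DictConfig named in the docstring and "lr" takes the constant-float path; the key set is an assumption based on the pop() calls above.

import ppsci

# "name" must resolve to one of the optimizer classes imported above (e.g. Adam);
# the remaining keys are forwarded to that class's constructor.
cfg = {"name": "Adam", "lr": 1e-3, "weight_decay": 1e-4}

model = ppsci.arch.MLP(("x",), ("u",), 5, 20)
optimizer, lr_sched = ppsci.optimizer.build_optimizer(
    cfg, (model,), epochs=10, iters_per_epoch=100
)
print(lr_sched)  # None here, because "lr" was given as a constant float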
+ """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + learning_rate: float, + warmup_epoch: int, + warmup_start_lr: float, + last_epoch: int, + by_epoch: bool, + verbose: bool = False, + ) -> None: + """Initialize and record the necessary parameters.""" + super().__init__() + if warmup_epoch >= epochs: + msg = ( + "When using warm up, the value of 'Global.epochs' should be greater " + "than value of 'Optimizer.lr.warmup_epoch'. The value of " + f"'Optimizer.lr.warmup_epoch' has been set to {epochs}." + ) + logger.warning(msg) + warmup_epoch = epochs + self.epochs = epochs + self.iters_per_epoch = iters_per_epoch + self.learning_rate = learning_rate + self.warmup_epoch = warmup_epoch + self.warmup_steps = ( + self.warmup_epoch + if by_epoch + else round(self.warmup_epoch * self.iters_per_epoch) + ) + self.warmup_start_lr = warmup_start_lr + self.last_epoch = last_epoch + self.by_epoch = by_epoch + self.verbose = verbose + + @abc.abstractmethod + def __call__(self, *args, **kwargs) -> lr.LRScheduler: + """Generate an learning rate scheduler. + + Returns: + lr.LinearWarmup: learning rate scheduler. + """ + pass + + def linear_warmup( + self, learning_rate: Union[float, lr.LRScheduler] + ) -> lr.LinearWarmup: + """Add an Linear Warmup before learning_rate. + + Args: + learning_rate (Union[float, lr.LRScheduler]): Original learning rate without + warmup. + + Returns: + lr.LinearWarmup: learning rate scheduler with warmup. + """ + warmup_lr = lr.LinearWarmup( + learning_rate=learning_rate, + warmup_steps=self.warmup_steps, + start_lr=self.warmup_start_lr, + end_lr=self.learning_rate, + last_epoch=self.last_epoch, + verbose=self.verbose, + ) + return warmup_lr + + +class Constant(lr.LRScheduler): + """Constant learning rate Class implementation. + + Args: + learning_rate (float): The initial learning rate. + last_epoch (int, optional): The index of last epoch. Default: -1. + """ + + def __init__(self, learning_rate: float, last_epoch: int = -1): + self.learning_rate = learning_rate + self.last_epoch = last_epoch + super().__init__() + + def get_lr(self) -> float: + """Always return the same learning rate""" + return self.learning_rate + + +class Linear(LRBase): + """Linear learning rate decay. + + Args: + epochs (int): Total epoch(s). + iters_per_epoch (int): Number of iterations within an epoch. + learning_rate (float): Learning rate. + end_lr (float, optional): The minimum final learning rate. Defaults to 0.0. + power (float, optional): Power of polynomial. Defaults to 1.0. + cycle (bool, optional): Whether the learning rate rises again. If True, then the learning rate will rise when it decrease + to ``end_lr`` . If False, the learning rate is monotone decreasing. Defaults to False. + warmup_epoch (int): Number of warmup epochs. + warmup_start_lr (float): Start learning rate within warmup. + last_epoch (int): Last epoch. + by_epoch (bool): Learning rate decays by epoch when by_epoch is True, else by iter. 
+ + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.Linear(10, 2, 0.001)() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + learning_rate: float, + end_lr: float = 0.0, + power: float = 1.0, + cycle: bool = False, + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + learning_rate, + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + ) + self.decay_steps = (epochs - self.warmup_epoch) * iters_per_epoch + self.end_lr = end_lr + self.power = power + self.cycle = cycle + self.warmup_steps = round(self.warmup_epoch * iters_per_epoch) + if self.by_epoch: + self.decay_steps = self.epochs - self.warmup_epoch + + def __call__(self): + learning_rate = ( + lr.PolynomialDecay( + learning_rate=self.learning_rate, + decay_steps=self.decay_steps, + end_lr=self.end_lr, + power=self.power, + cycle=self.cycle, + last_epoch=self.last_epoch, + ) + if self.decay_steps > 0 + else Constant(self.learning_rate) + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class ExponentialDecay(LRBase): + """ExponentialDecay learning rate decay. + + Args: + epochs (int): Total epoch(s). + iters_per_epoch (int): Number of iterations within an epoch. + learning_rate (float): Learning rate. + gamma (float): The decay rate. + decay_steps (int): The number of steps to decay. + warmup_epoch (int): Number of warmup epochs. + warmup_start_lr (float): Start learning rate within warmup. + last_epoch (int): Last epoch. + by_epoch (bool): Learning rate decays by epoch when by_epoch is True, else by iter. + + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.ExponentialDecay(10, 2, 1e-3, 0.95, 3)() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + learning_rate: float, + gamma: float, + decay_steps: int, + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + learning_rate, + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + ) + self.decay_steps = decay_steps + self.gamma = gamma + self.warmup_steps = round(self.warmup_epoch * iters_per_epoch) + if self.by_epoch: + self.decay_steps /= iters_per_epoch + + def __call__(self): + learning_rate = lr.ExponentialDecay( + learning_rate=self.learning_rate, + gamma=self.gamma ** (1 / self.decay_steps), + last_epoch=self.last_epoch, + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class Cosine(LRBase): + """Cosine learning rate decay. + + lr = 0.05 * (math.cos(epoch * (math.pi / epochs)) + 1) + + Args: + epochs (int): Total epoch(s). + iters_per_epoch (int): Number of iterations within an epoch. + learning_rate (float): Learning rate. + eta_min (float, optional): Minimum learning rate. Defaults to 0.0. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): Start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): Last epoch. Defaults to -1. + by_epoch (bool, optional): Learning rate decays by epoch when by_epoch is True, + else by iter. Defaults to False. 
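The ExponentialDecay wrapper above hands gamma ** (1 / decay_steps) to Paddle, so the learning rate is multiplied by gamma once per decay_steps steps rather than once per step. A short arithmetic check of that reparameterization (plain Python, no scheduler required).

gamma, decay_steps = 0.95, 3
per_step = gamma ** (1 / decay_steps)  # factor actually applied at every step

lr0 = 1e-3
print(lr0 * per_step**decay_steps)  # lr after decay_steps steps
print(lr0 * gamma)                  # same value: exactly one "gamma" of decay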
+ + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.Cosine(10, 2, 1e-3)() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + learning_rate: float, + eta_min: float = 0.0, + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + learning_rate, + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + ) + self.T_max = (self.epochs - self.warmup_epoch) * self.iters_per_epoch + self.eta_min = eta_min + if self.by_epoch: + self.T_max = self.epochs - self.warmup_epoch + + def __call__(self): + learning_rate = ( + lr.CosineAnnealingDecay( + learning_rate=self.learning_rate, + T_max=self.T_max, + eta_min=self.eta_min, + last_epoch=self.last_epoch, + ) + if self.T_max > 0 + else Constant(self.learning_rate) + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class Step(LRBase): + """Step learning rate decay. + + Args: + epochs (int): Total epoch(s). + iters_per_epoch (int): Number of iterations within an epoch. + learning_rate (float): Learning rate. + step_size (int): The interval to update. + gamma (float, optional): The Ratio that the learning rate will be reduced. + ``new_lr = origin_lr * gamma``. It should be less than 1.0. Default: 0.1. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): Start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): Last epoch. Defaults to -1. + by_epoch (bool, optional): Learning rate decays by epoch when by_epoch is True, + else by iter. Defaults to False. + + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.Step(10, 1, 1e-3, 2, 0.95)() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + learning_rate: float, + step_size: int, + gamma: float, + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + learning_rate, + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + ) + self.step_size = step_size * iters_per_epoch + self.gamma = gamma + if self.by_epoch: + self.step_size = step_size + + def __call__(self): + learning_rate = lr.StepDecay( + learning_rate=self.learning_rate, + step_size=self.step_size, + gamma=self.gamma, + last_epoch=self.last_epoch, + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class Piecewise(LRBase): + """Piecewise learning rate decay + + Args: + epochs (int): Total epoch(s) + iters_per_epoch (int): Number of iterations within an epoch + decay_epochs (Tuple[int, ...]): A list of steps numbers. The type of element in the + list is python int. + values (Tuple[float, ...]): Tuple of learning rate values that will be picked during + different epoch boundaries. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): Start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): Last epoch. Defaults to -1. + by_epoch (bool, optional): Learning rate decays by epoch when by_epoch is True, + else by iter. Defaults to False. + + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.Piecewise( + ... 
10, 1, [2, 4], (1e-3, 1e-4, 1e-5) + ... )() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + decay_epochs: Tuple[int, ...], + values: Tuple[float, ...], + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + values[0], + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + ) + self.values = values + self.boundaries_steps = [e * iters_per_epoch for e in decay_epochs] + if self.by_epoch is True: + self.boundaries_steps = decay_epochs + + def __call__(self): + learning_rate = lr.PiecewiseDecay( + boundaries=self.boundaries_steps, + values=self.values, + last_epoch=self.last_epoch, + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class MultiStepDecay(LRBase): + """MultiStepDecay learning rate decay + + Args: + epochs (int): Total epoch(s) + iters_per_epoch (int): Number of iterations within an epoch + learning_rate (float): Learning rate + milestones (Tuple[int, ...]): Tuple of each boundaries. should be increasing. + gamma (float, optional): The Ratio that the learning rate will be reduced. + `new_lr = origin_lr * gamma`. It should be less than 1.0. Defaults to 0.1. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): Start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): Last epoch. Defaults to -1. + by_epoch (bool, optional): Learning rate decays by epoch when by_epoch is True, + else by iter. Defaults to False. + + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.MultiStepDecay(10, 1, 1e-3, (4, 5))() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + learning_rate: float, + milestones: Tuple[int, ...], + gamma: float = 0.1, + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + learning_rate, + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + ) + self.milestones = [x * iters_per_epoch for x in milestones] + self.gamma = gamma + if self.by_epoch: + self.milestones = milestones + + def __call__(self): + learning_rate = lr.MultiStepDecay( + learning_rate=self.learning_rate, + milestones=self.milestones, + gamma=self.gamma, + last_epoch=self.last_epoch, + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class CosineAnnealingWarmRestarts(lr.LRScheduler): + """The implementation of cosine annealing schedule with warm restarts. + + Args: + learning_rate (float): Learning rate + T_0 (int): Number of iterations for the first restart. + T_mult (int, optional): A factor increases T_i after a restart. Defaults to 1. + eta_min (float, optional): Minimum learning rate. Defaults to 0. + last_epoch (int, optional): The index of last epoch. Defaults to -1. + verbose (bool, optional): If `True`, prints a message to stdout for each update. Defaults to False. 
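Piecewise and MultiStepDecay above both take boundaries in epochs and, when by_epoch=False (the default), rescale them by iters_per_epoch because the scheduler is stepped once per iteration. A tiny sketch of that conversion with illustrative numbers.

iters_per_epoch = 100
milestones_in_epochs = (4, 5)

# by_epoch=False: boundaries are expressed in iterations before being passed to Paddle.
milestones_in_iters = [m * iters_per_epoch for m in milestones_in_epochs]
print(milestones_in_iters)  # [400, 500]
# by_epoch=True: the epoch values are used as-is and step() is expected once per epoch.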
+ """ + + def __init__( + self, + learning_rate: float, + T_0: int, + T_mult: int = 1, + eta_min: float = 0.0, + last_epoch: int = -1, + verbose: bool = False, + ): + if T_0 <= 0 or not isinstance(T_0, int): + raise ValueError(f"Expected positive integer T_0, but got {T_0}") + if T_mult < 1 or not isinstance(T_mult, int): + raise ValueError(f"Expected integer T_mult >= 1, but got {T_mult}") + self.T_0 = T_0 + self.T_i = T_0 + self.T_mult = T_mult + self.eta_min = eta_min + self.T_cur = last_epoch + super().__init__(learning_rate, last_epoch, verbose) + + def get_lr(self): + return ( + self.eta_min + + (self.base_lr - self.eta_min) + * (1 + math.cos(math.pi * self.T_cur / self.T_i)) + / 2 + ) + + def step(self, epoch=None): + if epoch is None and self.last_epoch < 0: + epoch = 0 + + if epoch is None: + epoch = self.last_epoch + 1 + self.T_cur = self.T_cur + 1 + if self.T_cur >= self.T_i: + self.T_cur = self.T_cur - self.T_i + self.T_i = self.T_i * self.T_mult + else: + if epoch < 0: + raise ValueError(f"Expected non-negative epoch, but got {epoch}") + if epoch >= self.T_0: + if self.T_mult == 1: + self.T_cur = epoch % self.T_0 + else: + n = int( + math.log( + (epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult + ) + ) + self.T_cur = epoch - self.T_0 * (self.T_mult**n - 1) / ( + self.T_mult - 1 + ) + self.T_i = self.T_0 * self.T_mult ** (n) + else: + self.T_i = self.T_0 + self.T_cur = epoch + self.last_epoch = math.floor(epoch) + self.last_lr = self.get_lr() + + +class CosineWarmRestarts(LRBase): + """Set the learning rate using a cosine annealing schedule with warm restarts. + + Args: + epochs (int): Total epoch(s) + iters_per_epoch (int): Number of iterations within an epoch + learning_rate (float): Learning rate + T_0 (int): Number of iterations for the first restart. + T_mult (int): A factor increases T_i after a restart + eta_min (float, optional): Minimum learning rate. Defaults to 0.0. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): Start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): Last epoch. Defaults to -1. + by_epoch (bool, optional): Learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False. + + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.CosineWarmRestarts(20, 1, 1e-3, 14, 2)() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + learning_rate: float, + T_0: int, + T_mult: int, + eta_min: float = 0.0, + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + learning_rate, + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + ) + self.T_0 = T_0 + self.T_mult = T_mult + self.eta_min = eta_min + if self.by_epoch is False: + self.T_0 = T_0 * iters_per_epoch + + def __call__(self): + learning_rate = CosineAnnealingWarmRestarts( + learning_rate=self.learning_rate, + T_0=self.T_0, + T_mult=self.T_mult, + eta_min=self.eta_min, + last_epoch=self.last_epoch, + verbose=self.verbose, + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class OneCycleLR(LRBase): + """Sets the learning rate according to the one cycle learning rate scheduler. 
+ The scheduler adjusts the learning rate from an initial learning rate to the maximum learning rate and then + from that maximum learning rate to the minimum learning rate, which is much less than the initial learning rate. + + It has been proposed in [Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates](https://arxiv.org/abs/1708.07120). + + Please note that the default behavior of this scheduler follows the fastai implementation of one cycle, + which claims that **"unpublished work has shown even better results by using only two phases"**. + If you want the behavior of this scheduler to be consistent with the paper, please set `three_phase=True`. + + Args: + epochs (int): Total epoch(s). + iters_per_epoch (int): Number of iterations within an epoch. + max_learning_rate (float): The maximum learning rate. It is a python float number. Functionally, it defines the initial learning rate by `divide_factor` . + divide_factor (float, optional): Initial learning rate will be determined by initial_learning_rate = max_learning_rate / divide_factor. Defaults to 25.0. + end_learning_rate (float, optional): The minimum learning rate during training, it should be much less than initial learning rate. Defaults to 0.0001. + phase_pct (float): The percentage of total steps which used to increasing learning rate. Defaults to 0.3. + anneal_strategy (str, optional): Strategy of adjusting learning rate. "cos" for cosine annealing, "linear" for linear annealing. Defaults to "cos". + three_phase (bool, optional): Whether to use three phase. Defaults to False. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): Start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): Last epoch. Defaults to -1. + by_epoch (bool, optional): Learning rate decays by epoch when by_epoch is True, else by iter. Defaults to False. + + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.OneCycleLR(100, 1, 1e-3)() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + max_learning_rate: float, + divide_factor: float = 25.0, + end_learning_rate: float = 0.0001, + phase_pct: float = 0.3, + anneal_strategy: str = "cos", + three_phase: bool = False, + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + max_learning_rate, + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + ) + self.total_steps = epochs + if not by_epoch: + self.total_steps *= iters_per_epoch + self.divide_factor = divide_factor + self.end_learning_rate = end_learning_rate + self.phase_pct = phase_pct + self.anneal_strategy = anneal_strategy + self.three_phase = three_phase + + def __call__(self): + learning_rate = lr.OneCycleLR( + max_learning_rate=self.learning_rate, + total_steps=self.total_steps, + divide_factor=self.divide_factor, + end_learning_rate=self.end_learning_rate, + phase_pct=self.phase_pct, + anneal_strategy=self.anneal_strategy, + three_phase=self.three_phase, + last_epoch=self.last_epoch, + verbose=self.verbose, + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class LambdaDecay(LRBase): + """This interface provides a lambda function to set the learning rate strategy. + + Args: + epochs (int): Total epoch(s). 
+ iters_per_epoch (int): Number of iterations within an epoch. + learning_rate (float): Learning rate. + lr_lambda (Callable): A lambda function that calculates a factor through epoch, which is multiplied by the initial learning rate. + warmup_epoch (int, optional): The epoch numbers for LinearWarmup. Defaults to 0. + warmup_start_lr (float, optional): Start learning rate within warmup. Defaults to 0.0. + last_epoch (int, optional): Last epoch. Defaults to -1. + by_epoch (bool, optional): Learning rate decays by epoch when by_epoch is True, + else by iter. Defaults to False. + verbose (bool, optional): If True, prints a message to stdout for each update. Defaults to False. + + Examples: + >>> import ppsci + >>> lr = ppsci.optimizer.lr_scheduler.LambdaDecay(0.5, lr_lambda=lambda x:0.95**x, verbose=True)() + """ + + def __init__( + self, + epochs: int, + iters_per_epoch: int, + learning_rate: float, + lr_lambda: Callable, + warmup_epoch: int = 0, + warmup_start_lr: float = 0.0, + last_epoch: int = -1, + by_epoch: bool = False, + verbose: bool = False, + ): + super().__init__( + epochs, + iters_per_epoch, + learning_rate, + warmup_epoch, + warmup_start_lr, + last_epoch, + by_epoch, + verbose, + ) + self.learning_rate = learning_rate + self.lr_lambda = lr_lambda + self.last_epoch = last_epoch + self.verbose = verbose + self.by_epoch = by_epoch + + def __call__(self): + learning_rate = lr.LambdaDecay( + learning_rate=self.learning_rate, + lr_lambda=self.lr_lambda, + last_epoch=self.last_epoch, + verbose=self.verbose, + ) + + if self.warmup_steps > 0: + learning_rate = self.linear_warmup(learning_rate) + + setattr(learning_rate, "by_epoch", self.by_epoch) + return learning_rate + + +class SchedulerList: + """SchedulerList which wrap more than one scheduler. + + Args: + scheduler_list (Tuple[lr.LRScheduler, ...]): Schedulers listed in a tuple. + + Examples: + >>> import ppsci + >>> sch1 = ppsci.optimizer.lr_scheduler.Linear(10, 2, 0.001)() + >>> sch2 = ppsci.optimizer.lr_scheduler.ExponentialDecay(10, 2, 1e-3, 0.95, 3)() + >>> sch = ppsci.optimizer.lr_scheduler.SchedulerList((sch1, sch2)) + """ + + def __init__(self, scheduler_list: Tuple[lr.LRScheduler, ...]): + super().__init__() + self._sch_list = scheduler_list + self.by_epoch = False + + def step(self): + for sch in self._sch_list: + sch.step() + + def get_lr(self) -> float: + """Return learning rate of first scheduler""" + return self._sch_list[0].get_lr() + + def _state_keys(self) -> List[str]: + return ["last_epoch", "last_lr"] + + def __len__(self) -> int: + return len(self._sch_list) + + def __getitem__(self, idx): + return self._sch_list[idx] + + def __setitem__(self, idx, sch): + raise NotImplementedError("Can not modify any item in SchedulerList.") +>>>>>>> Stashed changes diff --git a/ppsci/optimizer/optimizer.py b/ppsci/optimizer/optimizer.py index fd4b3d447a..c3b9630dec 100644 --- a/ppsci/optimizer/optimizer.py +++ b/ppsci/optimizer/optimizer.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -647,3 +648,555 @@ def __setitem__(self, idx, opt): def __iter__(self): yield from iter(self._opt_list) +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
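A usage sketch for LambdaDecay that matches the constructor signature above (epochs, iters_per_epoch, learning_rate, lr_lambda); the multiplier returned by lr_lambda scales the initial learning rate, and the numbers here are illustrative.

import ppsci

sched = ppsci.optimizer.lr_scheduler.LambdaDecay(
    epochs=10,
    iters_per_epoch=2,
    learning_rate=1e-3,
    lr_lambda=lambda step: 0.95**step,  # 5% multiplicative decay per step
)()

for _ in range(5):
    print(sched.get_lr())
    sched.step()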
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Dict +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +from paddle import nn +from paddle import optimizer as optim +from paddle import regularizer +from paddle.incubate import optimizer as incubate_optim +from typing_extensions import Literal + +from ppsci.utils import logger +from ppsci.utils import misc + +if TYPE_CHECKING: + import paddle + +__all__ = ["SGD", "Momentum", "Adam", "RMSProp", "AdamW", "LBFGS", "OptimizerList"] + + +class SGD: + """Stochastic Gradient Descent. + + Args: + learning_rate (Union[float, optim.lr.LRScheduler], optional): The learning rate + used to update parameter(s). Defaults to 0.001. + weight_decay (Optional[Union[float, regularizer.L1Decay, regularizer.L2Decay]]): + Regularization strategy. Defaults to None. + grad_clip (Optional[Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm]]): + Gradient clipping strategy. Defaults to None. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.MLP(("x",), ("u",), 5, 20) + >>> opt = ppsci.optimizer.SGD(1e-3)(model) + """ + + def __init__( + self, + learning_rate: Union[float, optim.lr.LRScheduler] = 0.001, + weight_decay: Optional[ + Union[float, regularizer.L1Decay, regularizer.L2Decay] + ] = None, + grad_clip: Optional[ + Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm] + ] = None, + ): + self.learning_rate = learning_rate + self.weight_decay = weight_decay + self.grad_clip = grad_clip + + def __call__(self, model_list: Union[nn.Layer, Tuple[nn.Layer, ...]]): + # model_list is None in static graph + if not isinstance(model_list, (tuple, list)): + model_list = (model_list,) + parameters = ( + sum([m.parameters() for m in model_list], []) if model_list else None + ) + opt = optim.SGD( + learning_rate=self.learning_rate, + parameters=parameters, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + ) + return opt + + +class Momentum: + """Simple Momentum optimizer with velocity state. + + Args: + learning_rate (Union[float, optim.lr.LRScheduler]): The learning rate + used to update parameter(s). + momentum (float): Momentum factor. + weight_decay (Optional[Union[float, regularizer.L1Decay, regularizer.L2Decay]]): + Regularization strategy. Defaults to None. + grad_clip (Optional[Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm]]): + Gradient clipping strategy. Defaults to None. + use_nesterov (bool, optional): Whether to use nesterov momentum. Defaults to False. + no_weight_decay_name (Optional[str]): List of names of no weight decay parameters split by white space. Defaults to None. 
+ + Examples: + >>> import ppsci + >>> model = ppsci.arch.MLP(("x",), ("u",), 5, 20) + >>> opt = ppsci.optimizer.Momentum(1e-3, 0.9)(model) + """ + + def __init__( + self, + learning_rate: Union[float, optim.lr.LRScheduler], + momentum: float, + weight_decay: Optional[ + Union[float, regularizer.L1Decay, regularizer.L2Decay] + ] = None, + grad_clip: Optional[ + Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm] + ] = None, + use_nesterov: bool = False, + no_weight_decay_name: Optional[str] = None, + ): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.use_nesterov = use_nesterov + self.no_weight_decay_name_list = ( + no_weight_decay_name.split() if no_weight_decay_name else [] + ) + + def __call__(self, model_list: Union[nn.Layer, Tuple[nn.Layer, ...]]): + # model_list is None in static graph + if not isinstance(model_list, (tuple, list)): + model_list = (model_list,) + parameters = None + if len(self.no_weight_decay_name_list) > 0: + params_with_decay = [] + params_without_decay = [] + for m in model_list: + params = [ + p + for n, p in m.named_parameters() + if not any(nd in n for nd in self.no_weight_decay_name_list) + ] + params_with_decay.extend(params) + params = [ + p + for n, p in m.named_parameters() + if any(nd in n for nd in self.no_weight_decay_name_list) + ] + params_without_decay.extend(params) + parameters = [ + {"params": params_with_decay, "weight_decay": self.weight_decay}, + {"params": params_without_decay, "weight_decay": 0.0}, + ] + else: + parameters = ( + sum([m.parameters() for m in model_list], []) if model_list else None + ) + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + use_nesterov=self.use_nesterov, + parameters=parameters, + ) + if hasattr(opt, "_use_multi_tensor"): + opt = optim.Momentum( + learning_rate=self.learning_rate, + momentum=self.momentum, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + parameters=parameters, + use_nesterov=self.use_nesterov, + use_multi_tensor=True, + ) + return opt + + +class Adam: + """Adam: A Method for Stochastic Optimization. + + Args: + learning_rate (Union[float, optim.lr.LRScheduler], optional): The learning rate + used to update parameter(s). Defaults to 0.001. + beta1 (float, optional): The exponential decay rate for the 1st moment estimates. Defaults to 0.9. + beta2 (float, optional): The exponential decay rate for the 2nd moment estimates. Defaults to 0.999. + epsilon (float, optional): A small float value for numerical stability. Defaults to 1e-08. + weight_decay (Optional[Union[float, regularizer.L1Decay, regularizer.L2Decay]]): Regularization strategy. Defaults to None. + grad_clip (Optional[Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm]]): Gradient clipping strategy. Defaults to None. + lazy_mode (bool, optional): Whether to enable lazy mode for moving-average. Defaults to False. + amsgrad (bool, optional): Whether to use the AMSGrad variant of this algorithm from the paper + `On the Convergence of Adam and Beyond `_. Defaults to False. 
+ + Examples: + >>> import ppsci + >>> model = ppsci.arch.MLP(("x",), ("u",), 5, 20) + >>> opt = ppsci.optimizer.Adam(1e-3)(model) + """ + + def __init__( + self, + learning_rate: Union[float, optim.lr.LRScheduler] = 0.001, + beta1: float = 0.9, + beta2: float = 0.999, + epsilon: float = 1e-08, + weight_decay: Optional[ + Union[float, regularizer.L1Decay, regularizer.L2Decay] + ] = None, + grad_clip: Optional[ + Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm] + ] = None, + lazy_mode: bool = False, + amsgrad: bool = False, + ): + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.learning_rate = learning_rate + self.weight_decay = weight_decay + self.grad_clip = grad_clip + self.lazy_mode = lazy_mode + self.amsgrad = amsgrad + + def __call__(self, model_list: Union[nn.Layer, Tuple[nn.Layer, ...]]): + # model_list is None in static graph + if not isinstance(model_list, (tuple, list)): + model_list = (model_list,) + parameters = ( + sum([m.parameters() for m in model_list], []) if model_list else None + ) + import inspect + + extra_kwargs = {} + if "amsgrad" in inspect.signature(optim.Adam.__init__).parameters: + extra_kwargs["amsgrad"] = self.amsgrad + opt = optim.Adam( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + lazy_mode=self.lazy_mode, + parameters=parameters, + **extra_kwargs, + ) + return opt + + +class LBFGS: + """The L-BFGS is a quasi-Newton method for solving an unconstrained optimization + problem over a differentiable function. Closely related is the Newton method for minimization. + + Args: + learning_rate (float, optional): The learning rate + used to update parameter(s). Defaults to 1.0. + max_iter (int, optional): Maximal number of iterations per optimization step. + Defaults to 1. + max_eval (Optional[int]): Maximal number of function evaluations per + optimization step. Defaults to None. + tolerance_grad (float, optional): Termination tolerance on first order optimality. + Defaults to 1e-07. + tolerance_change (float, optional): Termination tolerance on function + value/parameter changes. Defaults to 1e-09. + history_size (int, optional): Update history size. Defaults to 100. + line_search_fn (Optional[Literal["strong_wolfe"]]): Either 'strong_wolfe' or None. + Defaults to "strong_wolfe". 
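The Adam wrapper above forwards amsgrad only when the installed Paddle's optim.Adam constructor actually accepts it, by inspecting its signature. A generic sketch of that feature-detection pattern; new_api below is a hypothetical stand-in, not a Paddle function.

import inspect

def new_api(x, scale=1.0, amsgrad=False):  # hypothetical callee that may or may not accept amsgrad
    return x * scale

extra_kwargs = {}
if "amsgrad" in inspect.signature(new_api).parameters:
    extra_kwargs["amsgrad"] = True  # only passed when the parameter exists

print(new_api(2.0, scale=3.0, **extra_kwargs))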
+ + Examples: + >>> import ppsci + >>> model = ppsci.arch.MLP(("x",), ("u",), 5, 20) + >>> opt = ppsci.optimizer.LBFGS(1e-3)(model) + """ + + def __init__( + self, + learning_rate: float = 1.0, + max_iter: int = 1, + max_eval: Optional[int] = None, + tolerance_grad: float = 1e-07, + tolerance_change: float = 1e-09, + history_size: int = 100, + line_search_fn: Optional[Literal["strong_wolfe"]] = "strong_wolfe", + ): + self.lr = learning_rate + self.max_iter = max_iter + self.max_eval = max_eval + self.tolerance_grad = tolerance_grad + self.tolerance_change = tolerance_change + self.history_size = history_size + self.line_search_fn = line_search_fn + + def __call__(self, model_list: Union[nn.Layer, Tuple[nn.Layer, ...]]): + # model_list is None in static graph + if not isinstance(model_list, (tuple, list)): + model_list = (model_list,) + parameters = ( + sum([m.parameters() for m in model_list], []) if model_list else None + ) + try: + opt = getattr(optim, "LBFGS")( + learning_rate=self.lr, + max_iter=self.max_iter, + max_eval=self.max_eval, + tolerance_grad=self.tolerance_grad, + tolerance_change=self.tolerance_change, + history_size=self.history_size, + line_search_fn=self.line_search_fn, + parameters=parameters, + ) + except AttributeError: + opt = getattr(incubate_optim, "LBFGS")( + learning_rate=self.lr, + max_iter=self.max_iter, + max_eval=self.max_eval, + tolerance_grad=self.tolerance_grad, + tolerance_change=self.tolerance_change, + history_size=self.history_size, + line_search_fn=self.line_search_fn, + parameters=parameters, + ) + return opt + + +class RMSProp: + """Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning rate method. + + Args: + learning_rate (Union[float, optim.lr.LRScheduler]): The learning rate + used to update parameter(s) + rho (float, optional): Factor ρ in equation. Defaults to 0.95. + epsilon (float, optional): Factor ϵ in equation as a smoothing term. Defaults to 1e-6. + momentum (float, optional):β in equation is the momentum term. Defaults to 0.0. + weight_decay (Optional[Union[float, regularizer.L1Decay, regularizer.L2Decay]]): + Regularization strategy. Defaults to None. + grad_clip (Optional[Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm]]): + Gradient clipping strategy. Defaults to None. 
+ + Examples: + >>> import ppsci + >>> model = ppsci.arch.MLP(("x",), ("u",), 5, 20) + >>> opt = ppsci.optimizer.RMSProp(1e-3)(model) + """ + + def __init__( + self, + learning_rate: Union[float, optim.lr.LRScheduler], + rho: float = 0.95, + epsilon: float = 1e-6, + momentum: float = 0.0, + weight_decay: Optional[ + Union[float, regularizer.L1Decay, regularizer.L2Decay] + ] = None, + grad_clip: Optional[ + Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm] + ] = None, + ): + super().__init__() + self.learning_rate = learning_rate + self.momentum = momentum + self.rho = rho + self.epsilon = epsilon + self.weight_decay = weight_decay + self.grad_clip = grad_clip + + def __call__(self, model_list: Union[nn.Layer, Tuple[nn.Layer, ...]]): + # model_list is None in static graph + if not isinstance(model_list, (tuple, list)): + model_list = (model_list,) + parameters = ( + sum([m.parameters() for m in model_list], []) if model_list else None + ) + opt = optim.RMSProp( + learning_rate=self.learning_rate, + momentum=self.momentum, + rho=self.rho, + epsilon=self.epsilon, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + parameters=parameters, + ) + return opt + + +class AdamW: + """AdamW is implemented based on DECOUPLED WEIGHT DECAY REGULARIZATION. + + Args: + learning_rate (Union[float, optim.lr.LRScheduler], optional): The learning rate + used to update parameter(s). Defaults to 0.001. + beta1 (float, optional): The exponential decay rate for the 1st moment estimates. Defaults to 0.9. + beta2 (float, optional): The exponential decay rate for the 2nd moment estimates. Defaults to 0.999. + epsilon (float, optional): A small float value for numerical stability. Defaults to 1e-8. + weight_decay (float, optional): Regularization coefficient. Defaults to 0.01. + grad_clip (Optional[Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm]]): Gradient clipping strategy. Defaults to None. + no_weight_decay_name (Optional[str]): List of names of no weight decay parameters split by white space. Defaults to None. + one_dim_param_no_weight_decay (bool, optional): Apply no weight decay on 1-D parameter(s). Defaults to False. + amsgrad (bool, optional): Whether to use the AMSGrad variant of this algorithm from the paper + `On the Convergence of Adam and Beyond `_. Defaults to False. 
+ + Examples: + >>> import ppsci + >>> model = ppsci.arch.MLP(("x",), ("u",), 5, 20) + >>> opt = ppsci.optimizer.AdamW(1e-3)(model) + """ + + def __init__( + self, + learning_rate: Union[float, optim.lr.LRScheduler] = 0.001, + beta1: float = 0.9, + beta2: float = 0.999, + epsilon: float = 1e-8, + weight_decay: float = 0.001, + grad_clip: Optional[ + Union[nn.ClipGradByNorm, nn.ClipGradByValue, nn.ClipGradByGlobalNorm] + ] = None, + no_weight_decay_name: Optional[str] = None, + one_dim_param_no_weight_decay: bool = False, + amsgrad: bool = False, + ): + super().__init__() + self.learning_rate = learning_rate + self.beta1 = beta1 + self.beta2 = beta2 + self.epsilon = epsilon + self.grad_clip = grad_clip + self.weight_decay = weight_decay + self.no_weight_decay_name_list = ( + no_weight_decay_name.split() if no_weight_decay_name else [] + ) + self.one_dim_param_no_weight_decay = one_dim_param_no_weight_decay + self.amsgrad = amsgrad + + def __call__(self, model_list: Union[nn.Layer, Tuple[nn.Layer, ...]]): + # model_list is None in static graph + if not isinstance(model_list, (tuple, list)): + model_list = (model_list,) + parameters = ( + sum([m.parameters() for m in model_list], []) if model_list else None + ) + + # TODO(gaotingquan): Model_list is None when in static graph, "no_weight_decay" not work. + if model_list is None: + if ( + self.one_dim_param_no_weight_decay + or len(self.no_weight_decay_name_list) != 0 + ): + msg = '"AdamW" does not support setting "no_weight_decay" in static graph. Please use dynamic graph.' + logger.error(Exception(msg)) + raise Exception(msg) + + self.no_weight_decay_param_name_list = ( + [ + p.name + for model in model_list + for n, p in model.named_parameters() + if any(nd in n for nd in self.no_weight_decay_name_list) + ] + if model_list + else [] + ) + + if self.one_dim_param_no_weight_decay: + self.no_weight_decay_param_name_list += ( + [ + p.name + for model in model_list + for n, p in model.named_parameters() + if len(p.shape) == 1 + ] + if model_list + else [] + ) + import inspect + + extra_kwargs = {} + if "amsgrad" in inspect.signature(optim.AdamW.__init__).parameters: + extra_kwargs["amsgrad"] = self.amsgrad + + opt = optim.AdamW( + learning_rate=self.learning_rate, + beta1=self.beta1, + beta2=self.beta2, + epsilon=self.epsilon, + parameters=parameters, + weight_decay=self.weight_decay, + grad_clip=self.grad_clip, + apply_decay_param_fun=self._apply_decay_param_fun, + **extra_kwargs, + ) + return opt + + def _apply_decay_param_fun(self, name): + return name not in self.no_weight_decay_param_name_list + + +class OptimizerList: + """OptimizerList which wrap more than one optimizer. + NOTE: LBFGS is not supported yet. + + Args: + optimizer_list (Tuple[optim.Optimizer, ...]): Optimizers listed in a tuple. 
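The `AdamW` wrapper above excludes selected parameters from weight decay by collecting their names and handing a predicate to `apply_decay_param_fun`. A standalone sketch of that name-filtering logic, independent of Paddle; the parameter names and shapes are made up for illustration:

```python
from typing import Iterable, List, Tuple


def build_no_decay_list(
    named_params: Iterable[Tuple[str, Tuple[int, ...]]],
    no_decay_keywords: List[str],
    skip_one_dim: bool = True,
) -> List[str]:
    """Collect parameter names that should NOT receive weight decay."""
    no_decay = []
    for name, shape in named_params:
        if any(kw in name for kw in no_decay_keywords):
            no_decay.append(name)
        elif skip_one_dim and len(shape) == 1:
            # 1-D parameters (biases, norm scales) are commonly exempted.
            no_decay.append(name)
    return no_decay


params = [("linear.weight", (20, 20)), ("linear.bias", (20,)), ("norm.weight", (20,))]
no_decay = build_no_decay_list(params, ["norm"])


def apply_decay_param_fun(name: str) -> bool:
    # Decay is applied only to parameters that are not on the exclusion list.
    return name not in no_decay


assert apply_decay_param_fun("linear.weight")
assert not apply_decay_param_fun("linear.bias")
```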
+ + Examples: + >>> import ppsci + >>> model1 = ppsci.arch.MLP(("x",), ("u",), 5, 20) + >>> opt1 = ppsci.optimizer.Adam(1e-3)(model1) + >>> model2 = ppsci.arch.MLP(("y",), ("v",), 5, 20) + >>> opt2 = ppsci.optimizer.Adam(1e-3)(model2) + >>> opt = ppsci.optimizer.OptimizerList((opt1, opt2)) + """ + + def __init__(self, optimizer_list: Tuple[optim.Optimizer, ...]): + super().__init__() + self._opt_list = optimizer_list + if "LBFGS" in set(misc.typename(opt) for opt in optimizer_list): + raise ValueError("LBFGS is not supported in OptimizerList yet.") + + def step(self): + for opt in self._opt_list: + opt.step() + + def clear_grad(self): + for opt in self._opt_list: + opt.clear_grad() + + def get_lr(self) -> float: + """Return learning rate of first optimizer""" + return self._opt_list[0].get_lr() + + def set_state_dict(self, state_dicts: List[Dict[str, "paddle.Tensor"]]): + for i, opt in enumerate(self._opt_list): + opt.set_state_dict(state_dicts[i]) + + def state_dict(self) -> List[Dict[str, "paddle.Tensor"]]: + state_dicts = [opt.state_dict() for opt in self._opt_list] + return state_dicts + + def __len__(self) -> int: + return len(self._opt_list) + + def __getitem__(self, idx): + return self._opt_list[idx] + + def __setitem__(self, idx, opt): + raise NotImplementedError("Can not modify any item in OptimizerList.") + + def __iter__(self): + yield from iter(self._opt_list) +>>>>>>> Stashed changes diff --git a/ppsci/solver/__init__.py b/ppsci/solver/__init__.py index 03f97bc2d9..6c27bd3f97 100644 --- a/ppsci/solver/__init__.py +++ b/ppsci/solver/__init__.py @@ -1,25 +1,25 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ppsci.solver import eval -from ppsci.solver import train -from ppsci.solver import visu -from ppsci.solver.solver import Solver - -__all__ = [ - "eval", - "train", - "visu", - "Solver", -] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ppsci.solver import eval +from ppsci.solver import train +from ppsci.solver import visu +from ppsci.solver.solver import Solver + +__all__ = [ + "eval", + "train", + "visu", + "Solver", +] diff --git a/ppsci/solver/eval.py b/ppsci/solver/eval.py index 1af7655901..52e5fff436 100644 --- a/ppsci/solver/eval.py +++ b/ppsci/solver/eval.py @@ -1,316 +1,316 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
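The `OptimizerList` wrapper added above fans `step()` and `clear_grad()` out to every member optimizer, so a multi-model training loop keeps a single call site. A usage sketch built from the classes shown in this diff; the random data and the dict-in/dict-out call convention of `ppsci.arch.MLP` are assumptions for illustration:

```python
import paddle
import ppsci

model1 = ppsci.arch.MLP(("x",), ("u",), 5, 20)
model2 = ppsci.arch.MLP(("y",), ("v",), 5, 20)
opt = ppsci.optimizer.OptimizerList(
    (ppsci.optimizer.Adam(1e-3)(model1), ppsci.optimizer.Adam(1e-3)(model2))
)

x = paddle.rand([8, 1])
y = paddle.rand([8, 1])
# One combined update: a single step()/clear_grad() drives both optimizers.
loss = model1({"x": x})["u"].mean() + model2({"y": y})["v"].mean()
loss.backward()
opt.step()
opt.clear_grad()
```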
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import time -from typing import TYPE_CHECKING -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import paddle -from paddle import io - -from ppsci.solver import printer -from ppsci.solver.train import _compute_batch_size -from ppsci.utils import misc - -if TYPE_CHECKING: - from pgl.utils import data as pgl_data - - from ppsci import solver - - -def _get_dataset_length( - data_loader: Union["io.DataLoader", "pgl_data.Dataloader", "io.IterableDataset"] -) -> int: - """Get full dataset length of given dataloader. - - Args: - data_loader (Union[io.DataLoader, pgl_data.Dataloader, io.IterableDataset]): - Given dataloader. - - Returns: - int: Length of full dataset. - """ - if isinstance(data_loader, io.DataLoader): - num_samples = len(data_loader.dataset) - elif isinstance(data_loader, io.IterableDataset): - num_samples = data_loader.num_samples - elif str(type(data_loader)) == "": - num_samples = len(data_loader.dataset) - else: - raise NotImplementedError( - f"Can not fetch the length of given dataset({type(data_loader)})." - ) - - return num_samples - - -def _eval_by_dataset( - solver: "solver.Solver", epoch_id: Optional[int], log_freq: int -) -> Tuple[float, Dict[str, Dict[str, float]]]: - """Evaluate with computing metric on total samples(default process). - - NOTE: This is the default evaluation method as general for most cases, but may not - memory-efficiency for large dataset or large output. - - Args: - solver (solver.Solver): Main Solver. - epoch_id (Optional[int]): Epoch id. - log_freq (int): Log evaluation information every `log_freq` steps. - - Returns: - Tuple[float, Dict[str, Dict[str, float]]]: Target metric and all metric dicts - computed during evaluation. 
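`_get_dataset_length` returns the full dataset size (not the number of batches) so that padded samples can be discarded later, and the lookup differs per loader type. A simplified sketch of the same dispatch using plain-Python stand-ins rather than real `paddle.io` or `pgl` loaders:

```python
class MapStyleLoader:            # stands in for paddle.io.DataLoader
    def __init__(self, dataset):
        self.dataset = dataset


class IterableStyleDataset:      # stands in for paddle.io.IterableDataset
    def __init__(self, num_samples):
        self.num_samples = num_samples


def dataset_length(loader) -> int:
    """Return the number of samples behind a loader, whatever its flavor."""
    if isinstance(loader, MapStyleLoader):
        return len(loader.dataset)       # map-style: ask the underlying dataset
    if isinstance(loader, IterableStyleDataset):
        return loader.num_samples        # iterable-style: size is stored explicitly
    raise NotImplementedError(f"Unsupported loader type: {type(loader)}")


assert dataset_length(MapStyleLoader(list(range(10)))) == 10
assert dataset_length(IterableStyleDataset(32)) == 32
```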
- """ - target_metric: float = float("inf") - metric_dict_group: Dict[str, Dict[str, float]] = misc.PrettyOrderedDict() - for _, _validator in solver.validator.items(): - all_output = misc.Prettydefaultdict(list) - all_label = misc.Prettydefaultdict(list) - num_samples = _get_dataset_length(_validator.data_loader) - - loss_dict = misc.Prettydefaultdict(float) - reader_tic = time.perf_counter() - batch_tic = time.perf_counter() - for iter_id, batch in enumerate(_validator.data_loader, start=1): - input_dict, label_dict, weight_dict = batch - reader_cost = time.perf_counter() - reader_tic - - for v in input_dict.values(): - if hasattr(v, "stop_gradient"): - v.stop_gradient = False - - # forward - with solver.autocast_context_manager( - solver.use_amp, solver.amp_level - ), solver.no_grad_context_manager(solver.eval_with_no_grad): - output_dict, validator_loss = solver.forward_helper.eval_forward( - _validator.output_expr, - input_dict, - solver.model, - _validator, - label_dict, - weight_dict, - ) - - loss_dict[f"{_validator.name}/loss"] = float( - sum(list(validator_loss.values())) - ) - - for key, output in output_dict.items(): - all_output[key].append( - (output.detach() if hasattr(output, "detach") else output) - if solver.world_size == 1 - else misc.all_gather(output.detach()) - ) - - for key, label in label_dict.items(): - all_label[key].append( - (label.detach() if hasattr(label, "detach") else label) - if solver.world_size == 1 - else misc.all_gather(label.detach()) - ) - - batch_cost = time.perf_counter() - batch_tic - solver.eval_time_info["reader_cost"].update(reader_cost) - solver.eval_time_info["batch_cost"].update(batch_cost) - batch_size = _compute_batch_size(input_dict) - printer.update_eval_loss(solver, loss_dict, batch_size) - if ( - iter_id == 1 - or iter_id % log_freq == 0 - or iter_id == len(_validator.data_loader) - ): - printer.log_eval_info( - solver, - batch_size, - epoch_id, - len(_validator.data_loader), - iter_id, - ) - - reader_tic = time.perf_counter() - batch_tic = time.perf_counter() - - # concatenate all data and discard padded sample(s) - for key in all_output: - if paddle.is_tensor(all_output[key][0]): - all_output[key] = paddle.concat(all_output[key]) - if len(all_output[key]) > num_samples: - all_output[key] = all_output[key][:num_samples] - - for key in all_label: - if paddle.is_tensor(all_label[key][0]): - all_label[key] = paddle.concat(all_label[key]) - if len(all_label[key]) > num_samples: - all_label[key] = all_label[key][:num_samples] - - for metric_name, metric_func in _validator.metric.items(): - # NOTE: compute metric with entire output and label - metric_dict = metric_func(all_output, all_label) - metric_dict_group[metric_name] = { - k: float(v) for k, v in metric_dict.items() - } - for var_name, metric_value in metric_dict.items(): - metric_str = f"{_validator.name}/{metric_name}.{var_name}" - if metric_str not in solver.eval_output_info: - solver.eval_output_info[metric_str] = misc.AverageMeter( - metric_str, ".5f" - ) - solver.eval_output_info[metric_str].update( - float(metric_value), num_samples - ) - - # use the first metric for return value - tmp = metric_dict_group - while isinstance(tmp, dict): - tmp = next(iter(tmp.values())) - # avoid that none of metric is set - if isinstance(tmp, float): - target_metric = float(tmp) - - return target_metric, metric_dict_group - - -def _eval_by_batch( - solver: "solver.Solver", epoch_id: Optional[int], log_freq: int -) -> Tuple[float, Dict[str, Dict[str, float]]]: - """Evaluate with computing metric 
by batch, which is memory-efficient. - - NOTE: This is a evaluation function for large dataset or large output, as is more - memory-efficiency than evaluating by dataset, but less general because some metric - is not independent among samples, e.g. L2 relative error. - - Args: - solver (solver.Solver): Main Solver. - epoch_id (Optional[int]): Epoch id. - log_freq (int): Log evaluation information every `log_freq` steps. - - Returns: - Tuple[float, Dict[str, Dict[str, float]]]: Target metric and all metric dicts - computed during evaluation. - """ - target_metric: float = float("inf") - metric_dict_group: Dict[str, Dict[str, float]] = misc.PrettyOrderedDict() - for _, _validator in solver.validator.items(): - num_samples = _get_dataset_length(_validator.data_loader) - - loss_dict = misc.Prettydefaultdict(float) - reader_tic = time.perf_counter() - batch_tic = time.perf_counter() - for iter_id, batch in enumerate(_validator.data_loader, start=1): - input_dict, label_dict, weight_dict = batch - reader_cost = time.perf_counter() - reader_tic - - batch_size = _compute_batch_size(input_dict) - for v in input_dict.values(): - if hasattr(v, "stop_gradient"): - v.stop_gradient = False - - # forward - with solver.autocast_context_manager( - solver.use_amp, solver.amp_level - ), solver.no_grad_context_manager(solver.eval_with_no_grad): - output_dict, validator_loss = solver.forward_helper.eval_forward( - _validator.output_expr, - input_dict, - solver.model, - _validator, - label_dict, - weight_dict, - ) - - loss_dict[f"{_validator.name}/loss"] = float( - sum(list(validator_loss.values())) - ) - - # collect batch metric - for metric_name, metric_func in _validator.metric.items(): - metric_dict_group[metric_name] = misc.Prettydefaultdict(list) - metric_dict = metric_func(output_dict, label_dict) - for var_name, metric_value in metric_dict.items(): - metric_dict_group[metric_name][var_name].append( - metric_value - if solver.world_size == 1 - else misc.all_gather(metric_value) - ) - - batch_cost = time.perf_counter() - batch_tic - solver.eval_time_info["reader_cost"].update(reader_cost) - solver.eval_time_info["batch_cost"].update(batch_cost) - printer.update_eval_loss(solver, loss_dict, batch_size) - if ( - iter_id == 1 - or iter_id % log_freq == 0 - or iter_id == len(_validator.data_loader) - ): - printer.log_eval_info( - solver, - batch_size, - epoch_id, - len(_validator.data_loader), - iter_id, - ) - - reader_tic = time.perf_counter() - batch_tic = time.perf_counter() - - # concatenate all metric and discard metric of padded sample(s) - for metric_name, metric_dict in metric_dict_group.items(): - for var_name, metric_value in metric_dict.items(): - # NOTE: concat single metric(scalar) list into metric vector - metric_value = paddle.concat(metric_value)[:num_samples] - # NOTE: compute metric via averaging metric over all samples, - # this might be not general for certain evaluation case - metric_value = float(metric_value.mean()) - metric_dict_group[metric_name][var_name] = metric_value - metric_str = f"{_validator.name}/{metric_name}.{var_name}" - if metric_str not in solver.eval_output_info: - solver.eval_output_info[metric_str] = misc.AverageMeter( - metric_str, ".5f" - ) - solver.eval_output_info[metric_str].update(metric_value, num_samples) - - # use the first metric for return value - tmp = metric_dict_group - while isinstance(tmp, dict): - tmp = next(iter(tmp.values())) - # avoid that none of metric is set - if isinstance(tmp, float): - target_metric = tmp - - return target_metric, 
metric_dict_group
-
-
-def eval_func(
-    solver: "solver.Solver", epoch_id: Optional[int], log_freq: int
-) -> Tuple[float, Dict[str, Dict[str, float]]]:
-    """Evaluation function.
-
-    Args:
-        solver (solver.Solver): Main Solver.
-        epoch_id (Optional[int]): Epoch id.
-        log_freq (int): Log evaluation information every `log_freq` steps.
-
-    Returns:
-        Tuple[float, Dict[str, Dict[str, float]]]: Target metric and all metric dicts
-            computed during evaluation.
-    """
-    if solver.compute_metric_by_batch:
-        return _eval_by_batch(solver, epoch_id, log_freq)
-    return _eval_by_dataset(solver, epoch_id, log_freq)
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import time
+from typing import TYPE_CHECKING
+from typing import Dict
+from typing import Optional
+from typing import Tuple
+from typing import Union
+
+import paddle
+from paddle import io
+
+from ppsci.solver import printer
+from ppsci.solver.train import _compute_batch_size
+from ppsci.utils import misc
+
+if TYPE_CHECKING:
+    from pgl.utils import data as pgl_data
+
+    from ppsci import solver
+
+
+def _get_dataset_length(
+    data_loader: Union["io.DataLoader", "pgl_data.Dataloader", "io.IterableDataset"]
+) -> int:
+    """Get full dataset length of given dataloader.
+
+    Args:
+        data_loader (Union[io.DataLoader, pgl_data.Dataloader, io.IterableDataset]):
+            Given dataloader.
+
+    Returns:
+        int: Length of full dataset.
+    """
+    if isinstance(data_loader, io.DataLoader):
+        num_samples = len(data_loader.dataset)
+    elif isinstance(data_loader, io.IterableDataset):
+        num_samples = data_loader.num_samples
+    elif str(type(data_loader)) == "<class 'pgl.utils.data.dataloader.Dataloader'>":
+        num_samples = len(data_loader.dataset)
+    else:
+        raise NotImplementedError(
+            f"Can not fetch the length of given dataset({type(data_loader)})."
+        )
+
+    return num_samples
+
+
+def _eval_by_dataset(
+    solver: "solver.Solver", epoch_id: Optional[int], log_freq: int
+) -> Tuple[float, Dict[str, Dict[str, float]]]:
+    """Evaluate with computing metric on total samples (default process).
+
+    NOTE: This is the default evaluation method, general enough for most cases, but it
+    may not be memory-efficient for large datasets or outputs.
+
+    Args:
+        solver (solver.Solver): Main Solver.
+        epoch_id (Optional[int]): Epoch id.
+        log_freq (int): Log evaluation information every `log_freq` steps.
+
+    Returns:
+        Tuple[float, Dict[str, Dict[str, float]]]: Target metric and all metric dicts
+            computed during evaluation.
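`_eval_by_dataset` accumulates every batch's outputs and labels, concatenates them once at the end, and trims any distributed-padding overshoot back to `num_samples` before computing metrics. A Paddle sketch of that accumulate-then-truncate pattern; the shapes and the single key `"u"` are illustrative:

```python
import paddle

num_samples = 10                                   # true dataset size
batches = [paddle.rand([4, 1]) for _ in range(3)]  # 3 batches of 4 -> 12 rows, 2 padded

all_output = {"u": []}
for batch_out in batches:
    all_output["u"].append(batch_out.detach())     # keep no autograd graph around

# Concatenate once and drop the padded tail before metric computation.
for key in all_output:
    all_output[key] = paddle.concat(all_output[key])[:num_samples]

assert all_output["u"].shape[0] == num_samples
```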
+ """ + target_metric: float = float("inf") + metric_dict_group: Dict[str, Dict[str, float]] = misc.PrettyOrderedDict() + for _, _validator in solver.validator.items(): + all_output = misc.Prettydefaultdict(list) + all_label = misc.Prettydefaultdict(list) + num_samples = _get_dataset_length(_validator.data_loader) + + loss_dict = misc.Prettydefaultdict(float) + reader_tic = time.perf_counter() + batch_tic = time.perf_counter() + for iter_id, batch in enumerate(_validator.data_loader, start=1): + input_dict, label_dict, weight_dict = batch + reader_cost = time.perf_counter() - reader_tic + + for v in input_dict.values(): + if hasattr(v, "stop_gradient"): + v.stop_gradient = False + + # forward + with solver.autocast_context_manager( + solver.use_amp, solver.amp_level + ), solver.no_grad_context_manager(solver.eval_with_no_grad): + output_dict, validator_loss = solver.forward_helper.eval_forward( + _validator.output_expr, + input_dict, + solver.model, + _validator, + label_dict, + weight_dict, + ) + + loss_dict[f"{_validator.name}/loss"] = float( + sum(list(validator_loss.values())) + ) + + for key, output in output_dict.items(): + all_output[key].append( + (output.detach() if hasattr(output, "detach") else output) + if solver.world_size == 1 + else misc.all_gather(output.detach()) + ) + + for key, label in label_dict.items(): + all_label[key].append( + (label.detach() if hasattr(label, "detach") else label) + if solver.world_size == 1 + else misc.all_gather(label.detach()) + ) + + batch_cost = time.perf_counter() - batch_tic + solver.eval_time_info["reader_cost"].update(reader_cost) + solver.eval_time_info["batch_cost"].update(batch_cost) + batch_size = _compute_batch_size(input_dict) + printer.update_eval_loss(solver, loss_dict, batch_size) + if ( + iter_id == 1 + or iter_id % log_freq == 0 + or iter_id == len(_validator.data_loader) + ): + printer.log_eval_info( + solver, + batch_size, + epoch_id, + len(_validator.data_loader), + iter_id, + ) + + reader_tic = time.perf_counter() + batch_tic = time.perf_counter() + + # concatenate all data and discard padded sample(s) + for key in all_output: + if paddle.is_tensor(all_output[key][0]): + all_output[key] = paddle.concat(all_output[key]) + if len(all_output[key]) > num_samples: + all_output[key] = all_output[key][:num_samples] + + for key in all_label: + if paddle.is_tensor(all_label[key][0]): + all_label[key] = paddle.concat(all_label[key]) + if len(all_label[key]) > num_samples: + all_label[key] = all_label[key][:num_samples] + + for metric_name, metric_func in _validator.metric.items(): + # NOTE: compute metric with entire output and label + metric_dict = metric_func(all_output, all_label) + metric_dict_group[metric_name] = { + k: float(v) for k, v in metric_dict.items() + } + for var_name, metric_value in metric_dict.items(): + metric_str = f"{_validator.name}/{metric_name}.{var_name}" + if metric_str not in solver.eval_output_info: + solver.eval_output_info[metric_str] = misc.AverageMeter( + metric_str, ".5f" + ) + solver.eval_output_info[metric_str].update( + float(metric_value), num_samples + ) + + # use the first metric for return value + tmp = metric_dict_group + while isinstance(tmp, dict): + tmp = next(iter(tmp.values())) + # avoid that none of metric is set + if isinstance(tmp, float): + target_metric = float(tmp) + + return target_metric, metric_dict_group + + +def _eval_by_batch( + solver: "solver.Solver", epoch_id: Optional[int], log_freq: int +) -> Tuple[float, Dict[str, Dict[str, float]]]: + """Evaluate with computing metric 
by batch, which is memory-efficient. + + NOTE: This is a evaluation function for large dataset or large output, as is more + memory-efficiency than evaluating by dataset, but less general because some metric + is not independent among samples, e.g. L2 relative error. + + Args: + solver (solver.Solver): Main Solver. + epoch_id (Optional[int]): Epoch id. + log_freq (int): Log evaluation information every `log_freq` steps. + + Returns: + Tuple[float, Dict[str, Dict[str, float]]]: Target metric and all metric dicts + computed during evaluation. + """ + target_metric: float = float("inf") + metric_dict_group: Dict[str, Dict[str, float]] = misc.PrettyOrderedDict() + for _, _validator in solver.validator.items(): + num_samples = _get_dataset_length(_validator.data_loader) + + loss_dict = misc.Prettydefaultdict(float) + reader_tic = time.perf_counter() + batch_tic = time.perf_counter() + for iter_id, batch in enumerate(_validator.data_loader, start=1): + input_dict, label_dict, weight_dict = batch + reader_cost = time.perf_counter() - reader_tic + + batch_size = _compute_batch_size(input_dict) + for v in input_dict.values(): + if hasattr(v, "stop_gradient"): + v.stop_gradient = False + + # forward + with solver.autocast_context_manager( + solver.use_amp, solver.amp_level + ), solver.no_grad_context_manager(solver.eval_with_no_grad): + output_dict, validator_loss = solver.forward_helper.eval_forward( + _validator.output_expr, + input_dict, + solver.model, + _validator, + label_dict, + weight_dict, + ) + + loss_dict[f"{_validator.name}/loss"] = float( + sum(list(validator_loss.values())) + ) + + # collect batch metric + for metric_name, metric_func in _validator.metric.items(): + metric_dict_group[metric_name] = misc.Prettydefaultdict(list) + metric_dict = metric_func(output_dict, label_dict) + for var_name, metric_value in metric_dict.items(): + metric_dict_group[metric_name][var_name].append( + metric_value + if solver.world_size == 1 + else misc.all_gather(metric_value) + ) + + batch_cost = time.perf_counter() - batch_tic + solver.eval_time_info["reader_cost"].update(reader_cost) + solver.eval_time_info["batch_cost"].update(batch_cost) + printer.update_eval_loss(solver, loss_dict, batch_size) + if ( + iter_id == 1 + or iter_id % log_freq == 0 + or iter_id == len(_validator.data_loader) + ): + printer.log_eval_info( + solver, + batch_size, + epoch_id, + len(_validator.data_loader), + iter_id, + ) + + reader_tic = time.perf_counter() + batch_tic = time.perf_counter() + + # concatenate all metric and discard metric of padded sample(s) + for metric_name, metric_dict in metric_dict_group.items(): + for var_name, metric_value in metric_dict.items(): + # NOTE: concat single metric(scalar) list into metric vector + metric_value = paddle.concat(metric_value)[:num_samples] + # NOTE: compute metric via averaging metric over all samples, + # this might be not general for certain evaluation case + metric_value = float(metric_value.mean()) + metric_dict_group[metric_name][var_name] = metric_value + metric_str = f"{_validator.name}/{metric_name}.{var_name}" + if metric_str not in solver.eval_output_info: + solver.eval_output_info[metric_str] = misc.AverageMeter( + metric_str, ".5f" + ) + solver.eval_output_info[metric_str].update(metric_value, num_samples) + + # use the first metric for return value + tmp = metric_dict_group + while isinstance(tmp, dict): + tmp = next(iter(tmp.values())) + # avoid that none of metric is set + if isinstance(tmp, float): + target_metric = tmp + + return target_metric, 
metric_dict_group + + +def eval_func( + solver: "solver.Solver", epoch_id: Optional[int], log_freq: int +) -> Tuple[float, Dict[str, Dict[str, float]]]: + """Evaluation function. + + Args: + solver (solver.Solver): Main Solver. + epoch_id (Optional[int]): Epoch id. + log_freq (int): Log evaluation information every `log_freq` steps. + + Returns: + Tuple[float, Dict[str, Dict[str, float]]]: Target metric and all metric dicts + computed during evaluation. + """ + if solver.compute_metric_by_batch: + return _eval_by_batch(solver, epoch_id, log_freq) + return _eval_by_dataset(solver, epoch_id, log_freq) diff --git a/ppsci/solver/train.py b/ppsci/solver/train.py index 0dd098f5fe..0cddc01ab9 100644 --- a/ppsci/solver/train.py +++ b/ppsci/solver/train.py @@ -1,319 +1,319 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import sys -import time -from typing import TYPE_CHECKING -from typing import Dict -from typing import Sequence -from typing import Union - -import paddle -from paddle.distributed.fleet.utils import hybrid_parallel_util as hpu -from paddle.framework import core - -from ppsci.solver import printer -from ppsci.utils import misc - -if TYPE_CHECKING: - from ppsci import solver - - -def _compute_batch_size( - input_dict: Dict[str, Union[paddle.Tensor, Sequence[paddle.Tensor]]] -) -> int: - """Compute batch size from given input dict. - - NOTE: Returned `batch_size` might be inaccurate, but it won't affect the correctness - of the training results because `batch_size` is now only used for timing. - - Args: - input_dict (Dict[str, Union[paddle.Tensor, Sequence[paddle.Tensor]]]): Given input dict. - - Returns: - int: Batch size of input dict. - """ - sample = next(iter(input_dict.values())) - if hasattr(sample, "shape"): - return sample.shape[0] - elif hasattr(sample, "__len__"): # Might be inaccurate here. - return len(sample) - else: - raise ValueError("Unsupported type of input dict value.") - - -def train_epoch_func(solver: "solver.Solver", epoch_id: int, log_freq: int): - """Train program for one epoch. - - Args: - solver (solver.Solver): Main solver. - epoch_id (int): Epoch id. - log_freq (int): Log training information every `log_freq` steps. 
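`eval_func` switches between the two strategies on `solver.compute_metric_by_batch`. The per-batch path averages batch-level metric values, which is only exact for metrics that decompose over samples (MSE with equal batch sizes does, L2 relative error does not). A small NumPy sketch of that caveat; the data is random and purely illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
pred, label = rng.normal(size=12), rng.normal(size=12)
batches = [(pred[i:i + 4], label[i:i + 4]) for i in range(0, 12, 4)]

# MSE decomposes over samples: averaging equal-sized batch MSEs matches the dataset MSE.
mse_full = np.mean((pred - label) ** 2)
mse_batched = np.mean([np.mean((p - l) ** 2) for p, l in batches])
assert np.isclose(mse_full, mse_batched)

# L2 relative error does not decompose: the batched average generally differs.
l2_full = np.linalg.norm(pred - label) / np.linalg.norm(label)
l2_batched = np.mean([np.linalg.norm(p - l) / np.linalg.norm(l) for p, l in batches])
print(l2_full, l2_batched)  # usually unequal -> evaluate such metrics by dataset instead
```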
- """ - batch_tic = time.perf_counter() - - for iter_id in range(1, solver.iters_per_epoch + 1): - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_push( - f"Training iteration {solver.global_step + 1}" - ) # Training iteration - - total_batch_size = 0 - reader_cost = 0.0 - batch_cost = 0.0 - reader_tic = time.perf_counter() - - input_dicts = [] - label_dicts = [] - weight_dicts = [] - for _, _constraint in solver.constraint.items(): - # fetch data from data loader - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_push("Data load") - - try: - input_dict, label_dict, weight_dict = next(_constraint.data_iter) - except StopIteration: - _constraint.data_iter = iter(_constraint.data_loader) - input_dict, label_dict, weight_dict = next(_constraint.data_iter) - - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_pop() - - reader_cost += time.perf_counter() - reader_tic - - for v in input_dict.values(): - if hasattr(v, "stop_gradient"): - v.stop_gradient = False - - # gather each constraint's input, label, weight to a list - input_dicts.append(input_dict) - label_dicts.append(label_dict) - weight_dicts.append(weight_dict) - total_batch_size += _compute_batch_size(input_dict) - reader_tic = time.perf_counter() - - loss_dict = misc.Prettydefaultdict(float) - loss_dict["loss"] = 0.0 - # forward for every constraint, including model and equation expression - with solver.no_sync_context_manager(solver.world_size > 1, solver.model): - with solver.autocast_context_manager(solver.use_amp, solver.amp_level): - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_push("Loss computation") - - losses_all, losses_constraint = solver.forward_helper.train_forward( - tuple( - _constraint.output_expr - for _constraint in solver.constraint.values() - ), - input_dicts, - solver.model, - solver.constraint, - label_dicts, - weight_dicts, - ) - assert "loss" not in losses_all, ( - "Key 'loss' is not allowed in loss_dict for it is an preserved key" - " representing total loss, please use other name instead." 
- ) - - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_pop() # Loss computation - - # accumulate all losses - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_push("Loss aggregator") - - total_loss = solver.loss_aggregator(losses_all, solver.global_step) - if solver.update_freq > 1: - total_loss = total_loss / solver.update_freq - - loss_dict.update(losses_constraint) - loss_dict["loss"] = float(total_loss) - - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_pop() # Loss aggregator - - # backward - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_push("Loss backward") - - if solver.use_amp: - total_loss_scaled = solver.scaler.scale(total_loss) - total_loss_scaled.backward() - else: - total_loss.backward() - - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_pop() # Loss backward - - # update parameters - if iter_id % solver.update_freq == 0 or iter_id == solver.iters_per_epoch: - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_push("Optimizer update") - - if solver.world_size > 1: - # fuse + allreduce manually before optimization if use DDP + no_sync - # details in https://github.com/PaddlePaddle/Paddle/issues/48898#issuecomment-1343838622 - hpu.fused_allreduce_gradients(list(solver.model.parameters()), None) - if solver.use_amp: - solver.scaler.minimize(solver.optimizer, total_loss_scaled) - else: - solver.optimizer.step() - - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_pop() # Optimizer update - - solver.optimizer.clear_grad() - - # update learning rate by step - if solver.lr_scheduler is not None and not solver.lr_scheduler.by_epoch: - solver.lr_scheduler.step() - - if solver.benchmark_flag: - paddle.device.synchronize() - batch_cost += time.perf_counter() - batch_tic - - # update and log training information - solver.global_step += 1 - solver.train_time_info["reader_cost"].update(reader_cost) - solver.train_time_info["batch_cost"].update(batch_cost) - printer.update_train_loss(solver, loss_dict, total_batch_size) - if ( - solver.global_step % log_freq == 0 - or solver.global_step == 1 - or solver.global_step == solver.max_steps - ): - printer.log_train_info(solver, total_batch_size, epoch_id, iter_id) - - batch_tic = time.perf_counter() - - if solver.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_pop() # Training iteration - NVTX_STOP_ITER = 25 - if solver.global_step >= NVTX_STOP_ITER: - print( - f"Only run {NVTX_STOP_ITER} steps when 'NVTX' is set in environment" - " for nsight analysis. Exit now ......\n" - ) - core.nvprof_stop() - sys.exit(0) - - -def train_LBFGS_epoch_func(solver: "solver.Solver", epoch_id: int, log_freq: int): - """Train function for one epoch with L-BFGS optimizer. - - NOTE: L-BFGS training program do not support AMP now. - - Args: - solver (solver.Solver): Main solver. - epoch_id (int): Epoch id. - log_freq (int): Log training information every `log_freq` steps. 
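When `update_freq > 1`, `train_epoch_func` divides the loss by `update_freq` and only steps (and clears gradients) every `update_freq` iterations, so gradients accumulate in between. A Paddle sketch of that schedule on a toy model, with the AMP, NVTX, and distributed branches omitted; the layer sizes and learning rate are arbitrary:

```python
import paddle

model = paddle.nn.Linear(4, 1)
opt = paddle.optimizer.Adam(learning_rate=1e-3, parameters=model.parameters())
update_freq, iters_per_epoch = 4, 8

for iter_id in range(1, iters_per_epoch + 1):
    x = paddle.rand([16, 4])
    loss = model(x).mean()
    loss = loss / update_freq          # rescale so accumulated grads match a big batch
    loss.backward()                    # gradients accumulate across iterations
    if iter_id % update_freq == 0 or iter_id == iters_per_epoch:
        opt.step()
        opt.clear_grad()               # only reset after an actual parameter update
```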
- """ - batch_tic = time.perf_counter() - - for iter_id in range(1, solver.iters_per_epoch + 1): - loss_dict = misc.Prettydefaultdict(float) - loss_dict["loss"] = 0.0 - total_batch_size = 0 - reader_cost = 0.0 - batch_cost = 0.0 - reader_tic = time.perf_counter() - - input_dicts = [] - label_dicts = [] - weight_dicts = [] - for _, _constraint in solver.constraint.items(): - # fetch data from data loader - try: - input_dict, label_dict, weight_dict = next(_constraint.data_iter) - except StopIteration: - _constraint.data_iter = iter(_constraint.data_loader) - input_dict, label_dict, weight_dict = next(_constraint.data_iter) - reader_cost += time.perf_counter() - reader_tic - - for v in input_dict.values(): - if hasattr(v, "stop_gradient"): - v.stop_gradient = False - - # gather each constraint's input, label, weight to a list - input_dicts.append(input_dict) - label_dicts.append(label_dict) - weight_dicts.append(weight_dict) - total_batch_size += _compute_batch_size(input_dict) - reader_tic = time.perf_counter() - - def closure() -> paddle.Tensor: - """Forward-backward closure function for LBFGS optimizer. - - Returns: - paddle.Tensor: Computed loss scalar. - """ - with solver.no_sync_context_manager(solver.world_size > 1, solver.model): - with solver.autocast_context_manager(solver.use_amp, solver.amp_level): - # forward for every constraint, including model and equation expression - losses_all, losses_constraint = solver.forward_helper.train_forward( - tuple( - _constraint.output_expr - for _constraint in solver.constraint.values() - ), - input_dicts, - solver.model, - solver.constraint, - label_dicts, - weight_dicts, - ) - - # accumulate all losses - total_loss = solver.loss_aggregator(losses_all, solver.global_step) - loss_dict.update(losses_constraint) - loss_dict["loss"] = float(total_loss) - - # backward - solver.optimizer.clear_grad() - total_loss.backward() - - if solver.world_size > 1: - # fuse + allreduce manually before optimization if use DDP model - # details in https://github.com/PaddlePaddle/Paddle/issues/48898#issuecomment-1343838622 - hpu.fused_allreduce_gradients(list(solver.model.parameters()), None) - - return total_loss - - # update parameters - solver.optimizer.step(closure) - - # update learning rate by step - if solver.lr_scheduler is not None and not solver.lr_scheduler.by_epoch: - solver.lr_scheduler.step() - - if solver.benchmark_flag: - paddle.device.synchronize() - batch_cost += time.perf_counter() - batch_tic - - # update and log training information - solver.global_step += 1 - solver.train_time_info["reader_cost"].update(reader_cost) - solver.train_time_info["batch_cost"].update(batch_cost) - printer.update_train_loss(solver, loss_dict, total_batch_size) - if ( - solver.global_step % log_freq == 0 - or solver.global_step == 1 - or solver.global_step == solver.max_steps - ): - printer.log_train_info(solver, total_batch_size, epoch_id, iter_id) - - batch_tic = time.perf_counter() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import sys +import time +from typing import TYPE_CHECKING +from typing import Dict +from typing import Sequence +from typing import Union + +import paddle +from paddle.distributed.fleet.utils import hybrid_parallel_util as hpu +from paddle.framework import core + +from ppsci.solver import printer +from ppsci.utils import misc + +if TYPE_CHECKING: + from ppsci import solver + + +def _compute_batch_size( + input_dict: Dict[str, Union[paddle.Tensor, Sequence[paddle.Tensor]]] +) -> int: + """Compute batch size from given input dict. + + NOTE: Returned `batch_size` might be inaccurate, but it won't affect the correctness + of the training results because `batch_size` is now only used for timing. + + Args: + input_dict (Dict[str, Union[paddle.Tensor, Sequence[paddle.Tensor]]]): Given input dict. + + Returns: + int: Batch size of input dict. + """ + sample = next(iter(input_dict.values())) + if hasattr(sample, "shape"): + return sample.shape[0] + elif hasattr(sample, "__len__"): # Might be inaccurate here. + return len(sample) + else: + raise ValueError("Unsupported type of input dict value.") + + +def train_epoch_func(solver: "solver.Solver", epoch_id: int, log_freq: int): + """Train program for one epoch. + + Args: + solver (solver.Solver): Main solver. + epoch_id (int): Epoch id. + log_freq (int): Log training information every `log_freq` steps. + """ + batch_tic = time.perf_counter() + + for iter_id in range(1, solver.iters_per_epoch + 1): + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_push( + f"Training iteration {solver.global_step + 1}" + ) # Training iteration + + total_batch_size = 0 + reader_cost = 0.0 + batch_cost = 0.0 + reader_tic = time.perf_counter() + + input_dicts = [] + label_dicts = [] + weight_dicts = [] + for _, _constraint in solver.constraint.items(): + # fetch data from data loader + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_push("Data load") + + try: + input_dict, label_dict, weight_dict = next(_constraint.data_iter) + except StopIteration: + _constraint.data_iter = iter(_constraint.data_loader) + input_dict, label_dict, weight_dict = next(_constraint.data_iter) + + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_pop() + + reader_cost += time.perf_counter() - reader_tic + + for v in input_dict.values(): + if hasattr(v, "stop_gradient"): + v.stop_gradient = False + + # gather each constraint's input, label, weight to a list + input_dicts.append(input_dict) + label_dicts.append(label_dict) + weight_dicts.append(weight_dict) + total_batch_size += _compute_batch_size(input_dict) + reader_tic = time.perf_counter() + + loss_dict = misc.Prettydefaultdict(float) + loss_dict["loss"] = 0.0 + # forward for every constraint, including model and equation expression + with solver.no_sync_context_manager(solver.world_size > 1, solver.model): + with solver.autocast_context_manager(solver.use_amp, solver.amp_level): + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_push("Loss computation") + + losses_all, losses_constraint = solver.forward_helper.train_forward( + tuple( + _constraint.output_expr + for _constraint in solver.constraint.values() + ), + input_dicts, + solver.model, + solver.constraint, + label_dicts, + weight_dicts, + ) + assert "loss" not in losses_all, ( + "Key 'loss' is not allowed in loss_dict for it is an preserved key" + " representing total 
loss, please use other name instead." + ) + + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_pop() # Loss computation + + # accumulate all losses + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_push("Loss aggregator") + + total_loss = solver.loss_aggregator(losses_all, solver.global_step) + if solver.update_freq > 1: + total_loss = total_loss / solver.update_freq + + loss_dict.update(losses_constraint) + loss_dict["loss"] = float(total_loss) + + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_pop() # Loss aggregator + + # backward + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_push("Loss backward") + + if solver.use_amp: + total_loss_scaled = solver.scaler.scale(total_loss) + total_loss_scaled.backward() + else: + total_loss.backward() + + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_pop() # Loss backward + + # update parameters + if iter_id % solver.update_freq == 0 or iter_id == solver.iters_per_epoch: + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_push("Optimizer update") + + if solver.world_size > 1: + # fuse + allreduce manually before optimization if use DDP + no_sync + # details in https://github.com/PaddlePaddle/Paddle/issues/48898#issuecomment-1343838622 + hpu.fused_allreduce_gradients(list(solver.model.parameters()), None) + if solver.use_amp: + solver.scaler.minimize(solver.optimizer, total_loss_scaled) + else: + solver.optimizer.step() + + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_pop() # Optimizer update + + solver.optimizer.clear_grad() + + # update learning rate by step + if solver.lr_scheduler is not None and not solver.lr_scheduler.by_epoch: + solver.lr_scheduler.step() + + if solver.benchmark_flag: + paddle.device.synchronize() + batch_cost += time.perf_counter() - batch_tic + + # update and log training information + solver.global_step += 1 + solver.train_time_info["reader_cost"].update(reader_cost) + solver.train_time_info["batch_cost"].update(batch_cost) + printer.update_train_loss(solver, loss_dict, total_batch_size) + if ( + solver.global_step % log_freq == 0 + or solver.global_step == 1 + or solver.global_step == solver.max_steps + ): + printer.log_train_info(solver, total_batch_size, epoch_id, iter_id) + + batch_tic = time.perf_counter() + + if solver.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_pop() # Training iteration + NVTX_STOP_ITER = 25 + if solver.global_step >= NVTX_STOP_ITER: + print( + f"Only run {NVTX_STOP_ITER} steps when 'NVTX' is set in environment" + " for nsight analysis. Exit now ......\n" + ) + core.nvprof_stop() + sys.exit(0) + + +def train_LBFGS_epoch_func(solver: "solver.Solver", epoch_id: int, log_freq: int): + """Train function for one epoch with L-BFGS optimizer. + + NOTE: L-BFGS training program do not support AMP now. + + Args: + solver (solver.Solver): Main solver. + epoch_id (int): Epoch id. + log_freq (int): Log training information every `log_freq` steps. 
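L-BFGS re-evaluates the loss several times per step during its line search, which is why `train_LBFGS_epoch_func` below wraps the forward and backward pass in a closure handed to `optimizer.step(closure)`. A minimal Paddle sketch of that contract, assuming a Paddle build where `LBFGS` is available under `paddle.optimizer` (older builds keep it under `paddle.incubate.optimizer`, as the wrapper earlier in this diff handles):

```python
import paddle

model = paddle.nn.Linear(2, 1)
opt = paddle.optimizer.LBFGS(
    learning_rate=1.0,
    max_iter=5,
    line_search_fn="strong_wolfe",
    parameters=model.parameters(),
)
x, y = paddle.rand([32, 2]), paddle.rand([32, 1])


def closure() -> paddle.Tensor:
    opt.clear_grad()                  # fresh gradients for every re-evaluation
    loss = paddle.nn.functional.mse_loss(model(x), y)
    loss.backward()
    return loss


opt.step(closure)                     # LBFGS may call the closure multiple times
```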
+ """ + batch_tic = time.perf_counter() + + for iter_id in range(1, solver.iters_per_epoch + 1): + loss_dict = misc.Prettydefaultdict(float) + loss_dict["loss"] = 0.0 + total_batch_size = 0 + reader_cost = 0.0 + batch_cost = 0.0 + reader_tic = time.perf_counter() + + input_dicts = [] + label_dicts = [] + weight_dicts = [] + for _, _constraint in solver.constraint.items(): + # fetch data from data loader + try: + input_dict, label_dict, weight_dict = next(_constraint.data_iter) + except StopIteration: + _constraint.data_iter = iter(_constraint.data_loader) + input_dict, label_dict, weight_dict = next(_constraint.data_iter) + reader_cost += time.perf_counter() - reader_tic + + for v in input_dict.values(): + if hasattr(v, "stop_gradient"): + v.stop_gradient = False + + # gather each constraint's input, label, weight to a list + input_dicts.append(input_dict) + label_dicts.append(label_dict) + weight_dicts.append(weight_dict) + total_batch_size += _compute_batch_size(input_dict) + reader_tic = time.perf_counter() + + def closure() -> paddle.Tensor: + """Forward-backward closure function for LBFGS optimizer. + + Returns: + paddle.Tensor: Computed loss scalar. + """ + with solver.no_sync_context_manager(solver.world_size > 1, solver.model): + with solver.autocast_context_manager(solver.use_amp, solver.amp_level): + # forward for every constraint, including model and equation expression + losses_all, losses_constraint = solver.forward_helper.train_forward( + tuple( + _constraint.output_expr + for _constraint in solver.constraint.values() + ), + input_dicts, + solver.model, + solver.constraint, + label_dicts, + weight_dicts, + ) + + # accumulate all losses + total_loss = solver.loss_aggregator(losses_all, solver.global_step) + loss_dict.update(losses_constraint) + loss_dict["loss"] = float(total_loss) + + # backward + solver.optimizer.clear_grad() + total_loss.backward() + + if solver.world_size > 1: + # fuse + allreduce manually before optimization if use DDP model + # details in https://github.com/PaddlePaddle/Paddle/issues/48898#issuecomment-1343838622 + hpu.fused_allreduce_gradients(list(solver.model.parameters()), None) + + return total_loss + + # update parameters + solver.optimizer.step(closure) + + # update learning rate by step + if solver.lr_scheduler is not None and not solver.lr_scheduler.by_epoch: + solver.lr_scheduler.step() + + if solver.benchmark_flag: + paddle.device.synchronize() + batch_cost += time.perf_counter() - batch_tic + + # update and log training information + solver.global_step += 1 + solver.train_time_info["reader_cost"].update(reader_cost) + solver.train_time_info["batch_cost"].update(batch_cost) + printer.update_train_loss(solver, loss_dict, total_batch_size) + if ( + solver.global_step % log_freq == 0 + or solver.global_step == 1 + or solver.global_step == solver.max_steps + ): + printer.log_train_info(solver, total_batch_size, epoch_id, iter_id) + + batch_tic = time.perf_counter() diff --git a/ppsci/solver/visu.py b/ppsci/solver/visu.py index 80e11abe75..42ed60840b 100644 --- a/ppsci/solver/visu.py +++ b/ppsci/solver/visu.py @@ -1,98 +1,98 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import os -import os.path as osp -from typing import TYPE_CHECKING -from typing import Optional - -import paddle - -from ppsci.utils import misc - -if TYPE_CHECKING: - from ppsci import solver - - -def visualize_func(solver: "solver.Solver", epoch_id: Optional[int]): - """Visualization program. - - Args: - solver (solver.Solver): Main Solver. - epoch_id (Optional[int]): Epoch id. - """ - for _, _visualizer in solver.visualizer.items(): - all_input = misc.Prettydefaultdict(list) - all_output = misc.Prettydefaultdict(list) - - # NOTE: 'visualize_func' now do not apply data sharding(different from 'Solver.predict'), - # where every rank receive same input data and compute same output data - # (which will cause computational redundancy), - # but only the 0-rank(master) device save the visualization result into disk. - # TODO(HydrogenSulfate): This will be optimized in the future. - - input_dict = _visualizer.input_dict - batch_size = _visualizer.batch_size - num_samples = len(next(iter(input_dict.values()))) - batch_num = (num_samples + (batch_size - 1)) // batch_size - - for batch_id in range(batch_num): - batch_input_dict = {} - st = batch_id * batch_size - ed = min(num_samples, (batch_id + 1) * batch_size) - - # prepare batch input dict - for key in input_dict: - if not paddle.is_tensor(input_dict[key]): - batch_input_dict[key] = paddle.to_tensor( - input_dict[key][st:ed], paddle.get_default_dtype() - ) - else: - batch_input_dict[key] = input_dict[key][st:ed] - batch_input_dict[key].stop_gradient = False - - # forward - with solver.autocast_context_manager( - solver.use_amp, solver.amp_level - ), solver.no_grad_context_manager(solver.eval_with_no_grad): - batch_output_dict = solver.forward_helper.visu_forward( - _visualizer.output_expr, batch_input_dict, solver.model - ) - - # collect batch data with dtype fixed to float32 regardless of the dtypes of - # paddle runtime, which is most compatible with almost visualization tools. - for key, batch_input in batch_input_dict.items(): - all_input[key].append(batch_input.detach().astype("float32")) - for key, batch_output in batch_output_dict.items(): - all_output[key].append(batch_output.detach().astype("float32")) - - # concatenate all data - for key in all_input: - all_input[key] = paddle.concat(all_input[key]) - for key in all_output: - all_output[key] = paddle.concat(all_output[key]) - - # save visualization - with misc.RankZeroOnly(solver.rank) as is_master: - if is_master: - visual_dir = osp.join(solver.output_dir, "visual") - if epoch_id: - visual_dir = osp.join(visual_dir, f"epoch_{epoch_id}") - os.makedirs(visual_dir, exist_ok=True) - _visualizer.save( - osp.join(visual_dir, _visualizer.prefix), - {**all_input, **all_output}, - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +import os.path as osp +from typing import TYPE_CHECKING +from typing import Optional + +import paddle + +from ppsci.utils import misc + +if TYPE_CHECKING: + from ppsci import solver + + +def visualize_func(solver: "solver.Solver", epoch_id: Optional[int]): + """Visualization program. + + Args: + solver (solver.Solver): Main Solver. + epoch_id (Optional[int]): Epoch id. + """ + for _, _visualizer in solver.visualizer.items(): + all_input = misc.Prettydefaultdict(list) + all_output = misc.Prettydefaultdict(list) + + # NOTE: 'visualize_func' now do not apply data sharding(different from 'Solver.predict'), + # where every rank receive same input data and compute same output data + # (which will cause computational redundancy), + # but only the 0-rank(master) device save the visualization result into disk. + # TODO(HydrogenSulfate): This will be optimized in the future. + + input_dict = _visualizer.input_dict + batch_size = _visualizer.batch_size + num_samples = len(next(iter(input_dict.values()))) + batch_num = (num_samples + (batch_size - 1)) // batch_size + + for batch_id in range(batch_num): + batch_input_dict = {} + st = batch_id * batch_size + ed = min(num_samples, (batch_id + 1) * batch_size) + + # prepare batch input dict + for key in input_dict: + if not paddle.is_tensor(input_dict[key]): + batch_input_dict[key] = paddle.to_tensor( + input_dict[key][st:ed], paddle.get_default_dtype() + ) + else: + batch_input_dict[key] = input_dict[key][st:ed] + batch_input_dict[key].stop_gradient = False + + # forward + with solver.autocast_context_manager( + solver.use_amp, solver.amp_level + ), solver.no_grad_context_manager(solver.eval_with_no_grad): + batch_output_dict = solver.forward_helper.visu_forward( + _visualizer.output_expr, batch_input_dict, solver.model + ) + + # collect batch data with dtype fixed to float32 regardless of the dtypes of + # paddle runtime, which is most compatible with almost visualization tools. + for key, batch_input in batch_input_dict.items(): + all_input[key].append(batch_input.detach().astype("float32")) + for key, batch_output in batch_output_dict.items(): + all_output[key].append(batch_output.detach().astype("float32")) + + # concatenate all data + for key in all_input: + all_input[key] = paddle.concat(all_input[key]) + for key in all_output: + all_output[key] = paddle.concat(all_output[key]) + + # save visualization + with misc.RankZeroOnly(solver.rank) as is_master: + if is_master: + visual_dir = osp.join(solver.output_dir, "visual") + if epoch_id: + visual_dir = osp.join(visual_dir, f"epoch_{epoch_id}") + os.makedirs(visual_dir, exist_ok=True) + _visualizer.save( + osp.join(visual_dir, _visualizer.prefix), + {**all_input, **all_output}, + ) diff --git a/ppsci/utils/__init__.py b/ppsci/utils/__init__.py index fa81a2fc61..9868a266d1 100644 --- a/ppsci/utils/__init__.py +++ b/ppsci/utils/__init__.py @@ -1,66 +1,66 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
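`visualize_func` slices the input dict into `[st:ed]` windows, runs the model on each window, and concatenates the per-key results as float32 for compatibility with common visualization tools. A compact NumPy sketch of that slicing-and-stitching loop with a stand-in predictor; `batched_predict` and its lambda are illustrative, not part of PaddleScience:

```python
import numpy as np


def batched_predict(input_dict, predict_fn, batch_size):
    """Run predict_fn over [st:ed] windows of every key and stitch results back."""
    num_samples = len(next(iter(input_dict.values())))
    batch_num = (num_samples + batch_size - 1) // batch_size
    outputs = {}
    for batch_id in range(batch_num):
        st, ed = batch_id * batch_size, min(num_samples, (batch_id + 1) * batch_size)
        batch_in = {k: v[st:ed] for k, v in input_dict.items()}
        for k, v in predict_fn(batch_in).items():
            outputs.setdefault(k, []).append(np.asarray(v, dtype="float32"))
    return {k: np.concatenate(v) for k, v in outputs.items()}


inputs = {"x": np.linspace(0, 1, 10), "y": np.linspace(0, 1, 10)}
preds = batched_predict(inputs, lambda d: {"u": d["x"] + d["y"]}, batch_size=4)
assert preds["u"].shape == (10,) and preds["u"].dtype == np.float32
```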
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: Put config module import at the top level for register default config(s) in -# ConfigStore at the begining of ppsci -from ppsci.utils import config # isort:skip # noqa: F401 -from ppsci.utils import ema -from ppsci.utils import initializer -from ppsci.utils import logger -from ppsci.utils import misc -from ppsci.utils import reader -from ppsci.utils import writer -from ppsci.utils.checker import dynamic_import_to_globals -from ppsci.utils.checker import run_check -from ppsci.utils.checker import run_check_mesh -from ppsci.utils.expression import ExpressionSolver -from ppsci.utils.misc import AverageMeter -from ppsci.utils.misc import set_random_seed -from ppsci.utils.reader import load_csv_file -from ppsci.utils.reader import load_mat_file -from ppsci.utils.reader import load_npz_file -from ppsci.utils.reader import load_vtk_file -from ppsci.utils.reader import load_vtk_with_time_file -from ppsci.utils.save_load import load_checkpoint -from ppsci.utils.save_load import load_pretrain -from ppsci.utils.save_load import save_checkpoint -from ppsci.utils.symbolic import lambdify -from ppsci.utils.writer import save_csv_file -from ppsci.utils.writer import save_tecplot_file - -__all__ = [ - "AverageMeter", - "ExpressionSolver", - "initializer", - "logger", - "misc", - "ema", - "reader", - "writer", - "load_csv_file", - "load_mat_file", - "load_npz_file", - "load_vtk_file", - "load_vtk_with_time_file", - "save_csv_file", - "save_tecplot_file", - "dynamic_import_to_globals", - "run_check", - "run_check_mesh", - "set_random_seed", - "load_checkpoint", - "load_pretrain", - "save_checkpoint", - "lambdify", -] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# NOTE: Put config module import at the top level for register default config(s) in +# ConfigStore at the begining of ppsci +from ppsci.utils import config # isort:skip # noqa: F401 +from ppsci.utils import ema +from ppsci.utils import initializer +from ppsci.utils import logger +from ppsci.utils import misc +from ppsci.utils import reader +from ppsci.utils import writer +from ppsci.utils.checker import dynamic_import_to_globals +from ppsci.utils.checker import run_check +from ppsci.utils.checker import run_check_mesh +from ppsci.utils.expression import ExpressionSolver +from ppsci.utils.misc import AverageMeter +from ppsci.utils.misc import set_random_seed +from ppsci.utils.reader import load_csv_file +from ppsci.utils.reader import load_mat_file +from ppsci.utils.reader import load_npz_file +from ppsci.utils.reader import load_vtk_file +from ppsci.utils.reader import load_vtk_with_time_file +from ppsci.utils.save_load import load_checkpoint +from ppsci.utils.save_load import load_pretrain +from ppsci.utils.save_load import save_checkpoint +from ppsci.utils.symbolic import lambdify +from ppsci.utils.writer import save_csv_file +from ppsci.utils.writer import save_tecplot_file + +__all__ = [ + "AverageMeter", + "ExpressionSolver", + "initializer", + "logger", + "misc", + "ema", + "reader", + "writer", + "load_csv_file", + "load_mat_file", + "load_npz_file", + "load_vtk_file", + "load_vtk_with_time_file", + "save_csv_file", + "save_tecplot_file", + "dynamic_import_to_globals", + "run_check", + "run_check_mesh", + "set_random_seed", + "load_checkpoint", + "load_pretrain", + "save_checkpoint", + "lambdify", +] diff --git a/ppsci/utils/callbacks.py b/ppsci/utils/callbacks.py index cd65d38d87..0951cee6f8 100644 --- a/ppsci/utils/callbacks.py +++ b/ppsci/utils/callbacks.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -110,3 +111,116 @@ def on_job_start(self, config: DictConfig, **kwargs: Any) -> None: core.set_prim_eager_enabled(True) core._set_prim_all_enabled(True) logger.message("Prim mode is enabled.") +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib.util +import inspect +import sys +from os import path as osp +from typing import Any + +from hydra.experimental.callback import Callback +from omegaconf import DictConfig + +from ppsci.utils import config as config_module +from ppsci.utils import logger +from ppsci.utils import misc + +RUNTIME_EXIT_CODE = 1 # for other errors +VALIDATION_ERROR_EXIT_CODE = 2 # for invalid argument detected in config file + + +class InitCallback(Callback): + """Callback class for: + 1. Parse config dict from given yaml file and check its validity. + 2. Fixing random seed to 'config.seed'. + 3. Initialize logger while creating output directory(if not exist). + 4. Enable prim mode if specified. 
+ + NOTE: This callback is mainly for reducing unnecessary duplicate code in each + examples code when runing with hydra. + + This callback should be added to hydra config file as follows: + + ``` yaml hl_lines="7-11" + # content of example.yaml below + hydra: + run: + ... + job: + ... + callbacks: + init_callback: + _target_: ppsci.utils.callbacks.InitCallback # <-- add callback at here + xxx_callback: + _target_: ppsci.utils.callbacks.XxxCallback # <-- add more callback here + sweep: + ... + ... + ... + ``` + """ + + def on_job_start(self, config: DictConfig, **kwargs: Any) -> None: + if importlib.util.find_spec("pydantic") is not None: + from pydantic import ValidationError + else: + logger.error( + f"ModuleNotFoundError at {__file__}:{inspect.currentframe().f_lineno}\n" + "Please install pydantic with `pip install pydantic` when set callbacks" + " in your config yaml." + ) + sys.exit(RUNTIME_EXIT_CODE) + + # check given cfg using pre-defined pydantic schema in 'SolverConfig', + # error(s) will be printed and exit program if any checking failed at this step + try: + _model_pydantic = config_module.SolverConfig(**dict(config)) + full_cfg = DictConfig(_model_pydantic.model_dump()) + except ValidationError as e: + print(e) + sys.exit(VALIDATION_ERROR_EXIT_CODE) + except Exception as e: + print(e) + sys.exit(RUNTIME_EXIT_CODE) + + # fix random seed for reproducibility + misc.set_random_seed(full_cfg.seed) + + # initialze logger while creating output directory + logger.init_logger( + "ppsci", + osp.join(full_cfg.output_dir, f"{full_cfg.mode}.log") + if full_cfg.output_dir and full_cfg.mode not in ["export", "infer"] + else None, + full_cfg.log_level, + ) + + # set device before running into example function + if "device" in full_cfg: + import paddle + + paddle.device.set_device(full_cfg.device) + + # enable prim if specified + if "prim" in full_cfg and bool(full_cfg.prim): + # Mostly for compiler running with dy2st. + from paddle.framework import core + + core.set_prim_eager_enabled(True) + core._set_prim_all_enabled(True) + logger.message("Prim mode is enabled.") +>>>>>>> Stashed changes diff --git a/ppsci/utils/checker.py b/ppsci/utils/checker.py index 8991a89e2c..7fedb20888 100644 --- a/ppsci/utils/checker.py +++ b/ppsci/utils/checker.py @@ -1,287 +1,287 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import importlib.util -import traceback -from typing import Dict -from typing import Sequence -from typing import Union - -import paddle - -from ppsci.utils import logger - -__all__ = [ - "run_check", - "run_check_mesh", - "dynamic_import_to_globals", -] - - -def run_check() -> None: - """Check whether PaddleScience is installed correctly and running successfully on - your system. - - Examples: - >>> import ppsci - >>> ppsci.utils.run_check() # doctest: +SKIP - """ - # test demo code below. 
- import ppsci - - try: - ppsci.utils.set_random_seed(42) - ppsci.utils.logger.init_logger() - model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16, "tanh") - - equation = {"NavierStokes": ppsci.equation.NavierStokes(0.01, 1.0, 2, False)} - - geom = {"rect": ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05))} - - ITERS_PER_EPOCH = 5 - train_dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": ITERS_PER_EPOCH, - } - - NPOINT_PDE = 8**2 - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_PDE}, - ppsci.loss.MSELoss("sum"), - evenly=True, - weight_dict={ - "continuity": 0.0001, - "momentum_x": 0.0001, - "momentum_y": 0.0001, - }, - name="EQ", - ) - constraint = {pde_constraint.name: pde_constraint} - - residual_validator = ppsci.validate.GeometryValidator( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["rect"], - { - "dataset": "NamedArrayDataset", - "total_size": 8**2, - "batch_size": 32, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("sum"), - evenly=True, - metric={"MSE": ppsci.metric.MSE(False)}, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - EPOCHS = 2 - optimizer = ppsci.optimizer.Adam(0.001)(model) - solver = ppsci.solver.Solver( - model, - constraint, - None, - optimizer, - None, - EPOCHS, - ITERS_PER_EPOCH, - device=paddle.device.get_device(), - equation=equation, - validator=validator, - ) - solver.train() - solver.eval(EPOCHS) - except Exception as e: - traceback.print_exc() - logger.error( - f"PaddleScience meets some problem with \n {repr(e)} \nplease check whether " - "Paddle's version and PaddleScience's version are both correct." - ) - else: - logger.message("PaddleScience is installed successfully.✨ 🍰 ✨") - - -def run_check_mesh() -> None: - """Check whether geometry packages is installed correctly and `ppsci.geometry.Mesh` - can running successfully on your system. - - Examples: - >>> import ppsci - >>> ppsci.utils.run_check_mesh() # doctest: +SKIP - """ - # test demo code below. 
- if importlib.util.find_spec("open3d") is None: - raise ModuleNotFoundError( - "Please install open3d first with: " "`pip install open3d`" - ) - if importlib.util.find_spec("pysdf") is None: - raise ModuleNotFoundError( - "Please install pysdf first with: `pip install pysdf`" - ) - if importlib.util.find_spec("pymesh") is None: - raise ModuleNotFoundError( - "Please install pymesh first as " - "https://paddlescience-docs.readthedocs.io/zh/latest/zh/install_setup/#__tabbed_4_4" - ) - - import numpy as np - import pymesh - - import ppsci - - try: - ppsci.utils.set_random_seed(42) - ppsci.utils.logger.init_logger() - model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16, "tanh") - - equation = {"NavierStokes": ppsci.equation.NavierStokes(0.01, 1.0, 2, False)} - - # create a 1x1x1 simple cube geometry - vertices = np.array( - [ - [0.0, 0.0, 0.0], - [1.0, 0.0, 0.0], - [0.0, 0.0, 1.0], - [1.0, 0.0, 1.0], - [0.0, 1.0, 0.0], - [1.0, 1.0, 0.0], - [0.0, 1.0, 1.0], - [1.0, 1.0, 1.0], - ] - ) # 8 vertices for mesh - faces = np.array( - [ - [4, 7, 5], - [4, 6, 7], - [0, 2, 4], - [2, 6, 4], - [0, 1, 2], - [1, 3, 2], - [1, 5, 7], - [1, 7, 3], - [2, 3, 7], - [2, 7, 6], - [0, 4, 1], - [1, 4, 5], - ] - ) # 12 triangle faces for mesh - box_mesh = pymesh.form_mesh(vertices, faces) - geom = {"rect": ppsci.geometry.Mesh(box_mesh)} - - ITERS_PER_EPOCH = 5 - train_dataloader_cfg = { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": ITERS_PER_EPOCH, - } - - NPOINT_PDE = 8**2 - pde_constraint = ppsci.constraint.InteriorConstraint( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["rect"], - {**train_dataloader_cfg, "batch_size": NPOINT_PDE}, - ppsci.loss.MSELoss("sum"), - weight_dict={ - "continuity": "sdf", - "momentum_x": "sdf", - "momentum_y": "sdf", - }, - name="EQ", - ) - constraint = {pde_constraint.name: pde_constraint} - - residual_validator = ppsci.validate.GeometryValidator( - equation["NavierStokes"].equations, - {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, - geom["rect"], - { - "dataset": "NamedArrayDataset", - "total_size": 8**2, - "batch_size": 32, - "sampler": {"name": "BatchSampler"}, - }, - ppsci.loss.MSELoss("sum"), - metric={"MSE": ppsci.metric.MSE(False)}, - name="Residual", - ) - validator = {residual_validator.name: residual_validator} - - EPOCHS = 2 - optimizer = ppsci.optimizer.Adam(0.001)(model) - solver = ppsci.solver.Solver( - model, - constraint, - None, - optimizer, - None, - EPOCHS, - ITERS_PER_EPOCH, - device=paddle.device.get_device(), - equation=equation, - validator=validator, - ) - solver.train() - solver.eval(EPOCHS) - except Exception as e: - traceback.print_exc() - logger.error( - f"PaddleScience meets some problem with \n {repr(e)} \nplease check whether " - "open3d, pysdf, pybind11, PyMesh are all installed correctly." - ) - else: - logger.message("ppsci.geometry.Mesh module running successfully.✨ 🍰 ✨") - - -def dynamic_import_to_globals( - names: Union[str, Sequence[str]], alias: Dict[str, str] = None -) -> bool: - """Import module and add it to globals() by given names dynamically. - - Args: - names (Union[str, Sequence[str]]): Module name or sequence of module names. - alias (Dict[str, str]): Alias name of module when imported into globals(). - - Returns: - bool: Whether given names all exist. 
- """ - if isinstance(names, str): - names = (names,) - - if alias is None: - alias = {} - - for name in names: - # find module in environment by it's name and alias(if given) - module_spec = importlib.util.find_spec(name) - if module_spec is None and name in alias: - module_spec = importlib.util.find_spec(alias[name]) - - # log error and return False if module do not exist - if not module_spec: - logger.error(f"Module {name} should be installed first.") - return False - - # module exist, add to globals() if not in globals() - add_name = name - if add_name in alias: - add_name = alias[add_name] - if add_name not in globals(): - globals()[add_name] = importlib.import_module(name) - - return True +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import importlib.util +import traceback +from typing import Dict +from typing import Sequence +from typing import Union + +import paddle + +from ppsci.utils import logger + +__all__ = [ + "run_check", + "run_check_mesh", + "dynamic_import_to_globals", +] + + +def run_check() -> None: + """Check whether PaddleScience is installed correctly and running successfully on + your system. + + Examples: + >>> import ppsci + >>> ppsci.utils.run_check() # doctest: +SKIP + """ + # test demo code below. 
+ import ppsci + + try: + ppsci.utils.set_random_seed(42) + ppsci.utils.logger.init_logger() + model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16, "tanh") + + equation = {"NavierStokes": ppsci.equation.NavierStokes(0.01, 1.0, 2, False)} + + geom = {"rect": ppsci.geometry.Rectangle((-0.05, -0.05), (0.05, 0.05))} + + ITERS_PER_EPOCH = 5 + train_dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": ITERS_PER_EPOCH, + } + + NPOINT_PDE = 8**2 + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_PDE}, + ppsci.loss.MSELoss("sum"), + evenly=True, + weight_dict={ + "continuity": 0.0001, + "momentum_x": 0.0001, + "momentum_y": 0.0001, + }, + name="EQ", + ) + constraint = {pde_constraint.name: pde_constraint} + + residual_validator = ppsci.validate.GeometryValidator( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["rect"], + { + "dataset": "NamedArrayDataset", + "total_size": 8**2, + "batch_size": 32, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("sum"), + evenly=True, + metric={"MSE": ppsci.metric.MSE(False)}, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + EPOCHS = 2 + optimizer = ppsci.optimizer.Adam(0.001)(model) + solver = ppsci.solver.Solver( + model, + constraint, + None, + optimizer, + None, + EPOCHS, + ITERS_PER_EPOCH, + device=paddle.device.get_device(), + equation=equation, + validator=validator, + ) + solver.train() + solver.eval(EPOCHS) + except Exception as e: + traceback.print_exc() + logger.error( + f"PaddleScience meets some problem with \n {repr(e)} \nplease check whether " + "Paddle's version and PaddleScience's version are both correct." + ) + else: + logger.message("PaddleScience is installed successfully.✨ 🍰 ✨") + + +def run_check_mesh() -> None: + """Check whether geometry packages is installed correctly and `ppsci.geometry.Mesh` + can running successfully on your system. + + Examples: + >>> import ppsci + >>> ppsci.utils.run_check_mesh() # doctest: +SKIP + """ + # test demo code below. 
+ if importlib.util.find_spec("open3d") is None: + raise ModuleNotFoundError( + "Please install open3d first with: " "`pip install open3d`" + ) + if importlib.util.find_spec("pysdf") is None: + raise ModuleNotFoundError( + "Please install pysdf first with: `pip install pysdf`" + ) + if importlib.util.find_spec("pymesh") is None: + raise ModuleNotFoundError( + "Please install pymesh first as " + "https://paddlescience-docs.readthedocs.io/zh/latest/zh/install_setup/#__tabbed_4_4" + ) + + import numpy as np + import pymesh + + import ppsci + + try: + ppsci.utils.set_random_seed(42) + ppsci.utils.logger.init_logger() + model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16, "tanh") + + equation = {"NavierStokes": ppsci.equation.NavierStokes(0.01, 1.0, 2, False)} + + # create a 1x1x1 simple cube geometry + vertices = np.array( + [ + [0.0, 0.0, 0.0], + [1.0, 0.0, 0.0], + [0.0, 0.0, 1.0], + [1.0, 0.0, 1.0], + [0.0, 1.0, 0.0], + [1.0, 1.0, 0.0], + [0.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ) # 8 vertices for mesh + faces = np.array( + [ + [4, 7, 5], + [4, 6, 7], + [0, 2, 4], + [2, 6, 4], + [0, 1, 2], + [1, 3, 2], + [1, 5, 7], + [1, 7, 3], + [2, 3, 7], + [2, 7, 6], + [0, 4, 1], + [1, 4, 5], + ] + ) # 12 triangle faces for mesh + box_mesh = pymesh.form_mesh(vertices, faces) + geom = {"rect": ppsci.geometry.Mesh(box_mesh)} + + ITERS_PER_EPOCH = 5 + train_dataloader_cfg = { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": ITERS_PER_EPOCH, + } + + NPOINT_PDE = 8**2 + pde_constraint = ppsci.constraint.InteriorConstraint( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["rect"], + {**train_dataloader_cfg, "batch_size": NPOINT_PDE}, + ppsci.loss.MSELoss("sum"), + weight_dict={ + "continuity": "sdf", + "momentum_x": "sdf", + "momentum_y": "sdf", + }, + name="EQ", + ) + constraint = {pde_constraint.name: pde_constraint} + + residual_validator = ppsci.validate.GeometryValidator( + equation["NavierStokes"].equations, + {"continuity": 0, "momentum_x": 0, "momentum_y": 0}, + geom["rect"], + { + "dataset": "NamedArrayDataset", + "total_size": 8**2, + "batch_size": 32, + "sampler": {"name": "BatchSampler"}, + }, + ppsci.loss.MSELoss("sum"), + metric={"MSE": ppsci.metric.MSE(False)}, + name="Residual", + ) + validator = {residual_validator.name: residual_validator} + + EPOCHS = 2 + optimizer = ppsci.optimizer.Adam(0.001)(model) + solver = ppsci.solver.Solver( + model, + constraint, + None, + optimizer, + None, + EPOCHS, + ITERS_PER_EPOCH, + device=paddle.device.get_device(), + equation=equation, + validator=validator, + ) + solver.train() + solver.eval(EPOCHS) + except Exception as e: + traceback.print_exc() + logger.error( + f"PaddleScience meets some problem with \n {repr(e)} \nplease check whether " + "open3d, pysdf, pybind11, PyMesh are all installed correctly." + ) + else: + logger.message("ppsci.geometry.Mesh module running successfully.✨ 🍰 ✨") + + +def dynamic_import_to_globals( + names: Union[str, Sequence[str]], alias: Dict[str, str] = None +) -> bool: + """Import module and add it to globals() by given names dynamically. + + Args: + names (Union[str, Sequence[str]]): Module name or sequence of module names. + alias (Dict[str, str]): Alias name of module when imported into globals(). + + Returns: + bool: Whether given names all exist. 
+ """ + if isinstance(names, str): + names = (names,) + + if alias is None: + alias = {} + + for name in names: + # find module in environment by it's name and alias(if given) + module_spec = importlib.util.find_spec(name) + if module_spec is None and name in alias: + module_spec = importlib.util.find_spec(alias[name]) + + # log error and return False if module do not exist + if not module_spec: + logger.error(f"Module {name} should be installed first.") + return False + + # module exist, add to globals() if not in globals() + add_name = name + if add_name in alias: + add_name = alias[add_name] + if add_name not in globals(): + globals()[add_name] = importlib.import_module(name) + + return True diff --git a/ppsci/utils/download.py b/ppsci/utils/download.py index 291703e2d2..e399c94ead 100644 --- a/ppsci/utils/download.py +++ b/ppsci/utils/download.py @@ -1,285 +1,285 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import hashlib -import os -import os.path as osp -import shutil -import tarfile -import time -import zipfile - -import requests -import tqdm - -from ppsci.utils import logger -from ppsci.utils import misc - -__all__ = ["get_weights_path_from_url"] - -WEIGHTS_HOME = osp.expanduser("~/.paddlesci/weights") - -DOWNLOAD_RETRY_LIMIT = 3 - - -def is_url(path): - """ - Whether path is URL. - - Args: - path (str): URL string or not. - """ - return path.startswith("http://") or path.startswith("https://") - - -def get_weights_path_from_url(url, md5sum=None): - """Get weights path from WEIGHT_HOME, if not exists, - download it from url. - - Args: - url (str): Download url - md5sum (str): md5 sum of download package - - Returns: - str: a local path to save downloaded weights. - """ - path = get_path_from_url(url, WEIGHTS_HOME, md5sum) - return path - - -def _map_path(url, root_dir): - # parse path after download under root_dir - fname = osp.split(url)[-1] - fpath = fname - return osp.join(root_dir, fpath) - - -def get_path_from_url(url, root_dir, md5sum=None, check_exist=True, decompress=True): - """Download from given url to root_dir. - if file or directory specified by url is exists under - root_dir, return the path directly, otherwise download - from url and decompress it, return the path. - - Args: - url (str): Download url - root_dir (str): Root dir for downloading, it should be - WEIGHTS_HOME or DATASET_HOME - md5sum (str): md5 sum of download package - - Returns: - str: a local path to save downloaded models & weights & datasets. - """ - if not is_url(url): - raise ValueError(f"Given url({url}) is not valid") - # parse path after download to decompress under root_dir - fullpath = _map_path(url, root_dir) - # Mainly used to solve the problem of downloading data from different - # machines in the case of multiple machines. Different nodes will download - # data, and the same node will only download data once. 
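# Illustrative usage sketch for the download helpers above: a minimal example
# assuming ppsci is importable; the URL is a placeholder, not an artifact of
# this repository, and no real checksum is supplied.
from ppsci.utils.download import get_weights_path_from_url

# Downloads once per node (rank-zero only, as the comment above explains) and
# caches the file under WEIGHTS_HOME (~/.paddlesci/weights).
local_path = get_weights_path_from_url(
    "https://example.com/models/demo_pretrained.pdparams",  # placeholder URL
    md5sum=None,  # with no checksum given, md5 verification is skipped
)
print(local_path)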
- rank_id_curr_node = int(os.environ.get("PADDLE_RANK_IN_NODE", 0)) - - if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum): - logger.message(f"Found {fullpath} already in {WEIGHTS_HOME}, skip downloading.") - else: - with misc.RankZeroOnly(rank_id_curr_node) as is_master: - if is_master: - fullpath = _download(url, root_dir, md5sum) - - if decompress and (tarfile.is_tarfile(fullpath) or zipfile.is_zipfile(fullpath)): - with misc.RankZeroOnly(rank_id_curr_node) as is_master: - if is_master: - fullpath = _decompress(fullpath) - - return fullpath - - -def _download(url, path, md5sum=None): - """ - Download from url, save to path. - - url (str): Download url - path (str): Download to given path - """ - if not osp.exists(path): - os.makedirs(path) - - fname = osp.split(url)[-1] - fullname = osp.join(path, fname) - retry_cnt = 0 - - while not (osp.exists(fullname) and _md5check(fullname, md5sum)): - if retry_cnt < DOWNLOAD_RETRY_LIMIT: - retry_cnt += 1 - else: - raise RuntimeError(f"Download from {url} failed. " "Retry limit reached") - - logger.message(f"Downloading {fname} from {url}") - - try: - req = requests.get(url, stream=True) - except Exception as e: # requests.exceptions.ConnectionError - logger.warning( - f"Downloading {fname} from {url} failed {retry_cnt + 1} times with exception {str(e)}" - ) - time.sleep(1) - continue - - if req.status_code != 200: - raise RuntimeError( - f"Downloading from {url} failed with code " f"{req.status_code}!" - ) - - # For protecting download interrupted, download to - # tmp_fullname firstly, move tmp_fullname to fullname - # after download finished - tmp_fullname = fullname + "_tmp" - total_size = req.headers.get("content-length") - with open(tmp_fullname, "wb") as f: - if total_size: - with tqdm.tqdm(total=(int(total_size) + 1023) // 1024) as pbar: - for chunk in req.iter_content(chunk_size=1024): - f.write(chunk) - pbar.update(1) - else: - for chunk in req.iter_content(chunk_size=1024): - if chunk: - f.write(chunk) - shutil.move(tmp_fullname, fullname) - logger.message(f"Finish downloading pretrained model and saved to {fullname}") - - return fullname - - -def _md5check(fullname, md5sum=None): - if md5sum is None: - return True - - logger.message(f"File {fullname} md5 checking...") - md5 = hashlib.md5() - with open(fullname, "rb") as f: - for chunk in iter(lambda: f.read(4096), b""): - md5.update(chunk) - calc_md5sum = md5.hexdigest() - - if calc_md5sum != md5sum: - logger.error( - f"File {fullname} md5 check failed, {calc_md5sum}(calc) != " - f"{md5sum}(base)" - ) - return False - return True - - -def _decompress(fname): - """ - Decompress for zip and tar file - """ - logger.message(f"Decompressing {fname}...") - - # For protecting decompressing interrupted, - # decompress to fpath_tmp directory firstly, if decompress - # succeed, move decompress files to fpath and delete - # fpath_tmp and remove download compress file. 
- - if tarfile.is_tarfile(fname): - uncompressed_path = _uncompress_file_tar(fname) - elif zipfile.is_zipfile(fname): - uncompressed_path = _uncompress_file_zip(fname) - else: - raise TypeError(f"Unsupported compress file type {fname}") - - return uncompressed_path - - -def _uncompress_file_zip(filepath): - with zipfile.ZipFile(filepath, "r") as files: - file_list = files.namelist() - - file_dir = os.path.dirname(filepath) - - if _is_a_single_file(file_list): - rootpath = file_list[0] - uncompressed_path = os.path.join(file_dir, rootpath) - - for item in file_list: - files.extract(item, file_dir) - - elif _is_a_single_dir(file_list): - rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] - uncompressed_path = os.path.join(file_dir, rootpath) - - for item in file_list: - files.extract(item, file_dir) - - else: - rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] - uncompressed_path = os.path.join(file_dir, rootpath) - if not os.path.exists(uncompressed_path): - os.makedirs(uncompressed_path) - for item in file_list: - files.extract(item, os.path.join(file_dir, rootpath)) - - return uncompressed_path - - -def _uncompress_file_tar(filepath, mode="r:*"): - with tarfile.open(filepath, mode) as files: - file_list = files.getnames() - - file_dir = os.path.dirname(filepath) - - if _is_a_single_file(file_list): - rootpath = file_list[0] - uncompressed_path = os.path.join(file_dir, rootpath) - for item in file_list: - files.extract(item, file_dir) - elif _is_a_single_dir(file_list): - rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] - uncompressed_path = os.path.join(file_dir, rootpath) - for item in file_list: - files.extract(item, file_dir) - else: - rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] - uncompressed_path = os.path.join(file_dir, rootpath) - if not os.path.exists(uncompressed_path): - os.makedirs(uncompressed_path) - - for item in file_list: - files.extract(item, os.path.join(file_dir, rootpath)) - - return uncompressed_path - - -def _is_a_single_file(file_list): - if len(file_list) == 1 and file_list[0].find(os.sep) < -1: - return True - return False - - -def _is_a_single_dir(file_list): - new_file_list = [] - for file_path in file_list: - if "/" in file_path: - file_path = file_path.replace("/", os.sep) - elif "\\" in file_path: - file_path = file_path.replace("\\", os.sep) - new_file_list.append(file_path) - - file_name = new_file_list[0].split(os.sep)[0] - for i in range(1, len(new_file_list)): - if file_name != new_file_list[i].split(os.sep)[0]: - return False - return True +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import hashlib +import os +import os.path as osp +import shutil +import tarfile +import time +import zipfile + +import requests +import tqdm + +from ppsci.utils import logger +from ppsci.utils import misc + +__all__ = ["get_weights_path_from_url"] + +WEIGHTS_HOME = osp.expanduser("~/.paddlesci/weights") + +DOWNLOAD_RETRY_LIMIT = 3 + + +def is_url(path): + """ + Whether path is URL. + + Args: + path (str): URL string or not. + """ + return path.startswith("http://") or path.startswith("https://") + + +def get_weights_path_from_url(url, md5sum=None): + """Get weights path from WEIGHT_HOME, if not exists, + download it from url. + + Args: + url (str): Download url + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded weights. + """ + path = get_path_from_url(url, WEIGHTS_HOME, md5sum) + return path + + +def _map_path(url, root_dir): + # parse path after download under root_dir + fname = osp.split(url)[-1] + fpath = fname + return osp.join(root_dir, fpath) + + +def get_path_from_url(url, root_dir, md5sum=None, check_exist=True, decompress=True): + """Download from given url to root_dir. + if file or directory specified by url is exists under + root_dir, return the path directly, otherwise download + from url and decompress it, return the path. + + Args: + url (str): Download url + root_dir (str): Root dir for downloading, it should be + WEIGHTS_HOME or DATASET_HOME + md5sum (str): md5 sum of download package + + Returns: + str: a local path to save downloaded models & weights & datasets. + """ + if not is_url(url): + raise ValueError(f"Given url({url}) is not valid") + # parse path after download to decompress under root_dir + fullpath = _map_path(url, root_dir) + # Mainly used to solve the problem of downloading data from different + # machines in the case of multiple machines. Different nodes will download + # data, and the same node will only download data once. + rank_id_curr_node = int(os.environ.get("PADDLE_RANK_IN_NODE", 0)) + + if osp.exists(fullpath) and check_exist and _md5check(fullpath, md5sum): + logger.message(f"Found {fullpath} already in {WEIGHTS_HOME}, skip downloading.") + else: + with misc.RankZeroOnly(rank_id_curr_node) as is_master: + if is_master: + fullpath = _download(url, root_dir, md5sum) + + if decompress and (tarfile.is_tarfile(fullpath) or zipfile.is_zipfile(fullpath)): + with misc.RankZeroOnly(rank_id_curr_node) as is_master: + if is_master: + fullpath = _decompress(fullpath) + + return fullpath + + +def _download(url, path, md5sum=None): + """ + Download from url, save to path. + + url (str): Download url + path (str): Download to given path + """ + if not osp.exists(path): + os.makedirs(path) + + fname = osp.split(url)[-1] + fullname = osp.join(path, fname) + retry_cnt = 0 + + while not (osp.exists(fullname) and _md5check(fullname, md5sum)): + if retry_cnt < DOWNLOAD_RETRY_LIMIT: + retry_cnt += 1 + else: + raise RuntimeError(f"Download from {url} failed. " "Retry limit reached") + + logger.message(f"Downloading {fname} from {url}") + + try: + req = requests.get(url, stream=True) + except Exception as e: # requests.exceptions.ConnectionError + logger.warning( + f"Downloading {fname} from {url} failed {retry_cnt + 1} times with exception {str(e)}" + ) + time.sleep(1) + continue + + if req.status_code != 200: + raise RuntimeError( + f"Downloading from {url} failed with code " f"{req.status_code}!" 
+ ) + + # For protecting download interrupted, download to + # tmp_fullname firstly, move tmp_fullname to fullname + # after download finished + tmp_fullname = fullname + "_tmp" + total_size = req.headers.get("content-length") + with open(tmp_fullname, "wb") as f: + if total_size: + with tqdm.tqdm(total=(int(total_size) + 1023) // 1024) as pbar: + for chunk in req.iter_content(chunk_size=1024): + f.write(chunk) + pbar.update(1) + else: + for chunk in req.iter_content(chunk_size=1024): + if chunk: + f.write(chunk) + shutil.move(tmp_fullname, fullname) + logger.message(f"Finish downloading pretrained model and saved to {fullname}") + + return fullname + + +def _md5check(fullname, md5sum=None): + if md5sum is None: + return True + + logger.message(f"File {fullname} md5 checking...") + md5 = hashlib.md5() + with open(fullname, "rb") as f: + for chunk in iter(lambda: f.read(4096), b""): + md5.update(chunk) + calc_md5sum = md5.hexdigest() + + if calc_md5sum != md5sum: + logger.error( + f"File {fullname} md5 check failed, {calc_md5sum}(calc) != " + f"{md5sum}(base)" + ) + return False + return True + + +def _decompress(fname): + """ + Decompress for zip and tar file + """ + logger.message(f"Decompressing {fname}...") + + # For protecting decompressing interrupted, + # decompress to fpath_tmp directory firstly, if decompress + # succeed, move decompress files to fpath and delete + # fpath_tmp and remove download compress file. + + if tarfile.is_tarfile(fname): + uncompressed_path = _uncompress_file_tar(fname) + elif zipfile.is_zipfile(fname): + uncompressed_path = _uncompress_file_zip(fname) + else: + raise TypeError(f"Unsupported compress file type {fname}") + + return uncompressed_path + + +def _uncompress_file_zip(filepath): + with zipfile.ZipFile(filepath, "r") as files: + file_list = files.namelist() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + + for item in file_list: + files.extract(item, file_dir) + + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + for item in file_list: + files.extract(item, os.path.join(file_dir, rootpath)) + + return uncompressed_path + + +def _uncompress_file_tar(filepath, mode="r:*"): + with tarfile.open(filepath, mode) as files: + file_list = files.getnames() + + file_dir = os.path.dirname(filepath) + + if _is_a_single_file(file_list): + rootpath = file_list[0] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + elif _is_a_single_dir(file_list): + rootpath = os.path.splitext(file_list[0])[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + for item in file_list: + files.extract(item, file_dir) + else: + rootpath = os.path.splitext(filepath)[0].split(os.sep)[-1] + uncompressed_path = os.path.join(file_dir, rootpath) + if not os.path.exists(uncompressed_path): + os.makedirs(uncompressed_path) + + for item in file_list: + files.extract(item, os.path.join(file_dir, rootpath)) + + return uncompressed_path + + +def _is_a_single_file(file_list): + if len(file_list) == 1 and file_list[0].find(os.sep) < -1: + 
return True + return False + + +def _is_a_single_dir(file_list): + new_file_list = [] + for file_path in file_list: + if "/" in file_path: + file_path = file_path.replace("/", os.sep) + elif "\\" in file_path: + file_path = file_path.replace("\\", os.sep) + new_file_list.append(file_path) + + file_name = new_file_list[0].split(os.sep)[0] + for i in range(1, len(new_file_list)): + if file_name != new_file_list[i].split(os.sep)[0]: + return False + return True diff --git a/ppsci/utils/ema.py b/ppsci/utils/ema.py index 690ee6fda8..7cf84a6251 100644 --- a/ppsci/utils/ema.py +++ b/ppsci/utils/ema.py @@ -1,172 +1,172 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import itertools -from typing import Dict -from typing import Optional - -import paddle -from paddle import nn - -__all__ = [ - "AveragedModel", - "ExponentialMovingAverage", - "StochasticWeightAverage", -] - - -class AveragedModel(nn.Layer): - """Base class for Averaged Model. - - Args: - model (nn.Layer): The model to be averaged. - decay (float): The decay rate for averaging. - """ - - def __init__(self, model: nn.Layer, decay: Optional[float] = None): - super().__init__() - self.model = model # As a quick reference to online model - self.decay = decay - - self.params_shadow: Dict[str, paddle.Tensor] = {} # ema param or buffer - self.params_backup: Dict[str, paddle.Tensor] = {} # used for apply and restore - for name, param_or_buffer in itertools.chain( - self.model.named_parameters(), self.model.named_buffers() - ): - self.params_shadow[name] = param_or_buffer.clone().detach() - - self.register_buffer("n_avg", paddle.to_tensor(0, "int64"), True) - - def _update_fn_( - self, - shadow_param: paddle.Tensor, - model_param: paddle.Tensor, - step: paddle.Tensor, - ): - raise NotImplementedError("AveragedModel._update_fn_ should be implemented.") - - def update(self): - for name, param_or_buffer in itertools.chain( - self.model.named_parameters(), self.model.named_buffers() - ): - if not param_or_buffer.stop_gradient: - assert ( - name in self.params_shadow - ), f"Parameter: {name} should be in params_shadow dict, but not found." 
- - # only update floating and complex data - if paddle.is_floating_point(param_or_buffer) or paddle.is_complex( - param_or_buffer - ): - with paddle.no_grad(): - self._update_fn_( - self.params_shadow[name], - param_or_buffer, - self.n_avg, - ) - self.n_avg += 1 - - def apply_shadow(self): - """Set averaged model parameters to online model.""" - for name, param_or_buffer in itertools.chain( - self.model.named_parameters(), self.model.named_buffers() - ): - if name in self.params_shadow: - stop_gradient = param_or_buffer.stop_gradient - with paddle.no_grad(): - self.params_backup[name] = paddle.assign(param_or_buffer) - paddle.assign(self.params_shadow[name], param_or_buffer) - param_or_buffer.stop_gradient = stop_gradient - - def restore(self): - """Restore online model parameters from backup parameter dict.""" - assert self.params_backup, ( - "params_backup should not be empty, may be caused by calling 'restore' " - "before 'apply_shadow'." - ) - for name, param_or_buffer in itertools.chain( - self.model.named_parameters(), self.model.named_buffers() - ): - if name in self.params_backup: - assert name in self.params_shadow - stop_gradient = param_or_buffer.stop_gradient - with paddle.no_grad(): - paddle.assign(self.params_backup[name], param_or_buffer) - param_or_buffer.stop_gradient = stop_gradient - - self.params_backup = {} - - def set_state_dict(self, state_dict: Dict[str, paddle.Tensor]): - assert ( - "n_avg" in state_dict - ), "state_dict should contain 'n_avg' key, but not found." - self.n_avg.set_value(state_dict.pop("n_avg")) - self.params_shadow.update(state_dict) - - def state_dict(self) -> Dict[str, paddle.Tensor]: - return { - **self.params_shadow, - "n_avg": self.n_avg, - } - - -class ExponentialMovingAverage(AveragedModel): - r"""Implements the exponential moving average (EMA) of the model. - - All parameters are updated by the formula as below: - - $$ - \mathbf{\theta}_{EMA}^{t+1} = \alpha \mathbf{\theta}_{EMA}^{t} + (1 - \alpha) \mathbf{\theta}^{t} - $$ - - Where $\alpha$ is the decay rate, $\theta_{EMA}^{t}$ is the moving average parameters and $\theta^{t}$ is the online parameters at step $t$. - - Args: - model (nn.Layer): The model to be averaged. - decay (float): The decay rate for averaging. - """ - - def __init__(self, model: nn.Layer, decay: float = 0.9): - super().__init__(model, decay) - - def _update_fn_(self, shadow_param, model_param, step): - shadow_param.lerp_(model_param, 1.0 - self.decay) - - -class StochasticWeightAverage(AveragedModel): - r"""Implements the stochastic weight averaging (SWA) of the model. - - Stochastic Weight Averaging was proposed in [Averaging Weights Leads to Wider Optima and Better Generalization](https://arxiv.org/abs/1803.05407), - - All parameters are updated by the formula as below: - - $$ - \mathbf{\theta}_{SWA}^{t} = \frac{1}{t-t_0+1}\sum_{i=t_0}^t{\mathbf{\theta}^{i}} - $$ - - Where $\theta_{SWA}^{t}$ is the average parameters between step $t_0$ and $t$, $\theta^{i}$ is the online parameters at step $i$. - - Args: - model (nn.Layer): The model to be averaged. - """ - - def __init__(self, model: nn.Layer): - super().__init__(model, None) - self.n_avg += 1 # Set to 1 for model already initialized - - def _update_fn_(self, shadow_param, model_param, step): - dynamic_decay = step / (step + 1) - shadow_param.lerp_(model_param, 1.0 - dynamic_decay) +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
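# Illustrative training-loop sketch for the EMA helper defined in this file:
# a minimal example in which the Linear model, the random data and the loop
# length are placeholders, not part of the patch above.
import paddle
from ppsci.utils.ema import ExponentialMovingAverage

net = paddle.nn.Linear(4, 1)
opt = paddle.optimizer.Adam(parameters=net.parameters())
ema = ExponentialMovingAverage(net, decay=0.99)

for _ in range(10):
    x = paddle.randn([8, 4])
    loss = net(x).mean()
    loss.backward()
    opt.step()
    opt.clear_grad()
    ema.update()        # shadow = decay * shadow + (1 - decay) * online

ema.apply_shadow()      # swap the averaged weights into `net` for evaluation
# ... evaluation would run here ...
ema.restore()           # put the online weights back; must follow apply_shadow()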
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import itertools +from typing import Dict +from typing import Optional + +import paddle +from paddle import nn + +__all__ = [ + "AveragedModel", + "ExponentialMovingAverage", + "StochasticWeightAverage", +] + + +class AveragedModel(nn.Layer): + """Base class for Averaged Model. + + Args: + model (nn.Layer): The model to be averaged. + decay (float): The decay rate for averaging. + """ + + def __init__(self, model: nn.Layer, decay: Optional[float] = None): + super().__init__() + self.model = model # As a quick reference to online model + self.decay = decay + + self.params_shadow: Dict[str, paddle.Tensor] = {} # ema param or buffer + self.params_backup: Dict[str, paddle.Tensor] = {} # used for apply and restore + for name, param_or_buffer in itertools.chain( + self.model.named_parameters(), self.model.named_buffers() + ): + self.params_shadow[name] = param_or_buffer.clone().detach() + + self.register_buffer("n_avg", paddle.to_tensor(0, "int64"), True) + + def _update_fn_( + self, + shadow_param: paddle.Tensor, + model_param: paddle.Tensor, + step: paddle.Tensor, + ): + raise NotImplementedError("AveragedModel._update_fn_ should be implemented.") + + def update(self): + for name, param_or_buffer in itertools.chain( + self.model.named_parameters(), self.model.named_buffers() + ): + if not param_or_buffer.stop_gradient: + assert ( + name in self.params_shadow + ), f"Parameter: {name} should be in params_shadow dict, but not found." + + # only update floating and complex data + if paddle.is_floating_point(param_or_buffer) or paddle.is_complex( + param_or_buffer + ): + with paddle.no_grad(): + self._update_fn_( + self.params_shadow[name], + param_or_buffer, + self.n_avg, + ) + self.n_avg += 1 + + def apply_shadow(self): + """Set averaged model parameters to online model.""" + for name, param_or_buffer in itertools.chain( + self.model.named_parameters(), self.model.named_buffers() + ): + if name in self.params_shadow: + stop_gradient = param_or_buffer.stop_gradient + with paddle.no_grad(): + self.params_backup[name] = paddle.assign(param_or_buffer) + paddle.assign(self.params_shadow[name], param_or_buffer) + param_or_buffer.stop_gradient = stop_gradient + + def restore(self): + """Restore online model parameters from backup parameter dict.""" + assert self.params_backup, ( + "params_backup should not be empty, may be caused by calling 'restore' " + "before 'apply_shadow'." 
+ ) + for name, param_or_buffer in itertools.chain( + self.model.named_parameters(), self.model.named_buffers() + ): + if name in self.params_backup: + assert name in self.params_shadow + stop_gradient = param_or_buffer.stop_gradient + with paddle.no_grad(): + paddle.assign(self.params_backup[name], param_or_buffer) + param_or_buffer.stop_gradient = stop_gradient + + self.params_backup = {} + + def set_state_dict(self, state_dict: Dict[str, paddle.Tensor]): + assert ( + "n_avg" in state_dict + ), "state_dict should contain 'n_avg' key, but not found." + self.n_avg.set_value(state_dict.pop("n_avg")) + self.params_shadow.update(state_dict) + + def state_dict(self) -> Dict[str, paddle.Tensor]: + return { + **self.params_shadow, + "n_avg": self.n_avg, + } + + +class ExponentialMovingAverage(AveragedModel): + r"""Implements the exponential moving average (EMA) of the model. + + All parameters are updated by the formula as below: + + $$ + \mathbf{\theta}_{EMA}^{t+1} = \alpha \mathbf{\theta}_{EMA}^{t} + (1 - \alpha) \mathbf{\theta}^{t} + $$ + + Where $\alpha$ is the decay rate, $\theta_{EMA}^{t}$ is the moving average parameters and $\theta^{t}$ is the online parameters at step $t$. + + Args: + model (nn.Layer): The model to be averaged. + decay (float): The decay rate for averaging. + """ + + def __init__(self, model: nn.Layer, decay: float = 0.9): + super().__init__(model, decay) + + def _update_fn_(self, shadow_param, model_param, step): + shadow_param.lerp_(model_param, 1.0 - self.decay) + + +class StochasticWeightAverage(AveragedModel): + r"""Implements the stochastic weight averaging (SWA) of the model. + + Stochastic Weight Averaging was proposed in [Averaging Weights Leads to Wider Optima and Better Generalization](https://arxiv.org/abs/1803.05407), + + All parameters are updated by the formula as below: + + $$ + \mathbf{\theta}_{SWA}^{t} = \frac{1}{t-t_0+1}\sum_{i=t_0}^t{\mathbf{\theta}^{i}} + $$ + + Where $\theta_{SWA}^{t}$ is the average parameters between step $t_0$ and $t$, $\theta^{i}$ is the online parameters at step $i$. + + Args: + model (nn.Layer): The model to be averaged. + """ + + def __init__(self, model: nn.Layer): + super().__init__(model, None) + self.n_avg += 1 # Set to 1 for model already initialized + + def _update_fn_(self, shadow_param, model_param, step): + dynamic_decay = step / (step + 1) + shadow_param.lerp_(model_param, 1.0 - dynamic_decay) diff --git a/ppsci/utils/expression.py b/ppsci/utils/expression.py index 6bfcddd214..34db3cb2a3 100644 --- a/ppsci/utils/expression.py +++ b/ppsci/utils/expression.py @@ -1,212 +1,212 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Tuple - -from paddle import jit -from paddle import nn -from paddle.framework import core - -if TYPE_CHECKING: - import paddle - from ppsci import constraint - from ppsci import validate - from ppsci import arch - -from ppsci.autodiff import clear - -__all__ = [ - "ExpressionSolver", -] - - -class ExpressionSolver(nn.Layer): - """Expression computing helper, which compute named result according to corresponding - function and related inputs. - - Examples: - >>> import ppsci - >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v"), 5, 128) - >>> expr_solver = ExpressionSolver() - """ - - nvtx_flag: bool # only for nsight analysis - - def __init__(self): - super().__init__() - - def forward(self, *args, **kwargs): - raise NotImplementedError( - "Use train_forward/eval_forward/visu_forward instead of forward." - ) - - @jit.to_static - def train_forward( - self, - expr_dicts: Tuple[Dict[str, Callable], ...], - input_dicts: Tuple[Dict[str, "paddle.Tensor"], ...], - model: arch.Arch, - constraint: Dict[str, "constraint.Constraint"], - label_dicts: Tuple[Dict[str, "paddle.Tensor"], ...], - weight_dicts: Tuple[Dict[str, "paddle.Tensor"], ...], - ) -> Tuple[Dict[str, "paddle.Tensor"], Dict[str, float]]: - """Forward computation for training, including model forward and equation - forward. - - Args: - expr_dicts (Tuple[Dict[str, Callable], ...]): Tuple of expression dicts. - input_dicts (Tuple[Dict[str, paddle.Tensor], ...]): Tuple of input dicts. - model (arch.Arch): NN model. - constraint (Dict[str, "constraint.Constraint"]): Constraint dict. - label_dicts (Tuple[Dict[str, paddle.Tensor], ...]): Tuple of label dicts. - weight_dicts (Tuple[Dict[str, paddle.Tensor], ...]): Tuple of weight dicts. - - Returns: - Tuple[Dict[str, "paddle.Tensor"], Dict[str, float]]: - all_losses: A loss dictionary containing the output terms of all constraints, - constraint_losses: The loss values of all constraints. 
- """ - losses_all: Dict[str, "paddle.Tensor"] = {} - losses_constraint: Dict[str, float] = {} - - for i, cst_name in enumerate(constraint): - cst_obj = constraint[cst_name] - - # model forward - if self.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_push(f"Constraint {cst_name}") - - output_dict = model(input_dicts[i]) - - # equation forward - data_dict = {k: v for k, v in input_dicts[i].items()} - data_dict.update(output_dict) - for name, expr in expr_dicts[i].items(): - output_dict[name] = expr(data_dict) - - # put field 'area' into output_dict - if "area" in input_dicts[i]: - output_dict["area"] = input_dicts[i]["area"] - - # clear differentiation cache - clear() - - # compute loss for each constraint according to its' own output, label and weight - losses: Dict[str, "paddle.Tensor"] = cst_obj.loss( - output_dict, - label_dicts[i], - weight_dicts[i], - ) - # update losses into 'losses_all' and 'losses_constraint' - # 'losses_all': Will be send to loss aggregator for further computing final loss(scalar) - # 'losses_constraint': Will be used in logging - losses_constraint[cst_name] = 0.0 - for key in losses: - losses_constraint[cst_name] += losses[key].item() - if key in losses_all: - losses_all[key] += losses[key] - else: - losses_all[key] = losses[key] - - if self.nvtx_flag: # only for nsight analysis - core.nvprof_nvtx_pop() - - return losses_all, losses_constraint - - @jit.to_static - def eval_forward( - self, - expr_dict: Dict[str, Callable], - input_dict: Dict[str, "paddle.Tensor"], - model: arch.Arch, - validator: "validate.Validator", - label_dict: Dict[str, "paddle.Tensor"], - weight_dict: Dict[str, "paddle.Tensor"], - ) -> Tuple[Dict[str, "paddle.Tensor"], Dict[str, "paddle.Tensor"]]: - """Forward computation for evaluation, including model forward and equation - forward. - - Args: - expr_dict (Dict[str, Callable]): Expression dict. - input_dict (Dict[str, paddle.Tensor]): Input dict. - model (arch.Arch): NN model. - validator (validate.Validator): Validator. - label_dict (Dict[str, paddle.Tensor]): Label dict. - weight_dict (Dict[str, paddle.Tensor]): Weight dict. - - Returns: - Tuple[Dict[str, paddle.Tensor], Dict[str, paddle.Tensor]]: Result dict and loss for - given validator. - """ - # model forward - output_dict = model(input_dict) - - # equation forward - data_dict = {k: v for k, v in input_dict.items()} - data_dict.update(output_dict) - for name, expr in expr_dict.items(): - output_dict[name] = expr(data_dict) - - # put field 'area' into output_dict - if "area" in input_dict: - output_dict["area"] = input_dict["area"] - - # clear differentiation cache - clear() - - # compute loss for each validator according to its' own output, label and weight - validator_losses = validator.loss( - output_dict, - label_dict, - weight_dict, - ) - return output_dict, validator_losses - - def visu_forward( - self, - expr_dict: Optional[Dict[str, Callable]], - input_dict: Dict[str, "paddle.Tensor"], - model: arch.Arch, - ) -> Dict[str, "paddle.Tensor"]: - """Forward computation for visualization, including model forward and equation - forward. - - Args: - expr_dict (Optional[Dict[str, Callable]]): Expression dict. - input_dict (Dict[str, paddle.Tensor]): Input dict. - model (arch.Arch): NN model. - - Returns: - Dict[str, paddle.Tensor]: Result dict for given expression dict. 
- """ - # model forward - output_dict = model(input_dict) - - if isinstance(expr_dict, dict): - # equation forward - data_dict = {k: v for k, v in input_dict.items()} - data_dict.update(output_dict) - for name, expr in expr_dict.items(): - output_dict[name] = expr(data_dict) - - # clear differentiation cache - clear() - - return output_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple + +from paddle import jit +from paddle import nn +from paddle.framework import core + +if TYPE_CHECKING: + import paddle + from ppsci import constraint + from ppsci import validate + from ppsci import arch + +from ppsci.autodiff import clear + +__all__ = [ + "ExpressionSolver", +] + + +class ExpressionSolver(nn.Layer): + """Expression computing helper, which compute named result according to corresponding + function and related inputs. + + Examples: + >>> import ppsci + >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v"), 5, 128) + >>> expr_solver = ExpressionSolver() + """ + + nvtx_flag: bool # only for nsight analysis + + def __init__(self): + super().__init__() + + def forward(self, *args, **kwargs): + raise NotImplementedError( + "Use train_forward/eval_forward/visu_forward instead of forward." + ) + + @jit.to_static + def train_forward( + self, + expr_dicts: Tuple[Dict[str, Callable], ...], + input_dicts: Tuple[Dict[str, "paddle.Tensor"], ...], + model: arch.Arch, + constraint: Dict[str, "constraint.Constraint"], + label_dicts: Tuple[Dict[str, "paddle.Tensor"], ...], + weight_dicts: Tuple[Dict[str, "paddle.Tensor"], ...], + ) -> Tuple[Dict[str, "paddle.Tensor"], Dict[str, float]]: + """Forward computation for training, including model forward and equation + forward. + + Args: + expr_dicts (Tuple[Dict[str, Callable], ...]): Tuple of expression dicts. + input_dicts (Tuple[Dict[str, paddle.Tensor], ...]): Tuple of input dicts. + model (arch.Arch): NN model. + constraint (Dict[str, "constraint.Constraint"]): Constraint dict. + label_dicts (Tuple[Dict[str, paddle.Tensor], ...]): Tuple of label dicts. + weight_dicts (Tuple[Dict[str, paddle.Tensor], ...]): Tuple of weight dicts. + + Returns: + Tuple[Dict[str, "paddle.Tensor"], Dict[str, float]]: + all_losses: A loss dictionary containing the output terms of all constraints, + constraint_losses: The loss values of all constraints. 
+ """ + losses_all: Dict[str, "paddle.Tensor"] = {} + losses_constraint: Dict[str, float] = {} + + for i, cst_name in enumerate(constraint): + cst_obj = constraint[cst_name] + + # model forward + if self.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_push(f"Constraint {cst_name}") + + output_dict = model(input_dicts[i]) + + # equation forward + data_dict = {k: v for k, v in input_dicts[i].items()} + data_dict.update(output_dict) + for name, expr in expr_dicts[i].items(): + output_dict[name] = expr(data_dict) + + # put field 'area' into output_dict + if "area" in input_dicts[i]: + output_dict["area"] = input_dicts[i]["area"] + + # clear differentiation cache + clear() + + # compute loss for each constraint according to its' own output, label and weight + losses: Dict[str, "paddle.Tensor"] = cst_obj.loss( + output_dict, + label_dicts[i], + weight_dicts[i], + ) + # update losses into 'losses_all' and 'losses_constraint' + # 'losses_all': Will be send to loss aggregator for further computing final loss(scalar) + # 'losses_constraint': Will be used in logging + losses_constraint[cst_name] = 0.0 + for key in losses: + losses_constraint[cst_name] += losses[key].item() + if key in losses_all: + losses_all[key] += losses[key] + else: + losses_all[key] = losses[key] + + if self.nvtx_flag: # only for nsight analysis + core.nvprof_nvtx_pop() + + return losses_all, losses_constraint + + @jit.to_static + def eval_forward( + self, + expr_dict: Dict[str, Callable], + input_dict: Dict[str, "paddle.Tensor"], + model: arch.Arch, + validator: "validate.Validator", + label_dict: Dict[str, "paddle.Tensor"], + weight_dict: Dict[str, "paddle.Tensor"], + ) -> Tuple[Dict[str, "paddle.Tensor"], Dict[str, "paddle.Tensor"]]: + """Forward computation for evaluation, including model forward and equation + forward. + + Args: + expr_dict (Dict[str, Callable]): Expression dict. + input_dict (Dict[str, paddle.Tensor]): Input dict. + model (arch.Arch): NN model. + validator (validate.Validator): Validator. + label_dict (Dict[str, paddle.Tensor]): Label dict. + weight_dict (Dict[str, paddle.Tensor]): Weight dict. + + Returns: + Tuple[Dict[str, paddle.Tensor], Dict[str, paddle.Tensor]]: Result dict and loss for + given validator. + """ + # model forward + output_dict = model(input_dict) + + # equation forward + data_dict = {k: v for k, v in input_dict.items()} + data_dict.update(output_dict) + for name, expr in expr_dict.items(): + output_dict[name] = expr(data_dict) + + # put field 'area' into output_dict + if "area" in input_dict: + output_dict["area"] = input_dict["area"] + + # clear differentiation cache + clear() + + # compute loss for each validator according to its' own output, label and weight + validator_losses = validator.loss( + output_dict, + label_dict, + weight_dict, + ) + return output_dict, validator_losses + + def visu_forward( + self, + expr_dict: Optional[Dict[str, Callable]], + input_dict: Dict[str, "paddle.Tensor"], + model: arch.Arch, + ) -> Dict[str, "paddle.Tensor"]: + """Forward computation for visualization, including model forward and equation + forward. + + Args: + expr_dict (Optional[Dict[str, Callable]]): Expression dict. + input_dict (Dict[str, paddle.Tensor]): Input dict. + model (arch.Arch): NN model. + + Returns: + Dict[str, paddle.Tensor]: Result dict for given expression dict. 
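# Illustrative sketch of visu_forward described above: a minimal example in
# which the input data and the derived "speed" expression are placeholders.
import paddle
import ppsci
from ppsci.utils.expression import ExpressionSolver

net = ppsci.arch.MLP(("x", "y"), ("u", "v"), 5, 128)
expr_solver = ExpressionSolver()

input_dict = {"x": paddle.rand([16, 1]), "y": paddle.rand([16, 1])}
expr_dict = {"speed": lambda d: (d["u"] ** 2 + d["v"] ** 2) ** 0.5}

out = expr_solver.visu_forward(expr_dict, input_dict, net)
# `out` holds the model outputs "u" and "v" plus the derived "speed" field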
+ """ + # model forward + output_dict = model(input_dict) + + if isinstance(expr_dict, dict): + # equation forward + data_dict = {k: v for k, v in input_dict.items()} + data_dict.update(output_dict) + for name, expr in expr_dict.items(): + output_dict[name] = expr(data_dict) + + # clear differentiation cache + clear() + + return output_dict diff --git a/ppsci/utils/initializer.py b/ppsci/utils/initializer.py index 222a2aad3d..842b0e24cc 100644 --- a/ppsci/utils/initializer.py +++ b/ppsci/utils/initializer.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -496,3 +497,503 @@ def glorot_normal_(tensor: paddle.Tensor) -> paddle.Tensor: trunc_normal_(tensor) tensor.set_value(tensor * stddev) return tensor +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The initialization method under this module is aligned with pytorch initialization. +If you need to use the initialization method of PaddlePaddle, please refer to +[paddle.nn.initializer](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/nn/initializer) + +This code is based on [torch.nn.init](https://github.com/pytorch/pytorch/blob/main/torch/nn/init.py) +Ths copyright of pytorch/pytorch is a BSD-style license, as found in the LICENSE file. +""" + +from __future__ import annotations + +import math + +import numpy as np +import paddle +from paddle import nn +from typing_extensions import Literal + +from ppsci.utils import logger + +__all__ = [ + "uniform_", + "normal_", + "trunc_normal_", + "glorot_normal_", + "constant_", + "ones_", + "zeros_", + "xavier_uniform_", + "xavier_normal_", + "kaiming_uniform_", + "kaiming_normal_", + "linear_init_", + "conv_init_", +] + + +def _no_grad_uniform_(tensor, a, b): + with paddle.no_grad(): + tensor.set_value( + paddle.uniform(shape=tensor.shape, dtype=tensor.dtype, min=a, max=b) + ) + return tensor + + +def _no_grad_normal_(tensor, mean=0.0, std=1.0): + with paddle.no_grad(): + tensor.set_value(paddle.normal(mean=mean, std=std, shape=tensor.shape)) + return tensor + + +def _no_grad_trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 + + if (mean < a - 2 * std) or (mean > b + 2 * std): + logger.warning( + f"mean({mean}) is more than 2 std({std}) from [a, b]([{a}, {b}]) in _no_grad_trunc_normal_. " + "The distribution of values may be incorrect." + ) + with paddle.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. 
+ # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + _tensor = paddle.uniform( + shape=tensor.shape, dtype=tensor.dtype, min=2 * l - 1, max=2 * u - 1 + ) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + _tensor.erfinv_() + + # Transform to proper mean, std + _tensor = paddle.multiply( + _tensor, paddle.to_tensor(std * math.sqrt(2.0), tensor.dtype) + ) + _tensor = paddle.add(_tensor, paddle.to_tensor(mean, tensor.dtype)) + + # Clamp to ensure it"s in the proper range + _tensor = paddle.clip(_tensor, min=a, max=b) + tensor.set_value(_tensor) + return tensor + + +def _no_grad_fill_(tensor, value=0.0): + with paddle.no_grad(): + tensor.set_value(paddle.full_like(tensor, value, dtype=tensor.dtype)) + return tensor + + +def uniform_(tensor: paddle.Tensor, a: float, b: float) -> paddle.Tensor: + """Modify tensor inplace using uniform_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + a (float): Min value. + b (float): Max value. + + Returns: + paddle.Tensor: Initialized tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.uniform_(param, -1, 1) + """ + return _no_grad_uniform_(tensor, a, b) + + +def normal_( + tensor: paddle.Tensor, mean: float = 0.0, std: float = 1.0 +) -> paddle.Tensor: + """Modify tensor inplace using normal_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + mean (float, optional): Mean value. Defaults to 0.0. + std (float, optional): Std value. Defaults to 1.0. + + Returns: + paddle.Tensor: Initialized tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.normal_(param, 0, 1) + """ + return _no_grad_normal_(tensor, mean, std) + + +def trunc_normal_( + tensor: paddle.Tensor, + mean: float = 0.0, + std: float = 1.0, + a: float = -2.0, + b: float = 2.0, +) -> paddle.Tensor: + """Modify tensor inplace using trunc_normal_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + mean (float, optional): The mean of the normal distribution. Defaults to 0.0. + std (float, optional): The standard deviation of the normal distribution. Defaults to 1.0. + a (float, optional): The minimum cutoff value. Defaults to -2.0. + b (float, optional): The maximum cutoff value. Defaults to 2.0. + + Returns: + paddle.Tensor: Initialized tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.trunc_normal_(param, 0.0, 1.0) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def constant_(tensor: paddle.Tensor, value: float = 0.0) -> paddle.Tensor: + """Modify tensor inplace using constant_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + value (float, optional): Value to fill tensor. Defaults to 0.0. + + Returns: + paddle.Tensor: Initialized tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.constant_(param, 2) + """ + return _no_grad_fill_(tensor, value) + + +def ones_(tensor: paddle.Tensor) -> paddle.Tensor: + """Modify tensor inplace using ones_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + + Returns: + paddle.Tensor: Initialized tensor. 
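As a quick sanity check on the inverse-CDF construction above, every value produced by trunc_normal_ should land inside [a, b]; a minimal sketch, with an arbitrary tensor shape:

import paddle
from ppsci.utils import initializer

w = paddle.empty((1024, 1024), "float32")
initializer.trunc_normal_(w, mean=0.0, std=1.0, a=-2.0, b=2.0)
# samples are clipped into [a, b] after the erfinv transform above
assert float(w.min()) >= -2.0 and float(w.max()) <= 2.0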
+ + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.ones_(param) + """ + return _no_grad_fill_(tensor, 1) + + +def zeros_(tensor: paddle.Tensor) -> paddle.Tensor: + """Modify tensor inplace using zeros_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + + Returns: + paddle.Tensor: Initialized tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.zeros_(param) + """ + return _no_grad_fill_(tensor, 0) + + +def _calculate_fan_in_and_fan_out(tensor, reverse=False): + """ + Calculate (fan_in, _fan_out) for tensor. + + Args: + tensor (paddle.Tensor): paddle.Tensor. + reverse (bool): Tensor data format order, False by default as [fout, fin, ...]. + e.g. : conv.weight [cout, cin, kh, kw] is False; linear.weight [cin, cout] + is True. + + Return: + Tuple[float, float]: (fan_in, fan_out). + """ + if tensor.ndim < 2: + raise ValueError( + f"tensor.ndim should be no less than 2, but got {tensor.ndim}." + ) + + if reverse: + num_input_fmaps, num_output_fmaps = tensor.shape[0], tensor.shape[1] + else: + num_input_fmaps, num_output_fmaps = tensor.shape[1], tensor.shape[0] + + receptive_field_size = 1 + if tensor.ndim > 2: + receptive_field_size = np.prod(tensor.shape[2:]) + + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps * receptive_field_size + + return fan_in, fan_out + + +def xavier_uniform_( + tensor: paddle.Tensor, gain: float = 1.0, reverse: bool = False +) -> paddle.Tensor: + """Modify tensor inplace using xavier_uniform_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + gain (float, optional): Hyperparameter. Defaults to 1.0. + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. + + Returns: + paddle.Tensor: Initialized tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.xavier_uniform_(param) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + k = math.sqrt(3.0) * std + return _no_grad_uniform_(tensor, -k, k) + + +def xavier_normal_( + tensor: paddle.Tensor, gain: float = 1.0, reverse: bool = False +) -> paddle.Tensor: + """Modify tensor inplace using xavier_normal_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + gain (float, optional): Hyperparameter. Defaults to 1.0. + reverse (bool, optional): Tensor data format order, False by + default as [fout, fin, ...]. Defaults to False. + + Returns: + paddle.Tensor: Initialized tensor. 
+ + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.xavier_normal_(param) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse=reverse) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + return _no_grad_normal_(tensor, 0, std) + + +# reference: https://pytorch.org/docs/stable/_modules/torch/nn/init.html +def _calculate_correct_fan(tensor, mode, reverse=False): + mode = mode.lower() + valid_modes = ["fan_in", "fan_out"] + if mode not in valid_modes: + raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}") + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor, reverse) + + return fan_in if mode == "fan_in" else fan_out + + +def _calculate_gain(nonlinearity, param=None): + linear_fns = [ + "linear", + "conv1d", + "conv2d", + "conv3d", + "conv_transpose1d", + "conv_transpose2d", + "conv_transpose3d", + ] + if nonlinearity in linear_fns or nonlinearity == "sigmoid": + return 1 + elif nonlinearity == "tanh": + return 5.0 / 3 + elif nonlinearity == "relu": + return math.sqrt(2.0) + elif nonlinearity == "leaky_relu": + if param is None: + negative_slope = 0.01 + elif ( + not isinstance(param, bool) + and isinstance(param, int) + or isinstance(param, float) + ): + # True/False are instances of int, hence check above + negative_slope = param + else: + raise ValueError(f"negative_slope {param} not a valid number") + return math.sqrt(2.0 / (1 + negative_slope**2)) + elif nonlinearity == "selu": + return 3.0 / 4 + else: + raise ValueError(f"Unsupported nonlinearity {nonlinearity}") + + +def kaiming_uniform_( + tensor: paddle.Tensor, + a: float = 0, + mode: Literal["fan_in", "fan_out"] = "fan_in", + nonlinearity: str = "leaky_relu", + reverse: bool = False, +) -> paddle.Tensor: + """Modify tensor inplace using kaiming_uniform method. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + a (float, optional): The negative slope of the rectifier used after this layer. + Defaults to 0. + mode (Literal["fan_in", "fan_out"], optional): + ["fan_in", "fan_out"]. Defaults to "fan_in". + nonlinearity (str, optional): Nonlinearity method name. Defaults to "leaky_relu". + reverse (bool, optional): Tensor data format order, False by default as + [fout, fin, ...].. Defaults to False. + + Returns: + paddle.Tensor: Initialized tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.kaiming_uniform_(param) + """ + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = _calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + k = math.sqrt(3.0) * std + return _no_grad_uniform_(tensor, -k, k) + + +def kaiming_normal_( + tensor: paddle.Tensor, + a: float = 0, + mode: Literal["fan_in", "fan_out"] = "fan_in", + nonlinearity: str = "leaky_relu", + reverse: bool = False, +) -> paddle.Tensor: + """Modify tensor inplace using kaiming_normal_. + + Args: + tensor (paddle.Tensor): Paddle Tensor. + a (float, optional): The negative slope of the rectifier used after this layer. + Defaults to 0. + mode (Literal["fan_in", "fan_out"], optional): Either + 'fan_in' (default) or 'fan_out'. Defaults to "fan_in". + nonlinearity (str, optional): Nonlinearity method name. Defaults to "leaky_relu". + reverse (bool, optional): Tensor data format order. Defaults to False. + + Returns: + paddle.Tensor: Initialized tensor. 
+ + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.kaiming_normal_(param) + """ + fan = _calculate_correct_fan(tensor, mode, reverse) + gain = _calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + return _no_grad_normal_(tensor, 0, std) + + +def linear_init_(module: nn.Layer) -> None: + """Initialize module's weight and bias as it is a linear layer. + + Args: + module (nn.Layer): Linear Layer to be initialized. + + Examples: + >>> import paddle + >>> import ppsci + >>> layer = paddle.nn.Linear(128, 256) + >>> ppsci.utils.initializer.linear_init_(layer) + """ + kaiming_uniform_(module.weight, a=math.sqrt(5)) + if module.bias is not None: + fan_in, _ = _calculate_fan_in_and_fan_out(module.weight, reverse=True) + bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 + uniform_(module.bias, -bound, bound) + + +def conv_init_(module: nn.Layer) -> None: + """Initialize module's weight and bias as it is a conv layer. + + Args: + module (nn.Layer): Convolution Layer to be initialized. + + Examples: + >>> import paddle + >>> import ppsci + >>> layer = paddle.nn.Conv2D(4, 16, 2) + >>> ppsci.utils.initializer.conv_init_(layer) + """ + kaiming_uniform_(module.weight, a=math.sqrt(5)) + if module.bias is not None: + fan_in, _ = _calculate_fan_in_and_fan_out(module.weight, reverse=False) + if fan_in != 0: + bound = 1 / math.sqrt(fan_in) + uniform_(module.bias, -bound, bound) + + +def glorot_normal_(tensor: paddle.Tensor) -> paddle.Tensor: + """Modify tensor inplace using jax-style glorot_normal. + + Args: + tensor (paddle.Tensor): Paddle Tensor/Paramter. + + Returns: + paddle.Tensor: Initialized tensor. + + Examples: + >>> import paddle + >>> import ppsci + >>> param = paddle.empty((128, 256), "float32") + >>> param = ppsci.utils.initializer.glorot_normal_(param) + """ + assert ( + tensor.ndim == 2 + ), f"glorot_normal_ only support 2D tensor now, but got ndim={tensor.ndim}" + fin, fout = tensor.shape + var = 2.0 / (fin + fout) + stddev = math.sqrt(var) * 0.87962566103423978 + trunc_normal_(tensor) + tensor.set_value(tensor * stddev) + return tensor +>>>>>>> Stashed changes diff --git a/ppsci/utils/reader.py b/ppsci/utils/reader.py index ef0fb8f191..e0d03852ee 100644 --- a/ppsci/utils/reader.py +++ b/ppsci/utils/reader.py @@ -1,266 +1,266 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
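The initializer helpers above already carry doctest examples; a common pattern, sketched here under the assumption of a plain paddle.nn.Sequential model, is to sweep them over a network's sub-layers:

import paddle
from ppsci.utils import initializer

net = paddle.nn.Sequential(
    paddle.nn.Linear(8, 64),
    paddle.nn.Tanh(),
    paddle.nn.Linear(64, 1),
)
for layer in net.sublayers():
    if isinstance(layer, paddle.nn.Linear):
        # kaiming-uniform weight plus fan-in scaled uniform bias, as in linear_init_ above
        initializer.linear_init_(layer)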
- -from __future__ import annotations - -import collections -import csv -import pickle -from typing import Dict -from typing import Optional -from typing import Tuple - -import meshio -import numpy as np -import paddle -import scipy.io as sio - -__all__ = [ - "load_csv_file", - "load_mat_file", - "load_npz_file", - "load_vtk_file", - "load_vtk_with_time_file", - "load_dat_file", -] - - -def load_csv_file( - file_path: str, - keys: Tuple[str, ...], - alias_dict: Optional[Dict[str, str]] = None, - delimiter: str = ",", - encoding: str = "utf-8", -) -> Dict[str, np.ndarray]: - """Load *.csv file and fetch data as given keys. - - Args: - file_path (str): CSV file path. - keys (Tuple[str, ...]): Required fetching keys. - alias_dict (Optional[Dict[str, str]]): Alias for keys, - i.e. {inner_key: outer_key}. Defaults to None. - encoding (str, optional): Encoding code when open file. Defaults to "utf-8". - - Returns: - Dict[str, np.ndarray]: Loaded data in dict. - """ - if alias_dict is None: - alias_dict = {} - - try: - # read all data from csv file - with open(file_path, "r", encoding=encoding) as csv_file: - reader = csv.DictReader(csv_file, delimiter=delimiter) - raw_data = collections.defaultdict(list) - for _, line_dict in enumerate(reader): - for key, value in line_dict.items(): - raw_data[key].append(value) - except FileNotFoundError as e: - raise e - - # convert to numpy array - data_dict = {} - for key in keys: - fetch_key = alias_dict[key] if key in alias_dict else key - if fetch_key not in raw_data: - raise KeyError(f"fetch_key({fetch_key}) do not exist in raw_data.") - data_dict[key] = np.asarray(raw_data[fetch_key]) - if not np.issubdtype(data_dict[key].dtype, np.integer): - data_dict[key] = data_dict[key].astype(paddle.get_default_dtype()) - data_dict[key] = data_dict[key].reshape([-1, 1]) - - return data_dict - - -def load_mat_file( - file_path: str, keys: Tuple[str, ...], alias_dict: Optional[Dict[str, str]] = None -) -> Dict[str, np.ndarray]: - """Load *.mat file and fetch data as given keys. - - Args: - file_path (str): Mat file path. - keys (Tuple[str, ...]): Required fetching keys. - alias_dict (Optional[Dict[str, str]]): Alias for keys, - i.e. {original_key: original_key}. Defaults to None. - - Returns: - Dict[str, np.ndarray]: Loaded data in dict. - """ - - if alias_dict is None: - alias_dict = {} - - try: - # read all data from mat file - raw_data = sio.loadmat(file_path) - except FileNotFoundError as e: - raise e - - # convert to numpy array - data_dict = {} - for key in keys: - fetch_key = alias_dict[key] if key in alias_dict else key - if fetch_key not in raw_data: - raise KeyError(f"fetch_key({fetch_key}) do not exist in raw_data.") - data_dict[key] = np.asarray(raw_data[fetch_key]) - if not np.issubdtype(data_dict[key].dtype, np.integer): - data_dict[key] = data_dict[key].astype(paddle.get_default_dtype()) - data_dict[key] = data_dict[key].reshape([-1, 1]) - - return data_dict - - -def load_npz_file( - file_path: str, keys: Tuple[str, ...], alias_dict: Optional[Dict[str, str]] = None -) -> Dict[str, np.ndarray]: - """Load *.npz file and fetch data as given keys. - - Args: - file_path (str): Npz file path. - keys (Tuple[str, ...]): Required fetching keys. - alias_dict (Optional[Dict[str, str]]): Alias for keys, - i.e. {original_key: original_key}. Defaults to None. - - Returns: - Dict[str, np.ndarray]: Loaded data in dict. 
- """ - - if alias_dict is None: - alias_dict = {} - - try: - # read all data from npz file - raw_data = np.load(file_path, allow_pickle=True) - except FileNotFoundError as e: - raise e - - # convert to numpy array - data_dict = {} - for key in keys: - fetch_key = alias_dict[key] if key in alias_dict else key - if fetch_key not in raw_data: - raise KeyError(f"fetch_key({fetch_key}) do not exist in raw_data.") - data_dict[key] = np.asarray(raw_data[fetch_key]) - if data_dict[key].dtype in (np.float16, np.float32, np.float64): - data_dict[key] = data_dict[key].astype(paddle.get_default_dtype()) - - return data_dict - - -def load_vtk_file( - filename_without_timeid: str, - time_step: float, - time_index: Tuple[int, ...], - input_keys: Tuple[str, ...], - label_keys: Optional[Tuple[str, ...]], -) -> Dict[str, np.ndarray]: - """Load coordinates and attached label from the *.vtu file. - - Args: - filename_without_timeid (str): File name without time id. - time_step (float): Physical time step. - time_index (Tuple[int, ...]): Physical time indexes. - input_keys (Tuple[str, ...]): Input coordinates name keys. - label_keys (Optional[Tuple[str, ...]]): Input label name keys. - - Returns: - Dict[str, np.ndarray]: Input coordinates dict, label coordinates dict - """ - input_dict = {var: [] for var in input_keys} - label_dict = {var: [] for var in label_keys} - for index in time_index: - file = filename_without_timeid + f"{index}.vtu" - mesh = meshio.read(file) - n = mesh.points.shape[0] - i = 0 - for key in input_dict: - if key == "t": - input_dict[key].append( - np.full((n, 1), index * time_step, paddle.get_default_dtype()) - ) - else: - input_dict[key].append( - mesh.points[:, i].reshape(n, 1).astype(paddle.get_default_dtype()) - ) - i += 1 - for i, key in enumerate(label_dict): - label_dict[key].append( - np.array(mesh.point_data[key], paddle.get_default_dtype()) - ) - for key in input_dict: - input_dict[key] = np.concatenate(input_dict[key]) - for key in label_dict: - label_dict[key] = np.concatenate(label_dict[key]) - - return input_dict, label_dict - - -def load_vtk_with_time_file(file: str) -> Dict[str, np.ndarray]: - """Temporary interface for points cloud, will be banished sooner. - - Args: - file (str): Input file name. - - Returns: - Dict[str, np.ndarray]: Input coordinates dict. - """ - mesh = meshio.read(file) - n = mesh.points.shape[0] - t = np.array(mesh.point_data["time"]) - x = mesh.points[:, 0].reshape(n, 1) - y = mesh.points[:, 1].reshape(n, 1) - z = mesh.points[:, 2].reshape(n, 1) - input_dict = {"t": t, "x": x, "y": y, "z": z} - return input_dict - - -def load_dat_file( - file_path: str, - keys: Tuple[str, ...] = None, - alias_dict: Optional[Dict[str, str]] = None, -) -> Dict[str, np.ndarray]: - """Load *.dat file and fetch data as given keys. - - Args: - file_path (str): Dat file path. - keys (Tuple[str, ...]): Required fetching keys. - alias_dict (Optional[Dict[str, str]]): Alias for keys, - i.e. {original_key: original_key}. Defaults to None. - - Returns: - Dict[str, np.ndarray]: Loaded data in dict. 
- """ - - if alias_dict is None: - alias_dict = {} - - try: - # read all data from .dat file - raw_data = pickle.load(open(file_path, "rb")) - except FileNotFoundError as e: - raise e - - # convert to numpy array - data_dict = {} - if keys is None: - keys = raw_data.keys() - for key in keys: - fetch_key = alias_dict[key] if key in alias_dict else key - if fetch_key not in raw_data: - raise KeyError(f"fetch_key({fetch_key}) do not exist in raw_data.") - data_dict[key] = np.asarray(raw_data[fetch_key]) - if data_dict[key].dtype in (np.float16, np.float32, np.float64): - data_dict[key] = data_dict[key].astype(paddle.get_default_dtype()) - - return data_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import collections +import csv +import pickle +from typing import Dict +from typing import Optional +from typing import Tuple + +import meshio +import numpy as np +import paddle +import scipy.io as sio + +__all__ = [ + "load_csv_file", + "load_mat_file", + "load_npz_file", + "load_vtk_file", + "load_vtk_with_time_file", + "load_dat_file", +] + + +def load_csv_file( + file_path: str, + keys: Tuple[str, ...], + alias_dict: Optional[Dict[str, str]] = None, + delimiter: str = ",", + encoding: str = "utf-8", +) -> Dict[str, np.ndarray]: + """Load *.csv file and fetch data as given keys. + + Args: + file_path (str): CSV file path. + keys (Tuple[str, ...]): Required fetching keys. + alias_dict (Optional[Dict[str, str]]): Alias for keys, + i.e. {inner_key: outer_key}. Defaults to None. + encoding (str, optional): Encoding code when open file. Defaults to "utf-8". + + Returns: + Dict[str, np.ndarray]: Loaded data in dict. + """ + if alias_dict is None: + alias_dict = {} + + try: + # read all data from csv file + with open(file_path, "r", encoding=encoding) as csv_file: + reader = csv.DictReader(csv_file, delimiter=delimiter) + raw_data = collections.defaultdict(list) + for _, line_dict in enumerate(reader): + for key, value in line_dict.items(): + raw_data[key].append(value) + except FileNotFoundError as e: + raise e + + # convert to numpy array + data_dict = {} + for key in keys: + fetch_key = alias_dict[key] if key in alias_dict else key + if fetch_key not in raw_data: + raise KeyError(f"fetch_key({fetch_key}) do not exist in raw_data.") + data_dict[key] = np.asarray(raw_data[fetch_key]) + if not np.issubdtype(data_dict[key].dtype, np.integer): + data_dict[key] = data_dict[key].astype(paddle.get_default_dtype()) + data_dict[key] = data_dict[key].reshape([-1, 1]) + + return data_dict + + +def load_mat_file( + file_path: str, keys: Tuple[str, ...], alias_dict: Optional[Dict[str, str]] = None +) -> Dict[str, np.ndarray]: + """Load *.mat file and fetch data as given keys. + + Args: + file_path (str): Mat file path. + keys (Tuple[str, ...]): Required fetching keys. + alias_dict (Optional[Dict[str, str]]): Alias for keys, + i.e. {original_key: original_key}. Defaults to None. 
+ + Returns: + Dict[str, np.ndarray]: Loaded data in dict. + """ + + if alias_dict is None: + alias_dict = {} + + try: + # read all data from mat file + raw_data = sio.loadmat(file_path) + except FileNotFoundError as e: + raise e + + # convert to numpy array + data_dict = {} + for key in keys: + fetch_key = alias_dict[key] if key in alias_dict else key + if fetch_key not in raw_data: + raise KeyError(f"fetch_key({fetch_key}) do not exist in raw_data.") + data_dict[key] = np.asarray(raw_data[fetch_key]) + if not np.issubdtype(data_dict[key].dtype, np.integer): + data_dict[key] = data_dict[key].astype(paddle.get_default_dtype()) + data_dict[key] = data_dict[key].reshape([-1, 1]) + + return data_dict + + +def load_npz_file( + file_path: str, keys: Tuple[str, ...], alias_dict: Optional[Dict[str, str]] = None +) -> Dict[str, np.ndarray]: + """Load *.npz file and fetch data as given keys. + + Args: + file_path (str): Npz file path. + keys (Tuple[str, ...]): Required fetching keys. + alias_dict (Optional[Dict[str, str]]): Alias for keys, + i.e. {original_key: original_key}. Defaults to None. + + Returns: + Dict[str, np.ndarray]: Loaded data in dict. + """ + + if alias_dict is None: + alias_dict = {} + + try: + # read all data from npz file + raw_data = np.load(file_path, allow_pickle=True) + except FileNotFoundError as e: + raise e + + # convert to numpy array + data_dict = {} + for key in keys: + fetch_key = alias_dict[key] if key in alias_dict else key + if fetch_key not in raw_data: + raise KeyError(f"fetch_key({fetch_key}) do not exist in raw_data.") + data_dict[key] = np.asarray(raw_data[fetch_key]) + if data_dict[key].dtype in (np.float16, np.float32, np.float64): + data_dict[key] = data_dict[key].astype(paddle.get_default_dtype()) + + return data_dict + + +def load_vtk_file( + filename_without_timeid: str, + time_step: float, + time_index: Tuple[int, ...], + input_keys: Tuple[str, ...], + label_keys: Optional[Tuple[str, ...]], +) -> Dict[str, np.ndarray]: + """Load coordinates and attached label from the *.vtu file. + + Args: + filename_without_timeid (str): File name without time id. + time_step (float): Physical time step. + time_index (Tuple[int, ...]): Physical time indexes. + input_keys (Tuple[str, ...]): Input coordinates name keys. + label_keys (Optional[Tuple[str, ...]]): Input label name keys. + + Returns: + Dict[str, np.ndarray]: Input coordinates dict, label coordinates dict + """ + input_dict = {var: [] for var in input_keys} + label_dict = {var: [] for var in label_keys} + for index in time_index: + file = filename_without_timeid + f"{index}.vtu" + mesh = meshio.read(file) + n = mesh.points.shape[0] + i = 0 + for key in input_dict: + if key == "t": + input_dict[key].append( + np.full((n, 1), index * time_step, paddle.get_default_dtype()) + ) + else: + input_dict[key].append( + mesh.points[:, i].reshape(n, 1).astype(paddle.get_default_dtype()) + ) + i += 1 + for i, key in enumerate(label_dict): + label_dict[key].append( + np.array(mesh.point_data[key], paddle.get_default_dtype()) + ) + for key in input_dict: + input_dict[key] = np.concatenate(input_dict[key]) + for key in label_dict: + label_dict[key] = np.concatenate(label_dict[key]) + + return input_dict, label_dict + + +def load_vtk_with_time_file(file: str) -> Dict[str, np.ndarray]: + """Temporary interface for points cloud, will be banished sooner. + + Args: + file (str): Input file name. + + Returns: + Dict[str, np.ndarray]: Input coordinates dict. 
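A minimal sketch of the alias mechanism used by load_csv_file above; "data.csv" and its column names are hypothetical:

from ppsci.utils import reader

data = reader.load_csv_file(
    "data.csv",                   # hypothetical file with columns "x" and "u_exact"
    keys=("x", "u"),
    alias_dict={"u": "u_exact"},  # expose column "u_exact" under the key "u"
)
# every field is cast to the default dtype and reshaped to [-1, 1]
print(data["x"].shape, data["u"].shape)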
+ """ + mesh = meshio.read(file) + n = mesh.points.shape[0] + t = np.array(mesh.point_data["time"]) + x = mesh.points[:, 0].reshape(n, 1) + y = mesh.points[:, 1].reshape(n, 1) + z = mesh.points[:, 2].reshape(n, 1) + input_dict = {"t": t, "x": x, "y": y, "z": z} + return input_dict + + +def load_dat_file( + file_path: str, + keys: Tuple[str, ...] = None, + alias_dict: Optional[Dict[str, str]] = None, +) -> Dict[str, np.ndarray]: + """Load *.dat file and fetch data as given keys. + + Args: + file_path (str): Dat file path. + keys (Tuple[str, ...]): Required fetching keys. + alias_dict (Optional[Dict[str, str]]): Alias for keys, + i.e. {original_key: original_key}. Defaults to None. + + Returns: + Dict[str, np.ndarray]: Loaded data in dict. + """ + + if alias_dict is None: + alias_dict = {} + + try: + # read all data from .dat file + raw_data = pickle.load(open(file_path, "rb")) + except FileNotFoundError as e: + raise e + + # convert to numpy array + data_dict = {} + if keys is None: + keys = raw_data.keys() + for key in keys: + fetch_key = alias_dict[key] if key in alias_dict else key + if fetch_key not in raw_data: + raise KeyError(f"fetch_key({fetch_key}) do not exist in raw_data.") + data_dict[key] = np.asarray(raw_data[fetch_key]) + if data_dict[key].dtype in (np.float16, np.float32, np.float64): + data_dict[key] = data_dict[key].astype(paddle.get_default_dtype()) + + return data_dict diff --git a/ppsci/utils/save_load.py b/ppsci/utils/save_load.py index e3a1160137..ffb9be211b 100644 --- a/ppsci/utils/save_load.py +++ b/ppsci/utils/save_load.py @@ -1,290 +1,290 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import os -from typing import TYPE_CHECKING -from typing import Any -from typing import Dict -from typing import Optional - -import paddle - -from ppsci.utils import download -from ppsci.utils import logger - -if TYPE_CHECKING: - from paddle import amp - from paddle import nn - from paddle import optimizer - - from ppsci import equation - from ppsci.loss import mtl - from ppsci.utils import ema - - -__all__ = [ - "load_checkpoint", - "save_checkpoint", - "load_pretrain", -] - - -def _load_pretrain_from_path( - path: str, - model: nn.Layer, - equation: Optional[Dict[str, equation.PDE]] = None, -): - """Load pretrained model from given path. - - Args: - path (str): File path of pretrained model, i.e. `/path/to/model.pdparams`. - model (nn.Layer): Model with parameters. - equation (Optional[Dict[str, equation.PDE]]): Equations. Defaults to None. - """ - if not (os.path.isdir(path) or os.path.exists(f"{path}.pdparams")): - raise FileNotFoundError( - f"Pretrained model path {path}.pdparams does not exists." 
- ) - - param_state_dict = paddle.load(f"{path}.pdparams") - model.set_state_dict(param_state_dict) - logger.message(f"Finish loading pretrained model from: {path}.pdparams") - if equation is not None: - if not os.path.exists(f"{path}.pdeqn"): - num_learnable_params = sum( - [len(eq.learnable_parameters) for eq in equation.values()] - ) - if num_learnable_params > 0: - logger.warning( - f"There are a total of {num_learnable_params} learnable parameters" - f" in the equation, but {path}.pdeqn not found." - ) - else: - equation_dict = paddle.load(f"{path}.pdeqn") - for name, _equation in equation.items(): - _equation.set_state_dict(equation_dict[name]) - logger.message( - f"Finish loading pretrained equation parameters from: {path}.pdeqn" - ) - - -def load_pretrain( - model: nn.Layer, - path: str, - equation: Optional[Dict[str, equation.PDE]] = None, -): - """ - Load pretrained model from given path or url. - - Args: - model (nn.Layer): Model with parameters. - path (str): File path or url of pretrained model, i.e. `/path/to/model.pdparams` - or `http://xxx.com/model.pdparams`. - equation (Optional[Dict[str, equation.PDE]]): Equations. Defaults to None. - - Examples: - >>> import ppsci - >>> from ppsci.utils import save_load - >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 9, 50, "tanh") - >>> save_load.load_pretrain( - ... model=model, - ... path="path/to/pretrain_model") # doctest: +SKIP - """ - if path.startswith("http"): - # download from path(url) and get its' physical path - eqn_path = path.replace(".pdparams", ".pdeqn", 1) - path = download.get_weights_path_from_url(path) - - # automatically download additional equation weights if avaiable - def is_url_accessible(url: str): - try: - import requests - - response = requests.head(url, timeout=5) - return response.status_code == requests.codes.ok - except requests.RequestException: - return False - except Exception: - return False - - if is_url_accessible(eqn_path): - download.get_weights_path_from_url(eqn_path) - - # remove ".pdparams" in suffix of path for convenient - if path.endswith(".pdparams"): - path = path[:-9] - _load_pretrain_from_path(path, model, equation) - - -def load_checkpoint( - path: str, - model: nn.Layer, - optimizer: optimizer.Optimizer, - grad_scaler: Optional[amp.GradScaler] = None, - equation: Optional[Dict[str, equation.PDE]] = None, - ema_model: Optional[ema.AveragedModel] = None, - aggregator: Optional[mtl.LossAggregator] = None, -) -> Dict[str, Any]: - """Load from checkpoint. - - Args: - path (str): Path for checkpoint. - model (nn.Layer): Model with parameters. - optimizer (optimizer.Optimizer): Optimizer for model. - grad_scaler (Optional[amp.GradScaler]): GradScaler for AMP. Defaults to None. - equation (Optional[Dict[str, equation.PDE]]): Equations. Defaults to None. - ema_model: Optional[ema.AveragedModel]: Average model. Defaults to None. - aggregator: Optional[mtl.LossAggregator]: Loss aggregator. Defaults to None. - - Returns: - Dict[str, Any]: Loaded metric information. 
- """ - if not os.path.exists(f"{path}.pdparams"): - raise FileNotFoundError(f"{path}.pdparams not exist.") - if not os.path.exists(f"{path}.pdopt"): - raise FileNotFoundError(f"{path}.pdopt not exist.") - if grad_scaler is not None and not os.path.exists(f"{path}.pdscaler"): - raise FileNotFoundError(f"{path}.scaler not exist.") - - # load state dict - param_dict = paddle.load(f"{path}.pdparams") - optim_dict = paddle.load(f"{path}.pdopt") - metric_dict = paddle.load(f"{path}.pdstates") - if grad_scaler is not None: - scaler_dict = paddle.load(f"{path}.pdscaler") - if equation is not None: - if not os.path.exists(f"{path}.pdeqn"): - logger.warning(f"{path}.pdeqn not found.") - equation_dict = None - else: - equation_dict = paddle.load(f"{path}.pdeqn") - - # set state dict - logger.message(f"* Loading model checkpoint from {path}.pdparams") - missing_keys, unexpected_keys = model.set_state_dict(param_dict) - if missing_keys: - logger.warning( - f"There are missing keys when loading checkpoint: {missing_keys}, " - "and corresponding parameters will be initialized by default." - ) - if unexpected_keys: - logger.warning( - f"There are redundant keys: {unexpected_keys}, " - "and corresponding weights will be ignored." - ) - - logger.message(f"* Loading optimizer checkpoint from {path}.pdopt") - optimizer.set_state_dict(optim_dict) - if grad_scaler is not None: - logger.message(f"* Loading grad scaler checkpoint from {path}.pdscaler") - grad_scaler.load_state_dict(scaler_dict) - if equation is not None and equation_dict is not None: - logger.message(f"* Loading equation checkpoint from {path}.pdeqn") - for name, _equation in equation.items(): - _equation.set_state_dict(equation_dict[name]) - - if ema_model: - logger.message(f"* Loading EMA checkpoint from {path}_ema.pdparams") - avg_param_dict = paddle.load(f"{path}_ema.pdparams") - ema_model.set_state_dict(avg_param_dict) - - if aggregator is not None and aggregator.should_persist: - logger.message(f"* Loading loss aggregator checkpoint from {path}.pdagg") - aggregator_dict = paddle.load(f"{path}.pdagg") - aggregator.set_state_dict(aggregator_dict) - - logger.message(f"Finish loading checkpoint from {path}") - return metric_dict - - -def save_checkpoint( - model: nn.Layer, - optimizer: Optional[optimizer.Optimizer], - metric: Dict[str, float], - grad_scaler: Optional[amp.GradScaler] = None, - output_dir: Optional[str] = None, - prefix: str = "model", - equation: Optional[Dict[str, equation.PDE]] = None, - print_log: bool = True, - ema_model: Optional[ema.AveragedModel] = None, - aggregator: Optional[mtl.LossAggregator] = None, -): - """ - Save checkpoint, including model params, optimizer params, metric information. - - Args: - model (nn.Layer): Model with parameters. - optimizer (Optional[optimizer.Optimizer]): Optimizer for model. - metric (Dict[str, float]): Metric information, such as {"RMSE": 0.1, "MAE": 0.2}. - grad_scaler (Optional[amp.GradScaler]): GradScaler for AMP. Defaults to None. - output_dir (Optional[str]): Directory for checkpoint storage. - prefix (str, optional): Prefix for storage. Defaults to "model". - equation (Optional[Dict[str, equation.PDE]]): Equations. Defaults to None. - print_log (bool, optional): Whether print saving log information, mainly for - keeping log tidy without duplicate 'Finish saving checkpoint ...' log strings. - Defaults to True. - ema_model: Optional[ema.AveragedModel]: Average model. Defaults to None. - aggregator: Optional[mtl.LossAggregator]: Loss aggregator. Defaults to None. 
- - Examples: - >>> import ppsci - >>> import paddle - >>> from ppsci.utils import save_load - >>> model = ppsci.arch.MLP(("x", "y", "z"), ("u", "v", "w"), 5, 64, "tanh") - >>> optimizer = ppsci.optimizer.Adam(0.001)(model) - >>> save_load.save_checkpoint(model, optimizer, {"RMSE": 0.1}, output_dir="path/to/output/dir") # doctest: +SKIP - """ - if paddle.distributed.get_rank() != 0: - return - - if output_dir is None: - logger.warning("output_dir is None, skip save_checkpoint") - return - - ckpt_dir = os.path.join(output_dir, "checkpoints") - ckpt_path = os.path.join(ckpt_dir, prefix) - os.makedirs(ckpt_dir, exist_ok=True) - - paddle.save(model.state_dict(), f"{ckpt_path}.pdparams") - if optimizer: - paddle.save(optimizer.state_dict(), f"{ckpt_path}.pdopt") - paddle.save(metric, f"{ckpt_path}.pdstates") - if grad_scaler is not None: - paddle.save(grad_scaler.state_dict(), f"{ckpt_path}.pdscaler") - if equation is not None: - num_learnable_params = sum( - [len(eq.learnable_parameters) for eq in equation.values()] - ) - if num_learnable_params > 0: - paddle.save( - {key: eq.state_dict() for key, eq in equation.items()}, - f"{ckpt_path}.pdeqn", - ) - - if ema_model: - paddle.save(ema_model.state_dict(), f"{ckpt_path}_ema.pdparams") - - if aggregator and aggregator.should_persist: - paddle.save(aggregator.state_dict(), f"{ckpt_path}.pdagg") - - if print_log: - log_str = f"Finish saving checkpoint to: {ckpt_path}" - if prefix == "latest": - log_str += ( - "(latest checkpoint will be saved every epoch as expected, " - "but this log will be printed only once for tidy logging)" - ) - logger.message(log_str) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +from typing import TYPE_CHECKING +from typing import Any +from typing import Dict +from typing import Optional + +import paddle + +from ppsci.utils import download +from ppsci.utils import logger + +if TYPE_CHECKING: + from paddle import amp + from paddle import nn + from paddle import optimizer + + from ppsci import equation + from ppsci.loss import mtl + from ppsci.utils import ema + + +__all__ = [ + "load_checkpoint", + "save_checkpoint", + "load_pretrain", +] + + +def _load_pretrain_from_path( + path: str, + model: nn.Layer, + equation: Optional[Dict[str, equation.PDE]] = None, +): + """Load pretrained model from given path. + + Args: + path (str): File path of pretrained model, i.e. `/path/to/model.pdparams`. + model (nn.Layer): Model with parameters. + equation (Optional[Dict[str, equation.PDE]]): Equations. Defaults to None. + """ + if not (os.path.isdir(path) or os.path.exists(f"{path}.pdparams")): + raise FileNotFoundError( + f"Pretrained model path {path}.pdparams does not exists." 
+ ) + + param_state_dict = paddle.load(f"{path}.pdparams") + model.set_state_dict(param_state_dict) + logger.message(f"Finish loading pretrained model from: {path}.pdparams") + if equation is not None: + if not os.path.exists(f"{path}.pdeqn"): + num_learnable_params = sum( + [len(eq.learnable_parameters) for eq in equation.values()] + ) + if num_learnable_params > 0: + logger.warning( + f"There are a total of {num_learnable_params} learnable parameters" + f" in the equation, but {path}.pdeqn not found." + ) + else: + equation_dict = paddle.load(f"{path}.pdeqn") + for name, _equation in equation.items(): + _equation.set_state_dict(equation_dict[name]) + logger.message( + f"Finish loading pretrained equation parameters from: {path}.pdeqn" + ) + + +def load_pretrain( + model: nn.Layer, + path: str, + equation: Optional[Dict[str, equation.PDE]] = None, +): + """ + Load pretrained model from given path or url. + + Args: + model (nn.Layer): Model with parameters. + path (str): File path or url of pretrained model, i.e. `/path/to/model.pdparams` + or `http://xxx.com/model.pdparams`. + equation (Optional[Dict[str, equation.PDE]]): Equations. Defaults to None. + + Examples: + >>> import ppsci + >>> from ppsci.utils import save_load + >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 9, 50, "tanh") + >>> save_load.load_pretrain( + ... model=model, + ... path="path/to/pretrain_model") # doctest: +SKIP + """ + if path.startswith("http"): + # download from path(url) and get its' physical path + eqn_path = path.replace(".pdparams", ".pdeqn", 1) + path = download.get_weights_path_from_url(path) + + # automatically download additional equation weights if avaiable + def is_url_accessible(url: str): + try: + import requests + + response = requests.head(url, timeout=5) + return response.status_code == requests.codes.ok + except requests.RequestException: + return False + except Exception: + return False + + if is_url_accessible(eqn_path): + download.get_weights_path_from_url(eqn_path) + + # remove ".pdparams" in suffix of path for convenient + if path.endswith(".pdparams"): + path = path[:-9] + _load_pretrain_from_path(path, model, equation) + + +def load_checkpoint( + path: str, + model: nn.Layer, + optimizer: optimizer.Optimizer, + grad_scaler: Optional[amp.GradScaler] = None, + equation: Optional[Dict[str, equation.PDE]] = None, + ema_model: Optional[ema.AveragedModel] = None, + aggregator: Optional[mtl.LossAggregator] = None, +) -> Dict[str, Any]: + """Load from checkpoint. + + Args: + path (str): Path for checkpoint. + model (nn.Layer): Model with parameters. + optimizer (optimizer.Optimizer): Optimizer for model. + grad_scaler (Optional[amp.GradScaler]): GradScaler for AMP. Defaults to None. + equation (Optional[Dict[str, equation.PDE]]): Equations. Defaults to None. + ema_model: Optional[ema.AveragedModel]: Average model. Defaults to None. + aggregator: Optional[mtl.LossAggregator]: Loss aggregator. Defaults to None. + + Returns: + Dict[str, Any]: Loaded metric information. 
+ """ + if not os.path.exists(f"{path}.pdparams"): + raise FileNotFoundError(f"{path}.pdparams not exist.") + if not os.path.exists(f"{path}.pdopt"): + raise FileNotFoundError(f"{path}.pdopt not exist.") + if grad_scaler is not None and not os.path.exists(f"{path}.pdscaler"): + raise FileNotFoundError(f"{path}.scaler not exist.") + + # load state dict + param_dict = paddle.load(f"{path}.pdparams") + optim_dict = paddle.load(f"{path}.pdopt") + metric_dict = paddle.load(f"{path}.pdstates") + if grad_scaler is not None: + scaler_dict = paddle.load(f"{path}.pdscaler") + if equation is not None: + if not os.path.exists(f"{path}.pdeqn"): + logger.warning(f"{path}.pdeqn not found.") + equation_dict = None + else: + equation_dict = paddle.load(f"{path}.pdeqn") + + # set state dict + logger.message(f"* Loading model checkpoint from {path}.pdparams") + missing_keys, unexpected_keys = model.set_state_dict(param_dict) + if missing_keys: + logger.warning( + f"There are missing keys when loading checkpoint: {missing_keys}, " + "and corresponding parameters will be initialized by default." + ) + if unexpected_keys: + logger.warning( + f"There are redundant keys: {unexpected_keys}, " + "and corresponding weights will be ignored." + ) + + logger.message(f"* Loading optimizer checkpoint from {path}.pdopt") + optimizer.set_state_dict(optim_dict) + if grad_scaler is not None: + logger.message(f"* Loading grad scaler checkpoint from {path}.pdscaler") + grad_scaler.load_state_dict(scaler_dict) + if equation is not None and equation_dict is not None: + logger.message(f"* Loading equation checkpoint from {path}.pdeqn") + for name, _equation in equation.items(): + _equation.set_state_dict(equation_dict[name]) + + if ema_model: + logger.message(f"* Loading EMA checkpoint from {path}_ema.pdparams") + avg_param_dict = paddle.load(f"{path}_ema.pdparams") + ema_model.set_state_dict(avg_param_dict) + + if aggregator is not None and aggregator.should_persist: + logger.message(f"* Loading loss aggregator checkpoint from {path}.pdagg") + aggregator_dict = paddle.load(f"{path}.pdagg") + aggregator.set_state_dict(aggregator_dict) + + logger.message(f"Finish loading checkpoint from {path}") + return metric_dict + + +def save_checkpoint( + model: nn.Layer, + optimizer: Optional[optimizer.Optimizer], + metric: Dict[str, float], + grad_scaler: Optional[amp.GradScaler] = None, + output_dir: Optional[str] = None, + prefix: str = "model", + equation: Optional[Dict[str, equation.PDE]] = None, + print_log: bool = True, + ema_model: Optional[ema.AveragedModel] = None, + aggregator: Optional[mtl.LossAggregator] = None, +): + """ + Save checkpoint, including model params, optimizer params, metric information. + + Args: + model (nn.Layer): Model with parameters. + optimizer (Optional[optimizer.Optimizer]): Optimizer for model. + metric (Dict[str, float]): Metric information, such as {"RMSE": 0.1, "MAE": 0.2}. + grad_scaler (Optional[amp.GradScaler]): GradScaler for AMP. Defaults to None. + output_dir (Optional[str]): Directory for checkpoint storage. + prefix (str, optional): Prefix for storage. Defaults to "model". + equation (Optional[Dict[str, equation.PDE]]): Equations. Defaults to None. + print_log (bool, optional): Whether print saving log information, mainly for + keeping log tidy without duplicate 'Finish saving checkpoint ...' log strings. + Defaults to True. + ema_model: Optional[ema.AveragedModel]: Average model. Defaults to None. + aggregator: Optional[mtl.LossAggregator]: Loss aggregator. Defaults to None. 
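A round-trip sketch for the two entry points above; the output directory, prefix, and metric values are illustrative, and the path handed to load_checkpoint omits the file extension, matching the "{path}.pdparams" convention in the code:

import ppsci
from ppsci.utils import save_load

model = ppsci.arch.MLP(("x", "y"), ("u",), 3, 32, "tanh")
optimizer = ppsci.optimizer.Adam(1e-3)(model)

# writes ./output/checkpoints/epoch_10.pdparams, .pdopt and .pdstates
save_load.save_checkpoint(
    model, optimizer, {"L2": 0.05}, output_dir="./output", prefix="epoch_10"
)

# restore the states and get the stored metric dict back
metric = save_load.load_checkpoint("./output/checkpoints/epoch_10", model, optimizer)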
+ + Examples: + >>> import ppsci + >>> import paddle + >>> from ppsci.utils import save_load + >>> model = ppsci.arch.MLP(("x", "y", "z"), ("u", "v", "w"), 5, 64, "tanh") + >>> optimizer = ppsci.optimizer.Adam(0.001)(model) + >>> save_load.save_checkpoint(model, optimizer, {"RMSE": 0.1}, output_dir="path/to/output/dir") # doctest: +SKIP + """ + if paddle.distributed.get_rank() != 0: + return + + if output_dir is None: + logger.warning("output_dir is None, skip save_checkpoint") + return + + ckpt_dir = os.path.join(output_dir, "checkpoints") + ckpt_path = os.path.join(ckpt_dir, prefix) + os.makedirs(ckpt_dir, exist_ok=True) + + paddle.save(model.state_dict(), f"{ckpt_path}.pdparams") + if optimizer: + paddle.save(optimizer.state_dict(), f"{ckpt_path}.pdopt") + paddle.save(metric, f"{ckpt_path}.pdstates") + if grad_scaler is not None: + paddle.save(grad_scaler.state_dict(), f"{ckpt_path}.pdscaler") + if equation is not None: + num_learnable_params = sum( + [len(eq.learnable_parameters) for eq in equation.values()] + ) + if num_learnable_params > 0: + paddle.save( + {key: eq.state_dict() for key, eq in equation.items()}, + f"{ckpt_path}.pdeqn", + ) + + if ema_model: + paddle.save(ema_model.state_dict(), f"{ckpt_path}_ema.pdparams") + + if aggregator and aggregator.should_persist: + paddle.save(aggregator.state_dict(), f"{ckpt_path}.pdagg") + + if print_log: + log_str = f"Finish saving checkpoint to: {ckpt_path}" + if prefix == "latest": + log_str += ( + "(latest checkpoint will be saved every epoch as expected, " + "but this log will be printed only once for tidy logging)" + ) + logger.message(log_str) diff --git a/ppsci/utils/symbolic.py b/ppsci/utils/symbolic.py index 3acea3652f..20c38d66f2 100644 --- a/ppsci/utils/symbolic.py +++ b/ppsci/utils/symbolic.py @@ -1,981 +1,981 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -Sympy to python function conversion module -""" - -from __future__ import annotations - -import functools -import os -from typing import Dict -from typing import List -from typing import Optional -from typing import Sequence -from typing import Tuple -from typing import Union - -import paddle -import sympy as sp -from paddle import nn -from typing_extensions import TypeAlias - -from ppsci import arch -from ppsci import equation -from ppsci.autodiff import hessian -from ppsci.autodiff import jacobian -from ppsci.utils import logger - -__all__ = [ - "lambdify", - "_cvt_to_key", -] - - -DATA_DICT: TypeAlias = Dict[str, paddle.Tensor] - -SYMPY_BUILTIN_FUNC: TypeAlias = Union[ - sp.sin, - sp.sinh, - sp.asin, - sp.cos, - sp.acos, - sp.cosh, - sp.tan, - sp.atan, - sp.atan2, - sp.acosh, - sp.asinh, - sp.tanh, - sp.atanh, - sp.erf, - sp.loggamma, - sp.exp, - sp.Pow, - sp.log, - sp.Max, - sp.Min, - sp.Abs, - sp.Heaviside, - sp.sign, - sp.ceiling, - sp.floor, - sp.Add, - sp.Mul, -] - -SYMPY_TO_PADDLE = { - sp.sin: paddle.sin, - sp.sinh: paddle.sinh, - sp.asin: paddle.asin, - sp.cos: paddle.cos, - sp.acos: paddle.acos, - sp.cosh: paddle.cosh, - sp.tan: paddle.tan, - sp.atan: paddle.atan, - sp.atan2: paddle.atan2, - sp.acosh: paddle.acosh, - sp.asinh: paddle.asinh, - sp.tanh: paddle.tanh, - sp.atanh: paddle.atanh, - sp.erf: paddle.erf, - sp.loggamma: paddle.lgamma, - sp.exp: paddle.exp, - sp.Pow: paddle.pow, - sp.log: paddle.log, - sp.Max: paddle.maximum, - sp.Min: paddle.minimum, - sp.Abs: paddle.abs, - sp.Heaviside: paddle.heaviside, - sp.sign: paddle.sign, - sp.ceiling: paddle.ceil, - sp.floor: paddle.floor, - # NOTE: sp.Add and sp.Mul is not included here for un-alignment with paddle - # and are implemented manually in 'OperatorNode._add_operator_func' and - # 'OperatorNode._mul_operator_func' -} - - -def _cvt_to_key(expr: sp.Basic) -> str: - """Convert sympy expression to a string key, mainly as retrieval key in dict. - - Args: - expr (sp.Basic): Sympy expression. - - Returns: - str: Converted string key. - """ - if isinstance(expr, sp.Function) and str(expr.func) == equation.DETACH_FUNC_NAME: - return f"{_cvt_to_key(expr.args[0])}_{equation.DETACH_FUNC_NAME}" - - if isinstance(expr, (sp.Symbol, sp.core.function.UndefinedFunction, sp.Function)): - # use name of custom function(e.g. "f") instead of itself(e.g. "f(x, y)") - # for simplicity. - if hasattr(expr, "name"): - return expr.name - else: - return str(expr) - elif isinstance(expr, sp.Derivative): - # convert "Derivative(u(x,y),(x,2),(y,2))" to "u__x__x__y__y" - expr_str = expr.args[0].name - for symbol, order in expr.args[1:]: - expr_str += f"__{symbol}" * order - return expr_str - else: - return str(expr) - - -class Node(nn.Layer): - """The base class of the node in expression tree. - - Args: - expr (sp.Basic): Sympy expression. - """ - - def __init__(self, expr: sp.Basic): - super().__init__() - self.expr = expr - self.key = _cvt_to_key(self.expr) - - def forward(self, **kwargs): - raise NotImplementedError("Node.forward is not implemented") - - def __str__(self): - return ( - f"{self.__class__.__name__}(expr: {self.expr}, " - f"expr_type: {type(self.expr)})" - ) - - def __repr__(self): - return f"{self.__class__.__name__}(expr: {self.expr})" - - -class DetachNode(Node): - """Class for detach operation in converted expression tree. - - Args: - expr (sp.Basic): Sympy expression. 
- """ - - def __init__(self, expr: sp.Basic): - super().__init__(expr) - self.child = _cvt_to_key(self.expr.args[0]) - - def forward(self, data_dict: DATA_DICT): - if self.key in data_dict: - return data_dict - - data_dict[self.key] = data_dict[self.child].detach() - return data_dict - - -class OperatorNode(Node): - """Class for operator node in converted expression tree. - - Args: - expr (SYMPY_BUILTIN_FUNC): Sympy expression. - """ - - def __init__( - self, - expr: SYMPY_BUILTIN_FUNC, - ): - super().__init__(expr) - # preprocess children's key instead of processing at run-time in forward - # which can reduce considerable overhead of time for calling "_cvt_to_key" - self.childs = [_cvt_to_key(arg) for arg in self.expr.args] - - if self.expr.func == sp.Add: - self._apply_func = self._add_operator_func - elif self.expr.func == sp.Mul: - self._apply_func = self._mul_operator_func - elif self.expr.func == sp.Heaviside: - self._apply_func = self._heaviside_operator_func - self._auxiliary_func = SYMPY_TO_PADDLE[sp.Heaviside] - self._auxiliary_func = functools.partial( - self._auxiliary_func, y=paddle.zeros([]) - ) - elif self.expr.func == sp.Min: - self._apply_func = self._minimum_operator_func - elif self.expr.func == sp.Max: - self._apply_func = self._maximum_operator_func - else: - self._apply_func = self._vanilla_operator_func - self._auxiliary_func = SYMPY_TO_PADDLE[self.expr.func] - - def forward(self, data_dict: DATA_DICT): - # use cache - if self.key in data_dict: - return data_dict - - return self._apply_func(data_dict) - - def _add_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: - data_dict[self.key] = data_dict[self.childs[0]] - for p in self.childs[1:]: - data_dict[self.key] += data_dict[p] - return data_dict - - def _mul_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: - data_dict[self.key] = data_dict[self.childs[0]] - for child in self.childs[1:]: - data_dict[self.key] *= data_dict[child] - return data_dict - - def _heaviside_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: - data_dict[self.key] = self._auxiliary_func(data_dict[self.childs[0]]) - return data_dict - - def _minimum_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: - data_dict[self.key] = paddle.minimum( - data_dict[self.childs[0]], data_dict[self.childs[1]] - ) - for i in range(2, len(self.childs)): - data_dict[self.key] = paddle.minimum( - data_dict[self.key], - data_dict[self.childs[i]], - ) - return data_dict - - def _maximum_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: - data_dict[self.key] = paddle.maximum( - data_dict[self.childs[0]], data_dict[self.childs[1]] - ) - for i in range(2, len(self.childs)): - data_dict[self.key] = paddle.maximum( - data_dict[self.key], - data_dict[self.childs[i]], - ) - return data_dict - - def _vanilla_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: - data_dict[self.key] = self._auxiliary_func( - *tuple(data_dict[child] for child in self.childs) - ) - return data_dict - - -class DerivativeNode(Node): - """Class for operator node in converted expression tree. - - Args: - expr (sp.Derivative): Sympy derivative expression. - create_graph (bool, optional): Whether to create the gradient graphs of - the computing process. When it is True, higher order derivatives are - supported to compute; when it is False, the gradient graphs of the - computing process would be discarded. Defaults to True. - retain_graph (Optional[bool]): Whether to retain the forward graph which - is used to calculate the gradient. 
When it is True, the graph would - be retained, in which way users can calculate backward twice for the - same graph. When it is False, the graph would be freed. Defaults to None, - which means it is equal to `create_graph`. - """ - - def __init__( - self, - expr: sp.Derivative, - create_graph: bool = True, - retain_graph: Optional[bool] = None, - ): - super().__init__(expr) - # preprocess children's key instead of processing at run-time in forward - # which can reduce considerable overhead of time for calling "_cvt_to_key" - self.childs = [_cvt_to_key(self.expr.args[0])] + [ - (_cvt_to_key(arg), int(order)) for (arg, order) in self.expr.args[1:] - ] - self.create_graph = create_graph - self.retain_graph = retain_graph - self._apply_func = self._derivate_operator_func - self.merged = False - - def forward(self, data_dict: DATA_DICT): - # use cache - if self.key in data_dict: - return data_dict - - return self._apply_func(data_dict) - - def _derivate_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: - # NOTE: Derivative of 'sdf' function will not be executed here, which is already - # generated in 'data_dict' during points sampling using discrete difference - # method(see also: ppsci/geometry/geometry.py: Geometry.sdf_derivatives), - # such as 'sdf__x', 'sdf__y'. - data_dict[self.key] = data_dict[self.childs[0]] - for child, order in self.childs[1:]: - if order & 1: - data_dict[self.key] = jacobian( - data_dict[self.key], - data_dict[child], - create_graph=self.create_graph, - retain_graph=self.retain_graph, - ) - order -= 1 - for _ in range(0, order, 2): - data_dict[self.key] = hessian( - data_dict[self.key], - data_dict[child], - create_graph=self.create_graph, - retain_graph=self.retain_graph, - ) - order -= 2 - return data_dict - - -class FusedDerivativeNode(nn.Layer): - """Class for fused DerivativeNode. - - Args: - f_x_tuples (List[Tuple[Union[sp.Function, sp.Derivative], sp.Symbol]]): - indicate all derivatives of a function in list of tuples. e.g. - [(func1, var1), (func1, var2), (func1, var3), ...]. - create_graph (bool, optional): Whether to create the gradient graphs of - the computing process. When it is True, higher order derivatives are - supported to compute; when it is False, the gradient graphs of the - computing process would be discarded. Defaults to True. - retain_graph (Optional[bool]): Whether to retain the forward graph which - is used to calculate the gradient. When it is True, the graph would - be retained, in which way users can calculate backward twice for the - same graph. When it is False, the graph would be freed. Defaults to None, - which means it is equal to `create_graph`. 
- """ - - def __init__( - self, - f_x_tuples: List[Tuple[Union[sp.Function, sp.Derivative], sp.Symbol]], - create_graph: bool = True, - retain_graph: Optional[bool] = None, - ): - super().__init__() - self.expr: List[sp.Derivative] = [f.diff(x) for f, x in f_x_tuples] - self.key: List[str] = [_cvt_to_key(expr) for expr in self.expr] - self.create_graph = create_graph - self.retain_graph = retain_graph - - # preprocess children's key instead of processing at run-time in forward - # which can reduce considerable overhead of time for calling "_cvt_to_key" - self.y_key: str = _cvt_to_key(f_x_tuples[0][0]) - self.childs: List[str] = [_cvt_to_key(x) for _, x in f_x_tuples] - self._apply_func = self._parallel_derivate_operator_func - - def forward(self, data_dict: DATA_DICT): - # use cache - if all([key in data_dict for key in self.key]): - return data_dict - - return self._apply_func(data_dict) - - def _parallel_derivate_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: - # NOTE: Derivative of 'sdf' function will not be executed here, which is already - # generated in 'data_dict' during points sampling using discrete difference - # method(see also: ppsci/geometry/geometry.py: Geometry.sdf_derivatives), - # such as 'sdf__x', 'sdf__y'. - y_data: paddle.Tensor = data_dict[self.y_key] - xs_data: List[paddle.Tensor] = [data_dict[x_key] for x_key in self.childs] - y_wrt_xs_grad: List[paddle.Tensor] = jacobian( - y_data, - xs_data, - create_graph=self.create_graph, - retain_graph=self.retain_graph, - ) - for i, key in enumerate(self.key): - data_dict[key] = y_wrt_xs_grad[i] - return data_dict - - def __str__(self): - return ( - f"{self.__class__.__name__}(expr: {self.expr}, " - f"expr_type: {type(self.expr)})" - ) - - def __repr__(self): - return f"{self.__class__.__name__}(expr: {self.expr})" - - -class LayerNode(Node): - """Class for layer node in converted expression tree. - - Args: - expr (sp.core.function.UndefinedFunction): Sympy expression. - model (arch.Arch): NN model for computing forward result in this node. - """ - - def __init__( - self, - expr: sp.core.function.UndefinedFunction, - model: arch.Arch, - ): - super().__init__(expr) - self.model = model - - def forward(self, data_dict: DATA_DICT) -> DATA_DICT: - # use cache - if self.key in data_dict: - return data_dict - - output_dict = self.model(data_dict) - data_dict.update(output_dict) - - return data_dict - - -class ConstantNode(Node): - """Class for constant variable node in converted expression tree. - - Args: - expr (Union[sp.Number, sp.NumberSymbol]): Number expression. - """ - - def __init__(self, expr: Union[sp.Number, sp.NumberSymbol]): - super().__init__(expr) - if ( - self.expr.is_Float - or self.expr.is_Integer - or self.expr.is_Boolean - or self.expr.is_Rational - ): - self.expr = float(self.expr) - else: - raise TypeError( - "expr({expr}) should be Float/Integer/Boolean/Rational, " - f"but got {type(self.expr)}" - ) - self.expr = paddle.to_tensor(self.expr) - - def forward(self, data_dict: DATA_DICT) -> DATA_DICT: - # use cache - if self.key in data_dict: - return data_dict - - data_dict[self.key] = self.expr - return data_dict - - def __str__(self): - return ( - f"{self.__class__.__name__}(expr: {float(self.expr)}, " - f"expr_type: {type(self.expr)})" - ) - - -class ParameterNode(Node): - """Class for constant variable node in converted expression tree. - - Args: - expr (sp.Symbol): Parameter expression. - parameter (paddle.framework.io.EagerParamBase): Parameter tensor. 
- """ - - def __init__(self, expr: sp.Symbol, parameter: paddle.framework.io.EagerParamBase): - super().__init__(expr) - self.parameter = parameter - - def forward(self, data_dict: DATA_DICT) -> DATA_DICT: - data_dict[self.key] = self.parameter - return data_dict - - -class ComposedNode(nn.Layer): - """ - Compose list of several callable objects together. - """ - - def __init__(self, callable_nodes: List[Node]): - super().__init__() - assert len(callable_nodes) - self.callable_nodes = nn.LayerList(callable_nodes) - - def forward(self, data_dict: DATA_DICT) -> paddle.Tensor: - # call all callable_nodes in order - for i, func in enumerate(self.callable_nodes): - data_dict = func(data_dict) - - # return result of last node(root node) for target - return data_dict[self.callable_nodes[-1].key] - - -def _post_traverse(cur_node: sp.Basic, nodes: List[sp.Basic]) -> List[sp.Basic]: - """Traverse sympy expression tree in post-order. - - Args: - cur_node (sp.Basic): Sympy expression of current node. - nodes (List[sp.Basic]): Node list storing all tree nodes in post-order. - - Returns: - List[sp.Basic]: Node list storing all tree nodes in post-order. - """ - # traverse into sub-nodes - if isinstance(cur_node, sp.Function): - for arg in cur_node.args: - nodes = _post_traverse(arg, nodes) - nodes.append(cur_node) - elif isinstance(cur_node, sp.Derivative): - nodes = _post_traverse(cur_node.args[0], nodes) - nodes.append(cur_node) - elif isinstance(cur_node, sp.Symbol): - nodes.append(cur_node) - return nodes - elif isinstance(cur_node, sp.Number): - nodes.append(cur_node) - else: - for arg in cur_node.args: - nodes = _post_traverse(arg, nodes) - nodes.append(cur_node) - return nodes - - -def _visualize_graph(nodes: List[sp.Basic], graph_filename: str): - try: - import pygraphviz - except ModuleNotFoundError: - raise ModuleNotFoundError( - "Please install pygraphviz by steps below:\n" - "1. apt-get install graphviz graphviz-dev\n" - "2. python -m pip install pygraphviz" - ) - - SYMPY_BUILTIN_NAME = { - sp.sin: "sin", - sp.sinh: "sinh", - sp.asin: "asin", - sp.cos: "cos", - sp.acos: "acos", - sp.cosh: "cosh", - sp.tan: "tan", - sp.atan: "atan", - sp.atan2: "atan2", - sp.acosh: "acosh", - sp.asinh: "asinh", - sp.tanh: "tanh", - sp.atanh: "atanh", - sp.erf: "erf", - sp.loggamma: "loggamma", - sp.exp: "exp", - sp.Pow: "Pow", - sp.log: "log", - sp.Max: "Max", - sp.Min: "Min", - sp.Abs: "Abs", - sp.Heaviside: "Heaviside", - sp.sign: "sign", - sp.ceiling: "ceiling", - sp.floor: "floor", - sp.Add: "Add", - sp.Mul: "Mul", - } - naming_counter = {k: 0 for k in SYMPY_BUILTIN_NAME} - - def get_operator_name(node: sp.Function): - ret = f"{SYMPY_BUILTIN_NAME[node.func]}_{naming_counter[node.func]}" - naming_counter[node.func] += 1 - return ret - - graph = pygraphviz.AGraph(directed=True, rankdir="TB") - C_FUNC = "#9196f1" # purple color function node - C_DATA = "#feb64d" # orange color for data node - C_EDGE = "#000000" # black color for edge - - def add_edge(u: str, v: str, u_color: str = C_DATA, v_color: str = C_DATA): - """Add an edge from `u` to `v`. - - Args: - u (str): Name of begin node u. - v (str): Name of end node v. - u_color (str, optional): Color of node u. Defaults to '#feb64d'. - v_color (str, optional): Color of node v. Defaults to '#feb64d'. 
- """ - graph.add_node(u, style="filled", shape="ellipse", color=u_color) - graph.add_node(v, style="filled", shape="ellipse", color=v_color) - graph.add_edge(u, v, color=C_EDGE, style="solid", penwidth=0.5, arrowsize=0.5) - - for node in nodes: - if isinstance(node, tuple(SYMPY_BUILTIN_NAME.keys())): - operator_str = get_operator_name(node) - for arg in node.args: - add_edge(_cvt_to_key(arg), operator_str, v_color=C_FUNC) - add_edge(operator_str, _cvt_to_key(node), u_color=C_FUNC) - elif isinstance(node, sp.Function): - for arg in node.args: - add_edge(_cvt_to_key(arg), str(node), v_color=C_FUNC) - add_edge(str(node), _cvt_to_key(node), u_color=C_FUNC) - elif isinstance(node, sp.Derivative): - add_edge(str(node), _cvt_to_key(node), u_color=C_FUNC) - add_edge(_cvt_to_key(node.args[0]), str(node), v_color=C_FUNC) - for arg in node.args[1:]: - add_edge(_cvt_to_key(arg[0]), str(node), v_color=C_FUNC) - - # export graph to image - graph.layout() - image_path = f"{graph_filename}.png" - dot_path = f"{graph_filename}.dot" - if len(os.path.dirname(image_path)): - os.makedirs(os.path.dirname(image_path), exist_ok=True) - graph.draw(image_path, prog="dot") - graph.write(dot_path) - logger.message( - f"Computational graph has been written to: {image_path} and {dot_path}, " - "which can be visualized at: https://dreampuf.github.io/GraphvizOnline/" - ) - - -def _fuse_derivative_nodes( - derivative_exprs: List[sp.Derivative], -) -> List[FusedDerivativeNode]: - """Merge derivative nodes and return in list of FusedDerivativeNode after merger. - - Args: - derivative_exprs (List[sp.Derivative]): Derivatives sympy expression of same - function, e.g. [Derivative(u(x,y), x), Derivative(u(x,y), y)] - - Returns: - List[FusedDerivativeNode]: List of FusedDerivativeNode converting from mergable - derivatives. - """ - - class DerivativeTrie: - """Trie for unrolling derivative.""" - - def __init__(self, expr: sp.Basic): - self.expr: sp.Basic = expr - self.next: Dict["sp.Symbol", "DerivativeTrie"] = {} - - # unroll derivative expressions into a trie structure - trie_root = DerivativeTrie(derivative_exprs[0].args[0]) - for derivative_expr in derivative_exprs: - cur_node = trie_root - for (child, order) in derivative_expr.args[1:]: - for _ in range(order): - if child not in cur_node.next: - cur_node.next[child] = DerivativeTrie(cur_node.expr.diff(child)) - cur_node = cur_node.next[child] - - def dfs_trie( - node: DerivativeTrie, fused_derivative_nodes: List[FusedDerivativeNode] - ) -> None: - if node.next: - fused_derivative_nodes.append( - FusedDerivativeNode( - [(node.expr, name) for name in node.next], - ) - ) - for child in node.next: - dfs_trie(node.next[child], fused_derivative_nodes) - - # walk on derivative trie in pre-order and log fusable nodes - fused_derivative_nodes: List[FusedDerivativeNode] = [] - dfs_trie(trie_root, fused_derivative_nodes) - - return fused_derivative_nodes - - -def lambdify( - expr: Union[sp.Basic, List[sp.Basic]], - models: Optional[Union[arch.Arch, Tuple[arch.Arch, ...]]] = None, - extra_parameters: Optional[Sequence[paddle.Tensor]] = None, - graph_filename: Optional[str] = None, - create_graph: bool = True, - retain_graph: Optional[bool] = None, - fuse_derivative: bool = False, -) -> Union[ComposedNode, List[ComposedNode]]: - """Convert sympy expression to callable function. - - Args: - expr (Union[sp.Basic, List[sp.Basic]]): Sympy expression(s) to be converted. - Will return callable functions in list if multiple expressions are given, - else return one single callable function. 
- models (Optional[Union[arch.Arch, Tuple[arch.Arch, ...]]]): Model(s) for - computing forward result in `LayerNode`. - extra_parameters (Optional[nn.ParameterList]): Extra learnable parameters. - Defaults to None. - graph_filename (Optional[str]): Save computational graph to `graph_filename.png` - for given `expr`, if `graph_filename` is not None and a valid string, - such as 'momentum_x'. Defaults to None. - create_graph (bool, optional): Whether to create the gradient graphs of - the computing process. When it is True, higher order derivatives are - supported to compute. When it is False, the gradient graphs of the - computing process would be discarded. Defaults to True. - retain_graph (Optional[bool]): Whether to retain the forward graph which - is used to calculate the gradient. When it is True, the graph would - be retained, in which way users can calculate backward twice for the - same graph. When it is False, the graph would be freed. Defaults to None, - which means it is equal to `create_graph`. - fuse_derivative (bool, optional): Whether to fuse the derivative nodes. - For example, if `expr` is 'Derivative(u, x) + Derivative(u, y)' - It will compute grad(u, x) + grad(u, y) if fuse_derivative=False, - else will compute sum(grad(u, [x, y])) if fuse_derivative=True as is more - efficient in backward-graph. Defaults to False, as it is experimental so not - enabled by default if used independently. - - Returns: - Union[ComposedNode, List[ComposedNode]]: Callable object(s) for computing expr - with necessary input(s) data in dict given. - - Examples: - >>> import paddle - >>> import ppsci - >>> import sympy as sp - - >>> a, b, c, x, y = sp.symbols("a b c x y") - >>> u = sp.Function("u")(x, y) - >>> v = sp.Function("v")(x, y) - >>> z = -a + b * (c ** 2) + u * v + 2.3 - - >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v"), 4, 16) - - >>> batch_size = 13 - >>> a_tensor = paddle.randn([batch_size, 1]) - >>> b_tensor = paddle.randn([batch_size, 1]) - >>> c_tensor = paddle.randn([batch_size, 1]) - >>> x_tensor = paddle.randn([batch_size, 1]) - >>> y_tensor = paddle.randn([batch_size, 1]) - - >>> model_output_dict = model({"x": x_tensor, "y": y_tensor}) - >>> u_tensor, v_tensor = model_output_dict["u"], model_output_dict["v"] - - >>> z_tensor_manually = ( - ... -a_tensor + b_tensor * (c_tensor ** 2) - ... + u_tensor * v_tensor + 2.3 - ... ) - >>> z_tensor_sympy = ppsci.lambdify(z, model)( - ... { - ... "a": a_tensor, - ... "b": b_tensor, - ... "c": c_tensor, - ... "x": x_tensor, - ... "y": y_tensor, - ... } - ... ) - - >>> paddle.allclose(z_tensor_manually, z_tensor_sympy).item() - True - """ - if not extra_parameters: - extra_parameters = () - - if isinstance(models, arch.ModelList): - models = tuple(models.model_list[i] for i in range(len(models.model_list))) - if not isinstance(models, (tuple, list)): - models = (models,) - - def _expr_to_callable_nodes( - single_expr: sp.Basic, graph_filename_: Optional[str] = None - ) -> List[Node]: - """Convert sympy expression to a sequence of nodes in topologic order. - - Args: - single_expr (sp.Basic): Single sympy expression, such as "a+b*c". - graph_filename_ (Optional[str]): Save computational graph to - `/path/to/graph_filename.png` for given `expr`, if `graph_filename` is not - None and a valid string, such as 'momentum_x'. Defaults to None. - - Returns: - List[Node]: Sequence of callable nodes. 
- """ - # NOTE: Those simplify methods may complicate given expr instead, so not use here - # simplify expression to reduce nodes in tree - # expr = sp.nsimplify(expr) - # expr = sp.expand(expr) - # expr = sp.simplify(expr) - - # remove 1.0 from sympy expression tree - single_expr = single_expr.subs(1.0, 1) - - # convert sympy expression tree to list of nodes in post-order - sympy_nodes: List[sp.Basic] = [] - sympy_nodes = _post_traverse(single_expr, sympy_nodes) - - # remove unnecessary symbol nodes already in input dict(except for parameter symbol) - _parameter_names = tuple(param.name for param in extra_parameters) - sympy_nodes = [ - node - for node in sympy_nodes - if (not node.is_Symbol) or (_cvt_to_key(node) in _parameter_names) - ] - - # remove duplicated node(s) with topological order kept - sympy_nodes = list(dict.fromkeys(sympy_nodes)) - - # convert sympy node to callable node - callable_nodes = [] - for i, node in enumerate(sympy_nodes): - if isinstance( - node, tuple(SYMPY_TO_PADDLE.keys()) + (sp.Add, sp.Mul, sp.Derivative) - ): - if isinstance(node, sp.Derivative): - callable_nodes.append( - DerivativeNode(node, create_graph, retain_graph) - ) - else: - callable_nodes.append(OperatorNode(node)) - elif isinstance(node, sp.Function): - if str(node.func) == equation.DETACH_FUNC_NAME: - callable_nodes.append(DetachNode(node)) - logger.debug(f"Detected detach node {node}") - else: - match_index = None - for j, model in enumerate(models): - if str(node.func) in model.output_keys: - callable_nodes.append( - LayerNode( - node, - model, - ) - ) - if match_index is not None: - raise ValueError( - f"Name of function: '{node}' should be unique along given" - f" models, but got same output_key: '{str(node.func)}' " - f"in given models[{match_index}] and models[{j}]." - ) - match_index = j - # NOTE: Skip 'sdf' function, which should be already generated in - # given data_dict - if match_index is None and str(node.func) != "sdf": - raise ValueError( - f"Node {node} can not match any model in given model(s)." - ) - elif node.is_Number or node.is_NumberSymbol: - callable_nodes.append(ConstantNode(node)) - elif isinstance(node, sp.Symbol): - callable_nodes.append( - ParameterNode( - node, - *[ - param - for param in extra_parameters - if param.name == node.name - ], - ) - ) - else: - raise NotImplementedError( - f"The node {node} is not supported in lambdify." - ) - - # NOTE: visualize computational graph using 'pygraphviz' - if isinstance(graph_filename, str): - _visualize_graph(sympy_nodes, os.path.join(graph_filename, graph_filename_)) - - return callable_nodes - - if isinstance(expr, sp.Basic): - callable_nodes_group = [_expr_to_callable_nodes(expr, "expr")] - else: - callable_nodes_group = [ - _expr_to_callable_nodes(expr_i, f"expr_{i}") - for i, expr_i in enumerate(expr) - ] - - # [Optional] Fused derivatives nodes that with same function to be differentiated - while fuse_derivative: - candidate_pos: List[Tuple[int, int]] = [] # [(group_id, node_id), ...] 
- - # use 4-nested for-loop to find all potential mergable derivative nodes - for i in range(len(callable_nodes_group)): - for j in range(len(callable_nodes_group[i])): - # skip non-derivative node - if not isinstance(callable_nodes_group[i][j], DerivativeNode): - continue - # skip sdf function since it is always already given in data_dict - if callable_nodes_group[i][j].expr.args[0].name == "sdf": - continue - # skip merged node - if callable_nodes_group[i][j].merged: - continue - - candidate_pos = [[i, j]] - for ii in range(len(callable_nodes_group)): - for jj in range(len(callable_nodes_group[ii])): - # skip non-derivative node - if not isinstance(callable_nodes_group[ii][jj], DerivativeNode): - continue - - # skip same node - if i == ii and j == jj: - continue - # skip merged node - if callable_nodes_group[ii][jj].merged: - continue - - # has same function item - if ( - callable_nodes_group[i][j].expr.args[0] - == callable_nodes_group[ii][jj].expr.args[0] - ): - candidate_pos.append([ii, jj]) - - if len(candidate_pos) > 1: - break - if len(candidate_pos) > 1: - break - - # merge all candidate nodes into one or more FusedDerivativeNode node - if len(candidate_pos) > 1: - fused_node_seq = _fuse_derivative_nodes( - [callable_nodes_group[gid][nid].expr for gid, nid in candidate_pos] - ) - assert isinstance( - fused_node_seq, list - ), "'fused_node_seq' should be list of 'FusedDerivativeNode'" - gid0, nid0 = candidate_pos[0] - logger.debug( - f"Fused {len(candidate_pos)} derivatives nodes: " - f"{[callable_nodes_group[i][j].expr for i, j in candidate_pos]} into" - f" {len(fused_node_seq)} fuse node sequence: {fused_node_seq} at position: ([{gid0}][{nid0}])" - ) - - # mark merged node - for i, (gid, nid) in enumerate(candidate_pos): - assert isinstance(callable_nodes_group[gid][nid], DerivativeNode) - callable_nodes_group[gid][nid].merged = True - - # replace first mergable node with fused node sequence(packed in list) - # then mask the rest merged node to None(except [gid0, nid0]) - for i, (gid, nid) in enumerate(candidate_pos[1:]): - # keep the end node of each group to avoid generating empty callable - # node sequence, this will not effect performance since cache strategy - # in Node.forward - if nid != len(callable_nodes_group[gid]) - 1: - callable_nodes_group[gid][nid] = None - - if nid0 == len(callable_nodes_group[gid0]) - 1: - callable_nodes_group[gid0].insert(nid0, fused_node_seq) - else: - callable_nodes_group[gid0][nid0] = fused_node_seq - - # re-organize callable_nodes_group, remove None element and unpack list - for i in range(len(callable_nodes_group)): - tmp = [] - for j in range(len(callable_nodes_group[i])): - if isinstance( - callable_nodes_group[i][j], (Node, FusedDerivativeNode) - ): - tmp.append(callable_nodes_group[i][j]) - elif isinstance(callable_nodes_group[i][j], list) and isinstance( - callable_nodes_group[i][j][0], FusedDerivativeNode - ): - tmp.extend(callable_nodes_group[i][j]) - else: - assert ( - callable_nodes_group[i][j] is None - ), f"Unexpected element: {callable_nodes_group[i][j]}" - callable_nodes_group[i] = tmp - else: - # exit while loop if no more fused - break - - # Compose callable nodes into one callable object - if isinstance(expr, sp.Basic): - return ComposedNode(callable_nodes_group[0]) - else: - return [ComposedNode(callable_nodes) for callable_nodes in callable_nodes_group] +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Sympy to python function conversion module +""" + +from __future__ import annotations + +import functools +import os +from typing import Dict +from typing import List +from typing import Optional +from typing import Sequence +from typing import Tuple +from typing import Union + +import paddle +import sympy as sp +from paddle import nn +from typing_extensions import TypeAlias + +from ppsci import arch +from ppsci import equation +from ppsci.autodiff import hessian +from ppsci.autodiff import jacobian +from ppsci.utils import logger + +__all__ = [ + "lambdify", + "_cvt_to_key", +] + + +DATA_DICT: TypeAlias = Dict[str, paddle.Tensor] + +SYMPY_BUILTIN_FUNC: TypeAlias = Union[ + sp.sin, + sp.sinh, + sp.asin, + sp.cos, + sp.acos, + sp.cosh, + sp.tan, + sp.atan, + sp.atan2, + sp.acosh, + sp.asinh, + sp.tanh, + sp.atanh, + sp.erf, + sp.loggamma, + sp.exp, + sp.Pow, + sp.log, + sp.Max, + sp.Min, + sp.Abs, + sp.Heaviside, + sp.sign, + sp.ceiling, + sp.floor, + sp.Add, + sp.Mul, +] + +SYMPY_TO_PADDLE = { + sp.sin: paddle.sin, + sp.sinh: paddle.sinh, + sp.asin: paddle.asin, + sp.cos: paddle.cos, + sp.acos: paddle.acos, + sp.cosh: paddle.cosh, + sp.tan: paddle.tan, + sp.atan: paddle.atan, + sp.atan2: paddle.atan2, + sp.acosh: paddle.acosh, + sp.asinh: paddle.asinh, + sp.tanh: paddle.tanh, + sp.atanh: paddle.atanh, + sp.erf: paddle.erf, + sp.loggamma: paddle.lgamma, + sp.exp: paddle.exp, + sp.Pow: paddle.pow, + sp.log: paddle.log, + sp.Max: paddle.maximum, + sp.Min: paddle.minimum, + sp.Abs: paddle.abs, + sp.Heaviside: paddle.heaviside, + sp.sign: paddle.sign, + sp.ceiling: paddle.ceil, + sp.floor: paddle.floor, + # NOTE: sp.Add and sp.Mul is not included here for un-alignment with paddle + # and are implemented manually in 'OperatorNode._add_operator_func' and + # 'OperatorNode._mul_operator_func' +} + + +def _cvt_to_key(expr: sp.Basic) -> str: + """Convert sympy expression to a string key, mainly as retrieval key in dict. + + Args: + expr (sp.Basic): Sympy expression. + + Returns: + str: Converted string key. + """ + if isinstance(expr, sp.Function) and str(expr.func) == equation.DETACH_FUNC_NAME: + return f"{_cvt_to_key(expr.args[0])}_{equation.DETACH_FUNC_NAME}" + + if isinstance(expr, (sp.Symbol, sp.core.function.UndefinedFunction, sp.Function)): + # use name of custom function(e.g. "f") instead of itself(e.g. "f(x, y)") + # for simplicity. + if hasattr(expr, "name"): + return expr.name + else: + return str(expr) + elif isinstance(expr, sp.Derivative): + # convert "Derivative(u(x,y),(x,2),(y,2))" to "u__x__x__y__y" + expr_str = expr.args[0].name + for symbol, order in expr.args[1:]: + expr_str += f"__{symbol}" * order + return expr_str + else: + return str(expr) + + +class Node(nn.Layer): + """The base class of the node in expression tree. + + Args: + expr (sp.Basic): Sympy expression. 
+ """ + + def __init__(self, expr: sp.Basic): + super().__init__() + self.expr = expr + self.key = _cvt_to_key(self.expr) + + def forward(self, **kwargs): + raise NotImplementedError("Node.forward is not implemented") + + def __str__(self): + return ( + f"{self.__class__.__name__}(expr: {self.expr}, " + f"expr_type: {type(self.expr)})" + ) + + def __repr__(self): + return f"{self.__class__.__name__}(expr: {self.expr})" + + +class DetachNode(Node): + """Class for detach operation in converted expression tree. + + Args: + expr (sp.Basic): Sympy expression. + """ + + def __init__(self, expr: sp.Basic): + super().__init__(expr) + self.child = _cvt_to_key(self.expr.args[0]) + + def forward(self, data_dict: DATA_DICT): + if self.key in data_dict: + return data_dict + + data_dict[self.key] = data_dict[self.child].detach() + return data_dict + + +class OperatorNode(Node): + """Class for operator node in converted expression tree. + + Args: + expr (SYMPY_BUILTIN_FUNC): Sympy expression. + """ + + def __init__( + self, + expr: SYMPY_BUILTIN_FUNC, + ): + super().__init__(expr) + # preprocess children's key instead of processing at run-time in forward + # which can reduce considerable overhead of time for calling "_cvt_to_key" + self.childs = [_cvt_to_key(arg) for arg in self.expr.args] + + if self.expr.func == sp.Add: + self._apply_func = self._add_operator_func + elif self.expr.func == sp.Mul: + self._apply_func = self._mul_operator_func + elif self.expr.func == sp.Heaviside: + self._apply_func = self._heaviside_operator_func + self._auxiliary_func = SYMPY_TO_PADDLE[sp.Heaviside] + self._auxiliary_func = functools.partial( + self._auxiliary_func, y=paddle.zeros([]) + ) + elif self.expr.func == sp.Min: + self._apply_func = self._minimum_operator_func + elif self.expr.func == sp.Max: + self._apply_func = self._maximum_operator_func + else: + self._apply_func = self._vanilla_operator_func + self._auxiliary_func = SYMPY_TO_PADDLE[self.expr.func] + + def forward(self, data_dict: DATA_DICT): + # use cache + if self.key in data_dict: + return data_dict + + return self._apply_func(data_dict) + + def _add_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: + data_dict[self.key] = data_dict[self.childs[0]] + for p in self.childs[1:]: + data_dict[self.key] += data_dict[p] + return data_dict + + def _mul_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: + data_dict[self.key] = data_dict[self.childs[0]] + for child in self.childs[1:]: + data_dict[self.key] *= data_dict[child] + return data_dict + + def _heaviside_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: + data_dict[self.key] = self._auxiliary_func(data_dict[self.childs[0]]) + return data_dict + + def _minimum_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: + data_dict[self.key] = paddle.minimum( + data_dict[self.childs[0]], data_dict[self.childs[1]] + ) + for i in range(2, len(self.childs)): + data_dict[self.key] = paddle.minimum( + data_dict[self.key], + data_dict[self.childs[i]], + ) + return data_dict + + def _maximum_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: + data_dict[self.key] = paddle.maximum( + data_dict[self.childs[0]], data_dict[self.childs[1]] + ) + for i in range(2, len(self.childs)): + data_dict[self.key] = paddle.maximum( + data_dict[self.key], + data_dict[self.childs[i]], + ) + return data_dict + + def _vanilla_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: + data_dict[self.key] = self._auxiliary_func( + *tuple(data_dict[child] for child in self.childs) + ) + return data_dict + + +class 
DerivativeNode(Node): + """Class for operator node in converted expression tree. + + Args: + expr (sp.Derivative): Sympy derivative expression. + create_graph (bool, optional): Whether to create the gradient graphs of + the computing process. When it is True, higher order derivatives are + supported to compute; when it is False, the gradient graphs of the + computing process would be discarded. Defaults to True. + retain_graph (Optional[bool]): Whether to retain the forward graph which + is used to calculate the gradient. When it is True, the graph would + be retained, in which way users can calculate backward twice for the + same graph. When it is False, the graph would be freed. Defaults to None, + which means it is equal to `create_graph`. + """ + + def __init__( + self, + expr: sp.Derivative, + create_graph: bool = True, + retain_graph: Optional[bool] = None, + ): + super().__init__(expr) + # preprocess children's key instead of processing at run-time in forward + # which can reduce considerable overhead of time for calling "_cvt_to_key" + self.childs = [_cvt_to_key(self.expr.args[0])] + [ + (_cvt_to_key(arg), int(order)) for (arg, order) in self.expr.args[1:] + ] + self.create_graph = create_graph + self.retain_graph = retain_graph + self._apply_func = self._derivate_operator_func + self.merged = False + + def forward(self, data_dict: DATA_DICT): + # use cache + if self.key in data_dict: + return data_dict + + return self._apply_func(data_dict) + + def _derivate_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: + # NOTE: Derivative of 'sdf' function will not be executed here, which is already + # generated in 'data_dict' during points sampling using discrete difference + # method(see also: ppsci/geometry/geometry.py: Geometry.sdf_derivatives), + # such as 'sdf__x', 'sdf__y'. + data_dict[self.key] = data_dict[self.childs[0]] + for child, order in self.childs[1:]: + if order & 1: + data_dict[self.key] = jacobian( + data_dict[self.key], + data_dict[child], + create_graph=self.create_graph, + retain_graph=self.retain_graph, + ) + order -= 1 + for _ in range(0, order, 2): + data_dict[self.key] = hessian( + data_dict[self.key], + data_dict[child], + create_graph=self.create_graph, + retain_graph=self.retain_graph, + ) + order -= 2 + return data_dict + + +class FusedDerivativeNode(nn.Layer): + """Class for fused DerivativeNode. + + Args: + f_x_tuples (List[Tuple[Union[sp.Function, sp.Derivative], sp.Symbol]]): + indicate all derivatives of a function in list of tuples. e.g. + [(func1, var1), (func1, var2), (func1, var3), ...]. + create_graph (bool, optional): Whether to create the gradient graphs of + the computing process. When it is True, higher order derivatives are + supported to compute; when it is False, the gradient graphs of the + computing process would be discarded. Defaults to True. + retain_graph (Optional[bool]): Whether to retain the forward graph which + is used to calculate the gradient. When it is True, the graph would + be retained, in which way users can calculate backward twice for the + same graph. When it is False, the graph would be freed. Defaults to None, + which means it is equal to `create_graph`. 
+ """ + + def __init__( + self, + f_x_tuples: List[Tuple[Union[sp.Function, sp.Derivative], sp.Symbol]], + create_graph: bool = True, + retain_graph: Optional[bool] = None, + ): + super().__init__() + self.expr: List[sp.Derivative] = [f.diff(x) for f, x in f_x_tuples] + self.key: List[str] = [_cvt_to_key(expr) for expr in self.expr] + self.create_graph = create_graph + self.retain_graph = retain_graph + + # preprocess children's key instead of processing at run-time in forward + # which can reduce considerable overhead of time for calling "_cvt_to_key" + self.y_key: str = _cvt_to_key(f_x_tuples[0][0]) + self.childs: List[str] = [_cvt_to_key(x) for _, x in f_x_tuples] + self._apply_func = self._parallel_derivate_operator_func + + def forward(self, data_dict: DATA_DICT): + # use cache + if all([key in data_dict for key in self.key]): + return data_dict + + return self._apply_func(data_dict) + + def _parallel_derivate_operator_func(self, data_dict: DATA_DICT) -> DATA_DICT: + # NOTE: Derivative of 'sdf' function will not be executed here, which is already + # generated in 'data_dict' during points sampling using discrete difference + # method(see also: ppsci/geometry/geometry.py: Geometry.sdf_derivatives), + # such as 'sdf__x', 'sdf__y'. + y_data: paddle.Tensor = data_dict[self.y_key] + xs_data: List[paddle.Tensor] = [data_dict[x_key] for x_key in self.childs] + y_wrt_xs_grad: List[paddle.Tensor] = jacobian( + y_data, + xs_data, + create_graph=self.create_graph, + retain_graph=self.retain_graph, + ) + for i, key in enumerate(self.key): + data_dict[key] = y_wrt_xs_grad[i] + return data_dict + + def __str__(self): + return ( + f"{self.__class__.__name__}(expr: {self.expr}, " + f"expr_type: {type(self.expr)})" + ) + + def __repr__(self): + return f"{self.__class__.__name__}(expr: {self.expr})" + + +class LayerNode(Node): + """Class for layer node in converted expression tree. + + Args: + expr (sp.core.function.UndefinedFunction): Sympy expression. + model (arch.Arch): NN model for computing forward result in this node. + """ + + def __init__( + self, + expr: sp.core.function.UndefinedFunction, + model: arch.Arch, + ): + super().__init__(expr) + self.model = model + + def forward(self, data_dict: DATA_DICT) -> DATA_DICT: + # use cache + if self.key in data_dict: + return data_dict + + output_dict = self.model(data_dict) + data_dict.update(output_dict) + + return data_dict + + +class ConstantNode(Node): + """Class for constant variable node in converted expression tree. + + Args: + expr (Union[sp.Number, sp.NumberSymbol]): Number expression. + """ + + def __init__(self, expr: Union[sp.Number, sp.NumberSymbol]): + super().__init__(expr) + if ( + self.expr.is_Float + or self.expr.is_Integer + or self.expr.is_Boolean + or self.expr.is_Rational + ): + self.expr = float(self.expr) + else: + raise TypeError( + "expr({expr}) should be Float/Integer/Boolean/Rational, " + f"but got {type(self.expr)}" + ) + self.expr = paddle.to_tensor(self.expr) + + def forward(self, data_dict: DATA_DICT) -> DATA_DICT: + # use cache + if self.key in data_dict: + return data_dict + + data_dict[self.key] = self.expr + return data_dict + + def __str__(self): + return ( + f"{self.__class__.__name__}(expr: {float(self.expr)}, " + f"expr_type: {type(self.expr)})" + ) + + +class ParameterNode(Node): + """Class for constant variable node in converted expression tree. + + Args: + expr (sp.Symbol): Parameter expression. + parameter (paddle.framework.io.EagerParamBase): Parameter tensor. 
+ """ + + def __init__(self, expr: sp.Symbol, parameter: paddle.framework.io.EagerParamBase): + super().__init__(expr) + self.parameter = parameter + + def forward(self, data_dict: DATA_DICT) -> DATA_DICT: + data_dict[self.key] = self.parameter + return data_dict + + +class ComposedNode(nn.Layer): + """ + Compose list of several callable objects together. + """ + + def __init__(self, callable_nodes: List[Node]): + super().__init__() + assert len(callable_nodes) + self.callable_nodes = nn.LayerList(callable_nodes) + + def forward(self, data_dict: DATA_DICT) -> paddle.Tensor: + # call all callable_nodes in order + for i, func in enumerate(self.callable_nodes): + data_dict = func(data_dict) + + # return result of last node(root node) for target + return data_dict[self.callable_nodes[-1].key] + + +def _post_traverse(cur_node: sp.Basic, nodes: List[sp.Basic]) -> List[sp.Basic]: + """Traverse sympy expression tree in post-order. + + Args: + cur_node (sp.Basic): Sympy expression of current node. + nodes (List[sp.Basic]): Node list storing all tree nodes in post-order. + + Returns: + List[sp.Basic]: Node list storing all tree nodes in post-order. + """ + # traverse into sub-nodes + if isinstance(cur_node, sp.Function): + for arg in cur_node.args: + nodes = _post_traverse(arg, nodes) + nodes.append(cur_node) + elif isinstance(cur_node, sp.Derivative): + nodes = _post_traverse(cur_node.args[0], nodes) + nodes.append(cur_node) + elif isinstance(cur_node, sp.Symbol): + nodes.append(cur_node) + return nodes + elif isinstance(cur_node, sp.Number): + nodes.append(cur_node) + else: + for arg in cur_node.args: + nodes = _post_traverse(arg, nodes) + nodes.append(cur_node) + return nodes + + +def _visualize_graph(nodes: List[sp.Basic], graph_filename: str): + try: + import pygraphviz + except ModuleNotFoundError: + raise ModuleNotFoundError( + "Please install pygraphviz by steps below:\n" + "1. apt-get install graphviz graphviz-dev\n" + "2. python -m pip install pygraphviz" + ) + + SYMPY_BUILTIN_NAME = { + sp.sin: "sin", + sp.sinh: "sinh", + sp.asin: "asin", + sp.cos: "cos", + sp.acos: "acos", + sp.cosh: "cosh", + sp.tan: "tan", + sp.atan: "atan", + sp.atan2: "atan2", + sp.acosh: "acosh", + sp.asinh: "asinh", + sp.tanh: "tanh", + sp.atanh: "atanh", + sp.erf: "erf", + sp.loggamma: "loggamma", + sp.exp: "exp", + sp.Pow: "Pow", + sp.log: "log", + sp.Max: "Max", + sp.Min: "Min", + sp.Abs: "Abs", + sp.Heaviside: "Heaviside", + sp.sign: "sign", + sp.ceiling: "ceiling", + sp.floor: "floor", + sp.Add: "Add", + sp.Mul: "Mul", + } + naming_counter = {k: 0 for k in SYMPY_BUILTIN_NAME} + + def get_operator_name(node: sp.Function): + ret = f"{SYMPY_BUILTIN_NAME[node.func]}_{naming_counter[node.func]}" + naming_counter[node.func] += 1 + return ret + + graph = pygraphviz.AGraph(directed=True, rankdir="TB") + C_FUNC = "#9196f1" # purple color function node + C_DATA = "#feb64d" # orange color for data node + C_EDGE = "#000000" # black color for edge + + def add_edge(u: str, v: str, u_color: str = C_DATA, v_color: str = C_DATA): + """Add an edge from `u` to `v`. + + Args: + u (str): Name of begin node u. + v (str): Name of end node v. + u_color (str, optional): Color of node u. Defaults to '#feb64d'. + v_color (str, optional): Color of node v. Defaults to '#feb64d'. 
+ """ + graph.add_node(u, style="filled", shape="ellipse", color=u_color) + graph.add_node(v, style="filled", shape="ellipse", color=v_color) + graph.add_edge(u, v, color=C_EDGE, style="solid", penwidth=0.5, arrowsize=0.5) + + for node in nodes: + if isinstance(node, tuple(SYMPY_BUILTIN_NAME.keys())): + operator_str = get_operator_name(node) + for arg in node.args: + add_edge(_cvt_to_key(arg), operator_str, v_color=C_FUNC) + add_edge(operator_str, _cvt_to_key(node), u_color=C_FUNC) + elif isinstance(node, sp.Function): + for arg in node.args: + add_edge(_cvt_to_key(arg), str(node), v_color=C_FUNC) + add_edge(str(node), _cvt_to_key(node), u_color=C_FUNC) + elif isinstance(node, sp.Derivative): + add_edge(str(node), _cvt_to_key(node), u_color=C_FUNC) + add_edge(_cvt_to_key(node.args[0]), str(node), v_color=C_FUNC) + for arg in node.args[1:]: + add_edge(_cvt_to_key(arg[0]), str(node), v_color=C_FUNC) + + # export graph to image + graph.layout() + image_path = f"{graph_filename}.png" + dot_path = f"{graph_filename}.dot" + if len(os.path.dirname(image_path)): + os.makedirs(os.path.dirname(image_path), exist_ok=True) + graph.draw(image_path, prog="dot") + graph.write(dot_path) + logger.message( + f"Computational graph has been written to: {image_path} and {dot_path}, " + "which can be visualized at: https://dreampuf.github.io/GraphvizOnline/" + ) + + +def _fuse_derivative_nodes( + derivative_exprs: List[sp.Derivative], +) -> List[FusedDerivativeNode]: + """Merge derivative nodes and return in list of FusedDerivativeNode after merger. + + Args: + derivative_exprs (List[sp.Derivative]): Derivatives sympy expression of same + function, e.g. [Derivative(u(x,y), x), Derivative(u(x,y), y)] + + Returns: + List[FusedDerivativeNode]: List of FusedDerivativeNode converting from mergable + derivatives. + """ + + class DerivativeTrie: + """Trie for unrolling derivative.""" + + def __init__(self, expr: sp.Basic): + self.expr: sp.Basic = expr + self.next: Dict["sp.Symbol", "DerivativeTrie"] = {} + + # unroll derivative expressions into a trie structure + trie_root = DerivativeTrie(derivative_exprs[0].args[0]) + for derivative_expr in derivative_exprs: + cur_node = trie_root + for (child, order) in derivative_expr.args[1:]: + for _ in range(order): + if child not in cur_node.next: + cur_node.next[child] = DerivativeTrie(cur_node.expr.diff(child)) + cur_node = cur_node.next[child] + + def dfs_trie( + node: DerivativeTrie, fused_derivative_nodes: List[FusedDerivativeNode] + ) -> None: + if node.next: + fused_derivative_nodes.append( + FusedDerivativeNode( + [(node.expr, name) for name in node.next], + ) + ) + for child in node.next: + dfs_trie(node.next[child], fused_derivative_nodes) + + # walk on derivative trie in pre-order and log fusable nodes + fused_derivative_nodes: List[FusedDerivativeNode] = [] + dfs_trie(trie_root, fused_derivative_nodes) + + return fused_derivative_nodes + + +def lambdify( + expr: Union[sp.Basic, List[sp.Basic]], + models: Optional[Union[arch.Arch, Tuple[arch.Arch, ...]]] = None, + extra_parameters: Optional[Sequence[paddle.Tensor]] = None, + graph_filename: Optional[str] = None, + create_graph: bool = True, + retain_graph: Optional[bool] = None, + fuse_derivative: bool = False, +) -> Union[ComposedNode, List[ComposedNode]]: + """Convert sympy expression to callable function. + + Args: + expr (Union[sp.Basic, List[sp.Basic]]): Sympy expression(s) to be converted. + Will return callable functions in list if multiple expressions are given, + else return one single callable function. 
+ models (Optional[Union[arch.Arch, Tuple[arch.Arch, ...]]]): Model(s) for + computing forward result in `LayerNode`. + extra_parameters (Optional[nn.ParameterList]): Extra learnable parameters. + Defaults to None. + graph_filename (Optional[str]): Save computational graph to `graph_filename.png` + for given `expr`, if `graph_filename` is not None and a valid string, + such as 'momentum_x'. Defaults to None. + create_graph (bool, optional): Whether to create the gradient graphs of + the computing process. When it is True, higher order derivatives are + supported to compute. When it is False, the gradient graphs of the + computing process would be discarded. Defaults to True. + retain_graph (Optional[bool]): Whether to retain the forward graph which + is used to calculate the gradient. When it is True, the graph would + be retained, in which way users can calculate backward twice for the + same graph. When it is False, the graph would be freed. Defaults to None, + which means it is equal to `create_graph`. + fuse_derivative (bool, optional): Whether to fuse the derivative nodes. + For example, if `expr` is 'Derivative(u, x) + Derivative(u, y)' + It will compute grad(u, x) + grad(u, y) if fuse_derivative=False, + else will compute sum(grad(u, [x, y])) if fuse_derivative=True as is more + efficient in backward-graph. Defaults to False, as it is experimental so not + enabled by default if used independently. + + Returns: + Union[ComposedNode, List[ComposedNode]]: Callable object(s) for computing expr + with necessary input(s) data in dict given. + + Examples: + >>> import paddle + >>> import ppsci + >>> import sympy as sp + + >>> a, b, c, x, y = sp.symbols("a b c x y") + >>> u = sp.Function("u")(x, y) + >>> v = sp.Function("v")(x, y) + >>> z = -a + b * (c ** 2) + u * v + 2.3 + + >>> model = ppsci.arch.MLP(("x", "y"), ("u", "v"), 4, 16) + + >>> batch_size = 13 + >>> a_tensor = paddle.randn([batch_size, 1]) + >>> b_tensor = paddle.randn([batch_size, 1]) + >>> c_tensor = paddle.randn([batch_size, 1]) + >>> x_tensor = paddle.randn([batch_size, 1]) + >>> y_tensor = paddle.randn([batch_size, 1]) + + >>> model_output_dict = model({"x": x_tensor, "y": y_tensor}) + >>> u_tensor, v_tensor = model_output_dict["u"], model_output_dict["v"] + + >>> z_tensor_manually = ( + ... -a_tensor + b_tensor * (c_tensor ** 2) + ... + u_tensor * v_tensor + 2.3 + ... ) + >>> z_tensor_sympy = ppsci.lambdify(z, model)( + ... { + ... "a": a_tensor, + ... "b": b_tensor, + ... "c": c_tensor, + ... "x": x_tensor, + ... "y": y_tensor, + ... } + ... ) + + >>> paddle.allclose(z_tensor_manually, z_tensor_sympy).item() + True + """ + if not extra_parameters: + extra_parameters = () + + if isinstance(models, arch.ModelList): + models = tuple(models.model_list[i] for i in range(len(models.model_list))) + if not isinstance(models, (tuple, list)): + models = (models,) + + def _expr_to_callable_nodes( + single_expr: sp.Basic, graph_filename_: Optional[str] = None + ) -> List[Node]: + """Convert sympy expression to a sequence of nodes in topologic order. + + Args: + single_expr (sp.Basic): Single sympy expression, such as "a+b*c". + graph_filename_ (Optional[str]): Save computational graph to + `/path/to/graph_filename.png` for given `expr`, if `graph_filename` is not + None and a valid string, such as 'momentum_x'. Defaults to None. + + Returns: + List[Node]: Sequence of callable nodes. 
+ """ + # NOTE: Those simplify methods may complicate given expr instead, so not use here + # simplify expression to reduce nodes in tree + # expr = sp.nsimplify(expr) + # expr = sp.expand(expr) + # expr = sp.simplify(expr) + + # remove 1.0 from sympy expression tree + single_expr = single_expr.subs(1.0, 1) + + # convert sympy expression tree to list of nodes in post-order + sympy_nodes: List[sp.Basic] = [] + sympy_nodes = _post_traverse(single_expr, sympy_nodes) + + # remove unnecessary symbol nodes already in input dict(except for parameter symbol) + _parameter_names = tuple(param.name for param in extra_parameters) + sympy_nodes = [ + node + for node in sympy_nodes + if (not node.is_Symbol) or (_cvt_to_key(node) in _parameter_names) + ] + + # remove duplicated node(s) with topological order kept + sympy_nodes = list(dict.fromkeys(sympy_nodes)) + + # convert sympy node to callable node + callable_nodes = [] + for i, node in enumerate(sympy_nodes): + if isinstance( + node, tuple(SYMPY_TO_PADDLE.keys()) + (sp.Add, sp.Mul, sp.Derivative) + ): + if isinstance(node, sp.Derivative): + callable_nodes.append( + DerivativeNode(node, create_graph, retain_graph) + ) + else: + callable_nodes.append(OperatorNode(node)) + elif isinstance(node, sp.Function): + if str(node.func) == equation.DETACH_FUNC_NAME: + callable_nodes.append(DetachNode(node)) + logger.debug(f"Detected detach node {node}") + else: + match_index = None + for j, model in enumerate(models): + if str(node.func) in model.output_keys: + callable_nodes.append( + LayerNode( + node, + model, + ) + ) + if match_index is not None: + raise ValueError( + f"Name of function: '{node}' should be unique along given" + f" models, but got same output_key: '{str(node.func)}' " + f"in given models[{match_index}] and models[{j}]." + ) + match_index = j + # NOTE: Skip 'sdf' function, which should be already generated in + # given data_dict + if match_index is None and str(node.func) != "sdf": + raise ValueError( + f"Node {node} can not match any model in given model(s)." + ) + elif node.is_Number or node.is_NumberSymbol: + callable_nodes.append(ConstantNode(node)) + elif isinstance(node, sp.Symbol): + callable_nodes.append( + ParameterNode( + node, + *[ + param + for param in extra_parameters + if param.name == node.name + ], + ) + ) + else: + raise NotImplementedError( + f"The node {node} is not supported in lambdify." + ) + + # NOTE: visualize computational graph using 'pygraphviz' + if isinstance(graph_filename, str): + _visualize_graph(sympy_nodes, os.path.join(graph_filename, graph_filename_)) + + return callable_nodes + + if isinstance(expr, sp.Basic): + callable_nodes_group = [_expr_to_callable_nodes(expr, "expr")] + else: + callable_nodes_group = [ + _expr_to_callable_nodes(expr_i, f"expr_{i}") + for i, expr_i in enumerate(expr) + ] + + # [Optional] Fused derivatives nodes that with same function to be differentiated + while fuse_derivative: + candidate_pos: List[Tuple[int, int]] = [] # [(group_id, node_id), ...] 
+ + # use 4-nested for-loop to find all potential mergable derivative nodes + for i in range(len(callable_nodes_group)): + for j in range(len(callable_nodes_group[i])): + # skip non-derivative node + if not isinstance(callable_nodes_group[i][j], DerivativeNode): + continue + # skip sdf function since it is always already given in data_dict + if callable_nodes_group[i][j].expr.args[0].name == "sdf": + continue + # skip merged node + if callable_nodes_group[i][j].merged: + continue + + candidate_pos = [[i, j]] + for ii in range(len(callable_nodes_group)): + for jj in range(len(callable_nodes_group[ii])): + # skip non-derivative node + if not isinstance(callable_nodes_group[ii][jj], DerivativeNode): + continue + + # skip same node + if i == ii and j == jj: + continue + # skip merged node + if callable_nodes_group[ii][jj].merged: + continue + + # has same function item + if ( + callable_nodes_group[i][j].expr.args[0] + == callable_nodes_group[ii][jj].expr.args[0] + ): + candidate_pos.append([ii, jj]) + + if len(candidate_pos) > 1: + break + if len(candidate_pos) > 1: + break + + # merge all candidate nodes into one or more FusedDerivativeNode node + if len(candidate_pos) > 1: + fused_node_seq = _fuse_derivative_nodes( + [callable_nodes_group[gid][nid].expr for gid, nid in candidate_pos] + ) + assert isinstance( + fused_node_seq, list + ), "'fused_node_seq' should be list of 'FusedDerivativeNode'" + gid0, nid0 = candidate_pos[0] + logger.debug( + f"Fused {len(candidate_pos)} derivatives nodes: " + f"{[callable_nodes_group[i][j].expr for i, j in candidate_pos]} into" + f" {len(fused_node_seq)} fuse node sequence: {fused_node_seq} at position: ([{gid0}][{nid0}])" + ) + + # mark merged node + for i, (gid, nid) in enumerate(candidate_pos): + assert isinstance(callable_nodes_group[gid][nid], DerivativeNode) + callable_nodes_group[gid][nid].merged = True + + # replace first mergable node with fused node sequence(packed in list) + # then mask the rest merged node to None(except [gid0, nid0]) + for i, (gid, nid) in enumerate(candidate_pos[1:]): + # keep the end node of each group to avoid generating empty callable + # node sequence, this will not effect performance since cache strategy + # in Node.forward + if nid != len(callable_nodes_group[gid]) - 1: + callable_nodes_group[gid][nid] = None + + if nid0 == len(callable_nodes_group[gid0]) - 1: + callable_nodes_group[gid0].insert(nid0, fused_node_seq) + else: + callable_nodes_group[gid0][nid0] = fused_node_seq + + # re-organize callable_nodes_group, remove None element and unpack list + for i in range(len(callable_nodes_group)): + tmp = [] + for j in range(len(callable_nodes_group[i])): + if isinstance( + callable_nodes_group[i][j], (Node, FusedDerivativeNode) + ): + tmp.append(callable_nodes_group[i][j]) + elif isinstance(callable_nodes_group[i][j], list) and isinstance( + callable_nodes_group[i][j][0], FusedDerivativeNode + ): + tmp.extend(callable_nodes_group[i][j]) + else: + assert ( + callable_nodes_group[i][j] is None + ), f"Unexpected element: {callable_nodes_group[i][j]}" + callable_nodes_group[i] = tmp + else: + # exit while loop if no more fused + break + + # Compose callable nodes into one callable object + if isinstance(expr, sp.Basic): + return ComposedNode(callable_nodes_group[0]) + else: + return [ComposedNode(callable_nodes) for callable_nodes in callable_nodes_group] diff --git a/ppsci/utils/writer.py b/ppsci/utils/writer.py index eb7a6d55f3..21cef8a91f 100644 --- a/ppsci/utils/writer.py +++ b/ppsci/utils/writer.py @@ -1,222 +1,222 @@ -# 
Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import csv -import os -from typing import Dict -from typing import Optional -from typing import Tuple -from typing import Union - -import numpy as np -import paddle - -from ppsci.utils import logger - -__all__ = [ - "save_csv_file", -] - - -def save_csv_file( - filename: str, - data_dict: Dict[str, Union[np.ndarray, "paddle.Tensor"]], - keys: Tuple[str, ...], - alias_dict: Optional[Dict[str, str]] = None, - use_header: bool = True, - delimiter: str = ",", - encoding: str = "utf-8", -): - """Write numpy or tensor data into csv file. - - Args: - filename (str): Dump file path. - data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Numpy or tensor data in dict. - keys (Tuple[str, ...]): Keys for data_dict to be fetched. - alias_dict (Optional[Dict[str, str]], optional): Alias dict for keys, - i.e. {dump_key: dict_key}. Defaults to None. - use_header (bool, optional): Whether save csv with header. Defaults to True. - delimiter (str, optional): Delemiter for splitting different data field. Defaults to ",". - encoding (str, optional): Encoding. Defaults to "utf-8". - - Examples: - >>> import numpy as np - >>> from ppsci.utils import save_csv_file - >>> data_dict = { - ... "a": np.array([[1], [2], [3]]).astype("int64"), # [3, 1] - ... "b": np.array([[4.12], [5.25], [6.3370]]).astype("float32"), # [3, 1] - ... } - >>> save_csv_file( - ... "test.csv", - ... data_dict, - ... ("A", "B"), - ... alias_dict={"A": "a", "B": "b"}, - ... use_header=True, - ... delimiter=",", - ... encoding="utf-8", - ... ) # doctest: +SKIP - - >>> # == test.csv == - >>> # A,B - >>> # 1,4.12 - >>> # 2,5.25 - >>> # 3,6.337 - """ - if alias_dict is None: - alias_dict = {} - - # convert to numpy array - data_fields = [] - header = [] - for key in keys: - fetch_key = alias_dict.get(key, key) - data = data_dict[fetch_key] - if isinstance(data, paddle.Tensor): - data = data.numpy() # [num_of_samples, ] - - if isinstance(data, np.ndarray): - data = data.flatten() - data_fields.append(data) - - header.append(key) - - assert len(header) == len(data_fields) - - data_fields = zip(*data_fields) # transpose col data to row data - with open(filename, "w", newline="", encoding=encoding) as file: - writer = csv.writer(file, delimiter=delimiter) - - if use_header: - writer.writerow(header) - - writer.writerows(data_fields) - - logger.message(f"csv file has been dumped to: {filename}") - - -def save_tecplot_file( - filename: str, - data_dict: Dict[str, Union[np.ndarray, "paddle.Tensor"]], - keys: Tuple[str, ...], - num_x: int, - num_y: int, - alias_dict: Optional[Dict[str, str]] = None, - delimiter: str = " ", - encoding: str = "utf-8", - num_timestamps: int = 1, -): - """Write numpy or tensor data into tecplot file(s). - - Args: - filename (str): Tecplot file path. - data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Numpy or Tensor data in dict. 
- keys (Tuple[str, ...]): Target keys to be dumped. - num_x (int): The number of discrete points of the grid in the X-axis. Assuming - the discrete grid size is 20 x 30, then num_x=20. - num_y (int): The number of discrete points of the grid in the Y-axis. Assuming - the discrete grid size is 20 x 30, then num_y=30. - alias_dict (Optional[Dict[str, str]], optional): Alias dict for keys, - i.e. {dump_key: dict_key}. Defaults to None. - delimiter (str, optional): Delemiter for splitting different data field. Defaults to " ". - encoding (str, optional): Encoding. Defaults to "utf-8". - num_timestamps (int, optional): Number of timestamp over coord and value. Defaults to 1. - - Examples: - >>> import numpy as np - >>> from ppsci.utils import save_tecplot_file - >>> data_dict = { - ... "x": np.array([[-1.0], [-1.0], [-1.0], [-1.0], [-1.0], [-1.0]]), # [6, 1] - ... "y": np.array([[1.0], [2.0], [3.0], [1.0], [2.0], [3.0]]), # [6, 1] - ... "value": np.array([[3], [33], [333], [3333], [33333], [333333]]), # [6, 1] - ... } - >>> save_tecplot_file( - ... "./test.dat", - ... data_dict, - ... ("X", "Y", "value"), - ... num_x=1, - ... num_y=3, - ... alias_dict={"X": "x", "Y": "y"}, - ... num_timestamps=2, - ... ) # doctest: +SKIP - >>> # == test_t-0.dat == - >>> # title = "./test_t-0.dat" - >>> # variables = "X", "Y" - >>> # Zone I = 3, J = 1, F = POINT - >>> # -1.0 1.0 3.0 - >>> # -1.0 2.0 33.0 - >>> # -1.0 3.0 333.0 - - - >>> # == test_t-1.dat == - >>> # title = "./test_t-1.dat" - >>> # variables = "X", "Y" - >>> # Zone I = 3, J = 1, F = POINT - >>> # -1.0 1.0 3333.0 - >>> # -1.0 2.0 33333.0 - >>> # -1.0 3.0 333333.0 - """ - if alias_dict is None: - alias_dict = {} - - ntxy = len(next(iter(data_dict.values()))) - if ntxy % num_timestamps != 0: - raise ValueError( - f"num_points({ntxy}) must be a multiple of " - f"num_timestamps({num_timestamps})." - ) - nxy = ntxy // num_timestamps - - nx, ny = num_x, num_y - assert nx * ny == nxy, f"nx({nx}) * ny({ny}) != nxy({nxy})" - - if len(os.path.dirname(filename)): - os.makedirs(os.path.dirname(filename), exist_ok=True) - - if filename.endswith(".dat"): - filename = filename[:-4] - - for t in range(num_timestamps): - # write 1 tecplot file for each timestep - if num_timestamps > 1: - dump_filename = f"{filename}_t-{t}.dat" - else: - dump_filename = f"{filename}.dat" - - fetch_keys = [alias_dict.get(key, key) for key in keys] - with open(dump_filename, "w", encoding=encoding) as f: - # write meta information of tec - f.write(f'title = "{dump_filename}"\n') - header = ", ".join([f'"{key}"' for key in keys]) - f.write(f"variables = {header}\n") - - # NOTE: Tecplot is column-major, so we need to specify I = ny, J = nx, - # which is in contrast to our habits. - f.write(f"Zone I = {ny}, J = {nx}, F = POINT\n") - - # write points data into file - data_cur_time_step = [ - data_dict[key][t * nxy : (t + 1) * nxy] for key in fetch_keys - ] - - for items in zip(*data_cur_time_step): - f.write(delimiter.join([str(float(x)) for x in items]) + "\n") - - if num_timestamps > 1: - logger.message( - f"tecplot files are saved to: {filename}_t-0.dat ~ {filename}_t-{num_timestamps - 1}.dat" - ) - else: - logger.message(f"tecplot file is saved to: {filename}.dat") +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import csv +import os +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import numpy as np +import paddle + +from ppsci.utils import logger + +__all__ = [ + "save_csv_file", +] + + +def save_csv_file( + filename: str, + data_dict: Dict[str, Union[np.ndarray, "paddle.Tensor"]], + keys: Tuple[str, ...], + alias_dict: Optional[Dict[str, str]] = None, + use_header: bool = True, + delimiter: str = ",", + encoding: str = "utf-8", +): + """Write numpy or tensor data into csv file. + + Args: + filename (str): Dump file path. + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Numpy or tensor data in dict. + keys (Tuple[str, ...]): Keys for data_dict to be fetched. + alias_dict (Optional[Dict[str, str]], optional): Alias dict for keys, + i.e. {dump_key: dict_key}. Defaults to None. + use_header (bool, optional): Whether save csv with header. Defaults to True. + delimiter (str, optional): Delemiter for splitting different data field. Defaults to ",". + encoding (str, optional): Encoding. Defaults to "utf-8". + + Examples: + >>> import numpy as np + >>> from ppsci.utils import save_csv_file + >>> data_dict = { + ... "a": np.array([[1], [2], [3]]).astype("int64"), # [3, 1] + ... "b": np.array([[4.12], [5.25], [6.3370]]).astype("float32"), # [3, 1] + ... } + >>> save_csv_file( + ... "test.csv", + ... data_dict, + ... ("A", "B"), + ... alias_dict={"A": "a", "B": "b"}, + ... use_header=True, + ... delimiter=",", + ... encoding="utf-8", + ... ) # doctest: +SKIP + + >>> # == test.csv == + >>> # A,B + >>> # 1,4.12 + >>> # 2,5.25 + >>> # 3,6.337 + """ + if alias_dict is None: + alias_dict = {} + + # convert to numpy array + data_fields = [] + header = [] + for key in keys: + fetch_key = alias_dict.get(key, key) + data = data_dict[fetch_key] + if isinstance(data, paddle.Tensor): + data = data.numpy() # [num_of_samples, ] + + if isinstance(data, np.ndarray): + data = data.flatten() + data_fields.append(data) + + header.append(key) + + assert len(header) == len(data_fields) + + data_fields = zip(*data_fields) # transpose col data to row data + with open(filename, "w", newline="", encoding=encoding) as file: + writer = csv.writer(file, delimiter=delimiter) + + if use_header: + writer.writerow(header) + + writer.writerows(data_fields) + + logger.message(f"csv file has been dumped to: {filename}") + + +def save_tecplot_file( + filename: str, + data_dict: Dict[str, Union[np.ndarray, "paddle.Tensor"]], + keys: Tuple[str, ...], + num_x: int, + num_y: int, + alias_dict: Optional[Dict[str, str]] = None, + delimiter: str = " ", + encoding: str = "utf-8", + num_timestamps: int = 1, +): + """Write numpy or tensor data into tecplot file(s). + + Args: + filename (str): Tecplot file path. + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Numpy or Tensor data in dict. + keys (Tuple[str, ...]): Target keys to be dumped. + num_x (int): The number of discrete points of the grid in the X-axis. Assuming + the discrete grid size is 20 x 30, then num_x=20. 
+ num_y (int): The number of discrete points of the grid in the Y-axis. Assuming + the discrete grid size is 20 x 30, then num_y=30. + alias_dict (Optional[Dict[str, str]], optional): Alias dict for keys, + i.e. {dump_key: dict_key}. Defaults to None. + delimiter (str, optional): Delemiter for splitting different data field. Defaults to " ". + encoding (str, optional): Encoding. Defaults to "utf-8". + num_timestamps (int, optional): Number of timestamp over coord and value. Defaults to 1. + + Examples: + >>> import numpy as np + >>> from ppsci.utils import save_tecplot_file + >>> data_dict = { + ... "x": np.array([[-1.0], [-1.0], [-1.0], [-1.0], [-1.0], [-1.0]]), # [6, 1] + ... "y": np.array([[1.0], [2.0], [3.0], [1.0], [2.0], [3.0]]), # [6, 1] + ... "value": np.array([[3], [33], [333], [3333], [33333], [333333]]), # [6, 1] + ... } + >>> save_tecplot_file( + ... "./test.dat", + ... data_dict, + ... ("X", "Y", "value"), + ... num_x=1, + ... num_y=3, + ... alias_dict={"X": "x", "Y": "y"}, + ... num_timestamps=2, + ... ) # doctest: +SKIP + >>> # == test_t-0.dat == + >>> # title = "./test_t-0.dat" + >>> # variables = "X", "Y" + >>> # Zone I = 3, J = 1, F = POINT + >>> # -1.0 1.0 3.0 + >>> # -1.0 2.0 33.0 + >>> # -1.0 3.0 333.0 + + + >>> # == test_t-1.dat == + >>> # title = "./test_t-1.dat" + >>> # variables = "X", "Y" + >>> # Zone I = 3, J = 1, F = POINT + >>> # -1.0 1.0 3333.0 + >>> # -1.0 2.0 33333.0 + >>> # -1.0 3.0 333333.0 + """ + if alias_dict is None: + alias_dict = {} + + ntxy = len(next(iter(data_dict.values()))) + if ntxy % num_timestamps != 0: + raise ValueError( + f"num_points({ntxy}) must be a multiple of " + f"num_timestamps({num_timestamps})." + ) + nxy = ntxy // num_timestamps + + nx, ny = num_x, num_y + assert nx * ny == nxy, f"nx({nx}) * ny({ny}) != nxy({nxy})" + + if len(os.path.dirname(filename)): + os.makedirs(os.path.dirname(filename), exist_ok=True) + + if filename.endswith(".dat"): + filename = filename[:-4] + + for t in range(num_timestamps): + # write 1 tecplot file for each timestep + if num_timestamps > 1: + dump_filename = f"{filename}_t-{t}.dat" + else: + dump_filename = f"{filename}.dat" + + fetch_keys = [alias_dict.get(key, key) for key in keys] + with open(dump_filename, "w", encoding=encoding) as f: + # write meta information of tec + f.write(f'title = "{dump_filename}"\n') + header = ", ".join([f'"{key}"' for key in keys]) + f.write(f"variables = {header}\n") + + # NOTE: Tecplot is column-major, so we need to specify I = ny, J = nx, + # which is in contrast to our habits. + f.write(f"Zone I = {ny}, J = {nx}, F = POINT\n") + + # write points data into file + data_cur_time_step = [ + data_dict[key][t * nxy : (t + 1) * nxy] for key in fetch_keys + ] + + for items in zip(*data_cur_time_step): + f.write(delimiter.join([str(float(x)) for x in items]) + "\n") + + if num_timestamps > 1: + logger.message( + f"tecplot files are saved to: {filename}_t-0.dat ~ {filename}_t-{num_timestamps - 1}.dat" + ) + else: + logger.message(f"tecplot file is saved to: {filename}.dat") diff --git a/ppsci/validate/__init__.py b/ppsci/validate/__init__.py index 3bc1c9ae4d..6b6af533ac 100644 --- a/ppsci/validate/__init__.py +++ b/ppsci/validate/__init__.py @@ -1,81 +1,81 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy - -from ppsci.loss import build_loss -from ppsci.metric import build_metric -from ppsci.utils import logger -from ppsci.utils import misc -from ppsci.validate.base import Validator -from ppsci.validate.geo_validator import GeometryValidator -from ppsci.validate.sup_validator import SupervisedValidator - -__all__ = [ - "Validator", - "GeometryValidator", - "SupervisedValidator", -] - - -def build_validator(cfg, equation_dict, geom_dict): - """Build validator(s). - - Args: - cfg (List[DictConfig]): Validator(s) config list. - geom_dict (Dct[str, Geometry]): Geometry(ies) in dict. - equation_dict (Dct[str, Equation]): Equation(s) in dict. - - Returns: - Dict[str, Validator]: Validator(s) in dict. - """ - if cfg is None: - return None - cfg = copy.deepcopy(cfg) - global_dataloader_cfg = cfg["dataloader"] - validator_cfg = cfg["content"] - - validator_dict = misc.PrettyOrderedDict() - for _item in validator_cfg: - validator_cls = next(iter(_item.keys())) - _validator_cfg = _item[validator_cls] - validator_name = _validator_cfg.get("name", validator_cls) - # select geometry - geom_name = _validator_cfg.pop("geom") - _validator_cfg["geom"] = geom_dict[geom_name] - - # update complete dataloader config - local_dataloader_cfg = _validator_cfg["dataloader"] - local_dataloader_cfg.update(global_dataloader_cfg) - - # select equation - for name, expr in _validator_cfg["output_expr"].items(): - if isinstance(expr, str) and expr in equation_dict: - _validator_cfg["output_expr"][name] = equation_dict[expr].equations[ - name - ] - - # build loss - _validator_cfg["loss"] = build_loss(_validator_cfg["loss"]) - - # build metric - _validator_cfg["metric"] = build_metric(_validator_cfg["metric"]) - - # instantiate validator - _validator_cfg["dataloader_cfg"] = _validator_cfg.pop("dataloader") - validator_dict[validator_name] = eval(validator_cls)(**_validator_cfg) - - logger.debug(str(validator_dict[validator_name])) - - return validator_dict +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from ppsci.loss import build_loss +from ppsci.metric import build_metric +from ppsci.utils import logger +from ppsci.utils import misc +from ppsci.validate.base import Validator +from ppsci.validate.geo_validator import GeometryValidator +from ppsci.validate.sup_validator import SupervisedValidator + +__all__ = [ + "Validator", + "GeometryValidator", + "SupervisedValidator", +] + + +def build_validator(cfg, equation_dict, geom_dict): + """Build validator(s). + + Args: + cfg (List[DictConfig]): Validator(s) config list. 
+ geom_dict (Dct[str, Geometry]): Geometry(ies) in dict. + equation_dict (Dct[str, Equation]): Equation(s) in dict. + + Returns: + Dict[str, Validator]: Validator(s) in dict. + """ + if cfg is None: + return None + cfg = copy.deepcopy(cfg) + global_dataloader_cfg = cfg["dataloader"] + validator_cfg = cfg["content"] + + validator_dict = misc.PrettyOrderedDict() + for _item in validator_cfg: + validator_cls = next(iter(_item.keys())) + _validator_cfg = _item[validator_cls] + validator_name = _validator_cfg.get("name", validator_cls) + # select geometry + geom_name = _validator_cfg.pop("geom") + _validator_cfg["geom"] = geom_dict[geom_name] + + # update complete dataloader config + local_dataloader_cfg = _validator_cfg["dataloader"] + local_dataloader_cfg.update(global_dataloader_cfg) + + # select equation + for name, expr in _validator_cfg["output_expr"].items(): + if isinstance(expr, str) and expr in equation_dict: + _validator_cfg["output_expr"][name] = equation_dict[expr].equations[ + name + ] + + # build loss + _validator_cfg["loss"] = build_loss(_validator_cfg["loss"]) + + # build metric + _validator_cfg["metric"] = build_metric(_validator_cfg["metric"]) + + # instantiate validator + _validator_cfg["dataloader_cfg"] = _validator_cfg.pop("dataloader") + validator_dict[validator_name] = eval(validator_cls)(**_validator_cfg) + + logger.debug(str(validator_dict[validator_name])) + + return validator_dict diff --git a/ppsci/validate/base.py b/ppsci/validate/base.py index 84760f25d7..8344ed3f16 100644 --- a/ppsci/validate/base.py +++ b/ppsci/validate/base.py @@ -1,69 +1,69 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -from typing import TYPE_CHECKING -from typing import Any -from typing import Dict -from typing import Optional - -from paddle import io - -from ppsci import data - -if TYPE_CHECKING: - from ppsci import loss - from ppsci import metric - - -class Validator: - """Base class for validators. - - Args: - dataset (io.Dataset): Dataset for validator. - dataloader_cfg (Dict[str, Any]): Dataloader config. - loss (loss.Loss): Loss functor. - metric (Optional[Dict[str, metric.Metric]]): Named metric functors in dict. - name (str): Name of validator. 
- """ - - def __init__( - self, - dataset: io.Dataset, - dataloader_cfg: Dict[str, Any], - loss: "loss.Loss", - metric: Optional[Dict[str, "metric.Metric"]], - name: str, - ): - self.data_loader = data.build_dataloader(dataset, dataloader_cfg) - self.data_iter = iter(self.data_loader) - self.loss = loss - self.metric = metric - self.name = name - - def __str__(self): - return ", ".join( - [ - self.__class__.__name__, - f"name = {self.name}", - f"input_keys = {self.input_keys}", - f"output_keys = {self.output_keys}", - f"output_expr = {self.output_expr}", - f"label_dict = {self.label_dict}", - f"len(dataloader) = {len(self.data_loader)}", - f"loss = {self.loss}", - f"metric = {list(self.metric.keys())}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any +from typing import Dict +from typing import Optional + +from paddle import io + +from ppsci import data + +if TYPE_CHECKING: + from ppsci import loss + from ppsci import metric + + +class Validator: + """Base class for validators. + + Args: + dataset (io.Dataset): Dataset for validator. + dataloader_cfg (Dict[str, Any]): Dataloader config. + loss (loss.Loss): Loss functor. + metric (Optional[Dict[str, metric.Metric]]): Named metric functors in dict. + name (str): Name of validator. + """ + + def __init__( + self, + dataset: io.Dataset, + dataloader_cfg: Dict[str, Any], + loss: "loss.Loss", + metric: Optional[Dict[str, "metric.Metric"]], + name: str, + ): + self.data_loader = data.build_dataloader(dataset, dataloader_cfg) + self.data_iter = iter(self.data_loader) + self.loss = loss + self.metric = metric + self.name = name + + def __str__(self): + return ", ".join( + [ + self.__class__.__name__, + f"name = {self.name}", + f"input_keys = {self.input_keys}", + f"output_keys = {self.output_keys}", + f"output_expr = {self.output_expr}", + f"label_dict = {self.label_dict}", + f"len(dataloader) = {len(self.data_loader)}", + f"loss = {self.loss}", + f"metric = {list(self.metric.keys())}", + ] + ) diff --git a/ppsci/validate/geo_validator.py b/ppsci/validate/geo_validator.py index 08f2e663a4..8f473f29db 100644 --- a/ppsci/validate/geo_validator.py +++ b/ppsci/validate/geo_validator.py @@ -1,161 +1,161 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
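# NOTE: illustrative sketch (hypothetical values), not a diff hunk.
# The build_validator() helper diffed earlier (ppsci/validate/__init__.py) reads
# cfg["dataloader"] as a global dataloader config and cfg["content"] as a list of
# single-key dicts mapping a validator class name to its kwargs. The sub-schemas
# consumed by build_loss()/build_metric() are not shown in this diff, so the
# "loss"/"metric" entries below are placeholders; all keys/values are hypothetical.
hypothetical_validator_cfg = {
    "dataloader": {"batch_size": 16},  # merged into every validator's local "dataloader"
    "content": [
        {
            "GeometryValidator": {
                "name": "u_mse",            # optional; falls back to the class name
                "geom": "rect",             # looked up by this key in geom_dict
                "output_expr": {"u": "u"},  # a str value may be resolved through equation_dict
                "label_dict": {"u": 0},
                "dataloader": {"dataset": "IterableNamedArrayDataset", "total_size": 32},
                "loss": None,               # placeholder; passed to build_loss()
                "metric": None,             # placeholder; passed to build_metric()
            }
        }
    ],
}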
- -from __future__ import annotations - -from typing import Any -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Union - -import numpy as np -import paddle -import sympy -from typing_extensions import Literal - -from ppsci import geometry -from ppsci import loss -from ppsci import metric -from ppsci.data import dataset -from ppsci.validate import base - - -class GeometryValidator(base.Validator): - """Validator for geometry. - - Args: - output_expr (Dict[str, Callable]): Function in dict for computing output. - e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u - will be multiplied by model output v and the result will be named "u_mul_v". - label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing - label, which will be a reference value to participate in the loss calculation. - geom (geometry.Geometry): Geometry where data sampled from. - dataloader_cfg (Dict[str, Any]): Dataloader config. - loss (loss.Loss): Loss functor. - random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in - geometry. Defaults to "pseudo". - criteria (Optional[Callable]): Criteria for refining specified domain. Defaults to None. - evenly (bool, optional): Whether to use evenly distribution sampling. Defaults to False. - metric (Optional[Dict[str, metric.Metric]]): Named metric functors in dict. Defaults to None. - with_initial (bool, optional): Whether the data contains time t0. Defaults to False. - name (Optional[str]): Name of validator. Defaults to None. - - Examples: - >>> import ppsci - >>> rect = ppsci.geometry.Rectangle((0, 0), (1, 1)) - >>> geom_validator = ppsci.validate.GeometryValidator( - ... {"u": lambda out: out["u"]}, - ... {"u": 0}, - ... rect, - ... { - ... "dataset": "IterableNamedArrayDataset", - ... "iters_per_epoch": 1, - ... "total_size": 32, - ... "batch_size": 16, - ... }, - ... ppsci.loss.MSELoss("mean"), - ... 
) - """ - - def __init__( - self, - output_expr: Dict[str, Callable], - label_dict: Dict[str, Union[float, Callable]], - geom: geometry.Geometry, - dataloader_cfg: Dict[str, Any], - loss: loss.Loss, - random: Literal["pseudo", "Halton", "LHS"] = "pseudo", - criteria: Optional[Callable] = None, - evenly: bool = False, - metric: Optional[Dict[str, metric.Metric]] = None, - with_initial: bool = False, - name: Optional[str] = None, - ): - self.output_expr = output_expr - self.label_dict = label_dict - self.input_keys = geom.dim_keys - self.output_keys = tuple(label_dict.keys()) - - nx = dataloader_cfg["total_size"] - self.num_timestamps = 1 - # TODO(sensen): Simplify code below - if isinstance(geom, geometry.TimeXGeometry): - if geom.timedomain.num_timestamps is not None: - if with_initial: - # include t0 - self.num_timestamps = geom.timedomain.num_timestamps - assert ( - nx % self.num_timestamps == 0 - ), f"{nx} % {self.num_timestamps} != 0" - nx //= self.num_timestamps - input = geom.sample_interior( - nx * (geom.timedomain.num_timestamps - 1), - random, - criteria, - evenly, - ) - initial = geom.sample_initial_interior(nx, random, criteria, evenly) - input = { - key: np.vstack((initial[key], input[key])) for key in input - } - else: - # exclude t0 - self.num_timestamps = geom.timedomain.num_timestamps - 1 - assert ( - nx % self.num_timestamps == 0 - ), f"{nx} % {self.num_timestamps} != 0" - nx //= self.num_timestamps - input = geom.sample_interior( - nx * (geom.timedomain.num_timestamps - 1), - random, - criteria, - evenly, - ) - else: - raise NotImplementedError( - "TimeXGeometry with random timestamp not implemented yet." - ) - else: - input = geom.sample_interior(nx, random, criteria, evenly) - - label = {} - for key, value in label_dict.items(): - if isinstance(value, (int, float)): - label[key] = np.full_like(next(iter(input.values())), value) - elif isinstance(value, sympy.Basic): - func = sympy.lambdify( - sympy.symbols(geom.dim_keys), - value, - [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], - ) - label[key] = func( - **{k: v for k, v in input.items() if k in geom.dim_keys} - ) - elif callable(value): - func = value - label[key] = func(input) - if isinstance(label[key], (int, float)): - label[key] = np.full( - (next(iter(input.values())).shape[0], 1), - label[key], - paddle.get_default_dtype(), - ) - else: - raise NotImplementedError(f"type of {type(value)} is invalid yet.") - - weight = {key: np.ones_like(next(iter(label.values()))) for key in label} - - _dataset = getattr(dataset, dataloader_cfg["dataset"])(input, label, weight) - super().__init__(_dataset, dataloader_cfg, loss, metric, name) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
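# NOTE: illustrative sketch (hypothetical keys and expressions), not a diff hunk.
# GeometryValidator, diffed in this file, accepts three kinds of label_dict values,
# matching the branches in its __init__: a numeric constant, a sympy expression over
# the geometry's dim_keys, and a callable applied to the sampled input dict.
import numpy as np
import sympy

x, y = sympy.symbols("x y")
hypothetical_label_dict = {
    "u": 0.0,                              # broadcast to a constant label array
    "v": sympy.sin(x) * sympy.cos(y),      # lambdified over dim_keys, evaluated on the samples
    "p": lambda d: np.zeros_like(d["x"]),  # called with the sampled input dict
}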
+ +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Union + +import numpy as np +import paddle +import sympy +from typing_extensions import Literal + +from ppsci import geometry +from ppsci import loss +from ppsci import metric +from ppsci.data import dataset +from ppsci.validate import base + + +class GeometryValidator(base.Validator): + """Validator for geometry. + + Args: + output_expr (Dict[str, Callable]): Function in dict for computing output. + e.g. {"u_mul_v": lambda out: out["u"] * out["v"]} means the model output u + will be multiplied by model output v and the result will be named "u_mul_v". + label_dict (Dict[str, Union[float, Callable]]): Function in dict for computing + label, which will be a reference value to participate in the loss calculation. + geom (geometry.Geometry): Geometry where data sampled from. + dataloader_cfg (Dict[str, Any]): Dataloader config. + loss (loss.Loss): Loss functor. + random (Literal["pseudo", "Halton", "LHS"], optional): Random method for sampling data in + geometry. Defaults to "pseudo". + criteria (Optional[Callable]): Criteria for refining specified domain. Defaults to None. + evenly (bool, optional): Whether to use evenly distribution sampling. Defaults to False. + metric (Optional[Dict[str, metric.Metric]]): Named metric functors in dict. Defaults to None. + with_initial (bool, optional): Whether the data contains time t0. Defaults to False. + name (Optional[str]): Name of validator. Defaults to None. + + Examples: + >>> import ppsci + >>> rect = ppsci.geometry.Rectangle((0, 0), (1, 1)) + >>> geom_validator = ppsci.validate.GeometryValidator( + ... {"u": lambda out: out["u"]}, + ... {"u": 0}, + ... rect, + ... { + ... "dataset": "IterableNamedArrayDataset", + ... "iters_per_epoch": 1, + ... "total_size": 32, + ... "batch_size": 16, + ... }, + ... ppsci.loss.MSELoss("mean"), + ... 
) + """ + + def __init__( + self, + output_expr: Dict[str, Callable], + label_dict: Dict[str, Union[float, Callable]], + geom: geometry.Geometry, + dataloader_cfg: Dict[str, Any], + loss: loss.Loss, + random: Literal["pseudo", "Halton", "LHS"] = "pseudo", + criteria: Optional[Callable] = None, + evenly: bool = False, + metric: Optional[Dict[str, metric.Metric]] = None, + with_initial: bool = False, + name: Optional[str] = None, + ): + self.output_expr = output_expr + self.label_dict = label_dict + self.input_keys = geom.dim_keys + self.output_keys = tuple(label_dict.keys()) + + nx = dataloader_cfg["total_size"] + self.num_timestamps = 1 + # TODO(sensen): Simplify code below + if isinstance(geom, geometry.TimeXGeometry): + if geom.timedomain.num_timestamps is not None: + if with_initial: + # include t0 + self.num_timestamps = geom.timedomain.num_timestamps + assert ( + nx % self.num_timestamps == 0 + ), f"{nx} % {self.num_timestamps} != 0" + nx //= self.num_timestamps + input = geom.sample_interior( + nx * (geom.timedomain.num_timestamps - 1), + random, + criteria, + evenly, + ) + initial = geom.sample_initial_interior(nx, random, criteria, evenly) + input = { + key: np.vstack((initial[key], input[key])) for key in input + } + else: + # exclude t0 + self.num_timestamps = geom.timedomain.num_timestamps - 1 + assert ( + nx % self.num_timestamps == 0 + ), f"{nx} % {self.num_timestamps} != 0" + nx //= self.num_timestamps + input = geom.sample_interior( + nx * (geom.timedomain.num_timestamps - 1), + random, + criteria, + evenly, + ) + else: + raise NotImplementedError( + "TimeXGeometry with random timestamp not implemented yet." + ) + else: + input = geom.sample_interior(nx, random, criteria, evenly) + + label = {} + for key, value in label_dict.items(): + if isinstance(value, (int, float)): + label[key] = np.full_like(next(iter(input.values())), value) + elif isinstance(value, sympy.Basic): + func = sympy.lambdify( + sympy.symbols(geom.dim_keys), + value, + [{"amax": lambda xy, _: np.maximum(xy[0], xy[1])}, "numpy"], + ) + label[key] = func( + **{k: v for k, v in input.items() if k in geom.dim_keys} + ) + elif callable(value): + func = value + label[key] = func(input) + if isinstance(label[key], (int, float)): + label[key] = np.full( + (next(iter(input.values())).shape[0], 1), + label[key], + paddle.get_default_dtype(), + ) + else: + raise NotImplementedError(f"type of {type(value)} is invalid yet.") + + weight = {key: np.ones_like(next(iter(label.values()))) for key in label} + + _dataset = getattr(dataset, dataloader_cfg["dataset"])(input, label, weight) + super().__init__(_dataset, dataloader_cfg, loss, metric, name) diff --git a/ppsci/validate/sup_validator.py b/ppsci/validate/sup_validator.py index a88a02af5e..cd7f6dc0a4 100644 --- a/ppsci/validate/sup_validator.py +++ b/ppsci/validate/sup_validator.py @@ -1,103 +1,103 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
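# NOTE: illustrative sketch (hypothetical numbers), not a diff hunk.
# For a TimeXGeometry with a fixed set of timestamps, GeometryValidator (diffed just
# above) requires dataloader_cfg["total_size"] to be divisible by the number of
# timestamps it keeps (all of them when with_initial=True, all but t0 otherwise),
# and it samples total_size // num_timestamps spatial points per timestamp.
total_size = 32                  # dataloader_cfg["total_size"]
geom_num_timestamps = 5          # geom.timedomain.num_timestamps
with_initial = False
num_timestamps = geom_num_timestamps if with_initial else geom_num_timestamps - 1
assert total_size % num_timestamps == 0, f"{total_size} % {num_timestamps} != 0"
nx = total_size // num_timestamps  # spatial samples per timestamp -> 8 here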
- -from __future__ import annotations - -from typing import Any -from typing import Callable -from typing import Dict -from typing import Optional - -from ppsci import loss -from ppsci import metric -from ppsci.data import dataset -from ppsci.validate import base - - -class SupervisedValidator(base.Validator): - """Validator for supervised models. - - Args: - dataloader_cfg (Dict[str, Any]): Config of building a dataloader. - loss (loss.Loss): Loss functor. - output_expr (Optional[Dict[str, Callable]]): List of label expression. - metric (Optional[Dict[str, metric.Metric]]): Named metric functors in dict. Defaults to None. - name (Optional[str]): Name of validator. Defaults to None. - - Examples: - >>> import ppsci - >>> valid_dataloader_cfg = { - ... "dataset": { - ... "name": "MatDataset", - ... "file_path": "/path/to/file.mat", - ... "input_keys": ("t_f",), - ... "label_keys": ("eta", "f"), - ... }, - ... "batch_size": 32, - ... "sampler": { - ... "name": "BatchSampler", - ... "drop_last": False, - ... "shuffle": False, - ... }, - ... } # doctest: +SKIP - >>> eta_mse_validator = ppsci.validate.SupervisedValidator( - ... valid_dataloader_cfg, - ... ppsci.loss.MSELoss("mean"), - ... {"eta": lambda out: out["eta"]}, - ... metric={"MSE": ppsci.metric.MSE()}, - ... name="eta_mse", - ... ) # doctest: +SKIP - """ - - def __init__( - self, - dataloader_cfg: Dict[str, Any], - loss: loss.Loss, - output_expr: Optional[Dict[str, Callable]] = None, - metric: Optional[Dict[str, metric.Metric]] = None, - name: Optional[str] = None, - ): - self.output_expr = output_expr - - # build dataset - _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) - - self.input_keys = _dataset.input_keys - self.output_keys = ( - tuple(output_expr.keys()) - if output_expr is not None - else _dataset.label_keys - ) - - if self.output_expr is None: - self.output_expr = { - key: lambda out, k=key: out[k] for key in self.output_keys - } - - # construct dataloader with dataset and dataloader_cfg - super().__init__(_dataset, dataloader_cfg, loss, metric, name) - - def __str__(self): - return ", ".join( - [ - self.__class__.__name__, - f"name = {self.name}", - f"input_keys = {self.input_keys}", - f"output_keys = {self.output_keys}", - f"output_expr = {self.output_expr}", - f"len(dataloader) = {len(self.data_loader)}", - f"loss = {self.loss}", - f"metric = {list(self.metric.keys())}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Any +from typing import Callable +from typing import Dict +from typing import Optional + +from ppsci import loss +from ppsci import metric +from ppsci.data import dataset +from ppsci.validate import base + + +class SupervisedValidator(base.Validator): + """Validator for supervised models. + + Args: + dataloader_cfg (Dict[str, Any]): Config of building a dataloader. + loss (loss.Loss): Loss functor. + output_expr (Optional[Dict[str, Callable]]): List of label expression. 
+ metric (Optional[Dict[str, metric.Metric]]): Named metric functors in dict. Defaults to None. + name (Optional[str]): Name of validator. Defaults to None. + + Examples: + >>> import ppsci + >>> valid_dataloader_cfg = { + ... "dataset": { + ... "name": "MatDataset", + ... "file_path": "/path/to/file.mat", + ... "input_keys": ("t_f",), + ... "label_keys": ("eta", "f"), + ... }, + ... "batch_size": 32, + ... "sampler": { + ... "name": "BatchSampler", + ... "drop_last": False, + ... "shuffle": False, + ... }, + ... } # doctest: +SKIP + >>> eta_mse_validator = ppsci.validate.SupervisedValidator( + ... valid_dataloader_cfg, + ... ppsci.loss.MSELoss("mean"), + ... {"eta": lambda out: out["eta"]}, + ... metric={"MSE": ppsci.metric.MSE()}, + ... name="eta_mse", + ... ) # doctest: +SKIP + """ + + def __init__( + self, + dataloader_cfg: Dict[str, Any], + loss: loss.Loss, + output_expr: Optional[Dict[str, Callable]] = None, + metric: Optional[Dict[str, metric.Metric]] = None, + name: Optional[str] = None, + ): + self.output_expr = output_expr + + # build dataset + _dataset = dataset.build_dataset(dataloader_cfg["dataset"]) + + self.input_keys = _dataset.input_keys + self.output_keys = ( + tuple(output_expr.keys()) + if output_expr is not None + else _dataset.label_keys + ) + + if self.output_expr is None: + self.output_expr = { + key: lambda out, k=key: out[k] for key in self.output_keys + } + + # construct dataloader with dataset and dataloader_cfg + super().__init__(_dataset, dataloader_cfg, loss, metric, name) + + def __str__(self): + return ", ".join( + [ + self.__class__.__name__, + f"name = {self.name}", + f"input_keys = {self.input_keys}", + f"output_keys = {self.output_keys}", + f"output_expr = {self.output_expr}", + f"len(dataloader) = {len(self.data_loader)}", + f"loss = {self.loss}", + f"metric = {list(self.metric.keys())}", + ] + ) diff --git a/ppsci/visualize/__init__.py b/ppsci/visualize/__init__.py index e6a5d49186..3f0d12845a 100644 --- a/ppsci/visualize/__init__.py +++ b/ppsci/visualize/__init__.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -80,3 +81,85 @@ def build_visualizer(cfg): visualizer_dict[visualizer_name] = visualizer return visualizer_dict +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
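# NOTE: illustrative sketch (hypothetical label_keys and outputs), not a diff hunk.
# When output_expr is omitted, SupervisedValidator (diffed just above) falls back to
# identity expressions over the dataset's label_keys; the k=key default argument pins
# each key inside its own lambda so the closures do not all capture the last key.
label_keys = ("eta", "f")
output_expr = {key: (lambda out, k=key: out[k]) for key in label_keys}
sample_output = {"eta": 1.0, "f": 2.0}
assert output_expr["eta"](sample_output) == 1.0
assert output_expr["f"](sample_output) == 2.0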
+ +from __future__ import annotations + +import copy + +from ppsci.visualize.vtu import save_vtu_to_mesh + +from ppsci.visualize.base import Visualizer # isort:skip +from ppsci.visualize.visualizer import VisualizerScatter1D # isort:skip +from ppsci.visualize.visualizer import VisualizerScatter3D # isort:skip +from ppsci.visualize.visualizer import VisualizerVtu # isort:skip +from ppsci.visualize.visualizer import Visualizer2D # isort:skip +from ppsci.visualize.visualizer import Visualizer2DPlot # isort:skip +from ppsci.visualize.visualizer import Visualizer3D # isort:skip +from ppsci.visualize.visualizer import VisualizerWeather # isort:skip +from ppsci.visualize.radar import VisualizerRadar # isort:skip +from ppsci.visualize.vtu import save_vtu_from_dict # isort:skip +from ppsci.visualize.plot import save_plot_from_1d_dict # isort:skip +from ppsci.visualize.plot import save_plot_from_3d_dict # isort:skip +from ppsci.visualize.plot import save_plot_weather_from_dict # isort:skip + + +__all__ = [ + "Visualizer", + "VisualizerScatter1D", + "VisualizerScatter3D", + "VisualizerVtu", + "Visualizer2D", + "Visualizer2DPlot", + "Visualizer3D", + "VisualizerWeather", + "VisualizerRadar", + "save_vtu_from_dict", + "save_vtu_to_mesh", + "save_plot_from_1d_dict", + "save_plot_from_3d_dict", + "save_plot_weather_from_dict", +] + + +def build_visualizer(cfg): + """Build visualizer(s). + + Args: + cfg (List[DictConfig]): Visualizer(s) config list. + geom_dict (Dct[str, Geometry]): Geometry(ies) in dict. + equation_dict (Dct[str, Equation]): Equation(s) in dict. + + Returns: + Dict[str, Visualizer]: Visualizer(s) in dict. + """ + if cfg is None: + return None + cfg = copy.deepcopy(cfg) + + visualizer_dict = {} + for _item in cfg: + visualizer_cls = next(iter(_item.keys())) + visualizer_cfg = _item[visualizer_cls] + visualizer = eval(visualizer_cls)(**visualizer_cfg) + + visualizer_name = visualizer_cfg.get("name", visualizer_cls) + if visualizer_name in visualizer_dict: + raise ValueError(f"Name of visualizer({visualizer_name}) should be unique") + visualizer_dict[visualizer_name] = visualizer + + return visualizer_dict +>>>>>>> Stashed changes diff --git a/ppsci/visualize/base.py b/ppsci/visualize/base.py index b249efcabc..812c5f904c 100644 --- a/ppsci/visualize/base.py +++ b/ppsci/visualize/base.py @@ -1,65 +1,65 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import abc -from typing import Callable -from typing import Dict - -import numpy as np - - -class Visualizer: - """Base class for visualizer. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - output_expr (Dict[str, Callable]): Output expression. - batch_size (int): Batch size of data when computing result in visu.py. - num_timestamps (int): Number of timestamps. - prefix (str): Prefix for output file. 
- """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - output_expr: Dict[str, Callable], - batch_size: int, - num_timestamps: int, - prefix: str, - ): - self.input_dict = input_dict - self.input_keys = tuple(input_dict.keys()) - self.output_expr = output_expr - self.output_keys = tuple(output_expr.keys()) - self.batch_size = batch_size - self.num_timestamps = num_timestamps - self.prefix = prefix - - @abc.abstractmethod - def save(self, data_dict): - """Visualize result from data_dict and save as files""" - - def __str__(self): - return ", ".join( - [ - f"input_keys: {self.input_keys}", - f"output_keys: {self.output_keys}", - f"output_expr: {self.output_expr}", - f"batch_size: {self.batch_size}", - f"num_timestamps: {self.num_timestamps}", - f"output file prefix: {self.prefix}", - ] - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import abc +from typing import Callable +from typing import Dict + +import numpy as np + + +class Visualizer: + """Base class for visualizer. + + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + output_expr (Dict[str, Callable]): Output expression. + batch_size (int): Batch size of data when computing result in visu.py. + num_timestamps (int): Number of timestamps. + prefix (str): Prefix for output file. + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + output_expr: Dict[str, Callable], + batch_size: int, + num_timestamps: int, + prefix: str, + ): + self.input_dict = input_dict + self.input_keys = tuple(input_dict.keys()) + self.output_expr = output_expr + self.output_keys = tuple(output_expr.keys()) + self.batch_size = batch_size + self.num_timestamps = num_timestamps + self.prefix = prefix + + @abc.abstractmethod + def save(self, data_dict): + """Visualize result from data_dict and save as files""" + + def __str__(self): + return ", ".join( + [ + f"input_keys: {self.input_keys}", + f"output_keys: {self.output_keys}", + f"output_expr: {self.output_expr}", + f"batch_size: {self.batch_size}", + f"num_timestamps: {self.num_timestamps}", + f"output file prefix: {self.prefix}", + ] + ) diff --git a/ppsci/visualize/plot.py b/ppsci/visualize/plot.py index 8d41ac17c6..75eb58786a 100644 --- a/ppsci/visualize/plot.py +++ b/ppsci/visualize/plot.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -578,3 +579,586 @@ def save_plot_weather_from_dict( "GIF", duration=1, ) +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +from typing import Dict +from typing import Optional +from typing import Tuple +from typing import Union + +import imageio +import matplotlib +import numpy as np +import paddle +from matplotlib import cm +from matplotlib import pyplot as plt +from matplotlib.legend_handler import HandlerBase +from matplotlib.patches import Rectangle +from mpl_toolkits.mplot3d.art3d import Line3DCollection + +from ppsci.utils import logger + +cnames = [ + "bisque", + "black", + "blanchedalmond", + "blue", + "blueviolet", + "brown", + "burlywood", + "cadetblue", + "chartreuse", + "orangered", + "orchid", + "palegoldenrod", + "palegreen", +] + +CMAPS = [ + "Reds", + "Blues", + "Greys", + "Purples", + "Greens", + "Oranges", + "YlOrBr", + "YlOrRd", + "OrRd", + "PuRd", + "RdPu", + "BuPu", + "GnBu", + "PuBu", + "YlGnBu", + "PuBuGn", + "BuGn", + "YlGn", +] + + +def _save_plot_from_1d_array(filename, coord, value, value_keys, num_timestamps=1): + """Save plot from given 1D data. + + Args: + filename (str): Filename. + coord (np.ndarray): Coordinate array. + value (Dict[str, np.ndarray]): Dict of value array. + value_keys (Tuple[str, ...]): Value keys. + num_timestamps (int, optional): Number of timestamps coord/value contains. Defaults to 1. + """ + if len(os.path.dirname(filename)): + os.makedirs(os.path.dirname(filename), exist_ok=True) + fig, a = plt.subplots(len(value_keys), num_timestamps, squeeze=False) + fig.subplots_adjust(hspace=0.8) + + len_ts = len(coord) // num_timestamps + for t in range(num_timestamps): + st = t * len_ts + ed = (t + 1) * len_ts + coord_t = coord[st:ed] + + for i, key in enumerate(value_keys): + _value_t: np.ndarray = value[st:ed, i] + a[i][t].scatter( + coord_t, + _value_t, + color=cnames[i], + label=key, + s=2, + ) + if num_timestamps > 1: + a[i][t].set_title(f"{key}(t={t})") + else: + a[i][t].set_title(f"{key}") + a[i][t].grid(color="#c2ccd0", linestyle="--", linewidth=0.5) + a[i][t].legend() + + if num_timestamps == 1: + fig.savefig(filename, dpi=300) + else: + fig.savefig(f"{filename}_{t}", dpi=300) + + if num_timestamps == 1: + logger.message(f"1D result is saved to: {filename}.png") + else: + logger.message( + f"1D result is saved to: {filename}_0.png" + f" ~ {filename}_{num_timestamps - 1}.png" + ) + + +def save_plot_from_1d_dict( + filename, data_dict, coord_keys, value_keys, num_timestamps=1 +): + """Plot dict data as file. + + Args: + filename (str): Output filename. + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Data in dict. + coord_keys (Tuple[str, ...]): Tuple of coord key. such as ("x", "y"). + value_keys (Tuple[str, ...]): Tuple of value key. such as ("u", "v"). + num_timestamps (int, optional): Number of timestamp in data_dict. Defaults to 1. + + Examples: + >>> import ppsci + >>> import numpy as np + >>> filename = "path/to/file" + >>> data_dict = { + ... "x": np.array([[1], [2], [3],[4]]), + ... "u": np.array([[4], [5], [6],[4]]), + ... 
} + >>> coord_keys = ("x",) + >>> value_keys = ("u",) + >>> ppsci.visualize.save_plot_from_1d_dict(filename, data_dict, coord_keys, value_keys) # doctest: +SKIP + """ + space_ndim = len(coord_keys) - int("t" in coord_keys) + if space_ndim not in [1, 2, 3]: + raise ValueError(f"ndim of space coord ({space_ndim}) should be 1, 2 or 3") + + coord = [data_dict[k] for k in coord_keys if k != "t"] + value = [data_dict[k] for k in value_keys] if value_keys else None + + if isinstance(coord[0], paddle.Tensor): + coord = [x.numpy() for x in coord] + else: + coord = [x for x in coord] + coord = np.concatenate(coord, axis=1) + + if value is not None: + if isinstance(value[0], paddle.Tensor): + value = [x.numpy() for x in value] + else: + value = [x for x in value] + value = np.concatenate(value, axis=1) + + _save_plot_from_1d_array(filename, coord, value, value_keys, num_timestamps) + + +def _save_plot_from_2d_array( + filename: str, + visu_data: Tuple[np.ndarray, ...], + visu_keys: Tuple[str, ...], + num_timestamps: int = 1, + stride: int = 1, + xticks: Optional[Tuple[float, ...]] = None, + yticks: Optional[Tuple[float, ...]] = None, +): + """Save plot from given 2D data. + + Args: + filename (str): Filename. + visu_data (Tuple[np.ndarray, ...]): Data that requires visualization. + visu_keys (Tuple[str, ...]): Keys for visualizing data. such as ("u", "v"). + num_timestamps (int, optional): Number of timestamps coord/value contains. Defaults to 1. + stride (int, optional): The time stride of visualization. Defaults to 1. + xticks (Optional[Tuple[float, ...]]): Tuple of xtick locations. Defaults to None. + yticks (Optional[Tuple[float, ...]]): Tuple of ytick locations. Defaults to None. + """ + if len(os.path.dirname(filename)): + os.makedirs(os.path.dirname(filename), exist_ok=True) + + plt.close("all") + matplotlib.rcParams["xtick.labelsize"] = 5 + matplotlib.rcParams["ytick.labelsize"] = 5 + + fig, ax = plt.subplots( + len(visu_keys), + num_timestamps, + squeeze=False, + sharey=True, + figsize=(num_timestamps, len(visu_keys)), + ) + fig.subplots_adjust(hspace=0.3) + target_flag = any("target" in key for key in visu_keys) + for i, data in enumerate(visu_data): + if target_flag is False or "target" in visu_keys[i]: + c_max = np.amax(data) + c_min = np.amin(data) + + for t_idx in range(num_timestamps): + t = t_idx * stride + ax[i, t_idx].imshow( + data[t, :, :], + extent=[xticks.min(), xticks.max(), yticks.min(), yticks.max()], + cmap="inferno", + origin="lower", + vmax=c_max, + vmin=c_min, + ) + if xticks is not None: + ax[i, t_idx].set_xticks(xticks) + if yticks is not None: + ax[i, t_idx].set_yticks(yticks) + + ax[i, t_idx].set_title(f"t={t}", fontsize=8) + if t_idx == 0: + ax[i, 0].set_ylabel(visu_keys[i], fontsize=8) + + p0 = ax[i, -1].get_position().get_points().flatten() + ax_cbar = fig.add_axes([p0[2] + 0.005, p0[1], 0.0075, p0[3] - p0[1]]) + ticks = np.linspace(0, 1, 5) + tickLabels = np.linspace(c_min, c_max, 5) + tickLabels = [f"{t0:02.2f}" for t0 in tickLabels] + cbar = matplotlib.colorbar.ColorbarBase( + ax_cbar, cmap=plt.get_cmap("inferno"), orientation="vertical", ticks=ticks + ) + cbar.set_ticklabels(tickLabels, fontsize=5) + plt.savefig(f"{filename}", dpi=300) + + +def save_plot_from_2d_dict( + filename: str, + data_dict: Dict[str, Union[np.ndarray, paddle.Tensor]], + visu_keys: Tuple[str, ...], + num_timestamps: int = 1, + stride: int = 1, + xticks: Optional[Tuple[float, ...]] = None, + yticks: Optional[Tuple[float, ...]] = None, +): + """Plot 2d dict data as file. 
+ + Args: + filename (str): Output filename. + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Data in dict. + visu_keys (Tuple[str, ...]): Keys for visualizing data. such as ("u", "v"). + num_timestamps (int, optional): Number of timestamp in data_dict. Defaults to 1. + stride (int, optional): The time stride of visualization. Defaults to 1. + xticks (Optional[Tuple[float,...]]): The list of xtick locations. Defaults to None. + yticks (Optional[Tuple[float,...]]): The list of ytick locations. Defaults to None. + """ + visu_data = [data_dict[k] for k in visu_keys] + if isinstance(visu_data[0], paddle.Tensor): + visu_data = [x.numpy() for x in visu_data] + _save_plot_from_2d_array( + filename, visu_data, visu_keys, num_timestamps, stride, xticks, yticks + ) + + +# Interface to LineCollection: +def _colorline3d( + x, y, z, t=None, cmap=plt.get_cmap("viridis"), linewidth=1, alpha=1.0, ax=None +): + """ + Plot a colored line with coordinates x and y + Optionally specify colors in the array z + Optionally specify a colormap, a norm function and a line width + https://stackoverflow.com/questions/52884221/how-to-plot-a-matplotlib-line-plot-using-colormap + """ + # Default colors equally spaced on [0, 1]: + if t is None: + t = np.linspace(0.25, 1.0, len(x)) + if ax is None: + ax = plt.gca() + + points = np.array([x, y, z]).T.reshape(-1, 1, 3) + segments = np.concatenate([points[:-1], points[1:]], axis=1) + + colors = np.array([cmap(i) for i in t]) + lc = Line3DCollection(segments, colors=colors, linewidth=linewidth, alpha=alpha) + ax.add_collection(lc) + ax.scatter(x, y, z, c=colors, marker="*", alpha=alpha) # Adding line markers + + +class HandlerColormap(HandlerBase): + """Class for creating colormap legend rectangles. + + Args: + cmap (matplotlib.cm): Matplotlib colormap. + num_stripes (int, optional): Number of contour levels (strips) in rectangle. Defaults to 8. + """ + + def __init__(self, cmap: matplotlib.cm, num_stripes: int = 8, **kw): + HandlerBase.__init__(self, **kw) + self.cmap = cmap + self.num_stripes = num_stripes + + def create_artists( + self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans + ): + stripes = [] + for i in range(self.num_stripes): + s = Rectangle( + [xdescent + i * width / self.num_stripes, ydescent], + width / self.num_stripes, + height, + fc=self.cmap((2 * i + 1) / (2 * self.num_stripes)), + transform=trans, + ) + stripes.append(s) + return stripes + + +def _save_plot_from_3d_array( + filename: str, + visu_data: Tuple[np.ndarray, ...], + visu_keys: Tuple[str, ...], + num_timestamps: int = 1, +): + """Save plot from given 3D data. + + Args: + filename (str): Filename. + visu_data (Tuple[np.ndarray, ...]): Data that requires visualization. + visu_keys (Tuple[str, ...]): Keys for visualizing data. such as ("u", "v"). + num_timestamps (int, optional): Number of timestamps coord/value contains. Defaults to 1. 
+ """ + if len(os.path.dirname(filename)): + os.makedirs(os.path.dirname(filename), exist_ok=True) + + fig = plt.figure(figsize=(10, 10)) + len_ts = len(visu_data[0]) // num_timestamps + for t in range(num_timestamps): + ax = fig.add_subplot(1, num_timestamps, t + 1, projection="3d") + st = t * len_ts + ed = (t + 1) * len_ts + visu_data_t = [data[st:ed] for data in visu_data] + cmaps = [] + for i, data in enumerate(visu_data_t): + cmap = plt.get_cmap(CMAPS[i % len(CMAPS)]) + _colorline3d(data[:, 0], data[:, 1], data[:, 2], cmap=cmap, ax=ax) + cmaps.append(cmap) + cmap_handles = [Rectangle((0, 0), 1, 1) for _ in visu_keys] + handler_map = dict( + zip(cmap_handles, [HandlerColormap(cm, num_stripes=8) for cm in cmaps]) + ) + # Create custom legend with color map rectangles + ax.legend( + handles=cmap_handles, + labels=visu_keys, + handler_map=handler_map, + loc="upper right", + framealpha=0.95, + ) + if num_timestamps == 1: + fig.savefig(filename, dpi=300) + else: + fig.savefig(f"{filename}_{t}", dpi=300) + + if num_timestamps == 1: + logger.message(f"3D result is saved to: {filename}.png") + else: + logger.message( + f"3D result is saved to: {filename}_0.png" + f" ~ {filename}_{num_timestamps - 1}.png" + ) + + +def save_plot_from_3d_dict( + filename: str, + data_dict: Dict[str, Union[np.ndarray, paddle.Tensor]], + visu_keys: Tuple[str, ...], + num_timestamps: int = 1, +): + """Plot dict data as file. + + Args: + filename (str): Output filename. + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Data in dict. + visu_keys (Tuple[str, ...]): Keys for visualizing data. such as ("u", "v"). + num_timestamps (int, optional): Number of timestamp in data_dict. Defaults to 1. + + Examples: + >>> import numpy as np + >>> import ppsci + + >>> data_dict = { + ... "u": np.array([[[10], [20], [30], [40], [50]]]), + ... "v": np.array([[[5], [15], [25], [35], [45]]]), + ... } + + >>> ppsci.visualize.save_plot_from_3d_dict( + ... "path/to/file", + ... data_dict, + ... ("u", "v"), + ... 1, + ... ) # doctest: +SKIP + """ + visu_data = [data_dict[k] for k in visu_keys] + if isinstance(visu_data[0], paddle.Tensor): + visu_data = [x.numpy() for x in visu_data] + + _save_plot_from_3d_array(filename, visu_data, visu_keys, num_timestamps) + + +def _save_plot_weather_from_array( + filename: str, + pred: np.ndarray, + target: np.ndarray, + pred_key: str, + target_key: str, + xticks: Tuple[float, ...], + xticklabels: Tuple[str, ...], + yticks: Tuple[float, ...], + yticklabels: Tuple[str, ...], + vmin: float, + vmax: float, + colorbar_label: str = "", + log_norm: bool = False, +): + """Plot weather result as file from array data. + + Args: + filename (str): Output file name. + pred (np.ndarray): The predict data. + target (np.ndarray): The target data. + pred_key (str): The key of predict data. + target_key (str): The key of target data. + xticks (Tuple[float, ...]): The list of xtick locations. + xticklabels (Tuple[str, ...]): The x-axis' tick labels. + yticks (Tuple[float, ...]): The list of ytick locations. + yticklabels (Tuple[str, ...]): The y-axis' tick labels. + vmin (float): Minimum value that the colormap covers. + vmax (float): Maximal value that the colormap covers. + colorbar_label (str, optional): The color-bar label. Defaults to "". + log_norm (bool, optional): Whether use log norm. Defaults to False. 
+ """ + + def plot_weather( + ax, + data, + title_text, + xticks, + xticklabels, + yticks, + yticklabels, + vmin, + vmax, + log_norm, + cmap=cm.get_cmap("turbo", 1000), + ): + ax.title.set_text(title_text) + ax.set_yticks(yticks) + ax.set_yticklabels(yticklabels) + ax.set_xticks(xticks) + ax.set_xticklabels(xticklabels) + if not log_norm: + map_ = ax.imshow( + data, + interpolation="nearest", + cmap=cmap, + aspect="auto", + vmin=vmin, + vmax=vmax, + ) + else: + norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax, clip=True) + map_ = ax.imshow( + data, interpolation="nearest", cmap=cmap, aspect="auto", norm=norm + ) + plt.colorbar(mappable=map_, cax=None, ax=None, shrink=0.5, label=colorbar_label) + + if len(os.path.dirname(filename)): + os.makedirs(os.path.dirname(filename), exist_ok=True) + fig = plt.figure(facecolor="w", figsize=(7, 7)) + ax = fig.add_subplot(2, 1, 1) + plot_weather( + ax, + pred, + pred_key, + xticks, + xticklabels, + yticks, + yticklabels, + vmin, + vmax, + log_norm, + ) + bx = fig.add_subplot(2, 1, 2) + plot_weather( + bx, + target, + target_key, + xticks, + xticklabels, + yticks, + yticklabels, + vmin, + vmax, + log_norm, + ) + fig.savefig(filename, dpi=300) + plt.close() + + +def save_plot_weather_from_dict( + foldername: str, + data_dict: Dict[str, Union[np.ndarray, paddle.Tensor]], + visu_keys: Tuple[str, ...], + xticks: Tuple[float, ...], + xticklabels: Tuple[str, ...], + yticks: Tuple[float, ...], + yticklabels: Tuple[str, ...], + vmin: float, + vmax: float, + colorbar_label: str = "", + log_norm: bool = False, + num_timestamps: int = 1, +): + """Plot weather result as file from dict data. + + Args: + foldername (str): Output folder name. + data_dict (Dict[str, Union[np.ndarray, paddle.Tensor]]): Data in dict. + visu_keys (Tuple[str, ...]): Keys for visualizing data. such as ("output_6h", "target_6h"). + xticks (Tuple[float, ...]): The list of xtick locations. + xticklabels (Tuple[str, ...]): The x-axis' tick labels. + yticks (Tuple[float, ...]): The list of ytick locations, + yticklabels (Tuple[str, ...]): The y-axis' tick labels. + vmin (float): Minimum value that the colormap covers. + vmax (float): Maximal value that the colormap covers. + colorbar_label (str, optional): The colorbar label. Defaults to "". + log_norm (bool, optional): Whether use log norm. Defaults to False. + num_timestamps (int): Number of timestamp in data_dict. Defaults to 1. + """ + os.makedirs(foldername, exist_ok=True) + + visu_data = [data_dict[k] for k in visu_keys] + if isinstance(visu_data[0], paddle.Tensor): + visu_data = [x.numpy() for x in visu_data] + + frames = [] + for t in range(num_timestamps): + pred_key, target_key = visu_keys[2 * t], visu_keys[2 * t + 1] + pred_data = visu_data[2 * t] + target_data = visu_data[2 * t + 1] + filename_t = os.path.join(foldername, f"{t}.png") + _save_plot_weather_from_array( + filename_t, + pred_data, + target_data, + pred_key, + target_key, + xticks, + xticklabels, + yticks, + yticklabels, + vmin=vmin, + vmax=vmax, + colorbar_label=colorbar_label, + log_norm=log_norm, + ) + frames.append(imageio.imread(filename_t)) + filename = os.path.join(foldername, "result.gif") + imageio.mimsave( + filename, + frames, + "GIF", + duration=1, + ) +>>>>>>> Stashed changes diff --git a/ppsci/visualize/radar.py b/ppsci/visualize/radar.py index abde75b775..2ff0a973b6 100644 --- a/ppsci/visualize/radar.py +++ b/ppsci/visualize/radar.py @@ -1,124 +1,124 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
- -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import os -from typing import Callable -from typing import Dict - -import matplotlib.pyplot as plt -import numpy as np - -from ppsci.visualize import base - - -class VisualizerRadar(base.Visualizer): - """Visualizer for NowcastNet Radar Dataset. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - output_expr (Dict[str, Callable]): Output expression. - batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. - num_timestamps (int, optional): Number of timestamps - prefix (str, optional): Prefix for output file. - case_type (str, optional): Case type. - total_length (str, optional): Total length. - - Examples: - >>> import ppsci - >>> import paddle - >>> frames_tensor = paddle.randn([1, 29, 512, 512, 2]) - >>> visualizer = ppsci.visualize.VisualizerRadar( - ... {"input": frames_tensor}, - ... {"output": lambda out: out["output"]}, - ... num_timestamps=1, - ... prefix="v_nowcastnet", - ... ) - """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - output_expr: Dict[str, Callable], - batch_size: int = 64, - num_timestamps: int = 1, - prefix: str = "vtu", - case_type: str = "normal", - total_length: int = 29, - ): - super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) - self.case_type = case_type - self.total_length = total_length - self.input_dict = input_dict - - def save(self, path, data_dict): - if not os.path.exists(path): - os.makedirs(path) - test_ims = self.input_dict[list(self.input_dict.keys())[0]] - # keys: {"input", "output"} - img_gen = data_dict[list(data_dict.keys())[1]] - vis_info = {"vmin": 1, "vmax": 40} - if self.case_type == "normal": - test_ims_plot = test_ims[0][ - :-2, 256 - 192 : 256 + 192, 256 - 192 : 256 + 192 - ] - img_gen_plot = img_gen[0][:-2, 256 - 192 : 256 + 192, 256 - 192 : 256 + 192] - else: - test_ims_plot = test_ims[0][:-2] - img_gen_plot = img_gen[0][:-2] - save_plots( - test_ims_plot, - labels=[f"gt{i + 1}" for i in range(self.total_length)], - res_path=path, - vmin=vis_info["vmin"], - vmax=vis_info["vmax"], - ) - save_plots( - img_gen_plot, - labels=[f"pd{i + 1}" for i in range(9, self.total_length)], - res_path=path, - vmin=vis_info["vmin"], - vmax=vis_info["vmax"], - ) - - -def save_plots( - field, - labels, - res_path, - figsize=None, - vmin=0, - vmax=10, - cmap="viridis", - npy=False, - **imshow_args, -): - for i, data in enumerate(field): - if i >= len(labels): - break - plt.figure(figsize=figsize) - ax = plt.axes() - ax.set_axis_off() - alpha = data[..., 0] / 1 - alpha[alpha < 1] = 0 - alpha[alpha > 1] = 1 - ax.imshow( - data[..., 0], alpha=alpha, vmin=vmin, vmax=vmax, cmap=cmap, **imshow_args - ) - plt.savefig(os.path.join(res_path, labels[i] + ".png")) - plt.close() - if npy: - with open(os.path.join(res_path, labels[i] + ".npy"), "wb") as f: - np.save(f, data[..., 0]) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +from typing import Callable +from typing import Dict + +import matplotlib.pyplot as plt +import numpy as np + +from ppsci.visualize import base + + +class VisualizerRadar(base.Visualizer): + """Visualizer for NowcastNet Radar Dataset. + + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + output_expr (Dict[str, Callable]): Output expression. + batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. + num_timestamps (int, optional): Number of timestamps + prefix (str, optional): Prefix for output file. + case_type (str, optional): Case type. + total_length (str, optional): Total length. + + Examples: + >>> import ppsci + >>> import paddle + >>> frames_tensor = paddle.randn([1, 29, 512, 512, 2]) + >>> visualizer = ppsci.visualize.VisualizerRadar( + ... {"input": frames_tensor}, + ... {"output": lambda out: out["output"]}, + ... num_timestamps=1, + ... prefix="v_nowcastnet", + ... ) + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + output_expr: Dict[str, Callable], + batch_size: int = 64, + num_timestamps: int = 1, + prefix: str = "vtu", + case_type: str = "normal", + total_length: int = 29, + ): + super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) + self.case_type = case_type + self.total_length = total_length + self.input_dict = input_dict + + def save(self, path, data_dict): + if not os.path.exists(path): + os.makedirs(path) + test_ims = self.input_dict[list(self.input_dict.keys())[0]] + # keys: {"input", "output"} + img_gen = data_dict[list(data_dict.keys())[1]] + vis_info = {"vmin": 1, "vmax": 40} + if self.case_type == "normal": + test_ims_plot = test_ims[0][ + :-2, 256 - 192 : 256 + 192, 256 - 192 : 256 + 192 + ] + img_gen_plot = img_gen[0][:-2, 256 - 192 : 256 + 192, 256 - 192 : 256 + 192] + else: + test_ims_plot = test_ims[0][:-2] + img_gen_plot = img_gen[0][:-2] + save_plots( + test_ims_plot, + labels=[f"gt{i + 1}" for i in range(self.total_length)], + res_path=path, + vmin=vis_info["vmin"], + vmax=vis_info["vmax"], + ) + save_plots( + img_gen_plot, + labels=[f"pd{i + 1}" for i in range(9, self.total_length)], + res_path=path, + vmin=vis_info["vmin"], + vmax=vis_info["vmax"], + ) + + +def save_plots( + field, + labels, + res_path, + figsize=None, + vmin=0, + vmax=10, + cmap="viridis", + npy=False, + **imshow_args, +): + for i, data in enumerate(field): + if i >= len(labels): + break + plt.figure(figsize=figsize) + ax = plt.axes() + ax.set_axis_off() + alpha = data[..., 0] / 1 + alpha[alpha < 1] = 0 + alpha[alpha > 1] = 1 + ax.imshow( + data[..., 0], alpha=alpha, vmin=vmin, vmax=vmax, cmap=cmap, **imshow_args + ) + plt.savefig(os.path.join(res_path, labels[i] + ".png")) + plt.close() + if npy: + with open(os.path.join(res_path, labels[i] + ".npy"), "wb") as f: + np.save(f, data[..., 0]) diff --git a/ppsci/visualize/visualizer.py b/ppsci/visualize/visualizer.py index e3e602daa5..f1f0803f7f 100644 
--- a/ppsci/visualize/visualizer.py +++ b/ppsci/visualize/visualizer.py @@ -1,409 +1,409 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import annotations - -import os.path as osp -from typing import Callable -from typing import Dict -from typing import Optional -from typing import Tuple - -import numpy as np - -from ppsci.visualize import base -from ppsci.visualize import plot -from ppsci.visualize import vtu - - -class VisualizerScatter1D(base.Visualizer): - """Visualizer for 1d scatter data. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - coord_keys (Tuple[str, ...]): Coordinate keys, such as ("x", "y"). - output_expr (Dict[str, Callable]): Output expression. - batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. - num_timestamps (int, optional): Number of timestamps. Defaults to 1. - prefix (str, optional): Prefix for output file. Defaults to "plot". - - Examples: - >>> import ppsci - >>> visu_mat = {"t_f": np.random.randn(16, 1), "eta": np.random.randn(16, 1)} - >>> visualizer_eta = ppsci.visualize.VisualizerScatter1D( - ... visu_mat, - ... ("t_f",), - ... {"eta": lambda d: d["eta"]}, - ... num_timestamps=1, - ... prefix="viv_pred", - ... ) - """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - coord_keys: Tuple[str, ...], - output_expr: Dict[str, Callable], - batch_size: int = 64, - num_timestamps: int = 1, - prefix: str = "plot", - ): - super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) - self.coord_keys = coord_keys - - def save(self, filename, data_dict): - plot.save_plot_from_1d_dict( - filename, data_dict, self.coord_keys, self.output_keys, self.num_timestamps - ) - - -class VisualizerScatter3D(base.Visualizer): - """Visualizer for 3d scatter data. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - output_expr (Dict[str, Callable]): Output expression. - batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. - num_timestamps (int, optional): Number of timestamps. Defaults to 1. - prefix (str, optional): Prefix for output file. Defaults to "plot3d_scatter". - - Examples: - >>> import ppsci - >>> vis_data = {"states": np.random.randn(16, 1)} - >>> visualizer = ppsci.visualize.VisualizerScatter3D( - ... vis_data, - ... {"states": lambda d: d["states"]}, - ... num_timestamps=1, - ... prefix="result_states", - ... 
) - """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - output_expr: Dict[str, Callable], - batch_size: int = 64, - num_timestamps: int = 1, - prefix: str = "plot3d_scatter", - ): - super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) - - def save(self, filename, data_dict): - data_dict = { - key: value for key, value in data_dict.items() if key in self.output_keys - } - value = data_dict[self.output_keys[0]] - dim = len(value.shape) - if dim == 3: - # value.shape=(B, T, 3) - for i in range(value.shape[0]): - cur_data_dict = {key: value[i] for key, value in data_dict.items()} - plot.save_plot_from_3d_dict( - filename + str(i), - cur_data_dict, - self.output_keys, - self.num_timestamps, - ) - else: - # value.shape=(T, 3) - plot.save_plot_from_3d_dict( - filename, data_dict, self.output_keys, self.num_timestamps - ) - - -class VisualizerVtu(base.Visualizer): - """Visualizer for 2D points data. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - output_expr (Dict[str, Callable]): Output expression. - batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. - num_timestamps (int, optional): Number of timestamps - prefix (str, optional): Prefix for output file. - - Examples: - >>> import ppsci - >>> vis_points = { - ... "x": np.random.randn(128, 1), - ... "y": np.random.randn(128, 1), - ... "u": np.random.randn(128, 1), - ... "v": np.random.randn(128, 1), - ... } - >>> visualizer_u_v = ppsci.visualize.VisualizerVtu( - ... vis_points, - ... {"u": lambda d: d["u"], "v": lambda d: d["v"]}, - ... num_timestamps=1, - ... prefix="result_u_v", - ... ) - """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - output_expr: Dict[str, Callable], - batch_size: int = 64, - num_timestamps: int = 1, - prefix: str = "vtu", - ): - super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) - - def save(self, filename, data_dict): - vtu.save_vtu_from_dict( - filename, data_dict, self.input_keys, self.output_keys, self.num_timestamps - ) - - -class Visualizer2D(base.Visualizer): - """Visualizer for 2D data. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - output_expr (Dict[str, Callable]): Output expression. - batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. - num_timestamps (int, optional): Number of timestamps. Defaults to 1. - prefix (str, optional): Prefix for output file. Defaults to "plot2d". - - Examples: - >>> import ppsci - >>> vis_points = { - ... "x": np.random.randn(128, 1), - ... "y": np.random.randn(128, 1), - ... "u": np.random.randn(128, 1), - ... "v": np.random.randn(128, 1), - ... } - >>> visualizer_u_v = ppsci.visualize.Visualizer2D( - ... vis_points, - ... {"u": lambda d: d["u"], "v": lambda d: d["v"]}, - ... num_timestamps=1, - ... prefix="result_u_v", - ... ) - """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - output_expr: Dict[str, Callable], - batch_size: int = 64, - num_timestamps: int = 1, - prefix: str = "plot2d", - ): - super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) - - -class Visualizer2DPlot(Visualizer2D): - """Visualizer for 2D data use matplotlib. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - output_expr (Dict[str, Callable]): Output expression. - batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. - num_timestamps (int, optional): Number of timestamps. 
- stride (int, optional): The time stride of visualization. Defaults to 1. - xticks (Optional[Tuple[float,...]]): The list of xtick locations. Defaults to None. - yticks (Optional[Tuple[float,...]]): The list of ytick locations. Defaults to None. - prefix (str, optional): Prefix for output file. Defaults to "plot2d". - - Examples: - >>> import ppsci - >>> vis_data = { - ... "target_ux": np.random.randn(128, 20, 1), - ... "pred_ux": np.random.randn(128, 20, 1), - ... } - >>> visualizer_states = ppsci.visualize.Visualizer2DPlot( - ... vis_data, - ... { - ... "target_ux": lambda d: d["states"][:, :, 0], - ... "pred_ux": lambda d: output_transform(d)[:, :, 0], - ... }, - ... batch_size=1, - ... num_timestamps=10, - ... stride=20, - ... xticks=np.linspace(-2, 14, 9), - ... yticks=np.linspace(-4, 4, 5), - ... prefix="result_states", - ... ) - """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - output_expr: Dict[str, Callable], - batch_size: int = 64, - num_timestamps: int = 1, - stride: int = 1, - xticks: Optional[Tuple[float, ...]] = None, - yticks: Optional[Tuple[float, ...]] = None, - prefix: str = "plot2d", - ): - super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) - self.stride = stride - self.xticks = xticks - self.yticks = yticks - - def save(self, filename, data_dict): - data_dict = { - key: value for key, value in data_dict.items() if key in self.output_keys - } - value = data_dict[self.output_keys[0]] - dim = len(value.shape) - if dim == 4: - # value.shape=(B, T, H, W) - for i in range(value.shape[0]): - cur_data_dict = {key: value[i] for key, value in data_dict.items()} - plot.save_plot_from_2d_dict( - filename + str(i), - cur_data_dict, - self.output_keys, - self.num_timestamps, - self.stride, - self.xticks, - self.yticks, - ) - else: - # value.shape=(T, H, W) - plot.save_plot_from_2d_dict( - filename, - data_dict, - self.output_keys, - self.num_timestamps, - self.stride, - self.xticks, - self.yticks, - ) - - -class Visualizer3D(base.Visualizer): - """Visualizer for 3D plot data. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - output_expr (Dict[str, Callable]): Output expression. - batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. - label_dict (Dict[str, np.ndarray]): Label dict. - time_list (Optional[Tuple[float, ...]]): Time list. - prefix (str, optional): Prefix for output file. - """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - output_expr: Dict[str, Callable], - batch_size: int = 64, - label_dict: Optional[Dict[str, np.ndarray]] = None, - time_list: Optional[Tuple[float, ...]] = None, - prefix: str = "vtu", - ): - self.label = label_dict - self.time_list = time_list - super().__init__(input_dict, output_expr, batch_size, len(time_list), prefix) - - def save(self, filename: str, data_dict: Dict[str, np.ndarray]): - n = int((next(iter(data_dict.values()))).shape[0] / self.num_timestamps) - coord_keys = [x for x in self.input_dict if x != "t"] - for i in range(len(self.time_list)): - vtu.save_vtu_to_mesh( - osp.join(filename, f"predict_{i+1}.vtu"), - {key: (data_dict[key][i * n : (i + 1) * n]) for key in data_dict}, - coord_keys, - self.output_keys, - ) - - -class VisualizerWeather(base.Visualizer): - """Visualizer for weather data use matplotlib. - - Args: - input_dict (Dict[str, np.ndarray]): Input dict. - output_expr (Dict[str, Callable]): Output expression. - xticks (Tuple[float, ...]): The list of xtick locations. 
- xticklabels (Tuple[str, ...]): The x-axis' tick labels. - yticks (Tuple[float, ...]): The list of ytick locations. - yticklabels (Tuple[str, ...]): The y-axis' tick labels. - vmin (float): Minimum value that the colormap covers. - vmax (float): Maximal value that the colormap covers. - colorbar_label (str, optional): The color-bar label. Defaults to "". - log_norm (bool, optional): Whether use log norm. Defaults to False. - batch_size (int, optional): : Batch size of data when computing result in visu.py. Defaults to 1. - num_timestamps (int, optional): Number of timestamps. Defaults to 1. - prefix (str, optional): Prefix for output file. Defaults to "plot_weather". - - Examples: - >>> import ppsci - >>> import numpy as np - >>> vis_data = { - ... "output_6h": np.random.randn(1, 720, 1440), - ... "target_6h": np.random.randn(1, 720, 1440), - ... } - >>> visualizer_weather = ppsci.visualize.VisualizerWeather( - ... vis_data, - ... { - ... "output_6h": lambda d: d["output_6h"], - ... "target_6h": lambda d: d["target_6h"], - ... }, - ... xticks=np.linspace(0, 1439, 13), - ... xticklabels=[str(i) for i in range(360, -1, -30)], - ... yticks=np.linspace(0, 719, 7), - ... yticklabels=[str(i) for i in range(90, -91, -30)], - ... vmin=0, - ... vmax=25, - ... prefix="result_states", - ... ) - """ - - def __init__( - self, - input_dict: Dict[str, np.ndarray], - output_expr: Dict[str, Callable], - xticks: Tuple[float, ...], - xticklabels: Tuple[str, ...], - yticks: Tuple[float, ...], - yticklabels: Tuple[str, ...], - vmin: float, - vmax: float, - colorbar_label: str = "", - log_norm: bool = False, - batch_size: int = 1, - num_timestamps: int = 1, - prefix: str = "plot_weather", - ): - super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) - self.xticks = xticks - self.xticklabels = xticklabels - self.yticks = yticks - self.yticklabels = yticklabels - self.vmin = vmin - self.vmax = vmax - self.colorbar_label = colorbar_label - self.log_norm = log_norm - - def save(self, filename, data_dict): - data_dict = {key: data_dict[key] for key in self.output_keys} - value = data_dict[self.output_keys[0]] - # value.shape=(B, H, W) - for i in range(value.shape[0]): - cur_data_dict = {key: value[i] for key, value in data_dict.items()} - plot.save_plot_weather_from_dict( - filename + str(i), - cur_data_dict, - self.output_keys, - self.xticks, - self.xticklabels, - self.yticks, - self.yticklabels, - self.vmin, - self.vmax, - self.colorbar_label, - self.log_norm, - self.num_timestamps, - ) +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os.path as osp +from typing import Callable +from typing import Dict +from typing import Optional +from typing import Tuple + +import numpy as np + +from ppsci.visualize import base +from ppsci.visualize import plot +from ppsci.visualize import vtu + + +class VisualizerScatter1D(base.Visualizer): + """Visualizer for 1d scatter data. 
+ + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + coord_keys (Tuple[str, ...]): Coordinate keys, such as ("x", "y"). + output_expr (Dict[str, Callable]): Output expression. + batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. + num_timestamps (int, optional): Number of timestamps. Defaults to 1. + prefix (str, optional): Prefix for output file. Defaults to "plot". + + Examples: + >>> import ppsci + >>> visu_mat = {"t_f": np.random.randn(16, 1), "eta": np.random.randn(16, 1)} + >>> visualizer_eta = ppsci.visualize.VisualizerScatter1D( + ... visu_mat, + ... ("t_f",), + ... {"eta": lambda d: d["eta"]}, + ... num_timestamps=1, + ... prefix="viv_pred", + ... ) + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + coord_keys: Tuple[str, ...], + output_expr: Dict[str, Callable], + batch_size: int = 64, + num_timestamps: int = 1, + prefix: str = "plot", + ): + super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) + self.coord_keys = coord_keys + + def save(self, filename, data_dict): + plot.save_plot_from_1d_dict( + filename, data_dict, self.coord_keys, self.output_keys, self.num_timestamps + ) + + +class VisualizerScatter3D(base.Visualizer): + """Visualizer for 3d scatter data. + + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + output_expr (Dict[str, Callable]): Output expression. + batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. + num_timestamps (int, optional): Number of timestamps. Defaults to 1. + prefix (str, optional): Prefix for output file. Defaults to "plot3d_scatter". + + Examples: + >>> import ppsci + >>> vis_data = {"states": np.random.randn(16, 1)} + >>> visualizer = ppsci.visualize.VisualizerScatter3D( + ... vis_data, + ... {"states": lambda d: d["states"]}, + ... num_timestamps=1, + ... prefix="result_states", + ... ) + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + output_expr: Dict[str, Callable], + batch_size: int = 64, + num_timestamps: int = 1, + prefix: str = "plot3d_scatter", + ): + super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) + + def save(self, filename, data_dict): + data_dict = { + key: value for key, value in data_dict.items() if key in self.output_keys + } + value = data_dict[self.output_keys[0]] + dim = len(value.shape) + if dim == 3: + # value.shape=(B, T, 3) + for i in range(value.shape[0]): + cur_data_dict = {key: value[i] for key, value in data_dict.items()} + plot.save_plot_from_3d_dict( + filename + str(i), + cur_data_dict, + self.output_keys, + self.num_timestamps, + ) + else: + # value.shape=(T, 3) + plot.save_plot_from_3d_dict( + filename, data_dict, self.output_keys, self.num_timestamps + ) + + +class VisualizerVtu(base.Visualizer): + """Visualizer for 2D points data. + + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + output_expr (Dict[str, Callable]): Output expression. + batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. + num_timestamps (int, optional): Number of timestamps + prefix (str, optional): Prefix for output file. + + Examples: + >>> import ppsci + >>> vis_points = { + ... "x": np.random.randn(128, 1), + ... "y": np.random.randn(128, 1), + ... "u": np.random.randn(128, 1), + ... "v": np.random.randn(128, 1), + ... } + >>> visualizer_u_v = ppsci.visualize.VisualizerVtu( + ... vis_points, + ... {"u": lambda d: d["u"], "v": lambda d: d["v"]}, + ... num_timestamps=1, + ... 
prefix="result_u_v", + ... ) + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + output_expr: Dict[str, Callable], + batch_size: int = 64, + num_timestamps: int = 1, + prefix: str = "vtu", + ): + super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) + + def save(self, filename, data_dict): + vtu.save_vtu_from_dict( + filename, data_dict, self.input_keys, self.output_keys, self.num_timestamps + ) + + +class Visualizer2D(base.Visualizer): + """Visualizer for 2D data. + + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + output_expr (Dict[str, Callable]): Output expression. + batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. + num_timestamps (int, optional): Number of timestamps. Defaults to 1. + prefix (str, optional): Prefix for output file. Defaults to "plot2d". + + Examples: + >>> import ppsci + >>> vis_points = { + ... "x": np.random.randn(128, 1), + ... "y": np.random.randn(128, 1), + ... "u": np.random.randn(128, 1), + ... "v": np.random.randn(128, 1), + ... } + >>> visualizer_u_v = ppsci.visualize.Visualizer2D( + ... vis_points, + ... {"u": lambda d: d["u"], "v": lambda d: d["v"]}, + ... num_timestamps=1, + ... prefix="result_u_v", + ... ) + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + output_expr: Dict[str, Callable], + batch_size: int = 64, + num_timestamps: int = 1, + prefix: str = "plot2d", + ): + super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) + + +class Visualizer2DPlot(Visualizer2D): + """Visualizer for 2D data use matplotlib. + + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + output_expr (Dict[str, Callable]): Output expression. + batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. + num_timestamps (int, optional): Number of timestamps. + stride (int, optional): The time stride of visualization. Defaults to 1. + xticks (Optional[Tuple[float,...]]): The list of xtick locations. Defaults to None. + yticks (Optional[Tuple[float,...]]): The list of ytick locations. Defaults to None. + prefix (str, optional): Prefix for output file. Defaults to "plot2d". + + Examples: + >>> import ppsci + >>> vis_data = { + ... "target_ux": np.random.randn(128, 20, 1), + ... "pred_ux": np.random.randn(128, 20, 1), + ... } + >>> visualizer_states = ppsci.visualize.Visualizer2DPlot( + ... vis_data, + ... { + ... "target_ux": lambda d: d["states"][:, :, 0], + ... "pred_ux": lambda d: output_transform(d)[:, :, 0], + ... }, + ... batch_size=1, + ... num_timestamps=10, + ... stride=20, + ... xticks=np.linspace(-2, 14, 9), + ... yticks=np.linspace(-4, 4, 5), + ... prefix="result_states", + ... 
) + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + output_expr: Dict[str, Callable], + batch_size: int = 64, + num_timestamps: int = 1, + stride: int = 1, + xticks: Optional[Tuple[float, ...]] = None, + yticks: Optional[Tuple[float, ...]] = None, + prefix: str = "plot2d", + ): + super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) + self.stride = stride + self.xticks = xticks + self.yticks = yticks + + def save(self, filename, data_dict): + data_dict = { + key: value for key, value in data_dict.items() if key in self.output_keys + } + value = data_dict[self.output_keys[0]] + dim = len(value.shape) + if dim == 4: + # value.shape=(B, T, H, W) + for i in range(value.shape[0]): + cur_data_dict = {key: value[i] for key, value in data_dict.items()} + plot.save_plot_from_2d_dict( + filename + str(i), + cur_data_dict, + self.output_keys, + self.num_timestamps, + self.stride, + self.xticks, + self.yticks, + ) + else: + # value.shape=(T, H, W) + plot.save_plot_from_2d_dict( + filename, + data_dict, + self.output_keys, + self.num_timestamps, + self.stride, + self.xticks, + self.yticks, + ) + + +class Visualizer3D(base.Visualizer): + """Visualizer for 3D plot data. + + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + output_expr (Dict[str, Callable]): Output expression. + batch_size (int, optional): Batch size of data when computing result in visu.py. Defaults to 64. + label_dict (Dict[str, np.ndarray]): Label dict. + time_list (Optional[Tuple[float, ...]]): Time list. + prefix (str, optional): Prefix for output file. + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + output_expr: Dict[str, Callable], + batch_size: int = 64, + label_dict: Optional[Dict[str, np.ndarray]] = None, + time_list: Optional[Tuple[float, ...]] = None, + prefix: str = "vtu", + ): + self.label = label_dict + self.time_list = time_list + super().__init__(input_dict, output_expr, batch_size, len(time_list), prefix) + + def save(self, filename: str, data_dict: Dict[str, np.ndarray]): + n = int((next(iter(data_dict.values()))).shape[0] / self.num_timestamps) + coord_keys = [x for x in self.input_dict if x != "t"] + for i in range(len(self.time_list)): + vtu.save_vtu_to_mesh( + osp.join(filename, f"predict_{i+1}.vtu"), + {key: (data_dict[key][i * n : (i + 1) * n]) for key in data_dict}, + coord_keys, + self.output_keys, + ) + + +class VisualizerWeather(base.Visualizer): + """Visualizer for weather data use matplotlib. + + Args: + input_dict (Dict[str, np.ndarray]): Input dict. + output_expr (Dict[str, Callable]): Output expression. + xticks (Tuple[float, ...]): The list of xtick locations. + xticklabels (Tuple[str, ...]): The x-axis' tick labels. + yticks (Tuple[float, ...]): The list of ytick locations. + yticklabels (Tuple[str, ...]): The y-axis' tick labels. + vmin (float): Minimum value that the colormap covers. + vmax (float): Maximal value that the colormap covers. + colorbar_label (str, optional): The color-bar label. Defaults to "". + log_norm (bool, optional): Whether use log norm. Defaults to False. + batch_size (int, optional): : Batch size of data when computing result in visu.py. Defaults to 1. + num_timestamps (int, optional): Number of timestamps. Defaults to 1. + prefix (str, optional): Prefix for output file. Defaults to "plot_weather". + + Examples: + >>> import ppsci + >>> import numpy as np + >>> vis_data = { + ... "output_6h": np.random.randn(1, 720, 1440), + ... "target_6h": np.random.randn(1, 720, 1440), + ... 
} + >>> visualizer_weather = ppsci.visualize.VisualizerWeather( + ... vis_data, + ... { + ... "output_6h": lambda d: d["output_6h"], + ... "target_6h": lambda d: d["target_6h"], + ... }, + ... xticks=np.linspace(0, 1439, 13), + ... xticklabels=[str(i) for i in range(360, -1, -30)], + ... yticks=np.linspace(0, 719, 7), + ... yticklabels=[str(i) for i in range(90, -91, -30)], + ... vmin=0, + ... vmax=25, + ... prefix="result_states", + ... ) + """ + + def __init__( + self, + input_dict: Dict[str, np.ndarray], + output_expr: Dict[str, Callable], + xticks: Tuple[float, ...], + xticklabels: Tuple[str, ...], + yticks: Tuple[float, ...], + yticklabels: Tuple[str, ...], + vmin: float, + vmax: float, + colorbar_label: str = "", + log_norm: bool = False, + batch_size: int = 1, + num_timestamps: int = 1, + prefix: str = "plot_weather", + ): + super().__init__(input_dict, output_expr, batch_size, num_timestamps, prefix) + self.xticks = xticks + self.xticklabels = xticklabels + self.yticks = yticks + self.yticklabels = yticklabels + self.vmin = vmin + self.vmax = vmax + self.colorbar_label = colorbar_label + self.log_norm = log_norm + + def save(self, filename, data_dict): + data_dict = {key: data_dict[key] for key in self.output_keys} + value = data_dict[self.output_keys[0]] + # value.shape=(B, H, W) + for i in range(value.shape[0]): + cur_data_dict = {key: value[i] for key, value in data_dict.items()} + plot.save_plot_weather_from_dict( + filename + str(i), + cur_data_dict, + self.output_keys, + self.xticks, + self.xticklabels, + self.yticks, + self.yticklabels, + self.vmin, + self.vmax, + self.colorbar_label, + self.log_norm, + self.num_timestamps, + ) diff --git a/ppsci/visualize/vtu.py b/ppsci/visualize/vtu.py index 500c7e2e84..6b1cd78dcb 100644 --- a/ppsci/visualize/vtu.py +++ b/ppsci/visualize/vtu.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); @@ -276,3 +277,197 @@ def save_vtu_to_mesh( if len(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename), exist_ok=True) mesh.write(filename) +======= +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import os +from typing import Dict +from typing import Tuple + +import meshio +import numpy as np +import paddle +from pyevtk import hl + +from ppsci.utils import logger + + +def _save_vtu_from_array(filename, coord, value, value_keys, num_timestamps=1): + """Save data to '*.vtu' file(s). + + Args: + filename (str): Output filename. + coord (np.ndarray): Coordinate points with shape of [N, 2] or [N, 3]. + value (np.ndarray): Value of each coord points with shape of [N, M]. + value_keys (Tuple[str, ...]): Names of each dimension of value, such as ("u", "v"). + num_timestamps (int, optional): Number of timestamp over coord and value. + Defaults to 1. 
+ """ + if not isinstance(coord, np.ndarray): + raise ValueError(f"type of coord({type(coord)}) should be ndarray.") + if value is not None and not isinstance(value, np.ndarray): + raise ValueError(f"type of value({type(value)}) should be ndarray.") + if value is not None and len(coord) != len(value): + raise ValueError( + f"coord length({len(coord)}) should be equal to value length({len(value)})" + ) + if len(coord) % num_timestamps != 0: + raise ValueError( + f"coord length({len(coord)}) should be an integer multiple of " + f"num_timestamps({num_timestamps})" + ) + if coord.shape[1] not in [2, 3]: + raise ValueError(f"ndim of coord({coord.shape[1]}) should be 2 or 3.") + + if len(os.path.dirname(filename)): + os.makedirs(os.path.dirname(filename), exist_ok=True) + + # discard extension name + if filename.endswith(".vtu"): + filename = filename[:-4] + npoint = len(coord) + coord_ndim = coord.shape[1] + + if value is None: + value = np.ones([npoint, 1], dtype=coord.dtype) + value_keys = ["dummy_key"] + + data_ndim = value.shape[1] + nx = npoint // num_timestamps + for t in range(num_timestamps): + # NOTE: each array in data_vtu should be 1-dim, i.e. [N, 1] will occur error. + if coord_ndim == 2: + axis_x = np.ascontiguousarray(coord[t * nx : (t + 1) * nx, 0]) + axis_y = np.ascontiguousarray(coord[t * nx : (t + 1) * nx, 1]) + axis_z = np.zeros([nx], dtype=paddle.get_default_dtype()) + elif coord_ndim == 3: + axis_x = np.ascontiguousarray(coord[t * nx : (t + 1) * nx, 0]) + axis_y = np.ascontiguousarray(coord[t * nx : (t + 1) * nx, 1]) + axis_z = np.ascontiguousarray(coord[t * nx : (t + 1) * nx, 2]) + + data_vtu = {} + for j in range(data_ndim): + data_vtu[value_keys[j]] = np.ascontiguousarray( + value[t * nx : (t + 1) * nx, j] + ) + + if num_timestamps > 1: + width = len(str(num_timestamps - 1)) + hl.pointsToVTK( + f"{filename}_t-{t:0{width}}", axis_x, axis_y, axis_z, data=data_vtu + ) + else: + hl.pointsToVTK(filename, axis_x, axis_y, axis_z, data=data_vtu) + + if num_timestamps > 1: + logger.message( + f"Visualization results are saved to: {filename}_t-{0:0{width}}.vtu ~ " + f"{filename}_t-{num_timestamps - 1:0{width}}.vtu" + ) + else: + logger.message(f"Visualization result is saved to: {filename}.vtu") + + +def save_vtu_from_dict( + filename: str, + data_dict: Dict[str, np.ndarray], + coord_keys: Tuple[str, ...], + value_keys: Tuple[str, ...], + num_timestamps: int = 1, +): + """Save dict data to '*.vtu' file. + + Args: + filename (str): Output filename. + data_dict (Dict[str, np.ndarray]): Data in dict. + coord_keys (Tuple[str, ...]): Tuple of coord key. such as ("x", "y"). + value_keys (Tuple[str, ...]): Tuple of value key. such as ("u", "v"). + num_timestamps (int, optional): Number of timestamp in data_dict. Defaults to 1. + + Examples: + >>> import ppsci + >>> import numpy as np + >>> filename = "path/to/file.vtu" + >>> data_dict = { + ... "x": np.array([[1], [2], [3],[4]]), + ... "y": np.array([[2], [3], [4],[4]]), + ... "z": np.array([[3], [4], [5],[4]]), + ... "u": np.array([[4], [5], [6],[4]]), + ... "v": np.array([[5], [6], [7],[4]]), + ... 
} + >>> coord_keys = ("x","y","z") + >>> value_keys = ("u","v") + >>> ppsci.visualize.save_vtu_from_dict(filename, data_dict, coord_keys, value_keys) # doctest: +SKIP + """ + if len(coord_keys) not in [2, 3, 4]: + raise ValueError(f"ndim of coord ({len(coord_keys)}) should be 2, 3 or 4") + + coord = [data_dict[k] for k in coord_keys if k not in ("t", "sdf")] + value = [data_dict[k] for k in value_keys] if value_keys else None + + coord = np.concatenate(coord, axis=1) + + if value is not None: + value = np.concatenate(value, axis=1) + + _save_vtu_from_array(filename, coord, value, value_keys, num_timestamps) + + +def save_vtu_to_mesh( + filename: str, + data_dict: Dict[str, np.ndarray], + coord_keys: Tuple[str, ...], + value_keys: Tuple[str, ...], +): + """Save data into .vtu format by meshio. + + Args: + filename (str): File name. + data_dict (Dict[str, np.ndarray]): Data in dict. + coord_keys (Tuple[str, ...]): Tuple of coord key. such as ("x", "y"). + value_keys (Tuple[str, ...]): Tuple of value key. such as ("u", "v"). + + Examples: + >>> import ppsci + >>> import numpy as np + >>> filename = "path/to/file.vtu" + >>> data_dict = { + ... "x": np.array([[1], [2], [3],[4]]), + ... "y": np.array([[2], [3], [4],[4]]), + ... "z": np.array([[3], [4], [5],[4]]), + ... "u": np.array([[4], [5], [6],[4]]), + ... "v": np.array([[5], [6], [7],[4]]), + ... } + >>> coord_keys = ("x","y","z") + >>> value_keys = ("u","v") + >>> ppsci.visualize.save_vtu_to_mesh(filename, data_dict, coord_keys, value_keys) # doctest: +SKIP + """ + npoint = len(next(iter(data_dict.values()))) + coord_ndim = len(coord_keys) + + # get the list variable transposed + points = np.stack(tuple(data_dict[key] for key in coord_keys)).reshape( + coord_ndim, npoint + ) + mesh = meshio.Mesh( + points=points.T, cells=[("vertex", np.arange(npoint).reshape(npoint, 1))] + ) + mesh.point_data = {key: data_dict[key] for key in value_keys} + if len(os.path.dirname(filename)): + os.makedirs(os.path.dirname(filename), exist_ok=True) + mesh.write(filename) +>>>>>>> Stashed changes diff --git a/pyproject.toml b/pyproject.toml index 7e7e83e519..0d8a01954f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream [build-system] requires = ["setuptools>=65", "setuptools_scm"] build-backend = "setuptools.build_meta" @@ -69,3 +70,74 @@ dependencies = { file = ["requirements.txt"] } [tool.isort] profile = "black" +======= +[build-system] +requires = ["setuptools>=65", "setuptools_scm[toml]>=6.2"] +build-backend = "setuptools.build_meta" + +[project] +name = "paddlesci" +dynamic = ["version", "dependencies"] +description = "A library for scientific machine learning" +readme = "README.md" +license = { text = "Apache-2.0" } +authors = [{ name = "PaddlePaddle" }] +requires-python = ">=3.8" +keywords = [ + "Machine learning", + "Deep learning", + "Differential equations", + "AI4Science", + "Physics-informed neural networks", + "PaddlePaddle", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", +] + +[project.urls] +Homepage = "https://github.com/PaddlePaddle/PaddleScience" +"Bug 
Tracker" = "https://github.com/PaddlePaddle/PaddleScience/issues" +Changelog = "https://github.com/PaddlePaddle/PaddleScience/releases" +Documentation = "https://paddlescience-docs.readthedocs.io/zh/latest/" + +[tool.setuptools.packages.find] +where = ["."] +exclude = [ + "docs*", + "examples*", + "jointContribution*", + "test_tipc*", + "test*", + "tools*", +] + +[tool.ruff] +line-length = 88 +ignore = ["E501", "E741", "E731"] +extend-exclude = [ + "./ppsci/geometry/inflation.py", + "./ppsci/autodiff/__init__.py", +] + +[tool.setuptools_scm] +version_file = "ppsci/_version.py" +tag_regex = "v(\\d+\\.\\d+\\.\\d+)" +fallback_version = "0.0.0" +version_scheme = "post-release" + +[tool.setuptools.dynamic] +dependencies = { file = ["requirements.txt"] } + +[tool.isort] +profile = "black" +>>>>>>> Stashed changes diff --git a/recipe/build.sh b/recipe/build.sh index 2ac1d109fc..bdd4eb8214 100644 --- a/recipe/build.sh +++ b/recipe/build.sh @@ -1,2 +1,2 @@ -#!/bin/bash -$PYTHON -m pip install . -i https://pypi.tuna.tsinghua.edu.cn/simple +#!/bin/bash +$PYTHON -m pip install . -i https://pypi.tuna.tsinghua.edu.cn/simple diff --git a/recipe/conda_build_config.yaml b/recipe/conda_build_config.yaml index fee71dfc69..9c27377de8 100644 --- a/recipe/conda_build_config.yaml +++ b/recipe/conda_build_config.yaml @@ -1,9 +1,9 @@ -python: - - 3.8 - - 3.9 - - 3.10 - -numpy: - - 1.26.1 - - 1.26.1 - - 1.26.1 +python: + - 3.8 + - 3.9 + - 3.10 + +numpy: + - 1.26.1 + - 1.26.1 + - 1.26.1 diff --git a/recipe/meta.yaml b/recipe/meta.yaml index e65442868b..583917d251 100644 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -1,105 +1,105 @@ -{% set name = "paddlesci" %} -{% set version = "0.0.0" %} - -package: - name: paddlesci - version: {{ version }} - -source: - path: .. - include: - - "ppsci/" - - "pyproject.toml" - - "LICENSE" - -build: - number: 0 - -requirements: - host: - - python >=3.8,<=3.10 - - pip - - colorlog - - einops - - h5py >3.9.0 - - hydra-core >=1.3.2 - - imageio - - matplotlib - - meshio ==5.3.4 - - numpy - - pydantic >=2.5.0 - - pyevtk - - pyyaml - - requests - - scikit-learn <1.5.0 - - scikit-optimize - - scipy - - seaborn - - sympy - - tqdm - - typing-extensions - - wget - run: - - python >=3.8,<=3.10 - - colorlog - - einops - - h5py >3.9.0 - - hydra-core >=1.3.2 - - imageio - - matplotlib - - meshio ==5.3.4 - - numpy - - pydantic >=2.5.0 - - pyevtk - - pyyaml - - requests - - scikit-learn <1.5.0 - - scikit-optimize - - scipy - - seaborn - - sympy - - tqdm - - typing-extensions - - wget - -about: - home: https://paddlescience-docs.readthedocs.io/zh-cn/latest/ - license: Apache-2.0 - license_family: Apache - license_file: ../LICENSE - summary: 'PaddleScience is SDK and library for developing AI-driven scientific computing applications based on PaddlePaddle.' - - description: | - PaddleScience is SDK and library for developing AI-driven scientific computing applications based on PaddlePaddle. - doc_url: https://paddlescience-docs.readthedocs.io/zh-cn/latest/ - dev_url: https://paddlescience-docs.readthedocs.io/zh-cn/latest/ - -outputs: - ### Please annotate the other versions when building the package with certain python version. 
- # - name: {{ name }} - # skip: true # [osx or py<37 or py>310] - # requirements: - # host: - # - python 3.8.* - # run: - # - python 3.8.* - - - name: {{ name }} - skip: true # [osx or py<37 or py>310] - requirements: - host: - - python 3.9.* - run: - - python 3.9.* - - # - name: {{ name }} - # skip: true # [osx or py<37 or py>310] - # requirements: - # host: - # - python 3.10.* - # run: - # - python 3.10.* - -extra: - recipe-maintainers: - - HydrogenSulfate +{% set name = "paddlesci" %} +{% set version = "0.0.0" %} + +package: + name: paddlesci + version: {{ version }} + +source: + path: .. + include: + - "ppsci/" + - "pyproject.toml" + - "LICENSE" + +build: + number: 0 + +requirements: + host: + - python >=3.8,<=3.10 + - pip + - colorlog + - einops + - h5py >3.9.0 + - hydra-core >=1.3.2 + - imageio + - matplotlib + - meshio ==5.3.4 + - numpy + - pydantic >=2.5.0 + - pyevtk + - pyyaml + - requests + - scikit-learn <1.5.0 + - scikit-optimize + - scipy + - seaborn + - sympy + - tqdm + - typing-extensions + - wget + run: + - python >=3.8,<=3.10 + - colorlog + - einops + - h5py >3.9.0 + - hydra-core >=1.3.2 + - imageio + - matplotlib + - meshio ==5.3.4 + - numpy + - pydantic >=2.5.0 + - pyevtk + - pyyaml + - requests + - scikit-learn <1.5.0 + - scikit-optimize + - scipy + - seaborn + - sympy + - tqdm + - typing-extensions + - wget + +about: + home: https://paddlescience-docs.readthedocs.io/zh-cn/latest/ + license: Apache-2.0 + license_family: Apache + license_file: ../LICENSE + summary: 'PaddleScience is SDK and library for developing AI-driven scientific computing applications based on PaddlePaddle.' + + description: | + PaddleScience is SDK and library for developing AI-driven scientific computing applications based on PaddlePaddle. + doc_url: https://paddlescience-docs.readthedocs.io/zh-cn/latest/ + dev_url: https://paddlescience-docs.readthedocs.io/zh-cn/latest/ + +outputs: + ### Please annotate the other versions when building the package with certain python version. 
+ # - name: {{ name }} + # skip: true # [osx or py<37 or py>310] + # requirements: + # host: + # - python 3.8.* + # run: + # - python 3.8.* + + - name: {{ name }} + skip: true # [osx or py<37 or py>310] + requirements: + host: + - python 3.9.* + run: + - python 3.9.* + + # - name: {{ name }} + # skip: true # [osx or py<37 or py>310] + # requirements: + # host: + # - python 3.10.* + # run: + # - python 3.10.* + +extra: + recipe-maintainers: + - HydrogenSulfate diff --git a/requirements.txt b/requirements.txt index 7efcb16d5f..8723ed8224 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,20 +1,20 @@ -colorlog -einops -h5py -hydra-core -imageio -matplotlib -meshio==5.3.4 -numpy>=1.20.0,<2.0.0 -pydantic>=2.5.0 -pyevtk -pyyaml -requests -scikit-learn<1.5.0 -scikit-optimize -scipy -seaborn -sympy -tqdm -typing-extensions -wget +colorlog +einops +h5py +hydra-core +imageio +matplotlib +meshio==5.3.4 +numpy>=1.20.0,<2.0.0 +pydantic>=2.5.0 +pyevtk +pyyaml +requests +scikit-learn<1.5.0 +scikit-optimize +scipy +seaborn +sympy +tqdm +typing-extensions +wget diff --git a/setup.py b/setup.py index be651f4b8f..4243d2c7bf 100644 --- a/setup.py +++ b/setup.py @@ -1,3 +1,4 @@ +<<<<<<< Updated upstream """ Setup configuration """ @@ -58,3 +59,61 @@ def get_requirements() -> list: use_scm_version=True, setup_requires=["setuptools_scm"], ) +======= +""" +Setup configuration +""" + +import setuptools + + +def get_readme() -> str: + """get README""" + with open("README.md", encoding="utf-8") as f: + return f.read() + + +def get_requirements() -> list: + """get requirements from PaddleScience/requirements.txt""" + req_list = [] + with open("requirements.txt", "r") as f: + req_list = f.read().splitlines() + return req_list + + +if __name__ == "__main__": + setuptools.setup( + name="paddlesci", + author="PaddlePaddle", + url="https://github.com/PaddlePaddle/PaddleScience", + description=( + "PaddleScience is SDK and library for developing AI-driven scientific computing" + " applications based on PaddlePaddle." 
+ ), + long_description=get_readme(), + long_description_content_type="text/markdown", + packages=setuptools.find_packages( + exclude=( + "docs", + "examples", + "jointContribution", + "test_tipc", + "test", + "tools", + ) + ), + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Mathematics", + ], + install_requires=get_requirements(), + ) +>>>>>>> Stashed changes diff --git a/test/equation/test_biharmonic.py b/test/equation/test_biharmonic.py index 8e1d6c2be0..ecd764a243 100644 --- a/test/equation/test_biharmonic.py +++ b/test/equation/test_biharmonic.py @@ -1,79 +1,79 @@ -import paddle -import pytest -import sympy as sp - -import ppsci -from ppsci import arch -from ppsci import equation - -__all__ = [] - - -@pytest.mark.parametrize("dim", (2, 3)) -def test_biharmonic(dim): - """Test for biharmonic equation.""" - batch_size = 13 - input_dims = ("x", "y", "z")[:dim] - output_dims = ("u",) - - q = -1.0 - D = 1.0 - - # generate input data - x = paddle.randn([batch_size, 1]) - y = paddle.randn([batch_size, 1]) - x.stop_gradient = False - y.stop_gradient = False - input_data = paddle.concat([x, y], axis=1) - if dim == 3: - z = paddle.randn([batch_size, 1]) - z.stop_gradient = False - input_data = paddle.concat([x, y, z], axis=1) - - # build NN model - model = arch.MLP(input_dims, output_dims, 2, 16) - - # manually generate output - u = model.forward_tensor(input_data) - - # use self-defined jacobian and hessian - def jacobian(y: "paddle.Tensor", x: "paddle.Tensor") -> "paddle.Tensor": - return paddle.grad(y, x, create_graph=True)[0] - - def hessian(y: "paddle.Tensor", x: "paddle.Tensor") -> "paddle.Tensor": - return jacobian(jacobian(y, x), x) - - # compute expected result - expected_result = -q / D - - # compute fourth order derivative - vars = (x, y) - if dim == 3: - vars += (z,) - for var_i in vars: - for var_j in vars: - expected_result += hessian(hessian(u, var_i), var_j) - - # compute result using built-in Biharmonic module - biharmonic_equation = equation.Biharmonic(dim=dim, q=q, D=D) - for name, expr in biharmonic_equation.equations.items(): - if isinstance(expr, sp.Basic): - biharmonic_equation.equations[name] = ppsci.lambdify( - expr, - model, - ) - data_dict = { - "x": x, - "y": y, - "u": u, - } - if dim == 3: - data_dict["z"] = z - test_result = biharmonic_equation.equations["biharmonic"](data_dict) - - # check result whether is equal - assert paddle.allclose(expected_result, test_result) - - -if __name__ == "__main__": - pytest.main() +import paddle +import pytest +import sympy as sp + +import ppsci +from ppsci import arch +from ppsci import equation + +__all__ = [] + + +@pytest.mark.parametrize("dim", (2, 3)) +def test_biharmonic(dim): + """Test for biharmonic equation.""" + batch_size = 13 + input_dims = ("x", "y", "z")[:dim] + output_dims = ("u",) + + q = -1.0 + D = 1.0 + + # generate input data + x = paddle.randn([batch_size, 1]) + y = paddle.randn([batch_size, 1]) + x.stop_gradient = False + y.stop_gradient = False + input_data = paddle.concat([x, y], axis=1) + if dim == 3: + z = paddle.randn([batch_size, 1]) + z.stop_gradient = False + input_data = 
paddle.concat([x, y, z], axis=1) + + # build NN model + model = arch.MLP(input_dims, output_dims, 2, 16) + + # manually generate output + u = model.forward_tensor(input_data) + + # use self-defined jacobian and hessian + def jacobian(y: "paddle.Tensor", x: "paddle.Tensor") -> "paddle.Tensor": + return paddle.grad(y, x, create_graph=True)[0] + + def hessian(y: "paddle.Tensor", x: "paddle.Tensor") -> "paddle.Tensor": + return jacobian(jacobian(y, x), x) + + # compute expected result + expected_result = -q / D + + # compute fourth order derivative + vars = (x, y) + if dim == 3: + vars += (z,) + for var_i in vars: + for var_j in vars: + expected_result += hessian(hessian(u, var_i), var_j) + + # compute result using built-in Biharmonic module + biharmonic_equation = equation.Biharmonic(dim=dim, q=q, D=D) + for name, expr in biharmonic_equation.equations.items(): + if isinstance(expr, sp.Basic): + biharmonic_equation.equations[name] = ppsci.lambdify( + expr, + model, + ) + data_dict = { + "x": x, + "y": y, + "u": u, + } + if dim == 3: + data_dict["z"] = z + test_result = biharmonic_equation.equations["biharmonic"](data_dict) + + # check result whether is equal + assert paddle.allclose(expected_result, test_result) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/equation/test_detach.py b/test/equation/test_detach.py index e8517454c5..928d2d6dec 100644 --- a/test/equation/test_detach.py +++ b/test/equation/test_detach.py @@ -1,175 +1,175 @@ -import numpy as np -import paddle -import pytest - -import ppsci - -paddle.seed(42) -np.random.seed(42) - - -def test_equation_detach(): - # use N-S equation for test - all_items = [ - "u", - "u__x", - "u__y", - "u__x__x", - "v", - "v__x", - "v__y", - "v__x__x", - "p", - "p__x", - "p__y", - ] - model1 = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16) - model2 = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16) - input_data = { - "x": paddle.randn([16, 1]), - "y": paddle.randn([16, 1]), - } - input_data["x"].stop_gradient = False - input_data["y"].stop_gradient = False - for ii, in_state in enumerate(range(0, 1 << len(all_items), 5)): - detach_keys = [ - item for i, item in enumerate(all_items) if ((1 << i) & in_state) - ] - nu = 1.314 - rho = 0.156 - ns = ppsci.equation.NavierStokes(nu, rho, 2, False, detach_keys=detach_keys) - model2.set_state_dict(model1.state_dict()) - - exprs = ppsci.lambdify( - list(ns.equations.values()), - model1, - fuse_derivative=True, - ) - for name, f in zip(ns.equations, exprs): - input_data[name] = f(input_data) - - def compute_loss(data_dict): - u = data_dict["u"] - v = data_dict["v"] - p = data_dict["p"] - - u__x = data_dict["u__x"] - u__y = data_dict["u__y"] - u__x__x = data_dict["u__x__x"] - u__y__y = data_dict["u__y__y"] - - v = data_dict["v"] - v__x = data_dict["v__x"] - v__y = data_dict["v__y"] - v__x__x = data_dict["v__x__x"] - v__y__y = data_dict["v__y__y"] - - p = data_dict["p"] - p__x = data_dict["p__x"] - p__y = data_dict["p__y"] - - if "u" in detach_keys: - u = u.detach() - if "v" in detach_keys: - v = v.detach() - if "p" in detach_keys: - p = p.detach() - if "u__x" in detach_keys: - u__x = u__x.detach() - if "u__y" in detach_keys: - u__y = u__y.detach() - if "u__x__x" in detach_keys: - u__x__x = u__x__x.detach() - if "u__y__y" in detach_keys: - u__y__y = u__y__y.detach() - if "v__x" in detach_keys: - v__x = v__x.detach() - if "v__y" in detach_keys: - v__y = v__y.detach() - if "v__x__x" in detach_keys: - v__x__x = v__x__x.detach() - if "v__y__y" in detach_keys: - v__y__y = v__y__y.detach() - if 
"p__x" in detach_keys: - p__x = p__x.detach() - if "p__y" in detach_keys: - p__y = p__y.detach() - - # continuity - continuity = u__x + v__y - # momentum_x - momentum_x = ( - u * u__x + v * u__y - nu * (u__x__x + u__y__y) + (1 / rho) * p__x - ) - # momentum_y - momentum_y = ( - u * v__x + v * v__y - nu * (v__x__x + v__y__y) + (1 / rho) * p__y - ) - - return ( - (continuity**2).sum() - + (momentum_x**2).sum() - + (momentum_y**2).sum() - ) - - loss1 = compute_loss(input_data) - - loss1.backward() - - ppsci.autodiff.clear() - - input_data = { - "x": input_data["x"], - "y": input_data["y"], - } - x, y = input_data["x"], input_data["y"] - t = model2(input_data) - u, v, p = t["u"], t["v"], t["p"] - - u__x = ppsci.autodiff.jacobian(u, x) - u__y = ppsci.autodiff.jacobian(u, y) - u__x__x = ppsci.autodiff.hessian(u, x) - u__y__y = ppsci.autodiff.hessian(u, y) - - v__x = ppsci.autodiff.jacobian(v, x) - v__y = ppsci.autodiff.jacobian(v, y) - v__x__x = ppsci.autodiff.hessian(v, x) - v__y__y = ppsci.autodiff.hessian(v, y) - - p__x = ppsci.autodiff.jacobian(p, x) - p__y = ppsci.autodiff.jacobian(p, y) - - loss2 = compute_loss( - { - "u": u, - "v": v, - "p": p, - "u__x": u__x, - "u__y": u__y, - "u__x__x": u__x__x, - "u__y__y": u__y__y, - "v__x": v__x, - "v__y": v__y, - "v__x__x": v__x__x, - "v__y__y": v__y__y, - "p__x": p__x, - "p__y": p__y, - } - ) - loss2.backward() - - np.testing.assert_allclose(loss1.numpy(), loss2.numpy(), 0.0, 0.0) - - for p1, p2 in zip(model1.parameters(), model2.parameters()): - if (p1.grad is None) ^ (p2.grad is None): - raise AssertionError() - if p1.grad is not None and p2.grad is not None: - np.testing.assert_allclose(p1.grad.numpy(), p2.grad.numpy(), 1e-5, 1e-5) - - ppsci.autodiff.clear() - model1.clear_gradients() - model2.clear_gradients() - - -if __name__ == "__main__": - pytest.main() +import numpy as np +import paddle +import pytest + +import ppsci + +paddle.seed(42) +np.random.seed(42) + + +def test_equation_detach(): + # use N-S equation for test + all_items = [ + "u", + "u__x", + "u__y", + "u__x__x", + "v", + "v__x", + "v__y", + "v__x__x", + "p", + "p__x", + "p__y", + ] + model1 = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16) + model2 = ppsci.arch.MLP(("x", "y"), ("u", "v", "p"), 3, 16) + input_data = { + "x": paddle.randn([16, 1]), + "y": paddle.randn([16, 1]), + } + input_data["x"].stop_gradient = False + input_data["y"].stop_gradient = False + for ii, in_state in enumerate(range(0, 1 << len(all_items), 5)): + detach_keys = [ + item for i, item in enumerate(all_items) if ((1 << i) & in_state) + ] + nu = 1.314 + rho = 0.156 + ns = ppsci.equation.NavierStokes(nu, rho, 2, False, detach_keys=detach_keys) + model2.set_state_dict(model1.state_dict()) + + exprs = ppsci.lambdify( + list(ns.equations.values()), + model1, + fuse_derivative=True, + ) + for name, f in zip(ns.equations, exprs): + input_data[name] = f(input_data) + + def compute_loss(data_dict): + u = data_dict["u"] + v = data_dict["v"] + p = data_dict["p"] + + u__x = data_dict["u__x"] + u__y = data_dict["u__y"] + u__x__x = data_dict["u__x__x"] + u__y__y = data_dict["u__y__y"] + + v = data_dict["v"] + v__x = data_dict["v__x"] + v__y = data_dict["v__y"] + v__x__x = data_dict["v__x__x"] + v__y__y = data_dict["v__y__y"] + + p = data_dict["p"] + p__x = data_dict["p__x"] + p__y = data_dict["p__y"] + + if "u" in detach_keys: + u = u.detach() + if "v" in detach_keys: + v = v.detach() + if "p" in detach_keys: + p = p.detach() + if "u__x" in detach_keys: + u__x = u__x.detach() + if "u__y" in detach_keys: + u__y = 
u__y.detach() + if "u__x__x" in detach_keys: + u__x__x = u__x__x.detach() + if "u__y__y" in detach_keys: + u__y__y = u__y__y.detach() + if "v__x" in detach_keys: + v__x = v__x.detach() + if "v__y" in detach_keys: + v__y = v__y.detach() + if "v__x__x" in detach_keys: + v__x__x = v__x__x.detach() + if "v__y__y" in detach_keys: + v__y__y = v__y__y.detach() + if "p__x" in detach_keys: + p__x = p__x.detach() + if "p__y" in detach_keys: + p__y = p__y.detach() + + # continuity + continuity = u__x + v__y + # momentum_x + momentum_x = ( + u * u__x + v * u__y - nu * (u__x__x + u__y__y) + (1 / rho) * p__x + ) + # momentum_y + momentum_y = ( + u * v__x + v * v__y - nu * (v__x__x + v__y__y) + (1 / rho) * p__y + ) + + return ( + (continuity**2).sum() + + (momentum_x**2).sum() + + (momentum_y**2).sum() + ) + + loss1 = compute_loss(input_data) + + loss1.backward() + + ppsci.autodiff.clear() + + input_data = { + "x": input_data["x"], + "y": input_data["y"], + } + x, y = input_data["x"], input_data["y"] + t = model2(input_data) + u, v, p = t["u"], t["v"], t["p"] + + u__x = ppsci.autodiff.jacobian(u, x) + u__y = ppsci.autodiff.jacobian(u, y) + u__x__x = ppsci.autodiff.hessian(u, x) + u__y__y = ppsci.autodiff.hessian(u, y) + + v__x = ppsci.autodiff.jacobian(v, x) + v__y = ppsci.autodiff.jacobian(v, y) + v__x__x = ppsci.autodiff.hessian(v, x) + v__y__y = ppsci.autodiff.hessian(v, y) + + p__x = ppsci.autodiff.jacobian(p, x) + p__y = ppsci.autodiff.jacobian(p, y) + + loss2 = compute_loss( + { + "u": u, + "v": v, + "p": p, + "u__x": u__x, + "u__y": u__y, + "u__x__x": u__x__x, + "u__y__y": u__y__y, + "v__x": v__x, + "v__y": v__y, + "v__x__x": v__x__x, + "v__y__y": v__y__y, + "p__x": p__x, + "p__y": p__y, + } + ) + loss2.backward() + + np.testing.assert_allclose(loss1.numpy(), loss2.numpy(), 0.0, 0.0) + + for p1, p2 in zip(model1.parameters(), model2.parameters()): + if (p1.grad is None) ^ (p2.grad is None): + raise AssertionError() + if p1.grad is not None and p2.grad is not None: + np.testing.assert_allclose(p1.grad.numpy(), p2.grad.numpy(), 1e-5, 1e-5) + + ppsci.autodiff.clear() + model1.clear_gradients() + model2.clear_gradients() + + +if __name__ == "__main__": + pytest.main() diff --git a/test/equation/test_laplace.py b/test/equation/test_laplace.py index 6c438df3e4..04fdbebdde 100644 --- a/test/equation/test_laplace.py +++ b/test/equation/test_laplace.py @@ -1,71 +1,71 @@ -import paddle -import pytest -import sympy as sp - -import ppsci -from ppsci import arch -from ppsci import equation - -__all__ = [] - - -@pytest.mark.parametrize("dim", (2, 3)) -def test_l1loss_mean(dim): - """Test for only mean.""" - batch_size = 13 - input_dims = ("x", "y", "z")[:dim] - output_dims = ("u",) - - # generate input data - x = paddle.randn([batch_size, 1]) - y = paddle.randn([batch_size, 1]) - x.stop_gradient = False - y.stop_gradient = False - input_data = paddle.concat([x, y], axis=1) - if dim == 3: - z = paddle.randn([batch_size, 1]) - z.stop_gradient = False - input_data = paddle.concat([x, y, z], axis=1) - - # build NN model - model = arch.MLP(input_dims, output_dims, 2, 16) - - # manually generate output - u = model.forward_tensor(input_data) - - # use self-defined jacobian and hessian - def jacobian(y: "paddle.Tensor", x: "paddle.Tensor") -> "paddle.Tensor": - return paddle.grad(y, x, create_graph=True)[0] - - def hessian(y: "paddle.Tensor", x: "paddle.Tensor") -> "paddle.Tensor": - return jacobian(jacobian(y, x), x) - - # compute expected result - expected_result = hessian(u, x) + hessian(u, y) - if dim == 3: - 
expected_result += hessian(u, z) - - # compute result using built-in Laplace module - laplace_equation = equation.Laplace(dim=dim) - for name, expr in laplace_equation.equations.items(): - if isinstance(expr, sp.Basic): - laplace_equation.equations[name] = ppsci.lambdify( - expr, - model, - ) - - data_dict = { - "x": x, - "y": y, - "u": u, - } - if dim == 3: - data_dict["z"] = z - test_result = laplace_equation.equations["laplace"](data_dict) - - # check result whether is equal - assert paddle.allclose(expected_result, test_result) - - -if __name__ == "__main__": - pytest.main() +import paddle +import pytest +import sympy as sp + +import ppsci +from ppsci import arch +from ppsci import equation + +__all__ = [] + + +@pytest.mark.parametrize("dim", (2, 3)) +def test_l1loss_mean(dim): + """Test for only mean.""" + batch_size = 13 + input_dims = ("x", "y", "z")[:dim] + output_dims = ("u",) + + # generate input data + x = paddle.randn([batch_size, 1]) + y = paddle.randn([batch_size, 1]) + x.stop_gradient = False + y.stop_gradient = False + input_data = paddle.concat([x, y], axis=1) + if dim == 3: + z = paddle.randn([batch_size, 1]) + z.stop_gradient = False + input_data = paddle.concat([x, y, z], axis=1) + + # build NN model + model = arch.MLP(input_dims, output_dims, 2, 16) + + # manually generate output + u = model.forward_tensor(input_data) + + # use self-defined jacobian and hessian + def jacobian(y: "paddle.Tensor", x: "paddle.Tensor") -> "paddle.Tensor": + return paddle.grad(y, x, create_graph=True)[0] + + def hessian(y: "paddle.Tensor", x: "paddle.Tensor") -> "paddle.Tensor": + return jacobian(jacobian(y, x), x) + + # compute expected result + expected_result = hessian(u, x) + hessian(u, y) + if dim == 3: + expected_result += hessian(u, z) + + # compute result using built-in Laplace module + laplace_equation = equation.Laplace(dim=dim) + for name, expr in laplace_equation.equations.items(): + if isinstance(expr, sp.Basic): + laplace_equation.equations[name] = ppsci.lambdify( + expr, + model, + ) + + data_dict = { + "x": x, + "y": y, + "u": u, + } + if dim == 3: + data_dict["z"] = z + test_result = laplace_equation.equations["laplace"](data_dict) + + # check result whether is equal + assert paddle.allclose(expected_result, test_result) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/equation/test_linear_elasticity.py b/test/equation/test_linear_elasticity.py index 973e3df104..154f257872 100644 --- a/test/equation/test_linear_elasticity.py +++ b/test/equation/test_linear_elasticity.py @@ -1,316 +1,316 @@ -import paddle -import pytest -import sympy as sp - -import ppsci -from ppsci import arch -from ppsci import equation - - -def jacobian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: - return paddle.grad(y, x, create_graph=True)[0] - - -def hessian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: - return jacobian(jacobian(y, x), x) - - -def stress_disp_xx_expected_result(u, v, w, x, y, z, lambda_, mu, dim, sigma_xx): - stress_disp_xx = ( - lambda_ * (jacobian(u, x) + jacobian(v, y)) + 2 * mu * jacobian(u, x) - sigma_xx - ) - if dim == 3: - stress_disp_xx += lambda_ * jacobian(w, z) - return stress_disp_xx - - -def stress_disp_yy_expected_result(u, v, w, x, y, z, lambda_, mu, dim, sigma_yy): - stress_disp_yy = ( - lambda_ * (jacobian(u, x) + jacobian(v, y)) + 2 * mu * jacobian(v, y) - sigma_yy - ) - if dim == 3: - stress_disp_yy += lambda_ * jacobian(w, z) - return stress_disp_yy - - -def stress_disp_zz_expected_result(u, v, w, x, y, z, lambda_, mu, sigma_zz): 
- stress_disp_zz = ( - lambda_ * (jacobian(u, x) + jacobian(v, y) + jacobian(w, z)) - + 2 * mu * jacobian(w, z) - - sigma_zz - ) - return stress_disp_zz - - -def stress_disp_xy_expected_result(u, v, x, y, mu, sigma_xy): - stress_disp_xy = mu * (jacobian(u, y) + jacobian(v, x)) - sigma_xy - return stress_disp_xy - - -def stress_disp_xz_expected_result(u, w, x, z, mu, sigma_xz): - stress_disp_xz = mu * (jacobian(u, z) + jacobian(w, x)) - sigma_xz - return stress_disp_xz - - -def stress_disp_yz_expected_result(v, w, y, z, mu, sigma_yz): - stress_disp_yz = mu * (jacobian(v, z) + jacobian(w, y)) - sigma_yz - return stress_disp_yz - - -def equilibrium_x_expected_result( - u, x, y, z, t, rho, dim, time, sigma_xx, sigma_xy, sigma_xz=None -): - equilibrium_x = -jacobian(sigma_xx, x) - jacobian(sigma_xy, y) - if dim == 3: - equilibrium_x -= jacobian(sigma_xz, z) - if time: - equilibrium_x += rho * hessian(u, t) - return equilibrium_x - - -def equilibrium_y_expected_result( - v, x, y, z, t, rho, dim, time, sigma_yy, sigma_xy, sigma_yz=None -): - equilibrium_y = -jacobian(sigma_xy, x) - jacobian(sigma_yy, y) - if dim == 3: - equilibrium_y -= jacobian(sigma_yz, z) - if time: - equilibrium_y += rho * hessian(v, t) - return equilibrium_y - - -def equilibrium_z_expected_result( - w, x, y, z, t, rho, time, sigma_xz, sigma_yz, sigma_zz -): - equilibrium_z = ( - -jacobian(sigma_xz, x) - jacobian(sigma_yz, y) - jacobian(sigma_zz, z) - ) - if time: - equilibrium_z += rho * hessian(w, t) - return equilibrium_z - - -def traction_x_expected_result( - normal_x, normal_y, sigma_xx, sigma_xy, normal_z=None, sigma_xz=None -): - traction_x = normal_x * sigma_xx + normal_y * sigma_xy - if normal_z is not None and sigma_xz is not None: - traction_x += normal_z * sigma_xz - return traction_x - - -def traction_y_expected_result( - normal_x, normal_y, sigma_xy, sigma_yy, normal_z=None, sigma_yz=None -): - traction_y = normal_x * sigma_xy + normal_y * sigma_yy - if normal_z is not None and sigma_yz is not None: - traction_y += normal_z * sigma_yz - return traction_y - - -def traction_z_expected_result( - normal_x, normal_y, normal_z, sigma_xz, sigma_yz, sigma_zz -): - traction_z = normal_x * sigma_xz + normal_y * sigma_yz + normal_z * sigma_zz - return traction_z - - -@pytest.mark.parametrize( - "E, nu, lambda_, mu, rho, dim, time", - [ - (None, None, 1e3, 1e3, 1, 2, False), - (None, None, 1e3, 1e3, 1, 2, True), - (None, None, 1e3, 1e3, 1, 3, False), - (None, None, 1e3, 1e3, 1, 3, True), - ], -) -def test_linear_elasticity(E, nu, lambda_, mu, rho, dim, time): - paddle.seed(42) - batch_size = 13 - input_dims = ("x", "y", "z")[:dim] - if time: - input_dims += ("t",) - output_dims = ( - ( - "u", - "v", - "sigma_xx", - "sigma_yy", - "sigma_xy", - ) - if dim == 2 - else ( - "u", - "v", - "w", - "sigma_xx", - "sigma_yy", - "sigma_xy", - "sigma_zz", - "sigma_xz", - "sigma_yz", - ) - ) - x = paddle.randn([batch_size, 1]) - y = paddle.randn([batch_size, 1]) - z = paddle.randn([batch_size, 1]) if dim == 3 else None - t = paddle.randn([batch_size, 1]) if time else None - normal_x = paddle.randn([batch_size, 1]) - normal_y = paddle.randn([batch_size, 1]) - normal_z = paddle.randn([batch_size, 1]) if dim == 3 else None - - x.stop_gradient = False - y.stop_gradient = False - if time: - t.stop_gradient = False - if dim == 3: - z.stop_gradient = False - - input_data = paddle.concat([x, y], axis=1) - if time: - input_data = paddle.concat([t, input_data], axis=1) - if dim == 3: - input_data = paddle.concat([input_data, z], axis=1) - - model 
= arch.MLP(input_dims, output_dims, 2, 16) - - # model = nn.Sequential( - # nn.Linear(input_data.shape[1], 9 if dim == 3 else 5), - # nn.Tanh(), - # ) - - output = model.forward_tensor(input_data) - - u, v, *other_outputs = paddle.split(output, num_or_sections=output.shape[1], axis=1) - - if dim == 3: - w = other_outputs[0] - sigma_xx, sigma_xy, sigma_xz, sigma_yy, sigma_yz, sigma_zz = other_outputs[1:] - else: - w = None - sigma_xx, sigma_xy, sigma_yy = other_outputs[0:3] - sigma_xz, sigma_yz, sigma_zz = None, None, None - - expected_stress_disp_xx = stress_disp_xx_expected_result( - u, v, w, x, y, z, lambda_, mu, dim, sigma_xx - ) - expected_stress_disp_yy = stress_disp_yy_expected_result( - u, v, w, x, y, z, lambda_, mu, dim, sigma_yy - ) - expected_stress_disp_xy = stress_disp_xy_expected_result(u, v, x, y, mu, sigma_xy) - expected_equilibrium_x = equilibrium_x_expected_result( - u, x, y, z, t, rho, dim, time, sigma_xx, sigma_xy, sigma_xz - ) - expected_equilibrium_y = equilibrium_y_expected_result( - v, x, y, z, t, rho, dim, time, sigma_yy, sigma_xy, sigma_yz - ) - expected_traction_x = traction_x_expected_result( - normal_x, normal_y, sigma_xx, sigma_xy, normal_z, sigma_xz - ) - expected_traction_y = traction_y_expected_result( - normal_x, normal_y, sigma_xy, sigma_yy, normal_z, sigma_yz - ) - if dim == 3: - expected_stress_disp_zz = stress_disp_zz_expected_result( - u, v, w, x, y, z, lambda_, mu, sigma_zz - ) - expected_stress_disp_xz = stress_disp_xz_expected_result( - u, w, x, z, mu, sigma_xz - ) - expected_stress_disp_yz = stress_disp_yz_expected_result( - v, w, y, z, mu, sigma_yz - ) - expected_equilibrium_z = equilibrium_z_expected_result( - w, x, y, z, t, rho, time, sigma_xz, sigma_yz, sigma_zz - ) - expected_traction_z = traction_z_expected_result( - normal_x, normal_y, normal_z, sigma_xz, sigma_yz, sigma_zz - ) - - linear_elasticity = equation.LinearElasticity( - E=E, nu=nu, lambda_=lambda_, mu=mu, rho=rho, dim=dim, time=time - ) - for name, expr in linear_elasticity.equations.items(): - if isinstance(expr, sp.Basic): - linear_elasticity.equations[name] = ppsci.lambdify( - expr, - model, - ) - data_dict = { - "t": t, - "x": x, - "y": y, - "z": z, - "u": u, - "v": v, - "w": w, - "sigma_xx": sigma_xx, - "sigma_xy": sigma_xy, - "sigma_xz": sigma_xz, - "sigma_yy": sigma_yy, - "sigma_yz": sigma_yz, - "sigma_zz": sigma_zz, - "normal_x": normal_x, - "normal_y": normal_y, - "normal_z": normal_z, - } - if not time: - data_dict.pop("t") - if dim == 2: - data_dict.pop("w") - data_dict.pop("sigma_xz") - data_dict.pop("sigma_yz") - data_dict.pop("sigma_zz") - data_dict.pop("normal_z") - - test_output_names = [ - "stress_disp_xx", - "stress_disp_yy", - "stress_disp_xy", - "equilibrium_x", - "equilibrium_y", - "traction_x", - "traction_y", - ] - - if dim == 3: - test_output_names.extend( - [ - "stress_disp_zz", - "stress_disp_xz", - "stress_disp_yz", - "equilibrium_z", - "traction_z", - ] - ) - - test_output = {} - for name in test_output_names: - test_output[name] = linear_elasticity.equations[name](data_dict) - - expected_output = { - "stress_disp_xx": expected_stress_disp_xx, - "stress_disp_yy": expected_stress_disp_yy, - "stress_disp_xy": expected_stress_disp_xy, - "equilibrium_x": expected_equilibrium_x, - "equilibrium_y": expected_equilibrium_y, - "traction_x": expected_traction_x, - "traction_y": expected_traction_y, - } - if dim == 3: - expected_output.update( - { - "stress_disp_zz": expected_stress_disp_zz, - "stress_disp_xz": expected_stress_disp_xz, - "stress_disp_yz": 
expected_stress_disp_yz, - "equilibrium_z": expected_equilibrium_z, - "traction_z": expected_traction_z, - } - ) - - for name in test_output_names: - assert paddle.allclose(expected_output[name], test_output[name], atol=1e-7) - - -if __name__ == "__main__": - pytest.main() +import paddle +import pytest +import sympy as sp + +import ppsci +from ppsci import arch +from ppsci import equation + + +def jacobian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: + return paddle.grad(y, x, create_graph=True)[0] + + +def hessian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: + return jacobian(jacobian(y, x), x) + + +def stress_disp_xx_expected_result(u, v, w, x, y, z, lambda_, mu, dim, sigma_xx): + stress_disp_xx = ( + lambda_ * (jacobian(u, x) + jacobian(v, y)) + 2 * mu * jacobian(u, x) - sigma_xx + ) + if dim == 3: + stress_disp_xx += lambda_ * jacobian(w, z) + return stress_disp_xx + + +def stress_disp_yy_expected_result(u, v, w, x, y, z, lambda_, mu, dim, sigma_yy): + stress_disp_yy = ( + lambda_ * (jacobian(u, x) + jacobian(v, y)) + 2 * mu * jacobian(v, y) - sigma_yy + ) + if dim == 3: + stress_disp_yy += lambda_ * jacobian(w, z) + return stress_disp_yy + + +def stress_disp_zz_expected_result(u, v, w, x, y, z, lambda_, mu, sigma_zz): + stress_disp_zz = ( + lambda_ * (jacobian(u, x) + jacobian(v, y) + jacobian(w, z)) + + 2 * mu * jacobian(w, z) + - sigma_zz + ) + return stress_disp_zz + + +def stress_disp_xy_expected_result(u, v, x, y, mu, sigma_xy): + stress_disp_xy = mu * (jacobian(u, y) + jacobian(v, x)) - sigma_xy + return stress_disp_xy + + +def stress_disp_xz_expected_result(u, w, x, z, mu, sigma_xz): + stress_disp_xz = mu * (jacobian(u, z) + jacobian(w, x)) - sigma_xz + return stress_disp_xz + + +def stress_disp_yz_expected_result(v, w, y, z, mu, sigma_yz): + stress_disp_yz = mu * (jacobian(v, z) + jacobian(w, y)) - sigma_yz + return stress_disp_yz + + +def equilibrium_x_expected_result( + u, x, y, z, t, rho, dim, time, sigma_xx, sigma_xy, sigma_xz=None +): + equilibrium_x = -jacobian(sigma_xx, x) - jacobian(sigma_xy, y) + if dim == 3: + equilibrium_x -= jacobian(sigma_xz, z) + if time: + equilibrium_x += rho * hessian(u, t) + return equilibrium_x + + +def equilibrium_y_expected_result( + v, x, y, z, t, rho, dim, time, sigma_yy, sigma_xy, sigma_yz=None +): + equilibrium_y = -jacobian(sigma_xy, x) - jacobian(sigma_yy, y) + if dim == 3: + equilibrium_y -= jacobian(sigma_yz, z) + if time: + equilibrium_y += rho * hessian(v, t) + return equilibrium_y + + +def equilibrium_z_expected_result( + w, x, y, z, t, rho, time, sigma_xz, sigma_yz, sigma_zz +): + equilibrium_z = ( + -jacobian(sigma_xz, x) - jacobian(sigma_yz, y) - jacobian(sigma_zz, z) + ) + if time: + equilibrium_z += rho * hessian(w, t) + return equilibrium_z + + +def traction_x_expected_result( + normal_x, normal_y, sigma_xx, sigma_xy, normal_z=None, sigma_xz=None +): + traction_x = normal_x * sigma_xx + normal_y * sigma_xy + if normal_z is not None and sigma_xz is not None: + traction_x += normal_z * sigma_xz + return traction_x + + +def traction_y_expected_result( + normal_x, normal_y, sigma_xy, sigma_yy, normal_z=None, sigma_yz=None +): + traction_y = normal_x * sigma_xy + normal_y * sigma_yy + if normal_z is not None and sigma_yz is not None: + traction_y += normal_z * sigma_yz + return traction_y + + +def traction_z_expected_result( + normal_x, normal_y, normal_z, sigma_xz, sigma_yz, sigma_zz +): + traction_z = normal_x * sigma_xz + normal_y * sigma_yz + normal_z * sigma_zz + return traction_z + + 
+@pytest.mark.parametrize( + "E, nu, lambda_, mu, rho, dim, time", + [ + (None, None, 1e3, 1e3, 1, 2, False), + (None, None, 1e3, 1e3, 1, 2, True), + (None, None, 1e3, 1e3, 1, 3, False), + (None, None, 1e3, 1e3, 1, 3, True), + ], +) +def test_linear_elasticity(E, nu, lambda_, mu, rho, dim, time): + paddle.seed(42) + batch_size = 13 + input_dims = ("x", "y", "z")[:dim] + if time: + input_dims += ("t",) + output_dims = ( + ( + "u", + "v", + "sigma_xx", + "sigma_yy", + "sigma_xy", + ) + if dim == 2 + else ( + "u", + "v", + "w", + "sigma_xx", + "sigma_yy", + "sigma_xy", + "sigma_zz", + "sigma_xz", + "sigma_yz", + ) + ) + x = paddle.randn([batch_size, 1]) + y = paddle.randn([batch_size, 1]) + z = paddle.randn([batch_size, 1]) if dim == 3 else None + t = paddle.randn([batch_size, 1]) if time else None + normal_x = paddle.randn([batch_size, 1]) + normal_y = paddle.randn([batch_size, 1]) + normal_z = paddle.randn([batch_size, 1]) if dim == 3 else None + + x.stop_gradient = False + y.stop_gradient = False + if time: + t.stop_gradient = False + if dim == 3: + z.stop_gradient = False + + input_data = paddle.concat([x, y], axis=1) + if time: + input_data = paddle.concat([t, input_data], axis=1) + if dim == 3: + input_data = paddle.concat([input_data, z], axis=1) + + model = arch.MLP(input_dims, output_dims, 2, 16) + + # model = nn.Sequential( + # nn.Linear(input_data.shape[1], 9 if dim == 3 else 5), + # nn.Tanh(), + # ) + + output = model.forward_tensor(input_data) + + u, v, *other_outputs = paddle.split(output, num_or_sections=output.shape[1], axis=1) + + if dim == 3: + w = other_outputs[0] + sigma_xx, sigma_xy, sigma_xz, sigma_yy, sigma_yz, sigma_zz = other_outputs[1:] + else: + w = None + sigma_xx, sigma_xy, sigma_yy = other_outputs[0:3] + sigma_xz, sigma_yz, sigma_zz = None, None, None + + expected_stress_disp_xx = stress_disp_xx_expected_result( + u, v, w, x, y, z, lambda_, mu, dim, sigma_xx + ) + expected_stress_disp_yy = stress_disp_yy_expected_result( + u, v, w, x, y, z, lambda_, mu, dim, sigma_yy + ) + expected_stress_disp_xy = stress_disp_xy_expected_result(u, v, x, y, mu, sigma_xy) + expected_equilibrium_x = equilibrium_x_expected_result( + u, x, y, z, t, rho, dim, time, sigma_xx, sigma_xy, sigma_xz + ) + expected_equilibrium_y = equilibrium_y_expected_result( + v, x, y, z, t, rho, dim, time, sigma_yy, sigma_xy, sigma_yz + ) + expected_traction_x = traction_x_expected_result( + normal_x, normal_y, sigma_xx, sigma_xy, normal_z, sigma_xz + ) + expected_traction_y = traction_y_expected_result( + normal_x, normal_y, sigma_xy, sigma_yy, normal_z, sigma_yz + ) + if dim == 3: + expected_stress_disp_zz = stress_disp_zz_expected_result( + u, v, w, x, y, z, lambda_, mu, sigma_zz + ) + expected_stress_disp_xz = stress_disp_xz_expected_result( + u, w, x, z, mu, sigma_xz + ) + expected_stress_disp_yz = stress_disp_yz_expected_result( + v, w, y, z, mu, sigma_yz + ) + expected_equilibrium_z = equilibrium_z_expected_result( + w, x, y, z, t, rho, time, sigma_xz, sigma_yz, sigma_zz + ) + expected_traction_z = traction_z_expected_result( + normal_x, normal_y, normal_z, sigma_xz, sigma_yz, sigma_zz + ) + + linear_elasticity = equation.LinearElasticity( + E=E, nu=nu, lambda_=lambda_, mu=mu, rho=rho, dim=dim, time=time + ) + for name, expr in linear_elasticity.equations.items(): + if isinstance(expr, sp.Basic): + linear_elasticity.equations[name] = ppsci.lambdify( + expr, + model, + ) + data_dict = { + "t": t, + "x": x, + "y": y, + "z": z, + "u": u, + "v": v, + "w": w, + "sigma_xx": sigma_xx, + "sigma_xy": 
sigma_xy, + "sigma_xz": sigma_xz, + "sigma_yy": sigma_yy, + "sigma_yz": sigma_yz, + "sigma_zz": sigma_zz, + "normal_x": normal_x, + "normal_y": normal_y, + "normal_z": normal_z, + } + if not time: + data_dict.pop("t") + if dim == 2: + data_dict.pop("w") + data_dict.pop("sigma_xz") + data_dict.pop("sigma_yz") + data_dict.pop("sigma_zz") + data_dict.pop("normal_z") + + test_output_names = [ + "stress_disp_xx", + "stress_disp_yy", + "stress_disp_xy", + "equilibrium_x", + "equilibrium_y", + "traction_x", + "traction_y", + ] + + if dim == 3: + test_output_names.extend( + [ + "stress_disp_zz", + "stress_disp_xz", + "stress_disp_yz", + "equilibrium_z", + "traction_z", + ] + ) + + test_output = {} + for name in test_output_names: + test_output[name] = linear_elasticity.equations[name](data_dict) + + expected_output = { + "stress_disp_xx": expected_stress_disp_xx, + "stress_disp_yy": expected_stress_disp_yy, + "stress_disp_xy": expected_stress_disp_xy, + "equilibrium_x": expected_equilibrium_x, + "equilibrium_y": expected_equilibrium_y, + "traction_x": expected_traction_x, + "traction_y": expected_traction_y, + } + if dim == 3: + expected_output.update( + { + "stress_disp_zz": expected_stress_disp_zz, + "stress_disp_xz": expected_stress_disp_xz, + "stress_disp_yz": expected_stress_disp_yz, + "equilibrium_z": expected_equilibrium_z, + "traction_z": expected_traction_z, + } + ) + + for name in test_output_names: + assert paddle.allclose(expected_output[name], test_output[name], atol=1e-7) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/equation/test_navier_stokes.py b/test/equation/test_navier_stokes.py index b2ba2cfb4f..527894b00a 100644 --- a/test/equation/test_navier_stokes.py +++ b/test/equation/test_navier_stokes.py @@ -1,182 +1,182 @@ -import paddle -import pytest -import sympy as sp - -import ppsci -from ppsci import arch -from ppsci import equation - - -def jacobian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: - return paddle.grad(y, x, create_graph=True)[0] - - -def hessian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: - return jacobian(jacobian(y, x), x) - - -def continuity_compute_func(x, y, u, v, dim, w=None, z=None): - continuity = jacobian(u, x) + jacobian(v, y) - if dim == 3: - continuity += jacobian(w, z) - return continuity - - -def momentum_x_compute_func( - nu, p, rho, x, y, u, v, dim, time=False, w=None, z=None, t=None -): - momentum_x = ( - u * jacobian(u, x) - + v * jacobian(u, y) - - nu * hessian(u, x) - - nu * hessian(u, y) - + 1 / rho * jacobian(p, x) - ) - - if time: - momentum_x += jacobian(u, t) - if dim == 3: - momentum_x += w * jacobian(u, z) - momentum_x -= nu * hessian(u, z) - return momentum_x - - -def momentum_y_compute_func( - nu, p, rho, x, y, u, v, dim, time=False, w=None, z=None, t=None -): - momentum_y = ( - u * jacobian(v, x) - + v * jacobian(v, y) - - nu * hessian(v, x) - - nu * hessian(v, y) - + 1 / rho * jacobian(p, y) - ) - - if time: - momentum_y += jacobian(v, t) - if dim == 3: - momentum_y += w * jacobian(v, z) - momentum_y -= nu * hessian(v, z) - return momentum_y - - -def momentum_z_compute_func( - nu, p, rho, x, y, u, v, dim, time=False, w=None, z=None, t=None -): - momentum_z = ( - u * jacobian(w, x) - + v * jacobian(w, y) - + w * jacobian(w, z) - - nu * hessian(w, x) - - nu * hessian(w, y) - - nu * hessian(w, z) - + 1 / rho * jacobian(p, z) - ) - if time: - momentum_z += jacobian(w, t) - return momentum_z - - -@pytest.mark.parametrize( - "nu,rho,dim,time", - [ - (0.1, 1.0, 3, False), - (0.1, 1.0, 2, False), - (0.1, 
1.0, 3, True), - (0.1, 1.0, 2, True), - ], -) -def test_navierstokes(nu, rho, dim, time): - batch_size = 13 - # generate input data - x = paddle.randn([batch_size, 1]) - y = paddle.randn([batch_size, 1]) - x.stop_gradient = False - y.stop_gradient = False - - input_dims = ("x", "y") - output_dims = ("u", "v", "p") if dim == 2 else ("u", "v", "w", "p") - inputs = (x, y) - - if time: - t = paddle.randn([batch_size, 1]) - t.stop_gradient = False - inputs = (t,) + inputs - input_dims = ("t",) + input_dims - if dim == 3: - z = paddle.randn([batch_size, 1]) - z.stop_gradient = False - inputs = inputs + (z,) - input_dims = input_dims + ("z",) - input_data = paddle.concat(inputs, axis=1) - - model = arch.MLP(input_dims, output_dims, 2, 16) - - # manually generate output - output = model.forward_tensor(input_data) - - if dim == 2: - u, v, p = paddle.split(output, num_or_sections=len(output_dims), axis=1) - w, z = None, None - else: - u, v, w, p = paddle.split(output, num_or_sections=len(output_dims), axis=1) - if not time: - t = None - expected_continuity = continuity_compute_func(x=x, y=y, u=u, v=v, dim=dim, w=w, z=z) - expected_momentum_x = momentum_x_compute_func( - nu=nu, p=p, rho=rho, x=x, y=y, u=u, v=v, dim=dim, time=time, w=w, z=z, t=t - ) - expected_momentum_y = momentum_y_compute_func( - nu=nu, p=p, rho=rho, x=x, y=y, u=u, v=v, dim=dim, time=time, w=w, z=z, t=t - ) - if dim == 3: - expected_momentum_z = momentum_z_compute_func( - nu=nu, p=p, rho=rho, x=x, y=y, u=u, v=v, dim=dim, time=time, w=w, z=z, t=t - ) - - # compute result using NavierStokes class - navier_stokes_equation = equation.NavierStokes(nu=nu, rho=rho, dim=dim, time=time) - for name, expr in navier_stokes_equation.equations.items(): - if isinstance(expr, sp.Basic): - navier_stokes_equation.equations[name] = ppsci.lambdify( - expr, - model, - ) - - data_dict = {"x": x, "y": y, "u": u, "v": v, "p": p} - if time: - data_dict["t"] = t - if dim == 3: - data_dict["z"] = z - data_dict["w"] = w - - test_output_names = [ - "continuity", - "momentum_x", - "momentum_y", - ] - - if dim == 3: - test_output_names.append("momentum_z") - - test_output = {} - for name in test_output_names: - test_output[name] = navier_stokes_equation.equations[name](data_dict) - - expected_output = { - "continuity": expected_continuity, - "momentum_x": expected_momentum_x, - "momentum_y": expected_momentum_y, - } - if dim == 3: - expected_output["momentum_z"] = expected_momentum_z - - # check result whether is equal - for name in test_output_names: - assert paddle.allclose( - expected_output[name], test_output[name], atol=1e-7 - ), f"{name}" - - -if __name__ == "__main__": - pytest.main() +import paddle +import pytest +import sympy as sp + +import ppsci +from ppsci import arch +from ppsci import equation + + +def jacobian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: + return paddle.grad(y, x, create_graph=True)[0] + + +def hessian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: + return jacobian(jacobian(y, x), x) + + +def continuity_compute_func(x, y, u, v, dim, w=None, z=None): + continuity = jacobian(u, x) + jacobian(v, y) + if dim == 3: + continuity += jacobian(w, z) + return continuity + + +def momentum_x_compute_func( + nu, p, rho, x, y, u, v, dim, time=False, w=None, z=None, t=None +): + momentum_x = ( + u * jacobian(u, x) + + v * jacobian(u, y) + - nu * hessian(u, x) + - nu * hessian(u, y) + + 1 / rho * jacobian(p, x) + ) + + if time: + momentum_x += jacobian(u, t) + if dim == 3: + momentum_x += w * jacobian(u, z) + momentum_x -= nu * 
hessian(u, z) + return momentum_x + + +def momentum_y_compute_func( + nu, p, rho, x, y, u, v, dim, time=False, w=None, z=None, t=None +): + momentum_y = ( + u * jacobian(v, x) + + v * jacobian(v, y) + - nu * hessian(v, x) + - nu * hessian(v, y) + + 1 / rho * jacobian(p, y) + ) + + if time: + momentum_y += jacobian(v, t) + if dim == 3: + momentum_y += w * jacobian(v, z) + momentum_y -= nu * hessian(v, z) + return momentum_y + + +def momentum_z_compute_func( + nu, p, rho, x, y, u, v, dim, time=False, w=None, z=None, t=None +): + momentum_z = ( + u * jacobian(w, x) + + v * jacobian(w, y) + + w * jacobian(w, z) + - nu * hessian(w, x) + - nu * hessian(w, y) + - nu * hessian(w, z) + + 1 / rho * jacobian(p, z) + ) + if time: + momentum_z += jacobian(w, t) + return momentum_z + + +@pytest.mark.parametrize( + "nu,rho,dim,time", + [ + (0.1, 1.0, 3, False), + (0.1, 1.0, 2, False), + (0.1, 1.0, 3, True), + (0.1, 1.0, 2, True), + ], +) +def test_navierstokes(nu, rho, dim, time): + batch_size = 13 + # generate input data + x = paddle.randn([batch_size, 1]) + y = paddle.randn([batch_size, 1]) + x.stop_gradient = False + y.stop_gradient = False + + input_dims = ("x", "y") + output_dims = ("u", "v", "p") if dim == 2 else ("u", "v", "w", "p") + inputs = (x, y) + + if time: + t = paddle.randn([batch_size, 1]) + t.stop_gradient = False + inputs = (t,) + inputs + input_dims = ("t",) + input_dims + if dim == 3: + z = paddle.randn([batch_size, 1]) + z.stop_gradient = False + inputs = inputs + (z,) + input_dims = input_dims + ("z",) + input_data = paddle.concat(inputs, axis=1) + + model = arch.MLP(input_dims, output_dims, 2, 16) + + # manually generate output + output = model.forward_tensor(input_data) + + if dim == 2: + u, v, p = paddle.split(output, num_or_sections=len(output_dims), axis=1) + w, z = None, None + else: + u, v, w, p = paddle.split(output, num_or_sections=len(output_dims), axis=1) + if not time: + t = None + expected_continuity = continuity_compute_func(x=x, y=y, u=u, v=v, dim=dim, w=w, z=z) + expected_momentum_x = momentum_x_compute_func( + nu=nu, p=p, rho=rho, x=x, y=y, u=u, v=v, dim=dim, time=time, w=w, z=z, t=t + ) + expected_momentum_y = momentum_y_compute_func( + nu=nu, p=p, rho=rho, x=x, y=y, u=u, v=v, dim=dim, time=time, w=w, z=z, t=t + ) + if dim == 3: + expected_momentum_z = momentum_z_compute_func( + nu=nu, p=p, rho=rho, x=x, y=y, u=u, v=v, dim=dim, time=time, w=w, z=z, t=t + ) + + # compute result using NavierStokes class + navier_stokes_equation = equation.NavierStokes(nu=nu, rho=rho, dim=dim, time=time) + for name, expr in navier_stokes_equation.equations.items(): + if isinstance(expr, sp.Basic): + navier_stokes_equation.equations[name] = ppsci.lambdify( + expr, + model, + ) + + data_dict = {"x": x, "y": y, "u": u, "v": v, "p": p} + if time: + data_dict["t"] = t + if dim == 3: + data_dict["z"] = z + data_dict["w"] = w + + test_output_names = [ + "continuity", + "momentum_x", + "momentum_y", + ] + + if dim == 3: + test_output_names.append("momentum_z") + + test_output = {} + for name in test_output_names: + test_output[name] = navier_stokes_equation.equations[name](data_dict) + + expected_output = { + "continuity": expected_continuity, + "momentum_x": expected_momentum_x, + "momentum_y": expected_momentum_y, + } + if dim == 3: + expected_output["momentum_z"] = expected_momentum_z + + # check result whether is equal + for name in test_output_names: + assert paddle.allclose( + expected_output[name], test_output[name], atol=1e-7 + ), f"{name}" + + +if __name__ == "__main__": + 
pytest.main() diff --git a/test/equation/test_normal_dot_vec.py b/test/equation/test_normal_dot_vec.py index e701d2ea68..c3035060e9 100644 --- a/test/equation/test_normal_dot_vec.py +++ b/test/equation/test_normal_dot_vec.py @@ -1,63 +1,63 @@ -import paddle -import pytest -import sympy as sp - -import ppsci -from ppsci import arch -from ppsci import equation - - -def compute_func(x: tuple, y: tuple): - z_i = paddle.zeros_like(x[0]) - for x_i, y_i in zip(x, y): - z_i += x_i * y_i - return z_i - - -def test_normal_dot_vel(): - batch_size = 13 - x = paddle.randn([batch_size, 1]) - y = paddle.randn([batch_size, 1]) - z = paddle.randn([batch_size, 1]) - input_dims = ("x", "y", "z") - output_dims = ("u", "v", "w") - model = arch.MLP(input_dims, output_dims, 2, 16) - output_dict = model( - { - "x": x, - "y": y, - "z": z, - } - ) - u = output_dict["u"] - v = output_dict["v"] - w = output_dict["w"] - - normal_x = paddle.randn([batch_size, 1]) - normal_y = paddle.randn([batch_size, 1]) - normal_z = paddle.randn([batch_size, 1]) - - norm_doc_vec = equation.NormalDotVec(output_dims) - for name, expr in norm_doc_vec.equations.items(): - if isinstance(expr, sp.Basic): - norm_doc_vec.equations[name] = ppsci.lambdify( - expr, - model, - ) - out = { - "u": u, - "v": v, - "w": w, - "normal_x": normal_x, - "normal_y": normal_y, - "normal_z": normal_z, - } - - expected_result = compute_func((u, v, w), (normal_x, normal_y, normal_z)) - assert paddle.allclose( - norm_doc_vec.equations["normal_dot_vec"](out), expected_result - ) - - -if __name__ == "__main__": - pytest.main() +import paddle +import pytest +import sympy as sp + +import ppsci +from ppsci import arch +from ppsci import equation + + +def compute_func(x: tuple, y: tuple): + z_i = paddle.zeros_like(x[0]) + for x_i, y_i in zip(x, y): + z_i += x_i * y_i + return z_i + + +def test_normal_dot_vel(): + batch_size = 13 + x = paddle.randn([batch_size, 1]) + y = paddle.randn([batch_size, 1]) + z = paddle.randn([batch_size, 1]) + input_dims = ("x", "y", "z") + output_dims = ("u", "v", "w") + model = arch.MLP(input_dims, output_dims, 2, 16) + output_dict = model( + { + "x": x, + "y": y, + "z": z, + } + ) + u = output_dict["u"] + v = output_dict["v"] + w = output_dict["w"] + + normal_x = paddle.randn([batch_size, 1]) + normal_y = paddle.randn([batch_size, 1]) + normal_z = paddle.randn([batch_size, 1]) + + norm_doc_vec = equation.NormalDotVec(output_dims) + for name, expr in norm_doc_vec.equations.items(): + if isinstance(expr, sp.Basic): + norm_doc_vec.equations[name] = ppsci.lambdify( + expr, + model, + ) + out = { + "u": u, + "v": v, + "w": w, + "normal_x": normal_x, + "normal_y": normal_y, + "normal_z": normal_z, + } + + expected_result = compute_func((u, v, w), (normal_x, normal_y, normal_z)) + assert paddle.allclose( + norm_doc_vec.equations["normal_dot_vec"](out), expected_result + ) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/equation/test_pde_base.py b/test/equation/test_pde_base.py index eb9265ea74..0a34777a7f 100644 --- a/test/equation/test_pde_base.py +++ b/test/equation/test_pde_base.py @@ -1,136 +1,136 @@ -import paddle -import pytest -import sympy -from paddle import nn - -from ppsci.equation import PDE - - -class Test_PDE: - def test_pde_init(self): - """ - Testing the PDE class initialization - """ - pde = PDE() - assert isinstance(pde, PDE) - assert isinstance(pde.equations, dict) - assert isinstance(pde.learnable_parameters, nn.ParameterList) - - def test_pde_add_equation(self): - """ - initiate a PDE object and add an 
equation to it - """ - pde = PDE() - - def simple_equation(out): - x, y = out["x"], out["y"] - return x + y - - pde.add_equation("simple", simple_equation) - - assert "simple" in pde.equations - assert pde.equations["simple"] == simple_equation - assert pde.equations["simple"]({"x": 1, "y": 2}) == 3 - - # redefine the equation and add again - def simple_equation2(out): - x, y = out["x"], out["y"] - return x - y - - pde.add_equation("simple", simple_equation2) - - assert pde.equations["simple"] == simple_equation2 - assert pde.equations["simple"]({"x": 1, "y": 2}) == -1 - - def test_pde_create_symbols(self): - """ - initiate a PDE object and add three symbols to it - """ - pde = PDE() - - # create symbols - x, y, z = pde.create_symbols("x y z") - assert isinstance(x, sympy.Symbol) - assert isinstance(y, sympy.Symbol) - assert isinstance(z, sympy.Symbol) - - def test_pde_create_function(self): - """ - initiate a PDE object and add a symbolic function to it - """ - pde = PDE() - - # create symbols - x, y, z = pde.create_symbols("x y z") - - # create a function - f = pde.create_function(name="f", invars=(x, y, z)) - assert isinstance(f, sympy.Function) - assert f.args == (x, y, z) - - def test_pde_parameters(self): - """ - initiate a PDE object and add a learnable parameter to it - """ - pde = PDE() - - assert len(pde.parameters()) == 0 - - # add a learnable parameter - pde.learnable_parameters.append( - paddle.create_parameter(shape=[1], dtype="float32") - ) - - assert len(pde.parameters()) == 1 - - def test_pde_state_dict(self): - """ - initiate a PDE object, add a learnable parameter to it and check its state dict - """ - pde = PDE() - - assert len(pde.state_dict()) == 0 - - # add a learnable parameter - pde.learnable_parameters.append( - paddle.create_parameter(shape=[1], dtype="float32") - ) - - assert len(pde.state_dict()) == 1 - - def test_pde_set_state_dict(self): - """ - initiate a PDE object, set its state dict and check its state dict - """ - pde = PDE() - - assert len(pde.state_dict()) == 0 - # this is a paddle nn.ParameterList() - pde.learnable_parameters.append( - paddle.create_parameter(shape=[1], dtype="float32") - ) - external_state = pde.state_dict() - # change the value in the external_state to 2 - external_state["0"] = paddle.to_tensor([2.0]) - pde.set_state_dict(external_state) - assert pde.state_dict()["0"] == paddle.to_tensor([2.0]) - - def test_str(self): - """ - initiate a PDE object and check its string representation - """ - pde = PDE() - assert str(pde) == "PDE" - - # add an equation - def simple_equation(out): - x, y = out["x"], out["y"] - return x + y - - pde.add_equation("simple", simple_equation) - - assert str(pde).startswith("PDE\n simple: ") - - -if __name__ == "__main__": - pytest.main() +import paddle +import pytest +import sympy +from paddle import nn + +from ppsci.equation import PDE + + +class Test_PDE: + def test_pde_init(self): + """ + Testing the PDE class initialization + """ + pde = PDE() + assert isinstance(pde, PDE) + assert isinstance(pde.equations, dict) + assert isinstance(pde.learnable_parameters, nn.ParameterList) + + def test_pde_add_equation(self): + """ + initiate a PDE object and add an equation to it + """ + pde = PDE() + + def simple_equation(out): + x, y = out["x"], out["y"] + return x + y + + pde.add_equation("simple", simple_equation) + + assert "simple" in pde.equations + assert pde.equations["simple"] == simple_equation + assert pde.equations["simple"]({"x": 1, "y": 2}) == 3 + + # redefine the equation and add again + def 
simple_equation2(out): + x, y = out["x"], out["y"] + return x - y + + pde.add_equation("simple", simple_equation2) + + assert pde.equations["simple"] == simple_equation2 + assert pde.equations["simple"]({"x": 1, "y": 2}) == -1 + + def test_pde_create_symbols(self): + """ + initiate a PDE object and add three symbols to it + """ + pde = PDE() + + # create symbols + x, y, z = pde.create_symbols("x y z") + assert isinstance(x, sympy.Symbol) + assert isinstance(y, sympy.Symbol) + assert isinstance(z, sympy.Symbol) + + def test_pde_create_function(self): + """ + initiate a PDE object and add a symbolic function to it + """ + pde = PDE() + + # create symbols + x, y, z = pde.create_symbols("x y z") + + # create a function + f = pde.create_function(name="f", invars=(x, y, z)) + assert isinstance(f, sympy.Function) + assert f.args == (x, y, z) + + def test_pde_parameters(self): + """ + initiate a PDE object and add a learnable parameter to it + """ + pde = PDE() + + assert len(pde.parameters()) == 0 + + # add a learnable parameter + pde.learnable_parameters.append( + paddle.create_parameter(shape=[1], dtype="float32") + ) + + assert len(pde.parameters()) == 1 + + def test_pde_state_dict(self): + """ + initiate a PDE object, add a learnable parameter to it and check its state dict + """ + pde = PDE() + + assert len(pde.state_dict()) == 0 + + # add a learnable parameter + pde.learnable_parameters.append( + paddle.create_parameter(shape=[1], dtype="float32") + ) + + assert len(pde.state_dict()) == 1 + + def test_pde_set_state_dict(self): + """ + initiate a PDE object, set its state dict and check its state dict + """ + pde = PDE() + + assert len(pde.state_dict()) == 0 + # this is a paddle nn.ParameterList() + pde.learnable_parameters.append( + paddle.create_parameter(shape=[1], dtype="float32") + ) + external_state = pde.state_dict() + # change the value in the external_state to 2 + external_state["0"] = paddle.to_tensor([2.0]) + pde.set_state_dict(external_state) + assert pde.state_dict()["0"] == paddle.to_tensor([2.0]) + + def test_str(self): + """ + initiate a PDE object and check its string representation + """ + pde = PDE() + assert str(pde) == "PDE" + + # add an equation + def simple_equation(out): + x, y = out["x"], out["y"] + return x + y + + pde.add_equation("simple", simple_equation) + + assert str(pde).startswith("PDE\n simple: ") + + +if __name__ == "__main__": + pytest.main() diff --git a/test/equation/test_poisson.py b/test/equation/test_poisson.py index ca86d98db2..a88409c33d 100644 --- a/test/equation/test_poisson.py +++ b/test/equation/test_poisson.py @@ -1,83 +1,83 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import paddle -import pytest -import sympy as sp - -import ppsci -from ppsci import arch -from ppsci import equation - -__all__ = [] - - -@pytest.mark.parametrize("dim", (2, 3)) -def test_poisson(dim): - """Test for only mean.""" - batch_size = 13 - input_dims = ("x", "y", "z")[:dim] - output_dims = ("p",) - - # generate input data - x = paddle.randn([batch_size, 1]) - y = paddle.randn([batch_size, 1]) - x.stop_gradient = False - y.stop_gradient = False - input_data = paddle.concat([x, y], axis=1) - if dim == 3: - z = paddle.randn([batch_size, 1]) - z.stop_gradient = False - input_data = paddle.concat([x, y, z], axis=1) - - # build NN model - model = arch.MLP(input_dims, output_dims, 2, 16) - - # manually generate output - p = model.forward_tensor(input_data) - - def jacobian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: - return paddle.grad(y, x, create_graph=True)[0] - - def hessian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor: - return jacobian(jacobian(y, x), x) - - # compute expected result - expected_result = hessian(p, x) + hessian(p, y) - if dim == 3: - expected_result += hessian(p, z) - - # compute result using built-in Laplace module - poisson_equation = equation.Poisson(dim=dim) - for name, expr in poisson_equation.equations.items(): - if isinstance(expr, sp.Basic): - poisson_equation.equations[name] = ppsci.lambdify( - expr, - model, - ) - - data_dict = { - "x": x, - "y": y, - "p": p, - } - if dim == 3: - data_dict["z"] = z - test_result = poisson_equation.equations["poisson"](data_dict) - # check result whether is equal - assert paddle.allclose(expected_result, test_result) - - -if __name__ == "__main__": - pytest.main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import paddle
+import pytest
+import sympy as sp
+
+import ppsci
+from ppsci import arch
+from ppsci import equation
+
+__all__ = []
+
+
+@pytest.mark.parametrize("dim", (2, 3))
+def test_poisson(dim):
+    """Test for Poisson equation."""
+    batch_size = 13
+    input_dims = ("x", "y", "z")[:dim]
+    output_dims = ("p",)
+
+    # generate input data
+    x = paddle.randn([batch_size, 1])
+    y = paddle.randn([batch_size, 1])
+    x.stop_gradient = False
+    y.stop_gradient = False
+    input_data = paddle.concat([x, y], axis=1)
+    if dim == 3:
+        z = paddle.randn([batch_size, 1])
+        z.stop_gradient = False
+        input_data = paddle.concat([x, y, z], axis=1)
+
+    # build NN model
+    model = arch.MLP(input_dims, output_dims, 2, 16)
+
+    # manually generate output
+    p = model.forward_tensor(input_data)
+
+    def jacobian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor:
+        return paddle.grad(y, x, create_graph=True)[0]
+
+    def hessian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor:
+        return jacobian(jacobian(y, x), x)
+
+    # compute expected result
+    expected_result = hessian(p, x) + hessian(p, y)
+    if dim == 3:
+        expected_result += hessian(p, z)
+
+    # compute result using the built-in Poisson module
+    poisson_equation = equation.Poisson(dim=dim)
+    for name, expr in poisson_equation.equations.items():
+        if isinstance(expr, sp.Basic):
+            poisson_equation.equations[name] = ppsci.lambdify(
+                expr,
+                model,
+            )
+
+    data_dict = {
+        "x": x,
+        "y": y,
+        "p": p,
+    }
+    if dim == 3:
+        data_dict["z"] = z
+    test_result = poisson_equation.equations["poisson"](data_dict)
+    # check that the result matches the expected value
+    assert paddle.allclose(expected_result, test_result)
+
+
+if __name__ == "__main__":
+    pytest.main()
diff --git a/test/equation/test_viv.py b/test/equation/test_viv.py
index 2dc979912d..9f14e19070 100644
--- a/test/equation/test_viv.py
+++ b/test/equation/test_viv.py
@@ -1,69 +1,69 @@
-import paddle
-import pytest
-import sympy as sp
-from paddle.nn import initializer
-
-import ppsci
-from ppsci import arch
-from ppsci.equation.pde import Vibration
-
-
-@pytest.mark.parametrize("rho,k1,k2", [(1.0, 4.0, -1.0)])
-def test_vibration(rho, k1, k2):
-    """Test for Vibration equation."""
-    batch_size = 13
-    rho = rho
-    k11 = paddle.create_parameter(
-        shape=[],
-        dtype=paddle.get_default_dtype(),
-        name="k11",
-        default_initializer=initializer.Constant(k1),
-    )
-    k22 = paddle.create_parameter(
-        shape=[],
-        name="k22",
-        dtype=paddle.get_default_dtype(),
-        default_initializer=initializer.Constant(k2),
-    )
-    # generate input data
-    t_f = paddle.randn([batch_size, 1])
-    eta = paddle.randn([batch_size, 1])
-    t_f.stop_gradient = False
-    eta.stop_gradient = False
-    input_data = t_f
-    input_dims = ("t_f",)
-    output_dims = ("eta",)
-    model = arch.MLP(input_dims, output_dims, 2, 16)
-
-    # manually generate output
-    eta = model.forward_tensor(input_data)
-
-    def jacobian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor:
-        return paddle.grad(y, x, create_graph=True)[0]
-
-    def hessian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor:
-        return jacobian(jacobian(y, x), x)
-
-    expected_result = (
-        rho * hessian(eta, t_f)
-        + paddle.exp(k11) * jacobian(eta, t_f)
-        + paddle.exp(k22) * eta
-    )
-
-    # compute result using Vibration class
-    vibration_equation = Vibration(rho=rho, k1=k1, k2=k2)
-    for name, expr in vibration_equation.equations.items():
-        if isinstance(expr, sp.Basic):
-            vibration_equation.equations[name] = ppsci.lambdify(
-                expr,
-                model,
-                vibration_equation.learnable_parameters,
-            )
-    input_data_dict = {"t_f": t_f}
-    test_result = vibration_equation.equations["f"](input_data_dict)
-    # check result whether is equal
-    assert paddle.allclose(expected_result, test_result)
-
-
-if __name__ == "__main__":
-    pytest.main()
+import paddle
+import pytest
+import sympy as sp
+from paddle.nn import initializer
+
+import ppsci
+from ppsci import arch
+from ppsci.equation.pde import Vibration
+
+
+@pytest.mark.parametrize("rho,k1,k2", [(1.0, 4.0, -1.0)])
+def test_vibration(rho, k1, k2):
+    """Test for Vibration equation."""
+    batch_size = 13
+    rho = rho
+    k11 = paddle.create_parameter(
+        shape=[],
+        dtype=paddle.get_default_dtype(),
+        name="k11",
+        default_initializer=initializer.Constant(k1),
+    )
+    k22 = paddle.create_parameter(
+        shape=[],
+        name="k22",
+        dtype=paddle.get_default_dtype(),
+        default_initializer=initializer.Constant(k2),
+    )
+    # generate input data
+    t_f = paddle.randn([batch_size, 1])
+    eta = paddle.randn([batch_size, 1])
+    t_f.stop_gradient = False
+    eta.stop_gradient = False
+    input_data = t_f
+    input_dims = ("t_f",)
+    output_dims = ("eta",)
+    model = arch.MLP(input_dims, output_dims, 2, 16)
+
+    # manually generate output
+    eta = model.forward_tensor(input_data)
+
+    def jacobian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor:
+        return paddle.grad(y, x, create_graph=True)[0]
+
+    def hessian(y: paddle.Tensor, x: paddle.Tensor) -> paddle.Tensor:
+        return jacobian(jacobian(y, x), x)
+
+    expected_result = (
+        rho * hessian(eta, t_f)
+        + paddle.exp(k11) * jacobian(eta, t_f)
+        + paddle.exp(k22) * eta
+    )
+
+    # compute result using Vibration class
+    vibration_equation = Vibration(rho=rho, k1=k1, k2=k2)
+    for name, expr in vibration_equation.equations.items():
+        if isinstance(expr, sp.Basic):
+            vibration_equation.equations[name] = ppsci.lambdify(
+                expr,
+                model,
+                vibration_equation.learnable_parameters,
+            )
+    input_data_dict = {"t_f": t_f}
+    test_result = vibration_equation.equations["f"](input_data_dict)
+    # check that the result matches the expected value
+    assert paddle.allclose(expected_result, test_result)
+
+
+if __name__ == "__main__":
+    pytest.main()
diff --git a/test/experimental/test_gaussian_integrate.py b/test/experimental/test_gaussian_integrate.py
index ff6430fd40..93aa260e23 100644
--- a/test/experimental/test_gaussian_integrate.py
+++ b/test/experimental/test_gaussian_integrate.py
@@ -1,70 +1,70 @@
-# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import Callable
-from typing import List
-
-import numpy as np
-import paddle
-import pytest
-
-from ppsci.experimental import gaussian_integrate
-
-paddle.seed(1024)
-
-
-@pytest.mark.parametrize(
-    "fn,dim,integration_domains,antideriv_func",
-    [
-        (lambda x: paddle.exp(x), 1, [[-np.pi, np.pi * 2]], lambda x: np.exp(x)),
-        (
-            lambda x: paddle.sin(x[:, 0]) + paddle.cos(x[:, 1]),
-            2,
-            [[-np.pi, np.pi * 2], [-10, -np.pi * 3]],
-            lambda x, y: -y * np.cos(x) + x * np.sin(y),
-        ),
-    ],
-)
-@pytest.mark.parametrize("N", [int(1e2 + 1), int(1e3 + 1), int(1e4 + 1)])
-def test_gaussian_integrate(
-    fn: Callable,
-    dim: int,
-    N: int,
-    integration_domains: List[List[float]],
-    antideriv_func: Callable,
-):
-    integrate_result = gaussian_integrate(fn, dim, N, integration_domains)
-    if dim == 1:
-        a, b = integration_domains[0][0], integration_domains[0][1]
-        reference_result = antideriv_func(b) - antideriv_func(a)
-    elif dim == 2:
-        a, b, c, d = (
-            integration_domains[0][0],
-            integration_domains[0][1],
-            integration_domains[1][0],
-            integration_domains[1][1],
-        )
-        reference_result = (
-            antideriv_func(b, d)
-            - antideriv_func(a, d)
-            - antideriv_func(b, c)
-            + antideriv_func(a, c)
-        )
-    else:
-        raise NotImplementedError
-    assert np.allclose(integrate_result.numpy(), reference_result)
-
-
-if __name__ == "__main__":
-    pytest.main()
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from typing import Callable +from typing import List + +import numpy as np +import paddle +import pytest + +from ppsci.experimental import gaussian_integrate + +paddle.seed(1024) + + +@pytest.mark.parametrize( + "fn,dim,integration_domains,antideriv_func", + [ + (lambda x: paddle.exp(x), 1, [[-np.pi, np.pi * 2]], lambda x: np.exp(x)), + ( + lambda x: paddle.sin(x[:, 0]) + paddle.cos(x[:, 1]), + 2, + [[-np.pi, np.pi * 2], [-10, -np.pi * 3]], + lambda x, y: -y * np.cos(x) + x * np.sin(y), + ), + ], +) +@pytest.mark.parametrize("N", [int(1e2 + 1), int(1e3 + 1), int(1e4 + 1)]) +def test_gaussian_integrate( + fn: Callable, + dim: int, + N: int, + integration_domains: List[List[float]], + antideriv_func: Callable, +): + integrate_result = gaussian_integrate(fn, dim, N, integration_domains) + if dim == 1: + a, b = integration_domains[0][0], integration_domains[0][1] + reference_result = antideriv_func(b) - antideriv_func(a) + elif dim == 2: + a, b, c, d = ( + integration_domains[0][0], + integration_domains[0][1], + integration_domains[1][0], + integration_domains[1][1], + ) + reference_result = ( + antideriv_func(b, d) + - antideriv_func(a, d) + - antideriv_func(b, c) + + antideriv_func(a, c) + ) + else: + raise NotImplementedError + assert np.allclose(integrate_result.numpy(), reference_result) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/experimental/test_montecarlo_integrate.py b/test/experimental/test_montecarlo_integrate.py index fc6ba7c230..30b40e265a 100644 --- a/test/experimental/test_montecarlo_integrate.py +++ b/test/experimental/test_montecarlo_integrate.py @@ -1,55 +1,55 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Callable -from typing import List - -import numpy as np -import paddle -import pytest - -import ppsci - -paddle.seed(1024) - - -@pytest.mark.parametrize( - "fn, dim, N, integration_domains, expected", - [ - ( - lambda x: paddle.sin(x[:, 0]) + paddle.exp(x[:, 1]), - 2, - 10000, - [[0, 1], [-1, 1]], - 3.25152588, - ) - ], -) -def test_montecarlo_integrate( - fn: Callable, - dim: int, - N: int, - integration_domains: List[List[float]], - expected: float, -): - assert np.allclose( - ppsci.experimental.montecarlo_integrate( - fn, dim, N, integration_domains - ).numpy(), - expected, - ) - - -if __name__ == "__main__": - pytest.main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Callable +from typing import List + +import numpy as np +import paddle +import pytest + +import ppsci + +paddle.seed(1024) + + +@pytest.mark.parametrize( + "fn, dim, N, integration_domains, expected", + [ + ( + lambda x: paddle.sin(x[:, 0]) + paddle.exp(x[:, 1]), + 2, + 10000, + [[0, 1], [-1, 1]], + 3.25152588, + ) + ], +) +def test_montecarlo_integrate( + fn: Callable, + dim: int, + N: int, + integration_domains: List[List[float]], + expected: float, +): + assert np.allclose( + ppsci.experimental.montecarlo_integrate( + fn, dim, N, integration_domains + ).numpy(), + expected, + ) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/experimental/test_trapezoid_integrate.py b/test/experimental/test_trapezoid_integrate.py index 34ded2ec86..806b5ec971 100644 --- a/test/experimental/test_trapezoid_integrate.py +++ b/test/experimental/test_trapezoid_integrate.py @@ -1,100 +1,100 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Callable - -import numpy as np -import paddle -import pytest -from typing_extensions import Literal - -from ppsci.experimental import trapezoid_integrate - -paddle.seed(1024) - - -def trapezoid_sum_test(y, x, dx): - dx = 1 if not dx else dx - y = y.numpy() - res = [] - for i in range(len(y) - 1): - res.append((y[i] + y[i + 1]) * dx / 2) - return np.sum(np.array(res)) - - -def trapezoid_cum_test(y, x, dx): - dx = 1 if not dx else dx - y = y.numpy() - res = [] - for i in range(len(y) - 1): - res.append((y[i] + y[i + 1]) * dx / 2) - return np.cumsum(np.array(res)) - - -def trapezoid_x_test(y, x, dx): - dx = 1 if not dx else dx - y = y.numpy() - res = [] - for yi in y: - res_i = [] - for i in range(len(yi) - 1): - res_i.append((yi[i] + yi[i + 1]) * (x[i + 1] - x[i]) / 2) - res.append(res_i) - return np.sum(np.array(res), axis=1) - - -@pytest.mark.parametrize( - "y,x,dx,axis,mode,antideriv_func", - [ - ( - paddle.to_tensor([0, 1, 2, 3, 4, 5], dtype="float32"), - None, - None, - -1, - "sum", - trapezoid_sum_test, - ), - ( - paddle.to_tensor([0, 1, 2, 3, 4, 5], dtype="float32"), - None, - 2, - -1, - "cumsum", - trapezoid_cum_test, - ), - ( - paddle.to_tensor([[0, 1, 2], [3, 4, 5]], dtype="float32"), - paddle.to_tensor([0, 1, 2], dtype="float32"), - None, - 1, - "sum", - trapezoid_x_test, - ), - ], -) -def test_trapezoid_integrate( - y: paddle.Tensor, - x: paddle.Tensor, - dx: float, - axis: int, - mode: Literal["sum", "cumsum"], - antideriv_func: Callable, -): - integrate_result = trapezoid_integrate(y, x, dx, axis, mode) - reference_result = antideriv_func(y, x, dx) - assert np.allclose(integrate_result.numpy(), reference_result) - - -if __name__ == "__main__": - pytest.main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Callable + +import numpy as np +import paddle +import pytest +from typing_extensions import Literal + +from ppsci.experimental import trapezoid_integrate + +paddle.seed(1024) + + +def trapezoid_sum_test(y, x, dx): + dx = 1 if not dx else dx + y = y.numpy() + res = [] + for i in range(len(y) - 1): + res.append((y[i] + y[i + 1]) * dx / 2) + return np.sum(np.array(res)) + + +def trapezoid_cum_test(y, x, dx): + dx = 1 if not dx else dx + y = y.numpy() + res = [] + for i in range(len(y) - 1): + res.append((y[i] + y[i + 1]) * dx / 2) + return np.cumsum(np.array(res)) + + +def trapezoid_x_test(y, x, dx): + dx = 1 if not dx else dx + y = y.numpy() + res = [] + for yi in y: + res_i = [] + for i in range(len(yi) - 1): + res_i.append((yi[i] + yi[i + 1]) * (x[i + 1] - x[i]) / 2) + res.append(res_i) + return np.sum(np.array(res), axis=1) + + +@pytest.mark.parametrize( + "y,x,dx,axis,mode,antideriv_func", + [ + ( + paddle.to_tensor([0, 1, 2, 3, 4, 5], dtype="float32"), + None, + None, + -1, + "sum", + trapezoid_sum_test, + ), + ( + paddle.to_tensor([0, 1, 2, 3, 4, 5], dtype="float32"), + None, + 2, + -1, + "cumsum", + trapezoid_cum_test, + ), + ( + paddle.to_tensor([[0, 1, 2], [3, 4, 5]], dtype="float32"), + paddle.to_tensor([0, 1, 2], dtype="float32"), + None, + 1, + "sum", + trapezoid_x_test, + ), + ], +) +def test_trapezoid_integrate( + y: paddle.Tensor, + x: paddle.Tensor, + dx: float, + axis: int, + mode: Literal["sum", "cumsum"], + antideriv_func: Callable, +): + integrate_result = trapezoid_integrate(y, x, dx, axis, mode) + reference_result = antideriv_func(y, x, dx) + assert np.allclose(integrate_result.numpy(), reference_result) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/loss/aggregator.py b/test/loss/aggregator.py index dee3ff6887..18f93cfcb2 100644 --- a/test/loss/aggregator.py +++ b/test/loss/aggregator.py @@ -1,98 +1,98 @@ -import pytest - -import ppsci -from ppsci import arch -from ppsci.loss import mtl - -__all__ = [] - - -class AggregatorTest: - def __init__(self): - self.model = arch.MLP( - ("x", "y"), - ("u", "v"), - 3, - 16, - ) - - def _check_agg_state_dict(self, agg): - model_state = self.model.state_dict() - agg_state = agg.state_dict() - for k in agg_state: - assert k not in model_state - - def test_AGDA(self): - aggregator = mtl.AGDA(self.model) - assert aggregator.should_persist is False - - def test_GradNorm(self): - aggregator = mtl.GradNorm(self.model) - assert aggregator.should_persist is True - self._check_agg_state_dict(aggregator) - - def test_LossAggregator(self): - aggregator = mtl.AGDA(self.model) - assert aggregator.should_persist is False - - def test_PCGrad(self): - aggregator = mtl.PCGrad(self.model) - assert aggregator.should_persist is False - - def test_Relobralo(self): - aggregator = mtl.Relobralo(self.model) - assert aggregator.should_persist is True - self._check_agg_state_dict(aggregator) - - def test_Sum(self): - aggregator = mtl.Sum(self.model) - assert aggregator.should_persist is False - - def test_NTK(self): - aggregator = mtl.NTK(self.model) - assert aggregator.should_persist is True - 
self._check_agg_state_dict(aggregator) - - def test_restore_aggregator(self): - model = ppsci.arch.MLP( - ["x", "y"], - ["u"], - 2, - 16, - ) - opt = ppsci.optimizer.Adam(1e-3)(model) - equation = ppsci.equation.Laplace(2) - geom = ppsci.geometry.Rectangle([0, 0], [1, 1]) - BC = ppsci.constraint.BoundaryConstraint( - equation.equations, - {"laplace": 0.0}, - geom, - { - "dataset": "IterableNamedArrayDataset", - "iters_per_epoch": 10, - "batch_size": 16, - }, - loss=ppsci.loss.MSELoss(), - ) - solver = ppsci.solver.Solver( - model, - {"bound": BC}, - optimizer=opt, - output_dir="./tmp", - iters_per_epoch=10, - epochs=2, - ) - solver.train() - solver = ppsci.solver.Solver( - model, - {"bound": BC}, - optimizer=opt, - output_dir="./tmp", - iters_per_epoch=10, - epochs=2, - checkpoint_path="./tmp/checkpoints/latest", - ) - - -if __name__ == "__main__": - pytest.main() +import pytest + +import ppsci +from ppsci import arch +from ppsci.loss import mtl + +__all__ = [] + + +class AggregatorTest: + def __init__(self): + self.model = arch.MLP( + ("x", "y"), + ("u", "v"), + 3, + 16, + ) + + def _check_agg_state_dict(self, agg): + model_state = self.model.state_dict() + agg_state = agg.state_dict() + for k in agg_state: + assert k not in model_state + + def test_AGDA(self): + aggregator = mtl.AGDA(self.model) + assert aggregator.should_persist is False + + def test_GradNorm(self): + aggregator = mtl.GradNorm(self.model) + assert aggregator.should_persist is True + self._check_agg_state_dict(aggregator) + + def test_LossAggregator(self): + aggregator = mtl.AGDA(self.model) + assert aggregator.should_persist is False + + def test_PCGrad(self): + aggregator = mtl.PCGrad(self.model) + assert aggregator.should_persist is False + + def test_Relobralo(self): + aggregator = mtl.Relobralo(self.model) + assert aggregator.should_persist is True + self._check_agg_state_dict(aggregator) + + def test_Sum(self): + aggregator = mtl.Sum(self.model) + assert aggregator.should_persist is False + + def test_NTK(self): + aggregator = mtl.NTK(self.model) + assert aggregator.should_persist is True + self._check_agg_state_dict(aggregator) + + def test_restore_aggregator(self): + model = ppsci.arch.MLP( + ["x", "y"], + ["u"], + 2, + 16, + ) + opt = ppsci.optimizer.Adam(1e-3)(model) + equation = ppsci.equation.Laplace(2) + geom = ppsci.geometry.Rectangle([0, 0], [1, 1]) + BC = ppsci.constraint.BoundaryConstraint( + equation.equations, + {"laplace": 0.0}, + geom, + { + "dataset": "IterableNamedArrayDataset", + "iters_per_epoch": 10, + "batch_size": 16, + }, + loss=ppsci.loss.MSELoss(), + ) + solver = ppsci.solver.Solver( + model, + {"bound": BC}, + optimizer=opt, + output_dir="./tmp", + iters_per_epoch=10, + epochs=2, + ) + solver.train() + solver = ppsci.solver.Solver( + model, + {"bound": BC}, + optimizer=opt, + output_dir="./tmp", + iters_per_epoch=10, + epochs=2, + checkpoint_path="./tmp/checkpoints/latest", + ) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/loss/chamfer.py b/test/loss/chamfer.py index 4385d40f01..476e033f75 100644 --- a/test/loss/chamfer.py +++ b/test/loss/chamfer.py @@ -1,44 +1,44 @@ -import numpy as np -import paddle -import pytest - -from ppsci import loss - -__all__ = [] - - -def test_chamfer_loss(): - """Test for chamfer distance loss.""" - N1 = 100 - N2 = 50 - output_dict = {"s1": paddle.randn([1, N1, 3])} - label_dict = {"s1": paddle.randn([1, N2, 3])} - chamfer_loss = loss.ChamferLoss() - result = chamfer_loss(output_dict, label_dict) - - loss_cd_s1 = 0.0 - for i in range(N1): - 
min_i = None - for j in range(N2): - disij = ((output_dict["s1"][0, i] - label_dict["s1"][0, j]) ** 2).sum() - if min_i is None or disij < min_i: - min_i = disij - loss_cd_s1 += min_i - loss_cd_s1 /= N1 - - loss_cd_s2 = 0.0 - for j in range(N2): - min_j = None - for i in range(N1): - disij = ((output_dict["s1"][0, i] - label_dict["s1"][0, j]) ** 2).sum() - if min_j is None or disij < min_j: - min_j = disij - loss_cd_s2 += min_j - loss_cd_s2 /= N2 - - loss_cd = loss_cd_s1 + loss_cd_s2 - np.testing.assert_allclose(loss_cd.item(), result.item()) - - -if __name__ == "__main__": - pytest.main() +import numpy as np +import paddle +import pytest + +from ppsci import loss + +__all__ = [] + + +def test_chamfer_loss(): + """Test for chamfer distance loss.""" + N1 = 100 + N2 = 50 + output_dict = {"s1": paddle.randn([1, N1, 3])} + label_dict = {"s1": paddle.randn([1, N2, 3])} + chamfer_loss = loss.ChamferLoss() + result = chamfer_loss(output_dict, label_dict) + + loss_cd_s1 = 0.0 + for i in range(N1): + min_i = None + for j in range(N2): + disij = ((output_dict["s1"][0, i] - label_dict["s1"][0, j]) ** 2).sum() + if min_i is None or disij < min_i: + min_i = disij + loss_cd_s1 += min_i + loss_cd_s1 /= N1 + + loss_cd_s2 = 0.0 + for j in range(N2): + min_j = None + for i in range(N1): + disij = ((output_dict["s1"][0, i] - label_dict["s1"][0, j]) ** 2).sum() + if min_j is None or disij < min_j: + min_j = disij + loss_cd_s2 += min_j + loss_cd_s2 /= N2 + + loss_cd = loss_cd_s1 + loss_cd_s2 + np.testing.assert_allclose(loss_cd.item(), result.item()) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/loss/func.py b/test/loss/func.py index 2cb292db9d..e1124120a6 100644 --- a/test/loss/func.py +++ b/test/loss/func.py @@ -1,30 +1,30 @@ -import paddle -import pytest - -from ppsci import loss - -__all__ = [] - - -def test_non_tensor_return_type(): - """Test for biharmonic equation.""" - - def loss_func_return_tensor(input_dict, label_dict, weight_dict): - return (0.5 * (input_dict["x"] - label_dict["x"]) ** 2).sum() - - def loss_func_reuturn_builtin_float(input_dict, label_dict, weight_dict): - return (0.5 * (input_dict["x"] - label_dict["x"]) ** 2).sum().item() - - wrapped_loss1 = loss.FunctionalLoss(loss_func_return_tensor) - wrapped_loss2 = loss.FunctionalLoss(loss_func_reuturn_builtin_float) - - input_dict = {"x": paddle.randn([10, 1])} - label_dict = {"x": paddle.zeros([10, 1])} - - wrapped_loss1(input_dict, label_dict) - with pytest.raises(AssertionError): - wrapped_loss2(input_dict, label_dict) - - -if __name__ == "__main__": - pytest.main() +import paddle +import pytest + +from ppsci import loss + +__all__ = [] + + +def test_non_tensor_return_type(): + """Test for biharmonic equation.""" + + def loss_func_return_tensor(input_dict, label_dict, weight_dict): + return (0.5 * (input_dict["x"] - label_dict["x"]) ** 2).sum() + + def loss_func_reuturn_builtin_float(input_dict, label_dict, weight_dict): + return (0.5 * (input_dict["x"] - label_dict["x"]) ** 2).sum().item() + + wrapped_loss1 = loss.FunctionalLoss(loss_func_return_tensor) + wrapped_loss2 = loss.FunctionalLoss(loss_func_reuturn_builtin_float) + + input_dict = {"x": paddle.randn([10, 1])} + label_dict = {"x": paddle.zeros([10, 1])} + + wrapped_loss1(input_dict, label_dict) + with pytest.raises(AssertionError): + wrapped_loss2(input_dict, label_dict) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/probability/test_hmc2.py b/test/probability/test_hmc2.py index 2a602f44bb..b9d68d773c 100644 --- 
a/test/probability/test_hmc2.py +++ b/test/probability/test_hmc2.py @@ -1,63 +1,63 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -import pytest -from paddle.distribution import Bernoulli -from paddle.distribution import Normal - -from ppsci.probability.hmc import HamiltonianMonteCarlo - -paddle.seed(1024) - - -# Example case on Pyro -def test_HamiltonianMonteCarlo2(): - true_coefs = paddle.to_tensor([1.0, 2.0, 3.0]) - data = paddle.randn((2000, 3)) - dim = 3 - - labels = ( - Bernoulli(paddle.nn.functional.sigmoid(paddle.matmul(data, true_coefs))) - .sample([1]) - .squeeze() - ) - rv_beta = Normal(paddle.zeros(dim), paddle.ones(dim)) - - def log_prior(**kwargs): - return paddle.sum(rv_beta.log_prob(kwargs["beta"])) - - def log_likelihood(**kwargs): - p = paddle.nn.functional.sigmoid(paddle.matmul(data, kwargs["beta"])) - return paddle.sum(labels * paddle.log(p) + (1 - labels) * paddle.log(1 - p)) - - # log posterior - def log_posterior(**kwargs): - return log_prior(**kwargs) + log_likelihood(**kwargs) - - initial_params = {"beta": paddle.to_tensor([0.5, 0.5, 0.5])} - - HMC = HamiltonianMonteCarlo( - log_posterior, path_len=0.040, step_size=0.0025, num_warmup_steps=500 - ) - trial = HMC.run_chain(500, initial_params) - - means = trial["beta"].mean(axis=0) - assert paddle.allclose(means[0], paddle.to_tensor(true_coefs[0]), rtol=0.2) - assert paddle.allclose(means[1], paddle.to_tensor(true_coefs[1]), rtol=0.2) - assert paddle.allclose(means[2], paddle.to_tensor(true_coefs[2]), rtol=0.2) - - -if __name__ == "__main__": - pytest.main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import paddle +import pytest +from paddle.distribution import Bernoulli +from paddle.distribution import Normal + +from ppsci.probability.hmc import HamiltonianMonteCarlo + +paddle.seed(1024) + + +# Example case on Pyro +def test_HamiltonianMonteCarlo2(): + true_coefs = paddle.to_tensor([1.0, 2.0, 3.0]) + data = paddle.randn((2000, 3)) + dim = 3 + + labels = ( + Bernoulli(paddle.nn.functional.sigmoid(paddle.matmul(data, true_coefs))) + .sample([1]) + .squeeze() + ) + rv_beta = Normal(paddle.zeros(dim), paddle.ones(dim)) + + def log_prior(**kwargs): + return paddle.sum(rv_beta.log_prob(kwargs["beta"])) + + def log_likelihood(**kwargs): + p = paddle.nn.functional.sigmoid(paddle.matmul(data, kwargs["beta"])) + return paddle.sum(labels * paddle.log(p) + (1 - labels) * paddle.log(1 - p)) + + # log posterior + def log_posterior(**kwargs): + return log_prior(**kwargs) + log_likelihood(**kwargs) + + initial_params = {"beta": paddle.to_tensor([0.5, 0.5, 0.5])} + + HMC = HamiltonianMonteCarlo( + log_posterior, path_len=0.040, step_size=0.0025, num_warmup_steps=500 + ) + trial = HMC.run_chain(500, initial_params) + + means = trial["beta"].mean(axis=0) + assert paddle.allclose(means[0], paddle.to_tensor(true_coefs[0]), rtol=0.2) + assert paddle.allclose(means[1], paddle.to_tensor(true_coefs[1]), rtol=0.2) + assert paddle.allclose(means[2], paddle.to_tensor(true_coefs[2]), rtol=0.2) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/utils/test_config.py b/test/utils/test_config.py index 844d1f449f..f77d2cd355 100644 --- a/test/utils/test_config.py +++ b/test/utils/test_config.py @@ -1,50 +1,50 @@ -import os - -import hydra -import paddle -import pytest -import yaml - -from ppsci.utils.callbacks import InitCallback - -paddle.seed(1024) - - -@pytest.mark.parametrize( - "epochs,mode,seed", - [ - (-1, "train", 1024), - (20, "wrong_mode", 1024), - (10, "eval", -1), - ], -) -def test_invalid_epochs(tmpdir, epochs, mode, seed): - cfg_dict = { - "hydra": { - "callbacks": { - "init_callback": {"_target_": "ppsci.utils.callbacks.InitCallback"} - } - }, - "mode": mode, - "seed": seed, - "TRAIN": { - "epochs": epochs, - }, - } - - dir_ = os.path.dirname(__file__) - config_abs_path = os.path.join(dir_, "test_config.yaml") - with open(config_abs_path, "w") as f: - f.write(yaml.dump(cfg_dict)) - - with hydra.initialize(config_path="./", version_base=None): - cfg = hydra.compose(config_name="test_config.yaml") - - with pytest.raises(SystemExit) as exec_info: - InitCallback().on_job_start(config=cfg) - assert exec_info.value.code == 2 - - -# 这部分通常不需要,除非你想直接从脚本运行测试 -if __name__ == "__main__": - pytest.main() +import os + +import hydra +import paddle +import pytest +import yaml + +from ppsci.utils.callbacks import InitCallback + +paddle.seed(1024) + + +@pytest.mark.parametrize( + "epochs,mode,seed", + [ + (-1, "train", 1024), + (20, "wrong_mode", 1024), + (10, "eval", -1), + ], +) +def test_invalid_epochs(tmpdir, epochs, mode, seed): + cfg_dict = { + "hydra": { + "callbacks": { + "init_callback": {"_target_": "ppsci.utils.callbacks.InitCallback"} + } + }, + "mode": mode, + "seed": seed, + "TRAIN": { + "epochs": epochs, + }, + } + + dir_ = os.path.dirname(__file__) + config_abs_path = os.path.join(dir_, "test_config.yaml") + with open(config_abs_path, "w") as f: + f.write(yaml.dump(cfg_dict)) + + with hydra.initialize(config_path="./", version_base=None): + cfg = hydra.compose(config_name="test_config.yaml") + + with pytest.raises(SystemExit) as exec_info: + InitCallback().on_job_start(config=cfg) + 
assert exec_info.value.code == 2 + + +# 这部分通常不需要,除非你想直接从脚本运行测试 +if __name__ == "__main__": + pytest.main() diff --git a/test/utils/test_ema.py b/test/utils/test_ema.py index db7671fbe7..8107390351 100644 --- a/test/utils/test_ema.py +++ b/test/utils/test_ema.py @@ -1,262 +1,262 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import itertools - -import numpy as np -import paddle -import pytest - -import ppsci -from ppsci.utils import ema - - -def test_ema_accumulation(): - model = ppsci.arch.MLP( - ("x", "y", "z"), - ("u", "v", "w"), - 4, - 25, - ) - decay = 0.9 - avg_model = ema.ExponentialMovingAverage(model, decay) - - opt = ppsci.optimizer.Adam()(model) - - model_params_hist = { - k: [ - p.clone().detach(), - ] - for k, p in model.named_parameters() - } - N = 32 - T = 5 - for i in range(T): - input_data = { - "x": paddle.randn([N, 1]), - "y": paddle.randn([N, 1]), - "z": paddle.randn([N, 1]), - } - label_data = { - "u": paddle.randn([N, 1]), - "v": paddle.randn([N, 1]), - "w": paddle.randn([N, 1]), - } - output_data = model(input_data) - loss = sum( - [ - paddle.nn.functional.mse_loss(output, label) - for output, label in zip(output_data.values(), label_data.values()) - ] - ) - loss.backward() - opt.step() - opt.clear_grad() - avg_model.update() - - for k, p in model.named_parameters(): - model_params_hist[k].append(p.clone().detach()) - - for k, plist in model_params_hist.items(): - ema_p = model_params_hist[k][0] - for p in plist[1:]: - ema_p = ema_p * decay + p * (1 - decay) - - np.testing.assert_allclose(ema_p, avg_model.params_shadow[k], 1e-7, 1e-7) - - -def test_ema_apply_restore(): - model = ppsci.arch.MLP( - ("x", "y", "z"), - ("u", "v", "w"), - 4, - 25, - ) - model.linears[-1].weight.stop_gradient = True - model.linears[-2].bias.stop_gradient = True - decay = 0.9 - avg_model = ema.ExponentialMovingAverage(model, decay) - - opt = ppsci.optimizer.Adam()(model) - - N = 32 - T = 5 - for i in range(T): - input_data = { - "x": paddle.randn([N, 1]), - "y": paddle.randn([N, 1]), - "z": paddle.randn([N, 1]), - } - label_data = { - "u": paddle.randn([N, 1]), - "v": paddle.randn([N, 1]), - "w": paddle.randn([N, 1]), - } - output_data = model(input_data) - loss = sum( - [ - paddle.nn.functional.mse_loss(output, label) - for output, label in zip(output_data.values(), label_data.values()) - ] - ) - loss.backward() - opt.step() - opt.clear_grad() - avg_model.update() - - orignal_param = {k: v.clone() for k, v in model.named_parameters()} - - # test if stop_gradient are excluded - assert model.linears[-1].weight.name not in avg_model.params_shadow - assert model.linears[-2].bias.name not in avg_model.params_shadow - - # test if model paramter == backup - avg_model.apply_shadow() - for k in orignal_param: - if not orignal_param[k].stop_gradient: - np.testing.assert_allclose( - avg_model.params_backup[k], orignal_param[k], 1e-7, 1e-7 - ) - assert model.state_dict()[k].stop_gradient == orignal_param[k].stop_gradient - - 
# test if restored successfully - avg_model.restore() - for k in orignal_param: - np.testing.assert_allclose(model.state_dict()[k], orignal_param[k], 1e-7, 1e-7) - assert model.state_dict()[k].stop_gradient == orignal_param[k].stop_gradient - assert len(avg_model.params_backup) == 0 - - -def test_ema_buffer(): - model = ppsci.arch.MLP( - ("x", "y", "z"), - ("u", "v", "w"), - 4, - 25, - periods={"x": [1.0, True], "y": [2.0, False]}, - ) - model.linears[-1].weight.stop_gradient = True - model.linears[-2].bias.stop_gradient = True - model.register_buffer("buffer_1", paddle.randn([2, 3])) - model.register_buffer("buffer_2", paddle.randn([3, 3])) - model.register_buffer("buffer_3", paddle.randn([4, 3])) - - decay = 0.5 - avg_model = ema.ExponentialMovingAverage(model, decay) - - N = 32 - # update parames of model - opt = ppsci.optimizer.Adam()(model) - input_data = { - "x": paddle.randn([N, 1]), - "y": paddle.randn([N, 1]), - "z": paddle.randn([N, 1]), - } - label_data = { - "u": paddle.randn([N, 1]), - "v": paddle.randn([N, 1]), - "w": paddle.randn([N, 1]), - } - output_data = model(input_data) - loss = sum( - [ - paddle.nn.functional.mse_loss(output, label) - for output, label in zip(output_data.values(), label_data.values()) - ] - ) - loss.backward() - opt.step() - opt.clear_grad() - - model2 = copy.deepcopy(model) - avg_model.apply_shadow() - - opt = ppsci.optimizer.Adam()(model) - output_data = model(input_data) - loss = sum( - [ - paddle.nn.functional.mse_loss(output, label) - for output, label in zip(output_data.values(), label_data.values()) - ] - ) - loss.backward() - opt.step() - opt.clear_grad() - - for (n1, p1), (n2, p2) in zip( - itertools.chain(model.named_parameters(), model.named_buffers()), - itertools.chain(model2.named_parameters(), model2.named_buffers()), - ): - assert n1 == n2, f"{n1} {n2} do not equal." - assert p1.stop_gradient == p2.stop_gradient, f"{n1} {n2} do not equal." - np.testing.assert_array_equal(p1, p2) - - -def test_ema_state_dict(): - model = ppsci.arch.MLP( - ("x", "y", "z"), - ("u", "v", "w"), - 4, - 25, - periods={"x": [1.0, True], "y": [2.0, False]}, - ) - model.linears[-1].weight.stop_gradient = True - model.linears[-2].bias.stop_gradient = True - model.register_buffer("buffer_1", paddle.randn([2, 3])) - model.register_buffer("buffer_2", paddle.randn([3, 3])) - model.register_buffer("buffer_3", paddle.randn([4, 3])) - - decay = 0.5 - avg_model = ema.ExponentialMovingAverage(model, decay) - - N = 32 - # update parames of model - opt = ppsci.optimizer.Adam()(model) - input_data = { - "x": paddle.randn([N, 1]), - "y": paddle.randn([N, 1]), - "z": paddle.randn([N, 1]), - } - label_data = { - "u": paddle.randn([N, 1]), - "v": paddle.randn([N, 1]), - "w": paddle.randn([N, 1]), - } - output_data = model(input_data) - loss = sum( - [ - paddle.nn.functional.mse_loss(output, label) - for output, label in zip(output_data.values(), label_data.values()) - ] - ) - loss.backward() - opt.step() - opt.clear_grad() - avg_model.update() - - avg_model2 = ema.ExponentialMovingAverage(model, decay) - avg_model2.set_state_dict(avg_model.state_dict()) - - for (n1, p1), (n2, p2) in zip( - avg_model.state_dict().items(), - avg_model2.state_dict().items(), - ): - assert n1 == n2, f"{n1} {n2} do not equal." - assert p1.stop_gradient == p2.stop_gradient, f"{n1} {n2} do not equal." - np.testing.assert_array_equal(p1, p2) - - -if __name__ == "__main__": - pytest.main() +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
+ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import itertools + +import numpy as np +import paddle +import pytest + +import ppsci +from ppsci.utils import ema + + +def test_ema_accumulation(): + model = ppsci.arch.MLP( + ("x", "y", "z"), + ("u", "v", "w"), + 4, + 25, + ) + decay = 0.9 + avg_model = ema.ExponentialMovingAverage(model, decay) + + opt = ppsci.optimizer.Adam()(model) + + model_params_hist = { + k: [ + p.clone().detach(), + ] + for k, p in model.named_parameters() + } + N = 32 + T = 5 + for i in range(T): + input_data = { + "x": paddle.randn([N, 1]), + "y": paddle.randn([N, 1]), + "z": paddle.randn([N, 1]), + } + label_data = { + "u": paddle.randn([N, 1]), + "v": paddle.randn([N, 1]), + "w": paddle.randn([N, 1]), + } + output_data = model(input_data) + loss = sum( + [ + paddle.nn.functional.mse_loss(output, label) + for output, label in zip(output_data.values(), label_data.values()) + ] + ) + loss.backward() + opt.step() + opt.clear_grad() + avg_model.update() + + for k, p in model.named_parameters(): + model_params_hist[k].append(p.clone().detach()) + + for k, plist in model_params_hist.items(): + ema_p = model_params_hist[k][0] + for p in plist[1:]: + ema_p = ema_p * decay + p * (1 - decay) + + np.testing.assert_allclose(ema_p, avg_model.params_shadow[k], 1e-7, 1e-7) + + +def test_ema_apply_restore(): + model = ppsci.arch.MLP( + ("x", "y", "z"), + ("u", "v", "w"), + 4, + 25, + ) + model.linears[-1].weight.stop_gradient = True + model.linears[-2].bias.stop_gradient = True + decay = 0.9 + avg_model = ema.ExponentialMovingAverage(model, decay) + + opt = ppsci.optimizer.Adam()(model) + + N = 32 + T = 5 + for i in range(T): + input_data = { + "x": paddle.randn([N, 1]), + "y": paddle.randn([N, 1]), + "z": paddle.randn([N, 1]), + } + label_data = { + "u": paddle.randn([N, 1]), + "v": paddle.randn([N, 1]), + "w": paddle.randn([N, 1]), + } + output_data = model(input_data) + loss = sum( + [ + paddle.nn.functional.mse_loss(output, label) + for output, label in zip(output_data.values(), label_data.values()) + ] + ) + loss.backward() + opt.step() + opt.clear_grad() + avg_model.update() + + orignal_param = {k: v.clone() for k, v in model.named_parameters()} + + # test if stop_gradient are excluded + assert model.linears[-1].weight.name not in avg_model.params_shadow + assert model.linears[-2].bias.name not in avg_model.params_shadow + + # test if model paramter == backup + avg_model.apply_shadow() + for k in orignal_param: + if not orignal_param[k].stop_gradient: + np.testing.assert_allclose( + avg_model.params_backup[k], orignal_param[k], 1e-7, 1e-7 + ) + assert model.state_dict()[k].stop_gradient == orignal_param[k].stop_gradient + + # test if restored successfully + avg_model.restore() + for k in orignal_param: + np.testing.assert_allclose(model.state_dict()[k], orignal_param[k], 1e-7, 1e-7) + assert model.state_dict()[k].stop_gradient == orignal_param[k].stop_gradient + assert len(avg_model.params_backup) == 0 + + +def test_ema_buffer(): + model = ppsci.arch.MLP( + ("x", 
"y", "z"), + ("u", "v", "w"), + 4, + 25, + periods={"x": [1.0, True], "y": [2.0, False]}, + ) + model.linears[-1].weight.stop_gradient = True + model.linears[-2].bias.stop_gradient = True + model.register_buffer("buffer_1", paddle.randn([2, 3])) + model.register_buffer("buffer_2", paddle.randn([3, 3])) + model.register_buffer("buffer_3", paddle.randn([4, 3])) + + decay = 0.5 + avg_model = ema.ExponentialMovingAverage(model, decay) + + N = 32 + # update parames of model + opt = ppsci.optimizer.Adam()(model) + input_data = { + "x": paddle.randn([N, 1]), + "y": paddle.randn([N, 1]), + "z": paddle.randn([N, 1]), + } + label_data = { + "u": paddle.randn([N, 1]), + "v": paddle.randn([N, 1]), + "w": paddle.randn([N, 1]), + } + output_data = model(input_data) + loss = sum( + [ + paddle.nn.functional.mse_loss(output, label) + for output, label in zip(output_data.values(), label_data.values()) + ] + ) + loss.backward() + opt.step() + opt.clear_grad() + + model2 = copy.deepcopy(model) + avg_model.apply_shadow() + + opt = ppsci.optimizer.Adam()(model) + output_data = model(input_data) + loss = sum( + [ + paddle.nn.functional.mse_loss(output, label) + for output, label in zip(output_data.values(), label_data.values()) + ] + ) + loss.backward() + opt.step() + opt.clear_grad() + + for (n1, p1), (n2, p2) in zip( + itertools.chain(model.named_parameters(), model.named_buffers()), + itertools.chain(model2.named_parameters(), model2.named_buffers()), + ): + assert n1 == n2, f"{n1} {n2} do not equal." + assert p1.stop_gradient == p2.stop_gradient, f"{n1} {n2} do not equal." + np.testing.assert_array_equal(p1, p2) + + +def test_ema_state_dict(): + model = ppsci.arch.MLP( + ("x", "y", "z"), + ("u", "v", "w"), + 4, + 25, + periods={"x": [1.0, True], "y": [2.0, False]}, + ) + model.linears[-1].weight.stop_gradient = True + model.linears[-2].bias.stop_gradient = True + model.register_buffer("buffer_1", paddle.randn([2, 3])) + model.register_buffer("buffer_2", paddle.randn([3, 3])) + model.register_buffer("buffer_3", paddle.randn([4, 3])) + + decay = 0.5 + avg_model = ema.ExponentialMovingAverage(model, decay) + + N = 32 + # update parames of model + opt = ppsci.optimizer.Adam()(model) + input_data = { + "x": paddle.randn([N, 1]), + "y": paddle.randn([N, 1]), + "z": paddle.randn([N, 1]), + } + label_data = { + "u": paddle.randn([N, 1]), + "v": paddle.randn([N, 1]), + "w": paddle.randn([N, 1]), + } + output_data = model(input_data) + loss = sum( + [ + paddle.nn.functional.mse_loss(output, label) + for output, label in zip(output_data.values(), label_data.values()) + ] + ) + loss.backward() + opt.step() + opt.clear_grad() + avg_model.update() + + avg_model2 = ema.ExponentialMovingAverage(model, decay) + avg_model2.set_state_dict(avg_model.state_dict()) + + for (n1, p1), (n2, p2) in zip( + avg_model.state_dict().items(), + avg_model2.state_dict().items(), + ): + assert n1 == n2, f"{n1} {n2} do not equal." + assert p1.stop_gradient == p2.stop_gradient, f"{n1} {n2} do not equal." + np.testing.assert_array_equal(p1, p2) + + +if __name__ == "__main__": + pytest.main() diff --git a/test/utils/test_symbolic.py b/test/utils/test_symbolic.py index 86468bd43f..d8d604531e 100644 --- a/test/utils/test_symbolic.py +++ b/test/utils/test_symbolic.py @@ -1,153 +1,153 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import paddle -import pytest -import sympy as sp - -import ppsci - - -def test_multi_model_and_sdf(): - """Test for Vibration equation.""" - paddle.seed(2023) - # construct symbolic equation - x, y, z = sp.symbols("x y z") - invars = (x, y, z) - - u = sp.Function("u")(*invars) - v = sp.Function("v")(*invars) - w = sp.Function("w")(*invars) - p = sp.Function("p")(*invars) - - k = sp.Function("k")(u, v, w) - ep = sp.Function("ep")(u, v, p) - - sdf = sp.Function("sdf")(*invars) - sdf__x = sdf.diff(x) - sdf__y = sdf.diff(y) - sdf__z = sdf.diff(z) - - tmp1 = u * sdf + sdf__x * sdf__y - sdf__z - tmp2 = ep * tmp1 * k - out_var = tmp1 + tmp2 - - model1 = ppsci.arch.MLP( - (x.name, y.name, z.name), (u.name, v.name, w.name, p.name), 2, 8 - ) - model2 = ppsci.arch.MLP((u.name, v.name, w.name), (k.name,), 2, 6) - model3 = ppsci.arch.MLP((u.name, v.name, p.name), (ep.name,), 2, 6) - - # translate symbolic equation to paddle function - translated_func = ppsci.lambdify( - out_var, - (model1, model2, model3), - ) - # prepare input dict - geom = ppsci.geometry.Sphere([0, 0, 0], 2) - input_dict = geom.sample_interior( - 100, - compute_sdf_derivatives=True, - ) - input_dict = {k: paddle.to_tensor(v) for k, v in input_dict.items()} - input_dict_copy = {k: v for k, v in input_dict.items()} - # compute out_var using translated function - out_var_tensor = translated_func(input_dict) - - # compute out_var manually below - uvwp = model1(input_dict_copy) - u_eval, v_eval, w_eval, p_eval = ( - uvwp["u"], - uvwp["v"], - uvwp["w"], - uvwp["p"], - ) - k_eval = model2({**input_dict_copy, "u": u_eval, "v": v_eval, "w": w_eval})["k"] - ep_eval = model3({**input_dict_copy, "u": u_eval, "v": v_eval, "p": p_eval})["ep"] - sdf_eval = input_dict_copy["sdf"] - sdf__x_eval = input_dict_copy["sdf__x"] - sdf__y_eval = input_dict_copy["sdf__y"] - sdf__z_eval = input_dict_copy["sdf__z"] - - tmp1_eval = u_eval * sdf_eval + sdf__x_eval * sdf__y_eval - sdf__z_eval - tmp2_eval = ep_eval * tmp1_eval * k_eval - out_var_reference = tmp1_eval + tmp2_eval - - np.testing.assert_allclose( - out_var_tensor.numpy(), out_var_reference.numpy(), 1e-6, 0.0 - ) - - -def test_complicated_symbolic(): - paddle.seed(2023) - x_ten = paddle.randn([32, 1]) - x_ten.stop_gradient = False - y_ten = paddle.randn([32, 1]) - y_ten.stop_gradient = False - z_ten = paddle.randn([32, 1]) - z_ten.stop_gradient = False - - input_data = { - "x": x_ten, - "y": y_ten, - "z": z_ten, - } - x_sp, y_sp, z_sp = ppsci.equation.PDE.create_symbols("x y z") - f = sp.Function("f")(x_sp, y_sp, z_sp) - # g = sp.Function("g")(x_sp, y_sp, z_sp) - model_f = ppsci.arch.MLP((x_sp.name, y_sp.name, z_sp.name), (f.name,), 3, 6) - # model_g = ppsci.arch.MLP((x_sp.name, y_sp.name, z_sp.name), (f.name,), 3, 6) - - for test_id in range(100): - - def random_derivative(state): - ret = f - for k in range(4): - if state & (1 << k): - ret = ret.diff(x_sp) - else: - ret = ret.diff(y_sp) - return ret - - state1 = np.random.randint(0, 1 << 4) - state2 = np.random.randint(0, 1 << 4) - state3 = np.random.randint(0, 1 << 4) - state4 = np.random.randint(0, 1 << 4) - targets = [ 
- random_derivative(state1), - random_derivative(state2), - random_derivative(state3), - random_derivative(state4), - ] - eqs_fuse = ppsci.lambdify( - targets, - model_f, - fuse_derivative=True, - ) - eqs_expected = ppsci.lambdify( - targets, - model_f, - fuse_derivative=False, - ) - - for i in range(len(targets)): - output_fuse = eqs_fuse[i](input_data) - output_expected = eqs_expected[i](input_data) - np.testing.assert_allclose(output_fuse.numpy(), output_expected.numpy()) - ppsci.autodiff.clear() - - -if __name__ == "__main__": - pytest.main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import paddle +import pytest +import sympy as sp + +import ppsci + + +def test_multi_model_and_sdf(): + """Test for Vibration equation.""" + paddle.seed(2023) + # construct symbolic equation + x, y, z = sp.symbols("x y z") + invars = (x, y, z) + + u = sp.Function("u")(*invars) + v = sp.Function("v")(*invars) + w = sp.Function("w")(*invars) + p = sp.Function("p")(*invars) + + k = sp.Function("k")(u, v, w) + ep = sp.Function("ep")(u, v, p) + + sdf = sp.Function("sdf")(*invars) + sdf__x = sdf.diff(x) + sdf__y = sdf.diff(y) + sdf__z = sdf.diff(z) + + tmp1 = u * sdf + sdf__x * sdf__y - sdf__z + tmp2 = ep * tmp1 * k + out_var = tmp1 + tmp2 + + model1 = ppsci.arch.MLP( + (x.name, y.name, z.name), (u.name, v.name, w.name, p.name), 2, 8 + ) + model2 = ppsci.arch.MLP((u.name, v.name, w.name), (k.name,), 2, 6) + model3 = ppsci.arch.MLP((u.name, v.name, p.name), (ep.name,), 2, 6) + + # translate symbolic equation to paddle function + translated_func = ppsci.lambdify( + out_var, + (model1, model2, model3), + ) + # prepare input dict + geom = ppsci.geometry.Sphere([0, 0, 0], 2) + input_dict = geom.sample_interior( + 100, + compute_sdf_derivatives=True, + ) + input_dict = {k: paddle.to_tensor(v) for k, v in input_dict.items()} + input_dict_copy = {k: v for k, v in input_dict.items()} + # compute out_var using translated function + out_var_tensor = translated_func(input_dict) + + # compute out_var manually below + uvwp = model1(input_dict_copy) + u_eval, v_eval, w_eval, p_eval = ( + uvwp["u"], + uvwp["v"], + uvwp["w"], + uvwp["p"], + ) + k_eval = model2({**input_dict_copy, "u": u_eval, "v": v_eval, "w": w_eval})["k"] + ep_eval = model3({**input_dict_copy, "u": u_eval, "v": v_eval, "p": p_eval})["ep"] + sdf_eval = input_dict_copy["sdf"] + sdf__x_eval = input_dict_copy["sdf__x"] + sdf__y_eval = input_dict_copy["sdf__y"] + sdf__z_eval = input_dict_copy["sdf__z"] + + tmp1_eval = u_eval * sdf_eval + sdf__x_eval * sdf__y_eval - sdf__z_eval + tmp2_eval = ep_eval * tmp1_eval * k_eval + out_var_reference = tmp1_eval + tmp2_eval + + np.testing.assert_allclose( + out_var_tensor.numpy(), out_var_reference.numpy(), 1e-6, 0.0 + ) + + +def test_complicated_symbolic(): + paddle.seed(2023) + x_ten = paddle.randn([32, 1]) + x_ten.stop_gradient = False + y_ten = paddle.randn([32, 1]) + y_ten.stop_gradient = False + z_ten = paddle.randn([32, 1]) + 
z_ten.stop_gradient = False + + input_data = { + "x": x_ten, + "y": y_ten, + "z": z_ten, + } + x_sp, y_sp, z_sp = ppsci.equation.PDE.create_symbols("x y z") + f = sp.Function("f")(x_sp, y_sp, z_sp) + # g = sp.Function("g")(x_sp, y_sp, z_sp) + model_f = ppsci.arch.MLP((x_sp.name, y_sp.name, z_sp.name), (f.name,), 3, 6) + # model_g = ppsci.arch.MLP((x_sp.name, y_sp.name, z_sp.name), (f.name,), 3, 6) + + for test_id in range(100): + + def random_derivative(state): + ret = f + for k in range(4): + if state & (1 << k): + ret = ret.diff(x_sp) + else: + ret = ret.diff(y_sp) + return ret + + state1 = np.random.randint(0, 1 << 4) + state2 = np.random.randint(0, 1 << 4) + state3 = np.random.randint(0, 1 << 4) + state4 = np.random.randint(0, 1 << 4) + targets = [ + random_derivative(state1), + random_derivative(state2), + random_derivative(state3), + random_derivative(state4), + ] + eqs_fuse = ppsci.lambdify( + targets, + model_f, + fuse_derivative=True, + ) + eqs_expected = ppsci.lambdify( + targets, + model_f, + fuse_derivative=False, + ) + + for i in range(len(targets)): + output_fuse = eqs_fuse[i](input_data) + output_expected = eqs_expected[i](input_data) + np.testing.assert_allclose(output_fuse.numpy(), output_expected.numpy()) + ppsci.autodiff.clear() + + +if __name__ == "__main__": + pytest.main() diff --git a/test/utils/test_writer.py b/test/utils/test_writer.py index cce3f69ab8..b89c1e4b2c 100644 --- a/test/utils/test_writer.py +++ b/test/utils/test_writer.py @@ -1,56 +1,56 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import pytest - -from ppsci.utils import reader -from ppsci.utils import writer - - -def test_save_csv_file(): - keys = ["x1", "y1", "z1"] - alias_dict = { - "x": "x1", - "y": "y1", - "z": "z1", - } - data_dict = { - keys[0]: np.random.randint(0, 255, (10, 1)), - keys[1]: np.random.rand(10, 1), - keys[2]: np.random.rand(10, 1), - } - file_path = "test_writer.csv" - writer.save_csv_file( - file_path, - data_dict, - keys, - alias_dict=alias_dict, - use_header=True, - ) - - reload_data_dict = reader.load_csv_file( - file_path, - keys, - alias_dict, - ) - - assert data_dict.keys() == reload_data_dict.keys() - for k in reload_data_dict: - assert reload_data_dict[k].shape == data_dict[k].shape - assert np.allclose(reload_data_dict[k], data_dict[k]) - - -if __name__ == "__main__": - pytest.main() +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
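A minimal sketch of `ppsci.lambdify`, which the symbolic tests above use to turn a sympy expression over model outputs into a callable on a dict of paddle tensors. The Laplacian-style expression and network sizes below are illustrative; only the API calls visible in the tests (`create_symbols`, `sp.Function`, `ppsci.lambdify`, dict-of-tensor inputs with `stop_gradient = False`) are relied on:

``` python
import paddle
import sympy as sp

import ppsci

x, y = ppsci.equation.PDE.create_symbols("x y")
u = sp.Function("u")(x, y)
model = ppsci.arch.MLP((x.name, y.name), (u.name,), 3, 16)

# Residual u_xx + u_yy, evaluated through the network via automatic differentiation.
expr = u.diff(x, 2) + u.diff(y, 2)
residual_fn = ppsci.lambdify(expr, model)

inputs = {"x": paddle.randn([16, 1]), "y": paddle.randn([16, 1])}
for v in inputs.values():
    v.stop_gradient = False  # input gradients are needed for the derivatives
print(residual_fn(inputs).shape)
```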
+# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np +import pytest + +from ppsci.utils import reader +from ppsci.utils import writer + + +def test_save_csv_file(): + keys = ["x1", "y1", "z1"] + alias_dict = { + "x": "x1", + "y": "y1", + "z": "z1", + } + data_dict = { + keys[0]: np.random.randint(0, 255, (10, 1)), + keys[1]: np.random.rand(10, 1), + keys[2]: np.random.rand(10, 1), + } + file_path = "test_writer.csv" + writer.save_csv_file( + file_path, + data_dict, + keys, + alias_dict=alias_dict, + use_header=True, + ) + + reload_data_dict = reader.load_csv_file( + file_path, + keys, + alias_dict, + ) + + assert data_dict.keys() == reload_data_dict.keys() + for k in reload_data_dict: + assert reload_data_dict[k].shape == data_dict[k].shape + assert np.allclose(reload_data_dict[k], data_dict[k]) + + +if __name__ == "__main__": + pytest.main() diff --git a/test_tipc/README.MD b/test_tipc/README.MD index 32584e3efc..4e55b8f921 100644 --- a/test_tipc/README.MD +++ b/test_tipc/README.MD @@ -1,82 +1,82 @@ -# PaddleScience TIPC Linux端Benchmark测试文档 - -TIPC功能测试的主程序为benchmark_train.sh,可以测试基于Python的模型训练性能测试功能,包括裁剪、量化、蒸馏。 - -## 1. Docker enviroment (运行环境) - -- docker image: registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda11.2-cudnn8-gcc82 -- paddle = 0.0.0 -- paddlescience = 0.0.0 -- python = 3.10 - -## 2. 测试结论汇总 - -| 算法名称 | 模型名称 | 精度 | 单机单卡 ips | 单机多卡 ips | 多机多卡 ips | 模型压缩(单机多卡) ips | -| :---- | :---- | :---- | :---- | :---- | :---- | :---- | -| PINNs | cylinder2d_unsteady_Re100.py | FP16
<br>FP32 | -<br>1264165.641 | -<br>- | -<br>- | -<br>
- | -| PINNs | euler_beam.py | FP16
<br>FP32 | -<br>3667.54854 | -<br>- | -<br>- | -<br>
- | - -## 3. 测试流程(以2d非定常圆柱绕流为例子) - -安装PaddlePaddle == 0.0.0 - -Process of running TIPC test (运行benchmark测试步骤) - -``` sh -git clone https://github.com/PaddlePaddle/PaddleScience.git -``` - -``` sh -cd PaddleScience -``` - -对于benchmark使用的数据有以下要求: - -- 训练数据不应过大,过大的数据会导致下载很慢,延长了benchmark的训练时间; -- 训练数据样本数不能过少,至少要保证训练过程稳定,能获取到稳定的ips; - -修改此目录下对应案例的txt参数文件,来生成不同log,注意对于训练epoch要修改训练py文件, - -``` sh -./PaddleScience/test_tipc/configs -``` - -单卡:自动运行打开Profiling - -``` sh -export CUDA_VISIBLE_DEVICES=0 -``` - -*在解析工具下载好之后,运行脚本 - -``` sh -mkdir jx -cd jx/ -wget https://paddle-qa.bj.bcebos.com/benchmark/tools.tar.gz -tar -zxvf tools.tar.gz -export BENCHMARK_ROOT=$PWD/tools -cd .. -``` - -在PaddleScience主目录下运行(以cylinder2d_unsteady_Re100.py为例) - -``` sh -export script_path=./test_tipc/configs/train_2d_unsteady_continuous/2d_unsteady_continuous_train_infer_python.txt -bash ./test_tipc/prepare.sh $script_path benchmark_train -bash ./test_tipc/benchmark_train.sh $script_path benchmark_train -``` - -benchmark训练得到训练日志后,会自动保存训练日志并解析得到ips等信息,日志解析代码位于: - -- -运行成功会显示 - -``` sh -Run successfully with command -``` - -如果运行成功会生成index文件夹,该目录下面生成json文件[PaddleScience_cylinder2d_unsteady_train_newapi_bs1_fp32_DP_N1C1_speed],其中可以找到对应的ips数值 - -``` log -{"model_branch": "8ffce843ff16046c0303c5dc0cc558fbf8ce2b9d", "model_commit": null, "model_name": "cylinder2d_unsteady_Re100_bs1_fp32_DP", "batch_size": 1, "fp_item": "fp32", "run_mode": "DP", "convergence_value": 0, "convergence_key": "", "ips": 1264165.641, "speed_unit": "images/s", "device_num": "N1C1", "model_run_time": "114", "frame_commit": "e7716f10b2d500857581fce896d96023dd2d9693", "frame_version": "0.0.0"} -``` +# PaddleScience TIPC Linux端Benchmark测试文档 + +TIPC功能测试的主程序为benchmark_train.sh,可以测试基于Python的模型训练性能测试功能,包括裁剪、量化、蒸馏。 + +## 1. Docker enviroment (运行环境) + +- docker image: registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda11.2-cudnn8-gcc82 +- paddle = 0.0.0 +- paddlescience = 0.0.0 +- python = 3.10 + +## 2. 测试结论汇总 + +| 算法名称 | 模型名称 | 精度 | 单机单卡 ips | 单机多卡 ips | 多机多卡 ips | 模型压缩(单机多卡) ips | +| :---- | :---- | :---- | :---- | :---- | :---- | :---- | +| PINNs | cylinder2d_unsteady_Re100.py | FP16
<br>FP32 | -<br>1264165.641 | -<br>- | -<br>- | -<br>
- | +| PINNs | euler_beam.py | FP16
<br>FP32 | -<br>3667.54854 | -<br>- | -<br>- | -<br>
- | + +## 3. 测试流程(以2d非定常圆柱绕流为例子) + +安装PaddlePaddle == 0.0.0 + +Process of running TIPC test (运行benchmark测试步骤) + +``` sh +git clone https://github.com/PaddlePaddle/PaddleScience.git +``` + +``` sh +cd PaddleScience +``` + +对于benchmark使用的数据有以下要求: + +- 训练数据不应过大,过大的数据会导致下载很慢,延长了benchmark的训练时间; +- 训练数据样本数不能过少,至少要保证训练过程稳定,能获取到稳定的ips; + +修改此目录下对应案例的txt参数文件,来生成不同log,注意对于训练epoch要修改训练py文件, + +``` sh +./PaddleScience/test_tipc/configs +``` + +单卡:自动运行打开Profiling + +``` sh +export CUDA_VISIBLE_DEVICES=0 +``` + +*在解析工具下载好之后,运行脚本 + +``` sh +mkdir jx +cd jx/ +wget https://paddle-qa.bj.bcebos.com/benchmark/tools.tar.gz +tar -zxvf tools.tar.gz +export BENCHMARK_ROOT=$PWD/tools +cd .. +``` + +在PaddleScience主目录下运行(以cylinder2d_unsteady_Re100.py为例) + +``` sh +export script_path=./test_tipc/configs/train_2d_unsteady_continuous/2d_unsteady_continuous_train_infer_python.txt +bash ./test_tipc/prepare.sh $script_path benchmark_train +bash ./test_tipc/benchmark_train.sh $script_path benchmark_train +``` + +benchmark训练得到训练日志后,会自动保存训练日志并解析得到ips等信息,日志解析代码位于: + +- +运行成功会显示 + +``` sh +Run successfully with command +``` + +如果运行成功会生成index文件夹,该目录下面生成json文件[PaddleScience_cylinder2d_unsteady_train_newapi_bs1_fp32_DP_N1C1_speed],其中可以找到对应的ips数值 + +``` log +{"model_branch": "8ffce843ff16046c0303c5dc0cc558fbf8ce2b9d", "model_commit": null, "model_name": "cylinder2d_unsteady_Re100_bs1_fp32_DP", "batch_size": 1, "fp_item": "fp32", "run_mode": "DP", "convergence_value": 0, "convergence_key": "", "ips": 1264165.641, "speed_unit": "images/s", "device_num": "N1C1", "model_run_time": "114", "frame_commit": "e7716f10b2d500857581fce896d96023dd2d9693", "frame_version": "0.0.0"} +``` diff --git a/test_tipc/benchmark_train.sh b/test_tipc/benchmark_train.sh index 95f2b4ee6a..8ad5159f4e 100644 --- a/test_tipc/benchmark_train.sh +++ b/test_tipc/benchmark_train.sh @@ -1,84 +1,84 @@ -#!/usr/bin/env bash -export PDSC_DIR=$(cd "$( dirname ${BASH_SOURCE[0]})"; cd ..; pwd) -export TEST_DIR="${PDSC_DIR}" -source ${TEST_DIR}/test_tipc/common_func.sh - -# Read txt in ./test_tipc/configs/ -PREPARE_PARAM_FILE=$1 -dataline=`cat $PREPARE_PARAM_FILE` -lines=(${dataline}) -workdir=$(func_parser_value "${lines[65]}") -export PYTHONPATH=$PYTHONPATH:$(pwd) - -# Test training benchmark for a model. 
-function _set_params(){ - model_item=$(func_parser_value "${lines[1]}") # (必选) 模型 item |fastscnn|segformer_b0| ocrnet_hrnetw48 - base_batch_size=$(func_parser_value "${lines[57]}") # (必选) 如果是静态图单进程,则表示每张卡上的BS,需在训练时*卡数 - fp_item=$(func_parser_value "${lines[58]}") # (必选) fp32|fp16 - epochs=$(func_parser_value "${lines[59]}") # (必选) Epochs - run_mode=$(func_parser_value "${lines[3]}") # (必选) MP模型并行|DP数据并行|PP流水线并行|混合并行DP1-MP1-PP1|DP1-MP4-PP1 - device_num=$(func_parser_value "${lines[4]}") # (必选) 使用的卡数量,N1C1|N1C8|N4C32 (4机32卡) - - backend="paddle" - model_repo="PaddleScience" # (必选) 模型套件的名字 - speed_unit="samples/sec" # (必选)速度指标单位 - skip_steps=0 # (必选)解析日志,跳过模型前几个性能不稳定的step - keyword="ips:" # (必选)解析日志,筛选出性能数据所在行的关键字 - convergence_key="loss:" # (可选)解析日志,筛选出收敛数据所在行的关键字 如:convergence_key="loss:" - -# 以下为通用执行命令,无特殊可不用修改 - model_name=${model_item}_bs${base_batch_size}_${fp_item}_${run_mode} # (必填) 且格式不要改动,与竞品名称对齐 - device=${CUDA_VISIBLE_DEVICES//,/ } - arr=(${device}) - num_gpu_devices=${#arr[*]} - run_log_path=${TRAIN_LOG_DIR:-$(pwd)} # (必填) TRAIN_LOG_DIR benchmark框架设置该参数为全局变量 - speed_log_path=${LOG_PATH_INDEX_DIR:-$(pwd)} - - train_log_file=${run_log_path}/${model_repo}_${model_name}_${device_num}_log - speed_log_file=${speed_log_path}/${model_repo}_${model_name}_${device_num}_speed - echo run_log_path: ${run_log_path} -} - -function _analysis_log(){ - echo "train_log_file: ${train_log_file}" - echo "speed_log_file: ${speed_log_file}" - cmd="python "${BENCHMARK_ROOT}"/scripts/analysis.py --filename ${train_log_file} \ - --speed_log_file ${speed_log_file} \ - --model_name ${model_name} \ - --base_batch_size ${base_batch_size} \ - --run_mode ${run_mode} \ - --fp_item ${fp_item} \ - --keyword ${keyword} \ - --skip_steps ${skip_steps} \ - --device_num ${device_num} " - echo ${cmd} - eval $cmd -} - -function _train(){ - echo "current CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}, model_name=${model_name}, device_num=${device_num}, is profiling=${profiling}" - cd ${workdir} - train_cmd="python3.10 ${model_item}.py TRAIN.epochs=${epochs}" - echo "train_cmd: ${train_cmd}" - timeout 15m ${train_cmd} > ${train_log_file} 2>&1 - if [ $? -ne 0 ];then - echo -e "${model_name}, FAIL" - else - echo -e "${model_name}, SUCCESS" - fi - cd $(pwd) -} - -_set_params $@ -export frame_version=`python -c "import paddle;print(paddle.__version__)"` -export frame_commit=`python -c "import paddle;print(paddle.__git_commit__)"` -export model_branch=`git rev-parse HEAD` -echo "---------Paddle version = ${frame_version}" -echo "---------Paddle commit = ${frame_commit}" -echo "---------PaddleScience commit = ${model_branch}" - -job_bt=`date '+%Y%m%d%H%M%S'` -_train -job_et=`date '+%Y%m%d%H%M%S'` -export model_run_time=$((${job_et}-${job_bt})) -_analysis_log +#!/usr/bin/env bash +export PDSC_DIR=$(cd "$( dirname ${BASH_SOURCE[0]})"; cd ..; pwd) +export TEST_DIR="${PDSC_DIR}" +source ${TEST_DIR}/test_tipc/common_func.sh + +# Read txt in ./test_tipc/configs/ +PREPARE_PARAM_FILE=$1 +dataline=`cat $PREPARE_PARAM_FILE` +lines=(${dataline}) +workdir=$(func_parser_value "${lines[65]}") +export PYTHONPATH=$PYTHONPATH:$(pwd) + +# Test training benchmark for a model. 
+function _set_params(){ + model_item=$(func_parser_value "${lines[1]}") # (必选) 模型 item |fastscnn|segformer_b0| ocrnet_hrnetw48 + base_batch_size=$(func_parser_value "${lines[57]}") # (必选) 如果是静态图单进程,则表示每张卡上的BS,需在训练时*卡数 + fp_item=$(func_parser_value "${lines[58]}") # (必选) fp32|fp16 + epochs=$(func_parser_value "${lines[59]}") # (必选) Epochs + run_mode=$(func_parser_value "${lines[3]}") # (必选) MP模型并行|DP数据并行|PP流水线并行|混合并行DP1-MP1-PP1|DP1-MP4-PP1 + device_num=$(func_parser_value "${lines[4]}") # (必选) 使用的卡数量,N1C1|N1C8|N4C32 (4机32卡) + + backend="paddle" + model_repo="PaddleScience" # (必选) 模型套件的名字 + speed_unit="samples/sec" # (必选)速度指标单位 + skip_steps=0 # (必选)解析日志,跳过模型前几个性能不稳定的step + keyword="ips:" # (必选)解析日志,筛选出性能数据所在行的关键字 + convergence_key="loss:" # (可选)解析日志,筛选出收敛数据所在行的关键字 如:convergence_key="loss:" + +# 以下为通用执行命令,无特殊可不用修改 + model_name=${model_item}_bs${base_batch_size}_${fp_item}_${run_mode} # (必填) 且格式不要改动,与竞品名称对齐 + device=${CUDA_VISIBLE_DEVICES//,/ } + arr=(${device}) + num_gpu_devices=${#arr[*]} + run_log_path=${TRAIN_LOG_DIR:-$(pwd)} # (必填) TRAIN_LOG_DIR benchmark框架设置该参数为全局变量 + speed_log_path=${LOG_PATH_INDEX_DIR:-$(pwd)} + + train_log_file=${run_log_path}/${model_repo}_${model_name}_${device_num}_log + speed_log_file=${speed_log_path}/${model_repo}_${model_name}_${device_num}_speed + echo run_log_path: ${run_log_path} +} + +function _analysis_log(){ + echo "train_log_file: ${train_log_file}" + echo "speed_log_file: ${speed_log_file}" + cmd="python "${BENCHMARK_ROOT}"/scripts/analysis.py --filename ${train_log_file} \ + --speed_log_file ${speed_log_file} \ + --model_name ${model_name} \ + --base_batch_size ${base_batch_size} \ + --run_mode ${run_mode} \ + --fp_item ${fp_item} \ + --keyword ${keyword} \ + --skip_steps ${skip_steps} \ + --device_num ${device_num} " + echo ${cmd} + eval $cmd +} + +function _train(){ + echo "current CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}, model_name=${model_name}, device_num=${device_num}, is profiling=${profiling}" + cd ${workdir} + train_cmd="python3.10 ${model_item}.py TRAIN.epochs=${epochs}" + echo "train_cmd: ${train_cmd}" + timeout 15m ${train_cmd} > ${train_log_file} 2>&1 + if [ $? -ne 0 ];then + echo -e "${model_name}, FAIL" + else + echo -e "${model_name}, SUCCESS" + fi + cd $(pwd) +} + +_set_params $@ +export frame_version=`python -c "import paddle;print(paddle.__version__)"` +export frame_commit=`python -c "import paddle;print(paddle.__git_commit__)"` +export model_branch=`git rev-parse HEAD` +echo "---------Paddle version = ${frame_version}" +echo "---------Paddle commit = ${frame_commit}" +echo "---------PaddleScience commit = ${model_branch}" + +job_bt=`date '+%Y%m%d%H%M%S'` +_train +job_et=`date '+%Y%m%d%H%M%S'` +export model_run_time=$((${job_et}-${job_bt})) +_analysis_log diff --git a/test_tipc/common_func.sh b/test_tipc/common_func.sh index 710fdeddb4..b7df5be9eb 100644 --- a/test_tipc/common_func.sh +++ b/test_tipc/common_func.sh @@ -1,79 +1,79 @@ -#!/bin/bash - -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -function func_parser_key(){ - strs=$1 - IFS=":" - array=(${strs}) - tmp=${array[0]} - echo ${tmp} -} - -function func_parser_value(){ - strs=$1 - IFS=":" - array=(${strs}) - tmp=${array[1]} - echo ${tmp} -} - -function func_set_params(){ - key=$1 - value=$2 - if [ ${key}x = "null"x ];then - echo " " - elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then - echo " " - else - echo "${key}=${value}" - fi -} - -function func_parser_params(){ - strs=$1 - MODE=$2 - IFS=":" - array=(${strs}) - key=${array[0]} - tmp=${array[1]} - IFS="|" - res="" - for _params in ${tmp[*]}; do - IFS="=" - array=(${_params}) - mode=${array[0]} - value=${array[1]} - if [[ ${mode} = ${MODE} ]]; then - IFS="|" - #echo $(func_set_params "${mode}" "${value}") - echo $value - break - fi - IFS="|" - done - echo ${res} -} - -function status_check(){ - last_status=$1 # the exit code - run_command=$2 - run_log=$3 - if [ $last_status -eq 0 ]; then - echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} - else - echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log} - fi -} +#!/bin/bash + +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +function func_parser_key(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[0]} + echo ${tmp} +} + +function func_parser_value(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[1]} + echo ${tmp} +} + +function func_set_params(){ + key=$1 + value=$2 + if [ ${key}x = "null"x ];then + echo " " + elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then + echo " " + else + echo "${key}=${value}" + fi +} + +function func_parser_params(){ + strs=$1 + MODE=$2 + IFS=":" + array=(${strs}) + key=${array[0]} + tmp=${array[1]} + IFS="|" + res="" + for _params in ${tmp[*]}; do + IFS="=" + array=(${_params}) + mode=${array[0]} + value=${array[1]} + if [[ ${mode} = ${MODE} ]]; then + IFS="|" + #echo $(func_set_params "${mode}" "${value}") + echo $value + break + fi + IFS="|" + done + echo ${res} +} + +function status_check(){ + last_status=$1 # the exit code + run_command=$2 + run_log=$3 + if [ $last_status -eq 0 ]; then + echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} + else + echo -e "\033[33m Run failed with command - ${run_command}! 
\033[0m" | tee -a ${run_log} + fi +} diff --git a/test_tipc/configs/train_2d_unsteady_continuous/2d_unsteady_continuous_train_infer_python.txt b/test_tipc/configs/train_2d_unsteady_continuous/2d_unsteady_continuous_train_infer_python.txt index 955aeb44a5..38de14bfad 100644 --- a/test_tipc/configs/train_2d_unsteady_continuous/2d_unsteady_continuous_train_infer_python.txt +++ b/test_tipc/configs/train_2d_unsteady_continuous/2d_unsteady_continuous_train_infer_python.txt @@ -1,63 +1,63 @@ -===========================train_params=========================== -model_name:cylinder2d_unsteady_Re100 -bs_item:1 -run_mode:DP -device_num:N1C1 -gpu_list:0 -Global.use_gpu:null -null:null -TRAIN.epochs -null:null -null:null -Global.pretrained_model:null -train_model_name:latest -train_infer_img_dir:null ---profiler_options:null -## -trainer:norm_train -norm_train:/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.checkpoints: -norm_export:null -quant_export:null -fpgm_export:null -distill_export:null -export1:null -export2:null -inference_dir:null -train_model:null -infer_export:tools/export_model.py -c null -o -infer_quant:False -inference:tools/infer/predict_det.py ---use_gpu:True ---enable_mkldnn:null ---cpu_threads:null ---rec_batch_num:1 ---use_tensorrt:null ---precision:null ---det_model_dir: ---image_dir:null -null:null ---benchmark:True -null:null -===========================train_benchmark_params========================== -batch_size:1 -fp_items:fp32 -epoch:200 -null:null;state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=0 -===========================prepare_params========================== -download_dataset:/examples/cylinder/2d_unsteady/download_dataset.py -pip:pip -workdir:examples/cylinder/2d_unsteady +===========================train_params=========================== +model_name:cylinder2d_unsteady_Re100 +bs_item:1 +run_mode:DP +device_num:N1C1 +gpu_list:0 +Global.use_gpu:null +null:null +TRAIN.epochs +null:null +null:null +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:null +--profiler_options:null +## +trainer:norm_train +norm_train:/examples/cylinder/2d_unsteady/cylinder2d_unsteady_Re100.py +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:null +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +inference_dir:null +train_model:null +infer_export:tools/export_model.py -c null -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True +--enable_mkldnn:null +--cpu_threads:null +--rec_batch_num:1 +--use_tensorrt:null +--precision:null +--det_model_dir: +--image_dir:null +null:null +--benchmark:True +null:null +===========================train_benchmark_params========================== +batch_size:1 +fp_items:fp32 +epoch:200 +null:null;state=GPU;tracer_option=Default;profile_path=model.profile 
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=0 +===========================prepare_params========================== +download_dataset:/examples/cylinder/2d_unsteady/download_dataset.py +pip:pip +workdir:examples/cylinder/2d_unsteady diff --git a/test_tipc/configs/train_eular_beam/eular_beam_train_infer_python.txt b/test_tipc/configs/train_eular_beam/eular_beam_train_infer_python.txt index 3affc54dae..69cfc97220 100644 --- a/test_tipc/configs/train_eular_beam/eular_beam_train_infer_python.txt +++ b/test_tipc/configs/train_eular_beam/eular_beam_train_infer_python.txt @@ -1,63 +1,63 @@ -===========================train_params=========================== -model_name:euler_beam -bs_item:1 -run_mode:DP -device_num:N1C1 -python:python3.10 -gpu_list:0 -Global.use_gpu:null -null:null -TRAIN.epochs -null:null -null:null -null:null -train_model_name:latest -train_infer_img_dir:null -null:null -## -trainer:norm_train -norm_train:/examples/euler_beam/euler_beam.py -pact_train:null -fpgm_train:null -distill_train:null -null:null -null:null -## -===========================eval_params=========================== -eval:null -null:null -## -===========================infer_params=========================== -Global.save_inference_dir:./output/ -Global.checkpoints: -norm_export:null -quant_export:null -fpgm_export:null -distill_export:null -export1:null -export2:null -inference_dir:null -train_model:null -infer_export:tools/export_model.py -c null -o -infer_quant:False -inference:tools/infer/predict_det.py ---use_gpu:True ---enable_mkldnn:null ---cpu_threads:null ---rec_batch_num:1 ---use_tensorrt:null ---precision:fp32 ---det_model_dir: ---image_dir:null -null:null ---benchmark:True -===========================train_benchmark_params========================== -batch_size:1 -fp_items:fp32 -epoch:10000 ---profiler_options:batch_range=[1,1];state=GPU;tracer_option=Default;profile_path=model.profile -flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=0 -===========================prepare_params========================== -download_dataset: -pip:pip -workdir:examples/euler_beam +===========================train_params=========================== +model_name:euler_beam +bs_item:1 +run_mode:DP +device_num:N1C1 +python:python3.10 +gpu_list:0 +Global.use_gpu:null +null:null +TRAIN.epochs +null:null +null:null +null:null +train_model_name:latest +train_infer_img_dir:null +null:null +## +trainer:norm_train +norm_train:/examples/euler_beam/euler_beam.py +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:null +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.checkpoints: +norm_export:null +quant_export:null +fpgm_export:null +distill_export:null +export1:null +export2:null +inference_dir:null +train_model:null +infer_export:tools/export_model.py -c null -o +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True +--enable_mkldnn:null +--cpu_threads:null +--rec_batch_num:1 +--use_tensorrt:null +--precision:fp32 +--det_model_dir: +--image_dir:null +null:null +--benchmark:True +===========================train_benchmark_params========================== +batch_size:1 +fp_items:fp32 +epoch:10000 +--profiler_options:batch_range=[1,1];state=GPU;tracer_option=Default;profile_path=model.profile 
+flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=0 +===========================prepare_params========================== +download_dataset: +pip:pip +workdir:examples/euler_beam diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh index 80a5c20e20..9847b1c49d 100644 --- a/test_tipc/prepare.sh +++ b/test_tipc/prepare.sh @@ -1,28 +1,28 @@ -# Set directories -export PDSC_DIR=$(cd "$( dirname ${BASH_SOURCE[0]})"; cd ..; pwd) -export TEST_DIR="${PDSC_DIR}" -export TIPC_TEST="ON" # open tipc log in solver.py -export PYTHONPATH=${PDSC_DIR} - -BENCHMARK_ROOT="${TEST_DIR}/test_tipc/tools" -echo -e "\n* [TEST_DIR] is now set : \n" ${TEST_DIR} "\n" -echo -e "\n* [BENCHMARK_ROOT] is now set : \n" ${BENCHMARK_ROOT} "\n" -echo -e "\n* [PYTHONPATH] is now set : \n" ${PYTHONPATH} "\n" - -source ${TEST_DIR}/test_tipc/common_func.sh - -# Read parameters from [/tipc/config/*/*.txt] -PREPARE_PARAM_FILE=$1 -dataline=`cat $PREPARE_PARAM_FILE` -lines=(${dataline}) -download_dataset=$(func_parser_value "${lines[63]}") -export pip=$(func_parser_value "${lines[64]}") -workdir=$(func_parser_value "${lines[65]}") -${pip} install --upgrade pip -${pip} install pybind11 -${pip} install -r requirements.txt - -if [ ${download_dataset} ] ; then - cd ${PDSC_DIR}/${workdir} - python3.10 ${PDSC_DIR}${download_dataset} -fi +# Set directories +export PDSC_DIR=$(cd "$( dirname ${BASH_SOURCE[0]})"; cd ..; pwd) +export TEST_DIR="${PDSC_DIR}" +export TIPC_TEST="ON" # open tipc log in solver.py +export PYTHONPATH=${PDSC_DIR} + +BENCHMARK_ROOT="${TEST_DIR}/test_tipc/tools" +echo -e "\n* [TEST_DIR] is now set : \n" ${TEST_DIR} "\n" +echo -e "\n* [BENCHMARK_ROOT] is now set : \n" ${BENCHMARK_ROOT} "\n" +echo -e "\n* [PYTHONPATH] is now set : \n" ${PYTHONPATH} "\n" + +source ${TEST_DIR}/test_tipc/common_func.sh + +# Read parameters from [/tipc/config/*/*.txt] +PREPARE_PARAM_FILE=$1 +dataline=`cat $PREPARE_PARAM_FILE` +lines=(${dataline}) +download_dataset=$(func_parser_value "${lines[63]}") +export pip=$(func_parser_value "${lines[64]}") +workdir=$(func_parser_value "${lines[65]}") +${pip} install --upgrade pip +${pip} install pybind11 +${pip} install -r requirements.txt + +if [ ${download_dataset} ] ; then + cd ${PDSC_DIR}/${workdir} + python3.10 ${PDSC_DIR}${download_dataset} +fi diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh index 7b44319620..345f5bfe14 100644 --- a/test_tipc/test_train_inference_python.sh +++ b/test_tipc/test_train_inference_python.sh @@ -1,393 +1,393 @@ -#!/bin/bash - -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -source ${TEST_DIR}/test_tipc/common_func.sh - -FILENAME=$1 -# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer'] -MODE=$2 - -dataline=$(awk 'NR==1, NR==51{print}' $FILENAME) - -# parser params -IFS=$'\n' -lines=(${dataline}) - -# The training params -model_name=$(func_parser_value "${lines[1]}") -python=$(func_parser_value "${lines[2]}") -gpu_list=$(func_parser_value "${lines[3]}") -train_use_gpu_key=$(func_parser_key "${lines[4]}") -train_use_gpu_value=$(func_parser_value "${lines[4]}") -autocast_list=$(func_parser_value "${lines[5]}") -autocast_key=$(func_parser_key "${lines[5]}") -epoch_key=$(func_parser_key "${lines[6]}") -epoch_num=$(func_parser_params "${lines[6]}" "${MODE}") -save_model_key=$(func_parser_key "${lines[7]}") -train_batch_key=$(func_parser_key "${lines[8]}") -train_batch_value=$(func_parser_params "${lines[8]}" "${MODE}") -pretrain_model_key=$(func_parser_key "${lines[9]}") -pretrain_model_value=$(func_parser_value "${lines[9]}") -train_model_name=$(func_parser_value "${lines[10]}") -train_infer_img_dir=$(func_parser_value "${lines[11]}") -train_param_key1=$(func_parser_key "${lines[12]}") -train_param_value1=$(func_parser_value "${lines[12]}") - -trainer_list=$(func_parser_value "${lines[14]}") -trainer_norm=$(func_parser_key "${lines[15]}") -norm_trainer=$(func_parser_value "${lines[15]}") -pact_key=$(func_parser_key "${lines[16]}") -pact_trainer=$(func_parser_value "${lines[16]}") -fpgm_key=$(func_parser_key "${lines[17]}") -fpgm_trainer=$(func_parser_value "${lines[17]}") -distill_key=$(func_parser_key "${lines[18]}") -distill_trainer=$(func_parser_value "${lines[18]}") -trainer_key1=$(func_parser_key "${lines[19]}") -trainer_value1=$(func_parser_value "${lines[19]}") -trainer_key2=$(func_parser_key "${lines[20]}") -trainer_value2=$(func_parser_value "${lines[20]}") - -eval_py=$(func_parser_value "${lines[23]}") -eval_key1=$(func_parser_key "${lines[24]}") -eval_value1=$(func_parser_value "${lines[24]}") - -save_infer_key=$(func_parser_key "${lines[27]}") -export_weight=$(func_parser_key "${lines[28]}") -norm_export=$(func_parser_value "${lines[29]}") -pact_export=$(func_parser_value "${lines[30]}") -fpgm_export=$(func_parser_value "${lines[31]}") -distill_export=$(func_parser_value "${lines[32]}") -export_key1=$(func_parser_key "${lines[33]}") -export_value1=$(func_parser_value "${lines[33]}") -export_key2=$(func_parser_key "${lines[34]}") -export_value2=$(func_parser_value "${lines[34]}") -inference_dir=$(func_parser_value "${lines[35]}") - -# parser inference model -infer_model_dir_list=$(func_parser_value "${lines[36]}") -infer_export_list=$(func_parser_value "${lines[37]}") -infer_is_quant=$(func_parser_value "${lines[38]}") -# parser inference -inference_py=$(func_parser_value "${lines[39]}") -use_gpu_key=$(func_parser_key "${lines[40]}") -use_gpu_list=$(func_parser_value "${lines[40]}") -use_mkldnn_key=$(func_parser_key "${lines[41]}") -use_mkldnn_list=$(func_parser_value "${lines[41]}") -cpu_threads_key=$(func_parser_key "${lines[42]}") -cpu_threads_list=$(func_parser_value "${lines[42]}") -batch_size_key=$(func_parser_key "${lines[43]}") -batch_size_list=$(func_parser_value "${lines[43]}") -use_trt_key=$(func_parser_key "${lines[44]}") -use_trt_list=$(func_parser_value "${lines[44]}") -precision_key=$(func_parser_key "${lines[45]}") -precision_list=$(func_parser_value "${lines[45]}") -infer_model_key=$(func_parser_key "${lines[46]}") -image_dir_key=$(func_parser_key 
"${lines[47]}") -infer_img_dir=$(func_parser_value "${lines[47]}") -save_log_key=$(func_parser_key "${lines[48]}") -benchmark_key=$(func_parser_key "${lines[49]}") -benchmark_value=$(func_parser_value "${lines[49]}") -infer_key1=$(func_parser_key "${lines[50]}") -infer_value1=$(func_parser_value "${lines[50]}") - -# parser klquant_infer -if [ ${MODE} = "klquant_whole_infer" ]; then - dataline=$(awk 'NR==1, NR==17{print}' $FILENAME) - lines=(${dataline}) - model_name=$(func_parser_value "${lines[1]}") - python=$(func_parser_value "${lines[2]}") - export_weight=$(func_parser_key "${lines[3]}") - save_infer_key=$(func_parser_key "${lines[4]}") - # parser inference model - infer_model_dir_list=$(func_parser_value "${lines[5]}") - infer_export_list=$(func_parser_value "${lines[6]}") - infer_is_quant=$(func_parser_value "${lines[7]}") - # parser inference - inference_py=$(func_parser_value "${lines[8]}") - use_gpu_key=$(func_parser_key "${lines[9]}") - use_gpu_list=$(func_parser_value "${lines[9]}") - use_mkldnn_key=$(func_parser_key "${lines[10]}") - use_mkldnn_list=$(func_parser_value "${lines[10]}") - cpu_threads_key=$(func_parser_key "${lines[11]}") - cpu_threads_list=$(func_parser_value "${lines[11]}") - batch_size_key=$(func_parser_key "${lines[12]}") - batch_size_list=$(func_parser_value "${lines[12]}") - use_trt_key=$(func_parser_key "${lines[13]}") - use_trt_list=$(func_parser_value "${lines[13]}") - precision_key=$(func_parser_key "${lines[14]}") - precision_list=$(func_parser_value "${lines[14]}") - infer_model_key=$(func_parser_key "${lines[15]}") - image_dir_key=$(func_parser_key "${lines[16]}") - infer_img_dir=$(func_parser_value "${lines[16]}") - save_log_key=$(func_parser_key "${lines[17]}") - save_log_value=$(func_parser_value "${lines[17]}") - benchmark_key=$(func_parser_key "${lines[18]}") - benchmark_value=$(func_parser_value "${lines[18]}") - infer_key1=$(func_parser_key "${lines[19]}") - infer_value1=$(func_parser_value "${lines[19]}") -fi - -LOG_PATH="./test_tipc/output" -mkdir -p ${LOG_PATH} -status_log="${LOG_PATH}/results_python.log" - - -function func_inference(){ - IFS='|' - _python=$1 - _script=$2 - _model_dir=$3 - _log_path=$4 - _img_dir=$5 - _flag_quant=$6 - # inference - for use_gpu in ${use_gpu_list[*]}; do - if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then - for use_mkldnn in ${use_mkldnn_list[*]}; do - if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then - continue - fi - for threads in ${cpu_threads_list[*]}; do - for batch_size in ${batch_size_list[*]}; do - for precision in ${precision_list[*]}; do - if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then - continue - fi # skip when enable fp16 but disable mkldnn - if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then - continue - fi # skip when quant model inference but precision is not int8 - set_precision=$(func_set_params "${precision_key}" "${precision}") - - _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log" - set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") - set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") - set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") - set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}") - set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") - set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") - set_infer_params0=$(func_set_params 
"${save_log_key}" "${save_log_value}") - set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 " - eval $command - last_status=${PIPESTATUS[0]} - eval "cat ${_save_log_path}" - status_check $last_status "${command}" "${status_log}" - done - done - done - done - elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then - for use_trt in ${use_trt_list[*]}; do - for precision in ${precision_list[*]}; do - if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then - continue - fi - if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then - continue - fi - if [[ ${use_trt} = "False" && ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then - continue - fi - for batch_size in ${batch_size_list[*]}; do - _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" - set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") - set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") - set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") - set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") - set_precision=$(func_set_params "${precision_key}" "${precision}") - set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") - set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}") - set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 " - eval $command - last_status=${PIPESTATUS[0]} - eval "cat ${_save_log_path}" - status_check $last_status "${command}" "${status_log}" - - done - done - done - else - echo "Does not support hardware other than CPU and GPU Currently!" - fi - done -} - -if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then - GPUID=$3 - if [ ${#GPUID} -le 0 ];then - env=" " - else - env="export CUDA_VISIBLE_DEVICES=${GPUID}" - fi - # set CUDA_VISIBLE_DEVICES - eval $env - export Count=0 - IFS="|" - infer_run_exports=(${infer_export_list}) - infer_quant_flag=(${infer_is_quant}) - for infer_model in ${infer_model_dir_list[*]}; do - # run export - if [ ${infer_run_exports[Count]} != "null" ];then - if [ ${MODE} = "klquant_whole_infer" ]; then - save_infer_dir="${infer_model}_klquant" - fi - if [ ${MODE} = "whole_infer" ]; then - save_infer_dir="${infer_model}" - fi - set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") - set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") - export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" - echo ${infer_run_exports[Count]} - echo $export_cmd - eval $export_cmd - status_export=$? 
- status_check $status_export "${export_cmd}" "${status_log}" - else - save_infer_dir=${infer_model} - fi - #run inference - is_quant=${infer_quant_flag[Count]} - if [ ${MODE} = "klquant_whole_infer" ]; then - is_quant="True" - fi - func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} - Count=$(($Count + 1)) - done -else - IFS="|" - export Count=0 - USE_GPU_KEY=(${train_use_gpu_value}) - for gpu in ${gpu_list[*]}; do - train_use_gpu=${USE_GPU_KEY[Count]} - Count=$(($Count + 1)) - ips="" - if [ ${gpu} = "-1" ];then - env="" - elif [ ${#gpu} -le 1 ];then - env="export CUDA_VISIBLE_DEVICES=${gpu}" - elif [ ${#gpu} -le 15 ];then - IFS="," - array=(${gpu}) - env="export CUDA_VISIBLE_DEVICES=${array[0]}" - IFS="|" - else - IFS=";" - array=(${gpu}) - ips=${array[0]} - gpu=${array[1]} - IFS="|" - env=" " - fi - for autocast in ${autocast_list[*]}; do - if [ ${autocast} = "amp" ]; then - set_amp_config="Global.use_amp=True Global.scale_loss=1024.0 Global.use_dynamic_loss_scaling=True" - else - set_amp_config=" " - fi - for trainer in ${trainer_list[*]}; do - flag_quant=False - if [ ${trainer} = ${pact_key} ]; then - run_train=${pact_trainer} - run_export=${pact_export} - flag_quant=True - elif [ ${trainer} = "${fpgm_key}" ]; then - run_train=${fpgm_trainer} - run_export=${fpgm_export} - elif [ ${trainer} = "${distill_key}" ]; then - run_train=${distill_trainer} - run_export=${distill_export} - elif [ ${trainer} = ${trainer_key1} ]; then - run_train=${trainer_value1} - run_export=${export_value1} - elif [[ ${trainer} = ${trainer_key2} ]]; then - run_train=${trainer_value2} - run_export=${export_value2} - else - run_train=${PDSC_DIR}${norm_trainer} - echo -e '\n' ${run_train} '\n' - run_export=${norm_export} - fi - - if [ ${run_train} = "null" ]; then - continue - fi - set_autocast=$(func_set_params "${autocast_key}" "${autocast}") - set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}") - set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}") - set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}") - set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}") - set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}") - if [ ${#ips} -le 26 ];then - save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" - nodes=1 - else - IFS="," - ips_array=(${ips}) - IFS="|" - nodes=${#ips_array[@]} - save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}" - fi - - - set_save_model=$(func_set_params "${save_model_key}" "${save_log}") - if [ ${#gpu} -le 2 ];then # train with cpu or single gpu - cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config} " - elif [ ${#ips} -le 26 ];then # train with multi-gpu - cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}" - else # train with multi-machine - cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}" - fi - # run train - eval $cmd - status_check $? 
"${cmd}" "${status_log}" - - set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}") - - # run eval - if [ ${eval_py} != "null" ]; then - eval ${env} - set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}") - eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}" - eval $eval_cmd - status_check $? "${eval_cmd}" "${status_log}" - fi - # run export model - if [ ${run_export} != "null" ]; then - # run export model - save_infer_path="${save_log}" - set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${train_model_name}") - set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}") - export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}" - eval $export_cmd - status_check $? "${export_cmd}" "${status_log}" - - #run inference - eval $env - save_infer_path="${save_log}" - if [[ ${inference_dir} != "null" ]] && [[ ${inference_dir} != '##' ]]; then - infer_model_dir="${save_infer_path}/${inference_dir}" - else - infer_model_dir=${save_infer_path} - fi - func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" - - eval "unset CUDA_VISIBLE_DEVICES" - fi - done # done with: for trainer in ${trainer_list[*]}; do - done # done with: for autocast in ${autocast_list[*]}; do - done # done with: for gpu in ${gpu_list[*]}; do -fi # end if [ ${MODE} = "infer" ]; then +#!/bin/bash + +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +source ${TEST_DIR}/test_tipc/common_func.sh + +FILENAME=$1 +# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer' 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer'] +MODE=$2 + +dataline=$(awk 'NR==1, NR==51{print}' $FILENAME) + +# parser params +IFS=$'\n' +lines=(${dataline}) + +# The training params +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") +gpu_list=$(func_parser_value "${lines[3]}") +train_use_gpu_key=$(func_parser_key "${lines[4]}") +train_use_gpu_value=$(func_parser_value "${lines[4]}") +autocast_list=$(func_parser_value "${lines[5]}") +autocast_key=$(func_parser_key "${lines[5]}") +epoch_key=$(func_parser_key "${lines[6]}") +epoch_num=$(func_parser_params "${lines[6]}" "${MODE}") +save_model_key=$(func_parser_key "${lines[7]}") +train_batch_key=$(func_parser_key "${lines[8]}") +train_batch_value=$(func_parser_params "${lines[8]}" "${MODE}") +pretrain_model_key=$(func_parser_key "${lines[9]}") +pretrain_model_value=$(func_parser_value "${lines[9]}") +train_model_name=$(func_parser_value "${lines[10]}") +train_infer_img_dir=$(func_parser_value "${lines[11]}") +train_param_key1=$(func_parser_key "${lines[12]}") +train_param_value1=$(func_parser_value "${lines[12]}") + +trainer_list=$(func_parser_value "${lines[14]}") +trainer_norm=$(func_parser_key "${lines[15]}") +norm_trainer=$(func_parser_value "${lines[15]}") +pact_key=$(func_parser_key "${lines[16]}") +pact_trainer=$(func_parser_value "${lines[16]}") +fpgm_key=$(func_parser_key "${lines[17]}") +fpgm_trainer=$(func_parser_value "${lines[17]}") +distill_key=$(func_parser_key "${lines[18]}") +distill_trainer=$(func_parser_value "${lines[18]}") +trainer_key1=$(func_parser_key "${lines[19]}") +trainer_value1=$(func_parser_value "${lines[19]}") +trainer_key2=$(func_parser_key "${lines[20]}") +trainer_value2=$(func_parser_value "${lines[20]}") + +eval_py=$(func_parser_value "${lines[23]}") +eval_key1=$(func_parser_key "${lines[24]}") +eval_value1=$(func_parser_value "${lines[24]}") + +save_infer_key=$(func_parser_key "${lines[27]}") +export_weight=$(func_parser_key "${lines[28]}") +norm_export=$(func_parser_value "${lines[29]}") +pact_export=$(func_parser_value "${lines[30]}") +fpgm_export=$(func_parser_value "${lines[31]}") +distill_export=$(func_parser_value "${lines[32]}") +export_key1=$(func_parser_key "${lines[33]}") +export_value1=$(func_parser_value "${lines[33]}") +export_key2=$(func_parser_key "${lines[34]}") +export_value2=$(func_parser_value "${lines[34]}") +inference_dir=$(func_parser_value "${lines[35]}") + +# parser inference model +infer_model_dir_list=$(func_parser_value "${lines[36]}") +infer_export_list=$(func_parser_value "${lines[37]}") +infer_is_quant=$(func_parser_value "${lines[38]}") +# parser inference +inference_py=$(func_parser_value "${lines[39]}") +use_gpu_key=$(func_parser_key "${lines[40]}") +use_gpu_list=$(func_parser_value "${lines[40]}") +use_mkldnn_key=$(func_parser_key "${lines[41]}") +use_mkldnn_list=$(func_parser_value "${lines[41]}") +cpu_threads_key=$(func_parser_key "${lines[42]}") +cpu_threads_list=$(func_parser_value "${lines[42]}") +batch_size_key=$(func_parser_key "${lines[43]}") +batch_size_list=$(func_parser_value "${lines[43]}") +use_trt_key=$(func_parser_key "${lines[44]}") +use_trt_list=$(func_parser_value "${lines[44]}") +precision_key=$(func_parser_key "${lines[45]}") +precision_list=$(func_parser_value "${lines[45]}") +infer_model_key=$(func_parser_key "${lines[46]}") +image_dir_key=$(func_parser_key 
"${lines[47]}") +infer_img_dir=$(func_parser_value "${lines[47]}") +save_log_key=$(func_parser_key "${lines[48]}") +benchmark_key=$(func_parser_key "${lines[49]}") +benchmark_value=$(func_parser_value "${lines[49]}") +infer_key1=$(func_parser_key "${lines[50]}") +infer_value1=$(func_parser_value "${lines[50]}") + +# parser klquant_infer +if [ ${MODE} = "klquant_whole_infer" ]; then + dataline=$(awk 'NR==1, NR==17{print}' $FILENAME) + lines=(${dataline}) + model_name=$(func_parser_value "${lines[1]}") + python=$(func_parser_value "${lines[2]}") + export_weight=$(func_parser_key "${lines[3]}") + save_infer_key=$(func_parser_key "${lines[4]}") + # parser inference model + infer_model_dir_list=$(func_parser_value "${lines[5]}") + infer_export_list=$(func_parser_value "${lines[6]}") + infer_is_quant=$(func_parser_value "${lines[7]}") + # parser inference + inference_py=$(func_parser_value "${lines[8]}") + use_gpu_key=$(func_parser_key "${lines[9]}") + use_gpu_list=$(func_parser_value "${lines[9]}") + use_mkldnn_key=$(func_parser_key "${lines[10]}") + use_mkldnn_list=$(func_parser_value "${lines[10]}") + cpu_threads_key=$(func_parser_key "${lines[11]}") + cpu_threads_list=$(func_parser_value "${lines[11]}") + batch_size_key=$(func_parser_key "${lines[12]}") + batch_size_list=$(func_parser_value "${lines[12]}") + use_trt_key=$(func_parser_key "${lines[13]}") + use_trt_list=$(func_parser_value "${lines[13]}") + precision_key=$(func_parser_key "${lines[14]}") + precision_list=$(func_parser_value "${lines[14]}") + infer_model_key=$(func_parser_key "${lines[15]}") + image_dir_key=$(func_parser_key "${lines[16]}") + infer_img_dir=$(func_parser_value "${lines[16]}") + save_log_key=$(func_parser_key "${lines[17]}") + save_log_value=$(func_parser_value "${lines[17]}") + benchmark_key=$(func_parser_key "${lines[18]}") + benchmark_value=$(func_parser_value "${lines[18]}") + infer_key1=$(func_parser_key "${lines[19]}") + infer_value1=$(func_parser_value "${lines[19]}") +fi + +LOG_PATH="./test_tipc/output" +mkdir -p ${LOG_PATH} +status_log="${LOG_PATH}/results_python.log" + + +function func_inference(){ + IFS='|' + _python=$1 + _script=$2 + _model_dir=$3 + _log_path=$4 + _img_dir=$5 + _flag_quant=$6 + # inference + for use_gpu in ${use_gpu_list[*]}; do + if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then + for use_mkldnn in ${use_mkldnn_list[*]}; do + if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then + continue + fi + for threads in ${cpu_threads_list[*]}; do + for batch_size in ${batch_size_list[*]}; do + for precision in ${precision_list[*]}; do + if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then + continue + fi # skip when enable fp16 but disable mkldnn + if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then + continue + fi # skip when quant model inference but precision is not int8 + set_precision=$(func_set_params "${precision_key}" "${precision}") + + _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}") + set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params 
"${save_log_key}" "${save_log_value}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + done + done + done + done + elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then + for use_trt in ${use_trt_list[*]}; do + for precision in ${precision_list[*]}; do + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" && ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then + continue + fi + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") + set_precision=$(func_set_params "${precision_key}" "${precision}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + + done + done + done + else + echo "Does not support hardware other than CPU and GPU Currently!" + fi + done +} + +if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then + GPUID=$3 + if [ ${#GPUID} -le 0 ];then + env=" " + else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" + fi + # set CUDA_VISIBLE_DEVICES + eval $env + export Count=0 + IFS="|" + infer_run_exports=(${infer_export_list}) + infer_quant_flag=(${infer_is_quant}) + for infer_model in ${infer_model_dir_list[*]}; do + # run export + if [ ${infer_run_exports[Count]} != "null" ];then + if [ ${MODE} = "klquant_whole_infer" ]; then + save_infer_dir="${infer_model}_klquant" + fi + if [ ${MODE} = "whole_infer" ]; then + save_infer_dir="${infer_model}" + fi + set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") + export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}" + echo ${infer_run_exports[Count]} + echo $export_cmd + eval $export_cmd + status_export=$? 
+ status_check $status_export "${export_cmd}" "${status_log}" + else + save_infer_dir=${infer_model} + fi + #run inference + is_quant=${infer_quant_flag[Count]} + if [ ${MODE} = "klquant_whole_infer" ]; then + is_quant="True" + fi + func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} + Count=$(($Count + 1)) + done +else + IFS="|" + export Count=0 + USE_GPU_KEY=(${train_use_gpu_value}) + for gpu in ${gpu_list[*]}; do + train_use_gpu=${USE_GPU_KEY[Count]} + Count=$(($Count + 1)) + ips="" + if [ ${gpu} = "-1" ];then + env="" + elif [ ${#gpu} -le 1 ];then + env="export CUDA_VISIBLE_DEVICES=${gpu}" + elif [ ${#gpu} -le 15 ];then + IFS="," + array=(${gpu}) + env="export CUDA_VISIBLE_DEVICES=${array[0]}" + IFS="|" + else + IFS=";" + array=(${gpu}) + ips=${array[0]} + gpu=${array[1]} + IFS="|" + env=" " + fi + for autocast in ${autocast_list[*]}; do + if [ ${autocast} = "amp" ]; then + set_amp_config="Global.use_amp=True Global.scale_loss=1024.0 Global.use_dynamic_loss_scaling=True" + else + set_amp_config=" " + fi + for trainer in ${trainer_list[*]}; do + flag_quant=False + if [ ${trainer} = ${pact_key} ]; then + run_train=${pact_trainer} + run_export=${pact_export} + flag_quant=True + elif [ ${trainer} = "${fpgm_key}" ]; then + run_train=${fpgm_trainer} + run_export=${fpgm_export} + elif [ ${trainer} = "${distill_key}" ]; then + run_train=${distill_trainer} + run_export=${distill_export} + elif [ ${trainer} = ${trainer_key1} ]; then + run_train=${trainer_value1} + run_export=${export_value1} + elif [[ ${trainer} = ${trainer_key2} ]]; then + run_train=${trainer_value2} + run_export=${export_value2} + else + run_train=${PDSC_DIR}${norm_trainer} + echo -e '\n' ${run_train} '\n' + run_export=${norm_export} + fi + + if [ ${run_train} = "null" ]; then + continue + fi + set_autocast=$(func_set_params "${autocast_key}" "${autocast}") + set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}") + set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}") + set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}") + set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}") + set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu}") + if [ ${#ips} -le 26 ];then + save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" + nodes=1 + else + IFS="," + ips_array=(${ips}) + IFS="|" + nodes=${#ips_array[@]} + save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}" + fi + + + set_save_model=$(func_set_params "${save_model_key}" "${save_log}") + if [ ${#gpu} -le 2 ];then # train with cpu or single gpu + cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config} " + elif [ ${#ips} -le 26 ];then # train with multi-gpu + cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}" + else # train with multi-machine + cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1} ${set_amp_config}" + fi + # run train + eval $cmd + status_check $? 
"${cmd}" "${status_log}" + + set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}") + + # run eval + if [ ${eval_py} != "null" ]; then + eval ${env} + set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}") + eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}" + eval $eval_cmd + status_check $? "${eval_cmd}" "${status_log}" + fi + # run export model + if [ ${run_export} != "null" ]; then + # run export model + save_infer_path="${save_log}" + set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${train_model_name}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}") + export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}" + eval $export_cmd + status_check $? "${export_cmd}" "${status_log}" + + #run inference + eval $env + save_infer_path="${save_log}" + if [[ ${inference_dir} != "null" ]] && [[ ${inference_dir} != '##' ]]; then + infer_model_dir="${save_infer_path}/${inference_dir}" + else + infer_model_dir=${save_infer_path} + fi + func_inference "${python}" "${inference_py}" "${infer_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" + + eval "unset CUDA_VISIBLE_DEVICES" + fi + done # done with: for trainer in ${trainer_list[*]}; do + done # done with: for autocast in ${autocast_list[*]}; do + done # done with: for gpu in ${gpu_list[*]}; do +fi # end if [ ${MODE} = "infer" ]; then